id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
26409 | # Generated by Django 3.1 on 2020-09-01 17:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make Client.link and Client.time optional.

    Both fields are altered to accept blank form input and NULL in the database.
    """
    dependencies = [
        ('application', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='client',
            name='link',
            field=models.URLField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='client',
            name='time',
            # NOTE(review): max_length has no effect on DateTimeField and is
            # ignored by Django; it presumably mirrors a stray kwarg on the
            # model definition. Clean it up on the model first -- editing an
            # applied migration can desync migration state.
            field=models.DateTimeField(blank=True, max_length=50, null=True),
        ),
    ]
| StarcoderdataPython |
3250671 | from os import path
import cv2
import dlib
import numpy as np
from trash import filter
class dlibTracker:
    """
    Tracks faces in video frames, extracts facial features.

    Combines a dlib frontal-face detector (with an optional OpenCV Haar-cascade
    fallback), a dlib correlation tracker for frame-to-frame following, and a
    68-point shape predictor for landmark extraction.
    """
    def _iter_predict(self, prediction, start, end):
        # Yield (x, y) integer pixel tuples for landmark indices [start, end).
        # `prediction` is an (N, 2) array-like of landmark coordinates.
        for p in range(start, end):
            yield (int(prediction[p, 0]), int(prediction[p, 1]))
    def iter_eye_l(self, prediction):
        # Indices 36-41: left eye in the 68-point dlib annotation scheme.
        return self._iter_predict(prediction, 36, 42)
    def iter_eye_r(self, prediction):
        # Indices 42-47: right eye.
        return self._iter_predict(prediction, 42, 48)
    def iter_mouth(self, prediction):
        # Indices 48-67: mouth.
        return self._iter_predict(prediction, 48, 68)
    def iter_all(self, prediction):
        # All 68 landmarks.
        return self._iter_predict(prediction, 0, 68)
    def get_pred(self, prediction):
        # Raw dlib point sequence from a full_object_detection.
        return prediction.parts()
    def get_rect(self, prediction):
        # Bounding rectangle associated with the detection.
        return prediction.rect
    def get_ndarray(self, prediction):
        # Convert dlib landmark points into a (68, 2) float32 ndarray.
        np_pts = np.zeros([68, 2], dtype=np.float32)
        for i, p in enumerate(prediction.parts()):
            np_pts[i, :] = (p.x, p.y)
        return np_pts
    def __init__(self,
                 detection_threshold=0.05,
                 tracking_threshold=10,
                 use_opencv=False):
        """Load detector, tracker, predictor, and (optional) OpenCV cascade.

        Args:
            detection_threshold: minimum dlib detector score to accept a face.
            tracking_threshold: minimum correlation-tracker score; below it the
                tracker is considered lost and detection is re-run.
            use_opencv: enable the Haar-cascade fallback when dlib finds nothing.
        """
        self.detector = dlib.get_frontal_face_detector()
        self.tracker = dlib.correlation_tracker()
        # Model files are expected next to this module under ./models.
        model_dir = path.join(path.dirname(path.abspath(__file__)), 'models')
        self.predictor = dlib.shape_predictor(
            path.join(model_dir, 'shape_predictor_68_face_landmarks.dat'))
        self.face_cascade = cv2.CascadeClassifier(
            path.join(model_dir, 'haarcascade_frontalface_alt2.xml'))
        # True while the correlation tracker is locked onto a face.
        self.good = False
        self.dt = detection_threshold
        self.tt = tracking_threshold
        self.u_cv2 = use_opencv
        # ROI size is (height, width); the face crop is resized to this shape
        # before landmark prediction so the predictor sees a stable scale.
        self.roi_size = (800, 600)
        self.roi_buffer = np.zeros((self.roi_size[0], self.roi_size[1], 3), np.uint8)
        # dlib.rectangle takes (left, top, right, bottom) -> width then height.
        self.dlib_roi = dlib.rectangle(0, 0, self.roi_size[1], self.roi_size[0])
        # 136 = 68 landmarks x 2 coordinates; low-pass filter for jitter removal.
        self.filterLandmarks = filter.ButterFilterArray(n=136, n_filter=1, cutoff=0.2)
    def reset(self):
        # Drop the current track; the next track_face() call re-detects.
        self.good = False
    def track_face(self, opencv_img):
        """Detect or track a face in `opencv_img` and return its landmarks.

        Returns:
            (68, 2) float32 ndarray of landmark pixel coordinates in the
            original image frame, or None if no face could be located.
        """
        if not self.good:
            # Fresh detection: clear filter history so old frames don't leak in.
            self.filterLandmarks.reset()
            dets, scores, _ = self.detector.run(opencv_img, 1, self.dt)
            # print('detection scores: {}'.format(scores))
            if not dets:  # try opencv detector
                if self.u_cv2:
                    faces = self.face_cascade.detectMultiScale(opencv_img, 1.2, 7, 0, (50, 50))
                    if len(faces) > 0:
                        # Convert OpenCV (x, y, w, h) into a dlib rectangle.
                        (x, y, w, h) = faces[0]
                        dets = [dlib.rectangle(int(x), int(y), int(x + w), int(y + h))]
                if not dets:
                    return None
            self.good = True
            self.tracker.start_track(opencv_img, dets[0])
        else:
            score = self.tracker.update(opencv_img)
            # print('tracking score: {}'.format(score))
            if score < self.tt:
                # Track lost: fall back to detection by recursing once.
                self.good = False
                return self.track_face(opencv_img)
        d = self.tracker.get_position()
        # print self.history[0:self.N - 1, :]
        # print y[0:self.N - 1, :]
        # print '----------------------------------------------'
        rect = dlib.rectangle(
            int(round(d.left())),
            int(round(d.top())),
            int(round(d.right())),
            int(round(d.bottom())))
        # Reject boxes that spill outside the image; cropping would fail.
        if min(rect.top(), rect.left()) < 0 \
                or rect.bottom() > opencv_img.shape[0] \
                or rect.right() > opencv_img.shape[1]:
            return None
        # cv2.resize dsize is (width, height); result lands in roi_buffer.
        cv2.resize(opencv_img[rect.top():rect.bottom(), rect.left():rect.right()],
                   (self.roi_size[1], self.roi_size[0]), self.roi_buffer)
        prediction = self.predictor(self.roi_buffer, self.dlib_roi)
        # Scale/shift map ROI-space landmarks back into original image space.
        scale = np.array([(d.right() - d.left()) / self.roi_size[1],
                          (d.bottom() - d.top()) / self.roi_size[0]], dtype=np.float32)
        shift = np.array([d.left(), d.top()], dtype=np.float32)
        # landmarks = self.get_ndarray(prediction) * scale + shift
        # mean = np.mean(landmarks, axis=0)
        # return np.reshape(
        #     self.filterLandmarks.append_and_filter(
        #         np.reshape(landmarks - mean, (1, -1))),
        #     (-1, 2)) + mean
        return self.get_ndarray(prediction) * scale + shift
| StarcoderdataPython |
4805508 | <filename>tests/pandas/validation/test_pandas_validator_invalid_values.py<gh_stars>10-100
import pytest
import pandas as pd
from collections import ChainMap
import datetime
from arize.pandas.logger import Schema
from arize.pandas.validation.validator import Validator
import arize.pandas.validation.errors as err
def test_zero_errors():
    """The unmodified baseline fixture must validate cleanly."""
    assert not Validator.validate_values(**kwargs)
def test_invalid_ts_missing_value():
    """One NaN among the timestamps -> exactly one missing-value error."""
    ts = pd.Series([
        (datetime.datetime.now() - datetime.timedelta(days=365)).date(),
        float("NaN"),
    ])
    overrides = {"dataframe": pd.DataFrame({"prediction_timestamp": ts})}
    errors = Validator.validate_values(**ChainMap(overrides, kwargs))
    assert len(errors) == 1
    assert type(errors[0]) is err.InvalidValueMissingValue
def test_valid_ts_empty_df():
    """An empty timestamp column is valid: there is nothing to flag."""
    frame = pd.DataFrame({"prediction_timestamp": pd.Series([], dtype=float)})
    errors = Validator.validate_values(**ChainMap({"dataframe": frame}, kwargs))
    assert not errors
def test_invalid_ts_date32_min():
    """A date a full year in the past is out of range -> timestamp error."""
    too_old = (datetime.datetime.now() - datetime.timedelta(days=365)).date()
    frame = pd.DataFrame({"prediction_timestamp": pd.Series([too_old])})
    errors = Validator.validate_values(**ChainMap({"dataframe": frame}, kwargs))
    assert len(errors) == 1
    assert type(errors[0]) is err.InvalidValueTimestamp
def test_invalid_ts_date32_max():
    """A date beyond one year ahead is out of range -> timestamp error."""
    # 366 days: fudge one extra day because "now" keeps moving forward.
    too_far = (datetime.datetime.now() + datetime.timedelta(days=366)).date()
    frame = pd.DataFrame({"prediction_timestamp": pd.Series([too_far])})
    errors = Validator.validate_values(**ChainMap({"dataframe": frame}, kwargs))
    assert len(errors) == 1
    assert type(errors[0]) is err.InvalidValueTimestamp
def test_invalid_ts_float64_min():
    """A float epoch a full year in the past is out of range."""
    too_old = (datetime.datetime.now() - datetime.timedelta(days=365)).timestamp()
    frame = pd.DataFrame({"prediction_timestamp": pd.Series([too_old])})
    errors = Validator.validate_values(**ChainMap({"dataframe": frame}, kwargs))
    assert len(errors) == 1
    assert type(errors[0]) is err.InvalidValueTimestamp
def test_invalid_ts_float64_max():
    """A float epoch beyond one year ahead is out of range."""
    # 366 days: fudge one extra day because "now" keeps moving forward.
    too_far = (datetime.datetime.now() + datetime.timedelta(days=366)).timestamp()
    frame = pd.DataFrame({"prediction_timestamp": pd.Series([too_far])})
    errors = Validator.validate_values(**ChainMap({"dataframe": frame}, kwargs))
    assert len(errors) == 1
    assert type(errors[0]) is err.InvalidValueTimestamp
def test_invalid_ts_int64_min():
    """An integer epoch a full year in the past is out of range."""
    too_old = int(
        (datetime.datetime.now() - datetime.timedelta(days=365)).timestamp()
    )
    frame = pd.DataFrame({"prediction_timestamp": pd.Series([too_old])})
    errors = Validator.validate_values(**ChainMap({"dataframe": frame}, kwargs))
    assert len(errors) == 1
    assert type(errors[0]) is err.InvalidValueTimestamp
def test_invalid_ts_int64_max():
    """An integer epoch beyond one year ahead is out of range."""
    # 366 days: fudge one extra day because "now" keeps moving forward.
    too_far = int(
        (datetime.datetime.now() + datetime.timedelta(days=366)).timestamp()
    )
    frame = pd.DataFrame({"prediction_timestamp": pd.Series([too_far])})
    errors = Validator.validate_values(**ChainMap({"dataframe": frame}, kwargs))
    assert len(errors) == 1
    assert type(errors[0]) is err.InvalidValueTimestamp
def test_invalid_ts_datetime_min():
    """A datetime a full year in the past is out of range."""
    too_old = datetime.datetime.now() - datetime.timedelta(days=365)
    frame = pd.DataFrame({"prediction_timestamp": pd.Series([too_old])})
    errors = Validator.validate_values(**ChainMap({"dataframe": frame}, kwargs))
    assert len(errors) == 1
    assert type(errors[0]) is err.InvalidValueTimestamp
def test_invalid_ts_datetime_max():
    """A datetime beyond one year ahead is out of range."""
    # 366 days: fudge one extra day because "now" keeps moving forward.
    too_far = datetime.datetime.now() + datetime.timedelta(days=366)
    frame = pd.DataFrame({"prediction_timestamp": pd.Series([too_far])})
    errors = Validator.validate_values(**ChainMap({"dataframe": frame}, kwargs))
    assert len(errors) == 1
    assert type(errors[0]) is err.InvalidValueTimestamp
def test_invalid_prediction_label_none_value():
    """A None among prediction labels -> missing-value error."""
    frame = pd.DataFrame({"prediction_label": pd.Series(["foo", None, "baz"])})
    errors = Validator.validate_values(**ChainMap({"dataframe": frame}, kwargs))
    assert len(errors) == 1
    assert type(errors[0]) is err.InvalidValueMissingValue
def test_invalid_actual_label_none_value():
    """A None among actual labels -> missing-value error."""
    frame = pd.DataFrame({"actual_label": pd.Series(["foo", None, "baz"])})
    errors = Validator.validate_values(**ChainMap({"dataframe": frame}, kwargs))
    assert len(errors) == 1
    assert type(errors[0]) is err.InvalidValueMissingValue
def test_invalid_prediction_label_nan_value():
    """A NaN among numeric prediction labels -> missing-value error."""
    frame = pd.DataFrame({"prediction_label": pd.Series([0, float("NaN"), 1])})
    errors = Validator.validate_values(**ChainMap({"dataframe": frame}, kwargs))
    assert len(errors) == 1
    assert type(errors[0]) is err.InvalidValueMissingValue
def test_invalid_actual_label_nan_value():
    """A NaN among numeric actual labels -> missing-value error."""
    frame = pd.DataFrame({"actual_label": pd.Series([0, float("NaN"), 1])})
    errors = Validator.validate_values(**ChainMap({"dataframe": frame}, kwargs))
    assert len(errors) == 1
    assert type(errors[0]) is err.InvalidValueMissingValue
def test_invalid_prediction_label_inf_value():
    """An infinity among prediction labels is treated as missing."""
    frame = pd.DataFrame({"prediction_label": pd.Series([0, float("-inf"), 1])})
    errors = Validator.validate_values(**ChainMap({"dataframe": frame}, kwargs))
    assert len(errors) == 1
    assert type(errors[0]) is err.InvalidValueMissingValue
def test_invalid_actual_label_inf_value():
    """An infinity among actual labels is treated as missing."""
    frame = pd.DataFrame({"actual_label": pd.Series([0, float("-inf"), 1])})
    errors = Validator.validate_values(**ChainMap({"dataframe": frame}, kwargs))
    assert len(errors) == 1
    assert type(errors[0]) is err.InvalidValueMissingValue
def test_invalid_prediction_id_none():
    """A None among prediction ids -> missing-value error."""
    frame = pd.DataFrame({"prediction_id": pd.Series(["0", None, "1"])})
    errors = Validator.validate_values(**ChainMap({"dataframe": frame}, kwargs))
    assert len(errors) == 1
    assert type(errors[0]) is err.InvalidValueMissingValue
def test_multiple():
    """Several offending columns at once -> one error per problem."""
    stale_date = (datetime.datetime.now() - datetime.timedelta(days=365)).date()
    frame = pd.DataFrame(
        {
            "prediction_timestamp": pd.Series([stale_date] * 3),
            "prediction_label": pd.Series(["foo", None, "baz"]),
            "actual_label": pd.Series([0, float("NaN"), 1]),
        }
    )
    errors = Validator.validate_values(**ChainMap({"dataframe": frame}, kwargs))
    assert len(errors) == 3
    assert any(type(e) is err.InvalidValueTimestamp for e in errors)
    assert any(type(e) is err.InvalidValueMissingValue for e in errors)
# Shared baseline fixture: a full Schema plus a dataframe that validates
# cleanly. Tests override individual entries via ChainMap, so the override
# dict shadows keys here without mutating this baseline. Defined at the
# bottom of the module; this works because tests only read it at call time.
kwargs = {
    "schema": Schema(
        prediction_id_column_name="prediction_id",
        timestamp_column_name="prediction_timestamp",
        prediction_label_column_name="prediction_label",
        actual_label_column_name="actual_label",
        prediction_score_column_name="prediction_score",
        actual_score_column_name="actual_score",
        feature_column_names=list("ABCDEFG"),
        # SHAP columns a..f mirror features A..F (G has no SHAP column).
        shap_values_column_names=dict(zip("ABCDEF", "abcdef")),
    ),
    "dataframe": pd.DataFrame(
        {
            "prediction_id": pd.Series(["0", "1", "2"]),
            # All timestamps within +/- 364 days, i.e. inside the valid window.
            "prediction_timestamp": pd.Series(
                [
                    datetime.datetime.now(),
                    datetime.datetime.now() - datetime.timedelta(days=364),
                    datetime.datetime.now() + datetime.timedelta(days=364),
                ]
            ),
            "prediction_label": pd.Series(["fraud", "not fraud", "fraud"]),
            "prediction_score": pd.Series([0.2, 0.3, 0.4]),
            "actual_label": pd.Series(["not fraud", "fraud", "not fraud"]),
            "actual_score": pd.Series([0, 1, 0]),
            ##### Feature columns (NaN/None allowed for features).
            "A": pd.Series([0, 1, 2]),
            "B": pd.Series([0.0, 1.0, 2.0]),
            "C": pd.Series([float("NaN"), float("NaN"), float("NaN")]),
            "D": pd.Series([0, float("NaN"), 2]),
            "E": pd.Series([0, None, 2]),
            "F": pd.Series([None, float("NaN"), None]),
            "G": pd.Series(["foo", "bar", "baz"]),
            "H": pd.Series([True, False, True]),
            ##### SHAP value columns.
            "a": pd.Series([0, 1, 2]),
            "b": pd.Series([0.0, 1.0, 2.0]),
            "c": pd.Series([float("NaN"), float("NaN"), float("NaN")]),
            "d": pd.Series([0, float("NaN"), 2]),
            "e": pd.Series([0, None, 2]),
            "f": pd.Series([None, float("NaN"), None]),
        }
    ),
}
# Allow running this test module directly: delegate to pytest and propagate
# its exit code to the shell.
if __name__ == "__main__":
    raise SystemExit(pytest.main([__file__]))
| StarcoderdataPython |
3281181 | #!/usr/bin/env python
# Copyright 2016 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Configuration Plugin.
This module is used to override the configuration with platform specific constraints and extensions
"""
import abc
from . import specs
class ConfigPluginBase(object, metaclass=abc.ABCMeta):
    """Base class for config plugins.

    Concrete plugins override the abstract methods to inject platform-specific
    configuration, specs, validation, and version reporting.
    """
    class InitializationFailure(Exception):
        """Used in case of any init failure."""
    def __init__(self, config):
        """Save configuration.

        Raises:
            ConfigPluginBase.InitializationFailure: if config is empty/None.
        """
        if not config:
            raise ConfigPluginBase.InitializationFailure(
                'Initialization parameters need to be assigned.')
        self.config = config
    @abc.abstractmethod
    def get_config(self):
        """Return updated default configuration file."""
    def set_config(self, config):
        """Set a new configuration.

        This method is called when the config has changed after this instance was initialized.
        This is needed in the frequent case where the main config is changed in a copy and to
        prevent this instance to keep pointing to the old copy of the config
        """
        self.config = config
    @abc.abstractmethod
    def get_openstack_spec(self):
        """Return OpenStack specs for host."""
    @abc.abstractmethod
    def get_run_spec(self, config, openstack_spec):
        """Return RunSpec for given platform."""
    @abc.abstractmethod
    def validate_config(self, cfg, openstack_spec):
        """Validate config file."""
    @abc.abstractmethod
    def prepare_results_config(self, cfg):
        """Insert any plugin specific information to the results.

        This function is called before running configuration is copied.
        Example usage is to remove sensitive information like switch credentials.
        """
    @abc.abstractmethod
    def get_version(self):
        """Return platform version."""
class ConfigPlugin(ConfigPluginBase):
    """Default pass-through plugin: applies no platform-specific overrides."""
    def __init__(self, config):
        """Delegate configuration storage to the base class."""
        super(ConfigPlugin, self).__init__(config)
    def get_config(self):
        """Return the stored configuration unchanged."""
        return self.config
    def get_openstack_spec(self):
        """Return a default OpenStack spec object."""
        return specs.OpenStackSpec()
    def get_run_spec(self, config, openstack_spec):
        """Build a RunSpec from the given config and OpenStack spec."""
        return specs.RunSpec(config.no_vswitch_access, openstack_spec)
    def validate_config(self, cfg, openstack_spec):
        """No platform-specific validation to perform."""
    def prepare_results_config(self, cfg):
        """Return the results configuration untouched."""
        return cfg
    def get_version(self):
        """No platform version information is available; return an empty dict."""
        return {}
| StarcoderdataPython |
1600007 | <filename>server/app/schema.py
"""GraphQL Schema Module"""
from datetime import datetime
import graphene
from graphene_sqlalchemy import SQLAlchemyObjectType, SQLAlchemyConnectionField
from database.model_questions import QuestionModel
from database.model_users import UserModel, CourseModel
from database.base import db_session
import utils
from rx import Observable
import rx
from schemas.schema_user import (
User,
CreateUser,
UpdateUser,
Course
)
from schemas.schema_question import (
Question,
CreateQuestion,
UpdateQuestion,
RemoveQuestion
)
import logging
logger = logging.getLogger("TestLogger")
class Query(graphene.ObjectType):
    """Query objects with GraphQL API."""
    node = graphene.relay.Node.Field()
    # Get user by unique ID
    user = graphene.relay.Node.Field(User)
    # List all users (w/ Pagination) -> Similar to /users route
    userList = SQLAlchemyConnectionField(User)
    # Get question by unique ID
    question = graphene.relay.Node.Field(Question)
    # List all questions (w/ Pagination) -> Similar to /requests route
    questionList = SQLAlchemyConnectionField(Question)
    # Get user by netid
    user_netid = graphene.Field(User, netid=graphene.String(required=True))
    # check queue pos by user id -> Similar to /check_pos/<int:user_id>
    queue_pos = graphene.Field(graphene.Int, netid=graphene.String(required=True), course_id=graphene.Int(required=True))
    # check entire course queue -> Similar to /requests/<int:course_id>
    course_queue = graphene.List(Question, course_id=graphene.Int(required=True), active=graphene.Boolean(required=False, default_value=True))
    course_name_id = graphene.Field(graphene.Int, course_name=graphene.String(required=True))
    def resolve_user_netid(self, info, netid):
        """Find users by netid. Returns the first match or None."""
        query = User.get_query(info=info)
        query = query.filter(UserModel.netid == netid)
        user = query.first()
        return user
    def resolve_queue_pos(self, info, netid, course_id):
        """Find queue pos based on netid.

        Returns the active queue position, or -2 when the user has no active
        question in the given course.
        """
        # NOTE(review): base query is built from User but every filter targets
        # QuestionModel columns; with_entities() narrows the SELECT, but the
        # base entity looks like it should be Question -- confirm intended.
        query = User.get_query(info=info)
        query = query.filter(
            QuestionModel.netid == netid
        ).filter(
            QuestionModel.queue_pos != -1
        ).filter(
            QuestionModel.course_id == course_id
        ).with_entities(
            QuestionModel.queue_pos
        )
        # query.first() is evaluated twice here (two database round trips).
        pos = query.first()[0] if query.first() else -2
        return pos
    def resolve_course_queue(self, info, course_id, active):
        """List of all questions in course (optionally only active ones)."""
        query = Question.get_query(info=info)
        query = query.filter(QuestionModel.course_id == course_id)
        if active:
            # Active questions still hold a non-negative queue position.
            query = query.filter(QuestionModel.queue_pos >= 0)
        return query.all()
    def resolve_course_name_id(self, info, course_name):
        """Course id from a name; 0 signals 'course not found'."""
        query = Course.get_query(info=info)
        query = query.filter(CourseModel.course_name == course_name).with_entities(CourseModel.course_id)
        # query.first() is evaluated twice here (two database round trips).
        res = query.first()[0] if query.first() else 0
        return res
class Mutation(graphene.ObjectType):
    """Root GraphQL mutation: create/update operations for users and questions."""
    create_user = CreateUser.Field()
    update_user = UpdateUser.Field()
    create_question = CreateQuestion.Field()
    update_question = UpdateQuestion.Field()
class Subscription(graphene.ObjectType):
    """GraphQL subscriptions streaming periodic values to clients via Rx."""
    queue_len = graphene.Int(course_id=graphene.String())
    count_seconds = graphene.Int(up_to=graphene.Int())
    def resolve_queue_len(self, info, course_id):
        # `course_id` actually arrives as a course *name*; translate it to the
        # numeric id first.
        # NOTE(review): .first()[0] raises TypeError when the course name is
        # unknown -- confirm upstream guarantees the course exists.
        course_id = Course.get_query(info=info).filter(
            CourseModel.course_name == course_id
        ).with_entities(CourseModel.course_id).first()[0]
        # Emit the current queue length (questions with queue_pos > 0) every 2s,
        # indefinitely (take_while is always True).
        return Observable.interval(2000)\
            .map(lambda i: "{0}".format(
                len(Question.get_query(info = info).filter(
                    QuestionModel.course_id == course_id
                ).filter(
                    QuestionModel.queue_pos > 0
                ).all()
                )
            )).take_while(lambda i: True)
    def resolve_count_seconds(self, info, up_to):
        # Demo/debug stream: counts 0..up_to, one tick per second.
        return Observable.interval(1000)\
            .map(lambda i: "{0}".format(i))\
            .take_while(lambda i: int(i) <= up_to)
| StarcoderdataPython |
1757172 | import io
import sys
from glob import iglob
from os import makedirs, path, remove, removedirs
from pprint import pformat
import nbformat
from mkdocs.config import config_options
from mkdocs.plugins import BasePlugin
from mkdocs.structure.files import File
from mkdocs.utils import log
from nbconvert import MarkdownExporter
PYTHON_VERSION_MAJOR_MINOR = '{0}.{1}'.format(*sys.version_info)
class NbConvertPlugin(BasePlugin):
    """MkDocs plugin that exports Jupyter notebooks to markdown pages.

    Notebooks found under ``input_dir`` (optionally recursively) are converted
    with nbconvert's MarkdownExporter into ``output_dir`` inside ``docs_dir``,
    registered with MkDocs as regular files, and the generated sources are
    deleted again after the build.
    """
    config_scheme = (
        (
            'input_dir',
            config_options.OptionallyRequired(default='notebooks')
        ), (
            'recursive',
            config_options.OptionallyRequired(default=False)
        ), (
            'output_dir',
            config_options.OptionallyRequired(default='notebooks')
        ),
    )
    def on_files(self, files, config, **kwargs):
        """Convert notebooks to markdown and append them to the file collection.

        Args:
            files: MkDocs file collection being assembled.
            config: the global MkDocs configuration.

        Returns:
            The file collection with one File per converted notebook appended.
        """
        log.info('nbconvert: plugin config=%s', pformat(self.config))
        # Paths of generated markdown/resources, removed in on_post_build.
        self._src_files = []
        # Resolve input/output directories relative to mkdocs.yml / docs_dir.
        config_file_dir = path.dirname(config['config_file_path'])
        input_dir = path.normpath(self.config['input_dir'])
        output_dir = path.realpath(path.join(
            config['docs_dir'],
            path.normpath(self.config['output_dir'])
        ))
        if not path.isabs(input_dir):
            input_dir = path.realpath(path.join(config_file_dir, input_dir))
        # glob match
        # BUG FIX: the original guard compared version *strings*
        # (PYTHON_VERSION_MAJOR_MINOR >= '3.5'), which is lexicographic and
        # yields False for Python 3.10+ ('3.10' < '3.5'), silently disabling
        # recursive globbing. Compare the version tuple instead.
        glob_recursive = self.config['recursive'] if sys.version_info >= (3, 5) else False
        if glob_recursive:
            nb_paths_iter = iglob(
                path.join(config_file_dir, input_dir, '**', '*.ipynb'),
                recursive=True
            )
        else:
            nb_paths_iter = iglob(path.join(config_file_dir, input_dir, '*.ipynb'))
        # Exporter
        exporter = MarkdownExporter()
        # Converting
        for nb_path in nb_paths_iter:
            # Prepare output file/dir, mirroring the notebook's sub-directory.
            nb_dirname, nb_basename = path.split(nb_path)
            nb_basename_root, _ = path.splitext(nb_basename)
            nb_subdir = path.relpath(nb_dirname, input_dir)
            md_dir = path.join(output_dir, nb_subdir)
            md_basename = '{0}.md'.format(nb_basename_root)
            md_path = path.join(md_dir, md_basename)
            md_rel_dir = path.relpath(md_dir, config['docs_dir'])
            md_rel_path = path.join(md_rel_dir, md_basename)
            log.debug(
                'nbconvert: markdown export %s => %s',
                nb_path, md_path
            )
            # run nbconvert
            with io.open(nb_path, encoding='utf-8') as fp:
                nb_node = nbformat.read(fp, nbformat.NO_CONVERT)
            body, resources = exporter.from_notebook_node(nb_node)
            # save exported markdown next to docs so MkDocs can pick it up
            if not path.exists(md_dir):
                makedirs(md_dir)
            with io.open(md_path, 'w', encoding='utf-8') as fp:
                fp.write(body)
            file_obj = File(
                path=md_rel_path,
                src_dir=config['docs_dir'],
                dest_dir=config['site_dir'],
                use_directory_urls=config['use_directory_urls']
            )
            # Write extracted resources (images, ...) into BOTH the source tree
            # (so relative links resolve) and the site output directory.
            for resource_name, resource_data in resources['outputs'].items():
                resource_src_dir = path.dirname(file_obj.abs_src_path)
                resource_src_path = path.join(resource_src_dir, resource_name)
                if not path.isdir(resource_src_dir):
                    makedirs(resource_src_dir)
                with io.open(resource_src_path, 'wb') as fp:
                    fp.write(resource_data)
                self._src_files.append(resource_src_path)
                resource_dest_dir = path.dirname(file_obj.abs_dest_path)
                resource_dest_path = path.join(resource_dest_dir, resource_name)
                log.debug(
                    'nbconvert: resource output(%dBytes): %s => %s',
                    len(resource_data), resource_name, resource_dest_path
                )
                if not path.isdir(resource_dest_dir):
                    makedirs(resource_dest_dir)
                with io.open(resource_dest_path, 'wb') as fp:
                    fp.write(resource_data)
            log.debug(
                'nbconvert: add file object<abs_src_path=%s abs_dest_path=%s url=%s>',
                file_obj.abs_src_path, file_obj.abs_dest_path, file_obj.url
            )
            self._src_files.append(file_obj.abs_src_path)
            files.append(file_obj)
        return files
    def on_post_build(self, config, **kwargs):
        """Remove the generated markdown/resources from the docs tree."""
        for file in self._src_files:
            log.debug('nbconvert: remove %s', file)
            remove(file)
        output_dir = path.join(
            config['docs_dir'],
            path.normpath(self.config['output_dir'])
        )
        log.debug('nbconvert: removedirs %s', output_dir)
        try:
            # removedirs only deletes empty directories; it raises when a user
            # file remains, which we log as best-effort cleanup.
            removedirs(output_dir)
        except OSError as err:
            log.warning('nbconvert: removedirs %s', err)
| StarcoderdataPython |
165915 | <filename>nbpresent/tasks/requirejs.py<gh_stars>0
from subprocess import Popen
import sys
from ._env import (
SRC,
join,
node_bin,
IS_WIN,
)
def main(**opts):
    """Run the r.js optimizer over src/js/build.js and return its exit code.

    Extra command-line arguments may be supplied via opts["requirejs"].
    """
    optimizer = node_bin("r.js{}".format(".cmd" if IS_WIN else ""))
    cmd = [optimizer, "-o", join(SRC, "js", "build.js")]
    cmd += opts.get("requirejs", [])
    return Popen(cmd, shell=IS_WIN).wait()
# Script entry point: exit with the optimizer's return code.
if __name__ == "__main__":
    sys.exit(main())
| StarcoderdataPython |
1766661 | <gh_stars>0
import unittest
from unittest.mock import MagicMock
from lxml import html
from naotomori.cogs.source.manga.mangadex import MangaDex
class TestMangaDex(unittest.TestCase):
    """Tests for the MangaDex"""
    def setUp(self):
        # Fresh scraper instance per test; no network access is performed --
        # all tests parse the saved fixture page under test/test_data.
        self.mangadex = MangaDex()
    def test_findMangaElements(self):
        """Test getting the manga html elements from a given MangaDex html file"""
        with open('test/test_data/mangadex.html') as file:
            tree = html.fromstring(file.read())
        mangas = self.mangadex._findMangaElements(tree)
        self.assertEqual(len(mangas), 42)
        titles = []
        for manga in mangas:
            # Each entry carries its title in an <a class="manga_title"> node.
            title = manga.xpath(".//a[contains(concat(' ', normalize-space(@class), ' '), ' manga_title ')]")[0]
            titles.append(title.text_content())
        self.assertEqual(titles[:10], [
            'Hana wa Junai ni Junjiru',
            'trash.',
            'Paradise Baby',
            'Kumo Desu ga, Nani ka? Daily Life of the Four Spider Sisters',
            'Kannou Shousetsuka no Neko',
            'Persona 5 Mementos Mission',
            'Metropolitan System',
            'Ouji Hiroimashita',
            'Honoo no Suna',
            'Enslave Lover'
        ])
    def test_getRecentManga(self):
        """Test getting the most recent manga from the MangaDex homepage"""
        with open('test/test_data/mangadex.html') as file:
            tree = html.fromstring(file.read())
        mangas = self.mangadex._findMangaElements(tree)
        # Stub out element discovery so getRecent() runs on the fixture data.
        self.mangadex._findMangaElements = MagicMock(return_value=mangas)
        recentManga = self.mangadex.getRecent()
        self.assertEqual(len(recentManga), 16)
        self.assertEqual([manga.title for manga in recentManga], [
            'Hana wa Junai ni Junjiru',
            'trash.',
            'Paradise Baby',
            'Kumo Desu ga, Nani ka? Daily Life of the Four Spider Sisters',
            'Kannou Shousetsuka no Neko',
            'Persona 5 Mementos Mission',
            'Metropolitan System',
            'Ouji Hiroimashita',
            'Honoo no Suna',
            'Enslave Lover',
            '2D Partner',
            'Nessa no Kusari',
            'Bliss~End Of Gods',
            'Shikanoko Nokonoko Koshitantan',
            'Koi no Myouyaku',
            'Sawatte, Tokashite'
        ])
        self.assertEqual([manga.progress for manga in recentManga], [
            'Vol. 1 Chapter 9.5',
            'Vol. 7 Chapter 51',
            'Vol. 1 Chapter 7.5',
            'Chapter 46',
            'Vol. 1 Chapter 2',
            'Vol. 1 Chapter 4.5',
            'Chapter 323',
            'Vol. 1 Chapter 7.5',
            'Vol. 1 Chapter 6.5',
            'Vol. 1 Chapter 7.5',
            'Chapter 16',
            'Vol. 1 Chapter 6.6',
            'Chapter 2',
            'Chapter 2',
            'Vol. 1 Chapter 4',
            'Vol. 1 Chapter 6.5'
        ])
        self.assertEqual([manga.link for manga in recentManga], [
            'https://mangadex.org/chapter/1002666',
            'https://mangadex.org/chapter/1002657',
            'https://mangadex.org/chapter/1002656',
            'https://mangadex.org/chapter/1002650',
            'https://mangadex.org/chapter/1002644',
            'https://mangadex.org/chapter/1002636',
            'https://mangadex.org/chapter/1002635',
            'https://mangadex.org/chapter/1002634',
            'https://mangadex.org/chapter/1002624',
            'https://mangadex.org/chapter/1002612',
            'https://mangadex.org/chapter/1002608',
            'https://mangadex.org/chapter/1002601',
            'https://mangadex.org/chapter/1002586',
            'https://mangadex.org/chapter/1002583',
            'https://mangadex.org/chapter/1002576',
            'https://mangadex.org/chapter/1002572'
        ])
| StarcoderdataPython |
3226869 | from .conf import *
from gym_electric_motor.physical_systems import *
from gym_electric_motor.utils import make_module, set_state_array
from gym_electric_motor import ReferenceGenerator, RewardFunction, PhysicalSystem, ElectricMotorVisualization, \
ConstraintMonitor
from gym_electric_motor.physical_systems import PowerElectronicConverter, MechanicalLoad, ElectricMotor, OdeSolver, \
VoltageSupply, NoiseGenerator
import gym_electric_motor.physical_systems.converters as cv
from gym_electric_motor.physical_systems.physical_systems import SCMLSystem
import numpy as np
from gym.spaces import Box, Discrete
from scipy.integrate import ode
from tests.conf import system, jacobian, permex_motor_parameter
from gym_electric_motor.utils import instantiate
from gym_electric_motor.core import Callback
# region first version
def setup_physical_system(motor_type, converter_type, subconverters=None, three_phase=False):
    """
    Function to set up a physical system with test parameters
    :param motor_type: motor name (string)
    :param converter_type: converter name (string)
    :param subconverters: optional subconverter specification forwarded to the converter
    :param three_phase: if True, than a synchronous motor system will be instantiated
    :return: instantiated physical system
    """
    # get test parameter
    tau = converter_parameter['tau']
    u_sup = test_motor_parameter[motor_type]['motor_parameter']['u_sup']
    motor_parameter = test_motor_parameter[motor_type]['motor_parameter']  # dict
    nominal_values = test_motor_parameter[motor_type]['nominal_values']  # dict
    limit_values = test_motor_parameter[motor_type]['limit_values']  # dict
    # setup load
    load = PolynomialStaticLoad(load_parameter=load_parameter['parameter'])
    # setup voltage supply
    voltage_supply = IdealVoltageSupply(u_sup)
    # setup converter
    if motor_type == 'DcExtEx':
        # Externally-excited DC motors need two identical converters wrapped
        # into a Multi converter (discrete or continuous variant).
        if 'Disc' in converter_type:
            double_converter = 'Disc-Multi'
        else:
            double_converter = 'Cont-Multi'
        converter = make_module(PowerElectronicConverter, double_converter,
                                subconverters=[converter_type, converter_type],
                                tau=converter_parameter['tau'],
                                dead_time=converter_parameter['dead_time'],
                                interlocking_time=converter_parameter['interlocking_time'])
    else:
        converter = make_module(PowerElectronicConverter, converter_type,
                                subconverters=subconverters,
                                tau=converter_parameter['tau'],
                                dead_time=converter_parameter['dead_time'],
                                interlocking_time=converter_parameter['interlocking_time'])
    # setup motor
    motor = make_module(ElectricMotor, motor_type, motor_parameter=motor_parameter, nominal_values=nominal_values,
                        limit_values=limit_values)
    # setup solver
    solver = ScipySolveIvpSolver(method='RK45')
    # combine all modules to a physical system
    if three_phase:
        # Three-phase machines map to their dedicated SCML system classes.
        if motor_type == "SCIM":
            physical_system = SquirrelCageInductionMotorSystem(converter=converter, motor=motor, ode_solver=solver,
                                                               supply=voltage_supply, load=load, tau=tau)
        elif motor_type == "DFIM":
            physical_system = DoublyFedInductionMotor(converter=converter, motor=motor, ode_solver=solver,
                                                      supply=voltage_supply, load=load, tau=tau)
        else:
            physical_system = SynchronousMotorSystem(converter=converter, motor=motor, ode_solver=solver,
                                                     supply=voltage_supply, load=load, tau=tau)
    else:
        physical_system = DcMotorSystem(converter=converter, motor=motor, ode_solver=solver,
                                        supply=voltage_supply, load=load, tau=tau)
    return physical_system
def setup_reference_generator(reference_type, physical_system, reference_state='omega'):
    """Instantiate, wire, and reset a reference generator for tests.

    :param reference_type: name of reference generator
    :param physical_system: instantiated physical system
    :param reference_state: referenced state name (string)
    :return: instantiated reference generator
    """
    ref_gen = make_module(ReferenceGenerator, reference_type, reference_state=reference_state)
    ref_gen.set_modules(physical_system)
    ref_gen.reset()
    return ref_gen
def setup_reward_function(reward_function_type, physical_system, reference_generator, reward_weights, observed_states):
    """Instantiate a reward function and wire it to the given system and reference generator."""
    rf = make_module(
        RewardFunction,
        reward_function_type,
        observed_states=observed_states,
        reward_weights=reward_weights,
    )
    rf.set_modules(physical_system, reference_generator)
    return rf
def setup_dc_converter(conv, motor_type, subconverters=None):
    """
    This function initializes the converter.
    It differentiates between single and double converter and can be used for discrete and continuous converters.
    :param conv: converter name (string)
    :param motor_type: motor name (string)
    :param subconverters: optional subconverter specification for single converters
    :return: initialized converter
    """
    def _single(key, subconvs=None):
        # One converter with the shared test timing parameters; the original
        # repeated this make_module() call three times with identical kwargs.
        return make_module(PowerElectronicConverter, key,
                           subconverters=subconvs,
                           tau=converter_parameter['tau'],
                           dead_time=converter_parameter['dead_time'],
                           interlocking_time=converter_parameter['interlocking_time'])
    if motor_type == 'DcExtEx':
        # Externally-excited DC motor: wrap two identical converters in a
        # Multi converter (discrete or continuous variant).
        double_converter = 'Disc-Multi' if 'Disc' in conv else 'Cont-Multi'
        return make_module(PowerElectronicConverter, double_converter,
                           interlocking_time=converter_parameter['interlocking_time'],
                           dead_time=converter_parameter['dead_time'],
                           subconverters=[_single(conv), _single(conv)])
    # setup single converter
    return _single(conv, subconverters)
# endregion
# region second version
# Log of instantiate() calls made through mock_instantiate:
# maps superclass -> {'key': requested key, 'instance': returned object},
# letting tests assert which module was built and inspect the instance.
instantiate_dict = {}
def mock_instantiate(superclass, key, **kwargs):
    # Instantiate the object and log the passed and returned values to validate correct function calls
    instantiate_dict[superclass] = {}
    instantiate_dict[superclass]['key'] = key
    inst = instantiate(superclass, key, **kwargs)
    instantiate_dict[superclass]['instance'] = inst
    return inst
class DummyReferenceGenerator(ReferenceGenerator):
    """Stub reference generator that records every argument it receives,
    so tests can assert on the interaction with the environment."""
    reference_space = Box(0, 1, shape=(1,))
    # Number of reset() calls observed.
    _reset_counter = 0
    def __init__(self, reference_observation=np.array([1]), reference_state='dummy_state_0', **kwargs):
        # Store parameters/state for later inspection by tests.
        self.kwargs = kwargs
        self.closed = False
        self.physical_system = None
        # Last state passed to get_reference() / get_reference_observation().
        self.get_reference_state = None
        self.get_reference_obs_state = None
        # Fixed sine trajectory returned as third element of reset().
        self.trajectory = np.sin(np.linspace(0, 50, 100))
        self._reference_state = reference_state
        self.reference_observation = reference_observation
        self.reference_array = None
        self.kwargs = kwargs
    def set_modules(self, physical_system):
        # Remember the physical system and derive a boolean mask that marks
        # the referenced state inside the system's state array.
        self.physical_system = physical_system
        self.reference_array = np.ones_like(physical_system.state_names).astype(float)
        super().set_modules(physical_system)
        self._referenced_states = set_state_array(
            {self._reference_state: 1}, physical_system.state_names
        ).astype(bool)
    def reset(self, initial_state=None, initial_reference=None):
        self._reset_counter += 1
        res = super().reset(initial_state, initial_reference)
        return res[0], res[1], self.trajectory
    def get_reference(self, state, *_, **__):
        # Record the queried state; always return the constant reference array.
        self.get_reference_state = state
        return self.reference_array
    def get_reference_observation(self, state, *_, **__):
        self.get_reference_obs_state = state
        return self.reference_observation
    def close(self):
        self.closed = True
        super().close()
class DummyRewardFunction(RewardFunction):
    """Mock reward function that logs the arguments of every call."""
    def __init__(self, **kwargs):
        self.last_state = None
        self.last_reference = None
        self.last_action = None
        self.last_time_step = None
        self.closed = False
        self.done = False
        self.kwargs = kwargs
        super().__init__()
    def reset(self, initial_state=None, initial_reference=None):
        # Remember the values the environment resets with.
        self.last_state = initial_state
        self.last_reference = initial_reference
        super().reset(initial_state, initial_reference)
    def reward(self, state, reference, k=None, action=None, violation_degree=0.0):
        """Log all arguments; return -1 on a full limit violation, +1 otherwise."""
        self.last_state = state
        self.last_reference = reference
        self.last_action = action
        self.last_time_step = k
        return -1 if violation_degree == 1 else 1
    def close(self):
        self.closed = True
        super().close()
    def _limit_violation_reward(self, state):
        # Abstract in the base class; not needed for these tests.
        pass
    def _reward(self, state, reference, action):
        # Abstract in the base class; not needed for these tests.
        pass
class DummyPhysicalSystem(PhysicalSystem):
    """Mock physical system with configurable state size and deterministic dynamics."""
    @property
    def limits(self):
        """
        Returns:
             ndarray(float): An array containing the maximum allowed physical values for each state variable.
        """
        return self._limits
    @property
    def nominal_state(self):
        """
        Returns:
             ndarray(float): An array containing the nominal values for each state variable.
        """
        return self._nominal_values
    def __init__(self, state_length=1, state_names='dummy_state', **kwargs):
        # Fixed one-dimensional action space; state names are enumerated.
        super().__init__(
            Box(-1, 1, shape=(1,)), Box(-1, 1, shape=(state_length,)),
            [f'{state_names}_{i}' for i in range(state_length)], 1
        )
        # Deterministic per-state limits (10, 20, ...) and nominal values (1, 2, ...).
        self._limits = np.array([10 * (i + 1) for i in range(state_length)])
        self._nominal_values = np.array([(i + 1) for i in range(state_length)])
        self.action = None
        self.state = None
        self.closed = False
        self.kwargs = kwargs
    def reset(self, initial_state=None):
        # Resets to the all-zero state regardless of the requested initial state.
        self.state = np.array([0] * len(self._state_names))
        return self.state
    def simulate(self, action):
        """Store the action and return state_i = action * (i + 1)."""
        self.action = action
        self.state = np.array([action * (i + 1) for i in range(len(self._state_names))])
        return self.state
    def close(self):
        self.closed = True
        super().close()
class DummyVisualization(ElectricMotorVisualization):
    """Mock visualization that only records what it was called with."""
    def __init__(self, **kwargs):
        self.closed = False
        self.state = None
        self.reference = None
        self.reward = None
        self.reference_trajectory = None
        self.physical_system = None
        self.reference_generator = None
        self.reward_function = None
        self.kwargs = kwargs
        super().__init__()
    def step(self, state, reference, reward, *_, **__):
        # Remember the latest step data for test assertions.
        self.state = state
        self.reference = reference
        self.reward = reward
    def reset(self, reference_trajectories=None, *_, **__):
        self.reference_trajectory = reference_trajectories
    def set_modules(self, physical_system, reference_generator, reward_function):
        # Store the modules so tests can verify correct wiring.
        self.physical_system = physical_system
        self.reference_generator = reference_generator
        self.reward_function = reward_function
class DummyVoltageSupply(VoltageSupply):
    """Mock voltage supply that counts calls and echoes its nominal voltage."""
    def __init__(self, u_nominal=560, tau=1e-4, **kwargs):
        super().__init__(u_nominal, tau=tau)
        self.i_sup = None
        self.t = None
        self.reset_counter = 0
        self.args = None
        self.kwargs = kwargs
        self.get_voltage_counter = 0
    def reset(self):
        self.reset_counter += 1
        return super().reset()
    def get_voltage(self, i_sup, t, *args, **kwargs):
        """Record all arguments and return the constant nominal voltage."""
        self.get_voltage_counter += 1
        self.i_sup = i_sup
        self.t = t
        self.args = args
        self.kwargs = kwargs
        return [self._u_nominal]
class DummyConverter(PowerElectronicConverter):
    """Mock power electronic converter with configurable action/voltage/current spaces."""
    voltages = Box(0, 1, shape=(1,))
    currents = Box(-1, 1, shape=(1,))
    action_space = Discrete(4)
    def __init__(self, tau=2E-4, dead_time=False, interlocking_time=0, action_space=None, voltages=None, currents=None, **kwargs):
        super().__init__(tau, dead_time, interlocking_time)
        # Fall back to the class-level defaults when nothing is passed.
        self.action_space = action_space or self.action_space
        self.voltages = voltages or self.voltages
        self.currents = currents or self.currents
        self.reset_counter = 0
        self.convert_counter = 0
        self.switching_times = [tau]
        self.action = None
        self.action_set_time = None
        self.i_out = None
        self.last_i_out = None
        self.t = None
        self.kwargs = kwargs
        self.u_in = None
    def i_sup(self, i_out):
        # Supply current is simply the first output-current component.
        self.last_i_out = i_out
        return i_out[0]
    def set_switching_times(self, switching_times):
        self.switching_times = switching_times
    def set_action(self, action, t):
        """Store the action and return the two switching instants within tau."""
        self.action_set_time = t
        self.action = action
        return [t + self._tau / 2, t + self._tau]
    def reset(self):
        self.reset_counter += 1
        return [0.0] * self.voltages.shape[0]
    def convert(self, i_out, t):
        """Log the call and echo the action as input voltage(s)."""
        self.i_out = i_out
        self.t = t
        self.convert_counter += 1
        # Discrete actions are wrapped into a list; continuous ones pass through.
        self.u_in = [self.action] if type(self.action_space) is Discrete else self.action
        return self.u_in
class DummyElectricMotor(ElectricMotor):
    """Mock electric motor with two currents and one voltage."""
    # defined test values
    _default_motor_parameter = permex_motor_parameter['motor_parameter']
    _default_limits = dict(omega=16, torque=26, u=15, i=26, i_0=26, i_1=21, u_0=15)
    _default_nominal_values = dict(omega=14, torque=20, u=15, i=22, i_0=22, i_1=20)
    HAS_JACOBIAN = True
    # Value returned by electrical_jacobian; set by tests.
    electrical_jac_return = None
    CURRENTS_IDX = [0, 1]
    CURRENTS = ['i_0', 'i_1']
    VOLTAGES = ['u_0']
    def __init__(self, tau=1e-5, **kwargs):
        self.kwargs = kwargs
        self.reset_counter = 0
        self.u_in = None
        super().__init__(tau=tau, **kwargs)
    def electrical_ode(self, state, u_in, omega, *_):
        # Simple deterministic dynamics: derivative = state - input voltage.
        self.u_in = u_in
        return state - u_in
    def reset(self, state_space, state_positions):
        self.reset_counter += 1
        return super().reset(state_space, state_positions)
    def torque(self, currents):
        # Torque is the product of all current components.
        return np.prod(currents)
    def i_in(self, state):
        # Input current is the sum over all state components.
        return [np.sum(state)]
    def electrical_jacobian(self, state, u_in, omega, *_):
        return self.electrical_jac_return
class PowerElectronicConverterWrapper(cv.PowerElectronicConverter):
    """Wrap a real converter, forwarding all calls while logging arguments and results."""
    def __init__(self, subconverter, **kwargs):
        super().__init__(**kwargs)
        self._converter = subconverter
        # Mirror the wrapped converter's spaces so the wrapper is a drop-in replacement.
        self.action_space = self._converter.action_space
        self.currents = self._converter.currents
        self.voltages = self._converter.voltages
        self.reset_calls = 0
        self.set_action_calls = 0
        self.last_action = None
        self.last_t = None
        self.last_i_out = None
        self.last_u = None
        self.last_i_sup = None
    def reset(self):
        self.reset_calls += 1
        return self._converter.reset()
    def set_action(self, action, t):
        # NOTE(review): set_action_calls is initialized but never incremented
        # here — confirm whether any test relies on that counter.
        self.last_action = action
        self.last_t = t
        return self._converter.set_action(action, t)
    def convert(self, i_out, t):
        # Forward and remember both input and result.
        self.last_i_out = i_out
        self.last_t = t
        self.last_u = self._converter.convert(i_out, t)
        return self.last_u
    def i_sup(self, i_out):
        self.last_i_out = i_out
        self.last_i_sup = self._converter.i_sup(i_out)
        return self.last_i_sup
class DummyScipyOdeSolver(ode):
    """
    Dummy class for ScipyOdeSolver: asserts that scipy's ode interface is
    called with exactly the expected arguments and counts every call.
    """
    # defined test values
    _kwargs = {'nsteps': 5}
    _integrator = 'dop853'
    _y = np.zeros(2)
    _y_init = np.array([1, 6])
    _t = 0
    _tau = 1e-3
    _t_init = 0.1
    jac = None
    # counter
    _init_counter = 0
    _set_integrator_counter = 0
    _set_initial_value_counter = 0
    _set_f_params_counter = 0
    _set_jac_params_counter = 0
    _integrate_counter = 0
    def __init__(self, system_equation, jacobian_):
        self._init_counter += 1
        # The module-level `system` and `jacobian` functions must be wired in.
        assert system_equation == system
        assert jacobian_ == jacobian
        super().__init__(system_equation, jacobian_)
    def set_integrator(self, integrator, **args):
        self._set_integrator_counter += 1
        assert integrator == self._integrator
        assert args == self._kwargs
        return super().set_integrator(integrator, **args)
    def set_initial_value(self, y, t=0.0):
        # Intentionally does not call super(): only validates the arguments.
        self._set_initial_value_counter += 1
        assert all(y == self._y_init)
        assert t == self._t_init
    def set_f_params(self, *args):
        self._set_f_params_counter += 1
        assert args == (2,)
        super().set_f_params(2)
    def set_jac_params(self, *args):
        self._set_jac_params_counter += 1
        assert args == (2,)
        super().set_jac_params(*args)
    def integrate(self, t, *_):
        self._integrate_counter += 1
        assert t == self._t_init + self._tau
        # Deterministic result instead of a real integration step.
        return self._y_init * 2
class DummyLoad(MechanicalLoad):
    """
    dummy class for mechanical load
    """
    state_names = ['omega', 'position']
    limits = dict(omega=15, position=10)
    nominal_values = dict(omega=15, position=10)
    # Last arguments seen by the ode/jacobian calls (for test assertions).
    mechanical_state = None
    t = None
    mechanical_ode_return = None
    # Value returned by mechanical_jacobian; set by tests.
    mechanical_jac_return = None
    omega_range = None
    HAS_JACOBIAN = True
    def __init__(self, tau=1e-4, **kwargs):
        self.kwargs = kwargs
        self.reset_counter = 0
        super().__init__(tau=tau, **kwargs)
    def reset(self, state_space, state_positions, nominal_state, *_, **__):
        self.reset_counter += 1
        return np.zeros(2)
    def mechanical_ode(self, t, mechanical_state, torque):
        """Log the call; derivative is [torque, -torque]."""
        self.mechanical_state = mechanical_state
        self.t = t
        self.mechanical_ode_return = np.array([torque, -torque])
        return self.mechanical_ode_return
    def mechanical_jacobian(self, t, mechanical_state, torque):
        self.mechanical_state = mechanical_state
        self.t = t
        # NOTE(review): this stores into mechanical_ode_return but returns the
        # preset mechanical_jac_return — confirm the assignment target is intended.
        self.mechanical_ode_return = np.array([torque, -torque])
        return self.mechanical_jac_return
    def get_state_space(self, omega_range):
        # Record the requested range and return fixed low/high bound dicts.
        self.omega_range = omega_range
        return {'omega': 0, 'position': -1}, {'omega': 1, 'position': -1}
class DummyNoise(NoiseGenerator):
    """
    dummy class for noise generator: returns constant pseudo-noise so tests
    get deterministic values.
    """
    def __init__(self, **kwargs):
        self.kwargs = kwargs
        # NOTE(review): reset_counter is initialized but never incremented in
        # reset() — confirm whether that is intended.
        self.reset_counter = 0
        super().__init__()
    def reset(self):
        # Constant marker value (0.36) identifies reset-noise in tests.
        return np.ones_like(self._state_variables, dtype=float) * 0.36
    def noise(self, *_, **__):
        # Constant marker value (0.42) identifies step-noise in tests.
        return np.ones_like(self._state_variables, dtype=float) * 0.42
class DummyOdeSolver(OdeSolver):
    """
    Dummy class for ode solver
    """
    def __init__(self, **kwargs):
        self.kwargs = kwargs
        super().__init__()
    def integrate(self, t):
        """Advance linearly: y grows by the elapsed time (t - previous t)."""
        self.last_y = self._y
        self._y = self._y + t - self._t
        self._t = t
        return self._y
class DummyConstraint(Constraint):
    """Constraint stub that always reports a fixed violation degree."""
    def __init__(self, violation_degree=0.0):
        super().__init__()
        self.modules_set = False
        self.violation_degree = violation_degree
    def __call__(self, state):
        # Ignore the state; return the configured violation degree.
        return self.violation_degree
    def set_modules(self, ps):
        super().set_modules(ps)
        self.modules_set = True
class DummyConstraintMonitor(ConstraintMonitor):
    """Constraint monitor preloaded with a number of DummyConstraint instances."""
    def __init__(self, no_of_dummy_constraints=1):
        constraints = [DummyConstraint() for _ in range(no_of_dummy_constraints)]
        super().__init__(additional_constraints=constraints)
class DummySCMLSystem(SCMLSystem):
    """
    dummy class for SCMLSystem
    """
    # defined test values
    OMEGA_IDX = 0
    TORQUE_IDX = 1
    CURRENTS_IDX = []
    VOLTAGES_IDX = []
    U_SUP_IDX = -1
    _limits = {}
    _nominal_state = {}
    _supply = None
    _converter = None
    _electrical_motor = None
    _mechanical_load = None
    _state_names = ['omega_me', 'torque', 'u', 'i', 'u_sup']
    _state_length = 5
    # counter
    _set_limits_counter = 0
    _set_nominal_state_counter = 0
    def _set_limits(self):
        self._set_limits_counter += 1
    def _set_nominal_state(self):
        self._set_nominal_state_counter += 1
    def _build_state_space(self, state_names):
        # Only verify that the expected names are passed through.
        assert state_names == self._state_names
        return None
    def _build_state_names(self):
        return self._state_names
    def _set_indices(self):
        pass
    def simulate(self, action, *_, **__):
        # Constant marker values make the call origins recognizable in tests.
        return np.ones(self._state_length) * 0.46
    def _system_equation(self, t, state, u_in, **__):
        return np.ones(self._state_length) * 0.87
    def reset(self, *_):
        return np.ones(self._state_length) * 0.12
    def _forward_transform(self, quantities, motor_state):
        return quantities
    def _build_state(self, motor_state, torque, u_in, u_sup):
        pass
    def _action_transformation(self, action):
        return action
class DummyRandom:
    """Mock replacement for numpy/stdlib random functions.

    Each ``monkey_random_*`` method returns a fixed, deterministic value,
    counts its calls, and — when the corresponding ``exp_*`` expectation was
    configured in ``__init__`` — asserts that it was called with exactly the
    expected arguments.
    """
    # expected call arguments (None = do not check)
    _expected_low = None
    _expected_high = None
    _expected_left = None
    _expected_mode = None
    _expected_right = None
    _expected_values = None
    _expected_probabilities = None
    _expected_loc = None
    _expected_scale = None
    _expected_size = None
    # call counters (rebound as instance attributes on first increment)
    _monkey_random_rand_counter = 0
    _monkey_random_triangular_counter = 0
    _monkey_random_randint_counter = 0
    _monkey_random_choice_counter = 0
    _monkey_random_normal_counter = 0
    def __init__(self, exp_low=None, exp_high=None, exp_left=None, exp_right=None, exp_mode=None, exp_values=None,
                 exp_probabilities=None, exp_loc=None, exp_scale=None, exp_size=None):
        """
        set expected values
        :param exp_low: expected lower value
        :param exp_high: expected upper value
        :param exp_mode: expected mode value
        :param exp_right: expected right value
        :param exp_left: expected left value
        :param exp_values: expected values for choice
        :param exp_probabilities: expected probabilities for choice
        :param exp_loc: expected loc value
        :param exp_scale: expected scale value
        :param exp_size: expected size value
        """
        self._expected_low = exp_low
        self._expected_high = exp_high
        self._expected_mode = exp_mode
        self._expected_left = exp_left
        self._expected_right = exp_right
        self._expected_values = exp_values
        self._expected_probabilities = exp_probabilities
        self._expected_loc = exp_loc
        self._expected_scale = exp_scale
        self._expected_size = exp_size
    def monkey_random_rand(self):
        """Mock function for np.random.rand(); always returns 0.25."""
        self._monkey_random_rand_counter += 1
        return 0.25
    def monkey_random_triangular(self, left, mode, right):
        """Mock function for np.random.triangular(); always returns 0.45."""
        self._monkey_random_triangular_counter += 1
        if self._expected_left is not None:
            assert left == self._expected_left
        # Fix: guard on _expected_right (was mistakenly _expected_high), so a
        # configured right-edge expectation is actually verified.
        if self._expected_right is not None:
            assert right == self._expected_right
        if self._expected_mode is not None:
            assert mode == self._expected_mode
        return 0.45
    def monkey_random_randint(self, low, high):
        """Mock function for random.randint(); always returns 7."""
        if self._expected_low is not None:
            assert low == self._expected_low
        if self._expected_high is not None:
            assert high == self._expected_high
        self._monkey_random_randint_counter += 1
        return 7
    def monkey_random_choice(self, a, p):
        """Mock function for np.random.choice(); returns the first candidate."""
        self._monkey_random_choice_counter += 1
        assert len(a) == len(p)
        if self._expected_values is not None:
            assert a == self._expected_values
        if self._expected_probabilities is not None:
            assert p == self._expected_probabilities
        return a[0]
    def monkey_random_normal(self, loc=0, scale=1.0, size=None):
        """Mock function for np.random.normal(); returns a fixed sample prefix."""
        if self._expected_loc is not None:
            assert loc == self._expected_loc
        if self._expected_scale is not None:
            assert scale == self._expected_scale
        if self._expected_size is not None:
            assert size == self._expected_size
        else:
            # NOTE(review): without a configured expectation any passed size is
            # overridden to 1 (behavior kept from the original) — confirm intended.
            size = 1
        self._monkey_random_normal_counter += 1
        result = np.array([0.1, -0.2, 0.6, 0.1, -0.5, -0.3, -1.7, 0.1, -0.2, 0.4])
        return result[:size]
class DummyElectricMotorEnvironment(ElectricMotorEnvironment):
    """Dummy environment to test pre implemented callbacks. Extend for further testing cases"""
    def __init__(self, reference_generator=None, callbacks=(), **kwargs):
        reference_generator = reference_generator or DummyReferenceGenerator()
        super().__init__(DummyPhysicalSystem(), reference_generator, DummyRewardFunction(), callbacks=callbacks)
    def step(self):
        # Trigger the callback hooks with dummy arguments only.
        self._call_callbacks('on_step_begin', 0, 0)
        self._call_callbacks('on_step_end', 0, 0, 0, 0, 0)
    def reset(self):
        self._call_callbacks('on_reset_begin')
        self._call_callbacks('on_reset_end', 0, 0)
    def close(self):
        # Fix: pass only the hook name, consistent with the other hooks. The
        # previous call passed self._callbacks as the method-name argument.
        self._call_callbacks('on_close')
class DummyCallback(Callback):
    """Callback that counts how often each hook fires."""
    def __init__(self):
        super().__init__()
        self.reset_begin = 0
        self.reset_end = 0
        self.step_begin = 0
        self.step_end = 0
        # Counter attribute; note the name shadows a would-be close() method.
        self.close = 0
    def on_reset_begin(self):
        self.reset_begin += 1
    def on_reset_end(self, *_):
        self.reset_end += 1
    def on_step_begin(self, *_):
        self.step_begin += 1
    def on_step_end(self, *_):
        self.step_end += 1
    def on_close(self):
        self.close += 1
# endregion
| StarcoderdataPython |
118702 | import unittest
from pybox.math import util
class MathUtilTest(unittest.TestCase):
    """Unit tests for pybox.math.util."""
    def test_dot(self):
        # dot([1, 2, 3], [3, 4, 6]) = 1*3 + 2*4 + 3*6 = 29
        vec_a = [1, 2, 3]
        vec_b = [3, 4, 6]
        self.assertEqual(util.dot(vec_a, vec_b), 29)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
176396 |
# Package version. __version_info__ is derived from __version__ so the two
# representations can never drift apart.
__version__ = "1.12.0"
__version_info__ = tuple(int(part) for part in __version__.split("."))
| StarcoderdataPython |
# filename: main/watchlist_app/migrations/0005_alter_movielist_streaming_platform.py
# Generated by Django 3.2.4 on 2021-08-24 15:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: re-declare MovieList.streaming_platform with a related_name."""
    dependencies = [
        ('watchlist_app', '0004_alter_movielist_streaming_platform'),
    ]
    operations = [
        # Cascade-delete movie entries when their platform is removed; the
        # reverse accessor becomes StreamPlatform.stream_platform_field.
        migrations.AlterField(
            model_name='movielist',
            name='streaming_platform',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='stream_platform_field', to='watchlist_app.streamplatform'),
        ),
    ]
| StarcoderdataPython |
1612329 | # This file is part of the Reproducible and Reusable Data Analysis Workflow
# Server (flowServ).
#
# Copyright (C) 2019-2021 NYU.
#
# flowServ is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Methods to render input forms for the different types of input parameters
that can be defined for flowServ workflow templates.
"""
import streamlit as st
from typing import Callable, Dict, List, Tuple
from flowserv.model.parameter.base import Parameter
@st.cache(allow_output_mutation=True)
def enum_options(values: List[Dict]) -> Tuple[List[str], int, Callable]:
    """Enumerate the options of an enum parameter for display in a select box.

    Returns a 3-tuple containing the list of option values, the list index of
    the default option, and a function that maps option values to their
    display names (identifiers).

    Parameters
    ----------
    values: list of dict
        List of enumeration values from the parameter declaration.

    Returns
    -------
    (list, int, callable)
    """
    option_ids = list()
    id_to_name = dict()
    selected = 0
    for pos, entry in enumerate(values):
        option_id = entry['value']
        option_ids.append(option_id)
        id_to_name[option_id] = entry['name']
        if entry.get('isDefault', False):
            # The last entry flagged as default wins (same as the original).
            selected = pos
    def id_to_label(value: str) -> str:
        """Map an option value to its display name."""
        return id_to_name[value]
    return option_ids, selected, id_to_label
def show_form(parameters: List[Parameter]) -> Tuple[bool, Dict]:
    """Display input controls for the different parameters in a workflow
    template. Returns the value for the submit button and a mapping of
    parameter identifier to the values that are returned by the respective
    controls.

    Parameters
    ----------
    parameters: list of flowserv.model.parameter.base.ParameterBase
        List of parameter declarations in a workflow template.

    Returns
    -------
    (bool, dict)
    """
    # Collect return values for the rendered controls for each of the
    # parameters. This is a mapping from parameter identifier to the value
    # that was provided by the user via the rendered input form element.
    arguments = dict()
    # flowServ currently distinguishes these main types of parameters: actor,
    # bool, enumeration, file, numeric and text.
    for para in parameters:
        if para.is_actor():
            # Render text inputs for the container actor (image + commands).
            atype, aconfig = para.default if para.default else ('container', dict())
            if atype == 'container':
                image = st.text_input(para.label + ' (Docker Image)', aconfig.get('image', ''))
                commands = st.text_area(
                    para.label + ' (Commands)',
                    '\n'.join(aconfig.get('commands', [])).strip()
                )
                val = ('container', {'image': image, 'commands': commands.split('\n')})
            else:
                raise ValueError("invalid actor type '{}'".format(atype))
        elif para.is_bool():
            # Render a checkbox for Boolean parameters.
            checked = para.default if para.default else False
            val = st.checkbox(label=para.label, value=checked)
        elif para.is_select():
            # Render a select box for all the options in an enumeration
            # parameter.
            options, index, mapfunc = enum_options(para.values)
            val = st.selectbox(
                label=para.label,
                options=options,
                index=index,
                format_func=mapfunc
            )
        elif para.is_file():
            # Render a file uploader for input files.
            val = st.file_uploader(label=para.label)
        elif para.is_numeric():
            # For numeric parameters we either render a text box or a slider if
            # the parameter has a range constraint.
            constraint = para.constraint
            default_value = para.default
            if constraint is not None and constraint.is_closed():
                if default_value is None:
                    # Fall back to the lower bound when no default is declared.
                    default_value = constraint.min_value()
                val = st.slider(
                    label=para.label,
                    min_value=constraint.min_value(),
                    max_value=constraint.max_value(),
                    value=default_value
                )
            else:
                val = st.text_input(para.label, para.default)
        else:
            # Render a text box for all other parameter types.
            val = st.text_input(para.label, para.default)
        arguments[para.name] = val
    submit = st.button('Run')
    return submit, arguments
| StarcoderdataPython |
# gh_stars: 100-1000
from contextlib import contextmanager
import torch
import torch.nn as nn
import torch.nn.functional as F
@contextmanager
def save_sample_grads(model: nn.Module):
    """Record per-sample gradients for the direct children of *model*.

    While the ``with`` block is active, forward hooks capture each child
    module's input and backward hooks store per-sample gradients in a
    ``grads`` attribute on the module's parameters.

    NOTE: only ``model.children()`` (direct children) are hooked — nested
    submodules are not traversed.
    """
    handles = []
    for module in model.children():
        params = list(module.parameters())
        params = [p for p in params if p.requires_grad]
        if len(params) == 0:
            # Nothing to differentiate (e.g. activation layers) — skip.
            continue
        handles.append(module.register_forward_hook(_forward_postprocess))
        handles.append(module.register_backward_hook(_backward_postprocess))
    try:
        yield
    finally:
        # Fix: remove the hooks even when the with-body raises, so a failed
        # iteration does not leave stale hooks on the model.
        for handle in handles:
            handle.remove()
def _forward_postprocess(module: nn.Module, input: torch.Tensor, output: torch.Tensor):
data_input = input[0].clone().detach()
if isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
bnorm = module
f = bnorm.num_features
if isinstance(module, nn.BatchNorm1d):
shape = (1, f)
elif isinstance(module, nn.BatchNorm2d):
shape = (1, f, 1, 1)
else:
shape = (1, f, 1, 1, 1)
# restore normalized input
data_input_norm = (output - bnorm.bias.view(shape)).div(bnorm.weight.view(shape))
data_input = data_input_norm
setattr(module, 'data_input', data_input)
def _backward_postprocess(module: nn.Module, grad_input: torch.Tensor, grad_output: torch.Tensor):
    """Backward hook: dispatch per-sample gradient computation by module type.

    Requires that the forward hook has already stored ``module.data_input``.
    Raises ValueError for unsupported module classes.
    """
    grad_output = grad_output[0].clone().detach()
    data_input = getattr(module, 'data_input', None)
    assert data_input is not None, 'backward is called before forward.'
    # Per-sample semantics only hold if the batch sizes agree.
    assert data_input.size(0) == grad_output.size(0)
    args = [module, data_input, grad_output]
    if isinstance(module, nn.Linear):
        grad_linear(*args)
    elif isinstance(module, nn.Conv2d):
        grad_conv2d(*args)
    elif isinstance(module, nn.BatchNorm1d):
        grad_batchnorm1d(*args)
    elif isinstance(module, nn.BatchNorm2d):
        grad_batchnorm2d(*args)
    else:
        raise ValueError(f'Unsupported module class: {module.__class__}.')
def grad_linear(module: nn.Module, data_input: torch.Tensor, grad_output: torch.Tensor):
    """Compute per-sample gradients for an ``nn.Linear`` layer.

    Stores ``weight.grads`` (n x f_out x f_in) and ``bias.grads`` (n x f_out)
    as attributes on the corresponding parameters.
    """
    assert isinstance(module, nn.Linear)
    assert data_input.ndimension() == 2  # n x f_in
    assert grad_output.ndimension() == 2  # n x f_out
    if module.weight.requires_grad:
        # Per-sample outer product of output gradient and input.
        per_sample = torch.einsum('bi,bj->bij', grad_output, data_input)
        setattr(module.weight, 'grads', per_sample)
    if hasattr(module, 'bias') and module.bias.requires_grad:
        setattr(module.bias, 'grads', grad_output)
def grad_conv2d(module: nn.Module, data_input: torch.Tensor, grad_output: torch.Tensor):
    """Compute per-sample gradients for an ``nn.Conv2d`` layer.

    Stores ``weight.grads`` (n x c_out x c_in x k_h x k_w) and ``bias.grads``
    (n x c_out) as attributes on the corresponding parameters.
    """
    assert isinstance(module, nn.Conv2d)
    assert data_input.ndimension() == 4  # n x c_in x h_in x w_in
    assert grad_output.ndimension() == 4  # n x c_out x h_out x w_out
    if module.weight.requires_grad:
        # im2col view of the input: n x (c_in * k_h * k_w) x (h_out * w_out)
        patches = F.unfold(data_input,
                           kernel_size=module.kernel_size, stride=module.stride,
                           padding=module.padding, dilation=module.dilation)
        batch, c_out = grad_output.size(0), grad_output.size(1)
        # Flatten the spatial dimensions of the output gradient.
        grad_flat = grad_output.view(batch, c_out, -1)
        # Contract over spatial positions: n x c_out x (c_in * k_h * k_w)
        grads_flat = torch.einsum('bik,bjk->bij', grad_flat, patches)
        setattr(module.weight, 'grads', grads_flat.view(batch, *module.weight.size()))
    if hasattr(module, 'bias') and module.bias.requires_grad:
        # Bias gradient per sample: sum of the output gradient over h, w.
        setattr(module.bias, 'grads', grad_output.sum(dim=(2, 3)))
def grad_batchnorm1d(module: nn.Module, data_input: torch.Tensor, grad_output: torch.Tensor):
    """Per-sample gradients for ``nn.BatchNorm1d``.

    ``data_input`` must be the *normalized* input (as stored by the forward
    hook). Stores ``weight.grads`` and ``bias.grads`` (both n x f).
    """
    assert isinstance(module, nn.BatchNorm1d)
    assert data_input.ndimension() == 2  # n x f
    assert grad_output.ndimension() == 2  # n x f
    assert module.affine
    if module.weight.requires_grad:
        # d/d(gamma) per sample: normalized input times output gradient.
        setattr(module.weight, 'grads', data_input.mul(grad_output))
    if module.bias.requires_grad:
        setattr(module.bias, 'grads', grad_output)
def grad_batchnorm2d(module: nn.Module, data_input: torch.Tensor, grad_output: torch.Tensor):
    """Per-sample gradients for ``nn.BatchNorm2d``.

    ``data_input`` must be the *normalized* input. Spatial dimensions are
    summed out, leaving ``grads`` of shape n x c on weight and bias.
    """
    assert isinstance(module, nn.BatchNorm2d)
    assert data_input.ndimension() == 4  # n x c x h x w
    assert grad_output.ndimension() == 4  # n x c x h x w
    assert module.affine
    if module.weight.requires_grad:
        setattr(module.weight, 'grads', data_input.mul(grad_output).sum(dim=(2, 3)))
    if module.bias.requires_grad:
        setattr(module.bias, 'grads', grad_output.sum(dim=(2, 3)))
| StarcoderdataPython |
3214503 | #!/usr/bin/env python
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
#
import sys
import json
import boto3
import argparse
#aws ec2 describe-images --owners 309956199498 --region us-west-2 --filters Name=name,Values=RHEL-7.3_HVM_GA-20161026-x86_64-1-Hourly2-GP2
def arg_parse():
    """Build and parse the command line arguments.

    Every option takes a base AMI ID (ami-xxxxxxxxx) that must exist in
    us-east-1; the script later resolves the matching AMI in every region.

    Returns:
        argparse.Namespace: parsed arguments (all optional strings, default None).
    """
    # (flag, dest, help) for every supported distribution. Factoring the
    # specification out of nine near-identical add_argument calls keeps the
    # options consistent (all are optional strings) and easy to extend.
    ami_options = [
        ('--amzn', 'alinux',
         'Base Amazon Linux AMI ID (xx-xxxxxxxxx) *specifically* in us-east-1, '
         'use this site for Amazon Linux: '
         ' https://aws.amazon.com/amazon-linux-ami/'),
        ('--amzn2', 'alinux2',
         'Base Amazon Linux 2 AMI ID (xx-xxxxxxxxx) *specifically* in us-east-1, '
         'use this site for Amazon Linux 2: '
         ' https://aws.amazon.com/amazon-linux-ami/'),
        ('--centos6', 'centos6',
         'Base CentOS6 Linux AMI ID (xx-xxxxxxxxx) *specifically* in us-east-1, '
         'use this site for CentOS AMI info: '
         ' https://wiki.centos.org/Cloud/AWS'),
        ('--centos7', 'centos7',
         'Base Centos7 Linux AMI ID (xx-xxxxxxxxx) *specifically* in us-east-1, '
         'use this site for CentOS AMI info: '
         ' https://wiki.centos.org/Cloud/AWS'),
        ('--rhel7', 'rhel7',
         'Base RHEL7 Linux AMI ID (xx-xxxxxxxxx) *specifically* in us-east-1, '
         'use this site for RHEL 7 AMI info'
         ' AWS Console'),
        ('--suse11', 'suse11',
         'Base SUSE 11 Linux AMI ID (xx-xxxxxxxxx) *specifically* in us-east-1, '
         'use this site for SuSE 11 info: '
         ' AWS Console'),
        ('--suse12', 'suse12',
         'Base SUSE 12 Linux AMI ID (xx-xxxxxxxxx) *specifically* in us-east-1, '
         'use this site for SuSE 12 info: '
         ' AWS Console'),
        ('--ubuntu14', 'ubuntu14',
         'Base Ubuntu 14 Linux AMI ID (xx-xxxxxxxxx) *specifically* in us-east-1, '
         'use this site for Ubuntu14: '
         ' AWS Console'),
        ('--ubuntu16', 'ubuntu16',
         'Base Ubuntu 16 Linux AMI ID (xx-xxxxxxxxx) *specifically* in us-east-1, '
         'use this site for Ubuntu16: '
         ' AWS Console'),
    ]
    parser = argparse.ArgumentParser(prog='get_ami_id')
    for flag, dest, help_text in ami_options:
        parser.add_argument(flag, dest=dest, type=str, help=help_text, required=False)
    return parser.parse_args()
def image_info(client, owners, ami_name, region):
    """Look up an AMI by owner and exact name in *region*.

    Args:
        client: EC2 client bound to *region*.
        owners (str): AWS account id that owns the image.
        ami_name (str): exact AMI name to filter on.
        region (str): region name (used for log messages only).

    Returns:
        The describe_images response when the image exists, otherwise the
        sentinel string "NONE" (or None if the first image has a falsy id).
    """
    response = client.describe_images(
        DryRun=False,
        Owners=[
            owners,
        ],
        Filters=[
            {
                'Name': 'name',
                'Values': [
                    ami_name,
                ]
            },
        ]
    )
    try:
        if response["Images"][0]["ImageId"]:
            return response
    except (IndexError, KeyError):
        # Narrowed from a bare except: a missing/empty Images list is the
        # expected "AMI not found in this region" condition.
        print("Does the AMI requested exist in {0}? Not adding region {0} to list. Continuing...".format(region))
        return "NONE"
def get_image_info(client, ami_id):
    """Return (name, owner, description, ena, sriov) for *ami_id*.

    Optional fields that are absent from the response are reported as 'NONE'.
    Exits the program when the AMI cannot be described at all.
    """
    try:
        response = client.describe_images(
            DryRun=False,
            ImageIds=[
                ami_id,
            ],
        )
    except Exception as e:
        print(e)
        print("Does {0} exist in us-east-1? Checking next region ...".format(ami_id))
        sys.exit(1)
    image = response["Images"][0]
    ami_name = image["Name"]
    # Fix: look up each optional field independently so that one missing key
    # does not hide the others. Previously a single try block stopped at the
    # first KeyError, so e.g. a missing Description also dropped the
    # EnaSupport and SriovNetSupport values.
    owners = image.get("OwnerId", 'NONE')
    description = image.get("Description", 'NONE')
    ena = image.get("EnaSupport", 'NONE')
    sriov = image.get("SriovNetSupport", 'NONE')
    return ami_name, owners, description, ena, sriov
def print_image_info(args, client):
    """Print name/owner/description/ENA/SRIOV details for every AMI id passed
    on the command line, looked up through *client* (i.e. in us-east-1)."""
    for arg_n, ami_id in vars(args).items():
        if ami_id:
            (ami_name, owners, description, ena, sriov) = get_image_info(client, ami_id)
            print('Building mappings for:\n'
                  ' Argument Name: {0}\n'
                  ' AMI Name: {1}\n'
                  ' AMI ID: {2}\n'
                  ' Owners ID: {3}\n'
                  ' AMI Desc: {4}\n'
                  ' ENA Support: {5}\n'
                  ' SRIOV Support: {6}\n'
                  .format(arg_n, ami_name, ami_id, owners, description, ena, sriov))
def main():
    """Resolve every requested base AMI into each region and print a
    CloudFormation-style 'Mappings' JSON document.

    Returns:
        int: process exit code (always 0).
    """
    rc = 0
    ami_map = dict()
    args = arg_parse()
    # All lookups key off us-east-1: the user supplies AMI IDs that exist
    # there and we resolve the equivalent image name in every other region.
    client_iad = boto3.client('ec2', region_name='us-east-1')
    r_response_iad = client_iad.describe_regions()
    print_image_info(args, client_iad)
    print("Getting AMI IDs from regions: ")
    for r in r_response_iad["Regions"]:
        region = r["RegionName"]
        print(" " + region)
        client = boto3.client('ec2', region_name=region)
        response = dict()
        ami_map[region] = dict()
        for arg_n, ami_id_iad in vars(args).items():
            if ami_id_iad:
                (ami_name, owners, description, ena, sriov) = get_image_info(client_iad, ami_id_iad)
                response[arg_n] = image_info(client, owners, ami_name, region)
                # Fix: compare by value, not identity — `is not` against a
                # string literal is implementation-dependent (SyntaxWarning).
                if response[arg_n] != "NONE":
                    ami_map[region].update({arg_n: response[arg_n]["Images"][0]["ImageId"]})
    ami_map = {"AWSRegionAMI": ami_map}
    ami_map = {"Mappings": ami_map}
    print(json.dumps(ami_map, indent=2, sort_keys=True))
    return rc
# Script entry point: translate interrupts and value errors into friendly
# messages instead of tracebacks.
if __name__ == "__main__":
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        print('\nReceived Keyboard interrupt.')
        print('Exiting...')
    except ValueError as e:
        print('ERROR: {0}'.format(e))
| StarcoderdataPython |
95445 | #encoding:utf8
import requests
def func1():
    """Demonstrate basic GET requests and write the last response body to out.txt."""
    r = requests.get(url='http://www.itwhy.org')  # simplest possible GET request
    print(r.status_code)  # HTTP status code of the response
    r = requests.get(url='http://dict.baidu.com/s', params={'wd': 'python'})  # GET with query parameters
    print(r.url)
    # Fix: use the print() function — the original mixed Python-2 print
    # statements into otherwise Python-3 style code.
    print(type(r.text), len(r.text))
    print(u'中国')
    with open('out.txt', 'wb') as fo:
        fo.write(r.text.encode('utf8'))
def func2():
    """Write the UTF-8 encoded text '中国' to out.txt."""
    text = u'中国'
    with open('out.txt', 'wb') as out_file:
        out_file.write(text.encode('utf8'))
def func3():
    """Fetch one page of Douban movie search results and save it to out.txt."""
    # NOTE(review): the bare string below is dead code (evaluated and
    # discarded) — looks like a leftover sample URL.
    'https://movie.douban.com/j/new_search_subjects?sort=T&range=0,10&tags=&start=20'
    r = requests.get(url='https://movie.douban.com/j/new_search_subjects?sort=T&range=0,10&tags=&start=40')  # GET request with query parameters
    print(r.url)
    with open('out.txt', 'wb') as fo:
        fo.write(r.text.encode('utf8'))
# Run the Douban example when executed directly.
if __name__=='__main__':
    func3()
# repo: jadamowi/docs_automations
from pathlib import Path
import PyPDF2
def pdfencryption(inpath,
                  outpath,
                  pdfpass):
    """
    Function loops through the given path looking for PDF files, encrypts them
    and saves them in a given folder.
    :param inpath: Path to the folder with PDF files to be encrypted
    :param outpath: Path where the PDFs have to be saved
    :param pdfpass: Password the files are encrypted with
    :return: Function does not return any object
    """
    pathlist = Path(inpath).glob('*.pdf')
    for path in pathlist:
        path_in_str = str(path)
        # Fix: use context managers so both the source and the result file
        # are always closed (the source file handle was previously leaked).
        with open(path_in_str, 'rb') as pdf_file:
            pdf_reader = PyPDF2.PdfFileReader(pdf_file)
            pdf_writer = PyPDF2.PdfFileWriter()
            for page_num in range(pdf_reader.numPages):
                pdf_writer.addPage(pdf_reader.getPage(page_num))
            pdf_writer.encrypt(pdfpass)
            # NOTE(review): the output name is built by slicing off the input
            # prefix; this assumes inpath/outpath use matching separators —
            # confirm with callers before switching to os.path.join.
            with open(outpath + path_in_str[(len(inpath)):], 'wb') as result_pdf:
                pdf_writer.write(result_pdf)
if __name__ == '__main__':
    # Fix: prompt for the paths/password only when run as a script; the
    # input() calls previously also executed at import time.
    in_path = input("Please enter a path where the PDFs are stored:\n")
    out_path = input("Please enter a new path where the encrypted PDFs have to be stored:\n")
    password = input("Please enter a password:\n")
    pdfencryption(in_path, out_path, password)
| StarcoderdataPython |
1791248 | from aspen.config import config
# Docker-compose environment config
class DockerComposeConfig(config.Config):
    """App configuration used when running under docker-compose.

    Development-oriented settings: debugging is on and the session cookie is
    not marked Secure because local traffic is plain HTTP.
    """
    @config.flaskproperty
    def DEBUG(self) -> bool:
        return True
    @config.flaskproperty
    def SESSION_COOKIE_SECURE(self) -> bool:
        # Local dev runs over http://, so the Secure flag must be off.
        return False
    @config.flaskproperty
    def SESSION_COOKIE_HTTPONLY(self) -> bool:
        return True
    @config.flaskproperty
    def SESSION_COOKIE_SAMESITE(self) -> str:
        return "Lax"
    @property
    def AUTH0_CALLBACK_URL(self) -> str:
        # Auth0 redirects back to the local backend container after login.
        return "http://backend.genepinet.localdev:3000/callback"
| StarcoderdataPython |
1799911 | # coding: utf-8
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (QWidget, QApplication, QFrame,
QVBoxLayout, QSplitter, QDesktopWidget)
from .params import Params
from .introduction import Introduction
from .type_of_task import TypeOfTask
from .set_file import SetFile
from .data_check import DataCheck
from .overfitting import Overfitting
from .analysis import Analysis
from .menuview import MenuView
from .results import Results, Results2
from .bias_variance import BiasVariance
from .learning_curve import LearningCurve, LearningCurve2
from .feature_selection import FeatureSelection
from .prediction import Prediction
from .error import Error
class App(QWidget):
    """Top-level window of the MALSS interactive GUI.
    A fixed white header bar sits above a splitter whose left pane is the
    navigation menu (MenuView) and whose right pane is the page currently
    selected from that menu.
    """
    def __init__(self, lang='en'):
        super().__init__()
        # Shared state handed to every page; lang selects Japanese/English.
        self.params = Params(lang)
        self.initUI()
    def initUI(self):
        """Build the widget tree, center the window and show it."""
        # Menu label -> page class. Each page is listed twice: once under
        # its Japanese label and once under its English label.
        self.txt2func = {
            'はじめに': Introduction, 'Introduction': Introduction,
            '分析タスク': TypeOfTask, 'Task': TypeOfTask,
            '入力データ': SetFile, 'Input data': SetFile,
            'データの確認': DataCheck, 'Data check': DataCheck,
            '過学習': Overfitting, 'Overfitting': Overfitting,
            '分析の実行': Analysis, 'Analysis': Analysis,
            '結果の確認': Results, 'Results': Results,
            'バイアスとバリアンス': BiasVariance, 'Bias and Variance': BiasVariance,
            '学習曲線': LearningCurve, 'Learning curve': LearningCurve,
            '特徴量選択': FeatureSelection, 'Feature selection': FeatureSelection,
            '結果の確認2': Results2, 'Results 2': Results2,
            '学習曲線2': LearningCurve2, 'Learning curve 2': LearningCurve2,
            '予測': Prediction, 'Prediction': Prediction,
            'Error': Error}
        self.setMinimumSize(1280, 960)
        self.setStyleSheet('background-color: rgb(242, 242, 242)')
        vbox = QVBoxLayout(self)
        vbox.setSpacing(0)
        vbox.setContentsMargins(0, 0, 0, 0)
        # Fixed-height white bar across the top of the window.
        top = QFrame(self)
        top.setFrameShape(QFrame.StyledPanel)
        top.setFixedHeight(50)
        top.setStyleSheet('background-color: white')
        # Menu (index 0) and content (index 1) share a handle-less splitter.
        self.splitter = QSplitter(Qt.Horizontal, self)
        self.splitter.setHandleWidth(0)
        self.menuview = MenuView(self.splitter, self.update_content,
                                 self.params)
        self.menuview.setWidgetResizable(True)
        # Start on the introduction page.
        self.contentview = Introduction(self.splitter,
                                        self.menuview.edit_button, self.params)
        self.contentview.setWidgetResizable(True)
        self.splitter.addWidget(self.menuview)
        self.splitter.addWidget(self.contentview)
        vbox.addWidget(top)
        vbox.addWidget(self.splitter)
        self.setLayout(vbox)
        self.center()
        # self.showMaximized()
        self.setWindowTitle('MALSS interactive')
        self.show()
    def center(self):
        """Center the window on the available screen geometry."""
        # Get a rectangle of the main window.
        qr = self.frameGeometry()
        # Figure out the screen resolution; and from this resolution,
        # get the center point (x, y)
        cp = QDesktopWidget().availableGeometry().center()
        # Set the center of the rectangle to the center of the screen.
        qr.moveCenter(cp)
        self.move(qr.topLeft())
    def update_content(self, text):
        """Swap the right-hand pane to the page whose menu label is *text*.
        Called by MenuView; unknown labels leave the view unchanged.
        """
        content = self.splitter.widget(1)
        if content is not None:
            if text in self.txt2func:
                # Hide and schedule deletion of the old page, then build
                # the newly selected one in its place.
                content.hide()
                content.deleteLater()
                self.contentview =\
                    self.txt2func[text](self.splitter,
                                        self.menuview.edit_button,
                                        self.params)
                self.contentview.setWidgetResizable(True)
                self.splitter.addWidget(self.contentview)
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the main
    # window, and hand control to the event loop until it exits.
    qt_app = QApplication(sys.argv)
    main_window = App()
    sys.exit(qt_app.exec_())
| StarcoderdataPython |
157966 | <gh_stars>1000+
"""
Example that shows how to receive updates on discovered chromecasts.
"""
# pylint: disable=invalid-name
import argparse
import logging
import time
import zeroconf
import pychromecast
# Command-line options for the discovery example.
parser = argparse.ArgumentParser(
    description="Example on how to receive updates on discovered chromecasts."
)
# May be repeated; collected into a list of host strings.
parser.add_argument(
    "--known-host",
    help="Add known host (IP), can be used multiple times",
    action="append",
)
parser.add_argument(
    "--force-zeroconf",
    help="Zeroconf will be used even if --known-host is present",
    action="store_true",
)
parser.add_argument("--show-debug", help="Enable debug log", action="store_true")
parser.add_argument(
    "--show-zeroconf-debug", help="Enable zeroconf debug log", action="store_true"
)
args = parser.parse_args()
if args.show_debug:
    logging.basicConfig(level=logging.DEBUG)
if args.show_zeroconf_debug:
    # Print the zeroconf version alongside its debug output for context.
    print("Zeroconf version: " + zeroconf.__version__)
    logging.getLogger("zeroconf").setLevel(logging.DEBUG)
def list_devices():
    """Dump every cast device the global browser currently knows about."""
    known = browser.services
    print("Currently known cast devices:")
    for uuid in known:
        print(f"  {uuid} {known[uuid]}")
class MyCastListener(pychromecast.discovery.AbstractCastListener):
    """Callbacks fired by the CastBrowser as chromecasts come and go."""
    def add_cast(self, uuid, _service):
        """A new cast device was discovered."""
        print(f"Found cast device with UUID {uuid}")
        list_devices()
    def remove_cast(self, uuid, _service, cast_info):
        """A cast device was lost (MDNS info expired or host down)."""
        print(f"Lost cast device with UUID {uuid} {cast_info}")
        list_devices()
    def update_cast(self, uuid, _service):
        """A cast device's MDNS info was renewed or changed."""
        print(f"Updated cast device with UUID {uuid}")
        list_devices()
# Zeroconf is only needed when no known hosts were given (or when forced).
if args.known_host and not args.force_zeroconf:
    zconf = None
else:
    zconf = zeroconf.Zeroconf()
browser = pychromecast.discovery.CastBrowser(MyCastListener(), zconf, args.known_host)
browser.start_discovery()
# Keep the main thread alive; listener callbacks run on background threads.
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    pass
# Shut down discovery
browser.stop_discovery()
| StarcoderdataPython |
22391 | <filename>abcvoting/preferences.py
"""
Dichotomous (approval) preferences and preference profiles
Voters are indexed by 0, ..., len(profile)
Candidates are indexed by 0, ..., profile.num_cand
"""
from abcvoting.misc import str_candset
from collections import OrderedDict
class Profile(object):
"""
Preference profiles
"""
def __init__(self, num_cand, names=None):
if num_cand <= 0:
raise ValueError(str(num_cand) +
" is not a valid number of candidates")
self.num_cand = num_cand
self.preferences = []
self.names = [str(c) for c in range(num_cand)]
if names:
if len(names) < num_cand:
raise ValueError("names " + str(names) + " has length "
+ str(len(names)) + " < num_cand ("
+ str(num_cand) + ")")
self.names = [str(names[i]) for i in range(num_cand)]
def __len__(self):
return len(self.preferences)
def add_preferences(self, pref):
if type(pref) in [list, tuple]:
if len(pref) == 0:
return
if type(pref[0]) is int:
# list of integers
self.preferences.append(DichotomousPreferences(pref))
else:
# list of integer-lists or DichotomousPreferences
for p in pref:
if type(p) in [list, tuple]:
newpref = DichotomousPreferences(p)
newpref.is_valid(self.num_cand)
self.preferences.append(newpref)
elif isinstance(p, DichotomousPreferences):
p.is_valid(self.num_cand)
self.preferences.append(p)
else:
raise TypeError("Object of type " + str(type(p)) +
" not suitable as preferences")
elif isinstance(pref, DichotomousPreferences):
pref.is_valid(self.num_cand)
self.preferences.append(pref)
else:
raise TypeError("Object of type " + str(type(pref)) +
" not suitable as preferences")
def totalweight(self):
return sum(pref.weight for pref in self.preferences)
def has_unit_weights(self):
for p in self.preferences:
if p.weight != 1:
return False
return True
def __iter__(self):
return iter(self.preferences)
def __getitem__(self, i):
return self.preferences[i]
def __str__(self):
if self.has_unit_weights():
output = ("profile with %d votes and %d candidates:\n"
% (len(self.preferences), self.num_cand))
for p in self.preferences:
output += " " + str_candset(p.approved, self.names) + ",\n"
else:
output = ("weighted profile with %d votes and %d candidates:\n"
% (len(self.preferences), self.num_cand))
for p in self.preferences:
output += (" " + str(p.weight) + " * "
+ str_candset(p.approved, self.names) + ",\n")
return output[:-2]
def party_list(self):
"""
Is this party a party-list profile?
In a party-list profile all approval sets are either
disjoint or equal (see https://arxiv.org/abs/1704.02453).
"""
for pref1 in self.preferences:
for pref2 in self.preferences:
if ((len(pref1.approved & pref2.approved)
not in [0, len(pref1.approved)])):
return False
return True
def str_compact(self):
compact = OrderedDict()
for p in self.preferences:
if tuple(p.approved) in compact:
compact[tuple(p.approved)] += p.weight
else:
compact[tuple(p.approved)] = p.weight
if self.has_unit_weights():
output = ""
else:
output = "weighted "
output += ("profile with %d votes and %d candidates:\n"
% (len(self.preferences), self.num_cand))
for apprset in compact:
output += (" " + str(compact[apprset]) + " x "
+ str_candset(apprset, self.names) + ",\n")
output = output[:-2]
if not self.has_unit_weights():
output += "\ntotal weight: " + str(self.totalweight())
output += "\n"
return output
def aslist(self):
return [list(pref.approved) for pref in self.preferences]
class DichotomousPreferences():
    """An approval set (dichotomous preferences) with an optional weight.
    `approved` is stored as a set of candidate indices; `weight` scales
    the voter's influence (default 1).
    """
    def __init__(self, approved, weight=1):
        self.approved = set(approved)
        # Validate self.approved rather than the constructor argument:
        # `approved` may be a one-shot iterator that set() has already
        # consumed (the original crashed on generator input here).
        if self.approved:  # empty approval sets are fine
            self.is_valid(max(self.approved) + 1)
        self.weight = weight
    def __str__(self):
        return str(list(self.approved))
    def __len__(self):
        return len(self.approved)
    def __iter__(self):
        return iter(self.approved)
    def is_valid(self, num_cand):
        """Return True if every approved candidate lies in [0, num_cand);
        raise ValueError otherwise."""
        for c in self.approved:
            if c < 0 or c >= num_cand:
                raise ValueError(str(self) + " not valid for num_cand = " +
                                 str(num_cand))
        return True
| StarcoderdataPython |
135385 | <gh_stars>1-10
from django.db import models
from django.contrib.auth.models import User
from rules.contrib.models import RulesModel
import rules
@rules.predicate
def is_alarm_creator(user, alarm):
    """Predicate: is *user* the creator of *alarm*?"""
    return alarm.creator == user
# Register the predicate both as a named rule and as an object-level
# permission used by the Alarm model.
rules.add_rule('can_edit_alarm',is_alarm_creator)
rules.add_perm('alarm.edit_alarm', is_alarm_creator)
# Create your models here.
class Alarm(RulesModel):
    """Model representing an alarm"""
    # Display name; defaults to 'Alarm'.
    title = models.CharField(max_length=200, default = 'Alarm', help_text = 'Enter a nice name for the alarm')
    # Owning user; permission-checked via the is_alarm_creator predicate.
    # NOTE(review): default=1 hard-codes a user primary key -- confirm.
    creator = models.ForeignKey(User, on_delete=models.CASCADE, default=1)
    sound = models.ForeignKey('Sound', on_delete=models.CASCADE, default=1, help_text = 'Choose the sound for your alarm')
    # NOTE(review): help text says the time must be BEFORE the current
    # time, which is odd for an alarm -- confirm intent.
    time = models.DateTimeField(help_text = 'Choose a date and time for your alarm that is BEFORE the current time')
    class Meta:
        # Soonest alarm first.
        ordering = ['time']
    def __str__(self):
        return f'{self.title} at {self.time}'
    def get_absolute_url(self):
        """Returns the url to access a detail record for this alarm."""
        # NOTE(review): currently returns the site root, not a per-alarm
        # detail URL -- the docstring overstates what this does.
        return '/'
class Sound(models.Model):
    """An alarm sound: a display name plus the uploaded audio file."""
    name = models.CharField(max_length = 200)
    audio = models.FileField()
    def __str__(self):
        # NOTE(review): the trailing "| StarcoderdataPython |" below is a
        # dataset-extraction artifact fused onto the last source line.
        return self.name | StarcoderdataPython |
3364063 | <reponame>Mahesh1822/evalml<filename>evalml/utils/logger.py<gh_stars>100-1000
"""Logging functions."""
import logging
import sys
import time
def get_logger(name):
    """Return the logger with the associated name.
    On first retrieval the logger is set to DEBUG and given a single
    stdout handler that emits bare messages at INFO and above; later
    calls return the same logger without adding more handlers.
    Args:
        name (str): Name of the logger to get.
    Returns:
        The logger object with the associated name.
    """
    logger = logging.getLogger(name)
    if not logger.handlers:
        logger.setLevel(logging.DEBUG)
        handler = logging.StreamHandler(sys.stdout)
        handler.setLevel(logging.INFO)
        handler.setFormatter(logging.Formatter("%(message)s"))
        logger.addHandler(handler)
    return logger
def log_title(logger, title):
    """Log *title* framed above and below by a row of asterisks."""
    border = "*" * (len(title) + 4)
    logger.info("\n" + border)
    logger.info("* %s *" % title)
    logger.info(border)
    logger.info("")
def log_subtitle(logger, title, underline="="):
    """Log *title* on its own line, underlined with *underline* chars."""
    for line in ("", "%s" % title, underline * len(title)):
        logger.info(line)
def time_elapsed(start_time):
    """How much time has elapsed since the search started.
    Args:
        start_time (float): Epoch timestamp when search started.
    Returns:
        str: elapsed time formatted as a string [H:]MM:SS
    """
    total_seconds = int(time.time() - start_time)
    hours, remainder = divmod(total_seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    if hours:
        return "{0:d}:{1:02d}:{2:02d}".format(hours, minutes, seconds)
    return "{0:02d}:{1:02d}".format(minutes, seconds)
| StarcoderdataPython |
3233937 | <reponame>bcongdon/agdq-2017-schedule-analysis<gh_stars>1-10
import requests
from bs4 import BeautifulSoup
import pandas as pd
import json
from scrape_genres import get_game_genres
def get_games_list():
    """Scrape the GDQ schedule page into a list of dicts with keys
    'title', 'duration' and 'runner'.
    The selectors imply each run spans two table rows: a class-less row
    with title/runner and a following 'second-row' with the duration
    (duration stays 0 when that row is missing).
    """
    req = requests.get('https://gamesdonequick.com/schedule')
    soup = BeautifulSoup(req.text)
    table = soup.find('tbody')
    first_rows = table.findAll('tr', attrs={'class': None})
    games = list()
    for row in first_rows:
        second_row = row.findNext('tr', attrs={'class': 'second-row'})
        duration = 0
        if second_row:
            duration = second_row.findNext('td').text.strip()
        # Runner cell spans both rows; empty string when absent.
        runner_text = row.find('td', attrs={'rowspan': 2})
        runner = runner_text.text.strip() if runner_text else ""
        game = {
            'title': row.find('td', attrs={'class': None}).text,
            'duration': duration,
            'runner': runner
        }
        games.append(game)
    return games
def giant_bomb_search(name):
    """Search the Giant Bomb API for *name*.
    Returns the result whose name matches exactly; otherwise prints the
    candidates and asks the user to pick an index ({} if the user
    answers -1, i.e. none of them).
    NOTE: this file is Python 2 (print statements, raw_input elsewhere);
    here input() evals what the user types -- an int index is expected.
    """
    base_url = "http://www.giantbomb.com/api/search/"
    with open('api_keys.json', 'r') as f:
        api_key = json.load(f)['giant_bomb']
    headers = {'User-agent': 'Python'}
    params = {
        'api_key': api_key,
        'format': 'json',
        'query': name.encode('ascii', 'replace'),
        'resources': 'game',
        'limit': 10
    }
    results = requests.get(base_url, headers=headers, params=params).json()
    manual_results = ['-1: **None of these results**']
    for i, res in enumerate(results['results']):
        if(res['name'] == name):
            return res
        res_name = res['name'].encode('ascii', 'replace')
        manual_results.append("{0}: {1}".format(i, res_name))
    else:
        # for/else: runs when the loop found no exact match.
        print "\n".join(manual_results)
        print "Title: {0}".format(name.encode('ascii', 'replace'))
        correct = int(input("Correct Index: "))
        if correct < 0:
            return {}
        return results['results'][correct]
def giant_bomb_game_data(game_id):
    """Fetch the full Giant Bomb record for *game_id*.
    Returns the 'results' payload; on a malformed response prints the
    raw body for debugging and falls through, returning None.
    """
    base_url = "http://www.giantbomb.com/api/game/{}".format(game_id)
    with open('api_keys.json', 'r') as f:
        api_key = json.load(f)['giant_bomb']
    headers = {'User-agent': 'Python'}
    params = {
        'api_key': api_key,
        'format': 'json'
    }
    response = requests.get(base_url, headers=headers, params=params).text
    try:
        return json.loads(response)['results']
    except Exception:
        # Surface the unparseable body instead of crashing the scrape.
        print response
def match_games_auto(games_list):
    """Fill game['data'] for every game that does not have it yet, by
    searching Giant Bomb for the title; mutates and returns the list."""
    for i, game in enumerate(games_list):
        # Skip games already matched in a previous run.
        if 'data' in game and game['data'] is not None:
            continue
        ascii_title = game['title'].encode('ascii', 'ignore')
        print "({0}/{1}) Searching for: {2}".format(i + 1,
                                                    len(games_list),
                                                    ascii_title)
        game['data'] = giant_bomb_search(games_list[i]['title'])
    return games_list
def match_games_manual(games_list):
    """For games the auto-match left empty, prompt for a Giant Bomb game
    id ('x' skips); finally drop games that still have no data."""
    for game in games_list:
        if game['data'] == {}:
            print "Title: {0}".format(game['title'].encode('ascii', 'replace'))
            game_id = raw_input("Game ID: ")
            if game_id == "x":
                continue
            game['data'] = giant_bomb_game_data(game_id)
    return [x for x in games_list if x['data']]
def process_game_platforms(games_list):
    """Prompt the user for the platform of every game that lacks one;
    mutates and returns *games_list*."""
    games_list_length = len(games_list)
    for i, game in enumerate(games_list):
        if "platform" in game:
            continue
        game_title = game['title'].encode('ascii', 'replace')
        p = raw_input("({0}/{1}) {2}:".format(i + 1,
                                              games_list_length,
                                              game_title))
        games_list[i]['platform'] = p
    return games_list
def filter_blacklisted_games(games_list):
    """Drop schedule entries that are not actual game runs.
    The original built the blacklist with map(), whose iterator is
    exhausted after testing the first game under Python 3, letting every
    later blacklisted title through; materialize the prefixes once and
    use the tuple form of str.startswith.
    """
    blacklisted_prefixes = ('pre-show', 'setup block', 'finale')
    return [game for game in games_list
            if not game['title'].lower().startswith(blacklisted_prefixes)]
def add_game_genres(games_list):
    """Attach scraped genres to each game's 'data' dict (requires that
    every game already has data with an 'id'); mutates and returns."""
    for i, g in enumerate(games_list):
        ascii_title = g['title'].encode('ascii', 'ignore')
        print("({0}/{1}) Searching for: {2}".format(i + 1,
                                                    len(games_list),
                                                    ascii_title))
        g['data']['genres'] = get_game_genres(g['data']['id'])
    return games_list
if __name__ == '__main__':
    # Pipeline: scrape schedule -> filter non-games -> match to Giant
    # Bomb -> annotate platform/genres -> dump to JSON.
    print("*** [1/5] Getting games list from Schedule...")
    raw_games = get_games_list()
    raw_games = filter_blacklisted_games(raw_games)
    print("*** [2/5] Attempting to automatically match games to data...")
    raw_games = match_games_auto(raw_games)
    print("*** [3/5] Manually fixing game data...")
    raw_games = match_games_manual(raw_games)
    print("*** [4/5] Prompting for marathon platform...")
    raw_games = process_game_platforms(raw_games)
    print("*** [5/5] Downloading game genres...")
    # BUG FIX: step 5 previously re-ran process_game_platforms, so
    # genres were never downloaded; it must call add_game_genres.
    raw_games = add_game_genres(raw_games)
    with open('scraped_games.json', 'w+') as f:
        json.dump(raw_games, f)
| StarcoderdataPython |
1782766 | import argparse
import math
import random
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
T = 10  # number of boosting-round settings (t = 0..T-1) tried in cv mode
def weak_learner(temp_X, temp_y, D):
    """ERM for decision stumps under example distribution D.
    Finds the feature j and threshold theta minimizing the D-weighted
    error of the rule "predict +1 iff x_j <= theta" (see compute_error
    for the matching prediction rule). Returns (theta_star, j_star).
    """
    m, d = temp_X.shape
    F_star, theta_star, j_star = float('inf'), 0, 0
    for j in range(d):
        # Scan candidate thresholds along feature j in sorted order.
        sorted_indexes = temp_X[:, j].argsort()
        xj = temp_X[sorted_indexes, j]
        # Sentinel beyond the largest value so a threshold above every
        # point is also considered.
        xj = np.append(xj, xj[-1] + 1)
        y_sorted = temp_y[sorted_indexes]
        D_sorted = D[sorted_indexes]
        # With theta below every point all predictions are -1, so the
        # weighted error F is the total mass of the positive examples.
        F = np.sum(D_sorted[np.where(y_sorted == 1)])
        if F < F_star:
            F_star, theta_star, j_star = F, xj[0] - 1, j
        for i in range(m):
            # Moving theta past point i flips its prediction to +1:
            # the error drops by D_i if y_i = +1, rises by D_i if -1.
            F = F - y_sorted[i] * D_sorted[i]
            # Only place thresholds strictly between distinct values.
            if F < F_star and xj[i] != xj[i + 1]:
                F_star, theta_star, j_star = F, (xj[i] + xj[i + 1]) / 2, j
    return theta_star, j_star
def adaboost(temp_X, temp_y, adaboost_t=1):
    """Run AdaBoost for *adaboost_t* rounds over decision stumps.
    Returns (weak_learners, weights): each learner is a (theta, j)
    stump and weights are the corresponding vote coefficients.
    """
    m, d = temp_X.shape
    # Start from the uniform distribution over examples.
    D = np.array([1 / m for i in range(m)])
    weak_learners, errors, weights = [], [], []
    for t in range(adaboost_t):
        weak_learners.append(weak_learner(temp_X, temp_y, D))
        theta, j = weak_learners[-1]
        # Stump predicts +1 when x_j <= theta, else -1.
        preds = np.where((theta >= temp_X[:, j]), 1, -1)
        # Weighted training error of this stump under D.
        error = np.sum(D[np.where(preds != temp_y)])
        errors.append(error)
        # Vote weight: w = 0.5 * ln((1 - error) / error).
        w = 0.5 * math.log(1 / error - 1)
        weights.append(w)
        # Re-weight: raise mass on misclassified examples, normalize.
        D = D * np.exp(-w * temp_y * preds)
        D = D / sum(D)
    # print(errors)
    return weak_learners, weights
def compute_error(temp_X, temp_y, weak_learners, weights):
    """Misclassification rate of the weighted stump ensemble on
    (temp_X, temp_y).
    Each learner is a (theta, j) stump predicting +1 iff x_j <= theta;
    the ensemble prediction is the sign of the weighted vote (ties at
    exactly 0 count as +1).
    """
    preds = np.zeros(temp_y.shape)
    for i, (theta, j) in enumerate(weak_learners):
        preds = preds + weights[i] * np.where((theta >= temp_X[:, j]), 1, -1)
    preds = np.where(preds < 0, -1, 1)
    # BUG FIX: divide by the size of the evaluated set, not the module
    # global `m` (the whole-dataset size), which under-reported the
    # validation error on CV folds and raised NameError on import.
    return len(np.where(preds != temp_y)[0]) / len(temp_y)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', dest='dataset_path', action='store', type=str, help='path to dataset')
    parser.add_argument('--mode', dest='mode', action='store', type=str, help='mode of algorithm', default='erm')
    args = parser.parse_args()
    df = pd.read_csv(args.dataset_path)
    df.head()
    # Last column is the label; remap {0, 1} to {-1, +1} for AdaBoost.
    y = df.iloc[:, -1].values
    y = np.where(y == 0, -1, 1)
    X = df.iloc[:, :-1].values
    m, d = X.shape
    if args.mode == 'erm':
        # Single AdaBoost fit (default one round) on the full dataset.
        weak_learners, weights = adaboost(X, y)
        error = compute_error(X, y, weak_learners, weights)
        print('Decision Stumps: %s \nWeights: %s \nError: %f' % (weak_learners, weights, error))
    elif args.mode == 'cv':
        # k-fold cross-validation over the number of boosting rounds.
        m, d = X.shape
        k = 10
        # Fold size, rounded up so every example lands in some fold.
        s = int(m / k) + (1 if m % k != 0 else 0)
        batches = []
        # Shuffle once, then carve the data into k contiguous folds.
        indexes = list(range(X.shape[0]))
        random.shuffle(indexes)
        X = X[indexes]
        y = y[indexes]
        for i in range(k):
            start_index, end_index = s * i, s * (i + 1)
            batches.append((X[start_index:end_index], y[start_index:end_index]))
        mean_validation_errors, mean_empirical_risks = [], []
        for adaboost_t in range(T):
            empirical_risks, validation_errors = [], []
            for i in range(k):
                print('Executing Fold #: %d' % (i + 1))
                train_X, train_y, test_X, test_y = None, None, None, None
                # Fold i is the validation set; the rest is training.
                for j, (X_j, y_j) in enumerate(batches):
                    if j == i:
                        test_X, test_y = X_j, y_j
                    else:
                        if train_X is None:
                            train_X, train_y = X_j, y_j
                        else:
                            train_X, train_y = np.append(train_X, X_j, axis=0), np.append(train_y, y_j, axis=0)
                weak_learners, weights = adaboost(train_X, train_y, adaboost_t)
                # Empirical risk is measured on the whole dataset.
                empirical_risk = compute_error(X, y, weak_learners, weights)
                validation_error = compute_error(test_X, test_y, weak_learners, weights)
                empirical_risks.append(empirical_risk)
                validation_errors.append(validation_error)
                print('Decision Stumps: %s \nWeights: %s \nError: %f' % (weak_learners, weights, validation_error))
            mean_validation_errors.append(np.mean(validation_errors))
            mean_empirical_risks.append(np.mean(empirical_risks))
            print('T: %d \nErrors: %s \nMean Error: %s' % (adaboost_t, validation_errors, np.mean(validation_errors)))
        # fig, ax = plt.subplots()
        # ax.plot(list(range(T)), mean_validation_errors, '-r', label='Validation Errors')
        # ax.plot(list(range(T)), mean_empirical_risks, '-0', label='Empirical risk on whole dataset')
        # plt.xlabel('T: number of rounds in adaboost')
        # plt.ylabel('Error')
        # ax.legend(loc='upper right')
        # plt.tight_layout()
        # plt.show()
    else:
        print('Incorrect mode of operation. Use "erm" or "cv".')
| StarcoderdataPython |
28293 | <reponame>AaronFriel/pulumi-google-native
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['RolloutArgs', 'Rollout']
@pulumi.input_type
class RolloutArgs:
def __init__(__self__, *,
delivery_pipeline_id: pulumi.Input[str],
release_id: pulumi.Input[str],
rollout_id: pulumi.Input[str],
target_id: pulumi.Input[str],
annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
validate_only: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Rollout resource.
:param pulumi.Input[str] target_id: The ID of Target to which this `Rollout` is deploying.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] annotations: User annotations. These attributes can only be set and used by the user, and not by Google Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations.
:param pulumi.Input[str] description: Description of the `Rollout` for user purposes. Max length is 255 characters.
:param pulumi.Input[str] etag: This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels are attributes that can be set and used by both the user and by Google Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes.
:param pulumi.Input[str] name: Optional. Name of the `Rollout`. Format is projects/{project}/ locations/{location}/deliveryPipelines/{deliveryPipeline}/ releases/{release}/rollouts/a-z{0,62}.
"""
pulumi.set(__self__, "delivery_pipeline_id", delivery_pipeline_id)
pulumi.set(__self__, "release_id", release_id)
pulumi.set(__self__, "rollout_id", rollout_id)
pulumi.set(__self__, "target_id", target_id)
if annotations is not None:
pulumi.set(__self__, "annotations", annotations)
if description is not None:
pulumi.set(__self__, "description", description)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
if request_id is not None:
pulumi.set(__self__, "request_id", request_id)
if validate_only is not None:
pulumi.set(__self__, "validate_only", validate_only)
@property
@pulumi.getter(name="deliveryPipelineId")
def delivery_pipeline_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "delivery_pipeline_id")
@delivery_pipeline_id.setter
def delivery_pipeline_id(self, value: pulumi.Input[str]):
pulumi.set(self, "delivery_pipeline_id", value)
@property
@pulumi.getter(name="releaseId")
def release_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "release_id")
@release_id.setter
def release_id(self, value: pulumi.Input[str]):
pulumi.set(self, "release_id", value)
@property
@pulumi.getter(name="rolloutId")
def rollout_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "rollout_id")
@rollout_id.setter
def rollout_id(self, value: pulumi.Input[str]):
pulumi.set(self, "rollout_id", value)
@property
@pulumi.getter(name="targetId")
def target_id(self) -> pulumi.Input[str]:
"""
The ID of Target to which this `Rollout` is deploying.
"""
return pulumi.get(self, "target_id")
@target_id.setter
def target_id(self, value: pulumi.Input[str]):
pulumi.set(self, "target_id", value)
@property
@pulumi.getter
def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
User annotations. These attributes can only be set and used by the user, and not by Google Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations.
"""
return pulumi.get(self, "annotations")
@annotations.setter
def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "annotations", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of the `Rollout` for user purposes. Max length is 255 characters.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Labels are attributes that can be set and used by both the user and by Google Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes.
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Optional. Name of the `Rollout`. Format is projects/{project}/ locations/{location}/deliveryPipelines/{deliveryPipeline}/ releases/{release}/rollouts/a-z{0,62}.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="requestId")
def request_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "request_id")
@request_id.setter
def request_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "request_id", value)
@property
@pulumi.getter(name="validateOnly")
def validate_only(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "validate_only")
@validate_only.setter
def validate_only(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "validate_only", value)
class Rollout(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
delivery_pipeline_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
release_id: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
rollout_id: Optional[pulumi.Input[str]] = None,
target_id: Optional[pulumi.Input[str]] = None,
validate_only: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Creates a new Rollout in a given project and location.
Auto-naming is currently not supported for this resource.
Note - this resource's API doesn't support deletion. When deleted, the resource will persist
on Google Cloud even though it will be deleted from Pulumi state.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] annotations: User annotations. These attributes can only be set and used by the user, and not by Google Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations.
:param pulumi.Input[str] description: Description of the `Rollout` for user purposes. Max length is 255 characters.
:param pulumi.Input[str] etag: This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels are attributes that can be set and used by both the user and by Google Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes.
:param pulumi.Input[str] name: Optional. Name of the `Rollout`. Format is projects/{project}/ locations/{location}/deliveryPipelines/{deliveryPipeline}/ releases/{release}/rollouts/a-z{0,62}.
:param pulumi.Input[str] target_id: The ID of Target to which this `Rollout` is deploying.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RolloutArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates a new Rollout in a given project and location.
Auto-naming is currently not supported for this resource.
Note - this resource's API doesn't support deletion. When deleted, the resource will persist
on Google Cloud even though it will be deleted from Pulumi state.
:param str resource_name: The name of the resource.
:param RolloutArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RolloutArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 delivery_pipeline_id: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 etag: Optional[pulumi.Input[str]] = None,
                 labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 release_id: Optional[pulumi.Input[str]] = None,
                 request_id: Optional[pulumi.Input[str]] = None,
                 rollout_id: Optional[pulumi.Input[str]] = None,
                 target_id: Optional[pulumi.Input[str]] = None,
                 validate_only: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Shared constructor body: normalize resource options, validate the
        # required inputs, and register the resource with the Pulumi engine.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource (as opposed to adopting an existing one
            # by id): build the property bag from the keyword arguments.
            # __props__ may only be supplied together with a valid opts.id.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = RolloutArgs.__new__(RolloutArgs)
            __props__.__dict__["annotations"] = annotations
            if delivery_pipeline_id is None and not opts.urn:
                raise TypeError("Missing required property 'delivery_pipeline_id'")
            __props__.__dict__["delivery_pipeline_id"] = delivery_pipeline_id
            __props__.__dict__["description"] = description
            __props__.__dict__["etag"] = etag
            __props__.__dict__["labels"] = labels
            __props__.__dict__["location"] = location
            __props__.__dict__["name"] = name
            __props__.__dict__["project"] = project
            if release_id is None and not opts.urn:
                raise TypeError("Missing required property 'release_id'")
            __props__.__dict__["release_id"] = release_id
            __props__.__dict__["request_id"] = request_id
            if rollout_id is None and not opts.urn:
                raise TypeError("Missing required property 'rollout_id'")
            __props__.__dict__["rollout_id"] = rollout_id
            if target_id is None and not opts.urn:
                raise TypeError("Missing required property 'target_id'")
            __props__.__dict__["target_id"] = target_id
            __props__.__dict__["validate_only"] = validate_only
            # Output-only properties: initialized to None and later resolved
            # by the provider.
            __props__.__dict__["approval_state"] = None
            __props__.__dict__["approve_time"] = None
            __props__.__dict__["create_time"] = None
            __props__.__dict__["deploy_end_time"] = None
            __props__.__dict__["deploy_failure_cause"] = None
            __props__.__dict__["deploy_start_time"] = None
            __props__.__dict__["deploying_build"] = None
            __props__.__dict__["enqueue_time"] = None
            __props__.__dict__["failure_reason"] = None
            __props__.__dict__["state"] = None
            __props__.__dict__["uid"] = None
        super(Rollout, __self__).__init__(
            'google-native:clouddeploy/v1:Rollout',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Rollout':
        """
        Get an existing Rollout resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties start as None; the engine populates them from the
        # existing resource identified by `id`.
        __props__ = RolloutArgs.__new__(RolloutArgs)
        __props__.__dict__["annotations"] = None
        __props__.__dict__["approval_state"] = None
        __props__.__dict__["approve_time"] = None
        __props__.__dict__["create_time"] = None
        __props__.__dict__["deploy_end_time"] = None
        __props__.__dict__["deploy_failure_cause"] = None
        __props__.__dict__["deploy_start_time"] = None
        __props__.__dict__["deploying_build"] = None
        __props__.__dict__["description"] = None
        __props__.__dict__["enqueue_time"] = None
        __props__.__dict__["etag"] = None
        __props__.__dict__["failure_reason"] = None
        __props__.__dict__["labels"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["state"] = None
        __props__.__dict__["target_id"] = None
        __props__.__dict__["uid"] = None
        return Rollout(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def annotations(self) -> pulumi.Output[Mapping[str, str]]:
        """
        User annotations. These attributes can only be set and used by the user, and not by Google Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations.
        """
        # Read-only view of this resource's "annotations" output.
        return pulumi.get(self, "annotations")
    @property
    @pulumi.getter(name="approvalState")
    def approval_state(self) -> pulumi.Output[str]:
        """
        Approval state of the `Rollout`.
        """
        # Read-only view of this resource's "approval_state" output.
        return pulumi.get(self, "approval_state")
    @property
    @pulumi.getter(name="approveTime")
    def approve_time(self) -> pulumi.Output[str]:
        """
        Time at which the `Rollout` was approved.
        """
        # Read-only view of this resource's "approve_time" output.
        return pulumi.get(self, "approve_time")
    @property
    @pulumi.getter(name="createTime")
    def create_time(self) -> pulumi.Output[str]:
        """
        Time at which the `Rollout` was created.
        """
        # Read-only view of this resource's "create_time" output.
        return pulumi.get(self, "create_time")
    @property
    @pulumi.getter(name="deployEndTime")
    def deploy_end_time(self) -> pulumi.Output[str]:
        """
        Time at which the `Rollout` finished deploying.
        """
        # Read-only view of this resource's "deploy_end_time" output.
        return pulumi.get(self, "deploy_end_time")
    @property
    @pulumi.getter(name="deployFailureCause")
    def deploy_failure_cause(self) -> pulumi.Output[str]:
        """
        The reason this deploy failed. This will always be unspecified while the deploy in progress.
        """
        # Read-only view of this resource's "deploy_failure_cause" output.
        return pulumi.get(self, "deploy_failure_cause")
    @property
    @pulumi.getter(name="deployStartTime")
    def deploy_start_time(self) -> pulumi.Output[str]:
        """
        Time at which the `Rollout` started deploying.
        """
        # Read-only view of this resource's "deploy_start_time" output.
        return pulumi.get(self, "deploy_start_time")
    @property
    @pulumi.getter(name="deployingBuild")
    def deploying_build(self) -> pulumi.Output[str]:
        """
        The resource name of the Cloud Build `Build` object that is used to deploy the Rollout. Format is `projects/{project}/locations/{location}/builds/{build}`.
        """
        # Read-only view of this resource's "deploying_build" output.
        return pulumi.get(self, "deploying_build")
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[str]:
        """
        Description of the `Rollout` for user purposes. Max length is 255 characters.
        """
        # Read-only view of this resource's "description" output.
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="enqueueTime")
    def enqueue_time(self) -> pulumi.Output[str]:
        """
        Time at which the `Rollout` was enqueued.
        """
        # Read-only view of this resource's "enqueue_time" output.
        return pulumi.get(self, "enqueue_time")
    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """
        This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
        """
        # Read-only view of this resource's "etag" output.
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="failureReason")
    def failure_reason(self) -> pulumi.Output[str]:
        """
        Reason the build failed. Empty if the build succeeded.
        """
        # Read-only view of this resource's "failure_reason" output.
        return pulumi.get(self, "failure_reason")
    @property
    @pulumi.getter
    def labels(self) -> pulumi.Output[Mapping[str, str]]:
        """
        Labels are attributes that can be set and used by both the user and by Google Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes.
        """
        # Read-only view of this resource's "labels" output.
        return pulumi.get(self, "labels")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Optional. Name of the `Rollout`. Format is projects/{project}/ locations/{location}/deliveryPipelines/{deliveryPipeline}/ releases/{release}/rollouts/a-z{0,62}.
        """
        # Read-only view of this resource's "name" output.
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def state(self) -> pulumi.Output[str]:
        """
        Current state of the `Rollout`.
        """
        # Read-only view of this resource's "state" output.
        return pulumi.get(self, "state")
    @property
    @pulumi.getter(name="targetId")
    def target_id(self) -> pulumi.Output[str]:
        """
        The ID of Target to which this `Rollout` is deploying.
        """
        # Read-only view of this resource's "target_id" output.
        return pulumi.get(self, "target_id")
    @property
    @pulumi.getter
    def uid(self) -> pulumi.Output[str]:
        """
        Unique identifier of the `Rollout`.
        """
        # Read-only view of this resource's "uid" output.
        return pulumi.get(self, "uid")
| StarcoderdataPython |
3387037 | # Celery workers
import base64
import json
import time
from enum import Enum, auto
from typing import Dict
import requests
from celery import Celery
from celery.result import AsyncResult
from celery.task import periodic_task
from backend.blueprints.spa_api.service_layers.leaderboards import Leaderboards
from backend.database.startup import lazy_get_redis, lazy_startup
from backend.database.wrapper.player_wrapper import PlayerWrapper
from backend.database.wrapper.stats.item_stats_wrapper import ItemStatsWrapper
from backend.database.wrapper.stats.player_stat_wrapper import PlayerStatWrapper
from backend.tasks import celeryconfig
from backend.tasks.add_replay import parse_replay
from backend.tasks.middleware import DBTask
from backend.tasks.periodic_stats import calculate_global_distributions
try:
from backend.tasks.training_packs.task import TrainingPackCreation
from backend.utils.metrics import METRICS_TRAINING_PACK_CREATION_TIME
except (ModuleNotFoundError, ImportError):
TrainingPackCreation = None
print("Missing config or AES Key and CRC, not creating training packs")
try:
from backend.tasks.training_packs.training_packs import create_pack_from_replays
except:
pass
# Module-level Celery application; every task below is registered against it.
celery = Celery(__name__, broker=celeryconfig.broker_url)
def create_celery_config():
    """Apply the settings from celeryconfig to the module-level Celery app."""
    celery.config_from_object(celeryconfig)
# Shared wrapper instances reused by the periodic stats tasks below.
player_wrapper = PlayerWrapper(limit=10)
player_stat_wrapper = PlayerStatWrapper(player_wrapper)
@celery.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
    """Register the recurring stat-calculation tasks with the beat scheduler.

    Intervals are in seconds. NOTE(review): several of these tasks are also
    decorated with @periodic_task below, so they may be scheduled twice —
    confirm which schedule is intended to win.
    """
    sender.add_periodic_task(60 * 60 * 24 * 3, calc_global_stats.s(), name='calculate global stats every 3 days')
    sender.add_periodic_task(60 * 60 * 24, calc_global_dists.s(), name='calculate global dists every day')
    sender.add_periodic_task(60 * 60 * 24, calc_leaderboards.s(), name='calculate leaderboards every day')
    # Bug fix: this entry previously scheduled calc_leaderboards a second
    # time; the entry name shows it was meant to run the item-stats task.
    sender.add_periodic_task(60 * 60 * 24 * 3, calc_item_stats.s(), name='calculate item stats every 3 days')
def add_replay_parse_task(replay_to_parse_path, query_params: Dict[str, any] = None, **kwargs):
    """Queue a replay file for asynchronous parsing and return the AsyncResult."""
    task_kwargs = dict(kwargs)
    # The explicit query_params argument always wins over one passed in kwargs.
    task_kwargs['query_params'] = query_params
    return parse_replay_task.delay(replay_to_parse_path, **task_kwargs)
@celery.task(bind=True, priority=5)
def parse_replay_task(self, *args, **kwargs):
    # Thin Celery wrapper; the actual parsing lives in backend.tasks.add_replay.
    return parse_replay(self, *args, **kwargs)
@celery.task(base=DBTask, bind=True, priority=9)
def parse_replay_task_low_priority(self, fn):
    # NOTE(review): this invokes parse_replay_task directly (not .delay), so
    # the parsing runs inside this low-priority task rather than re-queueing.
    return parse_replay_task(replay_to_parse_path=fn, preserve_upload_date=True)
@celery.task(base=DBTask, bind=True, priority=9)
def parse_replay_gcp(self, fn, gcp_url):
    """Upload a base64-encoded replay file to a GCP parsing endpoint.

    :param fn: path of the replay file on disk
    :param gcp_url: endpoint that accepts the base64 payload
    """
    with open(fn, 'rb') as f:
        encoded_file = base64.b64encode(f.read())
    # NOTE(review): timeout=0.5 is very aggressive — confirm it is intentional.
    # The unused `r =` binding was dropped; a non-2xx status now raises so
    # failures show up in the Celery logs instead of vanishing silently.
    response = requests.post(gcp_url, data=encoded_file, timeout=0.5)
    response.raise_for_status()
@periodic_task(run_every=30.0, base=DBTask, bind=True, priority=0)
def calc_global_stats(self):
    """Recompute global player stats and cache the JSON blob in Redis."""
    sess = self.session()
    try:
        result = player_stat_wrapper.get_global_stats(sess)
    finally:
        # Bug fix: the session previously leaked if the stats query raised.
        sess.close()
    redis = lazy_get_redis()
    if redis is not None:
        redis.set('global_stats', json.dumps(result))
        redis.set('global_stats_expire', json.dumps(True))
    print('Done')
    return result
@periodic_task(run_every=24 * 60, base=DBTask, bind=True, priority=0)
def calc_leaderboards(self):
    # NOTE(review): run_every is in seconds, so 24 * 60 is every 24 minutes,
    # not daily as the beat entry registered above suggests — confirm intent.
    leaderboards = Leaderboards.create()
    if lazy_get_redis() is not None:
        lazy_get_redis().set("leaderboards", json.dumps([l.__dict__ for l in leaderboards]))
@periodic_task(run_every=60 * 10, base=DBTask, bind=True, priority=0)
def calc_global_dists(self):
    """Recalculate the global stat distributions (every ten minutes)."""
    sess = self.session()
    try:
        # NOTE(review): the session is opened but never handed to
        # calculate_global_distributions — confirm whether it is needed at all.
        calculate_global_distributions()
    finally:
        # Bug fix: close the session even if the calculation raises.
        sess.close()
@periodic_task(run_every=24 * 60 * 60 * 3, base=DBTask, bind=True, priority=0)
def calc_item_stats(self, session=None):
    """Recompute item stats and cache them in Redis.

    :param session: optional externally-owned DB session; when omitted a
        session is created here and closed before returning.
    """
    owns_session = session is None
    sess = self.session() if owns_session else session
    try:
        results = ItemStatsWrapper.create_stats(sess)
    finally:
        # Bug fix: a locally created session was previously never closed.
        if owns_session:
            sess.close()
    if lazy_get_redis() is not None:
        lazy_get_redis().set('item_stats', json.dumps(results))
@celery.task(base=DBTask, bind=True, priority=9)
def create_training_pack(self, id_, n=10, date_start=None, date_end=None, session=None):
    """Build a training pack for a player and record how long creation took.

    :param id_: player id
    :param n: number of shots to include
    :param date_start: optional lower bound on replay dates
    :param date_end: optional upper bound on replay dates
    :param session: optional externally-owned DB session
    :return: URL of the created training pack
    """
    owns_session = session is None
    sess = self.session() if owns_session else session
    try:
        start = time.time()
        url = TrainingPackCreation.create_from_player(id_, n, date_start, date_end, sess)
        end = time.time()
    finally:
        # Close only sessions we created ourselves.
        if owns_session:
            sess.close()
    # Bug fix: previously observed `start - end`, recording negative durations.
    METRICS_TRAINING_PACK_CREATION_TIME.observe(end - start)
    return url
class ResultState(Enum):
    """Lifecycle states Celery reports for an asynchronous task result."""
    PENDING = auto()
    STARTED = auto()
    RETRY = auto()
    FAILURE = auto()
    SUCCESS = auto()
def get_task_state(id_) -> ResultState:
    """Map a Celery task id to its ResultState.

    Celery reports PENDING for ids it has never seen.
    """
    state_name = AsyncResult(id_, app=celery).state
    return ResultState[state_name]
if __name__ == '__main__':
    # Manual entry point: bootstrap the DB and run the item-stats task inline.
    # NOTE(review): None is passed for the bound `self` argument — this relies
    # on direct (non-queued) invocation semantics; confirm it still works.
    sess = lazy_startup()
    calc_item_stats(None, session=sess())
| StarcoderdataPython |
128542 | <filename>examples/driving.py
from __future__ import division
import pygame
import rabbyt
from math import cos, sin, radians
import random
import os.path
rabbyt.data_directory = os.path.dirname(__file__)
class Car(rabbyt.Sprite):
    """A steerable car sprite with a drop shadow, boost and dust particles."""
    # Class-level particle pools shared by all cars; the main loop renders them.
    boost_particles = set()
    dust_particles = set()
    def __init__(self, name):
        # Sprite texture is "<name>.png"; the tuple is the sprite's shape rect.
        rabbyt.Sprite.__init__(self, name+'.png', (-30, -20, 50, 20))
        self.shadow = rabbyt.Sprite(name+"shadow.png", self.shape)
        self.shadow.alpha = .5
        # These three lines make use of a rather experimental feature of rabbyt.
        # At the end of update() I have left commented out a more traditional
        # method of doing the same thing.
        self.shadow.rot = lambda: self.rot
        self.shadow.x = lambda: self.x - 4
        self.shadow.y = lambda: self.y - 5
        # Here is another method with identical results (only faster):
        #self.shadow.rot = self.attrgetter('rot')
        #self.shadow.x = self.attrgetter('x') - 4
        #self.shadow.y = self.attrgetter('y') - 5
        # Offsets (in car-local coordinates) where dust is emitted.
        self.dust_r = (-15, 10)
        self.dust_l = (-15,-10)
        # Input flags set each frame by the main loop.
        self.accelerating = False
        self.turning_right = False
        self.turning_left = False
        # Boost state: when boost_endtime is in the future, a thrust impulse
        # is applied along boost_rot for boost_length seconds.
        self.boost_endtime = 0
        self.boost_rot = 0
        self.boost_length = 1
        self.xy = [0,0]
        self.velocity = [0,0]
        self.rot = 0
    def boost(self):
        # Ignore boost requests while a boost is already active.
        if self.boost_endtime > rabbyt.get_time():
            return
        # Lock the boost direction to the car's heading at activation time.
        self.boost_rot = self.rot
        self.boost_endtime = rabbyt.get_time() + self.boost_length
    def update(self):
        if self.turning_right:
            self.rot -= 5
        if self.turning_left:
            self.rot += 5
        # Per-frame acceleration accumulator.
        a = [0.0,0.0]
        if self.boost_endtime > rabbyt.get_time():
            # Boost thrust decays linearly over the boost's lifetime.
            f = 3*(self.boost_endtime - rabbyt.get_time())/self.boost_length
            a[0] += cos(radians(self.boost_rot))*f
            a[1] += sin(radians(self.boost_rot))*f
            self.create_boost_particle()
        if self.accelerating:
            a[0] += cos(radians(self.rot))*.9
            a[1] += sin(radians(self.rot))*.9
            self.create_dust_particle(self.dust_r)
            self.create_dust_particle(self.dust_l)
        ff = .9 # Friction Factor
        self.velocity[0] *= ff
        self.velocity[1] *= ff
        self.velocity[0] += a[0]
        self.velocity[1] += a[1]
        self.x += self.velocity[0]
        self.y += self.velocity[1]
        #self.shadow.x = self.x - 4
        #self.shadow.y = self.y - 5
        #self.shadow.rot = self.rot
    def create_boost_particle(self):
        # Ghost copy of the car that grows and fades out behind it.
        s = rabbyt.Sprite(self.texture_id, self.shape)
        lifetime = .5
        s.xy = self.xy
        s.rot = self.rot
        s.scale = rabbyt.lerp(1, 2, dt=lifetime)
        s.alpha = rabbyt.lerp(.8, 0, dt=lifetime)
        Car.boost_particles.add(s)
        # Scheduler removes the particle once its animation has finished.
        rabbyt.scheduler.add(rabbyt.get_time()+lifetime,
                lambda:Car.boost_particles.remove(s))
        # A spinning star that is dragged toward a point behind the car.
        lt = .8
        star = rabbyt.Sprite("star2.png")
        x = random.random()*80-40
        y = random.random()*80-40
        star.x = rabbyt.lerp(self.x+x, self.convert_offset((-20,0))[0]+x, dt=lt)
        star.y = rabbyt.lerp(self.y+y, self.convert_offset((-20,0))[1]+y, dt=lt)
        star.rot = rabbyt.lerp(0, 190*random.choice([-2,-1,1,2]), dt=5, extend="extrapolate")
        star.scale = rabbyt.lerp(random.random()+.2,0, rabbyt.get_time()+lt/2, dt=lt/2)
        star.rgb = 0, .5, .9
        Car.boost_particles.add(star)
        rabbyt.scheduler.add(rabbyt.get_time()+lt,
                lambda:Car.boost_particles.remove(star))
    def create_dust_particle(self, offset):
        # Dust star that drifts away from the given car-local offset and fades.
        s = rabbyt.Sprite("star.png")
        lifetime = 4
        x, y = self.convert_offset(offset)
        r = random.random # (shortcut)
        s.x = rabbyt.lerp(x+r()*10-5, x+r()*60-30, dt=lifetime)
        s.y = rabbyt.lerp(y+r()*10-5, y+r()*60-30, dt=lifetime)
        s.rot = rabbyt.lerp(0, 90*random.choice(range(-2,3)), dt=6)
        s.scale = rabbyt.lerp(1, 4, dt=lifetime)
        now = rabbyt.get_time()
        # Fully transparent for the first half-life, then fades from .1 to 0.
        s.rgba = .7, .5, 0, rabbyt.lerp(.1, 0, now+lifetime/2, now+lifetime)
        Car.dust_particles.add(s)
        rabbyt.scheduler.add(rabbyt.get_time()+lifetime,
                lambda:Car.dust_particles.remove(s))
    def render(self):
        # Draw the shadow first so the car appears on top of it.
        self.shadow.render()
        rabbyt.Sprite.render(self)
if __name__ == "__main__":
pygame.init()
pygame.display.set_mode((800,600), pygame.OPENGL | pygame.DOUBLEBUF)
rabbyt.set_viewport((800, 600))
rabbyt.set_default_attribs()
print """
This is a simple example for using rabbyt.
Use the arrow keys to steer. Press SPACE to boost.
"""
car1 = Car("car")
cars = [car1]
clock = pygame.time.Clock()
while True:
clock.tick(40)
for event in pygame.event.get():
if event.type == pygame.QUIT:
import sys; sys.exit(0)
elif event.type == pygame.KEYDOWN:
if event.key in (pygame.K_ESCAPE, pygame.K_q):
import sys
sys.exit(0)
elif event.key == pygame.K_SPACE:
car1.boost()
pressed = pygame.key.get_pressed()
car1.accelerating = pressed[pygame.K_UP]
car1.turning_right = pressed[pygame.K_RIGHT]
car1.turning_left = pressed[pygame.K_LEFT]
rabbyt.set_time(pygame.time.get_ticks()/1000.0)
for c in cars:
c.update()
rabbyt.scheduler.pump()
rabbyt.clear((.56, .3, 0, 1))
for c in cars:
c.render()
rabbyt.render_unsorted(Car.dust_particles)
rabbyt.render_unsorted(Car.boost_particles)
pygame.display.flip()
| StarcoderdataPython |
1662220 | from ttictoc import TicToc
import db.connections_manager as conn_mng
from utils.algorithms import extract_added_words
from clf.wiki_classifier import WikiClassifier
from clf.classifier_manager import reload_classifier
from lang.langs import Lang
from jobs.base_job import BaseJob
class AddRevsJob(BaseJob):
    """
    Job that fetches new wiki revisions, scores them with the classifier, and
    stores suspected bad revisions in the local database for human review.
    """
    # minimum bad score to add to database for human verify
    _MIN_BAD_SCORE = 0.0  # TODO: change this to 0.5+-
    # number of revisions to fetch
    _NUM_REVS = 50
    # scalar for extra part size to fetch by the formula: new_part_size = _EX_PART_SIZE*part_size
    _EX_PART_SIZE = 2
    # minimum part size to fetch, for effectivity
    _MIN_PART_SIZE = 10
    def __init__(self, lang: Lang):
        """
        Initialize the job.

        :param lang: language whose wiki revisions will be processed
        """
        super().__init__()
        self.lang = lang
    def start(self):
        """
        Run the job:
        * fetch new unverified revisions
        * score the revisions
        * filter all suspected bad revisions
        * insert them into the local table
        """
        t = TicToc()
        t.tic()
        local_conn, wiki_conn = conn_mng.open_connections(self.lang)
        try:
            wiki_classifier = reload_classifier(self.lang.name)
            wikimedia_db, wikishield_db, wikimedia_api = conn_mng.init_sources(wiki_conn.ctx, local_conn.ctx, self.lang)
            max_rev_id = None
            revs, _ = wikimedia_db.fetch_natural_revs(self.lang, self._NUM_REVS, max_rev_id,
                                                      self._EX_PART_SIZE, self._MIN_PART_SIZE)
            for rev in revs:
                diff_text, page_title = wikimedia_api.fetch_rev_diff(rev['wiki_id'], rev['parent_id'])
                rev['page_title'] = page_title
                print(rev)
                words_content = extract_added_words(self.lang, diff_text)
                if len(words_content) > 0:
                    score = wiki_classifier.score_rev(words_content)
                    rev['score'] = bad_score = score[0]
                    if bad_score >= self._MIN_BAD_SCORE:
                        wikishield_db.insert_rev(rev, diff_text, words_content)
            wikishield_db.commit()
        finally:
            # Bug fix: connections previously leaked if anything above raised.
            conn_mng.close_connections(local_conn, wiki_conn)
        t.toc()
        print("add_revs_job: elapsed time = ", t.elapsed, "seconds")  # TODO: remove this
| StarcoderdataPython |
3337676 | #!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2021, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__version__ = '0.0.1'
"""
# mbGDML Predict Set Creator
Creates a mbGDML predict set.
## Requirements
- numpy
- mbgdml
"""
import os
import argparse
import json
import numpy as np
from mbgdml.data import mbGDMLPredictset
def main():
    """Parse CLI arguments and build an mbGDML predict set from a data set and models."""
    parser = argparse.ArgumentParser(
        description='Creates mbGDML predict sets from data sets and models.'
    )
    parser.add_argument(
        'dataset', metavar='dataset', type=str, nargs='?',
        help='Path to mbGDML data set to predict energies and forces.'
    )
    parser.add_argument(
        '-m', '--models', metavar='models', nargs='+', default=[],
        # Bug fix: this help text was a copy-paste of the dataset help.
        help='Paths to mbGDML models used for the predictions.'
    )
    parser.add_argument(
        '--name', metavar='name', type=str, nargs='?', default='predictset',
        # Bug fix: help previously claimed the default was "dataset".
        help='File name for the npz data set. Defaults to predictset.'
    )
    parser.add_argument(
        '--save_dir', metavar='save_dir', type=str, nargs='?', default='.',
        help='Path to save npz data set. Defaults to current directory.'
    )
    parser.add_argument(
        '-o', '--overwrite', action='store_true', help='Overwrite npz data set.'
    )
    args = parser.parse_args()
    print('Making mbGDML predict sets')
    print('Written by <NAME> (@aalexmmaldonado)\n')
    # Ensures paths end in a '/'.
    save_dir = args.save_dir
    if save_dir[-1] != '/':
        save_dir += '/'
    # Checks to see if data set already exists.
    if os.path.isfile(f'{save_dir}{args.name}.npz') and not args.overwrite:
        print(f'{save_dir}{args.name}.npz already exists and overwrite is False.\n')
        raise FileExistsError
    # Creating the predict set.
    predictset = mbGDMLPredictset()
    print(f'Loading the {args.dataset} data set ... ', end='')
    predictset.load_dataset(args.dataset)
    print('done')
    print(f'Loading the {len(args.models)} models ... ', end='')
    predictset.load_models(args.models)
    print('done')
    print(f'Creating the predict set ...')
    predictset.save(args.name, predictset.predictset, save_dir)
    print(f'\nYour predict set is: {save_dir}{args.name}.npz')
if __name__ == "__main__":
main() | StarcoderdataPython |
61983 | <filename>flaskapp/app/craft/network.py
import json
import keras_ocr
import os
import pickle
from app import root_dir
from app.config import Config
from pathlib import Path
import tensorflow as tf
# from app.rc_ocr.rc_stream import weights_from_s3, coder_from_s3
# NOTE(review): data_dir appears unused in this module — confirm before removal.
data_dir = Path(root_dir)
class CRAFT():
    """Bundles a keras-ocr detector and a fine-tuned recognizer into a pipeline."""
    def __init__(self):
        # The decoder maps character labels to characters; its values define
        # the recognizer's alphabet.
        self.decoder_path = os.path.join(
            root_dir, 'data', 'craft', 'character_label_decoder.json')
        with open(self.decoder_path) as f:
            self.decoder = json.load(f)
        recognizer_alphabet = ''.join(self.decoder.values())
        try:
            # Initialize the recognizer with the custom alphabet.
            self.recognizer = keras_ocr.recognition.Recognizer(
                alphabet=recognizer_alphabet,
                weights='kurapan'
            )
            self.recognizer.include_top = True
            self.recognizer.compile()
            # Freeze the backbone so only the head would train.
            for layer in self.recognizer.backbone.layers:
                layer.trainable = False
        except Exception as e:
            print(e)
            # Fall back to the default keras-ocr recognizer.
            self.recognizer = keras_ocr.recognition.Recognizer()
            self.recognizer.compile()
        # Initialize the detector with its default weights.
        self.detector = keras_ocr.detection.Detector()
        # Load the locally stored fine-tuned recognizer weights.
        # recognizer_weights = weights_from_s3('recognizer-weights')
        # Bug fix: the return value of load_weights was bound to an unused
        # variable, suggesting it held the weights; load_weights mutates the
        # model in place.
        self.recognizer.model.load_weights(
            os.path.join(
                root_dir, 'data', 'craft', 'recognizer_registrationtext.h5')
        )
        self.pipeline = keras_ocr.pipeline.Pipeline(recognizer=self.recognizer)
    # Compute pipeline predictions.
    def __call__(self, img):
        # Bug fix: the predictions were computed but never returned.
        # NOTE(review): unlike predict(), callers here must pass a list of
        # images (pipeline.recognize expects a batch) — confirm intended.
        return self.pipeline.recognize(img)
    def get_recognizer(self):
        """Return the underlying recognizer."""
        return self.recognizer
    def get_pipeline(self):
        """Return the combined detection/recognition pipeline."""
        return self.pipeline
    def recognize(self, img):
        """Run only the recognizer on a single image."""
        recognitions = self.recognizer.recognize([img])
        return recognitions
    def detect(self, img):
        """Run only the detector on a single image."""
        detections = self.detector.detect([img])
        return detections
    def predict(self, img):
        """Run the full pipeline on a single image."""
        prediction_groups = self.pipeline.recognize([img])
        return prediction_groups
return prediction_groups
def create_craft():
    """Factory helper returning a freshly initialized CRAFT instance."""
    return CRAFT()
| StarcoderdataPython |
3323627 | <reponame>xylar/cdat
import os, sys, cdms2, vcs, vcs.testing.regression as regression
# Load the sample cloud-top data set and render it with a customized boxfill.
dataset = cdms2.open(os.path.join(vcs.sample_data,"clt.nc"))
data = dataset("clt")
canvas = regression.init()
boxfill = canvas.createboxfill()
# Restrict the fill to a narrow slice of the "classic" colormap so there are
# fewer colors than levels (the case under test).
boxfill.color_1 = 242
boxfill.color_2 = 250
boxfill.colormap = "classic"
canvas.plot(data, boxfill, bg=1)
# Compare the rendered canvas against the stored baseline image.
regression.run(canvas, "test_fewer_colors_than_levels.png")
| StarcoderdataPython |
1625790 | #!/usr/bin/env python
import sys
# Make the application package (two directories up) importable when this
# script is run directly. NOTE(review): the relative path depends on the
# current working directory — confirm it matches how the script is launched.
sys.path.insert(0, '../..')
from app import app, create_tables
# Ensure the schema exists, then start the development server.
create_tables()
app.run()
| StarcoderdataPython |
3280940 | from typing import Optional
from src.data.mongo.secret import get_random_key
def get_access_token(access_token: Optional[str] = None) -> str:
    """Return the provided token, or fall back to a randomly selected key."""
    if access_token is None:
        return get_random_key()
    return access_token
| StarcoderdataPython |
3227526 | <reponame>HaozhengAN/PaddleFlow<gh_stars>0
"""
Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python3
# -*- coding:utf8 -*-
import json
from urllib import parse
from paddleflow.common.exception.paddleflow_sdk_exception import PaddleFlowSDKException
from paddleflow.utils import api_client
from paddleflow.common import api
from paddleflow.queue.queue_info import QueueInfo
from paddleflow.queue.queue_info import GrantInfo
class QueueServiceApi(object):
"""queue service api"""
def __init__(self):
"""
"""
@classmethod
def add_queue(self, host, name, namespace, clusterName, maxResources, minResources=None,
schedulingPolicy=None, location=None, quotaType=None, header=None):
"""
add queue
"""
if not header:
raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
body = {
"namespace": namespace,
"name": name,
"clusterName": clusterName,
"maxResources": maxResources,
}
if minResources:
body['minResources'] = minResources
if schedulingPolicy:
body['schedulingPolicy'] = schedulingPolicy
if location:
body['location'] = location
if quotaType:
body['quotaType'] = quotaType
response = api_client.call_api(method="POST", url=parse.urljoin(host, api.PADDLE_FLOW_QUEUE), headers=header,
json=body)
if not response:
raise PaddleFlowSDKException("Connection Error", "add queue failed due to HTTPError")
data = json.loads(response.text)
if 'message' in data:
return False, data['message']
return True, None
@classmethod
def grant_queue(self, host, username, queuename, header=None):
"""
grant queue
"""
if not header:
raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
body = {
"username": username,
"resourceType": "queue",
"resourceID": queuename
}
response = api_client.call_api(method="POST", url=parse.urljoin(host, api.PADDLE_FLOW_GRANT),
headers=header, json=body)
if not response:
raise PaddleFlowSDKException("Connection Error", "grant queue failed due to HTTPError")
data = json.loads(response.text)
if 'message' in data:
return False, data['message']
return True, None
@classmethod
def ungrant_queue(self, host, username, queuename, header=None):
"""
ungrant queue
"""
if not header:
raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
## call grant
params = {
"username": username,
"resourceType": "queue",
"resourceID": queuename
}
response = api_client.call_api(method="DELETE", url=parse.urljoin(host, api.PADDLE_FLOW_GRANT),
headers=header, params=params)
if not response.text:
return True, None
if not response:
raise PaddleFlowSDKException("Connection Error", "ungrant queue failed due to HTTPError")
data = json.loads(response.text)
if data and 'message' in data:
return False, data['message']
return True, None
@classmethod
def del_queue(self, host, queuename, header=None):
"""
delete queue
"""
if not header:
raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
response = api_client.call_api(method="DELETE", url=parse.urljoin(host, api.PADDLE_FLOW_QUEUE +
"/%s" % queuename),
headers=header)
if not response:
raise PaddleFlowSDKException("Connection Error", "delete queue failed due to HTTPError")
if not response.text:
return True, None
data = json.loads(response.text)
if 'message' in data:
return False, data['message']
return True, None
@classmethod
def stop_queue(self, host, queuename, action=None, header=None):
"""
delete queue
"""
if not header:
raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
if action:
params = {"action":action}
else:
params = {"action":'close'}
response = api_client.call_api(method="PUT", url=parse.urljoin(host, api.PADDLE_FLOW_QUEUE +
"/%s" % queuename),
params=params, headers=header)
if not response:
raise PaddleFlowSDKException("Connection Error", "stop queue failed due to HTTPError")
if not response.text:
return True, None
data = json.loads(response.text)
if 'message' in data:
return False, data['message']
return True, None
@classmethod
def list_queue(self, host, header=None, maxsize=100, marker=None):
"""
list queue
"""
if not header:
raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
if not isinstance(maxsize, int) or maxsize <= 0:
raise PaddleFlowSDKException("InvalidRequest", "maxsize should be int and greater than 0")
params = {
"maxKeys": maxsize
}
if marker:
params['marker'] = marker
response = api_client.call_api(method="GET", url=parse.urljoin(host, api.PADDLE_FLOW_QUEUE),
params=params, headers=header)
if not response:
raise PaddleFlowSDKException("Connection Error", "list queue failed due to HTTPError")
data = json.loads(response.text)
if 'message' in data:
return False, data['message']
queueList = []
if len(data['queueList']):
for queue in data['queueList']:
queueinfo = QueueInfo(queue['name'], queue['status'], queue['namespace'], queue['clusterName'], None,
queue['maxResources'], queue['minResources'], None, None,
queue['createTime'], queue['updateTime'])
queueList.append(queueinfo)
return True, queueList, data.get('nextMarker', None)
@classmethod
def show_queue(self, host, queuename, header=None):
"""
show queue info
"""
if not header:
raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
response = api_client.call_api(method="GET", url=parse.urljoin(host, api.PADDLE_FLOW_QUEUE + "/%s" % queuename),
headers=header)
if not response:
raise PaddleFlowSDKException("Connection Error", "show queue failed due to HTTPError")
data = json.loads(response.text)
if 'message' in data:
return False, data['message']
queueInfo = QueueInfo(data['name'], data['status'], data['namespace'], data['clusterName'], None,
data['maxResources'], data.get('minResources'), data.get('location'),
data.get('schedulingPolicy'), data['createTime'], data['updateTime'])
return True, queueInfo
@classmethod
def show_grant(self, host, username=None, header=None, maxsize=100):
    """
    List grant records, keeping only grants on queue resources.

    :param host: base URL of the PaddleFlow server.
    :param username: optional filter; when given, only that user's grants
        are requested.
    :param header: auth headers obtained at login; required.
    :param maxsize: page size, sent to the server as ``maxKeys``.
    :return: ``(True, [GrantInfo, ...])`` on success or ``(False, message)``
        when the server answers with an error body.
    :raises PaddleFlowSDKException: when not logged in, ``maxsize`` is
        invalid, or the HTTP request fails.
    """
    if not header:
        raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
    if not isinstance(maxsize, int) or maxsize <= 0:
        raise PaddleFlowSDKException("InvalidRequest", "maxsize should be int and greater than 0")
    params = {
        "maxKeys": maxsize
    }
    if username:
        params['username'] = username
    response = api_client.call_api(method="GET", url=parse.urljoin(host, api.PADDLE_FLOW_GRANT),
                                   params=params, headers=header)
    if not response:
        raise PaddleFlowSDKException("Connection Error", "show grant failed due to HTTPError")
    data = json.loads(response.text)
    if 'message' in data:
        return False, data['message']
    grantList = []
    if len(data['grantList']):
        for grant in data['grantList']:
            # Only queue grants are surfaced; other resource types are skipped.
            if grant['resourceType'] != "queue":
                continue
            grantinfo = GrantInfo(grant['userName'], grant['resourceID'])
            grantList.append(grantinfo)
    return True, grantList
@classmethod
def flavour(self, host, header=None):
    """
    List the available flavours (resource templates).

    :param host: base URL of the PaddleFlow server.
    :param header: auth headers obtained at login; required.
    :return: ``(True, data)`` on success (the decoded JSON body) or
        ``(False, message)`` when the server answers with an error body.
    :raises PaddleFlowSDKException: when not logged in or the HTTP request fails.

    FIX: the original success return line was corrupted by a stray
    dataset-delimiter artifact; restored to ``return True, data``.
    """
    if not header:
        raise PaddleFlowSDKException("InvalidRequest", "paddleflow should login first")
    response = api_client.call_api(method="GET", url=parse.urljoin(host, api.PADDLE_FLOW_FLAVOUR),
                                   headers=header)
    if not response:
        raise PaddleFlowSDKException("Connection Error", "list flavour failed due to HTTPError")
    data = json.loads(response.text)
    if 'message' in data:
        return False, data['message']
    return True, data
# ===== dataset sample boundary (Starcoder id 1660842, repo junekim00/ITP115) =====
# <NAME>
# ITP115, Fall 2019
# Final Project Part 1
# <EMAIL>
# This program will define the MenuItem class for the final program.
class MenuItem:
    """A single item on a restaurant menu: name, category, price, description.

    ``price`` is always stored as a float so arithmetic and string
    formatting behave consistently.
    """

    def __init__(self, name, itemType, price, description):
        self.name = name
        self.itemType = itemType
        self.price = float(price)  # coerce so "2.5" and 2.5 behave the same
        self.description = description

    # ----- getters -----
    def getName(self):
        return self.name

    def getType(self):
        return self.itemType

    def getPrice(self):
        return self.price

    def getDescription(self):
        return self.description

    # ----- setters -----
    def setName(self, newName):
        self.name = newName

    def setType(self, newType):
        self.itemType = newType

    def setPrice(self, newPrice):
        # FIX: coerce to float like __init__ does, so setPrice("3.0") keeps
        # self.price numeric and __str__ output consistent.
        self.price = float(newPrice)

    def setDescription(self, newDescription):
        self.description = newDescription

    def __str__(self):
        """Readable one-line summary, e.g. "Tea (drink): $2.5\\n\\tHot tea"."""
        a = self.getName() + " (" + self.getType() + "): $" + str(self.getPrice()) + "\n\t" + self.getDescription()
        return a
# ===== end of dataset sample =====
# ===== dataset sample boundary (Starcoder id 1763880, repo ceprio/xl_vb2py) =====
#!/usr/bin/python
"""
__version__ = "$Revision: 1.10 $"
__date__ = "$Date: 2005/12/13 11:13:22 $"
"""
"""
use a comma separated value file as a database
Author: <NAME>
eMail: <EMAIL>
Date: 21-Mar-02
"""
import PythonCard
from PythonCard import dialog, model
# INI file holding app settings: the CSV data file path ('data'), the last
# selected record index ('selected') and the sort order ('sortBy').
configFile = 'custdb.ini'
# Column order of the comma-separated data file; one dict key per field.
# Each data row must contain exactly len(columns) values (18 commas).
columns = [ 'name',
            'firstName',
            'title',
            'gender',
            'function',
            'company',
            'telBusi',
            'telPriv',
            'telFax',
            'telMobi',
            'email',
            'homepage',
            'zipCode',
            'city',
            'street',
            'notes',
            'res1',   # reserved/spare fields
            'res2',
            'res3']
def sortByCompany(a, b):
    """cmp-style comparator: order two record dicts by their 'company' field."""
    left = a['company']
    right = b['company']
    if left < right:
        return -1
    if left > right:
        return 1
    return 0
def sortByName(a, b):
    """cmp-style comparator: order two record dicts by their 'name' field."""
    left = a['name']
    right = b['name']
    if left < right:
        return -1
    if left > right:
        return 1
    return 0
class CustDbStack(model.Background):
    """PythonCard window backing a simple CSV-file contact database.

    Records are lines in a comma-separated file (path taken from the INI
    config); commas inside values are stored as octal r'\054' and newlines
    as r'\012'.  Python 2 / PythonCard code.
    """

    def on_initialize(self, event):
        """Load config and the CSV data file, populate the company listbox."""
        import ConfigParser
        self.parser = ConfigParser.ConfigParser()
        self.parser.read( configFile )
        "put the company list into the listbox"
        self.dataFile = self.parser.get('ConfigData','data')
        rows = open(self.dataFile,'r').readlines()
        self.selected = self.parser.getint('ConfigData','selected')
        self.rowsDict = []  # list of per-record dicts keyed by `columns`
        line = 0
        for r in rows:
            line += 1
            r = r[:-1] # remove the \n here
            r = r.replace(r'\012','\n') # convert coded 012 back to \n
            # 18 commas == 19 fields, one per entry in `columns`
            if r.count(',') == 18:
                d = {}
                i = 0
                values = r.split(',')
                for k in columns:
                    d[k]=values[i].replace(r'\054',',') # kk convert octal coded comma to real comma
                    i+=1
                self.rowsDict.append(d)
            else:
                # malformed line: alert the user and close the app
                msg = "Data inconsistent: number of commas = %s in line: %s in file: %s"%(r.count(','), line, self.dataFile)
                dlg = dialog.alertDialog(self, msg, '%s inconsistent'%self.dataFile)
                self.close()
        self.components.companyList.insertItems(self.getCompanyList(), 0)
        self.components.sortBy.stringSelection = self.parser.get('ConfigData','sortBy')
        self.showSelected()

    def getCompanyList(self):
        """Sort rowsDict per the configured order and return display labels."""
        l = []
        if self.parser.get('ConfigData','sortBy') == 'name':
            self.rowsDict.sort(sortByName)       # Python 2 cmp-based sort
        else:
            self.rowsDict.sort(sortByCompany)
        for r in self.rowsDict:
            if self.parser.get('ConfigData','sortBy') == 'name':
                if not r['name']:
                    l.append(', '+r['company'])
                else:
                    l.append(r['name']+', '+r['firstName'])
            else:
                l.append( '%s, %s'%(r['company'], r['name']))
        return l

    def showSelected(self):
        """Clamp self.selected into range, copy that record into the form
        widgets, and persist the selection index back to the INI file."""
        if self.selected <0:
            self.selected = 0
        if self.selected >= len(self.rowsDict):
            self.selected = len(self.rowsDict)-1
        self.components.firstName.text = self.rowsDict[self.selected]['firstName']
        self.components.name.text = self.rowsDict[self.selected]['name']
        self.components.street.text = self.rowsDict[self.selected]['street']
        self.components.zipCode.text = self.rowsDict[self.selected]['zipCode']
        self.components.city.text = self.rowsDict[self.selected]['city']
        self.components.email.text = self.rowsDict[self.selected]['email']
        self.components.company.text = self.rowsDict[self.selected]['company']
        self.components.notes.text = self.rowsDict[self.selected]['notes']
        self.components.title.text = self.rowsDict[self.selected]['title']
        self.components.function.text = self.rowsDict[self.selected]['function']
        self.components.telMobi.text = self.rowsDict[self.selected]['telMobi']
        self.components.telBusi.text = self.rowsDict[self.selected]['telBusi']
        self.components.telFax.text = self.rowsDict[self.selected]['telFax']
        self.components.telPriv.text = self.rowsDict[self.selected]['telPriv']
        self.components.homepage.text = self.rowsDict[self.selected]['homepage']
        self.components.selectedFld.text = str(self.selected)
        self.components.companyList._setSelection(int(self.selected))
        self.parser.set('ConfigData','selected',self.selected)
        self.parser.write(open(configFile,'w'))

    def on_sortBy_select(self, event):
        """Persist the new sort order and rebuild the company listbox."""
        self.parser.set('ConfigData','sortBy', event.target.stringSelection)
        self.parser.write(open(configFile,'w'))
        # change the content of the companyList
        self.components.companyList.clear()
        self.components.companyList.insertItems(self.getCompanyList(), 0)

    def on_companyList_select(self, event):
        """Show the record the user picked in the listbox."""
        self.selected = event.target.selection
        self.showSelected()

    def on_loseFocus(self, event):
        """Copy an edited field back into the current record and save."""
        if event.target.name in ['firstName', 'name', 'street', 'zipCode',
                                 'city', 'email', 'company', 'title',
                                 'function', 'telMobi', 'telBusi', 'telFax',
                                 'telPriv', 'homepage', 'notes']:
            self.rowsDict[self.selected][event.target.name] = event.target.text
            self.store()

    def on_selectedFld_keyUp(self, event):
        """Jump to the record index typed into the selection field."""
        try:
            self.selected = int(event.target.text)
        except ValueError:
            self.selected = 0
        self.showSelected()

    def on_nextButt_mouseDown(self, event):
        """Advance to the next record (showSelected clamps at the end)."""
        self.selected += 1
        self.showSelected()

    def on_prevButt_mouseDown(self, event):
        """Step back to the previous record (showSelected clamps at 0)."""
        self.selected -= 1
        self.showSelected()

    def on_newButt_mouseDown(self, event):
        """Append an empty record and select it."""
        d = {}
        i = 0
        for k in columns:
            d[k]= ''
        d['gender'] = 'm' # default
        self.rowsDict.append(d)
        # NOTE(review): len(rowsDict) is one past the new record's index;
        # showSelected clamps it back to the last record — confirm intended.
        self.selected = len(self.rowsDict)
        self.showSelected()

    def on_delButt_mouseUp(self, event):
        """Ask for confirmation, then delete the current record and save."""
        result = dialog.messageDialog(self, 'Are you sure you want to delete the entry: %s ?'%self.rowsDict[self.selected]['name'], 'Delete Entry.' )
        if result.accepted:
            print "messageDialog result:\naccepted: %s\nreturnedString: %s" % (result.accepted, result.returnedString)
            del self.rowsDict[self.selected]
            self.selected -= 1
            self.showSelected()
            self.store()

    def store(self):
        """Serialize rowsDict back to the CSV file (commas/newlines re-encoded
        as octal escapes), sorted by raw line text."""
        lines = []
        for r in self.rowsDict:
            l = ''
            for c in columns:
                txt=r[c].replace(',',r'\054') # convert comma to ocal representation
                l = l + txt + ','
            l = l.replace('\n',r'\012') # convert \n to it's octal coding
            lines.append(l[:-1]+'\n') # give'em the \n back
        lines.sort() # this is not quite right because it sorts 'A-Za-z'
        file = open(self.dataFile,'wb') # so we'll independantly of os terminate w/ \n
        file.writelines(lines)
        file.close()
if __name__ == '__main__':
    # Launch the PythonCard application with CustDbStack as the main window.
    app = model.Application(CustDbStack)
    app.MainLoop()
# ===== end of dataset sample =====
# ===== dataset sample boundary (Starcoder id 3303766) =====
"""Run sickle.
The trimmers are specified using the TRIMMOMATIC_TRIMMERS environment variable,
e.g.:
export TRIMMOMATIC_TRIMMERS="ILLUMINACLIP:TruSeq3-PE.fa:2:30:10 LEADING:3 TRAILING:3 SLIDINGWINDOW:4:15 MINLEN:36"
"""
import os
from smarttoolbase import SmartTool, Command, parse_args
from dtoolcore.utils import generate_identifier
# Single sickle paired-end invocation; {forward_read_fpath} and
# {reverse_read_fpath} are filled in by TrimSeqsTrimmomatic.pre_run.
BASE_COMMANDS = [
    Command("sickle pe -t sanger -f {forward_read_fpath} -r {reverse_read_fpath} -o sickled_1.fq -p sickled_2.fq -s trash.fq") # NOQA
]
# Files produced by the command above that stage_outputs copies into the
# output dataset (trash.fq with the orphaned reads is deliberately dropped).
OUTPUTS = [
    'sickled_1.fq',
    'sickled_2.fq',
]
def find_paired_read(dataset, identifier):
    """Return the identifier of the mate read via the dataset's 'pair_id' overlay."""
    return dataset.get_overlay('pair_id')[identifier]
class TrimSeqsTrimmomatic(SmartTool):
    """Smart tool that runs sickle on a pair of reads and stages the results.

    NOTE(review): the class name says Trimmomatic but BASE_COMMANDS runs
    sickle — presumably copied from a trimmomatic tool; confirm the name.
    """

    def pre_run(self, identifier):
        """Resolve the forward/reverse read paths for the command template.

        `identifier` is the forward read; its mate is found via the
        'pair_id' overlay.
        """
        self.base_command_props['forward_read_fpath'] = self.input_dataset.item_content_abspath(identifier) # NOQA
        paired_read_identifier = find_paired_read(self.input_dataset, identifier) # NOQA
        self.base_command_props['reverse_read_fpath'] = self.input_dataset.item_content_abspath(paired_read_identifier) # NOQA

    def stage_outputs(self, identifier):
        """Copy the trimmed output files into the output dataset.

        Each output gets 'from' provenance metadata plus an 'is_read1'
        flag; the two items are then cross-linked with 'pair_id' overlays.
        """
        read1_handle = None
        read2_handle = None
        for filename in self.outputs:
            # Relative path inside the output dataset is <useful_name>/<file>.
            useful_name = self.input_dataset.get_overlay(
                'useful_name'
            )[identifier]
            fpath = os.path.join(self.working_directory, filename)
            relpath = os.path.join(useful_name, filename)
            out_id = self.output_proto_dataset.put_item(fpath, relpath)
            self.output_proto_dataset.add_item_metadata(
                out_id,
                'from',
                "{}/{}".format(self.input_dataset.uri, identifier)
            )
            # Add is_read1 overlay.
            # Files named *_1* are the forward reads (see OUTPUTS).
            if filename.find("_1") != -1:
                self.output_proto_dataset.add_item_metadata(
                    out_id,
                    "is_read1",
                    True
                )
                read1_handle = out_id
            else:
                self.output_proto_dataset.add_item_metadata(
                    out_id,
                    "is_read1",
                    False
                )
                read2_handle = out_id
        # Add pair_id overlay.
        self.output_proto_dataset.add_item_metadata(
            read1_handle,
            "pair_id",
            generate_identifier(read2_handle)
        )
        self.output_proto_dataset.add_item_metadata(
            read2_handle,
            "pair_id",
            generate_identifier(read1_handle)
        )
def main():
    """CLI entry point: trim the paired reads named on the command line."""
    args = parse_args()
    with TrimSeqsTrimmomatic(args.input_uri, args.output_uri) as tool:
        tool.base_commands = BASE_COMMANDS
        tool.outputs = OUTPUTS
        tool(args.identifier)


if __name__ == "__main__":
    main()
# ===== end of dataset sample =====
# ===== dataset sample boundary (Starcoder id 3213692, repo Setti7/Stardew-Web) =====
import json
import random
from datetime import datetime, timedelta
from django.db.models import Sum, Avg, Max
from django.shortcuts import render
from rest_framework.authtoken.models import Token
from .models import UserData, Profile
def home_page(request):
    """Render the static landing page with an empty template context."""
    return render(request, 'Data/home_page.html', context={})
def ranking(request):
    """Dashboard view: leaderboard, weekly score graph, the logged-in user's
    uploads, and aggregate statistics over all Profile/UserData rows.

    Scores appear to be measured in seconds of play (divided by 3600 for
    hours and 60 for minutes below) — confirm against the models.
    """
    # User token:
    # ----------------------------------------------------
    token = Token.objects.get(user=request.user) if request.user.is_authenticated else None
    # Best Contributors table:
    # ----------------------------------------------------
    # Get best first 25 contributors from db
    best_friends = Profile.objects.order_by('-score')[:25]
    # Format data to json for frontend
    bffs = [{'user': profile.user, 'score': profile.score, 'position': i + 1} for i, profile in enumerate(best_friends)]
    # Graph data:
    # ----------------------------------------------------
    # Creating list of days of this week
    days_this_week = []
    today = datetime.today().date()
    for i in range(8):
        date = (today + timedelta(days=-i))
        days_this_week.append(str(date))
    # Creating list of scores from this week
    # NOTE(review): one DB query per day (8 queries); could be one aggregate.
    score_this_week = []
    for i in range(8):
        score = sum([obj.score for obj in
                     UserData.objects.filter(uploaded_at__date=datetime.today().date() - timedelta(days=i))])
        score_this_week.append(score)
    # Zipping scores and dates into one dict
    data = dict(zip(days_this_week, score_this_week))
    # Progress Bar data:
    # ----------------------------------------------------
    score_sum = Profile.objects.aggregate(Sum('score'))['score__sum']
    score_sum = score_sum if score_sum is not None else 0
    # Percent of individual help
    total_time_played = round(score_sum / 3600, 2)
    if request.user.is_authenticated and score_sum > 0:
        help_percent = round(100 * (Profile.objects.get(user=request.user).score) / score_sum, 1)
    else:
        help_percent = 0
    # Data Submitted:
    # ----------------------------------------------------
    if request.user.is_authenticated:
        uploads = UserData.objects.filter(user=request.user).order_by('-uploaded_at')
        user_data = []
        for upload in uploads:
            date = upload.uploaded_at.strftime('%Y-%m-%d %H:%M:%S')
            user_data.append({"score": upload.score, "id": upload.id, "uploaded_at": date})
    else:
        user_data = {}
    # Number of users:
    # ----------------------------------------------------
    n_users = Profile.objects.all().count()
    # Average number of frames per user
    # ----------------------------------------------------
    avg_user_score = Profile.objects.aggregate(Avg('score'))['score__avg']
    avg_user_score = round(avg_user_score) if avg_user_score is not None else 0
    # Average number of sessions per user
    # ----------------------------------------------------
    avg_session_score = UserData.objects.aggregate(Avg('score'))['score__avg']
    avg_session_score = round(avg_session_score) if avg_session_score is not None else 0
    # NOTE(review): avg_session_score was just coerced to a number above, so
    # this conditional's else-branch is unreachable.
    avg_session_time = round(avg_session_score / 60, 2) if avg_session_score is not None else 0
    # Top 3 users
    # ----------------------------------------------------
    top_3_score_sum = Profile.objects.order_by('-score')[:3].aggregate(Sum('score'))['score__sum']
    if top_3_score_sum is not None and score_sum > 0:
        top_3_score_percent = round(100 * top_3_score_sum / score_sum, 2)
    else:
        top_3_score_percent = 0
    # Longest fishing session
    # ----------------------------------------------------
    # Pick one user at random among those who share the maximum score.
    max_score = UserData.objects.aggregate(Max('score'))['score__max']
    max_score_users = UserData.objects.filter(score=max_score)
    if max_score_users is not None and max_score is not None:
        rand_user = random.randint(0, len(max_score_users) - 1)
        max_score_user = [user for user in max_score_users][rand_user]
        time = round(max_score / 60, 1)
    else:
        max_score = 0
        max_score_user = 'admin'
        time = 0
    longest_session_dict = {'max_score': max_score, 'user': max_score_user, 'time': time}
    return render(request, 'Data/dashboard.html', context={
        'bffs_dict': bffs,
        'data': json.dumps(data),
        'score_sum': score_sum,
        'total_time_played': total_time_played,
        'user_data': user_data,
        'help_percent': help_percent,
        'n_users': n_users,
        'avg_user_score': avg_user_score,
        'avg_session_score': avg_session_score,
        'avg_session_time': avg_session_time,
        'top_3_score_percent': top_3_score_percent,
        'longest_session': longest_session_dict,
        'token': token
    })
# ===== end of dataset sample =====
# ===== dataset sample boundary (Starcoder id 3394637) =====
#!/usr/bin/python
"""This test tries to open and create a file in multiple modes
If any errors occur the test displays a "FAILED" message"""
import os
import subprocess
import sys
import pysec
import pysec.io
import pysec.io.fcheck
import pysec.io.fd
import pysec.io.fs
import pysec.io.temp
# Scratch file exercised by the test.
# NOTE(review): the literal ends with a trailing space ('.tmp ') —
# presumably unintentional; confirm before depending on the exact path.
FILE_NAME = '/tmp/__pysec_open_test.tmp '
# Human-readable labels for each pysec open mode under test.
# NOTE(review): FO_READNEW appears twice (first and last entry); in a dict
# literal the last duplicate wins, leaving 11 distinct modes, and iteration
# order drives the FILE_EXCEPTIONS indices below.
FILE_MODES = {
    pysec.io.fd.FO_READNEW: 'READNEW',
    pysec.io.fd.FO_READEX: 'READEX',
    pysec.io.fd.FO_WRNEW: 'WRNEW',
    pysec.io.fd.FO_WREX: 'WREX',
    pysec.io.fd.FO_WREXTR: 'WREXTR',
    pysec.io.fd.FO_APNEW: 'APNEW',
    pysec.io.fd.FO_APEX: 'APEX',
    pysec.io.fd.FO_APEXTR: 'APEXTR',
    pysec.io.fd.FO_READ: 'READ',
    pysec.io.fd.FO_WRITE: 'WRITE',
    pysec.io.fd.FO_APPEND: 'APPEND',
    pysec.io.fd.FO_READNEW: 'READNEW'
}
# This Array marks the iteration where we generate an exception
# If the exception is not expected we mark the test as FAILED
# For example in the last iteration we attempt to open a file using
# FO_READNEW, but the file already exists
FILE_EXCEPTIONS = 2, 5, 11
def is_file_open(pid, name):
    """Return True if `lsof` reports that process `pid` has a file whose
    path contains `name` open, False otherwise.

    FIX: the original fell off the end of the loop and implicitly returned
    None on the not-found path; it now always returns a bool.
    """
    sp = subprocess.Popen(['lsof', '-p', str(pid), '-F', 'n'],
                          stdout=subprocess.PIPE)
    for line in sp.stdout:
        if name in line:
            return True
    return False
def main():
sys.stdout.write("BASIC OPEN TEST: ")
pid = os.getpid()
if os.path.exists(FILE_NAME):
os.unlink(FILE_NAME)
for step, test_mode in enumerate(FILE_MODES):
test_name = FILE_MODES[test_mode]
try:
with pysec.io.fd.File.open(FILE_NAME, test_mode) as ftest:
if not is_file_open(pid, FILE_NAME):
print "FAILED %s, file %r is not open" % (test_name, FILE_NAME)
return
# Check if the file has been closed
if is_file_open(pid, FILE_NAME):
print "FAILED %s, file %r is not closed" % (test_name, FILE_NAME)
return
except OSError,ex:
# Check if the file has been closed
if is_file_open(pid, FILE_NAME):
print "FAILED %s, file %r is not closed" % (test_name, FILE_NAME)
return
if step not in FILE_EXCEPTIONS:
print "FAILED %s, unexpected error %r" % (test_name, FILE_NAME, str(ex))
return
else:
if step in FILE_EXCEPTIONS:
print "FAILED %s, expected error" % test_name
return
sys.stdout.write("PASSED\n")
if __name__ == "__main__":
main()
os.remove(FILE_NAME)
# ===== dataset sample boundary (Starcoder id 1600556) =====
import sys
import numpy as np
from scipy.stats import describe
import os
import time
import matplotlib
import pandas as pd
from sklearn.base import ClassifierMixin, BaseEstimator
import warnings
import scipy
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
import itertools
import multiprocessing as mp
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from numpy.linalg import svd
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import plot_tree
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RandomizedSearchCV
from sklearn.linear_model import LassoLars
h = .02  # step size in the mesh
delimiter = ";"  # field separator used by the CSV-style report printers below
# Embed fonts as TrueType (type 42) so text in PDF/PS output stays editable.
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
# plt.interactive(True)
# 8-bit RGB palette chosen for graph readability; see:
# http://ksrowell.com/blog-visualizing-data/2012/02/02/
# optimal-colors-for-graphs/
MY_BLUE = (57, 106, 177)
MY_ORANGE = (218, 124, 48)
MY_GREEN = (62, 150, 81)
MY_RED = (204, 37, 41)
MY_BLACK = (83, 81, 84)
MY_GOLD = (148, 139, 61)
MY_VIOLET = (107, 76, 154)
MY_BROWN = (146, 36, 40)
MY_OWN = (25, 150, 10)
def get_color(COLOR_TUPLE_255):
    """Scale an 8-bit RGB tuple into matplotlib's 0-1 float range."""
    scaled = []
    for channel in COLOR_TUPLE_255:
        scaled.append(channel / 255)
    return scaled
def plot(nr_samples, error_rates,
         title='error rate vs. # of train samples'):
    """Plot error rate against training-set size and save the figure.

    NOTE(review): `file_name` is derived from the title but never used —
    the figure is always written to ./muscle.pdf (the title-based savefig
    is commented out below); confirm which destination is intended.
    """
    plt.plot(nr_samples, error_rates, color=get_color(MY_RED))
    plt.title(title)
    plt.xlabel('# of train samples')
    plt.ylabel('Error rate')
    file_name = title.replace(': ', '_').replace(' ', '_')
    file_name = file_name.replace(', ', '_').replace('.', '-')
    # plt.savefig("./iris_" + file_name + "2.pdf")
    plt.savefig("./muscle.pdf")
    # Clear and close so repeated calls do not accumulate figures.
    plt.clf()
    plt.close()
class LeastSquareClassifier(BaseEstimator, ClassifierMixin):
    """Binary {-1, +1} classifier fit by (minimum-norm) least squares.

    A bias column of ones is prepended so the decision function is affine;
    the weight vector is computed by the module-level `find_w` helper.
    """

    def add_ones_short(self, X):
        """Return X with a leading column of ones (the bias/intercept term)."""
        ones = np.ones(X.shape[0])
        X = np.concatenate((ones[:, np.newaxis], X), axis=1)
        return X

    def fit(self, X, y=None):
        """Solve the least-squares problem for the affine design matrix.

        FIX: now returns self, as the scikit-learn estimator API requires
        (the original returned None, breaking fluent/pipeline usage).
        """
        # make the classifier affine
        X = self.add_ones_short(X)
        self.w = find_w(X, y)
        return self

    def predict(self, X, y=None):
        """Return the predicted labels sign(Xw) as a list in {-1.0, 0.0, 1.0}."""
        X = self.add_ones_short(X)
        return [x for x in np.sign(np.matmul(X, self.w))]

    def score(self, X, y):
        """Return the mean accuracy of sign(Xw) against y."""
        X = self.add_ones_short(X)
        n, _ = X.shape
        y_hat = np.sign(np.matmul(X, self.w))
        score = np.sum(y_hat == y)
        return score / n

    def predict_proba(self, X):
        """Map raw scores Xw to pseudo-probabilities for classes (-1, +1).

        Scores beyond [-1, 1] are clipped to full confidence; otherwise
        |score| is rescaled to a [0.5, 1] confidence for the predicted class.
        """
        X = self.add_ones_short(X)
        ys = np.matmul(X, self.w)
        probs = []
        for y in ys:
            if y < 0:
                if y < -1:
                    probs.append([1, 0.0])
                else:
                    y = 0.5 * np.abs(y) + 0.5
                    probs.append([y, 1 - y])
            else:
                if y > 1:
                    probs.append([0.0, 1])
                else:
                    y = 0.5 * np.abs(y) + 0.5
                    probs.append([1 - y, y])
        probs = np.array(probs)
        return probs
# Benchmark suite of estimators keyed by display name.
# probability=True on the SVMs so predict_proba (needed for AUC) is available.
classifiers = {
    "Least Squares": LeastSquareClassifier(),
    "Nearest Neighbors": KNeighborsClassifier(3),
    "SVM": SVC(kernel="linear", C=0.025, probability=True),
    "RBF SVM": SVC(gamma=2, C=1, probability=True),
    "Gaussian Process": GaussianProcessClassifier(1.0 * RBF(1.0)),
    "Decision Tree": DecisionTreeClassifier(max_depth=None),
    "Random Forest": RandomForestClassifier(max_depth=5, n_estimators=10,
                                            max_features=1),
    "Neural Net": MLPClassifier(alpha=0.01, max_iter=1000),
    "AdaBoost": AdaBoostClassifier(),
    "Naive Bayes": GaussianNB(),
    "QDA": QuadraticDiscriminantAnalysis()
}
def find_w_X_more_rows_than_cols(X, y):
    """Least-squares weights for a tall design matrix (rows >= cols).

    Solves the normal equations (X^T X) w = X^T y.

    Improvement: uses np.linalg.solve instead of forming the explicit
    inverse of X^T X — same result, cheaper and numerically more stable.

    :param X: (H, W) design matrix with H >= W.
    :param y: length-H target vector.
    :return: length-W weight vector w_hat.
    :raises numpy.linalg.LinAlgError: if X^T X is singular.
    """
    H, W = X.shape
    assert H >= W
    X_t = X.transpose()
    w_hat = np.linalg.solve(np.matmul(X_t, X), np.matmul(X_t, y))
    return w_hat
def find_w_X_more_cols_than_rows(X, y):
    """Minimum-norm interpolating weights for a wide design matrix (rows < cols).

    Computes w = X^T (X X^T)^{-1} y, the least-norm solution of X w = y.

    Improvement: uses np.linalg.solve instead of forming the explicit
    inverse of X X^T — same result, cheaper and numerically more stable.

    :param X: (H, W) design matrix with H < W.
    :param y: length-H target vector.
    :return: length-W weight vector w_hat.
    :raises numpy.linalg.LinAlgError: if X X^T is singular.
    """
    H, W = X.shape
    assert H < W
    X_t = X.transpose()
    w_hat = np.matmul(X_t, np.linalg.solve(np.matmul(X, X_t), y))
    return w_hat
def find_w_svd(X, y):
    """Minimum-norm least-squares weights via the reduced-SVD pseudo-inverse.

    With X = U S Vh, returns pinv(X) @ y = Vh^T diag(1/s) U^T @ y.
    Requires a wide (or square) matrix: cols >= rows.
    """
    n_rows, n_cols = X.shape
    assert n_cols >= n_rows
    u, sigma, vh = svd(a=X, full_matrices=False)
    inv_sigma = 1 / sigma
    # u * inv_sigma broadcast over the last axis == U @ diag(1/s)
    scaled = np.matmul(u * inv_sigma[..., None, :], vh)
    return np.matmul(scaled.T, y)
def find_w(X, y):
    """Dispatch to the appropriate least-squares solver by matrix shape.

    Tall/square matrices go through the normal equations; wide matrices
    use the SVD pseudo-inverse (the direct X^T (X X^T)^{-1} y route,
    find_w_X_more_cols_than_rows, is available but unused here).
    """
    n_rows, n_cols = X.shape
    if n_rows >= n_cols:
        return find_w_X_more_rows_than_cols(X, y)
    return find_w_svd(X, y)
def take_n_samples_each_clas(X, Y, nr_class, nr_samples_each_class):
    """Draw a random subset of `nr_samples_each_class` rows from each class.

    Assumes the rows of X/Y are grouped by class in equally sized,
    contiguous chunks.  Within each chunk a random sample (without
    replacement) is taken; the per-class subsets are then concatenated
    and returned as (features, labels).
    """
    total, _ = X.shape
    per_class = total // nr_class
    feature_parts = []
    label_parts = []
    for cls in range(nr_class):
        lo = cls * per_class
        hi = lo + per_class
        # Random sample without replacement inside this class's chunk.
        chosen = np.random.choice(per_class, nr_samples_each_class,
                                  replace=False)
        feature_parts.append(X[lo:hi, ...][chosen])
        label_parts.append(Y[lo:hi][chosen])
    return (np.concatenate(feature_parts, axis=0),
            np.concatenate(label_parts, axis=0))
def cross_validate(X, Y, classifier, cv_count=6, nr_class=2, repeat=3,
                   col_names=None, train_limit=None):
    """
    Cross-validate the model.
    :param X: the input matrix of features
    We expect that the samples for each class are of
    the same number and arranged in the continuous way in
    the input dataset.
    :param Y: the input vector of correct predictions
    :param classifier: estimator implementing fit/score/predict_proba;
        NOTE(review): it is refit in place each fold, not cloned.
    :param cv_count: cross validation count
    how many subsets of the data we want, where
    one of the subsets is the validation set
    and the remaining subsets create constitute
    the train set. We have cv_count iterations,
    where each of the cv_count subsets is
    validation set in one of the iterations.
    :param nr_class: number of classes in the dataset
    :param repeat: how many times to repeat the process
    :param col_names: optional column labels forwarded to
        normalize_with_nans for its diagnostics.
    :param train_limit: optional cap on the number of training rows used
        per fold (applied after shuffling/concatenation).
    :return: the average accuracy across all repetitions
    and cross-validations within the repetitions, plus the average AUC:
    (mean_accuracy, mean_auc).
    """
    n, _ = X.shape
    n_class = n // nr_class
    # number of samples per class
    assert n_class % cv_count == 0
    # length of the validated set from a single class
    cv_len = n_class // cv_count
    all_accuracies = []
    all_aucs = []
    for _ in range(repeat):
        x = []
        y = []
        start_index = 0
        end_index = n_class
        # We need to extract samples for each class separately
        # ensure that there are the same number of samples for
        # each class in the train and the validation sets.
        for i in range(nr_class):
            x.append(X[start_index:end_index, ...])
            y.append(Y[start_index:end_index])
            start_index += n_class
            end_index += n_class
            # Randomize the samples within this class.
            # We could also do it after the extraction
            # of the validation set.
            randomized_indices = np.random.choice(
                n_class, n_class, replace=False)
            x[i] = x[i][randomized_indices, ...]
            y[i] = y[i][randomized_indices]
        # Cross-validate the model cv_count times.
        for i in range(cv_count):
            # [bottom_index, top_index) is this fold's validation window
            # within each class; the rest is training data.
            bottom_index = i * cv_len
            top_index = (i + 1) * cv_len
            bottom_x = []
            top_x = []
            bottom_y = []
            top_y = []
            for j in range(nr_class):
                bottom_x.append(x[j][:bottom_index, :])
                top_x.append(x[j][top_index:, :])
                bottom_y.append(y[j][:bottom_index])
                top_y.append(y[j][top_index:])
            bottom_x = np.concatenate(bottom_x, axis=0)
            top_x = np.concatenate(top_x, axis=0)
            bottom_y = np.concatenate(bottom_y, axis=0)
            top_y = np.concatenate(top_y, axis=0)
            # First/last folds have an empty bottom/top part respectively.
            if i == 0:
                x_train = top_x
                y_train = top_y
            elif i == cv_count - 1:
                x_train = bottom_x
                y_train = bottom_y
            else:
                x_train = np.concatenate((bottom_x, top_x), axis=0)
                y_train = np.concatenate((bottom_y, top_y), axis=0)
            if train_limit:
                x_train = x_train[:train_limit, :]
                y_train = y_train[:train_limit]
            # Fit normalization on the training fold only, then apply the
            # same means/stds to the validation fold (no leakage).
            x_train, means, stds = normalize_with_nans(x_train)
            x_test = []
            y_test = []
            for j in range(nr_class):
                x_test.append(x[j][bottom_index:top_index, :])
                y_test.append(y[j][bottom_index:top_index])
            x_test = np.concatenate(x_test, axis=0)
            y_test = np.concatenate(y_test, axis=0)
            x_test, _, _ = normalize_with_nans(x_test, means=means, stds=stds,
                                               col_names=col_names)
            clf = classifier
            clf.fit(x_train, y_train)
            score = clf.score(x_test, y_test)
            # y_score = clf.predict(x_test)
            y_probs = clf.predict_proba(x_test)
            # AUC uses the probability of the positive (second) class.
            auc = sklearn.metrics.roc_auc_score(y_true=y_test,
                                                y_score=y_probs[:, 1])
            all_aucs.append(auc)
            all_accuracies.append(score)
    return np.average(all_accuracies), np.average(all_aucs)
def err_percent(error_rate):
    """Format an error-rate fraction as a percentage string, e.g. 0.5 -> '50.0 %'."""
    return f"{100 * error_rate} %"
def accuracy_percent(accuracy):
    """Format an accuracy fraction as a percentage string, e.g. 0.25 -> '25.0 %'."""
    return f"{100 * accuracy} %"
def missing_values_col(data, nans, col_names, missing_rate=0.5):
    """Find columns whose share of `nans` placeholder entries reaches
    `missing_rate`.

    Prints a warning for each offending column and the average number of
    missing entries per column; returns the offending column indices.
    """
    flagged = []
    missing_counts = []
    n_rows = data.shape[0]
    for idx in range(data.shape[1]):
        column = data[:, idx].copy()
        observed = column[column != nans]
        n_missing = n_rows - len(observed)
        label = col_names[idx]
        if n_missing >= (missing_rate * n_rows):
            print(f'More than {missing_rate} of the patients have missing '
                  f'value for column number {idx} labeled {label}')
            flagged.append(idx)
        missing_counts.append(n_missing)
    print('average number of missing values per column: ',
          np.average(missing_counts))
    return flagged
def missing_values_row(data, nans, missing_rate=0.5):
    """Find rows (patients) whose share of `nans` placeholder entries
    reaches `missing_rate`.

    Prints a warning for each offending row and the average number of
    missing entries per row; returns the offending row indices.
    """
    missing_counts = []
    flagged = []
    n_cols = data.shape[1]
    for idx in range(data.shape[0]):
        row = data[idx, :].copy()
        observed = row[row != nans]
        n_missing = n_cols - len(observed)
        missing_counts.append(n_missing)
        if n_missing >= (missing_rate * n_cols):
            print(
                f'{n_missing} (more than {missing_rate * 100}%) of the '
                f'measurements are missing for patient number: {idx}')
            flagged.append(idx)
    print('average number of missing values per row: ',
          np.average(missing_counts))
    return flagged
def normalize_with_nans(data, nans=999, means=None, stds=None,
                        col_names=None):
    """Impute `nans` placeholders with each column's mean, then z-score.

    Train mode (no means/stds given): statistics are estimated per column
    from the non-placeholder entries and returned.  Test mode (both
    means/stds given): the supplied statistics are applied instead.
    Columns with zero std are only centered.  `data` is modified in place.

    :return: (data, means, stds)
    :raises Exception: if only one of means/stds is provided.
    """
    if means is None and stds is not None:
        raise Exception('Provide also means.')
    if means is not None and stds is None:
        raise Exception('Provide also stds.')
    is_test = not (means is None and stds is None)
    if not is_test:
        means = []
        stds = []
    for idx in range(data.shape[1]):
        column = data[:, idx].copy()
        observed = column[column != nans]
        if np.count_nonzero(observed) == 0:
            message = f'All data elements in column nr: {idx} are zero.'
            if col_names is not None:
                message += f' The column name is: {col_names[idx]}'
            # Diagnostic message retained but deliberately not raised/printed,
            # matching the original behavior.
        if is_test:
            mean = means[idx]
            std = stds[idx]
        else:
            mean = np.mean(observed)
            std = np.std(observed)
            means.append(mean)
            stds.append(std)
        # Impute placeholders with the mean, center, and (if possible) scale.
        column[column == nans] = mean
        column -= mean
        if std != 0:
            column /= std
        data[:, idx] = column
    return data, means, stds
# Small estimator suite used as the default for column_priority's
# feature-ranking accuracy curves.
priority_classifiers = {
    "Least Squares": LeastSquareClassifier(),
    "Decision Tree": DecisionTreeClassifier(max_depth=5)
}
def column_priority(X, y, X_cv, y_cv, labels, classifiers=priority_classifiers):
    """Rank feature columns by |w| from the SVD least-squares fit, then print
    train and cross-validation accuracy as the top-k column subset grows.

    Output is semicolon-separated (see `delimiter`), one row per subset size.
    NOTE(review): `classifiers` defaults to the shared module-level dict, so
    every call refits the same estimator instances in place.

    :return: column indices sorted by decreasing |w|.
    """
    w = find_w_svd(X, y)
    w_abs = np.abs(w)
    index_w = [[i, w] for i, w in enumerate(w_abs)]
    # sort in descending order
    sort_index_w = sorted(index_w, key=lambda index_w: [-index_w[1]])
    w_sorted_indexes = [index for (index, _) in sort_index_w]
    for index, w in sort_index_w:
        print(index, ';', labels[index], ';', w)
    # print('sort_index_w: ', sort_index_w)
    # Header row: one train/cv accuracy column pair per classifier.
    print('# of columns', end="")
    classifier_names = classifiers.keys()
    for classifier_name in classifier_names:
        print(delimiter, classifier_name, "accuracy train,", classifier_name,
              ",accuracy cross-validation", end="")
    print()
    for i in range(1, len(w_sorted_indexes) + 1):
        print(i, end="")
        # Extract most important columns from the dataset X.
        column_subset = w_sorted_indexes[:i]
        X_short = X[:, column_subset]
        X_cv_short = X_cv[:, column_subset]
        for clf in classifiers.values():
            clf.fit(X_short, y)
            train_score = clf.score(X_short, y)
            print(delimiter, train_score, end="")
            try:
                cv_score = np.average(
                    cross_val_score(clf, X_cv_short, y_cv, cv=6))
                print(delimiter, cv_score, end="")
            except np.linalg.LinAlgError as err:
                # Singular subset for this estimator — report as unavailable.
                print(delimiter, "N/A", end="")
        print()
    return w_sorted_indexes
def show_decision_tree(estimator, col_names, means, stds):
    """Print a fitted sklearn decision tree's structure with split thresholds
    mapped back to the original (un-normalized) feature units via
    ``threshold * std + mean``.

    :param estimator: fitted sklearn tree estimator exposing ``tree_``.
    :param col_names: feature labels, indexed by the tree's feature ids.
    :param means: per-feature means used during normalization.
    :param stds: per-feature standard deviations used during normalization.
    """
    # source: https://scikit-learn.org/stable/auto_examples/tree/plot_unveil_tree_structure.html
    # The decision estimator has an attribute called tree_ which stores the entire
    # tree structure and allows access to low level attributes. The binary tree
    # tree_ is represented as a number of parallel arrays. The i-th element of each
    # array holds information about the node `i`. Node 0 is the tree's root. NOTE:
    # Some of the arrays only apply to either leaves or split nodes, resp. In this
    # case the values of nodes of the other type are arbitrary!
    #
    # Among those arrays, we have:
    #   - left_child, id of the left child of the node
    #   - right_child, id of the right child of the node
    #   - feature, feature used for splitting the node
    #   - threshold, threshold value at the node
    #
    # Using those arrays, we can parse the tree structure:
    n_nodes = estimator.tree_.node_count
    children_left = estimator.tree_.children_left
    children_right = estimator.tree_.children_right
    feature = estimator.tree_.feature
    threshold = estimator.tree_.threshold
    # The tree structure can be traversed to compute various properties such
    # as the depth of each node and whether or not it is a leaf.
    node_depth = np.zeros(shape=n_nodes, dtype=np.int64)
    is_leaves = np.zeros(shape=n_nodes, dtype=bool)
    stack = [(0, -1)]  # seed is the root node id and its parent depth
    while len(stack) > 0:
        node_id, parent_depth = stack.pop()
        node_depth[node_id] = parent_depth + 1
        # If we have a test node
        if (children_left[node_id] != children_right[node_id]):
            stack.append((children_left[node_id], parent_depth + 1))
            stack.append((children_right[node_id], parent_depth + 1))
        else:
            is_leaves[node_id] = True
    print("The binary tree structure has %s nodes and has "
          "the following tree structure:"
          % n_nodes)
    for i in range(n_nodes):
        if is_leaves[i]:
            print("%snode=%s leaf node." % (node_depth[i] * "\t", i))
        else:
            feature_nr = feature[i]
            print(
                # "%snode=%s test node: go to node %s if X[:, %s] <= %s else to "
                # "node %s."
                "%snode=%s test node: go to node %s if '%s' <= %s else to "
                "node %s."
                % (node_depth[i] * "\t",
                   i,
                   children_left[i],
                   # feature[i],
                   col_names[feature_nr],
                   # threshold[i],
                   # de-normalize the split threshold back to raw units
                   threshold[i] * stds[feature_nr] + means[feature_nr],
                   children_right[i],
                   ))
# Utility function to report best scores.
# source: https://scikit-learn.org/stable/auto_examples/model_selection/
# plot_randomized_search.html#sphx-glr-auto-examples-model-selection-plot
# randomized-search-py
def report(results, n_top=3):
    """Print the ``n_top`` best-ranked configurations of a CV search.

    :param results: ``cv_results_`` dict from a scikit-learn search object
    :param n_top: number of top ranks to print
    """
    for rank in range(1, n_top + 1):
        for candidate in np.flatnonzero(results['rank_test_score'] == rank):
            print("Model with rank: {0}".format(rank))
            print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
                results['mean_test_score'][candidate],
                results['std_test_score'][candidate]))
            print("Parameters: {0}".format(results['params'][candidate]))
            print("")
def run_param_search(X, y, clf=SVC(probability=True)):
    """Randomized hyper-parameter search for an SVM-style classifier.

    Samples 20 candidate settings (C, gamma, kernel, class_weight), fits
    each with 5-fold cross-validation, and prints the top results via
    ``report``.

    NOTE(review): the ``iid`` argument was removed in scikit-learn 0.24,
    so this call fails on modern scikit-learn — confirm the pinned version.
    NOTE(review): the mutable default ``clf=SVC(...)`` is built once at
    import time and shared across calls.

    :param X: feature matrix (samples in rows)
    :param y: target labels
    :param clf: estimator to tune
    """
    # specify parameters and distributions to sample from
    param_dist = {'C': scipy.stats.expon(scale=100),
                  'gamma': scipy.stats.expon(scale=.1),
                  'kernel': ['rbf', 'linear'],
                  'class_weight': ['balanced', None]}
    # run randomized search
    n_iter_search = 20
    random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
                                       n_iter=n_iter_search, cv=5, iid=False)
    start = time.time()
    random_search.fit(X, y)
    print("RandomizedSearchCV took %.2f seconds for %d candidates"
          " parameter settings." % ((time.time() - start), n_iter_search))
    report(random_search.cv_results_)
def show_param_performance(X_cv, y_cv, nr_class, col_names):
    """Sweep the linear-SVM ``C`` value and print CV accuracy/AUC per value."""
    print(
        'Accuracy on self-crafted cross-validation with normalization: ')
    for c_value in np.linspace(start=0.0001, stop=200, num=100):
        model = SVC(kernel="linear", C=c_value, probability=True)
        accuracy, auc = cross_validate(X_cv, y_cv, classifier=model,
                                       nr_class=nr_class,
                                       col_names=col_names)
        print('SVM', "C=", c_value, delimiter, accuracy_percent(accuracy), auc)
    print()
def svd_spectrum(X):
    """Plot the normalized squared singular-value spectrum of X.

    The i-th plotted value is sigma_i^2 / sum_j(sigma_j^2), so the curve shows
    how much "energy" each singular direction carries. The figure is written
    to ``svd_spectrum.png`` next to this file.

    :param X: the data matrix to decompose
    """
    u, s, vh = svd(a=X, full_matrices=False)
    print("Rank of X: ", len(s))
    # Normalize the squared singular values so they sum to 1.
    s = [x ** 2 for x in s]
    sum_s = np.sum(s)
    s = [x / sum_s for x in s]
    print("Importance of singular values: ", s)
    print("length of singular values: ", len(s))
    ax1 = plt.subplot(111)
    # BUG FIX: the labels must be raw strings — in a plain literal "\f" is a
    # form-feed character and "\s" is an invalid (deprecated) escape, which
    # corrupted the rendered LaTeX.
    ax1.plot(
        range(len(s)), s,
        label=r"$\frac{\sigma_i^2}{\sum_j^N \sigma_j^2}$",
        marker="o", linestyle="", color=get_color(MY_BLUE))
    ax1.set_title("Spectrum of X")
    ax1.set_xlabel(r"index i of $\sigma_i$")
    ax1.set_ylabel(r"$\sigma_i^2$", rotation=0)
    dir_path = os.path.dirname(os.path.realpath(__file__))
    output_path = os.path.join(dir_path, "svd_spectrum.png")
    plt.tight_layout()
    plt.savefig(output_path)
    plt.close()
def pca(X, index):
    """Project the rows of X onto their top ``index`` principal directions.

    :param X: the whole input data (samples in rows)
    :param index: how many singular values retain
        (the dimension of the subspace)
    :return: the projected rows of X on a lower dimensional space
    """
    # Left singular vectors of X.T span the feature space.
    basis, _, _ = svd(a=X.T, full_matrices=False)
    # Keep the leading ``index`` directions and project every sample on them.
    basis = basis[:, :index]
    return np.matmul(X, basis)
def pca_scikit_whole_train(X, index):
    """Fit scikit-learn PCA on all of X and return the ``index``-dim transform."""
    reducer = PCA(n_components=index)  # adjust yourself
    reducer.fit(X)
    return reducer.transform(X)
def pca_scikit_train_test(X, y, clf):
    """Print held-out classifier score for every PCA dimensionality.

    PCA is fit on the train split only, then both splits are transformed.
    """
    print("PCA train test from scikit learn:")
    print("index, score")
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.4, random_state=0)
    for n_components in range(1, X_test.shape[0]):
        reducer = PCA(n_components=n_components)  # adjust yourself
        reducer.fit(X_train)
        train_proj = reducer.transform(X_train)
        test_proj = reducer.transform(X_test)
        clf.fit(train_proj, y_train)
        print(n_components, ',', clf.score(test_proj, y_test))
def pca_scikit_train(X, y, clf):
    """Print training-set classifier score for every PCA dimensionality.

    Note: PCA is both fit and scored on the same data (no held-out split).
    """
    print("PCA train from scikit learn:")
    print("index, score")
    for n_components in range(1, X.shape[0]):
        reducer = PCA(n_components=n_components)  # adjust yourself
        reducer.fit(X)
        projected = reducer.transform(X)
        clf.fit(projected, y)
        print(n_components, ',', clf.score(projected, y))
def accuracy_on_pca_data(X, y, classifiers, nr_class, col_names,
                         pca_function=pca):
    """Print CV accuracy/AUC for each classifier at every PCA dimensionality.

    :param classifiers: mapping of display name -> estimator
    :param pca_function: callable ``(X, index=k) -> projected X``
    """
    n_rows = X.shape[0]
    print('PCA:')
    print('index' + delimiter + 'accuracy' + delimiter + 'auc')
    for index in range(1, n_rows):
        reduced = pca_function(X, index=index)
        for _, clf in classifiers.items():
            accuracy, auc = cross_validate(
                X=reduced, Y=y, classifier=clf, nr_class=nr_class,
                col_names=col_names)
            print(delimiter.join(str(value) for value in (index, accuracy, auc)))
def findsubsets(s, max=None):
    """Return every non-empty subset of ``s`` (as tuples) up to a size cap.

    :param s: iterable of elements
    :param max: largest subset size to include; all sizes when None.
        (Parameter name kept for backward compatibility even though it
        shadows the builtin ``max``.)
    :return: list of tuples ordered by subset size, then by element order
    """
    elements = list(s)
    limit = len(elements) if max is None else max
    # Same enumeration as nested combinations loops, flattened lazily.
    return list(itertools.chain.from_iterable(
        itertools.combinations(elements, size)
        for size in range(1, limit + 1)))
def col_scores_single_thread(X, y, clf, nr_class, col_names, max_col_nr):
    """Score every column by summing CV accuracies over all column subsets.

    Each subset of at most ``max_col_nr`` columns is cross-validated; the
    subset's accuracy is credited to every column it contains. Columns are
    printed in descending score order.

    :return: dict mapping column index -> accumulated accuracy score
    """
    n_cols = X.shape[1]
    scores = dict.fromkeys(range(n_cols), 0)
    subsets = findsubsets(np.arange(n_cols), max_col_nr)
    print('subsets count: ', len(subsets))
    for subset in subsets:
        selected = X[:, subset]
        accuracy, auc = cross_validate(selected, y, classifier=clf,
                                       nr_class=nr_class, col_names=col_names)
        for col in subset:
            scores[col] += accuracy
    for col in sorted(scores, key=scores.get, reverse=True):
        row = (col, col_names[col], scores[col])
        print(delimiter.join(str(value) for value in row))
    return scores
# Module-level accumulator shared between the multiprocessing results
# callback and col_scores_parallel.
col_scores = {}


def get_accuracy(X, set, y, clf, nr_class, col_names):
    """Cross-validate the column subset ``set`` and return (set, accuracy)."""
    selected = X[:, set]
    accuracy, _ = cross_validate(selected, y, classifier=clf,
                                 nr_class=nr_class, col_names=col_names)
    return set, accuracy
def collect_accuracies(result):
    """Pool callback: credit a subset's accuracy to each of its columns."""
    subset, accuracy = result
    for col in subset:
        col_scores[col] += accuracy
def col_scores_parallel(X, y, clf, nr_class, col_names, max_col_nr):
    """Score columns by CV accuracy over all column subsets, in parallel.

    Each subset of at most ``max_col_nr`` columns is cross-validated in a
    worker process; ``collect_accuracies`` adds each subset's accuracy to
    the score of every column it contains. Columns are printed in
    descending score order.

    :return: dict mapping column index -> accumulated accuracy score
    """
    H, W = X.shape
    global col_scores
    for col in range(W):
        col_scores[col] = 0
    subsets = findsubsets(np.arange(W), max_col_nr)
    print('subsets count: ', len(subsets))
    # parallel python
    # source:
    # https://www.machinelearningplus.com/python/parallel-processing-python/
    pool = mp.Pool(mp.cpu_count())
    for subset in subsets:
        pool.apply_async(
            get_accuracy,
            args=(X, subset, y, clf, nr_class, col_names),
            callback=collect_accuracies
        )
    # Wait for every worker before reading the accumulated scores.
    pool.close()
    pool.join()
    for w in sorted(col_scores, key=col_scores.get, reverse=True):
        result = [w, col_names[w], col_scores[w]]
        print(delimiter.join([str(x) for x in result]))
    sys.stdout.flush()
    # BUG FIX: previously returned the *function object*
    # ``col_scores_single_thread`` instead of the computed scores.
    return col_scores
def accuracy_column_order(clf, X_cv, y_cv, nr_class, col_names, data_path):
    """Evaluate CV accuracy on growing prefixes of a fixed column ranking.

    ``col_order`` holds a precomputed column ranking (best first) chosen by
    dataset name; for each prefix length i, the classifier is
    cross-validated on the first i ranked columns and accuracy is printed.
    NOTE(review): the rankings presumably come from a prior run of the
    column-scoring functions — confirm before reusing on new data.
    """
    col_order = []
    if 'garrett' in data_path:
        # Precomputed ranking for the garrett dataset (best column first).
        col_order = [126, 120, 130, 128, 111, 132, 116, 99, 100, 114, 129, 103,
                     125, 94, 117, 131, 127, 115, 104, 121, 98, 92, 97, 112,
                     113, 95, 119, 96, 118, 102, 101, 105, 122, 109, 184, 135,
                     134, 179, 123, 107, 89, 180, 124, 178, 185, 106, 150, 10,
                     155, 170, 55, 2, 176, 140, 160, 6, 52, 11, 74, 91, 56, 75,
                     93, 110, 90, 154, 51, 174, 32, 145, 5, 133, 19, 73, 183,
                     29, 27, 21, 78, 18, 14, 80, 144, 164, 69, 48, 151, 171,
                     149, 169, 72, 1, 163, 88, 68, 31, 86, 143, 46, 84, 23, 44,
                     85, 67, 79, 33, 24, 54, 63, 22, 82, 153, 173, 159, 139, 7,
                     83, 181, 61, 30, 66, 62, 49, 16, 71, 161, 141, 58, 166,
                     177, 42, 146, 45, 77, 142, 57, 162, 70, 65, 35, 20, 36, 41,
                     39, 40, 60, 168, 148, 0, 26, 76, 157, 34, 87, 158, 156, 4,
                     136, 138, 13, 147, 167, 137, 25, 172, 9, 43, 59, 152, 108,
                     50, 8, 28, 64, 182, 17, 38, 12, 37, 81, 53, 47, 3, 175,
                     165, 15]
    if 'remy' in data_path:
        # Precomputed ranking for the remy dataset (best column first).
        col_order = [46, 89, 112, 62, 110, 90, 77, 31, 71, 76, 14, 63, 105, 70,
                     80, 66, 100, 67, 30, 95, 113, 11, 78, 45, 73, 75, 72, 47,
                     55, 35, 15, 41, 109, 10, 98, 108, 27, 79, 84, 65, 104, 51,
                     74, 39, 25, 24, 26, 99, 83, 21, 54, 111, 40, 33, 32, 64,
                     59, 28, 53, 87, 69, 61, 88, 106, 82, 20, 37, 29, 50, 44,
                     34, 94, 18, 22, 8, 101, 58, 7, 43, 38, 81, 13, 6, 36, 103,
                     60, 85, 49, 2, 56, 102, 16, 19, 3, 5, 1, 57, 12, 23, 42,
                     17, 48, 86, 93, 97, 96, 91, 52, 92, 107, 4, 0, 9, 68]
    print('SVM accuracy col order')
    print('nr of priority columns', delimiter, 'accuracy')
    # Grow the prefix one ranked column at a time and re-run CV.
    for i in range(1, len(col_order) + 1):
        cols_order = col_order[:i]
        X_order = X_cv[:, cols_order]
        accuracy, auc = cross_validate(
            X_order, y_cv, classifier=clf, nr_class=nr_class,
            col_names=col_names)
        print(i, delimiter, accuracy)
def f_importances(coef, names, topk=5):
    """
    Source: https://stackoverflow.com/questions/41592661/determining-the-most-contributing-features-for-svm-classifier-in-sklearn
    Save a horizontal bar chart of the ``topk`` most negative and most
    positive SVM coefficients to ``svm_importance_features.pdf``.
    :param coef: SVM coefficients (2D, first row is used)
    :param names: the names of features
    :param topk: how many top features to show on each side
    """
    weights = coef.tolist()[0]
    # Sort features by coefficient, then keep the extremes of each sign.
    weights, names = zip(*sorted(zip(weights, names)))
    weights = weights[:topk] + weights[-topk:]
    names = names[:topk] + names[-topk:]
    positions = range(len(names))
    plt.barh(positions, weights, align='center')
    plt.yticks(positions, names)
    dir_path = os.path.dirname(os.path.realpath(__file__))
    output_path = os.path.join(dir_path, "svm_importance_features.pdf")
    plt.tight_layout()
    plt.savefig(output_path)
    plt.close()
def plot_coefficients(classifier, feature_names, top_features=20):
    """
    Source: https://medium.com/@aneesha/visualising-top-features-in-linear-svm-with-scikit-learn-and-matplotlib-3454ab18a14d
    Bar-plot the strongest negative and positive coefficients of a linear
    SVM, save the figure to ``svm_importance_features2.pdf``, and print the
    plotted name/value pairs.
    :param classifier: a linear SVM classifier
    :param feature_names: the names of features
    :param top_features: how many top features to show per sign
    """
    coef = classifier.coef_.ravel()
    order = np.argsort(coef)
    # Strongest negatives first, then strongest positives.
    top_coefficients = np.hstack([order[:top_features], order[-top_features:]])
    # create plot
    plt.figure(figsize=(15, 5))
    coefficients = coef[top_coefficients]
    colors = ['red' if value < 0 else 'blue' for value in coefficients]
    plt.bar(np.arange(2 * top_features), coefficients, color=colors)
    feature_names = np.array(feature_names)[top_coefficients]
    plt.xticks(np.arange(0, 1 + 2 * top_features),
               feature_names,
               rotation=60, ha='right')
    dir_path = os.path.dirname(os.path.realpath(__file__))
    output_path = os.path.join(dir_path, "svm_importance_features2.pdf")
    plt.tight_layout()
    plt.savefig(output_path)
    plt.close()
    print('feature name, coefficient value')
    for name, value in zip(feature_names, coefficients):
        print(name, ';', value)
def compute():
    """Run the full analysis pipeline on the configured CSV dataset.

    Loads the data, removes rows/columns with too many missing values
    (encoded as 999), normalizes, fits a least-squares baseline, prints a
    decision-tree plot and a LassoLars variable-selection sweep, then exits.
    NOTE(review): everything after ``exit(0)`` below is unreachable —
    presumably kept for ad-hoc experiments; confirm before relying on it.
    """
    warnings.filterwarnings("ignore")
    dir_path = os.path.dirname(os.path.realpath(__file__))
    # data_path = os.path.join(dir_path, "remy_data_all.csv")
    # data_path = os.path.join(dir_path, "remy_data_cleaned_with_header.csv")
    # data_path = os.path.join(dir_path, "remy_data_final.csv")
    # data_path = os.path.join(dir_path, "remy_data_final_sign_class.csv")
    # data_path = os.path.join(dir_path, "clean-2019-11-24-3.csv")
    # data_path = os.path.join(dir_path, "remy_2019_10_29.csv")
    # data_path = os.path.join(dir_path, "arnold_2019_12_07.csv")
    data_path = os.path.join(dir_path, "garrett_2019_11_24.csv")
    print('data_path: ', data_path)
    data_all = pd.read_csv(data_path, header=0)
    # BUG FIX: np.int / np.float aliases were removed in NumPy 1.24; the
    # builtin types are equivalent here.
    labels = np.asarray(data_all.iloc[:, 0], dtype=int)
    nr_class = len(np.unique(labels))
    X = np.asarray(data_all.iloc[:, 1:], dtype=float)
    y = labels
    row_nr, col_nr = X.shape
    assert len(y) == row_nr
    col_names = np.array(list(data_all.columns.values))
    col_names = col_names[1:]  # skip the ASD column name
    assert len(col_names) == col_nr
    print('row number: ', row_nr)
    print('column number: ', col_nr)
    # Missing values are encoded as 999 in the source data.
    nans = 999
    """
    Special case:
    Column: “Asymmetry Total CSA > 12% at C3” – it has only zero values or
    ‘999’s only (it is the 3rd column from the end).
    """
    # X = np.delete(X, -3, axis=1)
    # col_names = np.delete(col_names, -3)
    remove_cols = missing_values_col(data=X, nans=nans, col_names=col_names)
    remove_rows = missing_values_row(data=X, nans=nans)
    print('Delete columns: ', remove_cols)
    X = np.delete(X, remove_cols, axis=1)
    col_names = np.delete(col_names, remove_cols)
    print('Delete rows: ', remove_rows)
    X = np.delete(X, remove_rows, axis=0)
    y = np.delete(y, remove_rows)
    X_norm, means, stds = normalize_with_nans(data=X.copy(), nans=nans)
    # show the SVD spectrum.
    # svd_spectrum(X)
    # Least-squares baseline on the whole (normalized) data.
    w_hat = find_w(X_norm, y)
    y_hat = np.sign(np.matmul(X_norm, w_hat))
    diff = np.sum(y_hat == y)
    accuracy = diff / len(y)
    print('On the whole data: ')
    print('Full Least Squares accuracy: ',
          accuracy_percent(accuracy))
    # for cross validation we take the same number of samples for each class
    num_pos = np.count_nonzero(y == 1)
    num_neg = np.count_nonzero(y == -1)
    count = min(num_pos, num_neg)
    X_cv = np.concatenate((X[:count, :], X[-count:, :]))
    y_cv = np.concatenate((y[:count], y[-count:]))
    features_names = col_names.tolist()
    print('index;feature_name')
    for index, feature_name in enumerate(features_names):
        print(index, ';', feature_name)
    # Fit and render a decision tree on the full (non-normalized) data.
    clf = DecisionTreeClassifier()
    clf = clf.fit(X, y)
    plt.figure()
    plot_tree(clf, filled=True, feature_names=features_names,
              class_names=['neg', 'pos'])
    dir_path = os.path.dirname(os.path.realpath(__file__))
    output_path = os.path.join(dir_path, "plot_tree13_full_data.pdf")
    plt.savefig(output_path, bbox_inches='tight')
    plt.close()
    # LassoLars sweep: how many variables stay active as alpha grows.
    for alpha in np.linspace(0, 0.1, 100):
        clf = LassoLars(alpha=alpha)
        clf = clf.fit(X, y)
        # Indices of active variables at the end of the path.
        active = clf.active_
        active = sorted(active)
        feature_names = np.array(features_names)
        print('alpha regularization: ', alpha, ' number of variables: ', len(active))
        print('variable names: ', feature_names[active])
        print('variable coefficients (how important they are): ', clf.coef_[active])
    # STOP
    # NOTE(review): everything below this exit() is dead code.
    exit(0)
    SVM = classifiers["SVM"]
    clf = SVM
    clf.fit(X_norm, y)
    print("clf.coef_: ", clf.coef_)
    score = clf.score(X_norm, y)
    print('SVM accuracy: ', accuracy_percent(score))
    features_names = col_names.tolist()
    f_importances(clf.coef_, features_names)
    plot_coefficients(clf, feature_names=features_names, top_features=20)
    # accuracy_column_order(clf=SVM, nr_class=nr_class, col_names=col_names,
    #                       X_cv=X_cv, y_cv=y_cv, data_path=data_path)
    max_col_nr = 3
    for name, clf in classifiers.items():
        print('classifier name: ', clf)
        start = time.time()
        col_scores_parallel(
            X=X_cv, y=y_cv, col_names=col_names, clf=SVM, nr_class=nr_class,
            max_col_nr=max_col_nr)
        print('col scores parallel timing: ', time.time() - start)
        sys.stdout.flush()
    # start = time.time()
    # col_scores_single_thread(
    #     X=X_cv, y=y_cv, col_names=col_names, clf=SVM, nr_class=nr_class,
    #     max_col_nr=max_col_nr)
    # print('col scores single timing: ', time.time() - start)
    # pca_scikit_train_test(X=X_cv, y=y_cv, clf=SVM)
    pca_scikit_train(X=X_cv, y=y_cv, clf=SVM)
    # pca_function=pca
    pca_function = pca_scikit_whole_train
    accuracy_on_pca_data(X=X_cv, y=y_cv, classifiers={"SVM": SVM},
                         nr_class=nr_class, col_names=col_names,
                         pca_function=pca_function)
    # run_param_search(X=X_cv, y=y_cv, clf=SVC(probability=True))
    # show_param_performance(X_cv=X_cv, y_cv=y_cv, nr_class=nr_class,
    #                        col_names=col_names)
    print("Column priority: ")
    w_sorted_indexes = column_priority(X_norm, y, X_cv, y_cv, labels=col_names)
    print('labels len: ', len(col_names))
    print('w_hat len: ', len(w_hat))
    # Least squares with a bias column of ones.
    ones = np.ones(X_norm.shape[0])
    X_ones = np.concatenate((ones[:, np.newaxis], X_norm), axis=1)
    w_hat = find_w_svd(X_ones, y)
    y_hat = np.sign(np.matmul(X_ones, w_hat))
    diff = np.sum(y_hat == y)
    accuracy = diff / len(y)
    print('Least Squares accuracy: ', accuracy_percent(accuracy))
    clf = LeastSquareClassifier()
    clf.fit(X_norm, y)
    score = clf.score(X_norm, y)
    print('Least Squares accuracy: ', accuracy_percent(score))
    clf = classifiers['Neural Net']
    clf.fit(X_norm, y)
    score = clf.score(X_norm, y)
    print('Neural net accuracy: ', accuracy_percent(score))
    clf = classifiers['Decision Tree']
    clf.fit(X_norm, y)
    score = clf.score(X_norm, y)
    print('Decision Tree accuracy: ', accuracy_percent(score))
    show_decision_tree(estimator=clf, col_names=col_names, means=means,
                       stds=stds)
    print('Accuracy on normalized X_norm: ')
    for name, clf in classifiers.items():
        clf.fit(X_norm, y)
        score = clf.score(X_norm, y)
        print(name, accuracy_percent(score))
    col_subset = 32
    print(
        f'Accuracy and AUC on self-crafted cross-validation with normalization '
        f'and subset of {col_subset} columns: ')
    X_subset = X_cv[:, w_sorted_indexes[:col_subset]]
    for name, clf in classifiers.items():
        accuracy, auc = cross_validate(X_subset, y_cv, classifier=clf,
                                       nr_class=nr_class, col_names=col_names)
        print(name, delimiter, accuracy_percent(accuracy), delimiter, auc)
    print()
    print('Accuracy from cross-validation (non-normalized data): ')
    for name, clf in classifiers.items():
        accuracy = np.average(cross_val_score(clf, X_cv, y_cv, cv=6))
        print(name, delimiter, accuracy_percent(accuracy))
    print()
    X_norm2 = np.concatenate((X_norm[:30, :], X_norm[31:61, :]))
    print('Accuracy from cross-validation (normalized the whole): ')
    for name, clf in classifiers.items():
        accuracy = np.average(cross_val_score(clf, X_norm2, y_cv, cv=6))
        print(name, delimiter, accuracy_percent(accuracy))
    print()
    print(
        'Accuracy and AUC on self-crafted cross-validation with normalization: ')
    print("model name, accuracy (%), AUC")
    for name, clf in classifiers.items():
        accuracy, auc = cross_validate(X_cv, y_cv, classifier=clf,
                                       nr_class=nr_class, col_names=col_names)
        print(name, delimiter, accuracy_percent(accuracy), delimiter, auc)
    print()
# Script entry point: run the full analysis pipeline.
if __name__ == "__main__":
    compute()
| StarcoderdataPython |
1747542 | from pydantic.main import BaseModel
from morpho.rest.models import (
ListServicesResponse,
ServiceInfo,
TransformDocumentPipeRequest,
TransformDocumentPipeResponse,
TransformDocumentRequest,
TransformDocumentResponse,
)
from morpho.util import decode_base64, encode_base64
class TestTransformDocumentRequest:
    """Serialization tests for TransformDocumentRequest (dict and JSON forms)."""
    class TempOptions(BaseModel):
        # Minimal options model used to populate the request's ``options`` field.
        offset: int
    def test_asdict_required_params(self):
        """dict() with only required fields: optional fields serialize as None."""
        request = TransformDocumentRequest(
            document=encode_base64("Hello World!"), service_name="QDS.TEST"
        )
        request_dict = request.dict()
        assert len(request_dict.keys()) == 4
        assert request_dict["document"] == "SGVsbG8gV29ybGQh"
        assert request_dict["service_name"] == "QDS.TEST"
        assert request_dict["file_name"] is None
        assert request_dict["options"] is None
    def test_asdict_all_params(self):
        """dict() with every field set, including a nested options model."""
        # TODO: pyright issue?
        options = self.TempOptions(offset=8)
        request = TransformDocumentRequest(
            document=encode_base64("Hello World2!"),
            service_name="QDS.TEST",
            file_name="file.txt",
            options=options,
        )
        request_dict = request.dict()
        assert len(request_dict.keys()) == 4
        assert request_dict["document"] == "SGVsbG8gV29ybGQyIQ=="
        assert request_dict["service_name"] == "QDS.TEST"
        assert request_dict["file_name"] == "file.txt"
        # Nested models serialize to their own dict representation.
        assert request_dict["options"] == options.dict()
    def test_asjson_required_params(self):
        """json() with only required fields: optional fields appear as null."""
        request = TransformDocumentRequest(
            document=encode_base64("Hello World3!"), service_name="QDS.TEST"
        )
        assert (
            request.json()
            == '{"document": "SGVsbG8gV29ybGQzIQ==", "service_name": "QDS.TEST", "file_name": null, "options": null}'
        )
    def test_asjson_all_params(self):
        """json() with every field set, including nested options."""
        request = TransformDocumentRequest(
            document=encode_base64("Hello World4!"),
            service_name="QDS.TEST",
            file_name="file.txt",
            options=self.TempOptions(offset=4),
        )
        assert (
            request.json()
            == '{"document": "SGVsbG8gV29ybGQ0IQ==", "service_name": "QDS.TEST", "file_name": "file.txt", "options": {"offset": 4}}'
        )
class TestTransformDocumentResponse:
    """Serialization tests for TransformDocumentResponse (dict and JSON forms)."""

    def test_asdict_required_params(self):
        # Optional output/error fields must serialize to None when unset.
        resp = TransformDocumentResponse(
            document=encode_base64("Hello World Response2!")
        )
        as_dict = resp.dict()
        assert len(as_dict.keys()) == 3
        assert as_dict["document"] == "SGVsbG8gV29ybGQgUmVzcG9uc2UyIQ=="
        assert as_dict["output"] is None
        assert as_dict["error"] is None

    def test_asdict_all_params(self):
        resp = TransformDocumentResponse(
            document=encode_base64("Hello World Response3!"),
            output=["OUTPUT 1", "OUTPUT 2"],
            error=["ERROR 1", "ERROR 2"],
        )
        as_dict = resp.dict()
        assert len(as_dict.keys()) == 3
        assert as_dict["document"] == "SGVsbG8gV29ybGQgUmVzcG9uc2UzIQ=="
        assert as_dict["output"] == ["OUTPUT 1", "OUTPUT 2"]
        assert as_dict["error"] == ["ERROR 1", "ERROR 2"]

    def test_asjson_required_params(self):
        resp = TransformDocumentResponse(
            document=encode_base64("Hello World Response4!")
        )
        expected = (
            '{"document": "SGVsbG8gV29ybGQgUmVzcG9uc2U0IQ==", '
            '"output": null, "error": null}'
        )
        assert resp.json() == expected

    def test_asjson_all_params(self):
        resp = TransformDocumentResponse(
            document=encode_base64("Hello World Response5!"),
            output=["OUTPUT 1", "OUTPUT 2"],
            error=["ERROR 1", "ERROR 2"],
        )
        expected = (
            '{"document": "SGVsbG8gV29ybGQgUmVzcG9uc2U1IQ==", '
            '"output": ["OUTPUT 1", "OUTPUT 2"], '
            '"error": ["ERROR 1", "ERROR 2"]}'
        )
        assert resp.json() == expected
class TestListServicesResponse:
    """Serialization tests for ListServicesResponse (dict and JSON forms)."""

    def test_asdict(self):
        resp = ListServicesResponse(
            services=[ServiceInfo(name="QDS.TEST"), ServiceInfo(name="QDS.ECHO")]
        )
        as_dict = resp.dict()
        assert len(as_dict.keys()) == 1
        expected_services = [
            {"name": "QDS.TEST", "options": None},
            {"name": "QDS.ECHO", "options": None},
        ]
        assert as_dict["services"] == expected_services

    def test_asjson(self):
        resp = ListServicesResponse(
            services=[ServiceInfo(name="QDS.TEST"), ServiceInfo(name="QDS.ECHO")]
        )
        expected = (
            '{"services": [{"name": "QDS.TEST", "options": null}, '
            '{"name": "QDS.ECHO", "options": null}]}'
        )
        assert resp.json() == expected
class TestTransformDocumentPipeRequest:
    """Serialization tests for TransformDocumentPipeRequest (dict and JSON)."""
    def test_asdict_required_params(self):
        """dict() with an explicit None file_name serializes it as None."""
        request = TransformDocumentPipeRequest(
            document=encode_base64("Hello Pipe Request2!"),
            services=[ServiceInfo(name="QDS.ECHO")],
            file_name=None,
        )
        request_dict = request.dict()
        assert len(request_dict.keys()) == 3
        assert request_dict["document"] == "SGVsbG8gUGlwZSBSZXF1ZXN0MiE="
        # Nested ServiceInfo models serialize to plain dicts.
        assert request_dict["services"] == [{"name": "QDS.ECHO", "options": None}]
        assert request_dict["file_name"] is None
    def test_asdict_all_params(self):
        """dict() with every field populated."""
        request = TransformDocumentPipeRequest(
            document=encode_base64("Hello Pipe Request3!"),
            services=[ServiceInfo(name="QDS.COUNT")],
            file_name="secret.txt",
        )
        request_dict = request.dict()
        assert len(request_dict.keys()) == 3
        assert request_dict["document"] == "SGVsbG8gUGlwZSBSZXF1ZXN0MyE="
        assert request_dict["services"] == [{"name": "QDS.COUNT", "options": None}]
        assert request_dict["file_name"] == "secret.txt"
    def test_asjson_required_params(self):
        """json() with an explicit None file_name renders it as null."""
        request = TransformDocumentPipeRequest(
            document=encode_base64("Hello Pipe Request4!"),
            services=[ServiceInfo(name="QDS.CAESER")],
            file_name=None,
        )
        assert (
            request.json()
            == '{"document": "SGVsbG8gUGlwZSBSZXF1ZXN0NCE=", "services": [{"name": "QDS.CAESER", "options": null}], "file_name": null}'
        )
    def test_asjson_all_params(self):
        """json() with every field populated."""
        request = TransformDocumentPipeRequest(
            document=encode_base64("Hello Pipe Request5!"),
            services=[ServiceInfo(name="QDS.MAIL")],
            file_name="sec.txt",
        )
        assert (
            request.json()
            == '{"document": "SGVsbG8gUGlwZSBSZXF1ZXN0NSE=", "services": [{"name": "QDS.MAIL", "options": null}], "file_name": "sec.txt"}'
        )
# class TestTransformDocumentPipeResponse():
# B64_HELLO_WORLD_PIPE_BACK = b64encode("Hello World Pipe Back!".encode("utf-8")).decode("utf-8")
# def test_trans_document_validation_success(self):
# response = TransformDocumentPipeResponse(trans_document=self.B64_HELLO_WORLD_PIPE_BACK, sender="QDS.TST")
# print(vars(response))
# assert response.trans_output == self.B64_HELLO_WORLD_PIPE_BACK
| StarcoderdataPython |
3291331 | # -*- coding: utf-8 -*-
def main():
    """Read n, m, d from stdin and print the expected count of adjacent
    pairs at absolute difference d in a uniform random sequence.

    KeyInsight: linearity of expectation — there are m - 1 adjacent pairs,
    so the answer is (m - 1) * P(|a - b| = d) for one uniform pair (a, b).
    See:
    https://img.atcoder.jp/soundhound2018-summer-qual/editorial.pdf
    https://mathtrain.jp/expectation
    """
    n, m, d = map(int, input().split())
    if d == 0:
        # Favourable pairs: (1, 1), ..., (n, n) -> n out of n^2.
        result = (m - 1) / n
    else:
        # Favourable ordered pairs: (i, i + d) and (i + d, i)
        # -> 2 * (n - d) out of n^2.
        result = (m - 1) * 2 * (n - d) / n ** 2
    print(result)
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3281774 | <reponame>unicornis/pybrreg
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import base64
from io import BytesIO
from zipfile import ZipFile
from .new_inquiry import BrregNewInquiry
from .manifest import BrregManifest
from .recipients import BrregRecipientList
class BrregPackage(object):
    """
    Class for unpacking shipments from Brreg.
    """
    # Parsed manifest.xml (BrregManifest) once a package has been read.
    manifest = None
    # Parsed henvendelse.xml (BrregNewInquiry).
    inquiry = None
    # Parsed recipients.xml (BrregRecipientList), when present in the package.
    recipients = None
    # Attachments declared by the inquiry, populated with their file data.
    attachments = ()
    def __init__(self, file_data=None, b64_encoded=True):
        """
        Unpack and verify an incoming package.
        :param file_data: The file data stream from the package
        :type file_data: str
        :param b64_encoded: Flag to set if file_data is b64_encoded
        :type b64_encoded: bool
        """
        if file_data:
            self._read_package_file(file_data, b64_encoded=b64_encoded)
    def _read_package_file(self, file_data, b64_encoded=True):
        """
        Read the zip payload, parse the mandatory manifest and inquiry XML
        files (and the optional recipients XML), then attach the remaining
        zip entries to the inquiry's declared attachments by file name.
        :raises ValueError: if manifest.xml or henvendelse.xml is missing
        """
        decoded = base64.b64decode(file_data) if b64_encoded else file_data
        memory_file = BytesIO(decoded)
        package_contents = {}
        with ZipFile(memory_file, "r") as f:
            for _file in f.infolist():
                opened_file = f.open(_file)
                filename = opened_file.name
                content = opened_file.read()
                # Keys are lower-cased so the known XML files are found
                # regardless of the case used inside the archive.
                package_contents[filename.lower()] = content
        manifest_xml = package_contents.pop('manifest.xml', None)
        if not manifest_xml:
            raise ValueError('Missing manifest.xml in package file.')
        self.manifest = BrregManifest(xml=manifest_xml)
        recipients_xml = package_contents.pop('recipients.xml', None)
        if recipients_xml:
            self.recipients = BrregRecipientList(xml=recipients_xml)
        inquiry_xml = package_contents.pop('henvendelse.xml', None)
        if not inquiry_xml:
            raise ValueError('Missing henvendelse.xml in package file.')
        self.inquiry = BrregNewInquiry(xml=inquiry_xml)
        # Every remaining entry is attachment data for the inquiry.
        for file_name, file_data in package_contents.items():
            att = self.inquiry.get_attachment(file_name)
            if att:
                att.file_data = file_data
        self.attachments = self.inquiry.attachments
    def get_attachment(self, file_name):
        """Return the inquiry attachment matching ``file_name``, if any."""
        return self.inquiry.get_attachment(file_name)
| StarcoderdataPython |
3254528 | <gh_stars>10-100
# Generated by Django 3.0.8 on 2020-07-12 14:41
from django.db import migrations, models
# Schema migration: adds title/description/link/time fields to the
# ClosedBid model. NOTE: auto-generated by Django (makemigrations);
# avoid hand-editing once it has been applied.
class Migration(migrations.Migration):
    dependencies = [
        ('auctions', '0011_closedbid'),
    ]
    operations = [
        migrations.AddField(
            model_name='closedbid',
            name='description',
            field=models.TextField(default=None),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='closedbid',
            name='link',
            field=models.CharField(default=None, max_length=64),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='closedbid',
            name='time',
            field=models.CharField(default=None, max_length=64),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='closedbid',
            name='title',
            field=models.CharField(default=None, max_length=64),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
152765 | <gh_stars>1-10
import mock
from requests import ConnectionError
from slumber.exceptions import HttpClientError
from django.test.testcases import SimpleTestCase
from django.conf import settings
from ..backend import get_backend, ClaBackend
from . import base
# Concrete backend bound to the default test zone so ClaBackend behavior can
# be exercised without real zone configuration.
class TestClaBackend(ClaBackend):
    zone_name = base.DEFAULT_ZONE_NAME
class GetBackendTestCase(SimpleTestCase):
    """
    Test get_backend without mocking anything
    """

    def test_invalid_zone_name(self):
        # Unknown zones must yield None rather than raise.
        backend = get_backend("invalid_zone")
        self.assertEqual(backend, None)

    def test_success(self):
        # BUG FIX: dict.keys() is not subscriptable on Python 3, so
        # ``ZONE_PROFILES.keys()[0]`` raised TypeError; next(iter(...))
        # works on both Python 2 and 3.
        zone_name = next(iter(settings.ZONE_PROFILES))
        backend = get_backend(zone_name)
        self.assertTrue(backend)
        self.assertEqual(backend.zone_name, zone_name)
        self.assertTrue(issubclass(backend.__class__, ClaBackend))
class ClaBackendTestCase(SimpleTestCase):
    """Tests for ClaBackend.authenticate against a mocked auth connection."""

    @mock.patch("cla_auth.backend.get_auth_connection")
    def __call__(self, result, mocked_get_auth_connection, *args, **kwargs):
        # Patch the auth connection for the duration of every test method and
        # provide a default credentials fixture.
        self.mocked_get_auth_connection = mocked_get_auth_connection
        self.credentials = {"username": "my-username", "password": "<PASSWORD>"}
        super(ClaBackendTestCase, self).__call__(result, *args, **kwargs)

    def test_authenticate_invalid_zone(self):
        """A backend with an unknown zone authenticates nobody and never
        touches the auth connection."""
        class InvalidClaBackend(ClaBackend):
            zone_name = "invalid_zone"
        backend = InvalidClaBackend()
        self.assertEqual(backend.authenticate(**self.credentials), None)
        self.assertEqual(self.mocked_get_auth_connection.called, False)

    def test_authenticate_connection_error(self):
        """A network failure during the token request yields None."""
        backend = TestClaBackend()
        connection = mock.MagicMock()
        connection.oauth2.access_token.post.side_effect = ConnectionError()
        self.mocked_get_auth_connection.return_value = connection
        self.assertEqual(backend.authenticate(**self.credentials), None)
        self.assertEqual(self.mocked_get_auth_connection.called, True)
        connection.oauth2.access_token.post.assert_called_with(
            {
                "client_id": base.DEFAULT_ZONE_PROFILE["CLIENT_ID"],
                "client_secret": base.DEFAULT_ZONE_PROFILE["CLIENT_SECRET"],
                "grant_type": "password",
                "username": self.credentials["username"],
                "password": self.credentials["password"],
            }
        )

    def test_authenticate_invalid_credentials(self):
        """An HTTP client error from the token endpoint yields None."""
        backend = TestClaBackend()
        credentials = {"username": "my-username", "password": "<PASSWORD>"}
        connection = mock.MagicMock()
        connection.oauth2.access_token.post.side_effect = HttpClientError(content='{"error": "invalid grant"}')
        self.mocked_get_auth_connection.return_value = connection
        self.assertEqual(backend.authenticate(**credentials), None)
        self.assertEqual(self.mocked_get_auth_connection.called, True)
        connection.oauth2.access_token.post.assert_called_with(
            {
                "client_id": base.DEFAULT_ZONE_PROFILE["CLIENT_ID"],
                "client_secret": base.DEFAULT_ZONE_PROFILE["CLIENT_SECRET"],
                "grant_type": "password",
                "username": credentials["username"],
                # BUG FIX: restored ``credentials["password"]`` — the previous
                # line contained syntactically invalid placeholder text.
                "password": credentials["password"],
            }
        )

    def test_authenticate_success(self):
        """A token response produces a user whose pk is the access token."""
        token = "123456789"
        backend = TestClaBackend()
        connection = mock.MagicMock()
        connection.oauth2.access_token.post.return_value = {"access_token": token}
        self.mocked_get_auth_connection.return_value = connection
        user = backend.authenticate(**self.credentials)
        self.assertTrue(user)
        self.assertEqual(user.pk, token)
        self.assertEqual(self.mocked_get_auth_connection.called, True)
        connection.oauth2.access_token.post.assert_called_with(
            {
                "client_id": base.DEFAULT_ZONE_PROFILE["CLIENT_ID"],
                "client_secret": base.DEFAULT_ZONE_PROFILE["CLIENT_SECRET"],
                "grant_type": "password",
                "username": self.credentials["username"],
                "password": self.credentials["password"],
            }
        )
| StarcoderdataPython |
95961 | # -*- coding: utf-8 -*-
"""
Main model architecture.
reference: https://github.com/andy840314/QANet-pytorch-
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .modules.cnn import DepthwiseSeparableConv
# revised two things: head set to 1, d_model set to 96
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def mask_logits(target, mask):
    """Push masked-out positions of ``target`` to a large negative value.

    ``mask`` holds 1 for valid positions and 0 for padding, so after this a
    softmax assigns padded positions ~zero probability.
    """
    valid = mask.type(torch.float32)
    return target * valid + (1 - valid) * (-1e30)
class InitializedConv1d(nn.Module):
    """Conv1d wrapper whose weight init matches the output activation.

    Kaiming-normal init plus a ReLU on the output when ``relu`` is True,
    Xavier-uniform init with no activation otherwise.
    """

    def __init__(self, in_channels, out_channels,
                 kernel_size=1, stride=1, padding=0, groups=1,
                 relu=False, bias=False):
        super().__init__()
        self.out = nn.Conv1d(
            in_channels, out_channels,
            kernel_size, stride=stride,
            padding=padding, groups=groups, bias=bias)
        # Matches the original exactly: only a literal True enables ReLU.
        self.relu = relu is True
        if self.relu:
            nn.init.kaiming_normal_(self.out.weight, nonlinearity='relu')
        else:
            nn.init.xavier_uniform_(self.out.weight)

    def forward(self, x):
        y = self.out(x)
        return F.relu(y) if self.relu else y
def encode_position(x, min_timescale=1.0, max_timescale=1.0e4):
    """Add sinusoidal position encodings to ``x`` of shape
    [batch, channels, length] and return the same shape."""
    x = x.transpose(1, 2)  # -> [batch, length, channels]
    length, channels = x.size()[1], x.size()[2]
    signal = get_timing_signal(length, channels, min_timescale, max_timescale)
    if x.is_cuda:
        signal = signal.to(x.get_device())
    return (x + signal).transpose(1, 2)


def get_timing_signal(length, channels,
                      min_timescale=1.0, max_timescale=1.0e4):
    """Build the [1, length, channels] sinusoid table from
    "Attention Is All You Need"."""
    position = torch.arange(length).type(torch.float32)
    num_timescales = channels // 2
    # Geometric progression of wavelengths from min to max timescale.
    log_increment = (math.log(float(max_timescale) / float(min_timescale))
                     / (float(num_timescales) - 1))
    inv_timescales = min_timescale * torch.exp(
        torch.arange(num_timescales).type(torch.float32) * -log_increment)
    scaled_time = position.unsqueeze(1) * inv_timescales.unsqueeze(0)
    signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1)
    # Zero-pad one extra column when ``channels`` is odd.
    signal = nn.ZeroPad2d((0, channels % 2, 0, 0))(signal)
    return signal.view(1, length, channels)
class DepthwiseSeparableConv(nn.Module):
    """Depthwise ('same'-padded, per-channel) conv followed by a pointwise 1x1
    conv and a ReLU.

    NOTE(review): this local definition shadows the DepthwiseSeparableConv
    imported from .modules.cnn at the top of the file — confirm which one is
    intended.
    """
    def __init__(self, in_ch, out_ch, k, bias=True):
        super().__init__()
        self.depthwise_conv = nn.Conv1d(
            in_channels=in_ch,
            out_channels=in_ch,
            kernel_size=k,
            groups=in_ch,
            padding=k // 2,
            bias=False,
        )
        self.pointwise_conv = nn.Conv1d(
            in_channels=in_ch,
            out_channels=out_ch,
            kernel_size=1,
            padding=0,
            bias=bias,
        )

    def forward(self, x):
        depthwise = self.depthwise_conv(x)
        return F.relu(self.pointwise_conv(depthwise))
class Highway(nn.Module):
    """Stack of highway layers: x <- g * H(x) + (1 - g) * x, where the
    transform H and the gate g are each 1x1 convolutions over an input of
    shape [batch, size, length]."""
    def __init__(self, layer_num, size):
        super().__init__()
        self.n = layer_num
        self.linear = nn.ModuleList([InitializedConv1d(size, size, relu=False, bias=True) for _ in range(self.n)])
        self.gate = nn.ModuleList([InitializedConv1d(size, size, bias=True) for _ in range(self.n)])

    def forward(self, x):
        # x: shape [batch_size, hidden_size, length]
        dropout = 0.1  # NOTE(review): hard-coded; consider wiring to the model-wide dropout
        for i in range(self.n):
            # torch.sigmoid replaces F.sigmoid, which was deprecated and has
            # been removed from torch.nn.functional.
            gate = torch.sigmoid(self.gate[i](x))
            nonlinear = self.linear[i](x)
            nonlinear = F.dropout(nonlinear, p=dropout, training=self.training)
            x = gate * nonlinear + (1 - gate) * x
        return x
class SelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention over [batch, d_model, length].

    Keys and values come from a single fused 1x1 conv (mem_conv, 2*d_model
    output channels); queries from query_conv. Output has the input shape.
    """
    def __init__(self, d_model, num_head, dropout):
        super().__init__()
        self.d_model = d_model
        self.num_head = num_head
        self.dropout = dropout
        self.mem_conv = InitializedConv1d(in_channels=d_model, out_channels=d_model * 2, kernel_size=1, relu=False,
                                          bias=False)
        self.query_conv = InitializedConv1d(in_channels=d_model, out_channels=d_model, kernel_size=1, relu=False,
                                            bias=False)
        bias = torch.empty(1)
        nn.init.constant_(bias, 0)
        self.bias = nn.Parameter(bias)

    def forward(self, queries, mask):
        memory = queries
        memory = self.mem_conv(memory)      # fused K,V projection: [B, 2*d, L]
        query = self.query_conv(queries)    # Q projection: [B, d, L]
        memory = memory.transpose(1, 2)
        query = query.transpose(1, 2)
        Q = self.split_last_dim(query, self.num_head)
        K, V = [self.split_last_dim(tensor, self.num_head) for tensor in torch.split(memory, self.d_model, dim=2)]
        key_depth_per_head = self.d_model // self.num_head
        # Scale queries up front (equivalent to scaling the logits by 1/sqrt(d_k)).
        Q *= key_depth_per_head ** -0.5
        x = self.dot_product_attention(Q, K, V, mask=mask)
        return self.combine_last_two_dim(x.permute(0, 2, 1, 3)).transpose(1, 2)

    def dot_product_attention(self, q, k, v, bias=False, mask=None):
        """dot-product attention.
        Args:
            q: a Tensor with shape [batch, heads, length_q, depth_k]
            k: a Tensor with shape [batch, heads, length_kv, depth_k]
            v: a Tensor with shape [batch, heads, length_kv, depth_v]
            bias: when True, add the learned scalar bias to the logits
            mask: optional [batch, length_kv] keep-mask (1 keep / 0 drop)
        Returns:
            A Tensor with shape [batch, heads, length_q, depth_v].
        """
        logits = torch.matmul(q, k.permute(0, 1, 3, 2))
        if bias:
            logits += self.bias
        if mask is not None:
            # `is not None` replaces the original `!= None` identity-comparison
            # anti-idiom (size() elements are plain ints, so behavior is the same).
            shapes = [x if x is not None else -1 for x in list(logits.size())]
            mask = mask.view(shapes[0], 1, 1, shapes[-1])
            logits = mask_logits(logits, mask)
        weights = F.softmax(logits, dim=-1)
        # dropping out the attention links for each of the heads
        weights = F.dropout(weights, p=self.dropout, training=self.training)
        return torch.matmul(weights, v)

    def split_last_dim(self, x, n):
        """Reshape x so that the last dimension becomes two dimensions.
        The first of these two dimensions is n.
        Args:
            x: a Tensor with shape [..., m]
            n: an integer.
        Returns:
            a Tensor with shape [..., n, m/n]
        """
        old_shape = list(x.size())
        last = old_shape[-1]
        new_shape = old_shape[:-1] + [n] + [last // n if last else None]
        ret = x.view(new_shape)
        return ret.permute(0, 2, 1, 3)

    def combine_last_two_dim(self, x):
        """Reshape x so that the last two dimension become one.
        Args:
            x: a Tensor with shape [..., a, b]
        Returns:
            a Tensor with shape [..., ab]
        """
        old_shape = list(x.size())
        a, b = old_shape[-2:]
        new_shape = old_shape[:-2] + [a * b if a and b else None]
        ret = x.contiguous().view(new_shape)
        return ret
class EmbeddingLayer(nn.Module):
    """Fuse word and character embeddings into a d_model-wide representation.

    Characters go through a (1, 5) conv plus max-over-characters pooling; the
    result is concatenated with the word vectors, projected to d_model and
    refined by a 2-layer highway network. Output: [batch, d_model, length].
    """
    def __init__(self, wemb_dim, cemb_dim, d_model,
                 dropout_w=0.1, dropout_c=0.05):
        super().__init__()
        self.conv2d = nn.Conv2d(cemb_dim, d_model, kernel_size=(1, 5), padding=0, bias=True)
        nn.init.kaiming_normal_(self.conv2d.weight, nonlinearity='relu')
        self.conv1d = InitializedConv1d(wemb_dim + d_model, d_model, bias=False)
        self.high = Highway(2, d_model)
        self.dropout_w = dropout_w
        self.dropout_c = dropout_c

    def forward(self, ch_emb, wd_emb, length=None):
        # Character branch: [B, L, W, C] -> [B, C, L, W] for the conv.
        chars = ch_emb.permute(0, 3, 1, 2)
        chars = F.dropout(chars, p=self.dropout_c, training=self.training)
        chars = F.relu(self.conv2d(chars))
        # Max-pool over the character axis, keeping one vector per word.
        chars, _ = torch.max(chars, dim=3)
        # Word branch: dropout, then [B, L, D] -> [B, D, L].
        words = F.dropout(wd_emb, p=self.dropout_w, training=self.training)
        words = words.transpose(1, 2)
        fused = torch.cat([chars, words], dim=1)
        return self.high(self.conv1d(fused))
class EncoderBlock(nn.Module):
    """One QANet encoder block: positional encoding, `conv_num` depthwise-
    separable convolutions, self-attention, then a two-layer feed-forward,
    each sub-layer wrapped in pre-LayerNorm, dropout and a stochastic-depth
    residual (see layer_dropout)."""
    def __init__(self, conv_num, d_model, num_head, k, dropout=0.1):
        super().__init__()
        self.convs = nn.ModuleList([DepthwiseSeparableConv(d_model, d_model, k) for _ in range(conv_num)])
        self.self_att = SelfAttention(d_model, num_head, dropout=dropout)
        self.FFN_1 = InitializedConv1d(d_model, d_model, relu=True, bias=True)
        self.FFN_2 = InitializedConv1d(d_model, d_model, bias=True)
        self.norm_C = nn.ModuleList([nn.LayerNorm(d_model) for _ in range(conv_num)])
        self.norm_1 = nn.LayerNorm(d_model)
        self.norm_2 = nn.LayerNorm(d_model)
        self.conv_num = conv_num
        self.dropout = dropout
    def forward(self, x, mask, l, blks):
        """Run the block.

        l is the 1-based index of this block's first sub-layer within the
        whole stack and blks the total number of blocks; together they scale
        the stochastic-depth drop probability linearly with depth.
        """
        total_layers = (self.conv_num + 1) * blks
        dropout = self.dropout
        out = encode_position(x)
        for i, conv in enumerate(self.convs):
            res = out
            # LayerNorm expects the feature dim last, hence the transposes.
            out = self.norm_C[i](out.transpose(1, 2)).transpose(1, 2)
            if i % 2 == 0:
                # Dropout only on every other conv sub-layer.
                out = F.dropout(out, p=dropout, training=self.training)
            out = conv(out)
            out = self.layer_dropout(out, res, dropout * float(l) / total_layers)
            l += 1
        res = out
        out = self.norm_1(out.transpose(1, 2)).transpose(1, 2)
        out = F.dropout(out, p=dropout, training=self.training)
        out = self.self_att(out, mask)
        out = self.layer_dropout(out, res, dropout * float(l) / total_layers)
        l += 1
        res = out
        out = self.norm_2(out.transpose(1, 2)).transpose(1, 2)
        out = F.dropout(out, p=dropout, training=self.training)
        out = self.FFN_1(out)
        out = self.FFN_2(out)
        out = self.layer_dropout(out, res, dropout * float(l) / total_layers)
        return out
    def layer_dropout(self, inputs, residual, dropout):
        """Stochastic-depth residual: during training, with probability
        `dropout` drop this sub-layer entirely (return the residual alone);
        otherwise apply element dropout and add the residual."""
        if self.training == True:
            pred = torch.empty(1).uniform_(0, 1) < dropout
            if pred:
                return residual
            else:
                return F.dropout(inputs, dropout, training=self.training) + residual
        else:
            # Evaluation: deterministic residual connection.
            return inputs + residual
class CQAttention(nn.Module):
    """Context-query attention (BiDAF-style trilinear similarity).

    Produces [C; A; C*A; C*B] where A is context-to-query attention and B the
    DCN-style query-to-context attention, so the output has 4*d_model channels.
    """
    def __init__(self, d_model, dropout=0.1):
        super().__init__()
        # Trilinear similarity weights: w4C.C + w4Q.Q + w4mlu.(C o Q).
        w4C = torch.empty(d_model, 1)
        w4Q = torch.empty(d_model, 1)
        w4mlu = torch.empty(1, 1, d_model)
        nn.init.xavier_uniform_(w4C)
        nn.init.xavier_uniform_(w4Q)
        nn.init.xavier_uniform_(w4mlu)
        self.w4C = nn.Parameter(w4C)
        self.w4Q = nn.Parameter(w4Q)
        self.w4mlu = nn.Parameter(w4mlu)
        bias = torch.empty(1)
        nn.init.constant_(bias, 0)
        self.bias = nn.Parameter(bias)
        self.dropout = dropout
    def forward(self, C, Q, Cmask, Qmask):
        """C: [B, d, Lc] context, Q: [B, d, Lq] query; masks are [B, L] keep-masks.
        Returns [B, 4*d, Lc]."""
        C = C.transpose(1, 2)
        Q = Q.transpose(1, 2)
        batch_size_c = C.size()[0]
        batch_size, Lc, d_model = C.shape
        batch_size, Lq, d_model = Q.shape
        S = self.trilinear_for_attention(C, Q)  # similarity matrix [B, Lc, Lq]
        Cmask = Cmask.view(batch_size_c, Lc, 1)
        Qmask = Qmask.view(batch_size_c, 1, Lq)
        # S1: row softmax over query positions; S2: column softmax over context.
        S1 = F.softmax(mask_logits(S, Qmask), dim=2)
        S2 = F.softmax(mask_logits(S, Cmask), dim=1)
        A = torch.bmm(S1, Q)
        B = torch.bmm(torch.bmm(S1, S2.transpose(1, 2)), C)
        out = torch.cat([C, A, torch.mul(C, A), torch.mul(C, B)], dim=2)
        return out.transpose(1, 2)
    def trilinear_for_attention(self, C, Q):
        """Compute the [B, Lc, Lq] trilinear similarity via three broadcasted terms
        (avoids materializing the full [B, Lc, Lq, 3*d] concatenation)."""
        batch_size, Lc, d_model = C.shape
        batch_size, Lq, d_model = Q.shape
        dropout = self.dropout
        C = F.dropout(C, p=dropout, training=self.training)
        Q = F.dropout(Q, p=dropout, training=self.training)
        subres0 = torch.matmul(C, self.w4C).expand([-1, -1, Lq])
        subres1 = torch.matmul(Q, self.w4Q).transpose(1, 2).expand([-1, Lc, -1])
        subres2 = torch.matmul(C * self.w4mlu, Q.transpose(1, 2))
        res = subres0 + subres1 + subres2
        res += self.bias
        return res
import logging
logger = logging.getLogger(__name__)
class Pointer(nn.Module):
    """Answer-span pointer: scores each position as span start (from M1 and M2)
    and span end (from M1 and M3).

    Returns log-probabilities during training (for NLL loss) and probabilities
    at eval time when `normalize` is True; raw exponentiated scores otherwise.
    """
    def __init__(self, d_model, normalize=True):
        super(Pointer, self).__init__()
        self.normalize = normalize
        self.w1 = InitializedConv1d(d_model * 2, 1)
        self.w2 = InitializedConv1d(d_model * 2, 1)

    def forward(self, M1, M2, M3, mask):
        x1 = torch.cat([M1, M2], dim=1)
        x2 = torch.cat([M1, M3], dim=1)
        # Fix: squeeze only the singleton channel dim. A bare .squeeze() also
        # collapsed the batch dim when batch_size == 1, which then made
        # softmax(dim=1) fail on a 1-D tensor.
        y1 = mask_logits(self.w1(x1).squeeze(1), mask)
        y2 = mask_logits(self.w2(x2).squeeze(1), mask)
        if self.normalize:
            if self.training:
                # In training we output log-softmax for NLL
                p_s = F.log_softmax(y1, dim=1)  # [B, S]
                p_e = F.log_softmax(y2, dim=1)  # [B, S]
            else:
                # ...Otherwise 0-1 probabilities
                p_s = F.softmax(y1, dim=1)  # [B, S]
                p_e = F.softmax(y2, dim=1)  # [B, S]
        else:
            p_s = y1.exp()
            p_e = y2.exp()
        return p_s, p_e
class QANet(nn.Module):
    """QANet reading-comprehension model: embedding -> embedding encoder ->
    context-query attention -> three passes over a stack of 7 model encoder
    blocks -> start/end span pointer."""
    def __init__(self, args, normalize=True):
        super(QANet, self).__init__()
        # Store config
        self.args = args
        # Word embeddings (+1 for padding)
        self.embedding = nn.Embedding(args.vocab_size,
                                      args.embedding_dim,
                                      padding_idx=0)
        # Char embeddings (+1 for padding)
        self.char_embedding = nn.Embedding(args.char_size,
                                           args.char_embedding_dim,
                                           padding_idx=0)
        d_model = args.hidden_size
        self.dropout = args.dropout
        self.emb = EmbeddingLayer(args.embedding_dim, args.char_embedding_dim, d_model)
        num_head = args.num_head
        self.emb_enc = EncoderBlock(conv_num=4, d_model=d_model, num_head=num_head, k=7, dropout=self.dropout)
        self.cq_att = CQAttention(d_model=d_model)
        # Projects the 4*d_model CQ-attention output back to d_model.
        self.cq_resizer = InitializedConv1d(d_model * 4, d_model)
        self.model_enc_blks = nn.ModuleList(
            [EncoderBlock(conv_num=2, d_model=d_model, num_head=num_head, k=5, dropout=self.dropout)
             for _ in range(7)])
        self.out = Pointer(d_model, normalize=normalize)
        self.PAD = 0  # padding token id used to derive the masks
    def forward(self, cw_idx, cc_idx, c_f, c_mask, qw_idx, qc_idx, q_mask):
        # NOTE(review): the incoming c_mask/q_mask arguments are immediately
        # overwritten below (recomputed from the PAD id) and c_f is never
        # used — confirm callers expect this.
        c_mask = (torch.ones_like(cw_idx) * self.PAD != cw_idx).float()
        q_mask = (torch.ones_like(qw_idx) * self.PAD != qw_idx).float()
        cw_emb, cc_emb = self.embedding(cw_idx), self.char_embedding(cc_idx)
        qw_emb, qc_emb = self.embedding(qw_idx), self.char_embedding(qc_idx)
        if self.args.dropout_emb > 0:
            cw_emb = F.dropout(cw_emb, p=self.args.dropout_emb, training=self.training)
            qw_emb = F.dropout(qw_emb, p=self.args.dropout_emb, training=self.training)
            cc_emb = F.dropout(cc_emb, p=self.args.dropout_emb, training=self.training)
            qc_emb = F.dropout(qc_emb, p=self.args.dropout_emb, training=self.training)
        c_emb, q_emb = self.emb(cc_emb, cw_emb), self.emb(qc_emb, qw_emb)
        c_enc_emb = self.emb_enc(c_emb, c_mask, 1, 1)
        q_enc_emb = self.emb_enc(q_emb, q_mask, 1, 1)
        cq_sim_x = self.cq_att(c_enc_emb, q_enc_emb, c_mask, q_mask)
        m_0 = self.cq_resizer(cq_sim_x)
        # m_0 = F.dropout(m_0, p=self.dropout, training=self.training)
        # Three passes through the shared 7-block model encoder; enc collects
        # the output of each pass (QANet uses them as M1, M2, M3).
        enc = [m_0]
        for i in range(3):
            if i % 2 == 0:  # dropout every 2 blocks
                enc[i] = F.dropout(enc[i], p=self.dropout, training=self.training)
            blk_outputs = [enc[i]]
            for j, blk in enumerate(self.model_enc_blks):
                blk_in = blk_outputs[-1]
                blk_outputs.append(blk(blk_in, c_mask, j * (2 + 2) + 1, 7))
            enc.append(blk_outputs[-1])
        start_scores, end_scores = self.out(enc[1], enc[2], enc[3], c_mask)
        return start_scores, end_scores
| StarcoderdataPython |
3339819 | import time
import random
import pygame
from syslogic import CGGPYG
from cggframe import cggframe
class linetest(cggframe):
    """Bouncing-line demo: both endpoints of a line drift around a 640x400
    window, reflecting off the edges; SPACE reverses every velocity and the
    screen is wiped every 100 frames."""

    def __init__(self):
        self.cgg = CGGPYG("")
        self.gamestate = "play"
        """x1,y1:startpoint x2,y2:endpoint of line"""
        self.x1, self.y1 = 100, 100
        self.x2, self.y2 = 400, 400
        # Per-frame velocity of each endpoint coordinate.
        self.dx1 = self.dy1 = 10
        self.dx2 = self.dy2 = 10
        # Frame counter driving the periodic screen clear.
        self.ct = 0

    def _bounce(self, pos, vel, limit):
        """Return the (possibly reversed) velocity for one coordinate so it
        reflects off 0 and `limit`."""
        if pos < 0 and vel < 0:
            return -vel
        if pos > limit and vel > 0:
            return -vel
        return vel

    def routine(self):
        self.ct += 1
        if self.ct >= 100:
            # Wipe the accumulated line trails every 100 frames.
            self.cgg.cls()
            self.ct = 0
        self.x1 += self.dx1
        self.y1 += self.dy1
        self.x2 += self.dx2
        self.y2 += self.dy2
        self.dx1 = self._bounce(self.x1, self.dx1, 640)
        self.dx2 = self._bounce(self.x2, self.dx2, 640)
        self.dy1 = self._bounce(self.y1, self.dy1, 400)
        self.dy2 = self._bounce(self.y2, self.dy2, 400)
        self.draw()

    def keyin(self, key):
        if self.gamestate == "play" and key == pygame.K_SPACE:
            # Reverse every velocity component.
            self.dx1, self.dx2 = -self.dx1, -self.dx2
            self.dy1, self.dy2 = -self.dy1, -self.dy2

    def draw(self):
        self.cgg.setcolor(4)
        self.cgg.line(self.x1, self.y1, self.x2, self.y2)
# Run the demo at a 0.2s frame interval; main() is presumably provided by the
# cggframe base class (not defined in this file) — confirm.
lt=linetest()
lt.main(0.2)
| StarcoderdataPython |
3380832 | <gh_stars>0
from flask import Flask,render_template, request,redirect, session
import backend
import os
from datetime import date
import smtplib
from email.message import EmailMessage
import random
app = Flask(__name__)
# A fresh random secret per process: every restart invalidates all sessions.
app.secret_key = os.urandom(24)
backend.connect()
def get_fname(s):
    """Return the first name: everything before the first space in *s*.

    Returns the whole string when there is no space, and '' when the string
    starts with a space (matching the original char-scan behaviour).
    """
    return s.partition(' ')[0]
def send_mail(receiver,name):
    """Email a freshly generated 4-digit OTP to *receiver* and return it.

    SECURITY(review): the Gmail password is hard-coded below and committed to
    version control — move credentials to environment variables and rotate the
    exposed password. Also consider secrets.randbelow over random.randint for
    the OTP, since random is not cryptographically secure.
    """
    otp = random.randint(1111,9999)
    email = EmailMessage()
    email['from'] = '<EMAIL>'
    email['to'] = receiver
    email['subject'] = 'Authentication mail from Raghav kakar'
    email.set_content('Hey {} Your otp is {}!'.format(get_fname(name),otp))
    # Port 587 with STARTTLS upgrades the connection to TLS before login.
    with smtplib.SMTP(host = 'smtp.gmail.com',port = 587) as smtp:
        smtp.ehlo()
        smtp.starttls()
        smtp.login('<EMAIL>','Vkmehta89@@')
        smtp.send_message(email)
    return otp
@app.route('/')
def index():
    """Landing page with the login/registration forms."""
    return render_template('index.html')
@app.route('/index.html')
def home():
    """Alias for the landing page (templates link to /index.html directly)."""
    return render_template('index.html')
@app.route('/register',methods=['POST'])
def register():
    """Create a new account and send an OTP for email verification.

    Existing accounts are bounced back to the landing page *before* any mail
    is sent (the previous version emailed an OTP even for duplicates).

    NOTE(review): the user row is inserted before the OTP is confirmed, so
    unverified accounts still end up in the database.
    """
    data = request.form.to_dict()
    rows = backend.search(data['email'], data['password'])
    if len(rows) >= 1:
        return redirect('/')
    otp = send_mail(data['email'], data['name'])
    backend.insert(data['email'], data['password'], data['name'], data['address'],
                   data['city'], data['state'], data['zip'])
    return render_template('/verification.html', otp=otp)
@app.route('/verify/<int:otp>',methods=['POST'])
def verify(otp):
    """Compare the submitted OTP against the one embedded in the route URL."""
    submitted = request.form.to_dict()['otp']
    if int(submitted) == int(otp):
        return redirect('/')
    # Wrong code: re-show the verification form with the same OTP.
    return render_template('/verification.html', otp=otp)
@app.route('/login',methods=['POST'])
def login():
    """Authenticate and populate the session from the matching user row."""
    creds = request.form.to_dict()
    matches = backend.search(creds['email'], creds['password'])
    if len(matches) != 1:
        return redirect('/')
    user = matches[0]
    # Row layout: (pk, email, password, full name, ...).
    session['user_id'] = get_fname(user[3])
    session['user_pk'] = user[0]
    session['user_email'] = user[1]
    return redirect('/dashboard.html')
@app.route('/dashboard.html')
def dashboard():
    """Show the logged-in user's dashboard; the session check doubles as the
    login guard."""
    if 'user_id' in session:
        data = backend.display_all_posts(session['user_pk'])
        return render_template('dashboard.html',pk=session['user_pk'],name=session['user_id'],data=data)
    else:
        return redirect('/index.html')
@app.route('/logout',methods=['POST'])
def logout():
    """Drop every session key set at login and return to the landing page."""
    for key in ('user_id', 'user_pk', 'user_email'):
        session.pop(key)
    return redirect('/index.html')
@app.route('/newmessage.html')
def opennewpost():
    """Show the compose form; logged-out users go back to the landing page."""
    if 'user_id' in session:
        return render_template('newmessage.html',name=session['user_id'])
    else:
        return redirect('/index.html')
@app.route('/sendamessage',methods=['POST'])
def addpost():
    """Store a new message for the logged-in user, stamped with today's date.

    NOTE(review): no session check here — a logged-out POST raises KeyError
    on session['user_pk'].
    """
    data = request.form.to_dict()
    backend.insert_in_messages(session['user_pk'],data['emaddress'],date.today(),data['text'])
    return redirect('/viewall.html')
@app.route('/viewall.html')
def viewall():
    """List the logged-in user's messages.

    Bug fix: the backend query now runs only after the session check — the
    previous version read session['user_pk'] first and raised KeyError for
    logged-out visitors instead of redirecting them.
    """
    if 'user_id' in session:
        data = backend.display_user_posts(session['user_pk'])
        return render_template('viewall.html', data=data, name=session['user_id'])
    return redirect('/index.html')
@app.route('/viewthispost/<int:message_id>',methods=['POST'])
def view(message_id):
    """Show a single message; unauthenticated users return to the landing page."""
    if 'user_id' in session:
        message = backend.search_messages(message_id)
        return render_template('viewthispost.html', message=message)
    # Bug fix: the original called render_template('/') here, which is not a
    # template name and raised TemplateNotFound; redirect like the other views.
    return redirect('/')
@app.route("/deletepost/<int:message_id>",methods=['POST'])
def delete(message_id):
backend.delete_message(message_id)
return redirect("/viewall.html")
@app.route('/editpost/<int:message_id>',methods=['POST'])
def edit(message_id):
    """Open the edit form pre-filled with the first matching message row."""
    message = backend.search_messages(message_id)
    return render_template("/editpost.html",message = message[0])
@app.route('/editpost/updatemessage/<int:message_id>',methods=['POST'])
def update(message_id):
    """Persist edits to a message and return to the list view."""
    data = request.form.to_dict()
    # Leftover debug print(data) removed.
    backend.update(message_id, data)
    return redirect('/viewall.html')
app.run(debug=True) | StarcoderdataPython |
162311 | <reponame>melwinmpk/Django_RestAPI<gh_stars>1-10
from django.contrib.auth.models import User,auth
from django.shortcuts import render, redirect
from django.contrib import messages
from testsetup.models import SubjectDefinition,Questions,QuestionDefinition
import json
import random
import base64
class testsetup:
    """Ajax backend for quiz administration: create subjects/questions, pick
    random questions for a test, fetch question data and check answers.

    NOTE(review): these class-level defaults are shared across instances until
    shadowed in __init__; in particular `options = {}` is a mutable class
    attribute and would be shared if ever mutated in place — confirm intended.
    """
    subjectname = ''
    questiontype = 1
    Question = ''
    options = {}
    Ans = 1
    subjectid = 1
    questionid = 0
    def userlogincheck(self):
        # Placeholder: no real authentication is performed yet.
        print("userlogin check calling respective function")
    def __init__(self,data=None):
        """Populate instance fields from a JSON string of request parameters;
        a None payload leaves the class-level defaults in place."""
        if data == None:
            return
        data = json.loads(data)
        if "subjectname" in data:
            self.subjectname = data['subjectname']
        if "subjectid" in data:
            self.subjectid = data['subjectid']
        if "questiontype" in data:
            self.questiontype = data['questiontype']
        # Option/answer fields only apply to question type 1 (multiple choice,
        # judging by usage — confirm against QuestionDefinition rows).
        if int(self.questiontype) == 1:
            if "options" in data:
                self.options = data['options']
            if "Question" in data:
                self.Question = data['Question']
            if "Ans" in data:
                self.Ans = int(data['Ans'])
        if "questionid" in data:
            self.questionid = data['questionid']
    # @userlogincheck
    def savesubjectAck(self,request):
        """Create a SubjectDefinition row; returns a JSON status string."""
        if self.subjectname != None and self.subjectname != '':
            self.userlogincheck()
            subjaectobj = SubjectDefinition(SubjectName = self.subjectname)
            subjaectobj.save()
            return json.dumps({'status':'success'})
        else:
            # NOTE(review): 'fali' looks like a typo for 'fail' — clients may
            # depend on the current spelling, so it is left as-is.
            return json.dumps({'status': 'fali','message':'data subjectname missing'})
    def savequestionAck(self,request):
        """Create a Questions row from the fields parsed in __init__."""
        self.userlogincheck()
        questionobj = Questions(
            SubjectId = SubjectDefinition.objects.get(SubjectId=self.subjectid),
            Question = self.Question,
            Options = self.options,
            QuestionTypeId = QuestionDefinition.objects.get(QuestionTypeId=self.questiontype),
            Ans = self.Ans)
        questionobj.save()
        return json.dumps({'status':'success'})
    def taketestAck(self,request,data=None):
        """Pick up to 5 random question ids per requested subject.

        Called both via Ajax (data=None; subject ids come from self.subjectid
        as a JSON string) and internally (data carries 'subjectids').
        """
        self.userlogincheck()
        subject_question_list = {}
        subjectid_data = {}
        fromAjaxcall = True
        if data == None:
            subjectid_data = json.loads(self.subjectid)
        else:
            fromAjaxcall = False
            subjectid_data = data['subjectids']
        for subjectid in subjectid_data:
            # print(subjectid)
            Subject_questionid_list = list(Questions.objects.filter(SubjectId=subjectid).values_list('id'))
            if len(Subject_questionid_list) > 0 :
                # Indices are drawn over 0..len-1 inclusive (randint is inclusive).
                randomIds = self.randomgenerator(len(Subject_questionid_list) - 1)
                questionIds = []
                i = 0
                for index in randomIds:
                    print(index)
                    questionIds.append(Subject_questionid_list[index][0])
                    i+=1
                subject_question_list[str(subjectid)] = questionIds
        if fromAjaxcall:
            return json.dumps({'status': 'success','data':subject_question_list})
        else:
            return subject_question_list
    def randomgenerator(self,Subject_question_len):
        """Return up to 5 unique random indices in [0, Subject_question_len].

        NOTE(review): when fewer than 5 questions exist, the loop stops at
        i >= Subject_question_len (which is len-1), so one available question
        is always left out — confirm whether that is intended.
        """
        subjectIds = []
        i = 0
        # Subject_question_len = Subject_question_len1
        print(Subject_question_len)
        while True:
            index = random.randint(0, Subject_question_len)
            if index not in subjectIds:
                subjectIds.append(index)
                i += 1
            if i >= 5 or (Subject_question_len < 5 and i >= (Subject_question_len)):
                break
        return subjectIds
    def getquestiondataAck(self,request,questionid = None):
        """Fetch one question row as a dict; JSON-wrapped for Ajax callers."""
        self.userlogincheck()
        if questionid == None:
            questionid = self.questionid
            fromAjaxcall = True
        else:
            fromAjaxcall = False
        questiondata = Questions.objects.filter(id=questionid)
        result = questiondata.values() # return ValuesQuerySet object
        list_result = [entry for entry in result][0]
        if fromAjaxcall:
            return json.dumps({'status': 'success','data':list_result})
        else:
            return list_result
    def checkanswerAck(self,request):
        """Return the stored answer for self.questionid so the client can grade."""
        self.userlogincheck()
        question_ans = Questions.objects.filter(id=self.questionid).values_list('Ans')
        return json.dumps({'status': 'success', 'data': {'questionid':self.questionid,"Ans":list(question_ans)[0][0]}})
    def gettestquestionidsAck(self,request):
        """Return the test state previously stashed in the Django session."""
        self.userlogincheck()
        # request.session['questiontypes'] = QuestionDefinition.objects.all() "QuestionTypes":request.session['questiontypes']
        # request.session['subjectdata'] = SubjectDefinition.objects.filter(SubjectId=request.GET['subjectids'])
        return json.dumps({'status': 'success', 'data': {'subjectids': request.session['subjectids'], "QuestionIds": request.session['QuestionIds'],"Subjectdata":request.session['subjectdata'],"QuestionTypes":request.session['questiontypes']}})
    def render_data(self,view, data=None):
        """Build the common template context for a view name, optionally with data."""
        if data == None:
            return {'title': view, 'view_js': view + '.js', 'view_css': view + '.css'}
        else:
            return {'title': view, 'view_js': view + '.js', 'view_css': view + '.css', 'data': data}
| StarcoderdataPython |
57654 | from .elasticsearch_connector import *
from .index_handler import *
from .policy_handler import *
| StarcoderdataPython |
4819307 | <gh_stars>0
# -*- coding: utf-8 -*-
import urllib.parse
import asyncio
import aiohttp
import lxml.html
import lxml.html.clean
class BaseAsyncScraper(object):
    """Fetch a batch of URLs concurrently and hand each parsed page to _process().

    Subclasses implement _process() (turn one parsed lxml tree into a result)
    and get() (the public entry point); _get() drives the event loop.

    Rewritten from the generator-based @asyncio.coroutine / ``yield from``
    style (removed in Python 3.11) to native async/await.
    """

    def __init__(self, concurrency: int = 10):
        self.urls = []
        self.concurrency = concurrency  # max simultaneous HTTP requests
        self.pages = []                 # accumulated _process() results

    @staticmethod
    def _format_url(base: str, data: dict):
        """Append *data* to *base* as a URL-encoded query string."""
        return '{0}?{1}'.format(base, urllib.parse.urlencode(data))

    def _process(self, params: dict, page):
        """Subclass hook: turn one parsed page into a result."""
        raise NotImplementedError()

    async def _fetch(self, *args, **kwargs):
        """GET a URL and return the raw response body.

        aiohttp.request is used as an async context manager, per the
        aiohttp 3.x client API.
        """
        async with aiohttp.request('GET', *args, **kwargs) as response:
            return await response.read()

    async def _buffer(self, sem: asyncio.Semaphore, base: str, data: dict):
        """Fetch one URL (bounded by *sem*), sanitize + parse it, store the result."""
        url = self._format_url(base, data)
        async with sem:
            page = await self._fetch(url)
            tree = lxml.html.fromstring(lxml.html.clean.clean_html(page.decode('utf-8', 'ignore')))
            self.pages.append(self._process(data, tree))

    def _get(self, base: str, urls: list):
        """Scrape every query dict in *urls* against *base*; return collected pages.

        asyncio.run replaces the deprecated get_event_loop()/run_until_complete
        pattern; unlike the old asyncio.wait, gather propagates the first
        exception instead of silently burying it in a future.
        """
        async def _run():
            # The semaphore must be created inside the running loop (3.10+).
            sem = asyncio.Semaphore(self.concurrency)
            await asyncio.gather(*(self._buffer(sem, base, url) for url in urls))
        asyncio.run(_run())
        return self.pages

    def get(self, *args, **kwargs):
        raise NotImplementedError()
| StarcoderdataPython |
130768 | #! /usr/bin/env python3
"""
run_sim.py
Run FPGA simulations via Icarus, NCVerilog, Modelsim or Isim.
"""
import argparse
import json
import os
import shlex
import shutil
import string
import subprocess
import sys
def which(program):
    """Find the absolute path to an executable *program*, or None.

    Thin wrapper over shutil.which, which implements the same contract as the
    old hand-rolled search: a name containing a directory component is
    returned as-is when it is an executable file; otherwise each PATH entry
    is searched in order. Needed because the JSON flow files name executables
    without paths and the tools live in different places on different PCs.
    """
    return shutil.which(program)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Run FPGA Simulation')
parser.add_argument("-D", "--debug",
help="Debug this script",
action="store_true")
parser.add_argument("--icarus",
help="Use Icarus Verilog",
action="store_true")
parser.add_argument("--xcelium",
help="Use Xcelium",
action="store_true")
parser.add_argument("--xcelium_gate",
help="Use Xcelium",
action="store_true")
parser.add_argument("--xcelium_synthesis",
help="Use Xcelium",
action="store_true")
parser.add_argument("--corner",
help="Fast or Slow",
default="RTL",
action="store")
parser.add_argument("--modelsim",
help="Use Altera Modelsim",
action="store_true")
parser.add_argument("--vivado",
help="Use Xilinx Vivado XSim",
action="store_true")
parser.add_argument("--simulation",
help="Which simulation test case to run",
required=True,
action="store")
print(os.environ['PATH'])
args = parser.parse_args()
if args.debug:
print(args)
if args.icarus:
json_file = "../configurations/simulate_iverilog.json"
tool = "icarus"
if args.xcelium:
json_file = "../configurations/simulate_xcelium.json"
tool = "xcelium"
if args.xcelium_gate:
json_file = "../configurations/simulate_xcelium_gate.json"
tool = "xcelium"
if args.xcelium_synthesis:
json_file = "../configurations/simulate_xcelium_synthesis.json"
tool = "xcelium"
if args.modelsim:
json_file = "../configurations/simulate_modelsim.json"
tool = "modelsim"
if args.vivado:
json_file = "../configurations/simulate_vivado.json"
tool = "vivado"
try:
f = open(json_file, "r")
json_data = json.load(f)
except:
print("Failed to open %s" % (json_file))
sys.exit(-1)
flow_steps = json_data['flow_steps']
print(flow_steps)
for step in sorted(flow_steps.keys()):
print("Running Step: %s " % step)
executable = json_data['flow'][flow_steps[step]]['executable']
arguments = string.Template(
json_data['flow'][flow_steps[step]]['arguments'])
arguments_str = arguments.safe_substitute(
simulation=args.simulation, corner=args.corner, tool=tool)
#executable = which(executable)
print(executable)
if (arguments == None):
command = executable
else:
command = executable + " " + arguments_str
print(command)
command = shlex.split(command)
p = subprocess.Popen(command)
p.communicate()
| StarcoderdataPython |
108843 | """Logical optimization, composition, and transformation rules.
"""
import json
from pyfpm.matcher import Matcher
from .. import util
from .. import operators as _op
from ..operators import PhysicalOperator # required for the physical planning rules to compile
from .symbols import *
__pop__ = PhysicalOperator # this is a dummy statement to keep IDEs from pruning the reference to PhysicalOperator
#
# Utility functions
#
def _rewrite_formula(formula, projection):
    """Rewrites a select formula based on any aliased attributes in a projection.

    :param formula: a select formula
    :param projection: a projection list
    :return: the rewritten formula
    """
    if not formula:
        return formula

    # Map each alias back to the original column name it stands for.
    alias_to_name = {
        element.alias: element.name
        for element in projection
        if isinstance(element, AttributeAlias)
    }
    if not alias_to_name:
        return formula

    def _rewrite_comparison(comp):
        # Replace an aliased left operand with its original column name.
        return Comparison(alias_to_name.get(comp.operand1, comp.operand1),
                          comp.operator, comp.operand2)

    if isinstance(formula, Comparison):
        if formula.operand1 in alias_to_name:
            return _rewrite_comparison(formula)
        return formula
    if isinstance(formula, (Conjunction, Disjunction)):
        # Both compound forms take the same (comparisons,) constructor shape.
        return type(formula)(tuple(
            _rewrite_comparison(comp) for comp in formula.comparisons
        ))
    return formula
#
# Planning and optimization rules
#
#: general purpose rules for optimizing logical operator expressions
logical_optimization_rules = Matcher([
    # -- Nil propagation: operators over an empty (Nil) child collapse to Nil.
    (
        'Distinct(Nil(), _)',
        lambda: Nil()
    ),
    (
        'Deduplicate(Nil(), _, _, _)',
        lambda: Nil()
    ),
    # -- Deduplicate without a similarity function degenerates to Distinct.
    (
        'Deduplicate(child, attributes, None, _)',
        lambda child, attributes: Distinct(child, attributes)
    ),
    (
        'Project(Nil(), _)',
        lambda: Nil()
    ),
    # -- Identity elimination: an empty rename is a no-op.
    (
        'Rename(child, dict())',
        lambda child: child
    ),
    # -- Fold a Rename into the Project beneath it, dropping shadowed names.
    (
        'Rename(Project(child, attributes), renames)',
        lambda child, attributes, renames: Project(child, tuple([
            a for a in attributes if a not in [r.name for r in renames]
        ]) + renames)
    ),
    (
        'Select(Nil(), _)',
        lambda: Nil()
    ),
    (
        'Unnest(Nil(), _, _)',
        lambda: Nil()
    ),
    # -- Push Select below Project, rewriting aliases back to source columns.
    (
        'Select(Project(child, attributes), formula)',
        lambda child, attributes, formula:
            Project(
                Select(child, _rewrite_formula(formula, attributes)),
                attributes
            )
    )
])
#: composite operator rules defined as functional pattern matching expressions
#: — each rewrites a high-level composite operator into primitive logical ones.
logical_composition_rules = Matcher([
    # Reify: project key + payload attributes, distinct on the key, declare it.
    (
        'Reify(child, keys, attributes)',
        lambda child, keys, attributes:
            AddKey(
                Distinct(
                    Project(child, keys + attributes),
                    keys
                ),
                keys
            )
    ),
    (
        'ReifySub(_, tuple())',
        lambda: Nil()
    ),
    # ReifySub: split attributes into a child table keyed back to the source
    # via an introspected key.
    (
        'ReifySub(child, attributes)',
        lambda child, attributes:
            AddForeignKey(
                Project(child, (IntrospectionFunction(util.introspect_key_fn),) + attributes),
                child, (IntrospectionFunction(util.introspect_key_fn),), None
            )
    ),
    (
        'Atomize(_, _, "")',
        lambda: Nil()
    ),
    # Atomize: reify one attribute into its own relation and unnest its values.
    (
        'Atomize(child, unnest_fn, attribute)',
        lambda child, unnest_fn, attribute: Unnest(ReifySub(child, (attribute,)), unnest_fn, attribute)
    ),
    # Domainify: deduplicated single-column domain named 'name'.
    (
        'Domainify(child, attribute, similarity_fn, grouping_fn)',
        lambda child, attribute, similarity_fn, grouping_fn:
            Deduplicate(
                Rename(
                    Project(child, (attribute,)),
                    (AttributeAlias(name=attribute, alias='name'),)
                ),
                ('name',), similarity_fn, grouping_fn
            )
    ),
    # Canonicalize: like Domainify, but nests the variants under 'synonyms'.
    (
        'Canonicalize(child, attribute, similarity_fn, grouping_fn)',
        lambda child, attribute, similarity_fn, grouping_fn:
            Nest(
                Rename(
                    Project(child, (attribute, attribute)),
                    (AttributeAlias(name=attribute, alias='name'), AttributeAlias(name=attribute, alias='synonyms'))
                ),
                ('name',), ('synonyms',), similarity_fn, grouping_fn
            )
    ),
    # Align: similarity-join child values against a canonical domain, then
    # swap the raw attribute for the canonical 'name'.
    (
        'Align(domain, child, attribute, similarity_fn, grouping_fn)',
        lambda domain, child, attribute, similarity_fn, grouping_fn:
            Rename(
                Project(
                    SimilarityJoin(
                        child,
                        Project(domain, ('name', 'synonyms')),
                        Similar(attribute, 'name', 'synonyms', similarity_fn, grouping_fn),
                    ),
                    (AllAttributes(), AttributeDrop(attribute), AttributeDrop('synonyms'))
                ),
                (AttributeAlias(name='name', alias=attribute),)
            )
    ),
    # Tagify: atomize then align — i.e. split multi-valued tags and canonicalize.
    (
        'Tagify(domain, child, attribute, unnest_fn, similarity_fn, grouping_fn)',
        lambda domain, child, attribute, unnest_fn, similarity_fn, grouping_fn:
            Align(domain, Atomize(child, unnest_fn, attribute), attribute, similarity_fn, grouping_fn)
    )
])
#: rules for transforming logical plans to physical plans
#: — children already lowered to PhysicalOperator are wrapped; leaves map to
#: their concrete scan/DDL operators. Rule order matters: more specific
#: patterns (e.g. fused Select+Project over an ERMrest table) come first.
physical_transformation_rules = Matcher([
    (
        'Assign(child:PhysicalOperator, schema, table)',
        lambda child, schema, table: _op.Assign(child, schema, table)
    ),
    # A string child is raw JSON table metadata: create from metadata.
    (
        'Assign(child:str, schema, table)',
        lambda child, schema, table: _op.Create(_op.Metadata(json.loads(child)), schema, table)
    ),
    # Same-table assignment of a projection/rename becomes an in-place Alter.
    (
        'Assign(Project(TableExtant(model, src_sname, src_tname), attributes), dst_sname, dst_tname)'
        ' if (src_sname, src_tname) == (dst_sname, dst_tname)',
        lambda model, src_sname, src_tname, dst_sname, dst_tname, attributes:
            _op.Alter(_op.ERMrestSelectProject(model, src_sname, src_tname, attributes), src_sname, src_tname, dst_sname, dst_tname, attributes)
    ),
    (
        'Assign(Rename(TableExtant(model, src_sname, src_tname), attributes), dst_sname, dst_tname)',
        lambda model, src_sname, src_tname, dst_sname, dst_tname, attributes:
            _op.Alter(_op.ERMrestSelectProject(model, src_sname, src_tname, attributes), src_sname, src_tname, dst_sname, dst_tname, attributes)
    ),
    # Assigning Nil drops the target table.
    (
        'Assign(Nil(), schema, table)',
        lambda schema, table: _op.Drop(_op.Metadata({'schema_name': schema, 'table_name': table}), schema, table)
    ),
    (
        'TempVar(child)',
        lambda child: _op.TempVarRef(child)
    ),
    (
        'Distinct(child:PhysicalOperator, attributes)',
        lambda child, attributes: _op.HashDistinct(child, attributes)
    ),
    (
        'Deduplicate(child:PhysicalOperator, attributes, similarity_fn, grouping_fn)',
        lambda child, attributes, similarity_fn, grouping_fn: _op.NestedLoopsSimilarityAggregation(_op.HashDistinct(child, attributes), attributes, [], similarity_fn, grouping_fn)
    ),
    # Fused Select/Project over an ERMrest table: push both into a single
    # server-side select-project where possible.
    (
        'Project(Select(TableExtant(model, sname, tname), formula), attributes)',
        lambda model, sname, tname, formula, attributes: _op.ERMrestSelectProject(model, sname, tname, attributes, formula)
    ),
    (
        'Project(TableExtant(model, sname, tname), attributes)',
        lambda model, sname, tname, attributes: _op.ERMrestSelectProject(model, sname, tname, attributes)
    ),
    (
        'Select(TableExtant(model, sname, tname), formula)',
        lambda model, sname, tname, formula: _op.ERMrestSelectProject(model, sname, tname, formula=formula)
    ),
    (
        'TableExtant(model, sname, tname)',
        lambda model, sname, tname: _op.ERMrestSelect(model, sname, tname)
    ),
    (
        'JSONDataExtant(input_filename, json_content, object_payload, key_regex)',
        lambda input_filename, json_content, object_payload, key_regex: _op.JSONScan(input_filename, json_content, object_payload, key_regex)
    ),
    (
        'Project(child:PhysicalOperator, attributes)',
        lambda child, attributes: _op.Project(child, attributes)
    ),
    (
        'Unnest(child:PhysicalOperator, unnest_fn, attribute)',
        lambda child, unnest_fn, attribute: _op.Unnest(child, unnest_fn, attribute)
    ),
    (
        'Nest(child:PhysicalOperator, grouping, nesting, similarity_fn, grouping_fn)',
        lambda child, grouping, nesting, similarity_fn, grouping_fn:
            _op.NestedLoopsSimilarityAggregation(
                _op.HashDistinct(child, grouping + nesting),  # inject distinct on group/nest attributes in tuples
                grouping, nesting, similarity_fn, grouping_fn
            )
    ),
    (
        'Rename(child:PhysicalOperator, renames)',
        lambda child, renames: _op.Rename(child, renames)
    ),
    (
        'Select(child:PhysicalOperator, formula)',
        lambda child, formula: _op.Select(child, formula)
    ),
    (
        'Shred(graph, expression)',
        lambda graph, expression: _op.Shred(graph, expression)
    ),
    (
        'TabularDataExtant(filename)',
        lambda filename: _op.TabularFileScan(filename)
    ),
    (
        'SimilarityJoin(left:PhysicalOperator, right:PhysicalOperator, condition)',
        lambda left, right, condition: _op.NestedLoopsSimilarityJoin(left, right, condition)
    ),
    (
        'Join(left:PhysicalOperator, right:PhysicalOperator)',
        lambda left, right: _op.CrossJoin(left, right)
    ),
    (
        'Union(child:PhysicalOperator, right:PhysicalOperator)',
        lambda child, right: _op.Union(child, right)
    ),
    (
        'AddKey(child:PhysicalOperator, unique_columns)',
        lambda child, unique_columns: _op.AddKey(child, unique_columns)
    ),
    (
        'AddForeignKey(left:PhysicalOperator, right, referenced_columns, foreign_key_columns)',
        lambda left, right, referenced_columns, foreign_key_columns:
            _op.AddForeignKey(left, right, referenced_columns, foreign_key_columns)
    )
])
| StarcoderdataPython |
3217486 | <filename>p99/python3/p22.py
# create a list containing all integers within a given range
def rng(i, k):
    """Return all integers from i through k, inclusive, as a list."""
    upper_bound = k + 1
    return [n for n in range(i, upper_bound)]
def test_rng():
    """Sanity-check rng over a small inclusive range."""
    expected = [4, 5, 6, 7, 8, 9]
    assert rng(4, 9) == expected
| StarcoderdataPython |
1746443 | import random
import json
import os
import shutil
def initiate_files(gossip_activated):
    """Reset the 'temporary' working directory to a clean simulation state.

    Deletes every file/symlink/subdirectory under 'temporary', then seeds
    the empty JSON log files the miners expect.  When *gossip_activated*
    is true, an initial longest-chain record is also created.
    """
    # Wipe everything currently in the scratch directory.
    for filename in os.listdir('temporary'):
        file_path = os.path.join('temporary', filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as e:
            # Best-effort cleanup: report but keep deleting the rest.
            print('Failed to delete %s. Reason: %s' % (file_path, e))
    # Seed empty logs shared between miner processes.
    write_file('temporary/confirmation_log.json', {})
    write_file('temporary/miner_wallets_log.json', {})
    if gossip_activated:
        # Gossip mode tracks the longest chain and its announcing miner.
        write_file('temporary/longest_chain.json', {'chain': {}, 'from': 'Miner_1'})
def read_file(file_path):
    """Read and return the JSON content of *file_path*.

    Busy-waits until the file can be opened and parsed: another process
    may be mid-write, so transient failures are retried.  Only
    OSError/ValueError (json.JSONDecodeError is a ValueError) are
    retried; the original caught every exception, which would spin
    forever on a genuine programming error.
    """
    while True:
        try:
            with open(file_path, 'r') as f:
                return json.load(f)
        except (OSError, ValueError):
            # File missing/locked, or JSON only partially written: retry.
            pass
def write_file(file_path, contents):
    """Serialize *contents* to *file_path* as pretty-printed JSON."""
    serialized = json.dumps(contents, indent=4)
    with open(file_path, 'w') as out:
        out.write(serialized)
def rewrite_file(file_path, new_version):
    """Replace the contents of *file_path* with *new_version* as JSON.

    Retries until the write succeeds, since another process may briefly
    hold the file.  Opening with mode 'w' truncates the file, so the
    original's preliminary os.remove() was redundant and is dropped;
    the bare catch-all excepts are narrowed to OSError so programming
    errors are not silently retried forever.
    """
    while True:
        try:
            with open(file_path, "w") as f:
                json.dump(new_version, f, indent=4)
            return
        except OSError:
            # File busy/locked: retry until the write goes through.
            pass
| StarcoderdataPython |
1683075 | <gh_stars>1-10
from collectors.spiders.committee_event import CommitteeEventSpider
from collectors.spiders.committee_speech import CommitteeSpeechSpider
| StarcoderdataPython |
3216180 | <reponame>SpleefDinamix/SoftuniPythonProgrammingBasics
# Prompt for a length in inches and convert it to centimeters.
inches = float(input("Inches = "))
# One inch is defined as exactly 2.54 cm.
centimeters = inches * 2.54
print("Centimeters =", centimeters)
1654319 | <reponame>netbofia/WindowsAccessLogger
from kivy.app import App
from kivy.uix.label import Label
from kivy.uix.gridlayout import GridLayout
from kivy.uix.textinput import TextInput
from kivy.uix.button import Button
from kivy.uix.popup import Popup
from kivy.uix.switch import Switch
from kivy.uix.progressbar import ProgressBar
from kivy.uix.scrollview import ScrollView
from kivy.core.window import Window
from functools import partial
from database import Db
import sqlite3
import hashlib
import subprocess
import re
## TODO
##!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! SHOULD ONLY ALLOW ONE TO OPEN !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
class LoginScreen(GridLayout):
def __init__(self, **kwargs):
super(LoginScreen, self).__init__(**kwargs)
self.titles="""
[anchor=title1][size=24]Forest-Biotech[/size]
[anchor=subtitle][size=20]Nanodrop usage logger[/size]
"""
self.content="""
[anchor=content]
Be sure to clean the equipment before and after each usage.
To request a login to use this equipment please send an email to: <EMAIL>
For a fast addition respect the following structure:
- Subject: account nanodrop
- Text: Desired login, name & group name.
"""
self.cols = 1
self.padding=[100,10,100,10]
self.add_widget(Label(text=self.titles,markup=True,halign="center"))
self.add_widget(Label(text=self.content,markup=True,halign="justify"))
self.username = TextInput(multiline=False)
self.password = TextInput(password=True, multiline=False)
self.button = Button(text="Login",size_hint_x=None, width=100)
self.button.bind(on_press=self._on_validate)
self.loginGrid=GridLayout(cols=3,row_force_default=True,row_default_height=40)
self.loginGrid.add_widget(Label(text='username',halign="right"))
self.loginGrid.add_widget(self.username)
self.loginGrid.add_widget(Label(text=''))
self.loginGrid.add_widget(Label(text='password',halign="right"))
self.loginGrid.add_widget(self.password)
self.loginGrid.add_widget(self.button)
self.add_widget(self.loginGrid)
###### REMOVE #################
##self.db=Db()
##self.loadLoggedMenu()
###### ###### #################
self.username.focus=True
self.username.bind(text=self._on_type_username)
self.password.bind(on_text_validate=self._on_validate)
def _on_type_username(self, instance, value):
if re.search("\t",value):
##Change this somehow to on keydown
instance.text=value.replace("\t","")
instance.focus=False
self.password.focus=True
def makePopup(self,content):
popupGrid=GridLayout(cols=1)
popupGrid.add_widget(Label(text=content,halign="justify"))
popupButton=Button(text='OK')
popupGrid.add_widget(popupButton)
self.popup = Popup(title='Authentication',
content=popupGrid,
size_hint=(None,None),
size=(200,200))
popupButton.bind(on_press=self.popup.dismiss)
def _on_validate(self,instance):
self.passHash=<PASSWORD>lib.sha256()
self.passHash.update(bytes(self.password.text, 'utf-8'))
self.validateLogin()
def validateLogin(self):
self.db=Db()
userdata=self.db.getUser(self.username.text)
if ( type(userdata)==tuple and len(userdata)>4):
self.ID=str(userdata[0])
self.USERNAME=userdata[1]
self.NAME=userdata[2]
dbHash=userdata[4]
if userdata[5] == 1:
self.ADMIN=True
else:
self.ADMIN=False
if userdata[6] == 1:
self.ENABLED=True
else:
self.ENABLED=False
else:
dbHash=None
passwordHash=self.passHash.hexdigest()
if ( passwordHash == dbHash and self.ENABLED == 1 ):
self.makePopup("Login succeeded!")
self.loadLoggedMenu()
else:
self.makePopup("Login Error!\n Wrong login or password!")
self.popup.open()
def loadLoggedMenu(self,instance=None):
self.clear_widgets()
self.btn1 = Button(text="Start Nanodrop")
self.btn1.bind(on_press=self.startProgram)
self.btn2 = Button(text="settings") #change password ?name?
self.btn2.bind(on_press=self.loadChangePasswordMenu)
self.btn3 = Button(text="admin") #Create user, Change path, see all users
self.btn3.bind(on_press=self.loadAdminMenu)
self.btn4 = Button(text="show activity") #Table, export data
self.btn4.bind(on_press=self.showActivity)
self.add_widget(Label(text=self.titles,markup=True,halign="center"))
#Login name and logout button
self.add_widget(self.btn1)
self.add_widget(self.btn2)
if self.ADMIN:
self.add_widget(self.btn3)
self.add_widget(self.btn4)
def userPanel(self,instance):
userGrid=GridLayout(cols=4,row_force_default=True,row_default_height=40)
#Label User: Label(Name), button(logout), button(main menu)
userGrid.add_widget(Label(text="User: ",font_size="20sp",halign="left",size_hint_x=None, width=80))
userGrid.add_widget(Label(text=self.NAME,font_size="20sp",halign="left",size_hint_x=None, width=150))
btnLogout=Button(text="logout",size_hint_x=None, width=100)
#btnLogout.bint(on_press=)
btnMainMenu=Button(text="main menu")
btnMainMenu.bind(on_press=self.loadLoggedMenu)
userGrid.add_widget(btnLogout)
userGrid.add_widget(btnMainMenu)
instance.add_widget(userGrid)
def startProgram(self,instance):
self.makePopup("Simulating Nanodrop!")
self.popup.open()
path=self.db.getPath()
subprocess.call([path])
self.db.logActivity(self.ID)
def loadAdminMenu(self,instance):
self.clear_widgets()
self.add_widget(Label(text=self.titles,markup=True,halign="center"))
self.btn1 = Button(text="create user")
self.btn1.bind(on_press=self.loadCreateUserMenu)
self.btn2 = Button(text="change path") #Set nanodrop path
self.btn2.bind(on_press=self.loadSettingsMenu)
self.btn3 = Button(text="see users") #Create user, Change password, see all users
self.btn3.bind(on_press=self.loadShowUsers)
self.btn4 = Button(text="back to main menu") #Table, export data
self.btn4.bind(on_press=self.loadLoggedMenu)
self.add_widget(self.btn1)
self.add_widget(self.btn2)
self.add_widget(self.btn3)
self.add_widget(self.btn4)
def loadSettingsMenu(self,instance):
self.clear_widgets()
self.add_widget(Label(text=self.titles,markup=True,halign="center"))
#uix.filechooser https://kivy.org/doc/stable/api-kivy.uix.filechooser.html
pathValue=self.db.getPath()
if( type(pathValue) == tuple ):
pathValue=pathValue[0]
else:
pathValue=""
self.path=TextInput(text=pathValue,multiline=False)
settingsGrid=GridLayout(cols=2,row_force_default=True,row_default_height=40)
settingsGrid.add_widget(Label(text="path",halign="left",size_hint_x=None, width=100))
settingsGrid.add_widget(self.path)
settingsGrid.add_widget(Label(text="",halign="left",size_hint_x=None, width=100))
self.btn1 = Button(text="save path")
self.btn1.bind(on_press=self.savePath)
settingsGrid.add_widget(self.btn1)
self.btn2 = Button(text="back to main menu")
self.btn2.bind(on_press=self.loadLoggedMenu)
self.add_widget(settingsGrid)
self.add_widget(self.btn2)
def test(self,instance):
self.clear_widgets()
layout = GridLayout(cols=1, spacing=10, size_hint_y=None)
# Make sure the height is such that there is something to scroll.
layout.bind(minimum_height=layout.setter('height'))
for i in range(100):
btn = Button(text=str(i), size_hint_y=None,height=40)
layout.add_widget(btn)
root = ScrollView(size_hint=(1, None), size=(Window.width, Window.height/2))
root.add_widget(layout)
self.add_widget(root)
def showActivity(self,instance):
TABLE_HEADER="30sp"
self.clear_widgets()
self.add_widget(Label(text=self.titles,markup=True,halign="center"))
self.userPanel(self)
tableHeader=GridLayout(cols=4,spacing=10,size_hint_y=None)
tableHeader.add_widget(Label(text="Group",font_size=TABLE_HEADER,halign="center",size_hint_x=None, width=100,height=80))
tableHeader.add_widget(Label(text="Name",font_size=TABLE_HEADER,halign="center",size_hint_x=None, width=100,height=80))
tableHeader.add_widget(Label(text="Date",font_size=TABLE_HEADER,halign="center",size_hint_x=None, width=200,height=80))
tableHeader.add_widget(Label(text="Samples",font_size=TABLE_HEADER,halign="center",size_hint_x=None, width=100,height=80))
self.add_widget(tableHeader)
settingsGrid=GridLayout(cols=4,spacing=10,size_hint_y=None,row_default_height=40)
settingsGrid.bind(minimum_height=settingsGrid.setter('height'))
for row in self.db.getLogs(4):
for cell in row:
settingsGrid.add_widget(Label(text=str(cell),halign="center",height=80))
self.scrollView=ScrollView(size_hint=(1,None),size=(Window.width, Window.height/2))
self.scrollView.add_widget(settingsGrid)
self.add_widget(self.scrollView)
def savePath(self,instance):
#Check that path exists
self.db.setPath(self.path.text)
self.makePopup("Path added")
self.popup.open()
def loadCreateUserMenu(self,instance):
self.clear_widgets()
self.username=TextInput(multiline=False)
self.name=TextInput(multiline=False)
self.password=TextInput(password=True,multiline=False)
self.passwordRep=TextInput(password=True,multiline=False)
self.group=TextInput(multiline=False)
self.admin=Switch(active=False)
self.enabled=Switch(active=True)
self.pb = ProgressBar(max=100,size_hint_x=None, width=120)
self.passwordRep.bind(text=self.on_password)
settingsGrid=GridLayout(cols=2,row_force_default=True,row_default_height=40)
settingsGrid.add_widget(Label(text="username",halign="left",size_hint_x=None, width=120))
settingsGrid.add_widget(self.username)
settingsGrid.add_widget(Label(text="name",halign="left",size_hint_x=None, width=120))
settingsGrid.add_widget(self.name)
settingsGrid.add_widget(Label(text="password",halign="left",size_hint_x=None, width=120))
settingsGrid.add_widget(self.password)
settingsGrid.add_widget(Label(text="repeat password",halign="left",size_hint_x=None, width=120))
settingsGrid.add_widget(self.passwordRep)
settingsGrid.add_widget(Label(text="group",halign="left",size_hint_x=None, width=120))
settingsGrid.add_widget(self.group)
settingsGrid.add_widget(Label(text="admin",halign="left",size_hint_x=None, width=120))
settingsGrid.add_widget(self.admin)
settingsGrid.add_widget(self.pb)
self.btn1 = Button(text="add user")
self.btn1.bind(on_press=self.createUser)
settingsGrid.add_widget(self.btn1)
self.add_widget(settingsGrid)
self.btn4 = Button(text="back to main menu") #Table, export data
self.btn4.bind(on_press=self.loadLoggedMenu)
self.add_widget(self.btn4)
def on_password(self,instance,value):
score=0
if(len(value)<=8):
score=len(value)*10
if(len(value)>8):
score=80
if(value==self.password.text):
score=score*1
else:
score=score*0
self.pb.value= score
def loadChangePasswordMenu(self,instance):
self.clear_widgets()
self.username=TextInput(multiline=False)
self.password=TextInput(password=True,multiline=False)
self.passwordRep=TextInput(password=True,multiline=False)
self.pb = ProgressBar(max=100,size_hint_x=None, width=120)
settingsGrid=GridLayout(cols=2,row_force_default=True,row_default_height=40)
settingsGrid.add_widget(Label(text="username",halign="left",size_hint_x=None, width=120))
settingsGrid.add_widget(Label(text=self.USERNAME,halign="left")) #### SET THIS TO user
settingsGrid.add_widget(Label(text="password",halign="left",size_hint_x=None, width=120))
settingsGrid.add_widget(self.password)
settingsGrid.add_widget(Label(text="repeat password",halign="left",size_hint_x=None, width=120))
settingsGrid.add_widget(self.passwordRep)
settingsGrid.add_widget(self.pb)
self.passwordRep.bind(text=self.on_password)
self.btn1 = Button(text="change password")
self.btn1.bind(on_press=self.changePassword)
settingsGrid.add_widget(self.btn1)
self.add_widget(settingsGrid)
self.btn4 = Button(text="back to main menu") #Table, export data
self.btn4.bind(on_press=self.loadLoggedMenu)
self.add_widget(self.btn4)
def createUser(self,instance):
self.passHash=<PASSWORD>.sha256()
self.passHash.update(bytes(self.passwordRep.text, 'utf-8'))
hash=self.passHash.hexdigest()
admin=0
print(self.admin.active)
if(self.admin.active):
admin=1
if( self.pb.value > 50 ):
if (self.username is None or self.name is None or self.group is None):
self.makePopup("Error: Not all fields were filled!")
self.popup.open()
else:
try:
self.db.setUser(self.username.text,self.name.text,self.group.text,hash,admin)
self.makePopup("User created")
self.popup.open()
except sqlite3.OperationalError as err:
self.makePopup("Error: "+err)
self.popup.open()
def changePassword(self,instance):
self.passHash=hashlib.sha256()
self.passHash.update(bytes(self.passwordRep.text, 'utf-8'))
hash=self.passHash.hexdigest()
if( self.pb.value > 50 ):
try:
self.db.setPassword(self.ID,hash)
self.makePopup("Password changed")
self.popup.open()
except sqlite3.OperationalError as err:
self.makePopup("Error: "+err)
self.popup.open()
def switchState(self,instance,value,**rest):
if(value):
boolean=1
else:
boolean=0
self.db.setUsersAttr(rest['userId'],boolean,rest['switchType'])
def loadShowUsers(self, instance):
TABLE_HEADER="30sp"
self.clear_widgets()
self.add_widget(Label(text=self.titles,markup=True,halign="center"))
self.userPanel(self)
settingsGrid=GridLayout(cols=5,row_default_height=40,size=(Window.width,40))
settingsGrid.add_widget(Label(text="User",font_size=TABLE_HEADER,halign="center",size_hint_x=None))
settingsGrid.add_widget(Label(text="Name",font_size=TABLE_HEADER,halign="center",size_hint_x=None))
settingsGrid.add_widget(Label(text="Group",font_size=TABLE_HEADER,halign="center",size_hint_x=None))
settingsGrid.add_widget(Label(text="admin",font_size=TABLE_HEADER,halign="center",size_hint_x=None))
settingsGrid.add_widget(Label(text="enabled",font_size=TABLE_HEADER,halign="center",size_hint_x=None))
self.add_widget(settingsGrid)
self.switches={}
usersGrid=GridLayout(cols=5,spacing=10,size_hint_y=None,row_default_height=40)
usersGrid.bind(minimum_height=usersGrid.setter('height'))
for row in self.db.listUsers():
for cell in row[1:4]: #Exclude ID and booleans
usersGrid.add_widget(Label(text=str(cell),halign="center"))
if( row[4] is 1 ):
admin=True
else:
admin=False
if( row[5] is 1 ):
enabled=True
else:
enabled=False
id=row[0]
self.switches["admin"+str(id)]=Switch(active=admin)
self.switches["enabled"+str(id)]=Switch(active=enabled)
usersGrid.add_widget(self.switches["admin"+str(id)])
usersGrid.add_widget(self.switches["enabled"+str(id)])
#append extra parameters
switchCallBackAdmin=partial(self.switchState,userId=id,switchType="admin")
switchCallBackEnabled=partial(self.switchState,userId=id,switchType="enabled")
self.switches["admin"+str(id)].bind(active=switchCallBackAdmin)
self.switches["enabled"+str(id)].bind(active=switchCallBackEnabled)
self.scrollView=ScrollView(size_hint=(1,None),size=(Window.width, Window.height/2))
self.scrollView.add_widget(usersGrid)
self.add_widget(self.scrollView)
class MainMenu(App):
    """Kivy application entry point; the root widget is the login screen."""
    def build(self):
        # Called once by Kivy to construct the widget tree.
        return LoginScreen()
# Start the Kivy event loop only when executed as a script.
if __name__ == '__main__':
    MainMenu().run()
| StarcoderdataPython |
4819586 | from app.search.SearchQuery import SearchQuery, BadQueryException
from app import app
from app.search import SearchForm
from app.provider import Provider
from flask import Blueprint, render_template, jsonify, request
from flask_api import status
# Create a search blueprint
searchbp = Blueprint("searchbp", __name__)
@searchbp.route('/search', methods=["GET"])
def search():
    """Render the search page.

    Supplies the search form and the Google Maps API key needed by the
    client-side map widget in the template.
    """
    search_form = SearchForm()
    gmaps_api_key = app.config['GOOGLE_MAPS_API_KEY']
    return render_template(
        'search.html',
        title='Search',
        search_form=search_form,
        GOOGLE_MAPS_API_KEY=gmaps_api_key,
    )
@searchbp.route('/search', methods=['POST'])
def results():
    """Run a provider search from a JSON payload.

    Payload keys (all optional): ``query`` (free text), ``position``
    (coordinates), ``range`` (search radius).  With no JSON body at all,
    the first 30 providers are returned as a browsable default.

    Returns a JSON list of providers, or a 400 with an error message
    when SearchQuery rejects the input.
    """
    query_data: dict = request.json
    if query_data is None:
        # No payload: serve an unfiltered sample of providers.
        match = Provider.query.limit(30).all()
        return jsonify([provider.to_dict() for provider in match])
    query = query_data.get('query', None)
    position = query_data.get('position', None)
    search_range = query_data.get('range', None)
    try:
        search_range = float(search_range)
    except (TypeError, ValueError):
        # TypeError covers a missing (None) range -- the original caught
        # only ValueError, so a payload without 'range' crashed with an
        # unhandled TypeError.  Either way: no range filter.
        search_range = None
    try:
        search_query = SearchQuery(
            query=query, position=position, search_range=search_range
        )
        search_results = search_query.search(Provider)
        return jsonify(search_results)
    except BadQueryException as e:
        response = jsonify(error=e.get_msg()), status.HTTP_400_BAD_REQUEST
        return response
| StarcoderdataPython |
3258535 | from django import forms
from stocks.models import PartsMaster
from .models import IndentMaster, IndentTransactions
from django.forms.models import inlineformset_factory
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Field, Fieldset, Div, HTML, ButtonHolder, Submit
from .custom_layout_object import *
class IndentTransactionsForm(forms.ModelForm):
    """ModelForm exposing every field of IndentTransactions (inline rows)."""
    class Meta:
        model = IndentTransactions
        exclude = ()
# Inline formset editing the IndentTransactions rows attached to one
# IndentMaster: one blank extra row is shown and rows may be deleted.
IndentTransactionsFormset = inlineformset_factory(
    IndentMaster, IndentTransactions, form=IndentTransactionsForm,
    fields = ['part', 'indent_quantity', 'required_for'], extra=1, can_delete = True
)
class IndentMasterForm(forms.ModelForm):
    """Form for an IndentMaster plus its nested indent-line rows.

    The crispy-forms layout embeds the transactions formset under the
    'Add indents' fieldset via the custom Formset layout object.
    """
    class Meta:
        model = IndentMaster
        # 'indented_by' is excluded from user input; presumably set by
        # the view from the request user -- TODO confirm.
        exclude = ['indented_by']

    def __init__(self, *args, **kwargs):
        super(IndentMasterForm, self).__init__(*args, **kwargs)
        # Bootstrap-style horizontal form rendered by crispy-forms.
        self.helper = FormHelper()
        self.helper.form_tag = True
        self.helper.form_class = 'form-horizontal'
        self.helper.label_class = 'col-md-3 create-label'
        self.helper.field_class = 'col-md-9'
        self.helper.layout = Layout(
            Div(
                #Field('indent_no'),
                # Field('indent_date'),
                Fieldset('Add indents',
                    Formset('indents')),
                Field('note'),
                HTML("<br>"),
                ButtonHolder(Submit('submit', 'save')),
            )
        )
135982 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from datetime import datetime
import os
from unittest import TestCase
from unittest.mock import patch, MagicMock, call
with patch("boto3.client") as boto_client_mock:
from functions.usergamedata.UpdateItem import index
ITEMS_TABLE_NAME = 'test_bundleitems_table'
class MockConditionalCheckFailedException(BaseException):
    """Stand-in for botocore's ConditionalCheckFailedException.

    Assigned to the mocked DynamoDB client's exceptions attribute so the
    handler's except clause can catch it.  The original defined a no-op
    __init__; the inherited BaseException constructor is sufficient, so
    it has been removed (construction remains backward compatible).
    """
# Patch Lambda environment variables:
@patch.dict(os.environ, {
'BUNDLE_ITEMS_TABLE_NAME': ITEMS_TABLE_NAME
})
class TestUpdateItem(TestCase):
    def setUp(self):
        # Fresh DynamoDB client mock per test so call records don't leak
        # between test methods.
        index.ddb_client = MagicMock()
    def test_update_item_invalid_player_returns_401_error(self):
        """An event with empty authorizer claims yields 401 and no DB writes."""
        # Arrange
        event = self.get_lambda_event()
        event['requestContext'] = {'authorizer': {'claims': {}}}
        # Act
        result = index.lambda_handler(event, None)
        # Assert
        self.assertEqual(401, result['statusCode'])
        index.ddb_client.batch_write_item.assert_not_called()
def test_update_item_path_parameters_empty_returns_400_error(self):
# Arrange
event = self.get_lambda_event()
event['pathParameters'] = {}
# Act
result = index.lambda_handler(event, None)
# Assert
self.assertEqual(400, result['statusCode'])
index.ddb_client.batch_write_item.assert_not_called()
def test_update_item_bundle_name_is_empty_returns_400_error(self):
# Arrange
event = self.get_lambda_event()
event['pathParameters'] = {'bundle_name': None}
# Act
result = index.lambda_handler(event, None)
# Assert
self.assertEqual(400, result['statusCode'])
index.ddb_client.batch_write_item.assert_not_called()
def test_update_item_bundle_name_invalid_returns_414_error(self):
# Arrange
event = self.get_lambda_event()
event['pathParameters'] = {'bundle_name': 'x' * 256}
# Act
result = index.lambda_handler(event, None)
# Assert
self.assertEqual(414, result['statusCode'])
index.ddb_client.batch_write_item.assert_not_called()
def test_update_item_bundle_item_key_is_empty_returns_400_error(self):
# Arrange
event = self.get_lambda_event()
event['pathParameters'] = {'bundle_item_key': None}
# Act
result = index.lambda_handler(event, None)
# Assert
self.assertEqual(400, result['statusCode'])
index.ddb_client.batch_write_item.assert_not_called()
def test_update_item_bundle_item_key_invalid_returns_414_error(self):
# Arrange
event = self.get_lambda_event()
event['pathParameters'] = {'bundle_name': 'TESTBundle', 'bundle_item_key': 'x' * 256}
# Act
result = index.lambda_handler(event, None)
# Assert
self.assertEqual(414, result['statusCode'])
index.ddb_client.batch_write_item.assert_not_called()
def test_update_item_bundle_item_key_is_missing_returns_400_error(self):
# Arrange
event = self.get_lambda_event()
event['body'] = '{}'
# Act
result = index.lambda_handler(event, None)
# Assert
self.assertEqual(400, result['statusCode'])
index.ddb_client.batch_write_item.assert_not_called()
def test_update_item_bundle_item_key_is_invalid_returns_400_error(self):
# Arrange
event = self.get_lambda_event()
event['body'] = '{"bundle_item_value": "<script></script>"}'
# Act
result = index.lambda_handler(event, None)
# Assert
self.assertEqual(400, result['statusCode'])
index.ddb_client.batch_write_item.assert_not_called()
def test_update_item_bundle_item_not_found_returns_404_error(self):
# Arrange
event = self.get_lambda_event()
index.ddb_client.exceptions.ConditionalCheckFailedException = MockConditionalCheckFailedException
index.ddb_client.update_item.side_effect = index.ddb_client.exceptions.ConditionalCheckFailedException()
# Act
result = index.lambda_handler(event, None)
# Assert
self.assertEqual(404, result['statusCode'])
index.ddb_client.batch_write_item.assert_not_called()
@patch('functions.usergamedata.UpdateItem.index.datetime')
def test_update_item_bundle_returns_success(self, mock_datetime: MagicMock):
# Arrange
event = self.get_lambda_event()
mock_datetime.utcnow.return_value = datetime(2021, 8, 4, 1, 23, 34, 56)
result = index.lambda_handler(event, None)
calls = [
call(ExpressionAttributeNames={'#bundle_item_value': 'bundle_item_value',
'#updated_at': 'updated_at'},
ExpressionAttributeValues={':bundle_item_value': {'S': 'Banana'},
':updated_at': {'S': '2021-08-04T01:23:34.000056+00:00'}},
Key={'player_id_bundle': {'S': 'test_gamekit_player_id_BANANA_BUNDLE'}, 'bundle_item_key': {'S': 'MAX_BANANAS'}},
ReturnValues='UPDATED_NEW',
TableName=ITEMS_TABLE_NAME,
ConditionExpression='attribute_exists(player_id_bundle) and attribute_exists(bundle_item_key)',
UpdateExpression='SET #bundle_item_value = :bundle_item_value, '
'#updated_at = :updated_at')
]
self.assertEqual(result['statusCode'], 204)
index.ddb_client.update_item.assert_has_calls(calls, any_order=False)
@staticmethod
def get_lambda_event():
return {
'resource': '/usergamedata/{bundle_name}/{bundle_item_id}',
'path': '/usergamedata/BANANA_BUNDLE/MAX_BANANAS',
'httpMethod': 'PUT',
'headers': {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Content-Type': 'application/json',
'Host': 'abcdefghij.execute-api.us-west-2.amazonaws.com',
'User-Agent': 'TestAgent',
'X-Amzn-Trace-Id': 'Root=1-61003a02-7e1356b05a1e1569614c0c46',
'X-Forwarded-For': '127.0.0.1',
'X-Forwarded-Port': '443',
'X-Forwarded-Proto': 'https'
},
'multiValueHeaders': {
'Accept': ['*/*'],
'Accept-Encoding': ['gzip, deflate, br'],
'Content-Type': ['application/json'],
'Host': ['abcdefghij.execute-api.us-west-2.amazonaws.com'],
'User-Agent': ['TestAgent'],
'X-Amzn-Trace-Id': ['Root=1-61003a02-7e1356b05a1e1569614c0c46'],
'X-Forwarded-For': ['127.0.0.1'],
'X-Forwarded-Port': ['443'],
'X-Forwarded-Proto': ['https']
},
'queryStringParameters': None,
'multiValueQueryStringParameters': None,
'pathParameters': {
'bundle_name': 'BANANA_BUNDLE',
'bundle_item_key': 'MAX_BANANAS'
},
'stageVariables': None,
'requestContext': {
'resourceId': 'abcdef',
'authorizer': {
'claims': {
'sub': 'test_gamekit_player_id',
'iss': 'https://cognito-idp.us-west-2.amazonaws.com/us-west-2_123456789',
'cognito:username': 'jakschic',
'origin_jti': 'test_gamekit_player_id',
'aud': '7s24tlabcn8n0defbfoghijsgn',
'event_id': '6234d920-b637-4cdf-bd44-3a5e53f51569',
'token_use': 'id',
'auth_time': '1627438909',
'custom:gk_user_id': 'test_gamekit_player_id',
'exp': 'Wed Jul 28 03:21:49 UTC 2021',
'iat': 'Wed Jul 28 02:21:49 UTC 2021',
'jti': '7s24tlabcn8n0defbfoghijsgn',
'email': '<EMAIL>'
}
},
'domainName': 'abcdefghij.execute-api.us-west-2.amazonaws.com',
'apiId': 'abcdefghij'
},
'body': '{"bundle_item_value": "Banana"}',
'isBase64Encoded': False
}
| StarcoderdataPython |
19039 | <reponame>webguru001/Python-Django-Web<filename>Francisco_Trujillo/Assignments/registration/serverre.py
from flask import Flask, render_template, request, redirect, session, flash
import re
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
app = Flask(__name__)
app.secret_key = 'irtndvieurnviur'
@app.route('/')
def index():
    """Serve the registration form page."""
    return render_template("index.html")
#check all for empty and password >=8
def checkForValuelength(form):
    """Validate presence and length of the registration fields.

    Returns False when any text field is empty or either password is
    shorter than 8 characters, else True.  The original compared with
    ``<= 8``, rejecting exactly-8-character passwords even though the
    user-facing message promises "8 or more".
    """
    if (len(form['email']) < 1 or
            len(form['fname']) < 1 or
            len(form['lname']) < 1 or
            len(form['password']) < 8 or
            len(form['cpassword']) < 8):
        return False
    return True
# check for valid name and last name
def validNamefileds(form):
    """Names are valid only when both first and last are purely alphabetic."""
    return form['fname'].isalpha() and form['lname'].isalpha()
# invalid EMAIL
def matchPassword(form):
    """True when the password and its confirmation are identical."""
    return form['password'] == form['cpassword']
@app.route('/process', methods=['POST'])
def form_page():
    """Validate the submitted registration form and flash the outcome.

    Always redirects back to '/'; each validation failure short-circuits
    with its own flash message, and success flashes a confirmation.
    """
    if not checkForValuelength(request.form):
        flash("All fileds are required and password must be 8 or more characater")
        return redirect('/')
    elif not validNamefileds(request.form):
        flash("Name and last name must not contain numbers")
        return redirect('/')
    elif not EMAIL_REGEX.match(request.form['email']):
        flash("Invalid Email address")
        return redirect('/')
    elif not matchPassword(request.form):
        flash("Password do not match")
        return redirect ('/')
    flash("Form sccessfully submitted")
    return redirect('/')
return redirect('/')
@app.route('/')
def result_page():
    """Redirect to the root page."""
    # NOTE(review): '/' is already routed to index() above; confirm this
    # duplicate rule is intentional.
    return redirect('/')
app.run(debug=True)
| StarcoderdataPython |
166462 | <filename>pramp/diff_between_two_strings.py
def diff_strings_rec(source, target, dp=None):
    """Return (edit_count, edits) transforming *source* into *target*.

    *edits* lists per-character ops: "c" keep, "-c" delete from source,
    "+c" insert from target.  *dp* memoizes (source, target) pairs.

    The original used a mutable default argument (``dp={}``), so the
    memo dict persisted -- and grew without bound -- across unrelated
    top-level calls.  A fresh dict is now created per call; recursive
    calls still share the one cache.
    """
    if dp is None:
        dp = {}
    dp_key = (source, target)
    if dp_key in dp:
        return dp[dp_key]
    if not source and not target:
        dp[dp_key] = (0, [])
        return dp[dp_key]
    if not source:
        # Only insertions remain.
        dp[dp_key] = (len(target), ["+" + ch for ch in target])
        return dp[dp_key]
    if not target:
        # Only deletions remain.
        dp[dp_key] = (len(source), ["-" + ch for ch in source])
        return dp[dp_key]
    if source[0] == target[0]:
        # Matching heads cost nothing; recurse on the tails.
        num_edits, edits = diff_strings_rec(source[1:], target[1:], dp)
        dp[dp_key] = (num_edits, [source[0]] + edits)
        return dp[dp_key]
    # Heads differ: take the cheaper of delete-from-source vs insert.
    num_edits_del, edits_del = diff_strings_rec(source[1:], target, dp)
    num_edits_ins, edits_ins = diff_strings_rec(source, target[1:], dp)
    if num_edits_ins < num_edits_del:
        dp[dp_key] = (num_edits_ins + 1, ["+" + target[0]] + edits_ins)
    else:
        dp[dp_key] = (num_edits_del + 1, ["-" + source[0]] + edits_del)
    return dp[dp_key]
def diffBetweenTwoStrings(source, target):
    """Return only the edit list that converts *source* into *target*."""
    return diff_strings_rec(source, target)[1]
| StarcoderdataPython |
135551 | <filename>tools/train_lanenet.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 19-4-24 下午9:33
# @Author : MaybeShewill-CV
# @Site : https://github.com/MaybeShewill-CV/lanenet-lane-detection
# @File : train_lanenet.py
# @IDE: PyCharm
"""
Train lanenet script
"""
import argparse
import math
import os
import os.path as ops
import time
import cv2
import glog as log
import numpy as np
import tensorflow as tf
from lanenet.config import global_config
from lanenet.data_provider import lanenet_data_feed_pipline
from lanenet.model import lanenet
from tools import evaluate_model_utils
CFG = global_config.cfg
def init_args():
    """Parse command-line arguments for LaneNet training.

    :return: argparse.Namespace with dataset_dir, weights_path,
             multi_gpus (bool) and net_flag.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--dataset_dir', type=str,
                        help='Lanenet Dataset dir')
    parser.add_argument('-w', '--weights_path', type=str,
                        help='Path to pre-trained weights to continue training')
    # nargs='?' + const=True lets '-m' act as a flag while still
    # accepting an explicit yes/no value.
    parser.add_argument('-m', '--multi_gpus', type=args_str2bool, default=False,
                        nargs='?', const=True, help='Use multi gpus to train')
    parser.add_argument('--net_flag', type=str, default='vgg',
                        help='The net flag which determins the net\'s architecture')
    return parser.parse_args()
def args_str2bool(arg_value):
    """Map a CLI string to a boolean; raise for unrecognised spellings."""
    truthy = {'yes', 'true', 't', 'y', '1'}
    falsy = {'no', 'false', 'f', 'n', '0'}
    lowered = arg_value.lower()
    if lowered in truthy:
        return True
    if lowered in falsy:
        return False
    raise argparse.ArgumentTypeError('Unsupported value encountered.')
def minmax_scale(input_arr):
    """Linearly rescale *input_arr* so its min maps to 0 and max to 255.

    :param input_arr: numpy array (any shape).
    :return: float array of the same shape in [0, 255].  A constant
             array maps to all zeros; the original divided by zero in
             that case, producing NaNs.
    """
    min_val = np.min(input_arr)
    max_val = np.max(input_arr)
    if max_val == min_val:
        # Degenerate range: no contrast to stretch.
        return np.zeros_like(input_arr, dtype=np.float64)
    output_arr = (input_arr - min_val) * 255.0 / (max_val - min_val)
    return output_arr
def load_pretrained_weights(variables, pretrained_weights_path, sess):
    """Assign pretrained VGG weights to the matching graph variables.

    :param variables: TF variables to initialize.
    :param pretrained_weights_path: path to the .npy weights dict.
    :param sess: active TF session used to run the assign ops.

    The original asserted that *pretrained_weights_path* exists but then
    loaded a hard-coded './data/vgg16.npy'; it now loads the path it
    validates.  Variables without a pretrained counterpart are skipped.
    """
    assert ops.exists(pretrained_weights_path), '{:s} not exist'.format(pretrained_weights_path)
    pretrained_weights = np.load(
        pretrained_weights_path, encoding='latin1').item()
    for vv in variables:
        # Variable names look like '.../<layer>/.../weights:0'; the third
        # path component from the end is the layer key in the dict.
        weights_key = vv.name.split('/')[-3]
        if 'conv5' in weights_key:
            weights_key = '{:s}_{:s}'.format(weights_key.split('_')[0], weights_key.split('_')[1])
        try:
            weights = pretrained_weights[weights_key][0]
            _op = tf.assign(vv, weights)
            sess.run(_op)
        except Exception as _:
            # No pretrained entry for this variable: keep its initializer.
            continue
    return
def record_training_intermediate_result(gt_images, gt_binary_labels, gt_instance_labels,
                                        binary_seg_images, pix_embeddings, flag='train',
                                        save_dir='./tmp'):
    """
    record intermediate result during training process for monitoring

    Writes, for every sample in the batch: the de-normalized input image,
    the ground-truth binary/instance labels, the predicted binary
    segmentation and a min-max-scaled visualization of the pixel
    embeddings.  Files are named '<flag>_<index>_<kind>.png' in save_dir.

    :param gt_images: batch of input images normalized to [-1, 1]
    :param gt_binary_labels: ground-truth binary lane masks
    :param gt_instance_labels: ground-truth instance-id maps
    :param binary_seg_images: predicted binary segmentation masks
    :param pix_embeddings: predicted per-pixel embedding maps
    :param flag: filename prefix distinguishing train/val output
    :param save_dir: output directory (created if missing)
    :return:
    """
    os.makedirs(save_dir, exist_ok=True)

    for index, gt_image in enumerate(gt_images):

        gt_image_name = '{:s}_{:d}_gt_image.png'.format(flag, index + 1)
        gt_image_path = ops.join(save_dir, gt_image_name)
        # Undo the [-1, 1] normalization back to 0-255 pixel values.
        gt_image = (gt_images[index] + 1.0) * 127.5
        cv2.imwrite(gt_image_path, np.array(gt_image, dtype=np.uint8))

        gt_binary_label_name = '{:s}_{:d}_gt_binary_label.png'.format(flag, index + 1)
        gt_binary_label_path = ops.join(save_dir, gt_binary_label_name)
        # Binary mask is 0/1; scale to 0/255 for visibility.
        cv2.imwrite(gt_binary_label_path, np.array(gt_binary_labels[index][:, :, 0] * 255, dtype=np.uint8))

        gt_instance_label_name = '{:s}_{:d}_gt_instance_label.png'.format(flag, index + 1)
        gt_instance_label_path = ops.join(save_dir, gt_instance_label_name)
        cv2.imwrite(gt_instance_label_path, np.array(gt_instance_labels[index][:, :, 0], dtype=np.uint8))

        gt_binary_seg_name = '{:s}_{:d}_gt_binary_seg.png'.format(flag, index + 1)
        gt_binary_seg_path = ops.join(save_dir, gt_binary_seg_name)
        cv2.imwrite(gt_binary_seg_path, np.array(binary_seg_images[index] * 255, dtype=np.uint8))

        embedding_image_name = '{:s}_{:d}_pix_embedding.png'.format(flag, index + 1)
        embedding_image_path = ops.join(save_dir, embedding_image_name)
        embedding_image = pix_embeddings[index]
        # Stretch each embedding channel independently to [0, 255].
        for i in range(CFG.TRAIN.EMBEDDING_FEATS_DIMS):
            embedding_image[:, :, i] = minmax_scale(embedding_image[:, :, i])
        embedding_image = np.array(embedding_image, np.uint8)
        cv2.imwrite(embedding_image_path, embedding_image)

    return
def average_gradients(tower_grads):
    """Average the per-tower gradients of every shared variable.

    Note that this function provides a synchronization point across all
    towers.

    :param tower_grads: list (one entry per tower) of lists of
        ``(gradient, variable)`` tuples
    :return: list of ``(averaged_gradient, variable)`` pairs, one per shared
        variable; the variable reference is taken from the first tower since
        the variables are shared and therefore redundant across towers
    """
    averaged = []
    # zip(*tower_grads) regroups the pairs per-variable:
    # ((grad0_gpu0, var0_gpu0), ..., (grad0_gpuN, var0_gpuN))
    for per_var_pairs in zip(*tower_grads):
        # Stack the per-tower gradients along a new leading 'tower' axis,
        # then collapse that axis to its mean.
        stacked = tf.concat([tf.expand_dims(grad, 0) for grad, _ in per_var_pairs], 0)
        mean_grad = tf.reduce_mean(stacked, 0)
        averaged.append((mean_grad, per_var_pairs[0][1]))
    return averaged
def compute_net_gradients(gt_images, gt_binary_labels, gt_instance_labels,
                          net, optimizer=None):
    """Compute the total loss (and optionally its gradients) on a single GPU.

    :param gt_images: batch of input images
    :param gt_binary_labels: batch of binary ground-truth masks
    :param gt_instance_labels: batch of instance ground-truth masks
    :param net: LaneNet instance providing ``compute_loss``
    :param optimizer: optional TF optimizer; when omitted no gradients are
        computed
    :return: ``(total_loss, grads)`` where ``grads`` is ``None`` if no
        optimizer was supplied
    """
    total_loss = net.compute_loss(
        input_tensor=gt_images,
        binary_label=gt_binary_labels,
        instance_label=gt_instance_labels,
        name='lanenet_model',
    )['total_loss']

    grads = optimizer.compute_gradients(total_loss) if optimizer is not None else None

    return total_loss, grads
def train_lanenet(dataset_dir, weights_path=None, net_flag='vgg'):
    """
    Train lanenet on a single GPU.

    :param dataset_dir: root directory of the prepared dataset
    :param net_flag: choose which base network to use
    :param weights_path: optional checkpoint to restore before training;
        when ``None`` and ``net_flag == 'vgg'`` the pretrained VGG16 weights
        from ``./data/vgg16.npy`` are loaded instead
    :return: None
    """
    # Data feeders for the training and validation splits.
    train_dataset = lanenet_data_feed_pipline.LaneNetDataFeeder(
        dataset_dir=dataset_dir, flags='train'
    )
    val_dataset = lanenet_data_feed_pipline.LaneNetDataFeeder(
        dataset_dir=dataset_dir, flags='val'
    )

    # NOTE(review): GPU index 1 is hard-coded -- confirm the target machine
    # actually has a second GPU.
    with tf.device('/gpu:1'):
        # set lanenet
        train_net = lanenet.LaneNet(net_flag=net_flag, phase='train', reuse=False)
        val_net = lanenet.LaneNet(net_flag=net_flag, phase='val', reuse=True)

        # set compute graph node for training
        train_images, train_binary_labels, train_instance_labels = train_dataset.inputs(
            CFG.TRAIN.BATCH_SIZE, 1
        )

        train_compute_ret = train_net.compute_loss(
            input_tensor=train_images, binary_label=train_binary_labels,
            instance_label=train_instance_labels, name='lanenet_model'
        )
        train_total_loss = train_compute_ret['total_loss']
        train_binary_seg_loss = train_compute_ret['binary_seg_loss']
        train_disc_loss = train_compute_ret['discriminative_loss']
        train_pix_embedding = train_compute_ret['instance_seg_logits']

        # Binary segmentation prediction (argmax over softmax scores).
        train_prediction_logits = train_compute_ret['binary_seg_logits']
        train_prediction_score = tf.nn.softmax(logits=train_prediction_logits)
        train_prediction = tf.argmax(train_prediction_score, axis=-1)

        # Pixel-level precision / false-positive / false-negative metrics.
        train_accuracy = evaluate_model_utils.calculate_model_precision(
            train_compute_ret['binary_seg_logits'], train_binary_labels
        )
        train_fp = evaluate_model_utils.calculate_model_fp(
            train_compute_ret['binary_seg_logits'], train_binary_labels
        )
        train_fn = evaluate_model_utils.calculate_model_fn(
            train_compute_ret['binary_seg_logits'], train_binary_labels
        )
        train_binary_seg_ret_for_summary = evaluate_model_utils.get_image_summary(
            img=train_prediction
        )
        train_embedding_ret_for_summary = evaluate_model_utils.get_image_summary(
            img=train_pix_embedding
        )

        # Tensorboard summaries for the training graph.
        train_cost_scalar = tf.summary.scalar(
            name='train_cost', tensor=train_total_loss
        )
        train_accuracy_scalar = tf.summary.scalar(
            name='train_accuracy', tensor=train_accuracy
        )
        train_binary_seg_loss_scalar = tf.summary.scalar(
            name='train_binary_seg_loss', tensor=train_binary_seg_loss
        )
        train_instance_seg_loss_scalar = tf.summary.scalar(
            name='train_instance_seg_loss', tensor=train_disc_loss
        )
        train_fn_scalar = tf.summary.scalar(
            name='train_fn', tensor=train_fn
        )
        train_fp_scalar = tf.summary.scalar(
            name='train_fp', tensor=train_fp
        )
        train_binary_seg_ret_img = tf.summary.image(
            name='train_binary_seg_ret', tensor=train_binary_seg_ret_for_summary
        )
        train_embedding_feats_ret_img = tf.summary.image(
            name='train_embedding_feats_ret', tensor=train_embedding_ret_for_summary
        )
        train_merge_summary_op = tf.summary.merge(
            [train_accuracy_scalar, train_cost_scalar, train_binary_seg_loss_scalar,
             train_instance_seg_loss_scalar, train_fn_scalar, train_fp_scalar,
             train_binary_seg_ret_img, train_embedding_feats_ret_img]
        )

        # set compute graph node for validation
        val_images, val_binary_labels, val_instance_labels = val_dataset.inputs(
            CFG.TRAIN.VAL_BATCH_SIZE, 1
        )

        val_compute_ret = val_net.compute_loss(
            input_tensor=val_images, binary_label=val_binary_labels,
            instance_label=val_instance_labels, name='lanenet_model'
        )
        val_total_loss = val_compute_ret['total_loss']
        val_binary_seg_loss = val_compute_ret['binary_seg_loss']
        val_disc_loss = val_compute_ret['discriminative_loss']
        val_pix_embedding = val_compute_ret['instance_seg_logits']

        val_prediction_logits = val_compute_ret['binary_seg_logits']
        val_prediction_score = tf.nn.softmax(logits=val_prediction_logits)
        val_prediction = tf.argmax(val_prediction_score, axis=-1)

        val_accuracy = evaluate_model_utils.calculate_model_precision(
            val_compute_ret['binary_seg_logits'], val_binary_labels
        )
        val_fp = evaluate_model_utils.calculate_model_fp(
            val_compute_ret['binary_seg_logits'], val_binary_labels
        )
        val_fn = evaluate_model_utils.calculate_model_fn(
            val_compute_ret['binary_seg_logits'], val_binary_labels
        )
        val_binary_seg_ret_for_summary = evaluate_model_utils.get_image_summary(
            img=val_prediction
        )
        val_embedding_ret_for_summary = evaluate_model_utils.get_image_summary(
            img=val_pix_embedding
        )

        # Tensorboard summaries for the validation graph.
        val_cost_scalar = tf.summary.scalar(
            name='val_cost', tensor=val_total_loss
        )
        val_accuracy_scalar = tf.summary.scalar(
            name='val_accuracy', tensor=val_accuracy
        )
        val_binary_seg_loss_scalar = tf.summary.scalar(
            name='val_binary_seg_loss', tensor=val_binary_seg_loss
        )
        val_instance_seg_loss_scalar = tf.summary.scalar(
            name='val_instance_seg_loss', tensor=val_disc_loss
        )
        val_fn_scalar = tf.summary.scalar(
            name='val_fn', tensor=val_fn
        )
        val_fp_scalar = tf.summary.scalar(
            name='val_fp', tensor=val_fp
        )
        val_binary_seg_ret_img = tf.summary.image(
            name='val_binary_seg_ret', tensor=val_binary_seg_ret_for_summary
        )
        val_embedding_feats_ret_img = tf.summary.image(
            name='val_embedding_feats_ret', tensor=val_embedding_ret_for_summary
        )
        val_merge_summary_op = tf.summary.merge(
            [val_accuracy_scalar, val_cost_scalar, val_binary_seg_loss_scalar,
             val_instance_seg_loss_scalar, val_fn_scalar, val_fp_scalar,
             val_binary_seg_ret_img, val_embedding_feats_ret_img]
        )

        # set optimizer
        global_step = tf.Variable(0, trainable=False)
        # NOTE(review): decay_steps equals the *epoch* count, so the
        # polynomial decay reaches its floor after EPOCHS optimizer steps --
        # confirm this is intended rather than steps-per-epoch * epochs.
        learning_rate = tf.train.polynomial_decay(
            learning_rate=CFG.TRAIN.LEARNING_RATE,
            global_step=global_step,
            decay_steps=CFG.TRAIN.EPOCHS,
            power=0.9
        )
        # Make sure batch-norm moving statistics update before each step.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            optimizer = tf.train.MomentumOptimizer(
                learning_rate=learning_rate, momentum=CFG.TRAIN.MOMENTUM).minimize(
                loss=train_total_loss,
                var_list=tf.trainable_variables(),
                global_step=global_step
            )

    # Set tf model save path
    model_save_dir = 'model/tusimple_lanenet_{:s}'.format(net_flag)
    os.makedirs(model_save_dir, exist_ok=True)
    train_start_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
    model_name = 'tusimple_lanenet_{:s}_{:s}.ckpt'.format(net_flag, str(train_start_time))
    model_save_path = ops.join(model_save_dir, model_name)
    saver = tf.train.Saver()

    # Set tf summary save path
    tboard_save_path = 'tboard/tusimple_lanenet_{:s}'.format(net_flag)
    os.makedirs(tboard_save_path, exist_ok=True)

    # Set sess configuration
    sess_config = tf.ConfigProto(allow_soft_placement=True)
    sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.TRAIN.GPU_MEMORY_FRACTION
    sess_config.gpu_options.allow_growth = CFG.TRAIN.TF_ALLOW_GROWTH
    sess_config.gpu_options.allocator_type = 'BFC'

    sess = tf.Session(config=sess_config)

    summary_writer = tf.summary.FileWriter(tboard_save_path)
    summary_writer.add_graph(sess.graph)

    # Set the training parameters
    train_epochs = CFG.TRAIN.EPOCHS

    log.info('Global configuration is as follows:')
    log.info(CFG)

    with sess.as_default():
        if weights_path is None:
            log.info('Training from scratch')
            init = tf.global_variables_initializer()
            sess.run(init)
        else:
            log.info('Restore model from last model checkpoint {:s}'.format(weights_path))
            saver.restore(sess=sess, save_path=weights_path)

        # When training VGG from scratch, bootstrap from ImageNet weights.
        if net_flag == 'vgg' and weights_path is None:
            load_pretrained_weights(tf.trainable_variables(), './data/vgg16.npy', sess)

        train_cost_time_mean = []
        for epoch in range(train_epochs):
            # training part
            t_start = time.time()

            _, train_c, train_accuracy_figure, train_fn_figure, train_fp_figure, \
                lr, train_summary, train_binary_loss, \
                train_instance_loss, train_embeddings, train_binary_seg_imgs, train_gt_imgs, \
                train_binary_gt_labels, train_instance_gt_labels = \
                sess.run([optimizer, train_total_loss, train_accuracy, train_fn, train_fp,
                          learning_rate, train_merge_summary_op, train_binary_seg_loss,
                          train_disc_loss, train_pix_embedding, train_prediction,
                          train_images, train_binary_labels, train_instance_labels])

            # Abort training on numerical divergence.
            if math.isnan(train_c) or math.isnan(train_binary_loss) or math.isnan(train_instance_loss):
                log.error('cost is: {:.5f}'.format(train_c))
                log.error('binary cost is: {:.5f}'.format(train_binary_loss))
                log.error('instance cost is: {:.5f}'.format(train_instance_loss))
                return

            # Periodically dump images for visual inspection.
            if epoch % 100 == 0:
                record_training_intermediate_result(
                    gt_images=train_gt_imgs, gt_binary_labels=train_binary_gt_labels,
                    gt_instance_labels=train_instance_gt_labels, binary_seg_images=train_binary_seg_imgs,
                    pix_embeddings=train_embeddings
                )
            summary_writer.add_summary(summary=train_summary, global_step=epoch)

            if epoch % CFG.TRAIN.DISPLAY_STEP == 0:
                # NOTE(review): on the first epoch ``train_cost_time_mean`` is
                # still empty, so ``np.mean`` yields nan -- confirm acceptable.
                log.info('Epoch: {:d} total_loss= {:6f} binary_seg_loss= {:6f} '
                         'instance_seg_loss= {:6f} accuracy= {:6f} fp= {:6f} fn= {:6f}'
                         ' lr= {:6f} mean_cost_time= {:5f}s '.
                         format(epoch + 1, train_c, train_binary_loss, train_instance_loss, train_accuracy_figure,
                                train_fp_figure, train_fn_figure, lr, np.mean(train_cost_time_mean)))
                train_cost_time_mean.clear()

            # validation part
            val_c, val_accuracy_figure, val_fn_figure, val_fp_figure, \
                val_summary, val_binary_loss, val_instance_loss, \
                val_embeddings, val_binary_seg_imgs, val_gt_imgs, \
                val_binary_gt_labels, val_instance_gt_labels = \
                sess.run([val_total_loss, val_accuracy, val_fn, val_fp,
                          val_merge_summary_op, val_binary_seg_loss,
                          val_disc_loss, val_pix_embedding, val_prediction,
                          val_images, val_binary_labels, val_instance_labels])

            if math.isnan(val_c) or math.isnan(val_binary_loss) or math.isnan(val_instance_loss):
                log.error('cost is: {:.5f}'.format(val_c))
                log.error('binary cost is: {:.5f}'.format(val_binary_loss))
                log.error('instance cost is: {:.5f}'.format(val_instance_loss))
                return

            if epoch % 100 == 0:
                record_training_intermediate_result(
                    gt_images=val_gt_imgs, gt_binary_labels=val_binary_gt_labels,
                    gt_instance_labels=val_instance_gt_labels, binary_seg_images=val_binary_seg_imgs,
                    pix_embeddings=val_embeddings, flag='val'
                )

            # NOTE(review): measured here, so the per-epoch wall time also
            # includes the validation sess.run above.
            cost_time = time.time() - t_start
            train_cost_time_mean.append(cost_time)
            summary_writer.add_summary(summary=val_summary, global_step=epoch)

            if epoch % CFG.TRAIN.VAL_DISPLAY_STEP == 0:
                log.info('Epoch_Val: {:d} total_loss= {:6f} binary_seg_loss= {:6f} '
                         'instance_seg_loss= {:6f} accuracy= {:6f} fp= {:6f} fn= {:6f}'
                         ' mean_cost_time= {:5f}s '.
                         format(epoch + 1, val_c, val_binary_loss, val_instance_loss, val_accuracy_figure,
                                val_fp_figure, val_fn_figure, np.mean(train_cost_time_mean)))
                train_cost_time_mean.clear()

            # Periodic checkpointing.
            if epoch % 2000 == 0:
                saver.save(sess=sess, save_path=model_save_path, global_step=global_step)
    return
def train_lanenet_multi_gpu(dataset_dir, weights_path=None, net_flag='vgg'):
    """
    train lanenet with multi gpu

    Builds one loss "tower" per GPU, averages the per-tower gradients and
    applies a single synchronized update, tracking exponential moving
    averages of all trainable variables.

    :param dataset_dir: root directory of the prepared dataset
    :param weights_path: optional checkpoint to restore before training
    :param net_flag: choose which base network to use
    :return: None
    """
    # set lanenet dataset
    train_dataset = lanenet_data_feed_pipline.LaneNetDataFeeder(
        dataset_dir=dataset_dir, flags='train'
    )
    val_dataset = lanenet_data_feed_pipline.LaneNetDataFeeder(
        dataset_dir=dataset_dir, flags='val'
    )

    # set lanenet
    train_net = lanenet.LaneNet(net_flag=net_flag, phase='train', reuse=False)
    val_net = lanenet.LaneNet(net_flag=net_flag, phase='val', reuse=True)

    # set compute graph node
    train_images, train_binary_labels, train_instance_labels = train_dataset.inputs(
        CFG.TRAIN.BATCH_SIZE, 1
    )
    val_images, val_binary_labels, val_instance_labels = val_dataset.inputs(
        CFG.TRAIN.VAL_BATCH_SIZE, 1
    )

    # set average container
    tower_grads = []
    train_tower_loss = []
    val_tower_loss = []
    batchnorm_updates = None
    train_summary_op_updates = None

    # set lr
    global_step = tf.Variable(0, trainable=False)
    # NOTE(review): decay_steps equals the *epoch* count -- see the same note
    # in the single-GPU trainer.
    learning_rate = tf.train.polynomial_decay(
        learning_rate=CFG.TRAIN.LEARNING_RATE,
        global_step=global_step,
        decay_steps=CFG.TRAIN.EPOCHS,
        power=0.9
    )

    # set optimizer
    optimizer = tf.train.MomentumOptimizer(
        learning_rate=learning_rate, momentum=CFG.TRAIN.MOMENTUM
    )

    # set distributed train op: one loss tower per GPU, sharing variables.
    with tf.variable_scope(tf.get_variable_scope()):
        for i in range(CFG.TRAIN.GPU_NUM):
            with tf.device('/gpu:{:d}'.format(i)):
                with tf.name_scope('tower_{:d}'.format(i)) as _:
                    train_loss, grads = compute_net_gradients(
                        train_images, train_binary_labels, train_instance_labels, train_net, optimizer
                    )

                    # Only use the mean and var in the first gpu tower to update the parameter
                    if i == 0:
                        batchnorm_updates = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
                        train_summary_op_updates = tf.get_collection(tf.GraphKeys.SUMMARIES)
                    tower_grads.append(grads)
                    train_tower_loss.append(train_loss)
                with tf.name_scope('validation_{:d}'.format(i)) as _:
                    val_loss, _ = compute_net_gradients(
                        val_images, val_binary_labels, val_instance_labels, val_net, optimizer)
                    val_tower_loss.append(val_loss)

    # Average the per-tower gradients and losses.
    grads = average_gradients(tower_grads)
    avg_train_loss = tf.reduce_mean(train_tower_loss)
    avg_val_loss = tf.reduce_mean(val_tower_loss)

    # Track the moving averages of all trainable variables
    variable_averages = tf.train.ExponentialMovingAverage(
        CFG.TRAIN.MOVING_AVERAGE_DECAY, num_updates=global_step)
    variables_to_average = tf.trainable_variables() + tf.moving_average_variables()
    variables_averages_op = variable_averages.apply(variables_to_average)

    # Group all the op needed for training
    batchnorm_updates_op = tf.group(*batchnorm_updates)
    apply_gradient_op = optimizer.apply_gradients(grads, global_step=global_step)
    train_op = tf.group(apply_gradient_op, variables_averages_op,
                        batchnorm_updates_op)

    # Set tf summary save path
    tboard_save_path = 'tboard/tusimple_lanenet_multi_gpu_{:s}'.format(net_flag)
    os.makedirs(tboard_save_path, exist_ok=True)

    summary_writer = tf.summary.FileWriter(tboard_save_path)

    avg_train_loss_scalar = tf.summary.scalar(
        name='average_train_loss', tensor=avg_train_loss
    )
    avg_val_loss_scalar = tf.summary.scalar(
        name='average_val_loss', tensor=avg_val_loss
    )
    learning_rate_scalar = tf.summary.scalar(
        name='learning_rate_scalar', tensor=learning_rate
    )
    train_merge_summary_op = tf.summary.merge(
        [avg_train_loss_scalar, learning_rate_scalar] + train_summary_op_updates
    )
    val_merge_summary_op = tf.summary.merge([avg_val_loss_scalar])

    # set tensorflow saver
    saver = tf.train.Saver()
    model_save_dir = 'model/tusimple_lanenet_multi_gpu_{:s}'.format(net_flag)
    os.makedirs(model_save_dir, exist_ok=True)
    train_start_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
    model_name = 'tusimple_lanenet_{:s}_{:s}.ckpt'.format(net_flag, str(train_start_time))
    model_save_path = ops.join(model_save_dir, model_name)

    # set sess config
    sess_config = tf.ConfigProto(device_count={'GPU': CFG.TRAIN.GPU_NUM}, allow_soft_placement=True)
    sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.TRAIN.GPU_MEMORY_FRACTION
    sess_config.gpu_options.allow_growth = CFG.TRAIN.TF_ALLOW_GROWTH
    sess_config.gpu_options.allocator_type = 'BFC'

    # Set the training parameters
    train_epochs = CFG.TRAIN.EPOCHS

    log.info('Global configuration is as follows:')
    log.info(CFG)

    sess = tf.Session(config=sess_config)

    summary_writer.add_graph(sess.graph)

    with sess.as_default():
        # Dump the graph definition next to the checkpoints.
        tf.train.write_graph(
            graph_or_graph_def=sess.graph, logdir='',
            name='{:s}/lanenet_model.pb'.format(model_save_dir))

        if weights_path is None:
            log.info('Training from scratch')
            init = tf.global_variables_initializer()
            sess.run(init)
        else:
            log.info('Restore model from last model checkpoint {:s}'.format(weights_path))
            saver.restore(sess=sess, save_path=weights_path)

        train_cost_time_mean = []
        val_cost_time_mean = []

        for epoch in range(train_epochs):
            # training part
            t_start = time.time()

            _, train_loss_value, train_summary, lr = \
                sess.run(
                    fetches=[train_op, avg_train_loss,
                             train_merge_summary_op, learning_rate]
                )

            # Abort on numerical divergence.
            if math.isnan(train_loss_value):
                log.error('Train loss is nan')
                return

            cost_time = time.time() - t_start
            train_cost_time_mean.append(cost_time)

            summary_writer.add_summary(summary=train_summary, global_step=epoch)

            # validation part
            t_start_val = time.time()

            val_loss_value, val_summary = \
                sess.run(fetches=[avg_val_loss, val_merge_summary_op])

            summary_writer.add_summary(val_summary, global_step=epoch)

            cost_time_val = time.time() - t_start_val
            val_cost_time_mean.append(cost_time_val)

            if epoch % CFG.TRAIN.DISPLAY_STEP == 0:
                log.info('Epoch_Train: {:d} total_loss= {:6f} '
                         'lr= {:6f} mean_cost_time= {:5f}s '.
                         format(epoch + 1,
                                train_loss_value,
                                lr,
                                np.mean(train_cost_time_mean))
                         )
                train_cost_time_mean.clear()

            if epoch % CFG.TRAIN.VAL_DISPLAY_STEP == 0:
                log.info('Epoch_Val: {:d} total_loss= {:6f}'
                         ' mean_cost_time= {:5f}s '.
                         format(epoch + 1,
                                val_loss_value,
                                np.mean(val_cost_time_mean))
                         )
                val_cost_time_mean.clear()

            if epoch % 2000 == 0:
                # NOTE(review): unlike the single-GPU trainer this passes the
                # python ``epoch`` counter (not the ``global_step`` tensor) to
                # ``saver.save`` -- confirm which numbering is wanted.
                saver.save(sess=sess, save_path=model_save_path, global_step=epoch)
    return
if __name__ == '__main__':
    # init args
    args = init_args()

    # Fall back to single-GPU training when fewer than two GPUs are
    # configured.
    # BUG FIX: the original set ``args.use_multi_gpu`` here, but the branch
    # below reads ``args.multi_gpus``, so the fallback never took effect.
    # NOTE(review): ``init_args`` is defined earlier in this file -- confirm
    # ``multi_gpus`` is the attribute it actually declares.
    if CFG.TRAIN.GPU_NUM < 2:
        args.multi_gpus = False

    # train lanenet
    if not args.multi_gpus:
        train_lanenet(args.dataset_dir, args.weights_path, net_flag=args.net_flag)
    else:
        train_lanenet_multi_gpu(args.dataset_dir, args.weights_path, net_flag=args.net_flag)
| StarcoderdataPython |
4821776 | import unittest
import test.testdata.original_pb2 as original_version
import test.testdata.update_pb2 as update_version
from src.comparator.field_comparator import FieldComparator
from src.findings.finding_container import FindingContainer
from src.findings.utils import FindingCategory
class FieldComparatorTest(unittest.TestCase):
    """Unit tests for ``FieldComparator`` using the original/update protos.

    BUG FIX: every test method lacked the ``test_`` prefix, so unittest's
    discovery never collected or executed any of them.  They are renamed
    accordingly; the bodies are unchanged.
    """

    def tearDown(self):
        # Findings accumulate in a module-global container; reset between tests.
        FindingContainer.reset()

    def test_field_removal(self):
        field_company_address = update_version.DESCRIPTOR.message_types_by_name["Person"].fields_by_name['company_address']
        FieldComparator(field_company_address, None).compare()
        finding = FindingContainer.getAllFindings()[0]
        self.assertEqual(finding.message, 'A Field company_address is removed')
        self.assertEqual(finding.category.name, 'FIELD_REMOVAL')

    def test_field_addition(self):
        field_married = update_version.DESCRIPTOR.message_types_by_name["Person"].fields_by_name['married']
        FieldComparator(None, field_married).compare()
        finding = FindingContainer.getAllFindings()[0]
        self.assertEqual(finding.message, 'A new Field married is added.')
        self.assertEqual(finding.category.name, 'FIELD_ADDITION')

    def test_type_change(self):
        field_id_original = original_version.DESCRIPTOR.message_types_by_name["Person"].fields[1]
        field_id_update = update_version.DESCRIPTOR.message_types_by_name["Person"].fields[1]
        FieldComparator(field_id_original, field_id_update).compare()
        finding = FindingContainer.getAllFindings()[0]
        self.assertEqual(finding.message, 'Type of the Field is changed, the original is TYPE_INT32, but the updated is TYPE_STRING')
        self.assertEqual(finding.category.name, 'FIELD_TYPE_CHANGE')

    def test_repeated_label_change(self):
        field_phones_original = original_version.DESCRIPTOR.message_types_by_name["Person"].fields_by_name['phones']
        field_phones_update = update_version.DESCRIPTOR.message_types_by_name["Person"].fields_by_name['phones']
        FieldComparator(field_phones_original, field_phones_update).compare()
        finding = FindingContainer.getAllFindings()[0]
        self.assertEqual(finding.message, 'Repeated state of the Field is changed, the original is LABEL_REPEATED, but the updated is LABEL_OPTIONAL')
        self.assertEqual(finding.category.name, 'FIELD_REPEATED_CHANGE')

    def test_name_change(self):
        field_email_original = original_version.DESCRIPTOR.message_types_by_name["Person"].fields_by_name['email']
        field_email_update = update_version.DESCRIPTOR.message_types_by_name["Person"].fields_by_name['email_address']
        FieldComparator(field_email_original, field_email_update).compare()
        finding = FindingContainer.getAllFindings()[0]
        self.assertEqual(finding.message, 'Name of the Field is changed, the original is email, but the updated is email_address')
        self.assertEqual(finding.category.name, 'FIELD_NAME_CHANGE')

    def test_move_existing_field_out_of_oneof(self):
        field_email_original = original_version.DESCRIPTOR.message_types_by_name["AddressBook"].fields_by_name['deprecated']
        field_email_update = update_version.DESCRIPTOR.message_types_by_name["AddressBook"].fields_by_name['deprecated']
        FieldComparator(field_email_original, field_email_update).compare()
        finding = FindingContainer.getAllFindings()[0]
        self.assertEqual(finding.message, 'The Field deprecated is moved out of one-of')
        self.assertEqual(finding.category.name, 'FIELD_ONEOF_REMOVAL')

    def test_move_existing_field_into_oneof(self):
        field_email_original = original_version.DESCRIPTOR.message_types_by_name["Person"].fields_by_name['home_address']
        field_email_update = update_version.DESCRIPTOR.message_types_by_name["Person"].fields_by_name['home_address']
        FieldComparator(field_email_original, field_email_update).compare()
        finding = FindingContainer.getAllFindings()[0]
        self.assertEqual(finding.message, 'The Field home_address is moved into one-of')
        self.assertEqual(finding.category.name, 'FIELD_ONEOF_ADDITION')
if __name__ == '__main__':
    # Allow running this test module directly via ``python <file>``.
    unittest.main()
1722410 | <filename>Uche Clare/Phase 2/String/Day 31/Task 6.py<gh_stars>1-10
# Write a Python program to display formatted text (width=50) as output.
import textwrap

text = """
Rather than attempting to seek out Python 3-specific recipes, the topics of this book are
merely inspired by existing code and techniques. Using these ideas as a springboard,
the writing is an original work that has been deliberately written with the most modern
Python programming techniques possible. """

# Re-flow the paragraph so no output line exceeds 50 characters.
wrapped = textwrap.fill(text, width=50)
print(wrapped)
3216539 | import torch
import torch.nn as nn
import torch.nn.functional as F
from modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
from modeling.aspp import build_aspp
from modeling.decoder import build_decoder_kinematic, build_decoder
from modeling.backbone import build_backbone
from modeling.kinematic_graph import build_kinematic_graph
class DeepLab(nn.Module):
    """DeepLab v3+ segmentation network with an optional kinematic-graph head.

    BUG FIX: the constructor used to store the freeze flag as
    ``self.freeze_bn``, which shadowed the ``freeze_bn()`` method below and
    made ``model.freeze_bn()`` raise ``TypeError: 'bool' object is not
    callable``.  The flag now lives in ``self.freeze_bn_flag``.
    NOTE(review): any external code that read the *attribute* ``freeze_bn``
    as a boolean must be updated to ``freeze_bn_flag`` -- confirm callers.
    """

    def __init__(self, args, backbone='resnet', output_stride=16, num_classes=21,
                 sync_bn=True, freeze_bn=False):
        super(DeepLab, self).__init__()
        self.args = args
        # DRN backbones are built for an output stride of 8.
        if backbone == 'drn':
            output_stride = 8

        # Pick the batch-norm implementation shared by every sub-module.
        BatchNorm = SynchronizedBatchNorm2d if sync_bn else nn.BatchNorm2d

        self.backbone = build_backbone(backbone, output_stride, BatchNorm)
        self.aspp = build_aspp(backbone, output_stride, BatchNorm)
        if self.args.use_kinematic:
            self.decoder = build_decoder_kinematic(backbone, BatchNorm)
            self.kinematic_layer = build_kinematic_graph(BatchNorm)
        else:
            self.decoder = build_decoder(num_classes, backbone, BatchNorm)

        self.freeze_bn_flag = freeze_bn

    def forward(self, input):
        """Run a forward pass, upsampling the logits back to the input size."""
        x, low_level_feat = self.backbone(input)
        x = self.aspp(x)
        x = self.decoder(x, low_level_feat)
        if self.args.use_kinematic:
            x = self.kinematic_layer(x)
        x = F.interpolate(x, size=input.size()[2:], mode='bilinear', align_corners=True)
        return x

    def freeze_bn(self):
        """Put every batch-norm layer into eval mode (frozen statistics)."""
        for m in self.modules():
            if isinstance(m, (SynchronizedBatchNorm2d, nn.BatchNorm2d)):
                m.eval()

    def _trainable_params(self, modules):
        """Yield requires-grad parameters of conv (and, unless batch-norm is
        frozen, batch-norm) submodules found under *modules*."""
        if self.freeze_bn_flag:
            wanted = (nn.Conv2d,)
        else:
            wanted = (nn.Conv2d, SynchronizedBatchNorm2d, nn.BatchNorm2d)
        for module in modules:
            for _, child in module.named_modules():
                if isinstance(child, wanted):
                    for p in child.parameters():
                        if p.requires_grad:
                            yield p

    def get_1x_lr_params(self):
        """Backbone parameters, trained with the base learning rate."""
        return self._trainable_params([self.backbone])

    def get_10x_lr_params(self):
        """Head parameters (ASPP/decoder/kinematic), trained at 10x LR."""
        modules = [self.aspp, self.decoder]
        if self.args.use_kinematic:
            modules.append(self.kinematic_layer)
        return self._trainable_params(modules)
if __name__ == "__main__":
    from args import Args_occ5000
    from tensorboardX import SummaryWriter

    # Smoke test: build the network, run one forward pass on random data and
    # dump the computation graph for TensorBoard.
    # NOTE(review): the log directory below is a hard-coded absolute path --
    # confirm it exists on the machine this is run on.
    writer = SummaryWriter('/home/kidd/Documents/graph1')
    args = Args_occ5000()
    model = DeepLab(args=args, backbone='resnet', output_stride=16)
    model.eval()
    input = torch.rand(1, 3, 513, 513)
    output = model(input)
    writer.add_graph(model, input)
    writer.close()
    print(output.size())
| StarcoderdataPython |
3202595 | import copy
from django.core.exceptions import ValidationError
from django.db import models
from django.forms import widgets
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from rest_framework.utils import model_meta
from ..core.validators import validate_password
class SetPasswordSerializer(serializers.Serializer):
    """Stand-alone serializer used to set a password on an object."""

    # ``validate_password`` enforces the project-wide password policy; the
    # PasswordInput widget keeps the value masked in browsable-API forms.
    password = serializers.CharField(max_length=128, label=_('Password'),
        style={'widget': widgets.PasswordInput}, validators=[validate_password])
class HyperlinkedModelSerializer(serializers.HyperlinkedModelSerializer):
    """ support for postonly_fields, fields whose value can only be set on post """

    def validate(self, attrs):
        """ calls model.clean() """
        # Run DRF field validation first; some parent implementations may
        # already return a model instance, in which case there is nothing
        # further to do here.
        attrs = super(HyperlinkedModelSerializer, self).validate(attrs)
        if isinstance(attrs, models.Model):
            return attrs
        validated_data = dict(attrs)
        ModelClass = self.Meta.model
        # Remove many-to-many relationships from validated_data.
        # (They cannot be assigned before the instance has a primary key.)
        info = model_meta.get_field_info(ModelClass)
        for field_name, relation_info in info.relations.items():
            if relation_info.to_many and (field_name in validated_data):
                validated_data.pop(field_name)
        if self.instance:
            # on update: Merge provided fields with instance field
            # A deepcopy keeps the serializer's bound instance untouched while
            # model-level validation runs against the merged state.
            instance = copy.deepcopy(self.instance)
            for key, value in validated_data.items():
                setattr(instance, key, value)
        else:
            instance = ModelClass(**validated_data)
        instance.clean()
        return attrs

    def post_only_cleanning(self, instance, validated_data):
        """ removes postonly_fields from attrs """
        # Only strips the fields when updating an existing instance; on
        # create (instance is None) everything passes through.
        model_attrs = dict(**validated_data)
        post_only_fields = getattr(self, 'post_only_fields', None)
        if instance is not None and post_only_fields:
            for attr, value in validated_data.items():
                if attr in post_only_fields:
                    model_attrs.pop(attr)
        return model_attrs

    def update(self, instance, validated_data):
        """ removes postonly_fields from attrs when not posting """
        model_attrs = self.post_only_cleanning(instance, validated_data)
        return super(HyperlinkedModelSerializer, self).update(instance, model_attrs)

    def partial_update(self, instance, validated_data):
        """ removes postonly_fields from attrs when not posting """
        # NOTE(review): DRF's ModelSerializer does not define
        # ``partial_update`` -- confirm another class in the MRO provides it,
        # otherwise this ``super()`` call raises AttributeError.
        model_attrs = self.post_only_cleanning(instance, validated_data)
        return super(HyperlinkedModelSerializer, self).partial_update(instance, model_attrs)
class RelatedHyperlinkedModelSerializer(HyperlinkedModelSerializer):
    """ returns object on to_internal_value based on URL """

    def to_internal_value(self, data):
        """Resolve the posted ``url`` value into a model instance.

        Only objects belonging to the current account are considered, so a
        URL pointing at another account's object fails field validation.
        """
        try:
            url = data.get('url')
        except AttributeError:
            # ``data`` is not dict-like (e.g. a bare string was posted).
            url = None
        if not url:
            raise ValidationError({
                'url': "URL is required."
            })
        # NOTE(review): ``get_account()`` is not defined on this class --
        # presumably supplied by a mixin or subclass elsewhere in the
        # project; confirm before reusing this serializer standalone.
        account = self.get_account()
        queryset = self.Meta.model.objects.filter(account=account)
        # Constrain the hyperlinked field's queryset before resolving the URL
        # so cross-account lookups are rejected.
        self.fields['url'].queryset = queryset
        obj = self.fields['url'].to_internal_value(url)
        return obj
class SetPasswordHyperlinkedSerializer(HyperlinkedModelSerializer):
    """Model serializer whose ``password`` may only be provided on creation."""

    # Write-only and optional: the password never appears in responses, and
    # updates may omit it entirely (``validate_password`` rejects it there).
    password = serializers.CharField(max_length=128, label=_('Password'),
        validators=[validate_password], write_only=True, required=False,
        style={'widget': widgets.PasswordInput})

    def validate_password(self, value):
        """ POST only password """
        # On update any password value is rejected; on create it is required.
        if self.instance:
            if value:
                raise serializers.ValidationError(_("Can not set password"))
        elif not value:
            raise serializers.ValidationError(_("Password required"))
        return value

    def validate(self, attrs):
        """ remove password in case is not a real model field """
        # The parent ``validate`` instantiates the model from ``attrs``;
        # ``password`` is popped beforehand and restored afterwards so that
        # model-level validation never sees it.
        # BUG FIX: ``password`` was previously bound only in the ``else``
        # branch, so when the model has no concrete ``password`` field the
        # final ``if`` below raised NameError.  Initialise it up front.
        password = None
        try:
            self.Meta.model._meta.get_field('password')
        except models.FieldDoesNotExist:
            pass
        else:
            password = attrs.pop('password', None)
        attrs = super().validate(attrs)
        if password is not None:
            attrs['password'] = password
        return attrs

    def create(self, validated_data):
        """Create the instance, hashing the password via ``set_password``."""
        password = validated_data.pop('password')
        instance = self.Meta.model(**validated_data)
        instance.set_password(password)
        instance.save()
        return instance
| StarcoderdataPython |
117111 | <filename>grammar.py
# author: <NAME>
import re
from stanfordcorenlp import StanfordCoreNLP
def subjectVerbAgreement(text):
    """
    Count subject-verb agreement errors in *text* using Stanford CoreNLP POS tags.

    Two families of checks are applied:
      1. pronoun + verb pairs (e.g. "They is", "He am"),
      2. consecutive noun/verb tag sequences such as NN followed by VBP,
         including noun + WDT ("which"/"that") + verb triples.

    :param text: raw input text; non-alphanumeric characters are stripped
    :return: number of detected agreement errors (int)
    """
    nlp = StanfordCoreNLP(r'..\stanford-corenlp-full-2018-02-27')
    try:
        text = re.sub('[^A-Za-z0-9]+', ' ', text)
        pos_tags = nlp.pos_tag(text)
        tags = [tag for _, tag in pos_tags]

        # NOTE(review): the list names are kept from the original code even
        # though they look swapped (I/you/they/we actually pair with plural
        # verb forms) -- the checks below rely on this pairing.
        singular_PRP = ['I', 'You', 'They', 'We', 'i', 'you', 'they', 'we']
        plural_PRP = ['He', 'She', 'It', 'he', 'she', 'it']

        sub_verb_errors = 0
        # Checking sub_verb_errors for Pronouns
        for i in range(len(pos_tags) - 1):
            word = pos_tags[i][0]
            next_tag = pos_tags[i + 1][1]
            if word in singular_PRP and next_tag == 'VBZ':
                sub_verb_errors += 1
            elif word in plural_PRP and next_tag in ('VBP', 'VB'):
                sub_verb_errors += 1

        def _count_sequence(pattern):
            """Count occurrences of *pattern* as a consecutive run in ``tags``."""
            width = len(pattern)
            return sum(
                1 for i in range(len(tags) - width + 1)
                if tuple(tags[i:i + width]) == pattern
            )

        # Checking sub_verb_errors for Verb tense.
        # BUG FIX: the original code used ``tags.count("'NN', 'VBP'")`` --
        # ``tags`` holds single tag strings like 'NN', so those lookups always
        # returned 0.  Count consecutive tag sequences instead.
        noun_verb_patterns = [
            ('NN', 'VBP'), ('NN', 'VB'), ('NNP', 'VBP'), ('NNPS', 'VBZ'),
            ('NNS', 'VBZ'), ('NNP', 'VB'),
            ('NN', 'WDT', 'VB'), ('NN', 'WDT', 'VBP'), ('NNS', 'WDT', 'VBZ'),
        ]
        for pattern in noun_verb_patterns:
            sub_verb_errors += _count_sequence(pattern)

        return sub_verb_errors
    finally:
        # Always release the CoreNLP client; the original leaked it whenever
        # an exception was raised before the close() call.
        nlp.close()
#subjectVerbAgreement("We is a boy. He am a boy.")
| StarcoderdataPython |
197127 | from cement import ex
from .tet_controller import TetController
import json
class Applications(TetController):
    """Cement controller exposing ADM application queries against a
    Tetration cluster."""

    class Meta:
        label = 'applications'
        stacked_type = 'nested'
        help = 'Interact with ADM Application from Tetration Cluster'

    @ex(help='list applications', arguments=[
    ])
    def list(self):
        """
        Render the list of all the applications (ID, name, scope).
        """
        response = self.tetration().get('/applications')
        body = response.content.decode('utf-8')
        self.app.log.debug('{0} - {1}'.format(response.status_code, body))
        if response.status_code == 403:
            self.app.log.error('{0}Request "app_policy_management" permissions'.format(body))
            return
        # BUG FIX: parse the body only after the permission check -- a 403
        # response carries a non-JSON body, so the original json.loads call
        # raised before the error message could ever be logged.
        content = json.loads(body)
        headers = ['Application ID', 'Name', 'Scope ID']
        data_list = [[x['id'],
                      x['name'],
                      x['app_scope_id']] for x in content]
        self.app.render(data_list, headers=headers)
| StarcoderdataPython |
4837806 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class AccessMapping(Model):
    """Serialization model for a server access mapping (access point,
    display name, moniker, owning service and virtual directory).

    Wire-format keys and types are declared in ``_attribute_map``.
    """

    _attribute_map = {
        'access_point': {'key': 'accessPoint', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'moniker': {'key': 'moniker', 'type': 'str'},
        'service_owner': {'key': 'serviceOwner', 'type': 'str'},
        'virtual_directory': {'key': 'virtualDirectory', 'type': 'str'}
    }

    def __init__(self, access_point=None, display_name=None, moniker=None, service_owner=None, virtual_directory=None):
        # Snapshot the arguments before any other local is created.
        _given = locals()
        super(AccessMapping, self).__init__()
        # Parameter names match _attribute_map keys one-to-one.
        for _field in self._attribute_map:
            setattr(self, _field, _given[_field])
class ConnectionData(Model):
    """Serialization model for the connection handshake payload:
    authenticated/authorized identities, deployment information and the
    location-service data blob.

    Wire-format keys and types are declared in ``_attribute_map``.
    """

    _attribute_map = {
        'authenticated_user': {'key': 'authenticatedUser', 'type': 'Identity'},
        'authorized_user': {'key': 'authorizedUser', 'type': 'Identity'},
        'deployment_id': {'key': 'deploymentId', 'type': 'str'},
        'deployment_type': {'key': 'deploymentType', 'type': 'object'},
        'instance_id': {'key': 'instanceId', 'type': 'str'},
        'last_user_access': {'key': 'lastUserAccess', 'type': 'iso-8601'},
        'location_service_data': {'key': 'locationServiceData', 'type': 'LocationServiceData'},
        'web_application_relative_directory': {'key': 'webApplicationRelativeDirectory', 'type': 'str'}
    }

    def __init__(self, authenticated_user=None, authorized_user=None, deployment_id=None, deployment_type=None, instance_id=None, last_user_access=None, location_service_data=None, web_application_relative_directory=None):
        # Snapshot the arguments before any other local is created.
        _given = locals()
        super(ConnectionData, self).__init__()
        # Parameter names match _attribute_map keys one-to-one.
        for _field in self._attribute_map:
            setattr(self, _field, _given[_field])
class IdentityBase(Model):
    """Base serialization model shared by identity payloads: descriptors,
    membership lists, provider/custom display names and versioning
    metadata.

    Wire-format keys and types are declared in ``_attribute_map``.
    """

    _attribute_map = {
        'custom_display_name': {'key': 'customDisplayName', 'type': 'str'},
        'descriptor': {'key': 'descriptor', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'is_active': {'key': 'isActive', 'type': 'bool'},
        'is_container': {'key': 'isContainer', 'type': 'bool'},
        'master_id': {'key': 'masterId', 'type': 'str'},
        'member_ids': {'key': 'memberIds', 'type': '[str]'},
        'member_of': {'key': 'memberOf', 'type': '[str]'},
        'members': {'key': 'members', 'type': '[str]'},
        'meta_type_id': {'key': 'metaTypeId', 'type': 'int'},
        'properties': {'key': 'properties', 'type': 'object'},
        'provider_display_name': {'key': 'providerDisplayName', 'type': 'str'},
        'resource_version': {'key': 'resourceVersion', 'type': 'int'},
        'social_descriptor': {'key': 'socialDescriptor', 'type': 'str'},
        'subject_descriptor': {'key': 'subjectDescriptor', 'type': 'str'},
        'unique_user_id': {'key': 'uniqueUserId', 'type': 'int'}
    }

    def __init__(self, custom_display_name=None, descriptor=None, id=None, is_active=None, is_container=None, master_id=None, member_ids=None, member_of=None, members=None, meta_type_id=None, properties=None, provider_display_name=None, resource_version=None, social_descriptor=None, subject_descriptor=None, unique_user_id=None):
        # Snapshot the arguments before any other local is created.
        _given = locals()
        super(IdentityBase, self).__init__()
        # Parameter names match _attribute_map keys one-to-one.
        for _field in self._attribute_map:
            setattr(self, _field, _given[_field])
class LocationMapping(Model):
    """Serialization model pairing an access-mapping moniker with its
    resolved location URL.

    Wire-format keys and types are declared in ``_attribute_map``.
    """

    _attribute_map = {
        'access_mapping_moniker': {'key': 'accessMappingMoniker', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'}
    }

    def __init__(self, access_mapping_moniker=None, location=None):
        # Snapshot the arguments before any other local is created.
        _given = locals()
        super(LocationMapping, self).__init__()
        # Parameter names match _attribute_map keys one-to-one.
        for _field in self._attribute_map:
            setattr(self, _field, _given[_field])
class LocationServiceData(Model):
    """Serialization model for the location service's payload: access
    mappings, client-cache hints, change ids and service definitions.

    Wire-format keys and types are declared in ``_attribute_map``.
    """

    _attribute_map = {
        'access_mappings': {'key': 'accessMappings', 'type': '[AccessMapping]'},
        'client_cache_fresh': {'key': 'clientCacheFresh', 'type': 'bool'},
        'client_cache_time_to_live': {'key': 'clientCacheTimeToLive', 'type': 'int'},
        'default_access_mapping_moniker': {'key': 'defaultAccessMappingMoniker', 'type': 'str'},
        'last_change_id': {'key': 'lastChangeId', 'type': 'int'},
        'last_change_id64': {'key': 'lastChangeId64', 'type': 'long'},
        'service_definitions': {'key': 'serviceDefinitions', 'type': '[ServiceDefinition]'},
        'service_owner': {'key': 'serviceOwner', 'type': 'str'}
    }

    def __init__(self, access_mappings=None, client_cache_fresh=None, client_cache_time_to_live=None, default_access_mapping_moniker=None, last_change_id=None, last_change_id64=None, service_definitions=None, service_owner=None):
        # Snapshot the arguments before any other local is created.
        _given = locals()
        super(LocationServiceData, self).__init__()
        # Parameter names match _attribute_map keys one-to-one.
        for _field in self._attribute_map:
            setattr(self, _field, _given[_field])
class ResourceAreaInfo(Model):
    """Serialization model describing a resource area: id, location URL
    and name.

    Wire-format keys and types are declared in ``_attribute_map``.
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'location_url': {'key': 'locationUrl', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'}
    }

    def __init__(self, id=None, location_url=None, name=None):
        # Snapshot the arguments before any other local is created.
        _given = locals()
        super(ResourceAreaInfo, self).__init__()
        # Parameter names match _attribute_map keys one-to-one.
        for _field in self._attribute_map:
            setattr(self, _field, _given[_field])
class ServiceDefinition(Model):
    """Serialization model for a service definition: identity, parentage,
    supported API version range, location mappings, status and tool id.

    Wire-format keys and types are declared in ``_attribute_map``.
    """

    _attribute_map = {
        'description': {'key': 'description', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'identifier': {'key': 'identifier', 'type': 'str'},
        'inherit_level': {'key': 'inheritLevel', 'type': 'object'},
        'location_mappings': {'key': 'locationMappings', 'type': '[LocationMapping]'},
        'max_version': {'key': 'maxVersion', 'type': 'str'},
        'min_version': {'key': 'minVersion', 'type': 'str'},
        'parent_identifier': {'key': 'parentIdentifier', 'type': 'str'},
        'parent_service_type': {'key': 'parentServiceType', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'object'},
        'relative_path': {'key': 'relativePath', 'type': 'str'},
        'relative_to_setting': {'key': 'relativeToSetting', 'type': 'object'},
        'released_version': {'key': 'releasedVersion', 'type': 'str'},
        'resource_version': {'key': 'resourceVersion', 'type': 'int'},
        'service_owner': {'key': 'serviceOwner', 'type': 'str'},
        'service_type': {'key': 'serviceType', 'type': 'str'},
        'status': {'key': 'status', 'type': 'object'},
        'tool_id': {'key': 'toolId', 'type': 'str'}
    }

    def __init__(self, description=None, display_name=None, identifier=None, inherit_level=None, location_mappings=None, max_version=None, min_version=None, parent_identifier=None, parent_service_type=None, properties=None, relative_path=None, relative_to_setting=None, released_version=None, resource_version=None, service_owner=None, service_type=None, status=None, tool_id=None):
        # Snapshot the arguments before any other local is created.
        _given = locals()
        super(ServiceDefinition, self).__init__()
        # Parameter names match _attribute_map keys one-to-one.
        for _field in self._attribute_map:
            setattr(self, _field, _given[_field])
class Identity(IdentityBase):
    """Concrete identity model; adds nothing to :class:`IdentityBase`
    beyond its own attribute map and forwards all construction arguments
    to the base class.

    Wire-format keys and types are declared in ``_attribute_map``.
    """

    _attribute_map = {
        'custom_display_name': {'key': 'customDisplayName', 'type': 'str'},
        'descriptor': {'key': 'descriptor', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'is_active': {'key': 'isActive', 'type': 'bool'},
        'is_container': {'key': 'isContainer', 'type': 'bool'},
        'master_id': {'key': 'masterId', 'type': 'str'},
        'member_ids': {'key': 'memberIds', 'type': '[str]'},
        'member_of': {'key': 'memberOf', 'type': '[str]'},
        'members': {'key': 'members', 'type': '[str]'},
        'meta_type_id': {'key': 'metaTypeId', 'type': 'int'},
        'properties': {'key': 'properties', 'type': 'object'},
        'provider_display_name': {'key': 'providerDisplayName', 'type': 'str'},
        'resource_version': {'key': 'resourceVersion', 'type': 'int'},
        'social_descriptor': {'key': 'socialDescriptor', 'type': 'str'},
        'subject_descriptor': {'key': 'subjectDescriptor', 'type': 'str'},
        'unique_user_id': {'key': 'uniqueUserId', 'type': 'int'},
    }

    def __init__(self, custom_display_name=None, descriptor=None, id=None, is_active=None, is_container=None, master_id=None, member_ids=None, member_of=None, members=None, meta_type_id=None, properties=None, provider_display_name=None, resource_version=None, social_descriptor=None, subject_descriptor=None, unique_user_id=None):
        # Forward every keyword argument to the base class unchanged.
        _kwargs = dict(locals())
        del _kwargs['self']
        super(Identity, self).__init__(**_kwargs)
# Public names re-exported by this generated models module.
__all__ = [
    'AccessMapping',
    'ConnectionData',
    'IdentityBase',
    'LocationMapping',
    'LocationServiceData',
    'ResourceAreaInfo',
    'ServiceDefinition',
    'Identity',
]
| StarcoderdataPython |
3363265 |
def iterative_levenshtein(s, t):
"""
iterative_levenshtein(s, t) -> ldist
ldist is the Levenshtein distance between the strings s and t.
For all i and j, dist[i,j] will contain the Levenshtein distance
between the first i characters of s and the first j characters of t.
Credit: https://www.python-course.eu/levenshtein_distance.php
"""
rows = len(s)+1
cols = len(t)+1
dist = [[0 for x in range(cols)] for x in range(rows)]
# source prefixes can be transformed into empty strings
# by deletions:
for i in range(1, rows):
dist[i][0] = i
# target prefixes can be created from an empty source string
# by inserting the characters
for i in range(1, cols):
dist[0][i] = i
row, col = 0, 0
for col in range(1, cols):
for row in range(1, rows):
if s[row-1] == t[col-1]:
cost = 0
else:
cost = 1
dist[row][col] = min(dist[row-1][col] + 1, # deletion
dist[row][col-1] + 1, # insertion
dist[row-1][col-1] + cost) # substitution
ldist = dist[row][col]
edit_ops = list()
dist_last = ldist
ldist2 = 0
while row > 0 or col > 0:
dist_diag = dist[row-1][col-1] if row > 0 and col > 0 else ldist + 1
dist_up = dist[row-1][col] if row > 0 else ldist + 1
dist_left = dist[row][col-1] if col > 0 else ldist + 1
dist_min = min(dist_diag, dist_up, dist_left)
if dist_diag == dist_min and dist_min == dist_last: # no change
row -= 1
col -= 1
edit_ops.insert(0, "-")
elif dist_up == dist_min: # deletion
row -= 1
ldist2 += 1
edit_ops.insert(0, "d")
elif dist_left == dist_min: # insertion
col -= 1
ldist2 += 1
edit_ops.insert(0, "i")
elif dist_diag == dist_min and dist_min < dist_last: # substitution
row -= 1
col -= 1
ldist2 += 1
edit_ops.insert(0, "s")
dist_last = dist_min
if ldist != ldist2:
print(f"WRONG!!! {ldist}/{ldist2}")
for r in range(rows):
print(dist[r])
exit(-1)
return ldist, ''.join(edit_ops)
def _insert_char_at_pos(text, pos, ch):
"""
Inserts a character at the given position in string.
"""
return text[:pos] + ch + text[pos:]
def align_text(text, edit_ops, align_op, mark=172):
    """Build an aligned copy of *text* from an edit script.

    Walks *edit_ops* and splices ``chr(mark)`` into the string at every
    position whose operation equals *align_op*, so both sides of an
    alignment end up the same length.
    """
    marker = chr(mark)
    aligned = text
    for position, operation in enumerate(edit_ops):
        if operation == align_op:
            aligned = aligned[:position] + marker + aligned[position:]
    return aligned
3330918 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 10 18:00:43 2018
"""
import numpy as np
import cv2
from matplotlib import pyplot as plt
# Load the poster (source) and the scene (destination) images.
# NOTE(review): hard-coded absolute Windows paths -- parameterize before reuse.
imgSrc = cv2.imread('C:\\Users\\praxis\\Documents\\Cours\\2018-2019\\VR-RA\\correct\\animauxFantastiques.jpg',cv2.IMREAD_UNCHANGED )
imgDest = cv2.imread('C:\\Users\\praxis\\Documents\\Cours\\2018-2019\\VR-RA\\correct\\times_square.jpg',cv2.IMREAD_UNCHANGED )
# Dimensions of the source image
height, w, channel = imgSrc.shape
# Corners of the source image
src_pts = np.float32([[0, 0], [0, height - 1], [w - 1, height - 1], [w - 1, 0]])
# Corners of the target quadrilateral in the destination image
dst_pts = np.float32([[1045, 134], [1086, 323], [1224, 297], [1170, 80]])
# Compute the homography (RANSAC, 5 px reprojection threshold)
h, status = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
# Create a white (all-ones) mask the same size as the source image
mask = np.zeros((height, w, channel), dtype = np.float32) + 1.0
# Warp the source image into the destination frame with the homography
imgReplace = cv2.warpPerspective(imgSrc, h, (imgDest.shape[1], imgDest.shape[0]), flags=cv2.INTER_LANCZOS4, borderValue=(0, 0, 0))#bbox[2], bbox[3]))
# Warp the mask the same way: it is white (=1.) only where the poster lands.
# Its edges get interpolated automatically, which avoids pixelated borders.
mask = cv2.warpPerspective(mask, h, (imgDest.shape[1], imgDest.shape[0]), flags=cv2.INTER_LANCZOS4, borderValue=(0., 0., 0.))#bbox[2], bbox[3]))
plt.imshow(mask)
plt.imshow(imgReplace)
# Alpha-blend the two images, using the warped mask as the alpha map
imgDest = imgDest * (1. - mask) + imgReplace * mask
# Convert float -> integer
imgDest = imgDest.astype(np.uint8)
# Or, slower, more classical, and prone to artefacts:
#for i in range(0, imgDest.shape[0]):
#    for j in range(0, imgDest.shape[1]):
#        for k in range(0, 3):
#            if not(imgReplace[i,j,0] == 0 and imgReplace[i,j,1] == 0 and imgReplace[i,j,2] == 0):
#                imgDest[i,j,k] = imgReplace[i,j,k]
# Reverse the channel order for display (OpenCV loads BGR, matplotlib expects RGB)
plt.imshow(imgDest[:,:,::-1])
cv2.waitKey(0)
16571 | <reponame>jdvelasq/techMiner
from collections import Counter
import pandas as pd
import ipywidgets as widgets
import techminer.core.dashboard as dash
from techminer.core import (
CA,
Dashboard,
TF_matrix,
TFIDF_matrix,
add_counters_to_axis,
clustering,
corpus_filter,
exclude_terms,
)
# from techminer.core.params import EXCLUDE_COLS
from techminer.plots import counters_to_node_sizes, xy_clusters_plot
from techminer.core.filter_records import filter_records
###############################################################################
##
## MODEL
##
###############################################################################
class Model:
    """Comparative-analysis model (TF/TF-IDF -> correspondence analysis ->
    clustering) over a bibliographic corpus.

    The ``None``-initialized attributes set in ``__init__`` are filled in
    later by the dashboard widgets before ``apply()`` is called.
    """

    def __init__(
        self,
        data,
        limit_to,
        exclude,
        years_range,
        clusters=None,
        cluster=None,
    ):
        ##
        ## Restrict the corpus to the requested publication years.
        ##
        if years_range is not None:
            initial_year, final_year = years_range
            data = data[(data.Year >= initial_year) & (data.Year <= final_year)]

        #
        # Filter for cluster members
        #
        if clusters is not None and cluster is not None:
            data = corpus_filter(data=data, clusters=clusters, cluster=cluster)

        self.data = data
        self.limit_to = limit_to
        self.exclude = exclude

        # Widget-controlled parameters; populated by the dashboard.
        self.column = None
        self.min_occ = None
        self.max_items = None
        self.clustering_method = None
        self.n_clusters = None
        self.affinity = None
        self.linkage = None
        self.random_state = None
        self.top_n = None
        self.color_scheme = None
        self.x_axis = None
        self.y_axis = None
        self.width = None
        self.height = None

    def apply(self):
        """Run the TF -> TF-IDF -> correspondence-analysis pipeline and
        store the first ten principal coordinates on ``self``."""
        ##
        ## Comparative analysis
        ## from https://tlab.it/en/allegati/help_en_online/mcluster.htm
        ##

        ##
        ## Computes TF matrix for terms in min_occurrence
        ##
        TF_matrix_ = TF_matrix(
            data=self.data,
            column=self.column,
            scheme="binary",
            min_occurrence=self.min_occ,
        )

        ##
        ## Exclude Terms
        ##
        TF_matrix_ = exclude_terms(data=TF_matrix_, axis=1)

        ##
        ## Adds counter to axes
        ##
        TF_matrix_ = add_counters_to_axis(
            X=TF_matrix_, axis=1, data=self.data, column=self.column
        )

        ##
        ## Computes TFIDF matrix and select max_term frequent terms
        ##
        ## tf-idf = tf * (log(N / df) + 1)
        ##
        TFIDF_matrix_ = TFIDF_matrix(
            TF_matrix=TF_matrix_,
            norm=None,
            use_idf=True,
            smooth_idf=False,
            sublinear_tf=False,
            max_items=self.max_items,
        )

        ##
        ## Correspondence Analysis
        ## 10 first factors for plotting
        ##
        ca = CA()
        ca.fit(TFIDF_matrix_)
        self.eigenvalues_ = ca.eigenvalues_[0:10]
        self.explained_variance_ = ca.explained_variance_[0:10]

        z = ca.principal_coordinates_rows_
        z = z[z.columns[:10]]
        self.principal_coordinates_rows_ = z

        z = ca.principal_coordinates_cols_
        z = z[z.columns[:10]]
        self.principal_coordinates_cols_ = z

        self.TF_matrix_ = TF_matrix_
        self.TFIDF_matrix_ = TFIDF_matrix_

    def ca_plot_of_keywords(self):
        """Scatter the keywords on the two selected CA dimensions."""
        self.apply()

        ##
        ## Selects the first n_factors to cluster
        ##
        X = pd.DataFrame(
            self.principal_coordinates_cols_,
            columns=["Dim-{}".format(i) for i in range(10)],
            index=self.TFIDF_matrix_.columns,
        )

        return xy_clusters_plot(
            x=X["Dim-{}".format(self.x_axis)],
            y=X["Dim-{}".format(self.y_axis)],
            x_axis_at=0,
            y_axis_at=0,
            labels=self.TFIDF_matrix_.columns,
            node_sizes=counters_to_node_sizes(self.TFIDF_matrix_.columns),
            color_scheme=self.color_scheme,
            xlabel="Dim-{}".format(self.x_axis),
            ylabel="Dim-{}".format(self.y_axis),
            figsize=(self.width, self.height),
        )

    def cluster_plot_of_keywords(self):
        """Cluster the keywords in CA space and plot the cluster centers."""
        self.apply()

        X = pd.DataFrame(
            self.principal_coordinates_cols_,
            columns=["Dim-{}".format(i) for i in range(10)],
            index=self.TFIDF_matrix_.columns,
        )

        (
            self.n_clusters,
            self.labels_,
            self.cluster_members_,
            self.cluster_centers_,
            self.cluster_names_,
        ) = clustering(
            X=X,
            method=self.clustering_method,
            n_clusters=self.n_clusters,
            affinity=self.affinity,
            linkage=self.linkage,
            random_state=self.random_state,
            top_n=self.top_n,
            name_prefix="Cluster {}",
        )

        ##
        ## Cluster filters
        ##
        # NOTE(review): generate_cluster_filters is not defined in this
        # class -- presumably provided by the Dashboard mixin when App
        # combines both; confirm before using Model standalone.
        self.generate_cluster_filters(
            terms=X.index,
            labels=self.labels_,
        )

        y = self.cluster_members_.copy()
        y = y.applymap(lambda w: pd.NA if w == "" else w)
        node_sizes = [500 + 2500 * len(y[m].dropna()) for m in y.columns]

        return xy_clusters_plot(
            x=self.cluster_centers_["Dim-{}".format(self.x_axis)],
            y=self.cluster_centers_["Dim-{}".format(self.y_axis)],
            x_axis_at=0,
            y_axis_at=0,
            # TODO(review): "CLUST_{} xxx" looks like a leftover
            # placeholder -- self.cluster_names_ is computed above but
            # never used; confirm intended labels.
            labels=["CLUST_{} xxx".format(i) for i in range(self.n_clusters)],
            node_sizes=node_sizes,
            color_scheme=self.color_scheme,
            xlabel="Dim-{}".format(self.x_axis),
            ylabel="Dim-{}".format(self.y_axis),
            figsize=(self.width, self.height),
        )

    def cluster_plot_of_documents(self):
        """Cluster the documents (rows) in CA space and plot the cluster
        centers."""
        self.apply()

        X = pd.DataFrame(
            self.principal_coordinates_rows_,
            columns=["Dim-{}".format(i) for i in range(10)],
            index=[
                "{} {}".format(i, i)
                for i in range(len(self.principal_coordinates_rows_))
            ],
        )

        (
            self.n_clusters,
            self.labels_,
            self.cluster_members_,
            self.cluster_centers_,
            self.cluster_names_,
        ) = clustering(
            X=X,
            method=self.clustering_method,
            n_clusters=self.n_clusters,
            affinity=self.affinity,
            linkage=self.linkage,
            random_state=self.random_state,
            top_n=self.top_n,
            name_prefix="Cluster {}",
        )

        ##
        ## Cluster filters
        ##
        # NOTE(review): see cluster_plot_of_keywords -- mixin-provided.
        self.generate_cluster_filters(
            terms=X.index,
            labels=self.labels_,
        )

        y = self.cluster_members_.copy()
        y = y.applymap(lambda w: pd.NA if w == "" else w)
        node_sizes = [500 + 2500 * len(y[m].dropna()) for m in y.columns]

        return xy_clusters_plot(
            x=self.cluster_centers_["Dim-{}".format(self.x_axis)],
            y=self.cluster_centers_["Dim-{}".format(self.y_axis)],
            x_axis_at=0,
            y_axis_at=0,
            # TODO(review): placeholder labels -- see cluster_plot_of_keywords.
            labels=["CLUST_{} xxx".format(i) for i in range(self.n_clusters)],
            node_sizes=node_sizes,
            color_scheme=self.color_scheme,
            xlabel="Dim-{}".format(self.x_axis),
            ylabel="Dim-{}".format(self.y_axis),
            figsize=(self.width, self.height),
        )
###############################################################################
##
## DASHBOARD
##
###############################################################################
# Candidate keyword columns for the analysis.
# NOTE(review): only referenced by the commented-out block in
# App.__init__ -- presumably kept for parity with sibling techminer
# dashboards; confirm before removing.
COLUMNS = [
    "Author_Keywords_CL",
    "Author_Keywords",
    "Index_Keywords_CL",
    "Index_Keywords",
    "Keywords_CL",
]
class App(Dashboard, Model):
    """ipywidgets dashboard around :class:`Model`.

    Builds the command panel, wires the widgets to
    ``interactive_output`` and toggles the clustering/visualization
    controls according to the selected menu entry.
    """

    def __init__(
        self,
        limit_to=None,
        exclude=None,
        years_range=None,
    ):
        data = filter_records(pd.read_csv("corpus.csv"))

        Model.__init__(
            self,
            data=data,
            limit_to=limit_to,
            exclude=exclude,
            years_range=years_range,
        )

        self.command_panel = [
            dash.HTML("Display:", hr=False, margin="0px, 0px, 0px, 5px"),
            dash.Dropdown(
                options=[
                    "CA plot of keywords",
                    "Cluster plot of keywords",
                    "Cluster plot of documents",
                ],
            ),
            dash.HTML("Parameters:"),
            dash.Dropdown(
                description="Column:",
                options=sorted(data.columns),
            ),
            dash.min_occurrence(),
            dash.max_items(),
            dash.HTML("Clustering:"),
            dash.clustering_method(),
            dash.n_clusters(m=3, n=50, i=1),
            dash.affinity(),
            dash.linkage(),
            dash.random_state(),
            dash.HTML("Visualization:"),
            dash.top_n(m=10, n=51, i=5),
            dash.color_scheme(),
            dash.x_axis(),
            dash.y_axis(),
            dash.fig_width(),
            dash.fig_height(),
        ]

        #
        # Wire every control widget to interactive_output by its
        # position in command_panel (HTML headers are skipped).
        #
        widgets.interactive_output(
            f=self.interactive_output,
            controls={
                # Display:
                "menu": self.command_panel[1],
                # Parameters:
                "column": self.command_panel[3],
                "min_occ": self.command_panel[4],
                "max_items": self.command_panel[5],
                # Clustering
                "clustering_method": self.command_panel[7],
                "n_clusters": self.command_panel[8],
                "affinity": self.command_panel[9],
                "linkage": self.command_panel[10],
                "random_state": self.command_panel[11],
                # Visualization
                "top_n": self.command_panel[13],
                "colors": self.command_panel[14],
                "x_axis": self.command_panel[15],
                "y_axis": self.command_panel[16],
                "width": self.command_panel[17],
                "height": self.command_panel[18],
            },
        )

        Dashboard.__init__(self)

    def interactive_output(self, **kwargs):
        """Propagate widget values and enable/disable controls for the
        currently selected menu entry."""
        Dashboard.interactive_output(self, **kwargs)

        def visualization_disabled():
            self.set_disabled("Color Scheme:")
            self.set_disabled("X-axis:")
            self.set_disabled("Y-axis:")
            self.set_disabled("Width:")
            self.set_disabled("Height:")

        def visualization_enabled():
            self.set_enabled("Color Scheme:")
            self.set_enabled("X-axis:")
            self.set_enabled("Y-axis:")
            self.set_enabled("Width:")
            self.set_enabled("Height:")

        # NOTE(review): "N Factors:" is toggled here but no such control
        # exists in command_panel -- presumably harmless; confirm.
        def clustering_disabled():
            self.set_disabled("N Factors:")
            self.set_disabled("Clustering Method:")
            self.set_disabled("N Clusters:")
            self.set_disabled("Affinity:")
            self.set_disabled("Linkage:")
            self.set_disabled("Random State:")

        def clustering_enabled():
            self.set_enabled("N Factors:")
            self.set_enabled("Clustering Method:")
            self.set_enabled("N Clusters:")
            self.set_enabled("Affinity:")
            self.set_enabled("Linkage:")
            self.set_enabled("Random State:")

        self.enable_disable_clustering_options(include_random_state=True)

        # Fix: the previous comparisons used stale labels
        # ("Correspondence analysis plot", "Cluster members",
        # "Cluster plot") that never matched the dropdown options
        # declared in __init__, so this logic was dead code.
        if self.menu == "CA plot of keywords":
            clustering_disabled()
            visualization_enabled()

        if self.menu in ("Cluster plot of keywords", "Cluster plot of documents"):
            clustering_enabled()
            visualization_enabled()
###############################################################################
##
## EXTERNAL INTERFACE
##
###############################################################################
def comparative_analysis(
    limit_to=None,
    exclude=None,
    years_range=None,
):
    """Build and launch the comparative-analysis dashboard.

    Returns whatever ``App.run()`` returns (the rendered widget).
    """
    dashboard = App(
        limit_to=limit_to,
        exclude=exclude,
        years_range=years_range,
    )
    return dashboard.run()
| StarcoderdataPython |
186411 | # coding: utf-8
"""
Copyright (c) 2014 <NAME>
Check LICENSE for details.
"""
from .jlink import ExecJLinkScriptCommand
class Erase(ExecJLinkScriptCommand):
    """J-Link command that erases the attached target's flash."""

    # Name of the J-Link command file driving the erase operation.
    SCRIPT = "erase.jlink"

    def execute(self):
        """Run the erase script and return the base runner's result."""
        script_name = self.SCRIPT
        return super(Erase, self).execute(script_name)
| StarcoderdataPython |
1721025 | # License: BSD 3 clause
import io, unittest
import numpy as np
import pickle
from scipy.sparse import csr_matrix
from tick.solver.tests import TestSolver
from tick.prox import ProxL1
from tick.linear_model import ModelLinReg, SimuLinReg
from tick.linear_model import ModelLogReg, SimuLogReg
from tick.linear_model import ModelPoisReg, SimuPoisReg
from tick.linear_model import ModelHinge, ModelQuadraticHinge, ModelSmoothedHinge
from tick.simulation import weights_sparse_gauss
class Test(TestSolver):
    def test_linear_model_serialization(self):
        """...Check that pickled linear models behave like the originals
        """
        simulators = {
            ModelLinReg: SimuLinReg,
            ModelLogReg: SimuLogReg,
            ModelPoisReg: SimuPoisReg,
            ModelHinge: SimuLogReg,
            ModelQuadraticHinge: SimuLogReg,
            ModelSmoothedHinge: SimuLogReg,
        }
        for model_class, simu_class in simulators.items():
            model = model_class(fit_intercept=False)
            weights = weights_sparse_gauss(20, nnz=5)
            intercept = None
            features, labels = simu_class(weights, intercept, n_samples=100,
                                          verbose=False,
                                          seed=123).simulate()
            model.fit(features, labels)
            restored = pickle.loads(pickle.dumps(model))
            self.assertTrue(model._model.compare(restored._model))
            self.assertEqual(
                model.loss(features[0]), restored.loss(features[0]))
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| StarcoderdataPython |
4826843 | import random
class biasedrandom():
    """random-style helpers whose draws can be biased toward chosen values.

    Every method takes ``biaschance``: the percent chance (0-100) that the
    result is taken from the caller-supplied biased values instead of a
    plain uniform draw.
    """

    def brandrangenum(self, start: int, stop: int, biasednumbers: list, biaschance: int):
        """Like random.randrange(start, stop), biased toward ``biasednumbers``."""
        if biaschance > 100:
            raise ValueError('biaschance cannot be bigger than 100.')
        for i in biasednumbers:
            # Range check first (original order preserved), then type check.
            if int(i) > stop or int(i) < start:
                raise ValueError('All biasednumber must be under stop and over start')
            elif isinstance(i, str):
                raise ValueError('Your biasednumbers have to be ints')
        randomNumber = random.randrange(start, stop)
        if random.uniform(0, 100) < biaschance:
            n = random.randrange(0, len(biasednumbers))
            return biasednumbers[n]
        else:
            return randomNumber

    def dbrandrangemol(self, start: int, stop: int, biasmorethan: int, biaslessthan: int, biaschance: int):
        """Like random.randrange(start, stop), biased into [biasmorethan, biaslessthan)."""
        if biaschance > 100:
            raise ValueError('biaschance cannot be bigger than 100.')
        if biasmorethan < start or biasmorethan > stop:
            raise ValueError('biasmorethan has to be more than start and less than stop and less than biaslessthan')
        elif biaslessthan < start or biaslessthan > stop:
            raise ValueError('biaslessthan has to be more than start and less than stop and more than biasmorethan')
        randomNumber = random.randrange(start, stop)
        if random.uniform(0, 100) < biaschance:
            return random.randrange(biasmorethan, biaslessthan)
        else:
            return randomNumber

    def brandintnum(self, start: int, stop: int, biasednumbers: list, biaschance: int):
        """Inclusive-stop variant of brandrangenum.

        Bug fix: the original called ``brandrangenum(...)`` without ``self``,
        raising NameError on every call.
        """
        return self.brandrangenum(start, stop + 1, biasednumbers, biaschance)

    def brandintmol(self, start: int, stop: int, morethan: int, lessthan: int, biaschance: int):
        """Inclusive-stop variant of dbrandrangemol.

        Bug fix: the original called ``brandintmol(...)`` (itself) without
        ``self`` -- a NameError, and once qualified it would have recursed
        forever.  It now delegates to dbrandrangemol as clearly intended.
        """
        return self.dbrandrangemol(start, stop + 1, morethan, lessthan, biaschance)

    def bchoice(self, choices: list, biasedchoices: list, biaschance: int):
        """Like random.choice(choices), biased toward ``biasedchoices``."""
        if biaschance > 100:
            raise ValueError('biaschance cannot be bigger than 100.')
        if not all(elem in choices for elem in biasedchoices):
            raise IndexError('Your biased choices have to be in your choices')
        choice = random.choice(choices)
        if random.uniform(0, 100) < biaschance:
            return biasedchoices[random.randrange(0, len(biasedchoices))]
        else:
            return choice

    def bsample(self, sequence: list, size: int, biasedchoices: list, biaschance: int):
        """Sample ``size`` distinct elements of ``sequence``, biased toward
        ``biasedchoices``.

        NOTE(review): like the original, this can loop forever when
        ``sequence`` contains fewer than ``size`` distinct values.
        """
        if biaschance > 100:
            raise ValueError('biaschance cannot be bigger than 100.')
        if size > (len(sequence) - 1):
            raise ValueError('size cannot be bigger than the length of sequence')
        if size < 1:
            raise ValueError('size cannot be smaller than zero')
        if not all(elem in sequence for elem in biasedchoices):
            raise IndexError('Your biased choices have to be in your choices')
        sample = []
        while True:
            # Kept even though unused: it consumes a draw, preserving the
            # original random stream.
            randomNum = random.randrange(0, len(sequence))
            randomPercent = random.uniform(0, 100)
            if randomPercent < biaschance:
                randomBias = random.randrange(0, len(biasedchoices))
                if biasedchoices[randomBias] not in sample:
                    sample.append(biasedchoices[randomBias])
                else:
                    # Biased pick already taken -> fall back to a plain draw.
                    randomChoice = sequence[random.randrange(0, len(sequence))]
                    if randomChoice not in sample:
                        sample.append(randomChoice)
            else:
                randomChoice = sequence[random.randrange(0, len(sequence))]
                if randomChoice not in sample:
                    sample.append(randomChoice)
            if len(sample) == size:
                break
        return sample

    def brandom(self, biasednumbers: list, biaschance: int):
        """Like random.random(), biased toward ``biasednumbers`` (each in [0, 1])."""
        if biaschance > 100:
            raise ValueError('biaschance cannot be bigger than 100.')
        for i in biasednumbers:
            # Bug fix: the original compared int(i), which truncates floats
            # and let out-of-range values such as -0.5 or 1.9 slip through.
            if i > 1 or i < 0:
                raise ValueError('All biasednumber must be under 1 and over 0')
        randomNumber = random.random()
        if random.uniform(0, 100) < biaschance:
            n = random.randrange(0, len(biasednumbers))
            return biasednumbers[n]
        else:
            return randomNumber

    def buniform(self, start: int, stop: int, biasednumbers: list, biaschance: int):
        """Like random.uniform(start, stop), biased toward ``biasednumbers``."""
        if biaschance > 100:
            raise ValueError('biaschance cannot be bigger than 100.')
        for i in biasednumbers:
            if int(i) > stop or int(i) < start:
                raise ValueError('All biasednumber must be under stop and over start')
            elif isinstance(i, str):
                raise ValueError('Your biasednumbers have to be numbers')
        randomNumber = random.uniform(start, stop)
        if random.uniform(0, 100) < biaschance:
            n = random.randrange(0, len(biasednumbers))
            return biasednumbers[n]
        else:
            return randomNumber
| StarcoderdataPython |
3308888 | <gh_stars>1-10
from app import db, login_manager
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
@login_manager.user_loader
def load_user(id):
    """Flask-Login callback: resolve a stored session id to a User row."""
    return User.query.get(int(id))
class User(UserMixin, db.Model):
    """Application account, authenticated via a salted password hash."""

    id = db.Column(db.Integer, primary_key = True) # User ID.
    email = db.Column(db.String(70), nullable = False, unique = True) # User E-Mail.
    # Bug fix: werkzeug's generate_password_hash() emits method-prefixed
    # digests (e.g. "pbkdf2:sha256:...") far longer than 32 characters;
    # String(32) truncated/rejected them, breaking every login.  Widened to
    # 255 (existing databases need a schema migration).
    password = db.Column(db.String(255), nullable = False) # User Password hash.
    name = db.Column(db.String(30), nullable = False) # User Name.
    note = db.Column(db.Text, nullable = True) # User Note.
    created_at = db.Column(db.DateTime, default = db.func.now(), nullable = False) # User Create Date.

    def generate_password_hash(self, password):
        """
        Hash *password* and store the digest on this instance.

        Note: deliberately shadows the imported helper of the same name; the
        unqualified call below resolves to the module-level function.
        """
        self.password = generate_password_hash(password)

    def check_password(self, password):
        """
        Return True if *password* matches the stored hash (login check).
        """
        return check_password_hash(self.password, password)
| StarcoderdataPython |
49586 | <filename>botc/gamemodes/sectsandviolets/Oracle.py
"""Contains the Oracle Character class"""
import json
from botc import Character, Townsfolk
from ._utils import SectsAndViolets, SnVRole
# Load this character's flavour/help text from the shared JSON bundle at
# import time, keyed by the lowercased role name.
with open('botc/gamemodes/sectsandviolets/character_text.json') as json_file:
    character_text = json.load(json_file)[SnVRole.oracle.value.lower()]
class Oracle(Townsfolk, SectsAndViolets, Character):
    """Oracle: Each night*, you learn how many dead players are evil.
    """

    def __init__(self):
        # NOTE(review): the three base __init__s are invoked explicitly and
        # in this exact order rather than via cooperative super(); keep the
        # order unless the bases are known to be order-independent.
        Character.__init__(self)
        SectsAndViolets.__init__(self)
        Townsfolk.__init__(self)

        # Card text pulled from the JSON bundle loaded at import time.
        self._desc_string = character_text["description"]
        self._examp_string = character_text["examples"]
        self._instr_string = character_text["instruction"]
        self._lore_string = character_text["lore"]

        # External resources and role identity.
        self._art_link = "https://bloodontheclocktower.com/wiki/images/2/26/Oracle_Token.png"
        self._wiki_link = "https://bloodontheclocktower.com/wiki/Oracle"
        self._role_enum = SnVRole.oracle
| StarcoderdataPython |
115534 | <filename>setup.py
import os
import sys
import re
import subprocess
import shlex
try:
from setuptools import setup
from setuptools.command.install import install
except ImportError:
from distutils.core import setup
from distutils.command.install import install
# Single source of truth for the package version; a release's git tag
# (optionally prefixed with 'v') must match this string.
VERSION = '0.4.1'
def get_tag_version():
    """Return the version tagged at git HEAD (leading 'v' stripped), or None.

    Exits the process if HEAD carries more than one tag.
    """
    cmd = 'git tag --points-at HEAD'
    tag_lines = subprocess.check_output(shlex.split(cmd)).splitlines()
    if not tag_lines:
        return None
    if len(tag_lines) != 1:
        sys.exit(f"Trying to get tag via git: Expected excactly one tag, got {len(tag_lines)}")
    version = tag_lines[0].decode()
    return version[1:] if re.match('^v[0-9]', version) else version
class VerifyVersionCommand(install):
    """ Custom command to verify that the git tag matches our version """
    description = 'verify that the git tag matches our version'

    def run(self):
        tag_version = get_tag_version()
        if tag_version and tag_version != VERSION:
            # Bug fix: the original interpolated the undefined name `tag`,
            # so a mismatch raised NameError instead of reporting the error.
            sys.exit(f"Git tag: {tag_version} does not match the version of this app: {VERSION}")
# Register the package with setuptools/distutils.  The custom `verify`
# command (defined above) lets CI confirm tag/version consistency.
setup(
    name='rest_framework_roles',
    version=VERSION,
    description='Role-based permissions for Django REST Framework and vanilla Django.',
    author='<NAME>',
    author_email='<EMAIL>',
    packages=['rest_framework_roles'],
    url='https://pypi.org/project/rest-framework-roles/',
    license='LICENSE',
    long_description=open('README.md').read(),
    long_description_content_type='text/markdown',
    install_requires=[],  # no runtime dependencies
    python_requires='>=3',
    keywords=[
        'permissions',
        'roles',
    ],
    classifiers=[
        'Framework :: Django',
        'Topic :: Security',
    ],
    cmdclass={
        'verify': VerifyVersionCommand,  # `python setup.py verify`
    },
)
| StarcoderdataPython |
93218 | <reponame>amsks/SMARTS
import math
import numpy as np
import pybullet
import pybullet_utils.bullet_client as bc
import pytest
from smarts.core.coordinates import Heading, Pose
from smarts.core.chassis import AckermannChassis
@pytest.fixture
def bullet_client():
    """Yield a DIRECT-mode (headless) pybullet client; disconnect on teardown."""
    client = bc.BulletClient(pybullet.DIRECT)
    yield client
    client.disconnect()
@pytest.fixture
def chassis(bullet_client):
    """Fresh AckermannChassis at the origin, facing heading pi/2."""
    return AckermannChassis(
        Pose.from_center([0, 0, 0], Heading(math.pi * 0.5)), bullet_client
    )
def step_with_vehicle_commands(bv, steps, throttle=0, brake=0, steering=0):
    # Apply the same control inputs for `steps` simulation ticks so the
    # chassis state settles before assertions are made.
    for _ in range(steps):
        bv.control(throttle, brake, steering)
        bv._client.stepSimulation()
def test_steering_direction(chassis):
    """Sign convention: positive steering command reads back positive."""
    # With no steering input the wheel should settle near zero.
    step_with_vehicle_commands(chassis, steps=100, steering=0)
    assert math.isclose(chassis.steering, 0, rel_tol=1e-2)

    # steer as far right as we can and test that the steering values we read
    # back also correspond to a right turn.
    step_with_vehicle_commands(chassis, steps=100, steering=1)
    assert chassis.steering > 0

    # steer as far left as we can and test that the steering values we read
    # back also correspond to a left turn.
    step_with_vehicle_commands(chassis, steps=100, steering=-1)
    assert chassis.steering < 0
def test_set_pose(chassis):
    """set_pose() must round-trip position and heading through the chassis."""
    target = Pose.from_center([137, -5.8, 1], Heading(1.8))
    chassis.set_pose(target)
    pose = chassis.pose
    position, heading = pose.position, pose.heading
    assert np.linalg.norm(position - np.array([137, -5.8, 1])) < 1e-16
    assert np.isclose(heading, 1.8)
| StarcoderdataPython |
1793075 | <reponame>simiotics/djangorestframework-queryfields<gh_stars>100-1000
from rest_framework.test import APIClient
from tests.utils import decode_content
def test_model_list_response_unfiltered():
    """Without a fields filter the list endpoint serializes every field."""
    content = decode_content(APIClient().get('/snippets/'))
    assert content == [
        {
            'id': 1,
            'title': 'Fork bomb',
            'code': ':(){ :|: & };:',
            'linenos': False,
            'language': 'bash',
        },
        {
            'id': 2,
            'title': 'French flag',
            'code': "print((u'\x1b[3%s;1m\u2588'*78+u'\n')%((4,)*26+(7,)*26+(1,)*26)*30)",
            'linenos': False,
            'language': 'python',
        },
    ]
def test_model_detail_response_unfiltered():
    """Without a fields filter the detail endpoint serializes every field."""
    content = decode_content(APIClient().get('/snippets/3/'))
    assert content == {
        'id': 3,
        'title': 'Russian roulette',
        'code': '[ $[ $RANDOM % 6 ] == 0 ] && rm -rf / || echo "click"',
        'linenos': False,
        'language': 'bash',
    }
def test_model_list_response_filtered_includes():
    """`fields=` whitelists exactly the named fields on list responses."""
    content = decode_content(APIClient().get('/snippets/?fields=title,language'))
    assert content == [
        {
            'title': 'Fork bomb',
            'language': 'bash',
        },
        {
            'title': 'French flag',
            'language': 'python',
        },
    ]
def test_model_detail_response_filtered_includes():
    """`fields=` whitelists exactly the named fields on detail responses."""
    content = decode_content(APIClient().get('/snippets/3/?fields=title,language'))
    assert content == {
        'title': 'Russian roulette',
        'language': 'bash',
    }
def test_model_list_response_filtered_excludes():
    """`fields!=` blacklists the named fields on list responses."""
    content = decode_content(APIClient().get('/snippets/?fields!=code,language'))
    assert content == [
        {
            'id': 1,
            'title': 'Fork bomb',
            'linenos': False,
        },
        {
            'id': 2,
            'title': 'French flag',
            'linenos': False,
        },
    ]
def test_model_detail_response_filtered_excludes():
    """`fields!=` blacklists the named fields on detail responses."""
    content = decode_content(APIClient().get('/snippets/3/?fields!=id,linenos,code'))
    assert content == {
        'title': 'Russian roulette',
        'language': 'bash',
    }
def test_model_response_filtered_with_some_bogus_fields():
    """Unknown names in `fields=` are ignored; known names still apply."""
    content = decode_content(APIClient().get('/snippets/3/?fields=title,spam,eggs'))
    assert content == {
        'title': 'Russian roulette',
    }
def test_model_response_filtered_with_only_bogus_fields():
    """A `fields=` filter with no valid names yields an empty payload."""
    content = decode_content(APIClient().get('/snippets/3/?fields=blah'))
    assert content == {}
def test_model_response_filtered_with_multiple_fields_in_separate_query_args():
    """Repeated `fields=` query args are merged into one whitelist."""
    content = decode_content(
        APIClient().get('/snippets/3/?fields=title&fields=linenos,language'))
    assert content == {
        'title': 'Russian roulette',
        'linenos': False,
        'language': 'bash',
    }
def test_model_response_filtered_with_include_and_exclude():
    """Mixing `fields=` and `fields!=` keeps the explicit includes."""
    content = decode_content(APIClient().get('/snippets/3/?fields=id&fields!=language'))
    assert content == {
        'id': 3,
    }
def test_model_exclude_wins_for_ambiguous_filtering():
    """A field both included and excluded ends up excluded."""
    content = decode_content(
        APIClient().get('/snippets/3/?fields=id,title,code&fields!=id'))
    assert content == {
        'title': 'Russian roulette',
        'code': '[ $[ $RANDOM % 6 ] == 0 ] && rm -rf / || echo "click"',
    }
| StarcoderdataPython |
3373860 | <reponame>mikephelan/opendp-ux
"""
Methods for retrieving custom fonts
"""
from pathlib import Path
from os.path import abspath, dirname, join
from borb.pdf.canvas.font.simple_font.true_type_font import TrueTypeFont
# Filesystem layout of the bundled fonts.
FONT_DIR = join(dirname(abspath(__file__)), 'static', 'fonts')
FONT_DIR_OPEN_SANS = join(FONT_DIR, 'Open_Sans', 'static', 'OpenSans')

# Symbolic font names.  Each value must be unique: they are the keys of
# FONT_INFO and LOADED_FONTS.
OPEN_SANS_LIGHT = 'OPEN_SANS_LIGHT'
# Bug fix: this was 'OPEN_SANS_LIGHT', identical to OPEN_SANS_LIGHT above,
# so the two FONT_INFO entries collided and the *Light* name silently
# resolved to the Regular .ttf.
OPEN_SANS_REGULAR = 'OPEN_SANS_REGULAR'
OPEN_SANS_SEMI_BOLD = 'OPEN_SEMI_BOLD'  # value oddly drops 'SANS'; unique, left as-is
OPEN_SANS_BOLD = 'OPEN_SANS_BOLD'
OPEN_SANS_ITALIC = 'OPEN_SANS_ITALIC'
OPEN_SANS_BOLD_ITALIC = 'OPEN_SANS_BOLD_ITALIC'

# Symbolic name -> .ttf path.
FONT_INFO = {
    OPEN_SANS_LIGHT: join(FONT_DIR_OPEN_SANS, 'OpenSans-Light.ttf'),
    OPEN_SANS_REGULAR: join(FONT_DIR_OPEN_SANS, 'OpenSans-Regular.ttf'),
    OPEN_SANS_BOLD: join(FONT_DIR_OPEN_SANS, 'OpenSans-Bold.ttf'),
    OPEN_SANS_SEMI_BOLD: join(FONT_DIR_OPEN_SANS, 'OpenSans-SemiBold.ttf'),
    OPEN_SANS_ITALIC: join(FONT_DIR_OPEN_SANS, 'OpenSans-Italic.ttf'),
    OPEN_SANS_BOLD_ITALIC: join(FONT_DIR_OPEN_SANS, 'OpenSans-BoldItalic.ttf'),
}

DEFAULT_FONT = OPEN_SANS_REGULAR  # fallback used by get_custom_font()

LOADED_FONTS = {}  # memoization cache: symbolic name -> TrueTypeFont
def get_custom_font(font_name):
    """Return a cached borb TrueTypeFont for *font_name*.

    Unknown names fall back to DEFAULT_FONT.  Loaded fonts are memoized in
    LOADED_FONTS so each .ttf file is parsed at most once.
    """
    # Has the font been loaded?
    if LOADED_FONTS.get(font_name):
        # return the cached font object
        return LOADED_FONTS[font_name]

    # Get the font path
    font_path = FONT_INFO.get(font_name)
    if not font_path:
        # Bug fix: the message lacked the f-string prefix, so the literal
        # text "{font_name}" was printed instead of the missing name.
        print(f'Font not found! {font_name}')
        if LOADED_FONTS.get(DEFAULT_FONT):
            return LOADED_FONTS[DEFAULT_FONT]
        font_path = FONT_INFO.get(DEFAULT_FONT)

    # Load the font and memoize it under the *requested* name so the
    # fallback is also cached for that name.
    font_obj = TrueTypeFont.true_type_font_from_file(Path(font_path))
    LOADED_FONTS[font_name] = font_obj
    return font_obj
# construct the Font object
# font_path: Path = Path(__file__).parent / "Jsfont-Regular.ttf"
# font: Font = TrueTypeFont.true_type_font_from_file(font_path)
| StarcoderdataPython |
4817501 | <gh_stars>0
# Copyright (c) 2017 <NAME>
# Software is licensed under the MIT License
# complete license can be found at https://github.com/karreric1/rms/
import numpy as np
import pandas as pd
import decimal
import matplotlib.pyplot as plt
def exp_generator(samples, mean):
    '''generates a list of exponentially distributed integer times

    Parameters
    ----------
    samples: integer, number of generated values
    mean: integer or float, mean value of distribution'''
    draws = []
    for _ in range(0, samples):
        value = int(round(np.random.exponential(mean), 0))
        # Minimum time between arrivals is 1 second.
        draws.append(value if value != 0 else int(1))
    return draws
def norm_generator(samples, mean, stdev):
    '''generates a list of normally distributed integer times, clamped to >= 1

    Parameters
    ----------
    samples: number of generated values
    mean: integer or float, mean value of distribution
    stdev: integer or float, standard deviation of distribution '''
    norm_list = []
    for i in range(0, samples):
        current = int(round(np.random.normal(mean, stdev), 0))
        # Bug fix: a normal draw can be negative, but inter-arrival times
        # must be at least 1 second; the original only remapped exactly 0,
        # letting negative times through.
        if current < 1:
            current = int(1)
        norm_list.append(current)
    return norm_list
def time_totalizer(values):
    '''generates an additive (cumulative) list of times since 0

    Parameters
    ----------
    values: list of randomly generated inter-arrival times'''
    running = values[0]
    totals = [running]
    for step in values[1:]:
        running += step
        totals.append(int(round(running, 0)))
    return totals
def parallel_plant(arrivals, service_times, breakdowns, initial_queue,
                   servers, duration, maint_times, maint_duration):
    '''simulates milking robot in a X/X/x queue with random breakdowns and
    deterministic downtime for maintenance. Arrivals and service times
    can be Markovian or Generally Distributed.

    Parameters
    ----------
    arrivals: list of arrival times in seconds (cumulative, see time_totalizer)
    service_times: list of service times in seconds
    breakdowns: list of breakdown times in seconds (currently unused)
    initial_queue: integer length of initial queue in number of customers
        (currently unused; the queue always starts empty)
    servers: integer number of parallel servers (1 or 2)
    duration: length of model run-time in seconds
    maint_times: list of seconds between maintenance periods for each server
    maint_duration: length of deterministic maintenance in seconds

    Returns
    -------
    tuple of (time_list, total_customers, customers_list, service_list,
    server1_downtime, server2_downtime, server1_occupied, server2_occupied,
    cumulative_customers), one entry per simulated second except the scalar
    total_customers.'''
    print(str(servers) + ' Servers Running')
    # initialize lists of output data
    time_list = []  # time in seconds after simulation start
    customers_list = []  # number of customers in system at each cycle
    service_list = []  # time in seconds of each service
    server1_downtime = []
    server2_downtime = []
    server1_occupied = []
    server2_occupied = []
    cumulative_customers = []
    # initialize counting within algorithm
    arrival_index = 0  # initialize use of first arr time from arrivals list
    service_index = 0  # initialize use of first svc time from arrivals list
    time_index = 0  # initialze time counter at 0
    queue_length = 0  # initialize queue length at 0
    cum_cust_count = 0
    # initialize customer status:
    # determines whether a customer is in server or not
    server1_status = 0  # 0 equals unoccupied. 1 equals occupied
    server2_status = 0  # 0 equals unoccupied. 1 equals occupied
    # initialize server up status - 1 = up, 0 = down for maintenance
    server1_available = 1
    server2_available = 1
    # initialize server maintenance time
    server1_maintenance_time = 0
    server2_maintenance_time = 0
    for i in range(0, duration):
        # keeping time in system
        time_index += 1  # increment time index by one unit
        time_list.append(time_index)  # add curr simulation time to time_list
        # bring servers down for scheduled maintenance
        # modulo of zero indicates time index is at a multiple of maint time
        if time_index % maint_times[0] == 0:
            server1_available = 0
            server1_maintenance_time = maint_duration  # down for maintenance
        if time_index % maint_times[1] == 3600:  # offset by 1 hour
            server2_available = 0
            server2_maintenance_time = maint_duration  # down for maintenance
        # keep servers down until maintenance time = 0
        if server1_available == 0:
            # decrease server maintenance time by 1 for each cycle
            server1_maintenance_time -= 1
            # if maintenance time expires, set server to available
            if server1_maintenance_time < 1:
                server1_available = 1
        if server2_available == 0:
            server2_maintenance_time -= 1
            if server2_maintenance_time < 1:
                server2_available = 1
        # log server uptime & downtime
        server1_downtime.append(server1_available)
        server2_downtime.append(server2_available)
        # add to queue as customers arrive
        # if arrival time equals simulation time, increment queue by 1
        if int(arrivals[arrival_index]) == int(time_index):
            queue_length += 1
            arrival_index += 1  # increment arr index by 1 to set next arr time
            cum_cust_count += 1  # increment cumulative customers by 1
        # move a customer into server if queue is greater than 0 and unoccupied
        # at least one cust in queue-therefore can move into server if empty
        if queue_length > 0:
            # run two servers in parallel
            if server1_available == 1:
                if server1_status == 0:  # if server is empty:
                    server1_status = 1  # set server to full
                    queue_length -= 1  # decrease queue by 1 customer
                    # set length of time for service
                    service1_time_remaining = service_times[service_index]
                    # create record of this service time. list of [server,time]
                    service_list.append([1, time_index,
                                         service_times[service_index]])
                    # increment service index for next customer
                    service_index += 1
        if servers == 2:  # run second server if servers = 2
            # re-check the queue: server 1 may have just emptied it
            if queue_length > 0:
                # print('second server up')
                if server2_available == 1:
                    if server2_status == 0:  # if server is empty:
                        server2_status = 1  # set server to full
                        queue_length -= 1  # decrease queue by 1 customer
                        # set length of time for service
                        service2_time_remaining = service_times[service_index]
                        # create record of service time.list of [server,time]
                        service_list.append([2, time_index,
                                             service_times[service_index]])
                        # increment service index for next customer
                        service_index += 1
        # serve customer in servers
        if server1_status == 1:  # if server is full:
            # decrease service time remaining by 1 second
            service1_time_remaining -= 1
            # current customer is about to leave server
            if service1_time_remaining < 1:
                server1_status = 0  # set server open for next customer
        if server2_status == 1:  # if server is full:
            # decrease service time remaining by 1 second
            service2_time_remaining -= 1
            # current customer is about to leave server
            if service2_time_remaining < 1:
                server2_status = 0  # set server open for next customer
        # per-second bookkeeping of queue and server occupancy
        customers_list.append(queue_length)
        server1_occupied.append(server1_status)
        server2_occupied.append(server2_status)
        cumulative_customers.append(cum_cust_count)
    total_customers = arrival_index
    return (time_list, total_customers, customers_list, service_list,
            server1_downtime, server2_downtime, server1_occupied,
            server2_occupied, cumulative_customers)
| StarcoderdataPython |
3282093 | <filename>kon/model/ctr_model/model/models.py
# _*_ coding:utf-8 _*_
'''=================================
@Author :tix_hjq
@Date :2020/5/3 上午9:13
@File :models.py
================================='''
from kon.utils.data_prepare import data_prepare, InputFeature
from kon.model.ctr_model.layer.behavior_layer.behavior_layer import *
from kon.model.ctr_model.layer.interactive_layer.interactive_layer import *
from kon.model.ctr_model.layer.core_layer.core_layer import *
# Module-wide display/runtime tweaks for interactive use.
warnings.filterwarnings("ignore")
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('max_colwidth', 100)
print(os.getcwd())
#----------------------------------------------------
# Project data layout, relative to this module.
data_folder='../../data/'
origin_data_folder=data_folder+'origin_data/'
submit_data_folder=data_folder+'submit_data/'
eda_data_folder=data_folder+'eda_data/'
fea_data_folder=data_folder+'fea_data/'
#-----------------------------------------------------------------
# model_tool=base_model(submit_data_folder)
# fea_tool=feature_tool(fea_data_folder)
# Shared feature-preparation helper used by the model builders below.
prepare_tool=data_prepare()
#-----------------------------------------------------------------
def TestModel(inputFea:InputFeature=None):
    """Minimal sanity-check model: sparse embeddings -> DNN -> score."""
    stacked = StackLayer()(inputFea.sparse_embed)
    hidden = DnnLayer(hidden_units=[100, 10])(stacked)
    score = MergeScoreLayer(use_merge=False)(hidden)
    all_inputs = inputFea.dense_inputs + inputFea.sparse_inputs + inputFea.seq_inputs
    return tf.keras.Model(all_inputs, score)
def FM(inputFea:InputFeature=None):
    """Factorization Machine: linear term plus pairwise interactions."""
    fm_out = FmLayer()([inputFea.sparse_embed, inputFea.linear_embed])
    score = MergeScoreLayer(use_merge=False)(tf.squeeze(fm_out, axis=1))
    return tf.keras.Model(inputFea.dense_inputs + inputFea.sparse_inputs, score)
def PNN(inputFea:InputFeature=None, hidden_units=None, use_inner=True, use_outer=True):
    """Product-based Neural Network.

    :param hidden_units: DNN tower sizes (default [256, 256, 256])
    :param use_inner: include inner-product (IPNN) features
    :param use_outer: include outer-product (OPNN) features
    """
    if hidden_units is None:
        hidden_units = [256, 256, 256]
    # Bug fix: the original did `cross_fea = inputFea.linear_embed` followed
    # by `cross_fea += ...`, which extends inputFea.linear_embed *in place*
    # and corrupts the shared InputFeature for any model built afterwards.
    # Copy the list before extending it.
    cross_fea = list(inputFea.linear_embed)
    if use_inner:
        cross_fea += IPnnLayer()(inputFea.sparse_embed)
    if use_outer:
        cross_fea += OPnnLayer()(inputFea.sparse_embed)
    cross_fea = StackLayer()(cross_fea)
    dnn_ = DnnLayer(hidden_units)(cross_fea)
    output = MergeScoreLayer(use_merge=False)(dnn_)
    return tf.keras.Model(inputFea.sparse_inputs, output)
def DeepCross(inputFea:InputFeature=None, hidden_units=None):
    """Plain deep model over dense inputs plus sparse embeddings."""
    hidden_units = [256, 256, 256] if hidden_units is None else hidden_units
    fused = StackLayer()(inputFea.dense_inputs + inputFea.sparse_embed)
    hidden = DnnLayer(hidden_units=hidden_units)(fused)
    score = MergeScoreLayer(use_merge=False)(hidden)
    return tf.keras.Model(inputFea.sparse_inputs, score)
def Wide_Deep(inputFea:InputFeature=None, hidden_units=None):
    """Wide & Deep: linear (wide) part merged with a DNN (deep) part."""
    hidden_units = [256, 128, 64] if hidden_units is None else hidden_units
    fused = StackLayer()(inputFea.dense_inputs + inputFea.sparse_embed)
    deep_part = DnnLayer(hidden_units=hidden_units)(fused)
    score = MergeScoreLayer()(inputFea.linear_embed + [deep_part])
    return tf.keras.Model(inputFea.dense_inputs + inputFea.sparse_inputs, score)
def DeepFM(inputFea:InputFeature=None, hidden_units=None):
    """DeepFM: FM interaction branch + DNN branch, merged into one score."""
    hidden_units = [256, 128, 64] if hidden_units is None else hidden_units
    fm_part = FmLayer()([inputFea.sparse_embed, inputFea.linear_embed])
    fused = StackLayer()(inputFea.dense_inputs + inputFea.sparse_embed)
    deep_part = DnnLayer(hidden_units=hidden_units)(fused)
    score = MergeScoreLayer()([fm_part, deep_part])
    return tf.keras.Model(inputFea.dense_inputs + inputFea.sparse_inputs, score)
def DCN(inputFea:InputFeature=None, hidden_units=None, cross_hidden=3):
    '''
    Deep & Cross Network.
    Notice:
        cross_hidden==> iter_num(x^k=w(x^k-1*x0)+b+x0)
    '''
    hidden_units = [256, 128, 64] if hidden_units is None else hidden_units
    fused = StackLayer()(inputFea.dense_inputs + inputFea.sparse_embed)
    cross_part = CrossLayer(cross_hidden=cross_hidden)(fused)
    deep_part = DnnLayer(hidden_units=hidden_units)(fused)
    score = MergeScoreLayer()([cross_part, deep_part])
    return tf.keras.Model(inputFea.dense_inputs + inputFea.sparse_inputs, score)
def NFM(inputFea:InputFeature=None, hidden_units=None):
    """Neural FM: bi-interaction pooling feeds a DNN; linear part added on."""
    hidden_units = [256, 128, 64] if hidden_units is None else hidden_units
    bi_interaction = InnerLayer(use_inner=True, use_add=True)(inputFea.sparse_embed)
    fused = StackLayer()(inputFea.dense_inputs + [bi_interaction])
    deep_out = DnnLayer(hidden_units=hidden_units, output_dim=1)(fused)
    merged = tf.keras.layers.Add()(inputFea.linear_embed + [deep_out])
    score = ScoreLayer()(merged)
    return tf.keras.Model(inputFea.dense_inputs + inputFea.sparse_inputs, score)
def XDeepFM(inputFea:InputFeature=None, conv_size=None, hidden_units=None):
    '''
    xDeepFM: CIN branch + DNN branch + linear part.
    :param conv_size:
        notice:conv_size decision Hk<size>
    '''
    conv_size = [200, 200, 200] if conv_size is None else conv_size
    hidden_units = [256, 128, 64] if hidden_units is None else hidden_units
    cin_inputs = tf.keras.layers.Concatenate(axis=1)(inputFea.sparse_embed)
    dnn_inputs = StackLayer()(inputFea.dense_inputs + inputFea.sparse_embed)
    cin_out = CIN(conv_size=conv_size, output_dim=1)(cin_inputs)
    deep_out = DnnLayer(hidden_units=hidden_units, output_dim=1)(dnn_inputs)
    score = ScoreLayer(use_add=True)([inputFea.linear_embed, cin_out, deep_out])
    return tf.keras.Model(inputFea.dense_inputs + inputFea.sparse_inputs, score)
def AFM(inputFea:InputFeature=None):
    """Attentional FM: attention-weighted pairwise interactions + linear part."""
    pairwise = InnerLayer()(inputFea.sparse_embed)
    attended = AttentionBaseLayer()(pairwise)
    score = ScoreLayer(use_add=True)(inputFea.linear_embed + [attended])
    return tf.keras.Model(inputFea.dense_inputs + inputFea.sparse_inputs, score)
def AutoInt(inputFea:InputFeature=None, attention_dim=8, attention_head_dim=3):
    '''
    notice:
        origin inputs=[dense+sparse],now inputs=[sparse]
        MultHeadAttentionLayer !support Bn&Add&Activate,
        because now want to as hidden of DnnLayer,update at soon...
    core:
        multHead to replace inner of fm
    '''
    # Stack the per-field embeddings along axis 1 for self-attention.
    cross_embed = StackLayer(use_flat=False, axis=1)(inputFea.sparse_embed)
    atten_layer = MultHeadAttentionLayer(attention_dim=attention_dim, attention_head_dim=attention_head_dim, use_ln=True, atten_mask_mod=1)
    # Multi-head self-attention used as the hidden layer of a DnnLayer.
    atten_vec = DnnLayer(res_unit=1, other_dense=[atten_layer])(cross_embed)
    # NOTE(review): splits along axis 0 and squeezes each slice -- presumably
    # flattening the attention output back to one vector per sample; confirm
    # the tensor layout before relying on this.
    final_input = StackLayer(use_flat=True, axis=-1)([tf.squeeze(i, 0) for i in tf.split(atten_vec, [1] * atten_vec.shape[0])])
    output = MergeScoreLayer(use_merge=False)(final_input)
    return tf.keras.Model(inputFea.dense_inputs + inputFea.sparse_inputs + inputFea.seq_inputs, output)
def DIN(inputFea:InputFeature=None, candidateFea=None, behaviorFea=None,
        attention_units=None, hidden_units=None):
    '''
    Deep Interest Network: weight the behavior sequence by its relevance
    to the candidate item (ActivationUnit attention), then classify.

    Notice:
        about data:
            build candidate need appear behavior,
            about cold start ==> not give method
        about candidate&behavior:
            must item appear behavior==>attention is useful
            because core is find useful seq_item,to metric seq_items
            paper view is all_seq===find===>nextClick relate part_item_seq to improve score
            not to find seq deal info
            find history seq mod===>activate new seq
        about Dice:
            !achieve because DnnLayer have bn
            ==>only use PRelu
    '''
    if hidden_units is None:
        hidden_units = [256, 256, 256]
    if attention_units is None:
        attention_units = [100, 64, 32]
    # Candidate-item embeddings extracted from the sparse features.
    candidate_embed = ExtractLayer(candidateFea, inputFea.sparse_inputs)(inputFea.sparse_embed)
    # Behavior-sequence embeddings; mask_zero so padded steps are ignored.
    behavior_embed = ExtractLayer(behaviorFea, inputFea.seq_inputs, mask_zero=True)(inputFea.seq_embed_list[0])
    base_behavior = SeqBaseLayer()(inputFea.seq_embed_list[0])
    # Attention over the behavior sequence, queried by the candidate.
    # seq_embed_list[1][0] is presumably the sequence padding mask -- TODO confirm.
    attention_behavior = ActivationUnitLayer(hidden_units=attention_units)([candidate_embed, behavior_embed], mask=inputFea.seq_embed_list[1][0])
    final_inputs = StackLayer(use_flat=True)(base_behavior + [attention_behavior])
    # PReLU used in place of Dice (see docstring note).
    mlp_output = DnnLayer(hidden_units=hidden_units, hidden_activate=tf.keras.layers.PReLU(), res_unit=2, use_bn=True)(final_inputs)
    output = MergeScoreLayer(use_merge=False)(mlp_output)
    return tf.keras.Model(inputFea.dense_inputs + inputFea.sparse_inputs + inputFea.seq_inputs, output)
def DIEN(inputFea:InputFeature=None, candidateFea=None, behaviorFea=None, classify_units=None, hidden_units=None,
         attention_units=None, sample_num=5, useCore=False):
    '''
    Deep Interest Evolution Network: GRU-based interest extraction with an
    auxiliary loss, followed by attention-guided interest evolution.

    notice:
        at auxLoss not use Dice as DnnLayer Activate...
        because i think sample seq to BN may be...
        In fact,default param:attention&auxLoss size not support BN,too
    :param classify_units: AuxLoss classify==DnnLayer ?please input hidden size:ignore
    :param hidden_units: final classify DnnLayer hidden size
    :param attention_units: attention classify DnnLayer hiddenSize of ActivateUnits
    :param sample_num: num of DIEN Nega sample item
    :param useCore: if True, return (output tensor, aux_loss) instead of a
        compiled Model; the caller must then attach aux_loss itself.
    '''
    if attention_units is None:
        attention_units = [100, 64, 32]
    if classify_units is None:
        classify_units = [100, 64, 32]
    if hidden_units is None:
        hidden_units = [256, 256, 256]
    behavior_embed = StackLayer(use_flat=False)(ExtractLayer(behaviorFea, inputFea.seq_inputs, mask_zero=True)(inputFea.seq_embed_list[0]))
    candidate_embed = StackLayer(use_flat=False)(ExtractLayer(candidateFea, inputFea.sparse_inputs)(inputFea.sparse_embed))
    # Negative samples for the auxiliary (next-behavior) loss.
    behavior_sample = SampleLayer(sample_num=sample_num)(behavior_embed)
    # Interest extraction: GRU hidden states + auxiliary classification loss.
    [hidden_list, aux_loss] = InterestExtratorLayer(classify_units=classify_units, sample_num=sample_num)([behavior_embed, behavior_sample], mask=inputFea.seq_embed_list[1][0])
    # Interest evolution: attention (queried by the candidate) over GRU states.
    final_hidden = InterestEolvingLayer(attention_units=attention_units)([candidate_embed, hidden_list], mask=inputFea.seq_embed_list[1][0])
    final_input = StackLayer()([final_hidden] + inputFea.sparse_embed)
    output = DnnLayer(hidden_units=hidden_units, hidden_activate=tf.keras.layers.PReLU())(final_input)
    output = MergeScoreLayer(use_merge=False, output_dim=2)(output)
    if useCore:
        return output, aux_loss
    model = tf.keras.Model(inputFea.dense_inputs + inputFea.sparse_inputs + inputFea.seq_inputs, output)
    # Attach the auxiliary loss so it is minimized alongside the main loss.
    model.add_loss(aux_loss)
    return model
def DSIN(inputFea:InputFeature=None, candidateFea=None, behaviorFea=None, attention_dim=8, attention_head_dim=5, ffn_hidden_unit=10,
         lstm_units=8, lstm_mode='sum', attention_units=None, classify_units=None, sessionMaxLen=10, sessionMaxNum=20):
    """Deep Session Interest Network: split behaviors into sessions,
    self-attend within each session, model cross-session evolution with a
    Bi-LSTM, and attend to both representations with the candidate as query."""
    if attention_units is None:
        attention_units = [100, 64, 32]
    if classify_units is None:
        classify_units = [100, 64, 32]
    behavior_embed = StackLayer(use_flat=False)(ExtractLayer(behaviorFea, inputFea.seq_inputs, mask_zero=True)(inputFea.seq_embed_list[0]))
    candidate_embed = StackLayer(use_flat=False)(ExtractLayer(candidateFea, inputFea.sparse_inputs)(inputFea.sparse_embed))
    # Split the flat behavior sequence into sessionMaxNum sessions of
    # sessionMaxLen steps each.
    pos_behavior = SessionDivisonLayer(sessionMaxLen=sessionMaxLen, sessionMaxNum=sessionMaxNum)(behavior_embed)
    # Per-session multi-head self-attention + FFN.
    self_behavior = SessionInterestExtractorLayer(attention_dim=attention_dim, attention_head_dim=attention_head_dim, ffn_hidden_unit=ffn_hidden_unit)(pos_behavior)
    self_atten = ActivationUnitLayer(attention_units, need_stack=False)([candidate_embed, self_behavior], mask=inputFea.seq_embed_list[1][0])
    # Cross-session interest evolution via a Bi-LSTM over session interests.
    hidden_behavior = SessionInterestInteractingLayer(biLstmUnit=lstm_units, lstm_mode=lstm_mode)(self_behavior)
    lstm_atten = ActivationUnitLayer(attention_units, need_stack=False)([candidate_embed, hidden_behavior], mask=inputFea.seq_embed_list[1][0])
    dnn_inputs = StackLayer(use_flat=True)(inputFea.sparse_embed + inputFea.dense_inputs + [self_atten, lstm_atten])
    output = DnnLayer(hidden_units=classify_units, use_bn=True, res_unit=2)(dnn_inputs)
    output = MergeScoreLayer(use_merge=False, output_dim=2)(output)
    return tf.keras.Model(inputFea.dense_inputs + inputFea.sparse_inputs + inputFea.seq_inputs, output)
def SeqFM(inputFea:InputFeature=None,hidden_units=None,res_unit=1,atten_dim=8,atten_head=1):
    """Sequence-aware Factorization Machine (SeqFM).

    Builds three attention "views" -- the static sparse features, the behavior
    sequence (with a triangular mask), and a cross view between them -- pools
    each view, runs the pooled views through a residual DNN, and merges the
    result with the linear term into a 2-class score.
    """
    def DynamicViewMask(seq_shape):
        """Triangular mask over the sequence (True where dynamic_mask == 0,
        i.e. i >= j); how it is applied depends on MultHeadAttentionLayer's
        atten_mask_mod=2 semantics."""
        max_len = seq_shape[1]
        dynamic_mask = tf.convert_to_tensor([[
            1.0 if i < j else 0.0 for j in range(max_len)
        ] for i in range(max_len)])
        seq_mask = tf.equal(dynamic_mask, 0)
        return seq_mask
    def CrossViewMask(cross_inputs:list):
        """Concatenate [sequence, sparse] along axis 1 and build a mask that is
        True exactly for cross-view (sequence vs. sparse) position pairs."""
        max_len,sparse_len = cross_inputs[0].shape[1],cross_inputs[1].shape[1]
        m = sparse_len - 1
        cross_mask = tf.convert_to_tensor([
            [0.0 if (i <= m and j > m) or (j <= m and i > m) else 1.0
            for j in range(max_len + sparse_len)] for i in range(max_len + sparse_len)])
        cross_mask = tf.equal(cross_mask, 0)
        inputs = tf.concat(cross_inputs, axis=1)
        return inputs, cross_mask
    if hidden_units is None:
        hidden_units = [atten_dim]*2
    linear = SparseEmbed(inputFea.sparse_info, is_linear=True)(inputFea.sparse_inputs)
    sparse_embed = StackLayer(axis=1,use_flat=False)(SparseEmbed(inputFea.sparse_info, use_flatten=False)(inputFea.sparse_inputs))
    seq_embed= StackLayer(use_flat=False)(SparseEmbed(inputFea.seq_info, use_flatten=False, is_linear=False,mask_zero=False)(inputFea.seq_inputs))
    # Static view: self-attention over the sparse-feature embeddings, then pooling.
    sparse_atten=MultHeadAttentionLayer(attention_dim=atten_dim,attention_head_dim=atten_head)(sparse_embed)
    sparse_view=IntraViewPoolingLayer()(sparse_atten)
    # Temporal view: masked self-attention over the behavior sequence.
    seq_mask=DynamicViewMask(seq_embed.shape)
    seq_atten=MultHeadAttentionLayer(attention_dim=atten_dim,attention_head_dim=atten_head,atten_mask_mod=2)(seq_embed,mask=seq_mask)
    seq_view=IntraViewPoolingLayer()(seq_atten)
    # Cross view: attention restricted to sequence-vs-sparse pairs.
    cross_inputs,cross_mask=CrossViewMask([seq_embed,sparse_embed])
    cross_atten=MultHeadAttentionLayer(attention_dim=atten_dim,attention_head_dim=atten_head,atten_mask_mod=2)(cross_inputs,mask=cross_mask)
    cross_view=IntraViewPoolingLayer()(cross_atten)
    ffn_inputs=StackLayer(use_flat=False,axis=1)([sparse_view,seq_view,cross_view])
    ffn_output=DnnLayer(hidden_units=hidden_units,use_flatten=True,use_ln=True,use_bn=False,res_unit=res_unit)(ffn_inputs)
    output=MergeScoreLayer(use_merge=True,output_dim=2)(linear+[ffn_output])
    # output=ScoreLayer(use_add=True,use_global=True)(linear + [ffn_output])
    return tf.keras.Model(inputFea.dense_inputs+inputFea.sparse_inputs+inputFea.seq_inputs,output)
def DTS(inputFea:InputFeature=None,userFea:list=None,timestampFea:list=None,behaviorFea:list=None,targetFea:list=None,
        ode_mode=1,sample_num=1,is_train=True,loss_lambda:float=0.5):
    """Deep Time-Stream (DTS) model.

    Evolves the behavior/target embeddings through a TimeStreamLayer (ODE
    based) conditioned on timestamps and the user embedding, mean-pools the
    evolved behavior states, and classifies them together with the remaining
    sparse embeddings. The TimeStreamLayer's auxiliary loss is attached to
    the model via add_loss.

    :param loss_lambda: weight of the auxiliary loss (annotation fixed: the
        default 0.5 is a float, not an int)
    """
    # Add a trailing channel so timestamps enter the ODE solver as (..., 1).
    timestampEmbed = tf.expand_dims(StackLayer(use_flat=False)(ExtractLayer(timestampFea, inputFea.seq_inputs)(inputFea.seq_embed_list[0])),axis=-1)
    behaviorEmbed = StackLayer(use_flat=False)(ExtractLayer(behaviorFea, inputFea.seq_inputs)(inputFea.seq_embed_list[0]))
    userEmbed = tf.squeeze(StackLayer(use_flat=False)(ExtractLayer(userFea, inputFea.sparse_inputs)(inputFea.sparse_embed)),axis=1)
    # need_remove=True also returns the sparse embeddings *without* the target,
    # which are reused for the classifier input below.
    targetEmbed,sparse_embed=ExtractLayer(targetFea, inputFea.sparse_inputs,need_remove=True)(inputFea.sparse_embed)
    behaviorEmbed=[behaviorEmbed,targetEmbed]
    behavior,loss_=TimeStreamLayer(sample_num=sample_num,ode_mode=ode_mode,trainable=is_train,loss_lambda=loss_lambda)([timestampEmbed,userEmbed,behaviorEmbed],mask=inputFea.seq_embed_list[1][0])
    # NOTE(review): targetItem is unpacked here but never used afterwards.
    behavior,targetItem=behavior[0],behavior[1]
    # Mean-pool the evolved behavior sequence into a single vector.
    behavior=tf.reduce_mean(behavior,axis=1)
    dnn_input=StackLayer(use_flat=True)([behavior]+sparse_embed)
    dnn_output=DnnLayer(hidden_units=[64,32])(dnn_input)
    output=MergeScoreLayer(use_merge=False)(dnn_output)
    model=tf.keras.Model(inputFea.dense_inputs+inputFea.sparse_inputs+inputFea.seq_inputs,output)
    model.add_loss(loss_)
    return model
def BST(inputFea:InputFeature=None,behaviorFea=None,
        attention_units=8,hidden_units=None,ffn_hidden_unit=8,attention_head=3):
    """Behavior Sequence Transformer (BST).

    Runs self-attention (transformer) blocks over chunks of the behavior
    embedding sequence, then concatenates the transformer features with the
    sparse embeddings for a DNN classifier.
    """
    if hidden_units is None:
        hidden_units = [100, 64, 32]
    behaviorEmb=StackLayer(use_flat=False,axis=1)(ExtractLayer(behaviorFea,inputFea.seq_inputs)(inputFea.seq_embed_list[0]))
    # Split the sequence into chunks of 2 and self-attend within each chunk.
    # NOTE(review): the split sizes [2]*(len//2) must sum to the sequence
    # length, so this assumes an even sequence length -- verify that upstream
    # padding guarantees it, otherwise tf.split will raise.
    transformerFea=[SelfAttentionLayer(attention_dim=attention_units,attention_head_dim=attention_head,ffn_hidden_unit=ffn_hidden_unit)(emb_)
                   for emb_ in tf.split(behaviorEmb,[2]*(behaviorEmb.shape[1]//2),axis=1)]
    dnn_inputs=StackLayer(axis=-1)(inputFea.sparse_embed+transformerFea)
    dnn_output=DnnLayer(hidden_units=hidden_units)(dnn_inputs)
    output=MergeScoreLayer(use_merge=False)(dnn_output)
    return tf.keras.Model(inputFea.dense_inputs+inputFea.sparse_inputs+inputFea.seq_inputs,output)
def MIMN(inputFea:InputFeature=None,behaviorFea=None,candidateFea=None,
         controller_hidden_units=None,attention_hidden=None,classify_hidden=None,channel_dim=20,memory_slots=128,
         memory_bits=20, mult_head=3,use_miu=True):
    '''
    Multi-channel user Interest Memory Network (MIMN).

    Encodes the behavior sequence into an external neural memory (UICLayer),
    attends the candidate item over the per-channel interest S, performs one
    more candidate-conditioned memory read, and classifies from the attended
    features plus all sparse embeddings. The UIC regularization loss is
    attached to the model via add_loss.

    Warning!!! MIMN need set static batchSize==>please set date_prepare(batch_size=?[e.g 32]),
    not support dynamic batchSize!!!

    :param behaviorFea: names of the sequence features forming the behavior log
    :param candidateFea: names of the sparse features describing the candidate
    :return: tf.keras.Model with the memory regularization loss attached
    '''
    if not controller_hidden_units:
        controller_hidden_units=[128,64]
    if not attention_hidden:
        attention_hidden = [128, 64]
    if not classify_hidden:
        classify_hidden = [128, 64]
    seq_embed=StackLayer(use_flat=False)(ExtractLayer(need_fea=behaviorFea,need_inputs=inputFea.seq_inputs)(inputFea.seq_embed_list[0]))
    target_embed=StackLayer(use_flat=False)(ExtractLayer(need_fea=candidateFea,need_inputs=inputFea.sparse_inputs,need_remove=False)(inputFea.sparse_embed))
    # UIC memory pass: M = memory matrix, pre_read/pre_readW = last read vector
    # and addressing weights, S = per-channel interest, loss = regularizer.
    [M,pre_read,pre_readW,_,S,__,loss]=UICLayer(controller_network=DnnLayer(hidden_units=controller_hidden_units),
                                                controller_input_flat=True, channel_dim=channel_dim,memory_slots=memory_slots,
                                                memory_bits=memory_bits, mult_head=mult_head, use_miu=use_miu,return_hidden=True)(seq_embed)
    # Fix: removed a leftover debug `print(loss)` that spammed stdout on every
    # model build.
    sFea=ActivationUnitLayer(hidden_units=attention_hidden,need_stack=False)([target_embed, S])
    read_input, readW = ReadLayer(addressCal=AddressCalLayer())([pre_readW, M, pre_read])
    mFea=ControlWrapLayer(controller_network=DnnLayer(controller_hidden_units)
                          ,controller_input_flat=True)([tf.squeeze(target_embed,axis=1), read_input, pre_read])[1]
    dnn_inputs=StackLayer(use_flat=True)([sFea,mFea]+inputFea.sparse_embed)
    dnn_output=DnnLayer(hidden_units=classify_hidden)(dnn_inputs)
    output=MergeScoreLayer(use_merge=False)(dnn_output)
    model=tf.keras.Model(inputFea.dense_inputs+inputFea.sparse_inputs+inputFea.seq_inputs,output)
    model.add_loss(loss)
    return model
def DSTN(inputFea:InputFeature=None):
    """Deep Spatio-Temporal Network -- placeholder, not implemented yet."""
    pass
    # [dense_inputs, sparse_inputs, seq_inputs] = prepare_tool.df_prepare(sparseInfo=sparseInfo, denseInfo=denseInfo,seqInfo=seqInfo)
    # sparse_embed = SparseEmbed(sparseInfo, use_flatten=False)(sparse_inputs)
def SIM(inputFea:InputFeature=None,reduceFea:list=None,candidateFea:list=None,behaviorFea:list=None,
        attention_dim:int=None,attention_head_dim:int=None,hidden_units:list=None,
        recent_classify_units=None,recent_hidden_units:list=None,recent_attention_units:list=None,sample_num=5
        ):
    '''Search-based Interest Model (SIM), hard-search variant.

    :param reduceFea: sequence features for the hard-search (long-term) branch
    warning:
        1. only the hard search is built; the soft search is not implemented
           (no ALSH indexing yet)
        2. the example wiring is simplified: the recent-behavior branch (DIEN)
           should be fed only recent logs, not the whole sequence
    '''
    if hidden_units is None:
        hidden_units = [128,128,128]
    if attention_dim is None:
        attention_dim = 3
    if attention_head_dim is None:
        attention_head_dim = 3
    if recent_hidden_units is None:
        recent_hidden_units = [128,128,128]
    if recent_attention_units is None:
        recent_attention_units = [128,128,128]
    if recent_classify_units is None:
        recent_classify_units = [128,128,128]
    # Long-term branch: hard-searched sequence -> ESU attention -> DNN.
    reduceSeq=ExtractLayer(reduceFea,inputFea.seq_inputs)(inputFea.seq_embed_list[0])
    reduceSeq=ESULayer(attention_dim=attention_dim,attention_head_dim=attention_head_dim)(reduceSeq)
    dnnOutput=DnnLayer(hidden_units=hidden_units,hidden_activate=tf.keras.layers.PReLU(),use_bn=True,use_flatten=True)(reduceSeq)
    # Short-term branch: DIEN over recent behavior (useCore=True returns the
    # raw output plus the auxiliary loss instead of a full model).
    shortOutput,auxLoss=DIEN(inputFea,candidateFea=candidateFea,behaviorFea=behaviorFea,classify_units=recent_classify_units,
                             hidden_units=recent_hidden_units,attention_units=recent_attention_units,sample_num=sample_num,useCore=True)
    # NOTE(review): shortOutput is never fed into the final score below -- only
    # its auxiliary loss is kept. Verify whether the long- and short-term
    # outputs were meant to be merged before MergeScoreLayer.
    output=MergeScoreLayer(use_merge=False)(dnnOutput)
    model=tf.keras.Model(inputFea.dense_inputs+inputFea.sparse_inputs+inputFea.seq_inputs,output)
    model.add_loss(auxLoss)
    return model
| StarcoderdataPython |
1635757 | import torch
import torch.nn as nn
import torch.nn.functional as F
class MethEncoder(nn.Module):
    """Learned per-feature embedding scaled by the input values.

    Holds a (feature_size, embedding_size) parameter matrix; the forward pass
    broadcasts each scalar input feature across its embedding row.
    """

    def __init__(self, feature_size, embedding_size):
        super(MethEncoder, self).__init__()
        # Fix: nn.Parameter already sets requires_grad=True, so passing
        # requires_grad=True to torch.randn as well was redundant.
        self.embeddings = nn.Parameter(torch.randn(feature_size, embedding_size))

    def forward(self, x):
        """Scale each feature's embedding row by the feature value.

        :param x: (batch, feature_size) tensor -- assumed 2-D; TODO confirm
            with callers.
        :return: (batch, feature_size, embedding_size) tensor
        """
        # (batch, feature_size, 1) * (feature_size, embedding_size)
        # broadcasts to (batch, feature_size, embedding_size).
        x = torch.unsqueeze(x, 2)  # add in dimension
        x = self.embeddings * x
        return x
class SPECTEncoder(nn.Module):
    """3-D CNN feature extractor for single-channel SPECT volumes.

    Three conv+ReLU+pool stages (1 -> 16 -> 32 -> 64 channels) with dropout
    after the second stage, followed by a final 64-channel convolution that
    is returned without an activation.
    """

    def __init__(self):
        super(SPECTEncoder, self).__init__()
        # Layer attribute names are part of the checkpoint format -- unchanged.
        self.conv1 = nn.Conv3d(1, 16, 3, 1)
        self.maxpool1 = nn.MaxPool3d(2)
        self.conv2 = nn.Conv3d(16, 32, 3, 1)
        self.maxpool2 = nn.MaxPool3d(2)
        self.do1 = nn.Dropout(0.25)
        self.conv3 = nn.Conv3d(32, 64, 3, 1)
        self.maxpool3 = nn.MaxPool3d(2)
        self.conv4 = nn.Conv3d(64, 64, 3, 1)

    def forward(self, volume):
        """Encode a (batch, 1, D, H, W) volume into conv4 feature maps."""
        # Stage 1: conv -> ReLU -> 2x downsample.
        out = self.maxpool1(F.relu(self.conv1(volume)))
        # Stage 2: conv -> ReLU -> 2x downsample -> dropout.
        out = self.do1(self.maxpool2(F.relu(self.conv2(out))))
        # Stage 3: conv -> ReLU -> 2x downsample.
        out = self.maxpool3(F.relu(self.conv3(out)))
        # Final convolution, no activation (original behavior preserved).
        return self.conv4(out)
| StarcoderdataPython |
1652884 | <gh_stars>0
import tensorflow as tf

# Tiny TF1 static-graph example: z = x * y with constant-initialized variables.
x = tf.Variable(tf.constant(2))
y = tf.Variable(tf.constant(3))
z = x * y

# Fix: tf.initialize_all_variables() was deprecated in TF 0.12 and removed
# after 2017-03; tf.global_variables_initializer() is the supported TF1
# equivalent with identical behavior here.
init = tf.global_variables_initializer()

session = tf.Session()
session.run(init)
print(session.run(z))  # 2 * 3 -> 6
| StarcoderdataPython |
1623397 | <reponame>ParadoxARG/Recognizers-Text<filename>Python/libraries/recognizers-number/recognizers_number/number/japanese/extractors.py
from typing import List
from enum import Enum
from recognizers_number.number.extractors import ReVal, BaseNumberExtractor
from recognizers_text.utilities import RegExpUtility
from recognizers_number.number.constants import Constants
from recognizers_number.resources.japanese_numeric import JapaneseNumeric
class JapaneseNumberExtractorMode(Enum):
    """Selects how aggressively integer patterns match Japanese text."""
    # Use an allow list so numerals embedded in words (e.g. the 九 in 西九条)
    # are not extracted.
    DEFAULT = 0
    # Extract every potential number, even inside other tokens (useful for
    # units extraction).
    EXTRACT_ALL = 1
class JapaneseNumberExtractor(BaseNumberExtractor):
    """Composite extractor for any Japanese number (cardinals and fractions)."""

    @property
    def regexes(self) -> List[ReVal]:
        return self.__regexes

    @property
    def _extract_type(self) -> str:
        return Constants.SYS_NUM

    def __init__(self, mode: JapaneseNumberExtractorMode = JapaneseNumberExtractorMode.DEFAULT):
        # Combine the cardinal patterns (mode-dependent) with the fraction
        # patterns, cardinals first.
        patterns: List[ReVal] = []
        patterns += JapaneseCardinalExtractor(mode).regexes
        patterns += JapaneseFractionExtractor().regexes
        self.__regexes: List[ReVal] = patterns
class JapaneseCardinalExtractor(BaseNumberExtractor):
    """Extractor for Japanese cardinal numbers (integers and doubles)."""

    @property
    def regexes(self) -> List[ReVal]:
        return self.__regexes

    @property
    def _extract_type(self) -> str:
        return Constants.SYS_NUM_CARDINAL

    def __init__(self, mode: JapaneseNumberExtractorMode = JapaneseNumberExtractorMode.DEFAULT):
        # A cardinal is either an integer (mode-dependent patterns) or a
        # floating-point number; integers come first.
        patterns: List[ReVal] = []
        patterns += JapaneseIntegerExtractor(mode).regexes
        patterns += JapaneseDoubleExtractor().regexes
        self.__regexes: List[ReVal] = patterns
class JapaneseIntegerExtractor(BaseNumberExtractor):
    """Extracts integers from Japanese text: digit strings (with suffixes and
    thousands separators) and kanji/kana numerals including dozen forms."""
    @property
    def regexes(self) -> List[ReVal]:
        return self.__regexes
    @property
    def _extract_type(self) -> str:
        return Constants.SYS_NUM_INTEGER
    def __init__(self, mode: JapaneseNumberExtractorMode = JapaneseNumberExtractorMode.DEFAULT):
        # Each ReVal pairs a compiled pattern with the subtype tag attached to
        # the match ('IntegerNum' for digits, 'IntegerJpn' for kanji/kana).
        self.__regexes = [
            # 123456, -123456
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.NumbersSpecialsChars),
                val='IntegerNum'),
            # 15k, 16 G
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.NumbersSpecialsCharsWithSuffix),
                val='IntegerNum'),
            # 1,234, 2,332,111
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.DottedNumbersSpecialsChar),
                val='IntegerNum'),
            # 半百 半ダース
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.NumbersWithHalfDozen),
                val='IntegerJpn'),
            # 一ダース 五十ダース
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.NumbersWithDozen),
                val='IntegerJpn')
        ]
        # The kanji-numeral pattern depends on the mode: DEFAULT filters with
        # an allow list, EXTRACT_ALL matches every potential numeral.
        if mode == JapaneseNumberExtractorMode.DEFAULT:
            self.__regexes.append(
                # 一百五十五, 负一亿三百二十二. Uses an allow list to avoid extracting "西九条" from "九"
                ReVal(
                    re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.NumbersWithAllowListRegex),
                    val='IntegerJpn'
                )
            )
        elif mode == JapaneseNumberExtractorMode.EXTRACT_ALL:
            self.__regexes.append(
                # 一百五十五, 负一亿三百二十二, "西九条" from "九". Uses no allow lists and extracts all potential integers (useful in Units, for example).
                ReVal(
                    re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.NumbersAggressiveRegex),
                    val='IntegerJpn'
                )
            )
class JapaneseDoubleExtractor(BaseNumberExtractor):
    """Extracts floating-point numbers: decimals, multiplier suffixes,
    Japanese thousands units, and exponential/scientific notation."""
    @property
    def regexes(self) -> List[ReVal]:
        return self.__regexes
    @property
    def _extract_type(self) -> str:
        return Constants.SYS_NUM_DOUBLE
    def __init__(self):
        # Subtype tags: 'DoubleNum' for plain decimals, 'DoubleJpn' for
        # Japanese unit forms, 'DoublePow' for exponent forms.
        self.__regexes = [
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.DoubleSpecialsChars),
                val='DoubleNum'),
            # (-)2.5, can avoid cases like ip address xx.xx.xx.xx
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.DoubleSpecialsCharsWithNegatives),
                val='DoubleNum'),
            # (-).2
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.SimpleDoubleSpecialsChars),
                val='DoubleNum'),
            # 1.0 K
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.DoubleWithMultiplierRegex),
                val='DoubleNum'),
            # 15.2万
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.DoubleWithThousandsRegex),
                val='DoubleJpn'),
            # 2e6, 21.2e0
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.DoubleExponentialNotationRegex),
                val='DoublePow'),
            # 2^5
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.DoubleScientificNotationRegex),
                val='DoublePow')
        ]
class JapaneseFractionExtractor(BaseNumberExtractor):
    """Extracts fractions: digit notation (a b/c, b/c) and Japanese
    分の forms."""
    @property
    def regexes(self) -> List[ReVal]:
        return self.__regexes
    @property
    def _extract_type(self) -> str:
        return Constants.SYS_NUM_FRACTION
    def __init__(self):
        self.__regexes = [
            # -4 5/2, 4 6/3
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.FractionNotationSpecialsCharsRegex),
                val='FracNum'),
            # 8/3
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.FractionNotationRegex),
                val='FracNum'),
            # 五分の二 七分の三
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.AllFractionNumber),
                val='FracJpn')
        ]
class JapaneseOrdinalExtractor(BaseNumberExtractor):
    """Extracts ordinals: だい-prefixed numerals/digits and 折 discount forms."""
    @property
    def regexes(self) -> List[ReVal]:
        return self.__regexes
    @property
    def _extract_type(self) -> str:
        return Constants.SYS_NUM_ORDINAL
    def __init__(self):
        self.__regexes = [
            # だい一百五十四
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.OrdinalRegex),
                val='OrdinalJpn'),
            # だい2565
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.OrdinalNumbersRegex),
                val='OrdinalJpn'),
            # 2折 2.5折
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.NumbersFoldsPercentageRegex),
                val='OrdinalJpn')
        ]
class JapanesePercentageExtractor(BaseNumberExtractor):
    """Extracts percentages: パーセント forms ('PerNum'/'PerJpn') and the
    割/折 discount-fold forms ('PerSpe')."""
    @property
    def regexes(self) -> List[ReVal]:
        return self.__regexes
    @property
    def _extract_type(self) -> str:
        return Constants.SYS_NUM_PERCENTAGE
    def __init__(self):
        self.__regexes = [
            # 百パーセント 十五パーセント
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.SimplePercentageRegex),
                val='PerJpn'),
            # 19パーセント 1パーセント
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.NumbersPercentagePointRegex),
                val='PerNum'),
            # 3,000パーセント 1,123パーセント
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.NumbersPercentageWithSeparatorRegex),
                val='PerNum'),
            # 3.2 k パーセント
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.NumbersPercentageWithMultiplierRegex),
                val='PerNum'),
            # 15kパーセント
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.SimpleNumbersPercentageWithMultiplierRegex),
                val='PerNum'),
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.SimpleIntegerPercentageRegex),
                val='PerNum'),
            # 2割引 2.5割引
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.NumbersFoldsPercentageRegex),
                val='PerSpe'),
            # 三割引 六点五折 七五折
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.FoldsPercentageRegex),
                val='PerSpe'),
            # 5割 7割半
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.SimpleFoldsPercentageRegex),
                val='PerSpe'),
            # 七割半
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.SpecialsPercentageRegex),
                val='PerSpe'),
            # 2割 2.5割
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.NumbersSpecialsPercentageRegex),
                val='PerSpe'),
            # 三割
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.SimpleSpecialsPercentageRegex),
                val='PerSpe'),
            ReVal(
                re=RegExpUtility.get_safe_reg_exp(JapaneseNumeric.SpecialsFoldsPercentageRegex),
                val='PerSpe')
        ]
| StarcoderdataPython |
1732658 | # Databricks notebook source
import pandas as pd
import math
import matplotlib.pyplot as plt
import numpy as np
# COMMAND ----------
# MAGIC %md
# MAGIC #REGRESSION MODEL NOTES
# MAGIC ## We Can Conduct a few different version of this regression model by changing the dependent and independent variables
# MAGIC **Dependent Variable**
# MAGIC We can elect to make the dependent variable round score or tournament score
# MAGIC
# MAGIC *Round Score* - would give us more data points, but could also cause higher variation
# MAGIC
# MAGIC *Tournament Score* - would seem to be the better fit, but we may not have enough data points
# MAGIC
# MAGIC The dependent variable can also refer to tournament score across all tournaments, or for a specific tournament
# MAGIC
# MAGIC **Independent Variables**
# MAGIC
# MAGIC 4 major groups of Independent Variables
# MAGIC
# MAGIC *Greens In Regulation*: Describes how frequently the player makes it onto the green at least 2 strokes away from par, based on a number of situations. Evaluates a player's skill in the fairways/middle game
# MAGIC
# MAGIC Consists of ['GIR_PCT_FAIRWAY_BUNKER', 'GIR_PCT_FAIRWAY', 'GIR_PCT_OVERALL', 'GIR_PCT_OVER_100', 'GIR_PCT_OVER_200','GIR_PCT_UNDER_100', 'GREEN_PCT_SCRAMBLE_SAND', 'GREEN_PCT_SCRAMBLE_ROUGH']
# MAGIC
# MAGIC *Tee Box*: Describes different elements of a players driving/tee shots. Evaluates a players skill off the tee/long game
# MAGIC
# MAGIC Consists of ['TEE_AVG_BALL_SPEED', 'TEE_AVG_DRIVING_DISTANCE', 'TEE_DRIVING_ACCURACY_PCT', 'TEE_AVG_LAUNCH_ANGLE', 'TEE_AVG_LEFT_ROUGH_TENDENCY_PCT', 'TEE_AVG_RIGHT_ROUGH_TENDENCY_PCT', 'TEE_AVG_SPIN_RATE']
# MAGIC
# MAGIC *Putting*: Describes a players performance on the green. Evaluates a players putting skill/short game
# MAGIC
# MAGIC Consists of ['PUTTING_AVG_ONE_PUTTS', 'PUTTING_AVG_TWO_PUTTS','PUTTING_AVG_PUTTS','PUTTING_AVG_DIST_BIRDIE_INCH']
# MAGIC
# MAGIC *Performance Based*: Describes a player's performance in terms of previous results and scores. Evaluates a player's consistency and past performances
# MAGIC
# MAGIC Consists of ['Par3Average','Par4Average', 'Par5Average', 'HolesPerBirdie', 'HolesPerBogey','FINISHES_TOP10']
# MAGIC
# MAGIC **Independence Between Variables**
# MAGIC
# MAGIC To avoid creating bias in the regression model, we should avoid using the following highly correlated independent variables together in the same model
# MAGIC
# MAGIC *GIR*: (GIR_PCT_OVERALL: GIR_PCT_OVER_100, GIR_PCT_FAIRWAY)
# MAGIC
# MAGIC *Tee*: (TEE_AVG_BALL_SPEED: TEE_AVG_DRIVING_DISTANCE)
# MAGIC
# MAGIC *Putting*: (PUTTING_AVG_ONE_PUTTS: PUTTING_AVG_TWO_PUTTS : PUTTING_AVG_PUTTS)
# MAGIC
# MAGIC *Performance Based*: (Par4Average: HolesPerBogey)
# COMMAND ----------
# Let's start with the dependent variable as round score across all tournaments
roundsDf = pd.read_csv("/dbfs/FileStore/karbide/RoundsReg.txt")
playerStats = pd.read_csv("/dbfs/FileStore/karbide/PlayerStatsComplete.txt")
# drop the index column that pandas persisted when the CSVs were written
roundsDf.drop(["Unnamed: 0"], axis = 1, inplace = True)
playerStats.drop(["Unnamed: 0"], axis = 1, inplace = True)
# COMMAND ----------
roundScores = roundsDf[["PlayerID","RoundScore"]]
# COMMAND ----------
# attach season-level player stats to every round (one row per round)
roundsReg = roundScores.merge(playerStats, how = "left", on = "PlayerID")
# COMMAND ----------
roundsReg.corr()
# none of the variables are highly correlated with RoundScore, but the performance-based ones score the highest
# COMMAND ----------
from sklearn.model_selection import train_test_split
from sklearn import linear_model
from sklearn.metrics import r2_score
# COMMAND ----------
# selecting independent variables (X)
X = roundsReg[["Par4Average","HolesPerBirdie","PUTTING_AVG_DIST_BIRDIE_INCH","PUTTING_AVG_PUTTS","TEE_AVG_DRIVING_DISTANCE","TEE_DRIVING_ACCURACY_PCT", "FINISHES_TOP10", "GIR_PCT_OVERALL", "GIR_PCT_FAIRWAY_BUNKER"]]
Y = roundsReg[["RoundScore"]]
# COMMAND ----------
X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.33)
# COMMAND ----------
reg = linear_model.LinearRegression()
reg.fit(pd.DataFrame(X_train),pd.DataFrame(Y_train))
# COMMAND ----------
pred = reg.predict(X_test)
# residuals: actual minus predicted round score
err = pd.Series(Y_test["RoundScore"]) - [p[0]for p in pred]
# COMMAND ----------
display(err.hist(bins=100))
# seems we get some really crazy predictions
# COMMAND ----------
predDf = pd.DataFrame(pred)
predDf.describe()
# COMMAND ----------
Y_test.describe()
# COMMAND ----------
reg.score(pd.DataFrame(X_train), pd.DataFrame(Y_train))
# COMMAND ----------
# This shows the high variance I was worried about; let's check accuracy
r2_score(Y_test["RoundScore"],pred)
# COMMAND ----------
import statistics as stats
def rmse(errors):
    """Root-mean-square error of a sequence of residuals."""
    squared_errors = [pow(residual, 2) for residual in errors]
    mean_square = stats.mean(squared_errors)
    return pow(mean_square, 0.5)
# COMMAND ----------
rmse(err)
# COMMAND ----------
# seems we are way off; let's change the dependent variable to tournament score
# COMMAND ----------
tournamentScore = roundsDf.groupby(["PlayerID","TournamentID"]).agg({"RoundScore":"sum"})
tournamentScore.reset_index(inplace = True)
# since we are doing this across all tournaments, we can drop tournament ID
tournamentScore.drop(["TournamentID"],inplace = True, axis = 1)
# COMMAND ----------
t_Reg = tournamentScore.merge(playerStats, how = "left", on = "PlayerID")
# COMMAND ----------
t_Reg.corr()
# our correlations are getting stronger, but there is still little that is very strongly correlated
# COMMAND ----------
X = t_Reg[["Par4Average","HolesPerBirdie","PUTTING_AVG_DIST_BIRDIE_INCH","PUTTING_AVG_PUTTS","TEE_AVG_DRIVING_DISTANCE","TEE_DRIVING_ACCURACY_PCT", "FINISHES_TOP10", "GIR_PCT_OVERALL", "GIR_PCT_FAIRWAY_BUNKER"]]
Y = t_Reg[["RoundScore"]]
X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.2)
reg = linear_model.LinearRegression()
reg.fit(pd.DataFrame(X_train),pd.DataFrame(Y_train))
# COMMAND ----------
pred = reg.predict(X_test)
err = pd.Series(Y_test["RoundScore"]) - [p[0]for p in pred]
# COMMAND ----------
display(err.hist(bins=100))
# COMMAND ----------
predDf = pd.DataFrame(pred)
print(predDf.describe())
print(Y_test.describe())
# COMMAND ----------
print ("R2 Train")
print(reg.score(pd.DataFrame(X_train), pd.DataFrame(Y_train)))
print("R2 Test")
print(r2_score(Y_test["RoundScore"],pred))
print("RMSE")
print(rmse(err))
# COMMAND ----------
def linearReg(ind,dep,split):
    """Fit an OLS model on a random train/test split and report fit quality.

    Prints train R2, test R2 and test RMSE, then returns the fitted
    coefficients and intercept.

    :param ind: DataFrame of independent variables
    :param dep: single-column DataFrame holding the dependent variable
    :param split: fraction of rows held out for the test set
    :return: (coef_, intercept_) of the fitted LinearRegression
    """
    X_train, X_test, Y_train, Y_test = train_test_split(ind,dep, test_size = split)
    reg = linear_model.LinearRegression()
    reg.fit(pd.DataFrame(X_train),pd.DataFrame(Y_train))
    pred = reg.predict(X_test)
    # Generalized: use the dependent frame's own column name instead of the
    # hard-coded "RoundScore", so the helper works for any target column
    # (backward compatible -- all existing callers pass "RoundScore").
    target = dep.columns[0]
    err = pd.Series(Y_test[target]) - [p[0]for p in pred]
    print ("R2 Train")
    print(reg.score(pd.DataFrame(X_train), pd.DataFrame(Y_train)))
    print("R2 Test")
    print(r2_score(Y_test[target],pred))
    print("RMSE")
    print(rmse(err))
    return(reg.coef_,reg.intercept_)
# COMMAND ----------
# to make this easier, let's make a function (linearReg, defined above)
X = t_Reg[["Par4Average","HolesPerBirdie","PUTTING_AVG_PUTTS","TEE_AVG_DRIVING_DISTANCE", "FINISHES_TOP10", "GIR_PCT_OVERALL", "Par5Average", "Par3Average"]]
Y = t_Reg[["RoundScore"]]
c2, i2 = linearReg(X,Y,0.2)
# COMMAND ----------
# we can't compare raw scores across different tournaments as-is, but we can use the mean-standardized version
tournamentScoreNorm = roundsDf.groupby(["PlayerID","TournamentID"]).agg({"RoundScore":"sum"})
tournamentScoreNorm.reset_index(inplace = True)
# pull the mean scores for each tournament
meanScores = tournamentScoreNorm.groupby("TournamentID").agg({"RoundScore":"mean"})
meanScores.reset_index(inplace=True)
meanScores.columns = ["TournamentID","Mean"]
tournamentScoreNorm = tournamentScoreNorm.merge(meanScores, how="left", on="TournamentID")
# center each tournament score on that tournament's mean
tournamentScoreNorm["NormScore"] = tournamentScoreNorm["RoundScore"] - tournamentScoreNorm["Mean"]
tournamentScoreNorm.drop(["TournamentID","Mean","RoundScore"], axis =1, inplace = True)
tournamentScoreNorm.columns = ["PlayerID","RoundScore"]
# COMMAND ----------
t_regNorm = tournamentScoreNorm.merge(playerStats, how ="left", on = "PlayerID")
# COMMAND ----------
t_regNorm.corr()
# COMMAND ----------
X = t_regNorm[["Par4Average","HolesPerBirdie","PUTTING_AVG_DIST_BIRDIE_INCH", "PUTTING_AVG_ONE_PUTTS", "TEE_AVG_DRIVING_DISTANCE", "TEE_AVG_LEFT_ROUGH_TENDENCY_PCT", "FINISHES_TOP10", "GREEN_PCT_SCRAMBLE_SAND", "Par5Average", "Par3Average"]]
Y = t_regNorm[["RoundScore"]]
c1, i1 = linearReg(X,Y,0.2)
# normalizing improves our R2 value
# COMMAND ----------
# again our scores are pretty bad, but maybe they'll get better if we look at just one tournament
tournamentScore2 = roundsDf.groupby(["PlayerID","TournamentID"]).agg({"RoundScore":"sum"})
tournamentScore2.reset_index(inplace = True)
# COMMAND ----------
# let's pick the tournament where we have the most players
roundsDf.groupby("TournamentID").agg({"PlayerID":"nunique"}).sort_values("PlayerID", ascending = False).head(2)
# Tournament 429, the Players Championship
# COMMAND ----------
tournamentScore2 =tournamentScore2.loc[tournamentScore2["TournamentID"] == 429]
tournamentScore2.drop(["TournamentID"], axis = 1, inplace = True)
# COMMAND ----------
t2_reg = tournamentScore2.merge(playerStats, how = "left", on = "PlayerID")
# COMMAND ----------
t2_reg.corr()
# now we have some much stronger correlations; let's try to use them
# COMMAND ----------
X = t2_reg[["Par4Average","HolesPerBirdie","PUTTING_AVG_DIST_BIRDIE_INCH", "PUTTING_AVG_ONE_PUTTS", "TEE_AVG_DRIVING_DISTANCE", "TEE_AVG_LEFT_ROUGH_TENDENCY_PCT", "FINISHES_TOP10", "GREEN_PCT_SCRAMBLE_SAND", "Par5Average", "Par3Average"]]
Y = t2_reg[["RoundScore"]]
c3,i3 = linearReg(X,Y,0.2)
# now our R2 is much higher, but can we still do better?
# COMMAND ----------
# mean standardize for one tournament
mean429 = meanScores.loc[meanScores["TournamentID"]==429]
# NOTE(review): the label-based lookup [22] assumes tournament 429 sits at
# index 22 of meanScores -- fragile; .iloc[0] on the filtered frame is safer.
mean429 = mean429["Mean"][22]
mean429
# COMMAND ----------
Y = t2_reg[["RoundScore"]]
# NOTE(review): assigning a new column to this slice can trigger pandas'
# SettingWithCopyWarning.
Y["RoundScoreNorm"] = Y["RoundScore"] - mean429
Y = Y[["RoundScoreNorm"]]
Y.columns = ["RoundScore"]
# COMMAND ----------
c4,i4 = linearReg(X,Y,0.2)
# our result is about the same
# COMMAND ----------
# let's just play with the IVs for a bit
X = t2_reg[["Par4Average","HolesPerBirdie", "Par5Average", "Par3Average", "HolesPerBogey","GREEN_PCT_SCRAMBLE_SAND"]]
Y = t2_reg[["RoundScore"]]
c5,i5 = linearReg(X,Y,0.2)
# COMMAND ----------
# same IVs but with GIR_PCT_OVERALL instead of GREEN_PCT_SCRAMBLE_SAND
X = t2_reg[["Par4Average","HolesPerBirdie", "Par5Average", "Par3Average", "HolesPerBogey","GIR_PCT_OVERALL"]]
Y = t2_reg[["RoundScore"]]
c5,i5 = linearReg(X,Y,0.2)
# COMMAND ----------
# MAGIC %md
# MAGIC Since our most accurate model is specfic tournament results, lets add the 3 Strokes Gained categories
# COMMAND ----------
StrokesGained = pd.read_csv("/dbfs/FileStore/karbide/StrokesGainedIDs.txt")
StrokesGained.drop(["Unnamed: 0"], axis = 1, inplace = True)
# COMMAND ----------
# inner join: keep only players that have strokes-gained data
t2_regSG = t2_reg.merge(StrokesGained, how = "inner", on = "PlayerID")
# COMMAND ----------
t2_regSG.columns
# COMMAND ----------
# now with SG
X = t2_regSG[["Par4Average","HolesPerBirdie", "Par5Average", "Par3Average", "HolesPerBogey","GREEN_PCT_SCRAMBLE_SAND", 'TOTAL SG:T', "TOTAL SG:T2G", 'TOTAL SG:P']]
Y = t2_regSG[["RoundScore"]]
c6,i6 = linearReg(X,Y,0.2)
# COMMAND ----------
# NOTE(review): identical X/Y to the cell above -- this re-run only differs by
# the random train/test split drawn inside linearReg.
X = t2_regSG[["Par4Average","HolesPerBirdie", "Par5Average", "Par3Average", "HolesPerBogey","GREEN_PCT_SCRAMBLE_SAND", 'TOTAL SG:T', "TOTAL SG:T2G", 'TOTAL SG:P']]
Y = t2_regSG[["RoundScore"]]
c7,i7 = linearReg(X,Y,0.2)
# COMMAND ----------
X = t2_regSG[["GREEN_PCT_SCRAMBLE_SAND", 'TOTAL SG:T', "Par4Average","HolesPerBirdie"]]
Y = t2_regSG[["RoundScore"]]
c8,i8 = linearReg(X,Y,0.2)
print(c8,i8)
# COMMAND ----------
# MAGIC %md
# MAGIC # MODEL RESULTS AND COEFFICIENTS
# MAGIC **Tournament 429**
# MAGIC
# MAGIC *Model 1*:
# MAGIC
# MAGIC IVs -> ["Par4Average","HolesPerBirdie","PUTTING_AVG_DIST_BIRDIE_INCH", "PUTTING_AVG_ONE_PUTTS", "TEE_AVG_DRIVING_DISTANCE", "TEE_AVG_LEFT_ROUGH_TENDENCY_PCT", "FINISHES_TOP10", "GREEN_PCT_SCRAMBLE_SAND", "Par5Average", "Par3Average"]
# MAGIC
# MAGIC DV -> Tournament Score
# MAGIC
# MAGIC R2 and RMSE = 0.3229964494135511, 4.795109852419135
# MAGIC
# MAGIC coef, int = [[ 2.32846057e+01 -2.26333978e-01 6.19652183e-02 1.83444680e-01 -2.53906041e-02 -2.46384591e-01 -1.97731802e-02 -1.40051312e-01 1.61535095e+01 1.39734513e+01]], 16.67094541
# MAGIC
# MAGIC *Model 2*
# MAGIC
# MAGIC IVs -> ["Par4Average","HolesPerBirdie","PUTTING_AVG_DIST_BIRDIE_INCH", "PUTTING_AVG_ONE_PUTTS", "TEE_AVG_DRIVING_DISTANCE", "TEE_AVG_LEFT_ROUGH_TENDENCY_PCT", "FINISHES_TOP10", "GREEN_PCT_SCRAMBLE_SAND", "Par5Average", "Par3Average"]
# MAGIC
# MAGIC DV -> Tournament Score/Mean Standardized
# MAGIC
# MAGIC R2 and RMSE = 0.27481894608420776, 5.278633537736203
# MAGIC
# MAGIC coef, int =[[ 6.31478232e+00 -8.16209567e-02 7.18796229e-02 -4.30983221e-01 -8.41258947e-03 -2.12066433e-01 7.78575502e-02 -1.20083842e-01 2.55502279e+01 7.95291971e+00]], 15.26182535
# MAGIC
# MAGIC *Model 3*
# MAGIC
# MAGIC IVs -> ["Par4Average","HolesPerBirdie", "HolesPerBogey", "GREEN_PCT_SCRAMBLE_SAND", "Par5Average", "Par3Average"]
# MAGIC
# MAGIC DV -> Tournament Score
# MAGIC
# MAGIC R2 and RMSE = 0.3787100076428008, 4.605715520235553
# MAGIC
# MAGIC coef, int = [[ 6.81122888 1.61102777 20.98822676 19.81415731 0.39739065 -0.09967628]], 2.13676949
# MAGIC
# MAGIC *Model 4*
# MAGIC
# MAGIC IVs -> [["Par4Average","HolesPerBirdie", "Par5Average", "Par3Average", "HolesPerBogey","GREEN_PCT_SCRAMBLE_SAND", 'TOTAL SG:T', "TOTAL SG:T2G", 'TOTAL SG:P']]
# MAGIC
# MAGIC DV -> Tournament Score
# MAGIC
# MAGIC R2 and RMSE = 0.5044001176837463,3.52737074470192
# MAGIC
# MAGIC coef, int = [[-1.46756830e+01 7.27174253e-01 4.24465311e+00 -2.42897881e+00 6.32162133e-02 -1.04608027e-01 -6.62482431e+02 6.62435768e+02 6.62423818e+02]] [4.27898458]
# COMMAND ----------
print(c6)
print(i6)
# COMMAND ----------
# Let's try some feature importance to increase our model accuracy
t2_regSG.corr(method = 'pearson')
# COMMAND ----------
t2_regSG2 = t2_regSG.drop(["PlayerID"], axis = 1)
t2_regSG2.corr(method = 'pearson')
# COMMAND ----------
from sklearn.preprocessing import MinMaxScaler
# COMMAND ----------
X = t2_regSG[["Par4Average","HolesPerBirdie", "Par5Average", "Par3Average", "HolesPerBogey","GREEN_PCT_SCRAMBLE_SAND", 'TOTAL SG:T', "TOTAL SG:T2G", 'TOTAL SG:P','AVERAGE']]
Y = t2_regSG[["RoundScore"]]
scaler = MinMaxScaler()
scaler.fit(X)
sX = scaler.transform(X)
cols = X.columns
X_new = pd.DataFrame(sX,columns = cols)
# NOTE(review): the scaled frame X_new is built but linearReg is called with
# the unscaled X -- the MinMax scaling is never actually used (here and in the
# three cells below).
c7,i7 = linearReg(X,Y,0.2)
print(c7,i7)
# coefficients suggest we remove some of the variables
# COMMAND ----------
X = t2_regSG[["Par4Average","HolesPerBirdie", "Par5Average", "Par3Average", "HolesPerBogey",'AVERAGE']]
Y = t2_regSG[["RoundScore"]]
scaler = MinMaxScaler()
scaler.fit(X)
sX = scaler.transform(X)
cols = X.columns
X_new = pd.DataFrame(sX,columns = cols)
c8,i8 = linearReg(X,Y,0.2)
print(c8,i8)
# COMMAND ----------
X = t2_regSG[["Par4Average", "Par5Average", "Par3Average",'AVERAGE']]
Y = t2_regSG[["RoundScore"]]
scaler = MinMaxScaler()
scaler.fit(X)
sX = scaler.transform(X)
cols = X.columns
X_new = pd.DataFrame(sX,columns = cols)
c8,i8 = linearReg(X,Y,0.2)
print(c8,i8)
# COMMAND ----------
X = t2_regSG[['TOTAL SG:T', "TOTAL SG:T2G", 'TOTAL SG:P']]
Y = t2_regSG[["RoundScore"]]
scaler = MinMaxScaler()
scaler.fit(X)
sX = scaler.transform(X)
cols = X.columns
X_new = pd.DataFrame(sX,columns = cols)
c9,i9 = linearReg(X,Y,0.2)
print(c9,i9)
3381453 | <gh_stars>0
"""Zarządzanie całym zachowaniem statku obcych."""
import pygame
from pygame.sprite import Sprite
class Alien(Sprite):
    """A single alien in the fleet."""

    def __init__(self, ai_settings, screen):
        """Create an alien at its initial spawn position."""
        super(Alien, self).__init__()
        self.screen = screen
        self.ai_set = ai_settings

        # Load the alien sprite and scale it to the gameplay size.
        self.image = pygame.image.load('images/aliens.png')
        self.image = pygame.transform.scale(self.image, (50, 48))
        self.rect = self.image.get_rect()

        # Spawn near the top-left corner, offset by the sprite's own size.
        self.rect.x = self.rect.width
        self.rect.y = self.rect.height

        # Track the exact horizontal position as a float for smooth movement.
        self.x = float(self.rect.x)

    def blitme(self):
        """Draw the alien at its current position."""
        self.screen.blit(self.image, self.rect)

    def update(self):
        """Advance the alien horizontally; direction comes from the settings."""
        self.x += (self.ai_set.alien_speed_factor * self.ai_set.fleet_direction)
        self.rect.x = self.x

    def check_edges(self):
        """Return True when the alien touches the left or right screen edge."""
        screen_rect = self.screen.get_rect()
        if self.rect.right >= screen_rect.right or self.rect.left <= 0:
            return True
| StarcoderdataPython |
1696347 | <gh_stars>10-100
import dask.dataframe as dd
import holoviews as hv
import geoviews as gv
from bokeh.models import Slider, Button
from bokeh.layouts import layout
from bokeh.io import curdoc
from bokeh.models import WMTSTileSource
from holoviews.operation.datashader import datashade, aggregate, shade
from holoviews.plotting.util import fire
# Configure holoviews/datashader rendering for a Bokeh server app.
shade.cmap = fire
hv.extension('bokeh')
renderer = hv.renderer('bokeh').instance(mode='server')

# Load data: only the pickup time and dropoff coordinates are needed.
usecols = ['tpep_pickup_datetime', 'dropoff_x', 'dropoff_y']
ddf = dd.read_csv('../data/nyc_taxi.csv', parse_dates=['tpep_pickup_datetime'], usecols=usecols)
ddf['hour'] = ddf.tpep_pickup_datetime.dt.hour
ddf = ddf.persist()

# (Removed a duplicate `from bokeh.models import WMTSTileSource` here — it is
# already imported at the top of the file.)
url = 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{Z}/{Y}/{X}.jpg'
wmts = gv.WMTS(WMTSTileSource(url=url))

# HourSelect stream drives which hour's points are shown.
stream = hv.streams.Stream.define('HourSelect', hour=0)()
points = hv.Points(ddf, kdims=['dropoff_x', 'dropoff_y'])
dmap = hv.util.Dynamic(points, operation=lambda obj, hour: obj.select(hour=hour),
                       streams=[stream])

# Apply aggregation
aggregated = aggregate(dmap, link_inputs=True)

# Shade the data
shaded = shade(aggregated)

# Define PointerX stream, attach to points and declare DynamicMap for cross-section and VLine
pointer = hv.streams.PointerX(x=ddf.dropoff_x.loc[0].compute().iloc[0], source=points)
section = hv.util.Dynamic(aggregated, operation=lambda obj, x: obj.sample(dropoff_x=x),
                          streams=[pointer], link_inputs=False)
vline = hv.DynamicMap(lambda x: hv.VLine(x), streams=[pointer])

# Define options
hv.opts("RGB [width=800 height=600 xaxis=None yaxis=None] VLine (color='black' line_width=1)")
hv.opts("Curve [width=100 yaxis=None show_frame=False] (color='black') {+framewise} Layout [shared_axes=False]")

# Combine it all into a complex layout
hvobj = (wmts * shaded * vline) << section

### Pass the HoloViews object to the renderer
plot = renderer.get_plot(hvobj, doc=curdoc())

# Slider range: hours of the day.
start, end = 0, 23
def slider_update(attrname, old, new):
    """Bokeh on_change callback: push the slider's new hour into the stream."""
    stream.event(hour=new)
slider = Slider(start=start, end=end, value=0, step=1, title="Hour")
slider.on_change('value', slider_update)
def animate_update():
    """Advance the hour slider by one step, wrapping back to `start` at `end`."""
    # Renamed from the misleading `year` — the slider represents an hour.
    hour = slider.value + 1
    if hour > end:
        hour = start
    slider.value = hour


def animate():
    """Toggle the periodic animation callback and flip the play/pause label."""
    if button.label == '► Play':
        button.label = '❚❚ Pause'
        curdoc().add_periodic_callback(animate_update, 1000)
    else:
        button.label = '► Play'
        curdoc().remove_periodic_callback(animate_update)
button = Button(label='► Play', width=60)
button.on_click(animate)

# Combine the bokeh plot on plot.state with the widgets.
# Renamed the result from `layout` to `page` so it no longer shadows the
# imported bokeh.layouts.layout function.
page = layout([
    [plot.state],
    [slider, button],
], sizing_mode='fixed')

curdoc().add_root(page)
| StarcoderdataPython |
180093 | from __future__ import annotations
from pathlib import Path
from typer import echo
from ..resolvers import clone_github, clone_local
from .resolver import Resolver
from .runner import Runner
from .variables import get_variables, read_variables
class NooCore:
    """Entry point for resolving a noofile and executing its steps."""

    def __init__(self, allow_shell: bool = False) -> None:
        """Create a core with a fresh Resolver; *allow_shell* gates shell steps."""
        self.resolver = Resolver()
        self.shell = allow_shell

    def clone(self, name: str, noofile: str, dest: Path) -> None:
        """Resolve *noofile*, clone its remote into *dest*, then run its steps.

        Remotes use a scheme prefix: ``git:<repo>`` or ``file:<path>``.
        Missing remote -> warn and return; unknown scheme -> ValueError.
        """
        spec = self.resolver.resolve(noofile)
        label = spec.name or name
        echo(f"Starting clone process for {label}.")
        remote = spec.remote
        if not remote:
            echo(f"No remote specified for {label}")
            return
        if remote.startswith("git:"):
            clone_github(remote[len("git:"):], dest)
        elif remote.startswith("file:"):
            clone_local(Path(remote[len("file:"):]), dest)
        else:
            raise ValueError(f"Invalid remote: {remote}")
        scope = get_variables(name)
        scope["var"].update(read_variables(spec.read))
        Runner(self, dest, spec.steps, scope, self.shell).run()

    def mod(self, noofile: str, dest: Path) -> None:
        """Resolve *noofile* and run its steps against the existing *dest*."""
        spec = self.resolver.resolve(noofile)
        echo(f"Starting modification for {spec.name or 'unnamed'}.")
        scope = get_variables()
        scope["var"].update(read_variables(spec.read))
        Runner(self, dest, spec.steps, scope, self.shell).run()
| StarcoderdataPython |
3258908 | import os
import copy
import unittest
import jsonschema
from yggdrasil.tests import assert_equal
from yggdrasil.communication import new_comm
from yggdrasil.communication.tests import test_CommBase as parent
def test_wait_for_creation():
    r"""Test FileComm waiting for creation.

    The receiver opens with wait_for_creation before the file exists; the
    sender is scheduled to create/open the file 0.5s later, and the test
    verifies the message still arrives.
    """
    msg_send = b'Test message\n'
    name = 'temp_file_create.txt'
    kwargs = {'in_temp': True, 'comm': 'FileComm', 'dont_open': True}
    # kwargs = {'wait_for_creation': 5, 'in_temp': True, comm='FileComm'}
    send_instance = new_comm(name, direction='send', **kwargs)
    recv_instance = new_comm(name, direction='recv',
                             wait_for_creation=5.0, **kwargs)
    # Ensure the file does not exist yet so the receiver really has to wait.
    if os.path.isfile(send_instance.address):
        os.remove(send_instance.address)
    def open_and_send(inst, msg):
        """Open the comm, send one message, and return the send flag."""
        inst.open()
        flag = inst.send(msg)
        return flag
    # Delay the sender so the receiver opens first (file absent).
    send_instance.sched_task(0.5, open_and_send, args=[send_instance, msg_send],
                             store_output=True)
    recv_instance.open()
    # Wait (bounded by wait_for_creation) for the scheduled send to finish.
    T = recv_instance.start_timeout(recv_instance.wait_for_creation)
    while (not T.is_out) and (send_instance.sched_out is None):  # pragma: debug
        recv_instance.sleep()
    recv_instance.stop_timeout()
    assert(send_instance.sched_out)
    flag, msg_recv = recv_instance.recv()
    assert(flag)
    assert_equal(msg_recv, msg_send)
    send_instance.close()
    recv_instance.close()
    recv_instance.remove_file()
class TestFileComm(parent.TestCommBase):
    r"""Test for FileComm communication class.

    Inherits the generic comm test suite and specializes it for file-backed
    comms (temp files, append mode, file series, remaining bytes).
    """
    comm = 'FileComm'
    # Extra attributes FileComm adds beyond the base comm class.
    attr_list = (copy.deepcopy(parent.TestCommBase.attr_list)
                 + ['fd', 'read_meth', 'append', 'in_temp',
                    'is_series', 'wait_for_creation', 'serializer',
                    'platform_newline'])
    def teardown(self):
        r"""Remove the file."""
        super(TestFileComm, self).teardown()
        self.send_instance.remove_file()
    @property
    def send_inst_kwargs(self):
        r"""dict: Keyword arguments for send instance."""
        out = super(TestFileComm, self).send_inst_kwargs
        # Keep test files out of the working directory.
        out['in_temp'] = True
        return out
    @unittest.skipIf(True, 'File comm')
    def test_send_recv_nolimit(self):
        r"""Disabled: Test send/recv of a large message."""
        pass  # pragma: no cover
    @unittest.skipIf(True, 'File comm')
    def test_work_comm(self):
        r"""Disabled: Test creating/removing a work comm."""
        pass  # pragma: no cover
    def test_invalid_read_meth(self):
        r"""Test raise of error on invalid read_meth."""
        # Only meaningful on the base FileComm class; subclasses pin read_meth.
        if self.comm == 'FileComm':
            kwargs = self.send_inst_kwargs
            kwargs['read_meth'] = 'invalid'
            kwargs['skip_component_schema_normalization'] = False
            self.assert_raises(jsonschema.ValidationError, new_comm, self.name,
                               **kwargs)
    def test_append(self):
        r"""Test open of file comm with append."""
        send_objects = self.testing_options['send']
        recv_objects = self.testing_options['recv']
        recv_objects_partial = self.testing_options['recv_partial']
        # Write to file
        flag = self.send_instance.send(send_objects[0])
        assert(flag)
        # Create a second receiver for reading partial contents as they grow.
        recv_kwargs = copy.deepcopy(self.inst_kwargs)
        recv_kwargs['append'] = True
        new_inst_recv = new_comm('partial%s' % self.uuid, **recv_kwargs)
        self.recv_message_list(new_inst_recv, recv_objects_partial[0],
                               break_on_empty=True)
        # Open file in append mode and send the remaining objects.
        send_kwargs = copy.deepcopy(self.send_inst_kwargs)
        send_kwargs['append'] = True
        new_inst_send = new_comm('append%s' % self.uuid, **send_kwargs)
        for i in range(1, len(send_objects)):
            flag = new_inst_send.send(send_objects[i])
            assert(flag)
            self.recv_message_list(new_inst_recv, recv_objects_partial[i],
                                   break_on_empty=True)
        self.remove_instance(new_inst_send)
        self.remove_instance(new_inst_recv)
        # Read entire contents
        self.recv_message_list(self.recv_instance, recv_objects)
        # Check file contents
        if self.testing_options.get('exact_contents', True):
            with open(self.send_instance.address, 'rb') as fd:
                contents = fd.read()
            self.assert_equal(contents, self.testing_options['contents'])
    def test_series(self):
        r"""Test sending/receiving to/from a series of files."""
        # Set up series: '%d' in the address selects the file index.
        fname = '%d'.join(os.path.splitext(self.send_instance.address))
        self.send_instance.close()
        self.recv_instance.close()
        self.send_instance.is_series = True
        self.recv_instance.is_series = True
        self.send_instance.address = fname
        self.recv_instance.address = fname
        self.send_instance.open()
        self.recv_instance.open()
        # Send/receive multiple messages, one per file in the series.
        nmsg = 2
        for i in range(nmsg):
            self.do_send_recv()
    def test_remaining_bytes(self):
        r"""Test remaining_bytes."""
        self.assert_equal(self.send_instance.remaining_bytes, 0)
        # A closed receiver should report nothing remaining.
        self.recv_instance.close()
        assert(self.recv_instance.is_closed)
        self.assert_equal(self.recv_instance.remaining_bytes, 0)
    def test_recv_nomsg(self):
        r"""Test receive when there is no waiting message."""
        flag, msg_recv = self.recv_instance.recv(timeout=self.sleeptime)
        assert(not flag)
        self.assert_equal(msg_recv, self.recv_instance.eof_msg)
class TestFileComm_readline(TestFileComm):
    r"""Test for FileComm communication class with read_meth = 'readline'."""
    @property
    def inst_kwargs(self):
        r"""dict: Keyword arguments for tested class."""
        # Fixed: the super() call previously named TestFileComm, which starts
        # MRO lookup past this class's own parent position and would silently
        # bypass any future TestFileComm.inst_kwargs override. Use the class
        # itself, consistent with testing_options below. (Lookup result is
        # unchanged today since TestFileComm defines no inst_kwargs.)
        out = super(TestFileComm_readline, self).inst_kwargs
        out['read_meth'] = 'readline'
        return out
    @property
    def testing_options(self):
        r"""dict: Testing options."""
        # In readline mode each sent line is received back verbatim.
        out = super(TestFileComm_readline, self).testing_options
        out['recv'] = out['send']
        return out
| StarcoderdataPython |
79041 | <reponame>acatwithacomputer/proteus<gh_stars>0
from glob import *
from os import *
files = glob('*2D*_n.py')
for f in files:
nf = f.lower()
words = nf.split('_')
nf=words[0]
for sb in words[1:-2]:
if sb != '2d':
nf += '_'+sb
nf += '_2d'
nf += '_'+words[-2]+'_'+words[-1]
#print nf
#dstart=nf.find('2d')
#nstart=nf.find('n.py')
#nf = nf[:dstart]+nf[dstart+3:nstart]+'2d'+nf[nstart:]
#print nf
system('svn mv '+f+' '+nf)
| StarcoderdataPython |
n = [3, 5, 7]


def list_extender(lst):
    """Append 9 to *lst* in place and return it (note: mutates the argument)."""
    lst.append(9)
    return lst


# Fixed: the original used the Python 2 `print` statement, which is a syntax
# error on Python 3; the call form works on both.
print(list_extender(n))
| StarcoderdataPython |
3206974 | <reponame>edugonza/pm4py-source<filename>pm4py/visualization/dfg/factory.py<gh_stars>0
from pm4py.visualization.dfg.versions import simple_visualize
import os, shutil
from pm4py.visualization.common.save import *
# Supported DFG visualization variants.
FREQUENCY = "frequency"
PERFORMANCE = "performance"

# Map variant name -> concrete visualization implementation.
VERSIONS = {FREQUENCY: simple_visualize.apply_frequency, PERFORMANCE: simple_visualize.apply_performance}


def apply(dfg, log=None, activities_count=None, parameters=None, variant="frequency"):
    """Render a DFG with the implementation selected by *variant*.

    dfg: directly-follows graph to visualize.
    log: optional event log used to enrich the visualization.
    activities_count: optional activity->count mapping.
    parameters: optional algorithm parameters.
    variant: "frequency" or "performance"; raises KeyError otherwise.
    """
    # (Removed stray dataset text that had been fused onto the return line.)
    return VERSIONS[variant](dfg, log=log, activities_count=activities_count, parameters=parameters)
142619 | <reponame>armandomeeuwenoord/freight<filename>freight/notifiers/base.py<gh_stars>100-1000
from freight.models import Deploy, TaskStatus
from freight import http
__all__ = ["Notifier", "NotifierEvent"]


class NotifierEvent(object):
    """Integer constants identifying task lifecycle events for notifiers."""
    TASK_STARTED = 0
    TASK_FINISHED = 1
    TASK_QUEUED = 2
class Notifier(object):
    """Base class for notification backends.

    Subclasses implement send_deploy(); the send()/should_send() shims look
    up the Deploy row for a task and delegate to the deploy-specific hooks.
    """
    # Events notified by default when config supplies no 'events' list.
    DEFAULT_EVENTS = [
        NotifierEvent.TASK_QUEUED,
        NotifierEvent.TASK_STARTED,
        NotifierEvent.TASK_FINISHED,
    ]
    def get_default_options(self):
        """Return default per-notifier config options."""
        return {
            # TODO(dcramer): we want to support events, but we need validators
            # before that can happen to avoid magical constants
            # 'events': {},
        }
    def get_options(self):
        """Return the option schema for this notifier (empty by default)."""
        return {}
    def send(self, task, config, event):
        """Dispatch a notification for *task*; currently deploy-only."""
        # TODO(mattrobenolt): Split this out into send_deploy, send_x
        # since we want different notifications for different tasks,
        # and remove this shim. For now, there are only deploys.
        deploy = Deploy.query.filter(Deploy.task_id == task.id).first()
        return self.send_deploy(deploy, task, config, event)
    def send_deploy(self, deploy, task, config, event):
        """Send a deploy notification; must be implemented by subclasses."""
        raise NotImplementedError
    def should_send(self, task, config, event):
        """Return True if this notifier should fire for *task*/*event*."""
        deploy = Deploy.query.filter(Deploy.task_id == task.id).first()
        return self.should_send_deploy(deploy, task, config, event)
    def should_send_deploy(self, deploy, task, config, event):
        """Return True if *event* is in the configured (or default) event list."""
        return event in config.get("events", self.DEFAULT_EVENTS)
def generate_event_title(app, deploy, task, user, event):
    """Build the one-line (Slack-formatted) title for a deploy event.

    app/deploy/task/user: model instances describing the deploy.
    event: a NotifierEvent constant; finished-state wording is derived from
        task.status.
    Raises NotImplementedError for an unrecognized finished task status.
    """
    # Removed unused locals from the original (params, ref, status_label).
    number = deploy.number
    app_name = app.name
    env = deploy.environment
    sha = task.sha[:7] if task.sha else task.ref
    duration = task.duration
    # Usernames can be either a bare name or an email address; keep the
    # part before the '@'.
    user = user.name.split("@", 1)[
        0
    ]
    link = http.absolute_uri(
        f"/deploys/{app.name}/{deploy.environment}/{deploy.number}"
    )
    # TODO(dcramer): show the ref when it differs from the sha
    if event == NotifierEvent.TASK_QUEUED:
        return f"[{app_name}/{env}] {user} queued deploy <{link}|#{number}> ({sha})"
    if event == NotifierEvent.TASK_STARTED:
        return f"[{app_name}/{env}] {user} started deploy <{link}|#{number}> ({sha})"
    if task.status == TaskStatus.failed:
        return f"[{app_name}/{env}] Failed to finish {user}'s deploy <{link}|#{number}> ({sha}) after {duration}s"
    if task.status == TaskStatus.cancelled:
        return f"[{app_name}/{env}] {user}'s deploy <{link}|#{number}> ({sha}) was cancelled after {duration}s"
    if task.status == TaskStatus.finished:
        return f"[{app_name}/{env}] Successfully finished {user}'s deploy <{link}|#{number}> ({sha}) after {duration}s"
    raise NotImplementedError(task.status)
| StarcoderdataPython |
194202 | <filename>tests/unit/test_auth_sigv4.py<gh_stars>1000+
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
from botocore.auth import SigV4Auth
from botocore.awsrequest import AWSRequest
from botocore.credentials import Credentials
# Dummy credentials used only for constructing a signer in tests.
# (The secret value here is a dataset-anonymization placeholder.)
SECRET_KEY = "<KEY>"
ACCESS_KEY = 'AKIDEXAMPLE'


class TestSigV4Auth(unittest.TestCase):
    """Tests for botocore's SigV4 request signer."""
    def setUp(self):
        # Fresh signer per test, pinned to the 'host' service in us-weast-1.
        self.credentials = Credentials(ACCESS_KEY, SECRET_KEY)
        self.sigv4 = SigV4Auth(self.credentials, 'host', 'us-weast-1')
    def test_signed_host_is_lowercase(self):
        """The Host header included in the signature must be lowercased."""
        endpoint = 'https://S5.Us-WeAsT-2.AmAZonAwS.com'
        expected_host = 's5.us-weast-2.amazonaws.com'
        request = AWSRequest(method='GET', url=endpoint)
        headers_to_sign = self.sigv4.headers_to_sign(request)
        self.assertEqual(expected_host, headers_to_sign.get('host'))
| StarcoderdataPython |
3347175 | <reponame>b-whitman/TwitOff
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
import os
from dotenv import load_dotenv
from twitoff.models import db, User, Tweet, migrate
from twitoff.routes import my_routes
from twitoff.twitter_service import twitter_api_client
# Populate os.environ from a local .env file, if present.
load_dotenv()
# Database connection string; the "OOPS" sentinel makes a missing .env entry
# visible at startup instead of silently defaulting.
DATABASE_URL = os.getenv("DATABASE_URL", default="OOPS")
def create_app():
    """Application factory: configure Flask, bind the db/migrations/routes.

    Returns the configured Flask app instance.
    """
    app = Flask(__name__)
    app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
    app.config["SQLALCHEMY_DATABASE_URI"] = DATABASE_URL
    # Shared Twitter API client, stored on config for access from routes.
    app.config["TWITTER_API_CLIENT"] = twitter_api_client()
    db.init_app(app)
    migrate.init_app(app, db)
    app.register_blueprint(my_routes)
    return app
# #
# # ROUTING
# #
# @app.route("/")
# def index():
# return render_template("index.html")
# @app.route("/about")
# def about():
# return "About Me"
# @app.route("/users")
# @app.route("/users.json")
# def users():
# users = User.query.all()
# print(type(users))
# print(type(users[0]))
# users_response = []
# for u in users:
# user_dict = u.__dict__
# del user_dict["_sa_instance_state"]
# users_response.append(user_dict)
# return jsonify(users_response)
# @app.route("/users/create", methods=["POST"])
# def create_user():
# print("CREATING A NEW USER...")
# print("FORM DATA:", dict(request.form))
# # return jsonify({"message": "CREATED OK (TODO)"})
# if "name" in request.form:
# name = request.form["name"]
# print(name)
# db.session.add(User(name=name))
# db.session.commit()
# return jsonify({"message": "CREATED OK", "name": name})
# else:
# return jsonify({"message": "OOPS"})
# @app.route("/tweets/create", methods=["POST"])
# def create_tweet():
# print("CREATING A NEW TWEET...")
# print("FORM DATA:", dict(request.form))
# if "status" in request.form:
# string = request.form["status"]
# user_id = int(request.form["user_id"])
# db.session.add(Tweet(status=string, user_id=user_id))
# db.session.commit()
# return jsonify({"message": "CREATED OK", "status": string})
# else:
# return jsonify({"message": "oops"})
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.