id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3440812 | <gh_stars>0
# Render the text 'LETS START' as ASCII art with pyfiglet and print it.
import pyfiglet

result = pyfiglet.figlet_format('LETS START')
print(result)
| StarcoderdataPython |
11301743 | """
Copyright 2015 <NAME>
Licensed under MIT (https://github.com/brianquach/udacity-nano-fullstack-movie-trailer/blob/master/LICENSE) # noqa
"""
import fresh_tomatoes
import media
def get_movie_list():
    """Fetches a list of movie objects.

    Returns:
        A list of movie objects; each movie object represents one of my
        favorite movies with relevant movie data.
    """
    # Each media.Movie is constructed with positional arguments in the
    # order: title, storyline, year, duration, MPAA rating, poster image
    # URL, YouTube trailer URL.
    shawshank = media.Movie(
        "Shawshank Redemption",
        "Two imprisoned men bond over a number of years, finding solace and " +
        "eventual redemption through acts of common decency.",
        1994,
        "142 min",
        "R",
        "https://upload.wikimedia.org/wikipedia/en/8/81/ShawshankRedemptionMoviePoster.jpg",  # noqa
        "https://www.youtube.com/watch?v=NmzuHjWmXOc")
    batman = media.Movie(
        "Batman: Mask of the Phantasm",
        "Batman is wrongly implicated in a series of murders of mob bosses " +
        "actually done by a new vigilante assassin.",
        1993,
        "76 min",
        "PG",
        "http://www.gstatic.com/tv/thumb/movieposters/15294/p15294_p_v7_ab.jpg",  # noqa
        "https://www.youtube.com/watch?v=G__jUFD4Ef8")
    # NOTE(review): the concatenation below yields "vow  to rescue" with a
    # double space ("vow " + " to rescue") — confirm whether intended.
    toy_story_2 = media.Movie(
        "Toy Story 2",
        "When Woody is stolen by a toy collector, Buzz and his friends vow " +
        " to rescue him, but Woody finds the idea of immortality in a " +
        "museum tempting.",
        1999,
        "92 min",
        "G",
        "http://www.gstatic.com/tv/thumb/movieposters/24266/p24266_p_v7_ab.jpg",  # noqa
        "https://www.youtube.com/watch?v=Lu0sotERXhI")
    spirited_away = media.Movie(
        "Spirited Away",
        "During her family's move to the suburbs, a sullen 10-year-old girl " +
        "wanders into a world ruled by gods, witches, and spirits, and " +
        "where humans are changed into beasts.",
        2001,
        "125 min",
        "PG",
        "http://www.gstatic.com/tv/thumb/dvdboxart/29914/p29914_d_v7_aa.jpg",
        "https://www.youtube.com/watch?v=ByXuk9QqQkk")
    goodfellas = media.Movie(
        "Goodfellas",
        "<NAME> and his friends work their way up through the mob " +
        "hierarchy.",
        1990,
        "146 min",
        "R",
        "http://t0.gstatic.com/images?q=tbn:ANd9GcSkuxYKBhyPQq4e_cbYRDfZRjWkUx2GIKlUpUkHiuVeLg2GhN0D",  # noqa
        "https://www.youtube.com/watch?v=2ilzidi_J8Q")
    conjuring = media.Movie(
        "The Conjuring",
        "Paranormal investigators Ed and Lorraine Warren work to help a " +
        "family terrorized by a dark presence in their farmhouse.",
        2013,
        "112 min",
        "R",
        "http://t2.gstatic.com/images?q=tbn:ANd9GcQnHDbJFDDZYC5g9gHa6-NqBE8JUet_iRIPXJym8CtaHsVQa76M",  # noqa
        "https://www.youtube.com/watch?v=k10ETZ41q5o")
    rush_hour_2 = media.Movie(
        "Rush Hour 2",
        "Carter and Lee head to Hong Kong for vacation, but become " +
        "embroiled in a counterfeit money scam.",
        2001,
        "90 min",
        "PG-13",
        "http://www.gstatic.com/tv/thumb/dvdboxart/28145/p28145_d_v7_aa.jpg",
        "https://www.youtube.com/watch?v=SCTzYY95Aw4")
    indie_gamer = media.Movie(
        "Indie Game: The Movie",
        "The Movie is the first feature documentary film about making video " +
        "games. It looks specifically at the underdogs of the video game " +
        "industry, indie game developers, who sacrifice money, health and " +
        "sanity to realize their lifelong dreams of sharing their visions " +
        "with the world.",
        2012,
        "94 min",
        "Not Rated",
        "https://upload.wikimedia.org/wikipedia/en/f/fc/Indie_Game_The_Movie_poster.png",  # noqa
        "https://www.youtube.com/watch?v=dINgx0y4GqM")
    goonies = media.Movie(
        "The Goonies",
        "In order to save their home from foreclosure, a group of misfits " +
        "set out to find a pirate's ancient treasure.",
        1985,
        "114 min",
        "PG",
        "http://ia.media-imdb.com/images/M/MV5BMTY1Mzk3MTg0M15BMl5BanBnXkFtZTcwOTQzODYyMQ@@._V1_SX640_SY720_.jpg",  # noqa
        "https://www.youtube.com/watch?v=hJ2j4oWdQtU")
    return [
        shawshank, batman, toy_story_2, spirited_away, goodfellas,
        conjuring, rush_hour_2, indie_gamer, goonies
    ]


# Build the static trailer web page and print whatever the helper returns.
print(fresh_tomatoes.open_movies_page(get_movie_list()))
| StarcoderdataPython |
199411 | <reponame>SorosWen/cs501-t1-assessment<gh_stars>0
from flask import Blueprint, request, make_response, jsonify
from flask.views import MethodView
from project.server import bcrypt, db
from project.server.models import User
user_index_blueprint = Blueprint('users', __name__)
class UserIndexAPI(MethodView):
    """User index view.

    GET returns a JSON array with one entry per user, ordered by user id.
    Each entry is a list of "key: value" description strings covering the
    user's id, email (when set), registration timestamp and admin flag.
    """

    def get(self):
        """Return the user index as JSON.

        Returns:
            A (response, status) tuple holding the JSON payload.
        """
        users_table = User.query.order_by(User.id).all()
        # Renamed from `list` to avoid shadowing the builtin of that name.
        user_summaries = []
        for user in users_table:
            user_info = []
            user_info.append("User id: " + str(user.id))
            if user.email:
                user_info.append("email: " + str(user.email))
            user_info.append("registered on: " + str(user.registered_on))
            user_info.append("admin status: " + str(user.admin))
            user_summaries.append(user_info)
        # NOTE(review): 201 (Created) is unusual for a read-only GET; 200
        # may be intended — kept as-is to preserve the existing API contract.
        return make_response(jsonify(user_summaries)), 201
# define the API resources
user_index_view = UserIndexAPI.as_view('users_index_api')

# add Rules for API Endpoints: expose the index view at GET /users/index
user_index_blueprint.add_url_rule(
    '/users/index',
    view_func=user_index_view,
    methods=['GET']
)
198050 | <filename>generateData/constants.py
# Chilean bank-account types used when generating fake account data.
ACCOUNT_TYPES = [
    'Cuenta de ahorro',
    'Cuenta vista',
    'Cuenta corriente',
    'Cuenta rut',
]

# Bank names as they commonly appear on Chilean transfer forms.
BANK_NAMES = [
    'BANCO DE CHILE/EDWARDS CITI',
    'BANCO ESTADO',
    'SCOTIABANK',
    'BCI',
    'CORPBANCA',
    'BICE',
    'HSBC',
    'SANTANDER',
    'ITAU',
    'THE BANK OF TOKYO-MITSUBISHI LTD.',
    'SECURITY',
    'BBVA',
    'DEL DESARROLLO',
    'FALABELLA',
    'RIPLEY',
    'BANCO CONSORCIO',
    'BANCO PARIS',
    'COOPEUCH',
    'INTERNACIONAL',
]

# Alternative, longer bank-name spellings kept for reference (unused).
# BANK_NAMES2 = [
#     'ABN AMRO BANK (CHILE)',
#     'BANCO BICE',
#     'BANCO DE CHILE / EDWARDS',
#     'BANCO DE CREDITO E INVERSIONES',
#     'BANCO DEL DESARROLLO',
#     'BANCO DEL ESTADO DE CHILE',
#     'BANCO FALABELLA',
#     'BANCO INTERNACIONAL',
#     'BANCO ITAU CHILE',
#     'BANCO RIPLEY',
#     'BANCO SANTANDER-CHILE',
#     'BANCO SECURITY',
#     'CORPBANCA',
#     'Caja de compensación Los Héroes',
#     'Coopeuch (Cooperativa de Ahorro y Crédito)',
#     'HNS BANCO',
#     'SCOTIABANK',
# ]

# Numeric ranges for generated RUTs (Chilean national IDs) and account
# numbers.
RUT_LOWER_RANGE = 1000000
RUT_UPPER_RANGE = 22000000
ACCOUNT_NUMBER_LOWER_RANGE = 1000000
ACCOUNT_NUMBER_UPPER_RANGE = 99999999999999999999
BOOLEANS = [True, False]

# Label variants that may precede each field in the generated text
# (empty string means "no label").
RUT_MESSAGES = ['', 'rut', 'run']
ACCOUNT_NUMBER_MESSAGES = ['', 'numero de cuenta',
                           'n', 'n˚ cuenta', 'n˚', 'cuenta']
BANK_NAME_MESSAGES = ['', 'banco']
ACCOUNT_TYPES_MESSAGES = ['', 'cuenta']

# Formatting options: how names and bank fields are combined, which
# separator characters are used, and how generated lines are terminated.
NAMES_OPTIONS = ['name/lastname', 'full-name']
BANK_OPTIONS = ['name/type', 'name-type']
ACCOUNT_TYPES_CARACTERS = ['', ' ', '-']
END_LINE = ['\n', ', ', ' ']
| StarcoderdataPython |
23738 | from typing import Optional
from pydantic import BaseModel, root_validator, validator
from fief.crypto.encryption import decrypt
from fief.db.types import DatabaseType
from fief.errors import APIErrorCode
from fief.schemas.generics import UUIDSchema
from fief.settings import settings
def validate_all_database_settings(cls, values):
    """Shared pydantic root validator for workspace database settings.

    Enforces an all-or-nothing contract: either no database settings are
    given (and no type), or a type is given together with the settings it
    requires — only ``database_name`` for SQLite, every setting otherwise.

    Raises:
        ValueError: with WORKSPACE_CREATE_MISSING_DATABASE_SETTINGS when
            the contract is violated.
    """
    db_type = values.get("database_type")
    setting_fields = (
        "database_host",
        "database_port",
        "database_username",
        "database_password",
        "database_name",
    )
    provided = [values.get(field) for field in setting_fields]

    if db_type is None:
        if any(provided):
            # Settings without a database type are meaningless.
            raise ValueError(APIErrorCode.WORKSPACE_CREATE_MISSING_DATABASE_SETTINGS)
        return values

    if db_type == DatabaseType.SQLITE:
        # SQLite only needs a database name.
        if values.get("database_name") is None:
            raise ValueError(APIErrorCode.WORKSPACE_CREATE_MISSING_DATABASE_SETTINGS)
    elif not all(provided):
        # Server-based databases need the complete connection tuple.
        raise ValueError(APIErrorCode.WORKSPACE_CREATE_MISSING_DATABASE_SETTINGS)

    return values
class WorkspaceCheckConnection(BaseModel):
    """Payload used to test connectivity to a workspace database.

    Unlike WorkspaceCreate, every connection setting is required here.
    """

    database_type: DatabaseType
    database_host: str
    database_port: int
    database_username: str
    database_password: str
    database_name: str

    # Reuse the module-level root validator so the same all-or-nothing
    # rules apply as for workspace creation.
    _validate_all_database_settings = root_validator(allow_reuse=True)(
        validate_all_database_settings
    )
class WorkspaceCreate(BaseModel):
    """Input schema for creating a workspace.

    Database settings are optional as a group: either none are provided
    (managed database) or the set required by ``database_type`` is given —
    enforced by the shared root validator below.
    """

    name: str
    database_type: Optional[DatabaseType]
    database_host: Optional[str]
    database_port: Optional[int]
    database_username: Optional[str]
    database_password: Optional[str]
    database_name: Optional[str]

    _validate_all_database_settings = root_validator(allow_reuse=True)(
        validate_all_database_settings
    )
class BaseWorkspace(UUIDSchema):
    """Common workspace fields shared by internal and public schemas."""

    name: str
    domain: str
class Workspace(BaseWorkspace):
    """Full workspace schema, decrypting database settings on load.

    The settings are stored encrypted at rest; the ``pre=True`` validators
    below transparently decrypt them when the model is constructed.
    """

    database_type: Optional[DatabaseType]
    database_host: Optional[str]
    database_port: Optional[int]
    database_username: Optional[str]
    database_password: Optional[str]
    database_name: Optional[str]

    @validator(
        "database_host",
        "database_username",
        "database_password",
        "database_name",
        pre=True,
    )
    def decrypt_database_setting(cls, value: Optional[str]) -> Optional[str]:
        """Decrypt a string setting; None passes through untouched."""
        if value is None:
            return value
        return decrypt(value, settings.encryption_key)

    @validator("database_port", pre=True)
    def decrypt_database_port(cls, value: Optional[str]) -> Optional[int]:
        """Decrypt the stored port and convert it to an integer."""
        if value is None:
            return value
        return int(decrypt(value, settings.encryption_key))
class WorkspacePublic(BaseWorkspace):
    """Public workspace representation: exposes no database settings."""

    pass
| StarcoderdataPython |
11303661 | <gh_stars>0
import numpy as np
from .agent import Agent
class UCB(Agent):
    """
    Upper-confidence-bound bandit agent.

    NOTE(review): the original docstring said it "emulates the EGreedy
    algorithm described in 'Adversarial Attacks Against Multi-Armed
    Bandits'", but the score below is UCB-style — confirm which is meant.
    """
    def __init__(self, n_arms, sigmas):
        """Create a UCB agent.

        Args:
            n_arms: number of bandit arms; must be at least 2.
            sigmas: per-arm noise scale used in the confidence bonus.
        """
        super().__init__()
        if n_arms<2:
            raise ValueError("number of arms must be greater than 1")
        self.round = 0
        self._means = -np.inf * np.ones(n_arms)  # start pessimistic
        self._action = None
        # Pull count per arm; grows by one each sample_action call.
        self.n_arm_pulls = np.zeros(n_arms)
        self._explore = True
        self.n_arms = n_arms
        self.sigmas = sigmas

    @property
    def explore(self):
        """True if in the most recent round the bandit explored"""
        return self._explore

    @property
    def epsilon(self):
        # NOTE(review): self._epsilon is never assigned anywhere in this
        # class, so reading this property raises AttributeError — confirm.
        return self._epsilon

    @property
    def action(self):
        # Most recent action (arm index) taken by the agent.
        return self._action

    @property
    def means(self):
        """estimated mean reward for each arm"""
        return self._means

    def sample_action(self, action=None):
        """chooses to explore or exploit, then
        samples an action in the environment

        If *action* is given, it is played as-is (forced action);
        otherwise the arm with the highest UCB score is chosen.
        """
        self.round += 1
        if action == None:
            self._explore = False
            # Score = estimated mean + confidence bonus that shrinks as an
            # arm accumulates pulls.
            # NOTE(review): arms with zero pulls divide by zero here (numpy
            # emits a warning and yields inf/nan) — confirm the intended
            # bootstrap behavior for unexplored arms.
            score = self.means + 3*np.sqrt(self.sigmas)*np.sqrt(np.log(self.round)/self.n_arm_pulls)
            action = np.argmax(score)
        self._action = action
        self.n_arm_pulls[action] += 1
        return action

    def update_means(self, reward):
        """Incrementally update the mean estimate of the last pulled arm."""
        if self.means[self.action] == -np.inf:
            # First observed reward replaces the -inf pessimistic sentinel.
            self.means[self.action] = reward
        else:
            previous_sum = self.means[self.action] * (self.n_arm_pulls[self.action] - 1)
            self.means[self.action] = (previous_sum + reward) / self.n_arm_pulls[self.action]
5109907 | <reponame>zanachka/autoextract-poet
import attr
import pytest
from autoextract_poet.items import (
GTIN,
AdditionalProperty,
Address,
Area,
Article,
ArticleFromList,
ArticleList,
AvailableAtOrFrom,
Breadcrumb,
Comment,
Comments,
ForumPost,
ForumPosts,
FuelEfficiency,
Item,
JobPosting,
Location,
MileageFromOdometer,
Offer,
Organization,
PaginationLink,
Product,
ProductFromList,
ProductList,
Rating,
RealEstate,
Review,
Reviews,
Salary,
Topic,
TradeAction,
Vehicle,
VehicleEngine,
)
from tests import crazy_monkey_nullify, load_fixture, temp_seed
from tests.typing import assert_type_compliance
# Canonical AutoExtract API responses captured as JSON fixtures; each file
# contains a one-element list of results, hence the [0].
example_article_result = load_fixture("sample_article.json")[0]
example_article_list_result = load_fixture("sample_article_list.json")[0]
example_product_result = load_fixture("sample_product.json")[0]
example_product_list_result = load_fixture("sample_product_list.json")[0]
example_job_posting_result = load_fixture("sample_job_posting.json")[0]
example_comments_result = load_fixture("sample_comments.json")[0]
example_forum_posts_result = load_fixture("sample_forum_posts.json")[0]
example_real_estate_result = load_fixture("sample_real_estate.json")[0]
example_reviews_result = load_fixture("sample_reviews.json")[0]
example_vehicle_result = load_fixture("sample_vehicle.json")[0]
# Parametrize over every (item class, fixture payload) pair so round-trip
# behavior is checked uniformly for all item types.
@pytest.mark.parametrize(
    "cls, data",
    [(Offer, offer) for offer in example_product_result["product"]["offers"]]
    + [(Breadcrumb, breadcrumb) for breadcrumb in example_product_result["product"]["breadcrumbs"]]  # type: ignore
    + [
        (AdditionalProperty, additionalProperty)  # type: ignore
        for additionalProperty in example_product_result["product"]["additionalProperty"]
    ]
    + [(GTIN, gtin) for gtin in example_product_result["product"]["gtin"]]  # type: ignore
    + [(Rating, example_product_result["product"]["aggregateRating"])]  # type: ignore
    + [(Product, example_product_result["product"])]  # type: ignore
    + [(Article, example_article_result["article"])]  # type: ignore
    + [(ArticleList, example_article_list_result["articleList"])]  # type: ignore
    + [(PaginationLink, example_product_list_result["productList"]["paginationNext"])]  # type: ignore
    + [(ProductList, example_product_list_result["productList"])]  # type: ignore
    + [(JobPosting, example_job_posting_result["jobPosting"])]  # type: ignore
    + [(Comments, example_comments_result["comments"])]  # type: ignore
    + [(ForumPosts, example_forum_posts_result["forumPosts"])]  # type: ignore
    + [(RealEstate, example_real_estate_result["realEstate"])]  # type: ignore
    + [(Reviews, example_reviews_result["reviews"])]  # type: ignore
    + [
        (Vehicle, example_vehicle_result["vehicle"])  # type: ignore
    ],  # type: ignore
)  # type: ignore
@pytest.mark.parametrize("unexpected_attrs", [{}, {"unexpected_attribute": "Should not fail"}])  # type: ignore
def test_item(cls, data, unexpected_attrs):
    """Round-trip every item type through from_dict / asdict.

    Also checks that unknown input keys are preserved in
    _unknown_fields_dict, that randomly nullified payloads still round-trip,
    and that instances reject new attributes and constructors reject
    unexpected keyword arguments.
    """
    assert cls.from_dict(None) is None
    item = cls.from_dict({**data, **unexpected_attrs})
    assert isinstance(item, cls)
    assert attr.asdict(item) == data
    assert item._unknown_fields_dict == unexpected_attrs
    assert_type_compliance(item)
    with temp_seed(7):
        for _ in range(10):
            data_with_holes = crazy_monkey_nullify(data)
            item = cls.from_dict(data_with_holes)
            assert attr.asdict(item) == data_with_holes
    # AttributeError: 'cls' object has no attribute 'foo'
    with pytest.raises(AttributeError):
        item.foo = "bar"
    # TypeError: __init__() got an unexpected argument 'foo'
    with pytest.raises(TypeError):
        cls(**data, foo="bar")
def test_from_list():
    """from_list maps dicts to items, keeps None entries in place, and
    normalizes None/empty input to an empty list."""
    @attr.s(auto_attribs=True, slots=True)
    class Number(Item):
        value: int

    assert Number.from_list(None) == []
    assert Number.from_list([]) == []

    raw_items = [None, dict(value=1), None, dict(value=2)]
    converted = Number.from_list(raw_items)
    assert converted == [None, Number(1), None, Number(2)]
def assert_all_isinstance(lst, cls):
    """Assert that every element of *lst* is an instance of *cls*."""
    for element in lst:
        assert isinstance(element, cls)
def assert_pagination_types(item):
    """Assert that both pagination links on *item* are PaginationLink."""
    for link in (item.paginationNext, item.paginationPrevious):
        assert isinstance(link, PaginationLink)
def test_article_attr_types():
    """Nested fields of Article materialize as typed item objects."""
    item = Article.from_dict(example_article_result["article"])
    assert isinstance(item, Article)
    assert_all_isinstance(item.breadcrumbs, Breadcrumb)


def test_article_list_attr_types():
    """ArticleList carries typed pagination links and article entries."""
    item = ArticleList.from_dict(example_article_list_result["articleList"])
    assert isinstance(item, ArticleList)
    assert_pagination_types(item)
    assert_all_isinstance(item.articles, ArticleFromList)


def test_product_attr_types():
    """Product's nested collections and rating use the typed item classes."""
    item = Product.from_dict(example_product_result["product"])
    assert isinstance(item, Product)
    assert_all_isinstance(item.offers, Offer)
    assert_all_isinstance(item.gtin, GTIN)
    assert_all_isinstance(item.breadcrumbs, Breadcrumb)
    assert_all_isinstance(item.additionalProperty, AdditionalProperty)
    assert isinstance(item.aggregateRating, Rating)


def test_product_list_attr_types():
    """ProductList nests typed products, breadcrumbs and pagination."""
    item = ProductList.from_dict(example_product_list_result["productList"])
    assert isinstance(item, ProductList)
    assert_pagination_types(item)
    assert_all_isinstance(item.products, ProductFromList)
    assert_all_isinstance(item.breadcrumbs, Breadcrumb)
    product = item.products[0]
    assert isinstance(product.aggregateRating, Rating)
    assert_all_isinstance(product.offers, Offer)


def test_job_posting_attr_types():
    """JobPosting nests typed salary, organization and location."""
    item = JobPosting.from_dict(example_job_posting_result["jobPosting"])
    assert isinstance(item, JobPosting)
    assert isinstance(item.baseSalary, Salary)
    assert isinstance(item.hiringOrganization, Organization)
    assert isinstance(item.jobLocation, Location)


def test_comments_attr_types():
    """Comments wraps a list of typed Comment items."""
    item = Comments.from_dict(example_comments_result["comments"])
    assert isinstance(item, Comments)
    assert_all_isinstance(item.comments, Comment)


def test_forum_posts_attr_types():
    """ForumPosts nests typed posts and a Topic."""
    item = ForumPosts.from_dict(example_forum_posts_result["forumPosts"])
    assert isinstance(item, ForumPosts)
    assert_all_isinstance(item.posts, ForumPost)
    assert isinstance(item.topic, Topic)


def test_real_estate_attr_types():
    """RealEstate nests typed breadcrumbs, address, area and trade actions."""
    item = RealEstate.from_dict(example_real_estate_result["realEstate"])
    assert isinstance(item, RealEstate)
    assert_all_isinstance(item.breadcrumbs, Breadcrumb)
    assert_all_isinstance(item.additionalProperty, AdditionalProperty)
    assert isinstance(item.address, Address)
    assert isinstance(item.area, Area)
    assert_all_isinstance(item.tradeActions, TradeAction)


def test_reviews_attr_types():
    """Reviews nests typed Review entries with a Rating each."""
    item = Reviews.from_dict(example_reviews_result["reviews"])
    assert isinstance(item, Reviews)
    assert_pagination_types(item)
    assert_all_isinstance(item.reviews, Review)
    review = item.reviews[0]
    assert isinstance(review.reviewRating, Rating)


def test_vehicle_attr_types():
    """Vehicle nests the full set of typed sub-objects."""
    item = Vehicle.from_dict(example_vehicle_result["vehicle"])
    assert isinstance(item, Vehicle)
    assert_all_isinstance(item.offers, Offer)
    assert_all_isinstance(item.breadcrumbs, Breadcrumb)
    assert_all_isinstance(item.additionalProperty, AdditionalProperty)
    assert_all_isinstance(item.fuelEfficiency, FuelEfficiency)
    assert isinstance(item.aggregateRating, Rating)
    assert isinstance(item.mileageFromOdometer, MileageFromOdometer)
    assert isinstance(item.vehicleEngine, VehicleEngine)
    assert isinstance(item.availableAtOrFrom, AvailableAtOrFrom)
| StarcoderdataPython |
1743386 | #
# Copyright (c) 2013-2022 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copyright
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
# ===============================================================================================
"""
This module contains classes that are used in creating spatial and spatial/temporal indices.
It contains the following import shortcuts:
```python
from pygw.index import IndexBuilder
from pygw.index import Index
from pygw.index import SpatialIndexBuilder
from pygw.index import SpatialTemporalIndexBuilder
```
"""
from .index_builder import IndexBuilder
from .index import Index
from .spatial_index_builder import SpatialIndexBuilder
from .spatial_temporal_index_builder import SpatialTemporalIndexBuilder
| StarcoderdataPython |
1762985 | <filename>helpers/draw.py
# Copyright 2019 D-Wave Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http: // www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
import networkx as nx
import sys
from bokeh.io import show, output_notebook
from bokeh.models import Plot, Range1d, MultiLine, Circle, Label, LabelSet, ColumnDataSource
from bokeh.models import WheelZoomTool, ZoomInTool, ZoomOutTool, ResetTool, PanTool
from bokeh.models.graphs import from_networkx
# Initialize Bokeh notebook output exactly once per interpreter session:
# a flag set on this very module guards against re-running on re-import.
me = sys.modules[__name__]
if not hasattr(me, 'bokeh_loaded'):
    output_notebook()
    bokeh_loaded = True
def plot_bqm(bqm):
    """Plot binary quadratic model as a labeled graph.

    Builds a networkx graph from the BQM's variables (nodes) and quadratic
    terms (edges), lays it out with a spring layout, and renders it as an
    interactive Bokeh plot with per-node labels.
    """
    g = nx.Graph()
    g.add_nodes_from(bqm.variables)
    g.add_edges_from(bqm.quadratic)

    plot_size = 400
    text_size = '16pt'
    # NOTE(review): text_size is never used below — the label font size is
    # hard-coded to '14pt'; confirm which was intended.

    graph = from_networkx(g, nx.spring_layout)
    graph.node_renderer.glyph = Circle(size=35, fill_color='purple', fill_alpha=0.25)
    graph.edge_renderer.glyph = MultiLine(line_alpha=0.8, line_width=2)

    # Compute positions for the text labels.
    # NOTE(review): this spring layout is computed independently of the one
    # used by from_networkx above, so labels may not coincide exactly with
    # the drawn nodes — confirm.
    pos = nx.spring_layout(g)
    data = {'xpos': [], 'ypos': [], 'label': []}
    for label, loc in pos.items():
        data['label'].append(label)
        data['xpos'].append(loc[0])
        data['ypos'].append(loc[1])
    labels = LabelSet(x='xpos', y='ypos', text='label', level='glyph',
                      source=ColumnDataSource(data), x_offset=-1, y_offset=-1,
                      text_color="blue", text_font_size='14pt', text_font_style='bold')

    plot = Plot(plot_width=plot_size, plot_height=plot_size, x_range=Range1d(-1.3, 1.3), y_range=Range1d(-1.3, 1.3))
    plot.title.text = "BQM with {} nodes and {} edges".format(len(bqm), len(bqm.quadratic))
    # Mouse-wheel zoom is made the active scroll tool by default.
    tools = [WheelZoomTool(), ZoomInTool(), ZoomOutTool(), PanTool(), ResetTool()]
    plot.add_tools(*tools)
    plot.toolbar.active_scroll = tools[0]
    plot.renderers.append(graph)
    plot.add_layout(labels)
    plot.background_fill_color = "lightyellow"
    show(plot)
def plot_feature_selection(features, selected_features):
    """Render a grid showing which features are selected per subset size.

    Args:
        features: sequence of feature names, used as x-axis labels.
        selected_features: 2-D truthy/falsy array; truthy cells are drawn
            red. Assumed shape (len(features), len(features)), one row per
            number of selected features — TODO confirm with the caller.
    """
    fig = plt.figure(figsize=(6, 6))
    ax = fig.add_axes([0.1, 0.3, .9, .7])
    ax.set_title("Best Feature Selection")
    ax.set_ylabel('Number of Selected Features')
    ax.set_xticks(np.arange(len(features)))
    ax.set_xticklabels(features, rotation=90)
    ax.set_yticks(np.arange(len(features)))
    ax.set_yticklabels(np.arange(1, len(features)+1))
    # Set a grid on minor ticks so each cell gets a black border.
    ax.set_xticks(np.arange(-0.5, len(features)), minor=True)
    ax.set_yticks(np.arange(-0.5, len(features)), minor=True)
    ax.grid(which='minor', color='black')
    ax.imshow(selected_features, cmap=colors.ListedColormap(['white', 'red']))
| StarcoderdataPython |
1998192 | from django.urls import path
from django.conf.urls import url
from voucher.views import VoucherDetail, CreateVoucherList, upload_email_list, upload_code_list, CreateOrganizationInVoucherList, VoucherTypeList
# URL routes for the voucher app. The specific 'voucher/...' endpoints are
# registered before the catch-all 'voucher/<str:pk>' detail route so that
# Django matches them first.
urlpatterns = [
    path('voucher/', CreateVoucherList.as_view()),
    path('voucher/addEmails/', upload_email_list, name='upload-email' ),
    path('voucher/addCodes/', upload_code_list, name='upload-code' ),
    path('voucher/organization', CreateOrganizationInVoucherList.as_view()),
    path('voucher/type', VoucherTypeList.as_view()),
    path('voucher/<str:pk>', VoucherDetail.as_view())
]
1675511 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
import unittest
from unittest import mock
from pastepwn.actions.basicaction import BasicAction
from pastepwn.analyzers.bcrypthashanalyzer import BcryptHashAnalyzer
class TestBcryptHashAnalyzer(unittest.TestCase):
    """Unit tests for BcryptHashAnalyzer's bcrypt-hash pattern matching."""

    def setUp(self):
        # Analyzer under test with no actions attached; the paste is a mock
        # whose .body is set per test.
        self.analyzer = BcryptHashAnalyzer(None)
        self.paste = mock.Mock()

    def test_match(self):
        """Valid bcrypt strings match; near-misses and other hashes do not."""
        valid_hashes = ["$2a$10$BIgnlSmYE8qYiONM0NQ53eRWBw5G4HIJEbXKzcsRVt.08IDnqH/V.",
                        "$2a$11$EppiRqR0kG9EKy56edDWTOnsv/oGW0dqAJB9ucmn3augbmcm8v/iy",
                        "$2b$10$FpOpno43SIE8e1hWnlOdR.9hG2J8dd5FD1kQq8hn4zLdKa5eIiFUO",
                        "$2a$10$SVy7GlMnWsemiZByHSnV0O3WoEHGImFt8v07uH.K3ZXwH5j9o/DP.",
                        "$2a$10$59SNkcZ0rdC2VgeWaavVyea9PFget/xmtbV7.9IeJl3CUq.Q954i2",
                        "$2b$10$Y8CJ9YIwrxt1YYgMqqU1delCYoTpIl18SRtYYI2kyM3jduKPHvWMC",
                        "$2a$10$2tNCUb.FUpSutyhkbqmMBuNnLzhqI4q9Miqurnj6eu.XsiIjww7I6",
                        "$2a$10$OyrADUFmj9QEqsd8frkEDOEYSPQalW5qoI1s2z6taCWwgUsjKzk5m"
                        ]
        # SHA/MD5 digests, malformed bcrypt variants and plain text must all
        # be rejected.
        invalid_hashes = ["7168D46050573DDA4CE409FA1515638BD28E86346D45F686310ED0678172BABCD4117FD15DD380B964352FE879FB745B573A730D526BB1188B2790FBA06E8ACA",
                          "5FD924625F6AB16A19CC9807C7C506AE1813490E4BA675F843D5A10E0BAACDB8",
                          "522F02FEA11E70C03C90C247C50410443246BFCB",
                          "8433FD5A3B0ED71D21CFB9F291BD89B9",
                          "$2a$124$SVy7GlMnWsemiZByHSnV0O3WoEHGImFt8v07uH.K3ZXwH5j9o/DP.a",
                          "$2a$12$SVy7GlMnWsemiZByHSnV0O3WoEHGImFt8v{07uH.K3ZXwH5j9o/DP.a",
                          "$2a$14$SVy7GlMnWsemiZByHSnV0O3WGImFt8v07uH.K3ZXwH5j9o/DP.a",
                          "@x,Y8q+jnYeZr$;",
                          "This is a test",
                          "$2a$10$ asdf 1234 how are you?"]
        for test_hash in valid_hashes:
            self.paste.body = test_hash
            self.assertTrue(self.analyzer.match(self.paste), test_hash)
        for test_hash in invalid_hashes:
            self.paste.body = test_hash
            self.assertFalse(self.analyzer.match(self.paste), test_hash)

    def test_intext(self):
        """Test if matches inside text are recognized"""
        self.paste.body = "We now have a hashe inside the text: $2a$11$EppiRqR0kG9EKy56edDWTOnsv/oGW0dqAJB9ucmn3augbmcm8v/iy and some text here!"
        self.assertTrue(self.analyzer.match(self.paste))

    def test_multiple(self):
        """Test if multiple matches are recognized"""
        self.paste.body = "We now have a hashe inside the text: $2a$11$EppiRqR0kG9EKy56edDWTOnsv/oGW0dqAJB9ucmn3augbmcm8v/iy and some text here!" \
                          "Also there is $2a$10$OyrADUFmj9QEqsd8frkEDOEYSPQalW5qoI1s2z6taCWwgUsjKzk5m as another one"
        match = self.analyzer.match(self.paste)
        self.assertTrue(match)
        self.assertEqual("$2a$11$EppiRqR0kG9EKy56edDWTOnsv/oGW0dqAJB9ucmn3augbmcm8v/iy", match[0])
        self.assertEqual("$2a$10$OyrADUFmj9QEqsd8frkEDOEYSPQalW5qoI1s2z6taCWwgUsjKzk5m", match[1])

    def test_match_none(self):
        """A None body or None paste must not match (and must not raise)."""
        self.paste.body = None
        self.assertFalse(self.analyzer.match(self.paste))
        self.paste = None
        self.assertFalse(self.analyzer.match(self.paste))

    def test_match_empty(self):
        """An empty body must not match."""
        self.paste.body = ""
        self.assertFalse(self.analyzer.match(self.paste))

    def test_actions_present(self):
        """Actions passed to the constructor are stored on the analyzer."""
        action = mock.MagicMock(spec=BasicAction)
        analyzer = BcryptHashAnalyzer(action)
        self.assertEqual([action], analyzer.actions)
| StarcoderdataPython |
3401600 | <gh_stars>0
class Stack:
    """A minimal LIFO stack backed by a Python list."""

    def __init__(self):
        self.items = []

    def push(self, item):
        """Place *item* on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top item."""
        return self.items.pop()

    def is_empty(self):
        """Return True when the stack holds no items."""
        return self.items == []

    def peek(self):
        """Return the top item without removing it; print a notice if empty."""
        if self.is_empty():
            print("Stack is Empty")
        else:
            return self.items[-1]

    def get_stack(self):
        """Return the underlying list of items."""
        return self.items

    def size(self):
        """Return the number of stored items."""
        return len(self.items)


def reverse_string(stack, input_str):
    """Reverse *input_str* by pushing each character onto *stack* and then
    popping them all back off in LIFO order."""
    for ch in input_str:
        stack.push(ch)
    rev_str = ""
    while not stack.is_empty():
        rev_str += stack.pop()
    return rev_str
| StarcoderdataPython |
1897665 | <reponame>zzz0906/LeetCode
import bisect
from sortedcontainers import SortedList
class Solution:
    def createSortedArray(self, instructions: List[int]) -> int:
        """O(NlogN) / O(N)

        Total cost of inserting each value into a sorted array, where the
        cost of one insertion is min(#strictly smaller, #strictly greater)
        among the elements already inserted. A SortedList gives O(log N)
        rank queries and insertion.
        """
        ans = 0
        sorted_insts = SortedList()
        for inst in instructions:  # O(N)
            # Elements strictly less than inst already present.
            l = sorted_insts.bisect_left(inst)  # O(logN)
            # Elements strictly greater than inst already present.
            r = len(sorted_insts) - sorted_insts.bisect(inst)  # O(logN)
            ans += min(l, r)
            sorted_insts.add(inst)  # O(logN)
        return ans % (10**9 + 7)
8102209 | <reponame>nebulae/ntntn.io<filename>app/src/handlers.py
import os
import webapp2
import jinja2
import json
import logging
import datetime
from time import mktime
from google.appengine.api import users
from google.appengine.api import images
from google.appengine.api import search
from google.appengine.ext import ndb
from decimal import *
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader('templates'),
extensions=['jinja2.ext.autoescape'])
class CustomJsonEncoder(json.JSONEncoder):
    """JSON encoder for the App Engine / NDB types used in this app."""

    def default(self, obj):
        if isinstance(obj, Decimal):
            return float(obj)
        if isinstance(obj, datetime.datetime):
            # Serialized as seconds since the epoch (via time.mktime).
            return long(mktime(obj.timetuple()))
        if isinstance(obj, ndb.Key):
            # URL-safe opaque key string.
            return str(obj.urlsafe())
        if isinstance(obj, ndb.GeoPt):
            return {'lat': obj.lat, 'lon': obj.lon}
        if isinstance(obj, search.GeoPoint):
            return {'lat': obj.latitude, 'lon': obj.longitude}
        if isinstance(obj, ndb.BlobProperty):
            # NOTE(review): this tests against the property descriptor class,
            # not blob *values*; and since obj is an instance here, the
            # condition is always True — confirm the intended behavior.
            return True if obj is not None else False
        return json.JSONEncoder.default(self, obj)
#
# Base Class for all requests which redirect to a template.
#
class BaseTemplateHandler(webapp2.RequestHandler):
    """Base class for all requests which redirect to a template."""

    @webapp2.cached_property
    def jinja2(self):
        # NOTE(review): this cached helper is not used by render_template
        # below, which goes through the module-level JINJA_ENVIRONMENT
        # instead — confirm which renderer is intended.
        return jinja2.get_jinja2(app=self.app)

    def render_template(self, filename, template_args):
        """Render *filename* with *template_args*, injecting auth context.

        Adds 'auth_url' and 'auth_text' entries (plus 'user' when a user is
        signed in) so templates can show a login or logout control.
        """
        template = JINJA_ENVIRONMENT.get_template(filename)
        if users.get_current_user():
            logout_url = users.create_logout_url('/')
            template_args['auth_url'] = logout_url
            template_args['user'] = users.get_current_user()
            template_args['auth_text'] = ''
        else:
            template_args['auth_url'] = users.create_login_url(dest_url='/')
            template_args['auth_text'] = '<img src="image/dt-login.png" />'
        self.response.write(template.render(template_args))
#
# Base Class for all requests that redirect to a template and require authentication.
#
class AuthenticatedTemplateHandler(BaseTemplateHandler):
    """Template handler requiring a signed-in user.

    Unauthenticated requests are redirected to '/'.
    """

    def dispatch(self):
        user = users.get_current_user()
        if user:
            super(AuthenticatedTemplateHandler, self).dispatch()
        else:
            self.redirect('/')
#
# Base Class for all requests that redirect to a template and require administrative authentication.
#
class AdminAuthenticatedTemplateHandler(BaseTemplateHandler):
    """Template handler requiring a signed-in administrator.

    Non-admin requests are redirected to '/'.
    """

    def dispatch(self):
        admin = users.is_current_user_admin()
        if admin:
            super(AdminAuthenticatedTemplateHandler, self).dispatch()
        else:
            self.redirect('/')
#
# Base Class for all requests which need a json response.
#
class BaseJsonResponseHandler(webapp2.RequestHandler):
    """Base class for all requests which need a json response."""

    def render_json(self, dict):
        # NOTE(review): the parameter name shadows the builtin `dict`;
        # kept as-is here, but worth renaming.
        callback = self.request.get('callback')
        self.response.headers['Content-Type'] = 'application/json'
        if callback:
            # JSONP support: wrap the JSON payload in the caller-supplied
            # callback function name.
            self.response.out.write('{0}({1})'.format(callback, json.dumps(dict, cls=CustomJsonEncoder)))
        else:
            self.response.out.write(json.dumps(dict, cls=CustomJsonEncoder))
#
# Base Class for all requests which need a json response and require authentication.
#
class AuthenticatedJsonHandler(BaseJsonResponseHandler):
    """JSON handler that rejects unauthenticated requests with HTTP 401."""

    def dispatch(self):
        # Guard clause: bail out with 401 unless a user is signed in.
        if not users.get_current_user():
            self.abort(401, detail="You must be an authenticated user to access this resource.")
        super(AuthenticatedJsonHandler, self).dispatch()
#
# Base Class for all requests which need a json response and require administrative authentication.
#
class AdminAuthenticatedJsonHandler(BaseJsonResponseHandler):
    """JSON handler that rejects non-administrators with HTTP 401."""

    def dispatch(self):
        admin = users.is_current_user_admin()
        if admin:
            super(AdminAuthenticatedJsonHandler, self).dispatch()
        else:
            self.abort(401, detail="You must be an authenticated administrator to access this resource.")
| StarcoderdataPython |
5024397 | <reponame>clach04/reviewboard<gh_stars>1-10
from django import forms
def validate_users(form, field='users'):
    """Validates that the users all have valid, matching LocalSites.

    This will compare the LocalSite associated with the form to that of
    each added User. If the form has a LocalSite set, then all Users are
    required to be a part of that LocalSite. Otherwise, any User is allowed.
    """
    site = form.cleaned_data['local_site']
    members = form.cleaned_data.get(field, [])

    if not site:
        # No LocalSite on the form: any user is acceptable.
        return members

    for member in members:
        belongs_to_site = member.local_site.filter(pk=site.pk).exists()
        if not belongs_to_site:
            raise forms.ValidationError(
                ["The user %s is not a member of this site."
                 % member.username])

    return members
def validate_review_groups(form, field='review_groups'):
    """Validates that the review groups all have valid, matching LocalSites.

    This will compare the LocalSite associated with the form to that of
    each added Group. Each Group must have the same LocalSite that the form
    is using.
    """
    site = form.cleaned_data['local_site']

    for review_group in form.cleaned_data.get(field, []):
        if review_group.local_site != site:
            # Report as nonexistent rather than leaking that the group
            # exists on a different site.
            raise forms.ValidationError(
                ["The review group %s does not exist." % review_group.name])

    return form.cleaned_data.get(field, [])
| StarcoderdataPython |
3399700 | <gh_stars>0
import single_coin_toss
def batch_toss_coins(batch_size):
    """Toss a coin *batch_size* times and return how many came up heads."""
    return sum(
        1 for _ in range(batch_size) if single_coin_toss.toss_coin()
    )
3550975 | from os import getcwd
from os.path import join as pathjoin
from sciunit import settings as sciunit_settings
from cognibench.settings import settings
import sys
import os
sys.path.insert(0, os.getcwd())
from model_defs import PsPMModel
from libcommon import util
# Fail fast inside cognibench/sciunit instead of swallowing errors.
settings["CRASH_EARLY"] = True
sciunit_settings["CWD"] = getcwd()

# Relative path of the experiment-6 model package and where results land.
MODEL_PATH = "exp6"
EXP_OUTPUT_PATH = pathjoin(util.OUT_PATH, MODEL_PATH)
# Fixation-angle hyperparameter values to compare; one model per angle.
ANGLE_LIST = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]

if __name__ == "__main__":
    os.makedirs(EXP_OUTPUT_PATH, exist_ok=True)
    # prepare models: one PsPMModel per candidate fixation angle
    models = [
        PsPMModel(
            lib_paths=util.LIB_PATHS,
            import_base_path=MODEL_PATH,
            predict_fn="fit_all",
            model_spec={"fixation_angle": angle},
            name=f"Fixation angle: {angle}",
        )
        for angle in ANGLE_LIST
    ]
    suite = util.get_test_suite(EXP_OUTPUT_PATH)
    # judge all models against the suite, print the score matrix, and
    # persist it as CSV alongside the other experiment outputs
    sm = suite.judge(models)
    print(sm)
    sm_df = util.sm_to_pandas(sm)
    sm_df.to_csv(pathjoin(EXP_OUTPUT_PATH, "score_matrix.csv"), na_rep="NULL")
| StarcoderdataPython |
5100856 | <filename>test/test_hsm.py
from unittest import TestCase
from mock import patch, call
from flock.hsm import Hsm, HsmState
class TestHsm(Hsm):
    """Minimal concrete hierarchical state machine used by the unit tests.

    Wires up two states: State1 handles the first event and transitions
    to State1_1, which handles (and absorbs) all subsequent events.
    The last handled payload is recorded on ``self.data``.
    """
    class BaseState(HsmState):
        def on_data(self, hsm, data):
            # Default behaviour: ignore the event and remain in this state.
            return self

    class State1(BaseState):
        def on_data(self, hsm, data):
            # Record the event prefixed with this state's name, then move on.
            hsm.data = "State1" + data
            return hsm.state1_1

    class State1_1(BaseState):
        def on_data(self, hsm, data):
            # Record the event and stay in this state.
            hsm.data = "State1_1" + data
            return self

    def __init__(self):
        super(TestHsm, self).__init__()
        # State instances are created once and reused across transitions.
        self.state1 = TestHsm.State1()
        self.state1_1 = TestHsm.State1_1()
        self.data = None
        # NOTE(review): transition()/dispatch() come from the Hsm base class
        # (not visible here); presumed to set/advance ``current_state``.
        self.transition(self.state1)

    def on_data(self, data):
        # Delegate the event to whatever state is currently active.
        return self.dispatch(self.current_state.on_data, data)
class HsmTestCase(TestCase):
    """Exercises TestHsm state transitions and event handling."""

    def test_init(self):
        """A freshly constructed HSM starts in state1."""
        hsm = TestHsm()
        self.assertIs(hsm.state1, hsm.current_state)

    def test_dispatch_one(self):
        """The first event is handled by State1 and moves the HSM on."""
        hsm = TestHsm()
        hsm.on_data("one")
        self.assertEqual("State1one", hsm.data)
        self.assertIs(hsm.state1_1, hsm.current_state)

    def test_dispatch_two(self):
        """The second event is handled by State1_1, which keeps control."""
        hsm = TestHsm()
        hsm.on_data("one")
        hsm.on_data("two")
        self.assertEqual("State1_1two", hsm.data)
        self.assertIs(hsm.state1_1, hsm.current_state)
| StarcoderdataPython |
3425539 | <reponame>lordkyzr/launchkey-python
import unittest
from mock import patch
from formencode import Invalid
from datetime import datetime
from launchkey.exceptions import AuthorizationInProgress
class TestAuthorizationInProgressException(unittest.TestCase):
    """Tests for AuthorizationInProgress error-data parsing and warnings."""

    def setUp(self):
        # Patch the warnings module and the validator inside
        # launchkey.exceptions; patch.stopall via addCleanup undoes both.
        self.warnings_patch = \
            patch("launchkey.exceptions.warnings").start()
        self.validator_patch = patch(
            "launchkey.exceptions.AuthorizationInProgressValidator").start()
        self.addCleanup(patch.stopall)

    def test_warning_thrown_when_data_is_not_expected(self):
        """A validator failure emits a warning that embeds the raw data."""
        self.validator_patch.side_effect = Invalid("Error Message", "Value", "State")
        AuthorizationInProgress(
            "Error Detail", 400,
            error_data="Error Data"
        )
        self.warnings_patch.warn.assert_called_with(
            "Failed to parse AuthorizationInProgress data: "
            "exception: Error Message "
            "data: Error Data"
        )

    def test_warning_not_thrown_when_data_is_expected(self):
        """No warning is issued when the error data validates cleanly."""
        AuthorizationInProgress(
            "Error Detail", 400
        )
        self.warnings_patch.warn.assert_not_called()

    def test_defaults_when_data_is_not_expected(self):
        """Parse failure leaves all derived attributes as None."""
        self.validator_patch.side_effect = Invalid("Message", "Value", "State")
        exception = AuthorizationInProgress(
            "Error Detail", 400
        )
        self.assertIsNone(exception.from_same_service)
        self.assertIsNone(exception.expires)
        self.assertIsNone(exception.authorization_request_id)

    def test_validator_receives_input_data(self):
        """The raw error_data is forwarded to the validator unchanged."""
        AuthorizationInProgress(
            "Error Detail",
            400,
            error_data="input data"
        )
        self.validator_patch.return_value.to_python.assert_called_with(
            "input data"
        )

    def test_parsed_data(self):
        """Validated fields are mapped onto the exception's attributes."""
        self.validator_patch.return_value.to_python.return_value = {
            "from_same_service": True,
            "auth_request": "fdd7fd97-f432-11e8-a00c-acde48001122",
            "expires": datetime(2018, 1, 1, 1)
        }
        exception = AuthorizationInProgress(
            "Error Detail", 400
        )
        self.assertTrue(exception.from_same_service)
        self.assertEqual(
            exception.expires,
            datetime(2018, 1, 1, 1)
        )
        self.assertEqual(
            exception.authorization_request_id,
            "fdd7fd97-f432-11e8-a00c-acde48001122"
        )
| StarcoderdataPython |
5141143 | # https://towardsdatascience.com/scraping-table-data-from-pdf-files-using-a-single-line-in-python-8607880c750
import tabula
infile_name ='/Users/craig/Documents/GEORGIAN-MEGRELIAN-LAZ-SVAN-ENGLISH_DICTIONARY.pdf'
table = tabula.read_pdf(infile_name,pages=1)
table[0]
| StarcoderdataPython |
9781707 | <gh_stars>0
import heterocl as hcl
import numpy as np
import time
import math
#import plotly.graph_objects as go
from compute_graphs.custom_graph_functions import *
from plots.plotting_utilities import *
from user_definer import *
from argparse import ArgumentParser
from compute_graphs.graph_3d import *
from compute_graphs.graph_4d import *
from compute_graphs.graph_5d import *
from compute_graphs.graph_6d import *
import scipy.io as sio
def main():
    """Compile the HJ PDE solver for the configured grid dimension, integrate
    the value function over the time horizon ``tau``, and optionally plot
    the final result as an isosurface.

    Relies on module-level objects from user_definer: ``g`` (grid),
    ``my_shape`` (initial value/target set) and ``tau`` (time stamps).
    """
    ################### PARSING ARGUMENTS FROM USERS #####################
    parser = ArgumentParser()
    parser.add_argument("-p", "--plot", default=True, type=bool)
    # Print out LLVM option only
    parser.add_argument("-l", "--llvm", default=False, type=bool)
    args = parser.parse_args()
    hcl.init()
    hcl.config.init_dtype = hcl.Float()
    ################# INITIALIZE DATA TO BE INPUT INTO EXECUTABLE ##########################
    print("Initializing\n")
    # V_0 holds the previous value function, V_1 receives the update.
    V_0 = hcl.asarray(my_shape)
    V_1 = hcl.asarray(np.zeros(tuple(g.pts_each_dim)))
    l0 = hcl.asarray(my_shape)
    #probe = hcl.asarray(np.zeros(tuple(g.pts_each_dim)))
    #obstacle = hcl.asarray(cstraint_values)
    # Per-axis grid coordinate vectors, flattened to 1-D.
    list_x1 = np.reshape(g.vs[0], g.pts_each_dim[0])
    list_x2 = np.reshape(g.vs[1], g.pts_each_dim[1])
    list_x3 = np.reshape(g.vs[2], g.pts_each_dim[2])
    if g.dims >= 4:
        list_x4 = np.reshape(g.vs[3], g.pts_each_dim[3])
    if g.dims >= 5:
        list_x5 = np.reshape(g.vs[4], g.pts_each_dim[4])
    if g.dims >= 6:
        list_x6 = np.reshape(g.vs[5], g.pts_each_dim[5])
    # Convert to hcl array type
    list_x1 = hcl.asarray(list_x1)
    list_x2 = hcl.asarray(list_x2)
    list_x3 = hcl.asarray(list_x3)
    if g.dims >= 4:
        list_x4 = hcl.asarray(list_x4)
    if g.dims >= 5:
        list_x5 = hcl.asarray(list_x5)
    if g.dims >= 6:
        list_x6 = hcl.asarray(list_x6)
    # Get executable
    if g.dims == 3:
        solve_pde = graph_3D()
    if g.dims == 4:
        solve_pde = graph_4D()
    if g.dims == 5:
        solve_pde = graph_5D()
    if g.dims == 6:
        solve_pde = graph_6D()
    # Print out code for different backend
    # print(solve_pde)
    ################ USE THE EXECUTABLE ############
    # Variables used for timing
    execution_time = 0
    lookback_time = 0
    tNow = tau[0]
    for i in range(1, len(tau)):
        #tNow = tau[i-1]
        # t_minh carries [current time, target time]; the kernel advances
        # tNow in-place, so it is read back after every call.
        t_minh = hcl.asarray(np.array((tNow, tau[i])))
        while tNow <= tau[i] - 1e-4:
            # Start timing
            start = time.time()
            print("Started running\n")
            # Run the execution and pass input into graph
            if g.dims == 3:
                solve_pde(V_1, V_0, list_x1, list_x2, list_x3, t_minh, l0)
            if g.dims == 4:
                solve_pde(V_1, V_0, list_x1, list_x2,
                          list_x3, list_x4, t_minh, l0)
            if g.dims == 5:
                solve_pde(V_1, V_0, list_x1, list_x2, list_x3,
                          list_x4, list_x5, t_minh, l0)
            if g.dims == 6:
                solve_pde(V_1, V_0, list_x1, list_x2, list_x3,
                          list_x4, list_x5, list_x6, t_minh, l0)
            tNow = np.asscalar((t_minh.asnumpy())[0])
            # Calculate computation time
            execution_time += time.time() - start
            # Some information printing
            print(t_minh)
            print("Computational time to integrate (s): {:.5f}".format(
                time.time() - start))
    # Saving data into disk
    # Time info printing
    print("Total kernel time (s): {:.5f}".format(execution_time))
    print("Finished solving\n")
    # V1 is the final value array, fill in anything to use it
    # e.g. np.save("final_values", V_1.asnumpy())
    ##################### PLOTTING #####################
    if args.plot:
        # plot Value table when speed is maximum
        plot_isosurface(g, V_1.asnumpy(), [0, 1, 3], g.pts_each_dim[2] - 1)
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3265242 | # coding: utf-8
from __future__ import print_function, unicode_literals
import sys
import signal
import threading
from .broker_util import ExceptionalQueue
from .httpsrv import HttpSrv
from .util import FAKE_MP
from copyparty.authsrv import AuthSrv
class MpWorker(object):
    """One single multiprocessing worker instance.

    Receives commands from the broker over ``q_pend`` and sends log lines
    and replies back over ``q_yield``. The constructor blocks on the main
    loop thread, so creating an MpWorker runs it until shutdown.
    """
    def __init__(self, q_pend, q_yield, args, n):
        # q_pend: incoming [retq_id, dest, args] commands from the broker
        # q_yield: outgoing messages/replies back to the broker
        # n: this worker's index, used for log tagging ("mpN")
        self.q_pend = q_pend
        self.q_yield = q_yield
        self.args = args
        self.n = n
        # Quiet mode (-q without -lo) swaps in a no-op logger.
        self.log = self._log_disabled if args.q and not args.lo else self._log_enabled
        # Pending reply queues for put(want_retval=True), keyed by queue id.
        self.retpend = {}
        self.retpend_mutex = threading.Lock()
        self.mutex = threading.Lock()

        # we inherited signal_handler from parent,
        # replace it with something harmless
        if not FAKE_MP:
            for sig in [signal.SIGINT, signal.SIGTERM, signal.SIGUSR1]:
                signal.signal(sig, self.signal_handler)

        # starting to look like a good idea
        self.asrv = AuthSrv(args, None, False)

        # instantiate all services here (TODO: inheritance?)
        self.httpsrv = HttpSrv(self, n)

        # on winxp and some other platforms,
        # use thr.join() to block all signals
        thr = threading.Thread(target=self.main, name="mpw-main")
        thr.daemon = True
        thr.start()
        thr.join()

    def signal_handler(self, sig, frame):
        # Intentionally ignore signals; the parent process coordinates
        # shutdown through the "shutdown" queue message instead.
        # print('k')
        pass

    def _log_enabled(self, src, msg, c=0):
        # Forward log lines to the parent process for printing.
        self.q_yield.put([0, "log", [src, msg, c]])

    def _log_disabled(self, src, msg, c=0):
        pass

    def logw(self, msg, c=0):
        """Log a message tagged with this worker's index."""
        self.log("mp{}".format(self.n), msg, c)

    def main(self):
        """Command loop: pop [retq_id, dest, args] messages and dispatch."""
        while True:
            retq_id, dest, args = self.q_pend.get()

            # self.logw("work: [{}]".format(d[0]))
            if dest == "shutdown":
                self.httpsrv.shutdown()
                self.logw("ok bye")
                sys.exit(0)
                return

            elif dest == "reload":
                self.logw("mpw.asrv reloading")
                self.asrv.reload()
                self.logw("mpw.asrv reloaded")

            elif dest == "listen":
                self.httpsrv.listen(args[0], args[1])

            elif dest == "retq":
                # response from previous ipc call
                with self.retpend_mutex:
                    retq = self.retpend.pop(retq_id)

                retq.put(args)

            else:
                raise Exception("what is " + str(dest))

    def put(self, want_retval, dest, *args):
        """Send a message to the broker; optionally register a reply queue.

        Returns the queue the caller can block on for the reply, or None
        when no return value was requested.
        """
        if want_retval:
            retq = ExceptionalQueue(1)
            retq_id = id(retq)
            with self.retpend_mutex:
                self.retpend[retq_id] = retq
        else:
            retq = None
            retq_id = 0

        self.q_yield.put([retq_id, dest, args])
        return retq
6655703 | <gh_stars>0
import flask
from flask import request, jsonify
# Create the application.
# NOTE(review): DEBUG enables the interactive debugger/reloader and must not
# be left on in a production deployment.
app = flask.Flask(__name__)
app.config["DEBUG"] = True

# In-memory fixture served by the API while prototyping (no database yet).
testdata = [
    {
        "date_of_news": "February 23, 2018",
        "title": "nGen_LUX is here",
        "hyperlink": "https://learn.colorfabb.com/ngen_lux-is-here/",
        "organizations_entity": [
            [2, "<NAME>"],
        ],
    }
]

@app.route("/", methods=["GET"])
def home():
    # Landing page: a short HTML description of the service.
    return """<h1>News_Content_Named_Entity_Recognition</h1>
<p>A prototype API testing for LEONARD.</p>"""

@app.route("/api/v1/resources/testdata/all", methods=["GET"])
def api_all():
    # Return the entire fixture as JSON.
    return jsonify(testdata)

# Start the development server when the module is executed.
app.run()
8018806 | from .gradient_descent import gradient_descent
from .optimizer import optimizer
| StarcoderdataPython |
9684328 | <reponame>windowssocket/py_leetcode
# refer to https://leetcode.com/problems/multiply-strings/discuss/17605/Easiest-JAVA-Solution-with-Graph-Explanation
class Solution(object):
    def multiply(self, num1: str, num2: str) -> str:
        """Multiply two non-negative integers given as decimal strings.

        Uses the schoolbook algorithm: digit i of num1 times digit j of
        num2 contributes to positions i+j and i+j+1 of the product.
        Returns the product as a string with no leading zeros ('0' for 0).
        """
        # corner case: treat a missing operand as zero
        if not num1 or not num2:
            return '0'

        m, n = len(num1), len(num2)
        product = [0] * (m + n)

        for i in reversed(range(m)):
            a = ord(num1[i]) - ord('0')
            for j in reversed(range(n)):
                b = ord(num2[j]) - ord('0')
                low, high = i + j + 1, i + j
                total = a * b + product[low]
                product[low] = total % 10
                product[high] += total // 10

        # strip leading zeros; an all-zero product collapses to '0'
        first = 0
        while first < len(product) and product[first] == 0:
            first += 1
        digits = product[first:]
        return ''.join(map(str, digits)) if digits else '0'
| StarcoderdataPython |
5142024 | <reponame>arkadeepnc/Visual-6-DoF-pose-tracker<filename>src/DoDecahedronUtils.py
#Used this code to confirm that the tvec and rvec given by the
# estimatePoseSingleMarkers is of the marker frame wrt the camera frame
# from __future__ import division
import numpy as np
from numpy import linalg as LA
import cv2
import cv2.aruco as aruco
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from mpl_toolkits.mplot3d import Axes3D
import transforms3d as tf3d
import time
from scipy.interpolate import griddata
#from helper import *
from scipy.optimize import minimize, leastsq,least_squares
from scipy import linalg
from scipy.spatial import distance
import rospy
from roscam import RosCam
from matplotlib.path import Path
_R_cent_face = np.load('Center_face_rotations.npy')
_T_cent_face = np.load('Center_face_translations.npy')
frame_gray = np.zeros((100,100, 3), np.uint8)
frame_gray_draw = np.copy(frame_gray)
def draw_3d_point(frame, pose, coord_3d, params, col=(255,0,255), rad = 10):
    """Project a single 3-D point into the image and draw a circle there.

    pose is a 6-vector [rvec, tvec]; params supplies the camera intrinsics
    (mtx) and distortion coefficients (dist). Draws onto frame in place.
    """
    point = coord_3d.reshape(1, 3)
    rvec = pose[0:3].reshape(3, 1)
    tvec = pose[3:6].reshape(3, 1)
    pix, _ = cv2.projectPoints(point, rvec, tvec, params.mtx, params.dist)
    u = int(pix[0, 0, 0])
    v = int(pix[0, 0, 1])
    cv2.circle(frame, (u, v), rad, col, 2)
def slerp(v0, v1, t_array):
    """Spherical linear interpolation (slerp) between two quaternions.

    Parameters
    ----------
    v0, v1 : array-like, shape (4,)
        Endpoint quaternions (assumed unit-norm).
    t_array : array-like
        Interpolation fractions, e.g. np.arange(0, 1, 0.001).

    Returns
    -------
    ndarray, shape (len(t_array), 4)
        One interpolated unit quaternion per entry of ``t_array``.

    Example: slerp([1,0,0,0], [0,0,0,1], np.arange(0, 1, 0.001))
    """
    t_array = np.array(t_array)
    v0 = np.array(v0)
    v1 = np.array(v1)
    dot = np.sum(v0*v1)

    if (dot < 0.0):
        # Flip one endpoint so interpolation follows the shorter arc.
        v1 = -v1
        dot = -dot

    DOT_THRESHOLD = 0.9995
    if (dot > DOT_THRESHOLD):
        # Nearly parallel: fall back to linear interpolation.
        result = v0[np.newaxis,:] + t_array[:,np.newaxis]*(v1 - v0)[np.newaxis,:]
        # BUG FIX: normalize each interpolated quaternion individually.
        # The previous code divided by the Frobenius norm of the whole
        # (len(t), 4) stack, which only yields unit quaternions when
        # len(t_array) == 1.
        return result / np.linalg.norm(result, axis=1, keepdims=True)

    theta_0 = np.arccos(dot)
    sin_theta_0 = np.sin(theta_0)

    theta = theta_0*t_array
    sin_theta = np.sin(theta)

    s0 = np.cos(theta) - dot * sin_theta / sin_theta_0
    s1 = sin_theta / sin_theta_0
    return (s0[:,np.newaxis] * v0[np.newaxis,:]) + (s1[:,np.newaxis] * v1[np.newaxis,:])
####################
def patch_norm_and_grad(frame,frame_grad_u,frame_grad_v,corners_pix,bounding_box):
    ''' Compute normalized image gradients inside a marker's quadrilateral
    and write them into frame_grad_u / frame_grad_v at the same location.

    frame: grayscale image; frame_grad_u / frame_grad_v: full-size gradient
    buffers (mutated in place and also returned as (grad_v, grad_u));
    corners_pix: (4,2) polygon corners in pixel space; bounding_box:
    [u_range, v_range] index arrays spanning the patch.

    patch making reference from
    https://stackoverflow.com/questions/21339448/how-to-get-list-of-points-inside-a-polygon-in-python
    '''
    a = bounding_box[0]
    b = bounding_box[1]
    # Top-left corner of the patch in (row, col) order.
    start_pnt = [b[0],a[0]]
    x, y = np.meshgrid(a, b) # make a canvas with coordinates
    x, y = x.flatten(), y.flatten()
    points = np.vstack((x,y)).T
    p = Path(corners_pix) # make a polygon in pixel space
    grid = p.contains_points(points) # make grid
    # Boolean mask of patch pixels that fall inside the marker polygon.
    mask = grid.reshape(len(a),len(b))
    local_frame = frame[start_pnt[0]:start_pnt[0]+len(a), start_pnt[1]:start_pnt[1]+len(b)]
    # Copy only the in-polygon pixels; outside pixels stay at 1.
    local_frame_cropped = np.ones(local_frame.shape)
    np.copyto(local_frame_cropped,local_frame,where=mask)
    # Stretch the patch intensities to the full 0..255 range before
    # differentiating, so gradients are comparable across patches.
    local_frame_cropped_norm = cv2.normalize(local_frame_cropped,None,alpha = 0,
                    beta=255,norm_type=cv2.NORM_MINMAX)
    local_frame_grad_v,local_frame_grad_u = np.gradient(local_frame_cropped_norm)
    # local_frame_grad_int8 = np.asarray(local_frame_grad,dtype=np.uint8)
    # np.copyto(frame_gray_draw[start_pnt[0]:start_pnt[0]+len(a), start_pnt[1]:start_pnt[1]+len(b)],
    #             local_frame_grad_int8,where=mask)
    # Write the masked patch gradients back into the full-size buffers.
    np.copyto(frame_grad_u[start_pnt[0]:start_pnt[0]+len(a), start_pnt[1]:start_pnt[1]+len(b)],
                local_frame_grad_u,where=mask)
    np.copyto(frame_grad_v[start_pnt[0]:start_pnt[0]+len(a), start_pnt[1]:start_pnt[1]+len(b)],
                local_frame_grad_v,where=mask)
    # cv2.imshow("frame",frame)
    # cv2.imshow("mask",np.array(mask*1*255,dtype=np.uint8))
    return frame_grad_v,frame_grad_u
def find_tfmat_avg(T_cent_accepted):
    """Average a stack of SE(3) transforms into a single transform.

    Rotations are averaged by incrementally slerping each quaternion with
    the running average at t=0.5 (an approximation, not a true Karcher
    mean); translations are averaged arithmetically.

    Input: T_cent_accepted -> (N, 4, 4) stack of transforms.
    Output: single (4, 4) averaged transform.
    """
    Tf_cam_ball = np.eye(4)
    #### using slerp interpolation for averaging rotations
    sum_tra = np.zeros(3,)
    quat_av = tf3d.quaternions.mat2quat(T_cent_accepted[0][0:3,0:3])
    for itr in range(T_cent_accepted.shape[0]-1):
        quat2 = tf3d.quaternions.mat2quat(T_cent_accepted[itr+1][0:3,0:3])
        quat_av = slerp(quat2,quat_av,[0.5])
        quat_av =quat_av.reshape(4,)
    Tf_cam_ball[0:3,0:3]=tf3d.quaternions.quat2mat(quat_av)
    # Arithmetic mean of the translation components.
    for itr in range(T_cent_accepted.shape[0]):
        sum_tra = sum_tra + T_cent_accepted[itr][0:3,3]
    sum_tra = sum_tra/T_cent_accepted.shape[0]
    Tf_cam_ball[0:3,3] = sum_tra
    return Tf_cam_ball
#######################################################
def RodriguesToTransf(x):
    '''
    Build an SE(3) homogeneous transform from 6 Rodrigues parameters.
    Uses cv2.Rodrigues for the rotation part.
    input: X -> (6,) (rvec, tvec)
    Output: Transf -> 4x4 SE(3) transformation matrix
    '''
    x = np.asarray(x).reshape(6,)
    rot_mat, _ = cv2.Rodrigues(x[0:3])
    transf = np.eye(4)
    transf[0:3, 0:3] = rot_mat
    transf[0:3, 3] = x[3:6]
    return transf
def LM_APE_Dodecapen(X,stacked_corners_px_sp, ids, params, flag=False):
    '''
    Residual function for the APE (approximate pose estimation) step of
    the DodecaPen algorithm: reprojection error of the predicted marker
    corners against the detected ones.
    Inputs:
        X: (6,) array of pose parameters [rod_1, rod_2, rod_3, x, y, z]
        stacked_corners_px_sp: output from Aruco marker detection; ALL the
            corners of the seen markers stacked in order, shape (4*M, 2)
        ids: int array of ids seen -- ids of faces seen
        params: camera intrinsics and marker geometry
    Output: V = (4*M,) numpy array of per-corner pixel distances
    '''
    # print(ids)
    corners_in_cart_sp = np.zeros((ids.shape[0],4,3))
    Tf_cam_ball = RodriguesToTransf(X)
    # Predict each visible face's corners in the camera frame from the
    # candidate ball pose and the fixed center->face transforms.
    for ii in range(ids.shape[0]):
        Tf_cent_face,Tf_face_cent = tf_mat_dodeca_pen(int(ids[ii]))
        corners_in_cart_sp[ii,:,:] = Tf_cam_ball.dot(corners_3d(Tf_cent_face, params.marker_size_in_mm)).T[:,0:3]
    corners_in_cart_sp = corners_in_cart_sp.reshape(ids.shape[0]*4,3)
    # Points are already in the camera frame, hence zero rvec/tvec here.
    projected_in_pix_sp,_ = cv2.projectPoints(corners_in_cart_sp,np.zeros((3,1)),np.zeros((3,1)),
                                            params.mtx,params.dist)
    projected_in_pix_sp = projected_in_pix_sp.reshape(projected_in_pix_sp.shape[0],2)
    n,_=np.shape(stacked_corners_px_sp)
    V = LA.norm(stacked_corners_px_sp-projected_in_pix_sp, axis=1)
    # NOTE(review): when flag is True the function falls through and
    # returns None -- confirm all callers use the default flag=False.
    if flag is False:
        return V
def tf_mat_dodeca_pen(face_id):
    '''
    Look up the dodecahedron geometry for one face and build both SE(3)
    transforms between the body center and that face.
    Input:  face_id -- 1-based face index (int)
    Output: T_mat_cent_face -- transform from dodecahedron center to face
            T_mat_face_cent -- inverse transform, from face back to center
    '''
    trans = _T_cent_face[face_id - 1, :, :]
    rot = _R_cent_face[face_id - 1, :, :]
    bottom_row = np.array([0, 0, 0, 1])
    T_mat_cent_face = np.vstack((np.hstack((rot, trans)), bottom_row))
    # Inverse of [R|t] is [R^T | -R^T t].
    T_mat_face_cent = np.vstack(
        (np.hstack((rot.T, -rot.T.dot(trans))), bottom_row))
    return T_mat_cent_face, T_mat_face_cent
def corners_3d(tf_mat,m_s):
    '''
    Build the four marker corners (homogeneous, in the marker's own plane
    z=0) and map them through a given transformation matrix.
    Inputs:
        tf_mat = 4x4 transformation matrix between frames
        m_s = marker size -- edge length in mm
    Outputs:
        corn_pgn_f = (4, 4) array of transformed corners, one per column,
        rows ordered (x, y, z, 1)
    '''
    half = m_s / 2.0
    corn_mf = np.array([[-half,  half, 0.0, 1.0],
                        [ half,  half, 0.0, 1.0],
                        [ half, -half, 0.0, 1.0],
                        [-half, -half, 0.0, 1.0]])
    return tf_mat.dot(corn_mf.T)
def remove_bad_aruco_centers(center_transforms, params):
    """
    takes in the tranforms for the aruco centers
    returns the transforms, centers coordinates, and indices for centers which
    aren't too far from the others
    Input: center_transforms = N transformation matrices stacked as [N,4,4] numpy arrays
    Output = center_transforms[good_indices, :, :] -> accepted center transforms,
             centers_R3[good_indices, :] = center estimates from accepted center transforms,
             good_indices = accepted ids
    """
    # An estimate is kept if its projected center lies within max_dist
    # pixels of at least one OTHER estimate (consensus filtering).
    max_dist = 25 # pixels
    centers_R3 = center_transforms[:, 0:3, 3]
    # Centers are already in the camera frame, hence zero rvec/tvec.
    projected_in_pix_sp,_ = cv2.projectPoints(centers_R3,np.zeros((3,1)),np.zeros((3,1)), params.mtx, params.dist)
    projected_in_pix_sp = projected_in_pix_sp.reshape(projected_in_pix_sp.shape[0],2)
    distances = distance.cdist(centers_R3, centers_R3)
    distances_2 = distance.cdist(projected_in_pix_sp, projected_in_pix_sp)
    # print distances_2,"distances_2"
    # print distances,"distances"
    # distances_2 > 0 excludes each center's zero self-distance.
    good_pairs = (distances_2 > 0) * (distances_2 < max_dist)
    good_indices = np.where(np.sum(good_pairs, axis=0) > 0)[0].flatten()
    if good_indices.shape[0] == 0 :
        # NOTE(review): falls back to the first two estimates; assumes at
        # least two center transforms were passed in -- verify callers.
        print('good_indices is none, resetting')
        good_indices = np.array([0, 1])
    return center_transforms[good_indices, :, :], centers_R3[good_indices, :], good_indices
def local_frame_grads (frame_gray, corners, ids,params): #### by arkadeep
    ''' Takes in the frame, the corners of the markers the camera sees and ids of the markers seen.
    Returns the frame gradients
    Input:  frame_gray --> grayscale frame
            corners: stacked as corners[num_markers,:,:]
            ids: ids seen
    Output
            frame_grad_u and frame_grad_v: matrices of sizes as frame gray. the areas near the markers will
            have gradients of the frame_gray frame in the same locations and rest is 0
            (returned in (grad_v, grad_u) order)
    '''
    frame_grad_u = np.zeros((frame_gray.shape[0],frame_gray.shape[1]))
    frame_grad_u_temp = np.zeros((frame_gray.shape[0],frame_gray.shape[1]))
    frame_grad_v = np.zeros((frame_gray.shape[0],frame_gray.shape[1]))
    frame_grad_v_temp = np.zeros((frame_gray.shape[0],frame_gray.shape[1]))
    # debug_temp_img = np.zeros((frame_gray.shape[0],frame_gray.shape[1]),dtype= np.uint8)
    for i in range(len(ids)):
        # Dilated quad around the marker; its axis-aligned bounding box
        # (u = column, v = row) delimits the patch to differentiate.
        expanded_corners_small = get_marker_borders(corners[i,:,:],params )
        v_low_small = int(np.min(expanded_corners_small[:,1]))
        v_high_small = int(np.max(expanded_corners_small[:,1]))
        u_low_small = int(np.min(expanded_corners_small[:,0]))
        u_high_small = int(np.max(expanded_corners_small[:,0]))
        local_int_det = np.copy(frame_gray[v_low_small:v_high_small,u_low_small:u_high_small])
        max_int_small_sect = np.max(local_int_det)
        expanded_corners = get_marker_borders(corners[i,:,:],params)
        v_low = int(np.min(expanded_corners[:,1]))
        v_high = int(np.max(expanded_corners[:,1]))
        u_low = int(np.min(expanded_corners[:,0]))
        u_high = int(np.max(expanded_corners[:,0]))
        frame_local = np.copy(frame_gray[v_low:v_high,u_low:u_high]) # not sure if v and u are correct order
        # Use the larger bounding-box side so the sampled patch is square.
        if abs(u_high - u_low) > abs(v_high- v_low):
            sq_dim = u_high - u_low
        else:
            sq_dim = v_high - v_low
        # frame_local = cv2.normalize(frame_local,None,alpha = 0,
        #   beta=255.0*max_int_large_sect/max_int_small_sect,norm_type=cv2.NORM_MINMAX)
        # A,B = np.gradient(frame_gray )
        # frame_grad_v = np.copy(A)
        # frame_grad_u = np.copy(B)
        # cv2.imshow("full frame gradient",frame_grad_v.astype(np.uint8))
        # A,B = np.gradient(frame_local)
        # frame_grad_v[v_low:v_high,u_low:u_high] = np.copy(A)
        # frame_grad_u[v_low:v_high,u_low:u_high] = np.copy(B)
        # cv2.imshow("local frame gradient",frame_grad_v.astype(np.uint8))
        # Delegate the masked normalize-and-differentiate to the helper;
        # it writes into the *_temp buffers and returns the results.
        frame_grad_v, frame_grad_u = patch_norm_and_grad(frame_gray,frame_grad_u_temp,frame_grad_v_temp,
                    expanded_corners_small,[np.arange(u_low,u_low+sq_dim),np.arange(v_low,v_low+sq_dim)])
        # cv2.imshow("patched and normalised gradient",frame_grad_v.astype(np.uint8))
        # cv2.waitKey(0)
    return frame_grad_v, frame_grad_u
def marker_edges(ids, data,params):
    ''' Function to give the edge points in the image and their intensities.
    to be called only once in the entire program to gather reference data for DPR
    output:
    b_edge = list (one entry per id) of (n/params.dwnsmpl_by, 4) homogeneous
        points on the marker in R3 where the intensities change, stacked
        to be directly used in cv2.projectPoints
    edge_intensities_expected = ordered expected intensity values for the
        edge points, to be used in the objective function of DPR
    '''
    # Offset from the padded source image border to the marker proper
    # (aruco source images are 600x600 before padding).
    pix_offset = int((params.padding_fac-1)/2 * 600) ### aruco images are 600x600
    b_edge = []
    edge_intensities_expected = []
    for aruco_id in ids:
        b = data.edge_pts_in_img_sp[aruco_id[0]]
        n = b.shape[0]
        # Force the points into the marker plane (z = 0) and make them
        # homogeneous (w = 1).
        b[:,2] = 0.
        b[:,3] = 1
        # Downsample the edge points to keep the DPR residual small.
        b_shaped = b[0::params.dwnsmpl_by,0:4].astype(np.float32)
        b_edge.append(b_shaped)
        img_pnts_curr = data.img_pnts[aruco_id[0]][0::params.dwnsmpl_by,:]
        edge_intensities = data.aruco_images_int16[aruco_id[0]][img_pnts_curr [:,1]+pix_offset,img_pnts_curr [:,0]+pix_offset]# TODO can we have it in terms of dil_fac
        edge_intensities_expected.append(edge_intensities)
    # ---------------------------------------------------------------
    ########## this part verifies that correct points are sampled from the edges of the aruco images.
    # for j in range(len(ids)):
    #     img_pnts_curr =img_pnts[ids[j][0]][0::downsample,:]
    #     n_int = img_pnts_curr.shape[0]
    #     print img_pnts_curr.shape,"img_pnts_curr.shape"
    #     print img_pnts[ids[j][0]].shape,"img_pnts.shape"
    #     for i in range(n_int):
    #         center_2 = tuple(np.ndarray.astype(np.array([img_pnts_curr[i,0]+pix_offset,img_pnts_curr[i,1]+pix_offset]),int))
    #         cv2.circle( data.aruco_images[ids[j][0]], center_2 , 3 , 127, -1)
    #     cv2.imshow("data.aruco_images[ids_{}]".format(ids[j][0]),data.aruco_images[ids[j][0]])
    #     cv2.waitKey(0)
    # print edge_intensities_expected[0]," edge_intensities_expected[{}]".format(ids[0][0])
    # print ""
    # print ids[0]
    # print b_edge[0].shape,"b_edge[0].shape"
    # print edge_intensities_expected[0].shape,"edge_intensities_expected[0].shape"
    # print ""
    # y = data.aruco_images_int16[aruco_id][data.img_pnts[aruco_id][:,1]+pix_offset,data.img_pnts[aruco_id][:,0]+pix_offset]
    return np.asarray(b_edge) , np.asarray(edge_intensities_expected)
    # pass
def get_marker_borders (corners,params):
    '''Scale a marker quad about its centroid by params.dilate_fac.

    corners: (4, 2) array of corner pixel locations.
    Returns: (4, 2) array of the expanded corners, same ordering.
    '''
    centroid = np.array([np.mean(corners[:, 0]), np.mean(corners[:, 1])])
    # Push every corner away from the centroid by the dilate factor.
    return (corners - centroid) * params.dilate_fac + centroid
def LM_DPR(X, frame_gray, ids, corners, b_edge, edge_intensities_expected_all, data, params):
    ''' Objective function for the DPR (dense pose refinement) step.
    Takes the pose as the first argument [mandatory!!] and returns the
    per-edge-point intensity residual: expected marker-edge intensity
    minus the observed image intensity at the reprojected location.
    '''
    Tf_cam_ball = RodriguesToTransf(X)
    borders_in_cart_sp = []
    edge_intensities_expected = []
    # Map each face's reference edge points into the camera frame.
    for ii in range(ids.shape[0]):
        Tf_cent_face,Tf_face_cent = tf_mat_dodeca_pen(int(ids[ii]))
        borders_in_cart_sp.append((Tf_cam_ball.dot(Tf_cent_face).dot(b_edge[ii].T)).T)
        edge_intensities_expected.append(edge_intensities_expected_all[ii].reshape(edge_intensities_expected_all[ii].shape[0],1))
    stacked_borders_in_cart_sp = np.vstack(borders_in_cart_sp)
    edge_intensities_expected_stacked = np.vstack(edge_intensities_expected)
    # Points are already in the camera frame, hence zero rvec/tvec.
    proj_points, _ = cv2.projectPoints( stacked_borders_in_cart_sp [:,0:3], np.zeros((3,1)), np.zeros((3,1)), params.mtx, params.dist)
    proj_points_int = np.ndarray.astype(proj_points,int)
    proj_points_int = proj_points_int.reshape(proj_points_int.shape[0],2)
    n_int = proj_points_int.shape[0]
    temp = proj_points.shape[0]
    proj_points = proj_points.reshape(temp,2)
    # testing_points = 10
    # LM_DPR_DRAW(X, frame_gray_draw, ids, corners, b_edge, edge_intensities_expected_all,data, params, 127)
    # -profile do not put drawing stuff here save 13% time
    # Sample the observed intensities at the (integer) projected pixels.
    f_p = frame_gray[proj_points_int[:,1],proj_points_int[:,0]] # TODO i dont think framegray int16 is needed ? Also 0,1 order changed
    err = (edge_intensities_expected_stacked/1.0 - f_p.reshape(f_p.shape[0],1)/1.0) # this is the error in the intensities
    return err.reshape(err.shape[0],)
def LM_DPR_DRAW(X, frame_gray_draw, ids, corners, b_edge, edge_intensities_expected_all, data, params, col_gr = 127, rad=1):
    ''' Debug visualisation for the DPR step: projects the reference edge
    points under the candidate pose X and draws a small filled circle at
    every projected pixel on frame_gray_draw (modified in place and also
    returned). Mirrors the geometry of LM_DPR without computing residuals.
    '''
    Tf_cam_ball = RodriguesToTransf(X)
    borders_in_cart_sp = []
    edge_intensities_expected = []
    for ii in range(ids.shape[0]):
        Tf_cent_face,Tf_face_cent = tf_mat_dodeca_pen(int(ids[ii]))
        borders_in_cart_sp.append((Tf_cam_ball.dot(Tf_cent_face).dot(b_edge[ii].T)).T)
        edge_intensities_expected.append(edge_intensities_expected_all[ii].reshape(edge_intensities_expected_all[ii].shape[0],1))
    stacked_borders_in_cart_sp = np.vstack(borders_in_cart_sp)
    edge_intensities_expected_stacked = np.vstack(edge_intensities_expected)
    proj_points, _ = cv2.projectPoints( stacked_borders_in_cart_sp [:,0:3], np.zeros((3,1)), np.zeros((3,1)), params.mtx, params.dist)
    proj_points_int = np.ndarray.astype(proj_points,int)
    proj_points_int = proj_points_int.reshape(proj_points_int.shape[0],2)
    n_int = proj_points_int.shape[0]
    temp = proj_points.shape[0]
    proj_points = proj_points.reshape(temp,2)
    testing_points = temp
    # Draw one circle per projected edge point.
    for i in range(n_int):
        center = tuple(np.ndarray.astype(proj_points_int[i,:],int))
        cv2.circle( frame_gray_draw, center , rad , col_gr, -1)
    # f_p = frame_gray[proj_points_int[:,1],proj_points_int[:,0]] # TODO i dont think framegray int16 is needed ? Also 0,1 order changed
    # err = (edge_intensities_expected_stacked - f_p.reshape(f_p.shape[0],1))/1.0 # this is the error in the intensities
    return frame_gray_draw
def LM_DPR_Jacobian(X, frame_gray, ids, corners, b_edge, edge_intensities_expected_all, data, params):
    '''Analytical Jacobian of the DPR objective: dI/dp = dI/du * du/dp +
    dI/dv * dv/dp, combining image gradients with the projection
    derivatives reported by cv2.projectPoints. Returns (-dI_by_dp) with
    shape (n_points, 6).'''
    Tf_cam_ball = RodriguesToTransf(X)
    borders_in_cart_sp = []
    edge_intensities_expected = []
    for ii in range(ids.shape[0]):
        Tf_cent_face,Tf_face_cent = tf_mat_dodeca_pen(int(ids[ii]))
        # borders_in_cart_sp.append((Tf_cam_ball.dot(b_edge[ii].T)).T) ### by arkadeep
        # Here the points stay in the ball frame; the pose X is applied
        # inside cv2.projectPoints below so its derivatives are returned.
        borders_in_cart_sp.append(( Tf_cent_face.dot(b_edge[ii].T)).T) ### added by tejas
        edge_intensities_expected.append(edge_intensities_expected_all[ii].reshape(edge_intensities_expected_all[ii].shape[0],1))
    stacked_borders_in_cart_sp = np.vstack(borders_in_cart_sp)
    edge_intensities_expected_stacked = np.vstack(edge_intensities_expected)
    # duvec_by_dp_all interleaves du/dp and dv/dp rows per projected point.
    proj_points , duvec_by_dp_all = cv2.projectPoints( stacked_borders_in_cart_sp [:,0:3], X[0:3],X[3:6], params.mtx, params.dist)
    proj_points_int = np.ndarray.astype(proj_points,int) # -profile this is taking some time (5.5% of total time)
    proj_points = proj_points.reshape(proj_points.shape[0],2)
    proj_points_int = proj_points_int.reshape(proj_points_int.shape[0],2)
    # Resample the derivatives at the integer pixel locations used when
    # sampling image intensities/gradients.
    du_by_dp = griddata(proj_points,duvec_by_dp_all[0::2,0:6],(proj_points_int[:,0],proj_points_int[:,1]), method = 'nearest')
    dv_by_dp = griddata(proj_points,duvec_by_dp_all[1::2,0:6],(proj_points_int[:,0],proj_points_int[:,1]), method = 'nearest')
    # this is same as above - no it's not
    # du_by_dp = duvec_by_dp_all[0::2,0:6]
    # dv_by_dp = duvec_by_dp_all[1::2,0:6]
    # print np.linalg.norm(du_by_dp- du_by_dp_grid),"norm"
    # print ""
    dI_by_dv ,dI_by_du = local_frame_grads (frame_gray.astype('int16'), np.vstack(corners), ids,params) ##TODO local frame gradients not working pl check ## arkadeep
    # LM_DPR_DRAW(X, frame_gray_draw, ids, corners, b_edge, edge_intensities_expected_all,data, mtx, dist, 127,1)
    n_int = proj_points_int.shape[0]
    dI_by_dp = np.zeros((n_int,6))
    # Chain rule per projected point, sampling gradients at (row=v, col=u).
    for i in range(n_int):
        ui,vi = proj_points_int[i,0], proj_points_int[i,1]
        # dI_by_dp[i,:] = dI_by_du [ui,vi] * du_by_dp[i] + dI_by_dv [ui,vi] * dv_by_dp[i] #TODO confirn [u,v] order in eqn # by arkadeep
        dI_by_dp[i,:] = dI_by_du [vi,ui] * du_by_dp[i] + dI_by_dv [vi,ui] * dv_by_dp[i] #TODO confirn [u,v] order in eqn #edited by tejas
    return -dI_by_dp # the neg sign is required
######
def get_pnt_grey_image():
    """Initialise a ROS node and return a RosCam subscribed to the
    Point Grey colour image topic."""
    rospy.init_node('RosCam', anonymous=True)
    return RosCam("/camera/image_color")
class parameters():
    """Bundle of tracker configuration: Aruco detection settings, marker
    geometry, camera calibration and DPR tuning knobs.

    NOTE: the constructor loads 'PTGREY.npz' from the current working
    directory as a side effect (camera matrix and distortion coefficients).
    """
    def __init__(self):
        self.aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
        self.aruco_params = aruco.DetectorParameters_create()
        # Enable subpixel corner refinement for detection accuracy.
        self.aruco_params.cornerRefinementMethod = 1
        self.aruco_params.cornerRefinementMinAccuracy = 0.05
        # Physical edge length of each printed marker.
        self.marker_size_in_mm = 17.78
        self.dilate_fac = 1.2 # dilate the square around the marker
        self.padding_fac = 1.2 # padding around the aruco source image DO NOT CHANGE THIS PARAMETER
        with np.load('PTGREY.npz') as X:
            cam_mtx, cam_dist = [X[i] for i in ('mtx','dist')]
        self.mtx = cam_mtx
        self.dist = cam_dist
        # Edge-point downsampling step for the DPR residual.
        self.dwnsmpl_by = 100 ## changed the name of points_for_DPR
        # Valid dodecahedron face ids vs ids known to be misdetections.
        self.markers_possible = np.array([1,2,3,4,5,6,7,8,9,10,11,12])
        self.markers_impossible = np.array([13,17,37,16,34,45,38,24,47,32,40])
class txt_data():
    """Reference data for the 12 dodecahedron faces, loaded from disk.

    All lists have 13 slots so they can be indexed directly by the
    1-based face id (slot 0 is unused). The constructor reads the edge
    coordinate text files and marker images from the current directory.
    """
    def __init__(self):
        self.x = 0
        # Per-face R3 edge points, source images (uint8 and int16 copies)
        # and pixel-space edge coordinates.
        self.edge_pts_in_img_sp = [0]*13
        self.aruco_images = [0]*13
        self.aruco_images_int16 = [0]*13
        self.img_pnts = [0]*13
        for i in range(1,13):
            self.edge_pts_in_img_sp[i] = np.loadtxt("thick_edge_coord_R3/id_{}.txt".format(i),delimiter=',',dtype=np.float32)
            self.aruco_images[i]= cv2.imread("aruco_images_mip_maps/res_75_{}.jpg".format(i),0)
            self.img_pnts[i] = np.loadtxt("thick_edge_coord_pixels/id_{}.txt".format(i),delimiter=',',dtype='int16')
            # int16 copy so intensity differences can go negative later.
            self.aruco_images_int16[i] = np.int16(self.aruco_images[i])
def find_pose(frame, params, data):
    """Estimate the pose of the dodecahedron pen from a single camera frame.

    Pipeline:
      1. detect ArUco markers and average the per-marker pose estimates,
      2. refine with APE (corner-pixel reprojection, ``LM_APE_Dodecapen``),
      3. refine with DPR (dense edge-intensity registration, ``LM_DPR``).

    Parameters
    ----------
    frame : colour (BGR) camera image.
    params : parameters
        Camera calibration and ArUco configuration.
    data : txt_data
        Per-marker reference edge data.

    Returns
    -------
    (frame_gray_draw, pose_without_opt, pose_APE, pose_DPR, visib_flag)
        Each pose is a 1x6 row [rvec | tvec]; all-zero poses and
        ``visib_flag == 0`` when fewer than two usable markers are seen.
    """
    frame_gray = cv2.cvtColor(frame.copy(), cv2.COLOR_BGR2GRAY)
    frame_gray_draw = np.copy(frame_gray)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(frame, params.aruco_dict, parameters=params.aruco_params)
    # BUGFIX: test `ids is not None` *before* using `ids` in the membership
    # test -- the previous ordering relied on `None in ndarray` evaluating
    # element-wise, which is fragile and deprecated in newer numpy releases.
    if ids is not None and len(ids) >= 2 and ids not in params.markers_impossible:
        stacked_corners_px_sp = np.reshape(np.asarray(corners), (ids.shape[0] * 4, 2))
        visib_flag = 1
        #########################################################################################
        ################# finding out average of poses given by aruco ###########################
        #########################################################################################
        N_markers = ids.shape[0]
        # Indexed directly by marker id; row 0 stays [0,0,0] so face ids can
        # start at 1.
        rvecs = np.zeros((13, 1, 3))
        tvecs = np.zeros((13, 1, 3))
        jj = 0
        # Transforms of the pen centre w.r.t. the camera, one per visible marker.
        T_cent = np.zeros((ids.shape[0], 4, 4))
        for m in ids:
            m_indx = np.asarray(np.where(m == ids))
            rvecs[m, :, :], tvecs[m, :, :], _ = cv2.aruco.estimatePoseSingleMarkers(corners[int(m_indx[0])], params.marker_size_in_mm, params.mtx, params.dist)
            T_4_Aruco = RodriguesToTransf(np.append(rvecs[m, :, :], tvecs[m, :, :]))
            T_mat_cent_face, T_mat_face_cent = tf_mat_dodeca_pen(int(m))
            T_cent[jj, :, :] = np.matmul(T_4_Aruco, T_mat_face_cent)
            jj += 1
        T_cent_accepted, centers_R3, good_indices = remove_bad_aruco_centers(T_cent, params)
        # Average the accepted per-marker transforms into a single pose guess.
        Tf_cam_ball = find_tfmat_avg(T_cent_accepted)
        #########################################################################################
        ######################################### APE ###########################################
        #########################################################################################
        r_vec_ball, _ = cv2.Rodrigues(Tf_cam_ball[0:3, 0:3])
        t_vec_ball = Tf_cam_ball[0:3, 3]
        X_guess = np.append(r_vec_ball, np.reshape(t_vec_ball, (3, 1))).reshape(6, 1)
        pose_marker_without_opt = X_guess.T
        res = leastsq(LM_APE_Dodecapen, X_guess, Dfun=None, full_output=0,
                      col_deriv=0, ftol=1.49012e-6, xtol=1.49012e-4, gtol=0.0,
                      maxfev=1000, epsfcn=None, factor=1, diag=None,
                      args=(stacked_corners_px_sp, ids, params, False))
        pose_marker_with_APE = np.reshape(res[0], (1, 6))
        b_edge, edge_intensities_expected = marker_edges(ids, data, params)
        #########################################################################################
        ######################################### DPR ###########################################
        #########################################################################################
        LM_DPR_DRAW(res[0], frame_gray_draw, ids, corners,  # draw APE result
                    b_edge, edge_intensities_expected, data, params, 250, 2)
        res_DPR = leastsq(LM_DPR, res[0], Dfun=LM_DPR_Jacobian, full_output=1,
                          col_deriv=0, ftol=1.49012e-10, xtol=1.49012e-4, gtol=0.0,
                          maxfev=1000, epsfcn=None, factor=1, diag=None,
                          args=(frame_gray, ids, corners, b_edge, edge_intensities_expected, data, params))
        LM_DPR_DRAW(res_DPR[0], frame_gray_draw, ids, corners,  # draw DPR result
                    b_edge, edge_intensities_expected, data, params, 0, 1)
        pose_marker_with_DPR = np.reshape(res_DPR[0], (1, 6))
    else:
        print("Required marker not visible")
        pose_marker_without_opt = [0., 0., 0., 0., 0., 0.]
        pose_marker_with_APE = [0., 0., 0., 0., 0., 0.]
        pose_marker_with_DPR = [0., 0., 0., 0., 0., 0.]
        visib_flag = 0
    return frame_gray_draw, pose_marker_without_opt, pose_marker_with_APE, pose_marker_with_DPR, visib_flag
def main():
    """Acquire frames from the Point Grey camera, track the DodecaPen pose on
    each frame (ArUco average -> APE -> DPR), then plot and compare the
    recorded poses."""
    global frame_gray_draw, frame_gray
    plot_switch = 1       # 3-D scatter plots of translation / rotation
    hist_plot_switch = 1  # histograms of the z components
    params = parameters()  # detector / calibration configuration
    data = txt_data()      # per-marker reference data
    iterations_for_while = 5500
    # Pre-allocate one row per potential frame; trimmed to `j` rows afterwards.
    pose_marker_with_APE = np.zeros((iterations_for_while, 6))
    pose_marker_with_DPR = np.zeros((iterations_for_while, 6))
    pose_marker_without_opt = np.zeros((iterations_for_while, 6))
    j = 0  # iteration counter
    ## taking image from camera
    ic = get_pnt_grey_image()
    while j < iterations_for_while:
        #########################################################################################
        ############################### taking frame as input ###################################
        #########################################################################################
        frame = ic.cv_image
        if frame is None:
            # Camera has not delivered a frame yet; wait briefly and retry.
            time.sleep(0.1)
            print("No image")
            continue
        frame_gray_draw, pose_without_opt, pose_APE, pose_DPR, visib_flag = find_pose(frame, params, data)
        if visib_flag == 1:
            pose_marker_with_APE[j, :] = pose_APE
            pose_marker_without_opt[j, :] = pose_without_opt
            pose_marker_with_DPR[j, :] = pose_DPR
        print("frame number ", j)
        cv2.imshow('frame_gray_draw', frame_gray_draw)
        j += 1
        if cv2.waitKey(1) & 0xFF == ord('q') or j >= iterations_for_while:
            break
    cv2.destroyAllWindows()
    #### Analysis
    r2d = 180 / np.pi  # radians -> degrees
    pose_marker_with_APE = pose_marker_with_APE[0:j, :]
    pose_marker_with_DPR = pose_marker_with_DPR[0:j, :]
    pose_marker_without_opt = pose_marker_without_opt[0:j, :]
    if plot_switch == 1:
        ### translation
        fig = plt.figure()
        ax = fig.add_subplot(111, projection="3d")
        fig.canvas.set_window_title("translation x,y,z")
        ax.set_xlabel('X Label')
        ax.set_ylabel('Y Label')
        ax.set_zlabel('Z Label')
        ax.scatter(pose_marker_without_opt[:, 3], pose_marker_without_opt[:, 4], pose_marker_without_opt[:, 5],
                   c='m', label="pose_marker_without_opt")
        ax.scatter(pose_marker_with_APE[:, 3], pose_marker_with_APE[:, 4], pose_marker_with_APE[:, 5],
                   c='r', label="pose_marker_with_APE")
        ax.scatter(pose_marker_with_DPR[:, 3], pose_marker_with_DPR[:, 4], pose_marker_with_DPR[:, 5],
                   c='g', label="pose_marker_with_DPR")
        ax.legend()
        ### rotation
        fig = plt.figure()
        fig.canvas.set_window_title("rotation x,y,z")
        # BUGFIX: create the 3-D axes *before* labelling them; previously the
        # labels were applied to the translation figure's axes and the new
        # rotation axes were left unlabelled.
        ax = fig.add_subplot(111, projection="3d")
        ax.set_xlabel('X Label')
        ax.set_ylabel('Y Label')
        ax.set_zlabel('Z Label')
        ax.scatter(pose_marker_without_opt[:, 0] * r2d, pose_marker_without_opt[:, 1] * r2d, pose_marker_without_opt[:, 2] * r2d,
                   c='m', label="orientation_marker_without_opt")
        ax.scatter(pose_marker_with_APE[:, 0] * r2d, pose_marker_with_APE[:, 1] * r2d, pose_marker_with_APE[:, 2] * r2d,
                   c='r', label="orientation_marker_with_APE")
        ax.scatter(pose_marker_with_DPR[:, 0] * r2d, pose_marker_with_DPR[:, 1] * r2d, pose_marker_with_DPR[:, 2] * r2d,
                   c='g', label="orientation_marker_with_DPR")
        ax.legend()
    if hist_plot_switch == 1:
        # NOTE(review): `normed` was removed from Axes.hist in matplotlib 3.1;
        # if this runs on a modern matplotlib, switch to `density=True`.
        fig = plt.figure()
        fig.canvas.set_window_title("histogram translation z")
        plt.hist(pose_marker_without_opt[:, 5], j, facecolor='magenta', normed=1, label='pose_marker_without_opt')
        plt.hist(pose_marker_with_APE[:, 5], j, facecolor='red', normed=1, label='pose_marker_with_APE')
        plt.hist(pose_marker_with_DPR[:, 5], j, facecolor='green', normed=1, label='pose_marker_with_DPR')
        plt.legend()
        fig = plt.figure()
        fig.canvas.set_window_title("histogram rotation z")
        plt.hist(pose_marker_without_opt[:, 2] * r2d, j, facecolor='magenta', normed=1, label='orientation_marker_without_opt')
        plt.hist(pose_marker_with_APE[:, 2] * r2d, j, facecolor='red', normed=1, label='orientation_marker_with_APE')
        plt.hist(pose_marker_with_DPR[:, 2] * r2d, j, facecolor='green', normed=1, label='orientation_marker_with_DPR')
        plt.legend()
    print("the end")
    plt.show()
# Script entry point: start the DodecaPen tracking loop.
if __name__ == '__main__':
    main()
3598487 | #
# This file is subject to the terms and conditions defined in the
# file 'LICENSE', which is part of this source code package.
#
# Copyright (c) 2019 <NAME> - All Rights Reserved.
#
import unittest
from salty_orm.db.model import Max, Min, Count, Sum
from salty_orm.db.mysql_provider import MySQLDBConnection
from salty_orm.examples.models import RulingModel
class TestQueryStructure(unittest.TestCase):
    """Verify the SQL text emitted by RulingModel query builders."""

    _provider = None

    def setUp(self) -> None:
        self._provider = MySQLDBConnection(testing=True)
        return super(TestQueryStructure, self).setUp()

    def _query(self):
        """Return a fresh queryset bound to the in-memory test provider."""
        return RulingModel(self._provider).objects

    def test_simple_select(self):
        """A bare queryset selects every column and row."""
        statement, _args = self._query().to_sql()
        self.assertEqual(statement, 'SELECT * FROM test_model')

    def test_simple_select_specific_columsn(self):
        """values_list() restricts the select to the named columns."""
        statement, _args = self._query().values_list("id", "name").to_sql()
        self.assertEqual(statement, "SELECT id, name FROM test_model")

    def test_max_column(self):
        """A MAX aggregate with no group-by clause."""
        statement, _args = self._query().aggregate(Max('id')).to_sql()
        self.assertEqual(statement, "SELECT MAX(id) as id__max FROM test_model")

    def test_min_column(self):
        """A MIN aggregate with no group-by clause."""
        statement, _args = self._query().aggregate(Min('id')).to_sql()
        self.assertEqual(statement, "SELECT MIN(id) as id__min FROM test_model")

    def test_count_column(self):
        """A COUNT aggregate with no group-by clause."""
        statement, _args = self._query().aggregate(Count('id')).to_sql()
        self.assertEqual(statement, "SELECT COUNT(id) as id__count FROM test_model")

    def test_sum_column(self):
        """A SUM aggregate with no group-by clause."""
        statement, _args = self._query().aggregate(Sum('id')).to_sql()
        self.assertEqual(statement, "SELECT SUM(id) as id__sum FROM test_model")

    def test_order_by(self):
        """A plain ORDER BY clause."""
        statement, _args = self._query().order_by('id').to_sql()
        self.assertEqual(statement, "SELECT * FROM test_model ORDER BY id")

    def test_group_by(self):
        """A plain GROUP BY clause."""
        statement, _args = self._query().group_by('id').to_sql()
        self.assertEqual(statement, "SELECT * FROM test_model GROUP BY id")

    def test_limit(self):
        """A LIMIT clause."""
        statement, _args = self._query().limit(10).to_sql()
        self.assertEqual(statement, "SELECT * FROM test_model LIMIT 10")
| StarcoderdataPython |
8105208 | <filename>paprika/restraints/openmm.py
"""A module aimed at applying restraints directly to OpenMM systems."""
import logging
import numpy as np
import openmm as openmm
import openmm.unit as openmm_unit
import parmed as pmd
from openff.units import unit as pint_unit
from openff.units.simtk import to_simtk
logger = logging.getLogger(__name__)
_PI_ = np.pi
def apply_positional_restraints(
    coordinate_path: str, system, atom_name="DUM", force_group: int = 15, kpos=50.0
):
    """A utility function which will add OpenMM harmonic positional restraints to
    any dummy atoms found within a system to restrain them to their initial
    positions.

    Parameters
    ----------
    coordinate_path : str
        The path to the coordinate file which the restraints will be applied to.
        This should contain either the host or the complex, the dummy atoms and
        and solvent.
    system : :class:`openmm.System`
        The system object to add the positional restraints to.
    atom_name : str
        The name of the atom to restrain.
    force_group : int, optional
        The force group to add the positional restraints to.
    kpos : float, openmm.unit.Quantity or pint.unit.Quantity, optional
        The force constant for restraining the dummy atoms (kcal/mol/Å^2 if a
        bare number).

    Raises
    ------
    TypeError
        If ``kpos`` is not a number, an OpenMM quantity or a pint quantity.
    """
    # Resolve the force constant once -- it is loop-invariant, and the previous
    # `isinstance(kpos, float)` check silently left `k` unbound for integer
    # inputs (e.g. kpos=50), producing an UnboundLocalError later.
    if isinstance(kpos, (int, float)):
        # Bare numbers are interpreted as kcal/mol/Å^2.
        k = kpos * openmm_unit.kilocalories_per_mole / openmm_unit.angstroms ** 2
    elif isinstance(kpos, openmm_unit.Quantity):
        k = kpos
    elif isinstance(kpos, pint_unit.Quantity):
        k = to_simtk(kpos)
    else:
        raise TypeError(f"Unsupported type for `kpos`: {type(kpos)}")

    # noinspection PyTypeChecker
    structure: pmd.Structure = pmd.load_file(coordinate_path, structure=True)

    for atom in structure.atoms:
        if atom.name == atom_name:
            # One isotropic harmonic well per dummy atom, anchored at its
            # initial coordinates.
            positional_restraint = openmm.CustomExternalForce(
                "k * ((x-x0)^2 + (y-y0)^2 + (z-z0)^2)"
            )
            positional_restraint.addPerParticleParameter("k")
            positional_restraint.addPerParticleParameter("x0")
            positional_restraint.addPerParticleParameter("y0")
            positional_restraint.addPerParticleParameter("z0")

            # I haven't found a way to get this to use ParmEd's unit library here.
            # ParmEd correctly reports `atom.positions` as units of Ångstroms.
            # But then we can't access atom indices. Using `atom.xx` works for
            # coordinates, but is unitless -- hence the explicit 0.1 Å -> nm
            # conversion below.
            x0 = 0.1 * atom.xx * openmm_unit.nanometers
            y0 = 0.1 * atom.xy * openmm_unit.nanometers
            z0 = 0.1 * atom.xz * openmm_unit.nanometers

            positional_restraint.addParticle(atom.idx, [k, x0, y0, z0])
            system.addForce(positional_restraint)
            positional_restraint.setForceGroup(force_group)
def apply_dat_restraint(
    system, restraint, phase, window_number, flat_bottom=False, force_group=None
):
    """A utility function which takes in pAPRika restraints and applies the
    restraints to an OpenMM System object.

    Parameters
    ----------
    system : :class:`openmm.System`
        The system object to add the restraints to.
    restraint : list
        List of pAPRika defined restraints
    phase : str
        Phase of calculation ("attach", "pull" or "release")
    window_number : int
        The corresponding window number of the current phase
    flat_bottom : bool, optional
        Specify whether the restraint is a flat bottom potential
    force_group : int, optional
        The force group to add the restraints to. ``None`` keeps OpenMM's
        default group assignment.
    """
    assert phase in {"attach", "pull", "release"}

    # NOTE: all force-group checks below use `is not None` instead of
    # truthiness -- force group 0 is a valid OpenMM group and the previous
    # `if force_group:` silently skipped it.

    # Angular flat-bottom potential
    if flat_bottom and phase == "attach" and restraint.mask3:
        flat_bottom_force = openmm.CustomAngleForce(
            "step(-(theta - theta_0)) * k * (theta - theta_0)^2"
        )
        # If theta is greater than theta_0, then the argument to step is negative,
        # which means the force is off.
        flat_bottom_force.addPerAngleParameter("k")
        flat_bottom_force.addPerAngleParameter("theta_0")

        theta_0 = 91.0 * openmm_unit.degrees
        k = to_simtk(restraint.phase[phase]["force_constants"][window_number])
        flat_bottom_force.addAngle(
            restraint.index1[0],
            restraint.index2[0],
            restraint.index3[0],
            [k, theta_0],
        )
        system.addForce(flat_bottom_force)

        if force_group is not None:
            flat_bottom_force.setForceGroup(force_group)

        return

    # Distance flat-bottom potential
    elif flat_bottom and phase == "attach" and not restraint.mask3:
        flat_bottom_force = openmm.CustomBondForce("step((r - r_0)) * k * (r - r_0)^2")
        # If x is greater than x_0, then the argument to step is positive, which means
        # the force is on.
        flat_bottom_force.addPerBondParameter("k")
        flat_bottom_force.addPerBondParameter("r_0")

        r_0 = to_simtk(restraint.phase[phase]["targets"][window_number])
        k = to_simtk(restraint.phase[phase]["force_constants"][window_number])
        flat_bottom_force.addBond(
            restraint.index1[0],
            restraint.index2[0],
            [k, r_0],
        )
        system.addForce(flat_bottom_force)

        if force_group is not None:
            flat_bottom_force.setForceGroup(force_group)

        return

    # Flat-bottom restraints are only applied during the attach phase.
    elif flat_bottom and phase == "pull":
        return
    elif flat_bottom and phase == "release":
        return

    # Distance restraints
    if restraint.mask2 and not restraint.mask3:
        if not restraint.group1 and not restraint.group2:
            bond_restraint = openmm.CustomBondForce("k * (r - r_0)^2")
            bond_restraint.addPerBondParameter("k")
            bond_restraint.addPerBondParameter("r_0")

            r_0 = to_simtk(restraint.phase[phase]["targets"][window_number])
            k = to_simtk(restraint.phase[phase]["force_constants"][window_number])
            bond_restraint.addBond(restraint.index1[0], restraint.index2[0], [k, r_0])
            system.addForce(bond_restraint)
        else:
            # Group restraints act between the centroids of the two atom groups.
            bond_restraint = openmm.CustomCentroidBondForce(
                2, "k * (distance(g1, g2) - r_0)^2"
            )
            bond_restraint.addPerBondParameter("k")
            bond_restraint.addPerBondParameter("r_0")

            r_0 = to_simtk(restraint.phase[phase]["targets"][window_number])
            k = to_simtk(restraint.phase[phase]["force_constants"][window_number])
            g1 = bond_restraint.addGroup(restraint.index1)
            g2 = bond_restraint.addGroup(restraint.index2)
            bond_restraint.addBond([g1, g2], [k, r_0])
            system.addForce(bond_restraint)

        if force_group is not None:
            bond_restraint.setForceGroup(force_group)

    # Angle restraints
    elif restraint.mask3 and not restraint.mask4:
        if not restraint.group1 and not restraint.group2 and not restraint.group3:
            angle_restraint = openmm.CustomAngleForce("k * (theta - theta_0)^2")
            angle_restraint.addPerAngleParameter("k")
            angle_restraint.addPerAngleParameter("theta_0")

            theta_0 = to_simtk(restraint.phase[phase]["targets"][window_number])
            k = to_simtk(restraint.phase[phase]["force_constants"][window_number])
            angle_restraint.addAngle(
                restraint.index1[0],
                restraint.index2[0],
                restraint.index3[0],
                [k, theta_0],
            )
            system.addForce(angle_restraint)
        else:
            # Probably needs openmm.CustomCentroidAngleForce (?)
            raise NotImplementedError

        if force_group is not None:
            angle_restraint.setForceGroup(force_group)

    # Torsion restraints
    elif restraint.mask4:
        if (
            not restraint.group1
            and not restraint.group2
            and not restraint.group3
            and not restraint.group4
        ):
            # The min(...) construction wraps the angular difference into
            # [-pi, pi] so the restraint acts on the shortest rotation.
            dihedral_restraint = openmm.CustomTorsionForce(
                f"k * min(min(abs(theta - theta_0), abs(theta - theta_0 + 2 * "
                f"{_PI_})), abs(theta - theta_0 - 2 * {_PI_}))^2"
            )
            dihedral_restraint.addPerTorsionParameter("k")
            dihedral_restraint.addPerTorsionParameter("theta_0")

            theta_0 = to_simtk(restraint.phase[phase]["targets"][window_number])
            k = to_simtk(restraint.phase[phase]["force_constants"][window_number])
            dihedral_restraint.addTorsion(
                restraint.index1[0],
                restraint.index2[0],
                restraint.index3[0],
                restraint.index4[0],
                [k, theta_0],
            )
            system.addForce(dihedral_restraint)
        else:
            # Probably needs openmm.CustomCentroidTorsionForce (?)
            raise NotImplementedError

        if force_group is not None:
            dihedral_restraint.setForceGroup(force_group)
| StarcoderdataPython |
# BUGFIX: extend sys.path with the working directory *before* importing the
# project package -- previously the append happened after
# `from server.app import ...`, so it never helped that import resolve.
import sys
from os import getcwd

sys.path.append(getcwd())

import pytest
from server.app import create_application
@pytest.fixture
def app():
    """Yield a freshly created application instance for each test."""
    application = create_application()
    yield application
@pytest.fixture
def server(loop, app, sanic_client):
    """Spin up a test client for the application on the event loop."""
    client_coro = sanic_client(app)
    return loop.run_until_complete(client_coro)
async def test_index(server):
    """An empty POST to /api/login must fail request validation (422)."""
    response = await server.post('/api/login')
    assert 422 == response.status_code
async def test_player(server):
    """An empty POST to /api/register must fail request validation (422)."""
    response = await server.post('/api/register')
    assert 422 == response.status_code
| StarcoderdataPython |
3393293 | """
URLConf for Satchmo Newsletter app
Recommended usage is to use a call to ``include()`` in your project's
root URLConf to include this URLConf for any URL beginning with
'/newsletter/'.
"""
from django.conf.urls.defaults import *
# Newsletter URL routes; each tuple is (regex, view name, extra kwargs, URL name).
# The "/ajah/" variants render a minimal template suitable for asynchronous
# (AJAX) requests instead of a full page.
urlpatterns = patterns('satchmo.newsletter.views',
    (r'^subscribe/$', 'add_subscription', {}, 'newsletter_subscribe'),
    (r'^subscribe/ajah/$', 'add_subscription', {'result_template' : 'newsletter/ajah.html'}, 'newsletter_subscribe_ajah'),
    (r'^unsubscribe/$', 'remove_subscription', {}, 'newsletter_unsubscribe'),
    (r'^unsubscribe/ajah/$', 'remove_subscription', {'result_template' : 'newsletter/ajah.html'}, 'newsletter_unsubscribe_ajah'),
    (r'^update/$', 'update_subscription', {}, 'newsletter_update'),
)
| StarcoderdataPython |
5123309 | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
def register_fix_kms_create_grant_docs(cli):
    """Register the handler that clears the name-translation map when the
    ``kms create-grant`` doc title is rendered."""
    # The docs for this command may refer to the raw API name rather than the
    # CLI command, so the translation map must be dropped for it.
    event_name = 'doc-title.kms.create-grant'
    cli.register(event_name, remove_translation_map)
def remove_translation_map(help_command, **kwargs):
    """Reset the doc renderer's translation map so original API names appear
    unmodified in the generated documentation."""
    help_command.doc.translation_map = dict()
| StarcoderdataPython |
272722 | <filename>nicos_sinq/gui/panels/live.py
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# <NAME> <<EMAIL>>
#
# *****************************************************************************
"""NICOS livewidget for SINQ: we want to be able to show plots of scanfiles.
A slider allows to select which scanpoint to display"""
import numpy as np
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QLabel
from nicos.clients.flowui.panels.live import \
LiveDataPanel as FlowuiLiveDataPanel
from nicos.guisupport.qt import QSlider, QVBoxLayout, QWidget
class ScanSlider(QWidget):
    """Composite widget: a caption above a horizontal slider used to pick a
    scan point."""

    def __init__(self, parent):
        QWidget.__init__(self, parent)
        layout = QVBoxLayout()
        self.setLayout(layout)
        self.slider = QSlider(Qt.Horizontal, self)
        self.title = QLabel('Scanpoint selection', self)
        self.title.setMaximumHeight(25)
        # Caption on top, slider underneath, with a little trailing space.
        layout.addWidget(self.title)
        layout.addWidget(self.slider)
        layout.addSpacing(15)
class LiveDataPanel(FlowuiLiveDataPanel):
    # Extends the flowui live-data panel with a slider (ScanSlider) that
    # selects which scan point of a 4-D data set is displayed.
    def __init__(self, parent, client, options):
        FlowuiLiveDataPanel.__init__(self, parent, client, options)
        self.scan_slider = ScanSlider(parent)
        self.scan_slider.setMaximumHeight(90)
        # Insert the slider just above the last widget of the base layout.
        self.layout().insertWidget(self.layout().count() - 1, self.scan_slider)
        # Hidden until a 4-D (scan-point) data set is shown.
        self.scan_slider.hide()
        self._arrays = []
        self.scan_slider.slider.valueChanged.connect(self._update_scan_slice)
    def _show(self, data=None):
        """Same as the default, but if data has dimension 3 assumes that the
        first dimension is the scanpoint set.
        In that case shows a slider that allow to scroll and display all the
        scanpoints images.
        """
        idx = self.fileList.currentRow()
        if idx == -1:
            self.fileList.setCurrentRow(0)
            return
        # no data has been provided, try to get it from the cache
        if data is None:
            data = self.getDataFromItem(self.fileList.currentItem())
        # still no data
        if data is None:
            return
        arrays = data.get('dataarrays', [])
        labels = data.get('labels', {})
        titles = data.get('titles', {})
        # A 4-D stack is interpreted as (datasets, scanpoints, y, x): show the
        # slider and slice out the currently selected scan point.
        if len(np.asarray(arrays).shape) == 4:
            _arrays = np.asarray(arrays)
            self.scan_slider.show()
            self.scan_slider.slider.setMaximum(_arrays.shape[1] - 1)
            self.scan_slider.title.setText(
                f'Scanpoint selection: <# {self.scan_slider.slider.value()+1}>')
            _arrays = _arrays[:, self.scan_slider.slider.value(), ...]
        else:
            _arrays = np.asarray(arrays)
            self.scan_slider.hide()
        # if multiple datasets have to be displayed in one widget, they have
        # the same dimensions, so we only need the dimensions of one set
        self._initLiveWidget(_arrays)
        self.applyPlotSettings()
        for widget in self._get_all_widgets():
            widget.setData(_arrays, labels)
            widget.setTitles(titles)
        if self.unzoom and self.widget:
            self.on_actionUnzoom_triggered()
    def _update_scan_slice(self):
        # the signal triggering _show() directly might cause an exception.
        # Looks like this prevents it
        self._show()
| StarcoderdataPython |
9631894 | # -*- coding: utf-8 -*-
# Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lifelines import KaplanMeierFitter, CoxPHFitter
from lifelines.statistics import logrank_test
import logging
import matplotlib.colors as colors
from matplotlib import pyplot as plt
import numbers
import numpy as np
import seaborn as sb
import patsy
from .rounding import float_str
from .utils import get_logger
logger = get_logger(__name__, level=logging.INFO)
def _plot_kmf_single(df,
                     condition_col,
                     survival_col,
                     censor_col,
                     threshold,
                     title,
                     xlabel,
                     ylabel,
                     ax,
                     with_condition_color,
                     no_condition_color,
                     with_condition_label,
                     no_condition_label,
                     color_map,
                     label_map,
                     color_palette,
                     ci_show,
                     print_as_title):
    """
    Helper function to produce a single KM survival plot, among observations in df by groups defined by condition_col.

    All inputs are required - this function is intended to be called by `plot_kmf`.

    Groups are derived from `condition_col` as follows: numeric columns are
    dichotomised at `threshold` (or the median), object/categorical columns use
    their unique values, and boolean columns use True/False. For two groups a
    log-rank test is run; for more than two a Cox PH fit; the fitted results
    object is returned with the per-group survival/event series attached.
    """
    # make color inputs consistent hex format
    if colors.is_color_like(with_condition_color):
        with_condition_color = colors.to_hex(with_condition_color)
    if colors.is_color_like(no_condition_color):
        no_condition_color = colors.to_hex(no_condition_color)
    ## prepare data to be plotted; producing 3 outputs:
    # - `condition`, series containing category labels to be plotted
    # - `label_map` (mapping condition values to plot labels)
    # - `color_map` (mapping condition values to plotted colors)
    if threshold is not None:
        is_median = threshold == "median"
        if is_median:
            threshold = df[condition_col].median()
        label_suffix = float_str(threshold)
        condition = df[condition_col] > threshold
        default_label_no_condition = "%s ≤ %s" % (condition_col, label_suffix)
        # NOTE(review): "(median)" is appended only after the no-condition
        # label was built, so only the with-condition label carries it --
        # confirm this asymmetry is intentional.
        if is_median:
            label_suffix += " (median)"
        default_label_with_condition = "%s > %s" % (condition_col, label_suffix)
        with_condition_label = with_condition_label or default_label_with_condition
        no_condition_label = no_condition_label or default_label_no_condition
        if not label_map:
            label_map = {False: no_condition_label,
                         True: with_condition_label}
        if not color_map:
            color_map = {False: no_condition_color,
                         True: with_condition_color}
    elif df[condition_col].dtype == 'O' or df[condition_col].dtype.name == "category":
        condition = df[condition_col].astype("category")
        if not label_map:
            label_map = dict()
            # List comprehension used purely for its side effect of filling
            # label_map with "<col> = <value>" entries.
            [label_map.update({condition_value: '{} = {}'.format(condition_col,
                                                                 condition_value)})
             for condition_value in condition.unique()]
        if not color_map:
            rgb_values = sb.color_palette(color_palette, len(label_map.keys()))
            hex_values = [colors.to_hex(col) for col in rgb_values]
            color_map = dict(zip(label_map.keys(), hex_values))
    elif df[condition_col].dtype == 'bool':
        condition = df[condition_col]
        default_label_with_condition = "= {}".format(condition_col)
        default_label_no_condition = "¬ {}".format(condition_col)
        with_condition_label = with_condition_label or default_label_with_condition
        no_condition_label = no_condition_label or default_label_no_condition
        if not label_map:
            label_map = {False: no_condition_label,
                         True: with_condition_label}
        if not color_map:
            color_map = {False: no_condition_color,
                         True: with_condition_color}
    else:
        raise ValueError('Don\'t know how to plot data of type\
 {}'.format(df[condition_col].dtype))

    # produce kmf plot for each category (group) identified above
    kmf = KaplanMeierFitter()
    grp_desc = list()
    grp_survival_data = dict()
    grp_event_data = dict()
    grp_names = list(condition.unique())
    for grp_name, grp_df in df.groupby(condition):
        grp_survival = grp_df[survival_col]
        grp_event = (grp_df[censor_col].astype(bool))
        grp_label = label_map[grp_name]
        grp_color = color_map[grp_name]
        kmf.fit(grp_survival, grp_event, label=grp_label)
        desc_str = "# {}: {}".format(grp_label, len(grp_survival))
        grp_desc.append(desc_str)
        grp_survival_data[grp_name] = grp_survival
        grp_event_data[grp_name] = grp_event
        if ax:
            ax = kmf.plot(ax=ax, show_censors=True, ci_show=ci_show, color=grp_color)
        else:
            ax = kmf.plot(show_censors=True, ci_show=ci_show, color=grp_color)

    ## format the plot
    # Set the y-axis to range 0 to 1
    ax.set_ylim(0, 1)
    y_tick_vals = ax.get_yticks()
    ax.set_yticklabels(["%d" % int(y_tick_val * 100) for y_tick_val in y_tick_vals])
    # plot title
    if title:
        ax.set_title(title)
    elif print_as_title:
        ax.set_title(' | '.join(grp_desc))
    else:
        [print(desc) for desc in grp_desc]
    # axis labels
    if xlabel:
        ax.set_xlabel(xlabel)
    if ylabel:
        ax.set_ylabel(ylabel)

    ## summarize analytical version of results
    ## again using same groups as are plotted
    if len(grp_names) == 2:
        # use log-rank test for 2 groups
        results = logrank_test(grp_survival_data[grp_names[0]],
                               grp_survival_data[grp_names[1]],
                               event_observed_A=grp_event_data[grp_names[0]],
                               event_observed_B=grp_event_data[grp_names[1]])
    elif len(grp_names) == 1:
        # no analytical result for 1 or 0 groups
        # NOTE(review): NullSurvivalResults is expected to be defined
        # elsewhere in this module -- it is not imported here.
        results = NullSurvivalResults()
    else:
        # cox PH fitter for >2 groups
        cf = CoxPHFitter()
        cox_df = patsy.dmatrix('+'.join([condition_col, survival_col,
                                         censor_col]),
                               df, return_type='dataframe')
        del cox_df['Intercept']
        results = cf.fit(cox_df, survival_col, event_col=censor_col)
        results.print_summary()

    # add metadata to results object so caller can print them
    results.survival_data_series = grp_survival_data
    results.event_data_series = grp_event_data
    results.desc = grp_desc
    return results
def plot_kmf(df,
             condition_col,
             censor_col,
             survival_col,
             strata_col=None,
             threshold=None,
             title=None,
             xlabel=None,
             ylabel=None,
             ax=None,
             with_condition_color="#B38600",
             no_condition_color="#A941AC",
             with_condition_label=None,
             no_condition_label=None,
             color_map=None,
             label_map=None,
             color_palette="Set1",
             ci_show=False,
             print_as_title=False):
    """
    Plot survival curves by splitting the dataset into two groups based on
    condition_col. Report results for a log-rank test (if two groups are plotted)
    or CoxPH survival analysis (if >2 groups) for association with survival.
    Regarding definition of groups:
        If condition_col is numeric, values are split into 2 groups.
            - if threshold is defined, the groups are split on being > or < condition_col
            - if threshold == 'median', the threshold is set to the median of condition_col
        If condition_col is categorical or string, results are plotted for each unique value in the dataset.
        If condition_col is None, results are plotted for all observations
    Currently, if `strata_col` is given, the results are repeated among each stratum of the df.
    A truly "stratified" analysis is not yet supported but may be soon.
    Parameters
    ----------
        df: dataframe
        condition_col: string, column which contains the condition to split on
        survival_col: string, column which contains the survival time
        censor_col: string,
        strata_col: optional string, denoting column containing data to
            stratify by (default: None)
        threshold: int or string, if int, condition_col is thresholded at int,
                                  if 'median', condition_col thresholded
                                  at its median
                                  if 'median-per-strata', & if stratified analysis
                                  then condition_col thresholded by strata
        title: Title for the plot, default None
        ax: an existing matplotlib ax, optional, default None
            note: not currently supported when `strata_col` is not None
        with_condition_color: str, hex code color for the with-condition curve
        no_condition_color: str, hex code color for the no-condition curve
        with_condition_label: str, optional, label for True condition case
        no_condition_label: str, optional, label for False condition case
        color_map: dict, optional, mapping of hex-values to condition text
            in the form of {value_name: color_hex_code}.
            defaults to `sb.color_palette` using `default_color_palette` name,
            or *_condition_color options in case of boolean operators.
        label_map: dict, optional, mapping of labels to condition text.
            defaults to "condition_name = condition_value", or *_condition_label
            options in case of boolean operators.
        color_palette: str, optional, name of sb.color_palette to use
            if color_map not provided.
        print_as_title: bool, optional, whether or not to print text
          within the plot's title vs. stdout, default False
    """
    # set reasonable default threshold value depending on type of condition_col
    if threshold is None:
        if df[condition_col].dtype != "bool" and \
                np.issubdtype(df[condition_col].dtype, np.number):
            threshold = "median"
    # check inputs for threshold for validity
    elif isinstance(threshold, numbers.Number):
        logger.debug("threshold value is numeric")
    elif threshold not in ("median", "median-per-strata"):
        raise ValueError("invalid input for threshold. Must be numeric, None, 'median', or 'median-per-strata'.")
    elif threshold == "median-per-strata" and strata_col is None:
        raise ValueError("threshold given was 'median-per-strata' and yet `strata_col` was None. Did you mean 'median'?")
    # construct kwarg dict to pass to _plot_kmf_single.
    # start with args that do not vary according to strata_col
    arglist = dict(
        condition_col=condition_col,
        survival_col=survival_col,
        censor_col=censor_col,
        threshold=threshold,
        with_condition_color=with_condition_color,
        no_condition_color=no_condition_color,
        with_condition_label=with_condition_label,
        no_condition_label=no_condition_label,
        color_map=color_map,
        label_map=label_map,
        xlabel=xlabel,
        ylabel=ylabel,
        ci_show=ci_show,
        color_palette=color_palette,
        print_as_title=print_as_title)
    # if strata_col is None, pass all parameters to _plot_kmf_single
    if strata_col is None:
        arglist.update(dict(
            df=df,
            title=title,
            ax=ax))
        return _plot_kmf_single(**arglist)
    else:
        # prepare for stratified analysis
        if threshold == "median":
            # by default, "median" threshold should be intra-strata median,
            # so resolve it to a concrete number over the whole df here
            arglist["threshold"] = df[condition_col].dropna().median()
        elif threshold == "median-per-strata":
            # leave as "median" so each per-stratum call computes its own
            arglist["threshold"] = "median"
        # create axis / subplots for stratified results
        if ax is not None:
            raise ValueError("ax not supported with stratified analysis.")
        n_strata = len(df[strata_col].unique())
        f, ax = plt.subplots(n_strata, sharex=True)
        # create results dict to hold per-strata results
        results = dict()
        # call _plot_kmf_single for each of the strata
        for i, (strat_name, strat_df) in enumerate(df.groupby(strata_col)):
            # plt.subplots returns a bare Axes (not an array) when n == 1
            if n_strata == 1:
                arglist["ax"] = ax
            else:
                arglist["ax"] = ax[i]
            subtitle = "{}: {}".format(strata_col, strat_name)
            arglist["title"] = subtitle
            arglist["df"] = strat_df
            results[subtitle] = plot_kmf(**arglist)
            # plain loop (not a comprehension) since print is a side effect
            for desc in results[subtitle].desc:
                print(desc)
        if title:
            f.suptitle(title)
        return results
class NullSurvivalResults(object):
    """Placeholder result used when no survival model could be fit
    (e.g. fewer than two groups were available to compare)."""

    def __repr__(self):
        message = "No model fit."
        return message
def logrank(df,
            condition_col,
            censor_col,
            survival_col,
            threshold=None):
    """Run a log-rank test comparing survival between two groups of *df*.

    When *threshold* is None, ``condition_col`` is used directly as a boolean
    mask; otherwise observations with ``condition_col`` strictly above the
    threshold (or above the column median for ``threshold == "median"``) form
    the "with condition" group.
    """
    if threshold is None:
        mask = df[condition_col]
    else:
        cutoff = df[condition_col].median() if threshold == "median" else threshold
        mask = df[condition_col] > cutoff
    with_condition = df[mask]
    without_condition = df[~mask]
    return logrank_test(
        without_condition[survival_col],
        with_condition[survival_col],
        event_observed_A=without_condition[censor_col].astype(bool),
        event_observed_B=with_condition[censor_col].astype(bool))
| StarcoderdataPython |
1807116 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------------------------------------
@Name: __init__.py
@Desc:
@Author: <EMAIL>
@Create: 2020.08.02 14:51
-------------------------------------------------------------------------------
@Change: 2020.08.02
-------------------------------------------------------------------------------
"""
from flask import Blueprint
# "main" blueprint: view modules attach their routes to this object and the
# application factory registers it on the Flask app.
main = Blueprint('main', __name__)
| StarcoderdataPython |
1874528 | <gh_stars>1-10
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QObject, pyqtSlot
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbarfrom
class Ui_MainWindow(QObject):
    # Qt Designer-style UI class for the "Linknoide" main window: builds the
    # widget tree in setupUi, sets translatable texts in retranslateUi, and
    # declares slot stubs to be filled in by a subclass or later edits.
    def setupUi(self, MainWindow):
        # Build and wire every widget of the main window; must be called once
        # with a QMainWindow instance before the window is shown.
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1365, 689)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # --- title label (top-left) ---
        self.app_title = QtWidgets.QLabel(self.centralwidget)
        self.app_title.setGeometry(QtCore.QRect(20, 20, 121, 21))
        font = QtGui.QFont()
        font.setFamily("Times New Roman")
        font.setPointSize(20)
        font.setBold(True)
        font.setWeight(75)
        self.app_title.setFont(font)
        self.app_title.setObjectName("app_title")
        # --- file-upload strip: instruction label + path field + browse button ---
        self.file_upload_area = QtWidgets.QFrame(self.centralwidget)
        self.file_upload_area.setGeometry(QtCore.QRect(20, 60, 1331, 43))
        self.file_upload_area.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.file_upload_area.setFrameShadow(QtWidgets.QFrame.Raised)
        self.file_upload_area.setObjectName("file_upload_area")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.file_upload_area)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.upload_instruction = QtWidgets.QLabel(self.file_upload_area)
        font = QtGui.QFont()
        font.setFamily("Times New Roman")
        font.setPointSize(10)
        font.setBold(True)
        font.setWeight(75)
        self.upload_instruction.setFont(font)
        self.upload_instruction.setObjectName("upload_instruction")
        self.horizontalLayout.addWidget(self.upload_instruction)
        self.path_indicator = QtWidgets.QLineEdit(self.file_upload_area)
        self.path_indicator.setObjectName("path_indicator")
        self.horizontalLayout.addWidget(self.path_indicator)
        self.browse_btn = QtWidgets.QPushButton(self.file_upload_area)
        font = QtGui.QFont()
        font.setFamily("Comic Sans MS")
        font.setPointSize(10)
        self.browse_btn.setFont(font)
        self.browse_btn.setObjectName("browse_btn")
        self.browse_btn.setStyleSheet("background-color: skyblue")
        self.horizontalLayout.addWidget(self.browse_btn)
        # --- matplotlib canvas embedded in a frame on the right ---
        self.canvas_window = QtWidgets.QFrame(self.centralwidget)
        self.canvas_window.setGeometry(QtCore.QRect(319, 129, 1031, 511))
        self.canvas_window.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.canvas_window.setFrameShadow(QtWidgets.QFrame.Raised)
        self.canvas_window.setObjectName("canvas_window")
        self.canvas_grid = QtWidgets.QGridLayout(self.canvas_window)
        self.figure = plt.figure()
        self.canvas = FigureCanvas(self.figure)
        self.canvas_grid.addWidget(self.canvas, 0, 1, 10, 10)
        # --- left column: preview area above the (initially disabled) Generate button ---
        self.layoutWidget = QtWidgets.QWidget(self.centralwidget)
        self.layoutWidget.setGeometry(QtCore.QRect(20, 130, 261, 511))
        self.layoutWidget.setObjectName("layoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.preview_window = QtWidgets.QLabel(self.layoutWidget)
        font = QtGui.QFont()
        font.setFamily("Times New Roman")
        font.setPointSize(12)
        self.preview_window.setFont(font)
        self.preview_window.setFrameShape(QtWidgets.QFrame.Box)
        self.preview_window.setText("")
        self.preview_window.setObjectName("preview_window")
        self.verticalLayout.addWidget(self.preview_window)
        self.generate_btn = QtWidgets.QPushButton(self.layoutWidget)
        font = QtGui.QFont()
        font.setFamily("Comic Sans MS")
        font.setPointSize(10)
        font.setBold(True)
        font.setWeight(75)
        self.generate_btn.setFont(font)
        self.generate_btn.setObjectName("generate_btn")
        self.generate_btn.setStyleSheet("background-color: lightgreen")
        # disabled until a file has been chosen (enabled elsewhere)
        self.generate_btn.setEnabled(False)
        self.verticalLayout.addWidget(self.generate_btn)
        MainWindow.setCentralWidget(self.centralwidget)
        # --- menu bar (File > Save, Edit > Shuffle) and status bar ---
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1365, 21))
        self.menubar.setObjectName("menubar")
        self.app_File = QtWidgets.QMenu(self.menubar)
        self.app_File.setObjectName("app_File")
        self.app_Edit = QtWidgets.QMenu(self.menubar)
        self.app_Edit.setObjectName("app_Edit")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.Save_option = QtWidgets.QAction(MainWindow)
        self.Save_option.setObjectName("Save_option")
        self.Save_option.setShortcut('Ctrl+S')
        self.Save_option.triggered.connect(self.saveFile)
        self.Shuffle_Option = QtWidgets.QAction(MainWindow)
        self.Shuffle_Option.setObjectName("Shuffle_option")
        self.Shuffle_Option.triggered.connect(self.shuffle)
        self.app_File.addAction(self.Save_option)
        self.app_Edit.addAction(self.Shuffle_Option)
        self.menubar.addAction(self.app_File.menuAction())
        self.menubar.addAction(self.app_Edit.menuAction())
        # apply texts, then connect the remaining widget signals to slots
        self.retranslateUi(MainWindow)
        self.browse_btn.clicked.connect(self.browseSlot)
        self.path_indicator.returnPressed.connect(self.pathSlot)
        self.generate_btn.clicked.connect(self.generateSlot)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        # Set all user-visible strings through Qt's translation machinery.
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Linknoide"))
        MainWindow.setWindowIcon(QtGui.QIcon('asset/logo/app_icon.png'))
        self.app_title.setText(_translate("MainWindow", "Linknoide"))
        self.upload_instruction.setText(
            _translate("MainWindow", "Select File"))
        self.browse_btn.setText(_translate("MainWindow", "Browse"))
        self.generate_btn.setText(_translate("MainWindow", "Generate"))
        self.app_File.setTitle(_translate("MainWindow", "File"))
        self.app_Edit.setTitle(_translate("MainWindow", "Edit"))
        self.Save_option.setText(_translate("MainWindow", "Save"))
        self.Shuffle_Option.setText(_translate("MainWindow", "Shuffle"))
    # The slots below are intentionally empty stubs; application logic is
    # expected to be provided by a subclass or added later.
    @pyqtSlot()
    def browseSlot(self):
        pass
    @pyqtSlot()
    def pathSlot(self):
        pass
    @pyqtSlot()
    def generateSlot(self):
        pass
    def saveFile(self):
        pass
    def shuffle(self):
        pass
# Script entry point: create the Qt application, attach the UI to a fresh
# main window, and run the event loop until the window is closed.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| StarcoderdataPython |
3202702 | # 0CD - Quality of life utlities for obsessive compulsive CTF enthusiasts
# by b0bb (https://twitter.com/0xb0bb)
from binaryninja import PluginCommand, Settings
from .modules import stackguards
# Register the plugin's settings group and the two configurable variable
# names, then expose the clean-up commands in Binary Ninja's UI.
settings = Settings()
settings.register_group("0cd", "0CD")
settings.register_setting("0cd.stackguards.var_name", """
    {
        "title" : "Stack canary variable name",
        "type" : "string",
        "default" : "CANARY",
        "description" : "Name of the stack canary stored on the stack."
    }
    """)
settings.register_setting("0cd.stackguards.tcb_name", """
    {
        "title" : "TCB variable name",
        "type" : "string",
        "default" : "tcb",
        "description" : "Name of the tcb struct pointer stored on the stack."
    }
    """)
# Raw strings: the menu paths contain literal backslashes (Binary Ninja uses
# them as menu separators), and "\S"/"\C" are invalid escape sequences in a
# normal string literal (DeprecationWarning, a future SyntaxError).
PluginCommand.register(
    r"0CD\Stack Guards\Clean all",
    "Clean up stack guards in all functions",
    stackguards.run_plugin_all
)
PluginCommand.register_for_function(
    r"0CD\Stack Guards\Clean current function",
    "Clean up stack guards in the current function",
    stackguards.run_plugin_current
)
| StarcoderdataPython |
3419350 | <gh_stars>0
from django.shortcuts import render,redirect
from .models import empresa,aboutme,skill,servicio,comments,categorias,proyectos
from .forms import ContactoForm,MensajeForm
# Create your views here.
def Hola(request):
    """Render the 'hola' page with a MensajeForm; on a valid POST the
    submitted field names and values are dumped to stdout (debug aid)."""
    form = MensajeForm(request.POST or None)
    if form.is_valid():
        for field_name in form.cleaned_data:
            print(field_name)
            print(form.cleaned_data.get(field_name))
    return render(request, 'hola.html', {'formulario': form})
def home(request):
    """Render the landing page with all portfolio content plus a contact form.

    On a valid POST the message is saved (flagged as not yet reviewed) and the
    form is reset to a blank one for re-display.
    """
    miEmpresa = empresa.objects.all()
    about = aboutme.objects.all()
    skills = skill.objects.all()
    servicios = servicio.objects.all()
    comentario = comments.objects.all()
    mcategorias = categorias.objects.all()
    mproyectos = proyectos.objects.all()
    miFormulario = MensajeForm(request.POST or None)
    # BUG FIX: `is_valid` was referenced without calling it, so the bound
    # method was always truthy and invalid/empty forms were saved too.
    # Hola() above shows the intended `is_valid()` usage.
    if miFormulario.is_valid():
        miForm = miFormulario.save(commit=False)
        # new messages start unreviewed until an admin approves them
        miForm.revisado = False
        miForm.save()
        # present a fresh, empty form after a successful submission
        miFormulario = MensajeForm()
    contexto = {
        'miEmpresa': miEmpresa,
        'about': about,
        'skills': skills,
        'servicios': servicios,
        'comentarios': comentario,
        'categorias': mcategorias,
        'proyectos': mproyectos,
        'formulario': miFormulario
    }
    return render(request, 'index.html', contexto)
3552115 | <reponame>gpdev001/pawn
import frappe
from frappe.utils import formatdate, cint
def customer_validate(customer, method):
    """Frappe 'validate' doc-event hook for Customer: ensure customer_no is set.

    `method` is the event name supplied by the framework and is unused here.
    """
    set_customer_no(customer)
def set_customer_no(customer):
    """Assign customer_no as "<branch_code>-<seq>-<yyyymmdd>" if not set yet.

    The sequence number is the current count of customers in the same branch
    plus one, and the date stamp comes from the document's creation time.
    """
    if customer.customer_no:
        return
    code = frappe.db.get_value('Branch', customer.branch, 'branch_code')
    seq = cint(frappe.db.count('Customer', {'branch': customer.branch})) + 1
    stamp = formatdate(customer.creation, "yyyymmdd")
    customer.customer_no = "{0}-{1}-{2}".format(code, seq, stamp)
9715566 | <filename>morepath/toposort.py<gh_stars>0
"""Topological sort functionality.
"""
from dectate import topological_sort
def toposorted(infos):
    """Topologically sort *infos*.

    Each info must expose a ``key`` attribute plus ``before``/``after``
    attributes holding lists of keys (see :class:`Info`): keys in ``after``
    become dependencies of the info itself, while keys in ``before`` gain
    the info as one of *their* dependencies.
    """
    by_key = {info.key: info for info in infos}
    deps = {info.key: [] for info in infos}
    for info in infos:
        # everything listed in `after` must sort before this info
        deps[info.key].extend(by_key[key] for key in info.after)
        # and this info must sort before everything listed in `before`
        for key in info.before:
            deps[key].append(info)
    return topological_sort(infos, lambda info: deps[info.key])
class Info(object):
    """Convenience base class for items passed to :func:`toposorted`.

    ``before`` and ``after`` may each be a list/tuple of keys, a single
    key, or ``None``; both are normalized to plain lists of keys.
    """
    def __init__(self, key, before, after):
        self.key = key
        self.after = _convert_before_after(after)
        self.before = _convert_before_after(before)
def _convert_before_after(l):
if isinstance(l, (list, tuple)):
return list(l)
elif l is None:
return []
else:
return [l]
| StarcoderdataPython |
class Module():
    """Abstract base for named data modules.

    Concrete subclasses hold rows of data and expose key/value properties;
    every method except ``__init__`` must be overridden, otherwise it raises
    ``NotImplementedError``.
    """

    def __init__(self, name):
        self.name = name

    def add_data(self, row):
        """Ingest one row of data."""
        raise NotImplementedError

    def print_data(self, indent=0):
        """Pretty-print the stored data, indented by *indent*."""
        raise NotImplementedError

    def get_property(self, name):
        """Return the value of property *name*."""
        raise NotImplementedError

    def get_properties(self):
        """Return an iterable of (key, value) tuples."""
        raise NotImplementedError

    def has_property(self, name):
        """Return whether property *name* exists."""
        raise NotImplementedError
1818343 | s=input()
# str.count gives the number of non-overlapping occurrences of "cat" in the
# line read into `s` above.
print(s.count("cat"))
| StarcoderdataPython |
9698409 | # -*- coding: utf-8 -*-
import yfinance as yf
import pandas as pd
from datetime import datetime
from pathlib import Path
import logging
from tashares.cfg import config
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s',
level=logging.INFO,
datefmt='%m/%d/%Y %I:%M:%S %p')
class Stockhist(object):
    """wrap yfinance to download stock history.
    - Input: stock symbol.
    - Output: save stock price daily history into a file.
    there are two modes:
        1. load from the local file of stock history (by default)
        2. update from internet server: download from yahoo! up to today, and upon request save history
    Args:
        symbol (string, required): stock symbol. Default: ''
        data_dir (string, optional): the directory of data files. Default: CWD, Currunt Working Directory
        update_history (bool, optional): download history and info if 'update_history=True'. Default: False, load local files only
        start_from_date (string, optional): the date to start downloading stock history. Default. '2015-01-01'
        dump_files (bool, optional): save updated history and info into files if 'dump_files=True'. Default: False, don't write files
    Examples:
        >>> from tashares import Stockhist
        >>> msft = Stockhist("MSFT")
        >>> msft = Stockhist(symbol="MSFT")
        >>> msft = Stockhist("MSFT", update_history=True)
        >>> msft = Stockhist("MSFT", data_dir='/tmp')
        >>> msft = Stockhist("MSFT", update_history=True, start_from_date='2020-01-01')
    """
    # class-level defaults read from the package configuration
    start_from_date = config['DEFAULT']['StartFromDate']
    historyfolder = config['DEFAULT']['HistoryFolder']
    # shared empty default; instances overwrite with their own DataFrame
    history = pd.DataFrame()
    # populated once a non-empty history has been loaded or downloaded
    history_start_date: datetime
    history_stop_date: datetime

    def __init__(self, *args, **kwargs):
        """Create a ticker wrapper; by default load history from disk,
        or download/merge it when update_history=True."""
        self.symbol = kwargs.get('symbol', '') if len(args) == 0 else args[0]
        self.ticker = yf.Ticker(self.symbol)
        self.data_dir = kwargs.get('data_dir', '')
        if kwargs.get('update_history', False):
            self.start_from_date = kwargs.get('start_from_date', self.start_from_date)
            self.update_history(start_from_date=self.start_from_date)
        else:
            if self.data_dir.is_dir():
                self.history = self.load_history(self.history_filename)
            else:
                logging.critical(f"{self.data_dir} is not a directory")
        if kwargs.get('dump_files', False):
            self.dump_history_file(self.history_filename)

    def load_history(self, filename) -> pd.DataFrame:
        """Read a previously saved tab-separated history file.

        Returns the date-sorted DataFrame, or an empty DataFrame when the
        file is missing, unreadable, or empty. On success the start/stop
        date attributes are refreshed.
        """
        try:
            content = pd.read_csv(filename, sep='\t', parse_dates=True, index_col=[0])
            assert not content.empty
            # sort by date in case that is not ordered
            content = content.sort_index()
            self.history_start_date = content.index.min()
            self.history_stop_date = content.index.max()
            assert datetime.now() >= self.history_start_date
            logging.debug(
                f" {self.symbol} : loaded {len(content)} lines in (unknown) from {self.history_start_date} to {self.history_stop_date}")
        except Exception:
            # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; any read/validation failure still
            # degrades gracefully to an empty history.  (The old
            # `finally: return` also suppressed in-flight exceptions.)
            logging.debug(f" history file '(unknown)' nonexistent or broken")
            content = pd.DataFrame()
        return content

    def update_history(self, start_from_date='2015-01-01', stop_at_date='') -> None:
        """Download daily price history from Yahoo! and merge it into
        ``self.history`` (deduplicated, keeping the newest row, and sorted).

        Downloads resume from the newest date already held when a history
        is present; otherwise they start at *start_from_date*.
        """
        if bool(self.ticker):
            assert self.ticker.ticker == self.symbol
            try:
                content = self.ticker.history(
                    start=self.history.index.max() if (not self.history.empty) else datetime.strptime(start_from_date, '%Y-%m-%d'),
                    end=datetime.today() if stop_at_date == '' else datetime.strptime(stop_at_date, '%Y-%m-%d'),
                    auto_adjust=False, back_adjust=False, rounding=True)
                if not content.empty:
                    # drop rows whose date index failed to parse
                    content = content[pd.notnull(content.index.values)]
                    # merge with history loaded from file
                    self.history = pd.concat([self.history, content])
                    self.history = self.history[~self.history.index.duplicated(keep='last')]
                    self.history = self.history.sort_index()
                    self.history_start_date = self.history.index.min()
                    self.history_stop_date = self.history.index.max()
                    logging.debug(
                        f" {self.symbol} : downloaded history {len(self.history)} days, from {self.history_start_date} to {self.history_stop_date}")
            except Exception:
                # narrowed from a bare `except:`; download failures are
                # logged and leave any existing history untouched
                logging.critical(f"{self.symbol}: fail to download history")

    def dump_history_file(self, filename) -> None:
        """Write the in-memory history to *filename* as TSV (no-op when the
        history is empty or the parent directory does not exist)."""
        if not self.history.empty and filename.parent.is_dir():
            self.history.to_csv(filename, sep='\t',
                                encoding='utf-8', index=True, float_format='%.5f')
            logging.info(f" {self.symbol} : write {len(self.history)} lines to (unknown)")

    @property
    def history_filename(self):
        # history files are stored per-symbol under the configured folder
        return self.data_dir / f"{self.historyfolder}/{self.symbol}"

    @property
    def symbol(self):
        return self._symbol

    @symbol.setter
    def symbol(self, value: str):
        if value == '':
            logging.warning(
                f"__init__() missing 1 required positional argument: 'symbol', e.g. Stock('MSFT') or Stock(symbol='MSFT')...")
        self._symbol = value

    @property
    def data_dir(self):
        return self._data_dir

    @data_dir.setter
    def data_dir(self, value: str):
        # always coerced to a Path; a missing directory is only warned about
        if not Path(value).is_dir():
            logging.warning(f"data directory {value} does not exist")
        self._data_dir = Path(value)

    def __str__(self):
        return f"{str(self.__class__)} : {self.symbol} : history {len(self.history)} days, {len(self.history.columns)} columns"

    def __len__(self):
        return len(self.history)

    def __call__(self, *args, **kwargs):
        # calling the instance refreshes its history up to today
        self.update_history()
| StarcoderdataPython |
9759781 | from nbcheckorder import are_cells_sequential
import pytest
from pathlib import Path
TESTS_DIR = Path(__file__).parent
# Fixture notebooks live next to this test module: 'dirty_order.ipynb' has
# cells whose execution counts are out of order, 'clean_order.ipynb' does not.
@pytest.mark.parametrize("filename,expected_result",
                         (
                             (TESTS_DIR / 'dirty_order.ipynb', False),
                             (TESTS_DIR / 'clean_order.ipynb', True),
                         ))
def test_are_cells_sequential(filename, expected_result):
    """are_cells_sequential should accept only notebooks executed in order."""
    result = are_cells_sequential(filename)
    assert result == expected_result
11300260 | # **args
def save_user(**user):
    """Demo of **kwargs: collect arbitrary keyword arguments into the dict
    ``user`` and print the value stored under its "name" key."""
    name = user["name"]
    print(name)

# **user gathers keyword arguments into a plain dictionary, e.g.
# {'id': 1, 'name': 'Sohail', 'email': '<EMAIL>'} (key: value pairs).
save_user(id=1, name='Sohail', email='<EMAIL>')
6528226 | pietra = 0
# Ask six people for their age and count how many are adults (18 or older);
# the counter `pietra` is initialized to 0 just above this loop.
for x in range(1, 7):
    livy = int(input('digite a sua idade '))
    if livy < 18:
        # under 18: not of legal age yet
        print('você não está na maior idade ainda')
    else:
        # 18+: count this person as an adult
        pietra = pietra + 1
        print('você está na maior idade')
# Report how many of the six respondents are adults.
print('o número de pessoas q estão na maior idade é igual a {}'.format(pietra))
3489203 | from setuptools import setup
# Packaging metadata for the single-module `to_tty` distribution, exposing
# the `to-tty` console command.
setup(
    name='to_tty',
    author='Hixan',
    author_email='<EMAIL>',
    version='1.0.1',
    py_modules=['to_tty'],
    # Only external dependencies belong here: the list previously included
    # 'to_tty' itself, which is a self-dependency and has been removed.
    install_requires=['Click'],
    entry_points='''
        [console_scripts]
        to-tty=to_tty:main
    '''
)
| StarcoderdataPython |
5079784 | from lagury.service.core import start_service
# Script entry point: boot the lagury service when run directly.
if __name__ == '__main__':
    start_service()
| StarcoderdataPython |
12861806 | #!/usr/bin/env python
import datetime
import os
import subprocess
import re
import urllib2
import math
####################################################################
## TODO: Replace this function by another one, which simply reads all lines from a file
####################################################################
def readLines(fPath):
    """Return the lines of *fPath* as a list (without newline characters),
    creating the file empty first when it does not exist yet."""
    if not os.path.isfile(fPath):
        # touch the file so the read below cannot fail on a missing path
        open(fPath, 'a').close()
    with open(fPath) as handle:
        return handle.read().splitlines()
def currentUser():
    """Return the first watched account found at the start of `who` output,
    or "nobody" when none of the watched users is logged in."""
    watched = r"^(anna|eduards|kalvis|laima|marta|nelda)+\s.*$"
    match = re.compile(watched, re.MULTILINE).match(subprocess.check_output("who"))
    return match.group(1) if match else "nobody"
def getStatus():
    """Fetch the traffic-light status page and classify it.

    Returns "green" when the page body begins with the word "green",
    "yellow" for any other reachable content, and "offline" when the
    server cannot be contacted.
    """
    url = 'http://172.16.58.3/downloads1/timeouts/index.html'
    try:
        response=urllib2.urlopen(url,timeout=1)
        webFile = response.read()
        # match() anchors at position 0, so despite MULTILINE this only
        # accepts "green" at the very start of the response body
        pattern2 = re.compile(r"^green",re.MULTILINE)
        m2 = pattern2.match(webFile)
        if m2:
            return "green"
        else:
            return "yellow"
    except urllib2.URLError as err: pass
    # NOTE(review): a socket.timeout raised by the 1s timeout is not a
    # URLError on all Python 2 versions, so a slow server may raise here
    # instead of returning "offline" -- confirm the desired behavior.
    return "offline"
def getYellowCount(theLog):
    """Count consecutive warning entries in the access log.

    Lines containing "yellow" or "red" increment the counter, a line
    containing "green" resets it to zero, and every 12 "offline" lines
    add one extra warning on top of the running total.
    """
    warnings = 0
    offline = 0
    for entry in readLines(theLog):
        if "yellow" in entry or "red" in entry:
            warnings += 1
        if "green" in entry:
            warnings = 0
        if "offline" in entry:
            offline += 1
    return warnings + offline // 12
#####################################################################
## Find the right logfile; logfiles are different for different days
#####################################################################
# Build today's log path; logs are rotated daily by embedding the date.
# NOTE(review): month/day are not zero-padded here (e.g. "2024-3-7"), so
# filenames will not sort lexicographically -- confirm this is intended.
ddat = datetime.datetime.now()
theLog = '/home/kalvis/.timeouts/access-{yyyy}-{mm}-{dd}.log'.format( \
    yyyy = ddat.year, mm=ddat.month, dd=ddat.day)
# Derive the warning count from today's log, then sample the live status.
yellowCount = getYellowCount(theLog)
status = getStatus()
if yellowCount >= 5:
    status = "red"
if yellowCount > 1:
    # show the on-screen warning message to the logged-in user
    os.system("/home/kalvis/.timeouts/msg.py")
# Append one status line per run.
# NOTE(review): getStatus() is called a second time here (a second HTTP
# fetch), so the "red" override stored in `status` above is never logged --
# confirm whether `status` should be used instead.
logline = '{user}:{time}:{status}({yellowCount})\n'.format(user=currentUser(), \
    time=ddat,status=getStatus(),yellowCount=yellowCount)
if not os.path.isfile(theLog):
    open(theLog, 'a').close()
with open(theLog, "a") as myfile:
    myfile.write(logline)
# Hard enforcement: after 5 consecutive warnings, terminate marta's session.
if yellowCount >= 5:
    from subprocess import call
    call(["pkill","-KILL","-u","marta"])
| StarcoderdataPython |
3526728 | import contextlib
import re
import subprocess
from array import array
def run(cmd, **kwargs):
    """Execute *cmd*, raising CalledProcessError on a non-zero exit status.

    Any extra keyword arguments are forwarded to subprocess.run unchanged.
    """
    completed = subprocess.run(cmd, check=True, **kwargs)
    return completed
def invoke(cmd, **kwargs):
    """Run *cmd* capturing text-mode stdout/stderr; failures are re-raised
    as IOError so the captured output ends up in the error message."""
    capture = dict(stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="UTF-8")
    try:
        return run(cmd, **kwargs, **capture)
    except subprocess.CalledProcessError as e:
        raise IOError(f"Error: {cmd}\n stdout: {e.stdout}\n stderr: {e.stderr}")
def find_gpmd_track(filepath):
    """Return the stream index of the GoPro GPMD (telemetry) track in
    *filepath*, or None when the file has no such track.

    ffprobe reports streams on stderr with lines like:
        Stream #0:3(eng): Data: bin_data (gpmd / 0x646D7067), 61 kb/s (default)
    """
    ffprobe_output = str(invoke(["ffprobe", filepath]).stderr)  # str here just for PyCharm - its already a string
    # \d+ rather than \d so files with ten or more streams are handled too
    match = re.search(r'Stream #\d+:(\d+)\(.+\): Data: \w+ \(gpmd', ffprobe_output)
    if match:
        return int(match.group(1))
def load_gpmd_from(filepath):
    """Extract the raw GPMD telemetry stream from *filepath*.

    Returns an array of signed bytes holding the stream contents, or None
    when the file contains no GPMD track.

    Raises:
        IOError: when ffmpeg exits non-zero while extracting the stream.
    """
    track = find_gpmd_track(filepath)
    # `is not None` matters: the GPMD data can legitimately be stream 0,
    # and the previous bare `if track:` wrongly skipped that case.
    if track is not None:
        cmd = ["ffmpeg", '-y', '-i', filepath, '-codec', 'copy', '-map', '0:%d' % track, '-f', 'rawvideo', "-"]
        # Call subprocess.run directly (check defaults to False) so the
        # custom IOError below is reachable; the module-level run() helper
        # uses check=True, which raised CalledProcessError first and made
        # this error branch dead code.
        result = subprocess.run(cmd, capture_output=True, timeout=10)
        if result.returncode != 0:
            raise IOError(f"ffmpeg failed code: {result.returncode} : {result.stderr.decode('utf-8')}")
        arr = array("b")
        arr.frombytes(result.stdout)
        return arr
class FFMPEGGenerate:
    """Encode a stream of raw RGBA frames (1920x1080 at 10 fps, written to
    ffmpeg's stdin) into an H.264 file at *output* (30 fps)."""

    def __init__(self, output):
        self.output = output

    @contextlib.contextmanager
    def generate(self):
        """Context manager yielding a writable pipe for raw frame bytes;
        on exit the pipe is closed and the encoder is given 10s to finish."""
        input_opts = ["-f", "rawvideo", "-framerate", "10.0",
                      "-s", "1920x1080", "-pix_fmt", "rgba", "-i", "-"]
        output_opts = ["-r", "30", "-vcodec", "libx264", "-crf", "18",
                       "-preset", "veryfast", self.output]
        cmd = ["ffmpeg", "-y", "-loglevel", "info"] + input_opts + output_opts
        encoder = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=None, stderr=None)
        yield encoder.stdin
        encoder.stdin.close()
        encoder.wait(10)
class FFMPEGOverlay:
    """Composite raw RGBA overlay frames (piped to stdin) on top of an
    existing video file, optionally rescaling the output to *vsize* pixels
    high; ffmpeg console output can be redirected to a log file."""

    def __init__(self, input, output, vsize=1080, redirect=None):
        self.output = output
        self.input = input
        self.vsize = vsize
        self.redirect = redirect

    @contextlib.contextmanager
    def generate(self):
        """Context manager yielding a writable pipe for raw overlay frames."""
        # only append a scale filter when the target height differs from 1080
        filter_extra = "" if self.vsize == 1080 else f",scale=-1:{self.vsize}"
        cmd = (["ffmpeg", "-y", "-loglevel", "info", "-i", self.input]
               + ["-f", "rawvideo", "-framerate", "10.0", "-s", "1920x1080",
                  "-pix_fmt", "rgba", "-i", "-"]
               + ["-r", "30",
                  "-filter_complex", f"[0:v][1:v]overlay{filter_extra}",
                  "-vcodec", "libx264", "-crf", "18", "-preset", "veryfast",
                  self.output])
        if self.redirect:
            # send ffmpeg's console chatter to a log file instead of the terminal
            with open(self.redirect, "w") as std:
                process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=std, stderr=std)
        else:
            process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=None, stderr=None)
        yield process.stdin
        process.stdin.close()
        # ffmpeg keeps processing the whole input file after stdin closes,
        # so allow a really long wait here - not sure how to prevent this atm
        process.wait(5 * 60)
if __name__ == "__main__":
    # Ad-hoc smoke test: extract the GPMD stream from a local GoPro file
    # and report its size in bytes.
    loaded = load_gpmd_from("/data/richja/gopro/GH010064.MP4")
    print(len(loaded))
| StarcoderdataPython |
8085318 | from sentence_transformers import SentenceTransformer, util
import sys  # was missing: sys.stdout.flush() at the bottom raised NameError

# Sentence-embedding model used to encode both sentence lists.
model = SentenceTransformer('paraphrase-distilroberta-base-v1')
sentences1 = ['playlist',
             'A man is playing guitar',
             'The new movie is awesome']
sentences2 = ['playlist',
              'A woman watches TV',
              'The new movie is so great']
# Compute an embedding for every sentence in both lists.
embeddings1 = model.encode(sentences1, convert_to_tensor=True)
embeddings2 = model.encode(sentences2, convert_to_tensor=True)
# Pairwise cosine similarities: cosine_scores[i][j] compares
# sentences1[i] with sentences2[j].
cosine_scores = util.pytorch_cos_sim(embeddings1, embeddings2)
# Print the similarity of the first pair and flush so the value appears
# immediately when stdout is piped to another process.
print(cosine_scores[0][0])
sys.stdout.flush()
1829725 | #!/usr/bin/python3
"""
This script allows you to view ONE of the graphs relatively quickly, mostly for debugging the graph generators.
"""
import logging
import pygame
import sqlite3
import sys
import time
import config
import dataaccess
import graphics
__author__ = '<NAME>, N1KDO'
__copyright__ = 'Copyright 2016, 2017, 2019 <NAME>'
__license__ = 'Simplified BSD'
# Configure root logging once at import: millisecond timestamps, level taken
# from the project config, and UTC for all log times.
logging.basicConfig(format='%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S',
                    level=config.LOG_LEVEL)
logging.Formatter.converter = time.gmtime
def main():
    """Render one dashboard graph to the display and block until Q is
    pressed or the window is closed.

    Flow: initialize the display, load contest data from the SQLite
    database, draw exactly one chart (the others are left commented out
    for debugging), then run a minimal pygame event loop.
    """
    logging.info('dashboard startup')
    try:
        screen, size = graphics.init_display()
    except Exception as e:
        logging.exception('Could not initialize display.', exc_info=e)
        sys.exit(1)
    # NOTE(review): display_size is assigned but not used below -- confirm
    # whether it can be removed.
    display_size = (size[0], size[1])
    logging.debug('display setup')
    # defaults so the rendering code below still has values if loading fails
    qso_operators = []
    qso_stations = []
    qso_band_modes = []
    operator_qso_rates = []
    qsos_per_hour = []
    qsos_by_section = {}
    logging.debug('load data')
    db = None
    cursor = None
    try:
        logging.debug('connecting to database')
        db = sqlite3.connect(config.DATABASE_FILENAME)
        cursor = db.cursor()
        logging.debug('database connected')
        # get timestamp from the last record in the database
        last_qso_time, message = dataaccess.get_last_qso(cursor)
        # load qso_operators
        qso_operators = dataaccess.get_operators_by_qsos(cursor)
        # load qso_stations -- maybe useless chartjunk
        qso_stations = dataaccess.get_station_qsos(cursor)
        # get something else.
        qso_band_modes = dataaccess.get_qso_band_modes(cursor)
        # load QSOs per Hour by Operator
        operator_qso_rates = dataaccess.get_qsos_per_hour_per_operator(cursor, last_qso_time)
        # load QSO rates per Hour by Band
        qsos_per_hour, qsos_per_band = dataaccess.get_qsos_per_hour_per_band(cursor)
        # load QSOs by Section
        qsos_by_section = dataaccess.get_qsos_by_section(cursor)
        logging.debug('load data done')
    except sqlite3.OperationalError as error:
        logging.exception(error)
        sys.exit(1)
    finally:
        # always release DB resources, even on the sys.exit path above
        if db is not None:
            logging.debug('Closing DB')
            cursor.close()
            db.close()
            db = None
    try:
        # exactly one generator is active at a time; swap the comment to
        # debug a different chart
        # image_data, image_size = graphics.qso_summary_table(size, qso_band_modes)
        # image_data, image_size = graphics.qso_rates_table(size, operator_qso_rates)
        # image_data, image_size = graphics.qso_operators_graph(size, qso_operators)
        # image_data, image_size = graphics.qso_operators_table(size, qso_operators)
        # image_data, image_size = graphics.qso_stations_graph(size, qso_stations)
        # image_data, image_size = graphics.qso_bands_graph(size, qso_band_modes)
        # image_data, image_size = graphics.qso_modes_graph(size, qso_band_modes)
        # image_data, image_size = graphics.qso_rates_chart(size, qsos_per_hour)
        image_data, image_size = graphics.draw_map(size, qsos_by_section)
        # gc.collect()
        image = pygame.image.frombuffer(image_data, image_size, 'RGB')
        graphics.show_graph(screen, size, image)
        pygame.display.flip()
        # wait for a Q key press
        run = True
        while run:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    run = False
                    break
                elif event.type == pygame.KEYDOWN:
                    if event.key == ord('q'):
                        logging.debug('Q key pressed')
                        run = False
                    else:
                        logging.debug('event key=%d', event.key)
    except Exception as e:
        logging.exception("Exception in main:", exc_info=e)
    pygame.display.quit()
    logging.info('one_chart exit')
# Script entry point: render one chart and wait for Q / window close.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
6462141 | import unittest
import time
from scanpointgenerator import LineGenerator, CompoundGenerator
from malcolm.core import Process, Part, Context, AlarmStatus, \
AlarmSeverity, AbortedError
from malcolm.modules.scanning.parts import RunnableChildPart
from malcolm.modules.demo.blocks import ticker_block
from malcolm.compat import OrderedDict
from malcolm.modules.scanning.controllers import \
RunnableController
from malcolm.modules.scanning.util import RunnableStates
class TestRunnableStates(unittest.TestCase):
    """Verify the RunnableStates transition table and state ordering."""

    def setUp(self):
        self.o = RunnableStates()

    def test_init(self):
        """The allowed-transition map and possible_states must match the spec."""
        # Build the expected transition table as ordered (state, successors) pairs;
        # ordering matters because possible_states is derived from insertion order.
        transitions = [
            ('Resetting', {"Ready", "Fault", "Disabling"}),
            ('Ready', {"Configuring", "Aborting", 'Saving', "Fault",
                       "Disabling", "Loading"}),
            ('Saving', {'Fault', 'Ready', 'Disabling'}),
            ('Loading', {'Disabling', 'Fault', 'Ready'}),
            ('Configuring', {"Armed", "Aborting", "Fault", "Disabling"}),
            ('Armed', {"Seeking", "Aborting", "Running",
                       "Fault", "Disabling", "Resetting"}),
            ('Running', {"PostRun", "Seeking", "Aborting", "Fault",
                         "Disabling"}),
            ('PostRun', {"Ready", "Armed", "Aborting", "Fault",
                         "Disabling"}),
            ('Seeking', {"Armed", "Paused", "Aborting", "Fault",
                         "Disabling"}),
            ('Paused', {"Seeking", "Running", "Aborting", "Fault",
                        "Disabling"}),
            ('Aborting', {"Aborted", "Fault", "Disabling"}),
            ('Aborted', {"Resetting", "Fault", "Disabling"}),
            ('Fault', {"Resetting", "Disabling"}),
            ('Disabling', {"Disabled", "Fault"}),
            ('Disabled', {"Resetting"}),
        ]
        expected = OrderedDict(transitions)
        assert self.o._allowed == expected
        possible_states = [
            'Ready', 'Resetting', 'Saving', 'Loading', 'Configuring', 'Armed',
            'Running', 'Seeking', 'PostRun', 'Paused', 'Aborting', 'Aborted',
            'Fault', 'Disabling', 'Disabled']
        assert self.o.possible_states == possible_states
class TestRunnableController(unittest.TestCase):
    """Exercise RunnableController through configure/run/pause/abort cycles.

    Fix: the class previously defined ``test_abort`` twice; the second
    definition silently shadowed the first, so the abort-from-Ready test
    never ran.  The first is renamed ``test_abort_from_ready`` so both
    tests are discovered and executed.
    """

    def setUp(self):
        """Wire a ticker child block under a RunnableController parent."""
        self.p = Process('process1')
        self.context = Context(self.p)
        # Make a ticker_block block to act as our child
        for c in ticker_block(mri="childBlock", config_dir="/tmp"):
            self.p.add_controller(c)
        self.b_child = self.context.block_view("childBlock")
        # Make an empty part for our parent
        part1 = Part("part1")
        # Make a RunnableChildPart to control the ticker_block
        part2 = RunnableChildPart(
            mri='childBlock', name='part2', initial_visibility=True)
        # create a root block for the RunnableController block to reside in
        self.c = RunnableController(mri='mainBlock', config_dir="/tmp")
        self.c.add_part(part1)
        self.c.add_part(part2)
        self.p.add_controller(self.c)
        self.b = self.context.block_view("mainBlock")
        self.ss = self.c.state_set
        # start the process off
        self.checkState(self.ss.DISABLED)
        self.p.start()
        self.checkState(self.ss.READY)

    def tearDown(self):
        self.p.stop(timeout=1)

    def checkState(self, state, child=True, parent=True):
        """Assert the child and/or parent block is in the given state."""
        if child:
            assert self.b_child.state.value == state
        if parent:
            assert self.c.state.value == state

    def checkSteps(self, configured, completed, total):
        """Assert step counters agree on both parent and child blocks."""
        assert self.b.configuredSteps.value == configured
        assert self.b.completedSteps.value == completed
        assert self.b.totalSteps.value == total
        assert self.b_child.configuredSteps.value == configured
        assert self.b_child.completedSteps.value == completed
        assert self.b_child.totalSteps.value == total

    def test_init(self):
        """Fresh controller reports zeroed counters and expected signature."""
        assert self.c.completed_steps.value == 0
        assert self.c.configured_steps.value == 0
        assert self.c.total_steps.value == 0
        assert list(self.b.configure.takes.elements) == \
            ["generator", "axesToMove", "exceptionStep"]

    def test_reset(self):
        """Disable then reset returns the controller to Ready."""
        self.c.disable()
        self.checkState(self.ss.DISABLED)
        self.c.reset()
        self.checkState(self.ss.READY)

    def test_modify_child(self):
        """Changing a child attribute flags the child block as modified."""
        # Save an initial setting for the child
        self.b_child.save("init_child")
        assert self.b_child.modified.value is False
        x = self.context.block_view("COUNTERX")
        x.counter.put_value(31)
        # x counter now at 31, child should be modified
        assert x.counter.value == 31
        assert self.b_child.modified.value is True
        assert self.b_child.modified.alarm.severity == AlarmSeverity.MINOR_ALARM
        assert self.b_child.modified.alarm.status == AlarmStatus.CONF_STATUS
        assert self.b_child.modified.alarm.message == \
            "x.counter.value = 31.0 not 0.0"
        self.prepare_half_run()
        self.b.run()
        # x counter now at 2, child should be modified by us
        assert self.b_child.modified.value is True
        assert self.b_child.modified.alarm.severity == AlarmSeverity.NO_ALARM
        assert self.b_child.modified.alarm.status == AlarmStatus.CONF_STATUS
        assert self.b_child.modified.alarm.message == \
            "(We modified) x.counter.value = 2.0 not 0.0"
        assert x.counter.value == 2.0
        x.counter.put_value(0.0)
        # x counter now at 0, child should be unmodified
        assert x.counter.value == 0
        assert self.b_child.modified.alarm.message == ""
        assert self.b_child.modified.value is False

    def test_modify_parent(self):
        """Child design changes propagate to the parent's modified flag."""
        # Save an initial setting for child and parent
        self.b_child.save("init_child")
        self.b.save("init_parent")
        # Change a value and save as a new child setting
        x = self.context.block_view("COUNTERX")
        x.counter.put_value(31)
        self.b_child.save("new_child")
        assert self.b_child.modified.value is False
        assert self.b.modified.value is True
        assert self.b.modified.alarm.severity == AlarmSeverity.MINOR_ALARM
        assert self.b.modified.alarm.status == AlarmStatus.CONF_STATUS
        assert self.b.modified.alarm.message == \
            "part2.design.value = 'new_child' not 'init_child'"
        # Load the child again
        self.b_child.design.put_value("new_child")
        assert self.b.modified.value is True
        # And check that loading parent resets it
        self.b.design.put_value("init_parent")
        assert self.b.modified.value is False
        assert self.b_child.design.value == "init_child"
        # Put back
        self.b_child.design.put_value("new_child")
        assert self.b.modified.value is True
        # Do a configure, and check we get set back
        self.prepare_half_run()
        assert self.b_child.design.value == "init_child"
        assert self.b_child.modified.value is False
        assert self.b.modified.value is False

    def test_abort_from_ready(self):
        """Abort straight from Ready lands in Aborted.

        Renamed from ``test_abort`` -- a later method of the same name
        shadowed this one, so it was never collected by unittest.
        """
        self.b.abort()
        self.checkState(self.ss.ABORTED)

    def test_validate(self):
        line1 = LineGenerator('y', 'mm', 0, 2, 3)
        line2 = LineGenerator('x', 'mm', 0, 2, 2)
        compound = CompoundGenerator([line1, line2], [], [])
        actual = self.b.validate(generator=compound, axesToMove=['x'])
        assert actual["generator"].to_dict() == compound.to_dict()
        assert actual["axesToMove"] == ['x']

    def prepare_half_run(self, duration=0.01, exception=0):
        """Configure a 3x2 scan moving only x, so each run does 2 steps."""
        line1 = LineGenerator('y', 'mm', 0, 2, 3)
        line2 = LineGenerator('x', 'mm', 0, 2, 2)
        compound = CompoundGenerator([line1, line2], [], [], duration)
        self.b.configure(
            generator=compound, axesToMove=['x'], exceptionStep=exception)

    def test_configure_run(self):
        """Three runs of 2 steps complete the 6-step scan and return Ready."""
        assert self.b.configure.writeable is True
        assert self.b.configure.takes.elements["generator"].writeable is True
        assert self.b.validate.takes.elements["generator"].writeable is True
        assert self.b.validate.returns.elements["generator"].writeable is False
        self.prepare_half_run()
        self.checkSteps(2, 0, 6)
        self.checkState(self.ss.ARMED)
        assert self.b.configure.writeable is False
        assert self.b.configure.takes.elements["generator"].writeable is True
        assert self.b.validate.takes.elements["generator"].writeable is True
        assert self.b.validate.returns.elements["generator"].writeable is False
        self.b.run()
        self.checkState(self.ss.ARMED)
        self.checkSteps(4, 2, 6)
        self.b.run()
        self.checkState(self.ss.ARMED)
        self.checkSteps(6, 4, 6)
        self.b.run()
        self.checkState(self.ss.READY)

    def test_abort(self):
        """Abort after a completed run segment lands in Aborted."""
        self.prepare_half_run()
        self.b.run()
        self.b.abort()
        self.checkState(self.ss.ABORTED)

    def test_pause_seek_resume(self):
        """Pause rewinds completedSteps; seeking forward skips ahead."""
        self.prepare_half_run()
        self.checkSteps(configured=2, completed=0, total=6)
        self.b.run()
        self.checkState(self.ss.ARMED)
        self.checkSteps(4, 2, 6)
        self.b.pause(completedSteps=1)
        self.checkState(self.ss.ARMED)
        self.checkSteps(2, 1, 6)
        self.b.run()
        self.checkSteps(4, 2, 6)
        self.b.completedSteps.put_value(5)
        self.checkSteps(6, 5, 6)
        self.b.run()
        self.checkState(self.ss.READY)

    def test_resume_in_run(self):
        """Pausing mid-run then resuming completes the current segment."""
        self.prepare_half_run(duration=0.5)
        f = self.b.run_async()
        self.context.sleep(0.95)
        self.b.pause()
        self.checkState(self.ss.PAUSED)
        self.checkSteps(2, 1, 6)
        self.b.resume()
        # Parent should be running, child won't have got request yet
        then = time.time()
        self.checkState(self.ss.RUNNING, child=False)
        self.context.wait_all_futures(f, timeout=2)
        now = time.time()
        self.checkState(self.ss.ARMED)
        self.checkSteps(4, 2, 6)
        # This test fails on Travis sometimes, looks like the docker container
        # just gets starved
        # self.assertAlmostEqual(now - then, 0.5, delta=0.1)

    def test_run_exception(self):
        """A configured exceptionStep puts the controller into Fault."""
        self.prepare_half_run(exception=1)
        with self.assertRaises(AssertionError):
            self.b.run()
        self.checkState(self.ss.FAULT)

    def test_run_stop(self):
        """Aborting an in-flight run raises AbortedError from the future."""
        self.prepare_half_run(duration=0.1)
        f = self.b.run_async()
        self.context.sleep(0.1)
        self.b.abort()
        with self.assertRaises(AbortedError):
            f.result()
        self.checkState(self.ss.ABORTED)
| StarcoderdataPython |
8197971 | <filename>aliyun/log/es_migration/migration_task.py<gh_stars>0
#!/usr/bin/env python
# encoding: utf-8
# Copyright (C) Alibaba Cloud Computing
# All rights reserved.
import os
import os.path as op
import json
import traceback
from datetime import datetime
from elasticsearch.exceptions import NotFoundError
from aliyun.log import LogClient
from aliyun.log.putlogsrequest import PutLogsRequest
from aliyun.log.es_migration.doc_logitem_converter import DocLogItemConverter
class Checkpoint(object):
    """Per-task migration checkpoint persisted as a JSON file.

    Tracks the task identity (index/shard/logstore/time_reference), the
    current status, a progress counter, and the last migrated document
    (``_id`` plus a time-reference offset).  State is written atomically
    via a ``.bak`` rename dance and reloaded on construction so an
    interrupted migration can resume where it left off.
    """

    # Terminal / transient status values.
    processing = 'processing'
    interrupted = 'interrupted'
    finished = 'finished'
    dropped = 'dropped'
    failed = 'failed'

    def __init__(
        self, ckpt_path, task_id, es_index, es_shard,
        logstore, time_reference, logger,
    ):
        self._ckpt_file = op.join(ckpt_path, f'task_{task_id}.json')
        self._logger = logger
        # Identity of the task; must match any cached checkpoint on disk.
        task_desc = {
            'es_index': es_index,
            'es_shard': es_shard,
            'logstore': logstore,
            'time_reference': time_reference,
        }
        self._content = {
            'status': self.processing,
            'task': task_desc,
            'update_time': None,
            'progress': 0,
            'checkpoint': {'_id': None, 'offset': {}},
        }
        self._load()

    @property
    def id(self):
        """_id of the last migrated document (None if nothing migrated)."""
        return self._content['checkpoint']['_id']

    @property
    def offset(self):
        """Time-reference offset dict of the last migrated document."""
        return self._content['checkpoint']['offset']

    @property
    def status(self):
        """Current task status (one of the class-level status constants)."""
        return self._content['status']

    @property
    def content(self):
        """Full checkpoint dict (status, task, progress, checkpoint)."""
        return self._content

    def update(self, status=processing, count=0, _id=None, offset=None):
        """Record progress and persist the checkpoint to disk.

        :param status: new status value
        :param count: number of documents migrated since the last update
        :param _id: _id of the most recently migrated document
        :param offset: time-reference offset to merge into the checkpoint
        """
        self._content['update_time'] = datetime.now().isoformat(
            timespec='seconds')
        self._content['status'] = status
        marker = self._content['checkpoint']
        if _id:
            marker['_id'] = _id
        self._content['progress'] += count
        if offset:
            marker['offset'].update(offset)
        # Keep the old file as a backup while writing the replacement, so a
        # crash mid-write never leaves us without a readable checkpoint.
        backup = self._ckpt_file + '.bak'
        try:
            os.rename(self._ckpt_file, backup)
        except FileNotFoundError:
            pass
        with open(self._ckpt_file, 'w') as out:
            out.write(json.dumps(self._content, indent=2))
        try:
            os.remove(backup)
        except FileNotFoundError:
            pass

    def _load(self):
        """Merge a previously persisted checkpoint, validating task identity."""
        try:
            with open(self._ckpt_file) as fp:
                raw = fp.read().strip()
        except FileNotFoundError:
            raw = ''
        if not raw:
            return
        try:
            cached = json.loads(raw)
        except json.JSONDecodeError:
            msg = 'Invalid checkpoint file content'
            self._logger.error(msg, {'cache': raw})
            raise Exception(msg)
        if cached['task'] != self._content['task']:
            msg = 'Specified task not matches with cache'
            self._logger.error(
                msg,
                {'task': self._content['task'], 'cache': cached['task']},
            )
            raise Exception(msg)
        self._content.update(cached)
class MigrationTask(object):
    """Migrate the documents of one (es_index, es_shard) pair to a logstore.

    Documents are scanned with the ES scroll API in a deterministic sort
    order (time_reference then _id) so that, after an interruption, the
    scan can skip everything recorded in the :class:`Checkpoint`.

    Fix: the scroll-cleanup handler in :meth:`run` used a bare ``except:``,
    which also swallowed ``SystemExit``/``KeyboardInterrupt``; it is
    narrowed to ``except Exception``.
    """

    # Keep-alive window for the ES scroll context.
    scroll = '10m'

    def __init__(
        self, _id, es_client, es_index, es_shard,
        logstore, ckpt_path, batch_size, logger,
        es_version, es_query=None, time_reference=None,
    ):
        self._id = _id
        self._es_client = es_client
        self._es_index = es_index
        self._es_shard = es_shard
        self._es_query = es_query or {}
        self._es_scroll_id = None
        self._logstore = logstore
        self._time_reference = time_reference
        self._batch_size = batch_size
        self._logger = logger
        self._ckpt = Checkpoint(
            ckpt_path,
            self._id,
            self._es_index,
            self._es_shard,
            self._logstore.name,
            self._time_reference,
            self._logger,
        )
        # ES >= 7 removed the _uid meta-field in favour of _id.
        id_key = '_id' if es_version >= 7 else '_uid'
        if self._time_reference:
            es_sort = [{self._time_reference: 'asc'}, {id_key: 'asc'}]
        else:
            es_sort = [{id_key: 'asc'}]
        self._es_query['sort'] = es_sort
        # Resume: only fetch documents at/after the checkpointed offset.
        if self._time_reference in self._ckpt.offset:
            self._es_query['query'] = {
                'range': {
                    self._time_reference: {
                        'gte': self._ckpt.offset[self._time_reference],
                        'format': 'strict_date_optional_time',
                    }
                }
            }
        # Pin the search to the single shard this task owns.
        self._es_params = {'preference': '_shards:{:d}'.format(self._es_shard)}

    def run(self):
        """Run the migration to completion, recording the outcome.

        :return: the final checkpoint status string
        """
        # already finished
        if self._ckpt.status == Checkpoint.finished:
            self._logger.info('Already finished. Ignore it.')
            return Checkpoint.finished
        self._logger.info('Migration task starts', extra=self._ckpt.content)
        try:
            self._run()
        except NotFoundError:
            self._ckpt.update(status=Checkpoint.dropped)
            self._logger.info(
                'ES index dropped',
                extra={'traceback': traceback.format_exc()},
            )
        except KeyboardInterrupt:
            self._logger.info('Interrupted')
            self._ckpt.update(status=Checkpoint.interrupted)
        except BaseException:
            # Catch-all (including SystemExit) so the failure is recorded
            # in the checkpoint before this task exits.
            self._logger.error(
                'Exception',
                extra={'traceback': traceback.format_exc()},
            )
            self._ckpt.update(status=Checkpoint.failed)
        # Best-effort cleanup of the server-side scroll context.  Narrowed
        # from a bare `except:` so Ctrl-C during cleanup still propagates.
        try:
            # clear active scroll
            if self._es_scroll_id:
                self._es_client.clear_scroll(
                    body={'scroll_id': [self._es_scroll_id]},
                    ignore=(404,)
                )
        except Exception:
            pass
        self._logger.info('Migration task exits', extra=self._ckpt.content)
        return self._ckpt.status

    def _run(self):
        """Scroll through the shard, skipping already-migrated documents."""
        # check if document been processed before
        checking, offset, _id = False, None, None
        if self._time_reference and self._ckpt.offset:
            offset = self._ckpt.offset.get(self._time_reference)
            if offset:
                checking = True
        if self._ckpt.id:
            _id = self._ckpt.id
            checking = True
        if checking:
            self._logger.info('Scanning migrated documents starts')
        # initial search
        resp = self._es_client.search(
            index=self._es_index,
            body=self._es_query,
            scroll=self.scroll,
            size=self._batch_size,
            params=self._es_params,
        )
        self._es_scroll_id = resp.get('_scroll_id')
        rnd = 0
        while True:
            if rnd > 0:
                resp = self._es_client.scroll(
                    scroll_id=self._es_scroll_id,
                    scroll=self.scroll,
                )
            hits = resp['hits']['hits']
            empty = len(hits) <= 0
            if checking and not empty:
                # check the last document; if it is past the checkpoint we
                # are at the resume boundary and must filter this page.
                if self._pending_doc(hits[-1], _id, offset):
                    hits = filter(
                        lambda h: self._pending_doc(h, _id, offset),
                        hits,
                    )
                    hits = list(hits)
                    checking = False
                    self._logger.info('Scanning migrated documents ends')
                else:
                    # whole page already migrated; skip it entirely
                    hits = []
            count = len(hits)
            self._update_ckpt(self._put_docs(hits), count)
            self._es_scroll_id = resp.get('_scroll_id')
            # end of scroll
            if self._es_scroll_id is None or empty:
                self._ckpt.update(status=Checkpoint.finished)
                self._logger.info('Finished')
                break
            rnd += 1
            if rnd % 100 == 0:
                if checking:
                    self._logger.info('Scanning migrated documents')
                else:
                    self._logger.info(
                        'Migration progress',
                        extra=self._ckpt.content,
                    )

    def _pending_doc(self, doc, _id, offset):
        """Return True if *doc* sorts after the checkpointed (_id, offset)."""
        is_new = False
        if self._time_reference:
            is_new = doc['_source'][self._time_reference] > offset
        return is_new or doc['_id'] > _id

    def _update_ckpt(self, doc, count):
        """Record *count* migrated documents ending at *doc* (may be None)."""
        if not doc:
            return
        offset = {}
        if self._time_reference:
            offset = {self._time_reference: doc['_source'][self._time_reference]}
        self._ckpt.update(count=count, _id=doc['_id'], offset=offset)

    def _put_docs(self, docs):
        """Convert and ship *docs* to the logstore; return the last doc."""
        if len(docs) <= 0:
            return None
        logitems = [
            DocLogItemConverter.to_log_item(doc, self._time_reference)
            for doc in docs
        ]
        self._logstore.put_logs(logitems)
        return docs[-1]
class MigrationLogstore(object):
    """Thin wrapper binding an SLS LogClient to one project/logstore,
    with a fixed topic and source applied to every batch of log items."""

    def __init__(
        self,
        endpoint,
        access_id,
        access_key,
        project_name,
        logstore_name,
        topic,
        source,
    ):
        self._log_client = LogClient(
            endpoint=endpoint,
            accessKeyId=access_id,
            accessKey=access_key,
        )
        self._project_name = project_name
        self._logstore_name = logstore_name
        self._topic = topic
        self._source = source

    @property
    def name(self):
        """Name of the destination logstore."""
        return self._logstore_name

    def put_logs(self, logitems):
        """Send *logitems* to the bound project/logstore in one request."""
        request = PutLogsRequest(
            project=self._project_name,
            logstore=self._logstore_name,
            topic=self._topic,
            source=self._source,
            logitems=logitems,
        )
        self._log_client.put_logs(request)
| StarcoderdataPython |
# This is Jim's example code for making a plot of an adsorption isotherm
import matplotlib.pyplot as pyplot
import numpy as np

# Plot range and isotherm parameters (K: equilibrium constant, n: exponent).
xmin = 0
xmax = 2.5
K = 10
n = 2

# Coverage curve theta(c) = K*c^n / (1 + K*c^n) over 50 evenly spaced points.
concentration = np.linspace(xmin, xmax, 50, endpoint=True)
coverage = (K * concentration ** n) / (1 + K * concentration ** n)

pyplot.plot(concentration, coverage)
pyplot.axis([xmin, xmax, 0, 2])
pyplot.show()
| StarcoderdataPython |
8138947 | <reponame>oxquantum/CVAE_for_QE<gh_stars>1-10
import os
import numpy as np
import tensorflow as tf
import pickle # to load model definition
from CVAE_type1 import CVAE_type1
from CVAE_type2 import CVAE_type2
from CVAE_contextloss_model import CVAE_contextloss
import data_feeder_tf
import test_and_plot as test
import matplotlib.pyplot as plt
flags = tf.flags
#flags.DEFINE_string("file_name","CVAE_type2_128_latent100sim_epoch_700_wide2_aug2", "File name for data")
flags.DEFINE_string("file_name","CVAE_type2_128_latent100mixed_epoch_700_more_data_wide2_aug2", "File name for data")
flags.DEFINE_string("device", "/gpu:0", "Compute device.")
flags.DEFINE_boolean("allow_soft_placement", True, "Soft device placement.")
FLAGS = flags.FLAGS
def load_CVAE(sess, model_folder, model_name, batch_size, file_name):
    """Instantiate the requested CVAE variant, restore its variables, and
    build its testing graph.

    :param sess: active TensorFlow session used to restore variables
    :param model_folder: directory containing the saved model
    :param model_name: one of 'type1', 'type2', 'contextloss'
    :param batch_size: batch size for the testing nodes
    :param file_name: saved-model file name inside model_folder
    :return: the loaded network object
    :raises ValueError: if model_name is not a known variant
    """
    if model_name == 'type1':
        net = CVAE_type1()
    elif model_name == 'type2':
        net = CVAE_type2()
    elif model_name == 'contextloss':
        net = CVAE_contextloss()
    else:
        raise ValueError('Undefined model: {}'.format(model_name))
    net.load(sess, model_folder, file_name)
    # Fixed typo in the status message ('Varaiables' -> 'Variables').
    print('Variables are loaded.')
    net.create_testing_nodes(batch_size)
    print('Testing nodes are created.')
    return net
def convert_to_absolute_path(path):
    """Return *path* as an absolute path.

    Already-absolute inputs are returned unchanged.  Paths whose first
    character is '~' are user-expanded; any other relative path is
    resolved against the current working directory (symlinks resolved
    via realpath).
    """
    if os.path.isabs(path):
        return path
    if path[0] == '~':
        return os.path.expanduser(path)
    return os.path.realpath(path)
def plot_100(predicted, input_shape, plot_min, plot_max, fpath=None):
    """Draw the first 100 predictions in a 10x10 grid of images.

    Each image is de-biased by the mean of rows 63:65 (presumably the
    centre rows of a 128-pixel image -- TODO confirm) before display.
    Shows the figure interactively when fpath is None, otherwise saves
    it to fpath.
    """
    assert predicted.shape[0] >= 100
    plt.figure(figsize=(10, 10))
    for idx in range(100):
        plt.subplot(10, 10, idx + 1)
        image = predicted[idx].reshape(input_shape)
        centre_bias = np.mean(image[63:65, :])
        plt.imshow(image - centre_bias, vmin=plot_min, vmax=plot_max,
                   origin="lower", cmap='seismic')
        plt.xticks([])
        plt.yticks([])
    plt.subplots_adjust(wspace=0.0, hspace=0.0)
    if fpath is None:
        plt.show()
    else:
        plt.savefig(fpath)
def main():
    """Load a saved CVAE, run the reconstruction test on the test set, and
    plot the result.  Reads model/file choices from the module-level FLAGS
    and runs everything under one TF session on the configured device."""
    # model storage
    model_folder = './models' # path for tensorflow reconstruction model
    model_folder = convert_to_absolute_path(model_folder)
    # Colour scale limits for the reconstruction plots.
    plot_min, plot_max = -1.0, 1.0
    model_name = 'contextloss'
    batch_size=100
    with tf.device(FLAGS.device):
        # allow_growth avoids grabbing all GPU memory up front.
        gpu_options = tf.GPUOptions(allow_growth=True)
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=FLAGS.allow_soft_placement, gpu_options=gpu_options)) as sess:
            # build a model and load variables
            net = load_CVAE(sess, model_folder, model_name, batch_size, FLAGS.file_name)
            # data load for test
            fname_test = []
            fname_test.append('mixed_2345_50000__.h5') # test file name
            # Load data
            #data_folder = './Data' # the folder containing data files
            data_folder = '~/Data/CVAE' # the folder containing data files
            data_folder = convert_to_absolute_path(data_folder)
            print('Data Directory: ', data_folder)
            data_shape = (128,128,1)
            dataset = data_feeder_tf.Data_from_HDF5(data_folder, [], fname_test, data_shape = data_shape) # Data load
            data_arr = dataset.test
            # reconstruction test
            input_gen = subsample_stride16 # function to convert full data to initial measurement
            file_path = test.save_recon_test(sess, data_arr, input_gen, net, batch_size) # save the result into a file
            test.draw_recon_result_from_file(file_path, (128,128), low_res=True, plot_min=plot_min, plot_max=plot_max, cmap="seismic") # plot the result from the file
def subsample_stride16(data):
    """Subsample channel 0 of *data* on a 16-pixel grid.

    :param data: array of shape (batch, H, W, C)
    :return: tuple of (flattened strided samples per batch item,
             full-size array that is zero everywhere except at the
             sampled grid positions of channel 0)
    """
    stride = 16
    sparse = np.zeros_like(data)
    sparse[:, ::stride, ::stride, 0] = data[:, ::stride, ::stride, 0]
    samples = data[:, ::stride, ::stride, 0]
    return samples.reshape(samples.shape[0], -1), sparse
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1726597 | <reponame>ragnarok22/contactbot
from telegram.ext import Updater, CommandHandler, ConversationHandler, CallbackQueryHandler, MessageHandler, Filters
import callbacks
import constants
import conversations
from commands import start, cancel
from settings import TELEGRAM_KEY
from db import start_db
if __name__ == '__main__':
    # Build the bot and its dispatcher, initialise persistent storage,
    # then register handlers.  NOTE: registration order is the dispatch
    # priority in python-telegram-bot, so /start must stay before the
    # conversation handler.
    updater = Updater(token=TELEGRAM_KEY)
    dp = updater.dispatcher
    start_db()
    dp.add_handler(CommandHandler('start', start))
    # Contact-collection conversation: triggered by the 'contact' inline
    # button, then walks through first name -> last name -> phone -> save.
    dp.add_handler(ConversationHandler(
        entry_points=[
            CallbackQueryHandler(pattern='contact', callback=callbacks.contact_callback)
        ],
        states={
            constants.INPUT_FIRST_NAME: [MessageHandler(Filters.text, conversations.first_name_conversation)],
            constants.INPUT_LAST_NAME: [MessageHandler(Filters.text, conversations.last_name_conversation)],
            constants.INPUT_PHONE: [MessageHandler(Filters.text, conversations.phone_conversation)],
            constants.SAVE_INFO: [MessageHandler(Filters.text, conversations.save_info_conversation)],
        },
        fallbacks=[
            # /cancel aborts the conversation from any state.
            CommandHandler('cancel', cancel)
        ]
    ))
    # Start long polling and block until interrupted.
    updater.start_polling()
    print('Bot is polling')
    updater.idle()
| StarcoderdataPython |
291324 | """This module is for learning
This module has basic functions to work with numbers
"""
def is_even(number: int) -> bool:
    """
    This method will find if the number passed is even or not

    Zero and negative even numbers (e.g. -4) are mathematically even, so
    the previous ``number <= 0`` guard -- which wrongly reported them as
    odd -- has been removed.

    :param number : a number
    :return: True if even False otherwise
    """
    return number % 2 == 0
# Prints '__main__' when run directly, or the module's import name otherwise.
print(__name__)
if __name__ == '__main__':
    # if someone is executing the code directly by calling python app.py then this block will be executed
    print(is_even(5))
    print(is_even(10))
| StarcoderdataPython |
3451701 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Fri May 08 10:50:00 2020
@author: <NAME>
"""
import numpy as np
class Electrolyte:
def __init__(self, lambdaEq=10.53e-3):
self.lambdaEq = lambdaEq
class Reservoir:
def __init__(self, volume, concentration, electrolyte=Electrolyte()):
self.volume = volume
self.electrolyte = electrolyte
self.concentration = concentration
def timeLoop(self, flux, time):
return self.concentration - time*flux/self.volume
def conductivity(self, concentration):
return concentration*self.electrolyte.lambdaEq
def conductivityTemp(self, concentration, temperature):
"""Source:
https://www.mt.com/dam/mt_ext_files/Editorial/Generic/4/Paper-THOR-Cation-Cond-Temp-Bev-Gray-11-97_Editorial-Generic_1161617581924_files/cation_cond_tempcompensation.pdf
"""
C25 = concentration*self.electrolyte.lambdaEq
Cw = 0.0545*(0.55*np.exp(0.0363*temperature)-0.356)
CT = (C25 - 0.0545)*(1+0.02*(temperature-25))+ Cw
return CT
class Channel:
def __init__(self, length, width, thickness, electrolyte=Electrolyte()):
self.length = length
self.width = width
self.thickness = thickness
self.surfaceMem = length*width
self.volume = length*width*thickness
self.electrolyte = electrolyte
def conductivity(self, concentration):
return concentration*self.electrolyte.lambdaEq
def resistance(self, concentration):
kappa = self.conductivity(concentration)
resistanceSolution = self.thickness/kappa/self.surfaceMem
return resistanceSolution | StarcoderdataPython |
11346563 | <gh_stars>0
Python 3.4.3 (v3.4.3:9b73f1c3e601, Feb 24 2015, 22:43:06) [MSC v.1600 32 bit (Intel)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> mat = True
>>> print mat
[DEBUG ON]
>>>
[DEBUG OFF]
>>>
| StarcoderdataPython |
4811158 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 <EMAIL>
# Licensed under the MIT license (http://opensource.org/licenses/MIT)
from __future__ import absolute_import, division, print_function, unicode_literals
from keyplus.version import __version__
IS_PRE_RELASE = ('pre' in __version__)
class DEBUG(object):
    """
    The static variables in this class can be used to turn on and off debug
    messages.

    Each flag is AND-ed with IS_PRE_RELASE, so all debug output is forced
    off in release builds regardless of the per-flag setting; toggle the
    boolean literal on the left to enable a category during development.
    """
    usb_cmd_timing = False and IS_PRE_RELASE  # timing of USB commands
    usb_cmd = False and IS_PRE_RELASE         # USB command traffic
    parsing = False and IS_PRE_RELASE         # layout/config parsing
    parsing_extra = False and IS_PRE_RELASE   # verbose parsing details
    layout = True and IS_PRE_RELASE           # layout handling
    gui = True and IS_PRE_RELASE              # GUI events
    misc = True and IS_PRE_RELASE             # everything else
| StarcoderdataPython |
11323370 | from typing import Dict, List, Set
from unittest import mock
import pandas as pd
import pytest
import great_expectations.exceptions as ge_exceptions
from great_expectations.core.batch import (
Batch,
BatchDefinition,
BatchMarkers,
BatchRequest,
)
from great_expectations.core.id_dict import BatchSpec, IDDict
from great_expectations.data_context import DataContext
from great_expectations.execution_engine import PandasExecutionEngine
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.rule_based_profiler.helpers.util import (
get_parameter_value_and_validate_return_type,
)
from great_expectations.rule_based_profiler.parameter_builder import (
ParameterBuilder,
RegexPatternStringParameterBuilder,
)
from great_expectations.rule_based_profiler.types import (
Domain,
ParameterContainer,
get_parameter_value_by_fully_qualified_parameter_name,
)
from great_expectations.validator.validator import Validator
@pytest.fixture
def batch_fixture() -> Batch:
    """
    Fixture for Batch object that contains data, BatchRequest, BatchDefinition
    as well as BatchSpec and BatchMarkers. To be used in unittesting.
    """
    frame = pd.DataFrame({"a": [1, 5, 22, 3, 5, 10], "b": [1, 2, 3, 4, 5, 6]})
    return Batch(
        data=frame,
        batch_request=BatchRequest(
            datasource_name="my_datasource",
            data_connector_name="my_data_connector",
            data_asset_name="my_data_asset_name",
        ),
        batch_definition=BatchDefinition(
            datasource_name="my_datasource",
            data_connector_name="my_data_connector",
            data_asset_name="my_data_asset_name",
            batch_identifiers=IDDict({"id": "A"}),
        ),
        batch_spec=BatchSpec(path="/some/path/some.file"),
        batch_markers=BatchMarkers(ge_load_time="FAKE_LOAD_TIME"),
    )
@mock.patch("great_expectations.data_context.data_context.DataContext")
def test_regex_pattern_string_parameter_builder_instantiation_with_defaults(
    mock_data_context: mock.MagicMock,
):
    """When only candidate_regexes is supplied, the builder keeps the
    default threshold (1.0) and the class-level CANDIDATE_REGEX matches
    the set passed here (it mirrors the builder's built-in defaults)."""
    data_context: DataContext = mock_data_context
    candidate_regexes: Set[str] = {
        r"\d+",  # whole number with 1 or more digits
        r"-?\d+",  # negative whole numbers
        r"-?\d+(?:\.\d*)?",  # decimal numbers with . (period) separator
        r"[A-Za-z0-9\.,;:!?()\"'%\-]+",  # general text
        r"^\s+",  # leading space
        r"\s+$",  # trailing space
        r"https?:\/\/(?:www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b(?:[-a-zA-Z0-9@:%_\+.~#()?&//=]*)",  # Matching URL (including http(s) protocol)
        r"<\/?(?:p|a|b|img)(?: \/)?>",  # HTML tags
        r"(?:25[0-5]|2[0-4]\d|[01]\d{2}|\d{1,2})(?:.(?:25[0-5]|2[0-4]\d|[01]\d{2}|\d{1,2})){3}",  # IPv4 IP address
        r"(?:[A-Fa-f0-9]){0,4}(?: ?:? ?(?:[A-Fa-f0-9]){0,4}){0,7}",  # IPv6 IP address,
        r"\b[0-9a-fA-F]{8}\b-[0-9a-fA-F]{4}-[0-5][0-9a-fA-F]{3}-[089ab][0-9a-fA-F]{3}-\b[0-9a-fA-F]{12}\b ",  # UUID
    }

    regex_pattern_string_parameter: RegexPatternStringParameterBuilder = (
        RegexPatternStringParameterBuilder(
            name="my_regex_pattern_string_parameter_builder",
            data_context=data_context,
            candidate_regexes=candidate_regexes,
        )
    )

    # Default threshold must survive construction untouched.
    assert regex_pattern_string_parameter.threshold == 1.0
    assert regex_pattern_string_parameter.candidate_regexes == candidate_regexes
    assert regex_pattern_string_parameter.CANDIDATE_REGEX == candidate_regexes
@mock.patch("great_expectations.data_context.data_context.DataContext")
def test_regex_pattern_string_parameter_builder_instantiation_override_defaults(
    mock_data_context: mock.MagicMock,
):
    """Explicit candidate_regexes and threshold must override the
    builder's defaults, leaving CANDIDATE_REGEX (class default) untouched."""
    context: DataContext = mock_data_context
    patterns: Set[str] = {
        r"\d{1}",
    }
    builder: RegexPatternStringParameterBuilder = RegexPatternStringParameterBuilder(
        name="my_regex_pattern_string_parameter_builder",
        candidate_regexes=patterns,
        threshold=0.5,
        data_context=context,
    )
    assert builder.threshold == 0.5
    assert builder.candidate_regexes == patterns
    # The class-level default set is not replaced by the instance override.
    assert builder.CANDIDATE_REGEX != patterns
def test_regex_pattern_string_parameter_builder_alice(
    alice_columnar_table_single_batch_context,
):
    """Against the alice single-batch data, the UUID-shaped regex should be
    the sole full match (success ratio 1.0) on the "id" column, and the
    digit-only candidates should match nothing."""
    data_context: DataContext = alice_columnar_table_single_batch_context

    batch_request: dict = {
        "datasource_name": "alice_columnar_table_single_batch_datasource",
        "data_connector_name": "alice_columnar_table_single_batch_data_connector",
        "data_asset_name": "alice_columnar_table_single_batch_data_asset",
    }

    metric_domain_kwargs = {"column": "id"}
    candidate_regexes: List[str] = [
        r"^\d{1}$",
        r"^\d{2}$",
        r"^\S{8}-\S{4}-\S{4}-\S{4}-\S{12}$",
    ]

    regex_pattern_string_parameter: ParameterBuilder = (
        RegexPatternStringParameterBuilder(
            name="my_regex_pattern_string_parameter_builder",
            metric_domain_kwargs=metric_domain_kwargs,
            candidate_regexes=candidate_regexes,
            data_context=data_context,
        )
    )

    domain: Domain = Domain(
        domain_type=MetricDomainTypes.COLUMN,
        domain_kwargs=metric_domain_kwargs,
        rule_name="my_rule",
    )
    parameter_container: ParameterContainer = ParameterContainer(parameter_nodes=None)
    parameters: Dict[str, ParameterContainer] = {
        domain.id: parameter_container,
    }

    # Nothing should be populated before build_parameters runs.
    assert parameter_container.parameter_nodes is None

    regex_pattern_string_parameter.build_parameters(
        domain=domain,
        parameters=parameters,
        batch_request=batch_request,
    )

    fully_qualified_parameter_name_for_value: str = (
        "$parameter.my_regex_pattern_string_parameter_builder"
    )
    expected_value: dict = {
        "value": r"^\S{8}-\S{4}-\S{4}-\S{4}-\S{12}$",
        "details": {
            "evaluated_regexes": {
                r"^\S{8}-\S{4}-\S{4}-\S{4}-\S{12}$": 1.0,
                r"^\d{1}$": 0.0,
                r"^\d{2}$": 0.0,
            },
            "success_ratio": 1.0,
        },
    }

    assert (
        get_parameter_value_by_fully_qualified_parameter_name(
            fully_qualified_parameter_name=fully_qualified_parameter_name_for_value,
            domain=domain,
            parameters=parameters,
        )
        == expected_value
    )
def test_regex_pattern_string_parameter_builder_bobby_multiple_matches(
    bobby_columnar_table_multi_batch_deterministic_data_context,
):
    """With two candidates above the 0.9 threshold on VendorID, the builder
    should select the best-matching regex (ratio 1.0) and report the
    evaluated ratio for every candidate."""
    data_context: DataContext = (
        bobby_columnar_table_multi_batch_deterministic_data_context
    )

    batch_request: dict = {
        "datasource_name": "taxi_pandas",
        "data_connector_name": "monthly",
        "data_asset_name": "my_reports",
        "data_connector_query": {"index": -1},
    }

    metric_domain_kwargs: dict = {"column": "VendorID"}

    candidate_regexes: List[str] = [
        r"^\d{1}$",  # will match
        r"^[12]{1}$",  # will match 0.9941111111 of the time
        r"^\d{4}$",  # won't match
    ]
    threshold: float = 0.9

    regex_parameter: RegexPatternStringParameterBuilder = (
        RegexPatternStringParameterBuilder(
            name="my_regex_pattern_string_parameter_builder",
            metric_domain_kwargs=metric_domain_kwargs,
            candidate_regexes=candidate_regexes,
            threshold=threshold,
            data_context=data_context,
        )
    )

    # Instance overrides must not replace the class-level defaults.
    assert regex_parameter.CANDIDATE_REGEX != candidate_regexes
    assert regex_parameter.candidate_regexes == candidate_regexes
    assert regex_parameter.threshold == 0.9

    domain: Domain = Domain(
        domain_type=MetricDomainTypes.COLUMN,
        domain_kwargs=metric_domain_kwargs,
        rule_name="my_rule",
    )
    parameter_container: ParameterContainer = ParameterContainer(parameter_nodes=None)
    parameters: Dict[str, ParameterContainer] = {
        domain.id: parameter_container,
    }

    # Nothing should be populated before build_parameters runs.
    assert parameter_container.parameter_nodes is None

    regex_parameter.build_parameters(
        domain=domain,
        parameters=parameters,
        batch_request=batch_request,
    )
    fully_qualified_parameter_name_for_value: str = (
        "$parameter.my_regex_pattern_string_parameter_builder"
    )
    expected_value: dict = {
        "value": r"^\d{1}$",
        "details": {
            "evaluated_regexes": {
                r"^\d{1}$": 1.0,
                r"^[12]{1}$": 0.9941111111111111,
                r"^\d{4}$": 0.0,
            },
            "success_ratio": 1.0,
        },
    }

    results = get_parameter_value_by_fully_qualified_parameter_name(
        fully_qualified_parameter_name=fully_qualified_parameter_name_for_value,
        domain=domain,
        parameters=parameters,
    )
    assert results is not None
    assert sorted(results["value"]) == sorted(expected_value["value"])
    assert results["details"] == expected_value["details"]
def test_regex_pattern_string_parameter_builder_bobby_no_match(
    bobby_columnar_table_multi_batch_deterministic_data_context,
):
    """Single candidate regex that matches no rows of the "VendorID" column.

    NOTE(review): the expected ``evaluated_regexes`` below are not the supplied
    three-digit candidate but a larger set of generic patterns -- presumably the
    builder substitutes its default CANDIDATE_REGEX collection when the supplied
    candidates fail; confirm against RegexPatternStringParameterBuilder.
    """
    data_context: DataContext = (
        bobby_columnar_table_multi_batch_deterministic_data_context
    )
    # Most recent monthly batch of the taxi fixture data.
    batch_request: dict = {
        "datasource_name": "taxi_pandas",
        "data_connector_name": "monthly",
        "data_asset_name": "my_reports",
        "data_connector_query": {"index": -1},
    }
    metric_domain_kwargs: dict = {"column": "VendorID"}
    candidate_regexes: Set[str] = {
        r"^\d{3}$",  # won't match
    }
    threshold: float = 0.9
    regex_parameter: ParameterBuilder = RegexPatternStringParameterBuilder(
        name="my_regex_pattern_string_parameter_builder",
        metric_domain_kwargs=metric_domain_kwargs,
        candidate_regexes=candidate_regexes,
        threshold=threshold,
        data_context=data_context,
    )
    domain: Domain = Domain(
        domain_type=MetricDomainTypes.COLUMN,
        domain_kwargs=metric_domain_kwargs,
        rule_name="my_rule",
    )
    parameter_container: ParameterContainer = ParameterContainer(parameter_nodes=None)
    parameters: Dict[str, ParameterContainer] = {
        domain.id: parameter_container,
    }
    # build_parameters() must populate the (initially empty) container.
    assert parameter_container.parameter_nodes is None
    regex_parameter.build_parameters(
        domain=domain,
        parameters=parameters,
        batch_request=batch_request,
    )
    fully_qualified_parameter_name_for_value: str = (
        "$parameter.my_regex_pattern_string_parameter_builder"
    )
    expected_value: dict = {
        "value": "(?:[A-Fa-f0-9]){0,4}(?: ?:? ?(?:[A-Fa-f0-9]){0,4}){0,7}",
        "details": {
            "evaluated_regexes": {
                r"\d+": 1.0,
                r"-?\d+": 1.0,
                r"-?\d+(?:\.\d*)?": 1.0,
                r"[A-Za-z0-9\.,;:!?()\"'%\-]+": 1.0,
                r"^\s+": 0.0,
                r"\s+$": 0.0,
                r"https?:\/\/(?:www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b(?:[-a-zA-Z0-9@:%_\+.~#()?&//=]*)": 0.0,
                r"<\/?(?:p|a|b|img)(?: \/)?>": 0.0,
                r"(?:25[0-5]|2[0-4]\d|[01]\d{2}|\d{1,2})(?:.(?:25[0-5]|2[0-4]\d|[01]\d{2}|\d{1,2})){3}": 0.0,
                r"(?:[A-Fa-f0-9]){0,4}(?: ?:? ?(?:[A-Fa-f0-9]){0,4}){0,7}": 1.0,
                r"\b[0-9a-fA-F]{8}\b-[0-9a-fA-F]{4}-[0-5][0-9a-fA-F]{3}-[089ab][0-9a-fA-F]{3}-\b[0-9a-fA-F]{12}\b ": 0.0,
            },
            "success_ratio": 1.0,
        },
    }
    assert (
        get_parameter_value_by_fully_qualified_parameter_name(
            fully_qualified_parameter_name=fully_qualified_parameter_name_for_value,
            domain=domain,
            parameters=parameters,
        )
        == expected_value
    )
@mock.patch("great_expectations.data_context.data_context.DataContext")
def test_regex_wrong_domain(mock_data_context: mock.MagicMock, batch_fixture: Batch):
    """build_parameters() must raise when the metric computation yields nothing.

    The domain points at column "c", which does not exist in the fixture
    batch, so the builder's metric results come back empty.
    """
    batch: Batch = batch_fixture
    mock_data_context.get_batch_list.return_value = [batch]
    mock_data_context.get_validator_using_batch_list.return_value = Validator(
        execution_engine=PandasExecutionEngine(), batches=[batch]
    )
    data_context: DataContext = mock_data_context

    # Domain over a nonexistent column.
    missing_column_kwargs: dict = {"column": "c"}
    builder: ParameterBuilder = RegexPatternStringParameterBuilder(
        name="my_regex_pattern_string_parameter_builder",
        metric_domain_kwargs=missing_column_kwargs,
        candidate_regexes=[r"^\d{1}$"],
        data_context=data_context,
    )
    domain: Domain = Domain(
        domain_type=MetricDomainTypes.COLUMN,
        domain_kwargs=missing_column_kwargs,
        rule_name="my_rule",
    )
    parameters: Dict[str, ParameterContainer] = {
        domain.id: ParameterContainer(parameter_nodes=None)
    }

    with pytest.raises(ge_exceptions.ProfilerExecutionError) as e:
        builder.build_parameters(
            domain=domain,
            parameters=parameters,
            batch_list=[batch],
        )
    assert (
        e.value.message
        == "Result of metric computations for RegexPatternStringParameterBuilder is empty."
    )
@mock.patch("great_expectations.data_context.data_context.DataContext")
def test_regex_single_candidate(
    mock_data_context: mock.MagicMock, batch_fixture: Batch
):
    """A single matching candidate regex is selected with success ratio 1.0."""
    batch: Batch = batch_fixture
    mock_data_context.get_batch_list.return_value = [batch]
    mock_data_context.get_validator_using_batch_list.return_value = Validator(
        execution_engine=PandasExecutionEngine(), batches=[batch]
    )
    data_context: DataContext = mock_data_context

    column_kwargs: dict = {"column": "b"}
    builder: ParameterBuilder = RegexPatternStringParameterBuilder(
        name="my_regex_pattern_string_parameter_builder",
        metric_domain_kwargs=column_kwargs,
        candidate_regexes=[r"^\d{1}$"],
        data_context=data_context,
    )
    domain: Domain = Domain(
        domain_type=MetricDomainTypes.COLUMN,
        domain_kwargs=column_kwargs,
        rule_name="my_rule",
    )
    container: ParameterContainer = ParameterContainer(parameter_nodes=None)
    parameters: Dict[str, ParameterContainer] = {domain.id: container}
    assert container.parameter_nodes is None

    builder.build_parameters(
        domain=domain,
        parameters=parameters,
        batch_list=[batch],
    )

    # The winning regex is published under the builder's ".value" key ...
    value = get_parameter_value_and_validate_return_type(
        parameter_reference="$parameter.my_regex_pattern_string_parameter_builder.value",
        expected_return_type=str,
        domain=domain,
        parameters=parameters,
    )
    assert value == "^\\d{1}$"

    # ... and the evaluation evidence under ".details".
    details: dict = get_parameter_value_and_validate_return_type(
        parameter_reference="$parameter.my_regex_pattern_string_parameter_builder.details",
        expected_return_type=dict,
        domain=domain,
        parameters=parameters,
    )
    assert details == {"evaluated_regexes": {"^\\d{1}$": 1.0}, "success_ratio": 1.0}
@mock.patch("great_expectations.data_context.data_context.DataContext")
def test_regex_two_candidates(mock_data_context: mock.MagicMock, batch_fixture: Batch):
    """With two candidates, the matching regex wins and both evaluations are reported.

    Column "b" holds single-digit values, so the one-digit pattern matches all
    rows (ratio 1.0) while the three-digit pattern matches none (ratio 0.0).
    """
    batch: Batch = batch_fixture
    mock_data_context.get_batch_list.return_value = [batch]
    mock_data_context.get_validator_using_batch_list.return_value = Validator(
        execution_engine=PandasExecutionEngine(), batches=[batch]
    )
    data_context: DataContext = mock_data_context
    metric_domain_kwargs: dict = {"column": "b"}
    candidate_regexes: List[str] = [r"^\d{1}$", r"^\d{3}$"]
    regex_pattern_string_parameter_builder: ParameterBuilder = (
        RegexPatternStringParameterBuilder(
            name="my_regex_pattern_string_parameter_builder",
            metric_domain_kwargs=metric_domain_kwargs,
            candidate_regexes=candidate_regexes,
            data_context=data_context,
        )
    )
    domain: Domain = Domain(
        domain_type=MetricDomainTypes.COLUMN,
        domain_kwargs=metric_domain_kwargs,
        rule_name="my_rule",
    )
    parameter_container: ParameterContainer = ParameterContainer(parameter_nodes=None)
    parameters: Dict[str, ParameterContainer] = {
        domain.id: parameter_container,
    }
    assert parameter_container.parameter_nodes is None
    regex_pattern_string_parameter_builder.build_parameters(
        domain=domain,
        parameters=parameters,
        batch_list=[batch],
    )
    fully_qualified_parameter_name_for_value: str = (
        "$parameter.my_regex_pattern_string_parameter_builder.value"
    )
    expected_value: str = "^\\d{1}$"
    assert (
        get_parameter_value_and_validate_return_type(
            parameter_reference=fully_qualified_parameter_name_for_value,
            # Validate the return type, consistent with test_regex_single_candidate
            # (this argument was previously omitted here).
            expected_return_type=str,
            domain=domain,
            parameters=parameters,
        )
        == expected_value
    )
    fully_qualified_parameter_name_for_meta: str = (
        "$parameter.my_regex_pattern_string_parameter_builder.details"
    )
    expected_meta: dict = {
        "evaluated_regexes": {"^\\d{1}$": 1.0, "^\\d{3}$": 0.0},
        "success_ratio": 1.0,
    }
    meta: dict = get_parameter_value_and_validate_return_type(
        parameter_reference=fully_qualified_parameter_name_for_meta,
        expected_return_type=dict,
        domain=domain,
        parameters=parameters,
    )
    assert meta == expected_meta
| StarcoderdataPython |
161618 | from django.shortcuts import render
from .models import *
from django.db.models import Q,F,Aggregate
from django.http import HttpResponse, HttpResponseRedirect, QueryDict, JsonResponse
from django.urls import reverse
from django.template import loader
# Create your views here.
def game(req):
    """Render the 2048 game page."""
    return render(req,'2048.html')
def testreq(req):
    """Scratch/demo endpoint: return a small JSON payload.

    (Earlier experiments inspecting request method, POST data, cookies,
    client address, and response internals were removed as dead code.)
    """
    payload = {'key': '呵呵'}
    return JsonResponse(payload)
# Home page API
def index(req):
    """Render the home page, greeting the user named by the "uname" cookie.

    The cookie is set by login_api and cleared by logout_api; absent cookie
    means an anonymous visitor (empty name).
    """
    user_name = req.COOKIES.get("uname", "")
    # Removed leftover debug code that read req.session['msg'] and printed it.
    return render(req, 'index.html', {'u_name': user_name})
def login_api(req):
    """GET: render the login form.  POST: "log in" and redirect to the home page.

    Credential validation is stubbed out (assumed to succeed); the session
    flag and the short-lived "uname" cookie are what index/logout rely on.
    """
    if req.method == 'GET':
        return render(req, 'login.html')

    param = req.POST
    u_name = param.get('uname')
    pwd = param.get('pwd')  # read but unused: validation is stubbed
    # Mark the session as logged in (cleared by logout_api).
    req.session['msg'] = 'ok'
    # User validation (assumed to pass).
    response = HttpResponseRedirect('/t05/index')
    # Remember the user name for 10 seconds via cookie.
    # (Removed leftover debug print of the response object.)
    response.set_cookie('uname', u_name, max_age=10)
    return response
def logout_api(req):
    """Log the user out: clear the session flag and the "uname" cookie."""
    response = HttpResponseRedirect(reverse('t05:index'))
    # pop() instead of del: logging out without a prior login must not 500
    # with a KeyError.
    req.session.pop('msg', None)
    response.delete_cookie('uname')
    return response
5029165 |
age = 36
if age < 2:
stage = 'a baby'
elif age < 4:
stage = 'a toddler'
elif age < 13:
stage = 'a kid'
elif age < 20:
stage = 'a teenager'
elif age < 65:
stage = 'an adult'
else:
stage = 'an elder'
print('The person is ' + stage)
| StarcoderdataPython |
30909 | #!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_nutnrb
@file marine-integrations/mi/dataset/parser/test/test_nutnrb.py
@author <NAME>
@brief Test code for a Nutnrb data parser
"""
import unittest
import gevent
from StringIO import StringIO
from nose.plugins.attrib import attr
from mi.core.log import get_logger ; log = get_logger()
from mi.core.exceptions import SampleException
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.dataset.dataset_driver import DataSetDriverConfigKeys
from mi.dataset.parser.nutnrb import NutnrbParser, NutnrbDataParticle, StateKey
# Add a mixin here if needed
@unittest.skip('Nutnr parser is broken, timestamp needs to be fixed')
@attr('UNIT', group='mi')
class NutnrbParserUnitTestCase(ParserUnitTestCase):
"""
WFP Parser unit test suite
"""
TEST_DATA = """
2012/12/13 15:29:20.362 [nutnr:DLOGP1]:Idle state, without initialize
2012/12/13 15:30:06.455 [nutnr:DLOGP1]:S
2012/12/13 15:30:06.676 [nutnr:DLOGP1]:O
2012/12/13 15:30:06.905 [nutnr:DLOGP1]:S
2012/12/13 15:30:07.130 [nutnr:DLOGP1]:Y
2012/12/13 15:30:07.355 [nutnr:DLOGP1]:1
2012/12/13 15:30:07.590 [nutnr:DLOGP1]:T
2012/12/13 15:30:07.829 [nutnr:DLOGP1]:Y
2012/12/13 15:30:08.052 [nutnr:DLOGP1]:3
2012/12/13 15:30:08.283 [nutnr:DLOGP1]:L
2012/12/13 15:30:08.524 [nutnr:DLOGP1]:Y
2012/12/13 15:30:08.743 [nutnr:DLOGP1]:1
2012/12/13 15:30:08.969 [nutnr:DLOGP1]:D
2012/12/13 15:30:09.194 [nutnr:DLOGP1]:Y
2012/12/13 15:30:09.413 [nutnr:DLOGP1]:0
2012/12/13 15:30:09.623 [nutnr:DLOGP1]:Q
2012/12/13 15:30:09.844 [nutnr:DLOGP1]:D
2012/12/13 15:30:10.096 [nutnr:DLOGP1]:O
2012/12/13 15:30:10.349 [nutnr:DLOGP1]:Y
2012/12/13 15:30:10.570 [nutnr:DLOGP1]:5
2012/12/13 15:30:10.779 [nutnr:DLOGP1]:Q
2012/12/13 15:30:10.990 [nutnr:DLOGP1]:Q
2012/12/13 15:30:11.223 [nutnr:DLOGP1]:Y
2012/12/13 15:30:11.703 [nutnr:DLOGP1]:Y
2012/12/13 15:30:12.841 [nutnr:DLOGP1]:2012/12/13 15:30:11
2012/12/13 15:30:13.261 [nutnr:DLOGP1]:Instrument started with initialize
2012/12/13 15:30:19.270 [nutnr:DLOGP1]:onds.
2012/12/13 15:30:20.271 [nutnr:DLOGP1]:ISUS will start in 7 seconds.
2012/12/13 15:30:21.272 [nutnr:DLOGP1]:ISUS will start in 6 seconds.
2012/12/13 15:30:22.272 [nutnr:DLOGP1]:ISUS will start in 5 seconds.
2012/12/13 15:30:23.273 [nutnr:DLOGP1]:ISUS will start in 4 seconds.
2012/12/13 15:30:24.273 [nutnr:DLOGP1]:ISUS will start in 3 seconds.
2012/12/13 15:30:25.274 [nutnr:DLOGP1]:ISUS will start in 2 seconds.
2012/12/13 15:30:26.275 [nutnr:DLOGP1]:ISUS will start in 1 seconds.
2012/12/13 15:30:27.275 [nutnr:DLOGP1]:ISUS will start in 0 seconds.
2012/12/13 15:30:28.309 [nutnr:DLOGP1]:12/13/2012 15:30:26: Message: Entering low power suspension, waiting for trigger.
2012/12/13 15:30:59.889 [nutnr:DLOGP1]: ++++++++++ charged
2012/12/13 15:31:00.584 [nutnr:DLOGP1]: ON Spectrometer.
2012/12/13 15:31:01.366 [nutnr:DLOGP1]:12/13/2012 15:30:59: Message: Spectrometer powered up.
2012/12/13 15:31:01.435 [nutnr:DLOGP1]:12/13/2012 15:30:59: Message: Turning ON UV light source.
2012/12/13 15:31:06.917 [nutnr:DLOGP1]:12/13/2012 15:31:04: Message: UV light source powered up.
2012/12/13 15:31:07.053 [nutnr:DLOGP1]:12/13/2012 15:31:04: Message: Data log file is 'DATA\SCH12348.DAT'.
2012/12/13 15:31:08.726 SATNDC0239,2012348,15.518322,0.00,0.00,0.00,0.00,0.000000
2012/12/13 15:31:10.065 SATNLC0239,2012348,15.518666,-5.48,20.38,-31.12,0.59,0.000231
2012/12/13 15:31:11.405 SATNLC0239,2012348,15.519024,-6.38,24.24,-37.41,0.61,0.000191
2012/12/13 15:31:12.720 SATNLC0239,2012348,15.519397,-6.77,24.80,-38.00,0.62,0.000203
2012/12/13 15:42:25.429 [nutnr:DLOGP1]:ISUS will start in 15 seconds.
2012/12/13 15:42:26.430 [nutnr:DLOGP1]:ISUS will start in 14 seconds.
2012/12/13 15:42:27.431 [nutnr:DLOGP1]:ISUS will start in 13 seconds.
2012/12/13 15:42:28.431 [nutnr:DLOGP1]:ISUS will start in 12 seconds.
2012/12/13 15:42:29.432 [nutnr:DLOGP1]:ISUS will start in 11 seconds.
2012/12/13 15:42:30.433 [nutnr:DLOGP1]:ISUS will start in 10 seconds.
2012/12/13 15:42:31.434 [nutnr:DLOGP1]:ISUS will start in 9 seconds.
2012/12/13 15:42:32.435 [nutnr:DLOGP1]:ISUS will start in 8 seconds.
2012/12/13 15:42:33.436 [nutnr:DLOGP1]:ISUS will start in 7 seconds.
2012/12/13 15:42:34.436 [nutnr:DLOGP1]:ISUS will start in 6 seconds.
2012/12/13 15:42:35.437 [nutnr:DLOGP1]:ISUS will start in 5 seconds.
2012/12/13 15:42:36.438 [nutnr:DLOGP1]:ISUS will start in 4 seconds.
2012/12/13 15:42:37.438 [nutnr:DLOGP1]:ISUS will start in 3 seconds.
2012/12/13 15:42:38.439 [nutnr:DLOGP1]:ISUS will start in 2 seconds.
2012/12/13 15:42:39.440 [nutnr:DLOGP1]:ISUS will start in 1 seconds.
2012/12/13 15:42:40.440 [nutnr:DLOGP1]:ISUS will start in 0 seconds.
2012/12/13 15:42:41.474 [nutnr:DLOGP1]:12/13/2012 15:42:38: Message: Entering low power suspension, waiting for trigger.
2012/12/13 15:45:26.795 [nutnr:DLOGP1]:Idle state, without initialize
2012/12/13 15:45:46.793 [nutnr:DLOGP1]:Instrument started
2012/12/13 17:51:53.412 [nutnr:DLOGP1]:S
2012/12/13 17:51:53.633 [nutnr:DLOGP1]:O
2012/12/13 17:51:53.862 [nutnr:DLOGP1]:S
2012/12/13 17:51:54.088 [nutnr:DLOGP1]:Y
2012/12/13 17:51:54.312 [nutnr:DLOGP1]:1
2012/12/13 17:51:54.548 [nutnr:DLOGP1]:T
2012/12/13 17:51:54.788 [nutnr:DLOGP1]:Y
2012/12/13 17:51:55.011 [nutnr:DLOGP1]:3
2012/12/13 17:51:55.243 [nutnr:DLOGP1]:L
2012/12/13 17:51:55.483 [nutnr:DLOGP1]:Y
2012/12/13 17:51:55.702 [nutnr:DLOGP1]:1
2012/12/13 17:51:55.928 [nutnr:DLOGP1]:D
2012/12/13 17:51:56.154 [nutnr:DLOGP1]:Y
2012/12/13 17:51:56.373 [nutnr:DLOGP1]:0
2012/12/13 17:51:56.582 [nutnr:DLOGP1]:Q
2012/12/13 17:51:56.803 [nutnr:DLOGP1]:D
2012/12/13 17:51:57.055 [nutnr:DLOGP1]:O
2012/12/13 17:51:57.308 [nutnr:DLOGP1]:Y
2012/12/13 17:51:57.529 [nutnr:DLOGP1]:5
2012/12/13 17:51:57.738 [nutnr:DLOGP1]:Q
2012/12/13 17:51:57.948 [nutnr:DLOGP1]:Q
2012/12/13 17:51:58.181 [nutnr:DLOGP1]:Y
2012/12/13 17:51:58.659 [nutnr:DLOGP1]:Y
2012/12/13 17:51:59.747 [nutnr:DLOGP1]:2012/12/13 17:51:58
2012/12/13 17:52:00.166 [nutnr:DLOGP1]:Instrument started with initialize
"""
LONG_DATA = """
2012/12/13 15:29:20.362 [nutnr:DLOGP1]:Idle state, without initialize
2012/12/13 15:30:06.455 [nutnr:DLOGP1]:S
2012/12/13 15:30:06.676 [nutnr:DLOGP1]:O
2012/12/13 15:30:06.905 [nutnr:DLOGP1]:S
2012/12/13 15:30:07.130 [nutnr:DLOGP1]:Y
2012/12/13 15:30:07.355 [nutnr:DLOGP1]:1
2012/12/13 15:30:07.590 [nutnr:DLOGP1]:T
2012/12/13 15:30:07.829 [nutnr:DLOGP1]:Y
2012/12/13 15:30:08.052 [nutnr:DLOGP1]:3
2012/12/13 15:30:08.283 [nutnr:DLOGP1]:L
2012/12/13 15:30:08.524 [nutnr:DLOGP1]:Y
2012/12/13 15:30:08.743 [nutnr:DLOGP1]:1
2012/12/13 15:30:08.969 [nutnr:DLOGP1]:D
2012/12/13 15:30:09.194 [nutnr:DLOGP1]:Y
2012/12/13 15:30:09.413 [nutnr:DLOGP1]:0
2012/12/13 15:30:09.623 [nutnr:DLOGP1]:Q
2012/12/13 15:30:09.844 [nutnr:DLOGP1]:D
2012/12/13 15:30:10.096 [nutnr:DLOGP1]:O
2012/12/13 15:30:10.349 [nutnr:DLOGP1]:Y
2012/12/13 15:30:10.570 [nutnr:DLOGP1]:5
2012/12/13 15:30:10.779 [nutnr:DLOGP1]:Q
2012/12/13 15:30:10.990 [nutnr:DLOGP1]:Q
2012/12/13 15:30:11.223 [nutnr:DLOGP1]:Y
2012/12/13 15:30:11.703 [nutnr:DLOGP1]:Y
2012/12/13 15:30:12.841 [nutnr:DLOGP1]:2012/12/13 15:30:11
2012/12/13 15:30:13.261 [nutnr:DLOGP1]:Instrument started with initialize
2012/12/13 15:30:19.270 [nutnr:DLOGP1]:onds.
2012/12/13 15:30:20.271 [nutnr:DLOGP1]:ISUS will start in 7 seconds.
2012/12/13 15:30:21.272 [nutnr:DLOGP1]:ISUS will start in 6 seconds.
2012/12/13 15:30:22.272 [nutnr:DLOGP1]:ISUS will start in 5 seconds.
2012/12/13 15:30:23.273 [nutnr:DLOGP1]:ISUS will start in 4 seconds.
2012/12/13 15:30:24.273 [nutnr:DLOGP1]:ISUS will start in 3 seconds.
2012/12/13 15:30:25.274 [nutnr:DLOGP1]:ISUS will start in 2 seconds.
2012/12/13 15:30:26.275 [nutnr:DLOGP1]:ISUS will start in 1 seconds.
2012/12/13 15:30:27.275 [nutnr:DLOGP1]:ISUS will start in 0 seconds.
2012/12/13 15:30:28.309 [nutnr:DLOGP1]:12/13/2012 15:30:26: Message: Entering low power suspension, waiting for trigger.
2012/12/13 15:30:59.889 [nutnr:DLOGP1]: ++++++++++ charged
2012/12/13 15:31:00.584 [nutnr:DLOGP1]: ON Spectrometer.
2012/12/13 15:31:01.366 [nutnr:DLOGP1]:12/13/2012 15:30:59: Message: Spectrometer powered up.
2012/12/13 15:31:01.435 [nutnr:DLOGP1]:12/13/2012 15:30:59: Message: Turning ON UV light source.
2012/12/13 15:31:06.917 [nutnr:DLOGP1]:12/13/2012 15:31:04: Message: UV light source powered up.
2012/12/13 15:31:07.053 [nutnr:DLOGP1]:12/13/2012 15:31:04: Message: Data log file is 'DATA\SCH12348.DAT'.
2012/12/13 15:31:08.726 SATNDC0239,2012348,15.518322,0.00,0.00,0.00,0.00,0.000000
2012/12/13 15:31:10.065 SATNLC0239,2012348,15.518666,-5.48,20.38,-31.12,0.59,0.000231
2012/12/13 15:31:11.405 SATNLC0239,2012348,15.519024,-6.38,24.24,-37.41,0.61,0.000191
2012/12/13 15:31:12.720 SATNLC0239,2012348,15.519397,-6.77,24.80,-38.00,0.62,0.000203
2012/12/13 15:31:14.041 SATNLC0239,2012348,15.519770,-5.28,18.39,-27.76,0.59,0.000212
2012/12/13 15:31:15.350 SATNLC0239,2012348,15.520128,-7.57,32.65,-51.28,0.62,0.000186
2012/12/13 15:31:16.695 SATNLC0239,2012348,15.520501,-6.17,24.43,-37.71,0.60,0.000218
2012/12/13 15:31:18.015 SATNLC0239,2012348,15.520875,-5.59,18.68,-28.01,0.60,0.000166
2012/12/13 15:31:19.342 SATNLC0239,2012348,15.521232,-7.30,30.87,-48.21,0.62,0.000235
2012/12/13 15:31:20.704 SATNLC0239,2012348,15.521605,-7.52,31.35,-49.03,0.63,0.000240
2012/12/13 15:42:25.429 [nutnr:DLOGP1]:ISUS will start in 15 seconds.
2012/12/13 15:42:26.430 [nutnr:DLOGP1]:ISUS will start in 14 seconds.
2012/12/13 15:42:27.431 [nutnr:DLOGP1]:ISUS will start in 13 seconds.
2012/12/13 15:42:28.431 [nutnr:DLOGP1]:ISUS will start in 12 seconds.
2012/12/13 15:42:29.432 [nutnr:DLOGP1]:ISUS will start in 11 seconds.
2012/12/13 15:42:30.433 [nutnr:DLOGP1]:ISUS will start in 10 seconds.
2012/12/13 15:42:31.434 [nutnr:DLOGP1]:ISUS will start in 9 seconds.
2012/12/13 15:42:32.435 [nutnr:DLOGP1]:ISUS will start in 8 seconds.
2012/12/13 15:42:33.436 [nutnr:DLOGP1]:ISUS will start in 7 seconds.
2012/12/13 15:42:34.436 [nutnr:DLOGP1]:ISUS will start in 6 seconds.
2012/12/13 15:42:35.437 [nutnr:DLOGP1]:ISUS will start in 5 seconds.
2012/12/13 15:42:36.438 [nutnr:DLOGP1]:ISUS will start in 4 seconds.
2012/12/13 15:42:37.438 [nutnr:DLOGP1]:ISUS will start in 3 seconds.
2012/12/13 15:42:38.439 [nutnr:DLOGP1]:ISUS will start in 2 seconds.
2012/12/13 15:42:39.440 [nutnr:DLOGP1]:ISUS will start in 1 seconds.
2012/12/13 15:42:40.440 [nutnr:DLOGP1]:ISUS will start in 0 seconds.
2012/12/13 15:42:41.474 [nutnr:DLOGP1]:12/13/2012 15:42:38: Message: Entering low power suspension, waiting for trigger.
2012/12/13 15:45:26.795 [nutnr:DLOGP1]:Idle state, without initialize
2012/12/13 15:45:46.793 [nutnr:DLOGP1]:Instrument started
2012/12/13 17:51:53.412 [nutnr:DLOGP1]:S
2012/12/13 17:51:53.633 [nutnr:DLOGP1]:O
2012/12/13 17:51:53.862 [nutnr:DLOGP1]:S
2012/12/13 17:51:54.088 [nutnr:DLOGP1]:Y
2012/12/13 17:51:54.312 [nutnr:DLOGP1]:1
2012/12/13 17:51:54.548 [nutnr:DLOGP1]:T
2012/12/13 17:51:54.788 [nutnr:DLOGP1]:Y
2012/12/13 17:51:55.011 [nutnr:DLOGP1]:3
2012/12/13 17:51:55.243 [nutnr:DLOGP1]:L
2012/12/13 17:51:55.483 [nutnr:DLOGP1]:Y
2012/12/13 17:51:55.702 [nutnr:DLOGP1]:1
2012/12/13 17:51:55.928 [nutnr:DLOGP1]:D
2012/12/13 17:51:56.154 [nutnr:DLOGP1]:Y
2012/12/13 17:51:56.373 [nutnr:DLOGP1]:0
2012/12/13 17:51:56.582 [nutnr:DLOGP1]:Q
2012/12/13 17:51:56.803 [nutnr:DLOGP1]:D
2012/12/13 17:51:57.055 [nutnr:DLOGP1]:O
2012/12/13 17:51:57.308 [nutnr:DLOGP1]:Y
2012/12/13 17:51:57.529 [nutnr:DLOGP1]:5
2012/12/13 17:51:57.738 [nutnr:DLOGP1]:Q
2012/12/13 17:51:57.948 [nutnr:DLOGP1]:Q
2012/12/13 17:51:58.181 [nutnr:DLOGP1]:Y
2012/12/13 17:51:58.659 [nutnr:DLOGP1]:Y
2012/12/13 17:51:59.747 [nutnr:DLOGP1]:2012/12/13 17:51:58
2012/12/13 17:52:00.166 [nutnr:DLOGP1]:Instrument started with initialize
"""
BAD_TEST_DATA = """
2012/12/13 15:29:20.362 [nutnr:DLOGP1]:Idle state, without initialize
2012/12/13 15:30:06.455 [nutnr:DLOGP1]:S
2012/12/13 15:30:06.676 [nutnr:DLOGP1]:O
2012/12/13 15:30:06.905 [nutnr:DLOGP1]:S
2012/12/13 15:30:07.130 [nutnr:DLOGP1]:Y
2012/12/13 15:30:07.355 [nutnr:DLOGP1]:1
2012/12/13 15:30:07.590 [nutnr:DLOGP1]:T
2012/12/13 15:30:07.829 [nutnr:DLOGP1]:Y
2012/12/13 15:30:08.052 [nutnr:DLOGP1]:3
2012/12/13 15:30:08.283 [nutnr:DLOGP1]:L
2012/12/13 15:30:08.524 [nutnr:DLOGP1]:Y
2012/12/13 15:30:08.743 [nutnr:DLOGP1]:1
2012/12/13 15:30:08.969 [nutnr:DLOGP1]:D
2012/12/13 15:30:09.194 [nutnr:DLOGP1]:Y
2012/12/13 15:30:09.413 [nutnr:DLOGP1]:0
2012/12/13 15:30:09.623 [nutnr:DLOGP1]:Q
2012/12/13 15:30:09.844 [nutnr:DLOGP1]:D
2012/12/13 15:30:10.096 [nutnr:DLOGP1]:O
2012/12/13 15:30:10.349 [nutnr:DLOGP1]:Y
2012/12/13 15:30:10.570 [nutnr:DLOGP1]:5
2012/12/13 15:30:10.779 [nutnr:DLOGP1]:Q
2012/12/13 15:30:10.990 [nutnr:DLOGP1]:Q
2012/12/13 15:30:11.223 [nutnr:DLOGP1]:Y
2012/12/13 15:30:11.703 [nutnr:DLOGP1]:Y
2012/12/13 15:30:12.841 [nutnr:DLOGP1]:2012/12/13 15:30:11
2012/12/13 15:30:13.261 [nutnr:DLOGP1]:Instrument started with initialize
2012/12/13 15:30:19.270 [nutnr:DLOGP1]:onds.
2012/12/13 15:30:20.271 [nutnr:DLOGP1]:ISUS will start in 7 seconds.
2012/12/13 15:30:21.272 [nutnr:DLOGP1]:ISUS will start in 6 seconds.
2012/12/13 15:30:22.272 [nutnr:DLOGP1]:ISUS will start in 5 seconds.
2012/12/13 15:30:23.273 [nutnr:DLOGP1]:ISUS will start in 4 seconds.
2012/12/13 15:30:24.273 [nutnr:DLOGP1]:ISUS will start in 3 seconds.
2012/12/13 15:30:25.274 [nutnr:DLOGP1]:ISUS will start in 2 seconds.
2012/12/13 15:30:26.275 [nutnr:DLOGP1]:ISUS will start in 1 seconds.
2012/12/13 15:30:27.275 [nutnr:DLOGP1]:ISUS will start in 0 seconds.
2012/12/13 15:30:28.309 [nutnr:DLOGP1]:12/13/2012 15:30:26: Message: Entering low power suspension, waiting for trigger.
2012/12/13 15:30:59.889 [nutnr:DLOGP1]: ++++++++++ charged
2012/12/13 15:31:00.584 [nutnr:DLOGP1]: ON Spectrometer.
2012/12/13 15:31:01.366 [nutnr:DLOGP1]:12/13/2012 15:30:59: Message: Spectrometer powered up.
2012/12/13 15:31:01.435 [nutnr:DLOGP1]:12/13/2012 15:30:59: Message: Turning ON UV light source.
2012/12/13 15:31:06.917 [nutnr:DLOGP1]:12/13/2012 15:31:04: Message: UV light source powered up.
2012/12/13 15:31:07.053 [nutnr:DLOGP1]:12/13/2012 15:31:04: Message: Data log file is 'DATA\SCH12348.DAT'.
2012\12\13 15:31:08.726 SATNDC0239,2012348,15.518322,0.00,0.00,0.00,0.00,0.000000
SATNLC0239,2012348,15.518666,-5.48,20.38,-31.12,0.59,0.000231
2012/12/13 15:31:11.405 SATNLC0239,2012348,15.519024,-6.38,24.24,-37.41,0.61,0.000191
2012/12/13 15:31:12.720 SATNLC0239,2012348,15.519397,-6.77,24.80,-38.00,0.62,0.000203
2012/12/13 15:42:25.429 [nutnr:DLOGP1]:ISUS will start in 15 seconds.
2012/12/13 15:42:26.430 [nutnr:DLOGP1]:ISUS will start in 14 seconds.
2012/12/13 15:42:27.431 [nutnr:DLOGP1]:ISUS will start in 13 seconds.
2012/12/13 15:42:28.431 [nutnr:DLOGP1]:ISUS will start in 12 seconds.
2012/12/13 15:42:29.432 [nutnr:DLOGP1]:ISUS will start in 11 seconds.
2012/12/13 15:42:30.433 [nutnr:DLOGP1]:ISUS will start in 10 seconds.
2012/12/13 15:42:31.434 [nutnr:DLOGP1]:ISUS will start in 9 seconds.
2012/12/13 15:42:32.435 [nutnr:DLOGP1]:ISUS will start in 8 seconds.
2012/12/13 15:42:33.436 [nutnr:DLOGP1]:ISUS will start in 7 seconds.
2012/12/13 15:42:34.436 [nutnr:DLOGP1]:ISUS will start in 6 seconds.
2012/12/13 15:42:35.437 [nutnr:DLOGP1]:ISUS will start in 5 seconds.
2012/12/13 15:42:36.438 [nutnr:DLOGP1]:ISUS will start in 4 seconds.
2012/12/13 15:42:37.438 [nutnr:DLOGP1]:ISUS will start in 3 seconds.
2012/12/13 15:42:38.439 [nutnr:DLOGP1]:ISUS will start in 2 seconds.
2012/12/13 15:42:39.440 [nutnr:DLOGP1]:ISUS will start in 1 seconds.
2012/12/13 15:42:40.440 [nutnr:DLOGP1]:ISUS will start in 0 seconds.
2012/12/13 15:42:41.474 [nutnr:DLOGP1]:12/13/2012 15:42:38: Message: Entering low power suspension, waiting for trigger.
2012/12/13 15:45:26.795 [nutnr:DLOGP1]:Idle state, without initialize
2012/12/13 15:45:46.793 [nutnr:DLOGP1]:Instrument started
2012/12/13 17:51:53.412 [nutnr:DLOGP1]:S
2012/12/13 17:51:53.633 [nutnr:DLOGP1]:O
2012/12/13 17:51:53.862 [nutnr:DLOGP1]:S
2012/12/13 17:51:54.088 [nutnr:DLOGP1]:Y
2012/12/13 17:51:54.312 [nutnr:DLOGP1]:1
2012/12/13 17:51:54.548 [nutnr:DLOGP1]:T
2012/12/13 17:51:54.788 [nutnr:DLOGP1]:Y
2012/12/13 17:51:55.011 [nutnr:DLOGP1]:3
2012/12/13 17:51:55.243 [nutnr:DLOGP1]:L
2012/12/13 17:51:55.483 [nutnr:DLOGP1]:Y
2012/12/13 17:51:55.702 [nutnr:DLOGP1]:1
2012/12/13 17:51:55.928 [nutnr:DLOGP1]:D
2012/12/13 17:51:56.154 [nutnr:DLOGP1]:Y
2012/12/13 17:51:56.373 [nutnr:DLOGP1]:0
2012/12/13 17:51:56.582 [nutnr:DLOGP1]:Q
2012/12/13 17:51:56.803 [nutnr:DLOGP1]:D
2012/12/13 17:51:57.055 [nutnr:DLOGP1]:O
2012/12/13 17:51:57.308 [nutnr:DLOGP1]:Y
2012/12/13 17:51:57.529 [nutnr:DLOGP1]:5
2012/12/13 17:51:57.738 [nutnr:DLOGP1]:Q
2012/12/13 17:51:57.948 [nutnr:DLOGP1]:Q
2012/12/13 17:51:58.181 [nutnr:DLOGP1]:Y
2012/12/13 17:51:58.659 [nutnr:DLOGP1]:Y
2012/12/13 17:51:59.747 [nutnr:DLOGP1]:2012/12/13 17:51:58
2012/12/13 17:52:00.166 [nutnr:DLOGP1]:Instrument started with initialize
"""
    def state_callback(self, pos, file_ingested):
        """Record the parser's reported position/state and file-ingested flag."""
        log.trace("SETTING state_callback_value to " + str(pos))
        self.position_callback_value = pos
        self.file_ingested = file_ingested
    def pub_callback(self, pub):
        """Record the most recently published particle list from the parser."""
        log.trace("SETTING publish_callback_value to " + str(pub))
        self.publish_callback_value = pub
    def setUp(self):
        """Build the parser config, an initial state, and expected particle fixtures."""
        ParserUnitTestCase.setUp(self)
        self.config = {
            DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.nutnrb',
            DataSetDriverConfigKeys.PARTICLE_CLASS: 'NutnrbDataParticle'
        }
        # not a DataSourceLocation...its just the parser
        self.position = {StateKey.POSITION: 0}
        # Expected particles, built from the exact data lines of TEST_DATA /
        # LONG_DATA in stream order (particle_z is the last record of LONG_DATA).
        self.particle_a = NutnrbDataParticle("2012/12/13 15:31:08.726 SATNDC0239,2012348,15.518322,0.00,0.00,0.00,0.00,0.000000\n")
        self.particle_b = NutnrbDataParticle("2012/12/13 15:31:10.065 SATNLC0239,2012348,15.518666,-5.48,20.38,-31.12,0.59,0.000231\n")
        self.particle_c = NutnrbDataParticle("2012/12/13 15:31:11.405 SATNLC0239,2012348,15.519024,-6.38,24.24,-37.41,0.61,0.000191\n")
        self.particle_d = NutnrbDataParticle("2012/12/13 15:31:12.720 SATNLC0239,2012348,15.519397,-6.77,24.80,-38.00,0.62,0.000203\n")
        self.particle_e = NutnrbDataParticle("2012/12/13 15:31:14.041 SATNLC0239,2012348,15.519770,-5.28,18.39,-27.76,0.59,0.000212\n")
        self.particle_z = NutnrbDataParticle("2012/12/13 15:31:20.704 SATNLC0239,2012348,15.521605,-7.52,31.35,-49.03,0.63,0.000240\n")
        # Captured by the state/publish callbacks during a test.
        self.position_callback_value = None
        self.publish_callback_value = None
def assert_result(self, result, position, particle):
self.assertEqual(result, [particle])
self.assertEqual(self.parser._state[StateKey.POSITION], position)
self.assertEqual(self.position_callback_value[StateKey.POSITION], position)
self.assert_(isinstance(self.publish_callback_value, list))
self.assertEqual(self.publish_callback_value[0], particle)
def test_happy_path(self):
"""
Test the happy path of operations where the parser takes the input
and spits out a valid data particle given the stream.
"""
new_state = {}
self.stream_handle = StringIO(NutnrbParserUnitTestCase.TEST_DATA)
self.parser = NutnrbParser(self.config, new_state, self.stream_handle,
self.state_callback, self.pub_callback)
result = self.parser.get_records(1)
self.assert_result(result, 2458, self.particle_a)
result = self.parser.get_records(1)
self.assert_result(result, 2544, self.particle_b)
result = self.parser.get_records(1)
self.assert_result(result, 2630, self.particle_c)
result = self.parser.get_records(1)
self.assert_result(result, 2716, self.particle_d)
# no data left, dont move the position
result = self.parser.get_records(1)
self.assertEqual(result, [])
self.assertEqual(self.parser._state[StateKey.POSITION], 2716)
self.assertEqual(self.position_callback_value[StateKey.POSITION], 2716)
self.assert_(isinstance(self.publish_callback_value, list))
self.assertEqual(self.publish_callback_value[0], self.particle_d)
def test_get_many(self):
new_state = {}
self.stream_handle = StringIO(NutnrbParserUnitTestCase.TEST_DATA)
self.parser = NutnrbParser(self.config, new_state, self.stream_handle,
self.state_callback, self.pub_callback)
result = self.parser.get_records(2)
self.assertEqual(result, [self.particle_a, self.particle_b])
self.assertEqual(self.parser._state[StateKey.POSITION], 2544)
self.assertEqual(self.position_callback_value[StateKey.POSITION], 2544)
self.assertEqual(self.publish_callback_value[0], self.particle_a)
self.assertEqual(self.publish_callback_value[1], self.particle_b)
def test_bad_data(self):
# There's a bad sample in the data! Ack! Skip it!
new_state = {}
self.stream_handle = StringIO(NutnrbParserUnitTestCase.BAD_TEST_DATA)
self.parser = NutnrbParser(self.config, new_state, self.stream_handle,
self.state_callback, self.pub_callback)
result = self.parser.get_records(1)
self.assert_result(result, 2603, self.particle_c)
def test_long_stream(self):
new_state = {}
self.stream_handle = StringIO(NutnrbParserUnitTestCase.LONG_DATA)
self.parser = NutnrbParser(self.config, new_state, self.stream_handle,
self.state_callback, self.pub_callback)
result = self.parser.get_records(11)
self.assertEqual(result[-1], self.particle_z)
self.assertEqual(self.parser._state[StateKey.POSITION], 3232)
self.assertEqual(self.position_callback_value[StateKey.POSITION], 3232)
self.assertEqual(self.publish_callback_value[-1], self.particle_z)
def test_mid_state_start(self):
new_state = {StateKey.POSITION:2628}
self.stream_handle = StringIO(NutnrbParserUnitTestCase.TEST_DATA)
self.parser = NutnrbParser(self.config, new_state, self.stream_handle,
self.state_callback, self.pub_callback)
result = self.parser.get_records(1)
self.assert_result(result, 2716, self.particle_d)
def reset_parser(self, state = {}):
self.state_callback_values = []
self.publish_callback_values = []
self.stream_handle = StringIO(NutnrbParserUnitTestCase.TEST_DATA)
self.parser = NutnrbParser(self.config, state, self.stream_handle,
self.state_callback, self.pub_callback)
def test_set_state(self):
new_state = {StateKey.POSITION: 2544}
self.stream_handle = StringIO(NutnrbParserUnitTestCase.TEST_DATA)
self.parser = NutnrbParser(self.config, self.position, self.stream_handle,
self.state_callback, self.pub_callback)
result = self.parser.get_records(1)
self.assert_result(result, 2458, self.particle_a)
self.reset_parser(new_state)
self.parser.set_state(new_state) # seek to after particle_b
result = self.parser.get_records(1)
#
# If particles C and D appear, but the position is off
# it is because you are not consuming newlines in your
# DATA_REGEX pattern
#
self.assert_result(result, 2630, self.particle_c)
result = self.parser.get_records(1)
self.assert_result(result, 2716, self.particle_d)
| StarcoderdataPython |
6603453 | <gh_stars>0
# BMI classifier: read weight (kg) and height (m), print the category.
peso = float(input('Digite o seu peso: '))
altura = float(input('Digite sua altura: '))
imc = peso / altura ** 2

# (upper bound, label) pairs in ascending order; the first bound above the
# BMI decides the category, anything >= 40 is morbid obesity.
faixas = (
    (18.5, 'Abaixo do peso!'),
    (25, 'Peso ideal!'),
    (30, 'Sobrepeso!'),
    (40, 'Obesidade!'),
)
categoria = 'Obesidade mórbida!'
for limite, rotulo in faixas:
    if imc < limite:
        categoria = rotulo
        break
print(categoria)
| StarcoderdataPython |
12815642 | <filename>tests/stdlib/test_time.py
import os
from unittest import expectedFailure
from ..utils import TranspileTestCase
class TimeModuleTests(TranspileTestCase):
    """Exercise the `time` module in transpiled code.

    Each test pushes a small script through ``assertCodeExecution`` and
    compares the transpiled output against CPython's.  Members not yet
    supported by the transpiler are marked ``@expectedFailure``.
    """

    #######################################################
    # _STRUCT_TM_ITEMS
    @expectedFailure
    def test__STRUCT_TM_ITEMS(self):
        self.assertCodeExecution("""
            import time
            print(time._STRUCT_TM_ITEMS)
            print('Done.')
            """)

    #######################################################
    # __doc__
    @expectedFailure
    def test___doc__(self):
        self.assertCodeExecution("""
            import time
            print(time.__doc__)
            print('Done.')
            """)

    #######################################################
    # __file__
    @expectedFailure
    def test___file__(self):
        self.assertCodeExecution("""
            import time
            print(time.__file__)
            print('Done.')
            """)

    #######################################################
    # __loader__
    @expectedFailure
    def test___loader__(self):
        self.assertCodeExecution("""
            import time
            print(time.__loader__)
            print('Done.')
            """)

    #######################################################
    # __name__
    @expectedFailure
    def test___name__(self):
        self.assertCodeExecution("""
            import time
            print(time.__name__)
            print('Done.')
            """)

    #######################################################
    # __package__
    @expectedFailure
    def test___package__(self):
        self.assertCodeExecution("""
            import time
            print(time.__package__)
            print('Done.')
            """)

    #######################################################
    # __spec__
    @expectedFailure
    def test___spec__(self):
        self.assertCodeExecution("""
            import time
            print(time.__spec__)
            print('Done.')
            """)

    #######################################################
    # altzone
    @expectedFailure
    def test_altzone(self):
        self.assertCodeExecution("""
            import time
            print(time.altzone)
            print('Done.')
            """)

    #######################################################
    # asctime
    @expectedFailure
    def test_asctime(self):
        self.assertCodeExecution("""
            import time
            print(time.asctime())
            print('Done.')
            """)

    #######################################################
    # clock
    def test_clock(self):
        # Since we can't know exactly what CPU time will be used,
        # and CPU time will vary between implementations,
        # this test validates that the measured clock difference is a float < 0.1s
        # NOTE(review): time.clock was removed in Python 3.8; this test can
        # only run against interpreters that still provide it.
        sleepy_time = 1
        diff_offset = sleepy_time if os.name == 'nt' else 0
        # On Windows, time.clock includes the time spent in time.sleep
        # however on Unix it does not.
        self.assertCodeExecution("""
            import time
            start = time.clock()
            time.sleep({sleepy_time})
            end = time.clock()
            diff = end - start - {diff_offset}
            print(type(diff))
            print(diff < 0.1)
            print('Done.')
            """.format(sleepy_time=sleepy_time, diff_offset=diff_offset))

    #######################################################
    # ctime
    @expectedFailure
    def test_ctime(self):
        self.assertCodeExecution("""
            import time
            print(time.ctime())
            print('Done.')
            """)

    #######################################################
    # daylight
    @expectedFailure
    def test_daylight(self):
        self.assertCodeExecution("""
            import time
            print(time.daylight)
            print('Done.')
            """)

    #######################################################
    # get_clock_info
    @expectedFailure
    def test_get_clock_info(self):
        self.assertCodeExecution("""
            import time
            print(time.get_clock_info())
            print('Done.')
            """)

    #######################################################
    # gmtime
    @expectedFailure
    def test_gmtime(self):
        self.assertCodeExecution("""
            import time
            print(time.gmtime())
            print('Done.')
            """)

    #######################################################
    # localtime
    @expectedFailure
    def test_localtime(self):
        self.assertCodeExecution("""
            import time
            print(time.localtime())
            print('Done.')
            """)

    #######################################################
    # mktime
    @expectedFailure
    def test_mktime(self):
        self.assertCodeExecution("""
            import time
            print(time.mktime())
            print('Done.')
            """)

    #######################################################
    # monotonic
    @expectedFailure
    def test_monotonic(self):
        self.assertCodeExecution("""
            import time
            print(time.monotonic())
            print('Done.')
            """)

    #######################################################
    # perf_counter
    @expectedFailure
    def test_perf_counter(self):
        self.assertCodeExecution("""
            import time
            print(time.perf_counter())
            print('Done.')
            """)

    #######################################################
    # process_time
    @expectedFailure
    def test_process_time(self):
        self.assertCodeExecution("""
            import time
            print(time.process_time())
            print('Done.')
            """)

    #######################################################
    # sleep
    def test_sleep(self):
        self.assertCodeExecution("""
            import time
            print(time.sleep(1))
            print('Done.')
            """)

    #######################################################
    # strftime
    @expectedFailure
    def test_strftime(self):
        self.assertCodeExecution("""
            import time
            print(time.strftime())
            print('Done.')
            """)

    #######################################################
    # strptime
    @expectedFailure
    def test_strptime(self):
        self.assertCodeExecution("""
            import time
            print(time.strptime())
            print('Done.')
            """)

    #######################################################
    # struct_time
    @expectedFailure
    def test_struct_time(self):
        self.assertCodeExecution("""
            import time
            print(time.struct_time())
            print('Done.')
            """)

    #######################################################
    # time
    def test_time(self):
        # Divide by 10000 so both runs land in the same bucket despite the
        # seconds elapsing between the CPython and transpiled executions.
        self.assertCodeExecution("""
            import time
            print(int(time.time() / 10000))
            print('Done.')
            """)

    #######################################################
    # timezone
    @expectedFailure
    def test_timezone(self):
        self.assertCodeExecution("""
            import time
            print(time.timezone)
            print('Done.')
            """)

    #######################################################
    # tzname
    @expectedFailure
    def test_tzname(self):
        self.assertCodeExecution("""
            import time
            print(time.tzname)
            print('Done.')
            """)

    #######################################################
    # tzset
    @expectedFailure
    def test_tzset(self):
        self.assertCodeExecution("""
            import time
            print(time.tzset())
            print('Done.')
            """)
| StarcoderdataPython |
8133278 | import datetime
from numbers import Number
from typing import Callable, Iterator, TypeVar, Optional, Union, Type
E = TypeVar('E')
def _range(lower: E, step: Callable[[E], E], condition: Callable[[E], bool]) -> Iterator[E]:
"""
Very generic range function. Yields a stream from lower, incremented by the given step-function,
until the given condition is reached.
spec: takeWhile predicate (iterate step seed)
"""
current = lower
while condition(current):
yield current
current = step(current)
def _get_incrementor(type_: Type[E], step: Number = 1) -> Callable[[E], E]:
"""
Returns a function that increments values of the given type by the given step.
...basically `(+ 1)` (or `(+ step)` respectively), but can increment other types as well. Currently
the only other type supported is datetime.date which is incremented in days.
"""
if issubclass(type_, datetime.date):
# noinspection PyTypeChecker
return lambda value: value + datetime.timedelta(days=step)
return lambda value: value + step
def range_excl(lower: "E", upper: "E", step: "Optional[Union[Callable[[E], E], Number]]" = None) -> "Iterator[E]":
    """Yield values from *lower* (inclusive) up to *upper* (exclusive).

    *step* may be a number (default 1; dates advance in days) or a callable
    producing the next value from the current one.
    """
    if step is None or isinstance(step, Number):
        delta = 1 if step is None else step
        if isinstance(lower, datetime.date):
            advance = lambda value: value + datetime.timedelta(days=delta)
        else:
            advance = lambda value: value + delta
    else:
        advance = step
    current = lower
    while current < upper:
        yield current
        current = advance(current)
def range_incl(lower: "E", upper: "E", step: "Optional[Union[Callable[[E], E], Number]]" = None) -> "Iterator[E]":
    """Yield values from *lower* up to and including *upper*.

    *step* may be a number (default 1; dates advance in days) or a callable
    producing the next value from the current one.
    """
    if step is None or isinstance(step, Number):
        delta = 1 if step is None else step
        if isinstance(lower, datetime.date):
            advance = lambda value: value + datetime.timedelta(days=delta)
        else:
            advance = lambda value: value + delta
    else:
        advance = step
    current = lower
    while current <= upper:
        yield current
        current = advance(current)
| StarcoderdataPython |
232464 | <filename>python_binding/rdc_collectd.py
from RdcReader import RdcReader
from rdc_bootstrap import *
import collectd
# Default RDC fields to sample when the collectd configuration does not
# override `field_ids`: GPU memory usage/total, power, clock, utilization
# and temperature.
default_field_ids = [
    rdc_field_t.RDC_FI_GPU_MEMORY_USAGE,
    rdc_field_t.RDC_FI_GPU_MEMORY_TOTAL,
    rdc_field_t.RDC_FI_POWER_USAGE,
    rdc_field_t.RDC_FI_GPU_CLOCK,
    rdc_field_t.RDC_FI_GPU_UTIL,
    rdc_field_t.RDC_FI_GPU_TEMP
]
class CollectdReader(RdcReader):
    """RdcReader subclass that dispatches each sampled field value to collectd."""

    def __init__(self, rdc_ip_port, field_ids, update_freq, max_keep_age, max_keep_samples,
                 gpu_indexes, rdc_unauth):
        """
        rdc_ip_port: rdcd address, or None for embedded mode
        field_ids: RDC field ids to watch
        update_freq: sampling interval (microseconds)
        max_keep_age / max_keep_samples: sample retention limits per field
        gpu_indexes: GPU indexes to watch, or None for all GPUs
        rdc_unauth: when True, disable authentication (root_ca=None)
        """
        group_name = "rdc_collectd_plugin_group"
        field_group_name = "rdc_collectd_plugin_fieldgroup"
        # The two original branches differed only in passing root_ca=None for
        # unauthenticated mode; build the arguments once instead.
        kwargs = dict(
            ip_port=rdc_ip_port, field_ids=field_ids, update_freq=update_freq,
            max_keep_age=max_keep_age, max_keep_samples=max_keep_samples,
            gpu_indexes=gpu_indexes, field_group_name=field_group_name,
            gpu_group_name=group_name)
        if rdc_unauth:
            kwargs['root_ca'] = None
        RdcReader.__init__(self, **kwargs)

    def handle_field(self, gpu_index, value):
        """Dispatch one sampled field value to collectd as a gauge metric."""
        PLUGIN_NAME = "rdc_collectd"
        field_name = self.rdc_util.field_id_string(value.field_id).lower()
        collectd.Values(plugin=PLUGIN_NAME,
                        type_instance=field_name,
                        type="gauge",
                        values=[value.value.l_int]).dispatch()
# Module-level reader instance; created in config_func() and polled by
# read_callback().
g_reader = None
def config_func(config):
    """Parse the collectd <Plugin> block and create the global CollectdReader.

    Recognised keys (case-insensitive): embedded, rdc_ip_port, unauth,
    field_ids, update_freq (seconds), max_keep_age (seconds),
    max_keep_samples, gpu_indexes.
    """
    global g_reader

    embedded = False                   # enable embedded mode if no rdcd
    rdc_ip_port = "localhost:50051"    # rdcd listen address
    field_ids = default_field_ids      # the fields to watch
    update_freq = 10                   # 10 seconds
    max_keep_age = 3600                # 1 hour
    max_keep_samples = 1000            # max samples to keep for each field
    gpu_indexes = None                 # None means all GPUs
    unauth = False                     # enable auth by default

    # Parse configure parameters
    for node in config.children:
        key = node.key.lower()
        if len(node.values) <= 0:
            print("Missing value in configure " + key)
            continue
        val = node.values[0]
        if key == 'embedded' and val == True:
            embedded = True
        elif key == 'rdc_ip_port':
            rdc_ip_port = val
        elif key == 'unauth':
            unauth = val
        elif key == 'field_ids':
            field_ids = []
            for f in node.values:
                field_id = rdc.get_field_id_from_name(f)
                if field_id.value == rdc_field_t.RDC_FI_INVALID:
                    print("Invalid field '%s' will be ignored." % (f))
                else:
                    field_ids.append(field_id.value)
        elif key == 'update_freq':
            update_freq = int(val)
        elif key == 'max_keep_age':
            # BUG FIX: previously converted the existing default instead of
            # the configured value (int(max_keep_age)).
            max_keep_age = int(val)
        elif key == 'max_keep_samples':
            # BUG FIX: same defect as max_keep_age — must convert val.
            max_keep_samples = int(val)
        elif key == 'gpu_indexes':
            gpu_indexes = [int(x) for x in node.values]

    if embedded:
        # Embedded mode runs RDC in-process, so no rdcd address is used.
        rdc_ip_port = None

    g_reader = CollectdReader(rdc_ip_port, field_ids, update_freq * 1000000,
                              max_keep_age, max_keep_samples, gpu_indexes, unauth)
def read_callback(data=None):
    """collectd read callback: poll RDC and dispatch the latest field values."""
    global g_reader
    g_reader.process()
# Register this plugin's configuration and read hooks with the collectd daemon.
collectd.register_config(config_func)
collectd.register_read(read_callback)
| StarcoderdataPython |
4914341 | <gh_stars>0
#!/usr/bin/python3
# Importing Modules
# DATA analysis modules
import numpy as np
from scipy.optimize import curve_fit
from uncertainties import ufloat as uf
from uncertainties import unumpy as unp
# Custom functions
from timescan_plot import timescanplot
# Felion Modules
from FELion_definitions import ShowInfo, ErrorInfo, FELion_Toplevel
# Tkinter modules
from tkinter import Toplevel
# Matplotlib Modules
from matplotlib.widgets import Slider
from matplotlib.gridspec import GridSpec as grid
# Built-In modules
import os
def depletionPlot(files, location, power_n, dpi, parent):
    """Plot FELion depletion measurements in two Tk-embedded matplotlib windows.

    Window 1 shows the raw timescans for *files*; window 2 fits N_ON/N_OFF
    exponential depletion models and the relative-abundance curve, with
    interactive sliders to adjust the fit parameters.

    files: timescan filenames, expected in ON/OFF pairs (in order)
    location: directory containing the files (becomes the cwd)
    power_n: comma-separated string "power_on1,power_off1,...,n_shots"
    dpi: figure resolution
    parent: Tk parent widget

    Errors are reported to the user via ErrorInfo dialogs.
    """
    try:
        ####################################### Initialisation #######################################
        try:
            # Last entry is the shot count n; the rest are laser powers (mJ).
            # NOTE(review): np.float is deprecated/removed in modern NumPy;
            # switch to float when upgrading.
            power_n = np.asarray(power_n.split(','), dtype = np.float)
            power_values, n = power_n[:-1], power_n[-1]
        except Exception:
            return ErrorInfo("Error", 'Please enter the Power_on, power_off and n_shots value.')
        np.seterr(all='ignore')
        os.chdir(location)
        ####################################### END Initialisation #######################################
        ####################################### Tkinter figure 1 #######################################
        ## Embedding figure to tkinter Toplevel
        title_name1 = 'Timescan'
        root1 = Toplevel(parent)
        tk_widget1 = FELion_Toplevel(root1, title_name1, location)
        fig1, canvas1 = tk_widget1.figure(dpi, figsize=(15,5))
        axs0 = fig1.add_subplot(111)
        ####################################### PLOTTING DETAILS Timescan #######################################
        lg_fontsize = 15
        title_fontsize = 15
        lb_size = 15
        counts, stde = [], []
        for f in files:
            mass, iterations, t_res, t_b0, mean, error, time = timescanplot(f, location, dpi, parent, depletion = True)
            print('\nReturned.\n')
            axs0.errorbar(time, mean[0], yerr = error[0], label = '{}; {}:[{}], B0:{}ms, Res:{}'.format(f, mass[0], iterations[0], t_b0, t_res))
            # Drop the t=0 sample and convert time from ms to s for analysis.
            time = time[1:]/1000
            mean = mean[0][1:]
            stde.append(error[0][1:])
            counts.append(mean)
        counts, stde = np.array(counts), np.array(stde)
        axs0.set_title('Timescan', fontsize=title_fontsize)
        axs0.set_xlabel('time (s)', fontsize= lb_size)
        axs0.set_ylabel('Counts', fontsize= lb_size)
        axs0.grid(True)
        axs0.legend()
        ####################################### END Plotting details #######################################
        canvas1.draw() # drawing in the tkinter canvas: canvas drawing board
        ####################################### END Tkinter figure 1 #######################################
        ####################################### Tkinter figure 2 #######################################
        ## Embedding figure to tkinter Toplevel
        title_name2 = 'Depletion Scan'
        root2 = Toplevel(parent)
        tk_widget2 = FELion_Toplevel(root2, title_name2, location)
        fig2, canvas2 = tk_widget2.figure(dpi)
        spec = grid(ncols=2, nrows=1, figure=fig2)
        axs = fig2.add_subplot(spec[0, 0])
        depletion_plot = fig2.add_subplot(spec[0, 1])
        ####################################### PLOTTING DETAILS Depletion Scan #######################################
        # ON/OFF scans are identified by their minimum counts: the ON scan
        # (depleted) has the smaller minimum.
        on_off = []
        for i in counts:
            on_off.append(i.min())
        on_off = np.array(on_off)
        K_OFF, N = [], []
        K_OFF_err, N_err = [], []
        K_ON, Na0, Nn0 = [], [], []
        K_ON_err, Na0_err, Nn0_err = [], [], []
        for i in range(0, len(counts), 2):
            on = np.argmin(on_off)
            off = np.argmax(on_off)
            # making the error '0' value as very close to '0'
            #since div by it makes it easier while fitting parameters
            stde[on][stde[on]==0]=10e-10
            stde[off][stde[off]==0]=10e-10
            #depletion values; y-axis
            depletion_on, depletion_on_err = counts[on], stde[on]
            depletion_off, depletion_off_err = counts[off], stde[off]
            # power values; x-axis
            power_on = (power_values[i]*n*time)/1000. # divide by 1000 for mJ to J conversion
            power_off = (power_values[i+1]*n*time)/1000.
            power_max = power_values.max()*n*time.max()/1000.
            x = np.linspace(0, power_max, num=len(time))
            axs.errorbar(power_off, depletion_off, yerr = depletion_off_err, fmt='ok')
            axs.errorbar(power_on, depletion_on, yerr = depletion_on_err, fmt='ok')
            ### finding parameters for fitting
            # depletion off
            def N_OFF(x, K_OFF, N):
                return (N)*np.exp(-K_OFF*x)
            K_OFF_init, N_init = 0, depletion_off.max()
            N_increase_bound_by = 1000
            N_upper_bound = N_init + N_increase_bound_by
            pop_off, popc_off = curve_fit(
                N_OFF, power_off, depletion_off,
                sigma = stde[off],
                absolute_sigma = True,
                p0 = [K_OFF_init, N_init],
                bounds = [(-np.inf, 0), (np.inf, N_upper_bound)]
            )
            perr_off = np.sqrt(np.diag(popc_off))
            # off fitting variables
            K_OFF.append(pop_off[0])
            N.append(pop_off[1])
            K_OFF_err.append(perr_off[0])
            N_err.append(perr_off[1])
            # depletion on
            def N_ON(X, Na0, Nn0, K_ON):
                x, K_OFF = X
                return Na0*np.exp(-K_ON*x)*np.exp(-K_OFF*x) + Nn0*np.exp(-K_OFF*x)
            #K_ON_init, Na0_init, Nn0_init = ()
            X = (power_on, pop_off[0])
            pop_on, popc_on = curve_fit(
                N_ON, X, depletion_on,
                sigma = stde[on],
                absolute_sigma = True,
                #p0 = [Na0_init, Nn0_init, K_ON_init]
                bounds = ([0,0,-np.inf], [pop_off[1], pop_off[1], np.inf])
            )
            perr_on = np.sqrt(np.diag(popc_on))
            #on fitting variables
            Na0.append(pop_on[0])
            Nn0.append(pop_on[1])
            K_ON.append(pop_on[2])
            Na0_err.append(perr_on[0])
            Nn0_err.append(perr_on[1])
            K_ON_err.append(perr_on[2])
        # Wrap the fit results as uncertainties arrays for error propagation.
        uK_OFF, uN = unp.uarray(K_OFF, K_OFF_err), unp.uarray(N, N_err)
        uK_ON, uNa0, uNn0 = unp.uarray(K_ON, K_ON_err), unp.uarray(Na0, Na0_err) , unp.uarray(Nn0, Nn0_err)
        def Depletion(X, A):
            x, K_ON = X
            return A*(1-np.exp(-K_ON*x))
        uy_OFF = lambda x, uN, uK_OFF: uN*unp.exp(-uK_OFF*x)
        uy_ON = lambda x, uNa0, uNn0, uK_OFF, uK_ON : uNa0*unp.exp(-uK_ON*x)*unp.exp(-uK_OFF*x) + uNn0*unp.exp(-uK_OFF*x)
        A, A_err = [], []
        for i in range(len(N)):
            udepletion = 1 - uy_ON(x, uNa0[i], uNn0[i], uK_OFF[i], uK_ON[i])/uy_OFF(x, uN[i], uK_OFF[i])
            depletion, depletion_error = unp.nominal_values(udepletion), unp.std_devs(udepletion)
            #fitting for depletion
            X = (x, K_ON[i])
            pop_depletion, poc_depletion = curve_fit(
                Depletion, X, depletion,
                sigma = depletion_error,
                absolute_sigma = True
            )
            A.append(pop_depletion[0])
            perr_A = np.sqrt(np.diag(poc_depletion))
            A_err.append(perr_A[0])
        uA = unp.uarray(A, A_err)
        def plot(i, l):
            # Draw the i-th fit (curves + legend) and build its slider group
            # at horizontal offset l; returns the widgets so they stay alive.
            # off plotting
            y_off0 = N_OFF(x, K_OFF[i], N[i])
            g_off0, = axs.plot(x, y_off0, label = 'N_OFF: [{:.2f}mJ], K_OFF={:.2fP}/J, N={:.2fP}'.format(power_values[i+1], uK_OFF[i], uN[i]))
            # on plotting
            y_on0 = N_ON((x, K_OFF[i]), Na0[i], Nn0[i], K_ON[i])
            g_on0, = axs.plot(x, y_on0, label = 'N_ON: [{:.2f}mJ], K_ON={:.2fP}/J, N={:.2fP}, Na0={:.2fP}, Nn0={:.2fP}'.format(power_values[i], uK_ON[i], uNa0[i]+uNn0[i], uNa0[i], uNn0[i]))
            # deletion plot
            udepletion_new = 1 - uy_ON(x, uNa0[i], uNn0[i], uK_OFF[i], uK_ON[i])/uy_OFF(x, uN[i], uK_OFF[i])
            depletion_new, depletion_error_new = unp.nominal_values(udepletion_new), unp.std_devs(udepletion_new)
            depletion0, = depletion_plot.plot(x, depletion_new, '--')
            depletion_fitted = Depletion(X, A[i])
            depletion1, = depletion_plot.plot(x, depletion_fitted,
                label = 'A = {:.2fP}, K_ON = {:.2fP}/J'.format(uA[i], uK_ON[i])
            )
            # controlling fitting parameters
            axcolor = 'lightgoldenrodyellow'
            koff_g = fig2.add_axes([l, 0.12, 0.2, 0.015], facecolor=axcolor) #[left, bottom, width, height]
            n_g = fig2.add_axes([l, 0.10, 0.2, 0.015], facecolor=axcolor)
            kon_g = fig2.add_axes([l, 0.08, 0.2, 0.015], facecolor=axcolor)
            na_g = fig2.add_axes([l, 0.06, 0.2, 0.015], facecolor=axcolor)
            nn_g = fig2.add_axes([l, 0.04, 0.2, 0.015], facecolor=axcolor)
            koff_slider = Slider(koff_g, '$K_{OFF}$', 0, K_OFF[i]+10, valinit = K_OFF[i])
            n_slider = Slider(n_g, 'N', 0, N[i]+(N[i]/2), valinit = N[i])
            kon_slider = Slider(kon_g, '$K_{ON}$', 0, K_ON[i]+10, valinit = K_ON[i])
            na_slider = Slider(na_g, '$Na_0$', 0, Na0[i]+(Na0[i]/2), valinit = Na0[i])
            nn_slider = Slider(nn_g, '$Nn_0$', 0, Nn0[i]+(Nn0[i]/2), valinit = Nn0[i])
            def update(val):
                # Slider callback: re-evaluate curves and refit A with the
                # current slider values, then update the plot in place.
                koff = koff_slider.val
                ukoff = uf(koff, K_OFF_err[i])
                n = n_slider.val
                un = uf(n, N_err[i])
                kon = kon_slider.val
                ukon = uf(kon, K_ON_err[i])
                na = na_slider.val
                una = uf(na, Na0_err[i])
                nn = nn_slider.val
                unn = uf(nn, Nn0_err[i])
                yoff = N_OFF(x, koff, n)
                g_off0.set_ydata(yoff)
                yon = N_ON((x, koff), na, nn, kon)
                g_on0.set_ydata(yon)
                # depletion
                udepletion_new1 = 1 - uy_ON(x, una, unn, ukoff, ukon)/uy_OFF(x, un, ukoff)
                depletion_new1, depletion_error_new1 = unp.nominal_values(udepletion_new1), unp.std_devs(udepletion_new1)
                depletion0.set_ydata(depletion_new1)
                X = (x, kon)
                pop_depletion, poc_depletion = curve_fit(
                    Depletion, X , depletion_new1,
                    sigma = depletion_error_new1,
                    absolute_sigma = True
                )
                A_new1 = pop_depletion[0]
                perr = np.sqrt(np.diag(poc_depletion))[0]
                uA_new1 = uf(A_new1 , perr)
                depletion_fitted_new = Depletion(X, A_new1)
                depletion1.set_ydata(depletion_fitted_new)
                k = i*2
                legend.get_texts()[k].set_text('N_OFF: [{:.2f}mJ], K_OFF={:.2fP}/J, N={:.2fP}'.format(power_values[i+1], ukoff, un))
                legend.get_texts()[k+1].set_text('N_ON: [{:.2f}mJ], K_ON={:.2fP}/J, N={:.2fP}, Na0={:.2fP}, Nn0={:.2fP}'.format(power_values[i], ukon, una+unn, una, unn))
                depletion_legend.get_texts()[i].set_text('A = {:.2fP}, K_ON = {:.2fP}/J'.format(uA_new1, ukon))
                canvas2.draw_idle()
                return fig2
            koff_slider.on_changed(update)
            n_slider.on_changed(update)
            kon_slider.on_changed(update)
            na_slider.on_changed(update)
            nn_slider.on_changed(update)
            return koff_slider, n_slider, kon_slider, na_slider, nn_slider, koff_g, n_g, kon_g, na_g, nn_g, fig2
        widget_position = l = 0.05
        # Keep references to the returned sliders/axes so matplotlib callbacks
        # are not garbage-collected.
        for i in range(len(N)):
            koff_slider, n_slider, kon_slider, na_slider, nn_slider, koff_g, n_g, kon_g, na_g, nn_g, fig2 = plot(i, l)
            l += 0.25
        ### setting labels
        # title_depletion1 = '$N_{ON}(ntE)=N_{a0}e^{-k_{on}ntE} + N_{n0}e^{-k_{off}ntE}$ ;\t$N_{OFF}(ntE)=(N)e^{-k_{off}ntE}$ ; $N = N_{a0}+ N_{n0}$'
        # axs.set_title(title_depletion1, fontsize=title_fontsize)
        axs.set_xlabel('$n * t * E (Joule)$', fontsize= lb_size)
        axs.set_ylabel('Counts', fontsize= lb_size)
        axs.grid(True)
        legend = axs.legend(loc = 'upper right', bbox_to_anchor = (2, -0.1))
        depletion_plot.grid(True)
        depletion_legend = depletion_plot.legend(loc = 'lower right', fontsize=lg_fontsize)
        depletion_plot.set_xlabel('$n * t * E (Joule)$', fontsize= lb_size)
        depletion_plot.set_ylabel('Relative abundance of active isomer', fontsize= lb_size)
        depletion_plot.set_title('$D(ntE) = 1-N_{ON}/N_{OFF}$ fitted with $D(ntE) = A(1-e^{K_{ON}*ntE})$', fontsize = title_fontsize)
        ####################################### END Plotting details #######################################
        canvas2.draw() # drawing in the tkinter canvas: canvas drawing board
        ####################################### END Tkinter figure 2 #######################################
    except Exception as e:
        ErrorInfo("ERROR", e)
| StarcoderdataPython |
3477160 | <filename>temp_sikdan.py
def hooseng_temp(check_date):
    """Return the hard-coded cafeteria menu list for the given date.

    check_date: date string in 'MMDD' form (e.g. '0416').

    Each entry is a display-ready multi-line string (corner name and hours,
    dishes, and price).  Dates without menu data return an empty list.
    """
    if check_date == '0416':
        menu_list = [
            """
뚝배기 11:00~18:30\n
돌솥치즈부대찌개+라면사리
쌀밥
탕수육
깍두기
요거타임\n
가격 : 3,500
""",
            """
일품1 10:30~18:30\n
비스트로등심돈까스
아채스프
비트샐러드/드레싱
깍두기\n
가격 : 2,700
""",
            """
일품2 10:30~18:30\n
대학생협치즈돈까스
아채스프
비트샐러드/드레싱
웨지감자
깍두기
요구르트\n
가격 : 3,500
""",
            """
석식 17:30~18:30\n
소고기김치만두뚝배기
쌀밥
멸치볶음
도시락김
깍두기\n
가격 : 2,500
"""
        ]
    elif check_date == '0417':
        menu_list = [
            """
뚝배기 11:00~18:30\n
*개교기념일 특식*
돌솥반계탕
쌀밥
부추겉절이
깍두기
요구르트\n
가격 : 3,500
""",
            """
일품1 10:30~18:30\n
삼겹살데리아끼덮밥
미역국
양상추사과샐러드
깍두기\n
가격 : 2,700
""",
            """
석식 17:30~18:30\n
미소라멘
가쓰오주먹밥
아채춘권
깍두기\n
가격 : 2,700
"""
        ]
    elif check_date == '0418':
        menu_list = [
            """
뚝배기 11:00~18:30\n
둔육떡찜뚝배기
쌀밥
비엔나야채볶음
미역무순초무침
배추김치\n
가격 : 2,500
""",
            """
일품1 10:30~18:30\n
함박오므라이스
우동장국
미니치즈함박
치즈조랑떡샐러드
배추김치
요구르트\n
가격 : 3,500
""",
            """
일품2 10:30~18:30\n
돈까스오므라이스
우동장국
돈까스
치즈조랑떡샐러드
배추김치
요구르트\n
가격 : 3,500
""",
            """
석식 17:30~18:30\n
소고기우거지탕
쌀밥
탕수육
배추김치\n
가격 : 2,500
"""
        ]
    elif check_date == '0419':
        menu_list = [
            """
뚝배기 11:00~18:30\n
돌솥날치알치즈김치비빔밥
유부된장국
깐풍기
시금치나물
배추김치
엑티비아\n
가격 : 3,500
""",
            """
일품1 10:30~18:30\n
고구마돈까스
옥수수스프
치커리샐러드/사우전아일앤드드레싱
배추김치\n
가격 : 2,700
""",
            """
일품2 10:30~18:30\n
비스트로왕돈까스
옥수수스프
웨지감자&칠리소스
치커리샐러드/사우전아일랜드드레싱
배추김치
요구르트\n
가격 : 3,500
""",
        ]
    elif check_date == '0420':
        menu_list = []
    elif check_date == '0421':
        menu_list = []
    else:
        # BUG FIX: an unrecognised date previously left menu_list undefined
        # and raised NameError at the return; treat it as "no menu data".
        menu_list = []
    print(check_date)  # debug trace, kept for parity with existing behaviour
    return menu_list
| StarcoderdataPython |
96608 | <gh_stars>1000+
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Unit tests for Amazon DynamoDB batching code example.
"""
import time
import unittest.mock
from botocore.exceptions import ClientError
import pytest
import dynamo_batching
@pytest.mark.parametrize('error_code,stop_on_method', [
    (None, None),
    ('TestException', 'stub_create_table')])
def test_create_table(
        make_stubber, make_unique_name, stub_runner, error_code, stop_on_method):
    """Verify create_table creates the table and propagates client errors."""
    dyn_stubber = make_stubber(dynamo_batching.dynamodb.meta.client)
    table_name = make_unique_name('table-')
    # Composite primary key: numeric hash key plus string range key.
    schema = [
        {'name': 'hash_item', 'type': 'N', 'key_type': 'HASH'},
        {'name': 'range_item', 'type': 'S', 'key_type': 'RANGE'}]

    with stub_runner(error_code, stop_on_method) as runner:
        runner.add(
            dyn_stubber.stub_create_table, table_name, schema,
            {'read': 10, 'write': 10})
        # create_table waits for table existence via describe_table.
        runner.add(dyn_stubber.stub_describe_table, table_name)

    if error_code is None:
        got_table = dynamo_batching.create_table(table_name, schema)
        assert got_table.name == table_name
    else:
        with pytest.raises(ClientError) as exc_info:
            dynamo_batching.create_table(table_name, schema)
        assert exc_info.value.response['Error']['Code'] == error_code
def test_do_batch_get(make_stubber, monkeypatch):
    """Verify do_batch_get retries unprocessed keys and returns the merged items."""
    dyn_stubber = make_stubber(dynamo_batching.dynamodb.meta.client)
    item_count = 5
    request_keys = {
        'test-table1': {
            'Keys': [{'test': {'S': f'test-{index}'}} for index in range(item_count)]},
        'test-table2': {
            'Keys': [{'test': {'S': f'test-{index}'}} for index in range(item_count)]}
    }
    # NOTE(review): the inner braces make this a dict comprehension with a
    # constant key, collapsing to a single {'S': 'test-4'} entry rather than a
    # list of items like request_keys — probably intended as
    # [{'test': {'S': f'test-{index}'}} for index in range(item_count)].
    # The assertions below are self-consistent either way.
    response_items = {
        'test-table1':
            [{'test': {'S': f'test-{index}' for index in range(item_count)}}],
        'test-table2':
            [{'test': {'S': f'test-{index}' for index in range(item_count)}}],
    }

    # Avoid real sleeping during the retry backoff.
    monkeypatch.setattr(time, 'sleep', lambda x: None)

    # First response reports everything unprocessed; second returns the items.
    dyn_stubber.stub_batch_get_item(request_keys, unprocessed_keys=request_keys)
    dyn_stubber.stub_batch_get_item(request_keys, response_items=response_items)

    got_data = dynamo_batching.do_batch_get(request_keys)
    for key in request_keys:
        assert got_data[key] == response_items[key]
@pytest.mark.parametrize(
    'item_count,error_code',
    [(0, None),
     (10, None),
     (25, None),
     (100, None),
     (13, 'TestException')])
def test_fill_table(make_stubber, item_count, error_code):
    """Verify fill_table writes items in batches of at most 25 and raises on error."""
    dyn_stubber = make_stubber(dynamo_batching.dynamodb.meta.client)
    table = dynamo_batching.dynamodb.Table('test-table')
    table_data = [{'test': f'test-{index}'} for index in range(item_count)]
    max_batch_size = 25  # Amazon DynamoDB limit

    # Stub one batch_write_item call per expected chunk of up to 25 items.
    data_index = 0
    while data_index < item_count:
        dyn_stubber.stub_batch_write_item({
            table.name: [{
                'PutRequest': {'Item': item}}
                for item in table_data[data_index:data_index+max_batch_size]]
        }, error_code=error_code)
        data_index += max_batch_size

    if error_code is None:
        dynamo_batching.fill_table(table, table_data)
    else:
        with pytest.raises(ClientError) as exc_info:
            dynamo_batching.fill_table(table, table_data)
        assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize(
    'item_count,error_code',
    [(10, None),
     (500, None),
     (dynamo_batching.MAX_GET_SIZE, None),
     (13, 'TestException')])
def test_get_batch_data(monkeypatch, item_count, error_code):
    """Verify get_batch_data builds batch keys for both tables and returns the data."""
    movie_table = unittest.mock.MagicMock()
    movie_table.name = 'movie-test'
    movie_list = [(index, f'title-{index}') for index in range(item_count)]
    actor_table = unittest.mock.MagicMock()
    actor_table.name = 'actor-test'
    actor_list = [f'actor-{index}' for index in range(item_count)]
    test_data = {movie_table.name: movie_list, actor_table.name: actor_list}

    def mock_do_batch_get(batch_keys):
        # Simulated AWS failure path for the error parametrization.
        if error_code is not None:
            raise ClientError({'Error': {'Code': error_code}}, 'test_op')
        # One key per input item must have been requested for each table.
        assert len(batch_keys[movie_table.name]['Keys']) == len(movie_list)
        assert len(batch_keys[actor_table.name]['Keys']) == len(actor_list)
        return test_data

    monkeypatch.setattr(dynamo_batching, 'do_batch_get', mock_do_batch_get)

    if error_code is None:
        got_data = dynamo_batching.get_batch_data(
            movie_table, movie_list, actor_table, actor_list)
        assert got_data == test_data
    else:
        with pytest.raises(ClientError) as exc_info:
            dynamo_batching.get_batch_data(
                movie_table, movie_list, actor_table, actor_list)
        assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('item_count,error_code,stop_on_method', [
    (20, None, None),
    (10, 'TestException', 'stub_create_table'),
    (10, 'TestException', 'stub_batch_write_item'),
    ])
def test_archive_movies(
        make_stubber, stub_runner, item_count, error_code, stop_on_method):
    """Verify archive_movies copies items to a new archive table, retrying once,
    then deletes them from the source table."""
    dyn_stubber = make_stubber(dynamo_batching.dynamodb.meta.client)
    movie_table = dynamo_batching.dynamodb.Table('movie-test')
    movie_list = [
        {'year': index, 'title': f'title-{index}'} for index in range(item_count)]
    table_schema = [
        {'name': 'year', 'type': 'N', 'key_type': 'HASH'},
        {'name': 'title', 'type': 'S', 'key_type': 'RANGE'}]
    archive_table_name = f'{movie_table.name}-archive'

    with stub_runner(error_code, stop_on_method) as runner:
        # archive_movies reads the source schema, creates the archive table,
        # and waits for it to exist.
        runner.add(
            dyn_stubber.stub_describe_table, movie_table.name, schema=table_schema,
            provisioned_throughput={'ReadCapacityUnits': 10, 'WriteCapacityUnits': 10})
        runner.add(
            dyn_stubber.stub_create_table, archive_table_name, table_schema,
            {'read': 10, 'write': 10})
        runner.add(dyn_stubber.stub_describe_table, archive_table_name)
        # First put attempt fails with ValidationException; archive_movies is
        # expected to retry, succeed, then delete from the source table.
        runner.add(
            dyn_stubber.stub_batch_write_item, {
                archive_table_name: [{
                    'PutRequest': {'Item': item}} for item in movie_list]},
            error_code='ValidationException')
        runner.add(
            dyn_stubber.stub_batch_write_item, {
                archive_table_name: [{
                    'PutRequest': {'Item': item}} for item in movie_list]})
        runner.add(
            dyn_stubber.stub_batch_write_item, {
                movie_table.name: [{
                    'DeleteRequest': {'Key': item}} for item in movie_list]})

    if error_code is None:
        got_table = dynamo_batching.archive_movies(movie_table, movie_list)
        assert got_table.name == archive_table_name
    else:
        with pytest.raises(ClientError) as exc_info:
            dynamo_batching.archive_movies(movie_table, movie_list)
        assert exc_info.value.response['Error']['Code'] == error_code
| StarcoderdataPython |
27374 | """
one agent chooses an action, says it. other agent does it. both get a point if right
this file was forked from mll/discrete_bottleneck_discrete_input.py
"""
import torch
import torch.nn.functional as F
from torch import nn, optim
# from envs.world3c import World
from ulfs import alive_sieve, rl_common
from ulfs.stats import Stats
from ulfs.stochastic_trajectory import StochasticTrajectory
from ulfs.lexicon_recorder import LexiconRecorder
from ulfs.runner_base_v1 import RunnerBase
# this is just for display to human, cos reading 'badccd' gets annoying after a while :P
# this is somewhat based on how kirby 2001 does this
# we can randomize these potentially
# Syllable inventory used to render discrete symbols as pronounceable text for
# human display; loosely based on Kirby (2001), see the note above.
phonemes = [
    'ba',
    'bo',
    'bu',
    'bi',
    'be',
    'to',
    'ti',
    'ta',
    'te',
    'tu',
    'ra',
    're',
    'ri',
    'ru',
    'ro',
    'la',
    'le',
    'li',
    'lo',
    'lu',
    'ga',
    'ge',
    'gi',
    'go',
    'gu',
    'ma',
    'me',
    'mu',
    'mi',
    'mo'
]
class AgentOneActionSelector(nn.Module):
    """Chooses an action from a single learned categorical distribution.

    There is no input conditioning and no explicit memory: the module holds one
    learnable score vector over the action space.  RL rewards (combined with
    entropy regularisation applied by the trainer) push this distribution
    around — initially it will likely become spiky, then flatten.

    Design note (from the original author): richer models were considered —
    an LSTM over an action/reward history, symbolic normalisation of action
    sequences so that permuted symbols compare equal, data augmentation by
    permuting symbols, or meta-learning the schedule — but were deliberately
    deferred in favour of this simplest possible parameterisation.
    """

    def __init__(self, num_actions):
        """
        num_actions: size of the discrete action space.
        """
        # Initialise nn.Module bookkeeping before registering parameters.
        super().__init__()
        self.num_actions = num_actions
        # Unnormalised per-action scores, softmax-ed in forward().
        self.action_distribution = nn.Parameter(torch.zeros(1, num_actions))
        self.action_distribution.data.fill_(1 / num_actions)

    def forward(self, batch_size):
        """Draw one stochastic action per batch element from the shared distribution.

        Returns the sample object produced by rl_common.draw_categorical_sample.
        """
        # dim=-1 normalises over the action axis; the previous implicit-dim
        # softmax is deprecated and ambiguous for 2-D inputs.
        probs = F.softmax(self.action_distribution, dim=-1).expand(
            batch_size, self.num_actions)
        s = rl_common.draw_categorical_sample(
            action_probs=probs, batch_idxes=None)
        return s
class AgentOneLM(nn.Module):
    """
    takes in a discrete action (1-in-k), converts to utterance

    The action embedding is used as the initial recurrent state; tokens are
    then generated autoregressively until the terminator token 0 is emitted
    or utterance_max is reached.  Finished sequences are dropped from the
    batch each step via an "alive sieve" (see alive_sieve module).
    """
    def __init__(self, p, embedding_size, utterance_max, vocab_size, num_actions):
        """
        Note that vocab_size excludes terminator character 0

        Args:
            p: params object; p.rnn_type selects the recurrent cell type.
            embedding_size: size of token/action embeddings and RNN state.
            utterance_max: hard cap on generated utterance length.
            vocab_size: number of non-terminator tokens.
            num_actions: size of the discrete action space.
        """
        self.embedding_size = embedding_size
        self.utterance_max = utterance_max
        self.num_actions = num_actions
        super().__init__()
        # h1: embeds the input action into the initial hidden state.
        self.h1 = nn.Embedding(num_actions, embedding_size)
        # d2e "discrete to embed": token embedding (+1 for terminator 0).
        self.d2e = nn.Embedding(vocab_size + 1, embedding_size)
        RNNCell = getattr(nn, f'{p.rnn_type}Cell')
        self.rnn = RNNCell(embedding_size, embedding_size)
        # e2d "embed to discrete": projects state to token logits (+1 for 0).
        self.e2d = nn.Linear(embedding_size, vocab_size + 1)

    def forward(self, actions, global_idxes):
        """Generate one utterance per action.

        actions might have been sieved upstream; global_idxes too, but its
        contents are the *global* batch indexes of each row.

        Returns a dict with 'stochastic_trajectory' (sampled steps, for
        REINFORCE), 'utterance' (LongTensor [batch, utterance_max], 0-padded)
        and 'utterance_lens'.
        """
        batch_size = actions.size()[0]
        x = self.h1(actions)
        state = x
        global_idxes = global_idxes.clone()
        # note that this sieve might start off smaller than the global batch_size
        sieve = alive_sieve.AliveSieve(batch_size=batch_size, enable_cuda=x.is_cuda)
        type_constr = torch.cuda if x.is_cuda else torch
        last_token = type_constr.LongTensor(batch_size).fill_(0)
        utterance = type_constr.LongTensor(batch_size, self.utterance_max).fill_(0)
        # N_outer might not be the full episode batch size, but a subset
        N_outer = type_constr.LongTensor(batch_size).fill_(self.utterance_max)
        stochastic_trajectory = StochasticTrajectory()
        for t in range(self.utterance_max):
            emb = self.d2e(last_token)
            state = self.rnn(emb, state)
            token_logits = self.e2d(state)
            token_probs = F.softmax(token_logits, dim=-1)
            if self.training:
                # Sample tokens (recorded for the REINFORCE loss).
                s = rl_common.draw_categorical_sample(
                    action_probs=token_probs, batch_idxes=global_idxes[sieve.global_idxes])
                stochastic_trajectory.append_stochastic_sample(s=s)
                token = s.actions.view(-1)
            else:
                # Greedy (argmax) decoding at eval time.
                _, token = token_probs.max(-1)
            # Scatter this step's tokens back into the full-size buffer.
            utterance[:, t][sieve.global_idxes] = token
            last_token = token
            # Token 0 terminates a sequence.
            sieve.mark_dead(last_token == 0)
            sieve.set_global_dead(N_outer, t)
            if sieve.all_dead():
                break
            # Keep only still-alive rows for the next timestep.
            state = state[sieve.alive_idxes]
            last_token = last_token[sieve.alive_idxes]
            sieve.self_sieve_()
        res = {
            'stochastic_trajectory': stochastic_trajectory,
            'utterance': utterance,
            'utterance_lens': N_outer
        }
        return res
class AgentTwo(nn.Module):
    """Listener agent: reads an utterance and predicts the original action."""

    def __init__(self, p, embedding_size, vocab_size, num_actions):
        """
        - input: utterance
        - output: action

        Args:
            p: params object; p.rnn_type selects the recurrent cell type.
            embedding_size: token embedding / RNN state size.
            vocab_size: number of non-terminator tokens (0 is the terminator).
            num_actions: size of the discrete action space to predict over.
        """
        super().__init__()
        self.num_actions = num_actions
        self.embedding_size = embedding_size
        self.d2e = nn.Embedding(vocab_size + 1, embedding_size)
        RNNCell = getattr(nn, f'{p.rnn_type}Cell')
        self.rnn = RNNCell(embedding_size, embedding_size)
        self.h1 = nn.Linear(embedding_size, num_actions)

    def forward(self, utterance, global_idxes):
        """
        utterance etc might be sieved, which is why we receive global_idxes
        alive_masks will then create subsets of this already-sieved set

        Returns the stochastic sample over predicted actions.
        """
        batch_size = utterance.size()[0]
        utterance_max = utterance.size()[1]
        type_constr = torch.cuda if utterance.is_cuda else torch
        sieve = alive_sieve.AliveSieve(batch_size=batch_size, enable_cuda=utterance.is_cuda)
        state = type_constr.FloatTensor(batch_size, self.embedding_size).fill_(0)
        # output_state keeps, per sequence, its state at termination time.
        output_state = state.clone()
        for t in range(utterance_max):
            emb = self.d2e(utterance[:, t])
            state = self.rnn(emb, state)
            output_state[sieve.global_idxes] = state
            # Token 0 terminates a sequence; drop finished rows.
            sieve.mark_dead(utterance[:, t] == 0)
            if sieve.all_dead():
                break
            utterance = utterance[sieve.alive_idxes]
            state = state[sieve.alive_idxes]
            sieve.self_sieve_()
        state = output_state
        action_logits = self.h1(state)
        action_probs = F.softmax(action_logits, dim=-1)
        s = rl_common.draw_categorical_sample(
            action_probs=action_probs, batch_idxes=global_idxes)
        return s
def run_episode(actions, one, two, utterance_len_reg, enable_cuda, render=False):
    """Play one speaker/listener round.

    Agent `one` encodes each action in `actions` as an utterance; agent
    `two` decodes the utterance back into an action.

    Note: utterance_len_reg, enable_cuda and render are currently unused
    here; length regularisation is applied by the caller on the rewards.
    """
    batch_size = actions.size()[0]
    # 0..batch_size-1 as int64 (equivalent to the fill/cumsum trick).
    global_idxes = torch.arange(batch_size, dtype=torch.long)
    speaker_out = one(actions=actions, global_idxes=global_idxes)
    listener_sample = two(
        utterance=speaker_out['utterance'], global_idxes=global_idxes)
    return {
        'one_stochastic_trajectory': speaker_out['stochastic_trajectory'],
        'two_s': listener_sample,
        'utterances': speaker_out['utterance'],
        'utterances_lens': speaker_out['utterance_lens'],
        'stats': Stats([]),
    }
class Runner(RunnerBase):
    """Training/eval loop for the action -> utterance -> action game.

    Three trainable modules:
      - action_selector: picks which action the speaker must communicate
      - one (AgentOneLM): encodes the chosen action as a discrete utterance
      - two (AgentTwo): decodes the utterance back into an action
    All three are trained with REINFORCE plus entropy regularization,
    against a moving-average reward baseline.
    """

    def __init__(self):
        super().__init__(
            save_as_statedict_keys=['action_selector', 'one', 'two', 'opt_one', 'opt_two'],
            additional_save_keys=['baseline'],
            step_key='episode'
        )

    def setup(self, p):
        """Instantiate agents, optimizers and stat accumulators from params p."""
        num_actions = p.num_actions
        self.lexicon_recorder = LexiconRecorder(num_actions=num_actions)
        self.test_lexicon_recorder = LexiconRecorder(num_actions=num_actions)
        self.action_selector = AgentOneActionSelector(num_actions=num_actions)
        # Bug fix: AgentOneLM and AgentTwo require the params object `p`
        # as their first constructor argument (they read p.rnn_type); it was
        # previously omitted, so these calls raised TypeError.
        self.one = AgentOneLM(
            p=p, embedding_size=p.embedding_size, vocab_size=p.vocab_size,
            utterance_max=p.utterance_max, num_actions=num_actions)
        self.two = AgentTwo(
            p=p, embedding_size=p.embedding_size, vocab_size=p.vocab_size,
            num_actions=num_actions)
        if p.enable_cuda:
            self.one = self.one.cuda()
            self.two = self.two.cuda()
            self.action_selector = self.action_selector.cuda()
        Opt = getattr(optim, p.opt)
        self.opt_action_selector = Opt(lr=0.001, params=self.action_selector.parameters())
        self.opt_one = Opt(lr=0.001, params=self.one.parameters())
        self.opt_two = Opt(lr=0.001, params=self.two.parameters())
        self.stats = Stats([
            'episodes_count',
            'baseline_sum',
            'train_len_sum',
            'train_len_count',
            'train_acc_sum',
            'train_rewards_sum',
            'test_acc_sum',
            'test_len_sum',
            'test_len_count',
            'test_rewards_sum'
        ])
        # Exponential-moving-average reward baseline (variance reduction).
        self.baseline = 0

    def step(self, p):
        """Run one training episode and one greedy eval episode; log stats."""
        render = self.should_render()
        stats = self.stats
        # dopamine per action is initially 1, decreases a bit each time we
        # succeed on the action; we gradually top it up over time too.
        # Deliberately hand-engineered curiosity stand-in; currently computed
        # but not yet folded into the reward (see --boredom-reg).
        dopamine_per_action = torch.ones(p.num_actions, dtype=torch.float32)
        # Sample which actions the speaker must communicate this episode.
        s_actions_in = self.action_selector(batch_size=p.batch_size)
        actions_in = s_actions_in.actions
        if p.enable_cuda:
            actions_in = actions_in.cuda()
        # ---- training episode (stochastic decoding) ----
        self.one.train()
        self.two.train()
        episode_result = run_episode(
            actions=actions_in,
            one=self.one, two=self.two,
            render=render, utterance_len_reg=p.utterance_len_reg,
            enable_cuda=p.enable_cuda)
        utterances = episode_result['utterances']
        utterances_lens = episode_result['utterances_lens']
        one_stochastic_trajectory = episode_result['one_stochastic_trajectory']
        two_s = episode_result['two_s']
        self.lexicon_recorder.record(
            action_probs_l=[two_s.action_probs], utterances_by_t=[utterances],
            utterance_lens_by_t=[utterances_lens])
        stats.train_len_sum += utterances_lens.sum().item()
        stats.train_len_count += len(utterances_lens)
        stats.episodes_count += 1
        # Reward: 1 for a correctly-decoded action, 0 otherwise; empty
        # utterances earn nothing; longer utterances are penalized; the net
        # reward is floored at 0.
        correct_mask = two_s.actions == actions_in
        rewards = correct_mask.float()
        zero_length_idxes = (utterances_lens == 0).nonzero().view(-1).long()
        rewards[zero_length_idxes] = 0
        rewards -= utterances_lens.float() * p.utterance_len_reg
        rewards = rewards.clamp(min=0)
        # Update the moving-average baseline, then normalize the advantage
        # by the reward standard deviation (when non-zero).
        self.baseline = 0.7 * self.baseline + 0.3 * rewards.mean().item()
        rewards_std = rewards.detach().std().item()
        baselined_rewards = (rewards - self.baseline)
        if rewards_std > 0:
            baselined_rewards = baselined_rewards / rewards_std
        dopamine_per_action = (dopamine_per_action + 0.1).clamp(max=1.0, min=0.1)
        acc = correct_mask.float().mean().item()
        stats.train_acc_sum += acc
        stats.train_rewards_sum += rewards.mean().item()
        # REINFORCE losses plus entropy bonuses for all three modules.
        reinforce_loss_action_selector = s_actions_in.calc_loss(baselined_rewards)
        reinforce_loss_one = one_stochastic_trajectory.calc_loss(baselined_rewards)
        reinforce_loss_two = two_s.calc_loss(baselined_rewards)
        ent_loss_action_selector = - p.actions_ent_reg * s_actions_in.entropy
        ent_loss_one = - p.ent_reg * one_stochastic_trajectory.entropy
        ent_loss_two = - p.ent_reg * two_s.entropy
        loss_action_selector = reinforce_loss_action_selector + ent_loss_action_selector
        loss_one = reinforce_loss_one + ent_loss_one
        loss_two = reinforce_loss_two + ent_loss_two
        self.opt_action_selector.zero_grad()
        loss_action_selector.backward()
        self.opt_action_selector.step()
        self.opt_one.zero_grad()
        loss_one.backward()
        self.opt_one.step()
        self.opt_two.zero_grad()
        loss_two.backward()
        self.opt_two.step()
        stats.baseline_sum += self.baseline
        # ---- eval episode (greedy decoding, same input actions) ----
        self.one.eval()
        self.two.eval()
        episode_result = run_episode(
            actions=actions_in,
            one=self.one, two=self.two,
            render=render, utterance_len_reg=p.utterance_len_reg,
            enable_cuda=p.enable_cuda)
        utterances = episode_result['utterances']
        utterances_lens = episode_result['utterances_lens']
        two_s = episode_result['two_s']
        self.test_lexicon_recorder.record(
            action_probs_l=[two_s.action_probs], utterances_by_t=[utterances],
            utterance_lens_by_t=[utterances_lens])
        # Note: test reward uses the +1/-1 scheme (train uses 0/1).
        test_rewards = (two_s.actions == actions_in).float() * 2 - 1
        test_rewards -= utterances_lens.float() * p.utterance_len_reg
        test_acc = (two_s.actions == actions_in).float().mean().item()
        stats.test_acc_sum += test_acc
        stats.test_len_sum += utterances_lens.sum().item()
        stats.test_len_count += len(utterances_lens)
        stats.test_rewards_sum += test_rewards.mean().item()
        if render:
            lex_stats = self.lexicon_recorder.calc_stats()
            test_lex_stats = self.test_lexicon_recorder.calc_stats()
            print('')
            print('rewards[:16]', rewards[:16])
            self.test_lexicon_recorder.print_lexicon()
            self.lexicon_recorder.reset()
            self.test_lexicon_recorder.reset()
            log_dict = {
                'baseline': stats.baseline_sum / stats.episodes_count,
                'train_acc': stats.train_acc_sum / stats.episodes_count,
                'train_reward': stats.train_rewards_sum / stats.episodes_count,
                'train_utt_len': stats.train_len_sum / stats.train_len_count,
                'train_lex_size': lex_stats['total_unique'],
                'test_reward': stats.test_rewards_sum / stats.episodes_count,
                'test_lex_size': test_lex_stats['total_unique'],
                'test_utt_len': stats.test_len_sum / stats.test_len_count,
                'test_acc': stats.test_acc_sum / stats.episodes_count,
                'actions_ent': s_actions_in.entropy.mean().item()
            }
            for k, v in lex_stats.items():
                log_dict[k] = v
            self.print_and_log(
                log_dict,
                formatstr='e={episode} '
                          'b={baseline:.3f} '
                          '| train '
                          'len {train_utt_len:.2f} '
                          'acc {train_acc:.3f} '
                          'r {train_reward:.3f} '
                          'lex_size {train_lex_size} '
                          '| test '
                          'len {test_utt_len:.2f} '
                          'acc {test_acc:.3f} '
                          'r {test_reward:.3f} '
                          'lex_size {test_lex_size} '
                          'ent {actions_ent:.3f} '
            )
            stats.reset()
if __name__ == '__main__':
    runner = Runner()
    # Command-line parameters, declared data-style and registered in order.
    _PARAMS = [
        ('--embedding-size', dict(type=int, default=50)),
        ('--num-actions', dict(type=int, default=32)),
        ('--utterance-max', dict(type=int, default=10)),
        ('--utterance-len-reg', dict(
            type=float, default=0.01,
            help='how much to penalize longer utterances')),
        ('--ent-reg', dict(type=float, default=0.2)),
        ('--actions-ent-reg', dict(type=float, default=0.2)),
        ('--boredom-reg', dict(
            type=float, default=0.1,
            help='less dopamine for repeated identical successes (I guess this is similar to count-based :/ )')),
        ('--vocab-size', dict(type=int, default=2, help='excludes terminator')),
        ('--batch-size', dict(type=int, default=128)),
        ('--opt', dict(type=str, default='Adam')),
        ('--rnn-type', dict(type=str, default='GRU')),
    ]
    for flag, kwargs in _PARAMS:
        runner.add_param(flag, **kwargs)
    runner.parse_args()
    runner.setup_base()
    runner.run_base()
| StarcoderdataPython |
1836618 | <reponame>CyberFlameGO/wikidetox<gh_stars>10-100
r"""Dataflow Main.
Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with the
License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-------------------------------------------------------------------------------
Dataflow Main
A dataflow pipeline to ingest the Wikipedia dump from 7zipped xml files to json.
Run with:
python dataflow_main.py --setup_file ./setup.py
Args:
ingestFrom: choose from the three options : {wikipedia, local, cloud}:
- wikipedia: performs the downloading job from Wikipedia, run with: [ python
dataflow_main.py --setup_file ./setup.py --ingestFrom=wikipedia \ --download
--language=YourLanguage --dumpdate=YourDumpdate \
--blobPrefix=YourCloudBucket --project=YourGoogleCloudProject \
--bucket=TemporaryFileBucket ]
- local: Tests the pipeline locally, run the code with [ python
dataflow_main.py --setup_file ./setup.py --ingestFrom=local \
--localStorage=YourLocalStorage --testmode --output=YourOutputStorage \
--project=YourGoogleCloudProject --bucket=TemporaryFileBucket ]
- cloud: Reads from downloaded bz2 files on cloud, performs the ingestion job,
run the code with [ python dataflow_main.py --setup_file ./setup.py
--ingestFrom=cloud \
--output=gs://bucket/YourOutputStorage --blobPrefix=YourCloudBucket \
--project=YourGoogleCloudProject --bucket=TemporaryFileBucket ]
output: the data storage where you want to store the ingested results
language: the language of the wikipedia data you want to extract, e.g. en, fr,
zh
dumpdate: the dumpdate of the wikipedia data, e.g. latest
testmode: if turned on, the pipeline runs on DirectRunner.
localStorage: the location of the local test file.
download: if turned on, the pipeline only performs downloading job from
Wikipedia.
bucket: the cloud storage bucket (gs://thispartonly/not/this).
"""
from __future__ import division
from __future__ import print_function
import argparse
import bz2
import datetime
import json
import logging
import os
import re
import six
import sys
import time
import apache_beam as beam
from wikiconv.ingest_revisions.ingest_utils import wikipedia_revisions_ingester
from google.cloud import storage
LOCAL_STORAGE = 'file'
MEMORY_THERESHOLD = 1000000
def run(known_args, pipeline_args, sections, prefix):
    """Main entry point; defines and runs the ingestion pipeline.

    Args:
      known_args: parsed script-specific flags (project, bucket, language...).
      pipeline_args: remaining argv, forwarded to Beam (extended in place).
      sections: work items -- (mirror, filename) pairs when downloading,
        otherwise bare filenames already stored in the bucket.
      prefix: blob path prefix under the bucket for the raw dump files.
    """
    # In testmode, disable cloud storage backup and run on directRunner
    runner_name = 'DirectRunner' if known_args.testmode else 'DataflowRunner'
    pipeline_args.append('--runner={}'.format(runner_name))
    pipeline_args.extend([
        '--project={project}'.format(project=known_args.project),
        '--staging_location=gs://{bucket}/staging'.format(
            bucket=known_args.bucket),
        '--temp_location=gs://{bucket}/tmp'.format(bucket=known_args.bucket),
        '--job_name=ingest-latest-revisions-{lan}'.format(
            lan=known_args.language),
        '--num_workers=80',
    ])
    pipeline_options = beam.options.pipeline_options.PipelineOptions(
        pipeline_args)
    pipeline_options.view_as(
        beam.options.pipeline_options.SetupOptions).save_main_session = True
    with beam.Pipeline(options=pipeline_options) as p:
        dumps = p | 'GetDataDumpList' >> beam.Create(sections)
        if known_args.download:
            # Download-only mode: fetch each dump chunk into cloud storage.
            _ = dumps | 'DownloadDataDumps' >> beam.ParDo(
                DownloadDataDumps(), known_args.bucket, prefix)
        else:
            # Ingestion mode: parse revisions, shard by year, write json.
            _ = (dumps
                 | 'Ingestion' >> beam.ParDo(
                     WriteDecompressedFile(), known_args.bucket,
                     prefix, known_args.ingest_from)
                 | 'AddGroupByKey' >> beam.Map(lambda x: (x['year'], x))
                 | 'ShardByYear' >> beam.GroupByKey()
                 | 'WriteToStorage' >> beam.ParDo(
                     WriteToStorage(), known_args.output,
                     known_args.dumpdate, known_args.language))
class DownloadDataDumps(beam.DoFn):
    """Download the bulk wikipedia xml dumps from mirrors."""

    def start_bundle(self):
        # One storage client per bundle; clients are created here rather
        # than in __init__ because DoFn instances must be picklable.
        self._storage_client = storage.Client()

    def process(self, element, bucket, blob_prefix):
        """Downloads a data dump file, store in cloud storage.

        Args:
          element: beam input record, a (mirror, chunk_name) pair.
          bucket: a cloud storage bucket name.
          blob_prefix: the path to the filename directory in the bucket.

        Yields:
          the cloud storage location.
        """
        mirror, chunk_name = element
        logging.info('USERLOG: Download data dump %s to store in cloud storage.',
                     chunk_name)
        # Fetch from the Wikipedia mirror to local disk, upload, clean up.
        source_url = mirror + '/' + chunk_name
        six.moves.urllib.request.urlretrieve(source_url, chunk_name)
        destination_blob = self._storage_client.get_bucket(bucket).blob(
            os.path.join(blob_prefix, chunk_name))
        destination_blob.upload_from_filename(chunk_name)
        os.remove(chunk_name)
        yield chunk_name
class WriteDecompressedFile(beam.DoFn):
    """Decompress wikipedia file and creates json records."""

    def start_bundle(self):
        # Client is created lazily in process() because it is not picklable.
        self._storage_client = None

    def __init__(self):
        # Beam metric: total revisions emitted.
        self.processed_revisions = beam.metrics.Metrics.counter(
            self.__class__, 'processed_revisions')
        # Beam metric: revisions belonging to "large" pages, i.e. pages
        # whose serialized revisions total >= MEMORY_THERESHOLD bytes.
        self.large_page_revision_count = beam.metrics.Metrics.counter(
            self.__class__, 'large_page_revision_cnt')

    def process(self, element, bucket, blob_prefix, ingest_from):
        """Ingests the xml dump into json, returns the json records."""
        # Fetch the compressed dump chunk from cloud storage, unless the
        # file is already available locally.
        chunk_name = element
        logging.info('USERLOG: Running ingestion process on %s', chunk_name)
        if ingest_from != 'local':
            if not self._storage_client:
                self._storage_client = storage.Client()
            self._storage_client.get_bucket(bucket).blob(
                os.path.join(blob_prefix,
                             chunk_name)).download_to_filename(chunk_name)
        # Running ingestion on the xml file.
        last_revision = 'None'
        last_completed = time.time()
        cur_page_id = None
        page_size = 0
        cur_page_revision_cnt = 0
        i = 0
        for i, content in enumerate(
            wikipedia_revisions_ingester.parse_stream(bz2.BZ2File(chunk_name))):
            self.processed_revisions.inc()
            # Add the year field for sharding.
            # NOTE(review): isocalendar()[0] is the ISO *week-based* year,
            # which can differ from the calendar year around Jan 1 / Dec 31
            # -- confirm this is intended for the sharding key.
            dt = datetime.datetime.strptime(content['timestamp'],
                                            '%Y-%m-%dT%H:%M:%SZ')
            content['year'] = dt.isocalendar()[0]
            last_revision = content['rev_id']
            yield content
            logging.info('CHUNK %s: revision %s ingested, time elapsed: %g.',
                         chunk_name, last_revision,
                         time.time() - last_completed)
            last_completed = time.time()
            # Track the serialized size of the current page's revisions so
            # oversized pages can be counted; the count is flushed when the
            # stream moves on to the next page.
            if content['page_id'] == cur_page_id:
                page_size += len(json.dumps(content))
                cur_page_revision_cnt += 1
            else:
                if page_size >= MEMORY_THERESHOLD:
                    self.large_page_revision_count.inc(cur_page_revision_cnt)
                cur_page_id = content['page_id']
                page_size = len(json.dumps(content))
                cur_page_revision_cnt = 1
        # Flush the size-tracking for the final page in the file.
        if page_size >= MEMORY_THERESHOLD:
            self.large_page_revision_count.inc(cur_page_revision_cnt)
        if ingest_from != 'local':
            os.remove(chunk_name)
        # NOTE(review): `i` is the last enumerate index, so this reports
        # one less than the number of emitted records (and 0 for both empty
        # and single-revision files).
        logging.info(
            'USERLOG: Ingestion on file %s complete! %s lines emitted, last_revision %s',
            chunk_name, i, last_revision)
class WriteToStorage(beam.DoFn):
    """Writes each (year, revisions) group to cloud storage as a
    revisions-NNNNNN.json file of newline-delimited JSON records."""

    def process(self, element, outputdir, dumpdate, language):
        year_key, revisions = element
        year = int(year_key)
        # Output directory is keyed by dump date, language and year.
        directory = '{outputdir}/{date}-{lan}/date-{year}/'.format(
            outputdir=outputdir, date=dumpdate, lan=language, year=year)
        # Pick the first revisions-NNNNNN.json name not already taken.
        cnt = 0
        while True:
            write_path = os.path.join(
                directory, 'revisions-{cnt:06d}.json'.format(cnt=cnt))
            if not beam.io.filesystems.FileSystems.exists(write_path):
                break
            cnt += 1
        logging.info('USERLOG: Write to path %s.', write_path)
        outputfile = beam.io.filesystems.FileSystems.create(write_path)
        for record in revisions:
            outputfile.write(json.dumps(record) + '\n')
        outputfile.close()
class ParseDirectory(six.moves.html_parser.HTMLParser):
    """HTML parser that collects every href in a mirror directory listing.

    After feed(), the collected filenames are available via the `files`
    instance attribute (a list of href values).
    """

    def __init__(self):
        self.files = []
        six.moves.html_parser.HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        self.files.extend(attr[1] for attr in attrs if attr[0] == 'href')

    # The former `files(self)` accessor method was removed: the instance
    # attribute of the same name (set in __init__) shadowed it, so it was
    # unreachable on instances, and calling it would have failed anyway
    # (it returned the bound attribute, not a method result).
def directory(mirror):
    """Download the directory of files from the webpage.

    This is likely brittle based on the format of the particular mirror site.

    Args:
      mirror: url to load wikipedia data from.

    Returns:
      list of (mirror, filename) tuples for the bz2 full-history chunks.
    """
    listing_parser = ParseDirectory()
    listing = six.moves.urllib.request.urlopen(mirror)
    listing_parser.feed(listing.read().decode('utf-8'))
    # Keep only the XML meta-history bz2 chunks for this language.
    meta_history = re.compile(r'^[a-zA-Z-]+wiki-latest-pages-meta-history.*\.bz2$')
    return [(mirror, fname)
            for fname in listing_parser.files if meta_history.match(fname)]
def get_sections(bucket, blob_prefix):
    """Gets list of basenames from bucket that match a prefix.

    Args:
      bucket: a cloud storage bucket.
      blob_prefix: a path prefix within the storage bucket to scan.

    Returns:
      a list of file basenames (everything after the final '/').
    """
    blobs = storage.Client().get_bucket(bucket).list_blobs(prefix=blob_prefix)
    return [blob.name[blob.name.rfind('/') + 1:] for blob in blobs]
def main(argv=None):
    """Parse command-line flags and launch the download/ingestion pipeline."""
    if argv is None:
        # NOTE(review): argv is currently unused below; parse_known_args()
        # reads sys.argv directly.
        argv = sys.argv
    logging.getLogger().setLevel(logging.INFO)
    # Define parameters
    arg_parser = argparse.ArgumentParser()
    # Options: local, cloud
    arg_parser.add_argument(
        '--project', dest='project', help='Your google cloud project.')
    arg_parser.add_argument(
        '--bucket',
        dest='bucket',
        help='Your google cloud bucket for temporary and staging files.')
    arg_parser.add_argument('--ingestFrom', dest='ingest_from', default='none')
    arg_parser.add_argument('--download', dest='download', action='store_true')
    arg_parser.add_argument(
        '--output', dest='output', help='Specify the output storage in cloud.')
    arg_parser.add_argument(
        '--blobPrefix',
        dest='blob_prefix',
        help='Specify the prefix to assign to blobs for the raw downloads.')
    arg_parser.add_argument(
        '--language',
        dest='language',
        help='Specify the language of the Wiki Talk Page you want to ingest.')
    arg_parser.add_argument(
        '--dumpdate',
        dest='dumpdate',
        help='Specify the date of the Wikipedia data dump.')
    arg_parser.add_argument(
        '--localStorage',
        dest='localStorage',
        help='If ingest from local storage, please specify the location of the input file.'
    )
    arg_parser.add_argument('--testmode', dest='testmode', action='store_true')
    known_args, pipeline_args = arg_parser.parse_known_args()
    if known_args.download:
        # If specified downloading from Wikipedia: list the available
        # meta-history bz2 chunks from the dump-status manifest.
        dumpstatus_url = 'https://dumps.wikimedia.org/{lan}wiki/{date}/dumpstatus.json'.format(
            lan=known_args.language, date=known_args.dumpdate)
        response = six.moves.urllib.request.urlopen(dumpstatus_url)
        dumpstatus = json.loads(response.read())
        url = 'https://dumps.wikimedia.org/{lan}wiki/{date}'.format(
            lan=known_args.language, date=known_args.dumpdate)
        if 'files' not in dumpstatus['jobs']['metahistorybz2dump']:
            raise ValueError('Unable to find data for specifid date')
        sections = [(url, filename) for filename in dumpstatus['jobs']
                    ['metahistorybz2dump']['files'].keys()]
    # prefix is needed by both the download path and the cloud-ingestion
    # path, so it is computed unconditionally.
    prefix = 'raw-downloads/%s-%s' % (known_args.language, known_args.dumpdate)
    if known_args.ingest_from == 'cloud':
        sections = get_sections(known_args.bucket, prefix)
    if known_args.ingest_from == 'local':
        sections = [known_args.localStorage]
    # NOTE(review): if neither --download nor a recognized --ingestFrom is
    # given, `sections` is never assigned and the call below raises
    # NameError -- consider validating the flag combination explicitly.
    run(known_args, pipeline_args, sections, prefix)
if __name__ == '__main__':
    main()  # script entry point: parse flags and run the pipeline
| StarcoderdataPython |
1639603 | #coding:utf-8
import requests
import os
import lxml.html
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
# Create the (module-level) browser object shared by all functions below.
# browser = webdriver.PhantomJS(service_args=SERVICE_ARGS)
browser = webdriver.Chrome()
wait = WebDriverWait(browser, 10)  # wait at most 10s for pages to load
browser.set_window_size(1400, 900)  # size of the simulated browser window
# Fetch a page in the shared browser and return it as an lxml document.
def parser(url, param):
    """Load `url`, wait until an element matching CSS selector `param`
    is present, then return the rendered page parsed by lxml."""
    browser.get(url)
    wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, param)))
    page_source = browser.page_source
    return lxml.html.fromstring(page_source)
# Parse the main page http://huaban.com/boards/favorite/beauty/ and, via
# xpath, collect each board's URL and name, then download every board.
def get_main_url():
    """Crawl the category index page and download every board found on it."""
    print('打开主页收寻链接中...')
    try:
        doc = parser('http://huaban.com/boards/favorite/beauty/', '#waterfall')
        board_names = doc.xpath('//*[@id="waterfall"]/div/a[1]/div[2]/h3/text()')
        board_hrefs = doc.xpath('//*[@id="waterfall"]/div/a[1]/@href')
        for href, board_name in zip(board_hrefs, board_names):
            main_url = 'http://huaban.com' + href
            print('主页已找到' + main_url)
            # '*' is not a legal character in a Windows folder name.
            if '*' in board_name:
                board_name = board_name.replace('*', '')
            download(main_url, board_name)
    except Exception as e:
        print(e)
# img_url = doc.xpath('//*[@id="baidu_image_holder"]/a/img/@src')
# img_url2 = doc.xpath('//*[@id="baidu_image_holder"]/img/@src')
def download(main_url, fileName):
    """Download every pin image from one board page into image\\<fileName>.

    Best-effort: any failure on a single image (missing src, network error)
    is reported and skipped; a failure on the board page aborts the board.
    """
    print('------准备下载中------')
    try:
        doc = parser(main_url, '#waterfall')
        # Create the per-board output folder on first use.
        if not os.path.exists('image\\' + fileName):
            print('创建文件夹')
            os.makedirs('image\\' + fileName)
        link = doc.xpath('//*[@id="waterfall"]/div/a/@href')
        i = 0
        for item in link:
            i += 1
            # Each pin has its own detail page; the image src lives there.
            minor_url = 'http://huaban.com' + item
            doc = parser(minor_url, '#pin_view_page')
            # The image may appear under either of two layouts; try both.
            img_url = doc.xpath('//*[@id="baidu_image_holder"]/a/img/@src')
            img_url2 = doc.xpath('//*[@id="baidu_image_holder"]/img/@src')
            img_url += img_url2
            try:
                # img_url[0] raises IndexError when no image was found;
                # that (and network errors) is swallowed by the except below.
                url = 'http:' + str(img_url[0])
                print('正在下载' + str(i) + '张图片,地址' + url)
                r = requests.get(url)
                filename = 'image\\{}\\'.format(fileName) + str(i) + '.jpg'
                with open(filename, 'wb') as fo:
                    fo.write(r.content)
            except Exception:
                print('出错了!')
    except Exception:
        print('出错了!')
if __name__ == '__main__':
    get_main_url()  # entry point: crawl all boards and download their images
9612835 | from typing import Optional
import argparse
import spacy
import importlib
from thinc.api import require_gpu
from scispacy.data_util import read_full_med_mentions, read_ner_from_tsv
from scispacy.train_utils import evaluate_ner
def main(model_path: str, dataset: str, output_path: str, code: Optional[str], med_mentions_folder_path: Optional[str], gpu_id: Optional[int]):
    """Evaluate a spaCy NER model on MedMentions or on a tsv NER dataset.

    Args:
        model_path: path of the spaCy model to load.
        dataset: "medmentions-<train|dev|test>" or a path to a tsv file.
        output_path: where evaluate_ner dumps its results.
        code: optional path to python code to import before loading the model
            (e.g. custom pipeline components).
        med_mentions_folder_path: folder holding MedMentions (required when
            dataset starts with "medmentions").
        gpu_id: GPU to use; None or a negative value means CPU.

    Raises:
        ValueError: if the medmentions split name is not train/dev/test.
    """
    # `import importlib` alone does not guarantee the `util` submodule is
    # loaded; import it explicitly to avoid a latent AttributeError.
    import importlib.util

    if gpu_id is not None and gpu_id >= 0:
        require_gpu(gpu_id)

    if code is not None:
        # need to import code before loading a spacy model
        spec = importlib.util.spec_from_file_location("python_code", str(code))
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)

    nlp = spacy.load(model_path)
    if dataset.startswith("medmentions"):
        train_data, dev_data, test_data = read_full_med_mentions(med_mentions_folder_path, None, False)
        splits = {"train": train_data, "dev": dev_data, "test": test_data}
        data_split = dataset.split("-")[1]
        if data_split not in splits:
            # ValueError (not bare Exception) is the idiomatic type for a
            # bad argument value; callers catching Exception still see it.
            raise ValueError(f"Unrecognized split {data_split}")
        data = splits[data_split]
    else:
        data = read_ner_from_tsv(dataset)
    evaluate_ner(nlp, data, dump_path=output_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", type=str, help="Path to model to evaluate")
parser.add_argument("--dataset", type=str, help="medmentions-<train/dev/test>, or a tsv file to evalute")
parser.add_argument("--output_path", type=str, help="Path to write results to")
parser.add_argument("--code", type=str, default=None, help="Path to code to import before loading spacy model")
parser.add_argument("--med_mentions_folder_path", type=str, default=None, help="Path to the med mentions folder")
parser.add_argument("--gpu_id", type=int, default=-1, help="GPU id to use")
args = parser.parse_args()
main(args.model_path, args.dataset, args.output_path, args.code, args.med_mentions_folder_path, args.gpu_id) | StarcoderdataPython |
3218088 | import zmq
from queuer.topics import get_port_by_topic
# context = zmq.Context()
# socket = context.socket(zmq.SUB)
# socket.connect("tcp://localhost:5555")
class Subscriber:
    """ZeroMQ SUB-socket wrapper subscribing to a single topic.

    Topics are mapped to dedicated ports (see queuer.topics), so the ZMQ
    subscription filter itself is empty: every message arriving on the
    topic's port is accepted.
    """

    def __init__(self, topic):
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.SUB)
        port = get_port_by_topic(topic)
        self.socket.connect("tcp://localhost:{port}".format(port=port))
        # b"" is the empty subscription filter == receive everything
        # (equivalent to the original bytes("", encoding="ascii")).
        self.socket.setsockopt(zmq.SUBSCRIBE, b"")

    def receive_message(self):
        """Receive and deserialize one JSON message (blocking)."""
        return self.socket.recv_json()

    def receive_object(self):
        """Receive one pickled Python object (blocking)."""
        return self.socket.recv_pyobj()

    # Backwards-compatible aliases preserving the original (misspelled)
    # public method names.
    recieve_message = receive_message
    recieve_object = receive_object
| StarcoderdataPython |
11317836 | import os,sys
sys.path.append(r'./commonModule')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' #close tf debug info
import numpy as np
import argparse
from commonModule.ImageBase import *
from commonModule.mainImagePlot import plotImagList
from mainTrainning import loadModel
#--------------------------------------------------------------------------------------
#usgae: python predictSegmentation.py -s .\res\PennFudanPed\PNGImages\FudanPed00001.png
#--------------------------------------------------------------------------------------
def argCmdParse():
    """Build and run the command-line parser for this script.

    Returns the parsed namespace with attributes:
      source  -- path of the input image (-s/--source)
      dst     -- where to save the result (-d/--dst)
      compare -- flag enabling comparison mode (-c)
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-s', '--source', help='source image')
    arg_parser.add_argument('-d', '--dst', help='save iamge')
    arg_parser.add_argument('-c', dest='compare', action='store_true')
    return arg_parser.parse_args()
def preditImg(img, modelName = r'..\weights\trainPedSegmentation.h5'):
    """Run the segmentation model on a grayscale image and return a binary
    0/1 uint8 mask of shape (256, 256)."""
    model = loadModel(modelName)
    # The network expects a fixed 256x256 single-channel input.
    img = resizeImg(img, 256, 256)
    assert img.shape[0] == 256 and img.shape[1] == 256
    batch = img.reshape((-1, img.shape[0], img.shape[1], 1))
    mask = model.predict(batch)
    print('preditImg mask.shape=', type(mask), mask.shape)
    # Take the single prediction, drop the channel axis, and threshold.
    prediction = mask[0].reshape((mask[0].shape[0], mask[0].shape[1]))
    prediction = np.where(prediction > 0.5, 1, 0)
    return prediction.astype(np.uint8)
def processMaskImg(img, backColor=0):
    """Colorize an integer label mask (3-channel image whose first channel
    holds the labels) for display.

    Label 0 pixels are set to `backColor`; every other label is mapped
    through the color table (currently a single green for all labels).
    Vectorized with numpy instead of the original per-pixel double loop --
    identical output, dramatically faster.
    """
    H, W = getImgHW(img)
    chn = getImagChannel(img)
    cl = np.unique(img)
    print(cl, chn, H, W)
    # colors = np.array([[255,0,0],[0,255,0],[0,0,255]])  # 3 distinct colors
    colors = np.array([[0, 255, 0], [0, 255, 0], [0, 255, 0]])  # one color
    newImg = np.zeros_like(img)
    labels = img[:, :, 0]
    background = labels == 0
    newImg[background] = backColor
    # Index the color table by label value, exactly as the original loop did
    # (labels beyond the table length would raise IndexError there too).
    newImg[~background] = colors[labels[~background]]
    return newImg
def expandImageTo3chan(img):
    """Return a 3-channel uint8 copy of `img`; images that already have
    3 channels are returned unchanged."""
    if getImagChannel(img) == 3:
        return img
    flat = img.reshape((img.shape[0], img.shape[1]))
    out = np.zeros((flat.shape[0], flat.shape[1], 3), dtype=np.uint8)
    # Replicate the single plane into R, G and B.
    for channel in range(3):
        out[:, :, channel] = flat
    return out
def maskToOrignimalImg(img, mask):
    """add mask to color image (alpha-blend: img*1.0 + mask*0.8)"""
    if img.shape != mask.shape:
        # NOTE(review): despite the commented-out line below, it is the
        # *image* that gets expanded to 3 channels here -- this assumes the
        # mask is already 3-channel while img may be grayscale; confirm
        # against callers before changing.
        # mask = expandImageTo3chan(mask)
        img = expandImageTo3chan(img)
    return cv2.addWeighted(img, 1.0, mask, 0.8, 0)
def getPredictionMaskImg(img):
    """Predict a colorized segmentation mask for `img`, resized back to
    the input image's original dimensions."""
    H, W = getImgHW(img)
    # Light gaussian blur before prediction, matching training preprocessing.
    blurred = gaussianBlurImg(img.copy(), 3)
    mask = preditImg(grayImg(blurred))
    mask = expandImageTo3chan(mask)
    mask = processMaskImg(mask).astype(np.uint8)
    return resizeImg(mask, W, H)
def getPredictImg(img):
    """Return (prediction mask, original image blended with that mask)."""
    predicted_mask = getPredictionMaskImg(img.copy())
    blended = maskToOrignimalImg(img, predicted_mask)
    return predicted_mask, blended
def demonstratePrediction(file,gtMaskFile=None):
    """Plot the model prediction for one image, optionally next to its GT mask."""
    img = loadImg(file)
    pMask, pImg = getPredictImg(img)  # prediction mask + blended image
    have_gt = gtMaskFile is not None
    if have_gt:
        maskImg = processMaskImg(loadImg(gtMaskFile))
        mImg = maskToOrignimalImg(img, maskImg)
    panels = [(img, 'Original')]
    if have_gt:
        panels.append((maskImg, 'GT mask'))
    panels.append((pImg, 'Predict Segmentation'))
    if have_gt:
        panels.append((pMask, 'Predict Mask'))
    images = [image for image, _ in panels]
    labels = [label for _, label in panels]
    plotImagList(images, labels, gray=True, showticks=False)
def demonstrateGrayPrediction(file):
    """Plot the segmentation prediction for a grayscale image."""
    img = loadGrayImg(file)
    _, pImg = getPredictImg(img)
    images, labels = [img, pImg], ['Original', 'PredmaskImg']
    plotImagList(images, labels, gray=True, title='Segmentation prediction')
def comparePredict():
    """Compare the predicted mask against the ground-truth mask for one image.

    Prints shapes, unique values and pixel sums of both masks, plus the
    elementwise product sum (overlap count when both masks are 0/1).
    """
    file = r'.\res\PennFudanPed\PNGImages\FudanPed00001.png'
    mask = r'.\res\PennFudanPed\PedMasks\FudanPed00001_mask.png'
    img = loadGrayImg(file)
    gtMaskImg = loadGrayImg(mask)
    gtMaskImg = resizeImg(gtMaskImg,256,256)
    #infoImg(img)
    #infoImg(gtMaskImg)
    predMaskImg = preditImg(img)
    print('groundTrue=',gtMaskImg.shape, np.unique(gtMaskImg), np.sum(gtMaskImg))
    print('predMaskImg=',predMaskImg.shape, np.unique(predMaskImg), np.sum(predMaskImg))
    #comparison = np.where(gtMaskImg[np.where(gtMaskImg==1)] == predMaskImg)
    #comparison = np.where(gtMaskImg==1 and gtMaskImg == predMaskImg)
    # overlap measure: number of pixels set in both masks (for binary masks)
    comparison = np.sum(gtMaskImg*predMaskImg)
    print('eqaul=',comparison)
def getImageMask(file,maskeFile):
    """Load an image and its mask file; return (image, colored mask)."""
    image = loadImg(file)
    colored_mask = processMaskImg(loadImg(maskeFile), backColor=0)
    return image, colored_mask
def testCombing():
    """Plot the original image and its augmented variants next to their masks.

    Shows four (image, mask) pairs: original, cropped, flipped and
    color-adjusted versions of FudanPed00001.
    """
    file = r'.\res\PennFudanPed\PNGImages\FudanPed00001.png'
    maske = r'.\res\PennFudanPed\PedMasks\FudanPed00001_mask.png'
    crop_file = r'.\res\PennFudanPed\newImages\newMaskCropping\FudanPed00001_b_143_15_553_524.png'
    crop_mask = r'.\res\PennFudanPed\newImages\newMaskCroppingMask\FudanPed00001_b_143_15_553_524_mask.png'
    f_file=r'.\res\PennFudanPed\newImages\newMaskFlipping\FudanPed00001_flip.png'
    f_mask=r'.\res\PennFudanPed\newImages\newMaskFlippingMask\FudanPed00001_flip_mask.png'
    color_file = r'.\res\PennFudanPed\PNGImages\FudanPed00001alphaBeta_2.0_50.png'
    color_mask = r'.\res\PennFudanPed\PedMasks\FudanPed00001alphaBeta_2.0_50_mask.png'
    img,mask = getImageMask(file,maske)
    crop_img,crop_mask = getImageMask(crop_file,crop_mask)
    f_img,f_mask = getImageMask(f_file,f_mask)
    c_img,c_mask = getImageMask(color_file,color_mask)
    # interleave images and their masks for side-by-side plotting
    ls,nameList = [],[]
    ls.append(img),nameList.append('Orignial')
    ls.append(mask),nameList.append('mask')
    ls.append(crop_img),nameList.append('Cropping')
    ls.append(crop_mask),nameList.append('mask')
    ls.append(f_img),nameList.append('Flipping')
    ls.append(f_mask),nameList.append('mask')
    ls.append(c_img),nameList.append('Color change')
    ls.append(c_mask),nameList.append('mask')
    #ls.append(added_image),nameList.append('added_image')
    plotImagList(ls, nameList, gray=True, title='', showticks=False)
def main():
    """CLI entry point: compare masks (-c) or run a prediction demo."""
    arg = argCmdParse()
    file=arg.source
    dstFile = arg.dst
    print(file,dstFile)
    if arg.compare:
        comparePredict()
    else:
        # NOTE(review): ``if 0:`` is a hard-coded debug toggle; flip to 1 to
        # demo with a bundled image + ground-truth mask instead of arg.source.
        if 0:
            # file=r'.\res\PennFudanPed\newImages\trainImages\trainPNGImage\FudanPed00001_17_104_557_490.png'
            # maskFile=r'.\res\PennFudanPed\newImages\trainImages\trainPNGImageMask\FudanPed00001_17_104_557_490_mask.png'
            file=r'.\res\PennFudanPed\PNGImages\FudanPed00012.png'
            maskFile=r'.\res\PennFudanPed\PedMasks\FudanPed00012_mask.png'
            file=r'.\res\PennFudanPed\PNGImages\PennPed00068.png'
            maskFile=r'.\res\PennFudanPed\PedMasks\PennPed00068_mask.png'
            demonstratePrediction(file,maskFile)
        else:
            #file=r'.\res\7.jpg'
            #demonstrateGrayPrediction(file)
            demonstratePrediction(file)
if __name__=='__main__':
    main()
| StarcoderdataPython |
9775038 | <reponame>chromium/chromium
# python3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import unittest
from lib import compiler
class CompilerTestCase(unittest.TestCase):
    """Shared helpers for tests that filter rustc arch strings by regex."""
    def assertListSortedEqual(self, a, b, msg=None):
        # Order-insensitive list equality.  NOTE: sorts both lists in place.
        a.sort()
        b.sort()
        if msg:
            self.assertListEqual(a, b, msg=msg)
        else:
            self.assertListEqual(a, b)
    def matching_archs(self, matching: str) -> set[str]:
        """All known rustc arch strings matching the regex ``matching``."""
        return {
            arch
            for arch in compiler._RUSTC_ARCH_TO_BUILD_CONDITION
            if re.search(matching, arch)
        }
    def not_matching_archs(self, matching: str) -> set[str]:
        """The inverse of matching_archs()."""
        return {
            arch
            for arch in compiler._RUSTC_ARCH_TO_BUILD_CONDITION
            if not re.search(matching, arch)
        }
class TestGnConditions(CompilerTestCase):
    """Tests for BuildConditionSet.get_gn_conditions()."""
    def test_all_platforms(self):
        # A set covering every arch needs no GN condition at all.
        s = compiler.BuildConditionSet(compiler.ArchSet.ALL())
        self.assertListSortedEqual([], s.get_gn_conditions())
    def test_one_platform(self):
        # Each single arch maps to exactly its own build condition.
        for a in compiler.ArchSet.ALL().as_strings():
            s = compiler.BuildConditionSet(compiler.ArchSet(initial={a}))
            mode: compiler.BuildCondition = \
                compiler._RUSTC_ARCH_TO_BUILD_CONDITION[a]
            self.assertListSortedEqual([mode.gn_condition()],
                                       s.get_gn_conditions())
    def test_os(self):
        # One OS.
        for (matching, mode) in [
            (compiler._RUSTC_ARCH_MATCH_ANDROID,
             compiler.BuildCondition.ALL_ANDROID),
            (compiler._RUSTC_ARCH_MATCH_FUCHSIA,
             compiler.BuildCondition.ALL_FUCHSIA),
            (compiler._RUSTC_ARCH_MATCH_IOS, compiler.BuildCondition.ALL_IOS),
            (compiler._RUSTC_ARCH_MATCH_WINDOWS,
             compiler.BuildCondition.ALL_WINDOWS),
            (compiler._RUSTC_ARCH_MATCH_LINUX,
             compiler.BuildCondition.ALL_LINUX),
            (compiler._RUSTC_ARCH_MATCH_MAC, compiler.BuildCondition.ALL_MAC),
        ]:
            archs = self.matching_archs(matching)
            s = compiler.BuildConditionSet(compiler.ArchSet(initial=archs))
            cond = mode.gn_condition()
            self.assertListSortedEqual([cond],
                                       s.get_gn_conditions(),
                                       msg=repr(archs))
        # Two OSs.
        archs = self.matching_archs(compiler._RUSTC_ARCH_MATCH_WINDOWS + r"|" +
                                    compiler._RUSTC_ARCH_MATCH_MAC)
        s = compiler.BuildConditionSet(compiler.ArchSet(initial=archs))
        cond1 = compiler.BuildCondition.ALL_WINDOWS.gn_condition()
        cond2 = compiler.BuildCondition.ALL_MAC.gn_condition()
        self.assertListSortedEqual([cond1, cond2], s.get_gn_conditions())
        # All but one OS.
        for (matching_os, mode) in [
            (compiler._RUSTC_ARCH_MATCH_ANDROID,
             compiler.BuildCondition.NOT_ANDROID),
            (compiler._RUSTC_ARCH_MATCH_FUCHSIA,
             compiler.BuildCondition.NOT_FUCHSIA),
            (compiler._RUSTC_ARCH_MATCH_IOS, compiler.BuildCondition.NOT_IOS),
            (compiler._RUSTC_ARCH_MATCH_WINDOWS,
             compiler.BuildCondition.NOT_WINDOWS),
            (compiler._RUSTC_ARCH_MATCH_LINUX,
             compiler.BuildCondition.NOT_LINUX),
            (compiler._RUSTC_ARCH_MATCH_MAC, compiler.BuildCondition.NOT_MAC),
        ]:
            s = compiler.BuildConditionSet(
                compiler.ArchSet(initial=self.not_matching_archs(matching_os)))
            cond = mode.gn_condition()
            self.assertListSortedEqual([cond], s.get_gn_conditions())
    def test_one_cpu(self):
        # Each full CPU family collapses to a single CPU condition.
        for (matching, mode) in [
            (compiler._RUSTC_ARCH_MATCH_X86, compiler.BuildCondition.ALL_X86),
            (compiler._RUSTC_ARCH_MATCH_X64, compiler.BuildCondition.ALL_X64),
            (compiler._RUSTC_ARCH_MATCH_ARM32,
             compiler.BuildCondition.ALL_ARM32),
            (compiler._RUSTC_ARCH_MATCH_ARM64,
             compiler.BuildCondition.ALL_ARM64)
        ]:
            s = compiler.BuildConditionSet(
                compiler.ArchSet(initial=self.matching_archs(matching)))
            cond = mode.gn_condition()
            self.assertListSortedEqual([cond], s.get_gn_conditions())
    def test_combining_os_and_cpu(self):
        # One Cpu and one OS (with overlap).
        archs = self.matching_archs(compiler._RUSTC_ARCH_MATCH_LINUX + r"|" +
                                    compiler._RUSTC_ARCH_MATCH_X86)
        s = compiler.BuildConditionSet(compiler.ArchSet(initial=archs))
        cond1 = compiler.BuildCondition.ALL_LINUX.gn_condition()
        cond2 = compiler.BuildCondition.ALL_X86.gn_condition()
        self.assertListSortedEqual([cond1, cond2], s.get_gn_conditions())
        # One Cpu and one OS (without overlap).
        archs = self.matching_archs(compiler._RUSTC_ARCH_MATCH_MAC + r"|" +
                                    compiler._RUSTC_ARCH_MATCH_X86)
        s = compiler.BuildConditionSet(compiler.ArchSet(initial=archs))
        cond1 = compiler.BuildCondition.ALL_MAC.gn_condition()
        cond2 = compiler.BuildCondition.ALL_X86.gn_condition()
        self.assertListSortedEqual([cond1, cond2], s.get_gn_conditions())
    def test_invert(self):
        all = compiler.BuildConditionSet.ALL()
        none = compiler.BuildConditionSet.EMPTY()
        self.assertEqual(none, all.inverted())
        self.assertEqual(all, none.inverted())
        # Inverting a one-OS set must yield the conditions of all other archs.
        one = compiler.BuildConditionSet(
            compiler.ArchSet(
                initial=self.matching_archs(compiler._RUSTC_ARCH_MATCH_MAC)))
        the_rest = compiler.BuildConditionSet(
            compiler.ArchSet(initial=self.not_matching_archs(
                compiler._RUSTC_ARCH_MATCH_MAC)))
        self.assertListSortedEqual(one.inverted().get_gn_conditions(),
                                   the_rest.get_gn_conditions())
class TestCompiler(unittest.TestCase):
    """Sanity checks on the module-level ArchSet factory helpers."""
    def test_all_and_one(self):
        self.assertEqual(len(compiler.ArchSet.ALL().as_strings()),
                         len(compiler._RUSTC_ARCH_TO_BUILD_CONDITION))
        self.assertEqual(len(compiler.ArchSet.ONE()), 1)
class TestArchSet(CompilerTestCase):
    """Tests for the ArchSet container (construction, set ops, protocols)."""
    def test_construct(self):
        a = compiler.ArchSet(
            initial=self.matching_archs(compiler._RUSTC_ARCH_MATCH_ARM32))
        self.assertSetEqual({
            "armv7-linux-androideabi",
            "armv7-apple-ios",
        }, a.as_strings())
        a = compiler.ArchSet.EMPTY()
        self.assertSetEqual(set(), a.as_strings())
        # NOTE(review): the construction below repeats the first one verbatim;
        # possibly meant to exercise a different constructor -- confirm intent.
        a = compiler.ArchSet(
            initial=self.matching_archs(compiler._RUSTC_ARCH_MATCH_ARM32))
        self.assertSetEqual({
            "armv7-linux-androideabi",
            "armv7-apple-ios",
        }, a.as_strings())
        a = compiler.ArchSet(
            initial=self.matching_archs(compiler._RUSTC_ARCH_MATCH_ARM32))
        self.assertTrue(a.has_arch("armv7-linux-androideabi"))
        self.assertFalse(a.has_arch("i686-pc-windows-msvc"))
    def test_bool(self):
        # Truthiness follows emptiness.
        a = compiler.ArchSet.EMPTY()
        self.assertFalse(bool(a))
        a = compiler.ArchSet.ONE()
        self.assertTrue(bool(a))
        a = compiler.ArchSet.ALL()
        self.assertTrue(bool(a))
    def test_eq(self):
        self.assertEqual(compiler.ArchSet.EMPTY(), compiler.ArchSet.EMPTY())
        self.assertEqual(compiler.ArchSet.ONE(), compiler.ArchSet.ONE())
        self.assertEqual(compiler.ArchSet.ALL(), compiler.ArchSet.ALL())
    def test_len(self):
        self.assertEqual(len(compiler.ArchSet.EMPTY()), 0)
        self.assertEqual(len(compiler.ArchSet.ONE()), 1)
        self.assertEqual(len(compiler.ArchSet.ALL()),
                         len(compiler._RUSTC_ARCH_TO_BUILD_CONDITION))
    def test_add_archset(self):
        # Union with ALL always yields ALL, whatever the starting set.
        a = compiler.ArchSet.EMPTY()
        a.add_archset(compiler.ArchSet.ALL())
        self.assertEqual(a, compiler.ArchSet.ALL())
        a = compiler.ArchSet.ONE()
        a.add_archset(compiler.ArchSet.ALL())
        self.assertEqual(a, compiler.ArchSet.ALL())
        a = compiler.ArchSet.ALL()
        a.add_archset(compiler.ArchSet.ALL())
        self.assertEqual(a, compiler.ArchSet.ALL())
    def test_and(self):
        # Intersection via both the binary operator and in-place form.
        a = compiler.ArchSet.EMPTY()
        a = a & compiler.ArchSet.ALL()
        self.assertSetEqual(set(), a.as_strings())
        a = compiler.ArchSet.EMPTY()
        a &= compiler.ArchSet.ALL()
        self.assertSetEqual(set(), a.as_strings())
        a = compiler.ArchSet.EMPTY()
        a = a & compiler.ArchSet.EMPTY()
        self.assertSetEqual(set(), a.as_strings())
        a = compiler.ArchSet.EMPTY()
        a &= compiler.ArchSet.EMPTY()
        self.assertSetEqual(set(), a.as_strings())
        a = compiler.ArchSet.ALL()
        a = a & compiler.ArchSet.ALL()
        self.assertSetEqual(compiler.ArchSet.ALL().as_strings(), a.as_strings())
        a = compiler.ArchSet.ALL()
        a &= compiler.ArchSet.ALL()
        self.assertSetEqual(compiler.ArchSet.ALL().as_strings(), a.as_strings())
        a = compiler.ArchSet.ALL()
        a = a & compiler.ArchSet.ONE()
        self.assertSetEqual(compiler.ArchSet.ONE().as_strings(), a.as_strings())
        a = compiler.ArchSet.ALL()
        a &= compiler.ArchSet.ONE()
        self.assertSetEqual(compiler.ArchSet.ONE().as_strings(), a.as_strings())
        a = compiler.ArchSet.ALL()
        a = a & compiler.ArchSet.EMPTY()
        self.assertSetEqual(set(), a.as_strings())
        a = compiler.ArchSet.ALL()
        a &= compiler.ArchSet.EMPTY()
        self.assertSetEqual(set(), a.as_strings())
| StarcoderdataPython |
def build_shift_dict(shift):
    """Build a Caesar-cipher mapping dict for the given shift key.

    Keys 0-25 map to shifted uppercase letters and keys 26-51 map to
    shifted lowercase letters; each half wraps around within itself:
    new[k] = base[(k - shift) % 26] (offset by 26 for the lowercase half).

    shift: integer in [0, 26).  On invalid input an error message is
    printed and None is returned (preserving the original contract).
    """
    try:
        assert shift >= 0 and shift < 26
        assert type(shift) == int
    except AssertionError:
        print('Error, Shift key must be an integer from 0 to 26 excluding 26')
    except TypeError:
        # e.g. shift is not comparable with int (string, list, ...)
        print('An error has occured with the shift type')
    else:
        # keys 0-25 -> 'A'-'Z', keys 26-51 -> 'a'-'z'
        base_dict = {i: chr(ord('A') + i) for i in range(26)}
        base_dict.update({i + 26: chr(ord('a') + i) for i in range(26)})
        shifted_dict = {}
        for key in base_dict:
            if key < 26:
                # uppercase half: wrap within 0-25
                shifted_dict[key] = base_dict[(key - shift) % 26]
            else:
                # lowercase half: wrap within 26-51
                shifted_dict[key] = base_dict[(key - shift - 26) % 26 + 26]
        return shifted_dict.copy()
build_shift_dict(3)
| StarcoderdataPython |
6557411 | <gh_stars>1-10
"""
This problem was asked by Google.
Given an undirected graph represented as an adjacency matrix and an integer k,
write a function to determine whether each vertex in the graph can be colored
such that no two adjacent vertices share the same color using at most k colors.
"""
# creates a 2 object list out of each and adds None to the place where we'll mark the color of the node
def get_nodes(adj_mat):
    """Return [vertex_index, color] pairs, color initialized to None."""
    return [[vertex, None] for vertex in range(len(adj_mat))]
def check_nodes_filled(nodes):
    """True when every node has been assigned a (truthy) color."""
    return all(color for _, color in nodes)
def check_valid(curr_node, nodes, adj_mat):
    """True if no neighbor of ``curr_node`` already carries its color."""
    vertex, color = curr_node
    for neighbor, connected in enumerate(adj_mat[vertex]):
        if connected == 1 and nodes[neighbor][1] == color:
            return False
    return True
def add_color(nodes, k, adj_mat=None):
    """Recursively assign one of the colors in ``k`` to every node.

    nodes: list of [vertex, color] pairs (color is None when unassigned);
        mutated in place during the backtracking search.
    k: list of available colors.
    adj_mat: adjacency matrix to validate against.  Defaults to the
        module-level ``adjacency_mat`` for backward compatibility -- the
        original implementation always read that global, which broke any
        caller supplying a different matrix.
    Returns ``nodes`` (fully colored on success, partially reset on failure).
    """
    if adj_mat is None:
        adj_mat = adjacency_mat  # legacy global fallback
    # pick the first node that has no color yet
    current_node = None
    for node in nodes:
        if node[1] is None:
            current_node = node
            break
    # all nodes colored -> done
    if current_node is None:
        return nodes
    # try each color; backtrack (reset to None) when a branch fails
    for color in k:
        current_node[1] = color
        if check_valid(current_node, nodes, adj_mat):
            result = add_color(nodes, k, adj_mat)
            if check_nodes_filled(result):
                return result
        current_node[1] = None
    return nodes
def solve(adj_mat, k):
    """Attempt a k-coloring of ``adj_mat``; return (colored nodes, success).

    BUG FIX: the original built the node list from the module-level
    ``adjacency_mat`` global instead of the ``adj_mat`` argument.
    NOTE(review): add_color still validates against the global internally;
    both refer to the same matrix in this script -- confirm for library use.
    """
    result = add_color(get_nodes(adj_mat), k)
    return result, check_nodes_filled(result)
if __name__ == '__main__':
    """
    (a)---(b)
    | \/
    | /\
    | /  \
    (c)---(d)
    """
    # Adjacency matrix for the 4-vertex graph sketched above; row i lists
    # which vertices are connected to vertex i (1 = edge).
    adjacency_mat = [
        [0, 1, 1, 1],
        [1, 0, 1, 0],
        [1, 1, 0, 1],
        [1, 0, 1, 0]
    ]
    # try to 3-color the graph with red/green/blue
    print(solve(adjacency_mat, ['r', 'g', 'b']))
| StarcoderdataPython |
11777 | """
Environment for basic obstacle avoidance controlling a robotic arm from UR.
In this environment the obstacle is only moving up and down in a vertical line in front of the robot.
The goal is for the robot to stay within a predefined minimum distance to the moving obstacle.
When feasible the robot should continue to the original configuration,
otherwise wait for the obstacle to move away before proceeding
"""
import numpy as np
from typing import Tuple
from robo_gym_server_modules.robot_server.grpc_msgs.python import robot_server_pb2
from robo_gym.envs.simulation_wrapper import Simulation
from robo_gym.envs.ur.ur_base_avoidance_env import URBaseAvoidanceEnv
# Default joint configuration the robot should return to when it is safe.
# base, shoulder, elbow, wrist_1, wrist_2, wrist_3
JOINT_POSITIONS = [-1.57, -1.31, -1.31, -2.18, 1.57, 0.0]
DEBUG = True
# NOTE(review): comment says [cm] but the value is compared against
# np.linalg.norm of Cartesian coordinates -- units look like meters; confirm.
MINIMUM_DISTANCE = 0.3 # the distance [cm] the robot should keep to the obstacle
class BasicAvoidanceUR(URBaseAvoidanceEnv):
    """Universal Robots UR basic obstacle avoidance environment.

    Args:
        rs_address (str): Robot Server address. Formatted as 'ip:port'. Defaults to None.
        fix_base (bool): Wether or not the base joint stays fixed or is moveable. Defaults to False.
        fix_shoulder (bool): Wether or not the shoulder joint stays fixed or is moveable. Defaults to False.
        fix_elbow (bool): Wether or not the elbow joint stays fixed or is moveable. Defaults to False.
        fix_wrist_1 (bool): Wether or not the wrist 1 joint stays fixed or is moveable. Defaults to False.
        fix_wrist_2 (bool): Wether or not the wrist 2 joint stays fixed or is moveable. Defaults to False.
        fix_wrist_3 (bool): Wether or not the wrist 3 joint stays fixed or is moveable. Defaults to True.
        ur_model (str): determines which ur model will be used in the environment. Defaults to 'ur5'.
        include_polar_to_elbow (bool): determines wether or not the polar coordinates to the elbow joint are included in the state. Defaults to False.

    Attributes:
        ur (:obj:): Robot utilities object.
        client (:obj:str): Robot Server client.
        real_robot (bool): True if the environment is controlling a real robot.
    """
    max_episode_steps = 1000
    def _set_initial_robot_server_state(self, rs_state, fixed_object_position = None) -> robot_server_pb2.State:
        # Build the initial server state.  With a fixed obstacle position the
        # base-class implementation is used; otherwise the obstacle follows a
        # randomized vertical triangle wave in front of the robot.
        if fixed_object_position:
            state_msg = super()._set_initial_robot_server_state(rs_state=rs_state, fixed_object_position=fixed_object_position)
            return state_msg
        z_amplitude = np.random.default_rng().uniform(low=0.09, high=0.35)
        z_frequency = 0.125
        z_offset = np.random.default_rng().uniform(low=0.2, high=0.6)
        string_params = {"object_0_function": "triangle_wave"}
        float_params = {"object_0_x": 0.12,
                        "object_0_y": 0.34,
                        "object_0_z_amplitude": z_amplitude,
                        "object_0_z_frequency": z_frequency,
                        "object_0_z_offset": z_offset}
        state = {}
        state_msg = robot_server_pb2.State(state = state, float_params = float_params,
                                            string_params = string_params, state_dict = rs_state)
        return state_msg
    def reset(self, joint_positions = JOINT_POSITIONS, fixed_object_position = None) -> np.array:
        """Environment reset.

        Args:
            joint_positions (list[6] or np.array[6]): robot joint positions in radians.
            fixed_object_position (list[3]): x,y,z fixed position of object
        """
        # reset the previous-action memory used by the rapid-action penalty
        self.prev_action = np.zeros(6)
        state = super().reset(joint_positions = joint_positions, fixed_object_position = fixed_object_position)
        return state
    def reward(self, rs_state, action) -> Tuple[float, bool, dict]:
        # Compute (reward, done, info) for the current transition.
        env_state = self._robot_server_state_to_env_state(rs_state)
        reward = 0
        done = False
        info = {}
        # Reward weights
        close_distance_weight = -2
        delta_joint_weight = 1
        action_usage_weight = 1
        rapid_action_weight = -0.2
        # Difference in joint position current vs. starting position
        # NOTE(review): assumes env_state[9:15] holds the 6 joint deltas --
        # depends on the base-class state layout; confirm if it changes.
        delta_joint_pos = env_state[9:15]
        # Calculate distance to the obstacle
        obstacle_coord = np.array([rs_state['object_0_to_ref_translation_x'], rs_state['object_0_to_ref_translation_y'], rs_state['object_0_to_ref_translation_z']])
        ee_coord = np.array([rs_state['ee_to_ref_translation_x'], rs_state['ee_to_ref_translation_y'], rs_state['ee_to_ref_translation_z']])
        forearm_coord = np.array([rs_state['forearm_to_ref_translation_x'], rs_state['forearm_to_ref_translation_y'], rs_state['forearm_to_ref_translation_z']])
        distance_to_ee = np.linalg.norm(obstacle_coord - ee_coord)
        distance_to_forearm = np.linalg.norm(obstacle_coord - forearm_coord)
        # the closest of end-effector and forearm drives the penalty
        distance_to_target = np.min([distance_to_ee, distance_to_forearm])
        # Reward staying close to the predefined joint position
        if abs(env_state[-6:]).sum() < 0.1 * action.size:
            reward += delta_joint_weight * (1 - (abs(delta_joint_pos).sum()/(0.1 * action.size))) * (1/1000)
        # Reward for not acting
        if abs(action).sum() <= action.size:
            reward += action_usage_weight * (1 - (np.square(action).sum()/action.size)) * (1/1000)
        # Negative reward if actions change to rapidly between steps
        for i in range(len(action)):
            if abs(action[i] - self.prev_action[i]) > 0.5:
                reward += rapid_action_weight * (1/1000)
        # Negative reward if the obstacle is close than the predefined minimum distance
        if distance_to_target < MINIMUM_DISTANCE:
            reward += close_distance_weight * (1/self.max_episode_steps)
        # Check if there is a collision
        collision = True if rs_state['in_collision'] == 1 else False
        if collision:
            done = True
            info['final_status'] = 'collision'
            info['target_coord'] = obstacle_coord
            self.last_position_on_success = []
        # surviving the full episode without collision counts as success
        if self.elapsed_steps >= self.max_episode_steps:
            done = True
            info['final_status'] = 'success'
            info['target_coord'] = obstacle_coord
            self.last_position_on_success = []
        return reward, done, info
    def step(self, action) -> Tuple[np.array, float, bool, dict]:
        # Execute one step; remember the (fixed-joint-expanded) action so the
        # next reward() call can penalize rapid changes.
        if type(action) == list: action = np.array(action)
        state, reward, done, info = super().step(action)
        self.prev_action = self.add_fixed_joints(action)
        return state, reward, done, info
return state, reward, done, info
class BasicAvoidanceURSim(BasicAvoidanceUR, Simulation):
    """Simulated variant: launches the Gazebo/ROS robot server itself."""
    # roslaunch command used to spawn the simulated robot server; the
    # ur_model argument is appended in __init__.
    cmd = "roslaunch ur_robot_server ur_robot_server.launch \
        world_name:=tabletop_sphere50.world \
        reference_frame:=base_link \
        max_velocity_scale_factor:=0.2 \
        action_cycle_rate:=20 \
        rviz_gui:=false \
        gazebo_gui:=true \
        objects_controller:=true \
        rs_mode:=1moving2points \
        n_objects:=1.0 \
        object_0_model_name:=sphere50 \
        object_0_frame:=target"
    def __init__(self, ip=None, lower_bound_port=None, upper_bound_port=None, gui=False, ur_model='ur5', **kwargs):
        self.cmd = self.cmd + ' ' + 'ur_model:=' + ur_model
        Simulation.__init__(self, self.cmd, ip, lower_bound_port, upper_bound_port, gui, **kwargs)
        BasicAvoidanceUR.__init__(self, rs_address=self.robot_server_ip, ur_model=ur_model, **kwargs)
class BasicAvoidanceURRob(BasicAvoidanceUR):
    """Real-robot variant: connects to an externally launched robot server."""
    real_robot = True
# roslaunch ur_robot_server ur_robot_server.launch ur_model:=ur5 real_robot:=true rviz_gui:=true gui:=true reference_frame:=base max_velocity_scale_factor:=0.2 action_cycle_rate:=20 rs_mode:=moving | StarcoderdataPython |
3397938 | # Generated by Django 2.2 on 2020-03-26 16:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: renames Acceptor to Accepts and adds
    a required ``location`` field to ItemRequest (backfilled with 'vellore')."""
    dependencies = [
        ('app', '0002_auto_20200326_1918'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='Acceptor',
            new_name='Accepts',
        ),
        migrations.AddField(
            model_name='itemrequest',
            name='location',
            # one-off default used only to populate existing rows
            field=models.CharField(default='vellore', max_length=100),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
5168755 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-19 22:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds acting_player, action and amount
    columns to LogEntry (all nullable/defaulted, so no backfill needed)."""
    dependencies = [
        ('core', '0021_game_treasury_shares_pay'),
    ]
    operations = [
        migrations.AddField(
            model_name='logentry',
            name='acting_player',
            field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='core.Player'),
        ),
        migrations.AddField(
            model_name='logentry',
            name='action',
            field=models.IntegerField(blank=True, choices=[(0, 'Player transfers money to bank')], default=None, null=True),
        ),
        migrations.AddField(
            model_name='logentry',
            name='amount',
            field=models.IntegerField(default=0),
        ),
    ]
| StarcoderdataPython |
11356396 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from . import check_token, _query_nodeping_api, config
API_URL = config.API_URL
def get_results(token,
                check_id,
                customerid=None,
                span=None,
                limit=300,
                start=None,
                end=None,
                clean=True):
    """ Get results for a certain check ID.

    _id - ID of the result record.
    ci - customer id
    t - Check Type: DNS, FTP, HTTP, HTTPCONTENT, IMAP4, MYSQL, PING, POP3,
    PORT, RDP, SMTP, SSH, SSL
    tg - Target, generally the URL or Hostname
    th - Threshold or timeout for this check
    i - Check interval
    ra - Timestamp, when this check was scheduled to run
    q - Internal informationa about what queue this check was in when it ran.
    s - Timestamp, when this check actually ran. This will usually be a few ms
    behind ra
    sc - Short text field showing the result of the check. This varies slighly
    by check type. For HTTP checks, it is the status code returned by the
    remote server.
    m - Message regarding the result of the check. This varies by check type,
    but generally if there is an error code it will appear in this field.
    su - boolean, whether the check was a pass or fail
    rt - The run time. This is the value that get's charted. For many checks
    this is the value of e - s.
    e - Timestamp, when the check finished.
    l - list of locations the check ran in, and the timestamps for each.

    :param token: NodePing API token
    :type token: str
    :param check_id: The ID of the check to get results for
    :type check_id: str
    :param customerid: (Optional) subaccount ID
    :type customerid: str
    :param span: number of hours of results to retrieve
    :type span: int
    :param limit: Limit the nubmer of records to be retrieved
    :type limit: int
    :param start: Date/time for the start of the results. Timestamps in milliseconds
    :type start: int
    :param end: Date/time for the end of the results. Timestanps in milliseconds
    :type end: int
    :param clean: Clean being set to true will use the new output format above
    :type clean: bool
    :return: Returns the output that was queried from NodePing
    :rtype: dict
    """
    check_token.is_valid(token)
    # locals() must be captured before any other local is assigned so that it
    # contains exactly the function arguments
    parameters = locals()
    url = "{0}results/{1}?token={2}".format(API_URL, check_id, token)
    for key, value in parameters.items():
        if key in ("token", "check_id"):
            continue
        # BUG FIX: the original tested truthiness (``elif value:``), which
        # silently dropped explicit falsy arguments such as clean=False or
        # limit=0 from the query string.  Only None now means "omitted".
        if value is not None:
            url = "{0}&{1}={2}".format(url, key, value)
    return _query_nodeping_api.get(url)
def get_uptime(token,
               check_id,
               customerid=None,
               interval="months",
               start=None,
               end="now"):
    """ Retrieves uptime information for a check

    :param token: NodePing API token
    :type token: str
    :param check_id: The ID of the check to get results for
    :type check_id: str
    :param customerid: (Optional) subaccount ID
    :type customerid: str
    :param interval: "days" or "months" for uptimes result for check
    :type interval: str
    :param start: optional start date for the range of days or months
    :type start: str
    :param end: optional end date for the range of days or months
    :type end: str
    :return: Uptime for checks up to the specified end date
    :rtype: dict
    """
    check_token.is_valid(token)
    # locals() must be captured before any other local is assigned so that it
    # contains exactly the function arguments
    parameters = locals()
    url = "{0}results/uptime/{1}?token={2}".format(API_URL, check_id, token)
    for key, value in parameters.items():
        if key in ("token", "check_id"):
            continue
        # BUG FIX: the original tested truthiness (``elif value:``), which
        # silently dropped explicit falsy arguments (e.g. start=0 or an empty
        # string) from the query string.  Only None now means "omitted".
        if value is not None:
            url = "{0}&{1}={2}".format(url, key, value)
    return _query_nodeping_api.get(url)
def get_current(token, customerid=None):
    """ Retrieves information about current "events" for checks.

    Not to be confused with listing checks that are passing/failing.
    For passing/failing, use get_checks.failing_checks or
    get_checks.passing_checks

    :param token: NodePing API token
    :type token: str
    :param customerid: (Optional) subaccount ID
    :type customerid: str
    :return: Information about current events for checks on account
    :rtype: dict
    """
    check_token.is_valid(token)
    url = "{0}results/current?token={1}".format(API_URL, token)
    if customerid:
        url = "{0}&customerid={1}".format(url, customerid)
    return _query_nodeping_api.get(url)
| StarcoderdataPython |
6602539 | <filename>venv/lib/python2.7/site-packages/nano-1.0a3-py2.7.egg/nano/user/tests.py<gh_stars>0
from django.utils.timezone import now as tznow
from django.test import TestCase
from nano.faq.models import QA
class QATest(TestCase):
    """Tests for the QA model.

    NOTE(review): this file lives under nano/user but imports QA from
    nano.faq -- confirm the test module is in the intended package.
    """
    def test_str(self):
        # str(QA) should be the question text
        item = QA(question='blbl', answer='fofo')
        self.assertEqual(str(item), item.question)
    def test_save(self):
        # saving should stamp last_modified
        item = QA(question='blbl', answer='fofo')
        item.save()
        self.assertNotEqual(item.last_modified, None)
| StarcoderdataPython |
8118596 | import numpy as np
def compute_cost(A, Y):
    """Mean binary cross-entropy between predictions ``A`` and labels ``Y``.

    A, Y: arrays of shape (classes, m) with A in (0, 1) and Y in {0, 1}.
    Returns the scalar cost averaged over the m examples.
    """
    m = Y.shape[1]
    per_sample = np.multiply(-np.log(A), Y) + np.multiply(-np.log(1 - A), 1 - Y)
    return 1./m * np.sum(per_sample)
# =============================================================================================== #
# Base optimizer class #
# =============================================================================================== #
class Optimizer:
    """Base class of the optimization algorithms. It contains the basic
    functions that all optimization algorithms have to over wright.
    These functions are accounting for every possible combination of
    mini batch or batch descent and regularization method.
    Inputs: iterations:    [int32] Number of iterations
            learning_rate: [float32] The learning rate
            Mini_batch:    [int] Mini batch size or [None] for batch gradient descent"""
    def __init__(self, iterations, learning_rate, Mini_batch):
        # Mini_batch is None -> full-batch descent; otherwise mini-batch size
        self.mini_batch = Mini_batch
        self.iterations = iterations
        self.learning_rate = learning_rate
    def initialize(self, Network):
        # Hook for subclasses to set up per-network optimizer state.
        pass
    @property
    def epoch(self):
        """Function to return the epoch to be used for optimization
        depending on the mini_batch hyper parameters of the optimization"""
        # Returns the bound method itself (not its result); the caller is
        # expected to invoke it as epoch(Network, X, Y).
        if self.mini_batch is None:
            Epoch = self.batch_epoch
        else:
            Epoch = self.mini_batch_epoch
        return Epoch
    def batch_epoch(self, Network, X, Y):
        """Batch gradient descent with no regularization"""
        # Abstract: subclasses must implement one full-batch update pass.
        pass
    def mini_batch_epoch(self, Network, X, Y):
        """Mini batch gradient descent with no regularization"""
        # Abstract: subclasses must implement one mini-batch update pass.
        pass
5168792 | from django.shortcuts import redirect
from django.urls import reverse
from django.http import JsonResponse
from functools import wraps
from django.conf import settings
def auth_required(forid):
    """View decorator: redirect to the authorization page for ``forid``
    unless the user is already authorized (always allowed while TESTING)."""
    def decorator(func):
        @wraps(func)
        def _wrapped_view(request, *args, **kw):
            allowed = (request.user.userextra.has_authorization(request, forid)
                       or getattr(settings, 'TESTING', False))
            if allowed:
                return func(request, *args, **kw)
            auth_url = reverse('shop:authorize', kwargs={'forid': forid})
            return redirect(auth_url + '?next=' + request.path)
        return _wrapped_view
    return decorator
def auth_required_api(forid, goto=None):
    """API view decorator: return a JSON redirect payload instead of an
    HTTP redirect when the user is not authorized for ``forid``.

    goto: optional explicit "next" target; falls back to request.path.
    """
    def realone(func):
        @wraps(func)
        def _wrapped_view(request, *args, **kw):
            if request.user.userextra.has_authorization(request, forid) or \
                    getattr(settings, 'TESTING', False):
                return func(request, *args, **kw)
            # BUG FIX: the original wrote ``'?next=' + goto or request.path``;
            # ``+`` binds tighter than ``or``, so with goto=None this raised
            # TypeError and the request.path fallback was never applied.
            target = goto if goto is not None else request.path
            return JsonResponse({
                'redirect': reverse('shop:authorize',
                                    kwargs={'forid': forid})
                + '?next=' + target})
        return _wrapped_view
    return realone
| StarcoderdataPython |
8028523 | import logging
from discord.ext import commands
from discord.ext.commands import Context
from cogbot import checks
from cogbot.cog_bot import CogBot
log = logging.getLogger(__name__)
class Say:
    """Cog exposing a manager-only 'reboot' command.

    NOTE(review): the class is named Say but only implements a reboot
    command -- looks like it was copied from a 'say' cog template; confirm.
    """
    def __init__(self, bot):
        self.bot: CogBot = bot
    @checks.is_manager()
    @commands.command(
        pass_context=True,
        name="reboot",
        aliases=["haveutriedturningitoffandbackonagain"],
        hidden=True,
    )
    async def cmd_reboot(self, ctx: Context):
        """Log out of Discord so the supervisor restarts the bot cleanly."""
        log.warning('Bot is being forcefully rebooted...')
        await self.bot.add_reaction(ctx.message, "🤖")
        # the bot should auto-recover, reloading all state and extensions
        await self.bot.logout()
        log.warning('Bot should attempt auto-recovery...')
log.warning('Bot should attempt auto-recovery...')
def setup(bot):
    # discord.py extension entry point: register the cog on extension load
    bot.add_cog(Say(bot))
| StarcoderdataPython |
128491 | import mysql.connector
from contextlib import closing
# Demo: open a local MySQL connection (auto-closed by closing()) and print
# its status.  NOTE(review): the password is a redacted placeholder, and
# is_connected() is printed twice -- the second call looks redundant; confirm.
with closing(mysql.connector.connect(
    host="localhost",
    port=3306,
    user="root",
    password="<PASSWORD>!"
)) as mydb:
    print(mydb)
    print(mydb.is_connected())
    print(mydb.is_connected())
| StarcoderdataPython |
4917183 | <filename>EXERCICIOS/exercicio_ffmpeg/exercicio_ffmpeg.py
# https://ffmpeg.org/documentation.html
"""
ffmpeg -i "ENTRADA" -i "LEGENDA" -c:v libx264 -crf 23 -preset ultrafast -c:a aac -b:a 320k -c:s srt -map v:0 -map a
-map 1:0 -ss 00:00:00 -to 00:00:50 "SAIDA"
"""
import os, fnmatch, sys
# Pick the ffmpeg binary: system ffmpeg on Linux, bundled exe elsewhere.
if sys.platform == 'linux':
    command_ffmpeg = 'ffmpeg'
else:
    command_ffmpeg = r'ffmpeg\ffmpeg.exe'
# Fixed transcode settings: H.264 video (CRF 23, ultrafast) + 320k AAC audio.
codec_video = '-c:v libx264'
crf = '-crf 23'
preset = '-preset ultrafast'
codec_audio = '-c:a aac'
bitrate_audio = '-b:a 320k'
# debug: only transcode the first 50 seconds of each file
debug = '-ss 00:00:00 -to 00:00:50'
extent_output = '.mkv'
path_origin = r'E:\temp\mp4'
path_destination = r'E:\temp\mkv'
# Walk the source tree and transcode every .mp4, muxing in a same-named .srt
# subtitle file when one exists next to the video.
for root, dirs, files in os.walk(path_origin):
    for file in files:
        if not fnmatch.fnmatch(file, '*.mp4'):
            continue
        full_path = os.path.join(root, file)
        # file_name here still includes the directory (full path sans extension)
        file_name, file_extent = os.path.splitext(full_path)
        path_subtitle = file_name + '.srt'
        if os.path.isfile(path_subtitle):
            input_subtitle = f'-i "{path_subtitle}"'
            map_subtitle = '-c:s srt -map v:0 -map a -map 1:0'
        else:
            input_subtitle = ''
            map_subtitle = ''
        # rebind file_name to the bare name (no directory) for the output file
        file_name, file_extent = os.path.splitext(file)
        output_path = fr'{path_destination}\{file_name}_NOVO{extent_output}'
        command_full = f'{command_ffmpeg} -i "{full_path}" {input_subtitle} ' \
                       f'{codec_video} {crf} {preset} {codec_audio} {bitrate_audio} ' \
                       f'{debug} {map_subtitle} "{output_path}"'
        # NOTE(review): os.system with interpolated paths breaks on filenames
        # containing quotes; consider subprocess.run with an argument list.
        os.system(command_full)
| StarcoderdataPython |
1622541 | <reponame>ptesan777/model-optimization
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Init module for TensorFlow Model Optimization Python API.
```
import tensorflow_model_optimization as tfmot
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# We need to put some imports inside a function call below, and the function
# call needs to come before the *actual* imports that populate the
# tensorflow_model_optimization namespace. Hence, we disable this lint check
# throughout the file.
#
# pylint: disable=g-import-not-at-top
# Ensure TensorFlow is importable and its version is sufficiently recent. This
# needs to happen before anything else, since the imports below will try to
# import tensorflow, too.
def _ensure_tf_install(): # pylint: disable=g-statement-before-imports
"""Attempt to import tensorflow, and ensure its version is sufficient.
Raises:
ImportError: if either tensorflow is not importable or its version is
inadequate.
"""
try:
import tensorflow as tf
except ImportError:
# Print more informative error message, then reraise.
print(
'\n\nFailed to import TensorFlow. Please note that TensorFlow is not '
'installed by default when you install TensorFlow Model Optimization. This '
'is so that users can decide whether to install the GPU-enabled '
'TensorFlow package. To use TensorFlow Model Optimization, please install '
'the most recent version of TensorFlow, by following instructions at '
'https://tensorflow.org/install.\n\n')
raise
import distutils.version
#
# Update this whenever we need to depend on a newer TensorFlow release.
#
required_tensorflow_version = '1.14.0'
if (distutils.version.LooseVersion(tf.version.VERSION) <
distutils.version.LooseVersion(required_tensorflow_version)):
raise ImportError(
'This version of TensorFlow Model Optimization requires TensorFlow '
'version >= {required}; Detected an installation of version {present}. '
'Please upgrade TensorFlow to proceed.'.format(
required=required_tensorflow_version, present=tf.__version__))
# Run the TensorFlow presence/version check before importing anything that
# depends on TensorFlow.
_ensure_tf_install()

import inspect as _inspect
import os as _os
import sys as _sys

# To ensure users only access the expected public API, the API structure is
# created in the `api` directory. Import all api modules.
# pylint: disable=wildcard-import
from tensorflow_model_optimization.python.core.api import *
# pylint: enable=wildcard-import

# Use sparsity module to fetch the path for the `api` directory.
# This handles all techniques, not just sparsity.
# NOTE: `sparsity` is brought into scope by the wildcard import above, hence
# the pylint suppression.
_API_MODULE = sparsity  # pylint: disable=undefined-variable

# Returns $(install_dir)/tensorflow_model_optimization/api
_sparsity_api_dir = _os.path.dirname(
    _os.path.dirname(_inspect.getfile(_API_MODULE)))

# Add the `api` directory to `__path__` so that `from * import module` works.
_current_module = _sys.modules[__name__]
if not hasattr(_current_module, '__path__'):
  __path__ = [_sparsity_api_dir]
elif _os.path.dirname(_inspect.getfile(_API_MODULE)) not in __path__:
  __path__.append(_sparsity_api_dir)

# Delete python module so that users only access the code using the API path
# rather than using the code directory structure.
# This will disallow usage such as `tfmot.python.core.sparsity.keras`.
# pylint: disable=undefined-variable
try:
  del python
except NameError:
  pass
# pylint: enable=undefined-variable
| StarcoderdataPython |
1699945 |
def test_stuff1():
    """Sanity test that always passes."""
    assert 1 + 1 == 2
def test_stuff2():
    """Deliberately failing test; always raises AssertionError.

    NOTE(review): presumably a fixture for exercising a test runner's
    failure path -- confirm intent before 'fixing' it.
    """
    assert False
| StarcoderdataPython |
12839612 | <gh_stars>1-10
import os
import pickle
import numpy as np
import re
from string import ascii_letters
from datetime import datetime
import argparse
import gzip
def collect_datasets_is(folder=None,
                        model=None,
                        ndata=None,
                        nsubsample=None):
    """Aggregate per-run importance-sampling (IS) result pickles into one summary.

    Fix: the parameter defaults used to be mutable lists (``[]``) -- both the
    mutable-default antipattern and the wrong type, since these values are
    used as strings/ints.  ``None`` now simply marks "not supplied".

    Args:
        folder (str): directory containing the per-run IS result pickles.
        model (str): model name substring used to select files
            (e.g. 'ddm', 'weibull').
        ndata (int): dataset size; selects the parameter-recovery file and
            filters result files via the 'N_<ndata>' substring.
        nsubsample (int): number of posterior samples to keep per run.

    Returns:
        dict: stacked arrays keyed by 'gt', 'posterior_samples', 'timings',
        'perplexities', 'importance_weights', 'effective_sample_size',
        'means', 'maps', 'data'.  The same dict is also pickled to a
        machine-dependent summary path as a side effect.

    NOTE(review): this function reads the module-level globals ``machine`` and
    ``isfolder`` (assigned in the __main__ block).  If ``machine`` is neither
    'ccv' nor 'x7', ``param_recov_dat`` is never defined and a NameError
    follows -- confirm the supported machine set before reuse.
    """
    # Load in parameter recovery data (ground truth + simulated datasets).
    if machine == 'ccv':
        if model == 'weibull' or model == 'weibull2':
            param_recov_files = os.listdir('/users/afengler/data/kde/' + 'weibull_cdf' + '/parameter_recovery_data_binned_1_nbins_512_n_' + str(ndata) + '/')
            param_recov_dat = pickle.load(open('/users/afengler/data/kde/' + 'weibull_cdf' + '/parameter_recovery_data_binned_1_nbins_512_n_' + str(ndata) + '/' + param_recov_files[0], 'rb'))
        else:
            param_recov_files = os.listdir('/users/afengler/data/kde/' + model + '/parameter_recovery_data_binned_1_nbins_512_n_' + str(ndata) + '/')
            param_recov_dat = pickle.load(open('/users/afengler/data/kde/' + model + '/parameter_recovery_data_binned_1_nbins_512_n_' + str(ndata) + '/' + param_recov_files[0], 'rb'))
    if machine == 'x7':
        param_recov_files = os.listdir('/media/data_cifs/projects/prj_approx-bayes/projectABC/data/' + model + '/parameter_recovery_data_binned_1_nbins_512_n_' + str(ndata) + '/')
        param_recov_dat = pickle.load(open('/media/data_cifs/projects/prj_approx-bayes/projectABC/data/' + model + '/parameter_recovery_data_binned_1_nbins_512_n_' + str(ndata) + '/' + param_recov_files[0], 'rb'))

    n_data_substring = 'N_' + str(ndata)

    # Accumulators, one list per summary statistic.
    is_dict = {}
    is_dict['gt'] = []
    is_dict['posterior_samples'] = []
    is_dict['timings'] = []
    is_dict['perplexities'] = []
    is_dict['importance_weights'] = []
    is_dict['effective_sample_size'] = []
    is_dict['means'] = []
    is_dict['maps'] = []
    is_dict['data'] = []

    files_ = os.listdir(folder)
    cnt = 0
    for file_ in files_:
        # Only per-run result files for this model/ndata (skip prior summaries).
        if model + '_training_' in file_ and n_data_substring in file_ and 'summary' not in file_:
            print(cnt)
            print('Processing file: ', file_)
            cnt += 1
            # Extract the run id embedded between '_idx_' and '_tdist' in the name.
            st = file_.find('_idx_')
            fin = file_.find('_tdist')
            idx = int(file_[st + len('_idx_'):fin])
            tmp = pickle.load(gzip.open(folder + file_, 'rb'), encoding='latin1')
            # Thin the posterior to `nsubsample` draws to bound summary size.
            sub_idx = np.random.choice(tmp['posterior_samples'].shape[0], nsubsample, replace=False)
            is_dict['gt'].append(tmp['gt_params'])
            is_dict['posterior_samples'].append(tmp['posterior_samples'][sub_idx, :])
            is_dict['timings'].append(tmp['timeToConvergence'])
            is_dict['perplexities'].append(tmp['norm_perplexity'])
            is_dict['importance_weights'].append(tmp['final_w'][sub_idx])
            # Kish effective sample size: 1 / sum(w_i^2) of the full weight vector.
            is_dict['effective_sample_size'].append(1 / np.sum(np.square(tmp['final_w'])))
            is_dict['means'].append(np.mean(tmp['posterior_samples'], axis=0))
            # MAP estimate: sample with the highest log-likelihood.
            is_dict['maps'].append(tmp['final_x'][np.argmax(tmp['log_likelihood']), :])
            # Attach the simulated dataset this run was fit to.
            is_dict['data'].append(param_recov_dat[1][0][idx, :, :])
            print('Processed file: ', file_)

    # Convert the accumulated lists into stacked arrays.
    is_dict['gt'] = np.stack(is_dict['gt'])
    is_dict['posterior_samples'] = np.stack(is_dict['posterior_samples'])
    is_dict['timings'] = np.array(is_dict['timings'])
    is_dict['perplexities'] = np.array(is_dict['perplexities'])
    is_dict['importance_weights'] = np.stack(is_dict['importance_weights'])
    is_dict['means'] = np.stack(is_dict['means'])
    is_dict['maps'] = np.stack(is_dict['maps'])
    is_dict['data'] = np.stack(is_dict['data'])

    # Persist the summary to a machine-dependent location.
    if machine == 'ccv':
        if model == 'weibull':
            print('writing to file: ', '/users/afengler/data/eLIFE_exps/summaries/IS_summary_' + 'weibull_cdf' + \
                  '_' + n_data_substring + '.pickle')
            pickle.dump(is_dict, open('/users/afengler/data/eLIFE_exps/summaries/IS_summary_' + 'weibull_cdf' + \
                                      '_' + n_data_substring + '.pickle', 'wb'), protocol=4)
        else:
            print('writing to file: ', '/users/afengler/data/eLIFE_exps/summaries/IS_summary_' + model + \
                  '_' + n_data_substring + '.pickle')
            pickle.dump(is_dict, open('/users/afengler/data/eLIFE_exps/summaries/IS_summary_' + model + \
                                      '_' + n_data_substring + '.pickle', 'wb'), protocol=4)
    if machine == 'x7':
        print('writing to file: ', '/media/data_cifs/projects/prj_approx-bayes/projectABC/' + isfolder + '/' + 'IS_summary_' + \
              model + '_' + n_data_substring + '.pickle')
        pickle.dump(is_dict, open('/media/data_cifs/projects/prj_approx-bayes/projectABC/' + isfolder + '/' + 'IS_summary_' + \
                                  model + '_' + n_data_substring + '.pickle', 'wb'), protocol=4)

    return is_dict
if __name__ == "__main__":
    # Currently available models = ['weibull', 'race_model_6', 'ornstein', 'full_ddm', 'ddm_seq2', 'ddm_par2', 'ddm_mic2']
    # CLI: machine selects the host-specific data layout; method is the model name.
    CLI = argparse.ArgumentParser()
    CLI.add_argument("--machine",
                     type = str,
                     default = 'x7')
    CLI.add_argument("--method",
                     type = str,
                     default = 'ddm')
    CLI.add_argument("--ndata",
                     type = int,
                     default = 1024)
    CLI.add_argument("--nsubsample",
                     type = int,
                     default = 10000)
    CLI.add_argument("--isfolder",
                     type = str,
                     default = 'eLIFE_exps')

    args = CLI.parse_args()
    print(args)

    # These module-level globals are read by collect_datasets_is().
    machine = args.machine
    method = args.method
    ndata = args.ndata
    nsubsample = args.nsubsample
    isfolder = args.isfolder

    # Map machine name to its IS-sample folder; 'weibull_cdf*' aliases collapse
    # to 'weibull' on home/ccv.
    # NOTE(review): if --machine is none of 'home'/'ccv'/'x7',
    # `is_sample_folder` is never assigned and the print below raises
    # NameError -- confirm the intended machine set.
    if machine == 'home':
        is_sample_folder = '/Users/afengler/OneDrive/project_nn_likelihoods/data/' + isfolder + '/'
        if method == 'weibull_cdf' or method == 'weibull_cdf2':
            method = 'weibull'
    if machine == 'ccv':
        is_sample_folder = '/users/afengler/data/' + isfolder + '/'
        if method == 'weibull_cdf' or method == 'weibull_cdf2':
            method = 'weibull'
    if machine == 'x7':
        is_sample_folder = '/media/data_cifs/projects/prj_approx-bayes/projectABC/' + isfolder + '/'

    print(is_sample_folder)
    print('Started processing model: ', method, ' with ndata: ', ndata)
    collect_datasets_is(folder = is_sample_folder,
                        model = method,
                        ndata = ndata,
                        nsubsample = nsubsample)
8186234 | <filename>src/app.py
import logging
from config import conf
from factory.standard_discord_bot_factory import StandardDiscordBotFactory
from runner.standard_discord_bot_runner import StandardDiscordBotRunner
class App:
    """Program entry point: wires the bot factory into the runner."""

    def start(self) -> None:
        """Build the Discord bot factory and hand it to the runner."""
        factory = StandardDiscordBotFactory(conf)
        runner = StandardDiscordBotRunner(conf, factory)
        runner.run()
if __name__ == '__main__':
    # Configure root logging before the bot starts so every module logs in a
    # consistent comma-separated format with day-first timestamps.
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s,%(levelname)s,%(message)s',
        datefmt='%d/%m/%Y %H:%M:%S'
    )
    App().start()
| StarcoderdataPython |
1794504 | <filename>AKDPRFramework/mlops/knn.py
from AKDPRFramework.utils.dataops import euclidean_distance
import numpy as np
class KNN:
    """K nearest neighbors classifier.

    Each test sample is labelled by a majority vote among the ``k`` training
    samples closest to it in Euclidean distance.

    Args:
        - ``k`` (int): The number of closest neighbors.

    Examples::

        >>> from sklearn import datasets
        >>> from AKDPRFramework.utils.dataops import train_test_split, normalize
        >>> data = datasets.load_iris()
        >>> X = normalize(data.data)
        >>> y = data.target
        >>> X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
        >>> from AKDPRFramework.mlops.knn import KNN
        >>> classifier = KNN(k=6)
        >>> y_pred = classifier.predict(X_test, X_train, y_train)
        >>> print(f'Accuracy is {accuracy_score(y_test, y_pred)}')
    """

    def __init__(self, k):
        self.k = k

    def vote(self, neighbor_samples):
        """Majority vote.

        Returns:
            - The most common class label among ``neighbor_samples``.
        """
        label_counts = np.bincount(neighbor_samples.astype('int'))
        return label_counts.argmax()

    def predict(self, X_test, X_train, y_train):
        """Predict a label for every row of ``X_test``.

        Args:
            - ``X_test``: samples to classify.
            - ``X_train``: reference (training) samples.
            - ``y_train``: labels of the reference samples.

        Returns:
            Array of predicted labels, one per test sample.
        """
        predictions = np.empty(X_test.shape[0])
        for row, sample in enumerate(X_test):
            # Distance from this test sample to every training sample.
            distances = [euclidean_distance(sample, train_sample) for train_sample in X_train]
            # Indices of the k closest training samples.
            nearest = np.argsort(distances)[:self.k]
            # Labels of those neighbors, then majority vote.
            neighbor_labels = np.array([y_train[j] for j in nearest])
            predictions[row] = self.vote(neighbor_labels)
        return predictions
| StarcoderdataPython |
4886475 | <filename>python/cfgmdl/__init__.py
""" Tools for configuration parsing and model building """
from .version import get_git_version
__version__ = get_git_version()
del get_git_version
from .unit import Unit
from .ref import Ref
from .array import Array
from .property import Property
from .derived import Derived, cached
from .configurable import Configurable
from .choice import Choice
from .parameter import Parameter
from .param_holder import ParamHolder
from .model import Model
from . import tools, utils
| StarcoderdataPython |
118678 | <filename>1.two-sum.py<gh_stars>1-10
#
# @lc app=leetcode.cn id=1 lang=python3
#
# [1] 两数之和
#
# https://leetcode-cn.com/problems/two-sum/description/
#
# algorithms
# Easy (44.30%)
# Total Accepted: 243.9K
# Total Submissions: 547.9K
# Testcase Example: '[2,7,11,15]\n9'
#
# 给定一个整数数组 nums 和一个目标值 target,请你在该数组中找出和为目标值的那 两个 整数,并返回他们的数组下标。
#
# 你可以假设每种输入只会对应一个答案。但是,你不能重复利用这个数组中同样的元素。
#
# 示例:
#
# 给定 nums = [2, 7, 11, 15], target = 9
#
# 因为 nums[0] + nums[1] = 2 + 7 = 9
# 所以返回 [0, 1]
#
#
#
class Solution:
    def twoSum(self, nums: 'List[int]', target: 'int') -> 'List[int]':
        """Return indices ``[i, j]`` (i < j) with ``nums[i] + nums[j] == target``.

        Returns an empty list when no pair sums to ``target``.

        Fixes over the previous implementation:
          * no longer mutates the caller's list (the old code overwrote
            visited entries with '#'),
          * single O(n) pass using a value->index map instead of an O(n^2)
            ``list.index`` scan for every element.
        """
        seen = {}  # value -> index of its first occurrence
        for j, value in enumerate(nums):
            complement = target - value
            if complement in seen:
                return [seen[complement], j]
            seen[value] = j
        return []

    def _find_num(self, nums, num):
        """Return the index of ``num`` in ``nums``, or -1 if absent.

        Kept for backward compatibility; no longer used by ``twoSum``.
        """
        try:
            return nums.index(num)
        except ValueError:
            return -1
| StarcoderdataPython |
11367504 | # 主函数
from global_utils import print_summary
from options import parse_options
from global_utils import set_global_seed, save_performance, plot_data
import time
from agent_env_params import design_agent_and_env
from multiprocessing import Process
import random
from environment import Environment
from agent import Agent
def run_HAC(FLAGS,env,agent, plot_figure=False, num=0):
    """Run HAC training (or testing) episodes and record per-epoch success rates.

    Args:
        FLAGS: parsed command-line options (num_epochs, save_freq, test, ...).
        env: the task environment the agent interacts with.
        agent: the hierarchical agent being trained/evaluated.
        plot_figure: if True, also save a plot of the success-rate curve.
        num: run index used to disambiguate saved performance files.
    """
    from global_utils import save_plot_figure
    NUM_EPOCH = FLAGS.num_epochs  # how many epochs to train
    SAVE_FREQ = FLAGS.save_freq  # save the model every SAVE_FREQ epochs
    # Print task summary
    print_summary(FLAGS, env)

    if not FLAGS.test:
        num_episodes = FLAGS.num_exploration_episodes  # defaults to 100
    else:
        num_episodes = FLAGS.num_test_episodes
        NUM_EPOCH = 1  # testing runs a single epoch of episodes

    performance_list = []
    # Start at 1 so the first epoch is never saved and the last one is.
    for epoch in range(1, NUM_EPOCH + 1):
        successful_episodes = 0
        for episode in range(num_episodes):  # episode is just an index here
            print("\nEpoch %d, Episode %d" % (epoch, episode))
            # Train for an episode
            success = agent.train(env, episode)
            if success:
                print("End Goal Achieved\n")
                successful_episodes += 1
        # Save agent
        # NOTE(review): source indentation was lost; this save may originally
        # have sat inside the episode loop. Placed at epoch level here, which
        # matches the "save every SAVE_FREQ epochs" intent -- confirm.
        if epoch % SAVE_FREQ == 0 and not FLAGS.test and FLAGS.threadings == 1:
            agent.save_model(epoch * num_episodes)
        success_rate = successful_episodes / num_episodes * 100
        print("\nEpoch %d, Success Rate %.2f%%" % (epoch, success_rate))
        performance_list.append(success_rate)
    if plot_figure:
        save_plot_figure(performance_list)
    save_performance(performance_list, FLAGS, num)
def worker(agent_params, env_params, FLAGS, i):
    """Entry point for one training process: seed, build env/agent, train."""
    # Derive a per-process seed so parallel runs do not correlate.
    process_seed = int(time.time()) + random.randint(0, 100)
    set_global_seed(process_seed)
    FLAGS.seed = process_seed
    environment = Environment(env_params, FLAGS)
    hac_agent = Agent(FLAGS, environment, agent_params)
    run_HAC(FLAGS, environment, hac_agent, plot_figure=False, num=i)
# NOTE(review): this runs at import time (no `if __name__ == "__main__"` guard),
# which breaks multiprocessing's spawn start method on Windows/macOS -- confirm
# the target platform before reuse.
FLAGS = parse_options()
agent_params, env_params = design_agent_and_env(FLAGS)

assert FLAGS.threadings >= 1, "Threadings should be more than 1!"

if FLAGS.threadings == 1:
    # Single-process run: seed, build, train in this process.
    seed = int(time.time()) + random.randint(0, 100)
    set_global_seed(seed)
    FLAGS.seed = seed
    env = Environment(env_params, FLAGS)
    agent = Agent(FLAGS, env, agent_params)
    run_HAC(FLAGS, env, agent, plot_figure=True)
else:
    # Run in parallel: one worker process per threading slot.
    thread_list = []
    for i in range(FLAGS.threadings):
        p = Process(target=worker, args=(agent_params, env_params, FLAGS, i))
        p.start()
        thread_list.append(p)
    for p in thread_list:
        p.join()

# Legacy sequential-run variant, kept for reference (ran FLAGS.times trainings
# back to back, re-initializing the agent's networks between runs):
# FLAGS = parse_options()
# # set_global_seed(FLAGS.seed)
# print("Start time 1")
# set_global_seed(time.time())
# design_function = import_module("agent_env_params_" + FLAGS.env)
# agent_params, env_params = design_function.design_agent_and_env(FLAGS)
# env = Environment(env_params, FLAGS)
# agent = Agent(FLAGS, env, agent_params)
#
# for i in range(1, FLAGS.times + 1):
#     print("\n\n###################################")
#     print("\nStart times {} ......".format(i))
#     print("\n\n####################################\n")
#     set_global_seed(time.time())
#     assert FLAGS.retrain == True, "Not Retrain ERROR!"
#     agent.initialize_networks()  # retrain from scratch
#     agent.clear_buffer()
#     run_HAC(FLAGS, env, agent)
| StarcoderdataPython |
3296249 | import unittest
from quasimodo.statement_maker import StatementMaker
# (question, subject, expected_statement) triples consumed by
# TestStatementMaker.test_dataset. An empty expected statement means the
# question should produce no statement.
# NOTE(review): several entries pair a subject that does not occur in the
# question (e.g. "cactus" with "why is it cold in africa") -- presumably the
# subject is only a hint to StatementMaker; confirm before relying on it.
dataset = [
    ("why is", "is", ""),
    ("how is software piracy illegal in the first place?", "software", 'software piracy is illegal in the first place'),
    ("why are white monkeys superior to other races?", "white", 'white monkeys are superior to other races'),
    ("why can nebraska recruit?", "nebraska", 'nebraska can recruit'),
    ("why do nebraska recruit?", "nebraska", 'nebraska recruit'),
    ("how are weed vans legal in atlanta, ga?", "weed", "weed vans are legal in atlanta, ga"),
    ("how are gold nano-particles colored", "gold", "gold nano-particles are colored"),
    # ("how are other ents preparing for secret santa", "other", 'other ents are preparing for secret santa'),
    ("how are pumpkin pies are made", "pumpkins", "pumpkin pies are made"),
    ("why are black monkeys killers", "black", "black monkeys are killers"),
    ("why is black a color", "black", "black is a color"),
    ("why cant Leeds get promoted", "leeds", 'leeds can get promoted'),
    ("why are client profitability studies important", "client", 'client profitability studies are important'),
    # ("why are plasma thrusters so hard to create", "plasma", 'plasma thrusters are so hard to create'),
    ("why can cold dark matter simply be h2 gas", "cold", 'cold dark matter simply can be h2 gas'),
    ("why is becoming a nurse so hard", "becoming", "becoming a nurse is so hard"),
    ("how was math created", "math", "math was created"),
    ("why is light crude oil more expensive in summer in the northern hemisphere", "light",
     "light crude oil is more expensive in summer in the northern hemisphere"),
    ("why is egypt at war with israel", "egypt", 'egypt is at war with israel'),
    ("why are cactus spiky", "egypt", 'cactus are spiky'),
    ("why are canadians are in russia cold", "canadian", 'canadians in russia are cold'),
    ("why are canada and russia colder than the uk", "canada", 'canada and russia are colder than the uk'),
    ("why are pink and grey elephants big and fat", "pink", "pink and grey elephants are big and fat"),
    ("why are pink flamingo and gray elephant big and fat", "flamingos",
     "pink flamingo and gray elephant are big and fat"),
    ("why are pink flamingos big and fat", "flamingo", "pink flamingos are big and fat"),
    ("why are butts pink", "butts", "butts are pink"),
    ("why is ice floating", "float", 'ice is floating'),
    ("why are cactus bad luck", "cactus", 'cactus are bad luck'),
    ("why are cactus so lucky", "cactus", "cactus are so lucky"),
    ("why are there cactus in africa", "cactus", "there are cactus in africa"),
    ("why is it cold in africa", "cactus", 'it is cold in africa'),
    ("why is kale not allowed on dr bernstein", "kale", 'kale is not allowed on dr bernstein'),
    ("why is tom's idea good?", "tom", "tom's idea is good"),
    ("why are elephants big while i am small", "elephants", 'elephants are big while i am small'),
    ("why is chocolate of switzerland better", "chocolate", "chocolate of switzerland is better"),
    ("why is chocolate in the north of switzerland better", "switzerland",
     "chocolate in the north of switzerland is better"),
    ("why are lily pads zipping", "lily pad", "lily pads are zipping"),
    ("why does panda climb tree", "panda", "panda climb tree"),
    ("why do african people have noses and lips", "african people", "african people have noses and lips"),
    ("why are african elephants endangered", "elephant", "african elephants are endangered"),
    ("why elephants are big", "elephant", "elephants are big"),
    ("why are things creepy", "thing", "things are creepy"),
    ("how are babies actually made", "baby", "babies are actually made")
]
class TestStatementMaker(unittest.TestCase):
    """Checks StatementMaker.to_statement against the module-level ``dataset``
    of (question, subject, expected statement) triples."""

    def setUp(self) -> None:
        self.statement_maker = StatementMaker(use_cache=False)

    def test_dataset(self):
        """Every question in the dataset must convert to its expected statement."""
        for question, subject, expected in dataset:
            self.assertEqual(expected, self.statement_maker.to_statement(question, subject))

    def _test_temp(self):
        """Scratch test (disabled via the leading underscore) for one-off debugging."""
        single_case = [("why are lily pads zipping", "lily pad", "lily pads are zipping")]
        for question, subject, expected in single_case:
            self.assertEqual(expected, self.statement_maker.to_statement(question, subject))
if __name__ == '__main__':
    # Allow running this test file directly with the standard unittest runner.
    unittest.main()
| StarcoderdataPython |
3332038 | <gh_stars>1-10
import os
import requests
import json
import threading
import copy
import common
from habitica import Habitica
import leancloud
from leancloud import LeanCloudError
from lc.api import LC
# Provides the basic operations for Bubei Danci (不背单词, a vocabulary app):
# fetching study stats, mirroring them to LeanCloud, and exporting progress
# as Habitica habit clicks.
class BBDC(object):
    """Singleton facade over the Bubei Danci web API, LeanCloud storage and
    Habitica habit scoring.

    Requires the BBDC_USERID environment variable; habit name/id are resolved
    once via the LC and Habitica helpers.
    """

    # Lock guarding lazy singleton creation across threads.
    _instance_lock = threading.Lock()

    def __init__(self):
        # Fail fast if the required environment variable is missing.
        env_list = ['BBDC_USERID']
        all_right, no_env = common.check_env(env_list)
        if not all_right:
            raise Exception("未设置必要环境变量 %s" % no_env)
        # The hasattr guards keep re-running __init__ on the shared singleton
        # instance from repeating the (network-backed) lookups.
        if not hasattr(self, 'userid'):
            self.userid = os.getenv("BBDC_USERID")
        if not hasattr(self, 'habit_name'):
            self.habit_name = LC().get_habit_name_by_project_name("不背单词")
        if not hasattr(self, 'habit_id'):
            self.habit_id = Habitica().get_habitica_habit_id_by_name(self.habit_name)

    # Singleton implementation (double-checked locking).
    def __new__(cls, *args, **kwargs):
        if not hasattr(cls, '_instance'):
            with BBDC._instance_lock:
                if not hasattr(cls, '_instance'):
                    BBDC._instance = super().__new__(cls)
        return BBDC._instance

    # Fetch the recent (last ~7 days) learning records from the BBDC API.
    def get_latest_learn_list(self):
        """Return the raw `learnList` entries from the BBDC dashboard API.

        Raises:
            Exception: when the API does not answer with result_code 200.
        """
        get_user_url = 'https://learnywhere.cn/bb/dashboard/profile/search?userId=%s' % self.userid
        user_res = requests.get(get_user_url)
        user_json = json.loads(user_res.text)
        if user_json['result_code'] == 200:
            return user_json['data_body']['learnList']
        else:
            raise Exception("获取单词数据出现异常 %s" % str(user_json))

    # Number of NEW words learned today (last entry of the learn list --
    # presumably the list is chronologically ordered; confirm).
    def get_today_learn_number(self):
        latest_learn_list = self.get_latest_learn_list()
        return latest_learn_list[-1]['learnNum']

    # Number of words REVIEWED today.
    def get_today_review_number(self):
        latest_learn_list = self.get_latest_learn_list()
        return latest_learn_list[-1]['reviewNum']

    # Today's study data: learnNum (new words) + reviewNum (reviews),
    # with a derived 'total'.
    def get_today_words_data(self):
        latest_learn_list = self.get_latest_learn_list()
        data = latest_learn_list[-1]
        data['total'] = data['learnNum'] + data['reviewNum']
        return data

    # Most recent study record stored in LeanCloud (zeros if none exist yet).
    def get_latest_lc_data(self):
        """Return the newest BBDC row from LeanCloud as a plain dict.

        NOTE(review): if query.first() raises a LeanCloudError with a code
        other than 101 (class/object not found), control falls through with
        `latest_data` unbound and a NameError follows -- confirm intent.
        """
        now = common.get_china_now()
        query = leancloud.Query('BBDC')
        query.descending('date')
        try:
            latest_data = query.first()
        except LeanCloudError as e:
            if e.code == 101:
                # No data stored yet: treat as a fresh day with zero counts.
                return {
                    'date': now,
                    'learnNum': 0,
                    'reviewNum': 0,
                    'total': 0
                }
        data = {
            'date': latest_data.get('date'),
            'learnNum': latest_data.get('learnNum'),
            'reviewNum': latest_data.get('reviewNum'),
            'total': latest_data.get('total')
        }
        return data

    # Fetch one page (10 rows, newest first) of LeanCloud records.
    def get_lc_data(self, page):
        """Return page `page` (0-based) of stored BBDC rows as dicts."""
        now = common.get_china_now()
        query = leancloud.Query('BBDC')
        query.descending('date')
        query.limit(10)
        query.skip(page * 10)
        lc_list = None
        try:
            lc_list = query.find()
        except LeanCloudError as e:
            if e.code == 101:
                return []
        # NOTE(review): as above, a non-101 LeanCloudError leaves lc_list as
        # None and the loop below raises TypeError -- confirm intent.
        data_list = []
        for item in lc_list:
            data = {
                'id': item.id,
                'date': item.get('date'),
                'learnNum': item.get('learnNum'),
                'reviewNum': item.get('reviewNum'),
                'total': item.get('total'),
                'oldUser': item.get('oldUser'),
                'newUser': item.get('newUser'),
                'diffUser': item.get('diffUser'),
                'info': item.get('info'),
                'oldTotal': item.get('oldTotal'),
                'oldLearnNum': item.get('oldLearnNum'),
                'oldReviewNum': item.get('oldReviewNum'),
            }
            data_list.append(data)
        return data_list

    def set_lc_data(self, words_data, OldUser, NewUser, DiffUser):
        """Persist today's counts plus the Habitica before/after snapshot."""
        latest_lc_data = self.get_latest_lc_data()
        now = common.get_china_now()
        # If the latest stored row is from a previous day, its counts do not
        # belong to today -- baseline the "old" values at zero.
        if not common.is_same_day(latest_lc_data['date'], common.get_china_now()):
            latest_lc_data['total'] = latest_lc_data['learnNum'] = latest_lc_data['reviewNum'] = 0
        TodayBBDC = leancloud.Object.extend('BBDC')
        today_BBDC = TodayBBDC()
        today_BBDC.set('learnNum', words_data['learnNum'])
        today_BBDC.set('oldLearnNum', latest_lc_data['learnNum'])
        today_BBDC.set('reviewNum', words_data['reviewNum'])
        today_BBDC.set('oldReviewNum', latest_lc_data['reviewNum'])
        today_BBDC.set('total', words_data['total'])
        today_BBDC.set('oldTotal', latest_lc_data['total'])
        today_BBDC.set('oldUser', OldUser.toJSON())
        today_BBDC.set('newUser', NewUser.toJSON())
        today_BBDC.set('diffUser', DiffUser.toJSON())
        today_BBDC.set('info', DiffUser.get_diff_info())
        today_BBDC.set('date', now)
        today_BBDC.save()

    # Total difference between words studied today and words already exported.
    def get_today_total_lc_diff(self):
        diff = self.get_today_lc_diff()
        return diff['total']

    # Per-category difference between today's study counts and what has
    # already been recorded in LeanCloud today.
    def get_today_lc_diff(self):
        latest_lc_data = self.get_latest_lc_data()
        today_words = self.get_today_words_data()
        diff = {
            'learnNum': 0,
            'reviewNum': 0,
            'total': 0
        }
        # Same day: subtract what was already exported; otherwise nothing has
        # been exported today, so the whole day's counts are outstanding.
        if common.is_same_day(latest_lc_data['date'], common.get_china_now()):
            diff['total'] = today_words['total'] - latest_lc_data['total']
            diff['learnNum'] = today_words['learnNum'] - latest_lc_data['learnNum']
            diff['reviewNum'] = today_words['reviewNum'] - latest_lc_data['reviewNum']
        else:
            diff['total'] = today_words['total']
            diff['learnNum'] = today_words['learnNum']
            diff['reviewNum'] = today_words['reviewNum']
        return diff

    def get_word_steps(self):
        """Words per Habitica habit click (BBDC_STEPS env var, default 20)."""
        word_steps = os.getenv("BBDC_STEPS")
        if not word_steps:
            word_steps = 20
        return int(word_steps)

    # How many habit clicks the outstanding word count is worth.
    def get_do_habitica_habit_times(self):
        word_steps = self.get_word_steps()
        today_lc_diff = self.get_today_total_lc_diff()
        return int(today_lc_diff / int(word_steps))

    # Daily export: convert outstanding words into Habitica habit clicks
    # (capped at 10 per run), record the result in LeanCloud, and push a
    # notification.
    def habitica_daily_export(self):
        """Score the habit, persist the diff, and return (info, click_count)."""
        hc = Habitica()
        OldUser = hc.get_habitica_user()
        do_habitica_habit_times = self.get_do_habitica_habit_times()
        NewUser = copy.copy(OldUser)
        res = "你还没有背满应该背的单词哦! 快去背单词吧!"
        if do_habitica_habit_times > 10:
            # Cap at 10 clicks per run; record total as 10 full steps so the
            # remainder is exported on a later run.
            word_steps = self.get_word_steps()
            for time in range(10):
                NewUser = hc.do_habitica_habit_by_id(self.habit_id)
            DiffUser = NewUser - OldUser
            latest_lc_data = self.get_latest_lc_data()
            words_data = self.get_today_words_data()
            # If the latest stored row is not from today, start from zero.
            if not common.is_same_day(latest_lc_data['date'], common.get_china_now()):
                words_data['total'] = word_steps * 10
            else:
                words_data['total'] = latest_lc_data['total'] + word_steps * 10
            self.set_lc_data(words_data, OldUser, NewUser, DiffUser)
            common.send_push_plus("你的不背单词已导入 Habitica !", DiffUser.get_diff_info())
            res = DiffUser.get_diff_info()
        elif do_habitica_habit_times > 0:
            for time in range(do_habitica_habit_times):
                NewUser = hc.do_habitica_habit_by_id(self.habit_id)
            DiffUser = NewUser - OldUser
            words_data = self.get_today_words_data()
            self.set_lc_data(words_data, OldUser, NewUser, DiffUser)
            common.send_push_plus("你的不背单词已导入 Habitica !", DiffUser.get_diff_info())
            res = DiffUser.get_diff_info()
        return res, do_habitica_habit_times
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.