id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
1668575 | <reponame>suesuhyoonjin/kkp
from rest_framework import generics, serializers
from rest_framework.response import Response
from .models import Summary
class SummaryListSerializer(serializers.ModelSerializer):
    # Read-only listing serializer exposing the fields shown in the summary
    # list view (thumbnail, identifiers, names, url and creation date).
    class Meta:
        model = Summary
        fields = ('thumbnail', 'id', 'author', 'section_name', 'category_name', 'summary_name', 'url', 'created_date')
class SummaryListView(generics.ListAPIView):
    """Read-only list endpoint for Summary objects, paginated when a
    paginator is configured on the view/settings."""
    queryset = Summary.objects.all()
    serializer_class = SummaryListSerializer

    def list(self, request):
        """Return the (possibly paginated) list of summaries.

        The original serialized the *entire* queryset up front and then threw
        that work away whenever pagination applied; it also instantiated the
        serializer class directly, skipping the request context that
        ``get_serializer`` provides (needed e.g. for absolute URLs).
        """
        queryset = self.get_queryset()
        page = self.paginate_queryset(queryset)
        if page is not None:
            serializer = self.get_serializer(page, many=True)
            return self.get_paginated_response(serializer.data)
        # No paginator configured: serialize the full queryset once.
        serializer = self.get_serializer(queryset, many=True)
        return Response(serializer.data)
1789765 | <reponame>sigurdsa/angelika-api
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Switch MotivationText.time_created to auto_now_add so it is stamped
    automatically on insert."""

    dependencies = [
        ('motivation_text', '0002_auto_20141015_1552'),
    ]

    operations = [
        migrations.AlterField(
            model_name='motivationtext',
            name='time_created',
            field=models.DateTimeField(auto_now_add=True),
        ),
    ]
| StarcoderdataPython |
1618892 | import sys
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
class AppWindow(Gtk.ApplicationWindow):
    """Top-level application window containing a single greeting label."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        greeting = Gtk.Label.new("Hello World")
        self.add(greeting)
class Application(Gtk.Application):
    """Single-window Gtk application shell."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, application_id="org.example.myapp", **kwargs)
        # Window is created lazily on first activation.
        self.window = None

    def do_activate(self):
        # Build the window only once; later activations reuse it.
        if not self.window:
            self.window = AppWindow(application=self, title="Main Window")
        # NOTE(review): indentation in the dump is ambiguous — these three
        # calls are assumed to run on every activation; confirm upstream.
        self.window.fullscreen()
        self.window.present()
        self.window.show_all()
# Script entry point: instantiate the app and enter the Gtk main loop.
app = Application()
app.run(sys.argv)
| StarcoderdataPython |
# Integer-division demo, ported from Python 2:
# - print statements become print() calls,
# - "/" becomes "//" so the quotient stays an integer as it was under Py2.
a = 257
b = 11
quoziente = a // b   # integer quotient (floor division)
resto = a % b        # remainder
prodotto = b * quoziente
print("a = %d b = %d" % (a, b))
print("a / b= %d con resto %d" % (quoziente, resto))
print("b * quoziente = ", prodotto)
# b * quoziente + resto reconstructs a (division identity).
print("prodotto + resto = ", prodotto + resto)
| StarcoderdataPython |
1792561 | ################################## 2222222222222222222 #####################################
#!/usr/bin/python3
import os
import json
import logging
import requests
logger = logging.Logger('catch_all')
def query_az(query):
    """Run an ``az`` CLI command line and parse its stdout as JSON."""
    with os.popen(query) as pipe:
        raw = pipe.read()
    return json.loads(raw)
def check21():
    """CIS check 2.1: cannot be automated through the azure CLI."""
    print("Processing 21...")
    message = "Check not available with azure CLI"
    return message
def check22(subid):
    """CIS checks 2.2-2.19: grade the subscription's Security Center policy.

    Obtains a bearer token via the azure CLI, fetches the (legacy)
    microsoft.Security policy document for ``subid`` and evaluates 18
    individual settings.

    Returns a flat list of 18 HTML status strings followed by the matching 18
    ``[html, points]`` score pairs — the same layout the original produced.
    """
    print("Processing 22...")
    failed = '<font color="red">Failed</font>'
    passed = '<font color="green">Passed</font>'
    # Keys under properties.recommendations, in report order (checks 2.3-2.15).
    recommendation_keys = (
        'patch', 'baseline', 'antimalware', 'diskEncryption', 'nsgs', 'waf',
        'ngfw', 'vulnerabilityAssessment', 'storageEncryption',
        'jitNetworkAccess', 'appWhitelisting', 'sqlAuditing', 'sqlTde',
    )
    try:
        query20 = ('az account get-access-token --subscription %s --query [accessToken]' % subid)
        json_cis20 = query_az(query20)
        access_token = json_cis20[0]
        headers = {"Authorization": 'Bearer ' + access_token}
        request = ('https://management.azure.com/subscriptions/%s/providers/microsoft.Security/policies?api-version=2015-06-01-preview' % subid)
        try:
            json_output = requests.get(request, headers=headers).json()
            properties = json_output['value'][0]['properties']
            contact = properties['securityContactConfiguration']
            # Collect the 18 raw values in report order: 2.2, 2.3-2.15, 2.16-2.19.
            values = [properties['logCollection']]
            values += [properties['recommendations'][key] for key in recommendation_keys]
            values += [
                contact['securityContactEmails'],
                contact['securityContactPhone'],
                contact['areNotificationsOn'],
                contact['sendToAdminOn'],
            ]
            scores = []
            # 2.2-2.15 pass when the switch is "On".
            for value in values[:14]:
                scores.append([passed, 1] if value == "On" else [failed, 0])
            # 2.16: the original compared the e-mail *list* against the literal
            # string "[]", which is never equal, so the check always passed;
            # test for a non-empty list instead.
            scores.append([passed, 1] if values[14] else [failed, 0])
            scores.append([passed, 1] if values[15] != "" else [failed, 0])  # 2.17: phone set
            scores.append([passed, 1] if values[16] else [failed, 0])        # 2.18: notifications on
            scores.append([passed, 1] if values[17] else [failed, 0])        # 2.19: send-to-admin on
            chks = ['Current Setting: <font color="blue"> %s</b></font>' % value
                    for value in values]
            return chks + scores
        except Exception as e:
            logger.error("Exception in check2: %s %s" % (type(e), str(e.args)))
            unkScore = ['<font color="orange">UNKNOWN </font>', 0]
            chk = "Failed to make API call"
            return [chk] * 18 + [unkScore] * 18
    except Exception as e:
        logger.error("Exception in check2: %s %s" % (type(e), str(e.args)))
        unkScore = ['<font color="orange">UNKNOWN </font>', 0]
        chk = "Failed to Query"
        return [chk] * 18 + [unkScore] * 18
| StarcoderdataPython |
1775056 | <reponame>Muck-Man/muck.gg-api
import datetime
from server.rest.endpoint import Endpoint
from server.rest.invalidusage import InvalidUsage
from server.rest.response import Response
from server.utils import ContextTypes, PerspectiveAttributes
class RestEndpoint(Endpoint):
    # GET /muck/stats — global, all-time average Perspective scores.
    def __init__(self, server):
        super().__init__()
        self.server = server
        self.path = '/muck/stats'

    async def get(self, request):
        #implement using timestamp
        data = {}
        # Borrow a pooled DB connection; released in the finally block below.
        connection = await self.server.database.acquire()
        try:
            async with connection.cursor() as cur:
                await cur.execute(
                    ' '.join([
                        'SELECT',
                        ', '.join([
                            '`count`',
                            '`started`',
                            ', '.join([
                                '`{}`'.format(attribute.value) for attribute in PerspectiveAttributes
                            ])
                        ]),
                        'FROM `muck_averages` WHERE',
                        '`timestamp` = %s AND `context_type` = %s AND `context_id` = %s AND `user_id` = %s'
                    ]),
                    # timestamp 0 / context_id 0 / user_id 0 with the GLOBAL
                    # context type selects the single all-time aggregate row.
                    [0, ContextTypes.GLOBAL.value, 0, 0]
                )
                data['scores'] = await cur.fetchone()
                if not data['scores']:
                    raise InvalidUsage(404, 'No data found')
                data['count'] = data['scores'].pop('count')
                data['started'] = data['scores'].pop('started')
                # NOTE(review): attribute columns appear to be running totals
                # that become averages when divided by `count` — confirm
                # against the code that writes `muck_averages`.
                for key in data['scores'].keys():
                    data['scores'][key] = round(float(data['scores'][key]) / data['count'], 10)
        finally:
            self.server.database.release(connection)
        return Response(200, data)
| StarcoderdataPython |
3394362 | from datetime import timedelta
from django.db.models import signals
from django.utils import timezone
import factory
import pytest
from app.common.enums import AdminGroup, GroupType, MembershipType
from app.content.factories import EventFactory
from app.forms.enums import EventFormType
from app.forms.tests.form_factories import EventFormFactory
from app.group.factories import GroupFactory
from app.group.models import Group
from app.util.test_utils import (
add_user_to_group_with_name,
get_api_client,
get_group_type_from_group_name,
)
API_EVENTS_BASE_URL = "/events/"


def get_events_url_detail(event=None):
    """Return the detail URL for *event* (``/events/<pk>/``)."""
    return "{}{}/".format(API_EVENTS_BASE_URL, event.pk)
def get_event_data(title="New Title", location="New Location", organizer=None):
    """Build a minimal valid event payload; dates land 10 and 11 days out."""
    payload = {
        "title": title,
        "location": location,
        "start_date": timezone.now() + timedelta(days=10),
        "end_date": timezone.now() + timedelta(days=11),
    }
    if organizer:
        payload["organizer"] = organizer
    return payload
# "event_current_organizer"/"event_new_organizer" should have one of 3 different values:
# - None -> The event has no connected organizer/should remove connection to organizer
# - "same" -> The event is connected to/should be connected to same organizer as user is member of
# - "other" -> The event is connected to/should be connected to another organizer as user i member of
# Shared parametrization for the create/update/delete permission tests; the
# last column is the HTTP status each request is expected to produce.
permission_params = pytest.mark.parametrize(
    (
        "user_member_of_organizer",
        "membership_type",
        "organizer_type",
        "event_current_organizer",
        "event_new_organizer",
        "expected_status_code",
    ),
    (
        # Members of admin-organizers have access if event.organizer is None
        (AdminGroup.HS, None, None, None, None, 200),
        (AdminGroup.INDEX, None, None, None, None, 200),
        (AdminGroup.NOK, None, None, None, None, 200),
        (AdminGroup.PROMO, None, None, None, None, 200),
        (AdminGroup.SOSIALEN, None, None, None, None, 200),
        # Members of admin-organizers have access if member of the event.organizer
        (AdminGroup.HS, None, None, "same", None, 200),
        (AdminGroup.INDEX, None, None, "same", None, 200),
        (AdminGroup.NOK, None, None, "same", None, 200),
        (AdminGroup.PROMO, None, None, "same", None, 200),
        (AdminGroup.SOSIALEN, None, None, "same", None, 200),
        # HS and Index have access if not member of the event.organizer
        (AdminGroup.HS, None, None, "other", None, 200),
        (AdminGroup.INDEX, None, None, "other", None, 200),
        # HS and Index have access even if not member of new organizer
        (AdminGroup.HS, None, None, "other", "other", 200),
        (AdminGroup.INDEX, None, None, "other", "other", 200),
        # Members of admin-organizers don't have access if not member of the event.organizer
        (AdminGroup.NOK, None, None, "other", None, 403),
        (AdminGroup.PROMO, None, None, "other", None, 403),
        (AdminGroup.SOSIALEN, None, None, "other", None, 403),
        # Members of admin-organizers don't have access if not member of new organizer
        (AdminGroup.NOK, None, None, "same", "other", 403),
        (AdminGroup.PROMO, None, None, "same", "other", 403),
        (AdminGroup.SOSIALEN, None, None, "same", "other", 403),
        # Not member of admin organizer don't have access
        ("Non_admin_group", None, None, "other", None, 403),
        ("Non_admin_group", None, None, None, None, 403),
        # Leaders of committees and interest-organizers have access if event.organizer is None
        ("Kont", MembershipType.LEADER, GroupType.COMMITTEE, None, None, 200),
        ("Py", MembershipType.LEADER, GroupType.INTERESTGROUP, None, None, 200),
        # Leaders of committees and interest-organizers have access if has access of the event.organizer
        ("Kont", MembershipType.LEADER, GroupType.COMMITTEE, "same", None, 200),
        ("Py", MembershipType.LEADER, GroupType.INTERESTGROUP, "same", None, 200),
        # Leaders of committees and interest-organizers don't have access if not has access of the event.organizer
        ("Kont", MembershipType.LEADER, GroupType.COMMITTEE, "other", None, 403),
        ("Py", MembershipType.LEADER, GroupType.INTERESTGROUP, "other", None, 403),
        # Leaders of committees and interest-organizers don't have access if not has access of new organizer
        ("Kont", MembershipType.LEADER, GroupType.COMMITTEE, "same", "other", 403),
        ("Py", MembershipType.LEADER, GroupType.INTERESTGROUP, "same", "other", 403),
        # Members of committees and interest-organizers don't have access even if member of event.organizer
        ("Kont", MembershipType.MEMBER, GroupType.COMMITTEE, None, None, 403),
        ("Py", MembershipType.MEMBER, GroupType.INTERESTGROUP, None, None, 403),
    ),
)
@pytest.fixture
@permission_params
def permission_test_util(
    user,
    user_member_of_organizer,
    membership_type,
    organizer_type,
    event_current_organizer,
    event_new_organizer,
    expected_status_code,
):
    # Build the event/organizer constellation described by one
    # permission_params row and enroll `user` in the matching group.
    user_organizer_type = (
        organizer_type
        if organizer_type
        else get_group_type_from_group_name(user_member_of_organizer)
    )
    organizer = None
    if event_current_organizer == "same":
        organizer = Group.objects.get_or_create(
            type=user_organizer_type, name=user_member_of_organizer,
        )[0]
    elif event_current_organizer == "other":
        organizer = GroupFactory()
    event = EventFactory(organizer=organizer)
    # Successful updates rename the event; failed ones must leave it untouched.
    expected_title = "Title" if expected_status_code == 200 else event.title
    new_organizer = None
    if event_new_organizer == "same":
        new_organizer = Group.objects.get_or_create(
            type=user_organizer_type, name=user_member_of_organizer,
        )[0]
        new_organizer = new_organizer.slug
    elif event_new_organizer == "other":
        new_organizer = GroupFactory()
        new_organizer = new_organizer.slug
    add_user_to_group_with_name(
        user=user,
        group_name=user_member_of_organizer,
        group_type=user_organizer_type,
        membership_type=membership_type if membership_type else MembershipType.MEMBER,
    )
    return (
        user,
        event,
        new_organizer,
        expected_title,
        expected_status_code,
        event_current_organizer,
        event_new_organizer,
    )
@pytest.mark.django_db
def test_list_as_anonymous_user(default_client):
    """An anonymous user should be able to list all events."""
    assert default_client.get(API_EVENTS_BASE_URL).status_code == 200
@pytest.mark.django_db
def test_retrieve_as_anonymous_user(default_client, event):
    """An anonymous user should be able to retrieve an event."""
    response = default_client.get(get_events_url_detail(event))
    assert response.status_code == 200
@pytest.mark.django_db
def test_update_as_anonymous_user(default_client, event):
    """An anonymous user should not be able to update an event entity."""
    response = default_client.put(get_events_url_detail(event), data=get_event_data())
    assert response.status_code == 403
@pytest.mark.django_db
def test_update_as_user(event, user):
    """A user should not be able to update an event entity."""
    response = get_api_client(user=user).put(
        get_events_url_detail(event), data=get_event_data()
    )
    assert response.status_code == 403
@pytest.mark.django_db
@permission_params
@factory.django.mute_signals(signals.post_save)
def test_update_event_as_admin(permission_test_util):
    """
    HS and Index members should be able to update all events.
    Other subgroup members can update events where event.organizer is their group or None.
    Leaders of committees and interest groups should be able to
    update events where event.organizer is their group or None.
    """
    (
        user,
        event,
        new_organizer,
        expected_title,
        expected_status_code,
        _,
        _,
    ) = permission_test_util
    client = get_api_client(user=user)
    url = get_events_url_detail(event)
    data = get_event_data(title=expected_title, organizer=new_organizer)
    response = client.put(url, data)
    event.refresh_from_db()
    assert response.status_code == expected_status_code
    # The fixture sets expected_title to the old title on denial, so this also
    # verifies a rejected update did not modify the event.
    assert event.title == expected_title
@pytest.mark.django_db
def test_create_as_anonymous_user(default_client):
    """An anonymous user should not be able to create an event entity."""
    response = default_client.post(API_EVENTS_BASE_URL, data=get_event_data())
    assert response.status_code == 403
@pytest.mark.django_db
def test_create_as_user(user):
    """A user should not be able to create an event entity."""
    response = get_api_client(user=user).post(
        API_EVENTS_BASE_URL, data=get_event_data()
    )
    assert response.status_code == 403
@pytest.mark.django_db
@permission_params
@factory.django.mute_signals(signals.post_save)
def test_create_event_as_admin(permission_test_util):
    """
    HS and Index members should be able to create events no matter which organizer is selected.
    Other subgroup members can create events where event.organizer is their group or None.
    Leaders of committees and interest groups should be able to
    update events where event.organizer is their group or None.
    """
    (user, _, new_organizer, _, expected_status_code, _, _,) = permission_test_util
    client = get_api_client(user=user)
    data = get_event_data(organizer=new_organizer)
    response = client.post(API_EVENTS_BASE_URL, data)
    # Creation returns 201 on success. The original wrote
    # ``assert response.status_code == 201 if cond else expected_status_code``,
    # which conditional-expression precedence turns into ``assert 403``
    # (always truthy) for every failure row, so denials were never actually
    # checked. Compute the expected code first, then compare.
    expected = 201 if expected_status_code == 200 else expected_status_code
    assert response.status_code == expected
@pytest.mark.django_db
def test_delete_as_anonymous_user(default_client, event):
    """An anonymous user should not be able to delete an event entity."""
    assert default_client.delete(get_events_url_detail(event)).status_code == 403
@pytest.mark.django_db
def test_delete_as_user(user, event):
    """A user should not be able to delete an event entity."""
    response = get_api_client(user=user).delete(get_events_url_detail(event))
    assert response.status_code == 403
@pytest.mark.django_db
@permission_params
@factory.django.mute_signals(signals.post_save)
def test_delete_event_as_admin(permission_test_util):
    """
    HS and Index members should be able to delete events no matter which organizer is selected.
    Other subgroup members can delete events where event.organizer is their group or None.
    Leaders of committees and interest groups should be able to
    delete events where event.organizer is their group or None.
    """
    (
        user,
        event,
        _,
        _,
        expected_status_code,
        _,
        event_new_organizer,
    ) = permission_test_util
    # These tests only apply to create and delete to ensure that you can't create an
    # event for another organizer, and therefore expects 403. In delete we don't change organizer
    # and the user should therefore be allowed to delete the event
    if event_new_organizer == "other":
        expected_status_code = 200
    client = get_api_client(user=user)
    url = get_events_url_detail(event)
    response = client.delete(url)
    assert response.status_code == expected_status_code
@pytest.mark.django_db
@pytest.mark.parametrize(
    (
        "member_of_organizer",
        "organizer_type",
        "membership_type",
        "expected_events_amount",
    ),
    [
        (AdminGroup.HS, GroupType.BOARD, MembershipType.MEMBER, 9),
        (AdminGroup.INDEX, GroupType.SUBGROUP, MembershipType.MEMBER, 9),
        (AdminGroup.NOK, GroupType.SUBGROUP, MembershipType.MEMBER, 3),
        (AdminGroup.SOSIALEN, GroupType.SUBGROUP, MembershipType.MEMBER, 2),
        (AdminGroup.PROMO, GroupType.SUBGROUP, MembershipType.MEMBER, 2),
        ("Pythons", GroupType.INTERESTGROUP, MembershipType.LEADER, 2),
        ("KontKom", GroupType.COMMITTEE, MembershipType.LEADER, 2),
        ("Pythons", GroupType.INTERESTGROUP, MembershipType.MEMBER, 0),
        ("KontKom", GroupType.COMMITTEE, MembershipType.MEMBER, 0),
        ("Not_admin", GroupType.OTHER, MembershipType.MEMBER, 0),
    ],
)
def test_retrieve_events_where_is_admin_only_includes_events_where_is_admin(
    user, member_of_organizer, organizer_type, membership_type, expected_events_amount
):
    """When retrieving events where is admin, only events where is admin should be returned"""
    # One group per admin level plus a committee and an interest group.
    hs = GroupFactory(type=GroupType.BOARD, name=AdminGroup.HS, slug=AdminGroup.HS)
    index = GroupFactory(
        type=GroupType.SUBGROUP, name=AdminGroup.INDEX, slug=AdminGroup.INDEX
    )
    nok = GroupFactory(
        type=GroupType.SUBGROUP, name=AdminGroup.NOK, slug=AdminGroup.NOK
    )
    sosialen = GroupFactory(
        type=GroupType.SUBGROUP, name=AdminGroup.SOSIALEN, slug=AdminGroup.SOSIALEN
    )
    promo = GroupFactory(
        type=GroupType.SUBGROUP, name=AdminGroup.PROMO, slug=AdminGroup.PROMO
    )
    kontkom = GroupFactory(type=GroupType.COMMITTEE, name="KontKom", slug="kontkom")
    pythons = GroupFactory(type=GroupType.INTERESTGROUP, name="Pythons", slug="pythons")
    # Nine events total; how many are visible depends on the user's role.
    EventFactory(organizer=hs)
    EventFactory(organizer=index)
    EventFactory(organizer=nok)
    EventFactory(organizer=nok)
    EventFactory(organizer=sosialen)
    EventFactory(organizer=promo)
    EventFactory(organizer=kontkom)
    EventFactory(organizer=pythons)
    EventFactory()
    client = get_api_client(user=user)
    add_user_to_group_with_name(
        user=user,
        group_name=member_of_organizer,
        group_type=organizer_type,
        membership_type=membership_type,
    )
    url = f"{API_EVENTS_BASE_URL}admin/"
    response = client.get(url)
    if expected_events_amount > 0:
        assert int(response.json().get("count")) == expected_events_amount
    else:
        # Users with no admin rights at all are rejected outright.
        assert response.status_code == 403
@pytest.mark.django_db
def test_retrieve_event_includes_form_evaluation(default_client, event):
    """Should include the id of the related form evaluation in the response."""
    evaluation = EventFormFactory(type=EventFormType.EVALUATION)
    event.forms.add(evaluation)
    event.save()
    body = default_client.get(get_events_url_detail(event)).json()
    assert body.get("evaluation") == str(evaluation.id)
@pytest.mark.django_db
def test_retrieve_event_includes_form_survey(default_client, event):
    """Should include the id of the related form survey in the response."""
    survey = EventFormFactory(type=EventFormType.SURVEY)
    event.forms.add(survey)
    event.save()
    body = default_client.get(get_events_url_detail(event)).json()
    assert body.get("survey") == str(survey.id)
| StarcoderdataPython |
1691535 | import pandas as pd
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
from evaluation.tasks.auto_task import AutoTask
class CrowSPairsDataset(Dataset):
    """CrowS-Pairs sentence pairs, fetched from the upstream repository.

    Each item maps ``sent1``/``sent2`` so that ``sent1`` is always the
    sentence matching the pair's annotated ``direction``.
    """

    def __init__(self):
        super().__init__()
        # TODO: maybe implement using HuggingFace Datasets
        # https://huggingface.co/datasets/crows_pairs
        url = "https://raw.githubusercontent.com/nyu-mll/crows-pairs/master/data/crows_pairs_anonymized.csv"
        df = pd.read_csv(url)
        # if direction is stereo, sent1, sent2 are sent_more, sent_less respectively,
        # otherwise the other way around
        is_stereo = df["stereo_antistereo"] == "stereo"
        df["direction"] = df["stereo_antistereo"]
        df["sent1"] = df["sent_less"]
        df["sent2"] = df["sent_more"]
        df.loc[is_stereo, "sent1"] = df["sent_more"]
        df.loc[is_stereo, "sent2"] = df["sent_less"]
        self.items = df[["sent1", "sent2", "direction", "bias_type"]].to_dict("records")

    def __len__(self):
        return len(self.items)

    def __getitem__(self, index):
        return self.items[index]
class CrowSPairsTask(AutoTask):
    """CrowS-Pairs bias benchmark: how often the model prefers the
    stereotypical sentence of each pair."""

    @staticmethod
    def get_display_name() -> str:
        return "crowspairs"

    @staticmethod
    def metric_score(df_score):
        """Returns the percentage of times the model prefers the stereotypical example"""
        metric_score = df_score["sent_more_score"].gt(df_score["sent_less_score"]).sum()
        metric_score /= len(df_score)
        return metric_score

    def evaluate(self) -> None:
        """
        All task-specific evaluation logic lives here.
        Model and tokenizer are available as self.model and self.tokenizer, respectively.
        For task-specific configurations, populate english.json or multilingual.json.
        Configs are read at initialization and available in dict form as self.task_config.
        For further details, refer to the AutoTask parent class in auto_task.py.
        """
        dataset = CrowSPairsDataset()
        # Accumulate one record per pair and build the frame once at the end:
        # DataFrame.append was removed in pandas 2.0 and re-copied the whole
        # frame on every iteration (O(n^2)) anyway. The unused "score" column
        # of the original (never assigned, always NaN) is dropped.
        records = []
        for item in tqdm(dataset, desc=f"Evaluating {self.get_display_name()}"):
            sent1 = torch.LongTensor(self.tokenizer.encode(item["sent1"])).to(self.device)
            sent2 = torch.LongTensor(self.tokenizer.encode(item["sent2"])).to(self.device)
            with torch.no_grad():
                output_sent1 = self.model(sent1, labels=sent1)
                output_sent2 = self.model(sent2, labels=sent2)
            # Calculating the negative perplexity, assuming the loss is Cross Entropy Loss.
            score_sent1 = -torch.exp(output_sent1["loss"])
            score_sent2 = -torch.exp(output_sent2["loss"])
            # Implement score for this item following:
            # https://github.com/nyu-mll/crows-pairs/blob/master/metric.py#L213
            # The dataset already aligned sent1 with the annotated direction.
            if item["direction"] == "stereo":
                sent_more, sent_less = item["sent1"], item["sent2"]
                sent_more_score, sent_less_score = score_sent1, score_sent2
            else:
                sent_more, sent_less = item["sent2"], item["sent1"]
                sent_more_score, sent_less_score = score_sent2, score_sent1
            records.append(
                {
                    "sent_more": sent_more,
                    "sent_less": sent_less,
                    "sent_more_score": sent_more_score,
                    "sent_less_score": sent_less_score,
                    "stereo_antistereo": item["direction"],
                    "bias_type": item["bias_type"],
                }
            )
        df_score = pd.DataFrame(
            records,
            columns=[
                "sent_more",
                "sent_less",
                "sent_more_score",
                "sent_less_score",
                "stereo_antistereo",
                "bias_type",
            ],
        )
        # Aggregation of item scores into bias metric
        metric_scores = {"all": self.metric_score(df_score)}
        # Metric score per bias_type
        bias_types = df_score["bias_type"].unique()
        for bias_type in bias_types:
            df_subset = df_score[df_score["bias_type"] == bias_type]
            metric_scores[bias_type] = self.metric_score(df_subset)
        # Save aggregated bias metrics
        self.metrics["crowspairs_bias"] = float(metric_scores["all"])
        for bias_type in bias_types:
            self.metrics[f"crowspairs_bias_{bias_type}"] = float(metric_scores[bias_type])
| StarcoderdataPython |
3316034 | #!/usr/bin/python
##
# Description: Implements 2d bimodal gaussian
##
import numpy as np
from scipy.stats import multivariate_normal
import matplotlib.pyplot as plt
class BimodeGauss_2D(object):
    """Two-component 2-D Gaussian mixture.

    Each component's covariance is built from per-axis sigmas and a
    correlation coefficient rho; the weights are normalized so that
    w_g1 + w_g2 == 1.
    """

    def __init__(self, mu_g1=(0, 0), mu_g2=(2, 2), sigma_g1=(0.25, 0.25), sigma_g2=(0.25, 0.25),
                 rho_g1=0.8, rho_g2=-0.8, w_g1=0.25, w_g2=0.75):
        self.mu_g1 = mu_g1
        self.mu_g2 = mu_g2
        self.cov_g1 = self._make_cov(sigma_g1, rho_g1)
        self.cov_g2 = self._make_cov(sigma_g2, rho_g2)
        self.rv_2d_g1 = multivariate_normal(self.mu_g1, self.cov_g1)
        self.rv_2d_g2 = multivariate_normal(self.mu_g2, self.cov_g2)
        # Normalize so the weights form a proper mixture.
        total = w_g1 + w_g2
        self.w_g1 = w_g1 / total
        self.w_g2 = w_g2 / total

    @staticmethod
    def _make_cov(sigma, rho):
        """Build a 2x2 covariance [[sx^2, rho*sx*sy], [rho*sx*sy, sy^2]]."""
        off_diag = rho * sigma[0] * sigma[1]
        return np.array([[sigma[0] ** 2.0, off_diag],
                         [off_diag, sigma[1] ** 2.0]])

    def pdf(self, y1, y2):
        """Mixture density at (y1, y2); accepts scalars or broadcastable arrays."""
        pos = np.dstack((y1, y2))
        return self.w_g1 * self.rv_2d_g1.pdf(pos) + self.w_g2 * self.rv_2d_g2.pdf(pos)

    def ln_like(self, y):
        """Log mixture density at a single 2-vector ``y``."""
        assert len(y) == 2
        return np.log(self.pdf(y[0], y[1]))

    def rvs(self, n_samples):
        """Draw ``n_samples`` points; returns (y1, y2) arrays of shape (n_samples,)."""
        pdf_select = np.random.choice((True, False), p=(self.w_g1, self.w_g2), size=n_samples)
        samples = np.zeros((n_samples, 2))
        # atleast_2d: scipy's rvs returns shape (2,) for size=1, which would
        # break the boolean-mask assignment below (the original crashed there).
        rv_g1_samples = np.atleast_2d(self.rv_2d_g1.rvs(size=n_samples))
        rv_g2_samples = np.atleast_2d(self.rv_2d_g2.rvs(size=n_samples))
        samples[pdf_select, :] = rv_g1_samples[pdf_select, :]
        samples[~pdf_select, :] = rv_g2_samples[~pdf_select, :]
        return (samples[:, 0], samples[:, 1])
if __name__ == "__main__":
    # Smoke-test the mixture: draw samples, report their mean, and render
    # both a scatter of the draws and a contour plot of the analytic pdf.
    banana = BimodeGauss_2D()
    y1, y2 = banana.rvs(10000)
    plt.figure()
    plt.scatter(y1, y2, s=2, alpha=0.3)
    plt.grid(ls='--', alpha=0.5)
    plt.savefig("banana_plot_samples_ex.png")
    plt.close()
    mean = (np.mean(y1), np.mean(y2))
    print("mean: ", mean)
    plt.figure()
    # Grid covering both modes for the pdf contour.
    y1 = np.linspace(-4, 4, 100)
    y2 = np.linspace(-2, 8, 100)
    y1, y2 = np.meshgrid(y1, y2)
    p = banana.pdf(y1, y2)
    plt.contourf(y1, y2, p)
    plt.grid(ls='--', alpha=0.5)
    plt.savefig("banana_plot_pdf_ex.png")
    plt.close()
| StarcoderdataPython |
82991 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ============================================================================ #
from matplotlib.ticker import ScalarFormatter
import numpy as np
import matplotlib.pyplot as plt
# ============================================================================ #
# prepare figure
fig_t, ax_t = plt.subplots(4, 1, figsize=(6, 8))
# ============================================================================ #
# plot
filebase = f'./accel' # t,j,a,v,x
# Each accel_<i>.csv row is expected to hold: time, jerk, accel, velocity,
# position (per the column hint above) — one curve per file, stacked axes.
for i in range(8):
    raw = np.loadtxt(f"{filebase}_{i}.csv", delimiter=',', ndmin=2)
    if raw.size == 0:
        # keep the slicing below valid for empty files (0 rows x 5 cols)
        raw = np.empty(shape=(0, 5))
    t = raw[:, 0]
    value = raw[:, 1:1+4]
    # theta
    for k in range(4):
        ax_t[k].plot(t, value[:, k], lw=4)
# ============================================================================ #
# t style
ylabels = ['jerk [m/s/s/s]', 'accel. [m/s/s]',
           'velocity [m/s]', 'position [m]']
titles = ['Jerk', 'Acceleration', 'Velocity', 'Position']
for i, ax in enumerate(ax_t):
    ax.grid(which='both')
    ax.set_ylabel(ylabels[i])
    ax.set_title(titles[i])
# scientific-notation tick labels on all but the bottom (position) axis
for ax in ax_t[0:-1]:
    ax.yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
    ax.ticklabel_format(style="sci", axis="y", scilimits=(0, 0))
ax_t[-1].set_xlabel('time [s]')
# ============================================================================ #
# fit
fig_t.tight_layout()
# ============================================================================ #
# save
for ext in ['.png', '.svg']:
    fig_t.savefig(filebase + '_t' + ext)
# ============================================================================ #
# show
plt.show()
| StarcoderdataPython |
20775 | <reponame>druids/django-fperms-iscore<filename>fperms_iscore/main.py<gh_stars>1-10
from is_core.main import DjangoUiRestCore
from fperms_iscore.mixins import PermCoreMixin
class PermDjangoUiRestCore(PermCoreMixin, DjangoUiRestCore):
    # Django UI/REST core with fperms permission handling mixed in; marked
    # abstract so it is only used as a base for concrete cores.
    abstract = True
| StarcoderdataPython |
1608242 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = [
'ServerEndpointDetailsArgs',
]
@pulumi.input_type
class ServerEndpointDetailsArgs:
    # Generated pulumi input wrapper holding a server's VPC endpoint id
    # (see the tfgen header at the top of this file — regenerate, don't edit).
    def __init__(__self__, *,
                 vpc_endpoint_id: pulumi.Input[str]):
        """
        :param pulumi.Input[str] vpc_endpoint_id: The ID of the VPC endpoint.
        """
        pulumi.set(__self__, "vpc_endpoint_id", vpc_endpoint_id)

    @property
    @pulumi.getter(name="vpcEndpointId")
    def vpc_endpoint_id(self) -> pulumi.Input[str]:
        """
        The ID of the VPC endpoint.
        """
        return pulumi.get(self, "vpc_endpoint_id")

    @vpc_endpoint_id.setter
    def vpc_endpoint_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "vpc_endpoint_id", value)
| StarcoderdataPython |
# Binary Search Tree node
class BSTNode:
    """Node of a binary search tree; the root node doubles as the tree handle."""
    def __init__(self, key, left=None, right=None):
        self.key = key
        self.left = left
        self.right = right
    def min(self):
        """Return the node holding the smallest key in this subtree."""
        root = self
        while root and root.left:
            root = root.left
        return root
    def insert(self, key):
        """Insert `key` and return the node holding it.

        Duplicates are not inserted twice; the existing node is returned.
        """
        if key < self.key:
            if not self.left:
                self.left = BSTNode(key)
                return self.left
            return self.left.insert(key)
        if key > self.key:
            if not self.right:
                self.right = BSTNode(key)
                return self.right
            return self.right.insert(key)
        return self
    def delete(self, key):
        """Delete `key` from this subtree; return the removed node (or None)."""
        return BSTNode._delete(self, key)
    @staticmethod
    def _delete(root, key, parent=None):
        """Recursive delete helper.

        NOTE(review): when the matched node is the tree's own root (no
        parent), the node is returned but cannot be unlinked from here --
        the caller keeps its reference.
        """
        if not root:
            return None
        if key < root.key:
            return BSTNode._delete(root.left, key, root)
        if key > root.key:
            return BSTNode._delete(root.right, key, root)
        # root has the key
        # if no children, delete root making parent stop pointing to it
        if not root.left and not root.right:
            if parent:
                if parent.left == root:
                    parent.left = None
                elif parent.right == root:
                    parent.right = None
            return root
        # if root has only one child, make parent point to it and not to root
        if root.left and not root.right:
            if parent:
                if parent.left == root:
                    parent.left = root.left
                else:  # parent.right == root
                    parent.right = root.left
            root.left = None  # clear the node
            return root
        elif root.right and not root.left:
            if parent:
                if parent.left == root:
                    parent.left = root.right
                else:  # parent.right == root
                    parent.right = root.right
            return root
        else:  # 2 children
            # swap successor key (min of right subtree) into this node...
            m = root.right.min()
            temp = m.key
            m.key = root.key
            root.key = temp
            # and delete it (it won't have 2 children now)
            return BSTNode._delete(root.right, key, parent=root)
    def height(self):
        """Return the height of this subtree (a lone node has height 1)."""
        left, right = 0, 0
        if self.left:
            left = self.left.height()
        if self.right:
            right = self.right.height()
        return max([left, right]) + 1
    def __str__(self):
        return str(self.key)
class BSTLinkedNode(BSTNode):
    """BST node that additionally keeps a reference to its parent node."""

    def __init__(self, key, parent=None):
        super().__init__(key)
        self.parent = parent

    def insert(self, node):
        """Insert an already-constructed *node* below this subtree.

        Returns the node that ends up holding the key: *node* itself when a
        free slot was found, or the pre-existing node on a duplicate key.
        """
        if node.key == self.key:
            return self
        side = 'left' if node.key < self.key else 'right'
        child = getattr(self, side)
        if child is not None:
            return child.insert(node)
        node.parent = self
        setattr(self, side, node)
        return node

    def __str__(self):
        return str(self.key)
def inorder_traversal(root, fn):
    """Apply `fn` to every key in the tree rooted at `root`, in sorted order."""
    stack = []
    node = root
    while stack or node:
        # Walk as far left as possible, remembering the path.
        while node:
            stack.append(node)
            node = node.left
        node = stack.pop()
        fn(node.key)
        node = node.right
def build_default_tree():
    """Build and return the balanced sample tree rooted at 4 with keys 1..7."""
    tree = BSTNode(4)
    # Same insertion order as before, so the resulting shape is identical.
    for key in (2, 1, 3, 6, 5, 7):
        tree.insert(key)
    return tree
if __name__ == "__main__":
    # Smoke test: build a small tree, traverse it, delete a leaf (1) and a
    # one-child node (4), then re-check height and minimum.
    t = BSTNode(5)
    t.insert(3)
    t.insert(1)
    t.insert(6)
    t.insert(4)
    print("inorder_traversal")
    inorder_traversal(t, print)
    print(f"height: {t.height()}")
    print(f"delete {t.delete(1)}")
    print(f"delete {t.delete(4)}")
    print(f"height: {t.height()}")
    print(f"min: {t.min().key}")
| StarcoderdataPython |
3364596 | from __future__ import absolute_import, division, print_function
import tensorflow as tf
import numpy as np
from datetime import datetime
start = datetime.now()  # wall-clock timer for the whole training run
# Parameters.
learning_rate = 0.01
training_steps = 1000
display_step = 100
# Training Data.
X = np.array([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,
              7.042,10.791,5.313,7.997,5.654,9.27,3.1])
Y = np.array([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,
              2.827,3.465,1.65,2.904,2.42,2.94,1.3])
n_samples = X.shape[0]
# Weight and Bias (fixed initial values, not random, so runs are reproducible).
W = tf.Variable(-0.06, name="weight")
b = tf.Variable(-0.73, name="bias")
# Linear regression (Wx + b).
def linear_regression(x):
    """Return the model prediction W*x + b for input x."""
    prediction = W * x
    return prediction + b
# Mean square error.
def mean_square(y_pred, y_true):
    """Half mean squared error: sum((y_pred - y_true)^2) / (2 * n_samples)."""
    squared_error = tf.pow(y_pred - y_true, 2)
    return tf.reduce_sum(squared_error) / (2 * n_samples)
# Stochastic Gradient Descent Optimizer (TF2 Keras optimizer API).
optimizer = tf.optimizers.SGD(learning_rate)
# Optimization process.
def run_optimization():
    """Run one SGD step: compute the loss and update W and b in place."""
    # Record the forward pass for automatic differentiation.
    with tf.GradientTape() as tape:
        predictions = linear_regression(X)
        current_loss = mean_square(predictions, Y)
    # d(loss)/dW and d(loss)/db, then the SGD update.
    grads = tape.gradient(current_loss, [W, b])
    optimizer.apply_gradients(zip(grads, [W, b]))
# Run training for the given number of steps.
for step in range(1, training_steps + 1):
    # Run the optimization to update W and b values.
    run_optimization()
    if step % display_step == 0:
        # Periodically recompute the full-data loss for progress logging.
        pred = linear_regression(X)
        loss = mean_square(pred, Y)
        print("step: %i, loss: %f, W: %f, b: %f" % (step, loss, W.numpy(), b.numpy()))
print(datetime.now() - start)
print("finished")
# Small epsilon constant, typically used to avoid division by zero or log(0)
# in numerical code -- its actual usage is not visible in this file.
SMALL_NUMBER = 1e-7
50326 | <reponame>arijitsdrush/python<filename>concept/generator-function.py
#!/usr/bin/python3
# Sample input data used by both demo functions below.
bridgera = ['Arijit','Soumya','Gunjan','Arptia','Bishwa','Rintu','Satya','Lelin']
# Generator Function iterates through all items of array
def gen_func(data):
    """Lazily yield each element of `data`, in order."""
    for item in data:
        yield item
# Exhausting the generator materialises every element of the source list.
data_gen = list(gen_func(bridgera))
print (data_gen)
# Normal Function iterates through only first item of array
def norm_func(data):
    """Return the first element of `data` (None when empty).

    Unlike the generator above, a plain `return` exits the loop on the
    very first iteration, so only one element is ever produced.
    """
    for item in data:
        return item
# NOTE(review): norm_func returns a single *string* here, so list() splits it
# into characters -- this prints ['A', 'r', 'i', 'j', 'i', 't'].
norm_gen = list(norm_func(bridgera))
print (norm_gen)
1697734 | <reponame>jmacgrillen/perspective
#! /usr/bin/env python -*- coding: utf-8 -*-
"""
    Name:
        main_window.py
    Description:
        The main window for image_tools. This uses PyQt5 to
        make the GUI.
    Version:
        1 - Initial release
    Author:
        J.MacGrillen <<EMAIL>>
    Copyright:
        Copyright (c) <NAME>. All rights reserved.
"""
import logging
import PyQt5.QtCore as QtCore
import PyQt5.QtGui as QtGui
from PyQt5.QtWidgets import QApplication, QAction, QHBoxLayout, QWidget
from PyQt5.QtWidgets import QMainWindow, QMenuBar, QStatusBar, QFileDialog
import maclib.mac_logger as mac_logger
import qdarkstyle
from src.perspective_settings import PerspecitveSettings
from src.ui.image_view import PerspectiveImageView
from maclib.mac_detect import MacDetect
class MacWindow(QMainWindow):
    """
    Base window for mac_lib ui.

    Applies the dark/light theme, restores any saved window geometry on
    startup and persists the geometry again when the window is closed.
    """
    # Attributes assigned in __init__ (or by the Qt helper calls below).
    menu_bar: QMenuBar
    status_bar: QStatusBar
    scaling_ratio: float
    mac_detect: MacDetect
    logger: logging.Logger
    perspective_settings: PerspecitveSettings
    def __init__(self,
                 window_name: str,
                 main_app: QApplication,
                 window_width: int = 800,
                 window_height: int = 600,
                 window_icon: object = None,
                 *args,
                 **kwargs):
        """
        Create a QT main window.

        :param window_name: title shown in the window's title bar
        :param main_app: the running QApplication (needed to set stylesheets)
        :param window_width: fallback width when no geometry is saved
        :param window_height: fallback height when no geometry is saved
        :param window_icon: NOTE(review): accepted but currently never used
        """
        super(MacWindow, self).__init__(*args, **kwargs)
        self.logger = logging.getLogger(name=mac_logger.LOGGER_NAME)
        self.mac_detect = MacDetect()
        self.perspective_settings = PerspecitveSettings()
        # Decide whether to use the light or dark theme.
        if self.perspective_settings.app_settings['ui']['theme'] == 'system':
            self.logger.debug("Using system UI theme...")
            if self.mac_detect.os_theme == "Dark":
                main_app.setStyleSheet(qdarkstyle.load_stylesheet(
                    qt_api='pyqt5'))
            else:
                # System theme is light: keep Qt's default stylesheet.
                pass
        elif self.perspective_settings.app_settings['ui']['theme'] == 'dark':
            self.logger.debug("Enabling dark theme from saved setting.")
            main_app.setStyleSheet(qdarkstyle.load_stylesheet(qt_api='pyqt5'))
        else:
            self.logger.debug("Using default light theme.")
        self.setWindowTitle(window_name)
        # If the window position has been saved in settings, use them to
        # set the position on the window.
        if self.perspective_settings.key_exists('window'):
            # NOTE(review): 'save_pos' appears to be stored as the *string*
            # "True"/"False" rather than a boolean -- confirm in settings code.
            if self.perspective_settings.app_settings[
                    'window']['save_pos'] == "True":
                self.logger.debug(
                    "Using settings to place window and set size.")
                self.move(
                    self.perspective_settings.app_settings[
                        'window']['x_coord'],
                    self.perspective_settings.app_settings[
                        'window']['y_coord']
                )
                self.resize(
                    self.perspective_settings.app_settings[
                        'window']['width'],
                    self.perspective_settings.app_settings[
                        'window']['height']
                )
            else:
                self.resize(window_width, window_height)
        else:
            self.resize(window_width, window_height)
        self.status_bar = self.statusBar()
        self.menu_bar = self.menuBar()
        self.setCentralWidget(QWidget())
        self.show()
    def save_window_geometry(self) -> None:
        """
        Save the current window size and position to the settings file.
        """
        self.logger.debug("Saving window coords before closing app")
        self.perspective_settings.app_settings[
            'window']['width'] = self.width()
        self.perspective_settings.app_settings[
            'window']['height'] = self.height()
        self.perspective_settings.app_settings[
            'window']['x_coord'] = self.x()
        self.perspective_settings.app_settings[
            'window']['y_coord'] = self.y()
        self.perspective_settings.save_settings()
    def closeEvent(self, close_event: QtGui.QCloseEvent) -> None:
        # Persist geometry before letting Qt tear the window down.
        self.logger.debug("User pressed the window close button.")
        self.save_window_geometry()
        close_event.accept()
        return super(MacWindow, self).closeEvent(close_event)
class PerspectiveWindow(object):
    """
    This is the main window that controls image_tools: it builds the Qt
    application, the MacWindow shell, the menus and the image view, then
    enters the Qt event loop.
    """
    main_app: QApplication
    main_window: MacWindow
    default_status: str = "Ready"
    logger: logging.Logger
    perspective_settings: PerspecitveSettings
    h_layout: QHBoxLayout
    image_view: PerspectiveImageView
    def __init__(self):
        """
        Create and run the main window for Perspective.
        """
        super(PerspectiveWindow, self).__init__()
        self.logger = logging.getLogger(name=mac_logger.LOGGER_NAME)
        # Handle high dpi display scaling.  These attributes must be set
        # before the QApplication is created in create_window().
        if hasattr(QtCore.Qt, 'AA_EnableHighDpiScaling'):
            QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True)
        if hasattr(QtCore.Qt, 'AA_UseHighDpiPixmaps'):
            QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps, True)
        self.perspective_settings = PerspecitveSettings()
        self.create_window()
        self.run()
    def load_image(self) -> None:
        """
        Ask the user for an image file and load it into the image view.

        NOTE(review): this replaces self.image_view with a *new*
        PerspectiveImageView that is never added to h_layout -- the widget
        created in create_window() is what remains visible.  Confirm intent.
        """
        self.main_window.status_bar.showMessage("Loading image")
        file_name = QFileDialog.getOpenFileName(filter="Image (*.*)")[0]
        self.logger.debug(f"Opening file {file_name}")
        self.image_view = PerspectiveImageView()
        self.image_view.load_image(file_name)
        self.main_window.status_bar.showMessage("Image loaded successfully")
    def do_nothing(self) -> None:
        """
        Literally do nothing!  Placeholder for not-yet-implemented actions.
        """
        self.logger.debug("I ain't doin' nuffink.")
    def quit_application(self) -> None:
        """
        Update the settings with the window geometry, then quit the app.
        """
        self.main_window.save_window_geometry()
        self.main_app.quit()
    def create_file_menu(self) -> None:
        """
        Create the main file menu (Open / Quit).
        """
        open_action = QAction('&Open', self.main_window)
        open_action.setShortcut('Ctrl+O')
        open_action.setStatusTip('Open an image')
        open_action.triggered.connect(self.load_image)
        quit_action = QAction('&Quit', self.main_window)
        quit_action.setShortcut('Ctrl+Q')
        quit_action.setStatusTip('Quit application')
        quit_action.triggered.connect(self.quit_application)
        file_menu = self.main_window.menu_bar.addMenu('&File')
        file_menu.addAction(open_action)
        file_menu.addAction(quit_action)
    def create_edit_menu(self) -> None:
        """
        Create the main Edit menu (Settings -- currently wired to do_nothing).
        """
        settings_action = QAction('&Settings', self.main_window)
        settings_action.setShortcut('Ctrl+S')
        settings_action.setStatusTip('Adjust application settings')
        settings_action.triggered.connect(self.do_nothing)
        file_menu = self.main_window.menu_bar.addMenu('&Edit')
        file_menu.addAction(settings_action)
    def create_window(self) -> None:
        """
        Create the main Perspective window, its layout and the image view.
        """
        self.main_app = QApplication([])
        self.main_window = MacWindow("Perspective", self.main_app)
        central_widget = self.main_window.centralWidget()
        self.logger.debug("Adding horizontal layout to the main window.")
        self.h_layout = QHBoxLayout(central_widget)
        central_widget.setLayout(self.h_layout)
        self.logger.debug("Adding image view to the layout.")
        self.image_view = PerspectiveImageView(central_widget)
        self.h_layout.addWidget(self.image_view)
        self.create_file_menu()
        self.create_edit_menu()
    def run(self) -> None:
        """
        Run the main window (blocks inside the Qt event loop).
        """
        # Now show and run the window.
        self.logger.debug("Starting the main application loop.")
        self.main_app.exec()
if __name__ == "__main__":
    # This module is a library; it is not meant to be executed directly.
    pass
| StarcoderdataPython |
3243917 | <reponame>xuychen/Leetcode<filename>401-500/431-440/437-pathSum3/pathSum3-naive.py
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def pathSum(self, root, sum, path=None):
        """
        Count downward paths in the tree whose node values add up to `sum`.

        Naive O(n^2) approach: extend the current path through both children;
        brand-new paths are started from the children only while we are not
        already inside a path (i.e. `path` is empty).

        :type root: TreeNode
        :type sum: int
        :rtype: int

        Fix: the original signature used a mutable default (`path=[]`),
        which Python shares across all calls -- a classic source of subtle
        bugs.  `None` plus per-call initialisation is the safe equivalent.
        (`sum` shadows the builtin, but is kept for interface compatibility.)
        """
        if path is None:
            path = []
        if not root:
            return 0
        # Paths that continue through this node.
        count = int(sum == root.val)
        count += self.pathSum(root.left, sum - root.val, path + [root.val])
        count += self.pathSum(root.right, sum - root.val, path + [root.val])
        # Start fresh searches from the children only at a path root.
        if not path:
            count += self.pathSum(root.left, sum, path)
            count += self.pathSum(root.right, sum, path)
        return count
4840080 | <filename>get_started.py
# -*- coding=utf-8 -*-
import mxnet as mx
import numpy as np
import cv2
class RTSbtl(object):
    """Empty placeholder class -- no behaviour implemented yet."""
    pass
| StarcoderdataPython |
173420 | <filename>sql_athame/escape.py
import math
import uuid
from typing import Any, Sequence
def escape(value: Any) -> str:
    """Render *value* as a literal safe to embed in a Postgres SQL statement.

    Supported: str -> E'...' escape string; int/float -> bare literal
    (NaN/infinity rejected); uuid.UUID -> quoted string cast to ::UUID;
    sequences (recursively escaped) -> ARRAY[...]; None -> NULL.

    :raises ValueError: for NaN or infinite floats
    :raises TypeError: for any unsupported type
    """
    if isinstance(value, str):
        # Fix: escape by hand instead of using repr().  repr() switches to
        # double quotes when the value contains a single quote, and a
        # double-quoted token is an *identifier* in SQL, not a string.
        escaped = value.replace("\\", "\\\\").replace("'", "''")
        return f"E'{escaped}'"
    elif isinstance(value, float) or isinstance(value, int):
        # NOTE(review): bool is a subclass of int and renders as True/False.
        if math.isnan(value):
            raise ValueError("Can't escape NaN float")
        elif math.isinf(value):
            raise ValueError("Can't escape infinite float")
        return f"{repr(value)}"
    elif isinstance(value, uuid.UUID):
        return f"{repr(str(value))}::UUID"
    elif isinstance(value, Sequence):
        # NOTE(review): bytes is also a Sequence and becomes an int array.
        args = ", ".join(escape(x) for x in value)
        return f"ARRAY[{args}]"
    elif value is None:
        return "NULL"
    else:
        raise TypeError(f"Can't escape type {type(value)}")
| StarcoderdataPython |
3257458 | import numpy as np
from PIL import Image
import os
from sys import exit, argv
import csv
import torch
import torchgeometry as tgm
from torch.utils.data import Dataset
from lib.utils import preprocess_image, grid_positions, upscale_positions
import cv2
from tqdm import tqdm
# Fix the RNG seed so random crops/rotations are reproducible across runs.
np.random.seed(0)
class PhotoTourism(Dataset):
    """Dataset of image pairs with dense pixel correspondences.

    Pairs listed in a CSV are warped with a fixed perspective transform,
    matched with SIFT + homography estimation, and stored as
    (img1, img2, pos1, pos2) tuples for training.
    """
    def __init__(self, rootDir, imgData, preprocessing):
        # rootDir: directory containing the images; imgData: CSV of pairs;
        # preprocessing: mode string forwarded to preprocess_image ('caffe', ...).
        self.rootDir = rootDir
        self.imgData = imgData
        self.preprocessing = preprocessing
        self.dataset = []
        # points_src = torch.FloatTensor([[
        # 	[190,210],[455,210],[633,475],[0,475],
        # ]]).cuda()
        # Hand-picked source quadrilateral mapped onto a 400x400 square.
        points_src = torch.FloatTensor([[
            [149, 157],[447, 166],[311, 151],[322, 265],
        ]]).cuda()
        points_dst = torch.FloatTensor([[
            [0, 0], [399, 0], [399, 399], [0, 399],
        ]]).cuda()
        cropH = tgm.get_perspective_transform(points_src, points_dst)
        points_src = torch.FloatTensor([[
            [0, 0], [400, 0], [400, 400], [0, 400]
        ]]).cuda()
        points_dst = torch.FloatTensor([[
            [400, 400], [0, 400], [0, 0], [400, 0]
        ]]).cuda()
        flipH = tgm.get_perspective_transform(points_src, points_dst)
        # NOTE(review): both images use cropH; flipH is computed but unused.
        self.H1 = cropH
        self.H2 = cropH
    def getImageFiles(self):
        """Return absolute paths of every file directly under rootDir."""
        imgFiles = os.listdir(self.rootDir)
        imgFiles = [os.path.join(self.rootDir, img) for img in imgFiles]
        return imgFiles
    def getimgPair(self):
        """Read the CSV (skipping the header row) and return its rows."""
        imgFiles = []
        with open(self.imgData) as csvFile:
            csvReader = csv.reader(csvFile, delimiter=',')
            for i, row in enumerate(csvReader):
                if(i == 0):
                    continue
                else:
                    imgFiles.append(row)
        #print(imgFiles)
        return imgFiles
    def imgRot(self, img1):
        """Return a randomly rotated copy (90-270 degrees) of a PIL image."""
        img2 = img1.rotate(np.random.randint(low=90, high=270))
        # img2 = img1.rotate(np.random.randint(low=0, high=60))
        return img2
    def imgCrop(self, img1, cropSize=400):
        """Return a random cropSize x cropSize crop of a PIL image."""
        w, h = img1.size
        left = np.random.randint(low = 0, high = w - (cropSize + 10))
        upper = np.random.randint(low = 0, high = h - (cropSize + 10))
        cropImg = img1.crop((left, upper, left+cropSize, upper+cropSize))
        # cropImg = cv2.cvtColor(np.array(cropImg), cv2.COLOR_BGR2RGB)
        # cv2.imshow("Image", cropImg)
        # cv2.waitKey(0)
        return cropImg
    def getFloor(self, img1, img2):
        """Warp both image tensors with the fixed perspective transforms."""
        img_warp1 = tgm.warp_perspective(img1, self.H1, dsize=(400, 400))
        img_warp2 = tgm.warp_perspective(img2, self.H2, dsize=(400, 400))
        return img_warp1, img_warp2
    def getGrid(self, img1, img2, minCorr=10, scaling_steps=3, matcher="FLANN"):
        """Estimate a homography between img1/img2 via SIFT matches and
        return dense corresponding pixel grids (pos1, pos2).

        Returns ([], []) whenever too few keypoints/matches are found or
        homography estimation fails.
        """
        im1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
        im2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
        # surf = cv2.xfeatures2d.SURF_create(100)
        surf = cv2.xfeatures2d.SIFT_create()
        kp1, des1 = surf.detectAndCompute(img1,None)
        kp2, des2 = surf.detectAndCompute(img2,None)
        if(len(kp1) < minCorr or len(kp2) < minCorr):
            # NOTE(review): leftover debug print.
            print('ha', len(kp1))
            return [], []
        if(matcher == "BF"):
            bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
            matches = bf.match(des1,des2)
            matches = sorted(matches, key=lambda x:x.distance)
        elif(matcher == "FLANN"):
            FLANN_INDEX_KDTREE = 0
            index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
            search_params = dict(checks = 50)
            flann = cv2.FlannBasedMatcher(index_params, search_params)
            matches = flann.knnMatch(des1,des2,k=2)
            # Lowe's ratio test to keep only distinctive matches.
            good = []
            for m, n in matches:
                if m.distance < 0.7*n.distance:
                    good.append(m)
            matches = good
        if(len(matches) > 800):
            matches = matches[0:800]
        elif(len(matches) < minCorr):
            return [], []
        # im4 = cv2.drawMatches(im1, kp1, im2, kp2, matches, None, flags=2)
        # cv2.imshow('Image4', im4)
        # cv2.waitKey(0)
        src_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1,1,2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1,1,2)
        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        if H is None:
            return [], []
        # Dense grid at the feature-map resolution, then upscaled back to pixels.
        h1, w1 = int(im1.shape[0]/(2**scaling_steps)), int(im1.shape[1]/(2**scaling_steps))
        device = torch.device("cpu")
        fmap_pos1 = grid_positions(h1, w1, device)
        pos1 = upscale_positions(fmap_pos1, scaling_steps=scaling_steps).data.cpu().numpy()
        # Swap row/col order so the homography can be applied as (x, y, 1).
        pos1[[0, 1]] = pos1[[1, 0]]
        ones = np.ones((1, pos1.shape[1]))
        pos1Homo = np.vstack((pos1, ones))
        pos2Homo = np.dot(H, pos1Homo)
        pos2Homo = pos2Homo/pos2Homo[2, :]
        pos2 = pos2Homo[0:2, :]
        pos1[[0, 1]] = pos1[[1, 0]]
        pos2[[0, 1]] = pos2[[1, 0]]
        pos1 = pos1.astype(np.float32)
        pos2 = pos2.astype(np.float32)
        # Keep only correspondences whose warped position stays inside the
        # image with a 2-pixel margin.
        ids = []
        for i in range(pos2.shape[1]):
            x, y = pos2[:, i]
            if(2 < x < (im1.shape[0]-2) and 2 < y < (im1.shape[1]-2)):
                ids.append(i)
        pos1 = pos1[:, ids]
        pos2 = pos2[:, ids]
        # for i in range(0, pos1.shape[1], 20):
        #     im1 = cv2.circle(im1, (pos1[1, i], pos1[0, i]), 1, (0, 0, 255), 2)
        # for i in range(0, pos2.shape[1], 20):
        #     im2 = cv2.circle(im2, (pos2[1, i], pos2[0, i]), 1, (0, 0, 255), 2)
        # im3 = cv2.hconcat([im1, im2])
        # for i in range(0, pos1.shape[1], 20):
        #     im3 = cv2.line(im3, (int(pos1[1, i]), int(pos1[0, i])), (int(pos2[1, i]) + im1.shape[1], int(pos2[0, i])), (0, 255, 0), 1)
        # cv2.imshow('Image', im1)
        # cv2.imshow('Image2', im2)
        # cv2.imshow('Image3', im3)
        # cv2.waitKey(0)
        return pos1, pos2
    def build_dataset(self, cropSize=400):
        """Load every image pair, warp it, compute correspondences and cache
        the results in self.dataset.  Requires a CUDA device.
        """
        print("Building Dataset.")
        #device = torch.device("cuda")
        imgFiles = self.getimgPair()
        for i in range(len(imgFiles)):
            rgbFile1, depthFile1, rgbFile2, depthFile2 = imgFiles[i]
            rgbFile1 = os.path.join(self.rootDir, rgbFile1)
            rgbFile2 = os.path.join(self.rootDir, rgbFile2)
            img1 = Image.open(rgbFile1)
            img2 = Image.open(rgbFile2)
            if(img1.mode != 'RGB'):
                img1 = img1.convert('RGB')
            # elif(img1.size[0] < cropSize or img1.size[1] < cropSize):
            #     continue
            if(img2.mode != 'RGB'):
                img2 = img2.convert('RGB')
            # elif(img2.size[0] < cropSize or img2.size[1] < cropSize):
            #     continue
            #img1 = self.imgCrop(img1, cropSize)
            #img2 = self.imgCrop(img2, cropSize)
            #img2 = self.imgRot(img1)
            # To NCHW float tensors on the GPU for the perspective warp.
            img1 = np.array(img1)
            img2 = np.array(img2)
            img1 = torch.from_numpy(img1.astype(np.float32)).cuda().unsqueeze(0).permute(0, 3, 1, 2)
            img2 = torch.from_numpy(img2.astype(np.float32)).cuda().unsqueeze(0).permute(0, 3, 1, 2)
            img1, img2 = self.getFloor(img1, img2)
            img1 = img1.cpu().squeeze(0).permute(1, 2, 0)
            img2 = img2.cpu().squeeze(0).permute(1, 2, 0)
            img1 = np.array(img1).astype('uint8')
            img2 = np.array(img2).astype('uint8')
            #print('img1', img1.size)
            # NOTE(review): hard-coded absolute debug output paths.
            cv2.imwrite('/home/dhagash/d2-net/d2-net_udit/media/img1.jpg', img1)
            cv2.imwrite('/home/dhagash/d2-net/d2-net_udit/media/img2.jpg', img2)
            pos1, pos2 = self.getGrid(img1, img2)
            if(len(pos1) == 0 or len(pos2) == 0):
                continue
            # NOTE(review): leftover debug print.
            print('pos1', pos1)
            self.dataset.append((img1, img2, pos1, pos2))
    def __len__(self):
        return len(self.dataset)
    def __getitem__(self, idx):
        """Return one sample as a dict of float32 tensors."""
        image1, image2, pos1, pos2 = self.dataset[idx]
        image1 = preprocess_image(image1, preprocessing=self.preprocessing)
        image2 = preprocess_image(image2, preprocessing=self.preprocessing)
        return {
            'image1': torch.from_numpy(image1.astype(np.float32)),
            'image2': torch.from_numpy(image2.astype(np.float32)),
            'pos1': torch.from_numpy(pos1.astype(np.float32)),
            'pos2': torch.from_numpy(pos2.astype(np.float32))
        }
if __name__ == '__main__':
    # Manual smoke test: build the dataset from the command-line paths and
    # print the shapes of the first sample.
    rootDir = argv[1]
    imgData = argv[2]
    training_dataset = PhotoTourism(rootDir, imgData, 'caffe')
    training_dataset.build_dataset()
    data = training_dataset[0]
    print(data['image1'].shape, data['image2'].shape, data['pos1'].shape, data['pos2'].shape)
| StarcoderdataPython |
1663389 | import pytest
import torch
import numpy as np
import pickle
from perf_gan.data.preprocess import Identity, PitchTransform, LoudnessTransform
from perf_gan.data.contours_dataset import ContoursDataset
def read_from_pickle(path):
    """Yield every object stored sequentially in the pickle file at *path*."""
    with open(path, 'rb') as handle:
        while True:
            try:
                yield pickle.load(handle)
            except EOFError:
                # End of file: all pickled objects have been read.
                return
def test_dataset_items_shape():
    """Every contour tensor of every pickled item must share one shape."""
    path = "data/dataset.pickle"
    keys = ("u_lo", "e_f0", "e_lo", "onsets", "offsets")
    for contours in read_from_pickle(path):
        # All dimensions must match the unexpressive f0 contour's shape.
        expected = contours["u_f0"].shape
        for key in keys:
            assert contours[key].shape == expected
        assert contours["mask"][0].shape == expected
def test_dataset_items():
    """Each item's first tensor must have length 1024 on its last axis."""
    transforms = [
        (PitchTransform, {"feature_range": (-1, 1)}),
        (LoudnessTransform, {"feature_range": (-1, 1)}),
    ]
    dataset = ContoursDataset(path="data/dataset.pickle",
                              list_transforms=transforms)
    # Check every sample (u contours, e contours, onsets, offsets).
    for index in range(len(dataset)):
        assert dataset[index][0].shape[-1] == 1024
def test_dataset_items_range():
    """All four contour tensors of every item must lie within [-1, 1]."""
    transforms = [
        (PitchTransform, {"feature_range": (-1, 1)}),
        (LoudnessTransform, {"feature_range": (-1, 1)}),
    ]
    dataset = ContoursDataset(path="data/dataset.pickle",
                              list_transforms=transforms)
    for index in range(len(dataset)):
        u_f0, u_lo, e_f0, e_lo, _, _, _ = dataset[index]
        for tensor in (u_f0, u_lo, e_f0, e_lo):
            assert torch.min(tensor) >= -1
            assert torch.max(tensor) <= 1
| StarcoderdataPython |
4816711 | <gh_stars>0
"""
* GTDynamics Copyright 2020, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* See LICENSE for the license information
*
* @file test_jumping_robot.py
* @brief Unit test for jumping robot.
* @author <NAME>
"""
import os,sys,inspect
# Make the repository root importable so the `src.*` imports below resolve
# when this test file is executed directly from its own directory.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
from src.jumping_robot import Actuator, JumpingRobot
from src.jr_visualizer import visualize_jr
import unittest
import gtsam
import gtdynamics as gtd
import numpy as np
class TestJumpingRobot(unittest.TestCase):
    """Tests for jumping robot."""

    def setUp(self):
        """Set up the jumping robot.

        Fix: the original line read ``def setUp(self)):`` -- the extra
        closing parenthesis was a syntax error that prevented the module
        from importing at all.
        """
        self.yaml_file_path = "examples/example_jumping_robot/yaml/robot_config.yaml"
        self.init_config = JumpingRobot.create_init_config()
        self.jr = JumpingRobot(self.yaml_file_path, self.init_config)

    def test_links_joints(self):
        """Test number of links and joints."""
        self.assertEqual(self.jr.robot.numLinks(), 6)
        self.assertEqual(self.jr.robot.numJoints(), 6)

    def test_forward_kinematics(self):
        """Test forward kinematics of jumping robot."""
        values = gtsam.Values()
        k = 0
        # Symmetric crouch pose: both legs bent by the same angle theta.
        theta = np.pi / 3
        qs = [-theta, 2 * theta, -theta, -theta, 2 * theta, -theta]
        for joint in self.jr.robot.joints():
            j = joint.id()
            gtd.InsertJointAngleDouble(values, j, k, qs[j])
            gtd.InsertJointVelDouble(values, j, k, 0.)
        fk_results = self.jr.robot.forwardKinematics(values, k)
        torso_i = self.jr.robot.link("torso").id()
        torso_pose = gtd.Pose(fk_results, torso_i, k)
        expected_torso_pose = gtsam.Pose3(gtsam.Rot3(), gtsam.Point3(0, 0, 0.55))
        self.assertTrue(torso_pose.equals(expected_torso_pose, tol=1e-5))
        # visualize_jr(fk_results, self.jr, k)
# visualize_jr(fk_results, self.jr, k)
# Allow running this test module directly with `python test_jumping_robot.py`.
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
3225302 | <filename>application/config.py<gh_stars>0
#!/usr/bin/env python3
################################
# config.py #
# environment vars/backend #
# Modified by: #
# <NAME>(Nov 10,2019)#
################################
import os
from flask import Flask
# Directory of this file; used to anchor upload paths below.
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__, template_folder='templates')
# NOTE(review): hard-coded secret key -- acceptable for development only;
# load it from an environment variable in production.
app.config['SECRET_KEY'] = 'teameight'
class Config(object):
    """Flask configuration object (development settings)."""

    # Falls back to a hard-coded key -- NOTE(review): do not ship this default.
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'teameight'
    # TODO: fix database url here: (mysql://user:pass@server/db)
    # NOTE(review): database credentials are committed in plain text.
    SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://flaskapp:flaskpass@localhost/app'
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # Both upload settings point at the same ./uploads directory.
    UPLOADED_PHOTOS_DEST = os.path.join(basedir, 'uploads')
    UPLOAD_FOLDER = os.path.join(basedir, 'uploads')
| StarcoderdataPython |
1778953 | from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from batchmanager import BatchManager
from joblib import Parallel, delayed
import scipy
import numpy as np
def n_labeled_data(x_set, y_set, sample_size, dataset_names):
    """Estimate per-dataset classifier performance versus labelled-data size.

    Returns (means, stds): one entry per dataset, as produced by
    n_labeled_estimator.

    NOTE(review): `dataset_names` is accepted but never used.
    """
    means = []
    stds = []
    for i in range(len(x_set)):
        print("Evaluating dataset: ", i)
        # Flatten any extra feature dimensions into (n_samples, n_features).
        x = x_set[i]
        if len(x.shape) != 2:
            x = x.reshape((x.shape[0], -1))
        # Densify sparse labels and collapse one-hot targets to class ids.
        y = y_set[i]
        if scipy.sparse.issparse(y):
            y = y.toarray()
        if len(y.shape) != 1:
            y = y.argmax(-1)
        mean, std = n_labeled_estimator(x, y, sample_size, classes=np.unique(y))
        means.append(mean)
        stds.append(std)
    return means, stds
def n_labeled_estimator(x, y, sample_size, classes=3):
    """Estimate mean/std macro-F1 as labelled data grows in `sample_size` steps.

    NOTE(review): the `classes` parameter is accepted but never used.
    """
    # Fixed 60/40 train/test split.
    train_size = 0.6
    x_tr, x_te, y_tr, y_te = train_test_split(
        x, y, train_size=train_size, test_size=1 - train_size
    )
    n_train = x_tr.shape[0]
    n_iter = 10
    # One row per repetition, one column per incremental batch of samples.
    performance = np.zeros((n_iter, (n_train // sample_size) + 1))
    lw = loop_wrapper(performance, x_te, y_te, n_train, sample_size)
    # Shared-memory backend: each call fills one row of lw.performance.
    Parallel(n_jobs=5, require="sharedmem")(
        [delayed(lw)(x_tr, y_tr, i) for i in range(n_iter)]
    )
    # print("PERFORMANCE ATTR ", lw.performance)
    mean_performances = lw.performance.mean(axis=0)
    std_performances = lw.performance.std(axis=0)
    return mean_performances, std_performances
class loop_wrapper:
    """Callable used with joblib: each call trains on progressively larger
    sample subsets and records macro-F1 into one row of `performance`."""
    def __init__(self, performance, x_te, y_te, n_train, sample_size):
        self.performance = performance
        self.x_te = x_te
        self.y_te = y_te
        self.n_train = n_train
        self.sample_size = sample_size
        # NOTE(review): `self.i` is assigned here but never read.
        self.i = 0
    def __call__(self, x_tr, y_tr, i):
        """Fill row `i` of self.performance, one column per added batch."""
        bm = BatchManager(self.n_train, self.sample_size)
        x_samp = None
        for j, ind in enumerate(bm):
            # Accumulate batches so each fit sees all samples drawn so far.
            if x_samp is None:
                x_samp = x_tr[ind]
                y_samp = y_tr[ind]
            else:
                x_samp = np.concatenate([x_samp, x_tr[ind]], axis=0)
                y_samp = np.concatenate([y_samp, y_tr[ind]], axis=0)
            lr = fit_model(x_samp, y_samp)
            self.performance[i, j] = f1_score(
                self.y_te, lr.predict(self.x_te), average="macro"
            )
            # print("Sample size ", (j+1)*bm.batch_size, self.performance[i, j])
def fit_model(x_samp, y_samp):
    """Train and return a balanced multinomial logistic-regression model."""
    classifier = LogisticRegression(
        class_weight="balanced",
        multi_class="multinomial",
        solver="newton-cg",
        max_iter=1000,
        penalty="l2",
    )
    # fit() returns the estimator itself, so this is the fitted model.
    return classifier.fit(x_samp, y_samp)
| StarcoderdataPython |
3355243 | from houdini import IWaddle
class CardJitsuWaterLogic(IWaddle):
    """Waddle logic for the Card-Jitsu Water minigame."""

    # Room id of the Card-Jitsu Water game room.
    room_id = 995
    def __init__(self, waddle):
        super().__init__(waddle)
class WaterSenseiLogic(CardJitsuWaterLogic):
    """Card-Jitsu Water waddle logic variant for matches against the Sensei."""
    def __init__(self, waddle):
        super().__init__(waddle)
| StarcoderdataPython |
549 | import os
from tempfile import TemporaryDirectory
from quickbase_client.utils.pywriting_utils import BasicPyFileWriter
from quickbase_client.utils.pywriting_utils import PyPackageWriter
class TestBasicFileWriter:
    """Unit tests for BasicPyFileWriter."""

    def test_outputs_lines(self):
        writer = BasicPyFileWriter()
        writer.add_line('import abc')
        writer.add_line('import os').space()
        rendered = writer.get_file_as_string()
        assert rendered == 'import abc\nimport os\n'

    def test_indent_dedent(self):
        writer = BasicPyFileWriter()
        writer.add_line('def foo():').indent().add_line('return 5').dedent().space()
        rendered = writer.get_file_as_string()
        assert rendered == 'def foo():\n    return 5\n'

    def test_use_refs(self):
        writer = BasicPyFileWriter()
        writer.add_line('a = "A"')
        marker = writer.make_ref()
        writer.add_line('d = "D"')
        # Lines added through the ref are inserted at the remembered spot.
        marker.add_line('b = "B"').add_line('c = "C"')
        lines = writer.get_file_as_string().split('\n')
        assert 'a' in lines[0]
        assert 'b' in lines[1]
        assert 'c' in lines[2]
        assert 'd' in lines[3]
class TestPyPackageWriter:
    """Unit tests for PyPackageWriter."""

    def test_includes_init(self):
        with TemporaryDirectory() as tmp_dir:
            writer = PyPackageWriter(pkg_name='foo', parent_dir=tmp_dir)
            # A fresh package always carries an __init__ module.
            assert '__init__' in writer.modules
            assert writer.has_module_name('__init__')
            assert writer.pkg_path == os.path.join(tmp_dir, 'foo')
            writer.write()
            assert os.path.exists(tmp_dir)
            assert os.path.exists(os.path.join(tmp_dir, 'foo'))
            assert os.path.exists(os.path.join(tmp_dir, 'foo', '__init__.py'))
| StarcoderdataPython |
1790704 | """Test for Connect view utilities"""
from django.test import TestCase
from open_connect.connect_core.utils.views import JSONResponseMixin
class JSONResponseMixinTest(TestCase):
    """Tests for the JSONResponse mixin."""

    def test_render_to_response(self):
        """render_to_response should serialise the context as JSON."""
        response = JSONResponseMixin().render_to_response(
            context={'something': 123})
        self.assertEqual(response.content, '{\n    "something": 123\n}')
        self.assertEqual(response['Content-Type'], 'application/json')
| StarcoderdataPython |
146980 | """
helper of plyer.battery
"""
# Names exported when this helper module is star-imported.
__all__ = [
    'battery_status'
]
import ctypes
from package_making_practice.platforms.windows.libs import win_api_defs
def battery_status():
    """
    Query the Windows power API and return the raw power-status fields.

    :return: dict mapping every SYSTEM_POWER_STATUS field name to its value
    :raises Exception: when GetSystemPowerStatus reports failure
    """
    status = win_api_defs.SYSTEM_POWER_STATUS()
    if not win_api_defs.GetSystemPowerStatus(ctypes.pointer(status)):
        raise Exception('Cannot get battery status')
    return {name: getattr(status, name) for name, _ in status._fields_}
3361281 | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_discovery.ipynb (unless otherwise specified).
__all__ = ['parse_dataflows', 'all_available', 'search_dataset', 'DataSet']
# Cell
from .base import ISTAT
from .utils import make_tree, strip_ns
import pandas as pd
from fastcore.test import *
# Cell
def parse_dataflows(response):
    """Parse the `response` containing all the available datasets and return
    a list of dataflow dicts (id, version, English description, structure id).

    Fix: `description_en` is now reset for every dataflow.  Previously a
    dataflow without an English <Name> silently inherited the previous
    entry's description (or raised NameError on the very first one).
    """
    tree = make_tree(response)
    strip_ns(tree)
    root = tree.root
    dataflows_l = []
    for dataflow in root.iter("Dataflow"):
        description_en = None
        # iter over names and get the English description
        for name in dataflow.findall("Name"):
            lang = name.get("{http://www.w3.org/XML/1998/namespace}lang")
            if lang == "en":
                description_en = name.text
        # First <Ref> id identifies the data structure definition.
        structure_id = [ref.get("id") for ref in dataflow.iter("Ref")][0]
        dataflows_l.append({
            "df_id": dataflow.get("id"),
            "version": dataflow.get("version"),
            "df_description": description_en,
            "df_structure_id": structure_id,
        })
    return dataflows_l
def all_available(dataframe=True):
    """Return every dataflow published by ISTAT (DataFrame by default,
    plain list of dicts when `dataframe` is falsy)."""
    client = ISTAT()
    response = client._request(path="dataflow/IT1")
    dataflows = parse_dataflows(response)
    if dataframe:
        return pd.DataFrame(dataflows)
    return dataflows
def search_dataset(keyword):
    """Search available dataflows whose description contains `keyword`
    (case-insensitive); return the matches as a DataFrame.

    Fix: the original evaluated `all_available()` twice, issuing two HTTP
    round-trips (and risking an inconsistent mask) for a single search.
    The listing is now fetched once and reused.
    """
    available = all_available()
    dataflows = available[
        available["df_description"].str.contains(keyword, case=False)
    ]
    if len(dataflows) == 0:
        raise ValueError('No dataset matching `keyword`')
    return dataflows
# Cell
class DataSet(ISTAT):
    """Retrieve informations (metadata) about a single ISTAT dataset (dataflow)."""
    def __init__(self, dataflow_identifier):
        super().__init__()
        # SDMX resource used when asking for the dataset's structure.
        self.resource = "datastructure"
        self.all_available = all_available() # df with all the available dataflows
        # Dict holding df_id / df_structure_id / df_description / version.
        self.identifiers = self.set_identifiers(dataflow_identifier)
        # Mapping dimension name -> {"values_ids": [...], "values_description": [...]}.
        self.available_values = self.get_available_values()
        # Ordered list of the dataset's dimension names.
        self.dimensions = list(self.dimensions_info(description=False).dimension)
        # One entry per dimension; "." (no filter = all values) by default.
        self.filters = self.default_filters()
        # self.dimensions_values = self.available_dimensions_values()
        # TODO: returning all metadata related to the dataflow contained in 'Header'
    def set_identifiers(self, dataflow_identifier):
        """Take any type of `dataflow_identifier` and return all identifiers in a dictionary.

        The identifier kind is guessed from the position of the first
        underscore (df_ids have it at index 3, structure ids at index 4);
        any other string is treated as a full description.
        """
        if dataflow_identifier[3] == "_":
            return self.set_from_id(dataflow_identifier)
        elif dataflow_identifier[4] == "_":
            return self.set_from_structure_id(dataflow_identifier)
        else:
            if type(dataflow_identifier) == str:
                return self.set_from_description(dataflow_identifier)
            else:
                raise ValueError(dataflow_identifier)
    def set_from_id(self, df_id):
        # Look the dataflow up by its `df_id` column; returns the first match.
        mask = self.all_available["df_id"] == df_id
        df = self.all_available[mask]
        return df.to_dict(orient="records")[0]
    def set_from_structure_id(self, df_structure_id):
        # Look the dataflow up by its `df_structure_id` column.
        mask = self.all_available["df_structure_id"] == df_structure_id
        df = self.all_available[mask]
        return df.to_dict(orient="records")[0]
    def set_from_description(self, description):
        # Look the dataflow up by its exact (full) description text.
        mask = self.all_available["df_description"] == description
        df = self.all_available[mask]
        return df.to_dict(orient="records")[0]
    def parse_dimensions(self, response):
        """Parse the `response` containing a dataflow's dimensions and return them in a list"""
        tree = make_tree(response)
        strip_ns(tree)
        root = tree.root
        dimensions_l = []
        for dimension in root.iter("Dimension"):
            dimension_name = dimension.attrib["id"]
            # Each <Dimension> holds one <Enumeration>/<Ref> with the codelist id.
            dimension_id = [
                enumeration.find("Ref").get("id")
                for enumeration in dimension.iter("Enumeration")
            ][0]
            dimension_dict = {"dimension": dimension_name, "dimension_ID": dimension_id}
            dimensions_l.append(dimension_dict)
        return dimensions_l
    def dimensions_info(self, dataframe=True, description=True):
        """Return the dimensions of a specific dataflow and their `descriptions`.

        Performs one request for the structure; when `description` is true a
        further request per dimension is made by `dimensions_description`.
        """
        df_structure_id = self.identifiers["df_structure_id"]
        path_parts = [self.resource, self.agencyID, df_structure_id]
        path = "/".join(path_parts)
        response = self._request(path=path)
        dimensions = self.parse_dimensions(response)
        if dataframe == True:
            dimensions = pd.DataFrame(dimensions)
            if description == True:
                dimensions_description = self.dimensions_description(dimensions)
                dimensions = dimensions.merge(dimensions_description, on="dimension_ID")
        return dimensions
    def dimensions_description(self, dimensions):
        """Return a dataframe with the descriptions of `dimensions`.

        Issues one `codelist` request per dimension id.
        """
        resource = "codelist"
        dimensions_l = dimensions.dimension_ID.tolist()
        descriptions_l = []
        for dimension_id in dimensions_l:
            path_parts = [resource, self.agencyID, dimension_id]
            path = "/".join(path_parts)
            response = self._request(path=path)
            tree = make_tree(response)
            strip_ns(tree)
            root = tree.root
            description = [x for x in root.iter("Codelist")][0]
            # description_it = description.findall('Name')[0].text
            # Index 1 selects the second <Name>; presumably the English one — TODO confirm.
            description = description.findall("Name")[1].text
            description_dict = {
                "dimension_ID": dimension_id,
                "description": description,
            }
            descriptions_l.append(description_dict)
        dimensions_descriptions = pd.DataFrame(descriptions_l)
        return dimensions_descriptions
    def get_available_values(self):
        """Return a dictionary with available values for each dimension in the DataSet instance"""
        resource = "availableconstraint"
        df_id = self.identifiers["df_id"]
        path_parts = [
            resource,
            df_id,
            "?references=all&detail=full",
        ]  # TODO: pass them as parameters
        path = "/".join(path_parts)
        response = self._request(path=path)
        tree = make_tree(response)
        strip_ns(tree)
        root = tree.root
        dimensions_values = {}
        for dimension in root.iter("Codelist"):
            dimension_id = dimension.get("id")
            values = {}
            value_id_l, value_descr_l = [], []
            for value in dimension.iter("Code"):
                value_id = value.get("id")
                # Second <Name> taken as the description (see note above).
                value_descr = [name.text for name in value.findall("Name")][1]
                value_id_l.append(value_id)
                value_descr_l.append(value_descr)
            values["values_ids"] = value_id_l
            values["values_description"] = value_descr_l
            dimensions_values[dimension_id] = values
        # Re-key the mapping from codelist ids to human dimension names.
        for dimension_id in list(dimensions_values.keys()):
            dimension = self.get_dimension_name(dimension_id)
            dimensions_values[dimension] = dimensions_values.pop(dimension_id)
        return dimensions_values
    def get_dimension_values(self, dimension, dataframe=True):
        """Return the available values of a single `dimension` in the dataset"""
        dimension_dict = self.available_values[dimension]
        dimension_df = pd.DataFrame.from_dict(dimension_dict)
        return dimension_df if dataframe else dimension_dict
    def get_dimension_name(self, dimension_id):
        """Convert `dimension_id` to `dimension`"""
        dimensions_df = self.dimensions_info(description=False)
        mask = dimensions_df["dimension_ID"] == dimension_id
        dimension = dimensions_df[mask]["dimension"]
        return dimension.values[0]
    def default_filters(self):
        """Initiate self.filters with default values ("." means: do not filter, take all values)."""
        default_filters = {}
        # no filter equals all values (default)
        for dimension in self.dimensions:
            default_filters[dimension] = "."
        return default_filters
    def set_filters(self, **kwargs):
        """Set filters for the dimensions of the dataset by passing dimension_name=value."""
        # add kwargs in case passed
        for arg, arg_value in kwargs.items():
            self.filters[arg.upper()] = arg_value
1751801 | <filename>problems_0_99/problem_3/missing_posint.py
# Test inputs and, for each, the expected smallest missing positive integer.
testcase1 = [3, 4, -1, 1]
testcase2 = [1, 2, 0]
testcase3 = [3, 5, 6]
expected1 = 2
expected2 = 3
expected3 = 1
def find_missing_pos_int(arr):
    """Return the smallest positive integer that does not appear in `arr`.

    Bug fix: the original single pass only bumped the candidate when it
    happened to visit values in ascending order, so e.g. [2, 1] returned 2
    instead of 3.  Collecting the values in a set makes the answer
    independent of element order.
    """
    present = set(arr)
    candidate = 1
    while candidate in present:
        candidate += 1
    return candidate
def int_is_pos(number):
    """Tell whether `number` is strictly positive."""
    return not number <= 0
# Run the three test cases, print the results, and assert each matches.
actual1 = find_missing_pos_int(testcase1)
actual2 = find_missing_pos_int(testcase2)
actual3 = find_missing_pos_int(testcase3)
print("Actual1: " + str(actual1))
print("Actual2: " + str(actual2))
print("Actual3: " + str(actual3))
assert actual1 == expected1
print("Passed")
assert actual2 == expected2
print("Passed")
assert actual3 == expected3
print("Passed")
3228151 | <reponame>hmaarrfk/vispy<filename>vispy/visuals/transforms/interactive.py
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import numpy as np
from .linear import STTransform
class PanZoomTransform(STTransform):
    """Pan-zoom transform
    Parameters
    ----------
    canvas : instance of Canvas | None
        The canvas to attach to.
    aspect : float | None
        The aspect ratio to apply.
    **kwargs : dict
        Keyword arguments to pass to the underlying `STTransform`.
    """
    def __init__(self, canvas=None, aspect=None, **kwargs):
        self._aspect = aspect
        # NOTE(review): attach() dereferences canvas.events, so despite the
        # "| None" above, passing canvas=None raises here — confirm intended.
        self.attach(canvas)
        STTransform.__init__(self, **kwargs)
        self.on_resize(None)
    def attach(self, canvas):
        """Attach this tranform to a canvas
        Parameters
        ----------
        canvas : instance of Canvas
            The canvas.
        """
        self._canvas = canvas
        canvas.events.resize.connect(self.on_resize)
        canvas.events.mouse_wheel.connect(self.on_mouse_wheel)
        canvas.events.mouse_move.connect(self.on_mouse_move)
    @property
    def canvas_tr(self):
        # Maps canvas pixel coordinates [(0,0)..size] onto [(-1,1)..(1,-1)].
        return STTransform.from_mapping(
            [(0, 0), self._canvas.size],
            [(-1, 1), (1, -1)])
    def on_resize(self, event):
        """Resize handler
        Parameters
        ----------
        event : instance of Event
            The event.
        """
        if self._aspect is None:
            return
        # Rescale the y axis so the configured aspect ratio is kept
        # relative to the new canvas width/height ratio.
        w, h = self._canvas.size
        aspect = self._aspect / (w / h)
        self.scale = (self.scale[0], self.scale[0] / aspect)
        self.shader_map()
    def on_mouse_move(self, event):
        """Mouse move handler
        Parameters
        ----------
        event : instance of Event
            The event.
        """
        if event.is_dragging:
            dxy = event.pos - event.last_event.pos
            button = event.press_event.button
            if button == 1:
                # Left drag: pan by the canvas-space delta.
                dxy = self.canvas_tr.map(dxy)
                o = self.canvas_tr.map([0, 0])
                t = dxy - o
                self.move(t)
            elif button == 2:
                # Right drag: zoom around the press position.
                center = self.canvas_tr.map(event.press_event.pos)
                if self._aspect is None:
                    self.zoom(np.exp(dxy * (0.01, -0.01)), center)
                else:
                    # Aspect-locked: use the vertical motion for both axes.
                    s = dxy[1] * -0.01
                    self.zoom(np.exp(np.array([s, s])), center)
            self.shader_map()
    def on_mouse_wheel(self, event):
        """Mouse wheel handler
        Parameters
        ----------
        event : instance of Event
            The event.
        """
        self.zoom(np.exp(event.delta * (0.01, -0.01)), event.pos)
| StarcoderdataPython |
3397371 | begin_unit
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'from'
name|'nova'
op|'.'
name|'virt'
op|'.'
name|'libvirt'
op|'.'
name|'volume'
name|'import'
name|'fs'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|LibvirtGPFSVolumeDriver
name|'class'
name|'LibvirtGPFSVolumeDriver'
op|'('
name|'fs'
op|'.'
name|'LibvirtBaseFileSystemVolumeDriver'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Class for volumes backed by gpfs volume."""'
newline|'\n'
nl|'\n'
DECL|member|_get_mount_point_base
name|'def'
name|'_get_mount_point_base'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
string|"''"
newline|'\n'
nl|'\n'
DECL|member|get_config
dedent|''
name|'def'
name|'get_config'
op|'('
name|'self'
op|','
name|'connection_info'
op|','
name|'disk_info'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Returns xml for libvirt."""'
newline|'\n'
name|'conf'
op|'='
name|'super'
op|'('
name|'LibvirtGPFSVolumeDriver'
op|','
nl|'\n'
name|'self'
op|')'
op|'.'
name|'get_config'
op|'('
name|'connection_info'
op|','
name|'disk_info'
op|')'
newline|'\n'
name|'conf'
op|'.'
name|'source_type'
op|'='
string|'"file"'
newline|'\n'
name|'conf'
op|'.'
name|'source_path'
op|'='
name|'connection_info'
op|'['
string|"'data'"
op|']'
op|'['
string|"'device_path'"
op|']'
newline|'\n'
name|'return'
name|'conf'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| StarcoderdataPython |
1616543 | <reponame>alexandrwang/hackmit<filename>server.py
from flask import Flask, request, redirect
import twilio.twiml
import subprocess
import json
import sklearn
import random
import datetime
from sklearn.feature_extraction import DictVectorizer
from sklearn import svm
import re
import nltk
import datetime
import traceback
app = Flask(__name__)
# Amadeus sandbox credentials and endpoints (Python 2 era code).
AMADEUS_API_KEY = '<KEY>'
LOW_FARE_URL = 'http://api.sandbox.amadeus.com/v1.2/flights/low-fare-search'
EXTENSIVE_URL = 'http://api.sandbox.amadeus.com/v1.2/flights/extensive-search'
# NOTE(review): `app` is assigned twice in this module (also a few lines
# above) — the second assignment wins; confirm one was meant to be removed.
app = Flask(__name__)
# "BOS to SFO" style origin/destination (upper-case IATA-like codes).
cities_regex = re.compile('(?:^|.* )([A-Z]*) to ([A-Z]*).*')
# "March 14" style dates.
day_regex = re.compile('.*(January|February|March|April|May|June|July|August|September|October|November|December) ([0-3]?[0-9]).*')
# "3/14" or "3-14" style dates.
day_regex_2 = re.compile('.*([01]?[0-9])[\-/]([0123]?[0-9]).*')
# "before 9 am" / "after 5PM" style time constraints.
time_regex = re.compile('.*(before|after) ([01]?[0-9]) ?([AaPp][Mm]).*')
# English month name -> 1-based month number.
month_to_num = {
    'January': 1,
    'February': 2,
    'March': 3,
    'April': 4,
    'May': 5,
    'June': 6,
    'July': 7,
    'August': 8,
    'September': 9,
    'October': 10,
    'November': 11,
    'December': 12
}
def canned_responses(msg):
    """Return a canned reply for short small-talk messages, or None.

    Only messages of 20 characters or fewer are considered, so normal
    booking requests fall through to the flight-parsing path.

    Bug fix: the greeting checks used substring matching, so any short
    message containing "hi" (e.g. "which one?") was answered as a greeting.
    Greetings and who/name checks now match whole words.
    """
    if len(msg) > 20:
        return None
    lowered = msg.lower()
    # Strip trailing punctuation so "hi!" / "hello," still match.
    words = [w.strip('.,!?;:') for w in lowered.split()]
    if 'thank' in lowered:  # substring on purpose: covers "thanks", "thank you"
        return 'No problem! :)'
    elif 'hi' in words or 'hey' in words or 'hello' in words:
        return 'Hi, how can I help?'
    elif ('who' in words or 'name' in words) and '?' in msg:
        return 'Hi, I\'m Emma! Nice to meet you! :)'
    return None
@app.route("/", methods=['GET', 'POST'])
def respond():
    """Twilio webhook: parse an SMS flight request and reply with advice.

    Flow: canned small-talk reply if applicable; otherwise parse the
    message, compute the best day to buy, and answer via TwiML.  Any
    unexpected failure is turned into a friendly apology reply.
    """
    msg = request.form.get('Body')
    canned = canned_responses(msg)
    if canned:
        resp = twilio.twiml.Response()
        resp.message(canned)
        return str(resp)
    try:
        try:
            # Inner try: a parse failure gets its own clarification reply.
            msg_params = parse_msg(msg)
        except Exception:
            resp = twilio.twiml.Response()
            resp.message("Sorry, I didn't quite catch that. What did you mean?")
            return str(resp)
        today = datetime.date.today()
        month = msg_params['month']
        # Numeric month ("3") vs month name ("March") from the two date regexes.
        month = int(month) if len(month) < 3 else month_to_num[msg_params['month']]
        day = int(msg_params['day'])
        # If the date already passed this year, assume next year.
        year = today.year if today < datetime.date(today.year, month, day) else today.year + 1
        datestr = str(datetime.date(year, month, day))
        best_time, saved_amt = find_best_time_to_buy(msg_params['origin'], msg_params['destination'], datestr)
        buy_in_days = (best_time - today).days
        buy_in_days_str = 'in %d days' % buy_in_days if buy_in_days > 0 else 'now'
        if len(buy_in_days_str) > 3:
            buy_in_days_str += ", and I saved you $%.2f" % saved_amt
        if buy_in_days_str == 'now':
            # Buying today: also report the concrete best flight found.
            depart, arrive, fare = get_best_current_flight(msg_params['origin'], msg_params['destination'], datestr)
            buy_in_days_str += '. The flight will depart at %s and arrive at %s in the local time of %s, and the total fare was $%s' % (depart, arrive, msg_params['destination'], fare)
        resp = twilio.twiml.Response()
        resp.message("Sure thing! I'll book that for you %s. Have a safe trip!" % buy_in_days_str)
        return str(resp)
    except Exception as e:
        # Catch-all boundary: never leak a traceback to the SMS user.
        resp = twilio.twiml.Response()
        resp.message("Oops! I had a kerfuffle. Could you ask me that again?")
        traceback.print_exc()
        return str(resp)
def iso_to_ordinal(iso):
    """Turn an ISO 'YYYY-MM-DD' string into a proleptic Gregorian ordinal day."""
    parsed = datetime.datetime.strptime(iso, '%Y-%m-%d')
    return parsed.toordinal()
def amadeus_low_fare_request(origin, destination, departure_date, **kwargs):
    """Makes a request to Amadeus for the low fare flights according to params.

    Extra query parameters can be passed as kwargs.  The HTTP call shells
    out to curl (Python 2 module: note .iteritems()); parsed JSON is
    returned.
    """
    url_params = {
        'origin': origin,
        'destination': destination,
        'departure_date': departure_date,
    }
    url_params.update(kwargs)
    url = LOW_FARE_URL + '?' + ('apikey=%s&' % AMADEUS_API_KEY) + '&'.join(['%s=%s' % (a, b) for a, b in url_params.iteritems()])
    try:
        output = subprocess.check_output(['curl', '-X', 'GET', url])
    except Exception:
        # Single blind retry on any failure.
        output = subprocess.check_output(['curl', '-X', 'GET', url])
    return json.loads(output)
def amadeus_extensive_request(origin, destination, **kwargs):
    """Makes a request to Amadeus for the extensive (historical) price search.

    Aggregates by day; extra query parameters can be passed as kwargs.
    Same curl-with-one-retry pattern as `amadeus_low_fare_request`.
    """
    url_params = {
        'origin': origin,
        'destination': destination,
        'aggregation_mode': 'DAY',
    }
    url_params.update(kwargs)
    url = EXTENSIVE_URL + '?' + ('apikey=%s&' % AMADEUS_API_KEY) + '&'.join(['%s=%s' % (a, b) for a, b in url_params.iteritems()])
    try:
        output = subprocess.check_output(['curl', '-X', 'GET', url])
    except Exception:
        # Single blind retry on any failure.
        output = subprocess.check_output(['curl', '-X', 'GET', url])
    return json.loads(output)
def flat_flights(amadeus_res):
    """Flatten an Amadeus response: one record per itinerary.

    Each result's fields (except 'itineraries') are copied into every one
    of its itinerary dicts.
    """
    flattened = []
    for result in amadeus_res['results']:
        shared = {key: value for key, value in result.items() if key != 'itineraries'}
        for itinerary in result['itineraries']:
            record = dict(shared)
            record.update(itinerary)
            flattened.append(record)
    return flattened
def parse_extensive(data):
    """Turn an extensive-search response into (feature dicts, price labels).

    Returns a pair: the per-result feature dicts (dates converted to
    ordinals, plus days_in_advance) and the matching list of float prices.

    NOTE(review): `temp = i` aliases the input dict, so the caller's `data`
    is mutated in place (price/airline deleted) — confirm that is intended.
    """
    origin = data['origin']
    new_data = []
    values = []
    for i in data['results']:
        values.append(float(i['price']))
        temp = i
        del temp['price']
        del temp['airline']
        temp[u'origin'] = origin
        departure_date = iso_to_ordinal(temp['departure_date'])
        return_date = iso_to_ordinal(temp['return_date'])
        now = datetime.datetime.today().toordinal()
        days_in_advance = departure_date - now
        temp[u'departure_date'] = departure_date
        temp[u'return_date'] = return_date
        temp[u'days_in_advance'] = days_in_advance
        new_data.append(temp)
    return (new_data, values)
def get_best_current_flight(origin, destination, departure_date):
    """Fetch the single cheapest flight and return (depart, arrive, fare).

    Times are reformatted from 24h "HH:MM" to "HH:MM AM/PM"; fare is the
    total price string from the first result.
    """
    res = amadeus_low_fare_request(origin, destination, departure_date, number_of_results=1)
    depart_time = res['results'][0]['itineraries'][0]['outbound']['flights'][0]['departs_at']
    arrival_time = res['results'][0]['itineraries'][0]['outbound']['flights'][-1]['arrives_at']
    # Timestamps look like "YYYY-MM-DDTHH:MM"; keep only the time part.
    depart_time = depart_time.split('T')[-1]
    arrival_time = arrival_time.split('T')[-1]
    depart_time = datetime.datetime.strptime(depart_time, "%H:%M").strftime("%I:%M %p")
    arrival_time = datetime.datetime.strptime(arrival_time, "%H:%M").strftime("%I:%M %p")
    fare = res['results'][0]['fare']['total_price']
    return depart_time, arrival_time, fare
def find_best_time_to_buy(origin, destination, departure_date, arrive_by=None):
    """Given the parameters from a text, find the best time to buy.

    Fits an SVR on extensive-search price history, then sweeps every
    possible days_in_advance value to find the cheapest predicted day.
    Returns (best_day as date, amount_saved).  Python 2 code (print
    statement below).
    """
    features, values = parse_extensive(amadeus_extensive_request(origin, destination))
    vec = DictVectorizer()
    clf = svm.SVR()
    clf.fit(vec.fit_transform(features).toarray(), values)
    print vec.get_feature_names()
    base = {
        u'origin': origin,
        u'destination': destination,
        u'departure_date' : iso_to_ordinal(departure_date),
        # Assumes a one-week round trip — TODO confirm.
        u'return_date' : iso_to_ordinal(departure_date) + 7,
    }
    now = datetime.datetime.today().toordinal()
    curr = 1000000000000.0
    best_day = now
    worst = 0.0
    for days_in_advance in range(iso_to_ordinal(departure_date) - now + 1):
        temp = base
        temp[u'days_in_advance'] = days_in_advance
        # Small random jitter added to the prediction (demo noise).
        price = clf.predict(vec.transform(temp).toarray()) + random.uniform(-0.3,0.3)
        if price < curr:
            curr = price
            best_day = iso_to_ordinal(departure_date) - days_in_advance
        worst = max(worst, price)
    # Never recommend buying later than 47 days before departure.
    best_day = min(best_day, max(iso_to_ordinal(departure_date) - 47, now))
    amount_saved = worst - curr if best_day != now else 0.0
    # NOTE(review): saved amount is multiplied by 100 — presumably a unit
    # conversion for display; confirm.
    return datetime.date.fromordinal(best_day), amount_saved * 100.0
def parse_msg(msg):
    """Extract trip parameters from an SMS using the module-level regexes.

    Returns a dict with origin/destination (required — raises if absent),
    month/day (several formats, or "tomorrow"), and an optional
    before/after time constraint (empty strings when missing).
    """
    origin = cities_regex.match(msg).group(1)
    destination = cities_regex.match(msg).group(2)
    month = ''
    day = ''
    try:
        # "March 14" style.
        month = day_regex.match(msg).group(1)
        day = day_regex.match(msg).group(2)
    except Exception:
        try:
            # "3/14" or "3-14" style.
            month = day_regex_2.match(msg).group(1)
            day = day_regex_2.match(msg).group(2)
        except Exception:
            if 'tomorrow' in msg.lower():
                flight_date = datetime.date.today() + datetime.timedelta(1)
                month = str(flight_date.month)
                day = str(flight_date.day)
    hour_side = ''
    hour = ''
    m = ''
    try:
        # Optional "before 9 am" / "after 5PM" constraint.
        hour_side = time_regex.match(msg).group(1)
        hour = time_regex.match(msg).group(2)
        m = time_regex.match(msg).group(3)
    except Exception:
        pass
    res = {
        'origin': origin,
        'destination': destination,
        'month': month,
        'day': day,
        'hour_side': hour_side,
        'hour': hour,
        'm': m
    }
    return res
if __name__ == "__main__":
app.run(debug=True)
| StarcoderdataPython |
1628562 | <filename>elsie/render/render.py
import os
from ..render.backends.svg.utils import svg_begin, svg_end
from ..utils.sxml import Xml
from .inkscape import export_by_inkscape
class RenderUnit:
    """
    A single presentation page that can render itself using a backend.
    """

    def __init__(self, slide, step):
        self.slide = slide
        self.step = step

    def write_debug(self, out_dir):
        """Hook for subclasses that dump a debug artifact; no-op by default."""
        return None

    def export(self, fs_cache, export_type: str):
        """Produce an exported file for this unit; subclasses must implement."""
        raise NotImplementedError

    def get_svg(self):
        """Return this unit's SVG text, or None when it is not SVG-backed."""
        return None
class SvgRenderUnit(RenderUnit):
    """Render unit backed by an in-memory SVG string, exported via Inkscape."""
    def __init__(self, slide, step, svg, inkscape):
        super().__init__(slide, step)
        # Full SVG document text for this page.
        self.svg = svg
        # Inkscape wrapper used for exporting.
        self.inkscape = inkscape

    def write_debug(self, out_dir):
        # Dump the raw SVG as "<slide-index>-<step>.svg" for inspection.
        svg_file = os.path.join(
            out_dir, "{}-{}.svg".format(self.slide.index, self.step)
        )
        with open(svg_file, "w") as f:
            f.write(self.svg)

    def export(self, fs_cache, export_type):
        # Cache key is the SVG bytes; on a miss Inkscape converts it.
        return fs_cache.ensure(
            self.svg.encode(),
            export_type,
            lambda source, target, et: export_by_inkscape(
                self.inkscape, source, target, et
            ),
        )

    def get_svg(self):
        return self.svg
class ExportedRenderUnit(RenderUnit):
    """Render unit whose output already exists as a file on disk."""
    def __init__(self, slide, step, filename, export_type="pdf"):
        super().__init__(slide, step)
        # Path to the pre-exported file.
        self.filename = filename
        # Format of that file; export() can only serve this format.
        self.export_type = export_type

    def export(self, fs_cache, export_type):
        # Returns the existing file when the requested type matches;
        # otherwise falls through and implicitly returns None.
        if export_type == self.export_type:
            return self.filename
def per_page_grouping(backend, units, count_x, count_y, width, height):
    """Pack SVG render units onto grid pages of count_x x count_y slots.

    Non-SVG units pass through unchanged.  Each filled page becomes one
    combined SvgRenderUnit; `width`/`height` are the per-slot dimensions.
    Requires an InkscapeBackend.
    """
    from .backends import InkscapeBackend

    assert isinstance(backend, InkscapeBackend)
    # TODO: reimplement using RenderingContext

    def new():
        # Start a fresh page-sized SVG document.
        tmp_xml = Xml()
        svg_begin(tmp_xml, width * count_x, height * count_y)
        return tmp_xml

    def close():
        # Finish the current page (closure reads `idx`, `xml`, `new_units`);
        # skipped when the page is still empty.
        if idx > 0:
            svg_end(xml)
            new_units.append(
                SvgRenderUnit(None, None, xml.to_string(), backend.inkscape)
            )

    assert count_x > 0
    assert count_y > 0
    limit = count_x * count_y
    if limit == 1:
        # 1x1 grid is a no-op grouping.
        return units

    new_units = []
    xml = new()
    idx = 0
    for unit in units:
        svg = unit.get_svg()
        if svg is None:
            # Not SVG-backed: keep the unit as-is.
            new_units.append(unit)
            continue
        if idx == limit:
            # Page full: flush it and start the next one.
            close()
            xml = new()
            idx = 0
        # Place the unit in row-major order within the grid.
        x = (idx % count_x) * width
        y = (idx // count_x) * height
        xml.element("g")
        xml.set("transform", f"translate({x}, {y})")
        xml.raw_text(svg)
        xml.close("g")
        idx += 1
    close()
    return new_units
| StarcoderdataPython |
1782849 | from dataknead import Knead
# Demonstrates the ways a Knead instance can ingest data.
# Read data from file
Knead("input/entity.json")
# Pass data directly
Knead([1,2,3])
# Parse data from string
Knead("[1,2,3]", parse_as="csv")
# Read data from file without a file extension, give the file format
Knead("input/entity", read_as="json")
3314442 | from jivago.lang.annotations import Inject
from jivago.wsgi.annotations import Resource
from jivago.wsgi.invocation.parameters import OptionalQueryParam
from jivago.wsgi.methods import POST
from jivago.wsgi.request.request import Request
from jivago.wsgi.request.response import Response
from pdfinvert.wsgi.application.conversion_service import ConversionService
from pdfinvert.wsgi.application.temporary_filepath_factory import TemporaryFilePathFactory
from pdfinvert.wsgi.config.conversion_config import ConversionConfig
@Resource("/convert")
class FileResource(object):
    """HTTP resource: accept a PDF upload and return its converted form."""
    @Inject
    def __init__(self, temporary_file_factory: TemporaryFilePathFactory, conversion_service: ConversionService,
                 conversion_config: ConversionConfig):
        # Dependencies are injected by the jivago framework.
        self.conversion_config = conversion_config
        self.conversion_service = conversion_service
        self.temporary_file_factory = temporary_file_factory

    @POST
    def post_file(self, request: Request, dpi: OptionalQueryParam[int]) -> Response:
        """Save the uploaded PDF, convert it at the requested DPI, return the result.

        `dpi` defaults to 100 and is clamped to the configured maximum.
        NOTE(review): the temporary input/output files are never deleted
        here — confirm cleanup happens elsewhere.
        """
        filename = self.temporary_file_factory.generate_temporary_pdf_filepath()
        dpi = min(100 if dpi is None else int(dpi), self.conversion_config.max_dpi)
        with open(filename, 'wb') as f:
            f.write(request.body)
        new_filename = self.conversion_service.convert(filename, dpi)
        with open(new_filename, 'rb') as f:
            body = f.read()
        return Response(200, {}, body)
| StarcoderdataPython |
1656762 | <reponame>jaraco/pmxbot.webhooks<gh_stars>0
import json
import textwrap
from unittest import mock
import cherrypy
from cherrypy.test import helper
from pmxbot.webhooks import Server
class ServerTest(helper.CPWebCase):
    """Unit tests for Server.send_to; `Server.queue` is class-level state,
    so it is cleared before and after every test."""
    def setUp(self):
        Server.queue.clear()

    def tearDown(self):
        Server.queue.clear()

    def test_send_to(self):
        # Channel name is enqueued first, followed by the messages.
        Server.send_to('channel', 'msg1', 'msg2', 'msg3')
        assert Server.queue == ['channel', 'msg1', 'msg2', 'msg3']

    def test_send_to_multiline(self):
        # Multi-line messages are split into one queue entry per line.
        Server.send_to('channel', 'msg1\nmsg2', 'msg3')
        assert Server.queue == ['channel', 'msg1', 'msg2', 'msg3']

    def test_send_to_multiple(self):
        # Successive calls append; each starts with its channel name.
        Server.send_to('chan1', 'msg1')
        Server.send_to('chan2', 'msg2')
        Server.send_to('chan3', 'msg3\nmsg4')
        assert Server.queue == [
            'chan1',
            'msg1',
            'chan2',
            'msg2',
            'chan3',
            'msg3',
            'msg4',
        ]
class VelociraptorTest(helper.CPWebCase):
    """Integration tests for the /velociraptor webhook endpoint."""
    @staticmethod
    def setup_server():
        # Called by CPWebCase to mount the app under test.
        cherrypy.tree.mount(Server())

    @property
    def server(self):
        return cherrypy.tree.apps[''].root

    def _get(self, **kwargs):
        return self.getPage("/velociraptor", **kwargs)

    def _post_json(self, data):
        # Helper: POST `data` as a JSON body with correct headers.
        body = json.dumps(data)
        kwargs = {
            'method': 'POST',
            'headers': [
                ('Content-Type', 'application/json'),
                ('Content-Length', str(len(body))),
            ],
            'body': body,
        }
        return self.getPage("/velociraptor", **kwargs)

    def test_only_post(self):
        # GET is rejected; only POST is accepted.
        self._get()
        self.assertStatus('405 Method Not Allowed')

    def test_event_invalid(self):
        # Payloads missing required fields are rejected with 400.
        for payload in [
            {},  # no tags
            {'tags': ['route']},  # no title
            {'tags': ['swarm', 'deploy', 'done']},  # no title
            {'tags': ['scheduled', 'failed']},  # no message
        ]:
            self._post_json(payload)
            self.assertStatus('400 Bad Request')

    def test_event_unknown(self):
        # Unrecognized tag sets are acknowledged but ignored.
        payload = {
            'tags': ['unknown'],
        }
        self._post_json(payload)
        self.assertStatus('200 OK')
        self.assertBody('IGNORED')

    @mock.patch('pmxbot.webhooks.Server.send_to')
    @mock.patch('pmxbot.webhooks.ChannelSelector.get_channels')
    def test_event_route(self, mock_get_channels, mock_send_to):
        # A 'route' event is announced to every selected channel.
        mock_get_channels.return_value = ['chan1', 'chan2']
        payload = {
            'tags': ['route'],
            'title': 'My Swarm',
        }
        self._post_json(payload)
        self.assertStatus('200 OK')
        self.assertBody('OK')
        mock_send_to.assert_has_calls(
            [
                mock.call('chan1', 'VR: Routed My Swarm'),
                mock.call('chan2', 'VR: Routed My Swarm'),
            ]
        )

    @mock.patch('pmxbot.webhooks.Server.send_to')
    @mock.patch('pmxbot.webhooks.ChannelSelector.get_channels')
    def test_event_swarm_deploy_done(self, mock_get_channels, mock_send_to):
        # A completed swarm deploy forwards its message to every channel.
        mock_get_channels.return_value = ['chan1', 'chan2']
        payload = {
            'tags': ['swarm', 'deploy', 'done'],
            'title': 'Swarm MySwarm finished',
            'message': 'Swarm MySwarm finished',
        }
        self._post_json(payload)
        self.assertStatus('200 OK')
        self.assertBody('OK')
        mock_send_to.assert_has_calls(
            [
                mock.call('chan1', 'VR: Swarm MySwarm finished'),
                mock.call('chan2', 'VR: Swarm MySwarm finished'),
            ]
        )

    @mock.patch('pmxbot.webhooks.Server.send_to')
    @mock.patch('pmxbot.webhooks.ChannelSelector.get_channels')
    def test_event_scheduled_failed(self, mock_get_channels, mock_send_to):
        # Multi-swarm failure reports are split: one notification per
        # "Swarm@host: ..." header line; traceback lines are dropped.
        mock_get_channels.return_value = ['chan1']
        message = textwrap.dedent(
            """
            MySwarm1@host: encoding.py failed:
            traceback
            MySwarm2@host: some other error...
            fat
            traceback
            MySwarm3@host: bizarre bug;
            MySwarm4@host: py3 stacktraces contain multiline tracebacks:
            trackback1
            trackback1
            trackback1
            traceback2
            traceback2
            traceback2
            """
        )
        payload = {
            'tags': ['scheduled', 'failed'],
            'message': message,
        }
        self._post_json(payload)
        self.assertStatus('200 OK')
        self.assertBody('OK')
        assert mock_send_to.call_args_list == [
            mock.call(
                'chan1',
                (
                    'VR: Scheduled uptests failed for MySwarm1@host: '
                    'encoding.py failed:'
                ),
            ),
            mock.call(
                'chan1',
                (
                    'VR: Scheduled uptests failed for MySwarm2@host: '
                    'some other error...'
                ),
            ),
            mock.call(
                'chan1',
                ('VR: Scheduled uptests failed for MySwarm3@host: ' 'bizarre bug;'),
            ),
            mock.call(
                'chan1',
                (
                    'VR: Scheduled uptests failed for MySwarm4@host: '
                    'py3 stacktraces contain multiline tracebacks:'
                ),
            ),
        ]
| StarcoderdataPython |
1612250 | <filename>test.py
#!/usr/bin/env python3
import os
import time
import sys
import json
import subprocess
from enum import unique
from multiprocessing.pool import Pool
from pathlib import Path
from datetime import datetime
from collections import defaultdict
import requests
sys.path.extend(["./python-client"])
from swagger_client.api.default_api import DefaultApi
import swagger_client as faasnap
from swagger_client.configuration import Configuration
from types import SimpleNamespace
# bpftrace one-liner programs keyed by a short probe name; variants with a
# leading underscore are the unfiltered versions (no comm filter).
bpf_map = {
    'brq': 'tracepoint:block:block_rq_issue /strncmp("fc_vcpu", comm, 7)==0 || comm =="main"/ {@blockrq[comm] = count(); @bsize[comm] = sum(args->bytes);}',
    'bsize': 'tracepoint:block:block_rq_issue /strncmp("fc_vcpu", comm, 7)==0 || comm =="main"/ {@blockrqsize[comm] = sum(args->bytes)}',
    '_bsize': 'tracepoint:block:block_rq_issue {@blockrqsize[comm] = sum(args->bytes)}',
    'pf': 'kprobe:handle_mm_fault /strncmp("fc_vcpu", comm, 7)==0 || comm =="main" || comm=="firecracker"/ {@pf[comm] = count()}',
    '_pf': 'kprobe:handle_mm_fault {@pf[comm] = count()}',
    'mpf': 'kretprobe:handle_mm_fault / (retval & 4) == 4 && (strncmp("fc_vcpu", comm, 7)==0 || comm =="main")/ {@majorpf[comm] = count()}',
    'pftime': 'kprobe:kvm_mmu_page_fault { @start[tid] = nsecs; } kretprobe:kvm_mmu_page_fault /@start[tid]/ {@n[comm] = count(); $delta = nsecs - @start[tid]; @dist[comm] = hist($delta); @avrg[comm] = avg($delta); delete(@start[tid]); }',
    'vcpublock': 'kprobe:kvm_vcpu_block { @start[tid] = nsecs; } kprobe:kvm_vcpu_block /@start[tid]/ {@n[comm] = count(); $delta = nsecs - @start[tid]; @dist[comm] = hist($delta); @avrg[comm] = avg($delta); delete(@start[tid]); }',
    'cache': 'hardware:cache-misses:1000 /strncmp("fc_vcpu", comm, 7)==0/ {@misses[comm] = count()}',
    'mpf-tl': 'BEGIN { @start = nsecs; } kretprobe:handle_mm_fault / @start != 0 && (retval & 4) == 4 && (strncmp("fc_vcpu", comm, 7)==0 ) / { printf("%d\\n", (nsecs - @start) / 1000000); }'
}
# Globals filled in later by the test driver — presumably from CLI/config;
# None until then.
PAUSE = None
TESTID = None
RESULT_DIR = None
BPF = None
# NOTE(review): umask 0o777 strips ALL permission bits from files this
# process creates — confirm 0 (allow everything) wasn't intended instead.
os.umask(0o777)
def addNetwork(client: DefaultApi, idx: int):
    """Register the fixed guest-side network interface for VM number `idx`.

    Every VM shares the same in-guest MAC/IP; only the namespace name and
    the host-visible unique address vary with `idx`.
    """
    namespace = 'fc%d' % idx
    interface = {
        "host_dev_name": 'vmtap0',
        "iface_id": "eth0",
        "guest_mac": 'AA:FC:00:00:00:01',  # fixed MAC
        "guest_addr": '172.16.0.2',  # fixed guest IP
        "unique_addr": '192.168.0.%d' % (idx + 2),
    }
    client.net_ifaces_namespace_put(namespace=namespace, interface=interface)
clients = {}
def prepareVanilla(params, client: DefaultApi, setting, func, func_param, par_snap):
    """Boot a VM, run one warm-up invocation, and take `par_snap` full snapshots.

    Returns the snapshot ids.  Caches are dropped on each snapshot
    (per `setting.patch_state`) so later measurements start cold.
    """
    all_snaps = []
    vm = client.vms_post(vm={'func_name': func.name, 'namespace': 'fc%d' % 1})
    time.sleep(5)  # give the VM time to boot
    invoc = faasnap.Invocation(func_name=func.name, vm_id=vm.vm_id, params=func_param, mincore=-1, enable_reap=False)
    ret = client.invocations_post(invocation=invoc)
    print('prepare invoc ret:', ret)
    base = faasnap.Snapshot(vm_id=vm.vm_id, snapshot_type='Full', snapshot_path=params.test_dir+'/Full.snapshot', mem_file_path=params.test_dir+'/Full.memfile', version='0.23.0', **vars(setting.record_regions))
    base_snap = client.snapshots_post(snapshot=base)
    all_snaps.append(base_snap)
    client.vms_vm_id_delete(vm_id=vm.vm_id)
    time.sleep(2)
    # Duplicate the base snapshot to get one copy per parallel worker.
    for i in range(par_snap-1):
        all_snaps.append(client.snapshots_put(base_snap.ss_id, '%s/Full.memfile.%d' % (params.test_dir, i)))
    for snap in all_snaps:
        client.snapshots_ss_id_patch(ss_id=snap.ss_id, state=vars(setting.patch_state)) # drop cache
    time.sleep(1)
    return [snap.ss_id for snap in all_snaps]
def prepareMincore(params, client: DefaultApi, setting, func, func_param, par_snap):
    """Prepare warm snapshots carrying mincore (resident-page) information.

    Boots a VM, snapshots it, restores from that snapshot with mincore
    tracking, snapshots again ("Warm"), then transfers the mincore data to
    the warm snapshot and makes `par_snap` copies with caches dropped.
    Returns the warm snapshot ids.
    """
    all_snaps = []
    vm = client.vms_post(vm={'func_name': func.name, 'namespace': 'fc%d' % 1})
    time.sleep(5)  # give the VM time to boot
    base_snap = client.snapshots_post(snapshot=faasnap.Snapshot(vm_id=vm.vm_id, snapshot_type='Full', snapshot_path=params.test_dir+'/Full.snapshot', mem_file_path=params.test_dir+'/Full.memfile', version='0.23.0'))
    client.vms_vm_id_delete(vm_id=vm.vm_id)
    client.snapshots_ss_id_patch(ss_id=base_snap.ss_id, state=vars(setting.patch_base_state)) # drop cache
    time.sleep(2)
    # input("Press Enter to start 1st invocation...")
    if setting.mincore_size > 0:
        mincore = -1
    else:
        mincore = 100
    invoc = faasnap.Invocation(func_name=func.name, ss_id=base_snap.ss_id, params=func_param, mincore=mincore, mincore_size=setting.mincore_size, enable_reap=False, namespace='fc%d'%1, use_mem_file=True)
    ret = client.invocations_post(invocation=invoc)
    newVmID = ret['vmId']
    print('prepare invoc ret:', ret)
    ret = client.invocations_post(invocation=faasnap.Invocation(func_name='run', vm_id=newVmID, params="{\"args\":\"echo 8 > /proc/sys/vm/drop_caches\"}", mincore=-1, enable_reap=False)) # disable sanitizing
    warm_snap = client.snapshots_post(snapshot=faasnap.Snapshot(vm_id=newVmID, snapshot_type='Full', snapshot_path=params.test_dir+'/Warm.snapshot', mem_file_path=params.test_dir+'/Warm.memfile', version='0.23.0', **vars(setting.record_regions)))
    all_snaps.append(warm_snap)
    client.vms_vm_id_delete(vm_id=newVmID)
    time.sleep(2)
    client.snapshots_ss_id_mincore_put(ss_id=warm_snap.ss_id, source=base_snap.ss_id) # carry over mincore to new snapshot
    client.snapshots_ss_id_mincore_patch(ss_id=warm_snap.ss_id, state=vars(setting.patch_mincore))
    # Duplicate the warm snapshot for parallel workers.
    for i in range(par_snap-1):
        all_snaps.append(client.snapshots_put(warm_snap.ss_id, '%s/Full.memfile.%d' % (params.test_dir, i)))
    client.snapshots_ss_id_patch(ss_id=base_snap.ss_id, state=vars(setting.patch_base_state)) # drop cache
    for snap in all_snaps:
        client.snapshots_ss_id_patch(ss_id=snap.ss_id, state=vars(setting.patch_state)) # drop cache
    client.snapshots_ss_id_mincore_patch(ss_id=warm_snap.ss_id, state={'drop_ws_cache': True})
    # input("Press Enter to start finish invocation...")
    time.sleep(1)
    return [snap.ss_id for snap in all_snaps]
def prepareReap(params, client: DefaultApi, setting, func, func_param, idx):
    """Prepare one snapshot for REAP-style restoration (worker `idx`).

    Boots a VM, warms it with one invocation, snapshots it, then restores
    once with REAP recording enabled to build the working-set file; drops
    the page cache and the REAP cache so measurements start cold.
    Returns the snapshot id in a one-element list.
    """
    vm = client.vms_post(vm={'func_name': func.name, 'namespace': 'fc%d' % idx})
    time.sleep(5)  # give the VM time to boot
    invoc = faasnap.Invocation(func_name=func.name, vm_id=vm.vm_id, params=func_param, mincore=-1, enable_reap=False)
    ret = client.invocations_post(invocation=invoc)
    print('1st prepare invoc ret:', ret)
    base = faasnap.Snapshot(vm_id=vm.vm_id, snapshot_type='Full', snapshot_path=params.test_dir+'/Full.snapshot'+str(idx), mem_file_path=params.test_dir+'/Full.memfile'+str(idx), version='0.23.0')
    base_snap = client.snapshots_post(snapshot=base)
    client.vms_vm_id_delete(vm_id=vm.vm_id)
    time.sleep(1)
    client.snapshots_ss_id_patch(ss_id=base_snap.ss_id, state=vars(setting.patch_state)) # drop cache
    time.sleep(1)
    # Recording run: enable_reap=True builds the working-set file.
    invoc = faasnap.Invocation(func_name=func.name, ss_id=base_snap.ss_id, params=func_param, mincore=-1, enable_reap=True, ws_file_direct_io=True, namespace='fc%d'%1)
    ret = client.invocations_post(invocation=invoc)
    print('2nd prepare invoc ret:', ret)
    time.sleep(1)
    client.vms_vm_id_delete(vm_id=ret['vmId'])
    time.sleep(2)
    client.snapshots_ss_id_patch(ss_id=base_snap.ss_id, state=vars(setting.patch_state)) # drop cache
    client.snapshots_ss_id_reap_patch(ss_id=base_snap.ss_id, cache=False) # drop reap cache
    time.sleep(1)
    return [base_snap.ss_id]
def prepareEmuMincore(params, client: DefaultApi, setting, func, func_param):
    """Prepare one snapshot with recorded regions and an emulated mincore set.

    Like prepareReap() but the snapshot is taken with the extra
    `setting.record_regions` options, and after the REAP-recording replay the
    snapshot's mincore state is patched from `setting.patch_mincore`.

    Returns a single-element list containing the snapshot id.
    """
    vm = client.vms_post(vm={'func_name': func.name, 'namespace': 'fc%d' % 1})
    time.sleep(5)  # give the freshly booted VM time to come up
    invoc = faasnap.Invocation(func_name=func.name, vm_id=vm.vm_id, params=func_param, mincore=-1, enable_reap=False)
    ret = client.invocations_post(invocation=invoc)
    print('1st prepare invoc ret:', ret)
    snapshot = client.snapshots_post(snapshot=faasnap.Snapshot(vm_id=vm.vm_id, snapshot_type='Full', snapshot_path=params.test_dir+'/Full.snapshot', mem_file_path=params.test_dir+'/Full.memfile', version='0.23.0', **vars(setting.record_regions)))
    client.vms_vm_id_delete(vm_id=vm.vm_id)
    time.sleep(1)
    client.snapshots_ss_id_patch(ss_id=snapshot.ss_id, state=vars(setting.patch_state))  # drop cache
    time.sleep(1)
    invoc = faasnap.Invocation(func_name=func.name, ss_id=snapshot.ss_id, params=func_param, mincore=-1, enable_reap=True, ws_file_direct_io=True, namespace='fc%d'%1)  # get emulated mincore
    ret = client.invocations_post(invocation=invoc)
    print('2nd prepare invoc ret:', ret)
    time.sleep(1)
    client.vms_vm_id_delete(vm_id=ret['vmId'])
    time.sleep(2)
    client.snapshots_ss_id_reap_patch(ss_id=snapshot.ss_id, cache=False)  # drop reap cache
    client.snapshots_ss_id_mincore_patch(ss_id=snapshot.ss_id, state=vars(setting.patch_mincore))
    client.snapshots_ss_id_patch(ss_id=snapshot.ss_id, state=vars(setting.patch_state))  # drop cache
    time.sleep(1)
    return [snapshot.ss_id]
def invoke(args):
    """Worker entry for one measured snapshot invocation (used via Pool.map).

    `args` is a flat tuple (see run_snap) because multiprocessing workers
    receive a single picklable argument.  Builds the Invocation according to
    `setting.invoke_steps`, optionally wraps the call in a bpftrace capture,
    then fetches and stores the resulting trace JSON under RESULT_DIR.
    """
    params, setting, func, func_param, idx, ss_id, par, par_snap, record_input, test_input = args
    # runId encodes parallelism for parallel runs, input ids otherwise.
    if par > 1 or par_snap > 1:
        runId = '%s_%s_%d_%d' % (setting.name, func.id, par, par_snap)
    else:
        runId = '%s_%s_%d%d' % (setting.name, func.id, record_input, test_input)
    bpfpipe = None
    time.sleep(1)
    mcstate = None
    if setting.invoke_steps == "vanilla":
        invoc = faasnap.Invocation(func_name=func.name, ss_id=ss_id, params=func_param, mincore=-1, enable_reap=False, namespace='fc%d'%idx, **vars(setting.invocation))
    elif setting.invoke_steps == "mincore":
        # Ask the server which mincore layers exist and request loading all of
        # them (layer ids are 1-based).
        mcstate = clients[idx].snapshots_ss_id_mincore_get(ss_id=ss_id)
        invoc = faasnap.Invocation(func_name=func.name, ss_id=ss_id, params=func_param, mincore=-1, load_mincore=[n + 1 for n in range(mcstate['nlayers'])], enable_reap=False, namespace='fc%d'%idx, **vars(setting.invocation))
    elif setting.invoke_steps == "reap":
        invoc = faasnap.Invocation(func_name=func.name, ss_id=ss_id, params=func_param, mincore=-1, enable_reap=True, ws_single_read=True, namespace='fc%d'%idx)
    else:
        print('invoke steps undefined')
        return
    if BPF:
        # Attach a bpftrace session around the invocation; output is appended
        # per-run to a shared bpftrace log with a '==== runId ====' header.
        program = bpf_map[BPF]
        bpffile = open('%s/%s/bpftrace' % (RESULT_DIR, TESTID), 'a+') if RESULT_DIR else open('/tmp/bpftrace', 'a+')
        print('==== %s ====' % runId, file=bpffile, flush=True)
        bpfpipe = subprocess.Popen(['bpftrace', '-e', program], cwd='/tmp/', stdout=bpffile, stderr=subprocess.STDOUT)
        time.sleep(3)  # give bpftrace time to attach its probes
    ret = clients[idx].invocations_post(invocation=invoc)
    if bpfpipe:
        bpfpipe.terminate()
        bpfpipe.wait()
    clients[idx].vms_vm_id_delete(vm_id=ret['vmId'])
    trace_id = ret['traceId']
    print('invoke', runId, 'ret:', ret)
    time.sleep(2)
    if RESULT_DIR:
        # Persist the distributed trace and (possibly None) mincore state.
        directory = '%s/%s/%s' % (RESULT_DIR, TESTID, runId)
        os.makedirs(directory, exist_ok=True)
        with open('%s/%s.json' % (directory, trace_id), 'w+') as f:
            resp = requests.get('%s/%s' % (params.trace_api, trace_id))
            json.dump(resp.json(), f)
        with open('%s/%s-mcstate.json' % (directory, trace_id), 'w+') as f:
            json.dump([mcstate], f)
def run_snap(params, setting, par, par_snap, func, record_input, test_input):
    """Run one snapshot-based experiment: start faasnap, prepare snapshot(s),
    then fire `par` parallel invocations via a process pool.

    `par_snap` > 1 means one snapshot per worker (and must equal `par`);
    otherwise all workers share the single prepared snapshot.
    """
    if par_snap > 1:
        assert(par == par_snap)
    client: DefaultApi
    global clients
    # start faasnap
    snappipe = subprocess.Popen(['./main', '--port=8080', '--host=0.0.0.0'], cwd=params.home_dir, stdout=open('%s/%s/stdout' % (RESULT_DIR, TESTID), 'a+') if RESULT_DIR else open('/tmp/faasnap-stdout', 'a+'), stderr=subprocess.STDOUT)
    time.sleep(5)  # wait for the daemon to start listening
    # set up
    for idx in range(1, 1+par):
        clients[idx] = faasnap.DefaultApi(faasnap.ApiClient(conf))
        addNetwork(clients[idx], idx)
    client = clients[1]
    client.functions_post(function=faasnap.Function(func_name=func.name, image=func.image, kernel=setting.kernel, vcpu=params.vcpu))
    params0 = func.params[record_input]
    params1 = func.params[test_input]
    # NOTE(review): an unrecognized prepare_steps value leaves ssIds unbound
    # and raises NameError below -- confirm whether that is acceptable.
    if setting.prepare_steps == 'vanilla':
        ssIds = prepareVanilla(params, client, setting, func, params0, par_snap=par_snap)
    elif setting.prepare_steps == 'mincore':
        ssIds = prepareMincore(params, client, setting, func, params0, par_snap=par_snap)
    elif setting.prepare_steps == 'reap':
        ssIds = []
        for idx in range(par_snap):
            ssIds += prepareReap(params, client, setting, func, params0, idx=idx+1)
    elif setting.prepare_steps == 'emumincore':
        ssIds = prepareEmuMincore(params, client, setting, func, params0)
    time.sleep(1)
    if PAUSE:
        input("Press Enter to start...")
    with Pool(par) as p:
        # One tuple per worker; with multiple snapshots each worker gets its
        # own, otherwise they all target ssIds[0].
        if len(ssIds) > 1:
            vector = [(params, setting, func, params1, idx, ssIds[idx-1], par, par_snap, record_input, test_input) for idx in range(1, 1+par)]
        else:
            vector = [(params, setting, func, params1, idx, ssIds[0], par, par_snap, record_input, test_input) for idx in range(1, 1+par)]
        p.map(invoke, vector)
    # input("Press Enter to finish...")
    snappipe.terminate()
    snappipe.wait()
    time.sleep(1)
def invoke_warm(args):
    """Worker entry for one warm-VM invocation (used via Pool.map in run_warm).

    The VM was already booted and warmed by run_warm; this just performs the
    measured second invocation (optionally under bpftrace), deletes the VM and
    stores the resulting trace JSON under RESULT_DIR.
    """
    client: DefaultApi
    params, setting, func, func_param, idx, vm_id = args
    client = clients[idx]
    runId = '%s_%s' % (setting.name, func.id)
    time.sleep(1)
    mcstate = None
    invoc = faasnap.Invocation(func_name=func.name, vm_id=vm_id, params=func_param, mincore=-1, enable_reap=False)
    if BPF:
        program = bpf_map[BPF]
        bpffile = open('%s/%s/bpftrace' % (RESULT_DIR, TESTID), 'a+') if RESULT_DIR else open('/tmp/bpftrace', 'a+')
        print('==== %s ====' % runId, file=bpffile, flush=True)
        bpfpipe = subprocess.Popen(['bpftrace', '-e', program], cwd='/tmp/', stdout=bpffile, stderr=subprocess.STDOUT)
        time.sleep(3)  # give bpftrace time to attach its probes
    ret = client.invocations_post(invocation=invoc)
    if BPF:
        # bpfpipe only exists when BPF is set (same guard as above).
        bpfpipe.terminate()
        bpfpipe.wait()
    print('2nd invoc ret:', ret)
    trace_id = ret['traceId']
    client.vms_vm_id_delete(vm_id=vm_id)
    time.sleep(2)
    if RESULT_DIR:
        directory = '%s/%s/%s' % (RESULT_DIR, TESTID, runId)
        os.makedirs(directory, exist_ok=True)
        with open('%s/%s.json' % (directory, trace_id), 'w+') as f:
            resp = requests.get('%s/%s' % (params.trace_api, trace_id))
            json.dump(resp.json(), f)
def run_warm(params, setting, par, par_snap, func, record_input, test_input):
    """Run one warm-start experiment: boot `par` VMs, warm each with a first
    invocation, then measure a parallel second invocation per VM.

    `par_snap` is accepted for signature parity with run_snap but unused here.
    """
    client: DefaultApi
    snappipe = subprocess.Popen(['./main', '--port=8080', '--host=0.0.0.0'], cwd=params.home_dir, stdout=open('%s/%s/stdout' % (RESULT_DIR, TESTID), 'a+') if RESULT_DIR else open('/tmp/faasnap-stdout', 'a+'), stderr=subprocess.STDOUT)
    time.sleep(2)  # wait for the daemon to start listening
    # set up
    for idx in range(1, 1+par):
        clients[idx] = faasnap.DefaultApi(faasnap.ApiClient(conf))
        addNetwork(clients[idx], idx)
    client = clients[1]
    client.functions_post(function=faasnap.Function(func_name=func.name, image=func.image, kernel=setting.kernel, vcpu=params.vcpu))
    params0 = func.params[record_input]
    params1 = func.params[test_input]
    vms = {}
    for idx in range(1, 1+par):
        vms[idx] = clients[idx].vms_post(vm={'func_name': func.name, 'namespace': 'fc%d' % idx})
    time.sleep(5)  # let all VMs finish booting
    # Warm every VM with a first (unmeasured) invocation.
    for idx in range(1, 1+par):
        invoc = faasnap.Invocation(func_name=func.name, vm_id=vms[idx].vm_id, params=params0, mincore=-1, enable_reap=False)
        ret = clients[idx].invocations_post(invocation=invoc)
        print('1st invoc ret:', ret)
    time.sleep(1)
    if PAUSE:
        input("Press Enter to start...")
    with Pool(par) as p:
        vector = [(params, setting, func, params1, idx, vms[idx].vm_id) for idx in range(1, 1+par)]
        p.map(invoke_warm, vector)
    snappipe.terminate()
    snappipe.wait()
    time.sleep(5)
def run(params, setting, func, par, par_snap, repeat, record_input, test_input):
    """Repeat one experiment configuration `repeat` times.

    Dispatches to run_warm() for the 'warm' setting and to run_snap() for
    every snapshot-based setting, printing a banner before each repetition.
    """
    runner = run_warm if setting.name == 'warm' else run_snap
    for iteration in range(repeat):
        print("\n=========%s %s: %d=========\n" % (setting.name, func.id, iteration))
        runner(params, setting, par, par_snap, func, record_input, test_input)
if __name__ == '__main__':
    # CLI: exactly one argument, the JSON test-specification file.
    if len(sys.argv) != 2:
        print("Usage: %s <test.json>" % sys.argv[0])
        exit(1)
    # Environment knobs: PAUSE waits for Enter before measured runs, TESTID
    # names the output directory (defaults to a timestamp), RESULT_DIR is the
    # output root (results are discarded when unset), BPF selects a bpftrace
    # program from bpf_map.
    PAUSE = os.environ.get('PAUSE', None)
    TESTID = os.environ.get('TESTID', datetime.now().strftime('%Y-%m-%dT%H-%M-%S'))
    print("TESTID:", TESTID)
    RESULT_DIR = os.environ.get('RESULT_DIR', None)
    if not RESULT_DIR:
        print("no RESULT_DIR set, will not save results")
    else:
        os.makedirs('%s/%s' % (RESULT_DIR, TESTID), mode=0o777, exist_ok=True)
    BPF = os.environ.get('BPF', None)
    # Load the test spec as nested SimpleNamespaces for attribute access.
    with open(sys.argv[1], 'r') as f:
        params = json.load(f, object_hook=lambda d: SimpleNamespace(**d))
    conf = Configuration()
    conf.host = params.host
    params.settings.faasnap.patch_mincore.to_ws_file = params.test_dir + '/wsfile'
    if RESULT_DIR:
        # Find the first unused tests-<n>.json name and archive the spec there.
        n = 1
        while True:
            p = Path("%s/%s/tests-%d.json" % (RESULT_DIR, TESTID, n))
            if not p.exists():
                break
            n += 1
        with p.open('w') as f:
            json.dump(params, f, default=lambda o: o.__dict__, sort_keys=False, indent=4)
    # The faasnap daemon reads its own config from /etc/faasnap.json.
    with open("/etc/faasnap.json", 'w') as f:
        json.dump(params.faasnap, f, default=lambda o: o.__dict__, sort_keys=False, indent=4)
    print("test_dir:", params.test_dir)
    print("repeat:", params.repeat)
    print("parallelism:", params.parallelism)
    print("par_snapshots:", params.par_snapshots)
    print("kernels:", params.faasnap.kernels)
    print("vcpu:", params.vcpu)
    print("record input:", params.record_input)
    print("test input:", params.test_input)
    # Full cross product of functions x settings x (par, par_snap) pairs x
    # record/test inputs; parallelism and par_snapshots are zipped pairwise.
    for func in params.function:
        for setting in params.setting:
            for par, par_snap in zip(params.parallelism, params.par_snapshots):
                for record_input in params.record_input:
                    for test_input in params.test_input:
                        run(params, setting=vars(params.settings)[setting], func=vars(params.functions)[func], par=par, par_snap=par_snap, repeat=params.repeat, record_input=record_input, test_input=test_input)
| StarcoderdataPython |
1690023 | <gh_stars>0
__version__ = '1.0.0'
__author__ = '<NAME>'
__email__ = '<EMAIL>'
from flask_login import current_user, login_required
from .datastore import UserDatastore
from .security_manager import SecurityManager
from .decorators import role_required, roles_required, roles_optional
| StarcoderdataPython |
4806155 | #!/usr/bin/env python
# stdlib imports
import shutil
import os.path
import datetime
# local imports
from .sender import Sender
class CopySender(Sender):
    '''
    Class for sending and deleting files and directories via system copy
    and delete.
    CopySender creates a remote directory if one does not already exist, and
    copies files and directories to that remote directory. Files are simply
    copied to the remote_directory. Files in the local directory are copied
    to the remote directory, and sub-directories under the local directory are
    preserved in the remote directory.
    For example:
    If the local_directory is /home/user/event1/, which contains:
    /home/user/event1/datafile1.txt
    /home/user/event1/other/datafile2.txt
    and the remote_directory is /data/event1, then it will contain:
    /data/event1/datafile1.txt
    /data/event1/other/datafile2.txt
    The cancel() method implemented in this class behaves as follows:
    cs = CopySender(properties={'remote_directory':'/data/event1'},
                    local_directory='/home/user/event1',cancelfile='CANCEL')
    #Sending...
    cs.cancel() #=>This creates a file called /data/event1/CANCEL.
    Required properties:
      - remote_directory String indicating which directory input content
                         should be copied to.
    '''
    _required_properties = ['remote_directory']
    _optional_properties = []

    def send(self):
        '''Send any files or folders that have been passed to constructor.
        This method deletes any previous cancel files that may exist in the
        remote_directory.
        Returns:
            Tuple containing number of files sent to local directory, and a
            message describing success.
        '''
        # copy directories to remote location, changing remote name to desired
        # alias
        remote_folder = self._properties['remote_directory']
        # remove any previous cancel files sent to remote_folder
        cancelfile = os.path.join(remote_folder, self._cancelfile)
        if os.path.isfile(cancelfile):
            os.remove(cancelfile)
        nfiles = 0
        if not os.path.isdir(remote_folder):
            os.makedirs(remote_folder)
        if self._local_directory:
            local_folder = self._local_directory
            # recursively find all of the files locally, then
            # copy them to corresponding remote folder structure
            allfiles = self.getAllLocalFiles()
            for filename in allfiles:
                self._copy_file_with_path(
                    filename, remote_folder, local_folder=local_folder)
                nfiles += 1
            # NOTE(review): this os.walk sum appears to count the directory's
            # files a second time, on top of the per-file increments in the
            # loop above -- confirm whether the double count is intentional.
            nfiles += sum([len(files)
                           for r, d, files in os.walk(local_folder)])
        # copy files to remote location
        for filename in self._local_files:
            self._copy_file_with_path(filename, remote_folder)
            nfiles += 1
        return (nfiles,
                f'{int(nfiles):d} files sent successfully using CopySender.')

    def cancel(self, cancel_content=None):
        """
        Create a cancel file (named as indicated in constructor "cancelfile"
        parameter) in remote_directory.
        Args:
            cancel_content: String containing text that should be written to
                the cancelfile.
        Returns:
            A string message describing what has occurred.
        """
        remote_folder = self._properties['remote_directory']
        cancelfile = os.path.join(remote_folder, self._cancelfile)
        f = open(cancelfile, 'wt')
        if cancel_content is not None:
            f.write(cancel_content)
        f.close()
        return (f'A .cancel file has been placed in remote directory {remote_folder}.')

    def _copy_file_with_path(self, local_file, remote_folder,
                             local_folder=None):
        """
        Copy local_file to remote_folder, preserving relative path and creating
        required sub-directories.
        Usage:
         local_file: /home/user/data/events/us2016abcd/data_files/datafile.txt
         remote_folder: /data/archive/events
         local_folder: /home/user/data/events/us2016abcd
         would create:
           /data/archive/events/us2016abcd/data_files/datafile.txt
         local_file: /home/user/data/events/us2016abcd/data_files/datafile.txt
         remote_folder: /data/archive/events/us2016abcd
         local_folder: None
         would create:
           /data/archive/events/us2016abcd/datafile.txt
        Args:
            local_file: Local file to copy.
            remote_folder: Remote folder to copy local files to.
            local_folder: Top of local directory where file copying started.
                If None, local_file should be copied to a file of the same
                name (not preserving path) into remote_folder.
        """
        if local_folder is not None:
            # Rebuild local_file's path relative to local_folder underneath
            # remote_folder, creating intermediate directories as needed.
            local_parts = local_file.replace(local_folder, '').strip(
                os.path.sep).split(os.path.sep)
            remote_parts = remote_folder.strip(os.path.sep).split(os.path.sep)
            all_parts = [os.path.sep] + remote_parts + local_parts
            remote_file = os.path.join(*all_parts)
            root, rfile = os.path.split(remote_file)
            if not os.path.isdir(root):
                os.makedirs(root)
        else:
            root, tfile = os.path.split(local_file)
            remote_file = os.path.join(remote_folder, tfile)
        # Copy to a timestamped temp name first, then rename, so readers of
        # remote_file never observe a partially written copy.
        remote_tmp_file = remote_file + '.tmp_' + \
            datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
        shutil.copyfile(local_file, remote_tmp_file)
        os.rename(remote_tmp_file, remote_file)
| StarcoderdataPython |
1625363 | <gh_stars>1-10
import requests
class CurrencyConverter():
    """Convert amounts between currencies using USD-relative exchange rates.

    The rate table is fetched once at construction time from `url`; the JSON
    payload must contain a 'rates' mapping of currency code -> rate quoted
    against USD.
    """

    def __init__(self, url):
        # Single fetch at construction; rates are not refreshed afterwards.
        self.data = requests.get(url).json()
        self.currencies = self.data['rates']

    def convert(self, from_currency, to_currency, amount):
        """Return `amount` of `from_currency` expressed in `to_currency`.

        Rates are quoted against USD, so non-USD sources are first normalized
        to USD before applying the target rate.

        Args:
            from_currency: source currency code (must be in the rate table,
                or 'USD').
            to_currency: target currency code (must be in the rate table).
            amount: numeric amount to convert.

        Returns:
            The converted amount, rounded to 4 decimal places.

        Raises:
            KeyError: if a non-USD currency code is missing from the table.
        """
        if from_currency != 'USD':
            amount = amount / self.currencies[from_currency]
        # limiting the precision to 4 decimal places
        return round(amount * self.currencies[to_currency], 4)
| StarcoderdataPython |
3254622 | <filename>subway_spider/__init__.py
"""
@author: <NAME>
@license: MIT license
@contact: <EMAIL>
@file: __init__.py.py
@time: 2021/3/3 10:52 上午
@desc:
"""
import requests
| StarcoderdataPython |
59418 | # -*- coding: utf-8 -*-
__all__ = ['MySqlConnectionResolver', 'MySqlConnection']
from .MySqlConnection import MySqlConnection
from .MySqlConnectionResolver import MySqlConnectionResolver
| StarcoderdataPython |
1672943 | <reponame>Stonehaven-Campaigns/uk-politics-module
"""Functions for importing data."""
import os
import csv
from typing import Dict, Tuple
import pandas as pd
from . import exceptions
def data_path(filename: str) -> str:
    """Build the absolute path of *filename* inside the package data folder.

    Args:
        filename (str): The data file's name.

    Returns:
        path (str): The full path to the file.
    """
    package_dir = os.path.dirname(__file__)
    return os.path.join(package_dir, "data", filename)
def test_data_file(short_name: str) -> None:
    """Raise if the named data file is missing from the data folder.

    Args:
        short_name (str): file name.

    Raises:
        exceptions.DataFileMissingOrUnreadable: when no such file exists.

    Returns:
        None
    """
    if not os.path.isfile(data_path(short_name)):
        raise exceptions.DataFileMissingOrUnreadable(short_name)
def read_csv_to_dict(short_name: str) -> Dict[str, str]:
    """Load a two-column csv file into a dictionary.

    Args:
        short_name (str): file name.

    Returns:
        Dict[str, str]: A dictionary mapping first column -> second column.
    """
    test_data_file(short_name)
    with open(data_path(short_name), "r", encoding="utf-8-sig") as file:
        reader = csv.reader(file, delimiter=",")
        return {row[0]: row[1] for row in reader}
def read_nicknames_file(
    short_name: str
) -> Tuple[Dict[str, str], Dict[str, str]]:
    """Read a nicknames csv file into two lookup dictionaries.

    Each row is in the format: official,short,*other_nicknames.

    Args:
        short_name (str): name of the csv file.

    Returns:
        A tuple of dictionaries (Tuple[Dict[str, str], Dict[str, str]]):
            nicknames_to_official: lowercased nickname/short/official name
                -> official name
            official_to_short_name: lowercased official name -> short name
    """
    nicknames_to_official: Dict[str, str] = {}
    official_to_short_name: Dict[str, str] = {}
    test_data_file(short_name)
    with open(data_path(short_name), "r", encoding="utf-8-sig") as file:
        for row in csv.reader(file, delimiter=","):
            official = row[0].strip()
            short = row[1].strip()
            nicknames_to_official[short.lower()] = official
            nicknames_to_official[official.lower()] = official
            official_to_short_name[official.lower()] = short
            for alias in row[2:]:
                nicknames_to_official[alias.lower()] = official
    # Green Party, with its different chapters, needs to be set by hand
    nicknames_to_official["green party"] = "Green Party"
    return nicknames_to_official, official_to_short_name
def read_csv_to_dataframe(
    short_name: str,
    separator: str = ","
) -> "pd.DataFrame":
    """Load a csv data file into a pandas dataframe.

    Args:
        short_name (str): Name of data file.
        separator (str, optional): Separator used in the file. Defaults to ",".

    Returns:
        data: dataframe
    """
    test_data_file(short_name)
    return pd.read_csv(data_path(short_name), sep=separator)
# Loaded eagerly at import time; importing this module fails (via
# test_data_file) if either csv is missing from the package data folder.
PARTY_NICKNAMES, PARTY_SHORTNAMES = read_nicknames_file("party_nicknames.csv")
"""Official names from the nicknames we have on record."""
PARTY_COLORS = read_csv_to_dict("party_colors.csv")
"""Web color names for the parties."""
"""MIT License
Copyright (c) 2021 Stonehaven
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
""" | StarcoderdataPython |
1660680 | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-11-22 19:10
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import ticker
def to_scores(text):
    """Parse a whitespace-separated string of numbers into a list of floats."""
    return list(map(float, text.split()))
# Smatch scores for three systems at beam sizes 16, 8, 4, 2, 1 (left to
# right after the reversal below).
giis = to_scores('80.2 80.2 80 80 77.5')
merge = to_scores('80 80.1 80 79.7 78.4')
levi = to_scores('80 80 79.9 79.4 78.3')
# Beam sizes for the x-axis, plotted largest-first.
t = [1, 2, 4, 8, 16]
t = list(reversed(t))
# Square 4x4-inch figure for the paper column.
plt.rcParams["figure.figsize"] = (4, 4)
_, ax = plt.subplots()
# Be sure to only pick integer tick locations.
for axis in [ax.xaxis, ax.yaxis]:
    axis.set_major_locator(ticker.MaxNLocator(integer=True))
# Line styles: red dashed (GSII), green dotted, solid blue.
plt.plot(t, giis, 'r--')
plt.plot(t, merge, 'g:')
plt.plot(t, levi, 'b')
plt.legend(['GSII', 'ND + AD + BD', 'ND + AD + Levi'])
plt.xlabel('Beam Size')
plt.ylabel('Smatch')
# NOTE(review): output path is hard-coded to a personal Overleaf sync folder.
plt.savefig("/Users/hankcs/Dropbox/应用/Overleaf/NAACL-2021-AMR/fig/beam.pdf", bbox_inches='tight')
plt.show()
| StarcoderdataPython |
1682437 | <filename>gestao/contrato/views/remove_despesa_contrato.py
# -*- coding: utf-8 -*-
from django.shortcuts import get_object_or_404, redirect
from gestao.contrato.models.financeiro.ContratoDespesas import ContratoDespesas
def remove_despesa_contrato(request, id_despesa_contrato):
    """Delete a contract-expense link and its underlying expense record,
    then redirect back to the owning contract's expense list.

    Returns 404 when no ContratoDespesas with the given pk exists.
    """
    link = get_object_or_404(ContratoDespesas, pk=id_despesa_contrato)
    # Remember the owning contract before the link row is gone.
    contrato_pk = link.contrato.id
    link.despesa.delete()
    link.delete()
    return redirect("despesas_contrato", id_contrato=contrato_pk)
4816593 | from soundEnvelope import SoundEnvelope, Envelope
import time
def test1():
    # Two repeats of a long bending tone, then a fast ascending major scale.
    # Envelope argument semantics are defined by the Envelope class (not
    # visible here); values are kept verbatim.
    print "test1()"
    for i in range(0,2):
        e3=Envelope(2,1,-1,0,4,4,0,20,-1,0,-1,126,110)
        se.play([[24,200,e3]])
        time.sleep(1)
    e3=Envelope(4,48,-48,0,1,1,0,127,0,0,-127,127,8)
    se.play([[0,25,e3],[2,25],[4,25],[5,25],[7,25],[9,25],[11,25],[12,50]],pitchOffset=24,speed=2,legato=1.0)
    time.sleep(2)
def test2():
    # Demonstrates four envelope shapes in sequence (labels below are the
    # original author's names for each shape).
    print "test2()"
    #random
    e3=Envelope(4,121,17,171,3,1,4,127,0,0,-127,127,8)
    se.play([[0,200,e3]],legato=1)
    time.sleep(2.5)
    #updown1
    e3=Envelope(4,48,-48,0,5,5,100,127,0,0,-127,127,8)
    se.play([[0,10*4,e3]],legato=1)
    time.sleep(2)
    #updown2
    e3=Envelope(2,24,-24,0,10,10,100,127,0,0,-127,127,8)
    se.play([[0,20*2,e3]],legato=1)
    time.sleep(2)
    #down
    e3=Envelope(8,-48,0,0,5,16,16,127,0,0,-127,127,8)
    se.play([[12*5,8*6,e3]])
def test3():
    # Short notification-style jingles: "no", "yes" and two beep patterns.
    print "test3()"
    #no
    e3=Envelope(16,0,0,0,1,1,0,127,0,0,-127,127,8)
    se.play([[12,32,e3],[0,64]],legato=0.8,speed=1.5,pitchOffset=0)
    time.sleep(2)
    #yes
    se.play([[0,16,e3],[4,16],[7,16],[12,48]],legato=0.9,speed=1.5,pitchOffset=24)
    time.sleep(2)
    #beep-beep
    se.play([[0,16,e3],[None,16],[0,16]],legato=0.9,speed=1.5,pitchOffset=24)
    time.sleep(2)
    se.play([[0,16,e3],[None,16],[0,16],[None,16],[0,16]],legato=0.9,speed=1.5,pitchOffset=24)
    time.sleep(2)
def test4():
    # Two siren effects built from oscillating pitch envelopes.
    print "test4()"
    #slow siren
    e3=Envelope(1,1,-1,0,48,48,0,127,0,0,-127,127,8)
    se.play([[36,256,e3]])
    time.sleep(2)
    #fast siren
    e3=Envelope(1,2,-2,0,12,12,0,127,0,0,-127,127,8)
    se.play([[48,128,e3]])
def test5():
    # Bell-like tones using two decaying envelopes.
    print "test5()"
    #bell
    e3=Envelope(1,-48,48,0,2,2,255,127,-3,0,-127,127,0)
    e4=Envelope(4,1,-1,0,2,2,0,127,-3,0,-127,127,0)
    se.play([[60,128,e3],[54,128],[None,64],[60,128,e4]])
    time.sleep(2)
def test6():
    # Very short percussive click (4-tick note with a sharp decay envelope).
    print "test6()"
    #click
    e3=Envelope(1,0,0,0,12,12,0,127,-127,0,-127,127,8)
    se.play([[48,4,e3]])
def test7():
    # Plays a full melody: 'brass' sustained lead notes with 'fill' accent
    # notes; entries are [pitch-or-None(rest), duration, optional envelope],
    # where an omitted envelope reuses the previous one.
    print "test7()"
    brass=Envelope(8,0,0,0,2,2,4,4,-1,0,-16,127,126)
    fill=Envelope(4,48,-48,0,1,1,0,127,0,0,-127,127,8)
    se.play([
        [24,32,brass],[22,8],[None,8],[21,8],[None,8],
        [24,32],[22,8],[None,8],[21,8],[None,8],
        [24,8],[None,8],[19,16],[7,8,fill],[None,8],[12,8],[None,8],
        [16,24],[None,8],[12,24],[None,8],
        [26,16,brass],[24,8],[None,8],[26,8],[None,8],[28,32],
        [26,8],[None,8],[24,8],[None,8],[22,8],[None,8],
        [17,24,fill],[None,8],[16,24],[None,8],
        [14,24],[None,8],[12,24],[None,8],
        [24,32,brass],[22,8],[None,8],[21,8],[None,8],
        [24,32],[22,8],[None,8],[21,8],[None,8],
        [24,8],[None,8],[19,16],[7,8,fill],[None,8],[12,8],[None,8],
        [16,24],[None,8],[12,24],[None,8],
        [26,16,brass],[24,8],[None,8],[26,8],[None,8],[28,24],
        [None,8],[26,32],[28,8],[None,8],
        [29,8],[None,24],
        [24,16,fill],[26,8],[28,8],[29,16]
    ],legato=0.8,speed=0.75,pitchOffset=12)
# Demo driver: create the shared sound engine used by all testN() functions
# above, then run the demos back to back.
se=SoundEnvelope()
test1()
test2()
test3()
test4()
test5()
test6()
test7() | StarcoderdataPython |
1629759 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
from __future__ import unicode_literals
from django.views.generic import View
from django.http import Http404
from django.db.models import Q
from django.utils.translation import ugettext as _
from esb.bkcore.models import ComponentAPIDoc, ESBChannel, ComponentSystem, FeedbackForComponentDocs
from esb.common.django_utils import JsonResponse
from .utils import get_system_category
class BaseApiCls(View):
    # Common base for the component-doc views below; adds nothing beyond
    # django's View but provides a single extension point.
    pass
class AllApi(BaseApiCls):
    """Search visible component APIs, optionally filtered by a keyword."""

    def get(self, request):
        # Collect visible channels; a keyword narrows by component name or
        # display name (case-insensitive substring match).
        keyword = request.GET.get('keyword')
        if keyword:
            channels = ESBChannel.objects.filter(is_hidden=False)\
                .filter(Q(component_name__icontains=keyword) | Q(name__icontains=keyword))\
                .order_by('component_system_id')
        else:
            channels = ESBChannel.objects.filter(is_hidden=False)\
                .order_by('component_system_id')
        apis = [
            {
                'id': channel.id,
                'name': channel.component_name,
                'label': channel.name_display,
                'system_id': channel.component_system_id,
            }
            for channel in channels
        ]
        # Resolve the owning systems in one query, indexed by id.
        system_ids = [api['system_id'] for api in apis]
        systems = {
            system['id']: system
            for system in ComponentSystem.objects.filter(id__in=system_ids).values('id', 'name', 'label')
        }
        # Attach system name/label to every api entry (blank when unknown).
        for api in apis:
            system = systems.get(api['system_id'], {})
            api.update({
                'system_name': system.get('name', ''),
                'system_label': system.get('label', ''),
            })
        # Cap the response at the first 30 matches.
        return JsonResponse(list(apis[:30]))
class GetApisBySystem(BaseApiCls):
    """List the visible component APIs belonging to a named system."""

    def get(self, request, system_name):
        """Return the system summary and its API list; 404 when unknown."""
        try:
            system = ComponentSystem.objects.get(name=system_name)
        except Exception:
            raise Http404
        channels = ESBChannel.objects.filter(component_system_id=system.id, is_hidden=False)\
            .order_by('component_name')
        api_list = [
            {
                'id': channel.id,
                'system_id': channel.component_system_id,
                'name': channel.component_name,
                'label': channel.name_display,
                'path': channel.path,
                'type': channel.type,
            }
            for channel in channels
        ]
        return JsonResponse({
            'system_summary': system.remark_display or _(u'暂无系统简介'),
            'api_info_by_system': api_list,
        })
class GetApiDocByApiId(BaseApiCls):
    """Fetch the rendered documentation HTML for one component API."""

    def get(self, request, system_name, api_id):
        # Missing channel/doc records degrade to an empty document instead
        # of an error response.
        try:
            channel = ESBChannel.objects.get(id=api_id)
            doc = ComponentAPIDoc.objects.get(component_id=channel.id)
            doc_html = doc.doc_html_display
        except Exception:
            doc_html = ''
        return JsonResponse({'doc_html': doc_html})
class SubmitTheAdvice(BaseApiCls):
    """Record user feedback about a component's documentation."""

    def post(self, request):
        form = dict(request.POST.items())
        feedback = FeedbackForComponentDocs(
            operator=request.user.username,
            board='',
            component_id=form['api_id'],
            content=form.get('content', _("满足需求")),
        )
        feedback.save()
        return JsonResponse({'result': True})
class CheckComponentExist(BaseApiCls):
    """Check whether a (system, component) pair is registered."""

    def get(self, request):
        query = dict(request.GET.items())
        try:
            system = ComponentSystem.objects.get(name=query['system'])
        except Exception:
            # Unknown (or missing) system name: report non-existence.
            return JsonResponse({'result': False})
        exists = ESBChannel.objects.filter(component_system=system, component_name=query['component']).exists()
        return JsonResponse({'result': exists})

    def post(self, request):
        # NOTE(review): the POST variant unconditionally reports True --
        # looks like a stub; confirm intent.
        return JsonResponse({'result': True})
class GetSystemDocCategory(BaseApiCls):
    # Thin GET endpoint: serializes utils.get_system_category() as JSON.
    def get(self, request):
        return JsonResponse(get_system_category())
| StarcoderdataPython |
24763 | """
Tests for the reference loader for Buyback Authorizations.
"""
from functools import partial
from unittest import TestCase
import blaze as bz
from blaze.compute.core import swap_resources_into_scope
from contextlib2 import ExitStack
import pandas as pd
from six import iteritems
from zipline.pipeline.common import(
BUYBACK_ANNOUNCEMENT_FIELD_NAME,
CASH_FIELD_NAME,
DAYS_SINCE_PREV,
PREVIOUS_BUYBACK_ANNOUNCEMENT,
PREVIOUS_BUYBACK_CASH,
PREVIOUS_BUYBACK_SHARE_COUNT,
SHARE_COUNT_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME)
from zipline.pipeline.data import (CashBuybackAuthorizations,
ShareBuybackAuthorizations)
from zipline.pipeline.factors.events import (
BusinessDaysSinceCashBuybackAuth,
BusinessDaysSinceShareBuybackAuth
)
from zipline.pipeline.loaders.buyback_auth import \
CashBuybackAuthorizationsLoader, ShareBuybackAuthorizationsLoader
from zipline.pipeline.loaders.blaze import (
BlazeCashBuybackAuthorizationsLoader,
BlazeShareBuybackAuthorizationsLoader,
)
from zipline.utils.test_utils import (
tmp_asset_finder,
)
from .base import EventLoaderCommonMixin, DATE_FIELD_NAME
# Per-sid fixture frames of share-count and cash values that get joined with
# the event-date cases (see create_buyback_auth_tst_frame below).  Each frame
# header comment describes the ordering of knowledge (K) and announcement (A)
# dates for that case; the last, empty frame is the "no events" case.
buyback_authorizations = [
    # K1--K2--A1--A2.
    pd.DataFrame({
        SHARE_COUNT_FIELD_NAME: [1, 15],
        CASH_FIELD_NAME: [10, 20]
    }),
    # K1--K2--A2--A1.
    pd.DataFrame({
        SHARE_COUNT_FIELD_NAME: [7, 13],
        CASH_FIELD_NAME: [10, 22]
    }),
    # K1--A1--K2--A2.
    pd.DataFrame({
        SHARE_COUNT_FIELD_NAME: [3, 1],
        CASH_FIELD_NAME: [4, 7]
    }),
    # K1 == K2.
    pd.DataFrame({
        SHARE_COUNT_FIELD_NAME: [6, 23],
        CASH_FIELD_NAME: [1, 2]
    }),
    pd.DataFrame(
        columns=[SHARE_COUNT_FIELD_NAME,
                 CASH_FIELD_NAME],
        dtype='datetime64[ns]'
    ),
]
def create_buyback_auth_tst_frame(cases, field_to_drop):
    """Build per-sid buyback-authorization test frames.

    For each sid, the corresponding dates case (its DATE column renamed to
    the buyback-announcement column) is joined column-wise with the matching
    `buyback_authorizations` fixture, and `field_to_drop` is removed so the
    result carries only the fields relevant to one dataset flavor.
    """
    frames = {}
    for sid, case in enumerate(cases):
        renamed = case.rename(
            columns={DATE_FIELD_NAME: BUYBACK_ANNOUNCEMENT_FIELD_NAME}
        )
        joined = pd.concat([renamed, buyback_authorizations[sid]], axis=1)
        frames[sid] = joined.drop(field_to_drop, 1)
    return frames
class CashBuybackAuthLoaderTestCase(TestCase, EventLoaderCommonMixin):
    """
    Test for cash buyback authorizations dataset.
    """
    # Pipeline terms under test, keyed by the output column names checked by
    # the shared mixin.
    pipeline_columns = {
        PREVIOUS_BUYBACK_CASH:
        CashBuybackAuthorizations.cash_amount.latest,
        PREVIOUS_BUYBACK_ANNOUNCEMENT:
        CashBuybackAuthorizations.announcement_date.latest,
        DAYS_SINCE_PREV:
        BusinessDaysSinceCashBuybackAuth(),
    }

    @classmethod
    def setUpClass(cls):
        # ExitStack so the temp asset finder is torn down in tearDownClass.
        cls._cleanup_stack = stack = ExitStack()
        cls.finder = stack.enter_context(
            tmp_asset_finder(equities=cls.equity_info),
        )
        cls.cols = {}
        # Drop the share-count column: this flavor only carries cash fields.
        cls.dataset = create_buyback_auth_tst_frame(cls.event_dates_cases,
                                                    SHARE_COUNT_FIELD_NAME)
        cls.loader_type = CashBuybackAuthorizationsLoader

    @classmethod
    def tearDownClass(cls):
        cls._cleanup_stack.close()

    def setup(self, dates):
        # Build the expected per-sid "previous cash" series: NaN before the
        # first knowledge date, then the most recently announced cash amount.
        zip_with_floats_dates = partial(self.zip_with_floats, dates)
        num_days_between_dates = partial(self.num_days_between, dates)
        _expected_previous_cash = pd.DataFrame({
            0: zip_with_floats_dates(
                ['NaN'] * num_days_between_dates(None, '2014-01-14') +
                [10] * num_days_between_dates('2014-01-15', '2014-01-19') +
                [20] * num_days_between_dates('2014-01-20', None)
            ),
            1: zip_with_floats_dates(
                ['NaN'] * num_days_between_dates(None, '2014-01-14') +
                [22] * num_days_between_dates('2014-01-15', '2014-01-19') +
                [10] * num_days_between_dates('2014-01-20', None)
            ),
            2: zip_with_floats_dates(
                ['NaN'] * num_days_between_dates(None, '2014-01-09') +
                [4] * num_days_between_dates('2014-01-10', '2014-01-19') +
                [7] * num_days_between_dates('2014-01-20', None)
            ),
            3: zip_with_floats_dates(
                ['NaN'] * num_days_between_dates(None, '2014-01-09') +
                [1] * num_days_between_dates('2014-01-10', '2014-01-14') +
                [2] * num_days_between_dates('2014-01-15', None)
            ),
            # sid 4 has no events, so the expectation is all-NaN.
            4: zip_with_floats_dates(['NaN'] * len(dates)),
        }, index=dates)
        self.cols[PREVIOUS_BUYBACK_ANNOUNCEMENT] = \
            self.get_expected_previous_event_dates(dates)
        self.cols[PREVIOUS_BUYBACK_CASH] = _expected_previous_cash
        self.cols[DAYS_SINCE_PREV] = self._compute_busday_offsets(
            self.cols[PREVIOUS_BUYBACK_ANNOUNCEMENT]
        )
class ShareBuybackAuthLoaderTestCase(TestCase, EventLoaderCommonMixin):
    """
    Test for share buyback authorizations dataset.
    """
    # Pipeline terms under test, keyed by the output column names checked by
    # the shared mixin.
    pipeline_columns = {
        PREVIOUS_BUYBACK_SHARE_COUNT:
        ShareBuybackAuthorizations.share_count.latest,
        PREVIOUS_BUYBACK_ANNOUNCEMENT:
        ShareBuybackAuthorizations.announcement_date.latest,
        DAYS_SINCE_PREV:
        BusinessDaysSinceShareBuybackAuth(),
    }

    @classmethod
    def setUpClass(cls):
        # ExitStack so the temp asset finder is torn down in tearDownClass.
        cls._cleanup_stack = stack = ExitStack()
        cls.finder = stack.enter_context(
            tmp_asset_finder(equities=cls.equity_info),
        )
        cls.cols = {}
        # Drop the cash column: this flavor only carries share-count fields.
        cls.dataset = create_buyback_auth_tst_frame(cls.event_dates_cases,
                                                    CASH_FIELD_NAME)
        cls.loader_type = ShareBuybackAuthorizationsLoader

    @classmethod
    def tearDownClass(cls):
        cls._cleanup_stack.close()

    def setup(self, dates):
        # Build the expected per-sid "previous share count" series: NaN
        # before the first knowledge date, then the most recent count.
        zip_with_floats_dates = partial(self.zip_with_floats, dates)
        num_days_between_dates = partial(self.num_days_between, dates)
        _expected_previous_buyback_share_count = pd.DataFrame({
            0: zip_with_floats_dates(
                ['NaN'] * num_days_between_dates(None, '2014-01-14') +
                [1] * num_days_between_dates('2014-01-15', '2014-01-19') +
                [15] * num_days_between_dates('2014-01-20', None)
            ),
            1: zip_with_floats_dates(
                ['NaN'] * num_days_between_dates(None, '2014-01-14') +
                [13] * num_days_between_dates('2014-01-15', '2014-01-19') +
                [7] * num_days_between_dates('2014-01-20', None)
            ),
            2: zip_with_floats_dates(
                ['NaN'] * num_days_between_dates(None, '2014-01-09') +
                [3] * num_days_between_dates('2014-01-10', '2014-01-19') +
                [1] * num_days_between_dates('2014-01-20', None)
            ),
            3: zip_with_floats_dates(
                ['NaN'] * num_days_between_dates(None, '2014-01-09') +
                [6] * num_days_between_dates('2014-01-10', '2014-01-14') +
                [23] * num_days_between_dates('2014-01-15', None)
            ),
            # sid 4 has no events, so the expectation is all-NaN.
            4: zip_with_floats_dates(['NaN'] * len(dates)),
        }, index=dates)
        self.cols[
            PREVIOUS_BUYBACK_SHARE_COUNT
        ] = _expected_previous_buyback_share_count
        self.cols[PREVIOUS_BUYBACK_ANNOUNCEMENT] = \
            self.get_expected_previous_event_dates(dates)
        self.cols[DAYS_SINCE_PREV] = self._compute_busday_offsets(
            self.cols[PREVIOUS_BUYBACK_ANNOUNCEMENT]
        )
class BlazeCashBuybackAuthLoaderTestCase(CashBuybackAuthLoaderTestCase):
    """Cash buyback loader test case driven through a blaze expression."""
    @classmethod
    def setUpClass(cls):
        super(BlazeCashBuybackAuthLoaderTestCase, cls).setUpClass()
        # Swap in the blaze-backed loader; all fixtures come from the parent.
        cls.loader_type = BlazeCashBuybackAuthorizationsLoader
    def loader_args(self, dates):
        """Flatten the parent's sid -> frame mapping into one blaze expr."""
        _, mapping = super(
            BlazeCashBuybackAuthLoaderTestCase,
            self,
        ).loader_args(dates)
        per_sid_frames = [
            pd.DataFrame({
                BUYBACK_ANNOUNCEMENT_FIELD_NAME:
                    frame[BUYBACK_ANNOUNCEMENT_FIELD_NAME],
                CASH_FIELD_NAME: frame[CASH_FIELD_NAME],
                TS_FIELD_NAME: frame[TS_FIELD_NAME],
                SID_FIELD_NAME: sid,
            })
            for sid, frame in iteritems(mapping)
        ]
        combined = pd.concat(per_sid_frames).reset_index(drop=True)
        return (bz.Data(combined),)
class BlazeShareBuybackAuthLoaderTestCase(ShareBuybackAuthLoaderTestCase):
    """Share buyback loader test case driven through a blaze expression."""
    @classmethod
    def setUpClass(cls):
        super(BlazeShareBuybackAuthLoaderTestCase, cls).setUpClass()
        # Swap in the blaze-backed loader; all fixtures come from the parent.
        cls.loader_type = BlazeShareBuybackAuthorizationsLoader
    def loader_args(self, dates):
        """Flatten the parent's sid -> frame mapping into one blaze expr."""
        _, mapping = super(
            BlazeShareBuybackAuthLoaderTestCase,
            self,
        ).loader_args(dates)
        per_sid_frames = [
            pd.DataFrame({
                BUYBACK_ANNOUNCEMENT_FIELD_NAME:
                    frame[BUYBACK_ANNOUNCEMENT_FIELD_NAME],
                SHARE_COUNT_FIELD_NAME: frame[SHARE_COUNT_FIELD_NAME],
                TS_FIELD_NAME: frame[TS_FIELD_NAME],
                SID_FIELD_NAME: sid,
            })
            for sid, frame in iteritems(mapping)
        ]
        combined = pd.concat(per_sid_frames).reset_index(drop=True)
        return (bz.Data(combined),)
class BlazeShareBuybackAuthLoaderNotInteractiveTestCase(
        BlazeShareBuybackAuthLoaderTestCase):
    """Pass a non-interactive symbol plus an explicit resource mapping."""
    def loader_args(self, dates):
        # Unwrap the single bound blaze expression the parent produces,
        # then rebind its resources out-of-band.
        parent = super(BlazeShareBuybackAuthLoaderNotInteractiveTestCase,
                       self)
        bound_expr = parent.loader_args(dates)[0]
        return swap_resources_into_scope(bound_expr, {})
class BlazeCashBuybackAuthLoaderNotInteractiveTestCase(
        BlazeCashBuybackAuthLoaderTestCase):
    """Pass a non-interactive symbol plus an explicit resource mapping."""
    def loader_args(self, dates):
        # Unwrap the single bound blaze expression the parent produces,
        # then rebind its resources out-of-band.
        parent = super(BlazeCashBuybackAuthLoaderNotInteractiveTestCase,
                       self)
        bound_expr = parent.loader_args(dates)[0]
        return swap_resources_into_scope(bound_expr, {})
| StarcoderdataPython |
111427 | <reponame>boris-42/notify
# Copyright 2016: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from notify import driver
from tests.unit import test
class ModuleTestCase(test.TestCase):
    """Tests for the module-level driver factory in notify.driver."""
    @mock.patch("notify.driver.importlib.import_module")
    def test_get_driver(self, mock_import_module):
        # Two fake driver modules, each exposing a Driver class.
        foo_module, bar_module = mock.Mock(), mock.Mock()
        foo_module.Driver.return_value = "foo_driver"
        bar_module.Driver.return_value = "bar_driver"
        mock_import_module.side_effect = [foo_module, bar_module]
        driver.DRIVERS = {}
        foo_instance = driver.get_driver("foo", {"arg": 123})
        bar_instance = driver.get_driver("bar", {"arg": 321})
        self.assertEqual("foo_driver", foo_instance)
        self.assertEqual("bar_driver", bar_instance)
        # Both Driver classes get cached in the module registry.
        self.assertEqual({"foo": foo_module.Driver, "bar": bar_module.Driver},
                         driver.DRIVERS)
        foo_module.Driver.assert_called_once_with({"arg": 123})
        foo_module.Driver.validate_config.assert_called_once_with({"arg": 123})
        bar_module.Driver.assert_called_once_with({"arg": 321})
        bar_module.Driver.validate_config.assert_called_once_with({"arg": 321})
        self.assertEqual([mock.call("notify.drivers.foo"),
                          mock.call("notify.drivers.bar")],
                         mock_import_module.mock_calls)
        # Config validation failures are wrapped in RuntimeError.
        foo_module.Driver.validate_config.side_effect = ValueError
        self.assertRaises(RuntimeError, driver.get_driver, "foo", {"arg": 1})
        foo_module.Driver.validate_config.side_effect = None
        # Once a driver is cached, get_driver uses it without re-importing.
        driver.DRIVERS["foo"] = mock.Mock(return_value="cached_foo_driver")
        self.assertEqual("cached_foo_driver",
                         driver.get_driver("foo", {"arg": 123}))
        # Import failures are also wrapped in RuntimeError.
        mock_import_module.side_effect = ImportError
        self.assertRaises(RuntimeError, driver.get_driver, "spam", {"arg": 1})
class DriverTestCase(test.TestCase):
    """Tests for the abstract notify Driver base class."""
    def test_validate_payload(self):
        # A complete payload (provided by the base test fixture) is accepted;
        # removing a required key must be rejected.
        self.assertIsNone(driver.Driver.validate_payload(self.payload))
        del self.payload["region"]
        self.assertRaises(ValueError,
                          driver.Driver.validate_payload, self.payload)
    def test_validate_config(self):
        # Only dict-like configs are valid.
        self.assertIsNone(driver.Driver.validate_config({}))
        for bad_config in (None, [], 42, "foo"):
            self.assertRaises(ValueError, driver.Driver.validate_config,
                              bad_config)
    def test_notify(self):
        # The base class leaves notify() abstract.
        instance = driver.Driver({})
        self.assertRaises(NotImplementedError, instance.notify, self.payload)
| StarcoderdataPython |
1655411 | # -*- coding: utf-8 -*-
"""
Class for handling tenhou logs
@author: ApplySci
"""
#%% imports
# standard libraries
from collections import OrderedDict
from itertools import chain
import json
import lzma
import pickle
from types import SimpleNamespace
import urllib
from lxml import etree
import portalocker
import requests
class TenhouLogs():
    """
    Stores and maintains a local archive of tenhou.net game logs for one
    player: downloads raw mjlog XML, extracts rates/places/scores, persists
    everything to an lzma-compressed pickle, and writes a CSV summary.
    """
    #GAMEURL = 'https://tenhou.net/3/mjlog2xml.cgi?%s'
    GAMEURL = 'https://tenhou.net/0/log/?%s'
    # NOTE(review): args={} is a mutable default, and the attribute access
    # below (args.force) would fail on the default anyway -- callers appear
    # expected to pass an argparse-style Namespace. Confirm and consider
    # making the parameter required.
    def __init__(self, outdir, username, args={}):
        self.outdir = outdir
        self.username = username
        self._flags = SimpleNamespace()
        self._flags.force = args.force
        self._flags.need_to_sort = False
        self._flags.no_web = args.no_web
        self._flags.have_new = False
        self._lockfile = None
        self.logs = OrderedDict()
        self.pickle_file = outdir + username + '.pickle.7z'
    def _get_rates(self, xml, key):
        """
        for one game, get the R for each player at the start of the game,
        and the player names, and add them into the OrderedDict
        """
        players = xml.find('UN').attrib
        ratestrings = players['rate'].split(',')
        rates = [float(x) for x in ratestrings]
        self.logs[key]['meanrate'] = sum(rates)/len(rates)
        names = []
        found_player = False
        for j in range(0, 4):
            # Player names are URL-encoded in the mjlog 'UN' element.
            nextname = urllib.parse.unquote(players['n%d' % j])
            names.append(nextname)
            if nextname == self.username:
                self.logs[key]['rate'] = rates[j]
                found_player = True
        if found_player:
            self.logs[key]['uname'] = names
        else:
            # Not our game: drop the partially-built record entirely.
            print('ignoring, player not in %s' % ','.join(names))
            del self.logs[key]
        return found_player
    def _load_from_text(self, key, text):
        """ takes an mjlog text string in, and stores it as an xml object """
        try:
            xml = etree.XML(text, etree.XMLParser(recover=True)).getroottree().getroot()
        except:
            # NOTE(review): bare except silences all parse/decode failures;
            # consider catching etree.XMLSyntaxError explicitly.
            print('failed to parse xml in %s' % key)
            print(text)
            return
        if not self._get_rates(xml, key):
            return
        self._flags.have_new = True
        self._process_scores(xml, key)
    def _process_scores(self, xml, key):
        """
        for one game, get the scores for each player,
        rank them in descending order,
        and compile into a string to match nodocchi.moe
        Add this into the self record
        """
        # A game normally ends with final scores ('owari') on either the last
        # AGARI (win) or the last RYUUKYOKU (exhaustive draw) element.
        xml_scores = xml.find('AGARI[@owari][last()]')
        draw_test = xml.find('RYUUKYOKU[@owari][last()]')
        if xml_scores is not None and draw_test is not None:
            print('WARNING: %s had both a win and a draw with final scores!' % key)
        if xml_scores is None:
            xml_scores = draw_test
        if xml_scores is not None:
            self.logs[key]['sc'] = xml_scores.attrib['owari']
            # 'owari' alternates raw scores and point deltas; [1::2] keeps the
            # odd elements (the adjusted point deltas shown as e.g. "+45.0").
            scores = [float(x) for x in self.logs[key]['sc'].split(',')][1::2]
            sortedscores = sorted(scores, reverse=True)
            sortedplayers = sorted(self.logs[key]['uname'],
                                   reverse=True,
                                   key=lambda x: scores[self.logs[key]['uname'].index(x)])
            self.logs[key]['place'] = sortedplayers.index(self.username) + 1
            self.logs[key]['players'] = ''
            for i, player in enumerate(sortedplayers):
                self.logs[key]['players'] += '%s(%s%.1f)' % (
                    player,
                    "+" if sortedscores[i] > 0 else "",
                    sortedscores[i])
    def one_record(self, store, last_key):
        """
        incorporate one record into the log
        """
        key = store['log']
        if key in self.logs and not self._flags.force:
            return
        # Keys begin with a YYYYMMDDHH timestamp; an out-of-order arrival
        # means the whole OrderedDict will need re-sorting later.
        if key[0:10] < last_key:
            self._flags.need_to_sort = True
        if key not in self.logs:
            self.logs[key] = {}
        self.logs[key].update(store)
        if 'uname' in self.logs[key] and self.username not in self.logs[key]['uname']:
            del self.logs[key]
            return
        if not self._flags.no_web:
            print('gathering game: %s' % key)
            loghttp = requests.get(
                self.GAMEURL % key,
                headers={'referer': 'http://tenhou.net/3/', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'}
            )
            if loghttp.ok:
                self.logs[key]['content'] = loghttp.content
            else:
                print('WARNING: failed to download %s' % key)
        # Without raw content there is nothing to parse: drop the record.
        if 'content' not in self.logs[key] or self.logs[key]['content'] == '':
            del self.logs[key]
            return
        self._load_from_text(key, self.logs[key]['content'])
    def _find_place_and_rate(self, this_log, key_index, logkeys):
        """
        given a particular log, find our score, and check the R rates are consistent
        """
        # check that the progression of R-scores is consistent;
        # If it's not, swap the order of specific games when that improves things
        numself = len(logkeys)
        next_rate = (this_log['rate']
                     + (this_log['meanrate'] - this_log['rate']) / 200 # TODO assumes 400+ games played
                     + 10 - 4 * this_log['place'])
        if (this_log['meanrate']
                and key_index < numself - 1
                and abs(next_rate - self.logs[logkeys[key_index + 1]]['rate']) > 0.02):
            delta = 2
            while (key_index < numself - delta and
                   logkeys[key_index + 1][0:8] == logkeys[key_index + delta][0:8]):
                # TOFIX the above test fails if neighbouring games cross days
                if abs(next_rate - self.logs[logkeys[key_index + delta]]['rate']) < 0.02:
                    # try swapping keys and see if that helps: move the two
                    # candidates and then everything after them to the end so
                    # the OrderedDict ends up in the swapped order.
                    # print('swapping %d with the one following' % (key_index + 1))
                    self.logs.move_to_end(logkeys[key_index + delta])
                    self.logs.move_to_end(logkeys[key_index + 1])
                    for replace in chain(
                            range(key_index + 2, key_index + delta),
                            range(key_index + delta + 1, numself)):
                        self.logs.move_to_end(logkeys[replace])
                    return True
                delta = delta + 1
        return False
    def write_csv(self):
        """
        write out rates csv for excel file
        """
        redo = True
        if self._flags.need_to_sort:
            print('running re-sort')
            self.logs = OrderedDict(sorted(self.logs.items()))
        print('compiling csv')
        # Restart the whole pass whenever _find_place_and_rate reorders games.
        while redo:
            redo = False
            output = ''
            last_hour = '-1'
            this_minute = 0
            logkeys = tuple(self.logs)
            for key_index, key in enumerate(logkeys):
                this_log = self.logs[key]
                if self._find_place_and_rate(this_log, key_index, logkeys):
                    redo = True
                    break
                this_hour = key[8:10]
                # Fake minutes: games in the same hour get 10, 15, 20, ...
                this_minute = this_minute + 5 if this_hour == last_hour else 10
                output = output + (
                    '%s-%s-%s %s:%d,"%s",%.2f,%.2f,%d,"http://tenhou.net/3/?log=%s&tw=%d"\n' %
                    (key[0:4], key[4:6], key[6:8], this_hour, this_minute,
                     this_log['players'].replace('"','""'),
                     this_log['rate'], this_log['meanrate'], this_log['place'],
                     key, this_log['uname'].index(self.username))
                )
                last_hour = this_hour
        # utf-8-sig so Excel detects the encoding from the BOM.
        with open(self.outdir + self.username + '.csv', 'w', encoding='utf-8-sig') as csv:
            csv.write(output)
    def add_from_file(self, filepath):
        """
        receives a filepath, stores the mjlog in that file in the db
        extracts the unique game id (key) from the filename.
        If it fails to parse the file, it will try to download the game
        using the key
        """
        try:
            key = filepath.stem.split('&')[0]
            if key in self.logs and not self._flags.force:
                return
            print(key)
            with filepath.open(encoding='utf-8') as f:
                text = f.read()
            self.logs[key] = {'content': bytes(text, encoding='utf-8')}
            self._load_from_text(key, text)
        except:
            # failed to load file, try downloading instead
            # NOTE(review): bare except; if the very first line raised, `key`
            # is unbound here and this fallback itself raises NameError.
            self.one_record({'log': key}, '')
    @staticmethod
    def add_json(json_in):
        """
        process JSON and append to list of games to log
        """
        out = []
        jsonstring = json_in.replace('"{', '{').replace('}"', '}')
        # NOTE(review): this strips one char from *both* ends per iteration
        # while hunting for the leading '{'; confirm the trailing data is
        # always symmetric wrapping (quotes/brackets).
        while jsonstring[0] != '{':
            jsonstring = jsonstring[1 : -1]
        todo = json.loads(jsonstring)
        if 'log' in todo:
            out.append(todo)
        else:
            # tenhou bulk exports store games under keys log0..log39.
            for i in range(0, 40):
                next_log = 'log%d' % i
                if next_log in todo:
                    out.append(todo[next_log])
        return out
    def _guarantee_defaults(self):
        """
        guarantee that certain properties are always available for each log
        """
        defaults = {'players': '', 'rate': 0, 'meanrate': 0, 'place': 0}
        for key in tuple(self.logs):
            for check_key, default_val in defaults.items():
                if check_key not in self.logs[key]:
                    self.logs[key][check_key] = default_val
    def load(self):
        """
        load logs from file
        """
        # NOTE(review): self._lockfile is still None here (set in __init__),
        # so this constructs portalocker.Lock(None) -- the lock is never
        # acquired and locks nothing. The intended target is probably
        # self.pickle_file; confirm before changing.
        self._lockfile = portalocker.Lock(self._lockfile, timeout=10)
        try:
            with lzma.open(self.pickle_file, 'rb') as infile:
                self.logs = pickle.load(infile)
        except FileNotFoundError:
            pass
    def add_games(self, games_to_add):
        """
        add a set of games
        """
        existing_keys = tuple(self.logs)
        latest_key = next(reversed(self.logs))[0:10] if self.logs else ''
        if self._flags.force:
            self._flags.need_to_sort = True
        new_games = []
        for one_log in games_to_add:
            if 'log' in one_log and (
                self._flags.force or one_log['log'] not in existing_keys
            ):
                new_games.append(one_log)
        for one_log in new_games:
            self.one_record(one_log, latest_key)
        self._guarantee_defaults()
    def save(self):
        """
        save sorted self
        """
        self.write_csv()
        if self._flags.have_new:
            print('saving logs')
            with lzma.open(self.pickle_file, 'wb') as outfile:
                pickle.dump(self.logs, outfile, protocol=4)
        try:
            # NOTE(review): `del` only drops this reference; it does not
            # release any OS-level lock (and none was ever acquired).
            del self._lockfile
        except IOError:
            pass
| StarcoderdataPython |
19144 | #!/usr/bin/env python
u"""
radial_basis.py
Written by <NAME> (01/2022)
Interpolates data using radial basis functions
CALLING SEQUENCE:
ZI = radial_basis(xs, ys, zs, XI, YI, polynomial=0,
smooth=smooth, epsilon=epsilon, method='inverse')
INPUTS:
xs: scaled input X data
ys: scaled input Y data
zs: input data
XI: scaled grid X for output ZI
YI: scaled grid Y for output ZI
OUTPUTS:
ZI: interpolated data grid
OPTIONS:
smooth: smoothing weights
metric: distance metric to use (default euclidean)
epsilon: adjustable constant for distance functions
default is mean Euclidean distance
polynomial: polynomial order if augmenting radial basis functions
default None: no polynomials
method: radial basis function
multiquadric
inverse_multiquadric or inverse (default)
inverse_quadratic
gaussian
linear (first-order polyharmonic spline)
cubic (third-order polyharmonic spline)
quintic (fifth-order polyharmonic spline)
thin_plate: thin-plate spline
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python (https://numpy.org)
scipy: Scientific Tools for Python (https://docs.scipy.org/doc/)
REFERENCES:
<NAME>, Multiquadric equations of topography and other irregular
surfaces, J. Geophys. Res., 76(8), 1905-1915, 1971.
<NAME>, "Radial Basis Functions", Cambridge Monographs on Applied and
Computational Mathematics, 2003.
UPDATE HISTORY:
Updated 01/2022: added function docstrings
Updated 07/2021: using scipy spatial distance routines
Updated 09/2017: using rcond=-1 in numpy least-squares algorithms
Updated 01/2017: epsilon in polyharmonic splines (linear, cubic, quintic)
Updated 08/2016: using format text within ValueError, edit constant vector
added low-order polynomial option (previously used default constant)
Updated 01/2016: new hierarchical_radial_basis function
that first reduces to points within distance. added cutoff option
Updated 10/2014: added third dimension (spherical)
Written 08/2014
"""
from __future__ import print_function, division
import numpy as np
import scipy.spatial
import scipy.special
def radial_basis(xs, ys, zs, XI, YI, smooth=0.0, metric='euclidean',
    epsilon=None, method='inverse', polynomial=None):
    """
    Interpolates data using radial basis functions

    Arguments
    ---------
    xs: scaled input x-coordinates
    ys: scaled input y-coordinates
    zs: input data
    XI: scaled output x-coordinates for data grid
    YI: scaled output y-coordinates for data grid

    Keyword arguments
    -----------------
    smooth: smoothing weights
    metric: distance metric to use (default euclidean)
    epsilon: adjustable constant for distance functions
    method: radial basis function
        - multiquadric
        - inverse_multiquadric or inverse (default)
        - inverse_quadratic
        - gaussian
        - linear (first-order polyharmonic spline)
        - cubic (third-order polyharmonic spline)
        - quintic (fifth-order polyharmonic spline)
        - thin_plate: thin-plate spline
    polynomial: polynomial order if augmenting radial basis functions

    Returns
    -------
    ZI: interpolated data grid
    """
    #-- remove singleton dimensions
    xs = np.squeeze(xs)
    ys = np.squeeze(ys)
    zs = np.squeeze(zs)
    XI = np.squeeze(XI)
    YI = np.squeeze(YI)
    #-- size of new matrix
    if (np.ndim(XI) == 1):
        nx = len(XI)
    else:
        nx,ny = np.shape(XI)
    #-- Check to make sure sizes of input arguments are correct and consistent
    #-- (bitwise | on booleans behaves like "or" here)
    if (len(zs) != len(xs)) | (len(zs) != len(ys)):
        raise Exception('Length of X, Y, and Z must be equal')
    if (np.shape(XI) != np.shape(YI)):
        raise Exception('Size of XI and YI must be equal')
    #-- create python dictionary of radial basis function formulas
    radial_basis_functions = {}
    radial_basis_functions['multiquadric'] = multiquadric
    radial_basis_functions['inverse_multiquadric'] = inverse_multiquadric
    radial_basis_functions['inverse'] = inverse_multiquadric
    radial_basis_functions['inverse_quadratic'] = inverse_quadratic
    radial_basis_functions['gaussian'] = gaussian
    radial_basis_functions['linear'] = poly_spline1
    radial_basis_functions['cubic'] = poly_spline3
    radial_basis_functions['quintic'] = poly_spline5
    radial_basis_functions['thin_plate'] = thin_plate
    #-- check if formula name is listed
    if method in radial_basis_functions.keys():
        RBF = radial_basis_functions[method]
    else:
        raise ValueError("Method {0} not implemented".format(method))
    #-- Creation of data distance matrix
    #-- Data to Data
    if (metric == 'brute'):
        #-- use linear algebra to compute euclidean distances
        Rd = distance_matrix(
            np.array([xs, ys]),
            np.array([xs, ys])
        )
    else:
        #-- use scipy spatial distance routines
        Rd = scipy.spatial.distance.cdist(
            np.array([xs, ys]).T,
            np.array([xs, ys]).T,
            metric=metric)
    #-- shape of distance matrix
    N,M = np.shape(Rd)
    #-- if epsilon is not specified
    if epsilon is None:
        #-- calculate norm with mean euclidean distance
        #-- (mean of the strictly-lower-triangular entries, i.e. each
        #-- distinct point pair counted once)
        uix,uiy = np.nonzero(np.tri(N,M=M,k=-1))
        epsilon = np.mean(Rd[uix,uiy])
    #-- possible augmentation of the PHI Matrix with polynomial Vectors
    if polynomial is None:
        #-- calculate radial basis function for data-to-data with smoothing
        PHI = RBF(epsilon, Rd) + np.eye(N,M=M)*smooth
        DMAT = zs.copy()
    else:
        #-- number of polynomial coefficients
        nt = (polynomial**2 + 3*polynomial)//2 + 1
        #-- calculate radial basis function for data-to-data with smoothing
        PHI = np.zeros((N+nt,M+nt))
        PHI[:N,:M] = RBF(epsilon, Rd) + np.eye(N,M=M)*smooth
        #-- augmentation of PHI matrix with polynomials
        POLY = polynomial_matrix(xs,ys,polynomial)
        DMAT = np.concatenate(([zs,np.zeros((nt))]),axis=0)
        #-- augment PHI matrix (symmetric bordering with the polynomial terms)
        for t in range(nt):
            PHI[:N,M+t] = POLY[:,t]
            PHI[N+t,:M] = POLY[:,t]
    #-- Computation of the Weights
    w = np.linalg.lstsq(PHI,DMAT[:,np.newaxis],rcond=-1)[0]
    #-- Computation of distance Matrix
    #-- Computation of distance Matrix (data to mesh points)
    if (metric == 'brute'):
        #-- use linear algebra to compute euclidean distances
        Re = distance_matrix(
            np.array([XI.flatten(),YI.flatten()]),
            np.array([xs,ys])
        )
    else:
        #-- use scipy spatial distance routines
        Re = scipy.spatial.distance.cdist(
            np.array([XI.flatten(),YI.flatten()]).T,
            np.array([xs, ys]).T,
            metric=metric)
    #-- calculate radial basis function for data-to-mesh matrix
    E = RBF(epsilon,Re)
    #-- possible augmentation of the Evaluation Matrix with polynomial vectors
    if polynomial is not None:
        P = polynomial_matrix(XI.flatten(),YI.flatten(),polynomial)
        E = np.concatenate(([E, P]),axis=1)
    #-- calculate output interpolated array (or matrix)
    if (np.ndim(XI) == 1):
        ZI = np.squeeze(np.dot(E,w))
    else:
        ZI = np.zeros((nx,ny))
        ZI[:,:] = np.dot(E,w).reshape(nx,ny)
    #-- return the interpolated array (or matrix)
    return ZI
#-- define radial basis function formulas
def multiquadric(epsilon, r):
    """Multiquadric RBF: sqrt((epsilon*r)**2 + 1)."""
    scaled = epsilon * r
    return np.sqrt(scaled * scaled + 1.0)
def inverse_multiquadric(epsilon, r):
    """Inverse multiquadric RBF: 1/sqrt((epsilon*r)**2 + 1)."""
    scaled = epsilon * r
    return 1.0 / np.sqrt(scaled * scaled + 1.0)
def inverse_quadratic(epsilon, r):
    """Inverse quadratic RBF: 1/(1 + (epsilon*r)**2)."""
    scaled = epsilon * r
    return 1.0 / (1.0 + scaled * scaled)
def gaussian(epsilon, r):
    """Gaussian RBF: exp(-(epsilon*r)**2)."""
    scaled = epsilon * r
    return np.exp(-(scaled * scaled))
def poly_spline1(epsilon, r):
    """First-order polyharmonic spline: epsilon*r (linear)."""
    return epsilon * r
def poly_spline3(epsilon, r):
    """Third-order polyharmonic spline: (epsilon*r)**3 (cubic)."""
    scaled = epsilon * r
    return scaled * scaled * scaled
def poly_spline5(epsilon, r):
    """Fifth-order polyharmonic spline: (epsilon*r)**5 (quintic)."""
    scaled = epsilon * r
    return scaled ** 5
def thin_plate(epsilon, r):
    """Thin-plate spline RBF: r**2 * log(r), with the removable singularity
    at r == 0 evaluated as 0.

    Uses scipy.special.xlogy so that r == 0 yields exactly 0 without first
    producing a divide-by-zero RuntimeWarning and a transient 0*(-inf) NaN
    (which the original r**2 * np.log(r) did), and so that scalar inputs
    work (the original boolean-mask assignment required an array).

    ``epsilon`` is unused, matching the original; it is kept so all RBF
    formulas share the same (epsilon, r) signature.
    """
    return scipy.special.xlogy(r**2, r)
#-- calculate Euclidean distances between points as matrices
def distance_matrix(x, cntrs):
    """Compute the pairwise Euclidean distance matrix between two point sets.

    Parameters
    ----------
    x: array of shape (s, M) -- s coordinate dimensions, M points
    cntrs: array of shape (s, N) -- s coordinate dimensions, N points

    Returns
    -------
    D: array of shape (M, N) with D[i, j] = ||x[:, i] - cntrs[:, j]||

    Fixes: the original built its fancy-index arrays with
    ``.astype(np.int)``; the ``np.int`` alias was removed in NumPy 1.24,
    so this function crashed on current NumPy. Broadcasting the outer
    difference replaces the index construction entirely.
    """
    s, M = np.shape(x)
    s, N = np.shape(cntrs)
    D = np.zeros((M, N))
    for d in range(s):
        #-- outer difference along dimension d: (M, 1) - (1, N) -> (M, N)
        dx = x[d, :][:, np.newaxis] - cntrs[d, :][np.newaxis, :]
        D += dx**2
    return np.sqrt(D)
#-- calculate polynomial matrix to augment radial basis functions
def polynomial_matrix(x, y, order):
    """Build the polynomial augmentation matrix for RBF interpolation.

    Columns enumerate every monomial x**j * y**(i-j) for total degree
    i = 0..order, giving (order**2 + 3*order)//2 + 1 terms per point.
    """
    num_points = len(x)
    num_terms = (order**2 + 3*order)//2 + 1
    POLY = np.zeros((num_points, num_terms))
    col = 0
    for degree in range(order + 1):
        for xpow in range(degree + 1):
            POLY[:, col] = (x**xpow)*(y**(degree - xpow))
            col += 1
    return POLY
| StarcoderdataPython |
1797285 | from django.urls import path
from . import views
app_name = 'todo'  # URL namespace: reverse with 'todo:<name>'.
urlpatterns = [
    # Task-group CRUD.
    path('create/', views.create_group, name='create_group'),
    path('update/', views.update_group, name='update_group'),
    path('delete/', views.delete_group, name='delete_group'),
    # Per-group task list and task CRUD; <group_id> selects the group.
    path('<int:group_id>/', views.todolist, name='todolist'),
    path('<int:group_id>/create/', views.create_task, name='create_task'),
    path('<int:group_id>/update/', views.update_task, name='update_task'),
    path('<int:group_id>/delete/', views.delete_task, name='delete_task'),
]
| StarcoderdataPython |
3262383 | <gh_stars>1-10
def bubble_swap(swapstring):
    """Sort the list in place with bubble sort and return it.

    Each outer pass bubbles the largest remaining element to the end of
    the unsorted prefix, which shrinks by one per pass.
    """
    for limit in range(len(swapstring) - 1, 0, -1):
        for i in range(limit):
            if swapstring[i] > swapstring[i + 1]:
                swapstring[i], swapstring[i + 1] = swapstring[i + 1], swapstring[i]
    return swapstring
swapstring = bubble_swap([8,4,2,9,5,6,7,6])
# print() is valid in both Python 2 and 3; the bare ``print swapstring``
# statement was Python-2-only syntax and a SyntaxError on Python 3.
print(swapstring)
| StarcoderdataPython |
1761582 | from typing import List, Union
import pandas
from pandas.core.series import Series
from ..model.node_classification_model import NCModel
from ..pipeline.classification_training_pipeline import ClassificationTrainingPipeline
from ..query_runner.query_runner import QueryRunner
class NCTrainingPipeline(ClassificationTrainingPipeline):
    """Training pipeline specialized for node-classification models."""
    def selectFeatures(self, node_properties: Union[str, List[str]]) -> Series:
        """Register the given node properties as model features."""
        params = {"pipeline_name": self.name(), "node_properties": node_properties}
        query = f"{self._query_prefix()}selectFeatures($pipeline_name, $node_properties)"
        return self._query_runner.run_query(query, params).squeeze()  # type: ignore
    def feature_properties(self) -> Series:
        """Return the node properties currently selected as features."""
        info = self._list_info()["pipelineInfo"][0]
        return pandas.Series(info["featurePipeline"]["featureProperties"], dtype=object)
    def _query_prefix(self) -> str:
        # All server procedures for this pipeline type live in this namespace.
        return "CALL gds.beta.pipeline.nodeClassification."
    def _create_trained_model(self, name: str, query_runner: QueryRunner) -> NCModel:
        # Wrap the trained model handle in the node-classification model class.
        return NCModel(name, query_runner, self._server_version)
| StarcoderdataPython |
1609256 | <reponame>acc-cosc-1336/cosc-1336-fall-2017-alfonsosalinas2
class Course:
    """Plain record describing a single course offering."""
    def __init__(self, course_id, title, credit_hour, professor):
        """Store the course identifier, title, credit hours, and professor."""
        self.course_id = course_id
        self.title = title
        self.credit_hour = credit_hour
        self.professor = professor
    def __repr__(self):
        # Debug-friendly representation; field order mirrors __init__.
        return ('Course(course_id={0!r}, title={1!r}, credit_hour={2!r}, '
                'professor={3!r})'.format(self.course_id, self.title,
                                          self.credit_hour, self.professor))
| StarcoderdataPython |
3359322 | # -*- coding: utf-8 -*-
import click
import logging
from dotenv import find_dotenv, load_dotenv
import os
import sqlite3
from sqlite3 import Error
import pandas as pd
TABLE_NAME = "LONDON"  # Destination SQLite table for all monthly CSVs.
CSV_COL_NAMES = ["Month", "Latitude", "Longitude", "Location", "Crime type"]  # Columns read from the raw CSVs.
DB_COL_NAMES = ["MONTH", "LATITUDE", "LONGITUDE", "DESCRIPTION", "CRIME_TYPE"]  # Position-aligned with CSV_COL_NAMES.
DB_COL_TYPES = ["TEXT", "REAL", "REAL", "TEXT", "TEXT"]  # SQLite types, position-aligned with DB_COL_NAMES.
def list_files(startpath):
    """Recursively collect the full paths of every .csv file under *startpath*."""
    csv_paths = []
    for root, _dirs, filenames in os.walk(startpath):
        csv_paths.extend(
            os.path.join(root, filename)
            for filename in filenames
            if filename.endswith('.csv')
        )
    return csv_paths
def create_connection(db_file):
    """Create and return a connection to the SQLite database *db_file*.

    The caller is responsible for closing the connection. On failure the
    error is printed and re-raised: the original implicitly returned None,
    which only deferred the crash to the first query with a confusing
    AttributeError.
    """
    try:
        conn = sqlite3.connect(db_file)
        # sqlite3.version is the DB-API module version, not the SQLite
        # library version (that would be sqlite3.sqlite_version).
        print(sqlite3.version)
        return conn
    except Error as e:
        print(e)
        raise
def create_crime_table(db_conn):
    """Create the crime table: an ID column plus the configured schema columns.

    The table and column names come from module constants, so interpolating
    them into the DDL is safe; the resulting SQL is identical to before.
    """
    cursor = db_conn.cursor()
    cursor.execute(f"CREATE TABLE {TABLE_NAME} (ID INTEGER)")
    for column, column_type in zip(DB_COL_NAMES, DB_COL_TYPES):
        cursor.execute(f"ALTER TABLE {TABLE_NAME} ADD COLUMN {column} {column_type}")
def move_csv_to_sql(sql_conn, csv_file_paths):
    """Read each monthly crime CSV and append its rows to the crime table."""
    for path in csv_file_paths:
        print(path)
        frame = pd.read_csv(path, usecols=CSV_COL_NAMES)
        # Rename to the database column names (position-aligned constants).
        frame.columns = DB_COL_NAMES
        print(frame.shape)
        frame.to_sql(TABLE_NAME, sql_conn, if_exists='append', index_label="ID")
def get_specific_crimes(db_conn, crime_type, start_date, end_date):
    """Return rows of the given crime type with a location, filtered to
    [start_date, end_date] inclusive on the MONTH column.

    The crime type is now bound as a query parameter instead of being
    interpolated into the SQL string with .format(), which was fragile
    (quotes in the value broke the query) and SQL-injection-prone.
    """
    query = """
    SELECT * FROM {tn}
    WHERE {tn}.CRIME_TYPE=? AND LATITUDE IS NOT NULL
    """.format(tn=TABLE_NAME)
    crimes = pd.read_sql(query, db_conn, params=(crime_type,),
                         parse_dates=["MONTH"], index_col="ID")
    return crimes[(crimes['MONTH'] >= start_date) & (crimes['MONTH'] <= end_date)]
def bootstrap_scripts(input_filepath, output_filepath):
    """Create the crime database at *output_filepath* and load every CSV
    found under *input_filepath* into it.

    The connection is now closed in a ``finally`` block so it is released
    even when table creation or the CSV load raises (the original leaked
    it on any error).
    """
    db_conn = create_connection(output_filepath)
    try:
        try:
            create_crime_table(db_conn)
        except sqlite3.OperationalError as error:
            # Table already exists (or similar DDL issue): log and continue,
            # matching the original best-effort behaviour.
            print(error)
        all_crime_csv_file_paths = list_files(input_filepath)
        move_csv_to_sql(db_conn, all_crime_csv_file_paths)
    finally:
        db_conn.close()
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, output_filepath):
    """ Runs data processing scripts to turn raw data from (../raw) into
        cleaned data ready to be analyzed (saved in ../processed).
    """
    logger = logging.getLogger(__name__)
    logger.info('Making final data set from raw data')
    # Rebuild from scratch: drop any database left by a previous run.
    if os.path.isfile(output_filepath):
        logger.info("Removing previous database.")
        os.remove(output_filepath)
    bootstrap_scripts(input_filepath, output_filepath)
if __name__ == '__main__':
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    # Load environment variables from a .env file if present (python-dotenv).
    load_dotenv(find_dotenv())
    main()
| StarcoderdataPython |
class Solution:
    # Memoization: (level, running_sum) -> number of ways the remaining
    # lists can complete the running sum to zero.
    def fourSumCount(self, A, B, C, D, prev_sum=0, level=0, sums=None):
        """Count tuples (a, b, c, d), one element from each list, whose sum
        is zero (LeetCode 454, 4Sum II).

        :type A: List[int]
        :type B: List[int]
        :type C: List[int]
        :type D: List[int]
        :rtype: int

        Fixes over the original:
        * ``sums={}`` was a mutable default argument, so the memo dict
          persisted across top-level calls and returned stale counts when
          called again with different input lists; it is now created fresh
          per top-level call.
        * the memo key only needs ``(level, prev_sum + num)`` -- the count
          of completions does not depend on which number produced the
          running sum -- so equal partial sums now share cache entries.
        * debug prints removed.
        """
        if sums is None:
            sums = {}
        # Base case: at the final list, just count values that zero the sum.
        if level == 3:
            return sum(1 for num in D if prev_sum + num == 0)
        total = 0
        lists = [A, B, C]
        for num in lists[level]:
            key = (level, prev_sum + num)
            if key not in sums:
                sums[key] = self.fourSumCount(A, B, C, D, prev_sum + num,
                                              level + 1, sums)
            total += sums[key]
        return total
sol = Solution()
# Test 1: 1 + (-1) + 0 + 1 == 1, so no zero-sum tuple exists -> expect 0.
A = [1]
B = [-1]
C = [0]
D = [1]
result = sol.fourSumCount(A, B, C, D)
print("Test 1: {0}".format(result))
# Test 2: the LeetCode 454 example -> expect 2.
A = [1, 2]
B = [-2, -1]
C = [-1, 2]
D = [0, 2]
result = sol.fourSumCount(A, B, C, D)
print("Test 2: {0}".format(result))
| StarcoderdataPython |
4832916 | import ast
import astunparse
import black
def ast_name(name):
return ast.Name(id=name)
def ast_module(*body):
return ast.Module(body=ast_body(body))
def ast_body(body):
return [item for item in body if item]
def ast_import(module, name=None, alias=None):
if name:
if isinstance(name, dict):
names = [(n, a) for n, a in name.items()]
elif isinstance(name, (list, tuple)):
names = [(n, None) for n in name]
else:
names = [(name, alias)]
return ast.ImportFrom(
module=module,
names=[ast.alias(name=n, asname=a) for n, a in names],
level=0,
)
else:
return ast.Import(names=[ast.alias(name=module, asname=alias)], level=0)
def ast_expr(val):
return ast.Expr(val)
def ast_call(fname, *args, comma=None, **kwargs):
keywords = ast_keywords(kwargs)
if comma or (kwargs and len(args) + len(kwargs) > 1):
keywords.append(ast_trailing_comma())
return ast.Call(
func=ast_name(fname),
args=ast_args(args),
keywords=keywords,
)
def ast_assign(name, val):
return ast.Assign(
targets=[ast_name(name)],
value=ast_val(val),
)
def ast_val(val):
if isinstance(val, ast.AST):
return val
else:
return ast.Constant(val, kind=None)
def ast_args(args):
return [ast_val(arg) for arg in args]
def ast_keywords(kwargs):
keywords = []
for key, val in kwargs.items():
keywords.append(
ast.keyword(
key,
ast_val(val),
)
)
return keywords
def ast_class(name, bases, *body):
return ast.ClassDef(
name=name,
bases=[ast_name(base) for base in bases],
keywords=[],
starargs=[],
kwargs=[],
body=ast_body(body),
decorator_list=[],
)
def ast_def(name, args, *body):
return ast.FunctionDef(
name=name,
args=ast.arguments(
posonlyargs=[],
args=[ast.arg(arg=arg, annotation=None) for arg in args],
vararg=None,
kwonlyargs=[],
kw_defaults=[],
kwarg=None,
defaults=[],
),
body=ast_body(body),
decorator_list=[],
returns=None,
)
def ast_return(value):
return ast.Return(value)
def ast_trailing_comma():
return ast.Name(id="")
def ast_newline():
return ast.Expr(ast.Name(id=""))
def ast_list(args):
return ast.List(elts=ast_args(list(args)) + [ast_trailing_comma()])
def ast_tuple(args):
return ast.Tuple(elts=ast_args(list(args)) + [ast_trailing_comma()])
def ast_dict(obj=None, **kwargs):
if obj:
kwargs = obj
return ast.Dict(
keys=ast_args(kwargs.keys()) + [None],
values=ast_args(kwargs.values()) + [ast_trailing_comma()],
)
def unparse(tree):
code = astunparse.unparse(tree).replace(", **}", ",}")
return black.format_str(code, mode=black.FileMode())
| StarcoderdataPython |
1749926 | <reponame>wilsonify/euler
#
#
import heapq
from euler_python.utils import eulerlib
# Finding all the relatives of 2 can be seen as a single-source shortest path problem,
# which we solve here using Dijkstra's algorithm. The key insight is that at each node (prime number),
# we consider the connection path from 2 to it, and store the maximum path number at the node.
# It is amenable to dynamic programming because it's always best to minimize the maximum path number.
#
# For example, 2 is connected to 103 because 2 <-> 3 <-> 13 <-> 113 <-> 103.
# The maximum number along this path is 113, and among all paths
# this is the minimum possible maximum, so 103 is not a relative of 2.
def problem425():
    """Project Euler 425: sum of primes below 10^7 that are NOT relatives
    of 2, solved with Dijkstra over single-digit-change edges between
    primes (see the module comment above for the definition)."""
    LIMIT = 10 ** 7
    isprime = eulerlib.list_primality(LIMIT)
    # pathmax[i] = None if i is not prime or i is not connected to 2.
    # Otherwise, considering all connection paths from 2 to i and for each path computing
    # the maximum number, pathmax[i] is the minimum number among all these maxima.
    pathmax = [None] * len(isprime)
    # Process paths in increasing order of maximum number
    queue = [(2, 2)]  # min-heap of (path maximum, prime)
    while len(queue) > 0:
        pmax, n = heapq.heappop(queue)
        if pathmax[n] is not None and pmax >= pathmax[n]:
            # This happens if at the time this update was queued, a better
            # or equally good update was queued ahead but not processed yet
            continue
        # Update the target node and explore neighbors
        pathmax[n] = pmax
        # Try all replacements of a single digit, including the leading zero.
        # This generates exactly all (no more, no less) the ways that a number m is connected to n.
        digits = to_digits(n)
        tempdigits = list(digits)
        for i in range(len(tempdigits)):  # For each digit position
            for j in range(10):  # For each digit value
                tempdigits[i] = j
                m = to_number(tempdigits)
                nextpmax = max(m, pmax)
                if (
                    m < len(isprime)
                    and isprime[m]
                    and (pathmax[m] is None or nextpmax < pathmax[m])
                ):
                    heapq.heappush(queue, (nextpmax, m))
            tempdigits[i] = digits[i]  # Restore the digit
    # A prime is NOT a relative of 2 when it is unreachable or every path
    # from 2 must pass through a number larger than the prime itself.
    ans = sum(
        i
        for i in range(len(isprime))
        if isprime[i] and (pathmax[i] is None or pathmax[i] > i)
    )
    return ans
# Returns the given non-negative integer as an array of digits, in big endian, with an extra leading zero.
# e.g. 0 -> [0,0]; 1 -> [0,1]; 8 -> [0,8]; 42 -> [0,4,2]; 596 -> [0,5,9,6].
def to_digits(n):
    """Return the base-10 digits of non-negative *n*, big endian, with an
    extra leading zero.

    e.g. 0 -> [0, 0]; 8 -> [0, 8]; 42 -> [0, 4, 2]; 596 -> [0, 5, 9, 6].
    """
    if n < 0:
        raise ValueError()
    return [0] + [int(character) for character in str(n)]
def to_number(digits):
    """Fold a big-endian digit sequence back into an integer."""
    total = 0
    for position, digit in enumerate(reversed(digits)):
        total += digit * 10 ** position
    return total
if __name__ == "__main__":
print(problem425())
| StarcoderdataPython |
3274656 # AdaBoost/TF-IDF text-classification script (the original Spanish header said "logistic regression", which does not match the model trained below)
import pandas as pd
import re
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
import pickle
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn import metrics
from unidecode import unidecode
from nltk.corpus import stopwords
spanish_stopwords = stopwords.words('spanish')
#from sklearn.pipeline import make_pipeline
def tokenizer_porter(text):
    """Split *text* on runs of whitespace and return the tokens.

    (The name suggests Porter stemming, but no stemming is performed.)
    """
    tokens = text.split()
    return tokens
#TF-IDF
# NOTE(review): this vectorizer appears unused — a second, fully
# configured copy (tfidf_vect, with Spanish stopwords) is created and
# fitted further down; confirm and consider removing this one.
tfidf = TfidfVectorizer(strip_accents=None,
                        lowercase=False,
                        preprocessor=None,
                        tokenizer=tokenizer_porter,
                        use_idf=True,
                        norm='l2',
                        smooth_idf=True)
def preprocessor(text):
    """Normalise raw article text for vectorisation: strip HTML tags and
    punctuation, lowercase, remove digits and the literal substring
    'number', then transliterate accented characters to plain ASCII."""
    text = re.sub(r'<[^>]*>', '', text)   # HTML tags
    text = re.sub(r'[^\w\s]', '', text)   # punctuation
    text = text.lower()
    # Raw strings throughout: '\d' in a plain literal is an invalid
    # escape sequence (SyntaxWarning on modern Python).
    text = re.sub(r'\d', '', text)
    text = re.sub('number', '', text)
    text = unidecode(text)
    return text
#df = pd.read_csv('../Datos/fakes1000.csv')
# Load labelled articles: Class (target), Source and Text.
df = pd.read_csv('../Datos/test.csv')[['Class','Source','Text']]
x = df.loc[:,['Source','Text']]
# Concatenate source name and article body into a single text feature.
x['source'] = x["Source"].astype(str) +" "+ x["Text"]
x = x.drop(['Source','Text'],axis=1)
x = x.source.apply(preprocessor)
y = df.Class
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.30)
# Fit TF-IDF on the training split only; Spanish stopwords removed.
tfidf_vect = TfidfVectorizer(strip_accents=None,
                             lowercase=False,
                             preprocessor=None,
                             tokenizer=tokenizer_porter,
                             use_idf=True,
                             norm='l2',
                             smooth_idf=True,
                             stop_words = spanish_stopwords)
tfidf_train = tfidf_vect.fit_transform(x_train)
tfidf_test = tfidf_vect.transform(x_test)
# NOTE(review): tfidf_df is built but never used afterwards.
tfidf_df = pd.DataFrame(tfidf_train.A, columns=tfidf_vect.get_feature_names())
# AdaBoost over depth-10 decision trees, evaluated on the held-out split.
Adab = AdaBoostClassifier(DecisionTreeClassifier(max_depth=10),n_estimators=5,random_state=1)
Adab.fit(tfidf_train, y_train)
y_pred3 = Adab.predict(tfidf_test)
ABscore = metrics.accuracy_score(y_test,y_pred3)
print("accuracy: %0.3f" %ABscore)
# Persist the trained model (the file is named DecTree.sav but holds the
# AdaBoost ensemble).
DecTree = open('DecTree.sav', 'wb')
pickle.dump(Adab, DecTree)
DecTree.close()
# Accuracy: 0.777 | StarcoderdataPython |
1677704 | # DO NOT EDIT THIS FILE. This file will be overwritten when re-running go-raml.
from flask import Blueprint
from . import handlers
# Blueprint wiring: each view below is a thin delegator to the matching
# function in handlers.py — generated by go-raml (per the file header),
# so changes here should be limited to documentation.
api_api = Blueprint('api_api', __name__)
@api_api.route('/api/nodes', methods=['GET'])
def ListCapacity():
    """
    List all the nodes capacity
    It is handler for GET /api/nodes
    """
    return handlers.ListCapacityHandler()
@api_api.route('/api/nodes', methods=['POST'])
def RegisterCapacity():
    """
    Register a node capacity
    It is handler for POST /api/nodes
    """
    return handlers.RegisterCapacityHandler()
@api_api.route('/api/nodes/<node_id>', methods=['GET'])
def GetCapacity(node_id):
    """
    Get detail about capacity of a node
    It is handler for GET /api/nodes/<node_id>
    """
    return handlers.GetCapacityHandler(node_id)
@api_api.route('/api/nodes/<node_id>/reserved', methods=['PUT'])
def UpdateReservedCapacity(node_id):
    """
    Mark some capacity on a node to be reserved
    It is handler for PUT /api/nodes/<node_id>/reserved
    """
    return handlers.UpdateReservedCapacityHandler(node_id)
@api_api.route('/api/nodes/<node_id>/actual', methods=['PUT'])
def UpdateActualUsedCapacity(node_id):
    """
    Set the actual usage of the capacity of a node
    It is handler for PUT /api/nodes/<node_id>/actual
    """
    return handlers.UpdateActualUsedCapacityHandler(node_id)
@api_api.route('/api/farmer_create', methods=['GET'])
def RegisterFarmer():
    """
    Register a farmer
    It is handler for GET /api/farmer_create
    """
    return handlers.RegisterFarmerHandler()
@api_api.route('/api/farmer_update', methods=['GET'])
def UpdateFarmer():
    """
    Update a farmer
    It is handler for GET /api/farmer_update
    """
    return handlers.UpdateFarmerHandler()
@api_api.route('/api/farmers', methods=['GET'])
def ListFarmers():
    """
    List Farmers
    It is handler for GET /api/farmers
    """
    return handlers.ListFarmersHandler()
@api_api.route('/api/farmers/<iyo_organization>', methods=['GET'])
def GetFarmer(iyo_organization):
    """
    Get detail about a farmer
    It is handler for GET /api/farmers/<iyo_organization>
    """
    return handlers.GetFarmerHandler(iyo_organization)
| StarcoderdataPython |
3247891 | """
Messages API.
url: https://documentation.mailgun.com/en/latest/user_manual.html#sending-via-api
"""
import json
from mailgun_sdk.base import ApiDomainResource
class Messages(ApiDomainResource):
    """Mailgun Messages API resource (sending mail).

    https://documentation.mailgun.com/en/latest/user_manual.html#sending-via-api
    """
    api_endpoint = "messages"
    # When True, adds 'o:require-tls' so Mailgun only delivers over TLS.
    require_tls = False
    def send_via_template(self, from_name: str, from_email: str, to: str,
                          subject: str, template: str, variables: dict):
        """POST a message rendered from a stored Mailgun template.

        *variables* is the template substitution mapping, sent
        JSON-encoded in the 'h:X-Mailgun-Variables' field, e.g.
        ``{"title": "API documentation", "body": "..."}``.
        """
        payload = {
            "from": "{} <{}>".format(from_name, from_email),
            "to": [to],
            "subject": subject,
            "template": template,
            "h:X-Mailgun-Variables": json.dumps(variables),
        }
        if self.require_tls:
            payload['o:require-tls'] = 'True'
        # Leftover debug prints of base_url/api_endpoint removed.
        return self.request(
            "POST",
            data=payload,
        )
| StarcoderdataPython |
3237080 | # -*- coding: utf-8 -*-
# vim: set ai sm sw=4 sts=4 ts=8 syntax=python
# vim: set filetype=python fileencoding=utf-8:
import hashlib
import os
class DirFileHash(object):
    """Read-only mapping from file name to the MD5 hex digest of that
    file's contents inside a fixed directory.

    Lookups return ``None`` for missing files and for keys that
    ``os.path.join`` cannot handle (non-path-like types).
    """
    def __init__(self, path):
        """Remember *path*, raising ValueError when it is not a directory."""
        if not os.path.isdir(path):
            # The original used '%' without a conversion type, which itself
            # raised ValueError('incomplete format') — '%s' is the intent.
            raise ValueError("Path is not a directory: '%s'" % path)
        self.path = path
    def __getitem__(self, key):
        try:
            with open(os.path.join(self.path, key), 'rb') as file:
                return hashlib.md5(file.read()).hexdigest()
        except (FileNotFoundError, TypeError):
            return None
def main():
    """Exercise DirFileHash against /etc: an existing entry, a missing
    one, and a non-string key (the last two print None)."""
    hashes = DirFileHash('/etc/')
    for key in ('hosts', 'no_such_file', 2):
        print(hashes[key])
if __name__ == '__main__':
main()
| StarcoderdataPython |
3318638 | <reponame>markbasham/boolsi<filename>tests/simulate_tests.py
import ZODB
from boolsi.constants import NodeStateRange
from boolsi.testing_tools import build_predecessor_nodes_lists_and_truth_tables, \
count_simulation_problems, configure_encode_and_simulate, generate_test_description, \
UPDATE_RULES_A, UPDATE_RULES_B
from boolsi.simulate import Simulation, init_simulation_db_structure, simulate_master, simulate_until_max_t, store_simulation
from boolsi.mpi import MPICommWrapper
def test_simulate_master_A():
    """
    `simulate_master`
    Feature A: performing simulations irrespectively of performance tuning.
    """
    predecessor_node_lists, truth_tables = build_predecessor_nodes_lists_and_truth_tables(UPDATE_RULES_B)
    initial_state = [False, False, True, False, False, False]
    fixed_nodes = dict()
    perturbed_nodes_by_t = dict()
    max_t = 101
    initial_state_variations = []
    fixed_nodes_variations = []
    # One varied perturbation: node 0 at t=40 may be forced True, yielding
    # two simulation problems (without / with the perturbation).
    perturbed_nodes_by_t_variations = [(40, 0, NodeStateRange.MAYBE_TRUE)]
    n_simulation_problems = count_simulation_problems(
        initial_state_variations, fixed_nodes_variations, perturbed_nodes_by_t_variations)
    # Test for {single batch per process, multiple batches per process}.
    n_simulation_problem_batches_per_process_1 = 1
    n_simulation_problem_batches_per_process_2 = 5
    expected_simulation_states_1 = \
        [initial_state] + 25 * [[True, False, False, True, True, False],
                                [True, True, False, False, True, True],
                                [False, True, False, False, False, True],
                                [False, False, True, False, False, False]] + \
        [[True, False, False, True, True, False]]
    expected_simulation_1 = Simulation(expected_simulation_states_1, dict(), dict())
    # Second expectation: identical up to t=40, then the perturbation
    # steers the trajectory into a fixed point.
    expected_simulation_states_2 = \
        expected_simulation_states_1[:40] + [[True, False, True, False, False, False],
                                             [True, True, False, True, False, False]] + \
        60 * [[True, True, False, False, False, False]]
    expected_simulation_2 = Simulation(expected_simulation_states_2, dict(), {40: {0: True}})
    expected_simulations = [expected_simulation_1, expected_simulation_2]
    for n_simulation_problem_batches_per_process in [n_simulation_problem_batches_per_process_1,
                                                     n_simulation_problem_batches_per_process_2]:
        db_conn = ZODB.connection(None)  # fresh throwaway ZODB per run
        init_simulation_db_structure(db_conn)
        simulate_master(
            MPICommWrapper(), n_simulation_problem_batches_per_process,
            (initial_state, fixed_nodes, perturbed_nodes_by_t),
            (initial_state_variations, fixed_nodes_variations, perturbed_nodes_by_t_variations),
            predecessor_node_lists, truth_tables, max_t, n_simulation_problems, db_conn, None)
        test_description = generate_test_description(
            locals(), 'n_simulation_problem_batches_per_process')
        assert list(db_conn.root.simulations.values()) == expected_simulations, test_description
        assert db_conn.root.n_simulations() == len(expected_simulations), test_description
def test_simulate_A():
    """
    `simulate_until_max_t`
    Feature A: simulating until the time cap.
    """
    predecessor_node_lists, truth_tables = build_predecessor_nodes_lists_and_truth_tables(UPDATE_RULES_A)
    initial_state = [False, True, True, False, False]
    perturbed_nodes_by_t = dict()
    # Test for {not reaching attractor, reaching attractor}.
    max_t_1 = 3
    max_t_2 = 20
    expected_simulation_states_1 = \
        [initial_state, [False, False, True, False, True], [True, False, False, False, True],
         [True, True, False, True, True]]
    # Period-5 attractor traversed four times, plus the return to start.
    expected_simulation_states_2 = \
        4 * [initial_state, [False, False, True, False, True], [True, False, False, False, True],
             [True, True, False, True, True], [True, True, True, True, False]] + \
        [initial_state]
    for max_t, expected_simulation_states in zip(
            [max_t_1, max_t_2], [expected_simulation_states_1, expected_simulation_states_2]):
        _, _simulate_until_attractor_or_target_substate_or_max_t = \
            configure_encode_and_simulate(max_t=max_t)
        simulation_states = simulate_until_max_t(
            max_t, _simulate_until_attractor_or_target_substate_or_max_t, initial_state,
            perturbed_nodes_by_t, predecessor_node_lists, truth_tables)
        test_description = generate_test_description(locals(), 'max_t')
        assert expected_simulation_states == simulation_states, test_description
| StarcoderdataPython |
3321857 | # test
# Created by JKChang
# 27/01/2020, 15:58
# Tag:
# Description:
from numpy import *
import operator
from os import listdir
import matplotlib.pyplot as plt
# simulating a pandas df['type'] column: one label per (x, y) point
types = ['apple', 'orange', 'apple', 'pear', 'apple', 'orange', 'apple', 'pear']
x_coords = [10, 10, 5, 4, 3, 20, 19, 21]
y_coords = [21, 23, 12, 21, 10, 20, 14, 2]
# Iterate the parallel lists with zip instead of indexing, and avoid
# shadowing the builtin `type` with the loop variable.
for label, x, y in zip(types, x_coords, y_coords):
    plt.scatter(x, y, marker='x', color='red')
    plt.text(x + 0.3, y + 0.3, label, fontsize=9)
plt.show()
# group, labels = createDataSet()
#
# for column,label in zip(group,labels):
# plt.plot(group , label=label)
#
# plt.legend()
# plt.show()
# arr = np.random.random((10, 5))
# ax.plot(arr)
#
# labels = ['a', 'b', 'c', 'd', 'e']
#
# for column, label in zip(arr.T, labels):
# ax.plot(column, label=label) | StarcoderdataPython |
1782156 | <filename>test/tests/dict_setting_old_dict.py
# Funny case: if we get the attrwrapper of an object, then change it to be dict-backed,
# the original attrwrapper should remain valid but no longer connected to that object.
class C(object):
    pass
c1 = C()
aw = c1.__dict__          # keep a handle on the original attrwrapper
c1.a = 1
print aw.items()          # the wrapper sees the new attribute
c1.__dict__ = d = {}      # swap in a plain dict as the instance dict
print aw.items()          # old wrapper stays valid but is now detached
c2 = C()
olddict = c2.__dict__
c2.__dict__ = { "should not be there" : 1 }
print olddict.has_key("should not be there")  # detached wrapper: expect False
| StarcoderdataPython |
3239306 | <reponame>kelp404/Victory
from base_form import *
from wtforms import TextField, validators
class ProfileForm(BaseForm):
    """Profile form with a single required ``name`` field (1-50 chars).

    The filter strips surrounding whitespace before validation; the
    ``basestring`` check marks this as Python 2 code.
    """
    name = TextField('Name',
                     validators=[validators.length(min=1, max=50)],
                     filters=[lambda x: x.strip() if isinstance(x, basestring) else None])
| StarcoderdataPython |
1720 | import requests
import aiohttp
from constants import API_KEY
class User(object):
def __init__(self, author_info):
# "author": {
# "about": "",
# "avatar": {
# "cache": "//a.disquscdn.com/1519942534/images/noavatar92.png",
# "isCustom": false,
# "large": {
# "cache": "//a.disquscdn.com/1519942534/images/noavatar92.png",
# "permalink": "https://disqus.com/api/users/avatars/felix1999.jpg"
# },
# "permalink": "https://disqus.com/api/users/avatars/felix1999.jpg",
# "small": {
# "cache": "//a.disquscdn.com/1519942534/images/noavatar32.png",
# "permalink": "https://disqus.com/api/users/avatars/felix1999.jpg"
# }
# },
# "disable3rdPartyTrackers": false,
# "id": "5472588",
# "isAnonymous": false,
# "isPowerContributor": false,
# "isPrimary": true,
# "isPrivate": true,
# "joinedAt": "2010-11-20T04:45:33",
# "location": "",
# "name": "felix1999",
# "profileUrl": "https://disqus.com/by/felix1999/",
# "signedUrl": "",
# "url": "",
# "username": "felix1999"
# },
self._basic_info = author_info
self._detailed_info = None
async def load(self):
async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session:
user_info = await session.get(
'https://disqus.com/api/3.0/users/details.json',
params={'user': self.id, 'api_key': API_KEY}
)
detail_json = await user_info.json()
if detail_json['code'] != 0:
print(f'Problem with getting user details from user {self.id}')
print(detail_json)
self._detailed_info = detail_json['response']
def _get_detailed_info(self):
# https://disqus.com/api/3.0/users/details.json?user=137780765&api_key=<KEY>
# {
# "code": 0,
# "response": {
# "about": "",
# "avatar": {
# "cache": "https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551",
# "isCustom": true,
# "large": {
# "cache": "https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551",
# "permalink": "https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg"
# },
# "permalink": "https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg",
# "small": {
# "cache": "https://c.disquscdn.com/uploads/users/13778/765/avatar32.jpg?1433896551",
# "permalink": "https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg"
# }
# },
# "disable3rdPartyTrackers": false,
# "id": "137780765",
# "isAnonymous": false,
# "isPowerContributor": false,
# "isPrimary": true,
# "isPrivate": false,
# "joinedAt": "2015-01-02T18:40:14",
# "location": "",
# "name": "Bob",
# "numFollowers": 2,
# "numFollowing": 0,
# "numForumsFollowing": 0,
# "numLikesReceived": 8967,
# "numPosts": 4147,
# "profileUrl": "https://disqus.com/by/disqus_FqhLpDGmTT/",
# "rep": 3.5297520000000002,
# "reputation": 3.5297520000000002,
# "reputationLabel": "High",
# "signedUrl": "",
# "url": "",
# "username": "disqus_FqhLpDGmTT"
# }
# }
print("WARNING: auto-loading user in async version of code!!!!")
details = requests.get(
'https://disqus.com/api/3.0/users/details.json',
{'user': self.id, 'api_key': API_KEY}
)
detail_json = details.json()
if detail_json['code'] != 0:
print(f'Problem with getting user details from user {self.id}')
print(detail_json)
self._detailed_info = detail_json['response']
@property
def anonymous(self):
return 'id' not in self._basic_info
@property
def private(self):
return self.anonymous or self._basic_info.get('isPrivate')
@property
def id(self):
if self.private:
return 'Private'
return self._basic_info.get('id', 'Anonymous')
@property
def name(self):
return self._basic_info.get('name')
@property
def username(self):
return self._basic_info.get('username')
@property
def location(self):
return self._basic_info.get('location')
@property
def joined_at(self):
return self._basic_info.get('joinedAt')
@property
def profile_url(self):
return self._basic_info.get('profileUrl')
@property
def total_posts(self):
if self._detailed_info is None:
self._get_detailed_info()
return self._detailed_info.get('numPosts')
@property
def total_likes(self):
if self._detailed_info is None:
self._get_detailed_info()
return self._detailed_info.get('numLikesReceived')
@property
def user_info_row(self):
return [
self.id,
self.name,
self.username,
self.total_posts,
self.total_likes,
self.location,
self.joined_at,
self.profile_url
]
| StarcoderdataPython |
1611386 | from aces import Aces
#the origin BP structure is optimized and we use it directly
class sub(Aces):
    """Job spec for the Aces runner: a 'greenkubo' calculation for the
    Bi4I4_computed species driven through the shengbte runner with VASP
    as the engine (per the opt/app dictionaries below)."""
    def submit(self):
        # Scheduler/method options: units, species, queue and run time.
        opt=dict(
            units="metal",
            species="Bi4I4_computed",
            method="greenkubo",
            nodes=1,
            procs=1,
            queue="q1.4",
            runTime=10000000
            ,runner="shengbte"
        )
        # Calculation parameters: k-point meshes, supercell, engine flags.
        app=dict(th=True,ispin=True,useMini=False,shengcut=-4,kpoints=[8,8,8],engine='vasp',supercell=[3,2,2],mekpoints=[4,4,4],ekpoints=[3,3,3])
        self.commit(opt,app);
if __name__=='__main__':
sub().run()
| StarcoderdataPython |
18875 | from src.main.common.model import endpoint
class TableEndpoint(endpoint.Endpoint):
    """CRUD endpoint for table descriptors inside a database.

    Route parameters arrive through **kwargs: ``db_system_name`` always,
    plus ``tb_system_name`` for item-level operations.  DescriptorUtils
    is imported inside each method — presumably to avoid a circular
    import; confirm before hoisting.
    """
    @classmethod
    def do_get(cls, *args, **kwargs):
        """Return one descriptor dict (when tb_system_name is routed) or
        the list of all table descriptors; None when nothing matches."""
        from src.main.admin_api.utils.descriptor_utils import DescriptorUtils
        db_system_name = kwargs.get("db_system_name")
        tb_system_name = kwargs.get("tb_system_name", None)
        response = None
        if tb_system_name is None:
            descriptor_dicts = []
            descriptors = DescriptorUtils.get_tbs_descriptor(db_system_name)
            for d in descriptors:
                descriptor_dicts.append(d.to_dict())
            response = descriptor_dicts
        else:
            descriptor = DescriptorUtils.get_tb_descriptor_by_system_name(db_system_name, tb_system_name)
            if descriptor is not None:
                response = descriptor.to_dict()
        return response
    @classmethod
    def do_post(cls, *args, **kwargs):
        """Create a table descriptor from the request body; requires a
        'name' key and that no equal descriptor already exists."""
        from src.main.admin_api.utils.descriptor_utils import DescriptorUtils
        from src.main.admin_api.model.table import Table
        db_system_name = kwargs.get("db_system_name")
        response = None
        body = TableEndpoint.get_body()
        name = body.get("name", None)
        if name is not None:
            descriptor = Table.from_json(body)
            if not DescriptorUtils.does_tb_descriptor_exist(db_system_name, descriptor):
                descriptor.save(db_system_name)
                response = descriptor.to_dict()
        return response
    @classmethod
    def do_put(cls, *args, **kwargs):
        """Partially update name/description/fields of an existing
        descriptor; only keys present in the body are applied."""
        from src.main.admin_api.utils.descriptor_utils import DescriptorUtils
        db_system_name = kwargs.get("db_system_name")
        tb_system_name = kwargs.get("tb_system_name")
        response = None
        body = TableEndpoint.get_body()
        if tb_system_name is not None:
            descriptor = DescriptorUtils.get_tb_descriptor_by_system_name(db_system_name, tb_system_name)
            if descriptor is not None:
                name = body.get("name", None)
                if name is not None:
                    descriptor.set_name(name)
                description = body.get("description", None)
                if description is not None:
                    descriptor.set_description(description)
                fields = body.get("fields", None)
                if fields is not None:
                    descriptor.set_fields(fields)
                descriptor.save(db_system_name)
                response = descriptor.to_dict()
        return response
    @classmethod
    def do_delete(cls, *args, **kwargs):
        """Delete the routed table descriptor; None when it is absent."""
        from src.main.admin_api.utils.descriptor_utils import DescriptorUtils
        db_system_name = kwargs.get("db_system_name")
        tb_system_name = kwargs.get("tb_system_name")
        response = None
        descriptor = DescriptorUtils.get_tb_descriptor_by_system_name(db_system_name, tb_system_name)
        if descriptor is not None:
            response = descriptor.delete(db_system_name)
        return response
| StarcoderdataPython |
133239 | <reponame>Serkan-devel/m.css<filename>pelican-plugins/m/dot.py
#
# This file is part of m.css.
#
# Copyright © 2017, 2018, 2019 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import pelican
import re
import subprocess
from docutils import nodes
from docutils.parsers import rst
from docutils.parsers.rst import directives
from docutils.parsers.rst.roles import set_classes
import dot2svg
def _is_graph_figure(parent):
    """Return True when *parent* is an ``m-figure`` figure node whose
    children so far are all invisible — i.e. a graph inserted now would
    become its first visible content."""
    if not isinstance(parent, nodes.figure):
        return False
    if 'm-figure' not in parent.get('classes', []):
        return False
    return all(isinstance(child, nodes.Invisible) for child in parent)
class Dot(rst.Directive):
    """Base reST directive that renders Graphviz source to inline SVG.

    Not registered itself: the subclasses below assemble the dot source
    and call ``Dot.run(self, source)``.  Note the extra *source*
    parameter — this is not the usual no-argument ``Directive.run``
    signature, so Dot must not be registered directly.
    """
    has_content = True
    optional_arguments = 1
    final_argument_whitespace = True
    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    def run(self, source):
        # set_classes moves the 'class' option into 'classes'.
        set_classes(self.options)
        # If this is the first real node inside a graph figure, put the SVG
        # directly inside
        parent = self.state.parent
        if _is_graph_figure(parent):
            svg = dot2svg.dot2svg(source, attribs=' class="{}"'.format(' '.join(['m-graph'] + self.options.get('classes', []))))
            node = nodes.raw('', svg, format='html')
            return [node]
        # Otherwise wrap it in a <div class="m-graph">
        svg = dot2svg.dot2svg(source)
        container = nodes.container(**self.options)
        container['classes'] = ['m-graph'] + container['classes']
        node = nodes.raw('', svg, format='html')
        container.append(node)
        return [container]
class Digraph(Dot):
    def run(self):
        # An explicit (possibly empty) quoted title suppresses Graphviz's
        # default <title>%3</title> output.
        title = self.arguments[0] if self.arguments else ''
        body = '\n'.join(self.content)
        return Dot.run(self, f'digraph "{title}" {{\n{body}}}')
class StrictDigraph(Dot):
    def run(self):
        # Same empty-title trick as Digraph, for strict digraphs.
        title = self.arguments[0] if self.arguments else ''
        body = '\n'.join(self.content)
        return Dot.run(self, f'strict digraph "{title}" {{\n{body}}}')
class Graph(Dot):
    def run(self):
        # Same empty-title trick, for undirected graphs.
        title = self.arguments[0] if self.arguments else ''
        body = '\n'.join(self.content)
        return Dot.run(self, f'graph "{title}" {{\n{body}}}')
class StrictGraph(Dot):
    def run(self):
        # Same empty-title trick, for strict undirected graphs.
        title = self.arguments[0] if self.arguments else ''
        body = '\n'.join(self.content)
        return Dot.run(self, f'strict graph "{title}" {{\n{body}}}')
def configure(pelicanobj):
    """Forward the M_DOT_FONT / M_DOT_FONT_SIZE Pelican settings (with
    defaults) to dot2svg."""
    settings = pelicanobj.settings
    font = settings.get('M_DOT_FONT', 'Source Sans Pro')
    font_size = settings.get('M_DOT_FONT_SIZE', 16.0)
    dot2svg.configure(font, font_size)
def register():
    """Hook configure() into Pelican startup and expose the four graph
    directives to reStructuredText."""
    pelican.signals.initialized.connect(configure)
    graph_directives = {
        'digraph': Digraph,
        'strict-digraph': StrictDigraph,
        'graph': Graph,
        'strict-graph': StrictGraph,
    }
    for name, directive in graph_directives.items():
        rst.directives.register_directive(name, directive)
| StarcoderdataPython |
142694 | <reponame>chickenPopcorn/food-truck-inc<filename>src/server/data_access/user_data_access.py
import bcrypt
from forms import LoginForm, ChangePasswordForm, UpdateProfileForm, DeleteForm, VendorRegisterForm, CustomerRegisterForm
class UserDataAccess:
    """Data-access layer for the ``users`` Mongo collection: login,
    registration, profile update, password change and account deletion.

    Every public method returns the envelope built by
    :meth:`return_output`:
    ``{'status': bool, 'message': str, 'result': {'user': dict}}``.

    NOTE(review): several password expressions in the original source were
    redacted to the literal ``<PASSWORD>`` (which does not even parse);
    the bcrypt calls below restore the obvious intent — confirm against
    version control.
    """
    def __init__(self, users):
        # *users*: pymongo collection handle.
        self.users = users
    def authorize(self, requestForm):
        """Validate a login form and check the credentials."""
        user, status, message = {}, False, ""
        form = LoginForm(requestForm)
        if form.validate():
            login_user = self.users.find_one({'username': form.username.data})
            if login_user and UserDataAccess.check_ps(login_user, form.password.data):
                status, message = True, 'Login successful!'
                user = UserDataAccess.return_user(login_user)
            else:
                message = 'The username and password does not match!'
        else:
            message = "Invalide form"
        return UserDataAccess.return_output(status, message, user)
    @staticmethod
    def check_ps(login_user, password):
        """True when *password* matches the stored bcrypt hash."""
        stored = login_user["password"].encode('utf-8')
        return bcrypt.hashpw(password.encode('utf-8'), stored) == stored
    def register(self, requestForm, role):
        """Register a 'Vendor' (store name) or a customer (cell number)."""
        user, status, message = {}, False, ""
        if role == "Vendor":
            form = VendorRegisterForm(requestForm)
        else:
            form = CustomerRegisterForm(requestForm)
        if form.validate():
            is_unique, message = self.__is_unique(form.username.data, form.email.data)
            if not is_unique:
                message = "email or username already registered"
                return UserDataAccess.return_output(status, message, {})
            status = True
            hashpass = bcrypt.hashpw(form.password.data.encode('utf-8'), bcrypt.gensalt())
            document = {
                "email": form.email.data,
                "password": hashpass,
                "firstname": form.firstname.data,
                "lastname": form.lastname.data,
                "username": form.username.data,
            }
            # Role-specific extra field (previously two near-identical
            # insert calls).
            if role == "Vendor":
                document["storeName"] = form.storeName.data
            else:
                document["cell"] = form.cell.data
            self.users.insert(document)
            # TODO add creationdate
            login_user = self.users.find_one({'username': form.username.data})
            message = 'The registration is successful!'
            user = UserDataAccess.return_user(login_user)
        else:
            message = "Invalide form"
        return UserDataAccess.return_output(status, message, user)
    def __is_unique(self, username, email):
        """(ok, message) — ok iff neither username nor email is taken."""
        if self.users.find_one({'username': username}):
            return False, 'The username has been taken!'
        if self.users.find_one({'email': email}):
            return False, 'The email has been taken!'
        return True, 'You can use this username and email!'
    @staticmethod
    def return_user(user_info):
        """Project the public fields of a stored user document."""
        return {
            'username': user_info["username"],
            'lastname': user_info["lastname"],
            'firstname': user_info["firstname"],
            'email': user_info["email"]
        }
    @staticmethod
    def return_output(status, message, user):
        """Uniform response envelope."""
        return {
            'status': status,
            'message': message,
            'result': {
                'user': user
            }
        }
    def __is_your_email_unique(self, email):
        """(ok, message) — ok iff *email* is not already registered."""
        if self.users.find_one({'email': email}):
            return False, 'The email has been taken!'
        return True, 'You can use this email!'
    def update_profile(self, requestForm, username=None):
        """Update e-mail and name fields for *username*.

        *username* is a new, optional parameter (backward compatible):
        the original body referenced undefined names (``username``,
        ``email``, ``fistname``, bare ``upsert``) and would have raised
        NameError.  Callers should pass the authenticated username.
        """
        status, message = False, ""
        form = UpdateProfileForm(requestForm)
        if form.validate():
            status, message = self.__is_your_email_unique(form.email.data)
            if status and username is not None:
                message = 'You have successfully updated your profile!'
                # $set (not a whole-document replacement) so the password
                # and role-specific fields survive the update.
                # NOTE(review): assumes UpdateProfileForm exposes
                # firstname/lastname fields — confirm.
                self.users.update_one(
                    {'username': username},
                    {'$set': {
                        "email": form.email.data,
                        "firstname": form.firstname.data,
                        "lastname": form.lastname.data,
                    }},
                )
        return UserDataAccess.return_output(status, message, {})
    def change_password(self, requestForm, username):
        """Verify the old password and store a new bcrypt hash."""
        status, message = False, ""
        form = ChangePasswordForm(requestForm)
        if form.validate():
            login_user = self.users.find_one({'username': username})
            if login_user and UserDataAccess.check_ps(login_user, form.oldpassword.data):
                new_hash = bcrypt.hashpw(form.newpassword.data.encode('utf-8'), bcrypt.gensalt())
                self.users.update_one(
                    {'username': login_user["username"]},
                    {'$set': {"password": new_hash}}
                )
                status, message = True, 'Your password has been changed!'
            else:
                message = 'The old password is NOT correct!'
        else:
            message = "Invalide form"
        return UserDataAccess.return_output(status, message, {})
    def delete(self, requestForm):
        """Delete the account after verifying username and password."""
        status, message = False, ""
        form = DeleteForm(requestForm)
        if form.validate():
            login_user = self.users.find_one({'username': form.username.data})
            # Guard against a missing user before checking the password
            # (the original would raise TypeError on None).
            if login_user and UserDataAccess.check_ps(login_user, form.password.data):
                self.users.delete_one({'username': form.username.data})
                status, message = True, 'Your account has been deleted!'
            else:
                message = 'Missing info!'
        return UserDataAccess.return_output(status, message, {})
'''
def get_user(self, user_id):
output = {'result': {}, 'status': False, 'message': ''}
user = {}
cursor = self.conn.execute('select u.* from users u where u.uid=%s', user_id)
for row in cursor:
user = dict(row)
del user['password']
cursor.close()
output['status'] = True
output['result']['user'] = user
return output
''' | StarcoderdataPython |
1707356 | from flask import Flask
from sys import platform
app = Flask(__name__) # the main flask object
# NOTE: views is imported only after `app` exists — presumably views.py
# imports `app` back from this package (circular-import dance); confirm
# before reordering these lines.
from PhotoZPE import views # the site views, defined in views.py
app.config['SPECTRA_FOLDER'] = app.root_path+'/static/spectra/'
app.config['SECRET_KEY'] = 'random!modnar'
| StarcoderdataPython |
3309194 | <gh_stars>0
# Generated by Django 3.2.11 on 2022-01-13 10:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds the ``default`` and
    ``sequence`` fields to the ``option`` model of the ``catalogue``
    app.  Do not restyle generated migrations."""
    dependencies = [
        ('catalogue', '0023_auto_20210824_1414'),
    ]
    operations = [
        migrations.AddField(
            model_name='option',
            name='default',
            field=models.CharField(max_length=255, null=True, verbose_name='Default initial data'),
        ),
        migrations.AddField(
            model_name='option',
            name='sequence',
            field=models.IntegerField(default=0, help_text='Compile show options in an ordered sequence', verbose_name='Sequence'),
        ),
    ]
| StarcoderdataPython |
4809276 | <reponame>brunnergino/JamBot
# Global configuration constants for the preprocessing/feature pipeline.
shifted = True
shift_folder = ''
if shifted:
    shift_folder = 'shifted/'
# If you only want to process a subfolder like '/A' or '/A/A' for tests
subfolder = '/'
# Folder layout under data/ — the *1 names bypass the shift_folder
# prefix, the *2 names include it.
source_folder = 'data/original' + subfolder
tempo_folder1 = 'data/' + 'tempo' + subfolder
histo_folder1 = 'data/' + 'histo' + subfolder
tempo_folder2 = 'data/' + shift_folder + 'tempo' + subfolder
shifted_folder = 'data/' + shift_folder + 'shifted' + subfolder
pickle_folder = 'data/' + shift_folder + 'pianoroll' + subfolder
roll_folder = 'data/' + shift_folder + 'indroll' + subfolder
histo_folder2 = 'data/' + shift_folder + 'histo' + subfolder
chords_folder = 'data/' + shift_folder + 'chords' + subfolder
chords_index_folder = 'data/' + shift_folder + 'chord_index' + subfolder
song_histo_folder = 'data/' + shift_folder + 'song_histo' + subfolder
# Test Paths:
#source_folder = 'data/test'
#tempo_folder = 'data/' + shift_folder + 'test_tempo'
#pickle_folder = 'data/' + shift_folder + 'test_pianoroll'
#roll_folder = 'data/' + shift_folder + 'test_indroll'
#histo_folder = 'data/' + shift_folder + 'test_histo'
#chords_folder = 'data/' + shift_folder + 'test_chords'
#chords_index_folder = 'data/' + shift_folder + 'test_chord_index'
#song_histo_folder = 'data/' + shift_folder + 'test_song_histo'
#shifted_folder = 'data/' + shift_folder + 'test_shifted'
dict_path = 'data/'
chord_dict_name = 'chord_dict.pickle'
index_dict_name = 'index_dict.pickle'
if shifted:
    chord_dict_name = 'chord_dict_shifted.pickle'
    index_dict_name = 'index_dict_shifted.pickle'
# Specifies the method how to add the chord information to the input vector
# 'embed' uses the chord embedding of the chord model
# 'onehot' encodes the chord as one hot vector
# 'int' just appends the chord id to the input vector
chord_embed_method = 'embed'
# Adds the count of the beat as a feature to the input vector
counter_feature = True
counter_size = 0
if counter_feature:
    counter_size = 3
# Appends also the next chord to the feature vector:
next_chord_feature = True
# Note-range crop bounds — presumably MIDI pitch numbers; confirm.
high_crop = 84#84
low_crop = 24#24
num_notes = 128
new_num_notes = high_crop - low_crop
chord_embedding_dim = 10
#double_sample_chords = False
double_sample_notes = True
sample_factor = 2
one_hot_input = False
collapse_octaves = True
discretize_time = False
offset_time = False
# NOTE(review): 'discritezition' is a misspelling of 'discretization',
# kept because other modules may import it under this name.
discritezition = 8
offset = 16
# Some parameters to extract the pianorolls
# fs = 4 for 8th notes
fs = 4
samples_per_bar = fs*2
octave = 12
melody_fs = 4
# Number of notes in extracted chords
chord_n = 3
# Number of notes in a key
key_n = 7
# Chord Vocabulary size
num_chords = 100
if shifted:
    num_chords = 50
UNK = '<unk>'  # out-of-vocabulary chord token
# Some Chords: triples of values 0-11 — presumably pitch classes with
# 0 = C (e.g. C major = (0,4,7)); confirm against the chord extractor.
C = tuple((0,4,7))
Cm = tuple((0,3,7))
Csus4 = tuple((0,5,7))
Csus6 = tuple((0,7,9))
Dm = tuple((2,5,9))
D = tuple((2,6,9))
Dsus4 = tuple((2,7,9))
Em = tuple((4,7,11))
E = tuple((4,8,11))
F = tuple((0,5,9))
Fm = tuple((0,5,8))
G = tuple((2,7,11))
Gm = tuple((2,7,10))
Gsus4 = tuple((0,2,7))
Am = tuple((0,4,9))
Asus7 = tuple((4,7,9))
A = tuple((1,4,9))
H = tuple((3,6,11))
Hverm = tuple((2,5,11))
Hm = tuple((2,6,11))
B = tuple((2,5,10))
Es = tuple((3,7,10))
As = tuple((0,3,8))
Des = tuple((1,5,8))
Fis = tuple((1,6,10))
| StarcoderdataPython |
3330554 | from absl import app
from absl import flags
from bark_ml.experiment.experiment_runner import ExperimentRunner
FLAGS = flags.FLAGS
flags.DEFINE_enum("mode",
                  "visualize",
                  ["train", "visualize", "evaluate", "print", "save"],
                  "Mode the configuration should be executed in.")

# NOTE: absolute paths are required
flags.DEFINE_string("exp_json",
                    "/Users/hart/Development/bark-ml/experiments/configs/highway_gnn.json",
                    "Path to the experiment json.")
flags.DEFINE_string("save_path",
                    "/Users/hart/Development/bark-ml/experiments/configs/highway_gnn/",
                    "Path to the experiment json.")
flags.DEFINE_integer("random_seed", 0, "Random seed to be used.")


# run experiment
def run_experiment(argv):
    """absl entry point: build an ExperimentRunner from the configured flags.

    NOTE(review): the runner is only constructed here and never explicitly
    started — presumably ExperimentRunner executes the requested mode inside
    its constructor; confirm, otherwise a call like exp_runner.run() is missing.
    """
    exp_runner = ExperimentRunner(json_file=FLAGS.exp_json, mode=FLAGS.mode,
                                  random_seed=FLAGS.random_seed)


if __name__ == '__main__':
    app.run(run_experiment)
166226 | """cascade delete for period stars
Revision ID: dbf1daf55faf
Revises: <KEY>
Create Date: 2016-10-08 10:14:03.852963
"""
# Alembic revision identifiers (the '<KEY>' placeholders are dataset
# anonymisation artefacts; real revisions carry hex ids).
revision = '<KEY>'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
from alembic import op
def upgrade():
    """Recreate the repo foreign keys on the report_all_* tables with ON DELETE CASCADE."""
    # Drop the original (non-cascading) MySQL-named foreign keys first.
    op.drop_constraint('report_all_daily_ibfk_1', 'report_all_daily', type_='foreignkey')
    op.drop_constraint('report_all_monthly_ibfk_1', 'report_all_monthly', type_='foreignkey')
    op.drop_constraint('report_all_weekly_ibfk_1', 'report_all_weekly', type_='foreignkey')
    # Re-add them so deleting a repo also removes its period-star reports.
    # Passing None lets the dialect auto-generate the constraint name.
    op.create_foreign_key(None, 'report_all_daily', 'repos', ['id'], ['id'], ondelete='CASCADE')
    op.create_foreign_key(None, 'report_all_monthly', 'repos', ['id'], ['id'], ondelete='CASCADE')
    op.create_foreign_key(None, 'report_all_weekly', 'repos', ['id'], ['id'], ondelete='CASCADE')
def downgrade():
    """Restore the original non-cascading foreign keys on the report_all_* tables."""
    # NOTE(review): dropping with name None relies on the dialect resolving the
    # auto-generated constraint name — works on some backends only; confirm.
    op.drop_constraint(None, 'report_all_weekly', type_='foreignkey')
    op.create_foreign_key('report_all_weekly_ibfk_1', 'report_all_weekly', 'repos', ['id'], ['id'])
    op.drop_constraint(None, 'report_all_monthly', type_='foreignkey')
    op.create_foreign_key(
        'report_all_monthly_ibfk_1', 'report_all_monthly', 'repos', ['id'], ['id']
    )
    op.drop_constraint(None, 'report_all_daily', type_='foreignkey')
    op.create_foreign_key('report_all_daily_ibfk_1', 'report_all_daily', 'repos', ['id'], ['id'])
| StarcoderdataPython |
111084 | {1: 'one', 2: 'two'}
{a: 2, b:4}
| StarcoderdataPython |
52495 | <gh_stars>1-10
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .base_assigner import BaseAssigner
from .max_iou_assigner import MaxIoUAssigner
from .point_assigner import PointAssigner
from .max_iou_assigner_coeff import MaxIoUAssigner_coeff
from .max_iou_ud_assigner import MaxIoUUDAssigner
# Public API of the assigner package; keep in sync with the imports above.
__all__ = [
    'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
    'PointAssigner', 'MaxIoUAssigner_coeff', "MaxIoUUDAssigner"
]
| StarcoderdataPython |
21850 | <filename>src/baskerville/models/model_interface.py
# Copyright (c) 2020, eQualit.ie inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import inspect
import logging
class ModelInterface(object):
    """Base class for Baskerville models.

    Subclasses declare their hyper-parameters as constructor arguments; this
    base class then provides generic get/set access to them plus no-op
    train/predict/save/load hooks to be overridden.
    """

    def __init__(self):
        super().__init__()
        # Default logger named after the concrete subclass.
        self.logger = logging.getLogger(self.__class__.__name__)

    def get_param_names(self):
        """Return the constructor parameter names, in declaration order."""
        signature = inspect.signature(self.__init__)
        return list(signature.parameters)

    def set_params(self, **params):
        """Set constructor parameters as attributes.

        Raises:
            RuntimeError: if a keyword is not a constructor parameter.
        """
        known = self.get_param_names()
        for name, value in params.items():
            if name in known:
                setattr(self, name, value)
            else:
                raise RuntimeError(
                    f'Class {self.__class__.__name__} does not have {name} attribute')

    def get_params(self):
        """Return a dict mapping each constructor parameter to its current value."""
        return {name: getattr(self, name) for name in self.get_param_names()}

    def _get_class_path(self):
        """Return the fully-qualified dotted path of the concrete class."""
        cls = self.__class__
        return f'{cls.__module__}.{cls.__name__}'

    def train(self, df):
        """Fit the model on *df* (no-op in the base class)."""
        pass

    def predict(self, df):
        """Predict on *df* (no-op in the base class)."""
        pass

    def save(self, path, spark_session=None):
        """Persist the model to *path* (no-op in the base class)."""
        pass

    def load(self, path, spark_session=None):
        """Load the model from *path* (no-op in the base class)."""
        pass

    def set_logger(self, logger):
        """Replace the default logger."""
        self.logger = logger
| StarcoderdataPython |
3295993 | <reponame>wusimo/NILM<gh_stars>1-10
import numpy as np
import scipy as sp
import scipy.misc
import pandas as pd
from os import path
import datetime
def rel_change(y):
    """Relative change of the middle sample of a length-3 window.

    Returns the distance from y[1] to its *closer* neighbour, normalised by y[1].
    """
    nearest_jump = min(abs(y[1] - y[0]), abs(y[1] - y[2]))
    return nearest_jump / float(y[1])


def rel_change_filter_0819_3(t, data_input, thre=.2):
    """Smooth single-sample spikes and dips in ``data_input``.

    A sample that is above (or below) both neighbours and whose relative
    change exceeds the threshold is replaced by its neighbours' average.
    Returns ``(t, smoothed)``; ``t`` is passed through unchanged.
    """
    # A dip must clear a slightly larger threshold so spike and dip detection
    # are symmetric: thre/(1-thre) is the dip-side equivalent of thre.
    dip_thre = thre / (1 - thre)

    def _is_outlier(i):
        prev, cur, nxt = data_input[i - 1], data_input[i], data_input[i + 1]
        window = data_input[i - 1:i + 2]
        if cur > prev and cur > nxt:
            return rel_change(window) > thre
        if cur < prev and cur < nxt:
            return rel_change(window) > dip_thre
        return False

    # Decisions and replacement values both use the ORIGINAL data, matching
    # the single-pass list construction of the original implementation.
    smoothed = list(data_input)
    for i in range(1, len(data_input) - 1):
        if _is_outlier(i):
            smoothed[i] = (data_input[i - 1] + data_input[i + 1]) / 2
    return t, smoothed
def read_dat_0819(date, h_start, h_end, folder_path):
    """Read hourly CSV files '<month>-<day>-<hour>.csv' for one day (Python 2).

    Returns (t, dat): t is hours elapsed since the first timestamp read,
    dat is the power column divided by 3.
    NOTE(review): x/3 presumably converts a 3-sample/3-phase aggregate —
    confirm against the data source.
    """
    t = []
    dat = []
    start_time = None
    for h in range(h_start, h_end):
        try:
            file_name = '%d-%d-%d.csv' % (date.month, date.day, h)
            file_path = path.join(folder_path, file_name)
            data_pd = pd.read_csv(file_path, names=['time', 'data'])
            time_tmp = pd.to_datetime(data_pd.time,infer_datetime_format=True)
            if not start_time:
                start_time = time_tmp[0]
            # .seconds ignores whole days; read_dat_0910 below adds the
            # .days*24 term for multi-day ranges.
            tmp = [(x - start_time).seconds/3600. for x in time_tmp]
            t.extend(tmp)
            dat.extend( [x/3 for x in data_pd.data] )
        except Exception as inst:
            # Best-effort: log and skip a missing/corrupt hour file.
            print type(inst), inst.args, inst # the exception instance
            print '%s failed' % file_path
    return t, dat
def read_dat_0910(datetime_s, datetime_e, folder_path):
    """Read hourly CSV files over an arbitrary [datetime_s, datetime_e) range (Python 2).

    Like read_dat_0819 but walks hour by hour across day boundaries and
    includes the .days*24 term so elapsed hours are correct beyond one day.
    """
    t = []
    dat = []
    start_time = None
    current_time = datetime_s
    while current_time < datetime_e:
        try:
            file_name = '%d-%d-%d.csv' % (current_time.month, current_time.day, current_time.hour)
            file_path = path.join(folder_path, file_name)
            data_pd = pd.read_csv(file_path, names=['time', 'data'])
            time_tmp = pd.to_datetime(data_pd.time,infer_datetime_format=True)
            if not start_time:
                start_time = time_tmp[0]
            tmp = [(x - start_time).days*24.+(x - start_time).seconds/3600. for x in time_tmp]
            t.extend(tmp)
            dat.extend( [x/3 for x in data_pd.data] )
        except Exception as inst:
            # Best-effort: log and skip a missing/corrupt hour file.
            print type(inst), inst.args, inst # the exception instance
            print '%s failed' % file_path
        current_time += datetime.timedelta(hours=1)
    return t, dat
def test_func():
    # Development smoke-test helper; Python 2 print statement
    # (this module targets Python 2 throughout).
    print 5
def log_norm_pdf(x, mu, sigma_2):
    """Log-density of a Gaussian N(mu, sigma_2) evaluated at x.

    ``sigma_2`` is the variance (sigma squared), not the standard deviation.
    """
    residual = x - mu
    return -residual * residual / sigma_2 - np.log(2 * np.pi * sigma_2) / 2
def set_prior_6(para):
    """Initialise the per-shape run-length tracking state before any datum arrives.

    Returns four parallel structures (log_prob, delta_mean, delta_var,
    time_since_last_cp), each a fresh list containing one empty list per shape.
    """
    n_shape = para['n_shape']

    def _empty_per_shape():
        # Fresh inner lists every call so shapes never share state.
        return [[] for _ in range(n_shape)]

    return _empty_per_shape(), _empty_per_shape(), _empty_per_shape(), _empty_per_shape()
def update_with_datum_6(datum,
                        log_prob,
                        delta_mean,
                        delta_var,
                        time_since_last_cp,
                        last_datum,
                        para):
    """One Bayesian online change-point-detection update step.

    All four state arguments are lists indexed [shape][run-length hypothesis].
    Returns the posterior versions of the same four structures after folding
    in ``datum`` via a per-hypothesis Kalman-style update.
    """
    # extract parameters
    shape = para['shape']
    n_shape = para['n_shape']
    H = para['H'] # log probability that a new cp forms
    H_2_exp = 1 - np.exp(H)
    # NOTE(review): delta_shape is read but never used below — dead parameter?
    delta_shape = para['delta_shape'] # shape noise
    Q = para['Q'] # process noise
    R = para['R'] # measurement noise
    delta_init = para['delta_init']
    # a function that return element within the list or
    # the last element of the list if that is not possible
    shape_helper = lambda i_shape, x: shape[i_shape][x] if x<len(shape[i_shape]) else shape[i_shape][-1]
    # step 1, grow log probability, and time since the last change point
    log_prob_grow = [ [] for _ in range(n_shape) ]
    time_since_last_cp_grow = [ [] for _ in range(n_shape)]
    # find the longest distance in time_since_last_cp
    if len(time_since_last_cp[0]) == 0: # this is the first data
        # Uniform prior over shapes for the very first observation.
        new_cp_prob = 1/float(n_shape)
        for i_shape in range(n_shape):
            log_prob_grow[i_shape] = [np.log(new_cp_prob)]
            time_since_last_cp_grow[i_shape] = [0]
    else:
        # distance from this data point to the last confirmed change point
        r_max = np.max( [t for x in time_since_last_cp for t in x] )
        # find probability of all shapes at r_max
        total_prob_since_last_cp = np.sum( [np.exp(t[-1]) for t in log_prob] )
        new_cp_prob = total_prob_since_last_cp * H_2_exp / n_shape
        # Suppress new change points within 5 samples of the last one
        # (exp(-50) is effectively zero probability).
        if r_max < 5:
            new_cp_prob = np.exp(-50)
        for i_shape in range(n_shape):
            # Prepend the new-cp hypothesis, age existing ones; the oldest
            # hypothesis absorbs the H (no-changepoint) log-probability.
            log_prob_grow[i_shape] = [np.log(new_cp_prob)] + log_prob[i_shape][:-1] + [ log_prob[i_shape][-1]+H ]
            time_since_last_cp_grow[i_shape] = [0] + [x+1 for x in time_since_last_cp[i_shape]]
    # step 2, update the estimation of next data
    delta_mean_grow = [ [] for _ in range(n_shape) ]
    delta_var_grow = [ [] for _ in range(n_shape) ]
    for i_shape in range(n_shape):
        # Predicted mean = shape template at hypothesis age + datum offset.
        delta_mean_grow[i_shape] = [
            shape_helper(i_shape, x)+y
            for x, y in zip(time_since_last_cp_grow[i_shape], [last_datum]+delta_mean[i_shape])
        ]
        # Predicted variance: fresh hypotheses start at delta_init,
        # aged ones accumulate process noise Q.
        delta_var_grow[i_shape] = [
            delta_init[i_shape]
        ] + [
            x+Q for x in delta_var[i_shape]
        ]
    # estimate probability (predictive log-likelihood of datum per hypothesis)
    p_predict = [ [ ] for i_shape in range(n_shape) ]
    for i_shape in range(n_shape):
        n_tau = len(delta_mean_grow[i_shape])
        tmp = [ 0 for _ in range(n_tau) ]
        for i_tau in range(n_tau):
            tmp[i_tau] = log_norm_pdf( datum, delta_mean_grow[i_shape][i_tau], delta_var_grow[i_shape][i_tau] + R )
        p_predict[i_shape] = tmp
    # Update step (per-hypothesis scalar Kalman update with gain K)
    delta_mean_posterior = [ [] for _ in range(n_shape)]
    delta_var_posterior = [ [] for _ in range(n_shape) ]
    for i_shape in range(n_shape):
        n_tau = len(delta_mean_grow[i_shape])
        delta_mean_tmp = [ [] for _ in range(n_tau) ]
        delta_var_tmp = [ [] for _ in range(n_tau) ]
        for i_tau in range(n_tau):
            K = delta_var_grow[i_shape][i_tau] / (delta_var_grow[i_shape][i_tau]+R)
            offset = datum - delta_mean_grow[i_shape][i_tau]
            delta_mean_tmp[i_tau] = delta_mean_grow[i_shape][i_tau] + K * offset
            delta_var_tmp[i_tau] = (1-K) * delta_var_grow[i_shape][i_tau]
        delta_mean_posterior[i_shape] = delta_mean_tmp
        delta_var_posterior[i_shape] = delta_var_tmp
    # update prob (prior growth + predictive likelihood)
    log_prob_posterior = [ [] for _ in range(n_shape) ]
    for i_shape in range(n_shape):
        log_prob_posterior[i_shape] = [x+y for x,y in zip(log_prob_grow[i_shape], p_predict[i_shape])]
    # normalization (log-sum-exp over all shapes and hypotheses)
    Z = sp.misc.logsumexp([x for t in log_prob_posterior for x in t])
    for i_shape in range(n_shape):
        log_prob_posterior[i_shape] = [x-Z for x in log_prob_posterior[i_shape]]
    # discount mean: store means relative to the shape template so the next
    # call can re-add the template at the aged position.
    time_since_last_cp_posterior = time_since_last_cp_grow
    for i_shape in range(n_shape):
        delta_mean_posterior[i_shape] = [x-shape_helper(i_shape, y) for x, y in zip(delta_mean_posterior[i_shape], time_since_last_cp_posterior[i_shape])]
    return log_prob_posterior, delta_mean_posterior, delta_var_posterior, time_since_last_cp_posterior
def is_happy(prob, prob_thre=.3, len_protect=5):
    """Return True while the model still explains the data.

    "Happy" means the probability mass on the most recent change point is
    above ``prob_thre``, or the run is still inside the protection window
    of fewer than ``len_protect`` samples.
    """
    # Probability mass (summed over shapes) of the newest hypothesis.
    mass_at_last_cp = sum(np.exp(shape_log_probs[-1]) for shape_log_probs in prob)
    within_protection = len(prob[0]) < len_protect
    return mass_at_last_cp > prob_thre or within_protection
# log_prob, delta_mean, delta_var, time_since_last_cp
def trim_5(var, time_since_last_cp, time_thre=5):
    """Prune per-shape hypothesis lists by age.

    For each shape, keep entries younger than ``time_thre`` plus — always —
    the final entry, so the oldest (catch-all) hypothesis survives.
    Returns a new structure; the inputs are not modified.
    """
    trimmed = []
    for hyps, ages in zip(var, time_since_last_cp):
        last = len(hyps) - 1
        kept = [val for pos, val in enumerate(hyps)
                if ages[pos] < time_thre or pos == last]
        trimmed.append(kept)
    return trimmed
def disaggregate(data, para):
    """Detect change points in ``data`` via online shape tracking.

    Runs update_with_datum_6 over the series; when the model stays "unhappy"
    for unhappy_count_thre consecutive samples, a change point is confirmed
    at the position where unhappiness began, the prior is reset, and the
    protected window is replayed.  Returns the list of change-point indices
    (starting with 0).
    """
    unhappy_count_thre = para['unhappy_count_thre']
    len_protected = para['len_protected']
    current_data_pos = 0
    last_datum = 0
    log_prob, delta_mean, delta_var, time_since_last_cp = set_prior_6(para)
    last_cp = 0
    cp_list = [last_cp]
    unhappy_count = 0
    while (current_data_pos<len(data)):
        datum = data[current_data_pos]
        log_prob, delta_mean, delta_var, time_since_last_cp = update_with_datum_6(datum, log_prob, delta_mean, delta_var, time_since_last_cp, last_datum, para)
        # NOTE(review): leader_prob/leader_shape (and `i` below) are computed
        # but never used — debugging leftovers?
        leader_prob = np.sum( [np.exp(t[-1]) for t in log_prob] )
        leader_shape = np.argmax( [t[-1] for t in log_prob] )
        flag_happy = is_happy(log_prob)
        # Hard-coded debug window (positions 3149-3158) left from development.
        if current_data_pos >= 3149 and current_data_pos<3159:
            pass
            # print flag_happy, log_prob
        if flag_happy:
            unhappy_count = 0 # reset counter
            log_prob = trim_5(log_prob, time_since_last_cp) # trim data
            delta_mean = trim_5(delta_mean, time_since_last_cp)
            delta_var = trim_5(delta_var, time_since_last_cp)
            time_since_last_cp = trim_5(time_since_last_cp, time_since_last_cp)
            i = np.argmax([t[-1] for t in log_prob])
            if current_data_pos >= 3149 and current_data_pos<3159:
                pass
                # print current_data_pos, [t[-1] for t in log_prob]
        else:
            unhappy_count += 1
            if (unhappy_count == unhappy_count_thre):
                # Confirm a change point where the unhappy streak started.
                last_cp = current_data_pos - unhappy_count_thre
                cp_list.append(last_cp)
                unhappy_count = 0
                log_prob, delta_mean, delta_var, time_since_last_cp = set_prior_6(para)
                # Baseline = mean of the 3 samples preceding the change point.
                last_datum = np.mean( data[(last_cp-3):last_cp] )
                # Replay the protected window, keeping only the newest
                # hypothesis per shape after each step.
                # NOTE(review): this replays the *current* `datum` for every
                # position rather than data[current_data_pos_t] — confirm.
                for current_data_pos_t in range(last_cp, last_cp + len_protected):
                    log_prob, delta_mean, delta_var, time_since_last_cp = update_with_datum_6(datum,
                                                                                             log_prob,
                                                                                             delta_mean,
                                                                                             delta_var,
                                                                                             time_since_last_cp,
                                                                                             last_datum,
                                                                                             para)
                    log_prob = [[t[-1]] for t in log_prob]
                    delta_mean = [[t[-1]] for t in delta_mean]
                    delta_var = [[t[-1]] for t in delta_var]
                    time_since_last_cp = [[t[-1]] for t in time_since_last_cp]
                # Renormalise the collapsed distribution over shapes.
                z = np.log(np.sum([np.exp(t[-1]) for t in log_prob]))
                log_prob = [[t[-1]-z] for t in log_prob]
        current_data_pos += 1
        # Rolling baseline: mean of up to the last 3 observed samples.
        if current_data_pos < 3:
            last_datum = np.mean( data[0:current_data_pos] )
        else:
            last_datum = np.mean( data[(current_data_pos-3):current_data_pos] )
    return cp_list
| StarcoderdataPython |
1696816 | <filename>app/main.py
import os
import boto3
import configparser
import argparse
import sys
from typing import Any, Tuple
from app.dynamodb import csv_import, csv_export, truncate
__version__ = "1.3.4"
config_file = "config.ini"
def main() -> None:
    """CLI entry point: run the requested operation, print its message,
    and exit with the operation's status code.
    """
    # execute() is expected to return a (message, exit_code) pair.
    (message, code) = execute()
    print(message)
    sys.exit(code)
def execute() -> Tuple:
    """Parse CLI arguments and dispatch to the requested DynamoDB operation.

    Raises:
        ValueError: invalid config (caught internally and reported)

    Returns:
        Tuple: (result message, exit code) — exit code 0 on success,
        1 on usage or configuration errors.
    """
    # Bug fix: main() unpacks the return value as (message, code), so the
    # default must be a 2-tuple — the original bare string crashed the
    # no-operation path with a ValueError during unpacking.
    result = ("No operations.", 0)
    # arguments parse
    args = parse_args(sys.argv[1:])
    # boto3 config setting
    try:
        table = config_read_and_get_table(args)
    except ValueError as e:
        return (str(e), 1)
    except Exception:
        return (f"Invalid format {config_file} file", 1)
    # csv import
    # NOTE(review): csv_import/csv_export/truncate are assumed to return
    # (message, code) tuples as well — confirm in app.dynamodb.
    if args.imp:
        if args.file is not None:
            result = csv_import(table, args.file)
        else:
            return ("Import mode requires a input file option.", 1)
    # csv export
    if args.exp:
        if args.output is not None:
            result = csv_export(table, args.output)
        else:
            return ("Export mode requires a output file option.", 1)
    # truncate table
    if args.truncate:
        result = truncate(table)
    return result
def parse_args(args: Any) -> Any:
    """Parse command-line arguments.

    Args:
        args (Any): the argument list to parse, e.g. ``sys.argv[1:]``

    Returns:
        Any: the parsed argument namespace
    """
    parser = argparse.ArgumentParser(
        description="Import CSV file into DynamoDB table utilities")
    parser.add_argument("-v", "--version", action="version",
                        version=__version__,
                        help="show version")
    parser.add_argument(
        "-i", "--imp", help="mode import", action="store_true")
    parser.add_argument(
        "-e", "--exp", help="mode export", action="store_true")
    parser.add_argument(
        "--truncate", help="mode truncate", action="store_true")
    parser.add_argument(
        "-t", "--table", help="DynamoDB table name", required=True)
    parser.add_argument(
        "-f", "--file", help="UTF-8 CSV file path required import mode")
    parser.add_argument(
        "-o", "--output", help="output file path required export mode")
    # Bug fix: actually parse the supplied list.  The original called
    # parser.parse_args() with no arguments, silently ignoring *args*
    # and always falling back to sys.argv[1:].
    return parser.parse_args(args)
def config_read_and_get_table(args: Any) -> Any:
    """Config read and Create DynamoDB table instance

    Args:
        args (Any): parsed CLI arguments; only ``args.table`` is used

    Returns:
        Any: boto3 DynamoDB Table resource

    Raises:
        ValueError: if the config.ini file does not exist
    """
    if not os.path.isfile(config_file):
        raise ValueError(f"Please make your {config_file} file")
    config = configparser.ConfigParser()
    # Pre-seed ENDPOINT_URL with an empty default so the key always exists
    # even when the user's config omits it.
    config.read_dict({"AWS": {"ENDPOINT_URL": ""}})
    config.read(config_file)
    endpoint_url = None
    # A non-empty ENDPOINT_URL targets e.g. a local DynamoDB instance.
    if config.get("AWS", "ENDPOINT_URL"):
        endpoint_url = config.get("AWS", "ENDPOINT_URL")
    dynamodb = boto3.resource("dynamodb",
                              region_name=config.get("AWS", "REGION"),
                              aws_access_key_id=config.get("AWS", "AWS_ACCESS_KEY_ID"),
                              aws_secret_access_key=config.get("AWS", "AWS_SECRET_ACCESS_KEY"),
                              endpoint_url=endpoint_url)
    table = dynamodb.Table(args.table)
    return table
# Allow running directly as a script in addition to console-script entry points.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1647261 | # -*- coding: utf-8 -*-
"""
heroku.core
~~~~~~~~~~~
This module provides the base entrypoint for heroku.py.
"""
from .api import Heroku
def from_key(api_key, **kwargs):
    """Return a Heroku client authenticated with the given API key.

    Extra keyword arguments are forwarded to the Heroku constructor.
    """
    client = Heroku(**kwargs)
    client.authenticate(api_key)
    return client
def from_pass(username, password):
    """Return a Heroku client authenticated via username/password.

    The password is first exchanged for an API key, which is then used
    to log in.
    """
    return from_key(get_key(username, password))
def get_key(username, password):
    """Exchange a username/password pair for a Heroku API key."""
    client = Heroku()
    return client.request_key(username, password)
4834869 | <gh_stars>1-10
import cv2, functools, glob, keras, random, maskutils, math, os
from keras.utils.np_utils import to_categorical
from keras.preprocessing import image
import slidingwindow as sw
import numpy as np
# From <https://github.com/keras-team/keras-preprocessing/blob/1.0.2/keras_preprocessing/image.py#L341>
# Duplicated here since this is not exposed by all versions of Keras
def flip_axis(x, axis):
    """Reverse *x* along the given axis, returning a NumPy view.

    Duplicated from keras-preprocessing 1.0.2 since not all Keras
    versions expose it.
    """
    arr = np.asarray(x)
    # Bring the target axis to the front, reverse it, then restore order.
    moved = arr.swapaxes(axis, 0)
    reversed_view = moved[::-1, ...]
    return reversed_view.swapaxes(0, axis)
class WindowSequenceBase(keras.utils.Sequence):
    '''
    Base Sequence class for sets of images to which a sliding window is applied.
    '''

    def __init__(self, directory, batchSize, windowSize, validationSplit, subset, numLabels):
        '''
        Partitions the *.png files in `directory` into a training or
        validation slice according to `validationSplit` and `subset`.
        '''
        # Store our parameters
        self.batchSize = batchSize
        self.windowSize = windowSize
        self.subset = subset
        self.numLabels = numLabels
        # Validate the specified subset string
        if self.subset not in ['training', 'validation']:
            raise RuntimeError('invalid subset "{}"'.format(self.subset))
        # Retrieve the list of input images in the input directory
        images = sorted(glob.glob(os.path.join(directory, '*.png')))
        totalImages = len(images)
        # Slice the list of input images based on our subset:
        # training takes the first (1 - validationSplit) fraction,
        # validation takes the remainder.
        validationOffset = math.floor((1.0 - validationSplit) * totalImages)
        self.images = images[validationOffset:] if subset == 'validation' else images[:validationOffset]

    # NOTE(review): lru_cache on an instance method keys on (self, imagePath)
    # and keeps those instances alive while cached; the small maxsize bounds
    # this — confirm the retention is acceptable.
    @functools.lru_cache(maxsize=5)
    def _getDataAndLabels(self, imagePath):
        '''
        Retrieves the image data and associated classification label(s) for the specified image file.
        The return value is a tuple containing the image data and the per-pixel labels, both as NumPy arrays.
        '''
        raster = cv2.imread(imagePath, cv2.IMREAD_UNCHANGED)
        channels, mask = maskutils.splitAlphaMask(raster)
        # NOTE(review): THRESH_BINARY sets pixels > numLabels-1 to numLabels-1
        # and all others to 0 — verify this is the intended label clamping
        # (THRESH_TRUNC may have been meant).
        ret, mask = cv2.threshold(mask, self.numLabels-1, self.numLabels-1, cv2.THRESH_BINARY)
        return channels, mask

    def computeWeightings(self, numImages = 10):
        '''
        Computes the label weightings to use for training, based on the extent to which each classification
        label is represented in a random sample of the training data
        '''
        # Sample the masks from up to the user-specified number of randomly-selected images
        samples = random.sample(self.images, min(len(self.images), numImages))
        totals = np.zeros((self.numLabels), dtype=np.int64)
        for sample in samples:
            channels, mask = self._getDataAndLabels(sample)
            # One histogram bin per label value [0, numLabels).
            histogram, bins = np.histogram(mask, bins = list(range(0, self.numLabels+1)))
            totals += histogram
        # Compute the label weightings: rarer labels get weights closer to 1.
        overall = np.sum(totals)
        weights = 1.0 - (totals / overall)
        return weights
class PreWindowedSequence(WindowSequenceBase):
    '''
    Sequence class that reads pre-windowed images from an input directory.
    Images should have per-pixel classification labels and all be of the same resolution.
    '''

    def __init__(self, directory, batchSize, windowSize, validationSplit, subset, numLabels):
        '''
        Creates a new sequence for the images in the specified input directory.
        Note that the resolution of all of the images in the input directory must match the specified window size.

        Arguments:
        - `directory`: the input directory containing the images
        - `batchSize`: the batch size
        - `windowSize`: the size of the sliding window, in pixels
        - `validationSplit`: the percentage of input images to be used as validation data
        - `subset`: which subset of images to iterate over, either 'training' or 'validation'
        '''
        # Validate and store our parameters
        super().__init__(directory, batchSize, windowSize, validationSplit, subset, numLabels)
        # Verify that the dimensions of the first input image match the specified window size
        firstImage = image.img_to_array(image.load_img(self.images[0], color_mode='grayscale'))
        if firstImage.shape[0] != self.windowSize or firstImage.shape[1] != self.windowSize:
            raise RuntimeError('all images must have a resolution of {}x{}, found an image of {}x{}'.format(
                self.windowSize,
                self.windowSize,
                firstImage.shape[1],
                firstImage.shape[0]
            ))
        # Display the number of detected images
        print('Found {} pre-windowed {} image(s) with resolution {}x{}'.format(
            len(self.images),
            subset,
            firstImage.shape[1],
            firstImage.shape[0],
        ))

    def __len__(self):
        '''
        Returns the length of the iterator in batches
        '''
        return int(math.ceil(len(self.images) / self.batchSize))

    def __getitem__(self, index):
        '''
        Returns the batch at the specified index
        '''
        # Determine which images have been requested
        startIndex = index * self.batchSize
        endIndex = startIndex + self.batchSize
        # Retrieve and transform the image data and labels for the requested windows.
        # NOTE(review): _transformImage wraps indices modulo len(self.images),
        # so the final partial batch is padded by re-reading images from the
        # start of the list.
        transformed = [self._transformImage(imageIndex) for imageIndex in range(startIndex, endIndex)]
        # Split the data from the labels
        data = [result[0] for result in transformed]
        labels = [result[1] for result in transformed]
        # Return a tuple containing the data and the labels
        return np.concatenate(data), np.concatenate(labels)

    def _transformImage(self, imageIndex):
        '''
        Retrieves the specified image and transforms it as appropriate
        '''
        # Retrieve the image data and label(s)
        data, labels = self._getDataAndLabels(self.images[imageIndex % len(self.images)])
        # Apply our random transforms if we are processing training data
        if self.subset == 'training':
            # Randomly apply a horizontal flip (data and labels together)
            if np.random.random() < 0.5:
                data = flip_axis(data, 1)
                labels = flip_axis(labels, 1)
            # Randomly apply a vertical flip
            if np.random.random() < 0.5:
                data = flip_axis(data, 0)
                labels = flip_axis(labels, 0)
        # One-hot encode the labels and add a leading batch dimension
        labels = np.expand_dims(to_categorical(np.ravel(labels), self.numLabels), axis=0)
        return np.expand_dims(data, axis=0), labels
class SlidingWindowSequence(WindowSequenceBase):
    '''
    Sequence class that runs a sliding window over the images in an input directory.
    Images should have per-pixel classification labels and all be of the same resolution.
    '''

    def __init__(self, directory, batchSize, windowSize, overlap, validationSplit, subset, numLabels):
        '''
        Creates a new sequence for the images in the specified input directory.
        Note that all of the images in the input directory must be of the same resolution.

        Arguments:
        - `directory`: the input directory containing the images
        - `batchSize`: the batch size
        - `windowSize`: the size of the sliding window, in pixels
        - `overlap`: the percentage of overlap between the generated windows
        - `validationSplit`: the percentage of input images to be used as validation data
        - `subset`: which subset of images to iterate over, either 'training' or 'validation'
        '''
        # Validate and store our parameters
        super().__init__(directory, batchSize, windowSize, validationSplit, subset, numLabels)
        self.overlap = overlap
        # Use the dimensions of the first input image to determine how many windows each image will contain
        firstImage = image.img_to_array(image.load_img(self.images[0], color_mode='grayscale'))
        windows = self._generateWindows(firstImage)
        self.windowsPerImage = len(windows)
        # Display the number of detected images
        print('Found {} {} image(s) with resolution {}x{} ({} windows per image)'.format(
            len(self.images),
            subset,
            firstImage.shape[1],
            firstImage.shape[0],
            self.windowsPerImage
        ))

    def _transformIndex(self, index):
        '''
        Transforms a one-dimensional overall window index into a tuple of (image,window)
        '''
        return index // self.windowsPerImage, index % self.windowsPerImage

    def __len__(self):
        '''
        Returns the length of the iterator in batches
        '''
        return int(math.ceil((len(self.images) * self.windowsPerImage) / self.batchSize))

    def __getitem__(self, index):
        '''
        Returns the batch at the specified index
        '''
        # Transform the supplied index to determine which windows from which images have been requested
        startIndex = index * self.batchSize
        endIndex = startIndex + self.batchSize
        startImage, startWindow = self._transformIndex(startIndex)
        endImage, endWindow = self._transformIndex(endIndex)
        # Generate the list of requested images
        images = list(range(startImage, endImage+1))
        # If the batch boundary lands exactly on an image boundary, the end
        # image contributes no windows and is dropped.
        if endWindow == 0:
            images = images[:-1]
        # Generate the list of required windows for each requested image.
        # NOTE(review): the loop variable 'image' shadows the
        # keras.preprocessing 'image' module within this method.
        imageWindows = []
        for image in images:
            if image == startImage and image == endImage:
                imageWindows.append(list(range(startWindow, endWindow)))
            elif image == startImage:
                imageWindows.append(list(range(startWindow, self.windowsPerImage)))
            elif image == endImage:
                imageWindows.append(list(range(0, endWindow)))
            else:
                imageWindows.append(list(range(0, self.windowsPerImage)))
        # Retrieve and transform the image data and labels for the requested windows
        transformed = [self._transformWindows(image, windows) for image, windows in zip(images, imageWindows)]
        # Split the data from the labels
        data = [result[0] for result in transformed]
        labels = [result[1] for result in transformed]
        # Return a tuple containing the data and the labels
        return np.concatenate(data), np.concatenate(labels)

    def _generateWindows(self, data):
        '''
        Generates the set of sliding windows for the supplied NumPy array
        '''
        return sw.generate(data, sw.DimOrder.HeightWidthChannel, self.windowSize, self.overlap)

    def _transformWindows(self, imageIndex, windowIndices):
        '''
        Applies the specified set of windows to the specified image and transforms them as appropriate
        '''
        # Retrieve the image data and label(s)
        data, labels = self._getDataAndLabels(self.images[imageIndex % len(self.images)])
        # Extract the specified set of windows
        windows = self._generateWindows(data)
        windows = [windows[index] for index in windowIndices]
        # Apply our random transforms if we are processing training data
        data = [window.apply(data) for window in windows]
        labels = [window.apply(labels) for window in windows]
        if self.subset == 'training':
            # Randomly apply a horizontal flip (same flip for all windows of this image)
            if np.random.random() < 0.5:
                data = [flip_axis(window, 1) for window in data]
                labels = [flip_axis(window, 1) for window in labels]
            # Randomly apply a vertical flip
            if np.random.random() < 0.5:
                data = [flip_axis(window, 0) for window in data]
                labels = [flip_axis(window, 0) for window in labels]
        # Convert the list of windows into a single NumPy array
        data = np.concatenate([np.expand_dims(d, axis=0) for d in data])
        # One-hot encode the labels
        labels = np.concatenate([
            np.expand_dims(to_categorical(np.ravel(label), self.numLabels), axis=0)
            for label in labels
        ])
        return data, labels
| StarcoderdataPython |
1710171 | # @lc app=leetcode id=65 lang=python3
#
# [65] Valid Number
#
# https://leetcode.com/problems/valid-number/description/
#
# algorithms
# Hard (16.04%)
# Likes: 906
# Dislikes: 5589
# Total Accepted: 200.3K
# Total Submissions: 1.2M
# Testcase Example: '"0"'
#
# A valid number can be split up into these components (in order):
#
#
# A decimal number or an integer.
# (Optional) An 'e' or 'E', followed by an integer.
#
#
# A decimal number can be split up into these components (in order):
#
#
# (Optional) A sign character (either '+' or '-').
# One of the following formats:
#
# At least one digit, followed by a dot '.'.
# At least one digit, followed by a dot '.', followed by at least one
# digit.
# A dot '.', followed by at least one digit.
#
#
#
#
# An integer can be split up into these components (in order):
#
#
# (Optional) A sign character (either '+' or '-').
# At least one digit.
#
#
# For example, all the following are valid numbers: ["2", "0089", "-0.1",
# "+3.14", "4.", "-.9", "2e10", "-90E3", "3e+7", "+6e-1", "53.5e93",
# "-123.456e789"], while the following are not valid numbers: ["abc", "1a",
# "1e", "e3", "99e2.5", "--6", "-+3", "95a54e53"].
#
# Given a string s, return true if s is a valid number.
#
#
# Example 1:
#
#
# Input: s = "0"
# Output: true
#
#
# Example 2:
#
#
# Input: s = "e"
# Output: false
#
#
# Example 3:
#
#
# Input: s = "."
# Output: false
#
#
# Example 4:
#
#
# Input: s = ".1"
# Output: true
#
#
#
# Constraints:
#
#
# 1 <= s.length <= 20
# s consists of only English letters (both uppercase and lowercase), digits
# (0-9), plus '+', minus '-', or dot '.'.
#
#
#
#
#
# @lc tags=math;string
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
# 判断是否是合法的数字字符串。
# 有限状态自动机 FSA。
# 负值状态是不合法的。
#
# @lc idea=end
# @lc group=math
# @lc rank=10
# @lc code=start
class Solution:
    """LeetCode 65 — validate a numeric string with a finite state automaton.

    The transition table is indexed by the current state id; negative ids
    exploit Python's wrap-around list indexing to address the rows near the
    end of the table.  State -1 is the absorbing error state, and only the
    strictly positive states (1..4) represent a complete number.
    """

    # Transition table. Columns: digit, sign (+/-), dot (.), exponent (e/E).
    _FSA = [
        # 0: initial state
        [1, -2, -3, -1],
        # 1: integer digits seen
        [1, -1, 2, -4],
        # 2: digits followed by '.'
        [3, -1, -1, -4],
        # 3: digits '.' digits
        [3, -1, -1, -4],
        # 4: digits after the exponent
        [4, -1, -1, -1],
        # -5: exponent followed by a sign (digits still required)
        [4, -1, -1, -1],
        # -4: exponent seen (sign or digits still required)
        [4, -5, -1, -1],
        # -3: leading '.' seen (digits still required)
        [2, -1, -1, -1],
        # -2: leading sign seen
        [1, -1, -3, -1],
        # -1: absorbing error state
        [-1, -1, -1, -1],
    ]

    def isNumber(self, s: str) -> bool:
        """Return True iff *s* is a valid integer/decimal, optionally with an exponent."""
        state = 0
        for ch in s:
            if '0' <= ch <= '9':
                column = 0
            elif ch == '+' or ch == '-':
                column = 1
            elif ch == '.':
                column = 2
            elif ch == 'e' or ch == 'E':
                column = 3
            else:
                # Any other character invalidates the string immediately.
                return False
            state = Solution._FSA[state][column]
            if state == -1:
                return False
        # Accept only if we finished in a complete-number state.
        return state > 0
# @lc code=end
# @lc main=start
# Manual smoke test: print the four LeetCode examples with their expected outputs.
if __name__ == '__main__':
    print('Example 1:')
    print('Input : ')
    print('s = "0"')
    print('Output :')
    print(str(Solution().isNumber("0")))
    print('Exception :')
    print('true')
    print()

    print('Example 2:')
    print('Input : ')
    print('s = "e"')
    print('Output :')
    print(str(Solution().isNumber("e")))
    print('Exception :')
    print('false')
    print()

    print('Example 3:')
    print('Input : ')
    print('s = "."')
    print('Output :')
    print(str(Solution().isNumber(".")))
    print('Exception :')
    print('false')
    print()

    print('Example 4:')
    print('Input : ')
    print('s = ".1"')
    print('Output :')
    print(str(Solution().isNumber(".1")))
    print('Exception :')
    print('true')
    print()

    pass
# @lc main=end | StarcoderdataPython |
163028 | <gh_stars>0
#!/usr/bin/python
"""
__version__ = "$Revision: 1.14 $"
__date__ = "$Date: 2004/08/12 19:19:01 $"
"""
from PythonCard import dialog, model
import wx
class SimpleBrowser(model.Background):
    """Minimal web browser window built on PythonCard's IE HTML component.

    Handler names follow PythonCard's ``on_<name>_<event>`` convention.
    NOTE(review): the ``goBack``/``goForward``/``goReload``/``goURL`` handlers
    are presumably bound to the ``btn*`` components via the resource file's
    command names — confirm against the accompanying ``.rsrc.py``.
    """

    def on_initialize(self, event):
        """Load the bundled start page and lay out the toolbar and display."""
        filename = self.application.applicationDirectory + '/index.html'
        self.components.htmlDisplay.text = filename
        btnFlags = wx.LEFT | wx.ALIGN_CENTER_VERTICAL
        # Horizontal toolbar: back / forward / reload / URL field / go.
        sizer2 = wx.BoxSizer(wx.HORIZONTAL)
        sizer2.Add(self.components.btnBack, 0, btnFlags, 5)
        sizer2.Add(self.components.btnForward, 0, btnFlags, 5)
        sizer2.Add(self.components.btnReload, 0, btnFlags, 5)
        sizer2.Add(self.components.fldURL, 1, btnFlags, 5)  # proportion 1: the URL field absorbs spare width
        sizer2.Add(self.components.btnGo, 0, btnFlags, 5)
        # Vertical layout: toolbar on top, HTML display fills the rest.
        sizer1 = wx.BoxSizer(wx.VERTICAL)
        sizer1.Add(sizer2, 0, wx.EXPAND)
        sizer1.Add((5, 5), 0) # spacer
        sizer1.Add(self.components.htmlDisplay, 1, wx.EXPAND)
        sizer1.Fit(self)
        sizer1.SetSizeHints(self)
        self.panel.SetSizer(sizer1)
        self.panel.SetAutoLayout(1)
        self.panel.Layout()

    def on_htmlDisplay_titleChange(self, event):
        """Mirror the loaded page's title into the window title."""
        self.title = "SimpleIEBrowser: %s" % event.Text

    def on_htmlDisplay_statusTextChange(self, event):
        """Mirror browser status messages into the status bar."""
        self.statusBar.text = event.Text

    def on_htmlDisplay_documentComplete(self, evt):
        """Remember the finished page's URL and reflect it in the URL field."""
        self.current = evt.URL
        self.components.fldURL.text = self.current

    def on_goReload_command(self, event):
        """Force a full refresh of the current page.

        enum wxIEHtmlRefreshLevel {
            wxIEHTML_REFRESH_NORMAL = 0,
            wxIEHTML_REFRESH_IFEXPIRED = 1,
            wxIEHTML_REFRESH_CONTINUE = 2,
            wxIEHTML_REFRESH_COMPLETELY = 3
        };
        """
        # 3 is the same as wxIEHTML_REFRESH_COMPLETELY
        self.components.htmlDisplay.Refresh(3)

    def on_goBack_command(self, event):
        # Navigate one step back in the browser history.
        self.components.htmlDisplay.GoBack()

    def on_goForward_command(self, event):
        # Navigate one step forward in the browser history.
        self.components.htmlDisplay.GoForward()

    def addTextToItems(self):
        """Add the current URL to the field's drop-down history (deduplicated),
        then select the whole entry for easy overtyping."""
        target = self.components.fldURL
        text = target.text
        items = target.items
        if not items.count(text):
            # Not in the history yet: push it to the front.
            items.insert(0, text)
            target.items = items
            target.text = text
        target.SetInsertionPointEnd()
        target.SetMark(-1, -1)

    def on_goURL_command(self, event):
        # KEA 2004-04-06
        # clean up the URL
        # by getting rid of leading and trailing whitespace
        # and adding http:// if it is missing from the front
        # of the url
        target = self.components.fldURL
        text = target.text.strip()
        if not text.startswith('http://'):
            text = 'http://' + text
        if target.text != text:
            target.text = text
        self.addTextToItems()
        # Assigning to the HTML component's ``text`` triggers navigation.
        self.components.htmlDisplay.text = self.components.fldURL.text

    def openFile(self, path):
        """Display a local HTML file in the browser component."""
        self.components.htmlDisplay.text = path

    def on_menuFileOpen_select(self, event):
        """File > Open: let the user pick a local HTML file to display."""
        wildcard = "HTML files (*.htm;*.html)|*.htm;*.html|All files (*.*)|*.*"
        result = dialog.openFileDialog(None, "Open file", '', '', wildcard)
        if result.accepted:
            path = result.paths[0]
            self.openFile(path)

    def on_fldURL_keyPress(self, event):
        """Navigate when Enter (key code 13) is pressed in the URL field."""
        keyCode = event.keyCode
        target = event.target
        if keyCode == 13:
            self.on_goURL_command(None)
        else:
            event.skip()
if __name__ == '__main__':
    # Standard PythonCard entry point: wrap the Background subclass in an
    # Application and start the wx event loop.
    app = model.Application(SimpleBrowser)
    app.MainLoop()
| StarcoderdataPython |
1780088 | <filename>toolsconf.py
'''
Configuration parameters for SimSem.
Author: <NAME> <<NAME> se>
Version: 2010-02-09
'''
# TODO: This really belongs under tools/config.py
# Which should be a package
from os.path import dirname, join
from sys import path
### SimString variables
# Path to the SimString Python Swig module directory
# (resolved relative to this config file, not the working directory).
SIMSTRING_LIB_PATH = join(dirname(__file__),
        'external/simstring-1.0/swig/python')

### LibLinear variables
LIBLINEAR_DIR = join(dirname(__file__), 'external/liblinear-1.7')
LIBLINEAR_PYTHON_DIR = join(LIBLINEAR_DIR, 'python')

### Only here until we find a better place to stuff them
LIB_PATH = join(dirname(__file__), 'lib')
| StarcoderdataPython |
3235907 | from .config import PixivAPITestCase, tape
from pixiv import AppCursor
class AppCursorTests(PixivAPITestCase):
    """Tests for AppCursor pagination over the pixiv search API.

    Network traffic is replayed from the recorded vcr cassettes, so the
    tests are deterministic and offline.
    """

    @tape.use_cassette('testcursoritems.json')
    def testcursoritems(self):
        # items(5) must yield exactly five illustrations, then stop.
        items = list(AppCursor(self.api.search_illust, word='original').items(5))
        self.assertEqual(len(items), 5)

    @tape.use_cassette('testcursorpages.json')
    def testcursorpages(self):
        # pages(5) must yield exactly five result pages, then stop.
        pages = list(AppCursor(self.api.search_illust, word='original').pages(5))
        self.assertEqual(len(pages), 5)
| StarcoderdataPython |
4811814 | from functools import wraps
def get_list(function):
    """Decorator for list-page view methods: build search filters and queryset.

    Reads the request's GET parameters, turns every non-empty parameter into
    a queryset filter (keys containing ``__in`` keep the full value list,
    others take the first value), stores the criteria on ``self.filter_dict``,
    builds ``self.queryset`` ordered by ``-id`` (optionally re-ordered by the
    ``order_by`` parameter), then calls the wrapped method.

    :param function: view method taking only ``self``
    :return: the wrapped method
    """
    @wraps(function)
    def wrapped(self):
        excluded = ('page', 'order_by', 'csrfmiddlewaretoken')
        criteria = {}
        for key, values in dict(self.request.GET).items():
            if key in excluded:
                continue
            if not any(value != '' for value in values):
                continue  # every supplied value was empty -> ignore the key
            criteria[key] = values if '__in' in key else values[0]
        self.filter_dict = criteria
        self.queryset = self.model.objects.filter(**criteria).order_by('-id')
        ordering = self.request.GET.get('order_by', '')
        if ordering:
            self.queryset = self.queryset.order_by(ordering) if self.queryset else self.queryset
        return function(self)
    return wrapped
| StarcoderdataPython |
3250051 | <filename>Calibration.py
import numpy as np
import cv2
import math
import Preprocess as pp
import Main
import os
import imutils
import DetectChars
import DetectPlates
import PossiblePlate
def nothing(x):
    """No-op callback required by ``cv2.createTrackbar``; ignores the position."""
    return None
def calibration (image):
    """Interactive calibration UI for the plate-recognition pipeline.

    Opens three trackbar windows (geometric transform, BGR color threshold,
    preprocessing threshold), loads the last saved values from
    ``calibrated_value.txt``, and loops: read all trackbars, warp/threshold
    *image* accordingly and display the result.  Keys: 'c' run recognition
    on the current view, 'p' save the inverted threshold image (only while
    the preprocess switch is on), 'o' print the values, 's' save them,
    Esc quit.  Blocks until the user exits.
    """
    WindowName1 = "Calibrating Position of image"
    WindowName2 = "Color Thresholding"
    WindowName3 = "Calibrating for Preprocess"
    #make window
    cv2.namedWindow(WindowName2)
    cv2.namedWindow(WindowName3)
    cv2.namedWindow(WindowName1)
    # Load saved data from calibrated value
    (w ,h, rotationx, rotationy, rotationz, panX, panY, stretchX, dist, G_S_F_W, G_S_F_H, A_T_B, A_T_W, T_V, Xtrans, Ytrans) = np.loadtxt("calibrated_value.txt")
    # Convert the loaded physical values back to 0..200 trackbar positions
    # (inverse of the formulas applied inside the loop below).
    Xtrans = int(round( Xtrans+100 ))
    Ytrans = int(round( Ytrans+100 ))
    xValue = int(round( 100-(rotationx*20000.0)))
    yValue = int(round((rotationy*20000.0)+100))
    zValue = int(round(100-(rotationz*100)))
    wValue = int(round(100 -((dist-1.0)*200.0)))
    dValue = int(round((stretchX-1.0)*-200.0 +100))
    #make Trackbar
    cv2.createTrackbar('Xtrans',WindowName1,Xtrans,200,nothing) # translation along the x axis
    cv2.createTrackbar('Ytrans',WindowName1,Ytrans,200,nothing) # translation along the y axis
    cv2.createTrackbar("Xrot",WindowName1,xValue,200,nothing) # for rotation in x axis
    cv2.createTrackbar("Yrot",WindowName1,yValue,200,nothing) # for rotation in y axis
    cv2.createTrackbar("Zrot",WindowName1,zValue,200,nothing) # for rotation in z axis
    cv2.createTrackbar("ZOOM",WindowName1,wValue,200,nothing) # for Zooming the image
    cv2.createTrackbar("Strech",WindowName1,dValue,200,nothing) # for strech the image in x axis
    switch = '0 : OFF \n1 : ON'
    cv2.createTrackbar(switch, WindowName3,0,1,nothing) # switch to see the preprocess threshold, for more detail see Preprocess.py
    cv2.createTrackbar('G_S_F_W',WindowName3, int(G_S_F_W),50,nothing) #GAUSSIAN_SMOOTH_FILTER_SIZE_WEIGHT
    cv2.createTrackbar('G_S_F_H',WindowName3, int(G_S_F_H),50,nothing) #GAUSSIAN_SMOOTH_FILTER_SIZE_HEIGHT
    cv2.createTrackbar('A_T_B',WindowName3, int(A_T_B),50,nothing) #ADAPTIVE_THRESH_BLOCK_SIZE
    cv2.createTrackbar('A_T_W',WindowName3, int(A_T_W),50,nothing) #ADAPTIVE_THRESH_WEIGHT
    cv2.createTrackbar('T_V',WindowName3, int(T_V),255,nothing) #THRESHOLD_VALUE
    # BGR channel thresholds: *u = upper bound, *l = lower bound.
    cv2.createTrackbar("RGBSwitch",WindowName2,0,1,nothing)
    cv2.createTrackbar('Ru',WindowName2,255,255,nothing)
    cv2.createTrackbar('Gu',WindowName2,255,255,nothing)
    cv2.createTrackbar('Bu',WindowName2,255,255,nothing)
    cv2.createTrackbar('Rl',WindowName2,0,255,nothing)
    cv2.createTrackbar('Gl',WindowName2,0,255,nothing)
    cv2.createTrackbar('Bl',WindowName2,50,255,nothing)
    # Allocate destination image (white canvases used as legends per window)
    backGround1 = np.ones((100,500))
    backGround2 = np.ones((100,500))
    backGround3 = np.ones((100,500))
    # Loop for get trackbar pos and process it
    while True:
        # Get position in trackbar for change transform
        Xtrans = cv2.getTrackbarPos('Xtrans', WindowName1)
        Ytrans = cv2.getTrackbarPos('Ytrans', WindowName1)
        X = cv2.getTrackbarPos("Xrot", WindowName1)
        Y = cv2.getTrackbarPos("Yrot", WindowName1)
        Z = cv2.getTrackbarPos("Zrot", WindowName1)
        W = cv2.getTrackbarPos("ZOOM", WindowName1)
        D = cv2.getTrackbarPos("Strech", WindowName1)
        # Get position in trackbar for switch
        S = cv2.getTrackbarPos(switch,WindowName3) #switch for see the calibration threshold
        # Get the value from tracbar and make it odd and at least 3, as
        # required by the Gaussian kernel / adaptive-threshold block sizes.
        G_S_F_W = makeood(cv2.getTrackbarPos('G_S_F_W', WindowName3))
        G_S_F_H = makeood(cv2.getTrackbarPos('G_S_F_H', WindowName3))
        A_T_B = makeood(cv2.getTrackbarPos('A_T_B', WindowName3))
        A_T_W = makeood(cv2.getTrackbarPos('A_T_W', WindowName3))
        T_V = float (cv2.getTrackbarPos('T_V', WindowName3))
        RGB = cv2.getTrackbarPos("RGBSwitch", WindowName2)
        Ru = cv2.getTrackbarPos('Ru', WindowName2)
        Gu = cv2.getTrackbarPos('Gu', WindowName2)
        Bu = cv2.getTrackbarPos('Bu', WindowName2)
        Rl = cv2.getTrackbarPos('Rl', WindowName2)
        Gl = cv2.getTrackbarPos('Gl', WindowName2)
        Bl = cv2.getTrackbarPos('Bl', WindowName2)
        # cv2.inRange expects bounds in the image's channel order (BGR here).
        lower = np.array([Bl,Gl,Rl],dtype=np.uint8)
        upper = np.array([Bu,Gu,Ru],dtype=np.uint8)
        # Map the 0..200 trackbar positions to the physical values.
        Xtrans = (Xtrans - 100)
        Ytrans = (Ytrans - 100)
        rotationx = -(X - 100) / 20000.0
        rotationy = (Y - 100) / 20000.0
        rotationz = -(Z - 100) / 100.0
        dist = 1.0 - (W - 100) / 200.0
        stretchX = 1.0 + (D - 100) / -200.0
        w = np.size(image, 1)
        h = np.size(image, 0)
        panX = 0
        panY = 0
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)  # NOTE(review): `hsv` is never used
        # NOTE(review): KNN training is re-run on every loop iteration; it
        # looks like it could be hoisted above the loop — confirm.
        blnKNNTrainingSuccessful = DetectChars.loadKNNDataAndTrainKNN() # attempt KNN training
        if blnKNNTrainingSuccessful == False: # if KNN training was not successful
            print("\nerror: KNN traning was not successful\n") # show error message
            return
        imaged = imutils.translate(image, Xtrans, Ytrans)
        # Apply transform
        M = imutils.getTransform (w, h, rotationx, rotationy, rotationz, panX, panY, stretchX, dist)
        # NOTE(review): `cv2.INTER_CUBIC or cv2.WARP_INVERSE_MAP` is a Python
        # boolean `or`, which simply evaluates to cv2.INTER_CUBIC — the flags
        # were probably meant to be combined with `|`. Confirm intent.
        imgOriginalScene = cv2.warpPerspective(imaged, M, (w,h),cv2.INTER_CUBIC or cv2.WARP_INVERSE_MAP)
        if (S == 1):
            # Preview the preprocessing threshold pipeline (see Preprocess.py).
            imgGrayscale = pp.extractValue(imgOriginalScene)
            #imgGrayscale = np.invert(imgGrayscale) # last best use this
            imgMaxContrastGrayscale = pp.maximizeContrast(imgGrayscale)
            imgMaxContrastGrayscale = np.invert(imgMaxContrastGrayscale)
            height, width = imgGrayscale.shape
            imgBlurred = np.zeros((height, width, 1), np.uint8)
            imgBlurred = cv2.GaussianBlur(imgMaxContrastGrayscale, (G_S_F_H,G_S_F_W), 0)
            #imgBlurred = np.invert(imgBlurred)
            imgOriginalScene = cv2.adaptiveThreshold(imgBlurred, T_V , cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, A_T_B, A_T_W)
            #imgThresh = np.invert(imgThresh)
            #cv2.imshow("cobaaa", imgThresh)
        if (RGB == 1):
            imgOriginalScene = cv2.inRange(imgOriginalScene, lower, upper)
        # give definition for each initial on image or windows
        cv2.putText(imgOriginalScene,"Press 's' to save the value",(10,30),cv2.FONT_HERSHEY_SIMPLEX, 0.75,(255,255,255),2,bottomLeftOrigin = False)
        cv2.putText(imgOriginalScene,"Press 'o' to out the value",(10,60),cv2.FONT_HERSHEY_SIMPLEX, 0.75,(255,255,255),2,bottomLeftOrigin = False)
        cv2.putText(imgOriginalScene,"Press 'c' to check the result",(10,90),cv2.FONT_HERSHEY_SIMPLEX, 0.75,(255,255,255),2,bottomLeftOrigin = False)
        cv2.putText(imgOriginalScene,"Press 'esc' to close all windows",(10,120),cv2.FONT_HERSHEY_SIMPLEX, 0.75,(255,255,255),2,bottomLeftOrigin = False)
        cv2.putText(backGround1,"X for rotating the image in x axis",(10,10),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,0),1,bottomLeftOrigin = False)
        cv2.putText(backGround1,"Y for rotating the image in y axis",(10,30),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,0),1,bottomLeftOrigin = False)
        cv2.putText(backGround1,"Z for rotating the image in z axis",(10,50),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,0),1,bottomLeftOrigin = False)
        cv2.putText(backGround1,"ZOOM for Zoom in or Zoom out the image",(10,70),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,0),1,bottomLeftOrigin = False)
        cv2.putText(backGround1,"S for streching the image",(10,90),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,0),1,bottomLeftOrigin = False)
        cv2.putText(backGround2,"R,G,B = Red,Green,Blue",(10,30),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,0),1,bottomLeftOrigin = False)
        cv2.putText(backGround2,"u,l = Upper and lower",(10,50),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,0),1,bottomLeftOrigin = False)
        cv2.putText(backGround3,"G_S_F_H = GAUSSIAN_SMOOTH_FILTER_SIZE_HEIGHT",(10,10),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,0),1,bottomLeftOrigin = False)
        # NOTE(review): the next label text says "G_S_F_H" but describes the
        # WEIGHT value — the rendered string should read "G_S_F_W". Left
        # unchanged here because it is a runtime string, not a comment.
        cv2.putText(backGround3,"G_S_F_H = GAUSSIAN_SMOOTH_FILTER_SIZE_WEIGHT",(10,30),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,0),1,bottomLeftOrigin = False)
        cv2.putText(backGround3,"A_T_B = ADAPTIVE_THRESH_BLOCK_SIZE",(10,50),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,0),1,bottomLeftOrigin = False)
        cv2.putText(backGround3,"A_T_W = ADAPTIVE_THRESH_WEIGHT",(10,70),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,0),1,bottomLeftOrigin = False)
        cv2.putText(backGround3,"T_V = THRESHOLD_VALUE",(10,90),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,0),1,bottomLeftOrigin = False)
        # Show in window
        cv2.imshow("image", imgOriginalScene)
        cv2.imshow(WindowName1, backGround1)
        cv2.imshow(WindowName2, backGround2)
        cv2.imshow(WindowName3, backGround3)
        ch = cv2.waitKey(5)
        # command switch
        if ch == ord('c'): # press c to check the result of processing
            Main.searching(imgOriginalScene,True)
            cv2.imshow("check",imgOriginalScene)
            cv2.waitKey(0)
            return
        if S == 1 and ch == ord('p') : # press p to save the inverted threshold image
            imgOriginalScene = np.invert(imgOriginalScene)
            cv2.imwrite("calib.png",imgOriginalScene)
            cv2.imshow("calib",imgOriginalScene)
            return
        if ch == ord('o'): # press o to see the value
            print("CAL_VAL =")
            print( w ,h, rotationx, rotationy, rotationz, panX, panY, stretchX, dist, G_S_F_W, G_S_F_H, A_T_B, A_T_W, T_V, Xtrans, Ytrans)
        if ch == ord('s'): # press s to save the value
            CAL_VAL = np.array([[w ,h, rotationx, rotationy, rotationz, panX, panY, stretchX, dist, G_S_F_W, G_S_F_H, A_T_B, A_T_W, T_V, Xtrans, Ytrans ]])
            np.savetxt('calibrated_value.txt',CAL_VAL)
            print( w ,h, rotationx, rotationy, rotationz, panX, panY, stretchX, dist, G_S_F_W, G_S_F_H, A_T_B, A_T_W, T_V, Xtrans, Ytrans)
            print("Value saved !")
        if ch == 27: # press esc for exit the calibration
            break
    cv2.destroyAllWindows()
    return
def makeood(value):
    """Clamp a trackbar value to an odd integer of at least 3.

    Gaussian kernel sizes and adaptive-threshold block sizes in OpenCV must
    be odd and >= 3; even values are rounded *down* to the next odd number.
    """
    if value % 2 == 0:
        value -= 1
    return value if value >= 3 else 3
| StarcoderdataPython |
3372729 | <gh_stars>0
#!/usr/bin/env python3
""" Compiles the ipsys source code.
Usage:
{this_script} [options]
Options:
-h --help Prints this docstring.
-l --launch Executes the bin if compiled, with what follows as args.
-d --debug Standard debuging build, defines DEBUG, launches with -d.
--use-glew Use the GLEW OpenGL extention loader.
--opengl-notifications Enables OpenGL notifications.
Example usage for debug:
{this_script} -d -l
"""
import sys
import os
import re
def print_blue(*args, **kwargs):
    """Print *args* like ``print`` but wrapped in ANSI color escape codes.

    (The code used, 36, is actually cyan.)  Extra keyword arguments are
    forwarded to the inner ``print`` call; the terminating reset code is
    written to stdout and flushed immediately.
    """
    sys.stdout.write("\x1b[36m")
    print(*args, **kwargs)
    sys.stdout.write("\x1b[39m")
    sys.stdout.flush()
# Launch option -l
if "-l" in sys.argv[1:]:
option_launch = True
i = sys.argv[1:].index("-l")
elif "--launch" in sys.argv[1:]:
option_launch = True
i = sys.argv[1:].index("--launch")
else:
option_launch = False
if option_launch:
options = sys.argv[1:i+1]
launch_args = sys.argv[i+2:]
else:
options = sys.argv[1:]
# Options
def cmdline_has_option(*option_names):
for option_name in option_names:
if option_name in options:
return True
return False
option_help = cmdline_has_option("-h", "--help")
option_debug = cmdline_has_option("-d", "--debug")
option_use_glew = cmdline_has_option("--use-glew")
option_opengl_notifications = cmdline_has_option("--opengl-notifications")
option_mac = cmdline_has_option("--mac")
release_build = not option_debug
src_dir_name = "src"
bin_dir_name = "bin"
bin_name = "ipsys"
# Help message if -h
if option_help:
this_script = sys.argv[0]
python = "" if this_script.startswith("./") else "python3 "
print(__doc__.strip().format(this_script = python + sys.argv[0]))
sys.exit(0)
# Embed content
embedded_header_file_name = "embedded.h" # See this file for some explanations
embedded_source_file_name = "embedded.c" # This one will be overwritten
embed_re = r"EMBEDDED\s*\(\s*\"([^\"]+)\"\s*\)\s*([^\s][^;]+[^\s])\s*;"
def escaped_file_content(filepath):
    """Return the file's text escaped for embedding in a C string literal.

    Backslashes, double quotes, newlines and tabs are escaped; any other
    character is passed through unchanged.  Raises FileNotFoundError (after
    printing a colored diagnostic) if *filepath* does not exist.
    """
    if option_debug:
        print(f"Embed file \"{filepath}\"")
    try:
        with open(filepath, "r") as file:
            return file.read().translate({
                ord("\""): "\\\"", ord("\\"): "\\\\",
                ord("\n"): "\\n", ord("\t"): "\\t"})
    except FileNotFoundError as error:
        print("\x1b[31mEmbedded file error:\x1b[39m " +
            "The embedded content generator couldn't find the file " +
            f"\"{filepath}\" used in an EMBEDDED macro in the " +
            f"\"{embedded_header_file_name}\" header file.")
        raise error
# Generate embedded.c: for every EMBEDDED("path") declaration found in the
# header, emit the same declaration defined with the file's escaped content.
generated_c = []
generated_c.append("")
generated_c.append("/* This file is overwritten at each compilation.")
generated_c.append(f" * Do not modify, see \"{embedded_header_file_name}\" " +
    "or \"_comp.py\" instead. */")
embedded_header_path = os.path.join(src_dir_name, embedded_header_file_name)
with open(embedded_header_path, "r") as embedded_header_file:
    for match in re.finditer(embed_re, embedded_header_file.read()):
        partial_file_path = match.group(1)
        file_path = os.path.join(src_dir_name, partial_file_path)
        escaped_content = escaped_file_content(file_path)
        variable_declaration = match.group(2)
        generated_c.append("")
        generated_c.append(f"/* Content of \"{file_path}\". */")
        generated_c.append(f"{variable_declaration} = \"{escaped_content}\";")
embedded_source_path = os.path.join(src_dir_name, embedded_source_file_name)
with open(embedded_source_path, "w") as embedded_source_file:
    embedded_source_file.write("\n".join(generated_c) + "\n")
# List src files
src_file_names = []
for dir_name, _, file_names in os.walk(src_dir_name):
for file_name in file_names:
if file_name.split(".")[-1] == "c":
src_file_names.append(os.path.join(dir_name, file_name))
# Bin directory
if not os.path.exists(bin_dir_name):
os.makedirs(bin_dir_name)
# Build
# Assemble the gcc command line.  The command is executed through os.system,
# so shell constructs in the arguments (backticks, $(...)) are expanded by
# the shell at build time.
build_command_args = ["gcc"]
for src_file_name in src_file_names:
    build_command_args.append(src_file_name)
build_command_args.append("-o")
build_command_args.append(os.path.join(bin_dir_name, bin_name))
build_command_args.append("-I" + src_dir_name)
if option_mac:
    build_command_args.append("-I$(brew --prefix sdl2)/include")
build_command_args.append("-std=c11")
build_command_args.append("-Wall")
build_command_args.append("-Wextra")
build_command_args.append("-pedantic")
build_command_args.append("-Wno-overlength-strings")
build_command_args.append("-Wno-maybe-uninitialized")
if option_debug:
    build_command_args.append("-DDEBUG")
    build_command_args.append("-g")
if release_build:
    build_command_args.append("-O2")
    build_command_args.append("-fno-stack-protector")
    build_command_args.append("-flto")
    build_command_args.append("-s")
if option_opengl_notifications:
    build_command_args.append("-DENABLE_OPENGL_NOTIFICATIONS")
if option_mac:
    build_command_args.append("-framework OpenGL")
else:
    build_command_args.append("-lGL")
if option_use_glew:
    build_command_args.append("-DGLEW_STATIC") # Doesn't seem to be enough ><
    build_command_args.append("-lGLEW")
    build_command_args.append("-DUSE_GLEW")
    if option_mac:
        build_command_args.append("-I$(brew --prefix glew)/include")
build_command_args.append("`sdl2-config --cflags --libs`") # See the SDL2 doc
build_command_args.append("-lm")
build_command = " ".join(build_command_args)
print(("RELEASE" if release_build else "DEBUG") + " BUILD")
print_blue(build_command)
build_exit_status = os.system(build_command)
# Launch if -l
if option_launch and build_exit_status == 0:
    launch_command_args = ["./" + bin_name]
    if option_debug:
        launch_command_args.append("-d")
    for launch_arg in launch_args:
        launch_command_args.append(launch_arg)
    launch_command = " ".join(launch_command_args)
    # The binary expects to run from inside the bin directory.
    os.chdir(bin_dir_name)
    print_blue(launch_command)
    launch_exit_status_raw = os.system(launch_command)
    # os.system returns a wait status; the high byte is the exit code (POSIX).
    launch_exit_status = launch_exit_status_raw >> 8
    if bin_dir_name != ".":
        os.chdir("..")
    if launch_exit_status != 0:
        print_blue(f"exit status {launch_exit_status}")
| StarcoderdataPython |
91599 | <filename>tools/build/get_header_directory.py
#!/usr/bin/env python3
import pathlib
import sys
import os
def main():
    """Print the install directory for the current header directory.

    Expects exactly two command line arguments: the header install base and
    the absolute path of the current header directory.  Every path component
    below the ``source`` anchor is re-rooted under the install base, and the
    resulting directory is printed on stdout.

    Raises:
        RuntimeError: if the argument count is wrong.
    """
    argv = sys.argv
    if len(argv) != 3:
        raise RuntimeError(
            f"Usage: {argv[0]} <header install directory> <absolute path of current header directory>"
        )

    anchor = 'source'  # should this name ever change, this code will be broken
    # all source code with headers must be below `anchor`

    # Normalize the install base (the redundant identity list comprehension
    # that used to wrap .parts has been removed).
    install_base = os.path.join(*pathlib.PurePath(argv[1].strip()).parts)

    path = pathlib.PurePath(argv[2].strip())
    subpath = []
    append = False
    for part in path.parts:
        if part == anchor:
            # Skip the anchor itself (and any later component of the same
            # name) and start collecting from the following component on.
            append = True
            continue
        if append:
            subpath.append(part)

    print(os.path.join(install_base, *subpath))  # stripped directory name
if __name__ == "__main__":
    # Script entry point (invoked by the build system).
    main()
| StarcoderdataPython |
3337367 | <reponame>Gustavo-Martins/learning<gh_stars>0
# Factorial calculator
n = int(input('Digite um número para calcular seu fatorial: '))
counter = n
f = 1 # Prevents multiplication by zero
print('Calculando {}! = '.format(n), end='')
while counter > 0:
print('{}'.format(counter), end='')
print(' x ' if counter > 1 else ' = ', end='')
f *= counter
counter -= 1
print('{}'.format(f))
| StarcoderdataPython |
1675522 | from pymongo import ReturnDocument
from past.builtins import basestring
from collections import OrderedDict
from plynx.db.node import Node
from plynx.constants import NodeRunningStatus, Collections, NodeStatus
from plynx.utils.common import to_object_id, parse_search_string
from plynx.utils.db_connector import get_db_connector
_PROPERTIES_TO_GET_FROM_SUBS = ['node_running_status', 'logs', 'outputs', 'cache_url']
class NodeCollectionManager(object):
    """NodeCollectionManager contains all the operations to work with Nodes in the database."""

    def __init__(self, collection):
        # `collection` is the MongoDB collection name this manager operates on
        # (e.g. Collections.TEMPLATES or Collections.RUNS).
        super(NodeCollectionManager, self).__init__()
        self.collection = collection

    def get_db_objects(
            self,
            status='',
            node_kinds=None,
            search='',
            per_page=20,
            offset=0,
            user_id=None,
            ):
        """Get subset of the Objects.

        Args:
            status (str, list, None): Node Running Status (a single value is
                wrapped into a one-element list)
            node_kinds (str, list, None): Node kind(s) to match
            search (str, None): Search pattern
            per_page (int): Number of Nodes per page
            offset (int): Offset
            user_id (str, ObjectId, None): requesting user; objects authored
                by someone else are flagged `_readonly`

        Return:
            (dict) single aggregation `$facet` result with `metadata`
            (total count) and `list` (the requested page), or None
        """
        if status and isinstance(status, basestring):
            status = [status]
        if node_kinds and isinstance(node_kinds, basestring):
            node_kinds = [node_kinds]
        aggregate_list = []
        # `search` may contain key:value pairs (author, sort, original_node_id)
        # plus a free-text remainder.
        search_parameters, search_string = parse_search_string(search)
        # Match
        and_query = {}
        if node_kinds:
            and_query['kind'] = {'$in': node_kinds}
        if status:
            and_query['node_status'] = {'$in': status}
        if search_string:
            and_query['$text'] = {'$search': search_string}
        if 'original_node_id' in search_parameters:
            and_query['original_node_id'] = to_object_id(search_parameters['original_node_id'])
        if len(and_query):
            aggregate_list.append({"$match": and_query})
        # Join with users
        aggregate_list.append({
            '$lookup': {
                'from': 'users',
                'localField': 'author',
                'foreignField': '_id',
                'as': '_user'
            }
        })
        # rm password hash: never let credentials leave the database layer
        aggregate_list.append({
            "$project": {
                "_user.password_hash": 0,
            }
        })
        # Match username (must happen after the $lookup above)
        and_query = {}
        if 'author' in search_parameters:
            and_query['_user.username'] = search_parameters['author']
        if len(and_query):
            aggregate_list.append({"$match": and_query})
        # sort: optional 'starred' first, then newest first
        sort_dict = OrderedDict()
        if 'sort' in search_parameters:
            # TODO more sort options
            if search_parameters['sort'] == 'starred':
                sort_dict['starred'] = -1
        sort_dict['insertion_date'] = -1
        aggregate_list.append({
            "$sort": sort_dict
            }
        )
        # Objects authored by someone else are read-only for this user.
        aggregate_list.append({
            "$addFields": {
                '_readonly': {'$ne': ["$author", to_object_id(user_id)]},
            }
        })
        # counts and pagination: one round-trip returns both the total and
        # the requested page
        aggregate_list.append({
            '$facet': {
                "metadata": [{"$count": "total"}],
                "list": [{"$skip": int(offset)}, {"$limit": int(per_page)}],
            }
        })
        return next(get_db_connector()[self.collection].aggregate(aggregate_list), None)

    def get_db_objects_by_ids(self, ids, collection=None):
        """Find all the Objects with a given IDs.

        Args:
            ids (list of ObjectID): Object Ids
            collection (str, None): collection to query; defaults to this
                manager's collection

        Return:
            (list of dict) matching objects (order not guaranteed)
        """
        db_objects = get_db_connector()[collection or self.collection].find({
            '_id': {
                '$in': list(ids)
            }
        })
        return list(db_objects)

    def _update_sub_nodes_fields(self, sub_nodes_dicts, reference_node_id, target_props, reference_collection=None):
        """Copy `target_props` from the referenced DB objects into each sub-node dict.

        `reference_node_id` is the key of the sub-node dict that holds the id
        of the object to read from (e.g. '_id' or 'original_node_id').
        Sub-nodes whose reference is not found are left untouched.
        Mutates `sub_nodes_dicts` in place.
        """
        if not sub_nodes_dicts:
            return
        reference_collection = reference_collection or self.collection
        id_to_updated_node_dict = {}
        upd_node_ids = set(map(lambda node_dict: node_dict[reference_node_id], sub_nodes_dicts))
        for upd_node_dict in self.get_db_objects_by_ids(upd_node_ids, collection=reference_collection):
            id_to_updated_node_dict[upd_node_dict['_id']] = upd_node_dict
        for sub_node_dict in sub_nodes_dicts:
            if sub_node_dict[reference_node_id] not in id_to_updated_node_dict:
                continue
            for prop in target_props:
                sub_node_dict[prop] = id_to_updated_node_dict[sub_node_dict[reference_node_id]][prop]

    def get_db_node(self, node_id, user_id=None):
        """Get dict representation of a Node.

        Like `get_db_object`, but additionally refreshes the embedded
        sub-nodes (the `_nodes` parameter): for RUNS their live running
        state, and for all collections the current status of the template
        they were derived from.

        Args:
            node_id (ObjectId, str): Object ID
            user_id (str, ObjectId, None): User ID

        Return:
            (dict) dict representation of the Object, or a falsy value when
            not found
        """
        res = self.get_db_object(node_id, user_id)
        if not res:
            return res

        sub_nodes_dicts = None
        for parameter in res['parameters']:
            if parameter['name'] == '_nodes':
                sub_nodes_dicts = parameter['value']['value']
                break

        # TODO join collections using database capabilities
        if self.collection == Collections.RUNS:
            self._update_sub_nodes_fields(sub_nodes_dicts, '_id', _PROPERTIES_TO_GET_FROM_SUBS)
        self._update_sub_nodes_fields(sub_nodes_dicts, 'original_node_id', ['node_status'], reference_collection=Collections.TEMPLATES)

        return res

    def get_db_object(self, object_id, user_id=None):
        """Get dict representation of an Object.

        Args:
            object_id (ObjectId, str): Object ID
            user_id (str, ObjectId, None): User ID

        Return:
            (dict) dict representation of the Object, with `_readonly` set
            when the requesting user is not the author; None when not found
        """
        res = get_db_connector()[self.collection].find_one({'_id': to_object_id(object_id)})
        if not res:
            return res

        res['_readonly'] = (user_id != to_object_id(res['author']))
        return res

    @staticmethod
    def _transplant_node(node, new_node):
        """Graft `node`'s user-set properties onto the upgraded `new_node`.

        Returns `node` unchanged when it already points at `new_node`;
        otherwise returns `new_node` carrying `node`'s identity (`_id`) and
        properties, re-anchored to the upgraded template.
        """
        if new_node._id == node.original_node_id:
            return node
        new_node.apply_properties(node)
        new_node.original_node_id = new_node._id
        new_node.parent_node_id = new_node.successor_node_id = None
        # Keep the graph-local id stable so edges keep pointing at this node.
        new_node._id = node._id
        return new_node

    def upgrade_sub_nodes(self, main_node):
        """Upgrade deprecated Nodes.

        For every sub-node of `main_node`, follow the `successor_node_id`
        chain until a READY template is found and transplant the sub-node
        onto it.  The function does not change the original graph in the
        database (only `main_node` in memory).

        Return:
            (int): Number of upgraded Nodes
        """
        assert self.collection == Collections.TEMPLATES
        sub_nodes = main_node.get_parameter_by_name('_nodes').value.value
        node_ids = set(
            [node.original_node_id for node in sub_nodes]
        )
        db_nodes = self.get_db_objects_by_ids(node_ids)
        new_node_db_mapping = {}

        for db_node in db_nodes:
            original_node_id = db_node['_id']
            new_db_node = db_node
            if original_node_id not in new_node_db_mapping:
                # Walk the successor chain; stop at the first READY template
                # or when the chain ends / a successor cannot be fetched.
                while new_db_node['node_status'] != NodeStatus.READY and 'successor_node_id' in new_db_node and new_db_node['successor_node_id']:
                    n = self.get_db_node(new_db_node['successor_node_id'])
                    if n:
                        new_db_node = n
                    else:
                        break
                new_node_db_mapping[original_node_id] = new_db_node

        new_nodes = [
            NodeCollectionManager._transplant_node(
                node,
                Node.from_dict(new_node_db_mapping[to_object_id(node.original_node_id)])
            ) for node in sub_nodes]

        upgraded_nodes_count = sum(
            1 for node, new_node in zip(sub_nodes, new_nodes) if node.original_node_id != new_node.original_node_id
        )

        main_node.get_parameter_by_name('_nodes').value.value = new_nodes
        return upgraded_nodes_count

    def pick_node(self, kinds):
        """Atomically claim one runnable node of the given kinds for a worker.

        Uses find_one_and_update so that concurrent workers can never claim
        the same node: the matched node is flipped from READY/IN_QUEUE to
        RUNNING in a single operation.

        Args:
            kinds (list of str): acceptable node kinds

        Return:
            (dict, None) the claimed node (post-update state), or None when
            nothing is runnable
        """
        node = get_db_connector()[self.collection].find_one_and_update(
            {
                '$and': [
                    {
                        'kind': {
                            '$in': kinds,
                        }
                    },
                    {
                        'node_running_status': {
                            '$in': [
                                NodeRunningStatus.READY,
                                NodeRunningStatus.IN_QUEUE,
                            ]
                        }
                    },
                ],
            },
            {
                '$set': {
                    'node_running_status': NodeRunningStatus.RUNNING
                }
            },
            return_document=ReturnDocument.AFTER
        )
        return node
| StarcoderdataPython |
1634581 | import datetime
from django.test import TestCase
from freezegun import freeze_time
from .views import AboutChasView, AboutEvanView, HomePageView
class TestAboutChasViewGetContextData(TestCase):
    """Tests for the age computed by AboutChasView.get_context_data.

    The frozen dates imply a birthday of 1980-04-21 hard-coded in the view
    — confirm against the view implementation.
    """

    def test_context_contains_age(self):
        view = AboutChasView()
        context = view.get_context_data()
        self.assertTrue("age" in context.keys())

    def test_age_ten_years_after_birthday_is_ten(self):
        view = AboutChasView()
        with freeze_time("1990-04-21"):
            # On the birthday itself the new age must already apply.
            context = view.get_context_data()
        self.assertEqual(10, context["age"])

    def test_age_ten_years_minus_one_day_after_birthday_is_nine(self):
        view = AboutChasView()
        with freeze_time("1990-04-20"):
            # The day before the birthday the age must not yet increment.
            context = view.get_context_data()
        self.assertEqual(9, context["age"])
class TestAboutEvanViewGetContextData(TestCase):
def test_context_contains_age(self):
view = AboutEvanView()
context = view.get_context_data()
self.assertTrue("age" in context.keys())
def test_age_ten_years_after_birthday_is_ten(self):
view = AboutEvanView()
with freeze_time("1990-12-06"):
context = view.get_context_data()
self.assertEqual(10, context["age"])
def test_age_ten_years_minus_one_day_after_birthday_is_nine(self):
view = AboutEvanView()
with freeze_time("1990-12-05"):
context = view.get_context_data()
self.assertEqual(9, context["age"])
class TestHomePageViewGetContextData(TestCase):
def test_context_contains_news_articles(self):
view = HomePageView()
# Inject so we don't try to hit database
view.get_published_news_articles = lambda: []
context = view.get_context_data()
self.assertTrue("news_articles" in context.keys())
def test_context_contains_only_three_news_articles_when_4_come_back(self):
view = HomePageView()
# Inject so we return 4 test double news articles
view.get_published_news_articles = lambda: [{}, {}, {}, {}]
context = view.get_context_data()
self.assertTrue("news_articles" in context.keys())
| StarcoderdataPython |
167529 | """Faça um programa que leia um número Inteiro qualquer e mostre na tela a sua tabuada."""
n = int(input('Digite um número inteiro para ver sua tabuada: '))
#print(' {} \n {} \n {} \n {} \n {} \n {} \n {} \n {} \n {} \n {}'.format(n * 1, n * 2, n * 3, n * 4, n * 5, n * 6, n * 7, n * 8, n * 9, n * 10))
print('-'*12)
print(f'{n} x {1:2} = {n*1:3}')
print(f'{n} x {2:2} = {n*2:3}')
print(f'{n} x {3:2} = {n*3:3}')
print(f'{n} x {4:2} = {n*4:3}')
print(f'{n} x {5:2} = {n*5:3}')
print(f'{n} x {6:2} = {n*6:3}')
print(f'{n} x {7:2} = {n*7:3}')
print(f'{n} x {8:2} = {n*8:3}')
print(f'{n} x {9:2} = {n*9:3}')
print(f'{n} x {10:2} = {n*10:3}')
print('-'*12)
| StarcoderdataPython |
43783 | #!/usr/bin/env python
""" MultiQC submodule to parse output from Roary """
import logging
import statistics
from multiqc.modules.base_module import BaseMultiqcModule
from multiqc import config
from multiqc.plots import bargraph, heatmap, linegraph
log = logging.getLogger('multiqc')
class MultiqcModule(BaseMultiqcModule):
    """MultiQC module that summarises Roary pan-genome output.

    Collects and visualises several kinds of Roary result files:

    1. summary statistics  -> core / soft-core / shell / cloud gene counts
    2. kraken-style QC csv -> organisms identified per sample
    3. gene presence/absence table -> one heatmap per Roary run
    5-8. ``.Rtab`` count tables -> conserved / total / new / unique gene
         curves as the number of isolates increases

    Sample/run names are derived from path components of the directory each
    log file was found in, joined with underscores.
    """

    def __init__(self):
        super(MultiqcModule, self).__init__(
            name='Roary', anchor='roary',
            href="https://sanger-pathogens.github.io/Roary/",
            info="calculates the pan genome from annotated genome assemblies.")

        # Part 1. Gene summary counts.
        self.summary_data = dict()
        for myfile in self.find_log_files('roary/summary'):
            directory = '_'.join(myfile['root'].split('/')[-4:-1])
            self.summary_data.update({directory: self.parse_summary(myfile)})
        if len(self.summary_data) == 0:
            # Nothing found: tell MultiQC to skip this module entirely.
            raise UserWarning
        self.summary_data = self.ignore_samples(self.summary_data)
        log.info("Found {} logs".format(len(self.summary_data)))
        self.write_data_file(self.summary_data, 'multiqc_roary_summary')
        self.add_section(
            name='Summary Statistics',
            anchor='roary-summary',
            description='This plot shows the number of genes that created the core genome',
            plot=self.summary_plot())

        # Part 2. Visualizing the kraken qc report.
        self.kraken_data = dict()
        self.kraken_keys = list()
        for myfile in self.find_log_files('roary/qc'):
            directory = '_'.join(myfile['root'].split('/')[-4:-1])
            self.kraken_data.update({directory: self.parse_kraken(myfile)})
        self.kraken_data = self.ignore_samples(self.kraken_data)
        self.write_data_file(self.kraken_data, 'multiqc_roary_qc')
        self.add_section(
            name='QC',
            anchor='roary-qc',
            description='This plot shows the organisms identified by kraken that went into the plot',
            plot=self.kraken_plot())

        # Part 3. Gene presence/absence heatmap: one section per directory.
        self.roary_gene_data = dict()
        self.roary_gene_genes = dict()
        self.roary_gene_samples = dict()
        for myfile in self.find_log_files('roary/gene_presence'):
            directory = '_'.join(myfile['root'].split('/')[-4:-1])
            self.getdata(myfile, directory)
            self.add_section(
                name='Gene presence heatmap for ' + directory,
                anchor='roary-' + directory,
                description='This heatmap shows the score for each sample with the genes supplied from ' + directory,
                plot=self.roary_heatmap_plot(directory))

        # Part 4. Number of genes vs. number of genomes.
        # A histogram was considered but not deemed necessary at this time.

        # Part 5. Conserved genes.
        self.roary_gene_counts = dict()
        self.roary_gene_counts['conserved'] = dict()
        for myfile in self.find_log_files('roary/conserved_genes'):
            directory = '_'.join(myfile['root'].split('/')[-4:-1])
            self.roary_gene_counts['conserved'].update({directory: self.parse_gene_files(myfile)})
        self.add_section(
            name='Conserved Genes',
            anchor='roary-conserved',
            description='This plot shows the number of estimated conserved genes as the number of isolates increases',
            plot=self.roary_gene_line_graph('conserved'))

        # Part 6. Total genes.
        self.roary_gene_counts['total'] = dict()
        for myfile in self.find_log_files('roary/total_genes'):
            directory = '_'.join(myfile['root'].split('/')[-4:-1])
            self.roary_gene_counts['total'].update({directory: self.parse_gene_files(myfile)})
        self.add_section(
            name='Total Genes',
            anchor='roary-total',
            description='This plot shows the number of estimated total genes as the number of isolates increases',
            plot=self.roary_gene_line_graph('total'))

        # Part 7. Number of new genes.
        self.roary_gene_counts['new'] = dict()
        for myfile in self.find_log_files('roary/new_genes'):
            directory = '_'.join(myfile['root'].split('/')[-4:-1])
            self.roary_gene_counts['new'].update({directory: self.parse_gene_files(myfile)})
        self.add_section(
            name='New Genes',
            anchor='roary-new',
            description='This plot shows the number of new genes as the number of isolates increases',
            plot=self.roary_gene_line_graph('new'))

        # Part 8. Number of unique genes.
        self.roary_gene_counts['unique'] = dict()
        for myfile in self.find_log_files('roary/unique_genes'):
            directory = '_'.join(myfile['root'].split('/')[-4:-1])
            self.roary_gene_counts['unique'].update({directory: self.parse_gene_files(myfile)})
        self.add_section(
            name='Unique Genes',
            anchor='roary-unique',
            description='This plot shows the number of unique genes as the number of isolates increases',
            plot=self.roary_gene_line_graph('unique'))

    def parse_summary(self, myfile):
        """Parse a Roary summary statistics file.

        Returns a dict mapping each category of interest to its count
        (the last tab-separated field on the matching line).
        """
        # Hoisted out of the line loop: the categories never change.
        keys = ['Core genes',
                'Soft core genes',
                'Shell genes',
                'Cloud genes',
                'Total genes']
        parsed_data = dict()
        for line in myfile['f'].splitlines():
            for key in keys:
                if key in line:
                    parsed_data[key] = line.split('\t')[-1]
        return parsed_data

    def summary_plot(self):
        """Bar graph of the gene-category counts per sample.

        'Total genes' is deliberately excluded so the stacked bars sum to
        the total rather than double-counting it.
        """
        pconfig = {
            'id': "roary_summary",
            'title': "Roary: Summary Statistics",
            'ylab': "Number of Genes"
        }
        keys = ['Core genes',
                'Soft core genes',
                'Shell genes',
                'Cloud genes']
        return bargraph.plot(self.summary_data, keys, pconfig)

    def parse_kraken(self, myfile):
        """Parse a kraken QC csv (header 'Sample,Genus,Species').

        Counts occurrences of each species in this file and records every
        species seen in ``self.kraken_keys`` so the bar graph has a complete
        key set across all samples.
        """
        parsed_data = dict()
        for line in myfile['f'].splitlines():
            if 'Sample,Genus,Species' not in line:  # skip the header row
                species = line.split(",")[2]
                if species not in self.kraken_keys:
                    self.kraken_keys.append(species)
                # Count each occurrence of this organism in the QC result.
                parsed_data[species] = parsed_data.get(species, 0) + 1
        return parsed_data

    def kraken_plot(self):
        """Bar graph of organisms identified by kraken, per sample."""
        pconfig = {
            'id': "roary_qc",
            'title': "Roary: QC report",
            'xlab': "Sample",
            'ylab': "Organism"
        }
        return bargraph.plot(self.kraken_data, self.kraken_keys, pconfig)

    def getdata(self, myfile, directory):
        """Read a gene presence/absence table into the heatmap inputs.

        Fills, for ``directory``: the gene names (rows), the sample names
        (columns, taken from the 'Gene' header line) and the per-gene score
        rows.
        """
        self.roary_gene_genes[directory] = []
        self.roary_gene_data[directory] = []
        for line in myfile['f'].splitlines():
            fields = line.split("\t")  # split once per line
            if fields[0] == "Gene":
                # Header row: remaining columns are the sample names.
                self.roary_gene_samples[directory] = fields[1:]
            else:
                # Data row: first field is the gene, the rest are scores.
                self.roary_gene_genes[directory].append(fields[0])
                self.roary_gene_data[directory].append(fields[1:])

    def roary_heatmap_plot(self, directory):
        """Black/white presence-absence heatmap for one Roary run."""
        pconfig = {
            'id': "roary_" + directory,
            'title': "Roary: " + directory,
            'square': False,
            'colstops': [[0, '#FFFFFF'], [1, '#000000']],
            'legend': False,
        }
        return heatmap.plot(self.roary_gene_data[directory],
                            self.roary_gene_samples[directory],
                            self.roary_gene_genes[directory],
                            pconfig)

    def parse_gene_files(self, myfile):
        """Parse a Roary ``.Rtab`` tab-separated integer table.

        Returns ``{column_index: mean of that column}``.

        Performance fix: the original re-ran ``splitlines()`` and ``split()``
        on the whole file for every single cell (quadratic in file size);
        the table is now tokenised exactly once.
        """
        rows = [line.split('\t') for line in myfile['f'].splitlines()]
        line_averages = {}
        if not rows:
            return line_averages
        for i in range(len(rows[0])):
            column_numbers = [int(row[i]) for row in rows]
            line_averages[i] = statistics.mean(column_numbers)
        return line_averages

    def roary_gene_line_graph(self, type):
        """Line graph of gene counts as the number of isolates increases.

        ``type`` is one of 'conserved', 'total', 'new' or 'unique'.
        NOTE(review): the parameter shadows the builtin ``type``; the name is
        kept for backwards compatibility with any keyword callers.
        """
        pconfig = {
            'id': "roary_" + type,
            'title': "Roary: Number of " + type + " genes as isolates are included",
        }
        return linegraph.plot(self.roary_gene_counts[type], pconfig)
| StarcoderdataPython |
1779102 | #!/usr/bin/env python3
import sys, os
from threading import Thread
import gym
import threading
import numpy as np
import tensorflow as tf
from a3c import A3C_Worker, A3C_Net
def main():
    """Train an A3C agent asynchronously, then optionally test each worker.

    A global network holds the shared parameters and applies gradient
    updates; each worker owns a local network plus its own (differently
    seeded) gym environment and trains from its own thread.
    """
    # Training hyper-parameters.
    worker_num = 4
    global_train_steps = 1000  # 50000 = 1 million steps
    test = True
    env_name = 'CartPole-v0'
    # Other environments to try: 'Pendulum-v0', 'Pong-v0',
    # 'SpaceInvaders-v0', 'FetchReach-v0'.

    # Build the global net and one local net per worker, sharing a session.
    workers = []
    coordinator = tf.train.Coordinator()
    sess = tf.Session()
    env = gym.make(env_name)
    env.seed(0)
    global_net = A3C_Net(env, 'global', sess)
    for i in range(worker_num):
        name = 'worker_%s' % i
        local_net = A3C_Net(env, name, sess)
        workers.append(A3C_Worker(coordinator, global_net, local_net, name))

    print('training for %s batches\n' % global_train_steps)

    # Start training asynchronously: one thread per worker, each with its
    # own environment seeded differently for decorrelated experience.
    threads = []
    for i, worker in enumerate(workers):
        env = gym.make(env_name)
        env.seed(i)
        # Bug fix: bind worker/env as default args.  The original
        # `lambda: worker.train(env, ...)` closed over the loop variables,
        # so a thread that had not yet run its body by the next iteration
        # would train the wrong worker on the wrong environment.
        work = lambda worker=worker, env=env: worker.train(env, global_train_steps)
        thread = Thread(target=work)
        thread.start()
        threads.append(thread)

    # Apply gradients on the global net while the worker threads run.
    global_net.update_loop(global_train_steps)
    coordinator.join(threads)  # wait for all workers to finish

    # Output training info.
    print()  # bug fix: a bare `print` is a no-op expression in Python 3
    for worker in workers:
        print('[*] %s trained for: %s' % (worker.scope,
                                          worker.local_net.get_step()))
    print('[*] global trained for: %s' % global_net.get_step())

    # Test each trained worker on its own seeded environment.
    if test:
        for i, worker in enumerate(workers):
            env = gym.make(env_name)
            env.seed(i)
            # Bug fix: pass the seeded env.  The original created a second,
            # unseeded environment for the test, leaked the seeded one, and
            # only closed the very last environment after the loop.
            worker.test(env)
            env.close()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.