index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
3,900 | 6f107d0d0328c2445c0e1d0dd10e51227da58129 | # Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Integration tests for the SageMaker TrainingJob API.
"""
import pytest
import logging
from acktest.resources import random_suffix_name
from acktest.k8s import resource as k8s
from e2e import (
service_marker,
create_sagemaker_resource,
wait_for_status,
get_sagemaker_training_job,
assert_training_status_in_sync,
assert_tags_in_sync,
)
from e2e.replacement_values import REPLACEMENT_VALUES
from e2e.common import config as cfg
RESOURCE_PLURAL = "trainingjobs"
@pytest.fixture(scope="function")
def xgboost_training_job_debugger():
    """Create an XGBoost TrainingJob (with debugger spec) custom resource.

    Yields a ``(reference, resource)`` pair for the created resource and
    deletes the custom resource on teardown if it still exists.
    """
    job_name = random_suffix_name("xgboost-trainingjob-debugger", 50)
    spec_replacements = REPLACEMENT_VALUES.copy()
    spec_replacements["TRAINING_JOB_NAME"] = job_name
    ref, _, cr = create_sagemaker_resource(
        resource_plural=RESOURCE_PLURAL,
        resource_name=job_name,
        spec_file="xgboost_trainingjob_debugger",
        replacements=spec_replacements,
    )
    assert cr is not None
    yield (ref, cr)
    # Teardown: remove the custom resource unless the test already deleted it.
    if k8s.get_resource_exists(ref):
        _, deleted = k8s.delete_custom_resource(ref, 3, 10)
        assert deleted
def get_training_rule_eval_sagemaker_status(training_job_name: str, rule_type: str):
    """Return the first rule evaluation status of *rule_type* as reported by
    the SageMaker DescribeTrainingJob API (PascalCase keys)."""
    description = get_sagemaker_training_job(training_job_name)
    statuses = description[f"{rule_type}EvaluationStatuses"]
    return statuses[0]["RuleEvaluationStatus"]
def get_training_rule_eval_resource_status(reference: k8s.CustomResourceReference, rule_type: str):
    """Return the first rule evaluation status of *rule_type* from the k8s
    custom resource status (camelCase keys)."""
    cr = k8s.get_resource(reference)
    status = cr["status"][f"{rule_type}EvaluationStatuses"][0]["ruleEvaluationStatus"]
    assert status is not None
    return status
@service_marker
class TestTrainingDebuggerJob:
    """End-to-end test: an ACK TrainingJob with Debugger/Profiler rules runs to
    Completed, its rule evaluation statuses converge between SageMaker and the
    k8s resource, tags sync, and the resource can be deleted.

    NOTE(review): the assertions below are eventual-consistency checks against
    both SageMaker and the k8s resource status; their order matters, so the
    code is left untouched and only documented.
    """

    def _wait_sagemaker_training_rule_eval_status(
        self,
        training_job_name,
        rule_type: str,
        expected_status: str,
        wait_periods: int = 30,
        period_length: int = 30,
    ):
        # Poll SageMaker until the first rule evaluation of `rule_type`
        # ("DebugRule"/"ProfilerRule", PascalCase) reaches expected_status.
        return wait_for_status(
            expected_status,
            wait_periods,
            period_length,
            get_training_rule_eval_sagemaker_status,
            training_job_name,
            rule_type,
        )

    def _wait_resource_training_rule_eval_status(
        self,
        reference: k8s.CustomResourceReference,
        rule_type: str,
        expected_status: str,
        wait_periods: int = 30,
        period_length: int = 30,
    ):
        # Poll the k8s resource status until the first rule evaluation of
        # `rule_type` (camelCase, e.g. "debugRule") reaches expected_status.
        return wait_for_status(
            expected_status,
            wait_periods,
            period_length,
            get_training_rule_eval_resource_status,
            reference,
            rule_type,
        )

    def _assert_training_rule_eval_status_in_sync(
        self, training_job_name, sagemaker_rule_type, reference, expected_status
    ):
        # SageMaker uses PascalCase rule types; the CRD status uses camelCase,
        # so lower-case the first character for the resource-side poll.
        resource_rule_type = sagemaker_rule_type[0].lower() + sagemaker_rule_type[1:]
        assert (
            self._wait_sagemaker_training_rule_eval_status(
                training_job_name, sagemaker_rule_type, expected_status,
            )
            == self._wait_resource_training_rule_eval_status(reference, resource_rule_type, expected_status)
            == expected_status
        )

    def test_completed(self, xgboost_training_job_debugger):
        (reference, resource) = xgboost_training_job_debugger
        assert k8s.get_resource_exists(reference)

        # Job name comes from the spec rendered by the fixture.
        training_job_name = resource["spec"].get("trainingJobName", None)
        assert training_job_name is not None

        training_job_desc = get_sagemaker_training_job(training_job_name)
        training_job_arn = training_job_desc["TrainingJobArn"]
        resource_arn = k8s.get_resource_arn(resource)
        if resource_arn is None:
            logging.error(
                f"ARN for this resource is None, resource status is: {resource['status']}"
            )
        assert resource_arn == training_job_arn

        # Right after creation the job is still running, so the resource is
        # reported as not yet synced.
        assert training_job_desc["TrainingJobStatus"] == cfg.JOB_STATUS_INPROGRESS
        assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "False")

        assert_training_status_in_sync(
            training_job_name, reference, cfg.JOB_STATUS_COMPLETED
        )
        # Even after the training job completes, the debugger/profiler rule
        # evaluations may still be running, so the resource remains unsynced.
        assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "False")

        # Assert debugger rule evaluation completed
        self._assert_training_rule_eval_status_in_sync(
            training_job_name, "DebugRule", reference, cfg.RULE_STATUS_COMPLETED
        )
        # Assert profiler rule evaluation completed
        self._assert_training_rule_eval_status_in_sync(
            training_job_name, "ProfilerRule", reference, cfg.RULE_STATUS_COMPLETED
        )
        assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True")

        resource_tags = resource["spec"].get("tags", None)
        assert_tags_in_sync(training_job_arn, resource_tags)

        # Check that you can delete a completed resource from k8s
        _, deleted = k8s.delete_custom_resource(reference, cfg.JOB_DELETE_WAIT_PERIODS, cfg.JOB_DELETE_WAIT_LENGTH)
        assert deleted is True
|
3,901 | 135401ea495b80fc1d09d6919ccec8640cb328ce | # -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.core.management import call_command
from django.test import TestCase
from django.utils import timezone
from core import models
class ChildTestCase(TestCase):
    """Tests for the Child model: creation, naming helpers and count()."""

    def setUp(self):
        call_command('migrate', verbosity=0)

    def test_child_create(self):
        record = models.Child.objects.create(
            first_name='First',
            last_name='Last',
            birth_date=timezone.localdate(),
        )
        self.assertEqual(models.Child.objects.get(first_name='First'), record)
        self.assertEqual(record.slug, 'first-last')
        self.assertEqual(str(record), 'First Last')
        self.assertEqual(record.name(), 'First Last')
        self.assertEqual(record.name(reverse=True), 'Last, First')

    def test_child_count(self):
        # count() should track creations and deletions.
        self.assertEqual(models.Child.count(), 0)
        models.Child.objects.create(
            first_name='First 1',
            last_name='Last 1',
            birth_date=timezone.localdate(),
        )
        self.assertEqual(models.Child.count(), 1)
        second = models.Child.objects.create(
            first_name='First 2',
            last_name='Last 2',
            birth_date=timezone.localdate(),
        )
        self.assertEqual(models.Child.count(), 2)
        second.delete()
        self.assertEqual(models.Child.count(), 1)
class DiaperChangeTestCase(TestCase):
    """Tests for the DiaperChange model fields and attributes() helper."""

    def setUp(self):
        call_command('migrate', verbosity=0)
        self.child = models.Child.objects.create(
            first_name='First',
            last_name='Last',
            birth_date=timezone.localdate(),
        )
        yesterday = timezone.localtime() - timezone.timedelta(days=1)
        self.change = models.DiaperChange.objects.create(
            child=self.child,
            time=yesterday,
            wet=1,
            solid=1,
            color='black',
            amount=1.25,
        )

    def test_diaperchange_create(self):
        self.assertEqual(models.DiaperChange.objects.first(), self.change)
        self.assertEqual(str(self.change), 'Diaper Change')
        self.assertEqual(self.change.child, self.child)
        self.assertTrue(self.change.wet)
        self.assertTrue(self.change.solid)
        self.assertEqual(self.change.color, 'black')
        self.assertEqual(self.change.amount, 1.25)

    def test_diaperchange_attributes(self):
        self.assertListEqual(
            self.change.attributes(), ['Wet', 'Solid', 'Black'])
class FeedingTestCase(TestCase):
    """Tests for the Feeding model: creation, duration and feeding methods."""

    def setUp(self):
        call_command('migrate', verbosity=0)
        self.child = models.Child.objects.create(
            first_name='First',
            last_name='Last',
            birth_date=timezone.localdate(),
        )

    def test_feeding_create(self):
        half_hour_ago = timezone.localtime() - timezone.timedelta(minutes=30)
        entry = models.Feeding.objects.create(
            child=self.child,
            start=half_hour_ago,
            end=timezone.localtime(),
            type='formula',
            method='bottle',
            amount=2,
        )
        self.assertEqual(models.Feeding.objects.first(), entry)
        self.assertEqual(str(entry), 'Feeding')
        self.assertEqual(entry.duration, entry.end - entry.start)

    def test_method_both_breasts(self):
        half_hour_ago = timezone.localtime() - timezone.timedelta(minutes=30)
        entry = models.Feeding.objects.create(
            child=self.child,
            start=half_hour_ago,
            end=timezone.localtime(),
            type='breast milk',
            method='both breasts',
        )
        self.assertEqual(models.Feeding.objects.first(), entry)
        self.assertEqual(str(entry), 'Feeding')
        self.assertEqual(entry.method, 'both breasts')
class NoteTestCase(TestCase):
    """Tests for the Note model."""

    def setUp(self):
        call_command('migrate', verbosity=0)
        self.child = models.Child.objects.create(
            first_name='First',
            last_name='Last',
            birth_date=timezone.localdate(),
        )

    def test_note_create(self):
        entry = models.Note.objects.create(
            child=self.child, note='Note', time=timezone.localtime())
        self.assertEqual(models.Note.objects.first(), entry)
        self.assertEqual(str(entry), 'Note')
class SleepTestCase(TestCase):
    """Tests for the Sleep model and its derived duration."""

    def setUp(self):
        call_command('migrate', verbosity=0)
        self.child = models.Child.objects.create(
            first_name='First',
            last_name='Last',
            birth_date=timezone.localdate(),
        )

    def test_sleep_create(self):
        half_hour_ago = timezone.localtime() - timezone.timedelta(minutes=30)
        entry = models.Sleep.objects.create(
            child=self.child,
            start=half_hour_ago,
            end=timezone.localtime(),
        )
        self.assertEqual(models.Sleep.objects.first(), entry)
        self.assertEqual(str(entry), 'Sleep')
        self.assertEqual(entry.duration, entry.end - entry.start)
class TemperatureTestCase(TestCase):
    """Tests for the Temperature model."""

    def setUp(self):
        call_command('migrate', verbosity=0)
        self.child = models.Child.objects.create(
            first_name='First',
            last_name='Last',
            birth_date=timezone.localdate(),
        )
        yesterday = timezone.localtime() - timezone.timedelta(days=1)
        self.temp = models.Temperature.objects.create(
            child=self.child,
            time=yesterday,
            temperature=98.6,
        )

    def test_temperature_create(self):
        self.assertEqual(models.Temperature.objects.first(), self.temp)
        self.assertEqual(str(self.temp), 'Temperature')
        self.assertEqual(self.temp.temperature, 98.6)
class TimerTestCase(TestCase):
    """Tests for the Timer model: naming, titles, restart/stop and duration."""

    def setUp(self):
        call_command('migrate', verbosity=0)
        kid = models.Child.objects.create(
            first_name='First',
            last_name='Last',
            birth_date=timezone.localdate(),
        )
        self.user = User.objects.first()
        self.named = models.Timer.objects.create(
            name='Named',
            end=timezone.localtime(),
            user=self.user,
            child=kid,
        )
        self.unnamed = models.Timer.objects.create(
            end=timezone.localtime(),
            user=self.user,
        )

    def test_timer_create(self):
        self.assertEqual(models.Timer.objects.get(name='Named'), self.named)
        self.assertEqual(str(self.named), 'Named')
        # An unnamed timer falls back to "Timer #<id>".
        self.assertEqual(models.Timer.objects.get(name=None), self.unnamed)
        self.assertEqual(
            str(self.unnamed), 'Timer #{}'.format(self.unnamed.id))

    def test_timer_title_with_child(self):
        # With a single child, the child is omitted from the title.
        self.assertEqual(self.named.title_with_child, str(self.named))
        # Once a second child exists, the child name is appended.
        models.Child.objects.create(
            first_name='Child',
            last_name='Two',
            birth_date=timezone.localdate(),
        )
        self.assertEqual(
            self.named.title_with_child,
            '{} ({})'.format(str(self.named), str(self.named.child))
        )

    def test_timer_user_username(self):
        self.assertEqual(self.named.user_username, self.user.get_username())
        # A full name, when set, takes precedence over the username.
        self.user.first_name = 'User'
        self.user.last_name = 'Name'
        self.user.save()
        self.assertEqual(self.named.user_username, self.user.get_full_name())

    def test_timer_restart(self):
        self.named.restart()
        self.assertIsNone(self.named.end)
        self.assertIsNone(self.named.duration)
        self.assertTrue(self.named.active)

    def test_timer_stop(self):
        stop_time = timezone.localtime()
        self.unnamed.stop(end=stop_time)
        self.assertEqual(self.unnamed.end, stop_time)
        self.assertEqual(
            self.unnamed.duration.seconds,
            (self.unnamed.end - self.unnamed.start).seconds)
        self.assertFalse(self.unnamed.active)

    def test_timer_duration(self):
        running = models.Timer.objects.create(user=User.objects.first())
        # Timer.start uses auto_now_add, so it cannot be set in create().
        running.start = timezone.localtime() - timezone.timedelta(minutes=30)
        running.save()
        running.refresh_from_db()
        self.assertEqual(
            running.duration.seconds,
            timezone.timedelta(minutes=30).seconds)
        running.stop()
        self.assertEqual(
            running.duration.seconds,
            timezone.timedelta(minutes=30).seconds)
class TummyTimeTestCase(TestCase):
    """Tests for the TummyTime model and its derived duration."""

    def setUp(self):
        call_command('migrate', verbosity=0)
        self.child = models.Child.objects.create(
            first_name='First',
            last_name='Last',
            birth_date=timezone.localdate(),
        )

    def test_tummytime_create(self):
        half_hour_ago = timezone.localtime() - timezone.timedelta(minutes=30)
        entry = models.TummyTime.objects.create(
            child=self.child,
            start=half_hour_ago,
            end=timezone.localtime(),
        )
        self.assertEqual(models.TummyTime.objects.first(), entry)
        self.assertEqual(str(entry), 'Tummy Time')
        self.assertEqual(entry.duration, entry.end - entry.start)
|
3,902 | 1cc77ed1c5da025d1b539df202bbd3310a174eac | # import gmplot package
import gmplot
import numpy as np
# generate 700 random lats and lons
latitude = (np.random.random_sample(size = 700) - 0.5) * 180
longitude = (np.random.random_sample(size = 700) - 0.5) * 360
# declare the center of the map, and how much we want the map zoomed in
gmap = gmplot.GoogleMapPlotter(0, 0, 2)
# plot heatmap
gmap.heatmap(latitude, longitude)
gmap.scatter(latitude, longitude, c='r', marker=True)
#Your Google_API_Key
gmap.apikey = "AIzaSyAid6Kk6DZVnu0VNsrDJsmhKwH1pqyiu00"
# save it to html
gmap.draw("c:\\users\\jackc\desktop\\country_heatmap.html")
# NOTE(review): everything below is dead code, disabled by wrapping it in a
# module-level string literal. It plotted per-stop location-error percentages
# from a merged C-Tran CSV and wrote summary counts to outputPercentError.csv.
# Consider moving it to its own module (or deleting it) instead of keeping it
# as a string. The string content is left byte-for-byte untouched.
'''
import csv
import pandas as pd
from operator import itemgetter
import matplotlib.pyplot as plt
import numpy as np
import mplcursors
import gmplot
def outputScatter():
data = pd.read_csv('C:\\Users\\jackc\\Desktop\\ctran\dataMerge.csv')
df = data.groupby('location_id')
gmap = gmplot.GoogleMapPlotter(0,0,2)
counter = 0
result = []
result_lon = []
result_lat = []
result_calculation = []
result_lon_static = []
result_lat_static = []
result_toSCV = []
above50ft = 0
above70ft = 0
above90ft = 0
above150ft = 0
index = 0
colors = ['r','y','g','b']
for x,y in df:
for z in range(y.location_distance.values.size):
result_lon_static.append(y.y_coordinate.values[z])
result_lat_static.append(y.x_coordinate.values[z])
if(y.location_distance.values[z] > 30):
counter = counter + 1
if(y.location_distance.values[z] > 50):
above50ft = above50ft + 1
if(y.location_distance.values[z] > 70):
above70ft = above70ft + 1
if(y.location_distance.values[z] > 90):
above90ft = above90ft + 1
if(y.location_distance.values[z] > 150):
above150ft = above150ft + 1
cal=counter/(y.location_distance.values.size)
result.append([y.stop_code.values[0], cal, y.stop_lat.values[0], y.stop_lon.values[0]])
result_lat.append(y.stop_lat.values[0])
result_lon.append(y.stop_lon.values[0])
result_calculation.append(cal)
result_toSCV.append([y.stop_code.values[0], cal, y.location_distance.values.size, counter, above50ft, above70ft, above90ft, above150ft])
index = index+1
above50ft = 0
above70ft = 0
above90ft = 0
above150ft = 0
counter = 0
result = sorted(result,key=itemgetter(1), reverse=True)
result_toSCV = sorted(result_toSCV, key=itemgetter(1), reverse=True)
plt.scatter(result_lat_static,result_lon_static, c='black')
code_id = []
for x in result:
#code_id.append(x[0])
#result_calculation.append(x[1])
#result_lat.append(x[2])
#result_lon.append(x[3])
if x[1] > 0.9:
red = plt.scatter(x[3],x[2], c=colors[0], label='>90%')
#red = plt.scatter(x[3],x[2], c=colors[0], label=x[0])
elif x[1] > 0.8:
yellow = plt.scatter(x[3],x[2], c=colors[1], label='>80%')
#yellow = plt.scatter(x[3],x[2], c=colors[1], label=x[0])
elif x[1] > 0.7:
green = plt.scatter(x[3],x[2], c=colors[2], label='>70%')
#green = plt.scatter(x[3],x[2], c=colors[2], label=x[0])
else:
blue = plt.scatter(x[3],x[2], c=colors[3], label='>60%')
#blue = plt.scatter(x[3],x[2], c=colors[3], label=x[0])
with open('C:\\Users\\Jackc\\Desktop\\Ctran\\outputPercentError.csv', mode='w', newline='') as file:
writer = csv.writer(file)
writer.writerow(['location_id', 'percent_Error', 'total_count', 'above30ft', 'above50ft', 'above70ft', 'above90ft', 'above150ft'])
for x in result_toSCV:
writer.writerow(x)
'''
|
3,903 | 57e9c1a4ac57f68e0e73c2c67c6828de8efb1b16 | import uuid
import json
import pytest
import requests
import httpx
from spinta.testing.manifest import bootstrap_manifest
from spinta.utils.data import take
from spinta.testing.utils import error
from spinta.testing.utils import get_error_codes, RowIds
from spinta.testing.context import create_test_context
from spinta.testing.client import create_test_client
from spinta.manifests.tabular.helpers import striptable
from spinta.testing.tabular import create_tabular_manifest
from spinta.testing.data import listdata
# Three sample `report` resources used as the default payload for the search
# tests below. They differ in status, report_type, count and the nested
# notes/operating_licenses structures so each query can select exactly one row.
# (The third resource deliberately omits operating_licenses.)
test_data = [
    {
        '_type': 'report',
        'status': 'OK',
        'report_type': 'STV',
        'count': 10,
        'notes': [{
            'note': 'hello',
            'note_type': 'simple',
            'create_date': '2019-03-14',
        }],
        'operating_licenses': [{
            'license_types': ['valid', 'invalid'],
        }],
    },
    {
        '_type': 'report',
        'status': 'invalid',
        'report_type': 'VMI',
        'count': 42,
        'notes': [{
            'note': 'world',
            'note_type': 'daily',
            'create_date': '2019-04-20',
        }],
        'operating_licenses': [{
            'license_types': ['expired'],
        }],
    },
    {
        '_type': 'report',
        'status': 'invalid',
        'report_type': 'STV',
        'count': 13,
        'notes': [{
            'note': 'foo bar',
            'note_type': 'important',
            'create_date': '2019-02-01',
        }],
    },
]
def _push_test_data(app, model, data=None):
    """Insert *data* (default: the module-level ``test_data``) into *model*
    through the batch write API and return the inserted rows."""
    app.authmodel(model, ['insert'])
    payload = [
        {
            **row,
            '_op': 'insert',
            '_type': model,
        }
        for row in (data or test_data)
    ]
    resp = app.post('/', json={'_data': payload})
    assert resp.status_code == 200, resp.json()
    body = resp.json()
    assert '_data' in body, body
    return body['_data']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_exact(model, context, app):
    """Exact (=) search on a string property returns only the matching row."""
    rec1, rec2, rec3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?status="OK"')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec1['_id']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_exact_lower(model, context, app):
    """Exact search combined with .lower() matches case-insensitively."""
    rec1, rec2, rec3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?status.lower()="ok"')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec1['_id']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_exact_non_string(model, context, app):
    """Exact search on non-string properties, plus error cases."""
    rec1, rec2, rec3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    # Integer property: matches exactly one row.
    response = app.get(f'/{model}?count=13')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec3['_id']

    # A string value against an integer property is a validation error.
    response = app.get(f'/{model}?count="abc"')
    assert response.status_code == 400
    assert get_error_codes(response.json()) == ["InvalidValue"]

    # A non-existing value simply returns no rows.
    response = app.get(f'/{model}?status="o"')
    rows = response.json()['_data']
    assert len(rows) == 0

    # A non-existing property is a 400 error.
    response = app.get(f'/{model}?state="o"')
    assert response.status_code == 400
    assert get_error_codes(response.json()) == ["FieldNotInResource"]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_exact_multiple_props(model, context, app):
    """Multiple exact conditions on different properties are ANDed."""
    rec1, rec2, rec3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?status.lower()="invalid"&report_type.lower()="stv"')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec3['_id']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_exact_same_prop_multiple_times(model, context, app):
    """Contradictory exact conditions on the same property yield no rows."""
    rec1, rec2, rec3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?status.lower()="invalid"&status.lower()="ok"')
    rows = response.json()['_data']
    assert len(rows) == 0
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_gt(model, context, app):
    """Strict greater-than search on an integer property."""
    rec1, rec2, rec3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    response = app.get(f'/{model}?count>40')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec2['_id']

    # `>` is not valid on string properties.
    response = app.get(f'/{model}?status>"ok"')
    assert response.status_code == 400
    assert get_error_codes(response.json()) == ["InvalidValue"]

    # Repeated conditions on the same property are ANDed.
    response = app.get(f'/{model}?count>40&count>10')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec2['_id']

    # Mixed operators on different properties are ANDed too.
    response = app.get(f'/{model}?count>40&report_type.lower()="vmi"')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec2['_id']

    # Strictness: 42 > 42 is false, so no rows.
    response = app.get(f'/{model}?count>42')
    rows = response.json()['_data']
    assert len(rows) == 0
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_gt_with_nested_date(model, context, app):
    """`>` with recurse() finds nested dates after the given date."""
    ids = RowIds(_push_test_data(app, model))
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?recurse(create_date)>"2019-04-19"')
    assert ids(response) == [1]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_gte(model, context, app):
    """Greater-than-or-equal search on an integer property."""
    rec1, rec2, rec3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    response = app.get(f'/{model}?count>=40')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec2['_id']

    # `>=` is not valid on string properties.
    response = app.get(f'/{model}?status>="ok"')
    assert response.status_code == 400
    assert get_error_codes(response.json()) == ["InvalidValue"]

    # Repeated conditions on the same property are ANDed.
    response = app.get(f'/{model}?count>=40&count>10')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec2['_id']

    # Mixed operators on different properties are ANDed too.
    response = app.get(f'/{model}?count>=40&report_type.lower()="vmi"')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec2['_id']

    # Inclusiveness: 42 >= 42 matches.
    response = app.get(f'/{model}?count>=42')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec2['_id']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_ge_with_nested_date(model, context, app):
    """`>=` with recurse() matches a nested date equal to the bound."""
    rec1, rec2, rec3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?recurse(create_date)>="2019-04-20"')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec2['_id']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_lt(model, context, app):
    """Strict lower-than search on an integer property."""
    rec1, rec2, rec3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    response = app.get(f'/{model}?count<12')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec1['_id']

    # `<` is not valid on string properties.
    response = app.get(f'/{model}?status<"ok"')
    assert response.status_code == 400
    assert get_error_codes(response.json()) == ["InvalidValue"]

    # Repeated conditions on the same property are ANDed.
    response = app.get(f'/{model}?count<20&count>10')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec3['_id']

    # Mixed operators on different properties are ANDed too.
    response = app.get(f'/{model}?count<50&report_type.lower()="vmi"')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec2['_id']

    # Strictness: 10 < 10 is false, so no rows.
    response = app.get(f'/{model}?count<10')
    rows = response.json()['_data']
    assert len(rows) == 0
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_lt_with_nested_date(model, context, app):
    """`<` with recurse() finds nested dates before the given date."""
    rec1, rec2, rec3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?recurse(create_date)<"2019-02-02"')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec3['_id']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_lte(model, context, app):
    """Lower-than-or-equal search on an integer property."""
    rec1, rec2, rec3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    response = app.get(f'/{model}?count<=12')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec1['_id']

    # `<=` is not valid on string properties.
    response = app.get(f'/{model}?status<="ok"')
    assert response.status_code == 400
    assert get_error_codes(response.json()) == ["InvalidValue"]

    # Repeated conditions on the same property are ANDed.
    response = app.get(f'/{model}?count<=20&count>10')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec3['_id']

    # Mixed operators on different properties are ANDed too.
    response = app.get(f'/{model}?count<=50&report_type.lower()="vmi"')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec2['_id']

    # Inclusiveness: 10 <= 10 matches.
    response = app.get(f'/{model}?count<=10')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec1['_id']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_le_with_nested_date(model, context, app):
    """`<=` with recurse() matches a nested date equal to the bound."""
    rec1, rec2, rec3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?recurse(create_date)<="2019-02-01"')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec3['_id']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_ne(model, context, app):
    """Not-equal (!=) search excludes matching rows."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?status!="invalid"')
    assert ids(response) == [0]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_ne_lower(model, context, app):
    """Not-equal combined with .lower() works case-insensitively."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?status.lower()!="ok"')
    assert ids(response) == [1, 2]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_ne_multiple_props(model, context, app):
    """Multiple != conditions on the same property are ANDed."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?count!=10&count!=42')
    assert ids(response) == [2]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_ne_multiple_props_and_logic(model, context, app):
    """!= combined with = on another property is ANDed."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?status.lower()!="ok"&report_type.lower()="stv"')
    assert ids(response) == [2]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_ne_nested(model, context, app):
    """!= works on nested properties, ANDed with other conditions."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?notes.create_date!="2019-02-01"&status!="invalid"')
    assert ids(response) == [0]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_ne_nested_missing_data(model, context, app):
    """!= on nested structures handles rows that lack the structure."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?operating_licenses.license_types!="valid"')
    assert ids(response) == [1]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_contains(model, context, app, mocker):
    """Substring search via .contains() on a single property.

    NOTE(review): this query is identical to the one in
    test_search_contains_case_insensitive; presumably this variant was meant
    to exercise .contains() without .lower() — verify intent.
    """
    rec1, rec2, rec3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?report_type.lower().contains("vm")')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec2['_id']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_contains_case_insensitive(model, context, app, mocker):
    """.contains() combined with .lower() matches case-insensitively."""
    rec1, rec2, rec3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?report_type.lower().contains("vm")')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec2['_id']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_contains_multi_field(model, context, app, mocker):
    """.contains() conditions across properties are ANDed."""
    rec1, rec2, rec3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    # Two .contains() on different properties.
    response = app.get(f'/{model}?status.contains("valid")&report_type.lower().contains("tv")')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec3['_id']

    # Same, but case-sensitive on the second property.
    response = app.get(f'/{model}?status.contains("valid")&report_type.contains("TV")')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec3['_id']

    # Two .contains() on the same property are ANDed as well.
    response = app.get(f'/{model}?report_type.lower().contains("vm")&report_type.lower().contains("mi")')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec2['_id']

    # .contains() ANDed with an exact condition.
    response = app.get(f'/{model}?status.contains("valid")&report_type.lower()="vmi"')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec2['_id']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_contains_type_check(model, context, app):
    """.contains() on a date property is rejected with InvalidValue."""
    rec1, rec2, rec3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?recurse(create_date).contains("2019-04-20")')
    assert response.status_code == 400
    assert get_error_codes(response.json()) == ["InvalidValue"]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_contains_with_select(model, context, app, mocker):
    """.contains() combined with select(), with and without always_show_id."""
    rec1, rec2, rec3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    # select() limits the result to the chosen properties.
    response = app.get(f'/{model}?report_type.lower().contains("vm")&select(count)')
    assert response.status_code == 200
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0] == {
        'count': 42,
    }

    # With always_show_id enabled, _id is added to the selection.
    mocker.patch.object(context.get('config'), 'always_show_id', True)
    response = app.get(f'/{model}?report_type.lower().contains("vm")&select(count)')
    assert response.status_code == 200
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0] == {
        '_id': rec2['_id'],
        'count': 42,
    }

    # Without select(), always_show_id returns only _id.
    response = app.get(f'/{model}?report_type.lower().contains("vm")')
    assert response.status_code == 200
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0] == {
        '_id': rec2['_id'],
    }
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_select_unknown_property(model, context, app, mocker):
    """select() on an unknown property is a FieldNotInResource error."""
    _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?select(nothere)')
    assert error(response) == 'FieldNotInResource'
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_select_unknown_property_in_object(model, context, app, mocker):
    """select() on an unknown nested property is a FieldNotInResource error."""
    _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?select(notes.nothere)')
    assert error(response) == 'FieldNotInResource'
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_startswith(model, context, app):
    """Prefix search via .startswith(), including AND logic and type checks."""
    rec1, rec2, rec3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    # Case-sensitive prefix match.
    response = app.get(f'/{model}?report_type.startswith("VM")')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec2['_id']

    # Case-insensitive prefix match via .lower().
    response = app.get(f'/{model}?report_type.lower().startswith("vm")')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec2['_id']

    # Two prefix conditions on different properties are ANDed.
    response = app.get(f'/{model}?status.startswith("in")&report_type.lower().startswith("vm")')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec2['_id']

    # Prefix condition ANDed with an exact condition.
    response = app.get(f'/{model}?report_type.lower().startswith("st")&status.lower()="ok"')
    rows = response.json()['_data']
    assert len(rows) == 1
    assert rows[0]['_id'] == rec1['_id']

    # Sanity check: .startswith() anchors at the start of the value.
    response = app.get(f'/{model}?status.startswith("valid")')
    rows = response.json()['_data']
    assert len(rows) == 0

    # .startswith() on a date property is rejected.
    response = app.get(f'/{model}?notes.create_date.startswith("2019-04-20")')
    assert response.status_code == 400
    assert get_error_codes(response.json()) == ["InvalidValue"]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_nested(model, context, app):
    """Filters on nested properties: exact, lower(), date equality, gt, contains,
    and the error for a nonexistent nested field."""
    r1, r2, r3, = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    # nested `exact` search
    resp = app.get(f'/{model}?notes.note="foo bar"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
    # nested `exact` search, case insensitive
    resp = app.get(f'/{model}?notes.note.lower()="foo bar"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
    # nested `exact` search with dates
    resp = app.get(f'/{model}?notes.create_date="2019-03-14"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r1['_id']
    # nested `gt` search
    resp = app.get(f'/{model}?notes.create_date>"2019-04-01"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
    # nested non existant field
    resp = app.get(f'/{model}?notes.foo.bar="baz"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ["FieldNotInResource"]
    # nested `contains` search
    resp = app.get(f'/{model}?notes.note.contains("bar")')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_nested_contains(model, context, app):
    """contains() on a nested list-of-scalars property matches substrings."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?operating_licenses.license_types.contains("lid")')
    assert ids(resp) == [0]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_nested_startswith(model, context, app):
    """startswith() works on nested object and nested list-of-scalars properties."""
    app.authmodel(model, ['search'])
    r1, r2, r3, = _push_test_data(app, model)
    # nested `startswith` search
    resp = app.get(f'/{model}?notes.note.startswith("fo")')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
    resp = app.get(f'/{model}?operating_licenses.license_types.startswith("exp")')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r2['_id']
def ids(resources):
    """Return the ``_id`` of every resource.

    *resources* may be a list of resource dicts, or an HTTP response whose
    JSON body carries the resources under ``_data``; a response must be
    200 OK, otherwise the assertion shows its JSON payload.
    """
    if isinstance(resources, (requests.models.Response, httpx.Response)):
        response = resources
        assert response.status_code == 200, response.json()
        resources = response.json()['_data']
    return [resource['_id'] for resource in resources]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_or(model, context, app):
    """The `|` operator joins filters with OR logic."""
    ids = RowIds(_push_test_data(app, model))
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?count=42|status.lower()="ok"')
    assert ids(resp) == [0, 1]
    resp = app.get(f'/{model}?count<=10|count=13')
    assert ids(resp) == [0, 2]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_nested_recurse(model, context, app):
    """recurse() finds a property at any nesting depth."""
    r1, r2, r3, = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?recurse(note)="foo bar"')
    data = resp.json()['_data']
    assert len(data) == 1
    assert data[0]['_id'] == r3['_id']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_nested_recurse_lower(model, context, app):
    """recurse() can be combined with lower() for case-insensitive matching."""
    r1, r2, r3, = ids(_push_test_data(app, model))
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?recurse(status).lower()="ok"')
    assert ids(resp) == [r1]
@pytest.mark.models(
    'backends/mongo/recurse',
    'backends/postgres/recurse',
)
def test_search_nested_recurse_multiple_props(model, context, app):
    """recurse() matches a property name that occurs both at the top level and
    inside a nested list (`country` on the org and on each govid)."""
    r1, r2, = ids(_push_test_data(app, model, [
        {
            'title': "Org",
            'country': 'fi',
            'govids': [
                {'govid': '1', 'country': 'fi'},
                {'govid': '2', 'country': 'se'},
            ]
        },
        {
            'title': "Org",
            'country': 'no',
            'govids': [
                {'govid': '3', 'country': 'no'},
            ]
        },
    ]))
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?recurse(country)="se"')
    assert ids(resp) == [r1]
    resp = app.get(f'/{model}?recurse(country)="fi"')
    assert ids(resp) == [r1]
    resp = app.get(f'/{model}?recurse(country)="no"')
    assert ids(resp) == [r2]
@pytest.mark.models(
    'backends/mongo/recurse',
    'backends/postgres/recurse',
)
def test_search_recurse_multiple_props_lower(model, app):
    """recurse() + lower() matches across multiple occurrences of a property,
    regardless of stored case (nested values are upper-case here)."""
    r1, r2, = ids(_push_test_data(app, model, [
        {
            'title': "Org",
            'country': 'fi',
            'govids': [
                {'govid': '1', 'country': 'FI'},
                {'govid': '2', 'country': 'SE'},
            ]
        },
        {
            'title': "Org",
            'country': 'no',
            'govids': [
                {'govid': '3', 'country': 'NO'},
            ]
        },
    ]))
    app.authmodel(model, ['search'])
    resp = app.get(f'/{model}?recurse(country).lower()="se"')
    assert ids(resp) == [r1]
    resp = app.get(f'/{model}?recurse(country).lower()="fi"')
    assert ids(resp) == [r1]
    resp = app.get(f'/{model}?recurse(country).lower()="no"')
    assert ids(resp) == [r2]
# TODO: add mongo
def test_search_any(app):
    """any(op, prop, *values) ORs the operator over several literal values."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?any("eq",count,10,42)')
    assert ids(resp) == [0, 1]
    resp = app.get(f'/{model}?any("ne",count,42)')
    assert ids(resp) == [0, 2]
# TODO: add mongo
def test_search_any_in_list(app):
    """any() applied to a nested list property (notes.note)."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?any("eq",notes.note,"hello","world")')
    assert sorted(ids(resp)) == [0, 1]
    resp = app.get(f'/{model}?any("ne",notes.note,"foo bar")')
    assert sorted(ids(resp)) == [0, 1]
# TODO: add mongo
def test_search_any_in_list_of_scalars(app):
    """any() applied to a nested list-of-scalars property."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?any("eq",operating_licenses.license_types,"valid","invalid","expired")')
    assert sorted(ids(resp)) == [0, 1]
    resp = app.get(f'/{model}?any("ne",operating_licenses.license_types,"expired")')
    assert sorted(ids(resp)) == [0]
# TODO: add mongo
def test_search_any_recurse(app):
    """any() accepts a recurse() expression as its property argument."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?any("eq",recurse(status),"OK","none")')
    assert ids(resp) == [0]
# TODO: add mongo
def test_search_any_recurse_lower(app):
    """any() with recurse().lower() matches case-insensitively."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?any("eq",recurse(status).lower(),"ok","none")')
    assert ids(resp) == [0]
# TODO: add mongo
def test_search_any_contains(app):
    """any() works with the "contains" operator on a plain property."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?any("contains",status,"inv","val","lid")')
    assert sorted(ids(resp)) == [1, 2]
# TODO: add mongo
def test_search_any_contains_nested(app):
    """any("contains", ...) works on a nested list property."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?any("contains",notes.note,"hel","wor")')
    assert sorted(ids(resp)) == [0, 1]
# TODO: add mongo
def test_search_any_contains_recurse_lower(app):
    """any("contains", ...) combined with recurse() and lower()."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?any("contains",recurse(status).lower(),"o","k")')
    assert sorted(ids(resp)) == [0]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_id_contains(model, app):
    """contains() works on the reserved _id property (UUID strings)."""
    app.authmodel(model, ['search', 'getall'])
    ids = RowIds(_push_test_data(app, model))
    # every UUID contains dashes
    resp = app.get(f'/{model}?_id.contains("-")')
    assert sorted(ids(resp)) == [0, 1, 2]
    # a substring taken from the first row's id matches only that row
    subid = ids[0][5:10]
    resp = app.get(f'/{model}?_id.contains("{subid}")')
    assert ids(resp) == [0]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_id_not_contains(model, app):
    """_id.contains() with a non-matching substring returns nothing."""
    app.authmodel(model, ['search', 'getall'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?_id.contains("AAAAA")')
    assert ids(resp) == []
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_id_startswith(model, app):
    """_id.startswith() matches the row whose id begins with the prefix."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    subid = ids[0][:5]
    resp = app.get(f'/{model}?_id.startswith("{subid}")')
    assert ids(resp) == [0]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_id_not_startswith(model, app):
    """_id.startswith() with a mid-string fragment (not a prefix) matches nothing."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    subid = ids[0][5:10]
    resp = app.get(f'/{model}?_id.startswith("{subid}")')
    assert ids(resp) == []
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_revision_contains(model, app):
    """contains() works on the reserved _revision property."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?_revision.contains("-")')
    assert sorted(ids(resp)) == [0, 1, 2]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_revision_startswith(model, app):
    """_revision.startswith() matches using a prefix read back via getone."""
    app.authmodel(model, ['search', 'getone'])
    ids = RowIds(_push_test_data(app, model))
    id0 = ids[0]
    resp = app.get(f'/{model}/{id0}')
    revision = resp.json()['_revision'][:5]
    resp = app.get(f'/{model}?_revision.startswith("{revision}")')
    assert ids(resp) == [0]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_group(model, app):
    """Parenthesised groups of AND-joined filters are supported."""
    app.authmodel(model, ['search', 'getone'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?(report_type="STV"&status="OK")')
    assert ids(resp) == [0]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_select_in_or(model, app):
    """A grouped OR filter can be combined with select()."""
    app.authmodel(model, ['search', 'getone'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?(report_type="STV"|status="OK")&select(_id)')
    # XXX: Flaky test, some times it gives [2, 0], don't know why.
    assert ids(resp) == [0, 2]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_lower_contains(model, app):
    """lower() can be chained with contains() for case-insensitive substring search."""
    app.authmodel(model, ['search', 'getone'])
    ids = RowIds(_push_test_data(app, model))
    resp = app.get(f'/{model}?report_type.lower().contains("st")')
    # XXX: Flaky test, some times it gives [2, 0], don't know why.
    assert ids(resp) == [0, 2]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_null(model, app):
    """`prop=null` matches rows where the property was never set."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model, [
        {'status': 'OK'},
        {},
    ]))
    resp = app.get(f'/{model}?status=null')
    assert ids(resp) == [1]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_not_null(model, app):
    """`prop!=null` matches only rows where the property is set."""
    app.authmodel(model, ['search'])
    ids = RowIds(_push_test_data(app, model, [
        {'status': 'OK'},
        {},
    ]))
    resp = app.get(f'/{model}?status!=null')
    assert ids(resp) == [0]
@pytest.mark.parametrize('backend', ['default', 'mongo'])
def test_extra_fields(postgresql, mongo, backend, rc, tmp_path, request):
    """Rows written under a wider schema are still readable after the model is
    reloaded with fewer properties: the extra stored fields are ignored."""
    rc = rc.fork({
        'backends': [backend],
        'manifests.default': {
            'type': 'tabular',
            'path': str(tmp_path / 'manifest.csv'),
            'backend': backend,
        },
    })
    # Create data into a extrafields model with code and name properties.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''
    m | property | type
    extrafields |
    | code | string
    | name | string
    '''))
    context = create_test_context(rc)
    request.addfinalizer(context.wipe_all)
    app = create_test_client(context)
    app.authmodel('extrafields', ['insert'])
    resp = app.post('/extrafields', json={'_data': [
        {'_op': 'insert', 'code': 'lt', 'name': 'Lietuva'},
        {'_op': 'insert', 'code': 'lv', 'name': 'Latvija'},
        {'_op': 'insert', 'code': 'ee', 'name': 'Estija'},
    ]})
    assert resp.status_code == 200, resp.json()
    # Now try to read from same model, but loaded with just one property.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''
    m | property | type
    extrafields |
    | name | string
    '''))
    context = create_test_context(rc)
    app = create_test_client(context)
    app.authmodel('extrafields', ['getall', 'getone'])
    resp = app.get('/extrafields')
    assert listdata(resp, sort=True) == [
        "Estija",
        "Latvija",
        "Lietuva",
    ]
    pk = resp.json()['_data'][0]['_id']
    resp = app.get(f'/extrafields/{pk}')
    data = resp.json()
    assert resp.status_code == 200, data
    assert take(data) == {'name': 'Lietuva'}
@pytest.mark.parametrize('backend', ['mongo'])
def test_missing_fields(postgresql, mongo, backend, rc, tmp_path):
    """Rows written under a narrower schema read back with None for properties
    added to the model afterwards."""
    rc = rc.fork({
        'backends': [backend],
        'manifests.default': {
            'type': 'tabular',
            'path': str(tmp_path / 'manifest.csv'),
            'backend': backend,
        },
    })
    # Create data into a extrafields model with code and name properties.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''
    m | property | type
    missingfields |
    | code | string
    '''))
    context = create_test_context(rc)
    app = create_test_client(context)
    app.authmodel('missingfields', ['insert'])
    resp = app.post('/missingfields', json={'_data': [
        {'_op': 'insert', 'code': 'lt'},
        {'_op': 'insert', 'code': 'lv'},
        {'_op': 'insert', 'code': 'ee'},
    ]})
    assert resp.status_code == 200, resp.json()
    # Now try to read from same model, but loaded with just one property.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''
    m | property | type
    missingfields |
    | code | string
    | name | string
    '''))
    context = create_test_context(rc)
    app = create_test_client(context)
    app.authmodel('missingfields', ['search', 'getone'])
    resp = app.get('/missingfields?select(_id,code,name)')
    assert listdata(resp, sort=True) == [
        ('ee', None),
        ('lt', None),
        ('lv', None),
    ]
    pk = resp.json()['_data'][0]['_id']
    resp = app.get(f'/missingfields/{pk}')
    data = resp.json()
    assert resp.status_code == 200, data
    assert take(data) == {'code': 'lt'}
def test_base_select(rc, postgresql, request):
    """select() can pull properties from a model's base (`_base.name`, `_base.type`)
    when City inherits from Location via a shared _id."""
    context = bootstrap_manifest(rc, '''
    d | r | b | m | property | type | ref
    datasets/gov/example/base | |
    | |
    | | | Location | |
    | | | | id | integer |
    | | | | name | string |
    | | | | type | string |
    | |
    | | Location | |
    | | | City | |
    | | | | id | |
    | | | | name | string |
    | | | | population | integer |
    ''', backend=postgresql, request=request)
    app = create_test_client(context)
    app.authorize(['spinta_set_meta_fields'])
    app.authmodel('datasets/gov/example/base/Location', ['insert', 'delete'])
    app.authmodel('datasets/gov/example/base/City', ['insert', 'delete', 'getall', 'search'])
    # The City row reuses the Location row's _id, linking it to its base.
    _id = str(uuid.uuid4())
    app.post('/datasets/gov/example/base/Location', json={
        '_id': _id,
        'id': 1,
        'name': 'Base location',
        'type': 'city'
    })
    app.post('/datasets/gov/example/base/City', json={
        '_id': _id,
        'name': 'City',
        'population': 100
    })
    resp = app.get('/datasets/gov/example/base/City?select(id,name,_base.name,population,_base.type)')
    assert resp.json()['_data'] == [
        {
            '_base': {'name': 'Base location', 'type': 'city'},
            'id': 1,
            'name': 'City',
            'population': 100
        }
    ]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_select_revision(model, app):
    """select(_revision) is honoured by the jsonl output format."""
    app.authmodel(model, ['search', 'getone', 'getall'])
    ids = RowIds(_push_test_data(app, model))
    id0 = ids[0]
    resp = app.get(f'/{model}/{id0}')
    revision = resp.json()['_revision']
    resp = app.get(f'/{model}/:format/jsonl?limit(1)&select(_revision)')
    assert json.loads(resp.content) == {
        '_revision': revision
    }
|
3,904 | f2b978b9a4c00469cdd2f5e1e9275df73c7379b8 | import numpy as np
from math import ceil, log2
def avg(values):
    """Return the arithmetic mean of *values*.

    *values* is any sequence of numbers accepted by ``np.mean``.
    (Parameter renamed from ``list``, which shadowed the builtin.)
    """
    return np.mean(values)
def dispersion(values):
    """Return the population variance of *values*.

    Fix: the original recomputed ``np.mean`` inside the loop for every
    element, making the function accidentally O(n^2); the mean is now
    computed once.  (Parameter renamed from ``list``, which shadowed the
    builtin.)
    """
    mean = np.mean(values)
    return sum((v - mean) ** 2 for v in values) / len(values)
def variation_coefficient(list):
    """Return the coefficient of variation of *list*, as a percentage.

    Defined as (standard deviation / mean) * 100, where the standard
    deviation is the square root of the population variance computed by
    ``dispersion``.
    """
    std = dispersion(list) ** (1/2)
    return (std / np.mean(list)) * 100
def chi_square(values, upper=10000):
    """Pearson chi-square uniformity check over equal-width bins.

    Splits (0, upper] into ``k = ceil(log2(n) + 1)`` equal bins (Sturges'
    rule), counts how many of *values* fall in each, and compares the
    chi-square statistic against the fixed acceptance interval
    ``[0.8, 16.8]`` used by the original code.

    Generalisation: the previously hard-coded range 10000 is now the
    ``upper`` parameter (default unchanged, so existing calls behave the
    same).  Values <= 0 or > upper fall outside every bin, exactly as
    before.  Also removes a pointless sort and a no-op ``else: continue``.

    Returns True when the statistic lies inside the acceptance interval.
    """
    k = ceil(log2(len(values)) + 1)
    step = upper / k
    expected = (1 / k) * len(values)  # p * n, kept in the original form
    frequencies = [
        sum(1 for v in values if i * step < v <= (i + 1) * step)
        for i in range(k)
    ]
    chi = sum((f - expected) ** 2 / expected for f in frequencies)
    return 0.8 <= chi <= 16.8
|
3,905 | 03bc377bef1de7d512b7982a09c255af1d82fb7d | """4. Начните работу над проектом «Склад оргтехники». Создайте класс, описывающий склад. А также класс «Оргтехника»,
который будет базовым для классов-наследников. Эти классы — конкретные типы оргтехники (принтер, сканер, ксерокс).
В базовом классе определить параметры, общие для приведенных типов. В классах-наследниках реализовать параметры,
уникальные для каждого типа оргтехники.
5. Продолжить работу над первым заданием. Разработать методы, отвечающие за приём оргтехники на склад и передачу в
определенное подразделение компании. Для хранения данных о наименовании и количестве единиц оргтехники, а также других
данных, можно использовать любую подходящую структуру, например словарь.
6. Продолжить работу над вторым заданием. Реализуйте механизм валидации вводимых пользователем данных. Например, для
указания количества принтеров, отправленных на склад, нельзя использовать строковый тип данных.
Подсказка: постарайтесь по возможности реализовать в проекте «Склад оргтехники» максимум возможностей, изученных на
уроках по ООП.
"""
class Equipment():
    """Base class for a unit of office equipment tracked by the warehouse.

    NOTE: the class defines neither __eq__ nor __hash__, so instances are
    hashed/compared by identity — two units with identical fields are
    distinct set members (this answers the question in the demo code below
    about set() not deduplicating).
    """

    def __init__(self, c_name, model, sn):
        self.c_name = c_name  # manufacturer name
        self.model = model    # device model
        self.sn = sn          # serial number
        self.holder = None    # current location; None until placed somewhere

    def _move(self, holder):
        """Record the unit's new location (warehouse or department name)."""
        self.holder = holder

    def add(self, qnt):
        # Placeholder; concrete equipment types override this.
        pass
class Whouse:
    """Warehouse that stores equipment units and hands them out to departments.

    Fixes: leftover debug ``print`` calls removed; capacity check uses ``>=``
    so an over-full counter still raises; ``move_holder`` gains an optional
    ``sn`` argument so a specific unit can be moved (answers the question in
    the demo code about moving printer3 specifically).
    """

    def __init__(self, max_volume):
        self.max_volume = max_volume  # maximum number of units stored
        self.total = 0                # units currently in stock
        # One set per equipment category.
        self.storage = {'printers': set()}
        # Maps a concrete Equipment subclass to its storage key.
        self.add_mapper = {Printer: 'printers'}

    def get_tech_to_whouse(self, equip: Equipment):
        """Accept one unit into the warehouse; raise OverflowError when full."""
        if self.total >= self.max_volume:
            raise OverflowError('Склад заполнен!')
        self.storage[self.add_mapper[type(equip)]].add(equip)
        equip._move('whouse')
        self.total += 1

    def move_holder(self, tech_type, holder, sn=None):
        """Hand one unit of *tech_type* over to *holder*.

        When *sn* is given, the unit with that serial number is moved
        (KeyError if it is not in stock); otherwise an arbitrary unit is
        taken, as before.
        """
        units = self.storage[tech_type]
        if sn is None:
            unit = units.pop()
        else:
            unit = next((u for u in units if u.sn == sn), None)
            if unit is None:
                raise KeyError(f'No {tech_type} with s/n {sn} in stock')
            units.remove(unit)
        unit._move(holder)
        self.total -= 1

    def __call__(self, *args, **kwargs):
        # Calling the warehouse is a shorthand for receiving a unit.
        self.get_tech_to_whouse(*args, **kwargs)
class Printer(Equipment):
    """A printer: Equipment plus paper type and colour capability."""

    def __init__(self, c_name, model, sn, ptype, color):
        super().__init__(c_name, model, sn)
        self.ptype = ptype  # paper type, e.g. 'A4'
        self.color = color  # colour capability, e.g. 'BW'

    def add(self):
        """Return a single-line description of this printer."""
        return (
            f'Company: {self.c_name} Model: {self.model} s/n {self.sn} '
            f'Paper type: {self.ptype} Color: {self.color} Holder: {self.holder}'
        )

    def __call__(self, *args, **kwargs):
        # Mirrors add(); the returned description is discarded here.
        self.add()

    def __str__(self):
        return (
            f'Company: {self.c_name}\nModel: {self.model}\ns/n {self.sn}\n'
            f'Paper type: {self.ptype}\nColor: {self.color}\nHolder: {self.holder}'
        )
# Demo: create five printers (note printer1 and printer5 share a serial),
# stock the warehouse, then move one unit to the IT department.
printer1 = Printer('hp', 'lj 1100', '1212223', 'A4', 'BW')
printer2 = Printer('hp', 'lj 1100', '1212224', 'A4', 'BW')
printer3 = Printer('hp', 'lj 1100', '1212225', 'A4', 'BW')
printer4 = Printer('hp', 'lj 1100', '1212226', 'A4', 'BW')
printer5 = Printer('hp', 'lj 1100', '1212223', 'A4', 'BW')
"""Почему set() не отрабатывает? 1 и 5 одинаковые"""
# Translation: "Why doesn't set() deduplicate? Units 1 and 5 are identical."
# Because Equipment defines neither __eq__ nor __hash__, set() compares
# instances by identity, so printer1 and printer5 are distinct members even
# though their fields match.
warehouse = Whouse(5)
print(warehouse.total)
warehouse.get_tech_to_whouse(printer1)
warehouse.get_tech_to_whouse(printer2)
warehouse.get_tech_to_whouse(printer3)
warehouse.get_tech_to_whouse(printer4)
warehouse.get_tech_to_whouse(printer5)
warehouse.move_holder('printers', 'IT')
"""как в данном примере переместить printer3, а не последний созданный?"""
# Translation: "How, in this example, to move printer3 rather than the last
# created one?"  set.pop() removes an arbitrary element, so a specific unit
# would have to be located first (e.g. by serial number) and removed
# explicitly from the storage set.
print(warehouse.total)
print(printer1)
print(printer2)
print(printer3)
print(printer4)
print(printer5)
|
3,906 | 3bfa9d42e3fd61cf6b7ffaac687f66c2f4bc073e | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 9 20:06:32 2020
@author: Supriyo
"""
import networkx as nx
import matplotlib.pyplot as plt
# Manual graph-construction example, kept for reference:
# g = nx.Graph()
# l = [1, 2, 3]
# g.add_node(1)
# g.add_node(2)
# g.add_node(3)
# g.add_nodes_from(l)
# g.add_edge(1, 2)
# g.add_edge(2, 3)
# g.add_edge(3, 1)
# print(g.nodes())
# print(g.edges())

# Build the demo graphs.  (Fix: the previous dead `g = nx.Graph()` store,
# immediately overwritten below, is gone, and the comment now states the
# actual edge probability — it said 0.55 while the code uses 0.5.)
g = nx.complete_graph(10)
h = nx.gnp_random_graph(10, 0.5)  # 0.5 is the edge probability

# NOTE(review): both draw() calls render into the same active figure, so the
# two graphs are overlaid; insert plt.figure() between them if separate
# plots are intended.
nx.draw(g)
nx.draw(h)
plt.show()

# Export the complete graph (e.g. for Gephi).
nx.write_gexf(g, "test.gexf")
3,907 | cf7aeacedec211e76f2bfcb7f6e3cb06dbfdc36e | import hashlib
import math
import random
from set5.ch_4 import get_num_byte_len
class Server:
    """Server side of a simplified SRP (Secure Remote Password) exchange.

    Each instance draws a fresh ephemeral private key ``b``, a salt, and the
    scrambling parameter ``u`` (here chosen randomly up front rather than
    derived from the public keys, per the simplified protocol).
    """

    def __init__(self):
        self.private_key = random.randint(0, 2**100)
        self.salt = random.randint(0, 2**100)
        # Big-endian byte form of the salt, reused in every hash below.
        self.salt_bytes = self.salt.to_bytes(
            byteorder="big",
            length=get_num_byte_len(self.salt)
        )
        self.u = random.randint(0, 2**128)

    def agree_params(self, n, g, password):
        """Store group parameters (modulus n, generator g) and derive the verifier."""
        self.n = n
        self.g = g
        self.generate_password_params(password)

    def generate_password_params(self, password):
        """Derive the password verifier v = g^x mod n, x = SHA-256(salt || password)."""
        hasher = hashlib.sha256()
        hasher.update(self.salt_bytes + password.encode("ascii"))
        x = int(hasher.digest().hex(), 16)
        self.v = pow(self.g, x, self.n)

    def send_salt_public_key_u(self, client):
        """Send the salt, our public key B = g^b mod n, and u to the client."""
        self.public_key = pow(self.g, self.private_key, self.n)
        client.accept_salt_public_key_u(self.salt, self.public_key, self.u)

    def accept_public_key(self, client_public_key):
        # Client's public key A = g^a mod n.
        self.client_public_key = client_public_key

    def compute_hashes(self):
        """Compute shared secret S = (A * v^u)^b mod n and session key K = SHA-256(S)."""
        self.s = pow(self.client_public_key * pow(self.v, self.u, self.n), self.private_key, self.n)
        s_bytes = self.s.to_bytes(
            byteorder="big",
            length=get_num_byte_len(self.s)
        )
        hasher = hashlib.sha256()
        hasher.update(s_bytes)
        self.k = hasher.digest()

    def authenticate(self, client_hmac):
        """Verify the client's proof SHA-256(K || salt); True on match.

        NOTE(review): this is a plain hash of key||salt, not a real HMAC
        construction, despite the parameter name.  On mismatch both digests
        are printed for debugging and False is returned.
        """
        hasher = hashlib.sha256()
        hasher.update(self.k + self.salt_bytes)
        check_hmac = hasher.digest().hex()
        if check_hmac == client_hmac:
            return True
        else:
            print(check_hmac, client_hmac)
            return False
class Client:
    """Client side of the simplified SRP exchange; mirrors Server's math."""

    def __init__(self, n, g, password):
        self.n = n
        self.g = g
        self.password = password
        # Ephemeral private key a.
        self.private_key = random.randint(0, 2**100)

    def agree_params(self, server):
        """Share the group parameters and password with the server (setup step)."""
        server.agree_params(self.n, self.g, self.password)

    def accept_salt_public_key_u(self, salt, server_public_key, u):
        """Receive the server's salt, public key B, and scrambling parameter u."""
        self.salt = salt
        self.salt_bytes = self.salt.to_bytes(
            byteorder="big",
            length=get_num_byte_len(self.salt)
        )
        self.server_public_key = server_public_key
        self.u = u

    def send_public_key(self, server):
        """Send our public key A = g^a mod n to the server."""
        self.public_key = pow(self.g, self.private_key, self.n)
        server.accept_public_key(self.public_key)

    def compute_hashes(self):
        """Compute S = B^(a + u*x) mod n (x from salt+password) and K = SHA-256(S)."""
        hasher = hashlib.sha256()
        hasher.update(self.salt_bytes + self.password.encode("ascii"))
        x = int(hasher.digest().hex(), 16)
        self.s = pow(self.server_public_key, self.private_key + (self.u * x), self.n)
        s_bytes = self.s.to_bytes(
            byteorder="big",
            length=get_num_byte_len(self.s)
        )
        hasher = hashlib.sha256()
        hasher.update(s_bytes)
        self.k = hasher.digest()

    def authenticate(self, server):
        """Send the proof SHA-256(K || salt) to the server.

        Prints a success message, or raises when the server rejects it.
        """
        hasher = hashlib.sha256()
        hasher.update(self.k + self.salt_bytes)
        client_hmac = hasher.digest().hex()
        if server.authenticate(client_hmac):
            print("Successfully authenticated")
        else:
            raise Exception("Failed to authenticate")
class BadServer(Server):
    """Malicious SRP server: completes the handshake without knowing the
    password, records the client's proof, then brute-forces the password
    offline against a dictionary.
    """

    def __init__(self, n, g):
        # Same setup as Server, but group parameters are fixed at creation
        # (no agree_params round-trip, since no verifier can be derived).
        self.private_key = random.randint(0, 2**100)
        self.salt = random.randint(0, 2**100)
        self.salt_bytes = self.salt.to_bytes(
            byteorder="big",
            length=get_num_byte_len(self.salt)
        )
        self.u = random.randint(0, 2**128)
        self.n = n
        self.g = g

    def compute_hashes(self):
        # Nothing to compute: the real work happens offline in crack_password.
        pass

    def authenticate(self, client_hmac):
        # Accept unconditionally, but keep the proof for the offline attack.
        self.client_hmac = client_hmac
        return True

    def load_dict(self, path_to_dict):
        """Load the candidate-password wordlist (whitespace-separated)."""
        with open(path_to_dict) as dict_file:
            self.valid_words = set(dict_file.read().split())

    def crack_password(self, path_to_dict):
        """Offline dictionary attack against the recorded client proof.

        For each candidate word, replays the server-side derivation
        (x -> v -> S -> K -> proof) and compares with the captured proof.
        Prints the password on success; raises if no candidate matches.
        """
        self.load_dict(path_to_dict)
        for w in self.valid_words:
            hasher_x = hashlib.sha256()
            hasher_x.update(self.salt_bytes + w.encode("ascii"))
            x = int(hasher_x.digest().hex(), 16)
            v = pow(self.g, x, self.n)
            s = pow(self.client_public_key * pow(v, self.u, self.n), self.private_key, self.n)
            s_bytes = s.to_bytes(
                byteorder="big",
                length=get_num_byte_len(s)
            )
            hasher_k = hashlib.sha256()
            hasher_k.update(s_bytes)
            k = hasher_k.digest()
            hasher_hmac = hashlib.sha256()
            hasher_hmac.update(k + self.salt_bytes)
            check_hmac = hasher_hmac.digest().hex()
            if check_hmac == self.client_hmac:
                print("Successfully cracked password. Password = {}".format(w))
                return
        raise Exception("Failed to crack password")
def attempt_simple_srp_authenticate(client, server):
    """Run one full simplified-SRP handshake; raises via Client.authenticate
    on failure.  The call order below is the protocol sequence and must not
    be rearranged."""
    client.agree_params(server)
    client.send_public_key(server)
    server.send_salt_public_key_u(client)
    server.compute_hashes()
    client.compute_hashes()
    client.authenticate(server)
def crack_simple_srp(client, server, dict_path="/Users/Adam/Dev/cryptopals_resources/words.txt"):
    """Handshake with a malicious server, then offline-crack the password.

    Note there is no agree_params step: the bad server never learns the
    verifier, which is exactly what this attack demonstrates.

    dict_path: wordlist for the dictionary attack.  Previously a hard-coded
    machine-specific absolute path; it is now a parameter whose default
    preserves the old behaviour.
    """
    client.send_public_key(server)
    server.send_salt_public_key_u(client)
    server.compute_hashes()
    client.compute_hashes()
    client.authenticate(server)
    server.crack_password(dict_path)
if __name__=="__main__":
    # NIST 1536-bit MODP prime (same group used throughout the DH/SRP set).
    nist_p_hex = "ffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece45b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f1746c08ca237327ffffffffffffffff"
    nist_p_bytearr = bytearray.fromhex(nist_p_hex)
    n = int.from_bytes(nist_p_bytearr, byteorder="big")
    g = 2
    password = "castle"
    # 1) Honest handshake: client and server authenticate successfully.
    client = Client(n, g, password)
    server = Server()
    attempt_simple_srp_authenticate(client, server)
    # 2) The same client against a malicious server, which then recovers
    #    the password by an offline dictionary attack.
    naive_client = Client(n, g, password)
    bad_server = BadServer(n, g)
    crack_simple_srp(naive_client, bad_server)
|
3,908 | 72d1a0689d4cc4f78007c0cfa01611e95de76176 | #! /usr/bin/env python3
import arg_parser
import colors
import logging
import sys
def parse_args(argv):
    """Parse command-line arguments; argv[0] (the program name) is dropped."""
    return arg_parser.RemoteRunArgParser().parse(argv[1:])
def main(argv):
    """Program entry point: configure logging, parse args, build and launch
    the requested action.

    Returns a process exit status: 0 on success, 1 for argument-parsing or
    action-construction errors, 2 for failures while the action runs.
    """
    logging.basicConfig(
        format='%(levelname)s: %(message)s',
        level='INFO',
        handlers=[colors.ColorizingStreamHandler(sys.stderr)])
    try:
        args = parse_args(argv)
    except Exception as exc:
        # Parser failures get a full traceback.
        logging.exception(exc)
        return 1
    try:
        # args['action'] is a factory that takes the parsed args.
        action = args['action'](args)
    except Exception as exc:
        logging.error(exc)
        return 1
    try:
        # launch() returns truthy on success; invert for the shell convention
        # (0 == success).
        return not action.launch()
    except Exception as exc:
        # Show the traceback only when the user asked for DEBUG logging.
        if 'log_level' in action.config and action.config['log_level'] == 'DEBUG':
            logging.exception(exc)
        else:
            logging.error(str(exc))
        return 2
# Script entry point: the process exit code mirrors main()'s return value.
if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
3,909 | c651d49c98a4cf457c8252c94c6785dea8e9af60 | import datetime
import logging
import random
import transform
import timelapse
# merge two iterators producing sorted values
# merge two iterators producing sorted values
def merge(s1, s2):
    """Lazily merge two iterators of sorted values into one sorted stream.

    On ties the element from *s2* is yielded first (the comparison is
    ``x2 > x1``), matching the original behaviour.

    BUG FIX: when *s2* turned out to be empty after an element had already
    been consumed from *s1*, the old code did ``yield from s1`` without
    first yielding that consumed element, silently dropping it.  The
    element is now yielded before draining the rest of *s1*.
    """
    try:
        x1 = next(s1)
    except StopIteration:
        yield from s2
        return
    try:
        x2 = next(s2)
    except StopIteration:
        yield x1  # x1 was already consumed; don't lose it
        yield from s1
        return
    while True:
        if x2 > x1:
            yield x1
            try:
                x1 = next(s1)
            except StopIteration:
                yield x2
                yield from s2
                return
        else:
            yield x2
            try:
                x2 = next(s2)
            except StopIteration:
                yield x1
                yield from s1
                return
def sliding_stream(delay_secs=20):
    """Endlessly yield (timestamp, random transform) pairs.

    Timestamps start at "now" and advance by *delay_secs* seconds per item;
    each transform is drawn at random from transform.all_transforms.
    """
    step = datetime.timedelta(0, delay_secs)
    current = datetime.datetime.now()
    while True:
        yield (current, random.choice(transform.all_transforms))
        current = current + step
class Sliders(timelapse.TimeLapse):
    """TimeLapse bot that periodically "slides" into a parallel world.

    A stream of randomly chosen transforms (see sliding_stream) is merged
    into the lapsed-message stream by timestamp; each transform that
    arrives becomes the active one and is applied to every subsequent
    lapsed message.
    """

    def __init__(self, server_list, nick="Sliders", channel="#sliders", realname="Sliders",
                 sliding_window = 60, **params):
        # NOTE(review): realname is accepted but not forwarded to the base
        # class here — confirm whether TimeLapse expects it via **params.
        super().__init__(server_list, nick=nick, channel=channel, **params)
        # Interleave transform-change events with lapsed messages, ordered
        # by timestamp; sliding_window is the seconds between transforms.
        self.lapsed = merge(self.lapsed, sliding_stream(sliding_window))
        self.sliders_transform = random.choice(transform.all_transforms)

    def on_lapsed_message(self, msg):
        """Switch the active transform (announcing it on the channel), or
        forward the message run through the current transform."""
        if isinstance(msg, transform.Transform):
            self.sliders_transform = msg
            self.connection.privmsg(self.lapsed_channel,
                "\x01ACTION s'ouvre vers un monde parallèle peuplé de jumeaux "
                + msg.name + "\x01")
        else:
            super().on_lapsed_message(self.sliders_transform(msg))
|
3,910 | 4ee47435bff1b0b4a7877c06fb13d13cf53b7fce | import sys,argparse
import os,glob
import numpy as np
import pandas as pd
import re,bisect
from scipy import stats
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib.rcParams['font.size']=11
import seaborn as sns
sns.set(font_scale=1.1)
sns.set_style("whitegrid", {'axes.grid' : False})
sns.set_style("ticks",{'ytick.color': 'k','axes.edgecolor': 'k'})
matplotlib.rcParams["font.sans-serif"] = ["Arial"]
matplotlib.rcParams['mathtext.fontset'] = 'custom'
matplotlib.rcParams["mathtext.rm"] = "Arial"
# def return_dci_df(DCI_dir,subdir,hm_mark,compr_type,suffix):
# dci_file = '{}/{}/{}_{}{}.bed'.format(DCI_dir,subdir,hm_mark,compr_type,suffix)
# dci_df = pd.read_csv(dci_file,sep='\t',header=None)
# dci_df.columns=['chr','start','end','DCI']
# dci_df.index = ['_'.join(ii) for ii in dci_df[['chr','start','end']].values.astype(str)]
# return dci_df
def return_dci_df(DCI_dir, subdir, hm_mark, compr_type, suffix):
    """Load one DCI table; return it as a DataFrame, or None when absent.

    The file lives at {DCI_dir}/{subdir}/{hm_mark}_{compr_type}{suffix}.csv
    and is tab-separated despite the .csv extension.  Column 4 holds the
    row identifiers and becomes the index; the remaining seven columns are
    renamed to chr/start/end/IfOverlap/score/strand/DCI.
    """
    dci_file = f'{DCI_dir}/{subdir}/{hm_mark}_{compr_type}{suffix}.csv'
    if not os.path.isfile(dci_file):
        return None
    dci_df = pd.read_csv(dci_file, sep='\t', index_col=4)
    dci_df.columns = ['chr', 'start', 'end', 'IfOverlap', 'score', 'strand', 'DCI']
    return dci_df
def scatter_plot_compr_DCI(num_DCI_bins_df,subdir,hm_mark,compr_type,suffix,dci_thre):
    """Scatter-plot the DCI scores of two comparisons against each other.

    compr_type is a pair (x-comparison, y-comparison).  Genes whose
    WT_over_Vector DCI exceeds +dci_thre / falls below -dci_thre are
    highlighted in red / blue.  Pearson r and p of the x-vs-y regression
    are written into num_DCI_bins_df (mutated in place).  The figure is
    saved under {outdir}/{subdir}/.

    Returns (up_bins, dn_bins) — the highlighted gene indexes — or two
    empty lists when the y-comparison file does not exist.

    Relies on module-level globals: DCI_dir, outdir, cellType_labels.
    """
    compr_x = compr_type[0]
    compr_y = compr_type[1]
    # Only proceed when the y-comparison's DCI file exists.
    test_file='{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,compr_y,suffix)
    # print(test_file)
    if os.path.isfile(test_file):
        # Genes significantly up/down in the reference WT-over-Vector comparison.
        dci_df_wt_over_vector = return_dci_df(DCI_dir,subdir,hm_mark,'WT_over_Vector',suffix)
        up_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI']>dci_thre].index
        dn_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI']<-1*dci_thre].index
        dci_df_x = return_dci_df(DCI_dir,subdir,hm_mark,compr_x,suffix)
        dci_df_y = return_dci_df(DCI_dir,subdir,hm_mark,compr_y,suffix)
        # scatter plot: all genes in grey, highlighted genes on top
        plt.figure(figsize=(2.1,2.1))
        plt.scatter(dci_df_x.loc[:,'DCI'],dci_df_y.loc[:,'DCI'],c='tab:grey',s=3,alpha=1,rasterized=True,label='All genes')
        plt.scatter(dci_df_x.loc[up_bins,'DCI'],dci_df_y.loc[up_bins,'DCI'],c='tab:red',s=3,alpha=1,rasterized=True,label='Genes w/ DCI$>{}$ in WT/Vector'.format(dci_thre))
        plt.scatter(dci_df_x.loc[dn_bins,'DCI'],dci_df_y.loc[dn_bins,'DCI'],c='tab:blue',s=3,alpha=1,rasterized=True,label='Genes w/ DCI$<{}$ in WT/Vector'.format(-1*dci_thre))
        # save and plot the correlation (linear regression over all genes)
        x,y = dci_df_x.loc[:,'DCI'],dci_df_y.loc[:,'DCI']
        slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
        output_prename = '{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre)
        num_DCI_bins_df.loc[output_prename,'scatter_pearsonr_s'] = r_value
        num_DCI_bins_df.loc[output_prename,'scatter_pearsonr_p'] = p_value
        # regression line drawn over the sorted x range
        x_sort = np.sort(x)
        plt.plot(x_sort,x_sort*slope+intercept,c = 'k',ls='--',lw=.8)
        plt.text(.97,.97,'$r={:.2f}$ '.format(r_value),fontsize=10,transform=plt.axes().transAxes,ha='right',va='top')
        plt.axhline(y=0,c='k',lw=1)
        plt.axvline(x=0,c='k',lw=1)
        # # plt.title('{} over {}'.format(cellType_labels[treatment],cellType_labels[control]))
        plt.legend(fontsize=10.5,borderaxespad=0.1,labelspacing=.1,handletextpad=0.1,\
            handlelength=1,loc="upper left",markerscale=3,bbox_to_anchor=[-0.12,1.36],frameon=False)
        # Axis labels use the human-readable cell-type names.
        xa,xb = cellType_labels[compr_x.split('_')[0]],cellType_labels[compr_x.split('_')[-1]]
        ya,yb = cellType_labels[compr_y.split('_')[0]],cellType_labels[compr_y.split('_')[-1]]
        plt.xlabel('DCI score ({} over {})'.format(xa,xb),fontsize=12)
        plt.ylabel('DCI score ({} over {})'.format(ya,yb),fontsize=12)
        plt.savefig('{}/{}/scatter_{}_{}_vs_{}{}_dci{}.png'.format(outdir,subdir,hm_mark,compr_x,compr_y,suffix,dci_thre),\
            bbox_inches='tight',pad_inches=0.1,dpi=600,transparent=True)
        plt.show()
        plt.close()
        return up_bins,dn_bins
    return [],[]
def plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,flag):
    """Box-plot DCI scores of *selected_bins* across the four comparisons.

    For each comparison a one-sample t-test against 0 is run; a red '*' is
    drawn at the median when p < 0.05, and the per-comparison statistics
    are written into num_DCI_bins_df (mutated in place).  The raw values
    and the figure are saved under {outdir}/{subdir}/.

    Relies on module-level globals: DCI_dir, outdir, cellType_labels.
    Nothing happens when the WT_over_Vector file is missing.
    """
    test_file='{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,'WT_over_Vector',suffix)
    if os.path.isfile(test_file):
        box_vals = []
        xticklabels = []
        sig_vals,sig_colors = [],[]
        for compr_col in ['WT_over_Vector','DEL_over_WT','EIF_over_DEL','TPR_over_WT']:
            dci_df = return_dci_df(DCI_dir,subdir,hm_mark,compr_col,suffix)
            if dci_df is not None:
                box_val = dci_df.loc[selected_bins]['DCI'].values
                # save the values in box plots
                dci_df.loc[selected_bins].to_csv('{}/{}/box_{}_{}_genes{}_dci{}_{}.csv'.format(outdir,subdir,hm_mark,flag,suffix,dci_thre,compr_col))
                # one-sample t-test: is the mean DCI of these genes nonzero?
                s,p = stats.ttest_1samp(box_val,0)
                sig_vals.append('*' if p<0.05 else '')
                sig_colors.append('b' if s<0 else 'r')
                box_vals.append(box_val)
                xa,xb = cellType_labels[compr_col.split('_')[0]],cellType_labels[compr_col.split('_')[-1]]
                xticklabels.append('{} over {}'.format(xa,xb))
                num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'{} {} s'.format(title.split()[2],compr_col)] = '{:.2f}'.format(s)
                num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'{} {} p'.format(title.split()[2],compr_col)] = '{:.2e}'.format(p)
        #print(box_vals)
        positions = np.arange(len(box_vals))
        fig = plt.figure(figsize=(.46*len(box_vals),2.2))
        g = plt.boxplot(box_vals,positions=positions,widths = .5,patch_artist=True,\
            boxprops=dict(color='k',facecolor='w',fill=None,lw=1),\
            medianprops=dict(color='k'),showfliers=False)
        # g = plt.violinplot(box_vals)
        # for position_id in np.arange(len(positions)):
        # scatter_x = np.random.normal(positions[position_id],0.06,len(box_vals[position_id]))
        # plt.scatter(scatter_x,box_vals[position_id],color=color,s=5,zorder=0,alpha=0.6,rasterized=True)
        # for compr_pos in [[0,1,'t'],[1,2,'t'],[2,3,'t']]:
        # mark_pvalue(compr_pos,positions,box_vals)
        plt.axes().set_xticklabels(xticklabels,rotation=30,ha='right',fontsize=12)
        plt.ylabel('DCI score'.format(hm_mark),fontsize=13)
        # plt.ylim([-1,2])
        # Mark significant medians with a red star (marker is '' when n.s.).
        for ii in positions:
            plt.scatter(ii,np.median(box_vals[ii]),marker=sig_vals[ii],color='red',s=77)
        # plt.axes().text(ii,0,sig_vals[ii-1],fontsize=28,va='top',ha='center',color='red')
        plt.axhline(y=0,c='k',lw=1)
        plt.title(title,fontsize=12)
        # plt.legend(fontsize=16,borderaxespad=0.2,labelspacing=.2,handletextpad=0.2,handlelength=1,loc="upper right",frameon=False)
        plt.savefig('{}/{}/box_{}_{}_genes{}_dci{}.png'.format(outdir,subdir,hm_mark,flag,suffix,dci_thre),\
            bbox_inches='tight',pad_inches=0.1,dpi=600,transparent=True)
        plt.show()
        plt.close()
# ==== main()
# Human-readable labels for each cell type / construct used in plot legends.
cellType_labels= {'Vector':'Vector',\
                  'WT':'WT',\
                  'DEL':'$\Delta$cIDR',\
                  'EIF':'UTX-eIF$_{IDR}$',\
                  'TPR':'$\Delta$TPR',\
                  'MT2':'MT2',\
                  'FUS':'UTX-FUS$_{IDR}$'}
outdir = 'f4_promoter_DCI_scatter'
os.makedirs(outdir,exist_ok=True)
# project_dir="/nv/vol190/zanglab/zw5j/since2019_projects/UTX_HaoJiang"
project_dir="/Volumes/zanglab/zw5j/since2019_projects/UTX_HaoJiang"
# DCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f2_DEG_promoter_DCI_non_normalized/f1_promoter_DCI_rename'.format(project_dir)
DCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f1_DEG_promoter_DCI/f1_promoter_DCI'.format(project_dir)
# DCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f0_run_bart3d_new/bart3d_DCI_rename'.format(project_dir)
# expr_dir='{}/f0_data_process/rna_seq/data_1st_submit_STAR_RSEM_new/f6_deg/f1_deseq2_out'.format(project_dir)
# expr_dir='{}/f0_data_process/rna_seq/data_1st_submit_STAR_RSEM_new/f6_deg/fz_deseq2_out_combined'.format(project_dir)
# deg_df = pd.read_csv('{}/deseq2_combined.csv'.format(expr_dir),index_col=0)
# Parameter grid: data batches, comparison pairs, histone marks, thresholds.
subdirs=['bart3d_dis200k_data_1st_submit','bart3d_dis200k_data202008',
         'bart3d_dis500k_data_1st_submit','bart3d_dis500k_data202008']
compr_types = [['WT_over_Vector','DEL_over_WT'],['DEL_over_WT','EIF_over_DEL'],['WT_over_Vector','TPR_over_WT']]
hm_marks = ['H3K4me3','H3K27ac']
suffixes=['_promoter_DCI']
dci_thres = [2,5]
# Accumulates counts and test statistics across all parameter combinations.
num_DCI_bins_df = pd.DataFrame()
# NOTE: slices ([1:2], [1:]) deliberately restrict the sweep to a subset.
for subdir in subdirs[1:2]:
    outdir_tmp='{}/{}'.format(outdir,subdir)
    os.makedirs(outdir_tmp,exist_ok=True)
    for hm_mark in hm_marks[:]:
        for suffix in suffixes[:]:
            for dci_thre in dci_thres[1:]:
                for compr_type in compr_types[:]:
                    up_bins,dn_bins = scatter_plot_compr_DCI(num_DCI_bins_df,subdir,hm_mark,compr_type,suffix,dci_thre)
                # the box plot are exactly the same
                # Only record/plot once per threshold (bins do not depend on compr_type here).
                if compr_type[1]=='DEL_over_WT':
                    num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'# up genes'] = len(up_bins)
                    num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'# dn genes'] = len(dn_bins)
                    ##### box plot
                    selected_bins = up_bins
                    color = 'tab:red'
                    title = 'Genes w/ DCI$>{}$ \n in WT over Vector'.format(dci_thre)
                    plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,'increased')
                    selected_bins = dn_bins
                    color = 'tab:blue'
                    title = 'Genes w/ DCI$<{}$ \n in WT over Vector'.format(-1*dci_thre)
                    plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,'decreased')
# Persist the summary table of counts and statistics.
num_DCI_bins_df.to_csv(outdir+os.sep+'num_DCI_promoter_summary.csv')
|
3,911 | 4a620957b2cd1e5945d98e49a5eae5d5592ef5a2 | import tests.functions as functions
if __name__ == "__main__":
    # functions.validate_all_redirects("linked.data.gov.au-vocabularies.json")

    # Vocabulary identifiers whose Apache RewriteRule entries should be printed.
    new = [
        "anzsrc-for",
        "anzsrc-seo",
        "ausplots-cv",
        "australian-phone-area-codes",
        "care",
        "corveg-cv",
        "nrm",
        "reg-roles",
        "reg-statuses",
        "address-type",
        "australian-states-and-territories",
        "bc-labels",
        "data-access-rights",
        "dataciteroles",
        "depth-reference",
        "geo-commodities",
        "geoadminfeatures",
        "geofeatures",
        "geological-observation-instrument",
        "geological-observation-method",
        "geological-observation-type",
        "geological-sites",
        "geometry-roles",
        "georesource-report",
        "gsq-alias",
        "gsq-dataset-theme",
        "gsq-roles",
        "gsq-sample-facility",
        "iso639-1",
        "iso-19157-data-quality-dimension",
        "iso-iec-25012-data-quality-dimension",
        "nsw-quality-dimension",
        "party-identifier-type",
        "qg-agent",
        "qg-file-types",
        "qg-security-classifications",
        "qg-sites",
        "qld-data-licenses",
        "iso19115-1/RoleCode",
        "minerals",
        "nslvoc",
        "observation-detail-type",
        "organisation-activity-status",
        "organisation-name-types",
        "organisation-type",
        "party-relationship",
        "queensland-crs",
        "qld-resource-permit-status",
        "qld-resource-permit",
        "qld-utm-zones",
        "geou",
        "iso11179-6/RolesAndResponsibilities",
        "qesd-qkd",
        "qesd-uom",
        "qld-obsprop",
        "report-detail-type",
        "report-status",
        "resource-project-lifecycle",
        "resource-types",
        "result-type",
        "sample-detail-type",
        "sample-location-status",
        "sample-location-types",
        "sample-material",
        "sample-preparation-methods",
        "sample-relationship",
        "sample-type",
        "seismic-dimensionality",
        "site-detail-type",
        "site-relationships",
        "site-status",
        "supermodel/terms",
        "survey-detail-type",
        "survey-method",
        "survey-relationship-type",
        "survey-status",
        "survey-type",
        "telephone-type",
        "tk-labels",
        "trs"
    ]

    # Read the Apache rewrite configuration.  The context manager closes the
    # handle deterministically (the original leaked the open file object).
    with open("../conf/linked.data.gov.au-vocabularies.conf") as conf:
        lines = conf.readlines()

    # For each vocabulary, print the first matching rule as
    # "<resolved source URL> -- <redirect target>".
    for n in new:
        for line in lines:
            if n in line:
                # Rule lines look like: RewriteRule ^<path>$ "<target>" [R=...]
                pattern, match = line.split("$", 1)
                print(pattern.strip().replace("RewriteRule ^", "https://linked.data.gov.au/"), " -- ", match.split("[R")[0].replace('"', '').strip())
                break
break |
3,912 | 3e7d80fdd1adb570934e4b252bc25d5746b4c68e | from py.test import raises
from ..lazymap import LazyMap
def test_lazymap():
    """Basic LazyMap behaviour: length, mapped access, slicing, delegation."""
    source = list(range(10))
    doubled = LazyMap(source, lambda v: v * 2)
    assert len(doubled) == 10
    assert doubled[1] == 2
    # Slicing yields another LazyMap, not a plain list.
    assert isinstance(doubled[1:4], LazyMap)
    # Unknown attributes are delegated straight to the wrapped list.
    assert doubled.append == source.append
    assert repr(doubled) == '<LazyMap [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]>'
def test_lazymap_iter():
    """Iterating a LazyMap yields mapped values and raises StopIteration at the end."""
    data = list(range(2))
    lm = LazyMap(data, lambda x: 2 * x)
    iter_lm = iter(lm)
    # Use the next() builtin: iterator objects expose __next__ on Python 3,
    # so the Python-2-only .next() method calls would fail with AttributeError.
    assert next(iter_lm) == 0
    assert next(iter_lm) == 2
    with raises(StopIteration):
        next(iter_lm)
|
3,913 | 227e78312b5bad85df562b6ba360de352c305e7b | import sys
# Read a single word and echo it with its first letter upper-cased.
word = input()
if word[0].islower():
    # Upper-case only the first character; the remainder is untouched.
    print(word[0].upper() + word[1:])
else:
    # First character is not a lower-case letter: echo the word unchanged.
    print(word)
sys.exit()
|
3,914 | adae1d7cc2a866c9bc3cd21cb54a0191389f8083 | import sys, os
def carp():
sys.stderr = sys.stdin
print "content-type: text/plain"
print
#carp()
import sesspool
import cornerhost.config
## set up session
# Session records are persisted in an on-disk pool.
pool = sesspool.SessPool("sess/sessions.db")
# REQ, RES and ENG are injected by the hosting CGI framework -- they are not
# defined in this file.
SESS = sesspool.Sess(pool, REQ, RES)
SESS.start()
# Make sure the session is flushed/closed when the engine shuts down.
ENG.do_on_exit(SESS.stop)
CLERK = cornerhost.config.makeClerk()
3,915 | e9de42bb8ed24b95e5196f305fe658d67279c078 | import types
from robot.libraries.BuiltIn import BuiltIn
def GetAllVariableBySuffix (endswith):
all_vars = BuiltIn().get_variables()
result = {}
for var_name, var in all_vars.items():
#print var_name
if var_name.endswith(endswith+"}"):
print var_name
#print var
def CountFinalPoints ():
    # Sum every Robot Framework variable whose name ends in "Points}"
    # (e.g. "${Task1Points}") and return the integer total.
    all_vars = BuiltIn().get_variables()
    # Cleanup: the original initialised `result = 0` and then re-cast the
    # literal with int(result); the redundant cast has been removed.
    result = 0
    for var_name, var in all_vars.items():
        if var_name.endswith("Points}"):
            result += int(var)
    return result
|
3,916 | dd79ffe3922494bcc345aec3cf76ed9efeb5185c | #!/usr/bin/env python3
"""
02-allelefreq.py <vcf file>
"""
import sys
import matplotlib.pyplot as plt
import pandas as pd
# Collect allele frequencies from column 5 of the input table.
maf = []
# Use a context manager so the input file is closed even if parsing fails
# (the original left the handle open for the life of the process).
with open(sys.argv[1]) as vcf:
    for line in vcf:
        # Skip the header row.  NOTE(review): this also drops any *data*
        # line that happens to contain the substring "CHR" -- confirm the
        # header is the only such line in these files.
        if "CHR" in line:
            continue
        cols = line.rstrip("\n").split()
        maf.append(float(cols[4]))

# Plot a normalised histogram of the frequencies and save it to disk.
fig, ax = plt.subplots()
ax.hist(maf, bins=100, density=True)
plt.tight_layout()
fig.savefig("allelefeq.png")
plt.close(fig)
3,917 | c0f9a1c39ff5d7cc99a16cf00cddcc14705937ba | from datetime import datetime
from random import seed
from pandas import date_range, DataFrame
import matplotlib.pyplot as plt
from matplotlib import style
from numpy import asarray
import strategy_learner as sl
from util import get_data
style.use('ggplot')
seed(0)
def run_algo(sym, investment, start_date, end_date, bench_sym):
    """Train a StrategyLearner over [start_date, end_date] and return its trades.

    NOTE(review): this function returns a single DataFrame, but the __main__
    block unpacks the result into two values (trades, benchmark) -- one of
    the two sides is out of date; confirm which is intended.
    """
    # instantiate the strategy learner
    # `verbose` is read from the module-level global set in __main__.
    learner = sl.StrategyLearner(bench_sym=bench_sym, verbose=verbose)
    # train the learner
    learner.add_evidence(symbol=sym, start_date=start_date, end_date=end_date, investment=investment)
    # get some data for reference
    syms = [sym]
    dates = date_range(start_date, end_date)
    prices_all = get_data(symbols=syms, dates=dates, bench_sym=bench_sym)
    prices = prices_all[syms]
    # test the learner
    df_trades = learner.test_policy(symbol=sym, start_date=start_date, end_date=end_date, investment=investment)
    return df_trades
def evaluate(sym, orders, start_val, fee, slippage, bench_sym):
    """Simulate executing `orders` for `sym` and return the daily portfolio value.

    orders: DataFrame indexed by date with an `Order` column ("BUY"/"SELL").
    start_val: starting cash. fee: flat per-trade fee. slippage: fractional
    price penalty applied to trade proceeds.  Returns a Series of total
    portfolio value (positions + cash) per day.
    """
    # Read orders file
    orders_df = orders
    orders_df.sort_index(inplace=True)
    start_date = orders_df.index[0]
    end_date = orders_df.index[-1]
    # Collect price data for each ticker in order
    df_prices = get_data(symbols=[sym], dates=date_range(start_date, end_date), bench_sym=bench_sym)
    # NOTE(review): positional `axis` argument to drop() is deprecated in
    # modern pandas -- drop(bench_sym, axis=1) is the explicit form.
    df_prices = df_prices.drop(bench_sym, 1)
    # Cash column has constant "price" 1 so it can share the holdings math.
    df_prices["cash"] = 1
    # Track trade data
    df_trades = df_prices.copy()
    df_trades[:] = 0
    # Populate trade dataframe
    for i, date in enumerate(orders_df.index):
        # Get order information
        if orders_df.Order[i] == "BUY":
            order = 1
        else:
            order = -1
        # Start with 1/2 position at first
        if i == 0:
            shares = 100
        else:
            shares = 200
        # Calculate change in shares and cash
        df_trades[sym][date] += order * shares
        df_trades['cash'][date] -= order * (1 - slippage) * shares * df_prices[sym][date] - fee
    # Track total holdings
    df_holdings = df_prices.copy()
    df_holdings[:] = 0
    # Include starting value
    df_holdings['cash'][0] = start_val
    # Update first day of holdings
    for c in df_trades.columns:
        df_holdings[c][0] += df_trades[c][0]
    # Update every day, adding new day's trade information with previous day's holdings
    for i in range(1, len(df_trades.index)):
        for c in df_trades.columns:
            df_holdings[c][i] += df_trades[c][i] + df_holdings[c][i - 1]
    # Track monetary values
    df_values = df_prices.mul(df_holdings)
    # Define port_val
    port_val = df_values.sum(axis=1)
    return port_val
if __name__ == "__main__":
    # Experiment configuration: traded symbol, benchmark, and cost model.
    symbol = "NASDAQ1001440"
    bench_sym = "S&P5001440"
    verbose = False
    investment = 100000  # 100k = 100 contracts
    fee = 0
    slippage = 0.0025  # in %
    # In-sample (training) and out-of-sample (evaluation) date windows.
    start_date_insample = datetime(2013, 5, 1)
    end_date_insample = datetime(2015, 5, 1)
    start_date_outsample = datetime(2015, 5, 2)
    end_date_outsample = datetime(2017, 12, 7)
    # Train
    # NOTE(review): run_algo as defined above returns a single DataFrame;
    # unpacking into (trades, benchmark) here will raise -- confirm which
    # version of run_algo is current.
    df_trades_in, benchmark_in = run_algo(sym=symbol, investment=investment, start_date=start_date_insample, end_date=end_date_insample, bench_sym=bench_sym)
    df_trades_out, benchmark_out = run_algo(sym=symbol, investment=investment, start_date=start_date_outsample, end_date=end_date_outsample, bench_sym=bench_sym)
    # Evaluate
    # Simulate the learned strategy and the buy-and-hold benchmark in both windows.
    insample = evaluate(sym=symbol, orders=df_trades_in, start_val=investment, fee=fee, slippage=slippage, bench_sym=bench_sym)
    insample = DataFrame(insample)
    bench_insample = evaluate(sym=symbol, orders=benchmark_in, start_val=investment, fee=fee, slippage=slippage, bench_sym=bench_sym)
    bench_insample = DataFrame(bench_insample)
    outsample = evaluate(sym=symbol, orders=df_trades_out, start_val=investment, fee=fee, slippage=slippage, bench_sym=bench_sym)
    outsample = DataFrame(outsample)
    bench_outsample = evaluate(sym=symbol, orders=benchmark_out, start_val=investment, fee=fee, slippage=slippage, bench_sym=bench_sym)
    bench_outsample = DataFrame(bench_outsample)
    # Cumulative returns
    # Final portfolio value = last element of each value series.
    port_ret_in = float(asarray(insample.values)[-1])
    port_ret_out = float(asarray(outsample.values)[-1])
    bench_ret_in = float(asarray(bench_insample.values)[-1])
    bench_ret_out = float(asarray(bench_outsample.values)[-1])
    # Print results
    print()
    print("Cumulative return in-sample:\t\t${:,.2f}\t\t(+{:.2f} %)".format(port_ret_in - investment, 100 * (port_ret_in - investment) / investment))
    print("Benchmark return in-sample:\t\t\t${:,.2f}\t\t(+{:.2f} %)".format(bench_ret_in - investment, 100 * (bench_ret_in - investment) / investment))
    print("Cumulative return out-of-sample:\t${:,.2f}\t\t(+{:.2f} %)".format(port_ret_out - investment, 100 * (port_ret_out - investment) / investment))
    print("Benchmark return out-of-sample:\t\t${:,.2f}\t\t(+{:.2f} %)".format(bench_ret_out - investment, 100 * (bench_ret_out - investment) / investment))
    # Plot charts
    # Side-by-side equity curves: strategy vs buy-and-hold, per window.
    plt.subplot(1, 2, 1)
    plt.plot(insample.index, insample, c="mediumseagreen", lw=3)
    plt.plot(bench_insample.index, bench_insample, c="skyblue")
    plt.legend(["Strategy", "Buy and Hold"])
    plt.title("In-sample")
    plt.xlabel("Date")
    plt.ylabel("Value")
    plt.subplot(1, 2, 2)
    plt.plot(outsample.index, outsample, c="mediumseagreen", lw=3)
    plt.plot(bench_outsample.index, bench_outsample, c="skyblue")
    plt.legend(["Strategy", "Buy and Hold"])
    plt.title("Out-of-sample")
    plt.xlabel("Date")
    plt.ylabel("Value")
    plt.show()
|
3,918 | ff1346060141ee3504aa5ee9de3a6ec196bcc216 | from skimage.measure import structural_similarity as ssim
import matplotlib.pyplot as plt
import numpy as np
import cv2
import os
import pathlib
import warnings
from PIL import Image
from numpy import array
# Build one feature-count vector per image "line": folders under source_path
# are cluster/feature ids, filenames encode the line ("<a>-<b>-...") and the
# class label ("G" in the name => label 0, otherwise 1).
source_path = "/home/justin/Desktop/FeatureClustering/"
feature_length = len(os.listdir(source_path))
vector_data = []       # one count-vector (len = number of feature folders) per line
recorded_lines = []    # line names already seen; index aligns with vector_data
labels = []            # class label per line
for folder in os.listdir(source_path):
    for filename in os.listdir(source_path + folder +"/"):
        if(filename != "---.png"):
            # Line id = first two dash-separated tokens of the filename.
            linename = filename.split("-")
            linename = linename[0]+"-"+linename[1]
            if(linename not in recorded_lines):
                vector = np.zeros(shape=(feature_length))
                label = 0 if "G" in filename else 1
                vector_data.append(vector)
                labels.append(label)
                recorded_lines.append(linename)
                # NOTE(review): on the *first* occurrence of a line the count
                # for the current folder is never incremented -- each line
                # appears to undercount by one.  Confirm this is intentional.
            else:
                index = recorded_lines.index(linename)
                vector_data[index][int(folder)] += 1
#print(np.c_[recorded_lines,vector_data])
np.save("/home/justin/Desktop/vector_data.npy", vector_data)
np.save("/home/justin/Desktop/label_data.npy", labels)
3,919 | 9e01ba8c489791ec35b86dffe12d0cedb5f09004 | import pandas as pd
from scipy.stats import shapiro
import scipy.stats as stats
# Load the A/B test data: one sheet per experiment arm.
df_test = pd.read_excel("datasets/ab_testing_data.xlsx", sheet_name="Test Group")
df_control = pd.read_excel("datasets/ab_testing_data.xlsx", sheet_name="Control Group")
# Quick exploratory look at both frames.  NOTE: these bare expressions only
# display output in a notebook/REPL; as a script they are no-ops.
df_test.head()
df_control.head()
df_control.info()
df_test.info()
df_test.shape
df_control.shape
# Setting threshold value for outliers
def outlier_thresholds(dataframe, variable, low_quantile=0.05, up_quantile=0.95):
    """Return (low_limit, up_limit) outlier bounds for one column.

    The bounds are the chosen quantiles extended by 1.5x the
    inter-quantile range on each side (Tukey-style fences).
    """
    q_low = dataframe[variable].quantile(low_quantile)
    q_high = dataframe[variable].quantile(up_quantile)
    spread = q_high - q_low
    # Anything outside [q_low - 1.5*spread, q_high + 1.5*spread] is an outlier.
    return q_low - 1.5 * spread, q_high + 1.5 * spread
# Checks for any outliers in the variable.
def has_outliers(dataframe, numeric_columns):
    """Print "<column> : <count> outliers" for each column that has outliers.

    Bounds come from outlier_thresholds(); columns without outliers produce
    no output.  Returns None.
    """
    for column in numeric_columns:
        lower, upper = outlier_thresholds(dataframe, column)
        # Rows falling outside the fence on either side.
        outliers = dataframe[(dataframe[column] > upper) | (dataframe[column] < lower)]
        if outliers.any(axis=None):
            print(column, " : ", outliers.shape[0], "outliers")
# Report outlier counts per column (has_outliers returns None; it prints).
for var in df_control:
    print(var, "has ", has_outliers(df_control, [var]), "Outliers")
for var in df_test:
    print(var, "has ", has_outliers(df_test, [var]), "Outliers")
# How would you describe the hypothesis of the A / B test?
# H0 : There is no statistical difference between the control and test groups in terms of average number of purchases.
# H1 : There is a statistical difference between the control and test groups in terms of the average number of purchases.
df_control["Purchase"].mean()
df_test["Purchase"].mean()
group_a = df_control["Purchase"]
group_b = df_test["Purchase"]
# 1- Assumption Check
# 1.1 - Normality Assumption (Shapiro-Wilk test per group)
test_statistics, pvalue = shapiro(group_a)
print('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))
pvalue < 0.05
# If p-value <0.05 HO rejected.
# If p-value is not <0.05 H0 CAN NOT be rejected.
# group_a is distributed normally.
test_statistics, pvalue = shapiro(group_b)
print('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))
pvalue < 0.05
# If p-value <0.05 HO rejected.
# If p-value is not <0.05 H0 CAN NOT be rejected.
# group_b is distributed normally.
# 1.2 - Variance Homogeneity Assumption (Levene's test)
# H0: Variances Are Homogeneous
# H1: Variances Are Not Homogeneous
test_statistics, pvalue = stats.levene(group_a, group_b)
print('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))
pvalue < 0.05
# If p-value <0.05 HO rejected.
# If p-value is not <0.05 H0 CAN NOT be rejected.
# Variance homogeneity provided.
# HO: there is no statistical difference between the control and test groups in terms of average number of purchases.
# H1: there is a statistical difference between the control and test groups in terms of average number of purchases
# 1.1 Independent two-sample t-test if assumptions are provided (parametric test)
# equal_var=True because Levene's test did not reject variance homogeneity.
test_statistics, pvalue = stats.ttest_ind(group_a, group_b, equal_var=True)
print('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))
# Can we make statistically significant results?
# There is no statistically significant difference between the control group and test groups.
# The two groups are alike.
# Which test did you use? Why is that?
# We used the two-sample t-test (parametric test) since both assumptions are satisfied
# What is your advice to the customer?
# There is no statistical difference between average bidding and maximum bidding
# It can be preferred with a low cost per click.
# We can evaluate the differences in interaction gain and conversion rates and determine which method is more profitable.
# The test can be extended for 1 month.
# The number of observations can be increased.
3,920 | 9852d2a15047b110c7f374fd75e531c60c954724 | # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
# (c) Simen Sommerfeldt, @sisomm, simen.sommerfeldt@gmail.com Licensed as CC-BY-SA
import os
import argparse,time
import pygame
import paho.mqtt.client as paho
# Command-line options: MQTT broker address and verbosity level.
parser = argparse.ArgumentParser()
parser.add_argument("-s","--server", default="127.0.0.1", help="The IP address of the MQTT server")
parser.add_argument("-v", "--verbosity", type=int, choices=[0, 1], default=0,
                   help="increase output verbosity")
args = parser.parse_args()
def task_laugh():
    # Play the witch-laugh sample via pygame's music channel.
    pygame.mixer.music.load("../sounds/witchlaugh.wav")
    pygame.mixer.music.play()
def task_goodbye():
    # Played when a "GOODBYE" message arrives.
    pygame.mixer.music.load("../sounds/despicable.wav")
    pygame.mixer.music.play()
def task_hello():
    # Played when a "HELLO" message arrives.
    pygame.mixer.music.load("../sounds/mday.wav")
    pygame.mixer.music.play()
def task_doh():
    # Played when a "DOH" message arrives; logs to stdout first.
    print("SOUNDPLAYER DOH!")
    pygame.mixer.music.load("../sounds/doh.wav")
    pygame.mixer.music.play()
def on_message(mosq, obj, msg):
    # MQTT message callback: dispatch on the payload text to a sound task.
    # NOTE(review): under paho-mqtt on Python 3, msg.payload is *bytes*, so
    # these comparisons against str literals would never match (and the
    # string concatenation in the log line would raise) -- confirm this
    # script targets Python 2 or decode the payload first.
    print("SOUNDPLAYER: Message received on topic "+msg.topic+" with payload "+msg.payload)
    print(len(msg.payload));
    if(msg.payload=="GOODBYE"):
        task_goodbye()
    if(msg.payload=="HELLO"):
        task_hello()
    if(msg.payload=="DOH"):
        task_doh()
    if(msg.payload=="LAUGH"):
        task_laugh()
print("SOUNDPLAYER: Connecting")
# Use the PID to give each broker connection a unique client id.
mypid = os.getpid()
client = paho.Client("sound_broker_"+str(mypid))
client.connect(args.server)
connect_time=time.time()
client.on_message = on_message
# Listen for commands on the incoming topic (QoS 0).
client.subscribe('/raspberry/1/incoming',0)
pygame.mixer.init()
try:
    # Pump the MQTT network loop until it reports an error (non-zero).
    while client.loop()==0:
        pass
except KeyboardInterrupt:
    # Clean shutdown on Ctrl-C: unsubscribe and drop the connection.
    print('SOUNDPLAYER: Interrupt')
    client.unsubscribe("/raspberry/1/incoming")
    client.disconnect()
3,921 | bc25338612f525f616fb26c64d8b36667d297d40 | from django.shortcuts import render,get_object_or_404, redirect
from django.contrib import admin #어드민 쓸꺼면 써야됨
from .models import Blog #앱을 가지고 오겠다는거
from django.utils import timezone
admin.site.register(Blog) #블로그 형식을 가져와 등록하겠다.
# Create your views here.
def home(request):
    """Render the home page with the Blog manager exposed as 'blogs'."""
    all_blogs = Blog.objects
    context = {'blogs': all_blogs}
    return render(request, 'home.html', context)
def detail(request, blog_id):
    """Render one blog entry; respond 404 when the primary key is unknown."""
    entry = get_object_or_404(Blog, pk=blog_id)
    return render(request, 'detail.html', {'blog': entry})
def new(request):
    """Render the empty new-entry form."""
    return render(request,'new.html')
def create(request):
    """Create a Blog entry from submitted form fields and redirect to it."""
    blog=Blog()
    # NOTE(review): values are read from request.GET; data-modifying form
    # submissions conventionally use POST -- confirm the template's method
    # before changing this.
    blog.title=request.GET['title']
    blog.body=request.GET['body']
    blog.pub_date=timezone.datetime.now()
    blog.save()
    # Redirect to the detail page of the newly created entry.
    return redirect('/blog/'+str(blog.id))
3,922 | 2542998c3a7decd6329856a31d8e9de56f82bae1 | from collections import namedtuple
from weakref import ref
l = list()   # weak references to the Point instances
_l = list()  # strong references that keep the Points alive
# Point = namedtuple('Point', ['x', 'y'])
class Point:
    """A simple mutable 2-D point."""

    def __init__(self, x, y):
        # Store both coordinates as plain attributes.
        self.x = x
        self.y = y
def callback(ref):
    # Finalizer passed to weakref.ref: runs when the referent is collected.
    print ('__del__', ref)
# Create 10 Points, keep a weak reference (with finalizer) and a strong
# reference to each.
for x in range(10):
    p = Point(x,x**2)
    t = ref(p,callback)
    print(t)
    l.append(t)
    _l.append(p)
print(len(l),l)
print(len(_l),_l)
# Drop every strong reference to the 7th Point; its weakref dies and the
# callback fires.
t = _l[6]
del t,_l[6]
print(len(_l),_l)
# print(len(l),l)
3,923 | 4692b2d19f64b3b4bd10c5eadd22a4b5a2f2ef37 | from custom_layers import custom_word_embedding
from custom_layers import Attention
from utils import load_emb_weights
import torch
from torch import nn
class classifier(nn.Module):
    """LSTM text classifier: embedding -> (bi)LSTM -> linear -> softmax."""
    #define all the layers used in model
    def __init__(self, embedding_dim, hidden_dim, output_dim, n_layers, embed_weights,
                 bidirectional=False, glove=True, init=True, dropout=0):
        """Build the network.

        embed_weights: pretrained embedding matrix (vocab_size x embedding_dim).
        glove: if True use the frozen custom embedding; else a trainable one.
        init: if True apply xavier/zero initialisation to LSTM and FC weights.
        """
        #Constructor
        super().__init__()
        self.bidirectional = bidirectional
        if glove:
            # Embedding layer using GloVe
            self.embedding = custom_word_embedding(embed_weights)
        else:
            # Embedding layer without GloVe
            self.embedding = nn.Embedding(embed_weights.shape[0], embed_weights.shape[1])
        # LSTM layer and initialization
        self.lstm = nn.LSTM(embedding_dim,
                           hidden_dim,
                           num_layers=n_layers,
                           bidirectional=bidirectional,
                           dropout=dropout,
                           batch_first=True)
        if init:
            # Zero biases, xavier-normal weights.
            for name, param in self.lstm.named_parameters():
                if 'bias' in name:
                    nn.init.constant_(param, 0.0)
                elif 'weight' in name:
                    nn.init.xavier_normal_(param)
        # Dense layer with initialization
        if self.bidirectional:
            self.fc = nn.Linear(hidden_dim * 2, output_dim)
        else:
            self.fc = nn.Linear(hidden_dim * 1, output_dim)
        if init:
            nn.init.xavier_normal_(self.fc.weight)
        #activation function
        #self.act = nn.Sigmoid()
        self.act = nn.Softmax(dim = 1)
    def forward(self, text, text_lengths=None):
        """Classify a batch of token-id sequences; returns softmax scores.

        NOTE(review): assumes the input carries a leading singleton axis that
        is stripped below, and that a CUDA device is available (.cuda()).
        """
        #text = [batch size,sent_length]
        text = text.view(text.size()[1], -1) # Remove the useless 1st axis
        embedded = self.embedding(text.long())
        #embedded = [batch size, sent_len, emb dim]
        # Hard-coded GPU transfer: breaks on CPU-only machines.
        embedded = embedded.float().cuda()
        #packed sequence
        #packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths,batch_first=True)
        #si = embedded.size()
        #embedded = embedded.view(si[1],si[2],si[3])
        packed_output, (hidden, cell) = self.lstm(embedded)
        #hidden = [batch size, num layers * num directions,hid dim]
        #cell = [batch size, num layers * num directions,hid dim]
        #concat the final forward and backward hidden state
        if self.bidirectional:
            hidden = torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1)
        #hidden = [batch size, hid dim * num directions]
        dense_outputs=self.fc(hidden)
        #Final activation function
        outputs=self.act(dense_outputs)
        return outputs
class AT_LSTM(nn.Module):
    """Aspect-based LSTM classifier (AT-LSTM / ATAE-LSTM variants).

    at: enable the attention layer over LSTM outputs.
    ae: ATAE mode -- concatenate the aspect embedding to each word embedding.
    """
    #define all the layers used in model
    def __init__(self, embedding_dim, aspect_embedding_dim, hidden_dim,
                 output_dim, n_layers, embed_weights, at=True, ae=False, dropout=0):
        #Constructor
        super().__init__()
        # ATAE ?
        self.ae = ae
        self.at = at
        self.embedding_dim= embedding_dim
        # Embedding layer using GloVe or fasttext
        self.embedding = custom_word_embedding(embed_weights)
        # Embedding layer using Glove for aspects
        self.aspects_embedding = custom_word_embedding(embed_weights)
        # Embedding layer without GloVe
        # self.embedding = nn.Embedding(emb_mat.shape[0], emb_mat.shape[1])
        # LSTM layer and initialization
        # In ATAE mode the input is word embedding + aspect embedding, hence 2x.
        if self.ae:
            self.lstm = nn.LSTM(embedding_dim*2,
                               hidden_dim,
                               num_layers=n_layers,
                               bidirectional=False,
                               dropout=dropout,
                               batch_first=True)
        else:
            self.lstm = nn.LSTM(embedding_dim,
                               hidden_dim,
                               num_layers=n_layers,
                               bidirectional=False,
                               dropout=dropout,
                               batch_first=True)
        # Zero biases, xavier-normal weights.
        for name, param in self.lstm.named_parameters():
            if 'bias' in name:
                nn.init.constant_(param, 0.0)
            elif 'weight' in name:
                nn.init.xavier_normal_(param)
        # Attention layer with initialization
        if self.at:
            self.attention = Attention(aspect_embedding_dim, hidden_dim)
            self.attention.xavier_init()
        # Final dense layer with initialization
        self.fc = nn.Linear(embedding_dim, output_dim)
        nn.init.xavier_normal_(self.fc.weight)
        #activation function
        #self.act = nn.Sigmoid()
        self.act = nn.Softmax(dim = 1)
    def forward(self, inp, text_lengths=None):
        """Classify a (text, aspect-category) pair; returns softmax scores.

        inp[0]: token ids with a leading singleton axis; inp[1]: category ids.
        NOTE(review): hard-coded .cuda() calls assume a GPU is available.
        """
        text = inp[0].view(inp[0].size()[1], -1) # Remove the useless 1st axis
        #text = [batch_size, sent_length]
        categories = inp[1].view(inp[1].size()[1]).long() #categories = [batch_size]
        embedded = self.embedding(text.long())
        # ATAE
        # Append the aspect embedding to every time step of the sequence.
        if self.ae:
            embedded_input_aspect = self.aspects_embedding(categories)
            embedded_input_aspect = embedded_input_aspect.view(embedded_input_aspect.size()[0],1,self.embedding_dim)
            embedded_input_aspect = embedded_input_aspect.repeat(1,embedded.size()[1],1)
            embedded = torch.cat((embedded, embedded_input_aspect), -1)
        #packed sequence
        #packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths,batch_first=True)
        #si = embedded.size()
        #embedded = embedded.view(si[1],si[2],si[3])
        embedded = embedded.float().cuda()
        packed_output, (hidden, cell) = self.lstm(embedded)
        #packed_output = [batch_size, sent_length, hid_dim]
        #hidden = [batch size, num layers * num directions,hid dim]
        #cell = [batch size, num layers * num directions,hid dim]
        embedded_aspects = self.aspects_embedding(categories)
        embedded_aspects = embedded_aspects.float().cuda()
        #embedded_aspects = [batch_size, aspect_embedding_dim]
        # With attention, pool the LSTM outputs weighted by aspect relevance;
        # otherwise fall back to the final hidden state.
        if self.at:
            final_hidden = self.attention(embedded, embedded_aspects, packed_output)
        else:
            final_hidden = hidden
        #hidden = [batch size, hid dim * num directions]
        dense_outputs=self.fc(final_hidden)
        #Final activation function
        outputs=self.act(dense_outputs)
        return outputs
3,924 | ce97da4aab2b9de40267730168690475c899526d | import os,sys,glob
sys.path.append("../../../../libs/VASNet/")
from VASNet_frame_scoring_lib import *
sys.path.append("../../../config")
from config import *
if __name__ == '__main__':
    #************************************************************************
    # Purpose: frame scoring (Summarizing Videos with Attention)
    # Inputs:
    #   - path_pretrained_model: path pretrained model
    #   - path_feature: path feature extraction of video(' .npy' with shape: x,1024 (GoogLeNet))
    # Output: Score
    # Author: Trivl
    #************************************************************************
    path_pretrained_model = cfg.PATH_DRDSN_PRETRAINED_MODEL
    path_feature = cfg.PATH_FEATURE_GOOGLENET
    from os import walk
    # Collect the filenames in the top level of path_feature only
    # (break stops os.walk after the first directory).
    f = []
    for (dirpath, dirnames, filenames) in walk(path_feature):
        f.extend(filenames)
        break
    # Score each feature file.  NOTE(review): `score` is overwritten on every
    # iteration and never stored or printed -- confirm results are persisted
    # inside get_VASNet_score, otherwise this loop discards its output.
    # (np/os come from the star imports of the library modules above.)
    for i in f:
        features = np.load(os.path.join(path_feature,i))
        score = get_VASNet_score(features,path_pretrained_model=path_pretrained_model)
    sys.exit(0)
|
3,925 | fcf19c49bb161305eaa5ba8bc26e276a8e8db8ea | import unittest
from textwrap import dedent
from simplesat import InstallRequirement, Repository
from simplesat.test_utils import packages_from_definition
from ..compute_dependencies import (compute_dependencies,
compute_leaf_packages,
compute_reverse_dependencies)
PACKAGE_DEF_0 = dedent("""\
A 0.0.0-1; depends (B ^= 0.0.0)
B 0.0.0-1; depends (D == 0.0.0-2)
B 0.0.0-2; depends (D ^= 0.0.0)
C 0.0.0-1; depends (E >= 1.0.0)
""")
PACKAGE_DEF_1 = dedent("""\
D 0.0.0-2
E 0.0.0-1
E 1.0.0-1
E 1.0.1-1
""")
PACKAGE_DEF_2 = dedent("""\
B 0.0.0-1; depends (D == 0.0.0-2)
C 0.0.0-1; depends (E >= 1.0.0)
""")
class TestComputeDependencies(unittest.TestCase):
    """Tests for compute_dependencies over two fixture repositories."""
    def setUp(self):
        # Two repositories: one with dependent packages, one with their deps.
        repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))
        repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))
        self.repos = [repo_0, repo_1]
    def test_no_dependency(self):
        # D has no dependencies of its own.
        requirement = InstallRequirement._from_string('D == 0.0.0-2')
        expected_deps = set()
        deps = compute_dependencies(self.repos, requirement)
        self.assertEqual(deps, expected_deps)
    def test_simple_dependency(self):
        # C depends on E >= 1.0.0; both matching E versions are returned.
        requirement = InstallRequirement._from_string('C *')
        expected_deps = packages_from_definition(
            """E 1.0.0-1
               E 1.0.1-1""")
        deps = compute_dependencies(self.repos, requirement)
        self.assertEqual(deps, set(expected_deps))
    def test_chained_requirements(self):
        # Without transitive=True only A's direct dependencies (the Bs) appear.
        requirement = InstallRequirement._from_string('A ^= 0.0.0')
        expected_deps = packages_from_definition(
            """B 0.0.0-1; depends (D == 0.0.0-2)
               B 0.0.0-2; depends (D ^= 0.0.0) """
        )
        deps = compute_dependencies(self.repos, requirement)
        self.assertEqual(deps, set(expected_deps))
    def test_chained_requirements_transitive(self):
        # transitive=True also pulls in D via the Bs.
        requirement = InstallRequirement._from_string('A ^= 0.0.0')
        expected_deps = packages_from_definition(
            """B 0.0.0-1; depends (D == 0.0.0-2)
               B 0.0.0-2; depends (D ^= 0.0.0)
               D 0.0.0-2 """
        )
        deps = compute_dependencies(self.repos, requirement, transitive=True)
        self.assertEqual(deps, set(expected_deps))
class TestComputeReverseDependencies(unittest.TestCase):
    """Tests for compute_reverse_dependencies over two fixture repositories."""
    def setUp(self):
        repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))
        repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))
        self.repos = [repo_0, repo_1]
    def test_no_dependency(self):
        # Nothing depends on A.
        requirement = InstallRequirement._from_string('A ^= 0.0.0')
        deps = compute_reverse_dependencies(self.repos, requirement)
        self.assertEqual(deps, set())
    def test_simple_dependency(self):
        # Only C depends on E.
        requirement = InstallRequirement._from_string('E *')
        expected_deps = packages_from_definition(
            'C 0.0.0-1; depends (E >= 1.0.0)'
        )
        deps = compute_reverse_dependencies(self.repos, requirement)
        self.assertEqual(deps, set(expected_deps))
    def test_chained_dependencies(self):
        # Without transitive=True only direct dependents (the Bs) appear.
        requirement = InstallRequirement._from_string('D ^= 0.0.0')
        expected_deps = packages_from_definition(
            """B 0.0.0-1; depends (D == 0.0.0-2)
               B 0.0.0-2; depends (D ^= 0.0.0)"""
        )
        deps = compute_reverse_dependencies(self.repos, requirement)
        self.assertEqual(deps, set(expected_deps))
    def test_chained_dependencies_transitive(self):
        # transitive=True also pulls in A, which depends on the Bs.
        requirement = InstallRequirement._from_string('D ^= 0.0.0')
        expected_deps = packages_from_definition(
            """A 0.0.0-1; depends (B ^= 0.0.0)
               B 0.0.0-1; depends (D == 0.0.0-2)
               B 0.0.0-2; depends (D ^= 0.0.0)"""
        )
        deps = compute_reverse_dependencies(self.repos, requirement,
                                            transitive=True)
        self.assertEqual(deps, set(expected_deps))
class TestComputeLeafPackages(unittest.TestCase):
    """Tests for compute_leaf_packages across three fixture repositories."""

    def setUp(self):
        # Build one Repository per fixture definition.
        definitions = (PACKAGE_DEF_0, PACKAGE_DEF_1, PACKAGE_DEF_2)
        self.repos = [
            Repository(packages_from_definition(definition))
            for definition in definitions
        ]

    def test_simple(self):
        # Leaf packages are those that no other package depends on.
        expected = packages_from_definition(
            """A 0.0.0-1; depends (B ^= 0.0.0)
               C 0.0.0-1; depends (E >= 1.0.0)
               E 0.0.0-1 """
        )
        result = compute_leaf_packages(self.repos)
        self.assertEqual(result, set(expected))
|
3,926 | 4f4af4caf81397542e9cd94c50b54303e2f81881 | import datetime
import time
import boto3
from botocore.config import Config
# FinSpace class with Spark bindings
# NOTE(review): the base class FinSpace is not imported in this file's
# visible imports -- confirm it is defined/imported elsewhere before this
# module is loaded.
class SparkFinSpace(FinSpace):
    """FinSpace client extended with PySpark helpers for reading/ingesting data."""
    # Unconventional class-body import: binds pyspark as a class attribute
    # so the type annotations below resolve.
    import pyspark
    def __init__(
        self,
        spark: pyspark.sql.session.SparkSession = None,
        config = Config(retries = {'max_attempts': 0, 'mode': 'standard'}),
        dev_overrides: dict = None
    ):
        FinSpace.__init__(self, config=config, dev_overrides=dev_overrides)
        self.spark = spark # used on Spark cluster for reading views, creating changesets from DataFrames
    def upload_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame):
        """Write the DataFrame as parquet to the user's FinSpace ingestion path."""
        resp = self.client.get_user_ingestion_info()
        upload_location = resp['ingestionPath']
        # data_frame.write.option('header', 'true').csv(upload_location)
        data_frame.write.parquet(upload_location)
        return upload_location
    def ingest_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame, dataset_id: str, change_type: str, wait_for_completion=True):
        """Upload the DataFrame, then ingest it into the given dataset as parquet."""
        print("Uploading data...")
        upload_location = self.upload_dataframe(data_frame)
        print("Data upload finished. Ingesting data...")
        return self.ingest_from_s3(upload_location, dataset_id, change_type, wait_for_completion, format_type='parquet', format_params={})
    def read_view_as_spark(
        self,
        dataset_id: str,
        view_id: str
    ):
        """Return the materialised view as a Spark DataFrame via its Glue table.

        Returns None (implicitly) when the view's last run is not SUCCESS.
        """
        # TODO: switch to DescribeMatz when available in HFS
        views = self.list_views(dataset_id=dataset_id, max_results=50)
        filtered = [v for v in views if v['id'] == view_id]
        if len(filtered) == 0:
            raise Exception('No such view found')
        if len(filtered) > 1:
            raise Exception('Internal Server error')
        view = filtered[0]
        # 0. Ensure view is ready to be read
        if (view['status'] != 'SUCCESS'):
            status = view['status']
            print(f'view run status is not ready: {status}. Returning empty.')
            return
        glue_db_name = view['destinationTypeProperties']['databaseName']
        glue_table_name = view['destinationTypeProperties']['tableName']
        # Query Glue table directly with catalog function of spark
        return self.spark.table(f"`{glue_db_name}`.`{glue_table_name}`")
    def get_schema_from_spark(self, data_frame: pyspark.sql.dataframe.DataFrame):
        """Translate a Spark DataFrame schema into FinSpace column definitions."""
        from pyspark.sql.types import StructType
        # for translation to FinSpace's schema
        # 'STRING'|'CHAR'|'INTEGER'|'TINYINT'|'SMALLINT'|'BIGINT'|'FLOAT'|'DOUBLE'|'DATE'|'DATETIME'|'BOOLEAN'|'BINARY'
        DoubleType = "DOUBLE"
        FloatType = "FLOAT"
        DateType = "DATE"
        StringType = "STRING"
        IntegerType = "INTEGER"
        LongType = "BIGINT"
        BooleanType = "BOOLEAN"
        TimestampType = "DATETIME"
        hab_columns = []
        items = [i for i in data_frame.schema]
        # Map Spark type names to FinSpace type strings; unknown types fall
        # back to STRING.
        switcher = {
            "BinaryType" : StringType,
            "BooleanType" : BooleanType,
            "ByteType" : IntegerType,
            "DateType" : DateType,
            "DoubleType" : FloatType,
            "IntegerType" : IntegerType,
            "LongType" : IntegerType,
            "NullType" : StringType,
            "ShortType" : IntegerType,
            "StringType" : StringType,
            "TimestampType" : TimestampType,
        }
        for i in items:
            # print( f"name: {i.name} type: {i.dataType}" )
            habType = switcher.get( str(i.dataType), StringType)
            hab_columns.append({
                "dataType" : habType,
                "name" : i.name,
                "description" : ""
            })
        return( hab_columns )
3,927 | a8ea91797942616779ae0acc884db1e521c7ad28 | from utils import *
name = 'topological'
def topological(above):
    """Topologically sort a DAG by repeatedly removing a layer of sources.

    *above* maps each node to the list of nodes it points to; it is consumed
    (emptied) while sorting.  Returns the nodes in a valid topological order.

    Raises:
        ValueError: if the graph contains a cycle.  (The previous version
            looped forever on cyclic input, since no source could ever be
            removed.)
    """
    result = []
    while above:
        # Nodes that are targets of at least one remaining edge.
        targets = {t for succ in above.values() for t in succ}
        sources = set(above) - targets
        if not sources:
            raise ValueError("graph contains a cycle")
        result.extend(sources)
        for node in sources:
            del above[node]
    return result
# Build the adjacency map from the input edge list; the first row of the
# Rosalind input (node/edge counts) is skipped.
above = defaultdict(list)
for edge in Array(Input(name))[1:]:
    above[edge[0]].append(edge[1])
    above[edge[1]]  # touch the target so sink nodes also appear as keys
print(rosalind_pretty(topological(above)))
|
3,928 | a4f2418e746cc43bd407b6a212de9802044351e1 | # -*- coding: utf-8 -*-
"""
plastiqpublicapi
This file was automatically generated by APIMATIC v3.0 (
https://www.apimatic.io ).
"""
import json
import dateutil.parser
from tests.controllers.controller_test_base import ControllerTestBase
from tests.test_helper import TestHelper
from tests.http_response_catcher import HttpResponseCatcher
from plastiqpublicapi.api_helper import APIHelper
from plastiqpublicapi.controllers.categories_controller import CategoriesController
class CategoriesControllerTests(ControllerTestBase):
    """Tests for CategoriesController (originally generated by APIMATIC)."""

    @classmethod
    def setUpClass(cls):
        """Create one controller per test class, wired to a response catcher."""
        super(CategoriesControllerTests, cls).setUpClass()
        cls.response_catcher = HttpResponseCatcher()
        cls.controller = CategoriesController(cls.config, cls.response_catcher)

    # Retrieve a paginated list of Categories by query parameter(s)
    def test_retrieve_a_paginated_list_of_categories_by_query_parameter_s(self):
        # Perform the API call through the SDK function
        result = self.controller.retrieve_a_paginated_list_of_categories_by_query_parameter_s()

        # Test response code.
        # Fixed: assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(self.response_catcher.response.status_code, 200)

        # Test headers: trace-id must be present (any value), content type JSON.
        expected_headers = {}
        expected_headers['trace-id'] = None
        expected_headers['content-type'] = 'application/json'
        self.assertTrue(TestHelper.match_headers(expected_headers, self.response_catcher.response.headers))
|
3,929 | a84920821982f04b9835391eb267707971f8f7c1 | import hashlib
from ast import literal_eval
# import requests
# from rest_framework import generics
from rest_framework.views import APIView
from rest_framework.response import Response
from django.shortcuts import render, redirect, HttpResponse,get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.forms import PasswordChangeForm,AuthenticationForm
from django.contrib.auth import update_session_auth_hash,login,logout
from django.contrib.auth.decorators import login_required
from accounts.forms import RegistrationForm,EditProfileForm,EditUserProfileForm, ResetPasswordForm, SetPasswordForm, SendEmailForm
from django.core.mail import send_mail
from .models import User,UserProfile
from django.http import JsonResponse
from html2text import html2text
# class UserProfileList(generics.ListCreateAPIView):
# queryset = UserProfile.objects.all()
# serializer_class = UserProfileSerializer
#
# def perform_create(self, serializer):
# serializer.save()
#
# class DetailsView(generics.RetrieveUpdateDestroyAPIView):
#
# queryset = UserProfile.objects.all()
# serializer_class = UserProfileSerializer
class UserProfileList(APIView):
    """List all user profiles (GET) or create a user plus profile (POST)."""
    def get(self,request):
        # Serialize every user's profile via its as_json() helper.
        result = []
        for each in User.objects.all():
            result.append(each.userprofile.as_json())
        return JsonResponse(result,safe=False)
    def post(self,request):
        # NOTE(review): literal_eval on the raw body accepts any Python
        # literal, not just JSON -- confirm what clients send; json.loads
        # would be stricter. This file is Python 2 (print statement below).
        data_dict = literal_eval(request.body)
        print data_dict
        try:
            # NOTE(review): password is stored as-is here, not hashed;
            # User.objects.create_user would hash it -- confirm intent.
            user = User.objects.create(
                username=data_dict.get('username'),
                email = data_dict.get('email'),
                first_name = data_dict.get('first_name'),
                last_name = data_dict.get('last_name'),
                password = data_dict.get('password'),
            )
        except:
            # Bare except: any failure (duplicate username, DB error, ...)
            # is reported to the caller as invalid data.
            return JsonResponse({'msg': 'Invalid data'})
        try:
            # Fill the auto-created profile; failure still reports success
            # for the user itself.
            user.userprofile.phone = data_dict.get('phone')
            user.userprofile.website = data_dict.get('website')
            user.userprofile.city = data_dict.get('city')
            user.userprofile.description = data_dict.get('description')
            user.userprofile.save()
        except:
            return JsonResponse({'msg1': 'User created succesfully','msg2': 'Userprofile created succesfully withe empty data', 'userid': user.id})
        return JsonResponse({'msg':'User created succesfully','userid':user.id})
class DetailsView(APIView):
    """Retrieve (GET), update (PUT) or delete (DELETE) one User + UserProfile
    addressed by primary key."""

    def get(self,request,pk):
        result =[]
        try:
            user = User.objects.get(pk=pk)
        except:
            return JsonResponse({"msg": "User not found"})
        result.append(user.userprofile.as_json())
        return JsonResponse(result, safe=False)

    def put(self,request,pk):
        try:
            user = User.objects.get(pk=pk)
        except:
            return JsonResponse({"msg": "User not found"})
        # NOTE(review): literal_eval on the raw body accepts any Python
        # literal -- confirm clients send JSON; json.loads would be stricter.
        data_dict = literal_eval(request.body)
        edited = False
        if 'email' in data_dict.keys():
            user.email = data_dict['email']
            edited = True
        # BUG FIX: first_name/last_name were previously assigned to
        # user.email, silently clobbering the email address.
        if 'first_name' in data_dict.keys():
            user.first_name = data_dict['first_name']
            edited = True
        if 'last_name' in data_dict.keys():
            user.last_name = data_dict['last_name']
            edited = True
        if 'phone' in data_dict.keys():
            user.userprofile.phone = data_dict['phone']
            edited = True
        if 'website' in data_dict.keys():
            user.userprofile.website = data_dict['website']
            edited = True
        if 'city' in data_dict.keys():
            user.userprofile.city = data_dict['city']
            edited = True
        if 'description' in data_dict.keys():
            user.userprofile.description = data_dict['description']
            edited = True
        if edited:
            user.save()
            user.userprofile.save()
            return JsonResponse({"msg": "User successfully modified"})
        return JsonResponse({"msg":"Invalid data"})

    def delete(self,request,pk):
        try:
            user = User.objects.get(pk=pk)
        except:
            return JsonResponse({"msg": "User not found"})
        user.delete()
        return JsonResponse({"msg":"User has been deleted"})
# @csrf_exempt
# def userprofileapiview(request):
# result = []
#
# if request.method == 'POST':
# data_dict = literal_eval(request.body)
# try:
# user = User.objects.create(
# username=data_dict.get('username'),
# email = data_dict.get('email'),
# first_name = data_dict.get('first_name'),
# last_name = data_dict.get('last_name'),
# password = data_dict.get('password'),
# )
# except:
# return JsonResponse({'msg': 'Invalid data'})
# try:
# user.userprofile.phone = data_dict.get('phone')
# user.userprofile.website = data_dict.get('website')
# user.userprofile.city = data_dict.get('city')
# user.userprofile.description = data_dict.get('description')
# user.userprofile.save()
# except:
# return JsonResponse({'msg1': 'User created succesfully','msg2': 'Userprofile created succesfully withe empty data', 'userid': user.id})
#
# return JsonResponse({'msg':'User created succesfully','userid':user.id})
#
# if request.method == 'GET':
# for each in User.objects.all():
# result.append(each.userprofile.as_json())
#
# return JsonResponse(result,safe=False)
#
# @csrf_exempt
# def userdetailapiview(request,pk):
# result = []
# if request.method == 'GET':
# try:
# user = User.objects.get(pk=pk)
# except:
# return JsonResponse({"msg": "User not found"})
# result.append(user.userprofile.as_json())
# return JsonResponse(result, safe=False)
#
# if request.method == 'DELETE':
# try:
# user = User.objects.get(pk=pk)
# except:
# return JsonResponse({"msg": "User not found"})
# user.delete()
# return JsonResponse({"msg":"User has been deleted"})
#
# if request.method == 'PUT':
# try:
# user = User.objects.get(pk=pk)
# except:
# return JsonResponse({"msg": "User not found"})
# pass
# data_dict = literal_eval(request.body)
# edited = False
# if 'email' in data_dict.keys():
# user.email = data_dict['email']
# edited = True
# if 'first_name' in data_dict.keys():
# user.email = data_dict['first_name']
# edited = True
# if 'last_name' in data_dict.keys():
# user.email = data_dict['last_name']
# edited = True
# if 'phone' in data_dict.keys():
# user.userprofile.phone = data_dict['phone']
# edited = True
# if 'website' in data_dict.keys():
# user.userprofile.website = data_dict['website']
# edited = True
# if 'city' in data_dict.keys():
# user.userprofile.city = data_dict['city']
# edited = True
# if 'description' in data_dict.keys():
# user.userprofile.description = data_dict['description']
# edited = True
# if edited == True:
# user.save()
# user.userprofile.save()
# return JsonResponse({"msg": "User successfully modified"})
# return JsonResponse({"msg":"Invalid data"})
def loginview(request):
    """Log a user in via Django's AuthenticationForm, then go to the profile."""
    if request.POST:
        form = AuthenticationForm(data=request.POST)
        if form.is_valid():
            user = form.get_user()
            login(request,user)
            return redirect('/account/profile')
    # GET or failed login: show a fresh form (validation errors are discarded).
    form = AuthenticationForm()
    args = {"form": form}
    return render(request, 'accounts/login.html', args)
@login_required
def logoutview(request):
    """Log the current user out and show the logout page."""
    logout(request)
    return render(request, 'accounts/logout.html')
@login_required
def view_all(request):
    """List all profiles that have not been soft-deleted (is_live=True)."""
    user_list = UserProfile.objects.filter(is_live=True)
    table = {'user_list': user_list}
    return render(request,'accounts/view_all.html',table)
def register(request):
    """Register a new account, then send the user to the login page."""
    if request.method == 'POST':
        form = RegistrationForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('/account/login')
    # GET or invalid submission: render an empty form (errors are discarded).
    form = RegistrationForm()
    args = {"form":form}
    return render(request,'accounts/reg_form.html',args)
@login_required
def view_profile(request):
    """Show the logged-in user's own profile."""
    args = {'user':request.user}
    return render(request,'accounts/profile.html',args)
@login_required
def edit_profile(request):
    """Edit the logged-in user's User and UserProfile fields together."""
    userprofile = UserProfile.objects.get(user=request.user)
    if request.method=='POST':
        userform = EditProfileForm(request.POST, instance=request.user)
        userprofileform = EditUserProfileForm(request.POST, instance=request.user.userprofile)
        if userform.is_valid() and userprofileform.is_valid():
            userform.save()
            userprofileform.save()
            return redirect('/account/profile')
    # Pre-populate the profile form from the current profile values.
    initial = {'description': userprofile.description, 'city': userprofile.city, 'website': userprofile.website,
               'phone': userprofile.phone}
    userform = EditProfileForm(instance=request.user)
    userprofileform = EditUserProfileForm(initial=initial)
    args = {
        'userform':userform,
        'userprofileform':userprofileform,
    }
    return render(request,'accounts/edit_profile.html',args)
@login_required
def change_password(request):
    """Change the user's password without logging them out."""
    if request.method == 'POST':
        form = PasswordChangeForm(data=request.POST,user=request.user)
        if form.is_valid():
            form.save()
            # Re-hash the session so the password change keeps the user
            # logged in.
            update_session_auth_hash(request,form.user)
            return redirect('/account/profile')
        return redirect('account/change_password')
    form = PasswordChangeForm(user=request.user)
    args = {'form': form}
    return render(request, 'accounts/change_password.html', args)
@login_required
def delete_profile(request):
    """Soft-delete the profile (is_live=False); it disappears from view_all."""
    userprofile = UserProfile.objects.get(user=request.user)
    if request.method == 'POST':
        userprofile.is_live = False
        userprofile.save()
        return redirect('/account/profile/view_all')
    return render(request,'accounts/delete_profile.html',{'user':userprofile})
def password_reset(request):
    """Email a password-reset link carrying a token to a registered address.

    NOTE(review): the token is md5(str(user.id)) -- fully predictable from
    the user id; a random token (e.g. secrets.token_urlsafe) would be safer.
    Also Python 2 style: hashlib.md5 requires bytes under Python 3.
    """
    if request.method == 'POST':
        form = ResetPasswordForm(request.POST)
        if form.is_valid():
            if form.data['email'] in (User.objects.values_list('email',flat=True)):
                user = User.objects.get(email=form.data['email'])
                token = hashlib.md5(str(user.id)).hexdigest()
                user.userprofile.token = token
                user.userprofile.save()
                reset_password_link = 'http://127.0.0.1:8000/account/password_reset/confirm/?token='+str(token)+'&id='+str(user.id)
                email_body = 'Hi, you can click the following link to reset your password\n\n'+reset_password_link
                send_mail(
                    'Reset Password',
                    email_body,
                    'atul.prakash@stayabode.com',
                    [form.data['email'],],
                    fail_silently=False,
                )
                return redirect('/account/reset_password/done/')
            return HttpResponse('This email id does not exist')
        return HttpResponse('Enter a valid email id')
    form = ResetPasswordForm()
    args = {'form':form}
    return render(request,'accounts/password_reset.html',args)
def password_reset_confirm(request):
    """Confirm a password reset: validate the emailed token, then set the
    new password.

    SECURITY FIX: the token is now checked before *any* processing.  The
    original checked it only when rendering the form -- the POST branch set
    the new password unconditionally, so anyone who knew a user id could
    reset that user's password with a forged/expired token.
    """
    token = request.GET.get('token')
    id = request.GET.get('id')
    user = User.objects.get(pk=id)
    # Reject forged or stale tokens up front (same response as before).
    if user.userprofile.token != token:
        return HttpResponse('Token expired')
    if request.method == 'POST':
        form = SetPasswordForm(request.POST)
        if form.is_valid():
            user.set_password(form.data['password'])
            user.save()
            return HttpResponse('You password was reset successfully.<br><br>You can login <a href="http://127.0.0.1:8000/">here</a> ')
    # GET, or an invalid submission: render the set-password form.
    form = SetPasswordForm()
    args = {'form':form}
    return render(request,'accounts/password_reset_confirm.html',args)
def send_email(request):
    """Send a templated HTML email to each selected user.

    '{{user}}' in the body is replaced by each recipient's username; the
    plain-text alternative is derived with html2text.
    """
    if request.method == "POST":
        form = SendEmailForm(request.POST)
        try:
            for each in User.objects.filter(id__in=form.data.getlist('user')):
                body = form.data.get('body').replace('{{user}}', each.username)
                send_mail(
                    subject=form.data.get('subject'),
                    message=html2text(body),
                    from_email='atul.prakash@stayabode.com',
                    # recipient_list=User.objects.filter(id__in=form.data.getlist('user')).values_list('email', flat=True),
                    recipient_list=[each.email],
                    fail_silently=False,
                    html_message=body,
                )
            return HttpResponse('email sent succesfully')
        except:
            # Bare except: any form or SMTP failure maps to one message.
            return HttpResponse('Invalid data or email')
    # NOTE(review): this passes the form *class*, not an instance -- the
    # template still renders it, but confirm SendEmailForm() was intended.
    form = SendEmailForm
    args = {'form': form}
    return render(request,'accounts/send_email.html',args)
|
3,930 | c034fba0b9204545b00ba972a17e63cf9c20854e | import pandas as pd
def _get_site_name(f,i):
data_file = f +"\\"+"new_desc_sele_data.csv"
site_name=pd.read_csv(data_file)["SITE_ID"][i]
return site_name
def _get_site_DD_dataset_csv(f,i):
    '''Load the full dataset for site *i* (after all feature selection).'''
    site_path=_get_site_folder(f,i)
    data_path=site_path+"\\data_confirm.csv"
    data=pd.read_csv(data_path)
    return data
def _get_site_IGBP(f,i):
data_file = f +"\\"+"new_desc_sele_data_origin.csv"
site_IGBP=pd.read_csv(data_file)["IGBP"][i]
return site_IGBP
def _get_site_feature_ale(f,i,feauture):
    """Load the first-order ALE csv ("ale_1_<feature>.csv") for site *i*.

    Returns None (implicitly) when *feauture* is not a string.
    """
    site_path=_get_site_folder(f,i)
    prefix="ale_1_"
    if type(feauture) is str:
        ale_path=site_path+"\\"+prefix+feauture+".csv"
        ale_data=pd.read_csv(ale_path)
        return ale_data
def _get_version_res_folder(f,version,site_name=None,i=None):
import os
version_folder=f+"\\"+version
if i:
site_name=_get_site_name(f,i)
elif site_name:
site_name = site_name
if os.path.exists(version_folder):
site_version_res_folder=version_folder+"\\"+site_name
if os.path.exists(site_version_res_folder):
return site_version_res_folder
else:
os.mkdir(site_version_res_folder)
return site_version_res_folder
def _get_site_folder(f,i=None,feature_name=None):
    """Return the SITE_PATH for a site, addressed by row index *i* or by
    site id *feature_name*.  Prints a warning and returns None (implicitly)
    when neither is given.
    """
    data_file = f + "\\" + "new_desc_sele_data_origin.csv"
    data_content = pd.read_csv(data_file)
    # NOTE(review): debug leftover -- prints feature_name on every call.
    print(feature_name)
    if type(i) is int:
        site_path=data_content["SITE_PATH"][i]
        return site_path
    elif type(feature_name) is str:
        # Look the path up by SITE_ID instead of positional index.
        site_path = data_content["SITE_PATH"][data_content["SITE_ID"]==feature_name].values[0]
        return site_path
    else:
        print("lack of index or feature_name.")
|
3,931 | 6b0d1de4c77841f20670331db3332cf87be7ad84 | from django.apps import AppConfig
class PersianConfig(AppConfig):
    """Django application configuration for the 'persian' app."""
    name = 'persian'
|
3,932 | 99c2bd56deccc327faf659e91fc1fd0f6ff7a219 | from mf_app import db
from mf_app.models import User
db.create_all()
# Seed the database with two known accounts for local testing.
admin = User('admin', 'admin@admin.com', 'admin')
guest = User('guest', 'guest@guest.com', 'guest')
db.session.add(admin)
db.session.add(guest)
db.session.commit()
# Sanity check: print everything now in the users table.
users = User.query.all()
print(users)
3,933 | 95c0ba757b7561ef6cc0ad312034e2695f8420c3 | #!/usr/bin/env python3
# Demo script: print a fixed message (note the trailing newline in the string,
# which adds a blank line after print's own newline).
x = "Programming is like building a multilingual puzzle\n"
print (x)
|
3,934 | 736861f18936c7a87ecf3deb134f589b9d7eed92 |
import matplotlib
matplotlib.use('Agg')
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
def plot_overscan(overscan, img, TITLE, OUT_DIR):
    """ plot overscan in 9x2 plots with 16 channels """
    fig = plt.figure(figsize=(20, 20))
    gs0 = gridspec.GridSpec(3, 3)
    for i, f in enumerate(img):
        # Column of this CCD within the 3x3 raft layout (controls tick labels).
        x = f.dev_index % 3
        gs = gridspec.GridSpecFromSubplotSpec(
            1, 2, wspace=0, subplot_spec=gs0[f.dev_index])
        # Left panel: segments 10-17, each trace shifted by 500 for visibility.
        ax2 = plt.subplot(gs[0, 0])
        for j in range(9, 17):
            plt.plot(overscan[i, j - 1] + 500 *
                     (j - 8), label='seg' + str(j + 1))
        plt.legend(fontsize=6, loc='upper center', ncol=4)
        if x != 0:
            ax2.set_yticklabels([])
        plt.grid()
        plt.xlim(0, 2100)
        plt.ylim(0, 4500)
        ax2.set_title(f.dev_name + ' (seg 10-17)')
        # Right panel: segments 0-7.
        ax1 = plt.subplot(gs[0, 1])
        for j in range(1, 9):
            plt.plot(overscan[i, j - 1] + 500 * j, label='seg' + str(j - 1))
        plt.legend(fontsize=6, loc='upper center', ncol=4)
        if x != 2:
            ax1.set_yticklabels([])
        if x == 2:
            ax1.yaxis.tick_right()
        plt.grid()
        plt.xlim(0, 2100)
        plt.ylim(0, 4500)
        ax1.set_title(f.dev_name + ' (seg 0-7)')
    fig.suptitle('Overscan ' + TITLE, y=0.94, size=20)
    plt.subplots_adjust(wspace=0.05)
    plt.savefig(OUT_DIR + TITLE + '_spatial.png')
    plt.close(fig)
def plot_overscan_diff(overscan, img, TITLE, OUT_DIR):
    """ plot overscan with subtracted 7th / 17th channel """
    fig = plt.figure(figsize=(20, 20))
    gs0 = gridspec.GridSpec(3, 3)
    for i, f in enumerate(img):
        x = f.dev_index % 3
        gs = gridspec.GridSpecFromSubplotSpec(
            1, 2, wspace=0, subplot_spec=gs0[f.dev_index])
        # Left panel: segments 10-17 with the last segment (index 15) used
        # as a common-mode reference.
        ax2 = plt.subplot(gs[0, 0])
        for j in range(9, 17):
            plt.plot(overscan[i, j - 1] - overscan[i, 15] +
                     500 * (j - 8), label='seg' + str(j + 1))
        plt.legend(fontsize=6, loc='upper center', ncol=4)
        if(x != 0):
            ax2.set_yticklabels([])
        plt.grid()
        plt.xlim(0, 2100)
        plt.ylim(0, 4500)
        ax2.set_title(f.dev_name + ' (seg 10-17)')
        # Right panel: segments 0-7 referenced to segment index 7.
        ax1 = plt.subplot(gs[0, 1])
        for j in range(1, 9):
            plt.plot(overscan[i, j - 1] - overscan[i, 7] +
                     500 * j, label='seg' + str(j - 1))
        plt.legend(fontsize=6, loc='upper center', ncol=4)
        if(x != 2):
            ax1.set_yticklabels([])
        if(x == 2):
            ax1.yaxis.tick_right()
        plt.grid()
        plt.xlim(0, 2100)
        plt.ylim(0, 4500)
        ax1.set_title(f.dev_name + ' (seg 0-7)')
        # ax1.set_title('S-'+f[7:9]+' (seg 0-7)')
    fig.suptitle('Overscan (diff) ' + TITLE, y=0.94, size=20)
    plt.subplots_adjust(wspace=0.05)
    plt.savefig(OUT_DIR + TITLE + '_diff_spatial.png')
    plt.close(fig)
def plot_mean_std_stddelta(m, n, nd, img, TITLE, OUT_DIR):
    """ plot std vs. mean vs. std_delta (comparison) """
    fig = plt.figure(figsize=(15, 10))
    for i, f in enumerate(img):
        ax1 = plt.subplot(3, 3, f.dev_index + 1)
        lns1 = ax1.plot(m[i], 'o', color='green', label='offset')
        ax1.set_ylabel('mean')
        ax1.set_xlabel('segment num')
        # Twin y-axis: noise/dnoise share the segment axis with the offsets.
        ax2 = ax1.twinx()
        lns2 = ax2.plot(n[i], '^', color='blue', label='noise')
        ax2.set_ylabel('stdev')
        lns3 = ax2.plot(nd[i], 'v', color='red', label='dnoise')
        # One combined legend for the three series on both axes.
        lns = lns1 + lns2 + lns3
        labs = [l.get_label() for l in lns]
        ax1.legend(lns, labs, bbox_to_anchor=(0., 1.07, 1., .102),
                   fontsize='small', ncol=3, numpoints=1, loc=9)
        plt.grid()
        plt.title(' ' + f.dev_name, y=1.15)
    fig.suptitle('Offset, noise, dnoise comparison ' + TITLE, y=0.99, size=20)
    plt.subplots_adjust(wspace=0.5, hspace=0.6)
    plt.savefig(OUT_DIR + TITLE + '_std_vs_mean.png')
    plt.close(fig)
def plot_histogram_mean(m, TITLE, OUT_DIR):
    """Histogram all offsets in *m* nine times, with 10..90 bins."""
    fig = plt.figure(figsize=(15, 15))
    m_all = m.ravel()
    for bin_num in np.arange(10, 100, 10):
        # NOTE(review): bin_num / 10 as the subplot index relies on Python 2
        # integer division (this file uses print statements elsewhere);
        # under Python 3 it is a float and subplot() rejects it.
        plt.subplot(3, 3, bin_num / 10)
        plt.hist(m_all, bin_num, facecolor='green')
        plt.title('Bins = ' + str(bin_num))
    plt.subplots_adjust(wspace=0.2, hspace=0.2)
    fig.suptitle('offset histogram ' + TITLE, y=0.92, size=20)
    plt.savefig(OUT_DIR + TITLE + '_mean_histo.png')
    plt.close(fig)
def plot_histogram_std(n, TITLE, OUT_DIR):
    """Histogram all noise values in *n* nine times, with 10..90 bins."""
    fig = plt.figure(figsize=(15, 15))
    n_all = n.ravel()
    for bin_num in np.arange(10, 100, 10):
        plt.subplot(3, 3, bin_num / 10)
        plt.hist(n_all, bin_num, facecolor='green')
        plt.title('Bins = ' + str(bin_num))
    fig.suptitle('noise histogram ' + TITLE, y=0.92, size=20)
    plt.subplots_adjust(wspace=0.2, hspace=0.2)
    plt.savefig(OUT_DIR + TITLE + '_std_histo.png')
    plt.close(fig)
def plot_histogram_std_dev(nd, TITLE, OUT_DIR):
    """Histogram all dnoise values in *nd* nine times, with 10..90 bins."""
    fig = plt.figure(figsize=(15, 15))
    nd_all = nd.ravel()
    for bin_num in np.arange(10, 100, 10):
        plt.subplot(3, 3, bin_num / 10)
        plt.hist(nd_all, bin_num, facecolor='green')
        plt.title('Bins = ' + str(bin_num))
    fig.suptitle('dnoise histogram ' + TITLE, y=0.92, size=20)
    plt.subplots_adjust(wspace=0.2, hspace=0.2)
    plt.savefig(OUT_DIR + TITLE + '_stddelta_histo.png')
    plt.close(fig)
def plot_histogram_all(m, n, nd, TITLE, OUT_DIR):
    """Produce all three multi-binning histograms (offset, noise, dnoise)."""
    plot_histogram_mean(m, TITLE, OUT_DIR)
    plot_histogram_std(n, TITLE, OUT_DIR)
    plot_histogram_std_dev(nd, TITLE, OUT_DIR)
def plot_histogram_all_one_binning(m, n, nd, TITLE, OUT_DIR, bin_num=45,
                                   num_ccd=9, omit_REBs=[], read_REBs=set([0, 1, 2])):
    """Plot offset + noise/dnoise histograms and return a summary stats line.

    Returns a tab-separated string "\tmean\tmedian\tstd" triplets for
    offset, noise and dnoise, computed over live channels only (a channel
    is considered dead when its noise is <= 5).  Channels of REBs listed in
    *omit_REBs* are excluded from the statistics but still histogrammed.
    NOTE(review): the mutable defaults (omit_REBs=[], read_REBs=set(...))
    are only read, never mutated, so they are safe here.
    This is Python 2 code (print statements below).
    """
    from matplotlib.patches import Rectangle
    if num_ccd != len(read_REBs) * 3:
        print "ERROR! num_ccd = %i while number of REBs being read is %i." % (
            num_ccd, len(read_REBs)
        )
        return "\n"
    fig = plt.figure(figsize=(15, 6))
    # Flatten and keep only the channels of the CCDs actually read out.
    m_all = m.ravel()
    m_all = m_all[0:16 * num_ccd]
    n_all = n.ravel()
    n_all = n_all[0:16 * num_ccd]
    nd_all = nd.ravel()
    nd_all = nd_all[0:16 * num_ccd]
    # detect dead channels, DEF: noise <= 5
    dead = []
    for i in range(16 * num_ccd):
        if n_all[i] <= 5:
            dead.append(i)
    # not count not-clocking REBs for statistics
    # data stored in order 22, 21, 20 (REB 2), 12, 11, 10 (REB 1),...
    omit_REBs = set(omit_REBs)
    for REB in omit_REBs:
        if REB not in [0, 1, 2]:
            print "WARNING! Wrong configuration of REBs to omit %s - unrecognized REBs.\nContinuing with all REBs." % str(omit_REBs)
            break
    else:
        if omit_REBs:
            print "Omiting REBs %s" % omit_REBs
        i = -1
        for REB in read_REBs:
            i += 1
            if REB not in omit_REBs:
                continue
            # Mark all 48 channels of this REB as excluded.
            pos = len(read_REBs) - i - 1
            omit = np.arange(pos * 48, pos * 48 + 48)
            dead = np.append(dead, omit)
    m_no_dead = np.delete(m_all, dead)
    n_no_dead = np.delete(n_all, dead)
    # get rid of subtracted channels for dnoise
    sub = np.arange(7, 16 * num_ccd, 8)
    dead = np.append(dead, sub)
    nd_no_dead = np.delete(nd_all, dead)
    nd_all = np.delete(nd_all, sub)
    # summary statstics computed only with live channels
    if len(n_no_dead):
        n_mean, n_median, n_std = np.mean(
            n_no_dead), np.median(n_no_dead), np.std(n_no_dead)
    else:
        n_mean, n_median, n_std = 0, 0, 0
    if len(m_no_dead):
        m_mean, m_median, m_std = np.mean(
            m_no_dead), np.median(m_no_dead), np.std(m_no_dead)
    else:
        m_mean, m_median, m_std = 0, 0, 0
    if len(nd_no_dead):
        nd_mean, nd_median, nd_std = np.mean(
            nd_no_dead), np.median(nd_no_dead), np.std(nd_no_dead)
    else:
        nd_mean, nd_median, nd_std = 0, 0, 0
    # Split the bins: 4/5 linear below 30, 1/5 logarithmic above
    # (Python 2 integer division).
    bin_num_lin = 4 * bin_num / 5
    bin_num_log = 1 * bin_num / 5
    bins_lin = np.linspace(0, 30, bin_num_lin)
    val_max = max(max(n_all), max(nd_all))
    if val_max <= 30:
        val_max = 50
    bins_log = np.logspace(np.log10(30), np.log10(val_max), bin_num_log)
    # Left subplot: offset histogram with a stats box.
    ax1 = fig.add_subplot(1, 2, 1)
    plt.hist(m_all, bin_num, facecolor='green')
    plt.title('Offset')
    textstr1 = '$\mu=%.0f$\n$\mathrm{median}=%.0f$\n$\sigma=%.0f$' % (
        m_mean, m_median, m_std)
    props1 = dict(boxstyle='round', facecolor='green', alpha=0.4)
    ax1.text(0.76, 0.97, textstr1, transform=ax1.transAxes, fontsize=10,
             verticalalignment='top', bbox=props1)
    # Right subplot: linear part of the noise histograms; the hatched band
    # below 5 marks the dead-channel threshold.
    ax2 = fig.add_subplot(1, 2, 2)
    plt.hist(n_all, bins_lin, facecolor='blue', alpha=0.5, label='noise')
    plt.hist(nd_all, bins_lin, facecolor='red', alpha=0.5, label='dnoise')
    plt.title('Noises')
    plt.legend(loc='upper left')
    ax2.axvspan(0, 5, hatch='x', fill=False)
    ax2.set_xscale('linear')
    ax2.set_xlim((0, 30))
    ax2.set_xlim(left=0)
    ax2.spines['right'].set_visible(False)
    ax2.yaxis.set_ticks_position('left')
    plt.setp(ax2.get_xticklabels(), visible=True)
    # Appended log-scale axis continues the same histograms beyond 30.
    divider = make_axes_locatable(ax2)
    axLin = divider.append_axes("right", size=1.4, pad=0, sharey=ax2)
    axLin.set_xscale('log')
    axLin.hist(n_all, bins_log, facecolor='blue', alpha=0.5, label='noise')
    axLin.hist(nd_all, bins_log, facecolor='red', alpha=0.5, label='dnoise')
    axLin.autoscale()
    axLin.set_xlim(left=30)
    axLin.spines['left'].set_visible(False)
    axLin.yaxis.set_visible(False)
    axLin.yaxis.set_ticks_position('left')
    textstr2 = '$\mu=%.1f$\n$\mathrm{median}=%.1f$\n$\sigma=%.1f$' % (
        n_mean, n_median, n_std)
    props2 = dict(boxstyle='round', facecolor='blue', alpha=0.4)
    plt.text(1.98, 0.97, textstr2, transform=ax1.transAxes, fontsize=10,
             verticalalignment='top', bbox=props2)
    textstr3 = '$\mu=%.1f$\n$\mathrm{median}=%.1f$\n$\sigma=%.1f$' % (
        nd_mean, nd_median, nd_std)
    props3 = dict(boxstyle='round', facecolor='red', alpha=0.4)
    plt.text(1.98, 0.80, textstr3, transform=ax1.transAxes, fontsize=10,
             verticalalignment='top', bbox=props3)
    fig.suptitle(TITLE, y=0.98, size=20)
    # plt.subplots_adjust(wspace=0.2, hspace=0.2)
    plt.savefig(OUT_DIR + TITLE + '_histo.png')
    plt.close(fig)
    string_info = "\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\n" % (
        m_mean, m_median, m_std, n_mean, n_median, n_std, nd_mean, nd_median, nd_std)
    return string_info
def plot_summary(data, run, OUT_DIR, SUPTITLE="Runs comparison"):
    """Plot per-run offset/noise/dnoise summaries (columns of *data*) as
    three stacked panels; *run* provides the x tick labels."""
    cols = len(data)
    fig = plt.figure(figsize=(25, 9))
    x = range(cols)
    # Panel 1: offset mean (with std as error bars) and median.
    ax1 = plt.subplot(3, 1, 1)
    ax1.plot(x, data[:, 0], 'o', color='darkgreen', label='mean')
    ax1.errorbar(x, data[:, 0], marker='o',
                 color='darkgreen', yerr=data[x, 2], linestyle='None')
    ax1.plot(x, data[:, 1], 'o', color='greenyellow', label='median')
    ax1.set_ylabel('Offset', color='green')
    ax1.legend(numpoints=1)
    # Panel 2: noise.
    ax2 = plt.subplot(3, 1, 2)
    ax2.plot(x, data[:, 3], 'o', color='darkblue', label='mean')
    ax2.errorbar(x, data[:, 3], marker='o', color='darkblue',
                 yerr=data[x, 5], linestyle='None')
    ax2.plot(x, data[:, 4], 'o', color='lightskyblue', label='median')
    ax2.set_ylabel('Noise', color='blue')
    ax2.set_ylim([0, 20])
    # ax2.set_ylim(bottom=0)
    ax2.legend(numpoints=1)
    # Panel 3: dnoise.
    ax3 = plt.subplot(3, 1, 3)
    ax3.plot(x, data[:, 6], 'o', color='darkred', label='mean')
    ax3.errorbar(x, data[:, 6], marker='o', color='darkred',
                 yerr=data[x, 8], linestyle='None')
    ax3.plot(x, data[:, 7], 'o', color='salmon', label='median')
    ax3.set_ylabel('DNoise', color='red')
    ax3.set_ylim([0, 20])
    # ax3.set_ylim(bottom=0)
    ax3.legend(numpoints=1)
    plt.xticks(x, run, rotation=45, ha='right', fontsize=7)
    fig.suptitle(SUPTITLE, y=0.96, size=20)
    plt.subplots_adjust(hspace=0.0, bottom=0.20, left=0.05)
    plt.savefig(OUT_DIR + 'Runs_summary.png')
    plt.close(fig)
def plot_one_run_summary(f, OUT_DIR, SUPTITLE="Run summary"):
    """Load a summary text file *f* (run name + 9 stat columns) and plot it.
    Skips plotting when the file holds a single row (Python 2 print)."""
    data = np.loadtxt(f, usecols=range(1, 10))
    run = np.loadtxt(f, usecols=[0], dtype=str)
    if data.size == 9:
        print "WARNING! Only one row in '%s'. Summary is not plotting.\n" % f
        return
    plot_summary(data, run, OUT_DIR, SUPTITLE)
def plot_cor_ccd(a, img, TITLE, OUT_DIR, vmin=0, vmax=0.2):
    """Show the 16x16 intra-CCD block of the correlation matrix *a* for each
    CCD in *img* as a 3x3 grid of heat maps with a shared colorbar."""
    fig = plt.figure(figsize=(15, 15))
    seg = [0, 7, 8, 15]
    lab = ["0", "7", "10", "17"]
    for i, f in enumerate(img):
        ax1 = plt.subplot(3, 3, f.dev_index + 1)
        # Slice this CCD's 16-channel block out of the full matrix.
        i_min = 16 * i
        i_max = i_min + 16
        aa = a[i_min:i_max, i_min:i_max]
        im = plt.imshow(aa, interpolation='nearest', cmap='jet', vmin=vmin, vmax=vmax)
        ax1.set_title(f.dev_name)
        ax1.set_xlim(15.5, -0.5)
        ax1.set_ylim(-0.5, 15.5)
        ax1.set_xticks(seg)
        ax1.set_xticklabels(lab)
        ax1.set_yticks(seg)
        ax1.set_yticklabels(lab)
    fig.subplots_adjust(right=0.8)
    cbar_ax = fig.add_axes([0.85, 0.137, 0.05, 0.73])
    fig.colorbar(im, cax=cbar_ax)
    fig.suptitle("Inter CCD correlations " + TITLE, y=0.93, size=20)
    plt.savefig(OUT_DIR + TITLE + '_cor_ccd.png')
    plt.close(fig)
def plot_cor_all(a, img, TITLE, OUT_DIR, vmin=0, vmax=0.2):
    """Show the full channel-by-channel correlation matrix *a* with CCD names
    and REB brackets annotated along the left edge (Python 2 division)."""
    fig = plt.figure(figsize=(15, 15))
    im = plt.imshow(a, interpolation='nearest', cmap='jet', vmin=vmin, vmax=vmax)
    seg = np.arange(0, len(a), 16)
    # Scale factor so annotations stay aligned for rafts != 9 CCDs.
    r = img.ccd_num / 9.0
    plt.xticks(seg)
    plt.yticks(seg)
    for i, f in enumerate(img):
        plt.text(-10 * r, 8 + 16 * i, f.dev_name,
                 size=15, verticalalignment='center')
    widthB = 54 / img.ccd_num
    widthB = str(widthB)
    for i in np.arange(0, img.ccd_num, 3):
        # One bracket per REB spanning its three CCDs.
        REB = 'REB' + img[i].dev_name[1:2]
        plt.annotate(REB, xy=(-11 * r, 24 + i * 16), xytext=(-18 * r, 24 + i * 16), xycoords='data',
                     fontsize=20, annotation_clip=False, ha='center', va='center',
                     arrowprops=dict(arrowstyle='-[, widthB=%s, lengthB=1.5' % widthB, lw=2.0))
    fig.subplots_adjust(right=0.82)
    cbar_ax = fig.add_axes([0.85, 0.155, 0.05, 0.695])
    fig.colorbar(im, cax=cbar_ax)
    fig.suptitle("Overall correlations " + TITLE, y=0.91, size=20)
    plt.savefig(OUT_DIR + TITLE + '_cor_all.png')
    plt.close(fig)
def plot_cor_ccd_mean(a, img, TITLE, OUT_DIR, vmin=-1, vmax=1):
    """Show the CCD-by-CCD correlation matrix *a* (one cell per CCD pair),
    labelled with device names."""
    fig = plt.figure(figsize=(15, 15))
    im = plt.imshow(a, interpolation='nearest', cmap='jet', vmin=vmin, vmax=vmax)
    loc = range(img.ccd_num)
    labels = []
    for fli in img:
        labels.append(fli.dev_name)
    plt.xticks(loc, labels)
    plt.yticks(loc, labels)
    fig.subplots_adjust(right=0.82)
    cbar_ax = fig.add_axes([0.85, 0.155, 0.05, 0.695])
    fig.colorbar(im, cax=cbar_ax)
    fig.suptitle("Correlations of means of CCDs " + TITLE, y=0.91, size=20)
    plt.savefig(OUT_DIR + TITLE + '_cor_ccd_mean.png')
    plt.close(fig)
def plot_gains(gains, gain_ref, TITLES, OUT_DIR):
    """ plot gains with respect to the reference gain,
    whre reference gain is number => gains[gain_ref]"""
    # print 'directory: %s' % OUT_DIR
    # print 'TITLES:%s', TITLES
    gain_ref_np = np.array(gains[gain_ref].gain)
    ratios = []
    for gain in gains:
        gain_np = np.array(gain.gain)
        # Crop both arrays to their common shape before dividing.
        dim = (min(gain_ref_np.shape[0], gain_np.shape[0]),
               min(gain_ref_np.shape[1], gain_np.shape[1])
               )
        # print 'dim = ', dim
        ratios.append(gain_np[0:dim[0], 0:dim[1]] / gain_ref_np[0:dim[0], 0:dim[1]])
    # print 'Ratios = ', ratios
    # Two histogram rows (zoomed + wide) per row of six ratio plots.
    # NOTE(review): relies on Python 2 integer division here and for j below.
    rows = 2*((len(ratios) -1) / 6 + 1)
    cmap = plt.get_cmap('gnuplot')
    colors = [cmap(i) for i in np.linspace(0, 1, len(ratios))]
    fig, axes = plt.subplots(nrows=rows, ncols=6)
    fig.set_size_inches(20,20)
    axfl = axes.flatten()
    for i, ratio in enumerate(ratios):
        # print 'Plotting %s', TITLES[i]
        # Upper axis: narrow range around 1; lower axis (offset +6): full range.
        j = (i / 6)*12 + i % 6
        ax = axfl[j]
        ax2 = axfl[j+6]
        ax.hist(np.reshape(ratio, -1), 20, range=(0.9, 1.1), facecolor=colors[i])
        ax.set_title(TITLES[i], size=20)
        ax2.hist(np.reshape(ratio, -1), 50, range=(0., 2.), facecolor=colors[i])
    fig.suptitle("Gains with ref gain '%s'" % TITLES[gain_ref], y=0.95, size=25)
    # fig.tight_layout()
    plt.savefig(OUT_DIR + 'gain.png')
    plt.close(fig)
def plot_raft_map(data, img, TITLE, OUTDIR, vmin=None, vmax=None):
    """ create a raft map 6x24 for data in CCDsx16 array """
    # NOTE(review): `map` shadows the builtin; the integer divisions below
    # are Python 2 semantics (this file uses print statements elsewhere).
    map = np.zeros((6, 24))
    for i, fli in enumerate(img):
        x = (fli.dev_index / 3) * 2  # [0, 2, 4]
        y = (fli.dev_index % 3) * 8  # [0, 8, 16]
        for j in range(16):
            xx = x + j / 8  # [0, 1,..., 5]
            yy = y + j % 8  # [0, 1,..., 23]
            map[xx, yy] = data[i, j]
    yseg = range(6)
    ylab = ["00-07", "10-17", "00-07", "10-17", "00-07", "10-17"]
    xseg = range(0, 24, 4)
    xlab = ["0", "4", "0", "4", "0", "4"]
    fig = plt.figure(figsize=(10, 10))
    ax1 = fig.add_subplot(111)
    im = ax1.imshow(map, interpolation='nearest', cmap='jet', aspect=4, vmin=vmin, vmax=vmax)
    plt.yticks(yseg, ylab)
    plt.xticks(xseg, xlab)
    # Sensor labels along the top and right edges of the map.
    plt.annotate('S22', xy=(0, 0), xytext=(4, -0.8), fontsize=15, ha='center', va='center')
    plt.annotate('S12', xy=(0, 0), xytext=(12, -0.8), fontsize=15, ha='center', va='center')
    plt.annotate('S02', xy=(0, 0), xytext=(20, -0.8), fontsize=15, ha='center', va='center')
    plt.annotate('S02', xy=(0, 0), xytext=(24., 0.5), fontsize=15, ha='left', va='center')
    plt.annotate('S01', xy=(0, 0), xytext=(24., 2.5), fontsize=15, ha='left', va='center')
    plt.annotate('S00', xy=(0, 0), xytext=(24., 4.5), fontsize=15, ha='left', va='center')
    # Grid lines separating the individual CCDs.
    ax1.vlines(7.5, -0.5, 5.5)
    ax1.vlines(15.5, -0.5, 5.5)
    ax1.hlines(1.5, -0.5, 23.5)
    ax1.hlines(3.5, -0.5, 23.5)
    plt.subplots_adjust(left=0.07, bottom=0.05, right=0.8, top=0.95, wspace=0, hspace=0)
    #cbar_ax = fig.add_axes([0.15, 0.03, 0.7, 0.05])
    #fig.colorbar(im, cax=cbar_ax, orientation="horizontal")
    cbar_ax = fig.add_axes([0.87, 0.15, 0.05, 0.7])
    fig.colorbar(im, cax=cbar_ax)
    fig.suptitle(TITLE, y=0.98, size=19)
    plt.savefig(OUTDIR + TITLE + '.png')
    # NOTE(review): plt.show() is a no-op under the Agg backend selected at
    # the top of this file -- confirm it is intentional.
    plt.show()
    plt.close(fig)
def plot_voltage_all(x, data, imgs, title, out_dir, suptitle=''):
    """Plot every segment's voltage trace, one subplot per CCD, and save a PNG."""
    if suptitle == '':
        suptitle = title
    fig = plt.figure(figsize=(20, 24))
    palette = [plt.get_cmap('gist_ncar')(v) for v in np.linspace(0, 1, 16)]
    for ccd in range(9):
        axis = plt.subplot(3, 3, imgs[0][ccd].dev_index + 1)
        axis.set_title(imgs[0][ccd].dev_name)
        for seg in range(16):
            trace = [data[step][ccd][seg] for step in range(len(x))]
            plt.plot(x, trace, label='Segment %i' % seg, color=palette[seg])
    fig.suptitle(suptitle + '; all segments', y=0.99, size=20)
    plt.legend(loc='lower left', bbox_to_anchor=(0.87, 1.1), ncol=4)
    plt.subplots_adjust(bottom=0.04, left=0.04, top=0.88, right=0.96, wspace=0.1, hspace=0.1)
    plt.savefig(out_dir + title + '_all.png')
    plt.close(fig)
def plot_voltage_ccd(x, data, imgs, title, out_dir, suptitle=''):
    """Plot the per-CCD mean voltage (averaged over segments) and save a PNG."""
    if suptitle == '':
        suptitle = title
    fig = plt.figure(figsize=(15, 15))
    for ccd in range(9):
        axis = plt.subplot(3, 3, imgs[0][ccd].dev_index + 1)
        axis.set_title(imgs[0][ccd].dev_name)
        means = [np.mean(data[step][ccd]) for step in range(len(x))]
        plt.plot(x, means)
    fig.suptitle(suptitle + '; mean of segments, per CCD', y=0.94, size=20)
    plt.savefig(out_dir + title + '_CCD.png')
    plt.close(fig)
def plot_voltage_raft(x, data, imgs, title, out_dir, suptitle=''):
    """Plot the mean voltage over all segments of the raft and save a PNG."""
    if suptitle == '':
        suptitle = title
    fig = plt.figure(figsize=(7, 7))
    means = [np.mean(data[step]) for step in range(len(x))]
    plt.plot(x, means)
    fig.suptitle(suptitle + '; mean of all segments', y=0.96, size=20)
    plt.savefig(out_dir + title + '_raft.png')
    plt.close(fig)
|
3,935 | e1eb86480fa4eadabf05f10cc54ff9daa790438c | class Node():
    def __init__(self, value):
        # Singly linked list node: a payload plus a pointer to the next node.
        self.value = value
        self.next = None
def linked_list_from_array(arr):
    """Build a singly linked list from *arr* and return its head.

    Returns None for an empty sequence (the original raised IndexError).
    """
    if not arr:
        return None
    head = Node(arr[0])
    cur = head
    for value in arr[1:]:
        cur.next = Node(value)
        cur = cur.next
    return head
def array_from_linked_list(head):
    """Return a list of node values gathered by walking the list from *head*."""
    values = []
    node = head
    while node is not None:
        values.append(node.value)
        node = node.next
    return values
def reverse_linked_list(head):
    """Reverse the linked list in place and return its new head."""
    reversed_head = None
    node = head
    while node is not None:
        following = node.next       # remember the remainder of the list
        node.next = reversed_head   # hook this node onto the reversed prefix
        reversed_head = node
        node = following
    return reversed_head
# Demo: round-trip an array through a linked list, reversing it on the way.
array = [9, 1, 2, 3, 6, 8, 11, 5]
ll = linked_list_from_array(array)
rev_ll = reverse_linked_list(ll)
rev_array = array_from_linked_list(rev_ll)
print(array)
print(rev_array)
def reverse_linked_list_section(head, start, end):
    """Reverse the nodes from index *start* to *end* (inclusive) in place.

    Returns the (possibly new) head of the list. Indices outside an
    ascending range (start >= end) leave the list unchanged.

    Examples for [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
      (0, 3) => [3, 2, 1, 0, 4, 5, 6, 7, 8, 9]
      (2, 4) => [0, 1, 4, 3, 2, 5, 6, 7, 8, 9]
      (6, 9) => [0, 1, 2, 3, 4, 5, 9, 8, 7, 6]
    """
    if head is None or start >= end:
        return head
    # Walk to the node just before the section.
    before = None
    node = head
    for _ in range(start):
        before = node
        node = node.next
    section_tail = node  # after reversal this node ends the section
    # Standard in-place reversal of end - start + 1 nodes.
    prev = None
    for _ in range(end - start + 1):
        nxt = node.next
        node.next = prev
        prev = node
        node = nxt
    # Reconnect: section tail -> remainder, and before -> new section head.
    section_tail.next = node
    if before is None:
        return prev
    before.next = prev
    return head
|
3,936 | bcdf1c03d996520f3d4d8d12ec4ef34ea63ef3cf | #!/usr/bin/python3
###################################################
### Euler project
### zdrassvouitie @ 10/2016
###################################################
file_name = '013_largeSum_data'
# Sum the (large) integers listed one per line in the data file.
# int keeps full precision; the original float() silently rounded the
# 50-digit Project Euler numbers, producing a wrong total.
tot = 0
with open(file_name, "r") as f:
    for line in f:
        line = line.strip()
        if not line:
            # Mirror the original behaviour: stop at the first empty line.
            break
        tot += int(line)
print(tot)
|
3,937 | 19ffac718008c7c9279fb8cbc7608597d2d3e708 | print('-'*60)
# Interactive "bouncer" script: greets the guest and gates entry on age.
print('Welcome to CLUB425, the most lit club in downtown ACTvF. Before you can enter, I need you yo answer some question...')
print()
age = input('What is your age today? ')
# NOTE(review): int() raises ValueError on non-numeric input — no validation.
age = int(age)
if age >= 21:
    print('Cool, come on in.')
else:
    print('Your gonna need to back up. This club is 21+ only so find somewhere else to party or find out what robot punches feel like. ')
# Farewell prints regardless of the age-check outcome.
print('Anyway...have a good day! ')
print('-'*60)
3,938 | 5e4a334b373d912ba37b18f95e4866450bda5570 | # Generated by Django 2.2.2 on 2019-07-30 01:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alters usuario.inicio / usuario.saida DateFields
    (saida becomes nullable)."""
    dependencies = [
        ('usuarios', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='usuario',
            name='inicio',
            field=models.DateField(verbose_name='Data Inicio'),
        ),
        migrations.AlterField(
            model_name='usuario',
            name='saida',
            field=models.DateField(null=True, verbose_name='Data de Saida'),
        ),
    ]
|
3,939 | 4328d526da14db756fad8d05457724a23e3e3ef6 | from datetime import datetime
import warnings
import numpy as np
import xarray as xr
from .common import HDF4, expects_file_info
pyhdf_is_installed = False
try:
from pyhdf import HDF, VS, V
from pyhdf.SD import SD, SDC
pyhdf_is_installed = True
except ImportError:
pass
__all__ = [
'CloudSat',
]
class CloudSat(HDF4):
    """File handler for CloudSat data in HDF4 files.
    """
    # This file handler always wants to return at least time, lat and lon
    # fields. These fields are required for this:
    standard_fields = {
        "UTC_start",
        "Profile_time",
        "Latitude",
        "Longitude"
    }
    # Map the standard fields to standard names:
    mapping = {
        "Latitude": "lat",
        "Longitude": "lon",
        "dim_0": "scnline",
    }
    def __init__(self, **kwargs):
        # Call the base class initializer
        super().__init__(**kwargs)
    @expects_file_info()
    def get_info(self, file_info, **kwargs):
        """Return a :class:`FileInfo` object with parameters about the
        file content.
        Args:
            file_info: Path and name of the file of which to retrieve the info
                about.
            **kwargs: Additional keyword arguments.
        Returns:
            A FileInfo object.
        """
        file = SD(file_info.path, SDC.READ)
        # The global HDF attributes 'start_time' / 'end_time' hold compact
        # timestamps such as 20060101120000.
        file_info.times[0] = \
            datetime.strptime(getattr(file, 'start_time'), "%Y%m%d%H%M%S")
        file_info.times[1] = \
            datetime.strptime(getattr(file, 'end_time'), "%Y%m%d%H%M%S")
        return file_info
    @expects_file_info()
    def read(self, file_info, **kwargs):
        """Read and parse HDF4 files and load them to a xarray.Dataset
        A description about all variables in CloudSat dataset can be found in
        http://www.cloudsat.cira.colostate.edu/data-products/level-2c/2c-ice?term=53.
        Args:
            file_info: Path and name of the file as string or FileInfo object.
            **kwargs: Additional keyword arguments that are valid for
                :class:`typhon.files.handlers.common.HDF4`.
        Returns:
            A xarray.Dataset object.
        """
        # We need to import at least the standard fields
        user_fields = kwargs.pop("fields", {})
        fields = self.standard_fields | set(user_fields)
        # We catch the user mapping here, since we do not want to deal with
        # user-defined names in the further processing. Instead, we use our own
        # mapping
        user_mapping = kwargs.pop("mapping", None)
        # Load the dataset from the file:
        dataset = super().read(
            file_info, fields=fields, mapping=self.mapping, **kwargs
        )
        dataset["time"] = self._get_time_field(dataset, file_info)
        # Remove fields that we do not need any longer (expect the user asked
        # for them explicitly)
        dataset = dataset.drop_vars(
            {"UTC_start", "Profile_time"} - set(user_fields),
        )
        if user_mapping is not None:
            dataset = dataset.rename(user_mapping)
        return dataset
    def _get_time_field(self, dataset, file_info):
        # Build one absolute timestamp per profile from the file's date,
        # the UTC offset of the first profile, and per-profile offsets.
        # This gives us the starting time of the first profile in seconds
        # since midnight in UTC:
        first_profile_time = round(dataset['UTC_start'].item(0))
        # This gives us the starting time of all other profiles in seconds
        # since the start of the first profile.
        profile_times = dataset['Profile_time']
        # Convert the seconds to milliseconds
        # NOTE(review): this in-place multiply also scales
        # dataset['Profile_time'] itself; if a caller explicitly requested
        # that field, it is returned in milliseconds — confirm intended.
        profile_times *= 1000
        profile_times = profile_times.astype("int")
        try:
            date = file_info.times[0].date()
        except AttributeError:
            # We have to load the info by ourselves:
            date = self.get_info(file_info).times[0].date()
        # Put all times together so we obtain one full timestamp
        # (date + time) for each data point. We are using the
        # starting date coming from parsing the filename.
        profile_times = \
            np.datetime64(date) \
            + np.timedelta64(first_profile_time, "s") \
            + profile_times.astype("timedelta64[ms]")
        return profile_times
|
3,940 | 805bc144a4945b46b398853e79ded17370ada380 | import glob
import os
import partition
import pickle
import matplotlib.pyplot as plt
import numpy as np
from Cluster import fishermans_algorithm
import argparse
# Batch script: for every pickled example, separate cell types, partition the
# cancer cells into tiles, and optionally emit heatmaps / cluster statistics.
parser = argparse.ArgumentParser()
plt.ion()
parser.add_argument("--fish", help="flag for using fisherman's algorithm")
parser.add_argument("--heat", help="flag for using heatmap")
parser.add_argument("--object", help="flag for dumping the clusters")
args = parser.parse_args()
print(args)
print(args.fish)
print(args.object)
for file in glob.glob("./examples/*.p"):
    print(file)
    # Strip the "./examples/" prefix and the ".p" suffix to get the base name.
    name = file[11:-2]
    # NOTE(review): this handle is never closed — consider a with-statement.
    recover = open("./examples/" + name + ".p", "rb")
    input_list = pickle.load(recover)
    print("Loaded ...")
    cancer_cells = []
    T_cells = []
    cyto_T_cells = []
    for i, row in enumerate(input_list):
        try:
            row = [int(x) for x in row]
        except ValueError:
            # Skip non-numeric rows (e.g. a header line).
            continue
        # Columns 4/5/6 flag the cell type; 0-3 carry the coordinates kept.
        if row[4] > 0:
            cancer_cells.append([row[0], row[1], row[2], row[3]])
        if row[5] > 0:
            T_cells.append([row[0], row[1], row[2], row[3]])
        if row[6] > 0:
            cyto_T_cells.append([row[0], row[1], row[2], row[3]])
    cancer_cells = np.asarray(cancer_cells)
    T_cells = np.asarray(T_cells)
    cyto_T_cells = np.asarray(cyto_T_cells)
    print("Separated ...")
    # Tile grid size (t x t) used for partitioning.
    t = 25
    partitioned_cancer_cells, windows, w, h = partition.partition(cancer_cells, tile_size=t, to_list=True)
    print("Cancer cells partitioned ...")
    if args.heat:
        # Per-tile cancer-cell counts, written out as a text "heatmap".
        spatial_distribution = np.zeros_like(partitioned_cancer_cells)
        for i in range(t):
            for j in range(t):
                spatial_distribution[i][j] = len(partitioned_cancer_cells[i][j])
        with open("./inputs/spatial/" + name + ".txt", "w", newline="") as dest:
            dest.write(str(spatial_distribution))
    if args.fish:
        result = fishermans_algorithm(partitioned_cancer_cells, (t, t), windows, w, h)
        print("Result retrieved ...")
        if args.object:
            with open("./inputs/object/" + name + ".p", "wb") as dest:
                pickle.dump(result, dest)
        # Deduplicate clusters, then histogram cluster sizes (bucket 21 = ">20").
        dups = set()
        histogram = np.zeros(21, dtype=np.uint32)
        for cluster in result:
            dups.add(cluster)
        total_cluster_cells = 0
        clusters_sum = 0
        dups_length = len(dups)
        for i in dups:
            value = len(i.cells)
            clusters_sum += value
            total_cluster_cells += len(i.cells)
            if value > 20:
                histogram[20] += 1
            else:
                histogram[value - 1] += 1
        print("Histogram retrieved ...")
        clusters_avg = clusters_sum / dups_length
        # Sanity check: every cancer cell belongs to exactly one cluster.
        assert(total_cluster_cells == len(cancer_cells))
        y = np.array(histogram)
        x = np.arange(21) + 1
        plt.bar(x, y)
        plt.xlabel("Value")
        plt.ylabel("Frequency")
        # plt.savefig("./inputs/" + name + ".png", bbox_inches='tight')
        plt.show()
        plt.close()
        if args.object:
            with open("./inputs/object/" + name + ".txt", "w", newline="") as dest:
                dest.write("Average size of cluster: " + str(clusters_avg) + "\n")
                dest.write("Number of clusters: " + str(len(dups)) + "\n")
                dest.write("Total number of cells: " + str(total_cluster_cells) + "\n")
                dest.write("Cluster counts: " + "\n")
                for i, x in enumerate(histogram):
                    dest.write(str(i) + ", " + str(x) + "\n")
# macOS-only audible notification when the batch finishes.
os.system('say "All pickle files done in this batch."')
# End of file
|
3,941 | 8280f321b102cace462761f9ece2aebf9e28a432 | #!/usr/bin/python3
"""display your id from github.
"""
from sys import argv
import requests
if __name__ == "__main__":
    # Basic-auth GET of the authenticated user's profile:
    # argv[1] = GitHub username, argv[2] = password / personal access token.
    # NOTE(review): no status-code check — a failed request prints None.
    get = requests.get('https://api.github.com/user',
                       auth=(argv[1], argv[2])).json().get('id')
    print(get)
|
3,942 | 1daecce86769e36a17fe2935f89b9266a0197cf0 | from django.db import models
class TamLicense(models.Model):
    # Stores the product license key; the TextField's verbose name (Italian)
    # prompts the user to paste their license code here.
    license = models.TextField("Inserisci qui il tuo codice licenza.")
|
3,943 | 7a793c2081032745ae58f92a4572954333742dfd | import os
# __file__: this file
# os.path.dirname(): containing directory
# os.path.abspath(): absolute path of the current file/directory
# os.path.join(): join path components
# Project root: two directory levels above this file
BASEDIR = os.path.abspath(
    os.path.dirname(
        os.path.dirname(
            __file__)))
# Data file directory
DATA_DIR = os.path.join(BASEDIR, "data")
DATA_FILE = os.path.join(DATA_DIR, 'data.yaml')
3,944 | ece80a7765674f9d2991029bb86486b616a90f58 | class Solution(object):
def moveZeroes(self, nums):
"""
给定一个数组 nums,编写一个函数将所有 0 移动到数组的末尾,同时保持非零元素的相对顺序。
---
输入: [0,1,0,3,12]
输出: [1,3,12,0,0]
---
思路;
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
num = nums.count(0)
while 0 in nums:
nums.remove(0)
for i in range(num):
nums.append(0)
def moveZeroes1(self, nums):
n = len(nums)
i = 0
j = 0
while i < n:
if nums[i] != 0:
nums[j],nums[i] = nums[i],nums[j]
j += 1
i += 1
|
3,945 | 83d35c413af0cefb71964671b43df1e815aa2115 | # coding: utf-8
"""
Provides test-related code that can be used by all tests.
"""
import os
DATA_DIR = 'tests/data'

def get_data_path(file_name):
    """Return the path of *file_name* inside the test data directory."""
    return os.path.join(DATA_DIR, file_name)
def assert_strings(test_case, actual, expected):
    """Assert two strings are equal with a message showing both the friendly
    and the repr() form of each.

    Args:
        test_case: a unittest.TestCase (only its assertion method is used).
        actual: the string produced by the code under test.
        expected: the string the test expects.
    """
    # Show both friendly and literal versions.
    message = """\
Expected: \"""%s\"""
Actual: \"""%s\"""
Expected: %s
Actual: %s""" % (expected, actual, repr(expected), repr(actual))
    # assertEquals was a deprecated alias and was removed in Python 3.12.
    test_case.assertEqual(actual, expected, message)
|
3,946 | d7aa85c2458ee12a8de0f75419945fbe2acdf95d | #! /usr/bin/python3
class Animal:
    """Base animal exposing the shared behaviours eat/bark/run/sleep."""
    def eat(self):
        print("吃")
    def bark(self):
        print("喝")
    def run(self):  # fixed: the parameter was misspelled 'seft'
        print("跑")
    def sleep(self):
        print("睡")
class Dog(Animal):
    # The subclass inherits every attribute and method of its parent class.
    def bark(self):
        print("汪汪叫")
class XiaoTianQuan(Dog):  # 3. Add other subclass-specific code
    def bark(self):
        # 1. Write code for the subclass's own behaviour
        print("像神一样的叫唤...")
        # 2. Use super(). to call the method encapsulated in the parent class
        # super().bark()
        # Note: calling the method through this subclass would recurse
        # forever (infinite loop), so call ParentClass.method(self) instead:
        Dog.bark(self)
        # 3. Add other subclass code
        print("$%^*%^#%$%")
    def fly(self):
        print("我会飞")
# Demo: the overriding bark() also invokes Dog's implementation explicitly.
xtq = XiaoTianQuan()
xtq.bark()
|
3,947 | 5a3431b79b8f42b3042bb27d787d0d92891a7415 | # -*- coding:utf-8 -*-
'''
Created on 2016��4��8��
@author: liping
'''
import sys
from PyQt4 import QtGui,QtCore
class QuitButton(QtGui.QWidget):
    # Small window with a single Close button wired to the application's
    # quit() slot (legacy PyQt4 string-based signal/slot connection).
    def __init__(self,parent = None):
        QtGui.QWidget.__init__(self,parent)
        self.setGeometry(300,300,250,150)
        self.setWindowTitle('quitButton')
        quit = QtGui.QPushButton('Close',self)
        quit.setGeometry(100,100,60,35)
        # clicked() -> QApplication.quit(): closes the whole application.
        self.connect(quit, QtCore.SIGNAL('clicked()'), QtGui.qApp,QtCore.SLOT('quit()'))
# Standard Qt bootstrap: create the app, show the widget, enter the event loop.
app = QtGui.QApplication(sys.argv)
qb = QuitButton()
qb.show()
sys.exit(app.exec_())
3,948 | dff454cbde985a08b34377b80dd8e3b22f1cc13a | from django.http import response
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from .models import User
from .serializers import UserSerializer,UserCreationSerialier,UserEditionSerializer
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
class Users(APIView):
    """GET: list all users that have not been soft-deleted."""
    # permission_classes = [IsAuthenticated]
    def get(self,request):
        users = User.objects.filter(is_removed=False)
        serialized_users = UserSerializer(instance=users,many=True)
        return Response(serialized_users.data,status=status.HTTP_200_OK)
class UserDetail(APIView):
    """GET: retrieve one non-removed user by primary key, or 422 if absent."""
    def get(self,request,pk):
        user = User.objects.filter(pk=pk,is_removed=False).first()
        if user is None:
            return Response({'error':'User Does Not Exists','success':False},status=status.HTTP_422_UNPROCESSABLE_ENTITY)
        serailized_data = UserSerializer(instance=user)
        return Response(serailized_data.data,status=status.HTTP_200_OK)
class CreateUser(APIView):
    """POST: create a user after validating the payload and email uniqueness."""
    def post(self,request):
        serialized_data = UserCreationSerialier(data=request.data)
        if serialized_data.is_valid():
            data = serialized_data.validated_data
            # Reject duplicates among non-removed users only.
            # NOTE(review): check-then-create is racy under concurrency —
            # a DB-level unique constraint would be safer.
            user = User.objects.filter(email=data['email'],is_removed=False).first()
            if user is not None:
                return Response({'error':'This email is Already Taken!','success':False},status=status.HTTP_400_BAD_REQUEST)
            user = User(email=data['email'],full_name=data['full_name'])
            # set_password hashes the raw password before saving.
            user.set_password(data['password'])
            user.save()
            serialized_user = UserSerializer(instance=user)
            return Response(serialized_user.data,status=status.HTTP_201_CREATED)
        return Response(serialized_data.errors,status=status.HTTP_400_BAD_REQUEST)
class EditUser(APIView):
    """PUT: update an existing non-removed user; 422 if absent, 400 on bad data."""
    def put(self,request,pk):
        user = User.objects.filter(pk=pk,is_removed=False).first()
        if user is None:
            return Response({'error':'User Does Not Exists','success':False},status=status.HTTP_422_UNPROCESSABLE_ENTITY)
        serialized_user = UserEditionSerializer(data=request.data,instance=user)
        if serialized_user.is_valid():
            user = serialized_user.save()
            return Response(UserSerializer(instance=user).data,status=status.HTTP_202_ACCEPTED)
        return Response(serialized_user.errors,status=status.HTTP_400_BAD_REQUEST)
class RemoveUser(APIView):
    """DELETE: soft-delete a user by flagging is_removed (row is kept)."""
    def delete(self,request,pk):
        user = User.objects.filter(pk=pk,is_removed=False).first()
        if user is None:
            return Response({'error':'User Does Not Exists','success':False},status=status.HTTP_422_UNPROCESSABLE_ENTITY)
        user.is_removed = True
        user.save()
        return Response(status=status.HTTP_204_NO_CONTENT)
class GetUserFromToken(APIView):
    """GET: return the profile of the user resolved from the auth token."""
    permission_classes = [IsAuthenticated]
    def get(self,request):
        user = request.user
        serialized_user = UserSerializer(instance=user)
        return Response(serialized_user.data)
3,949 | d8cfd9de95e1f47fc41a5389f5137b4af90dc0f1 | from datetime import datetime
import pytz
from pytz import timezone
# Earlier fixed-offset approach kept for reference; the tz-aware version
# below handles DST correctly via pytz zone objects.
##PDXtime = datetime.now()
##print(PDXtime.hour)
##
##NYCtime = PDXtime.hour + 3
##print(NYCtime)
##
##Londontime = PDXtime.hour + 8
##print(Londontime)
# Print the current local time (full and hour-only) in three zones.
Londontz = timezone('Europe/London')
Londonlocaltime = datetime.now(Londontz)
print(Londonlocaltime)
print(Londonlocaltime.strftime('%H'))  # just the hour in 24 hr format
PDXtz = timezone('America/Los_Angeles')
PDXlocaltime = datetime.now(PDXtz)
print(PDXlocaltime)
print(PDXlocaltime.strftime('%H'))
NYCtz = timezone('America/New_York')
NYClocaltime = datetime.now(NYCtz)
print(NYClocaltime)
print(NYClocaltime.strftime('%H'))
|
3,950 | b9bc6a9dbb3dbe51fbae45078bd499fb97fa003f | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from c7n_azure.provider import resources
from c7n_azure.resources.arm import ArmResourceManager
from c7n.utils import type_schema
from c7n.filters.core import ValueFilter
@resources.register('mysql-flexibleserver')
class MySQLFlexibleServer(ArmResourceManager):
    """Cloud Custodian resource for Azure Database for MySQL flexible servers."""
    class resource_type(ArmResourceManager.resource_type):
        doc_groups = ['Databases']
        service = 'azure.mgmt.rdbms.mysql_flexibleservers'
        client = 'MySQLManagementClient'
        enum_spec = ('servers', 'list', None)
        default_report_fields = (
            'name',
            'location',
            'resourceGroup'
        )
        # NOTE(review): this type string ends in /configurations although the
        # enum lists servers — confirm it is intentional for this resource.
        resource_type = 'Microsoft.DBForMySQL/flexibleservers/configurations'
@MySQLFlexibleServer.filter_registry.register('server-parameter')
class ServerParametersFilter(ValueFilter):
    """Filter by configuration parameter for mysql flexible server

    :example:

    Example JSON document showing the data format provided to the filter

    .. code-block:: json

        {
            "value": "TLSv1.2"
            "description": "Which protocols the server permits for encrypted
            connections. By default, TLS 1.2 is enforced",
            "defaultValue": "TLSv1.2",
            "dataType": "Set",
            "allowedValues": "TLSv1,TLSv1.1,TLSv1.2",
            "source": "system-default",
            "isReadOnly": "False",
            "isConfigPendingRestart": "False",
            "isDynamicConfig": "False",
        }

    :example:

    Find Mysql Flexible servers with tls_version not set to TLSV1.2

    .. code-block:: yaml

        policies:
          - name: mysql-flexible-server-tls-version
            resource: azure.mysql-flexibleserver
            filters:
              - type: server-parameter
                name: tls_version
                key: value
                op: eq
                value: 'TLSv1.2'
    """
    schema = type_schema(
        'server-parameter',
        required=['type', 'name'],
        rinherit=ValueFilter.schema,
        name={
            'type': 'string',
            'allowed_value': ['TLSv1.2']
        },
    )
    def __call__(self, resource):
        # Cache the fetched configuration under a c7n:-prefixed key so a
        # repeated evaluation does not re-query the Azure API per resource.
        key = f'c7n:config-params:{self.data["name"]}'
        if key not in resource['properties']:
            client = self.manager.get_client()
            query = client.configurations.get(
                resource['resourceGroup'],
                resource['name'],
                self.data["name"]
            )
            resource['properties'][key] = query.serialize(True).get('properties')
        # Delegate the actual key/op/value match to ValueFilter.
        return super().__call__(resource['properties'].get(key))
|
3,951 | 1049a7d2cdc54c489af6246ec014deb63a98f96d | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated: creates FichaTecnica and the Metrado1..Metrado4
    hierarchy, then wires FichaTecnica to each Metrado level via FKs."""
    dependencies = [
        ('levantamiento', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='FichaTecnica',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('numero', models.IntegerField(default=0)),
                ('largo', models.FloatField(default=0)),
                ('ancho', models.FloatField(default=0)),
                ('alto', models.FloatField(default=0)),
                ('parcial', models.IntegerField(default=0)),
                ('unidad', models.IntegerField(default=0)),
                ('punitario', models.IntegerField(default=0)),
                ('form', models.ForeignKey(related_name='ficha_tecnica', to='levantamiento.Levantamiento')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Metrado1',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('codigo', models.CharField(max_length=25)),
                ('descripcion', models.TextField()),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Metrado2',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('codigo', models.CharField(max_length=25)),
                ('descripcion', models.TextField()),
                ('metrado1', models.ForeignKey(related_name='metrado_2', to='metrados.Metrado1')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Metrado3',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('codigo', models.CharField(max_length=25)),
                ('descripcion', models.TextField()),
                ('metrado2', models.ForeignKey(related_name='metrado_3', to='metrados.Metrado2')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Metrado4',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('codigo', models.CharField(max_length=25)),
                ('descripcion', models.TextField()),
                ('metrado3', models.ForeignKey(related_name='metrado_4', to='metrados.Metrado3')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='fichatecnica',
            name='metrado1',
            field=models.ForeignKey(related_name='ficha_tecnica', to='metrados.Metrado1'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='fichatecnica',
            name='metrado2',
            field=models.ForeignKey(related_name='ficha_tecnica', to='metrados.Metrado2'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='fichatecnica',
            name='metrado3',
            field=models.ForeignKey(related_name='ficha_tecnica', to='metrados.Metrado3'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='fichatecnica',
            name='metrado4',
            field=models.ForeignKey(related_name='ficha_tecnica', to='metrados.Metrado4'),
            preserve_default=True,
        ),
    ]
|
3,952 | 384588e1a767081191228db2afa4a489f967a220 | """
AlbumInfo-related frames for the Album view.
"""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Iterator, Collection, Any
from ds_tools.caching.decorators import cached_property
from tk_gui.elements import Element, HorizontalSeparator, Multiline, Text, Input, Image, Spacer
from tk_gui.elements.buttons import Button, EventButton as EButton
from tk_gui.elements.choices import ListBox, CheckBox, Combo
from tk_gui.elements.frame import InteractiveFrame, Frame, BasicRowFrame
from tk_gui.elements.menu import Menu, MenuItem
from tk_gui.elements.rating import Rating
from tk_gui.popups import pick_file_popup
from music.common.disco_entry import DiscoEntryType
from music.files import SongFile
from music.manager.update import TrackInfo, AlbumInfo
from ..utils import AlbumIdentifier, TrackIdentifier, get_album_info, get_album_dir, get_track_info, get_track_file
from .helpers import IText
from .images import AlbumCoverImageBuilder
from .list_box import EditableListBox
if TYPE_CHECKING:
from tk_gui.typing import Layout, Bool, XY
__all__ = ['AlbumInfoFrame', 'TrackInfoFrame']
log = logging.getLogger(__name__)
# Union of element types that may hold/edit a tag value in these frames.
ValueEle = Text | Multiline | Rating | ListBox | Combo | EditableListBox | Input
# Font used for the oversized navigation / open-directory buttons.
LRG_FONT = ('Helvetica', 20)
class TagModMixin:
    """Mixin tracking original tag values so pending edits can be diffed/reset."""
    # Maps tag key -> (original value, GUI element showing/editing that value).
    _tag_vals_and_eles: dict[str, tuple[Any, ValueEle]]
    def _iter_changes(self) -> Iterator[tuple[str, ValueEle, Any, Any]]:
        """Yield (key, element, original, current) for tags whose value changed."""
        for key, (original_val, val_ele) in self._tag_vals_and_eles.items():
            if (value := val_ele.value) != original_val:
                yield key, val_ele, original_val, value
    def reset_tag_values(self):
        """Restore every modified element back to its original value."""
        for key, val_ele, original_val, value in self._iter_changes():
            match val_ele:
                case ListBox() | EditableListBox():
                    val_ele.update(choices=original_val, replace=True, select=True)
                case _:  # Input() | Text() | CheckBox() | Combo() | Rating()
                    val_ele.update(original_val)
    def get_modified(self) -> dict[str, tuple[Any, Any]]:
        """Return {key: (original, current)} for all pending modifications."""
        return {key: (original_val, value) for key, val_ele, original_val, value in self._iter_changes()}
class AlbumInfoFrame(TagModMixin, InteractiveFrame):
    """Interactive frame showing album metadata, cover art, and action buttons."""
    album_info: AlbumInfo
    def __init__(self, album: AlbumIdentifier, cover_size: XY = (250, 250), **kwargs):
        super().__init__(**kwargs)
        self.album_info = get_album_info(album)
        self.album_dir = get_album_dir(album)
        self.cover_size = cover_size
        self._tag_vals_and_eles = {}
    # region Layout Generation
    def get_custom_layout(self) -> Layout:
        """Yield the rows: meta info, cover + tag frame, separator, buttons."""
        yield from self.build_meta_rows()
        yield [self.cover_image_frame, TagFrame([*self.build_tag_rows()], disabled=self.disabled)]
        yield [HorizontalSeparator()]
        yield from self.build_buttons()
    def build_meta_rows(self):
        """Yield rows summarizing bitrate / sample rate / bit depth across tracks."""
        data = {'bitrate_str': set(), 'sample_rate_str': set(), 'bits_per_sample': set()}
        for track in self.album_dir:
            info = track.info
            for key, values in data.items():
                if value := info[key]:
                    values.add(str(value))
        # Collapse each set to a ' / '-joined string (multiple values possible).
        data = {key: ' / '.join(sorted(values)) for key, values in data.items()}
        yield [
            Text('Bitrate:'), IText(data['bitrate_str'], size=(18, 1)),
            Text('Sample Rate:'), IText(data['sample_rate_str'], size=(18, 1)),
            Text('Bit Depth:'), IText(data['bits_per_sample'], size=(18, 1)),
        ]
        yield [HorizontalSeparator()]
    def build_tag_rows(self):
        """Yield a (label, value-element) row per album tag, tracking originals."""
        tooltips = {
            'name': 'The name that was / should be used for the album directory',
            'parent': 'The name that was / should be used for the artist directory',
            'singer': 'Solo singer of a group, when the album should be sorted under their group',
            'solo_of_group': 'Whether the singer is a soloist',
        }
        disabled = self.disabled
        for key, value in self.album_info.to_dict(skip={'tracks'}, genres_as_set=True).items():
            if tooltip := tooltips.get(key):
                kwargs = {'tooltip': tooltip}
            else:
                kwargs = {}
            key_ele = label_ele(key, **kwargs)
            if key == 'type':
                # Combo of known disco entry types, preserving a custom value.
                types = [de.real_name for de in DiscoEntryType]
                if value:
                    if isinstance(value, DiscoEntryType):
                        value = value.real_name
                    elif value not in types:
                        types.append(value)
                val_ele = Combo(
                    types, value, size=(48, None), disabled=disabled, key=key, change_cb=self._update_numbered_type
                )
            elif key == 'genre':
                val_ele = _genre_list_box(value, self.album_info, disabled, key=key)
            elif key in {'mp4', 'solo_of_group'}:
                # mp4 is informational only and always stays read-only.
                kwargs['disabled'] = True if key == 'mp4' else disabled
                val_ele = CheckBox('', default=value, pad=(0, 0), key=key, **kwargs)
            else:
                if key.startswith('wiki_'):
                    kwargs['link'] = True
                elif key == 'number':
                    kwargs['change_cb'] = self._update_numbered_type
                value = _normalize_input_value(value)
                val_ele = Input(value, size=(50, 1), disabled=disabled, key=key, **kwargs)
            self._tag_vals_and_eles[key] = (value, val_ele)
            yield [key_ele, val_ele]
    @cached_property
    def cover_image_frame(self) -> Frame:
        """Thumbnail frame for the album cover with a right-click Replace menu."""
        class ImageMenu(Menu):
            MenuItem('Replace', callback=self._replace_cover_image, enabled=lambda me: not self.disabled)
            # TODO: Include get_wiki_cover_choice?
        cover_builder = AlbumCoverImageBuilder(self.album_info, self.cover_size)
        return cover_builder.make_thumbnail_frame(right_click_menu=ImageMenu())
    # endregion
    # region Layout Generation - Buttons
    def build_buttons(self) -> Layout:
        # These frames need to be in the same row for them to occupy the same space when visible
        yield [self.view_buttons_frame, self.edit_buttons_frame]
    @cached_property
    def view_buttons_frame(self) -> Frame:
        """Frame of view-mode buttons; shown only while the frame is disabled."""
        rows = [[BasicRowFrame(row, side='t')] for row in self._build_view_buttons()]
        return Frame(rows, visible=self.disabled, side='t')
    def _build_view_buttons(self) -> Iterator[list[Button]]:  # noqa
        """Yield rows of view-mode action buttons (clean, edit, copy, nav)."""
        kwargs = {'size': (18, 1), 'borderwidth': 3}
        yield [
            EButton('Clean & Add BPM', key='clean_and_add_bpm', **kwargs),
            EButton('View All Tags', key='view_all_tags', **kwargs),
            EButton('Edit', key='edit_album', **kwargs),
            EButton('Wiki Update', key='wiki_update', **kwargs),
        ]
        kwargs['size'] = (25, 1)
        # TODO: Handle replacing inferior versions in real destination directory
        yield [
            # EButton('Sync Ratings Between Albums', key='sync_album_ratings', disabled=True, **kwargs),
            EButton('Sort Into Library', key='sort_into_library', **kwargs),
            # EButton('Copy Tags Between Albums', key='copy_album_tags', disabled=True, **kwargs),
        ]
        yield [
            EButton('Copy Tags To Album...', key='copy_src_album_tags', **kwargs),
            EButton('Copy Tags From Album...', key='copy_dst_album_tags', **kwargs),
        ]
        # TODO: Unify the above/below rows / shorten text / merge functionality with the sort view
        yield [
            EButton('Copy Tags To Lib Album...', key='copy_src_lib_album_tags', **kwargs),
            EButton('Copy Tags From Lib Album...', key='copy_dst_lib_album_tags', **kwargs),
        ]
        open_btn = EButton('\U0001f5c1', key='open', font=LRG_FONT, size=(10, 1), tooltip='Open Album', borderwidth=3)
        album_dir = self.album_dir
        # TODO: handle: music.files.exceptions.InvalidAlbumDir: Invalid album dir - contains directories
        if len(album_dir.parent) > 1:
            # Prev/next sibling navigation arrows; spacers keep alignment.
            kwargs = dict(font=LRG_FONT, size=(5, 1), borderwidth=3)
            yield [
                EButton('\u2190', key='prev_dir', **kwargs) if album_dir.has_prev_sibling else Spacer(size=(90, 56)),
                open_btn,
                EButton('\u2192', key='next_dir', **kwargs) if album_dir.has_next_sibling else Spacer(size=(90, 56)),
            ]
        else:
            yield [open_btn]
    @cached_property
    def edit_buttons_frame(self) -> BasicRowFrame:
        """Save/Cancel row; shown only while the frame is editable."""
        kwargs = {'size': (18, 1), 'borderwidth': 3}
        row = [EButton('Review & Save Changes', key='save', **kwargs), EButton('Cancel', key='cancel', **kwargs)]
        return BasicRowFrame(row, side='t', anchor='c', visible=not self.disabled)
    # endregion
    # region Event Handling
    def enable(self):
        """Switch into edit mode: swap the visible button frames."""
        if not self.disabled:
            return
        super().enable()
        self.view_buttons_frame.hide()
        self.edit_buttons_frame.show()
    def disable(self):
        """Switch back into view mode: swap the visible button frames."""
        if self.disabled:
            return
        super().disable()
        self.edit_buttons_frame.hide()
        self.view_buttons_frame.show()
    def _update_numbered_type(self, var_name, unknown, action):
        # Registered as a change_cb for `type` and `number`
        num_ele: Input = self._tag_vals_and_eles['number'][1]
        value = ''
        try:
            value = num_ele.value.strip()
            num_val = int(value)
        except (TypeError, ValueError, AttributeError):
            # An empty number field is valid; anything non-numeric is not.
            num_ele.validated(not value)
            return
        else:
            num_ele.validated(True)
        type_val = DiscoEntryType(self._tag_vals_and_eles['type'][1].value)
        if type_val == DiscoEntryType.UNKNOWN:
            return
        # Derive the combined "numbered type" display (e.g. '3rd Album').
        num_type_ele: Input = self._tag_vals_and_eles['numbered_type'][1]
        num_type_ele.update(type_val.format(num_val))
    def _replace_cover_image(self, event=None):
        """Menu callback: pick a new cover file and update path + thumbnail."""
        if self.disabled:
            return
        if path := pick_file_popup(title='Pick new album cover'):
            cover_path_ele: Input = self._tag_vals_and_eles['cover_path'][1]
            cover_path_ele.update(path.as_posix())
            image_ele: Image = self.cover_image_frame.rows[0].elements[0]
            image_ele.image = path
    # endregion
class TrackInfoFrame(TagModMixin, InteractiveFrame):
    """Interactive frame showing a single track's file info and editable tags."""
    track_info: TrackInfo
    song_file: SongFile
    # Whether to render cover art for the track (off by default).
    show_cover: Bool = False
    def __init__(self, track: TrackIdentifier, **kwargs):
        super().__init__(**kwargs)
        self.track_info = get_track_info(track)
        self.song_file = get_track_file(track)
        self._tag_vals_and_eles = {}
    @cached_property
    def path_str(self) -> str:
        """POSIX-style string form of the track's path."""
        return self.track_info.path.as_posix()
    @cached_property
    def file_name(self) -> str:
        """Base file name of the track."""
        return self.track_info.path.name
    def get_custom_layout(self) -> Layout:
        yield from self.build_meta_rows()
        yield from self.build_info_rows()
    def build_meta_rows(self) -> Iterator[list[Element]]:
        """Yield read-only rows for file name, length, and tag version."""
        yield [Text('File:', size=(6, 1)), IText(self.file_name, size=(50, 1))]
        sf = self.song_file
        yield [
            Text('Length:', size=(6, 1)), IText(sf.length_str, size=(10, 1)),
            Text('Type:'), IText(sf.tag_version, size=(20, 1)),
        ]
    def build_info_rows(self, keys: Collection[str] = None) -> Iterator[list[Element]]:
        """Yield tag rows; *keys* lists fields to EXCLUDE from the layout."""
        fields = ['artist', 'title', 'name', 'genre', 'disk', 'num', 'rating']
        if keys:
            fields = [f for f in fields if f not in keys]
        track_info, disabled = self.track_info, self.disabled
        for key in fields:
            if key == 'genre':
                # Only show genres not already covered at the album level.
                value = track_info.genre_set.difference(track_info.album.genre_set)
                val_ele = _genre_list_box(value, track_info, disabled)
            elif key == 'rating':
                if (value := track_info[key]) is None:
                    value = 0
                val_ele = Rating(value, show_value=True, pad=(0, 0), disabled=disabled)
            else:
                value = _normalize_input_value(track_info[key])
                val_ele = Input(value, size=(50, 1), disabled=disabled)
            self._tag_vals_and_eles[key] = (value, val_ele)
            yield [label_ele(key, size=(6, 1)), val_ele]
def _genre_list_box(genres: Collection[str], info: TrackInfo | AlbumInfo, disabled: bool, **kwargs) -> EditableListBox:
    """Build an editable, sorted list box for genre values.

    Caller-supplied kwargs take precedence; the defaults below fill any gaps.
    """
    defaults = {
        'add_title': 'Add genre',
        'add_prompt': f'Enter a new genre value to add to {info.title!r}',
        'list_width': 40,
    }
    for name, default in defaults.items():
        kwargs.setdefault(name, default)
    return EditableListBox(sorted(genres), disabled=disabled, val_type=set, **kwargs)
def _normalize_input_value(value) -> str:
if value is None:
value = ''
elif not isinstance(value, str):
value = str(value)
return value
def label_ele(text: str, size: XY = (15, 1), **kwargs) -> Text:
    """Create a Text label from a snake_case field name ('disk_num' -> 'Disk Num')."""
    label = text.replace('_', ' ').title()
    return Text(label, size=size, **kwargs)
class TagFrame(InteractiveFrame):
    """Frame of tag-editing elements that can be re-enabled as a group."""

    def enable(self):
        """Enable every editable element in the frame (skipping read-only ones).

        Elements lacking a ``key`` or an ``enable`` method are tolerated and
        simply skipped / left alone.
        """
        if not self.disabled:
            return
        for row in self.rows:
            for element in row.elements:
                try:
                    read_only = element.key == 'mp4'  # Read-only
                except AttributeError:
                    read_only = False
                if read_only:
                    continue
                try:
                    element.enable()  # noqa
                except AttributeError:
                    pass
        self.disabled = False
|
3,953 | e008f9b11a9b7480e9fb53391870809d6dea5497 | import numpy as np
from global_module.implementation_module import Autoencoder
from global_module.implementation_module import Reader
import tensorflow as tf
from global_module.settings_module import ParamsClass, Directory, Dictionary
import random
import sys
import time
class Test:
    """Runs a restored autoencoder over the test split and dumps latent codes.

    Fixes vs. the original: the epoch loss was divided by the last enumerate
    index (N - 1 batches, crashing with ZeroDivisionError for a single batch)
    instead of the batch count; the output file was never closed on error; and
    module-level ``global`` state was removed.
    """

    def __init__(self):
        # Monotonic step counter across run_epoch calls; drives summary logging.
        self.iter_test = 0

    def run_epoch(self, session, min_loss, model_obj, reader, input, writer):
        """Run one pass over ``input``, writing latent vectors to a CSV.

        :param session: live TF session with the model's variables restored.
        :param min_loss: passed through unchanged (kept for interface parity).
        :param model_obj: model exposing params/dir_obj/input/loss/rep tensors.
        :param reader: provides data_iterator(input) yielding batches.
        :param input: the test instances to iterate.
        :param writer: tf summary FileWriter, or None when logging is disabled.
        :return: (mean loss over batches, min_loss).
        """
        params = model_obj.params
        epoch_combined_loss = 0.0
        num_batches = 0
        # Context manager guarantees the CSV is closed even if session.run fails.
        with open(model_obj.dir_obj.log_emb_path + '/latent_representation.csv', 'w') as output_file:
            for step, curr_input in enumerate(reader.data_iterator(input)):
                feed_dict = {model_obj.input: curr_input}
                total_loss, latent_rep, summary_test = session.run(
                    [model_obj.loss, model_obj.rep, model_obj.merged_summary_test], feed_dict=feed_dict)
                epoch_combined_loss += total_loss
                num_batches += 1
                self.iter_test += 1
                if self.iter_test % params.log_step == 0 and params.log:
                    writer.add_summary(summary_test, self.iter_test)
                # One latent vector per row, space-separated.
                for each_rep in latent_rep:
                    output_file.write(' '.join(str(x) for x in each_rep).strip() + '\n')
        # Mean over the actual number of batches (was: divide by last index).
        if num_batches:
            epoch_combined_loss /= num_batches
        return epoch_combined_loss, min_loss

    def run_test(self):
        """Restore the saved model and evaluate it once on the test set."""
        mode_test = 'TE'
        # test object
        params_test = ParamsClass(mode=mode_test)
        dir_test = Directory(mode_test)
        test_reader = Reader(params_test)
        test_instances = test_reader.read_image_data(dir_test.data_filename)
        random.seed(4321)
        global_min_loss = sys.float_info.max
        print('***** INITIALIZING TF GRAPH *****')
        # Defined up front so run_epoch always receives a value, even when
        # logging is disabled (the original left it undefined in that case).
        test_writer = None
        with tf.Graph().as_default(), tf.Session() as session:
            with tf.variable_scope("model"):
                test_obj = Autoencoder(params_test, dir_test)
            model_saver = tf.train.Saver()
            model_saver.restore(session, test_obj.dir_obj.test_model)
            if params_test.log:
                test_writer = tf.summary.FileWriter(dir_test.log_path + '/test')
            print('**** TF GRAPH INITIALIZED ****')
            start_time = time.time()
            test_loss, _ = self.run_epoch(session, global_min_loss, test_obj, test_reader, test_instances, test_writer)
            print("Epoch: %d Test loss: %.4f" % (1, test_loss))
            curr_time = time.time()
            print('1 epoch run takes ' + str((curr_time - start_time) / 60) + ' minutes.')
            if params_test.log:
                test_writer.close()
|
3,954 | 44bf409d627a6029ab4c4f1fff99f102b8d57279 | # cook your dish here
t=int(input())
while t:
n=int(input())
a=list(map(int,input().split()))
a.sort(reverse=True)
s=0
for i in range(n):
k=a[i]-i
if k>=0:
s+=k
print(s%1000000007)
t-=1
|
3,955 | 6db0adf25a7cc38c8965c07cc80bde0d82c75d56 | import os
from sqlalchemy import Column, ForeignKey, Integer, String, Float, Boolean, DateTime
from sqlalchemy import and_, or_
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine, func
from sqlalchemy.orm import sessionmaker, scoped_session, load_only
from sqlalchemy.pool import NullPool
from datetime import datetime
# Declarative base shared by all ORM models below.
Base = declarative_base()
# One-letter day labels indexed by weekday number (Mon..Sun).
# NOTE(review): 'T' and 'S' each appear twice, so Tue/Thu and Sat/Sun render
# identically -- confirm this is acceptable for the UI.
days = ['M','T','W','T','F', 'S', 'S']
# using sqlalchemy declare a class for each table in our database.
class Station(Base):
    """this one is for storing information about each station."""
    __tablename__ = "station"
    # Station number is presumably assigned by the upstream bikes API, hence
    # no autoincrement.
    number = Column(Integer, primary_key=True, autoincrement=False)
    contract_name = Column(String(250), nullable=False)
    name = Column(String(250), nullable=False)
    address = Column(String(250), nullable=False)
    position_lat = Column(Float, nullable=False)
    position_long = Column(Float, nullable=False)
    banking = Column(Boolean, nullable=True)
    bonus = Column(Boolean, nullable=True)
    # One-to-many link to usage snapshots; lazy="dynamic" keeps it as a query.
    station_usage = relationship("UsageData", lazy="dynamic")

    @property
    def last_updated(self):
        """this method is used in the scraper to return the last updated station.
        this lets us pull only updated data."""
        try:
            return max(self.station_usage, key=lambda x: x.last_update).dt_last_update
        except ValueError:
            # max() over an empty iterable raises ValueError (no usage rows
            # yet); fall back to the epoch so the scraper does a full fetch.
            return datetime.fromtimestamp(0)

    @classmethod
    def get_current_station_info(cls, dbsession):
        """as the method name suggests this returns the up to date station information."""
        # Subquery: the id of the latest usage row per station, then join back
        # to fetch that row's columns.
        sub = dbsession.query(UsageData.station_id, func.max(UsageData.id).label('max_update')).group_by(
            UsageData.station_id).subquery()
        return dbsession.query(
            UsageData.last_update,
            UsageData.available_bike_stands, UsageData.available_bikes).join(sub, and_(
            sub.c.max_update == UsageData.id)).all()
class UsageData(Base):
    """holds data about bicycle usage for every station."""
    __tablename__ = "bike_usage"
    id = Column(Integer, primary_key=True)
    station_id = Column(Integer, ForeignKey('station.number'))
    status = Column(Boolean, nullable=False)
    bike_stands = Column(Integer, nullable=False)
    available_bike_stands = Column(Integer, nullable=False)
    available_bikes = Column(Integer, nullable=False)
    last_update = Column(DateTime, nullable=False)

    @property
    def dt_last_update(self):
        """return when was the last update. Once again this is used in the scraper to determine newly updated data."""
        return self.last_update

    @dt_last_update.setter
    def dt_last_update(self, val):
        """creates a datetime object which is added to the database with an update from the dublinbikes api.
        once again used by the scraper. essentially the adds the time at which the update was entered."""
        # The API supplies epoch milliseconds; convert to seconds first.
        self.last_update = datetime.fromtimestamp(int(val)/1000)

    @classmethod
    def get_bikes_for_weekday(cls, dbsession, weekday, station_id):
        """returns a list of bikes for a provided weekday and station.
        averaged per hour so 24 results."""
        # NOTE(review): func.hour/func.weekday are passed straight through to
        # the SQL dialect -- this relies on MySQL's HOUR()/WEEKDAY().
        station = [("Time", "Available Bikes", "Available Stands")]
        station_data = dbsession.query(func.hour(cls.last_update),
                                       func.avg(cls.available_bikes),
                                       func.avg(cls.available_bike_stands)) \
            .filter(cls.station_id == station_id,
                    func.weekday(cls.last_update) == weekday) \
            .group_by(func.hour(cls.last_update)) \
            .all()
        # this section parses the query return into a readable list.
        # from docs:extend() appends the contents of seq to list.
        if station_data:
            station.extend([(a, float(b), float(c)) for a, b, c in station_data])
        else:
            # Placeholder row so chart consumers always get at least one tuple.
            station.extend([(0,0,0)])
        return station

    @classmethod
    def get_bikes_for_wetday(cls, dbsession, wetdate, station_id):
        """very similar to get_bikes_for_weekday but not the same: date specified is wetdate not weekday.
        returns a list of bikes for a provided datetime object (wetdate) and station."""
        # averaged per hour so 24 results.
        station = [("Time", "Available Bikes", "Available Stands")]
        station_data = dbsession.query(
            func.hour(cls.last_update),
            func.avg(cls.available_bikes),
            func.avg(cls.available_bike_stands))\
            .filter(cls.station_id == station_id,
                    func.date(cls.last_update) == wetdate.date())\
            .group_by(func.hour(cls.last_update)).all()
        # this section parses the query return into a readable list.
        # from docs:extend() appends the contents of seq to list.
        if station_data:
            station.extend([(a, float(b), float(c)) for a, b, c in station_data])
        else:
            station.extend([(0,0,0)])
        return station

    @classmethod
    def get_bikes_for_week(cls, dbsession, station_id):
        """as method name describes.
        similar to methods above but averaged over week."""
        station = [("Day", "Available Bikes")]
        station_data = dbsession.query(func.weekday(cls.last_update),
                                       func.avg(cls.available_bikes)) \
            .filter(cls.station_id == station_id) \
            .group_by(func.weekday(cls.last_update)) \
            .all()
        # this section parses the query return into a readable list.
        # from docs:extend() appends the contents of seq to list.
        if station_data:
            # Map the numeric weekday onto the module-level 'days' labels.
            station.extend([(days[a], float(b)) for a, b in station_data])
        else:
            station.extend([(0,0)])
        return station
class Weather(Base):
    """holds data scraped from the open weather API."""
    __tablename__ = "weather"
    id = Column(Integer, nullable=False, primary_key=True, autoincrement=True)
    # Column names mirror the flattened OpenWeather JSON payload
    # (coord.lon -> coord_lon, main.temp -> main_temp, etc.).
    coord_lon = Column(Float)
    coord_lat = Column(Float)
    weather_id = Column(Integer)
    weather_main = Column(String(45))
    weather_description = Column(String(45))
    weather_icon = Column(String(10))
    base = Column(String(45))
    main_temp = Column(Integer)
    main_pressure = Column(Integer)
    main_humidity = Column(Integer)
    main_temp_min = Column(Integer)
    main_temp_max = Column(Integer)
    visibility = Column(Integer)
    wind_speed = Column(Float)
    wind_deg = Column(Integer)
    clouds_all = Column(Integer)
    dt = Column(DateTime)
    sys_type = Column(Integer)
    sys_id = Column(Integer)
    sys_message = Column(Float)
    sys_country = Column(String(2))
    sys_sunrise = Column(DateTime)
    sys_sunset = Column(DateTime)
    city_id = Column(Integer)
    city_name = Column(String(6))
    cod = Column(Integer)

    @classmethod
    def findWetWeatherDays(self, dbsession, today):
        """finds days where there was wet weather."""
        # NOTE(review): declared @classmethod but the first parameter is named
        # 'self' -- it actually receives the class.
        wetDays = dbsession.query(self.dt).filter(or_(self.weather_description == "light rain", self.weather_description == "moderate rain")).all()
        # if one of those days is today return it.
        # else just return a wet day.
        # NOTE(review): as written the loop returns on its very first
        # iteration (either branch), so only wetDays[0] is ever compared
        # against 'today' -- a for/else was probably intended.  Also returns
        # None implicitly when wetDays is empty; confirm callers handle that.
        for i in range(len(wetDays)):
            if today == wetDays[i][0].weekday():
                return wetDays[i][0]
            else:
                return wetDays[0][0]
# path to DB
# Credentials come from environment variables (DatabaseUser, DatabasePassword,
# DatabaseServer); a missing variable raises KeyError at import time.
connection_string='mysql+mysqldb://{username}:{password}@{host}:3306/dublinbikesdata'.format(username=os.environ['DatabaseUser'],
                                                                                            password=os.environ['DatabasePassword'],
                                                                                            host=os.environ['DatabaseServer'])
# NullPool disables connection pooling: every checkout opens a new connection.
engine = create_engine(connection_string, poolclass=NullPool)
# create the session using sqlalchemy.
db_session = scoped_session(sessionmaker(bind=engine, autocommit=False, autoflush=False))
if __name__=="__main__":
    """Below is used for testing if the database is working by running this file directly.
    not used in the actual app."""
    station_id = 42  # sample id; NOTE(review): never used below
    static_info = db_session.query(Station.number,
                                   Station.name,
                                   Station.address,
                                   Station.position_lat,
                                   Station.position_long).all()
    dynamic_info = Station.get_current_station_info(db_session)
    static_fields = ['number', 'name', 'address', 'position_lat', 'position_long']
    dynamic_fields = ['last_update', 'available_bike_stands', 'available_bikes']
    # Pair each static row with its latest dynamic reading, positionally.
    # NOTE(review): this relies on both queries yielding stations in the same
    # order, but neither has an ORDER BY -- confirm the pairing is stable.
    json_data = [dict(zip(static_fields + dynamic_fields, static + dynamic))
                 for static, dynamic in
                 zip(static_info, dynamic_info)]
    print(json_data)
|
3,956 | 26744d51dbce835d31d572a053294c9d280e1a8b | #SEE /etc/rc.local FOR BOOTUP COMMANDS
from Measure_and_File import *
from WebServer import *
from multiprocessing import *
# Launch the sensor/measurement loop and the web server as two independent
# processes so a blocking web request cannot stall data collection.
web = WebServer()
board_boy = Measurer_and_Filer()
#try:
proc1 = Process( target=board_boy.measure_and_file, args=() )
proc1.start()
proc2 = Process( target=web.serve, args=() )
proc2.start()
#except:
#print ("Error: unable to start processes")
# NOTE(review): neither process is join()ed here; both run until terminated
# externally (see the rc.local bootup note at the top of this file).
|
3,957 | f3d61a9aa4205e91811f17c4e9520811445cc6a9 | import sys
import random
#coming into existence, all does not begin and end at this moment;
#not yet fully conscious, you pick up only snippets of your environment
# Python 2 script: echo a random substring of every line read from stdin.
for line in sys.stdin:
    line = line.strip()
    # Slice start in [0, 10], end in [11, 20]; an end past the line's length
    # simply truncates the slice (possibly to an empty string).
    randLow = random.randint(0, 10)
    randHigh = random.randint(11, 20)
    print line[randLow:randHigh]
3,958 | a5f3af6fc890f61eecb35bd157fc51bb65b4c586 | # Standard Library imports:
import argparse
import os
from pathlib import Path
from typing import Dict, List
# 3rd Party imports:
import keras.backend as K
from keras.layers import *
from keras.models import Model
import tensorflow as tf
from tensorflow.python.framework import graph_io, graph_util
from tensorflow.python.tools import import_pb_to_tensorboard
def keras_to_tensorflow(
    keras_model,
    output_dir: Path,
    model_name,
    out_prefix="output_",
    log_tensorboard=True,
):
    """Freeze a Keras model into a self-contained TensorFlow GraphDef (.pb).

    Uses the TF1 session/graph API (graph_util, graph_io).

    :param keras_model: compiled Keras model to export.
    :param output_dir: directory the .pb (and TensorBoard data) is written to.
    :param model_name: file name for the frozen graph.
    :param out_prefix: prefix used to name the aliased output nodes.
    :param log_tensorboard: also import the written .pb for TensorBoard viewing.
    """
    if not output_dir.exists():
        output_dir.mkdir(parents=True, exist_ok=True)
    # graph_io.write_graph expects a plain string path from here on.
    output_dir: str = str(output_dir)
    out_nodes = []
    for i in range(len(keras_model.outputs)):
        out_nodes.append(out_prefix + str(i + 1))
        # Alias each model output under a predictable node name.
        tf.identity(keras_model.output[i], out_prefix + str(i + 1))
    sess = K.get_session()
    init_graph = sess.graph.as_graph_def()
    # Bake current variable values into constants so the graph is standalone.
    main_graph = graph_util.convert_variables_to_constants(sess, init_graph, out_nodes)
    graph_io.write_graph(main_graph, output_dir, name=model_name, as_text=False)
    if log_tensorboard:
        import_pb_to_tensorboard.import_to_tensorboard(
            os.path.join(output_dir, model_name), output_dir
        )
"""
We explicitly redefine the SqueezNet architecture since Keras has no predefined
SqueezNet
"""
def squeezenet_fire_module(input, input_channel_small=16, input_channel_large=64):
    """SqueezeNet 'fire' block: a 1x1 squeeze conv followed by parallel
    1x1 and 3x3 expand convs whose outputs are concatenated channel-wise.

    NOTE(review): the first parameter shadows the builtin ``input``; renaming
    it would break keyword callers (``input=...`` below), so it is kept.

    :param input: input tensor (channels-last; see channel_axis below).
    :param input_channel_small: filters in the squeeze layer.
    :param input_channel_large: filters in EACH expand branch (output has 2x).
    """
    channel_axis = 3  # channels-last tensor layout
    # Squeeze: reduce channel count with a 1x1 convolution.
    input = Conv2D(input_channel_small, (1, 1), padding="valid")(input)
    input = Activation("relu")(input)
    # Expand branch 1: 1x1 convolution.
    input_branch_1 = Conv2D(input_channel_large, (1, 1), padding="valid")(input)
    input_branch_1 = Activation("relu")(input_branch_1)
    # Expand branch 2: 3x3 convolution (same padding keeps spatial size).
    input_branch_2 = Conv2D(input_channel_large, (3, 3), padding="same")(input)
    input_branch_2 = Activation("relu")(input_branch_2)
    input = concatenate([input_branch_1, input_branch_2], axis=channel_axis)
    return input
def SqueezeNet(input_shape=(224, 224, 3)):
    """Returns a new keras SqueezeNet model (1000-class ImageNet head)."""
    image_input = Input(shape=input_shape)
    # Stem: strided 3x3 conv + max pooling.
    network = Conv2D(64, (3, 3), strides=(2, 2), padding="valid")(image_input)
    network = Activation("relu")(network)
    network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)
    # Fire stage 1: two fire modules at 16/64 channels, then pool.
    network = squeezenet_fire_module(
        input=network, input_channel_small=16, input_channel_large=64
    )
    network = squeezenet_fire_module(
        input=network, input_channel_small=16, input_channel_large=64
    )
    network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)
    # Fire stage 2: 32/128 channels, then pool.
    network = squeezenet_fire_module(
        input=network, input_channel_small=32, input_channel_large=128
    )
    network = squeezenet_fire_module(
        input=network, input_channel_small=32, input_channel_large=128
    )
    network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)
    # Fire stages 3 and 4: 48/192 then 64/256 channels, no pooling between.
    network = squeezenet_fire_module(
        input=network, input_channel_small=48, input_channel_large=192
    )
    network = squeezenet_fire_module(
        input=network, input_channel_small=48, input_channel_large=192
    )
    network = squeezenet_fire_module(
        input=network, input_channel_small=64, input_channel_large=256
    )
    network = squeezenet_fire_module(
        input=network, input_channel_small=64, input_channel_large=256
    )
    # Remove layers like Dropout and BatchNormalization, they are only needed in training
    # network = Dropout(0.5)(network)
    # Classifier head: 1x1 conv to 1000 classes, global average pool, softmax.
    network = Conv2D(1000, kernel_size=(1, 1), padding="valid", name="last_conv")(
        network
    )
    network = Activation("relu")(network)
    network = GlobalAvgPool2D()(network)
    network = Activation("softmax", name="output")(network)
    input_image = image_input
    model = Model(inputs=input_image, outputs=network)
    return model
def get_tf_filename(keras_filename) -> str:
    """Map a Keras weights file name to the frozen-graph name (.h5 -> .pb).

    Only a trailing '.h5' is rewritten; the previous ``str.replace`` swapped
    every occurrence, corrupting names like 'my.h5.dir/model.h5'.  Names
    without a '.h5' suffix are returned unchanged.

    :param keras_filename: path or file name of the Keras weights file.
    :return: the corresponding .pb file name.
    """
    if keras_filename.endswith(".h5"):
        return keras_filename[: -len(".h5")] + ".pb"
    return keras_filename
def main(opt):
    """Load SqueezeNet weights named by --model_path and freeze them to a .pb.

    :param opt: parsed argparse namespace with a ``model_path`` attribute.
    :raises ValueError: if the weights file does not exist under ../weights.
    """
    # NOTE(review): the CLI help says './training/models/', but paths are
    # actually resolved relative to '../weights' -- confirm which is intended.
    weights_path: Path = Path("../weights")
    model_path = weights_path / opt.model_path
    if not model_path.exists():
        raise ValueError(f"Invalid model path: {model_path}")
    print(f"Loading keras model: '{model_path}'")
    # Rebuild the architecture, then load the trained weights into it.
    keras_model = SqueezeNet()
    keras_model.load_weights(model_path)
    # NOTE(review): output_file keeps the '../weights/' prefix and is then
    # joined onto output_dir again inside keras_to_tensorflow, so the frozen
    # graph may land under '../weights/../weights/'.  Confirm output path.
    output_file = get_tf_filename(str(model_path))
    keras_to_tensorflow(keras_model, output_dir=weights_path, model_name=output_file)
    print("MODEL SAVED")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
default="squeezenet.h5",
help="filename of model to convert. Path should be relative to the ./training/models/ folder",
)
opt = parser.parse_args()
main(opt)
|
3,959 | 79390f3ae5dc4cc9105a672d4838a8b1ba53a248 | from flask import Flask, render_template, request, redirect
#from gevent.pywsgi import WSGIServer
import model as db
import sys
import time, calendar
import jsonify
def get_date(date):
    """Render a 'HH:MM/dd/mm/yyyy' stamp as 'HH:MM Weekday, the Nth Month yyyy'.

    Fixes the original ordinal-suffix handling, which special-cased only the
    1st/2nd/3rd and therefore produced '21th', '22th', '23th' and '31th'.

    :param date: timestamp string in 'HH:MM/dd/mm/yyyy' form (see edit()).
    :return: display string, e.g. '10:00 Sunday, the 21st March 2021'.
    """
    time_part, day, month_num, year = date.split("/")
    day_num = int(day)
    weekday_name = calendar.day_name[calendar.weekday(int(year), int(month_num), day_num)]
    month = calendar.month_name[int(month_num)]
    # 11th-13th are irregular ('th'); otherwise the suffix follows the last digit.
    if 11 <= day_num <= 13:
        suffix = "th"
    else:
        suffix = {1: "st", 2: "nd", 3: "rd"}.get(day_num % 10, "th")
    return "{} {}, the {}{} {} {}".format(time_part, weekday_name, day_num, suffix, month, year)
# Flask application instance; the routes below attach to it.
app = Flask(__name__)
@app.route("/")
def index():
post = db.get_last_post()
post[3] = get_date(post[3])
return render_template("index.html", last_post=post)
@app.route("/edit")
def add():
table = db.get_all_posts()
return render_template("edit.html", action="write", table=table)
@app.route("/edit", methods=["POST"])
def edit():
if request.method == "POST":
title = request.form["title"]
under_title = request.form["under_title"]
author = request.form["author"]
release = time.strftime("%H:%M/%-d/%m/%Y") # HH:MM/dd/mm/yyyy format
content = request.form["content"]
if db.add_post(title, under_title, author, release, content):
print("Failed to add post to database!", file=sys.stderr)
return render_template("add.html", post=1)
else: # successfull
print("Successfully added post to database!", file=sys.stderr)
return render_template("add.html", post=0)
@app.route("/edit/d")
def d():
pass
@app.route("/posts")
def posts():
posts = db.get_all_posts()
for post in posts:
post[3] = get_date(post[3])
return render_template("posts.html", posts=posts)
@app.route("/about")
def about():
return render_template("about.html")
# @app.route("/register", methods=["POST"])
# def get_registration_data():
# if request.method == "POST": # only if website sends sth
# email = request.form["email"] # get userinput via HTML-form
# username = request.form["username"]
# if register_user(username, email): # if sth is wrong with the db
# print("Failed to register!", file=sys.stderr)
# return render_template('register.html',
# action="register",
# status="Failed to register! Please try again!",
# status_color="#ff0033")
# else: # db check successfull
# print("Successfully registered!", file=sys.stderr)
# return render_template('register.html',
# action="finish",
# status="You have been successfully registered!",
# status_color="#08da94",
# username=username)
if __name__ == "__main__":
db.check()
# development/debugging (flask default):
app.run(host="0.0.0.0", port=8000, debug=True)
# basic server, ready for real-life usage [http://localhost:8000/]
#server = WSGIServer(('0.0.0.0', 8000), app)
#server.serve_forever()
|
3,960 | 9fa5f4b4aeb7fe42d313a0ec4e57ce15acbfcf46 | from keras.models import Sequential
from keras.layers import Convolution2D # for 2d images
from keras.layers import MaxPool2D
from keras.layers import Flatten
from keras.layers import Dense
import tensorflow as tf
from keras_preprocessing.image import ImageDataGenerator
# Binary image classifier (cat vs dog) built with the Keras 1-era API.
cnn = Sequential()
rgb = 64  # input images are resized to rgb x rgb, 3 channels
# step 1: convolution
# slide feature detectors ("filters") along image
# results feature maps that form convolutional layer
cnn.add(Convolution2D(32, 3, 3, input_shape=(rgb, rgb, 3), activation='relu')) # 32, 3x3 filters
# step 2: pooling
cnn.add(MaxPool2D(pool_size=(2, 2)))
# step 3: flatten
# this vector will be the input of a future ann
cnn.add(Flatten())
# step 4: full connection
# NOTE(review): 'output_dim' is the deprecated Keras 1 name for 'units'.
cnn.add(Dense(output_dim=128, activation='relu')) # add hidden layers
cnn.add(Dense(output_dim=1, activation='sigmoid')) # sigmoid for binary output
# compile cnn
cnn.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# image augmentation - prevent overfitting
train_datagen = ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_set = train_datagen.flow_from_directory(
        'dataset/training_set',
        target_size=(rgb, rgb),
        batch_size=32,
        class_mode='binary')
test_set = test_datagen.flow_from_directory(
        'dataset/test_set',
        target_size=(rgb, rgb),
        batch_size=32,
        class_mode='binary')
# NOTE(review): steps_per_epoch counts BATCHES, not images -- with
# batch_size=32 this iterates 256k samples per epoch, not the 8k images the
# comment mentions (8000 // 32 = 250 was probably intended).
cnn.fit_generator(
        train_set,
        steps_per_epoch=8000, # we have 8k images in our training set
        epochs=10,
        validation_data=test_set,
        validation_steps=2000)
print(cnn.summary())
cnn.save('CatDogModel.h5')
|
3,961 | 201279c0cba2d52b6863204bfadb6291a0065f60 | from django.conf import settings
from django.contrib.sites.models import RequestSite
from django.contrib.sites.models import Site
from fish.labinterface.models import *
from registration import signals
from registration.forms import RegistrationForm
from registration.models import RegistrationProfile
from labinterface.models import StaffMember
class CustomRegistrationBackend(object):
    """django-registration backend that also populates a StaffMember profile."""

    def register(self, request, **kwargs):
        """Create an inactive user and fill in their StaffMember profile.

        Expects 'username', 'email', 'password1', 'first_name', 'last_name'
        and 'position' in kwargs (the registration form's cleaned data).
        """
        username, email, password = kwargs['username'], kwargs['email'], kwargs['password1']
        if Site._meta.installed:
            site = Site.objects.get_current()
        else:
            site = RequestSite(request)
        new_user = RegistrationProfile.objects.create_inactive_user(username, email, password, site)
        signals.user_registered.send(sender=self.__class__, user=new_user, request=request)
        # NOTE(review): assumes a StaffMember row already exists for the new
        # user (presumably created by a signal handler); .get() raises
        # DoesNotExist otherwise.
        new_profile = StaffMember.objects.get(user=new_user)
        new_profile.first_name=kwargs['first_name']
        new_profile.last_name=kwargs['last_name']
        new_profile.position=kwargs['position']
        new_profile.save()
        return new_user

    def activate(self, request, activation_key):
        """Activate the account for activation_key; emit user_activated on success."""
        activated = RegistrationProfile.objects.activate_user(activation_key)
        if activated:
            signals.user_activated.send(sender=self.__class__,
                                        user=activated,
                                        request=request)
        return activated

    def registration_allowed(self, request):
        """
        Indicate whether account registration is currently permitted,
        based on the value of the setting ``REGISTRATION_OPEN``. This
        is determined as follows:
        * If ``REGISTRATION_OPEN`` is not specified in settings, or is
        set to ``True``, registration is permitted.
        * If ``REGISTRATION_OPEN`` is both specified and set to
        ``False``, registration is not permitted.
        """
        return getattr(settings, 'REGISTRATION_OPEN', True)

    def get_form_class(self, request):
        """
        Return the default form class used for user registration.
        """
        return RegistrationForm

    def post_registration_redirect(self, request, user):
        """
        Return the name of the URL to redirect to after successful
        user registration.
        """
        return ('registration_complete', (), {})

    def post_activation_redirect(self, request, user):
        """
        Return the name of the URL to redirect to after successful
        account activation.
        """
        # Attach every newly activated member to the default lab group (pk=1).
        newMember = StaffMember.objects.filter(user_id__exact=user.pk).get()
        labGroup = LabGroup.objects.filter(pk=1).get()
        newMember.lab_group = labGroup
        newMember.save()
        return ('registration_activation_complete', (), {})
3,962 | ab4145ccc0b360dcca9b9aa6ebe919bdddac65a2 | from django.urls import path
from photo.api.views import api_photo_detail_view, api_photos_view
# Photo API routes.
# NOTE(review): route names say 'user_detail'/'users' although the views are
# photo views -- likely copied from a users app; confirm before reverse()-ing.
urlpatterns = [
    path('<int:id>', api_photo_detail_view, name='user_detail'),
    path('', api_photos_view, name='users')
]
3,963 | 2194fb4f0b0618f1c8db39f659a4890457f45b1d | from django.conf.urls import patterns, url
# Assignment CRUD / import routes for the hx_lti_assignment app.
# NOTE(review): patterns() with dotted-string view references is the
# Django <= 1.9 idiom (removed in 1.10); fine for the pinned version, but a
# blocker for upgrades.
urlpatterns = patterns(
    '',
    url(
        r'^create_new/$',
        'hx_lti_assignment.views.create_new_assignment',
        name="create_new_assignment",
    ),
    url(
        r'^(?P<id>[0-9]+)/edit/',
        'hx_lti_assignment.views.edit_assignment',
        name="edit_assignment",
    ),
    url(
        r'^(?P<id>[0-9]+)/delete/',
        'hx_lti_assignment.views.delete_assignment',
        name="delete_assignment",
    ),
    url(
        r'^import_assignment/$',
        'hx_lti_assignment.views.import_assignment',
        name="import_assignment",
    ),
    url(
        r'^(?P<course_id>[0-9]+)/get_assignments',
        'hx_lti_assignment.views.assignments_from_course',
        name="assignments_from_course",
    ),
    url(
        r'^(?P<old_course_id>[0-9]+)/(?P<new_course_id>[0-9]+)/(?P<assignment_id>[0-9]+)/import',
        'hx_lti_assignment.views.moving_assignment',
        name="moving_assignment",
    ),
)
|
3,964 | 721f23d2b6109194b8bca54b1cd04263e30cdf24 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 3 16:04:19 2018
@author: khanhle
"""
# Create first network with Keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Activation
from keras.utils import np_utils
from keras.layers.convolutional import Convolution2D, ZeroPadding2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.core import Dropout, Flatten
from keras.callbacks import ModelCheckpoint
import numpy as np
from sklearn.metrics import confusion_matrix
# fix random seed for reproducibility
seed = 7
np.random.seed(seed)
print(__doc__)
import h5py
import os
import sys
from keras.models import model_from_json
#define params
# CLI arguments: train_csv test_csv model_json_out weights_h5_out
trn_file = sys.argv[1]
tst_file = sys.argv[2]
json_file = sys.argv[3]
h5_file = sys.argv[4]
nb_classes = 2     # binary classification
nb_kernels = 3     # conv kernel size (3x3)
nb_pools = 2       # pooling stride
window_sizes = 19  # presumably the sequence-window length -- TODO confirm
# load training dataset
dataset = np.loadtxt(trn_file, delimiter=",")
# split into input (X) and output (Y) variables
# Each CSV row: window_sizes*20 features followed by the class label.
# Reshape to channels-first (N, 1, 20, window) to match the
# dim_ordering="th" layers in cnn_model below.
X = dataset[:,0:window_sizes*20].reshape(len(dataset),1,20,window_sizes)
Y = dataset[:,window_sizes*20]
Y = np_utils.to_categorical(Y,nb_classes)
#print X,Y
#nb_classes = Y.shape[1]
#print nb_classes
# load testing dataset
dataset1 = np.loadtxt(tst_file, delimiter=",")
# split into input (X) and output (Y) variables
X1 = dataset1[:,0:window_sizes*20].reshape(len(dataset1),1,20,window_sizes)
Y1 = dataset1[:,window_sizes*20]
# Keep the raw labels for the confusion matrix printed after training.
true_labels = np.asarray(Y1)
Y1 = np_utils.to_categorical(Y1,nb_classes)
#print('label : ', Y[i,:])
def cnn_model():
    """Build and compile the channels-first CNN used for window classification.

    Input: (1, 20, window_sizes) tensors; output: softmax over nb_classes.
    Written against the Keras 1 API (positional kernel sizes, dim_ordering="th").
    Commented-out layers are alternative depths kept for experimentation.
    """
    model = Sequential()
    # Conv block 1: 32 filters.
    model.add(ZeroPadding2D((1,1), input_shape = (1,20,window_sizes)))
    model.add(Convolution2D(32, nb_kernels, nb_kernels))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering="th"))
    # model.add(ZeroPadding2D((1,1)))
    # model.add(Convolution2D(32, nb_kernels, nb_kernels, activation='relu'))
    # model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering="th"))
    # Conv block 2: 64 filters.
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(64, nb_kernels, nb_kernels, activation='relu'))
    # model.add(Activation('relu'))
    model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering="th"))
    # Conv block 3: 128 filters.
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(128, nb_kernels, nb_kernels, activation='relu'))
    model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering="th"))
    # model.add(ZeroPadding2D((1,1)))
    # model.add(Convolution2D(256, nb_kernels, nb_kernels, activation='relu'))
    # model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering="th"))
    ## add the model on top of the convolutional base
    #model.add(top_model)
    # Dense classifier head.
    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(128))
    #model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dense(nb_classes))
    #model.add(BatchNormalization())
    model.add(Activation('softmax'))
    # f = open('model_summary.txt','w')
    # f.write(str(model.summary()))
    # f.close()
    #model.compile(loss='categorical_crossentropy', optimizer='adadelta')
    # Compile model
    model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics=['accuracy'])
    return model
#plot_filters(model.layers[0],32,1)
# Fit the model
# save best weights
model = cnn_model()
#plot_model(model, to_file='model.png')
filepath = "weights.best.hdf5"
# Checkpoint weights only when validation accuracy improves.
checkpointer = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=True)
# balance data
model.fit(X, Y, nb_epoch=150, batch_size=10, class_weight = 'auto', validation_data=(X1,Y1), callbacks=[checkpointer])
## evaluate the model
# NOTE(review): this evaluates the FINAL weights; the best checkpoint is only
# reloaded below for the confusion matrix.
scores = model.evaluate(X1, Y1)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
model.load_weights(filepath)
predictions = model.predict_classes(X1)
print(confusion_matrix(true_labels, predictions))
#serialize model to JSON
model_json = model.to_json()
# NOTE(review): 'as json_file' shadows the json_file path string from argv;
# harmless since the path is not used again, but worth renaming.
with open(json_file, "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights(h5_file)
print("Saved model to disk")
|
3,965 | b16e64edd0ff55a424ce3d4589321ee4576e930c | #
# PySNMP MIB module AN-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/AN-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:22:33 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE: generated by pysmi; executed by the pysnmp MIB loader, which injects
# 'mibBuilder' into the namespace -- this is not a directly importable module.
# Prefer regenerating from the ASN.1 source over hand-editing.
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, iso, Bits, IpAddress, MibIdentifier, Counter32, Unsigned32, ModuleIdentity, Counter64, NotificationType, TimeTicks, Gauge32, Integer32, enterprises = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "Bits", "IpAddress", "MibIdentifier", "Counter32", "Unsigned32", "ModuleIdentity", "Counter64", "NotificationType", "TimeTicks", "Gauge32", "Integer32", "enterprises")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# Re-declares DisplayString (shadowing the SNMPv2-TC import above) with a
# 0..255 octet size constraint, as specified by this MIB.
class DisplayString(OctetString):
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 255)
# OID tree: .1.3.6.1.4.1.231 (Siemens) down to the ONU/OLT nodes.
sni = MibIdentifier((1, 3, 6, 1, 4, 1, 231))
siemensUnits = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 7))
oenProductMibs = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 7, 1))
an = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 7, 1, 2))
xld = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 7, 1, 2, 1))
onu = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 7, 1, 2, 1, 1))
olt = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 7, 1, 2, 1, 2))
xldOnuSnmVersion = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 7, 1, 2, 1, 1, 100))
# Read-only scalar leaves exposing the MIB and agent versions.
xldSnmMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 2, 1, 1, 100, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 10))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xldSnmMibVersion.setStatus('mandatory')
if mibBuilder.loadTexts: xldSnmMibVersion.setDescription(" Version of ONU SNMP MIB. The string is 'V1.0'. ")
xldSnmAgentVersion = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 2, 1, 1, 100, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 10))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xldSnmAgentVersion.setStatus('mandatory')
if mibBuilder.loadTexts: xldSnmAgentVersion.setDescription(" Version of ONU SNMP agent. The string is 'V1.0'. ")
mibBuilder.exportSymbols("AN-MIB", DisplayString=DisplayString, siemensUnits=siemensUnits, oenProductMibs=oenProductMibs, xldSnmAgentVersion=xldSnmAgentVersion, xldSnmMibVersion=xldSnmMibVersion, an=an, sni=sni, onu=onu, olt=olt, xldOnuSnmVersion=xldOnuSnmVersion, xld=xld)
|
3,966 | e90e4d2c777554999ab72d725d7e57bdfd508d3a | #!/usr/bin/env python
import rospy
from mark1.srv import WordCount, WordCountResponse
# Python 2 ROS node exposing a WordCount service.
# Accumulated vocabulary of every word seen across requests.
# NOTE(review): set('',) is just an empty set; plain set() would be clearer.
s= set('',)
def count_words(request):
    """Service handler: record the request's words, return the word count."""
    s.update(set( request.words.split() ))
    print s
    return WordCountResponse( len( request.words.split()))
rospy.init_node('mark_service_server')
service = rospy.Service('Word_count', WordCount, count_words)
# Block until the node is shut down.
rospy.spin()
|
3,967 | 1d004ec0f4f5c50f49834f169812737d16f22b96 | w=int(input())
# Constructed competitive-programming answer: emit 297 fixed values.
# NOTE(review): the integer read from input just above is never used.
# 1..100, plus i*100 and i*10000 for i in 2..99, plus 10000:
# 100 + 2*98 + 1 = 297 values.
lst=[i+1 for i in range(100)]
for i in range(2,100):
    lst.append(i*100)
    lst.append(i*10000)
lst.append(10000)
print(297)
print(*lst)
|
3,968 | 5607d4fea315fa7bf87337453fbef90a93a66516 | import random
# Candidate name pools used when generating a random player profile.
firstNames = ("Thomas", "Daniel", "James", "Aaron", "Tommy", "Terrell", "Jack", "Joseph", "Samuel", "Quinn", "Hunter", "Vince", "Young", "Ian", "Erving", "Leo")
lastNames = ("Smith", "Johnson", "Williams", "Kline","Brown", "Garcia", "Jones", "Miller", "Davis","Williams", "Alves", "Sobronsky", "Hall", "Murphy", "Morris")

# Verifies statistics are not negative
# (was a lambda assigned to a name -- PEP 8 E731; a def keeps the same
#  behavior and name while being introspectable and documentable)
def f(x):
    """Clamp a statistic to be non-negative: negatives become 0."""
    return 0 if (x < 0) else x
def improvementFunction(age, maxMu):
    """Season-to-season improvement mean for a player of the given age.

    A downward parabola scaled by maxMu/30 with roots at ages 17 and 30:
    positive (improvement) mid-career, negative outside that range.
    """
    return -(maxMu / 30) * (age - 17) * (age - 30)
class profile:
    """Randomly generated basketball player with per-season stat history.

    Every stat attribute (ppg, apg, ...) is a list; index -1 is the most
    recent season.  All values are sampled from gaussians and clamped to
    be non-negative by the module-level `f`.
    """
    def __init__ (self):
        # Random "First Last" name drawn from the module-level pools.
        self.name = firstNames[random.randrange(0,len(firstNames))] + " " + lastNames[random.randrange(0,len(lastNames))]
        self.years = 2020  # NOTE(review): holds a calendar year despite the plural name
        self.ppg = [f(round( random.gauss(10.5, 2.4), 1))]  # points per game
        self.apg = [f(round(random.gauss(5.2, 2.4), 1))]  # assists per game
        self.rpg = [f(round(random.gauss(4.7, 2.4), 1))]  # rebounds per game
        self.bpg = [f(round(random.gauss(1, .8), 1))]  # blocks per game
        self.spg = [f(round(random.gauss(.9, 1.2), 1))]  # steals per game
        self.tpg = [f(round(random.gauss(1.8, .5), 1))]  # turnovers per game
        self.age = random.randrange(18,24)
        self.fgp = [f(round(random.gauss(39.2, 5.4), 1))]  # field-goal percentage
        self.tpp = [f(round(random.gauss(28.7, 6), 1))]  # three-point percentage
    def getStats (self):
        """Return a dict of the player's most recent season stats."""
        output = {"Age:" : self.age,
                  "name" : self.name,
                  "points per game" : self.ppg[-1],
                  "assists per game" : self.apg[-1],
                  "rebounds per game" : self.rpg[-1],
                  "blocks per game" : self.bpg[-1],
                  "steals per game" : self.spg[-1],
                  "turnovers per game" : self.tpg[-1],
                  "field goal percentage" : self.fgp[-1],
                  "three point percentage" : self.tpp[-1]}
        return output
    def incrementAge (self):
        """Advance the player's age by one year."""
        self.age += 1
    def updateStats (self):
        """Append next season's stats: previous value plus gaussian noise
        centred on improvementFunction (improves mid-career, declines at
        the age extremes), clamped to non-negative."""
        self.ppg.append(f(round(self.ppg[-1] + random.gauss(improvementFunction(self.age, 5 - 2 * 1.8), 1.8), 1)))
        self.apg.append(f(round(self.apg[-1] + random.gauss(improvementFunction(self.age, self.apg[-1] * 2 - 6), 1.5), 1)))
        self.rpg.append(f(round(self.rpg[-1] + random.gauss(improvementFunction(self.age, self.rpg[-1] * 1.5 - 3), 1.5), 1)))
        self.bpg.append(f(round(self.bpg[-1] + random.gauss(improvementFunction(self.age, self.bpg[-1] * 2 - 1), .5), 1)))
        self.spg.append(f(round(self.spg[-1] + random.gauss(improvementFunction(self.age, self.spg[-1] * 2 - 1), .5), 1)))
        self.tpg.append(f(round(self.tpg[-1] + random.gauss(improvementFunction(self.age, 2.5 - .5), .5), 1)))
        self.fgp.append(f(round(self.fgp[-1] + random.gauss(improvementFunction(self.age, 10 - 3), 2.5), 1)))
        self.tpp.append(f(round(self.tpp[-1] + random.gauss(improvementFunction(self.age, 8 - 3), 1.9), 1)))
|
3,969 | 246ec0d6833c9292487cb4d381d2ae82b220677e | import sys
def show_data(data):
    """Print the seating grid, one row per line, followed by a blank line."""
    for row in data:
        print(''.join(row))
    print("")
def check_seat(data, i, j):
    """Classify the cell at (i, j): return (occupied, is_seat).

    occupied is 1 only for '#'; is_seat is True for '#' and 'L',
    False for floor ('.').
    """
    cell = data[i][j]
    if cell == '#':
        return 1, True
    if cell == 'L':
        return 0, True
    return 0, False

def _scan(data, i, j, di, dj):
    """Walk from (i, j) stepping by (di, dj) until the first seat or the
    grid edge; return 1 if that seat is occupied, else 0.

    Replaces eight near-identical scanner bodies.  Also fixes the original
    inconsistent bounds checks: each scanner only guarded one coordinate,
    so an out-of-range value in the other coordinate raised IndexError.
    """
    occupied, found = 0, False
    while 0 <= i < len(data) and 0 <= j < len(data[i]) and not found:
        occupied, found = check_seat(data, i, j)
        i += di
        j += dj
    return occupied

# The eight public direction scanners, kept for interface compatibility.
# NOTE(review): the names mix up rows and columns (e.g. "top" varies j,
# "right" varies i) but together they still cover all eight directions.
def is_top_left_occupied(data, i, j):
    return _scan(data, i, j, -1, -1)

def is_top_occupied(data, i, j):
    return _scan(data, i, j, 0, -1)

def is_top_right_occupied(data, i, j):
    return _scan(data, i, j, 1, -1)

def is_right_occupied(data, i, j):
    return _scan(data, i, j, 1, 0)

def is_bottom_right_occupied(data, i, j):
    return _scan(data, i, j, 1, 1)

def is_bottom_occupied(data, i, j):
    return _scan(data, i, j, 0, 1)

def is_bottom_left_occupied(data, i, j):
    return _scan(data, i, j, -1, 1)

def is_left_occupied(data, i, j):
    return _scan(data, i, j, -1, 0)
def get_occupied_seats(data, i, j):
    """Count occupied seats visible from (i, j) in all eight directions."""
    visible = (
        is_top_left_occupied(data, i - 1, j - 1),
        is_top_occupied(data, i, j - 1),
        is_top_right_occupied(data, i + 1, j - 1),
        is_right_occupied(data, i + 1, j),
        is_bottom_right_occupied(data, i + 1, j + 1),
        is_bottom_occupied(data, i, j + 1),
        is_bottom_left_occupied(data, i - 1, j + 1),
        is_left_occupied(data, i - 1, j),
    )
    return sum(visible)
def count_seats(data):
    """Return the total number of occupied ('#') seats in the grid."""
    return sum(row.count("#") for row in data)
def main():
    """Advent of Code 2020 day 11 part 2: iterate the visible-seat rules
    until the grid stabilises, then print the occupied-seat count."""
    with open('input.txt') as f:
        lines = f.readlines()
    data = [[char for char in line[:-1]] for line in lines]  # [:-1] strips the newline
    data_next = [['.' for char in line[:-1]] for line in lines]
    end = False
    round = 1  # NOTE(review): shadows the builtin round()
    while not end:
        for i in range(0,len(data)):
            for j in range(0,len(data[i])):
                # Empty seat with no visible occupied seat -> occupied.
                if (data[i][j] == 'L') and (get_occupied_seats(data,i,j) == 0):
                    data_next[i][j] = '#'
                # Occupied seat seeing 5+ occupied seats -> emptied.
                elif (data[i][j] == '#') and (get_occupied_seats(data,i,j) >= 5):
                    data_next[i][j] = 'L'
        # Unchanged cells keep their old data_next value.  This works only
        # because data is re-copied from data_next after every round, so the
        # grids are equal when each round starts (round 1 works because all
        # seats start empty and floor cells default to '.').
        print ("Round %d" % round)
        round += 1
        if data == data_next:
            seats = count_seats(data)
            print(seats)
            end = True
        else:
            data = [x[:] for x in data_next]
|
3,970 | 78db25586f742b0a20bc3fad382b0d4f1a271841 | #!/usr/bin/python3
# Parameters for an IRATI experiment on the iMinds Virtual Wall testbed.
experiment_name = "nodes10"
wall = "wall2"  # testbed facility name
wall_image = "irati_110"  # disk image deployed on the nodes
mr_dif_policy = True  # NOTE(review): policy flags consumed elsewhere — semantics defined there
spn_dif_policy = True
destination_ip = "2001:40b0:7500:286:84:88:81:57"  # IPv6 traffic target
|
3,971 | e0c10dfa4074b0de4d78fc78a6f373074ef4dadd | letters = ['a', 'b', 'c']
def delete_head(letters):
    """Remove the first element of *letters* in place and print the result.

    Mutates the caller's list and returns None.
    Fix: the Python-2-only `print letters` statement is replaced by the
    parenthesised form, which behaves identically on Python 2 and 3 for a
    single argument.
    """
    del letters[0]
    print(letters)
# NOTE(review): Python-2 print statement; delete_head returns None, so this
# prints the mutated list (from inside the call) followed by "None".
print delete_head(letters)
|
3,972 | e007e2d32fa799e7658813f36911616f7bf58b48 | from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
tf.__version__
import glob
import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
from tensorflow.keras import layers
import time
import pathlib
from IPython import display
###------------------------------------------------------###
# READ IN IMAGE DATA
#(train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data()
AUTOTUNE = tf.data.experimental.AUTOTUNE
import pathlib  # NOTE(review): duplicate import (already imported above)
data_root_orig = "Images"
data_root = pathlib.Path(data_root_orig)
#print(data_root)
#for item in data_root.iterdir():
#    print(item)
import random
# Collect and shuffle every file directly under Images/.
# Changed from orginal cause maybe a problem with the windows file system
#all_image_paths = list(data_root.glob('*/*'))
all_image_paths = list(data_root.glob('*'))
#print(all_image_paths)
all_image_paths = [str(path) for path in all_image_paths]
random.shuffle(all_image_paths)
image_count = len(all_image_paths)
#print(image_count)
# No good PATH format
# print(all_image_paths[:10])
# Sanity-check the pipeline on one image: read, decode, resize, scale to [0, 1].
img_path = all_image_paths[0]
#print(img_path)
img_raw = tf.io.read_file(img_path)
#print(repr(img_raw)[:100]+"...")
img_tensor = tf.image.decode_image(img_raw)
#print(img_tensor.shape)
#print(img_tensor.dtype)
img_final = tf.image.resize(img_tensor, [280, 280])
img_final = img_final/255.0
#print(img_final.shape)
#print(img_final.numpy().min())
#print(img_final.numpy().max())
#-----------------------------------------#
def preprocess_image(image):
    """Decode raw JPEG bytes into a 280x280 RGB float image in [0, 1]."""
    decoded = tf.image.decode_jpeg(image, channels=3)
    resized = tf.image.resize(decoded, [280, 280])
    return resized / 255.0  # normalize to [0,1] range
def load_and_preprocess_image(path):
    """Read the file at *path* and run it through preprocess_image."""
    raw = tf.io.read_file(path)
    return preprocess_image(raw)
#-----------------------------------------#
# BUILD A DATASET
path_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
#print(path_ds)
image_ds = path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
BATCH_SIZE = 32
# Setting a shuffle buffer size as large as the dataset ensures that the data is
# completely shuffled.
ds = image_ds.shuffle(buffer_size=image_count)
ds = ds.repeat()
ds = ds.batch(BATCH_SIZE)
# `prefetch` lets the dataset fetch batches in the background while the model is training.
ds = ds.prefetch(buffer_size=AUTOTUNE)
#print(ds)
# Frozen MobileNetV2 backbone used for the quick classification sanity check below.
mobile_net = tf.keras.applications.MobileNetV2(input_shape=(280, 280, 3), include_top=False)
mobile_net.trainable=False
help(tf.keras.applications.mobilenet_v2.preprocess_input)  # prints the expected input-range docs
def change_range(image):
    """Map pixel values from [0, 1] to [-1, 1] (MobileNetV2's input range)."""
    return image * 2 - 1
keras_ds = ds.map(change_range)  # rescale batches to MobileNetV2's [-1, 1]
# The dataset may take a few seconds to start, as it fills its shuffle buffer.
image_batch = next(iter(keras_ds))
feature_map_batch = mobile_net(image_batch)
#print(feature_map_batch.shape)
# Throwaway classifier head over the frozen backbone (pipeline sanity check).
# NOTE(review): Dense(image_count) sizes the output by dataset size — confirm intended.
model = tf.keras.Sequential([
    mobile_net,
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense((image_count))])
logit_batch = model(image_batch).numpy()
#print("min logit:", logit_batch.min())
#print("max logit:", logit_batch.max())
#print()
#print("Shape:", logit_batch.shape)
model.compile(optimizer=tf.keras.optimizers.Adam(),
              loss='sparse_categorical_crossentropy',
              metrics=["accuracy"])
#print(len(model.trainable_variables))
model.summary()
steps_per_epoch=tf.math.ceil(len(all_image_paths)/BATCH_SIZE).numpy()
#print(steps_per_epoch)
#model.fit(ds, epochs=1, steps_per_epoch=3)
def make_generator_model():
    """DCGAN generator: 100-dim noise -> 28x28x1 image in [-1, 1] (tanh).

    Upsamples 7x7x256 -> 7x7x128 -> 14x14x64 -> 28x28x1 via
    Conv2DTranspose blocks with BatchNorm + LeakyReLU.
    """
    model = tf.keras.Sequential()
    model.add(layers.Dense(7*7*256, use_bias=False, input_shape=(100,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Reshape((7, 7, 256)))
    assert model.output_shape == (None, 7, 7, 256) # Note: None is the batch size
    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
    assert model.output_shape == (None, 7, 7, 128)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    assert model.output_shape == (None, 14, 14, 64)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
    assert model.output_shape == (None, 28, 28, 1)
    return model
# Instantiate the generator and render one untrained sample image.
generator = make_generator_model()
noise = tf.random.normal([1, 100])
generated_image = generator(noise, training=False)
plt.imshow(generated_image[0, :, :, 0], cmap='gray')
def make_discriminator_model():
    """DCGAN discriminator: 28x28x1 image -> single real/fake logit."""
    return tf.keras.Sequential([
        layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',
                      input_shape=[28, 28, 1]),
        layers.LeakyReLU(),
        layers.Dropout(0.3),
        layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'),
        layers.LeakyReLU(),
        layers.Dropout(0.3),
        layers.Flatten(),
        layers.Dense(1),
    ])
# Instantiate the discriminator and score the untrained sample (raw logit).
discriminator = make_discriminator_model()
decision = discriminator(generated_image)
print (decision)
# This method returns a helper function to compute cross entropy loss
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def discriminator_loss(real_output, fake_output):
    """Total BCE loss: real logits scored against ones, fakes against zeros."""
    loss_on_real = cross_entropy(tf.ones_like(real_output), real_output)
    loss_on_fake = cross_entropy(tf.zeros_like(fake_output), fake_output)
    return loss_on_real + loss_on_fake
def generator_loss(fake_output):
    """Generator wants fakes judged real: BCE of fake logits against ones."""
    all_real_target = tf.ones_like(fake_output)
    return cross_entropy(all_real_target, fake_output)
# Separate Adam optimizers: the two networks are trained adversarially.
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)
# Periodic checkpointing of both models and their optimizer state.
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
                                 discriminator_optimizer=discriminator_optimizer,
                                 generator=generator,
                                 discriminator=discriminator)
EPOCHS = 50
noise_dim = 100
num_examples_to_generate = 16
# We will reuse this seed overtime (so it's easier)
# to visualize progress in the animated GIF)
seed = tf.random.normal([num_examples_to_generate, noise_dim])
# Notice the use of `tf.function`
# This annotation causes the function to be "compiled".
@tf.function
def train_step(images):
    """One adversarial step: score reals and fakes, then update both nets."""
    noise = tf.random.normal([BATCH_SIZE, noise_dim])
    # Two tapes: each network needs its own gradients from the shared pass.
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_images = generator(noise, training=True)
        real_output = discriminator(images, training=True)
        fake_output = discriminator(generated_images, training=True)
        gen_loss = generator_loss(fake_output)
        disc_loss = discriminator_loss(real_output, fake_output)
    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
def train(dataset, epochs):
    """Run the GAN training loop, saving preview images and checkpoints.

    NOTE(review): the dataset built above is infinitely repeated
    (ds.repeat()), so `for image_batch in dataset` never terminates —
    confirm a bounded dataset or a steps cap is intended.
    """
    for epoch in range(epochs):
        start = time.time()
        for image_batch in dataset:
            train_step(image_batch)
        # Produce images for the GIF as we go
        display.clear_output(wait=True)
        generate_and_save_images(generator,
                                 epoch + 1,
                                 seed)
        # Save the model every 15 epochs
        if (epoch + 1) % 15 == 0:
            checkpoint.save(file_prefix = checkpoint_prefix)
        print ('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start))
    # Generate after the final epoch
    display.clear_output(wait=True)
    generate_and_save_images(generator,
                             epochs,
                             seed)
def generate_and_save_images(model, epoch, test_input):
    """Render the fixed seed batch as a 4x4 grid and save it as a PNG."""
    # Notice `training` is set to False.
    # This is so all layers run in inference mode (batchnorm).
    predictions = model(test_input, training=False)
    fig = plt.figure(figsize=(4,4))  # NOTE(review): `fig` itself is never used
    for i in range(predictions.shape[0]):
        plt.subplot(4, 4, i+1)
        # Undo the tanh [-1, 1] range back to 0..255 grayscale.
        plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
        plt.axis('off')
    plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
    plt.show()
#time
train(ds, EPOCHS) |
3,973 | c007dc2416d3f7c883c44dea5471927ea6f816d6 | # Uses python3
import sys
from operator import attrgetter
from collections import namedtuple
# A closed interval [start, end] on the integer line.
Segment = namedtuple('Segment', 'start end')

def optimal_points(segments):
    """Greedy minimum set of points touching every segment.

    Sort segments by right endpoint; repeatedly take the smallest
    remaining right endpoint as a point and drop every segment it covers.
    """
    remaining = sorted(segments, key=attrgetter('end'), reverse=True)
    points = []
    while remaining:
        chosen = remaining.pop().end  # smallest right endpoint left
        # Discard every segment already covered by the chosen point.
        while remaining and remaining[-1].start <= chosen:
            remaining.pop()
        if chosen not in points:
            points.append(chosen)
    return points
if __name__ == '__main__':
    # NOTE(review): rebinding `input` shadows the builtin for this block
    # (the commented line suggests an interactive variant existed).
    input = sys.stdin.read()
    #input = input()
    n, *data = map(int, input.split())
    # Pair the flat number list into (start, end) segments.
    segments = list(map(lambda x: Segment(x[0], x[1]), zip(data[::2], data[1::2])))
    points = optimal_points(segments)
    print(len(points))
    print(*points)
|
3,974 | 395ff2e7c052b57548151fc71fad971c94ebceea | #@@---------------------------@@
# Author: Chamil Jayasundara
# Date: 5/18/17
# Description: Extract SFLOW data from slow logs
#@@---------------------------@@
import itertools
from collections import defaultdict
"""Flow Sample and Datagram Objects"""
class Container(object):
    """Base record: a numeric id plus a defaultdict(int) key/value store.

    Unknown keys read as 0 (defaultdict), so callers can accumulate
    counters without initialisation.  Item access delegates to `content`.
    """
    def __init__(self, id):
        self.id = id
        self.content = defaultdict(int)
    def __getitem__(self, key):
        return self.content[key]
    def __setitem__(self, key, value):
        self.content[key] = value
class Datagram(Container):
    """One sFlow datagram with an auto-assigned sequential id.

    NOTE(review): `itertools.count().next` is Python-2 only; Python 3
    would need `count().__next__` or `next()`.
    """
    datagram_counter = itertools.count().next  # shared sequence across instances
    def __init__(self):
        super(Datagram, self).__init__(Datagram.datagram_counter())
        # Flow samples are stored by their own sequential ids.
        self['flowSamples'] = {}
class FlowSample(Container):
    """One flow sample inside a datagram; auto-assigned sequential id.

    NOTE(review): `.next` is Python-2 only, as in Datagram.
    """
    flowsample_counter = itertools.count().next  # shared sequence across instances
    def __init__(self):
        super(FlowSample, self).__init__(FlowSample.flowsample_counter())
#############################
"""Data Extraction"""
def process_line_and_store_in_obj(line, obj):
    """Split an sflowtool "key value..." line and store it on *obj*.

    The first space separates the key; the remainder (trailing whitespace
    stripped) becomes the value.  Lines with no space store an empty value.
    """
    key, _, value = line.partition(" ")
    obj[key] = value.rstrip()
###State Machine Classses
class WithinDatagram(object):
    """Parser state: outside any flow sample (between/inside datagrams).

    On "startSample" it hands control to the WithinFlowsample state; on
    "endDatagram" the finished datagram's content dict is passed to the
    Trace callback.
    """
    def __init__(self, traceObj):
        self.Trace = traceObj  # back-reference to the owning state machine
        self.current_datagram = None
    def process(self,line):
        if "startDatagram" in line:
            self.current_datagram = Datagram()
        elif "endDatagram" in line:
            # Deliver the completed datagram to the user callback.
            self.Trace.callable(self.current_datagram.content)
        elif "startSample" in line:
            # Switch the machine into flow-sample mode for this datagram.
            self.Trace.currentState = self.Trace.within_flowsample
            self.Trace.within_flowsample.re_init(FlowSample(), self.current_datagram)
        else:
            # Any other line is a "key value" attribute of the datagram.
            process_line_and_store_in_obj(line, self.current_datagram)
class WithinFlowsample(object):
    """Parser state: inside a flow sample.

    Accumulates "key value" lines onto the current FlowSample; on
    "endSample" it files the sample under the datagram's 'flowSamples'
    dict and returns the machine to the WithinDatagram state.
    """
    def __init__(self, traceObj):
        self.Trace = traceObj  # back-reference to the owning state machine
        self.current_datagram = None
        self.current_flowsample = None
    def re_init(self, flowsampleObj, datagramObj):
        """Re-arm this (reused) state object for a fresh sample."""
        self.current_datagram = datagramObj
        self.current_flowsample = flowsampleObj
    def process(self,line):
        if "endSample" in line:
            # File the finished sample by id, then leave sample mode.
            self.current_datagram['flowSamples'][self.current_flowsample.id] = self.current_flowsample.content
            self.Trace.currentState = self.Trace.within_datagram
        else:
            process_line_and_store_in_obj(line, self.current_flowsample)
class Trace(object):
    """Two-state line parser for sflowtool-style logs.

    Feed lines via process(); each completed datagram's content dict is
    passed to the supplied callback.  NOTE(review): the parameter name
    `callable` shadows the builtin.
    """
    def __init__(self, callable=None):
        self.within_datagram = WithinDatagram(self)
        self.within_flowsample = WithinFlowsample(self)
        self.currentState = self.within_datagram  # start outside any sample
        self.callable = callable
    def process(self, line):
        # Delegate to whichever state object is current.
        self.currentState.process(line)
|
3,975 | 6aa7114db66a76cfa9659f5537b1056f40f47bd2 | import requests
import json
ROOT_URL = "http://localhost:5000"
def get_all_countries():
    """GET /countries -> the list under the 'countries' key."""
    resp = requests.get("{}/countries".format(ROOT_URL))
    return resp.json()["countries"]

def get_country_probability(countryIds):
    """GET /countries/probability for the given ids -> probability value.

    NOTE(review): sends a body (`data=`) with a GET request — unusual but
    matches the server's expectations here.
    """
    payload = {"countryIds": countryIds}
    resp = requests.get("{}/countries/probability".format(ROOT_URL), data=payload)
    return resp.json()["probability"]

def add_country(country_name, country_code):
    """POST a new country; return the server's JSON reply."""
    payload = {"country_name": country_name, "country_code": country_code}
    return requests.post("{}/countries".format(ROOT_URL), data=payload).json()

def update_country(id, country_name=None, country_code=None):
    """PUT any provided fields for country *id*; return the 'updates' field."""
    payload = {"id": id}
    for field, value in (("country_name", country_name), ("country_code", country_code)):
        if value != None:  # keep the original loose comparison
            payload[field] = value
    resp = requests.put("{}/countries".format(ROOT_URL), data=payload)
    return resp.json()["updates"]

def delete_country(id):
    """DELETE country *id*; return the server's JSON reply."""
    return requests.delete("{}/countries".format(ROOT_URL), data={"id": id}).json()
def get_all_symptoms():
    """GET /symptoms -> the list under the 'symptoms' key."""
    resp = requests.get("{}/symptoms".format(ROOT_URL))
    return resp.json()["symptoms"]

def get_symptom_probability(symptomIds):
    """GET /symptoms/probability for the given ids -> probability value."""
    payload = {"symptomIds": symptomIds}
    resp = requests.get("{}/symptoms/probability".format(ROOT_URL), data=payload)
    return resp.json()["probability"]

def add_symptom(name):
    """POST a new symptom; return the server's JSON reply."""
    return requests.post("{}/symptoms".format(ROOT_URL), data={"name": name}).json()

def update_symptom(id, name=None):
    """PUT a changed name for symptom *id*; return the 'updates' field."""
    payload = {"id": id}
    if name != None:  # keep the original loose comparison
        payload["name"] = name
    resp = requests.put("{}/symptoms".format(ROOT_URL), data=payload)
    return resp.json()["updates"]

def delete_symptom(id):
    """DELETE symptom *id*; return the server's JSON reply."""
    return requests.delete("{}/symptoms".format(ROOT_URL), data={"id": id}).json()
def get_diagnosis(id):
    """GET /diagnoses?id=<id> -> a single diagnosis record."""
    url = "{}/diagnoses?id={}".format(ROOT_URL, str(id))
    return requests.get(url).json()["diagnosis"]

def get_all_diagnoses():
    """GET /diagnoses -> the list under the 'diagnoses' key."""
    return requests.get("{}/diagnoses".format(ROOT_URL)).json()["diagnoses"]

def add_diagnosis(name, temperature, result, countryIds, symptomIds):
    """POST a new diagnosis with its country and symptom id lists."""
    payload = {"name": name, "temperature": temperature, "result": result,
               "countryIds": countryIds, "symptomIds": symptomIds}
    return requests.post("{}/diagnoses".format(ROOT_URL), data=payload).json()

def delete_diagnosis(id):
    """DELETE diagnosis *id*; return the server's JSON reply."""
    return requests.delete("{}/diagnoses".format(ROOT_URL), data={"id": id}).json()
if __name__ == '__main__':
    # Client helper library; nothing to do when executed directly.
    pass
|
3,976 | 325708d5e8b71bad4806b59f3f86a737c1baef8d | """game"""
def get_word_score(word_1, n_1):
    """Scrabble-style word score.

    Score = (sum of the letter values of word_1) * len(word_1), plus a
    50-point bonus when the word length equals n_1 (all letters used).
    Characters without a letter value contribute nothing.
    """
    letter_values = {'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10}
    total = 0
    for ch in word_1:
        if ch in letter_values:
            total += letter_values[ch]
    total *= len(word_1)
    if n_1 == len(word_1):
        total += 50
    return total
# print("worng inputs")
def main():
    '''
    Main function for the given problem.

    Reads one line "word n" from stdin and prints the word's score.
    '''
    data = input()
    data = data.split(" ")
    print(get_word_score(data[0], int(data[1])))
if __name__ == "__main__":
    main()
|
def longest(s1, s2):
    """Return a sorted string of the distinct characters of s1 and s2."""
    distinct = set(s1) | set(s2)
    return "".join(sorted(distinct))

longest("xyaabbbccccdefww","xxxxyyyyabklmopq")  # NOTE(review): result discarded
|
3,978 | e72962b644fab148741eb1c528d48ada45a43e51 | # Generated by Django 3.2.2 on 2021-05-07 08:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the `teams` table.

    Columns: id (auto PK), name and discipline (50-char strings), amount
    (integer).  Rows are unique on (name, discipline, amount) and ordered
    by id.  Auto-generated by Django; avoid hand-editing operations.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='teams',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=50)),
                ('discipline', models.CharField(max_length=50)),
                ('amount', models.IntegerField()),
            ],
            options={
                'ordering': ['id'],
                'unique_together': {('name', 'discipline', 'amount')},
            },
        ),
    ]
|
3,979 | ed6eda4b6dbf3e94d8efb53004b19cd9c49e927e | import sqlite3
import sys
import threading
from time import sleep
# Module-level cache for the single shared liteQueue instance.
sq = None

def get_queue(category, parser):
    """Return the shared liteQueue, creating it on first use.

    BUG FIX: the original never assigned `sq`, so the singleton check was
    dead code and every call opened a brand-new liteQueue (and sqlite
    connection).  NOTE: once created, later category/parser arguments are
    ignored — matching the evident singleton intent of the original check.
    """
    global sq
    if sq is None:
        sq = liteQueue(category, parser)
    return sq
"""
SqLite Job Handler class for Links
"""
class liteQueue:
    """Persistent crawl-link queue backed by a per-(parser, category)
    SQLite file under databases/.

    Rows are (url, category, origin, thumb, fetched, fetched_imgs); the
    url is the primary key.  Popping marks rows fetched=1 instead of
    deleting them, so the file doubles as a history.
    """
    _create = "CREATE TABLE IF NOT EXISTS link ( 'url' TEXT,'category' TEXT,'origin' TEXT, 'thumb' TEXT, 'fetched' INTEGER,'fetched_imgs' INTEGER,PRIMARY KEY(url));"
    _putList = "INSERT OR IGNORE INTO link VALUES(?, ?, ?, ?, ?, ?)"
    _iterate = "SELECT * FROM LINK WHERE FETCHED = 0"
    _write_lock = "BEGIN IMMEDIATE"  # takes the write lock up front for atomic pops
    _pop_get_many = "SELECT URL, CATEGORY, ORIGIN, THUMB, ROWID FROM LINK WHERE FETCHED = 0 ORDER BY ROWID ASC LIMIT "
    _pop_del_many = "UPDATE LINK SET FETCHED=1 WHERE FETCHED = 0 AND (ROWID >= ? AND ROWID <=?)"
    def __init__(self, category, parser):
        """Open (creating if needed) databases/<parser>_<category>.db."""
        self.conn_url = "databases/" + parser + "_" + category + ".db"
        self._connection_cache = {}
        with self._get_conn() as conn:
            conn.execute(self._create)
    def _get_conn(self):
        """Return this thread's cached connection (sqlite3 connections
        must not be shared across threads)."""
        id = threading.current_thread().ident
        if id not in self._connection_cache:
            self._connection_cache[id] = sqlite3.Connection(self.conn_url, timeout=60)
        return self._connection_cache[id]
    def __iter__(self):
        """Yield every not-yet-fetched row."""
        with self._get_conn() as conn:
            for result in conn.execute(self._iterate):
                yield result
    def put_many(self, list_obj):
        """Insert many 6-tuples; duplicate urls are silently ignored.

        NOTE(review): all DB errors are swallowed after being printed.
        """
        with self._get_conn() as conn:
            try:
                conn.cursor().executemany(self._putList, list_obj)
            except Exception as e:
                print(e)
    def pop_many(self, amount, sleep_wait=True):
        """Atomically claim up to *amount* unfetched rows.

        Returns the claimed (url, category, origin, thumb, rowid) rows, or
        None when the queue is empty.  NOTE(review): `sleep_wait` is unused
        and the while loop can only run a single iteration — both branches
        return.
        """
        keep_pooling = True
        sql_pop = self._pop_get_many + str(amount)
        with self._get_conn() as conn:
            result = None
            while keep_pooling:
                conn.execute(self._write_lock) # lock the database
                cursor = conn.execute(sql_pop)
                result = cursor.fetchall()
                if(len(result) > 0):
                    keep_pooling = False
                    # Mark the claimed rowid range fetched; every unfetched
                    # row in [first, last] is necessarily in `result`.
                    id_first = int(result[0][4])
                    id_last = int(result[-1][4])
                    conn.execute(self._pop_del_many, (id_first, id_last))
                    conn.commit() # unlock the database
                    return result
                else:
                    conn.commit() # unlock the database
                    return None
3,980 | fa925d0ef4f9df3fdf9a51c7fcc88933609bc9e3 | import turtle
pen = turtle.Turtle()
def curve():
    """Trace one rounded lobe of the heart: 200 tiny 1-degree arc steps."""
    for _ in range(200):
        pen.right(1)
        pen.forward(1)
def heart():
    """Draw a filled heart: left edge, two arcs, then the right edge."""
    pen.fillcolor('yellow')
    pen.begin_fill()
    pen.left(140)
    pen.forward(113)
    curve()  # first lobe
    pen.left(120)
    curve()  # second lobe
    pen.forward(112)
    pen.end_fill()
heart()
|
3,981 | 78a6202f501bc116e21e98a3e83c9e3f8d6402b4 | #!/usr/bin/env python
import requests
import re
def get_content(url):
    """Fetch a pomf-style upload and describe it.

    Returns a dict with site/url/ext/orig_filename/content, or None when
    the URL does not end in ``.../name.ext`` or the download fails.
    Fixes: the original called ``m.group(...)`` without checking the regex
    matched (AttributeError on unexpected URLs) and returned None only
    implicitly; the pattern is now a raw string and the match is checked
    before any network request is made.
    """
    paste_info = {
        'site': 'pomf',
        'url': url
    }
    m = re.match(r'^.*/([0-9a-zA-Z]+)\.([a-zA-Z0-9]+)$', url)
    if m is None:
        # Not a name.ext URL — bail out before wasting a request.
        return None
    response = requests.get(url)
    if response.status_code != 200:
        return None
    paste_info['ext'] = m.group(2)
    paste_info['orig_filename'] = m.group(1)
    paste_info['content'] = response.content
    return paste_info
|
3,982 | 1bb82a24faed6079ec161d95eff22aa122295c13 | # -*- coding: utf-8 -*-
"""
:copyright: (c) 2014-2016 by Mike Taylor
:license: MIT, see LICENSE for more details.
Micropub Tools
"""
import requests
from bs4 import BeautifulSoup, SoupStrainer
try: # Python v3
from urllib.parse import urlparse, urljoin
except ImportError:
from urlparse import urlparse, urljoin
import ronkyuu
_html_parser = 'lxml' # 'html.parser', 'lxml', 'lxml-xml', 'html5lib'
def setParser(htmlParser='html5lib'):
    """Select the BeautifulSoup parser backend used module-wide.

    :param htmlParser: parser name, e.g. 'html.parser', 'lxml', 'html5lib'
    """
    global _html_parser
    _html_parser = htmlParser
# find an endpoint
# look in headers for given domain for a HTTP Link header
# if not found, look for an HTML <link> element in page returned from domain given
def discoverEndpoint(domain, endpoint, content=None, look_in={'name': 'link'}, test_urls=True, validateCerts=True):
    """Find the given endpoint(s) for the given domain.

    Looks first in the HTTP Link response header (when the page was
    fetched), then in HTML <link> elements matching *look_in*.  When
    *content* is supplied, no request is made and only the HTML is scanned.

    :param domain: the URL of the domain to handle
    :param endpoint: iterable of rel names to look for (e.g. ('micropub',))
    :param content: optional pre-fetched content to scan instead of GETting
    :param look_in: dict of name/id/class_ criteria for elements to scan
    :param test_urls: optional flag to validate *domain* first
    :param validateCerts: optional flag to enforce HTTPS certificates
    :rtype: dict with 'status', 'headers', 'content', 'domain' and one
        set of urlparse results per requested endpoint name
    """
    if test_urls:
        ronkyuu.URLValidator(message='invalid domain URL')(domain)
    if content:
        result = {'status': requests.codes.ok,
                  'headers': None,
                  'content': content
                  }
    else:
        r = requests.get(domain, verify=validateCerts)
        result = {'status': r.status_code,
                  'headers': r.headers
                  }
        # check for character encodings and use 'correct' data
        if 'charset' in r.headers.get('content-type', ''):
            result['content'] = r.text
        else:
            result['content'] = r.content
    for key in endpoint:
        result.update({key: set()})
    result.update({'domain': domain})
    if result['status'] == requests.codes.ok:
        # BUG FIX: the original read `r.headers` here, which raised
        # NameError whenever *content* was supplied (r never assigned).
        headers = result['headers']
        if headers and 'link' in headers:
            # NOTE(review): split(',', 1) inspects at most two header
            # entries, and `rel` keeps the raw ';'-suffix text — confirm
            # against real Link headers.
            all_links = headers['link'].split(',', 1)
            for link in all_links:
                if ';' in link:
                    href, rel = link.split(';')
                    url = urlparse(href.strip()[1:-1])
                    if url.scheme in ('http', 'https') and rel in endpoint:
                        result[rel].add(url)
        all_links = BeautifulSoup(result['content'], _html_parser, parse_only=SoupStrainer(**look_in)).find_all('link')
        for link in all_links:
            rel = link.get('rel', None)[0]
            if rel in endpoint:
                href = link.get('href', None)
                if href:
                    url = urlparse(href)
                    # Resolve relative hrefs against the domain.
                    if url.scheme == '' or url.netloc == '':
                        url = urlparse(urljoin(domain, href))
                    if url.scheme in ('http', 'https'):
                        result[rel].add(url)
    return result
def discoverMicropubEndpoints(domain, content=None, look_in={'name': 'link'}, test_urls=True, validateCerts=True):
    """Locate the 'micropub' endpoint for *domain*.

    Thin wrapper over discoverEndpoint; see it for parameter details.
    :rtype: result dict with a 'micropub' set of endpoints
    """
    return discoverEndpoint(domain, ('micropub',), content=content,
                            look_in=look_in, test_urls=test_urls,
                            validateCerts=validateCerts)
def discoverTokenEndpoints(domain, content=None, look_in={'name': 'link'}, test_urls=True, validateCerts=True):
    """Locate the 'token_endpoint' for *domain*.

    Thin wrapper over discoverEndpoint; see it for parameter details.
    :rtype: result dict with a 'token_endpoint' set of endpoints
    """
    return discoverEndpoint(domain, ('token_endpoint',), content=content,
                            look_in=look_in, test_urls=test_urls,
                            validateCerts=validateCerts)
|
3,983 | 13e3337cf9e573b8906fe914a830a8e895af20ba | import re
class Markdown:
    """Tiny Markdown-to-HTML converter.

    Supports #..###### headings, "===" underlined h1, blank-line separated
    paragraphs and inline _em_, **strong** and `code` spans.  toHTML()
    prints the generated HTML lines.

    NOTE: text after the last blank line / heading is never flushed; this
    matches the original behaviour.
    """

    def __init__(self):
        # BUG FIX: these were class attributes, so every Markdown instance
        # shared (and kept appending to) the same buffers.
        self.__formattedFile = []  # finished HTML output lines
        self.__analyzing = []      # raw lines of the block being built

    def __processSingleLine(self, line):
        """Dispatch one raw input line to the appropriate handler."""
        if(self.__isHeading(line)):
            self.__process("p")  # flush any open paragraph first
            self.__analyzing.append(re.sub("(#{1,6})", "", line).strip())
            # Heading level = number of leading '#' characters.
            self.__process("h" + str(len(re.split("\s", line)[0])))
        elif(self.__isHeading2(line)):
            self.__process("h1")
        elif(self.__isBlankLine(line)):
            self.__process("p")
        else:
            self.__analyzing.append(line)

    def __isHeading(self, line):
        """True for '#'-style headings (1-6 hashes plus whitespace)."""
        return re.match("^(#{1,6})(\s)+", line) != None

    def __isHeading2(self, line):
        """True when *line* is an '===' underline for a one-line buffer."""
        if(len(self.__analyzing) == 1 and re.match("^[\=]+$", line) != None):
            return True
        return False

    def __isBlankLine(self, line):
        """True for empty lines (paragraph separators)."""
        return re.match("^[\n]", line) != None

    def __convertAttribute(self, markdown, tag):
        """Replace one opening/closing *markdown* marker pair with <tag>.

        Scans word-by-word for a word starting with the marker and a later
        word ending with it (optionally followed by punctuation); returns
        True when a pair was converted, False otherwise.
        """
        lineIndex1 = -1
        wordIndex1 = -1
        lineIndex2 = -1
        wordIndex2 = -1
        for lIndex in range(len(self.__analyzing)):
            words = re.split("\s", self.__analyzing[lIndex])
            for wIndex in range(len(words)):
                if(lineIndex1 == -1):
                    if(re.match("^[\\" + markdown + "][\S]", words[wIndex])):
                        lineIndex1 = lIndex
                        wordIndex1 = wIndex
                if(lineIndex1 >= 0):
                    if(re.match("[\S]+[\\" + markdown + "][\.\,\;\:]*$", words[wIndex])):
                        lineIndex2 = lIndex
                        wordIndex2 = wIndex
                        break
                wIndex += 1
            if(lineIndex2 >= 0):
                break
        if(lineIndex2 >= 0):
            newLine1 = re.split("\s", self.__analyzing[lineIndex1])
            newLine1[wordIndex1] = re.sub("^\\" + markdown, "<" + tag + ">", newLine1[wordIndex1])
            self.__analyzing[lineIndex1] = " ".join(newLine1)
            newLine2 = re.split("\s", self.__analyzing[lineIndex2])
            newLine2[wordIndex2] = re.sub("\\" + markdown, "</" + tag + ">", newLine2[wordIndex2])
            self.__analyzing[lineIndex2] = " ".join(newLine2)
            return True
        return False

    def __convertFormat(self):
        """Convert every _em_, **strong** and `code` span in the buffer."""
        while self.__convertAttribute("_", "em"): continue
        while self.__convertAttribute("*{2,2}", "strong"): continue
        while self.__convertAttribute("`", "code"): continue

    def __convertParagraph(self, tag):
        """Wrap the buffered block in <tag>...</tag> (no-op when empty)."""
        if(len(self.__analyzing) > 0):
            self.__analyzing[0] = "<" + tag + ">" + self.__analyzing[0]
            self.__analyzing[-1] = "".join(self.__analyzing[-1].split("\n")) + "</" + tag + ">"

    def __process(self, tag):
        """Finalise the current block: inline formats, wrap, emit, reset."""
        self.__convertFormat()
        self.__convertParagraph(tag)
        self.__formattedFile.extend(self.__analyzing)
        self.__analyzing.clear()

    def toHTML(self, filepath):
        """Convert the Markdown file at *filepath* and print the HTML lines."""
        # BUG FIX: the file handle was previously opened but never closed.
        with open(filepath, "r") as f:
            for line in f:
                self.__processSingleLine(line)
        for li in self.__formattedFile:
            print(li)
3,984 | 9178d39a44cfb69e74b4d6cd29cbe56aea20f582 | #!/usr/bin/env python
#coding:gbk
"""
Author: pengtao --<pengtao@baidu.com>
Purpose:
1. 管理和交互式调用hadoop Job的框架
History:
1. 2013/12/11 created
"""
import sys
import inspect
import cmd
import readline
#import argparse
#from optparse import (OptionParser, BadOptionError, AmbiguousOptionError)
from job import Job, MJob, PJob
from utils import _decode, hadoop_cmd
class ArgParseError(Exception):
    """Raised when arguments forwarded to a job cannot be parsed."""
    pass
class Router(object):
"""
核心框架,管理脚本中每一个job/step。供shell进行调度。
Usage
=====
1. import
>> from ubs_shell.router import router
>> if __name__ == "__main__":
>> router.main()
2. decorate the functions & classes
>> @router("plot")
>> def plot_dwell_time_disribution():
>> import matplotlib
>> ...
>> @router("step2")
>> class process_groupby_data(Step):
>> def run(self, fn):
>> fh = open(fn,)
>> ...
>> @router("grep1")
>> class grep_newcookiesort_url(PJob):
>> def config(self):
>> self.input_project = "udwetl_ps_query.event_day=20131209"
>> ...
>> @router("sum")
>> class sum_hao123_and_union(Job):
>> def config(self):
>> self.command = "streaming"
>> ...
3. usage on command line
>> python script.py -h
>> python script.py -f step1 --ifn=a.txt --ofn=b.txt
>> python script.py -f step2 a -c la -d laaa bvalue
"""
    def __init__(self):
        # Registered applications: short name -> (app object, type tag).
        self.app_path = {}
        self.app_order = [] # remember the order of path
    def __call__(self, path):
        """Decorator factory: register the decorated function/class under
        *path* and return it unchanged."""
        def wrapper(application):
            self.register(path, application)
            return application
        return wrapper
#----------------------------------------------------------------------
    def register(self, path, app_class):
        """Register *app_class* under the short name *path*.

        1. find the type of application: Job/PJob/MJob/function
        2. record it in self.app_path with its type tag (classes are
           instantiated here; functions are stored as-is).
        @type path: string
        @param path: the path (short name) of an application. eg. "A", "step1"
        @type app_class: object
        @param app_class: a function or a Job subclass
        """
        s = path[0].lower()
        if s < 'a' or s > "z":
            raise ValueError("path (short name) must start with character : %s" % path)
        if path in self.app_path:
            raise ValueError("duplicated path (short name) %s" % path)
        if inspect.isfunction(app_class):
            self.app_path[path] = (app_class, "func")
            self.app_order.append(path)
        elif inspect.isclass(app_class):
            fathers = inspect.getmro(app_class)
            if Job in fathers:
                # NOTE(review): `type` shadows the builtin.  The more
                # specific PJob/MJob tags override the generic Job tag.
                type = "Job"
                if PJob in fathers:
                    type = "PJob"
                elif MJob in fathers:
                    type = "MJob"
                self.app_path[path] = (app_class(), type)
                self.app_order.append(path)
            else:
                raise Exception("unknown class : %s" % app_class)
        else:
            raise Exception("unknown object : %s" % app_class)
        return True
def route(self, func, mixed_args=[], mode="normal", opt={}):
"""
根据func的类型,执行应用逻辑。
@type func: string
@param func: app's short name
@type mixed_args: list
@param mixed_args: args passed to func
@type mode: string
@param mode: normal or debug
@type opt: dict
@param opt: other info or debug mode
"""
app, type = self.app_path[func]
try:
args, kwargs = self._arg2kw(mixed_args)
except ArgParseError:
return False
if type == "func":
return app(*args, **kwargs)
elif type in ("Job", "MJob", "PJob"):
return app.invoke(args, kwargs, mode, opt)
else:
raise TypeError("unknown type: %s" % application)
return True
def check_path(self, path):
"""
check whether the input is in self.app_path
"""
if path in self.app_path:
return True
else:
return False
#----------------------------------------------------------------------
def _arg2kw(self, mixed_args):
"""
convert
"a b --k=v -i input ok"
into
["a", "b", "ok"] {"k":"v", "i":"input"}
"""
def insert(dict_, k, v):
if k in dict_:
print "duplicated args : %s " % kv[0]
raise ArgParseError
dict_[k] = v
opts = []
args = {}
n = len(mixed_args)
i = 0
while i < n:
a = mixed_args[i]
if a == '-' or a == '--' :
opts.append(a)
elif a.startswith("---"):
print "invalid args: %s" % mixed_args
print "only the following formats are supported:"
print " arg1"
print " --input=name1"
print " --output name3"
print " -oname2"
print " -o name4"
raise ArgParseError
elif a.startswith("--"):
kv = a[2:].split("=", 1)
if len(kv) == 2:
insert(args, kv[0], kv[1])
else:
i += 1
insert(args, kv[0], mixed_args[i])
elif a.startswith("-"):
if len(a) > 2:
insert(args, a[1], a[2:])
else:
i += 1
insert(args, a[1], mixed_args[i])
else:
opts.append(a)
i += 1
return opts, args
def _parse_args(self):
"""
返回值的例子
- ("shell", "", [], {})
- ("help", "step1", [], {})
- ("run", "step1", ["arg1", "arg2"], {"k1":"v1", "k2":"v2"})
"""
def print_paths():
""""""
print "The available Job are:"
for path in self.app_order:
(app, type) = self.app_path[path]
if type == "func":
print " %-12s [%4s] --> %s" % (path, type, Job.get_func_help(app))
elif type in ("Job", "MJob", "PJob"):
print " %-12s [%4s] --> %s" % (path, type, app.get_line_help())
else:
raise Exception("unknown Object type = %s of %s" % (type, app) )
def print_general_help():
""""""
print "usage: %s [COMMAND]" % sys.argv[0]
print "where COMMAND is one of :"
print " shell enter the interactive shell mode. The DEFAULT value"
print " run execute a specific Step/Job/func in script"
print " %s run TAG [[ARG1] [ARG2] ...] " % sys.argv[0]
print " help print this help or detailed info about the specific Step/Job/func"
print " %s help TAG " % sys.argv[0]
# print "Most commands print help when invoked with w/o parameters."
print
print_paths()
sys.exit(0)
def print_run_help():
""""""
print "usage: %s run TARGET [[ARG1] [ARG2]]" % sys.argv[0]
print " TARGET the registered Step/Job/func"
print " ARG1/ARG2 all are passed to TARGET as args/kwargs"
print
print_paths()
sys.exit(0)
argv = sys.argv[1:]
# default
(cmd, path, args) = ("shell", "", [])
if len(argv) == 0:
return (cmd, path, args)
elif len(argv) == 1:
if argv[0] == "shell":
return (cmd, path, args)
elif argv[0] == "run":
print_paths()
sys.exit(0)
else: # help or unknown
print_general_help()
else:
# this will capture the -h args of invoked function
#if "-h" in argv or "--help" in argv:
#print_help()
if argv[0] in ("shell", "help", "run"):
cmd = argv[0]
path = argv[1]
if cmd == "shell":
return (cmd, path, args)
else:
if self.check_path(path):
return (cmd, path, argv[2:])
else:
print_paths()
sys.exit(0)
else:
print_general_help()
#----------------------------------------------------------------------
def print_path_help(self, path):
"""print the help of certain path"""
(app, type) = self.app_path[path]
if type == "func":
print " %s [%s] --> %s" % (path, type, Job.get_func_help(app))
for line in _decode(app.__doc__).split("\n"):
print " " + line
print Job.get_func_config(app, prefix=" ")
elif type in ("Job", "MJob", "PJob"):
print " %s [%s] --> %s" % (path, type, app.get_line_help())
for line in _decode(app.__doc__).split("\n"):
print " " + line
print app.get_config_str(prefix=" ")
else:
raise Exception("unknown Object type = %s of %s" % (type, app) )
print ""
#----------------------------------------------------------------------
def shell(self):
"""run interactive shell"""
RouterShell(self).cmdloop()
#----------------------------------------------------------------------
def main(self):
"""
entrance for Router object.
usage:
>> if __name__ == "__main__":
>> router.main()
"""
cmd, path, args = self._parse_args()
if cmd == "shell":
print "You are now in ubs shell."
print "Use \"python %s help\" to see other choice." % sys.argv[0]
self.shell()
elif cmd == "help":
self.print_path_help(path)
sys.exit(0)
elif cmd == "run":
self.route(path, args)
else:
raise Exception("unknown CMD %s" % cmd)
# the Router instance for importing: user scripts are expected to do
# ``from ubs_shell.router import router`` and decorate their jobs with it
router = Router()
class RouterShell(cmd.Cmd):
"""
Simple shell command processor for ubs_hell jobs.
TODO
"""
prompt = "(ubs):"
intro = "interactively run ubs_shell job."
# 单part进行debug的优先级
_DEBUG_PRIORITY = "VERY_HIGH"
#----------------------------------------------------------------------
def __init__(self, router):
""""""
cmd.Cmd.__init__(self)
self.router = router
self.job_queue = []
# autocomplete will ignore "-"
# http://mail.python.org/pipermail/python-list/2011-March/599475.html
delims = readline.get_completer_delims().replace("-", "")
readline.set_completer_delims(delims)
# status variable list
# all status variable must begin with "v_"
self.v_remove = MJob.is_remove_output
#----------------------------------------------------------------------
def do_ls(self, pattern=""):
"""
list all jobs. similar to print_paths in Router._parse_args.
"""
if pattern:
print "The available jobs with substring %s are:" % pattern
else:
print "The available jobs are:"
app_order = self.router.app_order
app_path = self.router.app_path
n = len(self.router.app_order)
j = 0
for i in range(n):
path = app_order[i]
if path.find(pattern) != -1:
j += 1
app, type = app_path[path]
if type == "func":
print " %d. %-12s [%4s] --> %s" % (i, path, type, Job.get_func_help(app))
elif type in ("Job", "MJob", "PJob"):
print " %d. %-12s [%4s] --> %s" % (i, path, type, app.get_line_help())
else:
raise Exception("unknown Object type = %s of %s" % (type, app) )
if pattern:
print "There are %d/%d including '%s'" % (j, n, pattern)
#----------------------------------------------------------------------
def help_ls(self):
""""""
print "\n".join(["ls [pattern]",
"ls all jobs with pattern substring."])
#----------------------------------------------------------------------
def _do_run(self, path, args):
"""
run job with args
"""
try:
self.router.route(path, args)
except TypeError, e:
# To catch the follow errors
# TypeError: xxxx got an unexpected keyword argument 'k'
# TypeError: 'print_my_good() takes at least 1 argument (0 given)'
print "run job %s with arg < %s > error:" % (path, ", ".join(args))
print "%s" % e
def do_run(self, line):
"""
run job
"""
args = filter(None, line.strip().split())
if args: # []
self._do_run(args[0], args[1:])
else:
self.help_run()
def help_run(self):
print "\n".join(["run jobname [[ARG1] [ARG2] ...]",
" run the job with arguments.",
" use 'ls' to see available jobs"])
def complete_run(self, text, line, begidx, endidx):
if not text:
completions = self.router.app_order
else:
completions = [ f
for f in self.router.app_order
if f.startswith(text)
]
return completions
#----------------------------------------------------------------------
def do_queue(self, line):
""""""
line = line.strip()
if not line:
if self.job_queue:
print "current jobs in queue are:"
for i in range(len(self.job_queue)):
ele = self.job_queue[i]
print " %d. %-12s < %s >" % (i, ele[0], " ,".join(ele[1]))
else:
print "NO job in queue."
else:
parts = filter(None, line.split())
if parts[0] == "clear":
if len(parts) != 2:
self.help_queue()
return
target = parts[1]
if target == 'all':
print "clear all jobs..."
self.job_queue = []
else:
try:
target = int(target)
if target >= len(self.job_queue):
print "NO %th job in queue" % target
return
print "clear %dth job: %s" % (target, self.job_queue[target])
del self.job_queue[target]
except ValueError:
print "invalid number %s" % target
self.help_queue()
return
elif parts[0] == "start":
n = len(self.job_queue)
i = 0
while self.job_queue:
ele = self.job_queue.pop(0)
i += 1
print "==== run %d/%d jobs in queue ====\n" % (i, n)
self._do_run(ele[0], ele[1])
elif parts[0] == "add":
if len(parts) > 1:
ele = (parts[1], parts[2:])
self.job_queue.append(ele)
else:
self.help_queue()
else:
print "unknown command %s" % parts
self.help_queue()
return
#----------------------------------------------------------------------
def help_queue(self):
""""""
print "\n".join([
"queue usage : manipulate the job queue.",
" queue : show current jobs.",
" queue start : start the job queue.",
" queue clear [N|all] : clear the Nth/all job. 0-based.",
" queue add job [[ARG1]...] : add job into queue."
])
#----------------------------------------------------------------------
def complete_queue(self, text, line, begidx, endidx):
"""
"""
completions = []
parts = filter(None, line.strip().split())
n = len(parts)
if n == 1:
completions = ["add", "clear", "start"]
elif n == 2:
if text:
completions = [f for f in ["add", "clear", "start"] if f.startswith(text)]
else:
# begin with the 3rd fields
completions = self.router.app_order
elif n == 3:
completions = [ f
for f in self.router.app_order
if f.startswith(text)
]
else:
pass
return completions
#----------------------------------------------------------------------
def _debug_parse_args(self, args):
"""
input:
-m 10 -r 2 step1 -input fff -output xxx
output:
opt, path, args = {"m":10, "r":2, "n":1}, "step1", ["-input", "fff", "-output", "xxx"]
"""
arg_list = list(args)
n = len(arg_list)
i = 0
opt = {"n":1, "m":None, "r":1}
path = ""
others = []
while i < n:
cur = arg_list[i]
if cur.startswith("-"):
if cur in ('-n', "-m", "-r"):
opt[cur[1]] = arg_list[i+1]
i = i + 1
else:
raise ArgParseError
else:
path = arg_list[i]
others = arg_list[i+1:]
break
i += 1
if path == "":
raise ArgParseError
if opt["m"] is None:
opt["m"] = opt["n"]
return (opt, path, others)
def do_debug(self, line):
"""
RouterShell中的特有接口,在单步执行hadoop job时,选择hdfs上的一个part作为输入,进行debug。
debug [-n numofparts] [-m numofmapers] [-r numofreducers] job-name [[ARG1] ...]
说明见 help_debug.
"""
fields = line.strip().split()
n = len(fields)
if n == 0 :
self.help_debug()
else:
try:
(opt, path, args) = self._debug_parse_args(fields)
except ArgParseError:
self.help_debug()
return
if path not in self.router.app_path:
print "invalid job name : %s" % path
print "use \"ls\" to see all job name "
return
# def route(self, func, mixed_args=[], mode="normal", opt={}):
self.router.route(path, args, "debug", opt)
#----------------------------------------------------------------------
def help_debug(self):
""""""
print "\n".join(["debug [-n numofparts] [-m numofmappers] [-r numofreducers] job-name [[ARG1] ...] ",
"run the debug job with HIGH priority.",
" -n number of hdfs parts, default 1.",
" -m number of mappers, default == numofparts.",
" -r number of reducers, default 1."])
#----------------------------------------------------------------------
complete_debug = complete_run
#----------------------------------------------------------------------
def do_dfs(self, line):
"""invoke hadoop dfs commands"""
args = filter(None, line.strip().split())
if not args:
self.help_dfs()
else:
cmds = ["dfs"]+args
(retcode, stdout) = hadoop_cmd(cmds, MJob.hadoop_home)
if retcode is False:
pass # Popen failed
else:
print stdout
if retcode != 0:
print "hadoop dfs retcode=%s" % retcode
#----------------------------------------------------------------------
def help_dfs(self):
""""""
print "dfs [COMMAND [ARGS]...]"
print " [-ls <path>]"
print " [-lsr <path>]"
print " [-du <path>]"
print " [-dus <path>]"
print " [-count[-q] <path>]"
print " [-mv <src> <dst>]"
print " [-cp <src> <dst>]"
print " [-ln <src> <dst>]"
print " [-rm <path>]"
print " [-rmr <path>]"
print " [-expunge]"
print " [-put <localsrc> ... <dst>]"
print " [-copyFromLocal <localsrc> ... <dst>]"
print " [-moveFromLocal <localsrc> ... <dst>]"
print " [-get [-ignoreCrc] [-crc] [-repair] <src> <localdst>]"
print " [-getmerge [-addnl] <src> <localdst> | -getmerge <src> <localdst> [addnl]]"
print " [-cat <src>]"
print " [-text <src>]"
print " [-copyToLocal [-ignoreCrc] [-crc] [-repair] <src> <localdst>]"
print " [-copySeqFileToLocal [-ignoreLen] <srcFile> <localDstFile>]"
print " [-moveToLocal [-crc] <src> <localdst>]"
print " [-mkdir <path>]"
print " [-setrep [-R] [-w] [-d] <rep> <path/file>]"
print " [-touchz <path>]"
print " [-test -[ezd] <path>]"
print " [-stat [format] <path>]"
print " [-tail [-f] <file>]"
print " [-chmod [-R] <MODE[,MODE]... | OCTALMODE> PATH...]"
print " [-chown [-R] [OWNER][:[GROUP]] PATH...]"
print " [-chgrp [-R] GROUP PATH...]"
print " [-help [cmd]]"
print "invoke the hadoop dfs of Mjob.hadoop_home=%s" % MJob.hadoop_home
#----------------------------------------------------------------------
def complete_dfs(self, text, line, begidx, endidx):
""" """
cmds = ("-ls", "-lsr", "-du", "-dus", "-count", "-mv", "-cp", "-ln", "-rm", "-rmr", "-expunge", "-put", "-copyFromLocal", "-moveFromLocal", "-get", "-getmerge", "-cat", "-text", "-copyToLocal", "-copySeqFileToLocal", "-moveToLocal", "-mkdir", "-setrep", "-touchz", "-test", "-stat", "-tail", "-chmod", "-chown", "-chgrp", "-help")
if not text:
completions = cmds
else:
completions = [ f
for f in cmds
if f.startswith(text)
]
return completions
#----------------------------------------------------------------------
def do_info(self, line):
"""
print help for each job.
"""
args = filter(None, line.strip().split())
if len(args) != 1 or args[0] not in self.router.app_order:
self.help_info()
else:
self.router.print_path_help(args[0])
#----------------------------------------------------------------------
def help_info(self):
""""""
print "info job-name"
print " print the detailed information of job"
print " use \"ls\" to see all jobs."
#----------------------------------------------------------------------
complete_info = complete_run
def do_string(self, line):
"""print cmd string with config args"""
args = filter(None, line.strip().split())
if not args or args[0] not in self.router.app_order:
self.help_string()
else:
app, type = self.router.app_path[args[0]]
if hasattr(app, "to_formatted_string"):
try:
v, kv = self.router._arg2kw(args[1:])
except ArgParseError:
return
try:
app.config(*v, **kv)
except TypeError, e:
print "TypeError: %s" % e
return
print app.to_formatted_string()
else:
print "%s do not support to_formatted_string" % args[0]
#----------------------------------------------------------------------
def help_string(self):
""""""
print "string job-name [[ARG1]...]"
print " print the hadoop command string of job"
print " your show provide the config arguments if needed"
print " use \"ls\" to see all jobs."
#----------------------------------------------------------------------
complete_string = complete_run
def do_set(self, line):
"""set the status variable
"""
vs = filter(None, line.strip().split())
if len(vs) == 2 :
if vs[0] == 'remove' :
if vs[1] in ("True", "T", "False", 'F'):
if vs[1].startswith("T"):
self.v_remove = True
print " now remove = True"
elif vs[1].startswith("F"):
self.v_remove = False
print " now remove = False"
else:
pass
else:
print "known value of remove: %s" % vs[1]
else:
print "unknow status variable %s=%s" % (vs[0], vs[1])
self.help_set()
else:
self.help_set()
#----------------------------------------------------------------------
def help_set(self):
"""
"""
print "\n".join([
"set var value",
" set the status variable.",
" the avaible variables are:",
" %-8s - T[rue]/F[alse]" % "remove"
])
#----------------------------------------------------------------------
def complete_set(self, text, line, begidx, endidx):
"""
"""
workline = line[:begidx]
parts = filter(None, workline.strip().split())
n = len(parts)
completions = []
if n == 1: # set xxx
if text: # part[1]
completions = [f for f in ["remove"] if f.startswith(text)]
else: # part[2]
completions = ["remove"]
elif n == 2: # set verbose xxx
if parts[1] == "remove":
if text:
completions = [f for f in ["False", "True"] if f.startswith(text)]
else:
completions = ["False", "True"]
else:
completions = []
else:
completions = []
return completions
#----------------------------------------------------------------------
def do_show(self, line):
""""""
args = filter(None, line.strip().split())
for arg in args:
name = "v_" + arg
if hasattr(self, name):
print " %s = %s" % (arg, getattr(self, name))
else:
print " %s = None" % (arg)
if not args:
var_list = filter(lambda x: x.startswith("v_"), dir(self))
var_list = map(lambda x: x[2:], var_list)
print " Availabe variables include:"
for var in var_list:
print " %s" % var
#----------------------------------------------------------------------
def help_show(self):
""""""
print "\n".join([
"show [VAR1] [VAR2] ...",
" print the varaible values."
])
#----------------------------------------------------------------------
def complete_show(self, text, line, begidx, endidx):
""""""
var_list = filter(lambda x: x.startswith("v_"), dir(self))
var_list = map(lambda x: x[2:], var_list)
completion = []
if text:
completion = [f for f in var_list if f.startswith(text)]
else:
completion = var_list
return completion
def do_EOF(self, line):
return True
#----------------------------------------------------------------------
def help_EOF(self):
""""""
print "exit the shell"
#----------------------------------------------------------------------
def do_quit(self, line):
""""""
return True
#----------------------------------------------------------------------
def help_quit(self):
""""""
print "exit the shell"
do_q = do_quit
help_q = help_quit
do_exit = do_quit
help_exit = help_quit
#----------------------------------------------------------------------
def help_help(self):
""""""
print "print this help"
#----------------------------------------------------------------------
def emptyline(self):
"""
The default emptyline function is to repeat last command, which will cause trouble.
So overide it here.
"""
self.do_ls("")
|
3,985 | b16691429d83f6909a08b10cc0b310bb62cd550d | import json
from gamestate.gamestate_module import Gamestate
from time import time
from gamestate import action_getter as action_getter
def test_action_getter():
    """Benchmark action_getter.get_actions on a canned gamestate.

    Loads a reference gamestate from a JSON fixture, computes all available
    actions ``nloops`` times, and prints the accumulated wall-clock time.
    """
    path = "./../Version_1.0/Tests/General/Action_1.json"
    # fixed: the original leaked the file handle via open(path).read()
    with open(path) as fixture:
        document = json.load(fixture)
    gamestate = Gamestate.from_document(document["gamestate"])
    nloops = 100
    total_time = 0
    for _ in range(nloops):
        t = time()
        action_getter.get_actions(gamestate)
        total_time += time() - t
    print("Time used to find all actions", str(nloops), "times:", str(round(total_time, 3)))
3,986 | 5c8628e41c0dd544ade330fdd37841beca6c0c91 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Triangle Project Code.
# Triangle analyzes the lengths of the sides of a triangle
# (represented by a, b and c) and returns the type of triangle.
#
# It returns:
# 'equilateral' if all sides are equal
# 'isosceles' if exactly 2 sides are equal
# 'scalene' if no sides are equal
#
# The tests for this method can be found in
# about_triangle_project.py
# and
# about_triangle_project_2.py
#
def triangle(a, b, c):
    '''
    Classify a triangle by its side lengths.

    Returns 'equilateral' (all sides equal), 'isosceles' (exactly two equal)
    or 'scalene' (no sides equal).

    Raises TriangleError when any side is non-positive or the sides violate
    the triangle inequality (the two shorter sides must out-reach the
    longest, strictly — degenerate triangles are rejected).
    '''
    shortest, middle, longest = sorted((a, b, c))
    # validity: all sides positive, strict triangle inequality
    if shortest <= 0 or shortest + middle <= longest:
        raise TriangleError
    # the number of distinct side lengths determines the kind
    # (renamed from `type`, which shadowed the builtin)
    kinds = {
        3: "scalene",
        2: "isosceles",
        1: "equilateral"
    }
    return kinds[len({a, b, c})]
# Error class used in part 2. No need to change this code.
class TriangleError(Exception):
    """Signals an impossible triangle (non-positive or unreachable sides)."""
|
3,987 | e9908e32204da8973f06d98430fc660c90b5e303 | #14681
#점의 좌표를 입력받아 그 점이 어느 사분면에 속하는지 알아내는 프로그램을 작성하시오. 단, x좌표와 y좌표는 모두 양수나 음수라고 가정한다.
x = int(input())
y = int(input())
# map the signs of (x, y) onto the quadrant number; the problem guarantees
# both coordinates are non-zero, and the final branch is the catch-all
quadrant = ("1" if x > 0 and y > 0 else
            "4" if x > 0 and y < 0 else
            "2" if x < 0 and y > 0 else
            "3")
print(quadrant)
|
3,988 | 25d4fa44cb17048301076391d5d67ae0b0812ac7 | # coding: utf-8
"""
SevOne API Documentation
Supported endpoints by the new RESTful API # noqa: E501
OpenAPI spec version: 2.1.18, Hash: db562e6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.data_aggregation_setting import DataAggregationSetting # noqa: F401,E501
from swagger_client.models.raw_data_setting_v1 import RawDataSettingV1 # noqa: F401,E501
from swagger_client.models.units_setting import UnitsSetting # noqa: F401,E501
from swagger_client.models.work_hours_setting import WorkHoursSetting # noqa: F401,E501
class RawDataSettingsV1(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'data_aggregation_setting': 'DataAggregationSetting',
        'raw_data_setting': 'RawDataSettingV1',
        'units_setting': 'UnitsSetting',
        'work_hours_setting': 'WorkHoursSetting'
    }

    attribute_map = {
        'data_aggregation_setting': 'dataAggregationSetting',
        'raw_data_setting': 'rawDataSetting',
        'units_setting': 'unitsSetting',
        'work_hours_setting': 'workHoursSetting'
    }

    def __init__(self, data_aggregation_setting=None, raw_data_setting=None, units_setting=None, work_hours_setting=None):  # noqa: E501
        """RawDataSettingsV1 - a model defined in Swagger"""  # noqa: E501
        self._data_aggregation_setting = None
        self._raw_data_setting = None
        self._units_setting = None
        self._work_hours_setting = None
        self.discriminator = None

        # route every value the caller actually supplied through its
        # property setter; omitted (None) values stay None
        supplied = [
            ('data_aggregation_setting', data_aggregation_setting),
            ('raw_data_setting', raw_data_setting),
            ('units_setting', units_setting),
            ('work_hours_setting', work_hours_setting),
        ]
        for attr_name, attr_value in supplied:
            if attr_value is not None:
                setattr(self, attr_name, attr_value)

    @property
    def data_aggregation_setting(self):
        """The DataAggregationSetting of this RawDataSettingsV1."""
        return self._data_aggregation_setting

    @data_aggregation_setting.setter
    def data_aggregation_setting(self, data_aggregation_setting):
        """Set the DataAggregationSetting of this RawDataSettingsV1."""
        self._data_aggregation_setting = data_aggregation_setting

    @property
    def raw_data_setting(self):
        """The RawDataSettingV1 of this RawDataSettingsV1."""
        return self._raw_data_setting

    @raw_data_setting.setter
    def raw_data_setting(self, raw_data_setting):
        """Set the RawDataSettingV1 of this RawDataSettingsV1."""
        self._raw_data_setting = raw_data_setting

    @property
    def units_setting(self):
        """The UnitsSetting of this RawDataSettingsV1."""
        return self._units_setting

    @units_setting.setter
    def units_setting(self, units_setting):
        """Set the UnitsSetting of this RawDataSettingsV1."""
        self._units_setting = units_setting

    @property
    def work_hours_setting(self):
        """The WorkHoursSetting of this RawDataSettingsV1."""
        return self._work_hours_setting

    @work_hours_setting.setter
    def work_hours_setting(self, work_hours_setting):
        """Set the WorkHoursSetting of this RawDataSettingsV1."""
        self._work_hours_setting = work_hours_setting

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # one level of conversion, matching the generated original: nested
        # models expose to_dict(); lists and dicts are mapped element-wise
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                                for k, v in value.items()}
            else:
                result[attr] = value
        if issubclass(RawDataSettingsV1, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, RawDataSettingsV1) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
3,989 | 59eb705d6d388de9afbcc0df3003f4d4f45f1fbd | import Tkinter
import random
secret = random.randint(1, 100)

### TKINTER ELEMENTS ###
window = Tkinter.Tk()

# greeting text
greeting = Tkinter.Label(window, text="Guess the secret number!")
greeting.pack()

# guess entry field
guess = Tkinter.Entry(window)
guess.pack()

# feedback label, updated after every submission
result = Tkinter.Label(window, text="")
result.pack()


def check_guess():
    """Compare the entry contents against the secret and show feedback."""
    try:
        value = int(guess.get())
    except ValueError:
        result.config(text="Please enter a whole number.")
        return
    if value < secret:
        result.config(text="Too low!")
    elif value > secret:
        result.config(text="Too high!")
    else:
        result.config(text="Correct!")


# submit button — fixed: the original button had no command attached, so the
# game could never be played (secret was generated but never used)
submit = Tkinter.Button(window, text="Submit", command=check_guess)
submit.pack()

window.mainloop()
|
3,990 | 8e9aec7d3653137a05f94e4041d28f3423122751 | from os.path import basename
from .FileInfo import FileInfo
class mrk_file(FileInfo):
    """
    Container for a single .mrk marker file.
    """
    def __init__(self, id_=None, file=None, parent=None):
        super(mrk_file, self).__init__(id_, file, parent)
        self._type = '.mrk'

    #region class methods

    def __getstate__(self):
        # nothing beyond the base-class state needs pickling
        return super(mrk_file, self).__getstate__()

    def __setstate__(self, state):
        super(mrk_file, self).__setstate__(state)

    def __repr__(self):
        # .mrk files are listed under each con file's associated mrk's, so
        # the representation is just the file's base name.
        return str(basename(self.file))
|
3,991 | 5447bd3b08c22913ae50ee66ee81554d2357ef3e | import os
from typing import Union, Tuple, List
import pandas as pd
from flags import FLAGS
from helpers import load_from_pickle, decode_class, sort_results_by_metric
ROOT = FLAGS.ROOT
RESULTS_FOLDER = FLAGS.RESULTS_FOLDER
FULL_PATH_TO_CHECKPOINTS = os.path.join(ROOT, RESULTS_FOLDER, "checkpoints")
def eval_results(time_stamps: Union[Tuple, List],
                 excel_file_path=os.path.join(FULL_PATH_TO_CHECKPOINTS, "xVal_results.xlsx")):
    """Decode cross-validation results for the given training time stamps.

    For every time stamp the pickled result dict is loaded; each run id in it
    becomes one CSV file and one sheet of the shared Excel workbook, holding
    the decoded class per cross-validation fold.
    """
    with pd.ExcelWriter(excel_file_path, mode="w") as writer:
        for ts in time_stamps:
            print(f"Evaluating results for time stamp: {ts}")
            pickle_path = os.path.join(FULL_PATH_TO_CHECKPOINTS, f"full_result_dict_{ts}.p")
            full_results_dict = load_from_pickle(pickle_path)
            for run_id, results_dict in full_results_dict.items():
                decoded = {}
                # data[3] holds the raw prediction for each fold entry
                for cur_xval, data_list in results_dict.items():
                    decoded[cur_xval] = [decode_class(entry[3]) for entry in data_list]
                frame = pd.DataFrame(decoded)
                frame.to_csv(os.path.join(FULL_PATH_TO_CHECKPOINTS, f"xVal_results_{run_id}.csv"), index=False, header=False)
                frame.to_excel(writer, run_id)
if __name__ == '__main__':
    # evaluate a fixed set of training runs, writing the per-run CSV/Excel
    # summaries via eval_results
    time_stamps_to_eval = ["1616007514.9154973"]
    eval_results(time_stamps_to_eval)
    # then rank all checkpoint results by the chosen metric;
    # NOTE(review): assumes sort_results_by_metric returns (score, path)
    # pairs — confirm ordering against helpers.sort_results_by_metric
    metric = "f1score"
    score_path_list, _ = sort_results_by_metric(os.path.join(ROOT, RESULTS_FOLDER, "checkpoints"), metric)
    print(f"{metric}: {[s for s, p in score_path_list]}")
|
3,992 | 31761b9469cc579c209e070fbe7b71943404a1ff | import requests
import json
def display_response(rsp):
    """Pretty-print an HTTP response: status code, headers, then the body.

    Prefers a JSON-rendered body and falls back to the raw text when the
    body does not parse as JSON. Never raises: any unexpected failure is
    reported on stdout instead.
    """
    try:
        print("Printing a response.")
        print("HTTP status code: ", rsp.status_code)
        headers = dict(rsp.headers)
        print("Response headers: \n", json.dumps(headers, indent=2, default=str))
        try:
            payload = rsp.json()
            print("JSON body: \n", json.dumps(payload, indent=2, default=str))
        except Exception:
            print("Text body: \n", rsp.text)
    except Exception as e:
        print("display_response got exception e = ", e)
def test_get_from_hell():
    """Fire the hairiest lahman2017 query at the local API and dump the
    response: two child tables, per-child filters, and an explicit field
    projection, all in one GET."""
    url = "http://127.0.0.1:5000/api/lahman2017/people?children=appearances%2Cbatting&people.nameLast=Williams&batting.yearID=1960&appearances.yearID=1960&fields=people.playerID%2Cpeople.nameLast%2Cpeople.nameFirst%2Cbatting.AB%2Cbatting.H%2Cappearances.G_all"
    try:
        print("\n test 1, ", url)
        display_response(requests.get(url))
    except Exception as e:
        print("POST got exception = ", e)


test_get_from_hell()
3,993 | 35d99713df754052a006f76bb6f3cfe9cf875c0b | #!/usr/local/autopkg/python
"""
JamfScriptUploader processor for uploading items to Jamf Pro using AutoPkg
by G Pugh
"""
import os.path
import sys
from time import sleep
from autopkglib import ProcessorError # pylint: disable=import-error
# to use a base module in AutoPkg we need to add this path to the sys.path.
# this violates flake8 E402 (PEP8 imports) but is unavoidable, so the following
# imports require noqa comments for E402
sys.path.insert(0, os.path.dirname(__file__))
from JamfUploaderLib.JamfUploaderBase import JamfUploaderBase # noqa: E402
__all__ = ["JamfScriptUploader"]
class JamfScriptUploader(JamfUploaderBase):
description = (
"A processor for AutoPkg that will upload a script to a Jamf Cloud or "
"on-prem server."
)
input_variables = {
"JSS_URL": {
"required": True,
"description": "URL to a Jamf Pro server that the API user has write access "
"to, optionally set as a key in the com.github.autopkg "
"preference file.",
},
"API_USERNAME": {
"required": True,
"description": "Username of account with appropriate access to "
"jss, optionally set as a key in the com.github.autopkg "
"preference file.",
},
"API_PASSWORD": {
"required": True,
"description": "Password of api user, optionally set as a key in "
"the com.github.autopkg preference file.",
},
"script_path": {
"required": False,
"description": "Full path to the script to be uploaded",
},
"script_name": {
"required": False,
"description": "Name of the script in Jamf",
},
"script_category": {
"required": False,
"description": "Script category",
"default": "",
},
"script_priority": {
"required": False,
"description": "Script priority (BEFORE or AFTER)",
"default": "AFTER",
},
"osrequirements": {
"required": False,
"description": "Script OS requirements",
"default": "",
},
"script_info": {
"required": False,
"description": "Script info field",
"default": "",
},
"script_notes": {
"required": False,
"description": "Script notes field",
"default": "",
},
"script_parameter4": {
"required": False,
"description": "Script parameter 4 title",
"default": "",
},
"script_parameter5": {
"required": False,
"description": "Script parameter 5 title",
"default": "",
},
"script_parameter6": {
"required": False,
"description": "Script parameter 6 title",
"default": "",
},
"script_parameter7": {
"required": False,
"description": "Script parameter 7 title",
"default": "",
},
"script_parameter8": {
"required": False,
"description": "Script parameter 8 title",
"default": "",
},
"script_parameter9": {
"required": False,
"description": "Script parameter 9 title",
"default": "",
},
"script_parameter10": {
"required": False,
"description": "Script parameter 10 title",
"default": "",
},
"script_parameter11": {
"required": False,
"description": "Script parameter 11 title",
"default": "",
},
"replace_script": {
"required": False,
"description": "Overwrite an existing script if True.",
"default": False,
},
"sleep": {
"required": False,
"description": "Pause after running this processor for specified seconds.",
"default": "0",
},
}
output_variables = {
"script_name": {
"required": False,
"description": "Name of the uploaded script",
},
"jamfscriptuploader_summary_result": {
"description": "Description of interesting results.",
},
}
def upload_script(
self,
jamf_url,
script_name,
script_path,
category_id,
script_category,
script_info,
script_notes,
script_priority,
script_parameter4,
script_parameter5,
script_parameter6,
script_parameter7,
script_parameter8,
script_parameter9,
script_parameter10,
script_parameter11,
script_os_requirements,
token,
obj_id=0,
):
"""Update script metadata."""
# import script from file and replace any keys in the script
if os.path.exists(script_path):
with open(script_path, "r") as file:
script_contents = file.read()
else:
raise ProcessorError("Script does not exist!")
# substitute user-assignable keys
script_contents = self.substitute_assignable_keys(script_contents)
# priority has to be in upper case. Let's make it nice for the user
if script_priority:
script_priority = script_priority.upper()
# build the object
script_data = {
"name": script_name,
"info": script_info,
"notes": script_notes,
"priority": script_priority,
"categoryId": category_id,
"categoryName": script_category,
"parameter4": script_parameter4,
"parameter5": script_parameter5,
"parameter6": script_parameter6,
"parameter7": script_parameter7,
"parameter8": script_parameter8,
"parameter9": script_parameter9,
"parameter10": script_parameter10,
"parameter11": script_parameter11,
"osRequirements": script_os_requirements,
"scriptContents": script_contents,
}
self.output(
"Script data:",
verbose_level=2,
)
self.output(
script_data,
verbose_level=2,
)
script_json = self.write_json_file(script_data)
self.output("Uploading script..")
# if we find an object ID we put, if not, we post
object_type = "script"
if obj_id:
url = "{}/{}/{}".format(jamf_url, self.api_endpoints(object_type), obj_id)
else:
url = "{}/{}".format(jamf_url, self.api_endpoints(object_type))
count = 0
while True:
count += 1
self.output(
"Script upload attempt {}".format(count),
verbose_level=2,
)
request = "PUT" if obj_id else "POST"
r = self.curl(request=request, url=url, token=token, data=script_json)
# check HTTP response
if self.status_check(r, "Script", script_name, request) == "break":
break
if count > 5:
self.output("Script upload did not succeed after 5 attempts")
self.output("\nHTTP POST Response Code: {}".format(r.status_code))
raise ProcessorError("ERROR: Script upload failed ")
if int(self.sleep) > 30:
sleep(int(self.sleep))
else:
sleep(30)
return r
def main(self):
"""Do the main thing here"""
self.jamf_url = self.env.get("JSS_URL")
self.jamf_user = self.env.get("API_USERNAME")
self.jamf_password = self.env.get("API_PASSWORD")
self.script_path = self.env.get("script_path")
self.script_name = self.env.get("script_name")
self.script_category = self.env.get("script_category")
self.script_priority = self.env.get("script_priority")
self.osrequirements = self.env.get("osrequirements")
self.script_info = self.env.get("script_info")
self.script_notes = self.env.get("script_notes")
self.script_parameter4 = self.env.get("script_parameter4")
self.script_parameter5 = self.env.get("script_parameter5")
self.script_parameter6 = self.env.get("script_parameter6")
self.script_parameter7 = self.env.get("script_parameter7")
self.script_parameter8 = self.env.get("script_parameter8")
self.script_parameter9 = self.env.get("script_parameter9")
self.script_parameter10 = self.env.get("script_parameter10")
self.script_parameter11 = self.env.get("script_parameter11")
self.replace = self.env.get("replace_script")
self.sleep = self.env.get("sleep")
# handle setting replace in overrides
if not self.replace or self.replace == "False":
self.replace = False
# clear any pre-existing summary result
if "jamfscriptuploader_summary_result" in self.env:
del self.env["jamfscriptuploader_summary_result"]
script_uploaded = False
# obtain the relevant credentials
token = self.handle_uapi_auth(self.jamf_url, self.jamf_user, self.jamf_password)
# get the id for a category if supplied
if self.script_category:
self.output("Checking categories for {}".format(self.script_category))
# check for existing category - requires obj_name
obj_type = "category"
obj_name = self.script_category
category_id = self.get_uapi_obj_id_from_name(
self.jamf_url,
obj_type,
obj_name,
token,
)
if not category_id:
self.output("WARNING: Category not found!")
category_id = "-1"
else:
self.output(
"Category {} found: ID={}".format(self.script_category, category_id)
)
else:
self.script_category = ""
category_id = "-1"
# handle files with a relative path
if not self.script_path.startswith("/"):
found_template = self.get_path_to_file(self.script_path)
if found_template:
self.script_path = found_template
else:
raise ProcessorError(f"ERROR: Script file {self.script_path} not found")
# now start the process of uploading the object
if not self.script_name:
self.script_name = os.path.basename(self.script_path)
# check for existing script
self.output(
"Checking for existing '{}' on {}".format(self.script_name, self.jamf_url)
)
self.output(
"Full path: {}".format(self.script_path),
verbose_level=2,
)
obj_type = "script"
obj_name = self.script_name
obj_id = self.get_uapi_obj_id_from_name(
self.jamf_url,
obj_type,
obj_name,
token,
)
if obj_id:
self.output(
"Script '{}' already exists: ID {}".format(self.script_name, obj_id)
)
if self.replace:
self.output(
"Replacing existing script as 'replace_script' is set to {}".format(
self.replace
),
verbose_level=1,
)
else:
self.output(
"Not replacing existing script. Use replace_script='True' to enforce.",
verbose_level=1,
)
return
# post the script
self.upload_script(
self.jamf_url,
self.script_name,
self.script_path,
category_id,
self.script_category,
self.script_info,
self.script_notes,
self.script_priority,
self.script_parameter4,
self.script_parameter5,
self.script_parameter6,
self.script_parameter7,
self.script_parameter8,
self.script_parameter9,
self.script_parameter10,
self.script_parameter11,
self.osrequirements,
token,
obj_id,
)
script_uploaded = True
# output the summary
self.env["script_name"] = self.script_name
self.env["script_uploaded"] = script_uploaded
if script_uploaded:
self.env["jamfscriptuploader_summary_result"] = {
"summary_text": "The following scripts were created or updated in Jamf Pro:",
"report_fields": [
"script",
"path",
"category",
"priority",
"os_req",
"info",
"notes",
"P4",
"P5",
"P6",
"P7",
"P8",
"P9",
"P10",
"P11",
],
"data": {
"script": self.script_name,
"path": self.script_path,
"category": self.script_category,
"priority": str(self.script_priority),
"info": self.script_info,
"os_req": self.osrequirements,
"notes": self.script_notes,
"P4": self.script_parameter4,
"P5": self.script_parameter5,
"P6": self.script_parameter6,
"P7": self.script_parameter7,
"P8": self.script_parameter8,
"P9": self.script_parameter9,
"P10": self.script_parameter10,
"P11": self.script_parameter11,
},
}
if __name__ == "__main__":
PROCESSOR = JamfScriptUploader()
PROCESSOR.execute_shell()
|
3,994 | 473c653da54ebdb7fe8a9eefc166cab167f43357 | """Config for a linear regression model evaluated on a diabetes dataset."""
from dbispipeline.evaluators import GridEvaluator
import dbispipeline.result_handlers as result_handlers
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from nlp4musa2020.dataloaders.alf200k import ALF200KLoader
from nlp4musa2020.dataloaders.alf200k import genre_target_labels
from nlp4musa2020.dataloaders.vectorizer import lda
from nlp4musa2020.dataloaders.vectorizer import tfidf
import nlp4musa2020.evaluators as evaluators
from nlp4musa2020.models.simplenn_genre import SimpleGenreNN
dataloader = ALF200KLoader(
path='data/processed/dataset-lfm-genres.pickle',
load_feature_groups=[
'rhymes',
'statistical',
'statistical_time',
'explicitness',
'audio',
],
text_vectorizers=lda() + tfidf(),
target=genre_target_labels(),
)
pipeline = Pipeline([
('scaler', StandardScaler()),
('model', SimpleGenreNN(epochs=50)),
])
evaluator = GridEvaluator(
parameters={
'model__dense_sizes': [
(32, 32),
(64, 64),
],
'model__dropout_rate': [0.1],
},
grid_parameters=evaluators.grid_parameters_genres(),
)
result_handlers = [
result_handlers.print_gridsearch_results,
]
|
3,995 | 1c1f1dab1ae2e8f18536784a5dec9de37c8a8582 | def test_{{ project_name }}():
assert True
|
3,996 | 2e66a31638eb4e619f14a29d5d3847482d207003 | from django.db import connection
from .models import Order
from .models import Package
from .models import DeliveryStatus
from .models import CalcParameters
class DataService:
def __init__(self):
pass
@staticmethod
def get_all_orders():
orders = Order.objects.order_by('-order_date')
# create new variables for display
for o in orders:
o.package_names = ', '.join([p.name for p in list(o.packages.all())])
o.delivery_date = o.deliveryinfo_set.get().delivery_date
o.delivery_charge = o.deliveryinfo_set.get().charge
return orders
@staticmethod
def get_all_packages():
return Package.objects.all()
@staticmethod
def get_shopping_list_details(order_ids, dish_ids=None):
"""
:param order_ids: a list of order ids as int or str. Or a single order id as int or str
:param dish_ids: Restrict shopping list to these dishes.
A list of dish ids as int or str. Or a single order id as int or str.
:return: Return shopping list for the given orders
"""
if isinstance(order_ids, str):
order_ids = [int(order_ids)]
if isinstance(order_ids, int):
order_ids = [order_ids]
if not isinstance(order_ids, list):
raise Exception('Expecting a single order id or a list of order ids. Got [{ids}]'.format(ids=order_ids))
SQL = """select
d.id dish_id,
d.name dish_name,
sum(op.package_qty) dish_qty,
sum(d.portion_count) portion_count,
i.name ingredient_name,
round(sum(di.ingredient_weight * op.package_qty), 2) total_ingredient_weight,
round(sum(di.ingredient_weight * (i.cost_price/i.measure) * op.package_qty), 2) total_cost_price
from
orders o, order_package op, package_dish pd, dish d, dish_ingredient di, ingredient i
where
o.id = op.order_id and
op.package_id = pd.package_id and
pd.dish_id = d.id and
d.id = di.dish_id and
di.ingredient_id = i.id and
o.id in ({ids})
group by d.id, d.name, i.name
order by d.name, i.name""".format(ids=','.join([str(x) for x in order_ids]))
with connection.cursor() as cursor:
cursor.execute(SQL)
rows = cursor.fetchall()
# return a list of tuples rather than a tuple of tuples
return [row for row in rows]
class StaticDataDao(type):
@property
def delivery_statuses(cls):
if getattr(cls, '_delivery_statuses', None) is None:
cls._delivery_statuses = list(DeliveryStatus.objects.all())
return cls._delivery_statuses
@property
def calc_parameters(cls):
if getattr(cls, '_calc_parameters', None) is None:
m = {}
for p in list(CalcParameters.objects.all()):
m[p.name] = p.value
cls._calc_parameters = m
return cls._calc_parameters
class StaticDataService(object):
__metaclass__ = StaticDataDao
|
3,997 | 4acdde648b5ec32c078579e725e6ae035298f25a | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-15 17:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Personal',
fields=[
('post_apply', models.CharField(max_length=150)),
('department', models.CharField(max_length=50)),
('application_no', models.BigAutoField(db_column='APPLICATION_NO', primary_key=True, serialize=False)),
('email', models.EmailField(max_length=254)),
('category', models.CharField(max_length=30)),
('pwd_status', models.CharField(max_length=5)),
('internal_candidate', models.BooleanField()),
('profile_image', models.ImageField(upload_to='')),
('first_name', models.CharField(max_length=100)),
('middle_name', models.CharField(max_length=100)),
('last_name', models.CharField(max_length=100)),
('father_name', models.CharField(max_length=100)),
('dob', models.DateField()),
('age', models.IntegerField()),
('aadhar_card', models.BigIntegerField()),
('gender', models.CharField(max_length=10)),
('nationality', models.CharField(max_length=20)),
('marital_status', models.CharField(max_length=10)),
('correspondence_address', models.CharField(max_length=200)),
('permanent_address', models.CharField(max_length=200)),
('mobile', models.CharField(max_length=10)),
('areas_of_specialization', models.TextField(max_length=300)),
('phd_thesis_title', models.CharField(max_length=200)),
('date_of_acquiring_phd', models.DateField()),
],
),
]
|
3,998 | cbcbc0d01c32693ebbdbcf285efdc8e521c447ee | import pygame
from evolution import Darwin
from Sensor import Robot, obstacleArray
# Game Settings
pygame.init()
background_colour = (0, 0, 0)
(width, height) = (1000, 600)
target_location = (800, 300)
screen = pygame.display.set_mode((width, height))
pygame.display.set_caption("Omar's Simulation")
screen.fill(background_colour)
# GA Hyper parameters
population_size = 50
elitism = 4
# Agent Initialisation
robots = []
for i in range(population_size):
robots.append(Robot(175, 300, 10, 360, 9, all, set_weights=None))
darwin = Darwin(robot_array=robots, population_size=population_size, elitism=4, mutation_rate=0.1)
if __name__ == '__main__':
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
screen.fill(background_colour)
pygame.draw.rect(screen, (255, 255, 255), (10, 10, width - 20, height - 20), 1)
pygame.draw.circle(screen, (255, 10, 0), target_location, 10, 0)
# pygame.draw.line(screen, (255, 0, 0), (800, 10), (800, 590))
for obstacle in obstacleArray:
obstacle.drawShape()
# obstacle.move_y()
# pygame.draw.circle(screen, (0, 0, 255), (500, 300), 100, 0)
# pygame.draw.circle(screen, (0, 255, 20), (200, 300), 75, 0)
# pygame.draw.polygon(screen, (255, 255, 255), new_list, 1)
# for pedestrian in all.start_pedestrians:
# pedestrian.move()
# pedestrian.update()
# all.introduce()
for robot in darwin.robot_array:
robot.move()
robot.update()
robot.collide()
robot.evaluate_fitness()
if darwin.check_if_all_dead():
darwin.get_stats()
darwin.make_next_generation()
pygame.display.update()
|
3,999 | fc5b9117ecf56401a888e2b6a5e244f9ab115e41 | # Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from unittest.mock import AsyncMock, Mock, patch
from twisted.test.proto_helpers import MemoryReactor
from synapse.api.constants import EventTypes, JoinRules
from synapse.api.errors import Codes, ResourceLimitError
from synapse.api.filtering import Filtering
from synapse.api.room_versions import RoomVersions
from synapse.handlers.sync import SyncConfig, SyncResult
from synapse.rest import admin
from synapse.rest.client import knock, login, room
from synapse.server import HomeServer
from synapse.types import UserID, create_requester
from synapse.util import Clock
import tests.unittest
import tests.utils
class SyncTestCase(tests.unittest.HomeserverTestCase):
"""Tests Sync Handler."""
servlets = [
admin.register_servlets,
knock.register_servlets,
login.register_servlets,
room.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.sync_handler = self.hs.get_sync_handler()
self.store = self.hs.get_datastores().main
# AuthBlocking reads from the hs' config on initialization. We need to
# modify its config instead of the hs'
self.auth_blocking = self.hs.get_auth_blocking()
def test_wait_for_sync_for_user_auth_blocking(self) -> None:
user_id1 = "@user1:test"
user_id2 = "@user2:test"
sync_config = generate_sync_config(user_id1)
requester = create_requester(user_id1)
self.reactor.advance(100) # So we get not 0 time
self.auth_blocking._limit_usage_by_mau = True
self.auth_blocking._max_mau_value = 1
# Check that the happy case does not throw errors
self.get_success(self.store.upsert_monthly_active_user(user_id1))
self.get_success(
self.sync_handler.wait_for_sync_for_user(requester, sync_config)
)
# Test that global lock works
self.auth_blocking._hs_disabled = True
e = self.get_failure(
self.sync_handler.wait_for_sync_for_user(requester, sync_config),
ResourceLimitError,
)
self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
self.auth_blocking._hs_disabled = False
sync_config = generate_sync_config(user_id2)
requester = create_requester(user_id2)
e = self.get_failure(
self.sync_handler.wait_for_sync_for_user(requester, sync_config),
ResourceLimitError,
)
self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
def test_unknown_room_version(self) -> None:
"""
A room with an unknown room version should not break sync (and should be excluded).
"""
inviter = self.register_user("creator", "pass", admin=True)
inviter_tok = self.login("@creator:test", "pass")
user = self.register_user("user", "pass")
tok = self.login("user", "pass")
# Do an initial sync on a different device.
requester = create_requester(user)
initial_result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
requester, sync_config=generate_sync_config(user, device_id="dev")
)
)
# Create a room as the user.
joined_room = self.helper.create_room_as(user, tok=tok)
# Invite the user to the room as someone else.
invite_room = self.helper.create_room_as(inviter, tok=inviter_tok)
self.helper.invite(invite_room, targ=user, tok=inviter_tok)
knock_room = self.helper.create_room_as(
inviter, room_version=RoomVersions.V7.identifier, tok=inviter_tok
)
self.helper.send_state(
knock_room,
EventTypes.JoinRules,
{"join_rule": JoinRules.KNOCK},
tok=inviter_tok,
)
channel = self.make_request(
"POST",
"/_matrix/client/r0/knock/%s" % (knock_room,),
b"{}",
tok,
)
self.assertEqual(200, channel.code, channel.result)
# The rooms should appear in the sync response.
result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
requester, sync_config=generate_sync_config(user)
)
)
self.assertIn(joined_room, [r.room_id for r in result.joined])
self.assertIn(invite_room, [r.room_id for r in result.invited])
self.assertIn(knock_room, [r.room_id for r in result.knocked])
# Test a incremental sync (by providing a since_token).
result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
requester,
sync_config=generate_sync_config(user, device_id="dev"),
since_token=initial_result.next_batch,
)
)
self.assertIn(joined_room, [r.room_id for r in result.joined])
self.assertIn(invite_room, [r.room_id for r in result.invited])
self.assertIn(knock_room, [r.room_id for r in result.knocked])
# Poke the database and update the room version to an unknown one.
for room_id in (joined_room, invite_room, knock_room):
self.get_success(
self.hs.get_datastores().main.db_pool.simple_update(
"rooms",
keyvalues={"room_id": room_id},
updatevalues={"room_version": "unknown-room-version"},
desc="updated-room-version",
)
)
# Blow away caches (supported room versions can only change due to a restart).
self.store.get_rooms_for_user_with_stream_ordering.invalidate_all()
self.store.get_rooms_for_user.invalidate_all()
self.store._get_event_cache.clear()
self.store._event_ref.clear()
# The rooms should be excluded from the sync response.
# Get a new request key.
result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
requester, sync_config=generate_sync_config(user)
)
)
self.assertNotIn(joined_room, [r.room_id for r in result.joined])
self.assertNotIn(invite_room, [r.room_id for r in result.invited])
self.assertNotIn(knock_room, [r.room_id for r in result.knocked])
# The rooms should also not be in an incremental sync.
result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
requester,
sync_config=generate_sync_config(user, device_id="dev"),
since_token=initial_result.next_batch,
)
)
self.assertNotIn(joined_room, [r.room_id for r in result.joined])
self.assertNotIn(invite_room, [r.room_id for r in result.invited])
self.assertNotIn(knock_room, [r.room_id for r in result.knocked])
def test_ban_wins_race_with_join(self) -> None:
"""Rooms shouldn't appear under "joined" if a join loses a race to a ban.
A complicated edge case. Imagine the following scenario:
* you attempt to join a room
* racing with that is a ban which comes in over federation, which ends up with
an earlier stream_ordering than the join.
* you get a sync response with a sync token which is _after_ the ban, but before
the join
* now your join lands; it is a valid event because its `prev_event`s predate the
ban, but will not make it into current_state_events (because bans win over
joins in state res, essentially).
* When we do a sync from the incremental sync, the only event in the timeline
is your join ... and yet you aren't joined.
The ban coming in over federation isn't crucial for this behaviour; the key
requirements are:
1. the homeserver generates a join event with prev_events that precede the ban
(so that it passes the "are you banned" test)
2. the join event has a stream_ordering after that of the ban.
We use monkeypatching to artificially trigger condition (1).
"""
# A local user Alice creates a room.
owner = self.register_user("alice", "password")
owner_tok = self.login(owner, "password")
room_id = self.helper.create_room_as(owner, is_public=True, tok=owner_tok)
# Do a sync as Alice to get the latest event in the room.
alice_sync_result: SyncResult = self.get_success(
self.sync_handler.wait_for_sync_for_user(
create_requester(owner), generate_sync_config(owner)
)
)
self.assertEqual(len(alice_sync_result.joined), 1)
self.assertEqual(alice_sync_result.joined[0].room_id, room_id)
last_room_creation_event_id = (
alice_sync_result.joined[0].timeline.events[-1].event_id
)
# Eve, a ne'er-do-well, registers.
eve = self.register_user("eve", "password")
eve_token = self.login(eve, "password")
# Alice preemptively bans Eve.
self.helper.ban(room_id, owner, eve, tok=owner_tok)
# Eve syncs.
eve_requester = create_requester(eve)
eve_sync_config = generate_sync_config(eve)
eve_sync_after_ban: SyncResult = self.get_success(
self.sync_handler.wait_for_sync_for_user(eve_requester, eve_sync_config)
)
# Sanity check this sync result. We shouldn't be joined to the room.
self.assertEqual(eve_sync_after_ban.joined, [])
# Eve tries to join the room. We monkey patch the internal logic which selects
# the prev_events used when creating the join event, such that the ban does not
# precede the join.
mocked_get_prev_events = patch.object(
self.hs.get_datastores().main,
"get_prev_events_for_room",
new_callable=AsyncMock,
return_value=[last_room_creation_event_id],
)
with mocked_get_prev_events:
self.helper.join(room_id, eve, tok=eve_token)
# Eve makes a second, incremental sync.
eve_incremental_sync_after_join: SyncResult = self.get_success(
self.sync_handler.wait_for_sync_for_user(
eve_requester,
eve_sync_config,
since_token=eve_sync_after_ban.next_batch,
)
)
# Eve should not see herself as joined to the room.
self.assertEqual(eve_incremental_sync_after_join.joined, [])
# If we did a third initial sync, we should _still_ see eve is not joined to the room.
eve_initial_sync_after_join: SyncResult = self.get_success(
self.sync_handler.wait_for_sync_for_user(
eve_requester,
eve_sync_config,
since_token=None,
)
)
self.assertEqual(eve_initial_sync_after_join.joined, [])
_request_key = 0
def generate_sync_config(
user_id: str, device_id: Optional[str] = "device_id"
) -> SyncConfig:
"""Generate a sync config (with a unique request key)."""
global _request_key
_request_key += 1
return SyncConfig(
user=UserID.from_string(user_id),
filter_collection=Filtering(Mock()).DEFAULT_FILTER_COLLECTION,
is_guest=False,
request_key=("request_key", _request_key),
device_id=device_id,
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.