import os
import numpy as np
import matplotlib.pyplot as plt
import utils.io as io
from global_constants import misc_paths
def get_infonce_data(infonce_dir,layers):
infonce_data = io.load_json_object(
os.path.join(
infonce_dir,
f'infonce_{layers}_layer.json'))
iters = []
losses = []
for time,it,loss in infonce_data:
if it==0:
continue
iters.append(it)
losses.append(round(loss,2))
return iters, losses
def get_acc_data(acc_dir,iters):
accs = [None]*len(iters)
for i,it in enumerate(iters):
results_json = os.path.join(acc_dir,f'results_val_{it}.json')
if not os.path.exists(results_json):
continue
accs[i] = io.load_json_object(results_json)['pt_recall']
return accs
def create_point_label(x,y,label,color,markersize,marker):
plt.plot(x,y,c=color,markersize=markersize,marker=marker)
plt.annotate(label,(x+0.025,y),c=color,va='center',fontsize=9,family='serif')
def main():
infonce_dir = os.path.join(
os.getcwd(),
'exp/pretrain_coco_noun_negs/infonce_acc_plot')
exp_dir = '/shared/rsaas/tgupta6/Data/context-regions/coco_exp'
colors = ['r','g','b']
num_layers = [1,2,3]
infonce_losses = {}
handles = [None]*3
labels = ['Linear', 'MLP w/ 1 hidden layer', 'MLP w/ 2 hidden layers']
arrowcolor='k' #(0.3,0.3,0.3)
ha = ['right','left','right']
for i,l in enumerate(num_layers):
iters,losses = get_infonce_data(infonce_dir,l)
acc_dir = os.path.join(
exp_dir,
f'loss_wts_neg_noun_1_self_sup_1_lang_sup_1_no_context_vgdet_nonlinear_infonce_{l}_layer_adj_batch_50')
accs = get_acc_data(acc_dir,iters)
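        # InfoNCE is a lower bound on mutual information: I(x;y) >= log(N) - L_InfoNCE,
        # where N is the batch size (50 here), hence the log(50) term below.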
bounds = [np.log(50)-infonce for infonce in losses]
handles[i], = plt.plot(bounds,accs,c=colors[i],markersize=0,marker='o',linewidth=1.5,label=labels[i])
k = np.argmax(accs)
labels.append(iters[k])
plt.annotate(
str(iters[k]//1000) + 'K Iters',
c=arrowcolor,
xy=(bounds[k],accs[k]),
xytext=(3.35,accs[k]),
fontsize=9,
family='serif',
arrowprops=dict(arrowstyle="->",linestyle='-',ec=arrowcolor,fc=arrowcolor),
va='center')
plt.plot(bounds[0],accs[0],c=colors[i],markersize=4,marker='o')
plt.plot(bounds[k],accs[k],c=colors[i],markersize=6,marker='*')
plt.plot(bounds[-1],accs[-1],c=colors[i],markersize=4,marker='s')
# Manual legend for iterations
lx = 3.04 #49.45
ly = 73 #66
d = 0.8
#plt.annotate('Iterations:',(lx-0.005,ly),c=arrowcolor,va='center',fontsize=9,family='serif',weight='bold')
create_point_label(lx,ly,'4K Iters',arrowcolor,markersize=4,marker='o')
create_point_label(lx,ly-d,'80K Iters',arrowcolor,markersize=4,marker='s')
create_point_label(lx,ly-2*d,'Best Accuracy',arrowcolor,markersize=6,marker='*')
# Legend for layers
plt.plot()
plt.legend(
handles=handles,
loc='upper left',
frameon=False,
prop={'size':9,'family':'serif'})
plt.xlabel("InfoNCE lower bound on COCO (Val)",fontsize=9,family='serif')
plt.ylabel('Pointing accuracy on Flickr30k Entities (Val)',fontsize=9,family='serif')
plt.yticks(size=9,family='serif')
plt.xticks(size=9,family='serif')
# a = plt.gca()
# import pdb; pdb.set_trace()
# a.set_xticklabels(a.get_xticks(), {'family':'serif'})
# a.set_yticklabels(a.get_yticks(), {'family':'serif'})
figname = os.path.join(misc_paths['scratch_dir'],'infonce_acc_plot.png')
plt.savefig(figname,dpi=600,bbox_inches='tight')
if __name__=='__main__':
    main()
import sys
sys.path.append("../../")
from skimage.data import astronaut, camera
from sciwx.canvas import ICanvas
from sciapp.action import Tool
import wx
class TestTool(Tool):
def __init__(self):
Tool.__init__(self)
def mouse_down(self, image, x, y, btn, **key):
print(
"x:%d y:%d btn:%d ctrl:%s alt:%s shift:%s"
% (x, y, btn, key["ctrl"], key["alt"], key["shift"])
)
def mouse_up(self, image, x, y, btn, **key):
pass
def mouse_move(self, image, x, y, btn, **key):
pass
def mouse_wheel(self, image, x, y, d, **key):
image.img[:] = image.img + d
key["canvas"].update()
if __name__ == "__main__":
app = wx.App()
frame = wx.Frame(None)
canvas = ICanvas(frame, autofit=True)
canvas.set_img(camera())
canvas.set_tool(TestTool())
frame.Show()
app.MainLoop()
import os
from aif360.datasets import BinaryLabelDataset
from aif360.metrics import ClassificationMetric
import numpy as np
import argparse
import pandas as pd
import boto3
import botocore
import json
from flask import Flask, request, abort
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
def dataset_wrapper(outcome, protected, unprivileged_groups, privileged_groups, favorable_label, unfavorable_label):
""" A wrapper function to create aif360 dataset from outcome and protected in numpy array format.
"""
df = pd.DataFrame(data=outcome,
columns=['outcome'])
df['race'] = protected
dataset = BinaryLabelDataset(favorable_label=favorable_label,
unfavorable_label=unfavorable_label,
df=df,
label_names=['outcome'],
protected_attribute_names=['race'],
unprivileged_protected_attributes=unprivileged_groups)
return dataset
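# A hypothetical usage sketch (values are illustrative only):
# ds = dataset_wrapper(outcome=np.array([0., 1., 1.]), protected=np.array([0., 4., 4.]),
#                      unprivileged_groups=[{'race': 4.0}], privileged_groups=[{'race': 0.0}],
#                      favorable_label=0.0, unfavorable_label=1.0)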
def get_s3_item(client, bucket, s3_path, name):
try:
client.Bucket(bucket).download_file(s3_path, name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
print("The object does not exist.")
else:
raise
def fairness_check(s3_url, bucket_name, s3_username, s3_password, training_id):
cos = boto3.resource("s3",
endpoint_url=s3_url,
aws_access_key_id=s3_username,
aws_secret_access_key=s3_password)
y_test_out = 'y_test.out'
p_test_out = 'p_test.out'
y_pred_out = 'y_pred.out'
get_s3_item(cos, bucket_name, training_id + '/' + y_test_out, y_test_out)
get_s3_item(cos, bucket_name, training_id + '/' + p_test_out, p_test_out)
get_s3_item(cos, bucket_name, training_id + '/' + y_pred_out, y_pred_out)
"""Need to generalize the protected features"""
unprivileged_groups = [{'race': 4.0}]
privileged_groups = [{'race': 0.0}]
favorable_label = 0.0
unfavorable_label = 1.0
"""Load the necessary labels and protected features for fairness check"""
y_test = np.loadtxt(y_test_out)
p_test = np.loadtxt(p_test_out)
y_pred = np.loadtxt(y_pred_out)
"""Calculate the fairness metrics"""
original_test_dataset = dataset_wrapper(outcome=y_test, protected=p_test,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups,
favorable_label=favorable_label,
unfavorable_label=unfavorable_label)
plain_predictions_test_dataset = dataset_wrapper(outcome=y_pred, protected=p_test,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups,
favorable_label=favorable_label,
unfavorable_label=unfavorable_label)
classified_metric_nodebiasing_test = ClassificationMetric(original_test_dataset,
plain_predictions_test_dataset,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
TPR = classified_metric_nodebiasing_test.true_positive_rate()
TNR = classified_metric_nodebiasing_test.true_negative_rate()
bal_acc_nodebiasing_test = 0.5*(TPR+TNR)
print("#### Plain model - without debiasing - classification metrics on test set")
metrics = {
"Classification accuracy": classified_metric_nodebiasing_test.accuracy(),
"Balanced classification accuracy": bal_acc_nodebiasing_test,
"Statistical parity difference": classified_metric_nodebiasing_test.statistical_parity_difference(),
"Disparate impact": classified_metric_nodebiasing_test.disparate_impact(),
"Equal opportunity difference": classified_metric_nodebiasing_test.equal_opportunity_difference(),
"Average odds difference": classified_metric_nodebiasing_test.average_odds_difference(),
"Theil index": classified_metric_nodebiasing_test.theil_index(),
"False negative rate difference": classified_metric_nodebiasing_test.false_negative_rate_difference()
}
print("metrics: ", metrics)
return metrics
# with open(metric_path, "w") as report:
# report.write(json.dumps(metrics))
@app.route('/', methods=['POST'])
def fairness_api():
try:
s3_url = request.json['aws_endpoint_url']
bucket_name = request.json['training_results_bucket']
s3_username = request.json['aws_access_key_id']
s3_password = request.json['aws_secret_access_key']
training_id = request.json['model_id']
except:
abort(400)
return json.dumps(fairness_check(s3_url, bucket_name, s3_username, s3_password, training_id))
@app.route('/', methods=['OPTIONS'])
def fairness_api_options():
return "200"
if __name__ == "__main__":
app.run(debug=True,host='0.0.0.0',port=int(os.environ.get('PORT', 8080)))
# Copyright (c) Niall Asher 2022
from os import remove, path, mkdir
from socialserver.util.output import console
from socialserver.util.config import config
from socialserver import application
from werkzeug.serving import make_server
from threading import Thread
class TestingServer(Thread):
def __init__(self, application_object):
Thread.__init__(self)
self.server = make_server("127.0.0.1", 9801, application_object)
self.ctx = application_object.app_context()
self.ctx.push()
def run(self):
console.log("Starting test server.")
self.server.serve_forever()
def kill(self):
console.log("Killing test server.")
self.server.shutdown()
def pytest_sessionstart():
config.auth.registration.approval_required = False
reqd_paths = ["/tmp/socialserver_image_testing", "/tmp/socialserver_video_testing"]
for reqd_path in reqd_paths:
        if not path.exists(reqd_path):
            mkdir(reqd_path)
# start a copy of the flask server in a background
# thread, so we can test against it.
application_thread.start()
def pytest_sessionfinish():
# TODO: remove old test images & videos etc.,
    # rather than just leaving it to the OS
application_thread.kill()
remove("/tmp/test.db")
exit(0)
application_thread = TestingServer(application)
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-07-17 03:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wildlifecompliance', '0257_auto_20190717_1034'),
]
operations = [
migrations.AlterField(
model_name='inspection',
name='planned_for_time',
field=models.TimeField(blank=True, null=True),
),
]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#@created: 08.09.2011
#@author: Aleksey Komissarov
#@contact: ad3002@gmail.com
from PyExp import AbstractModel
class ChomosomeModel(AbstractModel):
''' Chromosome model.
Dumpable attributes:
- "chr_genome",
- "chr_number",
- "chr_taxon",
- "chr_prefix",
- "chr_gpid",
- "chr_acronym",
- "chr_contigs",
- "chr_length",
- "chr_mean_gc",
- "chr_trs_all",
- "chr_trs_3000",
- "chr_trs_all_proc",
- "chr_trs_3000_proc",
- "chr_trs_all_length",
- "chr_trs_3000_length",
- "genome_gaps",
- "chr_sum_gc",
'''
dumpable_attributes = [
"chr_genome",
"chr_number",
"chr_taxon",
"chr_prefix",
"chr_gpid",
"chr_acronym",
"chr_contigs",
"chr_length",
"chr_mean_gc",
"chr_trs_all",
"chr_trs_3000",
"chr_trs_all_proc",
"chr_trs_3000_proc",
"chr_trs_all_length",
"chr_trs_3000_length",
"genome_gaps",
"chr_sum_gc",
]
def preprocess_data(self):
if self.chr_trs_all_length:
self.chr_trs_all_proc = self.chr_trs_all_length / float(self.chr_length)
if self.chr_trs_3000_length:
self.chr_trs_3000_proc = self.chr_trs_3000_length / float(self.chr_length)
if not self.chr_mean_gc:
self.chr_mean_gc = self.chr_sum_gc / self.chr_contigs
import os
from dotenv import load_dotenv
load_dotenv()
class Config():
DEBUG = True
ENV = 'dev'
JWT_SECRET = os.getenv('JWT_SECRET', 'secret')
SQLALCHEMY_DATABASE_URI = os.getenv('DEV_DB_URI', 'sqlite://')
SQLALCHEMY_TRACK_MODIFICATIONS = False
TESTING = False
class TestingConfig(Config):
DEBUG = False
ENV = 'testing'
SQLALCHEMY_DATABASE_URI = os.getenv('TESTING_DB_URI', 'sqlite://')
TESTING = True
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-04-18 14:07
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('DevicesAPP', '0002_auto_20180404_1557'),
]
operations = [
migrations.AlterModelOptions(
name='maindevicevarweeklyschedules',
options={'permissions': (('view_maindevicevarweeklyschedules', 'Can see available automation schedules'), ('activate_maindevicevarweeklyschedules', 'Can change the state of the schedules')), 'verbose_name': 'Main device var weekly schedule', 'verbose_name_plural': 'Main device var weekly schedules'},
),
]
# -*- coding: utf-8 -*-
#############################################################################
#
# Copyright © Dragon Dollar Limited
# contact: contact@dragondollar.com
#
# This software is a collection of webservices designed to provide a secure
# and scalable framework to build e-commerce websites.
#
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL-B
# license as circulated by CEA, CNRS and INRIA at the following URL
# " http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-B license and that you accept its terms.
#
#############################################################################
import settings
import json
from django.core.paginator import Paginator, EmptyPage, InvalidPage
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.generic import View, ListView
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from django.views.generic.base import TemplateResponseMixin
from sorl.thumbnail import get_thumbnail
from fouillis.views import AdminLoginRequiredMixin
from notifs.forms import NotifForm
from notifs.models import Notif, NotifTemplateImage
class NotifListView(AdminLoginRequiredMixin, ListView):
template_name = "notif_list.html"
model = Notif
form_class = NotifForm
paginate_by = settings.DEFAULT_PAGE_SIZE
def get_queryset(self):
queryset = self.model.objects.filter(
mother_brand=self.request.user.get_profile().work_for)
if getattr(self, 'search', None):
queryset = queryset.filter(name__icontains=self.search)
return queryset
def post(self, request, *args, **kwargs):
self.search = self.request.POST.get('search')
return self.get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(NotifListView, self).get_context_data(**kwargs)
context.update({
'search': getattr(self, 'search', None) or '',
})
return context
class NewNotifView(AdminLoginRequiredMixin, CreateView):
model = Notif
form_class = NotifForm
template_name = "notif.html"
def post(self, request, *args, **kwargs):
self.object = None
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
notif = form.save(commit=True)
pp_pks = [int(pp['pk']) for pp in form.images.cleaned_data
if not pp['DELETE']]
notif.images = NotifTemplateImage.objects.filter(pk__in=pp_pks)
notif.save()
return self.form_valid(form)
return self.form_invalid(form)
def get_form_kwargs(self):
kwargs = super(NewNotifView, self).get_form_kwargs()
kwargs.update({'request': self.request})
return kwargs
def get_success_url(self):
return reverse('notif_list')
class EditNotifView(AdminLoginRequiredMixin, UpdateView):
model = Notif
form_class = NotifForm
template_name = "notif.html"
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super(EditNotifView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
notif = form.save(commit=True)
pp_pks = [int(pp['pk']) for pp in form.images.cleaned_data
if not pp['DELETE']]
notif.images = NotifTemplateImage.objects.filter(pk__in=pp_pks)
notif.save()
return self.form_valid(form)
return self.form_invalid(form)
def get_success_url(self):
pk = self.kwargs.get('pk', None)
return reverse('edit_notif', args=[pk])
def get_form_kwargs(self):
kwargs = super(EditNotifView, self).get_form_kwargs()
kwargs.update({'request': self.request})
return kwargs
def get_context_data(self, **kwargs):
context = super(EditNotifView, self).get_context_data(**kwargs)
context['pk'] = self.kwargs.get('pk', None)
return context
class PreviewTemplateContentView(AdminLoginRequiredMixin, CreateView):
model = Notif
form_class = NotifForm
template_name = "template_editor.html"
def get_form_kwargs(self):
kwargs = super(CreateView, self).get_form_kwargs()
initial = {}
images = []
for _img in NotifTemplateImage.objects.all():
images.append({
'pk': _img.pk,
'url': _img.image.url,
'thumb_url': get_thumbnail(_img.image, '40x43').url,
})
initial.update({'images': images})
kwargs.update({'initial': initial})
return kwargs
class DeleteNotifView(AdminLoginRequiredMixin, DeleteView):
model = Notif
form_class = NotifForm
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.delete()
return HttpResponse(content=json.dumps({'pk': self.kwargs.get('pk', None)}),
mimetype="application/json")
class UploadImageView(TemplateResponseMixin, View):
template_name = ""
def post(self, request, *args, **kwargs):
if request.FILES:
new_img = request.FILES[u'files[]']
if new_img.size > settings.SALE_IMG_UPLOAD_MAX_SIZE:
content = {'status': 'max_limit_error'}
return HttpResponse(json.dumps(content), mimetype='application/json')
new_media = NotifTemplateImage(image=request.FILES[u'files[]'])
new_media.save()
thumb = get_thumbnail(new_media.image, '40x43')
to_ret = {
'status': 'ok',
'pk': new_media.pk,
'url': new_media.image.url,
'thumb_url': thumb.url,
}
return HttpResponse(json.dumps(to_ret), mimetype="application/json")
        # HttpResponseBadRequest is a response object, not an exception, so return it
        return HttpResponseBadRequest(_("Please upload a picture."))
import heapq
def solution(scoville, K):
    heapq.heapify(scoville)
    count = 0
    # Greedily mix the two mildest foods until the mildest reaches K.
    while scoville[0] < K:
        if len(scoville) < 2:
            # Only one food left and it is still below K: impossible.
            return -1
        first = heapq.heappop(scoville)
        second = heapq.heappop(scoville)
        heapq.heappush(scoville, first + second * 2)
        count += 1
    return count
answer = solution([1,2,3,9,10,12], 1000)
print(answer)
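# Sanity check against the classic example from the problem statement
# (the expected value of 2 is an assumption based on that statement):
assert solution([1, 2, 3, 9, 10, 12], 7) == 2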
from x_rebirth_station_calculator.station_data.station_base import Module
from x_rebirth_station_calculator.station_data.station_base import Production
from x_rebirth_station_calculator.station_data.station_base import Consumption
from x_rebirth_station_calculator.station_data import wares
names = {'L044': 'Valley Forge',
'L049': 'Talschmiede'}
productions = {'al': [Production(wares.Wheat, 5400.0)]}
consumptions = {'al': [Consumption(wares.EnergyCells, 600),
Consumption(wares.Water, 3000)]}
ValleyForge = Module(names, productions, consumptions)
"""empty message
Revision ID: e424d03ba260
Revises: ace8d095a26b
Create Date: 2017-10-12 11:25:11.775853
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e424d03ba260'
down_revision = 'ace8d095a26b'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('gist', sa.Column('lang', sa.String(length=30), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('gist', 'lang')
# ### end Alembic commands ###
from setuptools import setup
setup(
name='smiegel',
version='0.0',
long_description=__doc__,
packages=['smiegel'],
include_package_data=True,
author='Erik Price',
description='Self hosted SMS mirroring service',
license='MIT',
install_requires=open('requirements.txt').readlines(),
entry_points={
'console_scripts': [
'smiegel = smiegel.__main__:main'
],
}
)
from buycoins.client import BuyCoinsClient
from buycoins.exceptions import AccountError, ClientError, ServerError
from buycoins.exceptions.utils import check_response
class NGNT(BuyCoinsClient):
"""The NGNT class handles the generations of virtual bank deposit account."""
def create_deposit_account(self, account_name: str):
"""Creates a virtual deposit account under the supplied name.
Args:
            account_name (str): Name of the new virtual deposit account to be generated.
Returns:
response: A JSON object containing the response from the request.
"""
try:
if not account_name:
raise AccountError("Invalid account name passed", 400)
self.account_name = account_name
_variables = {"accountName": self.account_name}
self._query = """
mutation createDepositAccount($accountName: String!) {
createDepositAccount(accountName: $accountName) {
accountNumber
accountName
accountType
bankName
accountReference
}
}
"""
response = self._execute_request(query=self._query, variables=_variables)
check_response(response, AccountError)
except (AccountError, ClientError, ServerError) as e:
return e.response
else:
return response["data"]["createDepositAccount"]
"""App Signals
"""
import logging
from django.db.models.signals import post_save
from django.dispatch import receiver
from vision_on_edge.azure_training_status.models import TrainingStatus
from vision_on_edge.notifications.models import Notification
logger = logging.getLogger(__name__)
@receiver(signal=post_save,
sender=TrainingStatus,
dispatch_uid="training_status_send_notification")
def training_status_send_notification_handler(**kwargs):
"""training_status_send_notification_handler.
Args:
kwargs:
"""
if 'sender' not in kwargs or kwargs['sender'] != TrainingStatus:
logger.info(
"'sender' not in kwargs or kwargs['sender'] != TrainingStatus")
logger.info("nothing to do")
return
if 'instance' not in kwargs:
logger.info("'instance' not in kwargs:'")
logger.info("Nothing to do")
return
instance = kwargs['instance']
if 'need_to_send_notification' in dir(
instance) and instance.need_to_send_notification:
logger.info("Azure TrainingStatus changed.")
logger.info("instance.need_to_send_notification %s",
instance.need_to_send_notification)
Notification.objects.create(notification_type="project",
sender="system",
title=instance.status.capitalize(),
details=instance.log.capitalize())
logger.info("Signal end")
import json
import brilleaux_settings
import flask
from flask_caching import Cache
from flask_cors import CORS
import logging
import sys
from pyelucidate.pyelucidate import async_items_by_container, format_results, mirador_oa
app = flask.Flask(__name__)
CORS(app)
cache = Cache(
app, config={"CACHE_TYPE": "filesystem", "CACHE_DIR": "./", "CACHE_THRESHOLD": 500}
)
@app.route("/annotationlist/<path:anno_container>", methods=["GET"])
@cache.cached(timeout=120) # Cache Flask request to save repeated hits to Elucidate.
def brilleaux(anno_container: str):
"""
Flask app.
Expects an md5 hashed annotation container as part of the path.
Montague stores annotations in a container based on the md5 hash of
the canvas uri.
Requests the annotation list from Elucidate, using the IIIF context.
Unpacks the annotation list, and reformats the JSON to be in the
IIIF Presentation API annotation list format.
Returns JSON-LD for an annotation list.
The @id of the annotation list is set to the request_url.
"""
if brilleaux_settings.ELUCIDATE_URI:
anno_server = brilleaux_settings.ELUCIDATE_URI.replace("annotation/w3c/", "")
else:
anno_server = "https://elucidate.dlcs-ida.org/" # Do we need this anymore?
if flask.request.method == "GET":
request_uri = flask.request.url
# make sure URL ends in a /
if request_uri[-1] != "/":
request_uri += "/"
annotations = async_items_by_container(
elucidate=anno_server,
container=anno_container,
header_dict={
"Accept": "Application/ld+json; profile="
+ '"http://www.w3.org/ns/anno.jsonld"'
},
flatten_ids=True,
trans_function=mirador_oa,
)
content = format_results(list(annotations), request_uri=request_uri)
if content:
resp = flask.Response(
json.dumps(content, sort_keys=True, indent=4),
headers={"Content-Type": "application/ld+json;charset=UTF-8"},
)
return resp
else:
flask.abort(404)
else:
logging.error("Brilleaux does not support this method.")
flask.abort(405)
if __name__ == "__main__":
logging.basicConfig(
stream=sys.stdout,
level=logging.DEBUG,
format="%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s",
)
app.run(threaded=True, debug=True, port=5000, host="0.0.0.0")
from typing import Optional
from aiohttp.web import AppRunner
# TODO fix import
from aioros.graph_resource import get_local_address
from .master_api_server import start_server
from .param_cache import ParamCache
from .registration_manager import RegistrationManager
class Master:
def __init__(self):
self._param_cache: Optional[ParamCache] = None
self._registration_manager: Optional[RegistrationManager] = None
self._server: Optional[AppRunner] = None
self._uri: Optional[str] = None
async def init(
self,
loop,
host: str = None,
port: int = 11311,
) -> None:
host = host or get_local_address()
self._registration_manager = RegistrationManager(loop)
self._param_cache = ParamCache()
self._server, self._uri = await start_server(
host,
port,
self._param_cache,
self._registration_manager)
async def close(self) -> None:
if self._server:
await self._server.cleanup()
self._server = None
self._param_cache = None
if self._registration_manager:
await self._registration_manager.close()
self._registration_manager = None
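# A minimal usage sketch (assumes a running asyncio event loop):
# master = Master()
# await master.init(loop=asyncio.get_event_loop())
# ... serve requests ...
# await master.close()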
from db import db
class AgentModel(db.Model):
__tablename__ = 'agents'
id = db.Column(db.Integer, primary_key=True)
agent_id = db.Column(db.Integer, db.ForeignKey('users.id'), unique=True)
customers = db.relationship("CustomerModel", backref='agent')
name = db.Column(db.String(80))
email = db.Column(db.String(90))
commision_percentage = db.Column(db.Integer)
rating = db.Column(db.Integer, nullable=True)
def __init__(self, agent_id, name, email, commision_percentage):
self.agent_id = agent_id
self.name = name
self.email = email
self.commision_percentage = commision_percentage
self.rating = None
def json(self):
return {
'agent_id': self.agent_id,
'name': self.name,
'email': self.email,
'commision_percentage': self.commision_percentage,
'rating': self.rating,
'customers': [customer.json() for customer in self.customers]
}
def save_to_db(self):
db.session.add(self)
db.session.commit()
@classmethod
def find_all(cls):
return cls.query.all()
@classmethod
def find_by_agent_id(cls, agent_id):
return cls.query.filter_by(agent_id=agent_id).first_or_404(description='There is no data with {}'.format(agent_id))
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-10-23 09:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('oa', '0003_auto_20171023_1746'),
]
operations = [
migrations.RenameField(
model_name='post',
old_name='title',
new_name='title_a',
),
]
import time
import subprocess
import sys
import logging
class GitDirectoryError(Exception):
"""Directory not a git repository"""
def generate(directory=".") -> str:
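    """Build a calendar-style version string from the last commit timestamp,
    e.g. "24.032.141503" (two-digit year, day of year, HHMMSS in local time)."""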
commitDate = 0
try:
commitDate = int(
subprocess.check_output(
"git show -s --format='%ct'", shell=True, cwd=directory
)
.decode()
.strip()
)
    except subprocess.CalledProcessError as exc:
        raise GitDirectoryError("Directory not a git repository") from exc
return time.strftime("%y.%j.%H%M%S", time.localtime(commitDate))
if __name__ == "__main__":
try:
print(generate(**dict(arg.split("=") for arg in sys.argv[1:])))
except GitDirectoryError as e:
logging.error("%s %s", "[DateVersioning]", e)
MAX_CONSOLE_LINE_LENGTH = 79
class CliReport:
def __init__(self):
self.is_initialized = False
def print(self, string='', length=MAX_CONSOLE_LINE_LENGTH, end='\n'):
if self.is_initialized:
number_of_spaces = 0
if length > len(string):
number_of_spaces = length - len(string)
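            # The cp866/cp1251 encode/decode round-trips drop characters those
            # Windows console code pages cannot represent, keeping output printable.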
print((string + ' ' * number_of_spaces).encode('cp866', errors='ignore').decode('cp866').encode(
'cp1251', errors='ignore').decode('cp1251'), end=end)
# -*- coding: utf-8 -*-
import typing
import pandas as pd
import smart_open
import awswrangler as wr
from .helpers import (
check_enumeration_s3_key_string,
get_key_size_all_objects,
group_s3_objects_no_larger_than,
)
from .options import ZFILL
def merge_csv(
s3_client,
source_bucket: str,
source_key_prefix: str,
target_bucket: str,
target_key: str,
target_size: int,
zfill: int = ZFILL,
):
check_enumeration_s3_key_string(target_key)
# analyze input data
key_and_size_list = get_key_size_all_objects(
s3_client=s3_client,
bucket=source_bucket,
prefix=source_key_prefix,
)
group_list = group_s3_objects_no_larger_than(
key_and_size_list=key_and_size_list,
max_size=target_size,
)
for nth_group, s3_object_group in enumerate(group_list):
nth_group += 1
source_uri_list = [
f"s3://{source_bucket}/{s3_key}"
for s3_key in s3_object_group
]
merge_json(
s3_client=s3_client,
source_uri_list=source_uri_list,
target_bucket=target_bucket,
target_key=target_key.format(i=str(nth_group).zfill(zfill)),
)
def merge_parquet(boto3_session,
source_uri_list: typing.List[str],
target_bucket: str,
target_key: str) -> typing.Tuple[str, str]:
"""
Merge multiple parquet file on S3 into one parquet file.
.. note::
For parquet, it has to use the awswrangler API and it only support
boto3_session other than s3_client.
"""
df_list = list()
for s3_uri in source_uri_list:
df = wr.s3.read_parquet(s3_uri, boto3_session=boto3_session)
df_list.append(df)
df = pd.concat(df_list, axis=0)
wr.s3.to_parquet(
df=df,
path=f"s3://{target_bucket}/{target_key}",
boto3_session=boto3_session
)
return target_bucket, target_key
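# A hypothetical usage sketch (bucket and keys are illustrative only):
# import boto3
# merge_parquet(boto3_session=boto3.Session(),
#               source_uri_list=["s3://my-bucket/part-1.parquet", "s3://my-bucket/part-2.parquet"],
#               target_bucket="my-bucket", target_key="merged/data.parquet")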
def merge_parquet_by_prefix(boto3_session,
source_bucket,
source_key_prefix,
target_bucket,
target_key,
target_size,
zfill: int = ZFILL) -> typing.List[typing.Tuple[str, str]]:
"""
Smartly merge all parquet s3 object under the same prefix into one or many
fixed size (approximately) parquet file.
"""
check_enumeration_s3_key_string(target_key)
s3_client = boto3_session.client("s3")
target_s3_bucket_key_list = list()
# analyze input data
key_and_size_list = get_key_size_all_objects(
s3_client=s3_client,
bucket=source_bucket,
prefix=source_key_prefix,
)
group_list = group_s3_objects_no_larger_than(
key_and_size_list=key_and_size_list,
max_size=target_size,
)
for nth_group, s3_object_group in enumerate(group_list):
nth_group += 1
source_uri_list = [
f"s3://{source_bucket}/{s3_key}"
for s3_key in s3_object_group
]
bucket_and_key = merge_parquet(
boto3_session=boto3_session,
source_uri_list=source_uri_list,
target_bucket=target_bucket,
target_key=target_key.format(i=str(nth_group).zfill(zfill)),
)
target_s3_bucket_key_list.append(bucket_and_key)
return target_s3_bucket_key_list
def merge_json(s3_client,
source_uri_list: typing.List[str],
target_bucket: str,
target_key: str):
transport_params = dict(client=s3_client)
with smart_open.open(
f"s3://{target_bucket}/{target_key}", "w",
transport_params=transport_params,
) as f_out:
for source_uri in source_uri_list:
with smart_open.open(
source_uri, "r",
transport_params=transport_params,
) as f_in:
for line in f_in:
f_out.write(line)
def merge_json_by_prefix(s3_client,
source_bucket: str,
source_key_prefix: str,
target_bucket: str,
target_key: str,
target_size: int,
zfill: int = ZFILL):
check_enumeration_s3_key_string(target_key)
# analyze input data
key_and_size_list = get_key_size_all_objects(
s3_client=s3_client,
bucket=source_bucket,
prefix=source_key_prefix,
)
group_list = group_s3_objects_no_larger_than(
key_and_size_list=key_and_size_list,
max_size=target_size,
)
for nth_group, s3_object_group in enumerate(group_list):
nth_group += 1
source_uri_list = [
f"s3://{source_bucket}/{s3_key}"
for s3_key in s3_object_group
]
merge_json(
s3_client=s3_client,
source_uri_list=source_uri_list,
target_bucket=target_bucket,
target_key=target_key.format(i=str(nth_group).zfill(zfill)),
)
# Suppress warnings caused by tensorflow
import warnings
warnings.filterwarnings('ignore', category = DeprecationWarning)
warnings.filterwarnings('ignore', category = PendingDeprecationWarning)
import pytest
from .. import Marabou
import numpy as np
import os
# Global settings
TOL = 1e-4 # Tolerance for Marabou evaluations
ONNX_FILE = "../../resources/onnx/fc1.onnx" # File for test onnx network
ACAS_FILE = "../../resources/nnet/acasxu/ACASXU_experimental_v2a_1_1.nnet" # File for test nnet network
def test_sat_query(tmpdir):
"""
Test that a query generated from Maraboupy can be saved and loaded correctly and return sat
"""
network = load_onnx_network()
# Set output constraint
outputVars = network.outputVars.flatten()
outputVar = outputVars[1]
minOutputValue = 70.0
network.setLowerBound(outputVar, minOutputValue)
# Save this query to a temporary file, and reload the query
queryFile = tmpdir.mkdir("query").join("query.txt").strpath
network.saveQuery(queryFile)
ipq = Marabou.load_query(queryFile)
# Solve the query loaded from the file and compare to the solution of the original query
# The result should be the same regardless of verbosity options used, or if a file redirect is used
tempFile = tmpdir.mkdir("redirect").join("marabouRedirect.log").strpath
opt = Marabou.createOptions(verbosity = 0)
    vals_net, _ = network.solve(filename = tempFile, options = opt)
    vals_ipq, _ = Marabou.solve_query(ipq, filename = tempFile, options = opt)
# The two value dictionaries should have the same number of variables,
# the same keys, and the values assigned should be within some tolerance of each other
assert len(vals_net) == len(vals_ipq)
for k in vals_net:
assert k in vals_ipq
assert np.abs(vals_ipq[k] - vals_net[k]) < TOL
def test_unsat_query(tmpdir):
"""
Test that a query generated from Maraboupy can be saved and loaded correctly and return unsat
"""
network = load_onnx_network()
# Set output constraint
outputVars = network.outputVars.flatten()
outputVar = outputVars[0]
minOutputValue = 2000.0
network.setLowerBound(outputVar, minOutputValue)
    # Save this query to a temporary file, and reload the query
queryFile = tmpdir.mkdir("query").join("query.txt").strpath
network.saveQuery(queryFile)
ipq = Marabou.load_query(queryFile)
# Solve the query loaded from the file and compare to the solution of the original query
opt = Marabou.createOptions(verbosity = 0)
vals_net, stats_net = network.solve(options = opt)
vals_ipq, stats_ipq = Marabou.solve_query(ipq, options = opt)
# Assert the value dictionaries are both empty, and both queries have not timed out (unsat)
assert len(vals_net) == 0
assert len(vals_ipq) == 0
assert not stats_net.hasTimedOut()
assert not stats_ipq.hasTimedOut()
def test_timeout_query(tmpdir):
"""
Test that a query generated from Maraboupy can be saved and loaded correctly and return timeout.
    This query is expected to be UNSAT but is currently unsolvable within one second.
If future improvements allow the query to be solved within a second, then this test will need to be updated.
"""
network = load_acas_network()
# Set output constraint
outputVars = network.outputVars.flatten()
outputVar = outputVars[0]
minOutputValue = 1500.0
network.setLowerBound(outputVar, minOutputValue)
    # Save this query to a temporary file, and reload the query
queryFile = tmpdir.mkdir("query").join("query.txt").strpath
network.saveQuery(queryFile)
ipq = Marabou.load_query(queryFile)
# Solve the query loaded from the file and compare to the solution of the original query
opt = Marabou.createOptions(verbosity = 0, timeoutInSeconds = 1)
vals_net, stats_net = network.solve(options = opt)
vals_ipq, stats_ipq = Marabou.solve_query(ipq, options = opt)
# Assert timeout
assert stats_net.hasTimedOut()
assert stats_ipq.hasTimedOut()
def load_onnx_network():
"""
The test network fc1.onnx is used, which has two input variables and two output variables.
The network was trained such that the first output approximates the sum of the absolute
values of the inputs, while the second output approximates the sum of the squares of the inputs
for inputs in the range [-10.0, 10.0].
"""
filename = os.path.join(os.path.dirname(__file__), ONNX_FILE)
network = Marabou.read_onnx(filename)
# Get the input and output variable numbers; [0] since first dimension is batch size
inputVars = network.inputVars[0][0]
# Set input bounds
network.setLowerBound(inputVars[0],-10.0)
network.setUpperBound(inputVars[0], 10.0)
network.setLowerBound(inputVars[1],-10.0)
network.setUpperBound(inputVars[1], 10.0)
return network
def load_acas_network():
"""
Load one of the acas networks. This network is larger than fc1.onnx, making it a better test case
for testing timeout.
"""
filename = os.path.join(os.path.dirname(__file__), ACAS_FILE)
return Marabou.read_nnet(filename, normalize=True)
# Generated by Django 3.2.12 on 2022-04-24 14:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_jalali.db.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('blog', '0005_alter_post_category'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'verbose_name': 'دسته بندی', 'verbose_name_plural': 'دسته بندی ها'},
),
migrations.AlterModelOptions(
name='post',
options={'ordering': ('-created_date',), 'verbose_name': 'پست', 'verbose_name_plural': 'پست ها'},
),
migrations.AlterField(
model_name='post',
name='author',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='نویسنده'),
),
migrations.AlterField(
model_name='post',
name='category',
field=models.ManyToManyField(blank=True, to='blog.Category', verbose_name='دسته بندی'),
),
migrations.AlterField(
model_name='post',
name='content',
field=models.TextField(verbose_name='محتوا'),
),
migrations.AlterField(
model_name='post',
name='counted_views',
field=models.IntegerField(default=0, verbose_name='تعداد بازدید'),
),
migrations.AlterField(
model_name='post',
name='created_date',
field=models.DateTimeField(auto_now_add=True, verbose_name='تاریخ ایجاد'),
),
migrations.AlterField(
model_name='post',
name='image',
field=models.ImageField(default='blog/default.jpg', upload_to='blog/', verbose_name='تصویر'),
),
migrations.AlterField(
model_name='post',
name='published_date',
field=django_jalali.db.models.jDateTimeField(blank=True, null=True, verbose_name='تاریخ انتشار'),
),
migrations.AlterField(
model_name='post',
name='status',
field=models.BooleanField(default=False, verbose_name='وضعیت'),
),
migrations.AlterField(
model_name='post',
name='title',
field=models.CharField(max_length=255, verbose_name='موضوع'),
),
migrations.AlterField(
model_name='post',
name='updated_date',
field=models.DateTimeField(auto_now=True, verbose_name='تاریخ به روز رسانی'),
),
]
import os
from dotenv import load_dotenv
import pandas as pd
import psycopg2
from psycopg2.extras import execute_values
import json
import numpy as np
load_dotenv()
DB_NAME = os.getenv("DB_NAME")
DB_USER = os.getenv("DB_USER")
DB_PASSWORD = os.getenv("DB_PASSWORD")
DB_HOST= os.getenv("DB_HOST")
conn = psycopg2.connect(dbname=DB_NAME, user=DB_USER, password=DB_PASSWORD, host=DB_HOST)
curs = conn.cursor()
#- How many passengers survived, and how many died?
query = 'SELECT count(survived) from passengers where survived = 0'
curs.execute(query)
hi = curs.fetchone()
print(hi[0], "passengers died.")
query = 'SELECT count(survived) from passengers where survived = 1'
curs.execute(query)
hi = curs.fetchone()
print(hi[0], "passengers survived.")
#- How many passengers were in each class?
class1 = 'SELECT count(pclass) from passengers where pclass =1'
curs.execute(class1)
hi = curs.fetchone()
print("There were", hi[0], "passengers in class 1.")
class2 = 'SELECT count(pclass) from passengers where pclass =2'
curs.execute(class2)
hi = curs.fetchone()
print("There were", hi[0], "passengers in class 2.")
class3 = 'SELECT count(pclass) from passengers where pclass =3'
curs.execute(class3)
hi = curs.fetchone()
print("There were", hi[0], "passengers in class 3.")
#- How many passengers survived/died within each class?
died = 'SELECT count(pclass) from passengers where survived = 0 and pclass =1'
curs.execute(died)
hi = curs.fetchone()
print("There were", hi[0], "passengers who died in class 1.")
survived = 'SELECT count(pclass) from passengers where survived = 1 and pclass =1'
curs.execute(survived)
hi = curs.fetchone()
print("There were", hi[0], "passengers who survived in class 1.")
died1 = 'SELECT count(pclass) from passengers where survived = 0 and pclass =2'
curs.execute(died1)
hi = curs.fetchone()
print("There were", hi[0], "passengers who died in class 2.")
survived1 = 'SELECT count(pclass) from passengers where survived = 1 and pclass =2'
curs.execute(survived1)
hi = curs.fetchone()
print("There were", hi[0], "passengers who survived in class 2.")
died2 = 'SELECT count(pclass) from passengers where survived = 0 and pclass =3'
curs.execute(died2)
hi = curs.fetchone()
print("There were", hi[0], "passengers who died in class 3.")
survived2 = 'SELECT count(pclass) from passengers where survived = 1 and pclass =3'
curs.execute(survived2)
hi = curs.fetchone()
print("There were", hi[0], "passengers who survived in class 3.")
#- What was the average age of survivors vs nonsurvivors?
avg_dead = 'select avg(age) from passengers where survived =0'
curs.execute(avg_dead)
hi = curs.fetchone()
print("The average age of passengers who died was", hi[0])
avg_surv = 'select avg(age) from passengers where survived =1'
curs.execute(avg_surv)
hi = curs.fetchone()
print("The average age of passengers who survived was", hi[0])
#- What was the average age of each passenger class?
class1 = 'select avg(age) from passengers where pclass =1'
curs.execute(class1)
hi = curs.fetchone()
print("The average age of passengers in class 1 was", hi[0])
class2 = 'select avg(age) from passengers where pclass =2'
curs.execute(class2)
hi = curs.fetchone()
print("The average age of passengers in class 2 was", hi[0])
class3 = 'select avg(age) from passengers where pclass =3'
curs.execute(class3)
hi = curs.fetchone()
print("The average age of passengers in class 3 was", hi[0])
#- What was the average fare by passenger class? By survival?
class1 = 'select avg(fare) from passengers where pclass =1'
curs.execute(class1)
hi = curs.fetchone()
print("The average fare of passengers in class 1 was", hi[0])
class2 = 'select avg(fare) from passengers where pclass =2'
curs.execute(class2)
hi = curs.fetchone()
print("The average fare of passengers in class 2 was", hi[0])
class3 = 'select avg(fare) from passengers where pclass =3'
curs.execute(class3)
hi = curs.fetchone()
print("The average fare of passengers in class 3 was", hi[0])
avg_dead = 'select avg(fare) from passengers where survived =0'
curs.execute(avg_dead)
hi = curs.fetchone()
print("The average fare of passengers who died was", hi[0])
avg_surv = 'select avg(fare) from passengers where survived =1'
curs.execute(avg_surv)
hi = curs.fetchone()
print("The average fare of passengers who survived was", hi[0])
#- How many siblings/spouses aboard on average, by passenger class? By survival?
class1 = 'select avg(sib_spouse_count) from passengers where pclass =1'
curs.execute(class1)
hi = curs.fetchone()
print("The average siblings/spouses aboard in class 1 was", hi[0])
class2 = 'select avg(sib_spouse_count) from passengers where pclass =2'
curs.execute(class2)
hi = curs.fetchone()
print("The average siblings/spouses aboard in class 2 was", hi[0])
class3 = 'select avg(sib_spouse_count) from passengers where pclass =3'
curs.execute(class3)
hi = curs.fetchone()
print("The average siblings/spouses aboard in class 3 was", hi[0])
avg_dead = 'select avg(sib_spouse_count) from passengers where survived =0'
curs.execute(avg_dead)
hi = curs.fetchone()
print("The average siblings/spouses aboard of passengers who died was", hi[0])
avg_surv = 'select avg(sib_spouse_count) from passengers where survived =1'
curs.execute(avg_surv)
hi = curs.fetchone()
print("The average siblings/spouses aboard of passengers who survived was", hi[0])
#- How many parents/children aboard on average, by passenger class? By survival?
class1 = 'select avg(parent_child_count) from passengers where pclass =1'
curs.execute(class1)
hi = curs.fetchone()
print("The average parents/children aboard in class 1 was", hi[0])
class2 = 'select avg(parent_child_count) from passengers where pclass =2'
curs.execute(class2)
hi = curs.fetchone()
print("The average parents/children aboard in class 2 was", hi[0])
class3 = 'select avg(parent_child_count) from passengers where pclass =3'
curs.execute(class3)
hi = curs.fetchone()
print("The average parents/children aboard in class 3 was", hi[0])
avg_dead = 'select avg(parent_child_count) from passengers where survived =0'
curs.execute(avg_dead)
hi = curs.fetchone()
print("The average parents/children aboard of passengers who died was", hi[0])
avg_surv = 'select avg(parent_child_count) from passengers where survived =1'
curs.execute(avg_surv)
hi = curs.fetchone()
print("The average parents/children aboard of passengers who survived was", hi[0])
#- Do any passengers have the same name?
name = 'SELECT name from passengers GROUP BY name HAVING count(*) > 1'
curs.execute(name)
hi = curs.fetchall()
print(len(hi), "names are shared by more than one passenger.")
#nope!
# (Bonus! Hard, may require pulling and processing with Python) How many married
#couples were aboard the Titanic? Assume that two people (one `Mr.` and one
#`Mrs.`) with the same last name and with at least 1 sibling/spouse aboard are
#a married couple.
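# A minimal sketch for the bonus, assuming names look like 'Braund, Mr. Owen Harris'
# and pairing by shared last name plus at least one sibling/spouse aboard
# (a heuristic, not a definitive count):
curs.execute('SELECT name, sib_spouse_count FROM passengers')
misters, missuses = set(), set()
for name, sib_spouse in curs.fetchall():
    last_name = name.split(',')[0].strip()
    if ', Mr. ' in name and sib_spouse >= 1:
        misters.add(last_name)
    elif ', Mrs. ' in name and sib_spouse >= 1:
        missuses.add(last_name)
print("Estimated married couples aboard:", len(misters & missuses))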
import json
from flask import Flask, render_template, redirect, Response, jsonify,request
from flask_cors import CORS
from Kerberos import Server,Server_Error
app = Flask(__name__, static_folder='./static', static_url_path='/')
cors = CORS(app)
#! This server uses distinct routes for different types of requests
#? We make our Kerberos server (not the HTTP server) from the ticket generated by the TGS,
#? copied from there and saved in the Tickets folder here.
server = Server.make_server_from_db('A',check_rand=True)
#* The mock database
book_data = ['Gravitation','Clean Code']
@app.route('/data',methods=['POST'])
def get_data():
data = request.get_json()
req = data['req']
ticket = data['ticket']
try:
        #? we first try to decode the req param in the HTTP request received
dec_req = server.decrypt_req(req,ticket)
req = json.loads(dec_req)
        #? Then we verify that the random number used by the user is being used for the first time
server.verify_rand(req.get('rand',None),req['user'],request.remote_addr)
        #? we encrypt the response (data, not HTTP) that is to be sent
enc_res = server.encrypt_res(req['user'],request.remote_addr,{'success': True,'res':book_data},ticket)
#? we return HTTP response
return Response(enc_res, status=200)
except Server_Error as e:
        #? If some error occurred, send the error as the response (it could be encrypted, but is not here)
return Response(str(e),400)
@app.route('/add',methods=['POST'])
def add_data():
data = request.get_json()
req = data['req']
ticket = data['ticket']
try:
        #? we first try to decode the req param in the HTTP request received
dec_req = server.decrypt_req(req,ticket)
req = json.loads(dec_req)
        #? Then we verify that the random number used by the user is being used for the first time
server.verify_rand(req.get('rand',None),req['user'],request.remote_addr)
        #* we add the data that was sent; a real application would operate on an actual database here
book_data.append(req['book'])
        #? we encrypt the response (data, not HTTP) that is to be sent
enc_res = server.encrypt_res(req['user'],request.remote_addr,{'success':'true'},ticket)
#? we return HTTP response
return Response(enc_res, status=200)
except Server_Error as e:
        #? If some error occurred, send the error as the response (it could be encrypted, but is not here)
return Response(str(e),400)
app.run(host='0.0.0.0', port='5001', debug=True)
import sys, glob
from os import listdir, remove
from os.path import dirname, join, isfile, abspath
from io import StringIO
import numpy as np
import utilsmodule as um
script_path = dirname(abspath(__file__))
datasetPath = join(script_path,"data/")
e = 'shrec'
### Compute the dice coefficient used in Table 1,
# E Moscoso Thompson, G Arvanitis, K Moustakas, N Hoang-Xuan, E R Nguyen, et al..
# SHREC’19track: Feature Curve Extraction on Triangle Meshes.
# 12th EG Workshop 3D Object Retrieval 2019,May 2019, Gênes, Italy.
print (" Processing experiment " + e)
# Fields loaded from the file
input_file_fields = ['Precision', 'Recall', 'MCC', 'TP', 'FP', 'TN', 'FN']
# Expected range for the fields (used to compute the histogram bins)
input_fields_range = [(0,1), (0,1), (-1,1), (0,1), (0,1), (0,1), (0,1)]
input_fields_bins = []
# Functions used to summarize a field for the whole dataset
input_field_summary = {
"median": lambda buf: np.nanmedian(buf),
"mean": lambda buf: np.nanmean(buf)
}
experimentPath = join(datasetPath, e)
experimentFile = join(script_path,"../assets/js/data_" + e + ".js")
approaches = [f for f in listdir(experimentPath) if isfile(join(experimentPath, f))]
# Data loaded from the file
rawdata = dict()
# Number of samples (3D models) used in this experiment
nbsamples = 0
# Load data
for a in approaches:
if a.endswith(".txt"):
aname = a[:-4]
apath = join(experimentPath,a)
        # Load, skipping comment lines and blank lines
        lines = [item.split() for item in open(apath, 'r') if item.strip() and not item.startswith('#')]
nbsamples = len(lines)
# Current layout: lines[lineid][columnid]
# Reshape so we have columns[columnid][lineid]
rawdata[aname] = np.swapaxes( lines, 0, 1 )
# Convert array of str to numpy array of numbers
        converter = lambda x: np.asarray(x, dtype=float)  # np.fromstring and np.float are deprecated
rawdata[aname] = list(map(converter,rawdata[aname]))
print (" Loaded methods " + str(rawdata.keys()))
for method, data in rawdata.items():
precision = data[0]
recall = data[1]
tp = data[3]
fp = data[4]
tn = data[5]
fn = data[6]
# Compute dice
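    # Dice = 2*TP / (2*TP + FP + FN), which equals the F1 score.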
dice = (2.*tp) / (2.*tp + fn + fp)
#dice = data[2]
data.append(dice)
# Now print the latex table header
for method, data in rawdata.items():
print (method + " & ", end = '')
print("\\\\ \n \hline")
# Find max value per model
maxid = []
for i in range (0,nbsamples):
vmax = 0.
mmax = 0
m = 0
for method, data in rawdata.items():
if data[7][i] > vmax:
vmax = data[7][i]
mmax = m
m = m+1
maxid.append(mmax)
# Now print the latex table content
for i in range (0,nbsamples):
m = 0
for method, data in rawdata.items():
# print ( str(data[:-1][i]) + " & " )
valstr = "{:.2f}".format(data[7][i])
if maxid[i] == m:
valstr = "\\textbf{" + valstr + "}"
print ( valstr + " & " , end = '')
m = m+1
print("\\\\ \n \hline")
from firebase import *
import logging
from random import randint, random
from mlflow import (
active_run,
end_run,
get_tracking_uri,
log_metric,
log_param,
start_run,
)
from mlflow.tracking import MlflowClient
from dbnd import task
logger = logging.getLogger(__name__)
@task
def mlflow_example():
logger.info("Running MLFlow example!")
logger.info("MLFlow tracking URI: {}".format(get_tracking_uri()))
start_run()
# params
log_param("param1", randint(0, 100))
log_param("param2", randint(0, 100))
# metrics
log_metric("foo1", random())
log_metric("foo1", random() + 1)
log_metric("foo2", random())
log_metric("foo2", random() + 1)
# Show metadata & data from the mlflow tracking store:
service = MlflowClient()
run_id = active_run().info.run_id
run = service.get_run(run_id)
logger.info("Metadata & data for run with UUID %s: %s" % (run_id, run))
end_run()
logger.info("MLFlow example completed!")
#
# from dbnd_task
# @task
# def mlflow_example():
# pass
if __name__ == "__main__":
mlflow_example()
from .channelpad import channelpad
from .conv2d_same import conv2d_same
from .padding import get_same_padding, pad_same
from .shakedrop import shakedrop
from .sigaug import signal_augment
from .sigmoid import h_sigmoid
from .stack import adjusted_concat, adjusted_stack
from .swish import h_swish, swish
#Conditional Tests HW - Due Monday
# 13 Tests --> 1 True and 1 False for each
#If Statements
#Simplest structure of an if statement:
# if conditional_test:
# do something <-- Instructions/commands
#my_age = 13
#if my_age >= 18:
# print("You are old enough to vote.")
# print("Are you registered to vote?")
#Unindent!
#Indentation plays the same role for if-statements
#as it did for 'for' loops. Anything indented will be
#executed whenever the conditional test is true. Anything
#indented will be skipped whenever the conditional test is
#false.
#USE CAUTION - Don't forget to un-indent when you are finished
#with your if-block.
#Often we want one action if the conditional test is True,
#But make another action whenever it is false.
my_age = 33
if my_age >= 18:
print("You are old enough to vote.")
print("Are you registered to vote?")
else: #Catches any instances when the above test fails
print("You are not old enough to vote.")
print("Please register to vote when you turn 18.")
#The if-else structure works very well in situations in which python
#needs to always execute one of two possible actions.
#in a simple if-else block, one of the two will always be evaluated.
#if-elif-else Chain
#Python will only execute one block in an if-elif-else chain.
#As soon as one test passes, python executes that block
#and skips the rest (even if they might be true).
#Example: Admission to a theme park:
#Four price levels:
#Under 4 --> Free
#between 4 and 18 --> $25
#18 to 65 --> $40
#65 and older--> $20
age = 66
if age < 4:
price = 0
elif age < 18: #elif = else+if --> if the above test(s) is(are) false,
#try this test next
price = 25
elif age < 65:
price = 40
#We can have more than one elif statement
elif age >= 65:
price = 20
#The catch-all 'else' statement is no longer needed.
#If you have a definite condition for the last block of an if-elif-else
#Use an elif statement with a definite conditional test. If you don't have a
#definite condition in mind for the last layer of an if-elif-else block,
#else works fine (unless you don't really need it).
print(f"Your admission cost is ${price}")
#Think about the structure of your if-elif-else blocks.
#Especially when the tests overlap
#The purpose of the above code was to determine the cost for the user
#Multiple conditions.
requested_toppings = ['mushrooms','extra cheese']
if 'mushrooms' in requested_toppings:
print("Adding mushrooms.")
if 'pepperoni' in requested_toppings:
print("Adding pepperoni")
if 'extra cheese' in requested_toppings:
print("Adding extra cheese")
print("Finished making pizza!")
#!/usr/bin/env python
# BCET Workflow
__author__ = 'Sam Brooke'
__date__ = 'September 2017'
__copyright__ = '(C) 2017, Sam Brooke'
__email__ = "sbrooke@tuta.io"
import os
import georasters as gr
import matplotlib.pyplot as plt
import numpy as np
from optparse import OptionParser
import fnmatch
import re
from scipy.interpolate import make_interp_spline  # scipy.interpolate.spline was removed from SciPy
parser = OptionParser()
(options, args) = parser.parse_args()
# args[0] for bcet_directory
# args[1] for no_bcet_directory
bcet_directory = False
no_bcet_directory = False
file_prefix = ''
if os.path.isdir(args[0]):
bcet_directory = args[0]
if os.path.isdir(args[1]):
no_bcet_directory = args[1]
bcet_matches = []
for root, dirnames, filenames in os.walk(bcet_directory):
for filename in fnmatch.filter(filenames, '*.tif'):
bcet_matches.append(os.path.join(root, filename))
print(bcet_matches)
no_bcet_matches = []
for root, dirnames, filenames in os.walk(no_bcet_directory):
for filename in fnmatch.filter(filenames, '*.tif'):
no_bcet_matches.append(os.path.join(root, filename))
print(no_bcet_matches)
output = args[2]
# Load Raster
colours = {
'B1':'lightblue',
'B2':'blue',
'B3':'green',
'B4':'red',
'B5':'firebrick',
'B6':'grey',
'B7':'k'
}
band_labels = {
'B1':'Band 1 - Ultra Blue',
'B2':'Band 2 - Blue',
'B3':'Band 3 - Green',
'B4':'Band 4 - Red',
'B5':'Band 5 - NIR',
'B6':'Band 6 - SWIR 1',
'B7':'Band 7 - SWIR 2'
}
# Display results
#fig = plt.figure(figsize=(8, 5))
fig, axarr = plt.subplots(2, sharex=False)
width = 25 #cm
height = 20 #cm
fig.set_size_inches(float(width)/2.54, float(height)/2.54)
for ma in no_bcet_matches:
raster = os.path.join(ma)
base = os.path.basename(raster)
m = re.search(r"B[0-9]+",base)
band_name = m.group()
ndv, xsize, ysize, geot, projection, datatype = gr.get_geo_info(raster) # Raster information
# ndv = no data value
data = gr.from_file(raster) # Create GeoRaster object
crs = projection.ExportToProj4() # Create a projection string in proj4 format
sp = data.raster.ravel()
spn = len(sp)
hist, bins = np.histogram(data.raster.ravel(), bins=50)
hist_norm = hist.astype(float) / spn
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
    centernew = np.linspace(center.min(), center.max(), 300)  # 300 interpolation points between center.min and center.max
    hist_smooth = make_interp_spline(center, hist_norm)(centernew)
axarr[0].plot(centernew, hist_smooth, color=colours[band_name], label=band_labels[band_name])
for ma in bcet_matches:
raster = os.path.join(ma)
base = os.path.basename(raster)
m = re.search(r"B[0-9]+",base)
band_name = m.group()
ndv, xsize, ysize, geot, projection, datatype = gr.get_geo_info(raster) # Raster information
# ndv = no data value
data = gr.from_file(raster) # Create GeoRaster object
crs = projection.ExportToProj4() # Create a projection string in proj4 format
sp = data.raster.ravel()
spn = len(sp)
hist, bins = np.histogram(data.raster.ravel(), bins=25)
hist_norm = hist.astype(float) / spn
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
    centernew = np.linspace(center.min(), center.max(), 300)  # 300 interpolation points between center.min and center.max
    hist_smooth = make_interp_spline(center, hist_norm)(centernew)
axarr[1].plot(centernew, hist_smooth, color=colours[band_name], label=band_labels[band_name])
axarr[0].set_xlim([0, 25000])
axarr[1].set_xlim([0,255])
axarr[0].set_ylim([0, 0.5])
axarr[1].set_ylim([0, 0.5])
axarr[0].set_xlabel('R')
axarr[1].set_xlabel('R*')
axarr[0].set_ylabel('f')
axarr[1].set_ylabel('f')
axarr[0].set_title('LANDSAT (White Mountains ROI) 2014-02-25 Unmodified Histogram')
axarr[1].set_title('LANDSAT (White Mountains ROI) 2014-02-25 BCET Histogram')
axarr[0].legend()
axarr[1].legend()
plt.savefig(output)  # write to the output path given as the third argument
from pandas import DataFrame
import os
def frame_to_csv(frame:DataFrame,output_file:str,decimal_format=',',
float_format=None,date_format=None,quote_char='"',no_data_repr='',sep=';'):
"""
Converts a pandas dataframe to a csv file
Parameters
----------
output_file -> path to file to write to
decimal_format -> decimal separator to use default ","
float_format -> format mask to use for floats, default none
date_format -> format mask for date, default none
quote_char -> string quote char, default '"'
    no_data_repr -> how to represent empty columns, default ''
    sep -> field separator, default ';'
"""
frame.to_csv(output_file,decimal=decimal_format,
float_format=float_format,date_format=date_format,
quotechar=quote_char,na_rep=no_data_repr,sep=sep)
def frame_to_csv_str(frame:DataFrame,decimal_format=',',
float_format=None,date_format=None,quote_char='"',no_data_repr='',sep=';'):
"""
Converts a pandas dataframe to a csv formatted string
Parameters
----------
decimal_format -> decimal separator to use default ","
float_format -> format mask to use for floats, default none
date_format -> format mask for date, default none
quote_char -> string quote char, default '"'
    no_data_repr -> how to represent empty columns, default ''
    sep -> field separator, default ';'
"""
return frame.to_csv(None,decimal=decimal_format,
float_format=float_format,date_format=date_format,
quotechar=quote_char,na_rep=no_data_repr,sep=sep)
def frame_to_excel(frame:DataFrame,output_file:str,
float_format=None,no_data_rep='',sheetName='Sheet1'):
"""
Converts a pandas data frame to a excel file
Parameters
----------
output_file -> path to file to write to
float_format -> format mask for floats e.g. '%.2f' will format to 2 decimals, default None
    no_data_rep -> how empty columns should be represented, default ''
    sheetName -> name of the worksheet to write to, default 'Sheet1'
"""
frame.to_excel(output_file,sheet_name=sheetName,
float_format=float_format,na_rep=no_data_rep)
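# Minimal usage sketch (an addition, not part of the original module):
if __name__ == '__main__':
    df = DataFrame({'id': [1, 2], 'value': [1.5, None]})
    # decimal comma and ';' separator are the defaults defined above
    print(frame_to_csv_str(df))
    frame_to_csv(df, 'example.csv')     # hypothetical output path
    frame_to_excel(df, 'example.xlsx')  # needs an Excel writer installed, e.g. openpyxl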
'''
Integration Test for HA mode with UI stop on one node.
@author: Quarkonics
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import time
import os
node_ip = None
def test():
global node_ip
node_ip = os.environ.get('node1Ip')
test_util.test_logger("stop ui on node: %s" % (node_ip))
cmd = "zstack-ctl stop_ui"
host_username = os.environ.get('nodeUserName')
host_password = os.environ.get('nodePassword')
rsp = test_lib.lib_execute_ssh_cmd(node_ip, host_username, host_password, cmd, 180)
test_util.test_logger("check if it still works")
zstack_ha_vip = os.environ.get('zstackHaVip')
if not test_lib.lib_network_check(zstack_ha_vip, 8888):
test_util.test_fail('Could not access UI through VIP: %s, port: 8888' % (zstack_ha_vip))
cmd = "zstack-ctl start_ui"
rsp = test_lib.lib_execute_ssh_cmd(node_ip, host_username, host_password, cmd, 180)
    test_util.test_pass('UI Stop on one node Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
global node_ip
cmd = "zstack-ctl start_ui"
host_username = os.environ.get('nodeUserName')
host_password = os.environ.get('nodePassword')
rsp = test_lib.lib_execute_ssh_cmd(node_ip, host_username, host_password, cmd, 180)
# This code is licensed under the MIT License (see LICENSE file for details)
import ctypes
import atexit
# import all the autogenerated functions and definitions
# note: also pulls in common which provides AndorError and several other constants
from . import wrapper
from .wrapper import *
# Provided for reference purposes, the FeatureStrings list contains all the "feature strings"
# listed in the Andor SDK documentation. The value given for the Feature argument to functions
# provided by this module should be a string appearing in this list.
FeatureStrings = [
'AccumulateCount', # zyla only
'AcquisitionStart',
'AcquisitionStop',
'AOIBinning',
'AOIHBin',
'AOIHeight',
'AOILeft',
'AOIStride',
'AOITop',
'AOIVBin',
'AOIWidth',
'AuxiliaryOutSource',
'Baseline',
'BitDepth',
'BufferOverflowEvent',
'BytesPerPixel',
'CameraAcquiring',
'CameraFamily', # sona only
'CameraModel',
'CameraName',
'CameraPresent',
'CycleMode',
'DeviceCount', # system
'ElectronicShutteringMode',
'EventEnable',
'EventsMissedEvent',
'EventSelector',
'ExposureTime',
'ExposureEndEvent',
'ExposureStartEvent',
'ExternalTriggerDelay',
'FanSpeed',
'FirmwareVersion',
'FrameCount',
'FrameRate',
'FullAOIControl',
'GainMode', # sona only
'ImageSizeBytes',
'InterfaceType',
'IOInvert',
'IOSelector',
'LogLevel', # system
'LUTIndex',
'LUTValue',
'MaxInterfaceTransferRate',
'MetadataEnable',
'MetadataFrame',
'MetadataTimestamp',
'Overlap',
'PixelEncoding',
'PixelHeight',
'PixelReadoutRate',
'PixelWidth',
'ReadoutTime',
'RollingShutterGlobalClear', # zyla only
'RowNExposureEndEvent',
'RowNExposureStartEvent',
'RowReadTime',
'SensorCooling',
'SensorHeight',
'SensorTemperature',
'SensorWidth',
'SerialNumber',
'SimplePreAmpGainControl', # deprecated on sona
'SoftwareTrigger',
'SoftwareVersion', # system
'SpuriousNoiseFilter',
'StaticBlemishCorrection', # zyla only
'TemperatureControl',
'TemperatureStatus',
'TimestampClock',
'TimestampClockFrequency',
'TimestampClockReset',
'TriggerMode',
'VerticallyCenterAOI'
]
_AT_HANDLE_SYSTEM = 1
def _string_for_handle(handle, feature):
wrapper._at_core_lib.AT_GetString(handle, feature, wrapper._at_wchar_scratch, wrapper._at_wchar_scratch._length_)
return wrapper._at_wchar_scratch.value
def _init_core_lib(corepath='libatcore.so'):
if wrapper._at_core_lib is not None:
return
wrapper._at_core_lib = ctypes.CDLL(corepath)
wrapper._setup_core_functions()
wrapper._at_core_lib.AT_InitialiseLibrary()
atexit.register(wrapper._at_core_lib.AT_FinaliseLibrary)
def _init_util_lib(utilpath='libatutility.so'):
if wrapper._at_util_lib is not None:
return
wrapper._at_util_lib = ctypes.CDLL(utilpath)
wrapper._setup_util_functions()
wrapper._at_util_lib.AT_InitialiseUtilityLibrary()
atexit.register(wrapper._at_util_lib.AT_FinaliseUtilityLibrary)
def list_cameras():
devices_attached = wrapper._at_core_lib.AT_GetInt(_AT_HANDLE_SYSTEM, 'DeviceCount')
cameras = []
for i in range(devices_attached):
handle = wrapper._at_core_lib.AT_Open(i)
cameras.append(_string_for_handle(handle, 'CameraModel'))
wrapper._at_core_lib.AT_Close(handle)
return cameras
def _init_camera():
if wrapper._at_camera_handle is not None:
return
devices_attached = wrapper._at_core_lib.AT_GetInt(_AT_HANDLE_SYSTEM, 'DeviceCount')
    # Even on the scope machine, the default Andor configuration includes two
    # virtual cameras, for a total of three camera devices. A hardware camera
    # will take device index 0, provided exactly one hardware camera is
    # attached, which is the assumption made here. The camera's model name is
    # returned so the caller can verify it is the expected device.
if devices_attached < 3:
raise AndorError('No Andor cameras detected. Is the camera turned on?')
wrapper._at_camera_handle = wrapper._at_core_lib.AT_Open(0)
camera_name = GetString('CameraModel')
atexit.register(close_camera)
return camera_name
def initialize():
"""Initialize the andor libraries."""
_init_core_lib()
_init_util_lib()
camera_name = _init_camera()
software_version = _string_for_handle(_AT_HANDLE_SYSTEM, 'SoftwareVersion')
return camera_name, software_version
def close_camera():
if wrapper._at_camera_handle is not None:
wrapper._at_core_lib.AT_Close(wrapper._at_camera_handle)
        wrapper._at_camera_handle = None
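# Hypothetical usage from client code (not part of this module). Assumes the
# Andor SDK shared libraries are installed and a camera is attached; 'camera'
# stands for however this module is imported:
#
#   name, sdk_version = camera.initialize()
#   print(name, sdk_version)        # e.g. model string and SDK version
#   print(camera.list_cameras())    # model strings of all attached devices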
from app.validation.error_messages import error_messages
from tests.integration.integration_test_case import IntegrationTestCase
class TestSaveSignOut(IntegrationTestCase):
def test_save_sign_out_with_mandatory_question_not_answered(self):
        # We can save and go to the sign-out page without having to fill in a mandatory answer
# Given
self.launchSurvey('test', '0205', account_service_url='https://localhost/my-account', account_service_log_out_url='https://localhost/logout')
# When
self.post(action='start_questionnaire')
self.post(post_data={'total-retail-turnover': '1000'}, action='save_sign_out')
# Then we are presented with the sign out page
self.assertInUrl('/logout')
def test_save_sign_out_with_non_mandatory_validation_error(self):
        # We can't save if there is a validation error; this doesn't include missing a mandatory question
# Given
self.launchSurvey('test', '0205')
# When
self.post(action='start_questionnaire')
self.post(post_data={'total-retail-turnover': 'error'}, action='save_sign_out')
# Then we are presented with an error message
self.assertRegexPage(error_messages['INVALID_NUMBER'])
def test_save_sign_out_complete_a_block_then_revisit_it(self):
        # If a user completes a block but then goes back and uses "save and come back later"
        # on that block, the block should no longer be considered complete, and on
        # re-authentication the survey should return to it
self.launchSurvey('test', '0102')
self.post(action='start_questionnaire')
block_one_url = self.last_url
post_data = {
'period-from-day': '01',
'period-from-month': '4',
'period-from-year': '2016',
'period-to-day': '30',
'period-to-month': '4',
'period-to-year': '2016'
}
self.post(post_data)
# We go back to the first page and save and complete later
self.get(block_one_url)
self.post(action='save_sign_out')
# We re-authenticate and check we are on the first page
self.launchSurvey('test', '0102')
self.assertEqual(block_one_url, self.last_url)
def test_sign_out_on_introduction_page(self):
# Given
self.launchSurvey('test', '0205', account_service_url='https://localhost/my-account', account_service_log_out_url='https://localhost/logout')
# When
self.post(action='sign_out')
# Then we are presented with the sign out page
self.assertInUrl('/logout')
def test_thank_you_without_logout_url(self):
"""
If the signed-out url is hit but there is no account_service_log_out_url, then a sign out page is rendered.
"""
self.launchSurvey('test', 'textarea')
self.post({'answer': 'This is an answer'})
token = self.last_csrf_token
self.post(action=None)
self.assertInUrl('thank-you')
self.last_csrf_token = token
self.post(action='sign_out')
self.assertInUrl('/signed-out')
self.assertInBody('Your survey answers have been saved. You are now signed out')
def test_thank_you_page_post_without_action(self):
"""
If the thank you page is posted to without an action,
it takes you back to the thank you page.
"""
self.launchSurvey('test', 'textarea')
self.post({'answer': 'This is an answer'})
token = self.last_csrf_token
self.post(action=None)
self.assertInUrl('thank-you')
self.last_csrf_token = token
self.post(action=None)
self.assertInUrl('/thank-you')
import torch
import torch.nn as nn, torch.nn.functional as F
from torch.nn.parameter import Parameter
import math
from torch_scatter import scatter
from torch_geometric.utils import softmax
# NOTE: cannot tell which implementation is better statistically
def glorot(tensor):
if tensor is not None:
stdv = math.sqrt(6.0 / (tensor.size(-2) + tensor.size(-1)))
tensor.data.uniform_(-stdv, stdv)
def normalize_l2(X):
"""Row-normalize matrix"""
rownorm = X.detach().norm(dim=1, keepdim=True)
scale = rownorm.pow(-1)
scale[torch.isinf(scale)] = 0.
X = X * scale
return X
# v1: X -> XW -> AXW -> norm
class UniSAGEConv(nn.Module):
def __init__(self, args, in_channels, out_channels, heads=8, dropout=0., negative_slope=0.2):
super().__init__()
# TODO: bias?
self.W = nn.Linear(in_channels, heads * out_channels, bias=False)
self.heads = heads
self.in_channels = in_channels
self.out_channels = out_channels
self.negative_slope = negative_slope
self.dropout = dropout
self.args = args
def __repr__(self):
return '{}({}, {}, heads={})'.format(self.__class__.__name__,
self.in_channels,
self.out_channels, self.heads)
def forward(self, X, vertex, edges):
N = X.shape[0]
# X0 = X # NOTE: reserved for skip connection
X = self.W(X)
Xve = X[vertex] # [nnz, C]
Xe = scatter(Xve, edges, dim=0, reduce=self.args.first_aggregate) # [E, C]
Xev = Xe[edges] # [nnz, C]
Xv = scatter(Xev, vertex, dim=0, reduce=self.args.second_aggregate, dim_size=N) # [N, C]
X = X + Xv
if self.args.use_norm:
X = normalize_l2(X)
# NOTE: concat heads or mean heads?
# NOTE: normalize here?
# NOTE: skip concat here?
return X
# v1: X -> XW -> AXW -> norm
class UniGINConv(nn.Module):
def __init__(self, args, in_channels, out_channels, heads=8, dropout=0., negative_slope=0.2):
super().__init__()
self.W = nn.Linear(in_channels, heads * out_channels, bias=False)
self.heads = heads
self.in_channels = in_channels
self.out_channels = out_channels
self.negative_slope = negative_slope
self.dropout = dropout
self.eps = nn.Parameter(torch.Tensor([0.]))
self.args = args
def __repr__(self):
return '{}({}, {}, heads={})'.format(self.__class__.__name__,
self.in_channels,
self.out_channels, self.heads)
def forward(self, X, vertex, edges):
N = X.shape[0]
# X0 = X # NOTE: reserved for skip connection
# v1: X -> XW -> AXW -> norm
X = self.W(X)
Xve = X[vertex] # [nnz, C]
Xe = scatter(Xve, edges, dim=0, reduce=self.args.first_aggregate) # [E, C]
Xev = Xe[edges] # [nnz, C]
Xv = scatter(Xev, vertex, dim=0, reduce='sum', dim_size=N) # [N, C]
X = (1 + self.eps) * X + Xv
if self.args.use_norm:
X = normalize_l2(X)
# NOTE: concat heads or mean heads?
# NOTE: normalize here?
# NOTE: skip concat here?
return X
# v1: X -> XW -> AXW -> norm
class UniGCNConv(nn.Module):
def __init__(self, args, in_channels, out_channels, heads=8, dropout=0., negative_slope=0.2):
super().__init__()
self.W = nn.Linear(in_channels, heads * out_channels, bias=False)
self.heads = heads
self.in_channels = in_channels
self.out_channels = out_channels
self.negative_slope = negative_slope
self.dropout = dropout
self.args = args
def __repr__(self):
return '{}({}, {}, heads={})'.format(self.__class__.__name__,
self.in_channels,
self.out_channels, self.heads)
def forward(self, X, vertex, edges):
N = X.shape[0]
degE = self.args.degE
degV = self.args.degV
# v1: X -> XW -> AXW -> norm
X = self.W(X)
Xve = X[vertex] # [nnz, C]
Xe = scatter(Xve, edges, dim=0, reduce=self.args.first_aggregate) # [E, C]
Xe = Xe * degE
Xev = Xe[edges] # [nnz, C]
Xv = scatter(Xev, vertex, dim=0, reduce='sum', dim_size=N) # [N, C]
Xv = Xv * degV
X = Xv
if self.args.use_norm:
X = normalize_l2(X)
# NOTE: skip concat here?
return X
# v2: X -> AX -> norm -> AXW
class UniGCNConv2(nn.Module):
def __init__(self, args, in_channels, out_channels, heads=8, dropout=0., negative_slope=0.2):
super().__init__()
self.W = nn.Linear(in_channels, heads * out_channels, bias=True)
self.heads = heads
self.in_channels = in_channels
self.out_channels = out_channels
self.negative_slope = negative_slope
self.dropout = dropout
self.args = args
def __repr__(self):
return '{}({}, {}, heads={})'.format(self.__class__.__name__,
self.in_channels,
self.out_channels, self.heads)
def forward(self, X, vertex, edges):
N = X.shape[0]
degE = self.args.degE
degV = self.args.degV
# v3: X -> AX -> norm -> AXW
Xve = X[vertex] # [nnz, C]
Xe = scatter(Xve, edges, dim=0, reduce=self.args.first_aggregate) # [E, C]
Xe = Xe * degE
Xev = Xe[edges] # [nnz, C]
Xv = scatter(Xev, vertex, dim=0, reduce='sum', dim_size=N) # [N, C]
Xv = Xv * degV
X = Xv
if self.args.use_norm:
X = normalize_l2(X)
X = self.W(X)
        # NOTE: result might be slightly unstable
# NOTE: skip concat here?
return X
class UniGATConv(nn.Module):
def __init__(self, args, in_channels, out_channels, heads=8, dropout=0., negative_slope=0.2, skip_sum=False):
super().__init__()
self.W = nn.Linear(in_channels, heads * out_channels, bias=False)
self.att_v = nn.Parameter(torch.Tensor(1, heads, out_channels))
self.att_e = nn.Parameter(torch.Tensor(1, heads, out_channels))
self.heads = heads
self.in_channels = in_channels
self.out_channels = out_channels
self.attn_drop = nn.Dropout(dropout)
self.leaky_relu = nn.LeakyReLU(negative_slope)
self.skip_sum = skip_sum
self.args = args
self.reset_parameters()
def __repr__(self):
return '{}({}, {}, heads={})'.format(self.__class__.__name__,
self.in_channels,
self.out_channels, self.heads)
def reset_parameters(self):
glorot(self.att_v)
glorot(self.att_e)
def forward(self, X, vertex, edges):
H, C, N = self.heads, self.out_channels, X.shape[0]
# X0 = X # NOTE: reserved for skip connection
X0 = self.W(X)
X = X0.view(N, H, C)
Xve = X[vertex] # [nnz, H, C]
Xe = scatter(Xve, edges, dim=0, reduce=self.args.first_aggregate) # [E, H, C]
        alpha_e = (Xe * self.att_e).sum(-1) # [E, H]
        a_ev = alpha_e[edges]
        alpha = a_ev # recommended to use this
alpha = self.leaky_relu(alpha)
alpha = softmax(alpha, vertex, num_nodes=N)
alpha = self.attn_drop( alpha )
alpha = alpha.unsqueeze(-1)
Xev = Xe[edges] # [nnz, H, C]
Xev = Xev * alpha
Xv = scatter(Xev, vertex, dim=0, reduce='sum', dim_size=N) # [N, H, C]
X = Xv
X = X.view(N, H * C)
if self.args.use_norm:
X = normalize_l2(X)
if self.skip_sum:
X = X + X0
# NOTE: concat heads or mean heads?
# NOTE: skip concat here?
return X
__all_convs__ = {
'UniGAT': UniGATConv,
'UniGCN': UniGCNConv,
'UniGCN2': UniGCNConv2,
'UniGIN': UniGINConv,
'UniSAGE': UniSAGEConv,
}
class UniGNN(nn.Module):
def __init__(self, args, nfeat, nhid, nclass, nlayer, nhead, V, E):
"""UniGNN
Args:
args (NamedTuple): global args
nfeat (int): dimension of features
nhid (int): dimension of hidden features, note that actually it\'s #nhid x #nhead
nclass (int): number of classes
nlayer (int): number of hidden layers
nhead (int): number of conv heads
V (torch.long): V is the row index for the sparse incident matrix H, |V| x |E|
E (torch.long): E is the col index for the sparse incident matrix H, |V| x |E|
"""
super().__init__()
Conv = __all_convs__[args.model_name]
self.conv_out = Conv(args, nhid * nhead, nclass, heads=1, dropout=args.attn_drop)
self.convs = nn.ModuleList(
[ Conv(args, nfeat, nhid, heads=nhead, dropout=args.attn_drop)] +
[Conv(args, nhid * nhead, nhid, heads=nhead, dropout=args.attn_drop) for _ in range(nlayer-2)]
)
self.V = V
self.E = E
act = {'relu': nn.ReLU(), 'prelu':nn.PReLU() }
self.act = act[args.activation]
self.input_drop = nn.Dropout(args.input_drop)
self.dropout = nn.Dropout(args.dropout)
        self.type_norm = args.type_norm
        self.num_groups = args.num_groups
        self.skip_weight = args.skip_weight
if self.type_norm in ['None', 'batch', 'pair']:
skip_connect = False
else:
skip_connect = True
self.layers_bn = torch.nn.ModuleList([])
for _ in range(nlayer-1):
self.layers_bn.append(batch_norm(nhid * nhead, self.type_norm, skip_connect, self.num_groups, self.skip_weight,
args.skipweight_learnable))
def forward(self, X):
V, E = self.V, self.E
X = self.input_drop(X)
for i, conv in enumerate(self.convs):
X = conv(X, V, E)
X=self.layers_bn[i](X)
X = self.act(X)
X = self.dropout(X)
X = self.conv_out(X, V, E)
return F.log_softmax(X, dim=1)
class UniGCNIIConv(nn.Module):
def __init__(self, args, in_features, out_features):
super().__init__()
self.W = nn.Linear(in_features, out_features, bias=False)
self.args = args
def forward(self, X, vertex, edges, alpha, beta, X0):
N = X.shape[0]
degE = self.args.degE
degV = self.args.degV
Xve = X[vertex] # [nnz, C]
Xe = scatter(Xve, edges, dim=0, reduce=self.args.first_aggregate) # [E, C]
Xe = Xe * degE
Xev = Xe[edges] # [nnz, C]
Xv = scatter(Xev, vertex, dim=0, reduce='sum', dim_size=N) # [N, C]
Xv = Xv * degV
X = Xv
if self.args.use_norm:
X = normalize_l2(X)
Xi = (1-alpha) * X + alpha * X0
X = (1-beta) * Xi + beta * self.W(Xi)
return X
class UniGCNII(nn.Module):
def __init__(self, args, nfeat, nhid, nclass, nlayer, nhead, V, E):
"""UniGNNII
Args:
args (NamedTuple): global args
nfeat (int): dimension of features
nhid (int): dimension of hidden features, note that actually it\'s #nhid x #nhead
nclass (int): number of classes
nlayer (int): number of hidden layers
nhead (int): number of conv heads
V (torch.long): V is the row index for the sparse incident matrix H, |V| x |E|
E (torch.long): E is the col index for the sparse incident matrix H, |V| x |E|
"""
super().__init__()
self.V = V
self.E = E
nhid = nhid * nhead
act = {'relu': nn.ReLU(), 'prelu':nn.PReLU() }
self.act = act[args.activation]
self.input_drop = nn.Dropout(args.input_drop)
self.dropout = nn.Dropout(args.dropout)
self.convs = torch.nn.ModuleList()
self.convs.append(torch.nn.Linear(nfeat, nhid))
for _ in range(nlayer):
self.convs.append(UniGCNIIConv(args, nhid, nhid))
self.convs.append(torch.nn.Linear(nhid, nclass))
self.reg_params = list(self.convs[1:-1].parameters())
self.non_reg_params = list(self.convs[0:1].parameters())+list(self.convs[-1:].parameters())
self.dropout = nn.Dropout(args.dropout)
self.alpha_learnable=args.alpha_learnable
self.learnable_alpha= Parameter(torch.FloatTensor(nlayer, 1))
self.reset_parameters()
def reset_parameters(self):
        self.learnable_alpha.data.uniform_(0.1, 0.1)  # constant init: every alpha starts at 0.1
def forward(self, x):
V, E = self.V, self.E
lamda, alpha = 0.2, 0.1
x = self.dropout(x)
x = F.relu(self.convs[0](x))
x0 = x
for i,con in enumerate(self.convs[1:-1]):
if self.alpha_learnable:
alpha= self.learnable_alpha[i]
x = self.dropout(x)
beta = math.log(lamda/(i+1)+1)
x = F.relu(con(x, V, E, alpha, beta, x0))
x = self.dropout(x)
x = self.convs[-1](x)
return F.log_softmax(x, dim=1)
class batch_norm(torch.nn.Module):
def __init__(self, dim_hidden, type_norm, skip_connect=False, num_groups=1,
skip_weight=0.005,sw_learnable=False,multiple=1,mul_learnable=False):
super(batch_norm, self).__init__()
self.type_norm = type_norm
self.skip_connect = skip_connect
self.num_groups = num_groups
self.skip_weight = skip_weight
self.dim_hidden = dim_hidden
        self.sw_learnable = sw_learnable
        self.multiple = multiple
        self.mul_learnable = mul_learnable
if self.type_norm == 'batch':
self.bn = torch.nn.BatchNorm1d(dim_hidden, momentum=0.3)
elif self.type_norm == 'group':
self.bn = torch.nn.BatchNorm1d(dim_hidden*self.num_groups, momentum=0.3)
self.group_func = torch.nn.Linear(dim_hidden, self.num_groups, bias=True)
else:
pass
self.lam=Parameter(torch.FloatTensor(1, 1))
self.mul=Parameter(torch.FloatTensor(1, 1))
#self.lam =Parameter(torch.FloatTensor(num_groups, 1))
self.reset_parameters()
def reset_parameters(self):
self.lam.data.uniform_(self.skip_weight, self.skip_weight)
self.mul.data.uniform_(self.multiple, self.multiple)
def forward(self, x):
if self.type_norm == 'None':
return x
elif self.type_norm == 'batch':
# print(self.bn.running_mean.size())
return self.bn(x)
elif self.type_norm == 'pair':
col_mean = x.mean(dim=0)
x = x - col_mean
rownorm_mean = (1e-6 + x.pow(2).sum(dim=1).mean()).sqrt()
x = x / rownorm_mean
if self.mul_learnable:
x=x*self.mul
else:
x=x*self.multiple
return x
elif self.type_norm == 'group':
if self.num_groups == 1:
x_temp = self.bn(x)
else:
score_cluster = F.softmax(self.group_func(x), dim=1)
x_temp = torch.cat([score_cluster[:, group].unsqueeze(dim=1) * x for group in range(self.num_groups)], dim=1)
#x_temp = torch.cat([self.lam[group]*score_cluster[:, group].unsqueeze(dim=1) * x for group in range(self.num_groups)],dim=1)
x_temp = self.bn(x_temp).view(-1, self.num_groups, self.dim_hidden).sum(dim=1)
#x_temp = self.bn(x_temp).view(-1, self.num_groups, self.dim_hidden).self.lam*average(axis=1,weights=torch.ones(num_groups))
if self.sw_learnable:
x = x + x_temp * self.lam
else:
x = x + x_temp * self.skip_weight
'''
for i in range(self.num_groups):
x=x+x_temp[:,i,:]*self.lam[i]
'''
return x
else:
            raise NotImplementedError(f'normalization type {self.type_norm!r} has not been implemented')
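# Minimal smoke test (an addition, not in the original file). It builds the
# (vertex, edges) incidence index pair for a toy hypergraph -- hyperedge 0
# covers nodes {0, 1, 2} and hyperedge 1 covers {2, 3} -- and runs a single
# UniSAGEConv layer; the SimpleNamespace carries only the fields that
# UniSAGEConv actually reads from `args`. Requires torch_scatter.
if __name__ == '__main__':
    from types import SimpleNamespace
    toy_args = SimpleNamespace(first_aggregate='mean',
                               second_aggregate='sum',
                               use_norm=True)
    vertex = torch.tensor([0, 1, 2, 2, 3])  # node index of each incidence
    edges = torch.tensor([0, 0, 0, 1, 1])   # hyperedge index of each incidence
    conv = UniSAGEConv(toy_args, in_channels=8, out_channels=4, heads=1)
    out = conv(torch.randn(4, 8), vertex, edges)
    print(out.shape)  # torch.Size([4, 4])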
#!/bin/env python3
# import os
# os.environ['PYTHONASYNCIODEBUG'] = '1'
# import logging
# logging.getLogger('asyncio').setLevel(logging.DEBUG)
from datetime import datetime
import traceback
import atexit
import argparse
import os
from os import path
import sys
import logging
from struct import pack
import random
from time import time, perf_counter  # time.sleep is unused; asyncio.sleep below is the one awaited
from socket import socket
from configparser import ConfigParser
from shutil import which
from asyncio import sleep, Protocol, get_event_loop, Task
from pickle import dumps
import csv
from ..consts import BUILD_TIMESTAMP_VARNAME
from ..util import version, resolve, create_process, kill_all_processes, gcd
from ..util import get_python_executable  # assumption: referenced below but not imported in the original
from ..util import verbose as util_verbose
from ..lib import AckTimeout, ClientProtocolMixin, SamplerSample
from ..varsfile import merge_vars_from_file_and_list
from ..dwarfutil import read_elf_variables
logger = logging.getLogger()
module_dir = os.path.dirname(os.path.realpath(__file__))
pc_dir = os.path.join(module_dir, '..', '..', '..', 'examples', 'pc_platform')
pc_executable = os.path.join(pc_dir, 'pc')
def start_fake_bench(port):
    # start_fake_sine requires build_timestamp_value; the bench fake does not
    # check timestamps, so 0 is passed as a placeholder
    return start_fake_sine(ticks_per_second=0, port=port, build_timestamp_value=0)
def start_fake_sine(ticks_per_second, port, build_timestamp_value):
# Run in a separate process so it doesn't hog the CPython lock
# Use our executable to work with a development environment (python executable)
# or pyinstaller (emotool.exe)
if sys.argv[0].endswith(path.basename(get_python_executable())):
cmdline = sys.argv[:2]
elif path.isfile(sys.argv[0]) or path.isfile(sys.argv[0] + '.exe'):
cmdline = [sys.argv[0]]
    elif which(sys.argv[0]):
        cmdline = [sys.argv[0]]
    else:
        raise RuntimeError('cannot determine how to re-execute {!r}'.format(sys.argv[0]))
# force usage of python if the first parameter is a python script; use extension as predicate
if cmdline[0].endswith('.py'):
cmdline = [get_python_executable()] + cmdline
#print("{sys_argv} ; which said {which}".format(sys_argv=repr(sys.argv), which=which(sys.argv[0]))
return create_process(cmdline + ['--embedded', '--ticks-per-second', str(ticks_per_second), '--port', str(port),
'--build-timestamp-value', str(build_timestamp_value)])
def start_pc(port, exe, debug):
exe = os.path.realpath(exe)
cmdline = [exe, str(port)]
cmdline_str = ' '.join(cmdline)
debug_cmdline = 'EMOLOG_PC_PORT={port} cgdb --args {cmdline_str}'.format(port=port, cmdline_str=cmdline_str)
os.environ['EMOLOG_PC_PORT'] = str(port)
if debug:
input("press enter once you ran pc with: {debug_cmdline}".format(debug_cmdline=debug_cmdline))
return
return create_process(cmdline)
def iterate(prefix, initial):
while True:
yield '{}_{:03}.csv'.format(prefix, initial)
initial += 1
def next_available(folder, prefix):
filenames = iterate(prefix, 1)
for filename in filenames:
candidate = os.path.join(folder, filename)
if not os.path.exists(candidate):
return candidate
def setup_logging(filename, silent):
if silent:
logger.setLevel(logging.ERROR)
else:
logger.setLevel(logging.DEBUG)
if filename:
file_handler = logging.FileHandler(filename=filename)
file_handler.setLevel(level=logging.DEBUG)
file_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler.setFormatter(file_formatter)
logger.addHandler(file_handler)
stream_formatter = logging.Formatter('%(message)s')
stream_handler = logging.StreamHandler()
stream_handler.setLevel(level=logging.INFO)
stream_handler.setFormatter(stream_formatter)
logger.addHandler(stream_handler)
logger.debug('debug first')
logger.info('info first')
def start_serial_process(serialurl, baudrate, hw_flow_control, port):
"""
    Start the serial2tcp bridge process; the caller is expected to poll the
    TCP port until it accepts a connection
"""
serial2tcp_cmd = create_python_process_cmdline('serial2tcp.py')
if hw_flow_control is True:
serial2tcp_cmd += ['-r']
serial2tcp_cmd += ' -b {} -p {} -P {}'.format(baudrate, serialurl, port).split()
serial_subprocess = create_process(serial2tcp_cmd)
return serial_subprocess
def create_python_process_cmdline(script):
script_path = os.path.join(module_dir, script)
return [sys.executable, script_path]
def create_python_process_cmdline_command(command):
return [sys.executable, '-c', command]
class EmoToolClient(ClientProtocolMixin):
def __init__(self, ticks_per_second, verbose, dump, debug, csv_writer_factory=None):
if debug:
print("timeout set to one hour for debugging (gdb)")
ClientProtocolMixin.ACK_TIMEOUT_SECONDS = 3600.0
super().__init__(verbose=verbose, dump=dump,
ticks_per_second=ticks_per_second,
csv_writer_factory=csv_writer_factory)
@property
def running(self):
return self.cylib.running()
@property
def ticks_lost(self):
return self.cylib.csv_handler.ticks_lost
@property
def samples_received(self):
return self.cylib.csv_handler.samples_received
@property
def csv_filename(self):
return self.cylib.csv_handler.csv_filename
def reset(self, *args, **kw):
self.last_samples_received = None # don't trigger the check_progress() watchdog on the next sample
self.cylib.csv_handler.reset(*args, **kw)
def register_listener(self, *args, **kw):
self.cylib.csv_handler.register_listener(*args, **kw)
def data_received(self, data):
self.cylib.data_received(data)
async def start_transport(client, args):
loop = get_event_loop()
port = random.randint(10000, 50000)
if args.fake is not None:
if args.fake == 'gen':
start_fake_sine(ticks_per_second=args.ticks_per_second, port=port, build_timestamp_value=args.fake_gen_build_timestamp_value)
elif args.fake == 'bench':
start_fake_bench(port)
elif args.fake == 'pc' or os.path.exists(args.fake):
exe = pc_executable if args.fake == 'pc' else args.fake
start_pc(port=port, exe=exe, debug=args.debug)
else:
print("error: unfinished support for fake {fake}".format(fake=args.fake))
raise SystemExit
else:
start_serial_process(serialurl=args.serial, baudrate=args.baud, hw_flow_control=args.hw_flow_control, port=port)
attempt = 0
while attempt < 10:
attempt += 1
await sleep(0.1)
s = socket()
try:
s.connect(('127.0.0.1', port))
except:
pass
else:
break
client_transport, client2 = await loop.create_connection(lambda: client, sock=s)
assert client2 is client
args = None
def cancel_outstanding_tasks():
for task in Task.all_tasks():
logger.warning('canceling task {}'.format(task))
task.cancel()
def windows_try_getch():
import msvcrt
if msvcrt.kbhit():
return msvcrt.getch()
return None # be explicit
if sys.platform == 'win32':
try_getch_message = "Press any key to stop capture early..."
try_getch = windows_try_getch
else:
try_getch_message = "Press Ctrl-C to stop capture early..."
def try_getch():
return None
async def cleanup(args, client):
if not hasattr(client, 'transport') or client.transport is None:
cancel_outstanding_tasks()
return
if not args.no_cleanup:
logger.info("sending sampler stop")
try:
await client.send_sampler_stop()
except:
logger.info("exception when sending sampler stop in cleanup()")
client.exit_gracefully()
if client.transport is not None:
client.transport.close()
kill_all_processes()
def parse_args(args=None):
parser = argparse.ArgumentParser(
description='Emolog protocol capture tool. Implements emolog client side, captures a given set of variables to a csv file')
parser.add_argument('--fake', # TODO: can I have a hook for choices? i.e. choices=ChoicesOrExecutable['gen', 'pc', 'bench'],
help='debug only - fake a client - either generated or pc controller')
now_timestamp = int(datetime.now().timestamp() * 1000)
parser.add_argument('--fake-elf-build-timestamp-value', type=int, default=now_timestamp, help='debug only - fake build timestamp value (address is fixed)')
parser.add_argument('--fake-gen-build-timestamp-value', type=int, default=now_timestamp, help='debug only - fake build timestamp value (address is fixed)')
parser.add_argument('--serial', default='auto', help='serial URL or device name') # see http://pythonhosted.org/pyserial/pyserial_api.html#serial.serial_for_url
parser.add_argument('--baud', default=8000000, help='baudrate, using RS422 up to 12000000 theoretically', type=int)
parser.add_argument('--hw_flow_control', default=False, action='store_true', help='use CTS/RTS signals for flow control')
parser.add_argument('--elf', default=None, help='elf executable running on embedded side')
parser.add_argument('--var', default=[], action='append',
help='add a single var, example "foo,1,0" = "varname,ticks,tickphase"')
parser.add_argument('--snapshotfile', help='file containing variable definitions to be taken once at startup')
parser.add_argument('--varfile', help='file containing variable definitions, identical to multiple --var calls')
group = parser.add_mutually_exclusive_group()
group.add_argument('--out', help='Output file name. ".csv" extension is added if missing. '
'File is overwritten if already exists.')
group.add_argument('--out_prefix', default='emo', help='Output file prefix. Output is saved to the first free '
'(not already existing) file of the format "prefix_xxx.csv", '
'where xxx is a sequential number starting from "001"')
parser.add_argument('--csv-factory', help='advanced: module[.module]*.function to use as factory for csv file writing', default=None)
parser.add_argument('--verbose', default=True, action='store_false', dest='silent',
help='turn on verbose logging; affects performance under windows')
parser.add_argument('--verbose-kill', default=False, action='store_true')
parser.add_argument('--log', default=None, help='log messages and other debug/info logs to this file')
parser.add_argument('--runtime', type=float, default=3.0, help='quit after given seconds. use 0 for endless run.')
parser.add_argument('--no-cleanup', default=False, action='store_true', help='do not stop sampler on exit')
parser.add_argument('--dump')
parser.add_argument('--ticks-per-second', default=1000000 / 50, type=float,
help='number of ticks per second. used in conjunction with runtime')
parser.add_argument('--debug', default=False, action='store_true', help='produce more verbose debugging output')
# Server - used for GUI access
parser.add_argument('--listen', default=None, type=int, help='enable listening TCP port for samples') # later: add a command interface, making this suitable for interactive GUI
parser.add_argument('--gui', default=False, action='store_true', help='launch graphing gui in addition to saving')
# Embedded
parser.add_argument('--embedded', default=False, action='store_true', help='debugging: be a fake embedded target')
    parser.add_argument('--check-timestamp', action='store_true', default=False, help='verify target build timestamp against the ELF (work in progress; off by default)')
ret, unparsed = parser.parse_known_args(args=args)
if ret.fake is None:
if not ret.elf and not ret.embedded:
# elf required unless fake_sine in effect
parser.print_usage()
print("{e}: error: the following missing argument is required: --elf".format(e=sys.argv[0]))
raise SystemExit
else:
if ret.fake == 'gen':
# fill in fake vars
ret.var = [
# name, ticks, phase
'a,1,0',
'b,1,0',
'c,1,0',
'd,1,0',
'e,1,0',
'f,1,0',
'g,1,0',
'h,1,0',
]
else:
if ret.elf is None:
if ret.fake == 'pc':
if not os.path.exists(pc_executable):
print("missing pc ELF file: {e}".format(e=pc_executable))
raise SystemExit
ret.elf = pc_executable
else:
ret.elf = ret.fake
if ret.varfile is None:
ret.varfile = os.path.join(module_dir, '..', '..', 'vars.csv')
ret.snapshotfile = os.path.join(module_dir, '..', '..', 'snapshot_vars.csv')
return ret
def bandwidth_calc(args, variables):
"""
:param variables: list of dictionaries
:return: average baud rate (considering 8 data bits, 1 start & stop bits)
"""
packets_per_second = args.ticks_per_second # simplification: assume a packet every tick (upper bound)
header_average = packets_per_second * SamplerSample.empty_size()
payload_average = sum(args.ticks_per_second / v['period_ticks'] * v['size'] for v in variables)
return (header_average + payload_average) * 10
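# Worked example of the bound above (illustrative numbers, not a real config):
# with ticks_per_second = 20000, an assumed 11-byte empty sample header
# (the real value comes from SamplerSample.empty_size()) and two 4-byte
# variables sampled every tick:
#   header_average  = 20000 * 11 = 220000 bytes/s
#   payload_average = 2 * (20000 / 1) * 4 = 160000 bytes/s
#   baud ~= (220000 + 160000) * 10 = 3.8 Mbps
# The factor of 10 converts bytes to wire bits: 8 data bits + start + stop.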
async def initialize_board(client, variables):
logger.debug("about to send version")
await client.send_version()
retries = max_retries = 3
while retries > 0:
try:
logger.debug("about to send sampler stop")
await client.send_sampler_stop()
logger.debug("about to send sampler set variables")
await client.send_set_variables(variables)
logger.debug("about to send sampler start")
await client.send_sampler_start()
logger.debug("client initiated, starting to log data at rate TBD")
break
except AckTimeout:
retries -= 1
logger.info("Ack Timeout. Retry {}".format(max_retries - retries))
return retries != 0
def banner(s):
print("=" * len(s))
print(s)
print("=" * len(s))
async def run_client(args, client, variables, allow_kb_stop):
if not await initialize_board(client=client, variables=variables):
logger.error("Failed to initialize board, exiting.")
raise SystemExit
sys.stdout.flush()
logger.info('initialized board')
dt = 0.1 if args.runtime is not None else 1.0
if allow_kb_stop and try_getch_message:
print(try_getch_message)
client.start_logging_time = time()
while client.running:
if allow_kb_stop and try_getch():
break
await sleep(dt)
await client.send_sampler_stop()
async def record_snapshot(args, client, csv_filename, varsfile, extra_vars=None):
if extra_vars is None:
extra_vars = []
defs = merge_vars_from_file_and_list(filename=varsfile, def_lines=extra_vars)
names, variables = read_elf_variables(elf=args.elf, defs=defs, fake_build_timestamp=args.fake_elf_build_timestamp_value)
elf_by_name = {x['name']: x for x in variables}
client.reset(csv_filename=csv_filename, names=names, min_ticks=1, max_samples=1)
await run_client(args, client, variables, allow_kb_stop=False)
read_values = {}
try:
with open(csv_filename) as fd:
lines = list(csv.reader(fd))
except IOError as io:
logger.warning("snapshot failed, no file created")
lines = []
if len(lines) < 2:
logger.warning("snapshot failed, no data saved")
else:
read_values = dict(zip(lines[0], lines[1]))
return elf_by_name, read_values
CONFIG_FILE_NAME = 'local_machine_config.ini'
class SamplePassOn(Protocol):
def __init__(self, client):
self.client = client
def connection_made(self, transport):
self.transport = transport
self.client.register_listener(self.write_messages)
def write_messages(self, messages):
pickled_messages = dumps(messages)
self.transport.write(pack('<i', len(pickled_messages)))
self.transport.write(pickled_messages)
async def start_tcp_listener(client, port):
loop = get_event_loop()
await loop.create_server(lambda: SamplePassOn(client), host='localhost', port=port)
print("waiting on {port}".format(port=port))
async def amain_startup(args):
if not os.path.exists(CONFIG_FILE_NAME):
print("Configuration file {} not found. "
"This file is required for specifying local machine configuration such as the output folder.\n"
"Please start from the example {}.example.\n"
"Exiting.".format(CONFIG_FILE_NAME, CONFIG_FILE_NAME))
raise SystemExit
setup_logging(args.log, args.silent)
# TODO - fold this into window, make it the general IO object, so it decided to spew to stdout or to the GUI
banner("Emotool {}".format(version()))
client = EmoToolClient(ticks_per_second=args.ticks_per_second,
verbose=not args.silent, dump=args.dump, debug=args.debug,
csv_writer_factory=resolve(args.csv_factory))
await start_transport(client=client, args=args)
return client
def reasonable_timestamp_ms(timestamp):
"""
checks that the timestamp is within 100 years and not zero
this means a random value from memory will probably not be interpreted as a valid timestamp
and a better error message could be printed
"""
return timestamp != 0 and timestamp < 1000 * 3600 * 24 * 365 * 100
def check_timestamp(params, elf_variables):
if BUILD_TIMESTAMP_VARNAME not in params:
logger.error('timestamp not received from target')
raise SystemExit
read_value = int(params[BUILD_TIMESTAMP_VARNAME])
if BUILD_TIMESTAMP_VARNAME not in elf_variables:
logger.error('Timestamp variable not in ELF file. Did you add a pre-build step to generate it?')
raise SystemExit
elf_var = elf_variables[BUILD_TIMESTAMP_VARNAME]
elf_value = elf_var['init_value']
if elf_value is None or elf_var['address'] == 0:
logger.error('Bad timestamp variable in ELF: init value = {value}, address = {address}'.format(value=elf_value, address=elf_var["address"]))
raise SystemExit
elf_value = int(elf_variables[BUILD_TIMESTAMP_VARNAME]['init_value'])
if read_value != elf_value:
if not reasonable_timestamp_ms(read_value):
logger.error("Build timestamp mismatch: the embedded target probably doesn't contain a timestamp variable")
raise SystemExit
if read_value < elf_value:
logger.error('Build timestamp mismatch: target build timestamp is older than ELF')
else:
logger.error('Build timestamp mismatch: target build timestamp is newer than ELF')
raise SystemExit
print("Timestamp verified: ELF file and embedded target match")
async def amain(client, args):
defs = merge_vars_from_file_and_list(def_lines=args.var, filename=args.varfile)
names, variables = read_elf_variables(elf=args.elf, defs=defs)
config = ConfigParser()
config.read(CONFIG_FILE_NAME)
output_folder = config['folders']['output_folder']
if args.out:
if args.out[-4:] != '.csv':
args.out = args.out + '.csv'
csv_filename = os.path.join(output_folder, args.out)
else: # either --out or --out_prefix must be specified
csv_filename = next_available(output_folder, args.out_prefix)
take_snapshot = args.check_timestamp or args.snapshotfile
if take_snapshot:
print("Taking snapshot of parameters")
snapshot_output_filename = csv_filename[:-4] + '_params.csv'
(snapshot_elf_variables, params) = await record_snapshot(
args=args, client=client,
csv_filename=snapshot_output_filename,
varsfile=args.snapshotfile,
# TODO: why do we use 20000 in snapshot_vars.csv? ask Guy
extra_vars = ['{var_name},100,50'.format(var_name=BUILD_TIMESTAMP_VARNAME)] if args.check_timestamp else [])
print("parameters saved to: {}".format(snapshot_output_filename))
if args.check_timestamp:
check_timestamp(params, snapshot_elf_variables)
print("")
print("output file: {}".format(csv_filename))
bandwidth_bps = bandwidth_calc(args=args, variables=variables)
print("upper bound on bandwidth: {} Mbps out of {} ({:.3f}%)".format(
bandwidth_bps / 1e6,
args.baud / 1e6,
100 * bandwidth_bps / args.baud))
min_ticks = gcd(*(var['period_ticks'] for var in variables))
max_samples = args.ticks_per_second * args.runtime if args.runtime else 0 # TODO - off by a factor of at least min_ticks_between_samples
# TODO this corrects run-time if all vars are sampled at a low rate, but still incorrect in some cases e.g. (10, 13)
max_samples = max_samples / min_ticks
if max_samples > 0:
print("running for {} seconds = {} samples".format(args.runtime, int(max_samples)))
client.reset(csv_filename=csv_filename, names=names, min_ticks=min_ticks, max_samples=max_samples)
if args.listen:
await start_tcp_listener(client, args.listen)
start_time = time()
start_clock = perf_counter()
await run_client(args=args, client=client, variables=variables, allow_kb_stop=True)
logger.debug("stopped at time={} samples={}".format(time(), client.samples_received))
setup_time = client.start_logging_time - start_time
total_time = time() - start_time
total_clock = perf_counter() - start_clock
print("samples received: {samples_received}\nticks lost: {ticks_lost}\ntime run {total_time:#3.6} cpu %{percent} (setup time {setup_time:#3.6})".format(
samples_received=client.samples_received,
ticks_lost=client.ticks_lost,
total_time=total_time,
percent=int(total_clock * 100 / total_time),
setup_time=setup_time,
))
return client
def start_callback(args, loop):
loop.set_debug(args.debug)
try:
client = loop.run_until_complete(amain_startup(args))
except:
traceback.print_exc()
raise SystemExit
try:
client = loop.run_until_complete(amain(client=client, args=args))
except KeyboardInterrupt:
print("exiting on user ctrl-c")
except Exception as e:
logger.error("got exception {!r}".format(e))
raise
loop.run_until_complete(cleanup(args=args, client=client))
return client
def main(cmdline=None):
atexit.register(kill_all_processes)
parse_args_args = [] if cmdline is None else [cmdline]
args = parse_args(*parse_args_args)
util_verbose.kill = args.verbose_kill
if args.embedded:
from .embedded import main as embmain
embmain()
else:
loop = get_event_loop()
def exception_handler(loop, context):
print("Async Exception caught: {context}".format(context=context))
raise SystemExit
loop.set_exception_handler(exception_handler)
client = start_callback(args, loop)
if client.csv_filename is None or not os.path.exists(client.csv_filename):
print("no csv file created.")
if __name__ == '__main__':
main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
def main(options):
# test parameter handling
    print(options.infile, options.traml_in, options.outfile)
def handle_args():
import argparse
usage = ""
    usage += "\nOpenSwathFeatureXMLToTSV -- Converts a featureXML to a mProphet tsv."
parser = argparse.ArgumentParser(description = usage )
parser.add_argument('-in', dest='infile', help = 'An input file containing features [featureXML]')
parser.add_argument('-tr', dest='traml_in', help='An input file containing the transitions [TraML]')
parser.add_argument('-out', dest='outfile', help='Output mProphet TSV file [tsv]')
args = parser.parse_args(sys.argv[1:])
return args
if __name__ == '__main__':
options = handle_args()
main(options)
import kanjigrid
gridder = kanjigrid.Gridder("Kanji", 40, "Header", 52)
grading = kanjigrid.Jouyou()
with open("test.txt", "r", encoding="utf-8") as f:
data = f.read()
gridder.feed_text(data)
grid = gridder.make_grid(grading, outside_of_grading=True, stats=True, bar_graph=True)
grid.save("test.png")
if "𠮟" in grading.get_all_in_grading():
print("𠮟")
if "塡" in grading.get_all_in_grading():
print("塡")
if "叱" in grading.get_all_in_grading():
print("叱 as replacement") | 487 | 216 |
import torch
import numpy as np
import time
from spectrl.util.rl import get_rollout, test_policy
class NNParams:
'''
Defines the neural network architecture.
Parameters:
state_dim: int (continuous state dimension for nn input)
action_dim: int (action space dimension for nn output)
hidden_dim: int (hidden states in the nn)
action_bound: float
num_discrete_states: int (number of different discrete states possible)
'''
def __init__(self, state_dim, action_dim, action_bound, hidden_dim, num_discrete_states):
self.state_dim = state_dim
self.action_dim = action_dim
self.action_bound = action_bound
self.hidden_dim = hidden_dim
self.num_discrete_states = num_discrete_states
class ARSParams:
'''
HyperParameters for augmented random search.
Parameters:
n_iters: int (ending condition)
n_samples: int (N)
n_top_samples: int (b)
        delta_std: float (nu)
lr: float (alpha)
min_lr: float (minimum alpha)
'''
def __init__(self, n_iters, n_samples, n_top_samples, delta_std, lr, min_lr, log_interval=1):
self.n_iters = n_iters
self.n_samples = n_samples
self.n_top_samples = n_top_samples
self.delta_std = delta_std
self.lr = lr
self.min_lr = min_lr
self.log_interval = log_interval
class NNPolicy:
'''
Neural network policy.
params: NNParams
'''
def __init__(self, params):
# Step 1: Parameters
self.params = params
# Step 2: Construct num_discrete_states neural networks
self.input_layers = []
self.hidden_layers = []
self.output_layers = []
for i in range(self.params.num_discrete_states):
# Step 2a: Construct the input layer
input_layer = torch.nn.Linear(
self.params.state_dim, self.params.hidden_dim)
# Step 2b: Construct the hidden layer
hidden_layer = torch.nn.Linear(
self.params.hidden_dim, self.params.hidden_dim)
# Step 2c: Construct the output layer
output_layer = torch.nn.Linear(
self.params.hidden_dim, self.params.action_dim)
self.input_layers.append(input_layer)
self.hidden_layers.append(hidden_layer)
self.output_layers.append(output_layer)
# Step 3: Construct input normalization
self.mu = np.zeros(self.params.state_dim)
self.sigma_inv = np.ones(self.params.state_dim)
# Set requires_grad to False
for param in self.parameters():
param.requires_grad_(False)
def get_input(self, state):
'''
Get the neural network input from the full state
state is a pair (continuous state, discrete state).
'''
return state[0][:self.params.state_dim]
def get_action(self, state):
'''
Get the action to take in the current state.
state: (np.array, int)
'''
# Step 0: Separate discrete and continuous components
input = self.get_input(state)
# Step 1: Normalize state
input = (input - self.mu) * self.sigma_inv
# Step 2: Convert to torch
input = torch.tensor(input, dtype=torch.float)
# Step 3: Apply the input layer
hidden = torch.relu(self.input_layers[state[1]](input))
# Step 4: Apply the hidden layer
hidden = torch.relu(self.hidden_layers[state[1]](hidden))
# Step 5: Apply the output layer
output = torch.tanh(self.output_layers[state[1]](hidden))
# Step 6: Convert to numpy
actions = output.detach().numpy()
return self.params.action_bound * actions
def parameters(self):
'''
Construct the set of parameters for the policy.
Returns a list of torch parameters.
'''
parameters = []
for i in range(self.params.num_discrete_states):
parameters.extend(self.input_layers[i].parameters())
parameters.extend(self.hidden_layers[i].parameters())
parameters.extend(self.output_layers[i].parameters())
return parameters
class NNPolicySimple:
'''
Neural network policy that only looks at system state.
Ignores discrete state.
Only looks at first state_dim components of continuous state.
params: NNParams
'''
def __init__(self, params):
# Step 1: Parameters
self.params = params
# Step 2a: Construct the input layer
self.input_layer = torch.nn.Linear(
self.params.state_dim, self.params.hidden_dim)
# Step 2b: Construct the hidden layer
self.hidden_layer = torch.nn.Linear(
self.params.hidden_dim, self.params.hidden_dim)
# Step 2c: Construct the output layer
self.output_layer = torch.nn.Linear(
self.params.hidden_dim, self.params.action_dim)
# Step 3: Construct input normalization
self.mu = np.zeros(self.params.state_dim)
self.sigma_inv = np.ones(self.params.state_dim)
def get_input(self, state):
return state[0][:self.params.state_dim]
def get_action(self, state):
'''
Get the action to take in the current state.
state: (np.array, int)
'''
# Step 0: Extract the system state
input = self.get_input(state)
# Step 1: Normalize state
input = (input - self.mu) * self.sigma_inv
# Step 2: Convert to torch
input = torch.tensor(input, dtype=torch.float)
# Step 3: Apply the input layer
hidden = torch.relu(self.input_layer(input))
# Step 4: Apply the hidden layer
hidden = torch.relu(self.hidden_layer(hidden))
# Step 5: Apply the output layer
output = torch.tanh(self.output_layer(hidden))
# Step 6: Convert to numpy
actions = output.detach().numpy()
return self.params.action_bound * actions
def parameters(self):
'''
Construct the set of parameters for the policy.
Returns a list of torch parameters.
'''
parameters = []
parameters.extend(self.input_layer.parameters())
parameters.extend(self.hidden_layer.parameters())
parameters.extend(self.output_layer.parameters())
return parameters
def ars(env, nn_policy, params):
'''
Run augmented random search.
Parameters:
env: gym.Env (state is expected to be a pair (np.array, int))
Also expected to provide cum_reward() function.
nn_policy: NNPolicy
params: ARSParams
'''
best_policy = nn_policy
best_success_rate = 0
best_reward = -1e9
log_info = []
num_steps = 0
start_time = time.time()
# Step 1: Save original policy
nn_policy_orig = nn_policy
# Step 2: Initialize state distribution estimates
mu_sum = np.zeros(nn_policy.params.state_dim)
sigma_sq_sum = np.ones(nn_policy.params.state_dim) * 1e-5
n_states = 0
# Step 3: Training iterations
for i in range(params.n_iters):
# Step 3a: Sample deltas
deltas = []
for _ in range(params.n_samples):
# i) Sample delta
delta = _sample_delta(nn_policy)
# ii) Construct perturbed policies
nn_policy_plus = _get_delta_policy(
nn_policy, delta, params.delta_std)
nn_policy_minus = _get_delta_policy(
nn_policy, delta, -params.delta_std)
# iii) Get rollouts
sarss_plus = get_rollout(env, nn_policy_plus, False)
sarss_minus = get_rollout(env, nn_policy_minus, False)
num_steps += (len(sarss_plus) + len(sarss_minus))
# iv) Estimate cumulative rewards
r_plus = env.cum_reward(
np.array([state for state, _, _, _ in sarss_plus]))
r_minus = env.cum_reward(
np.array([state for state, _, _, _ in sarss_minus]))
            # v) Save delta
            deltas.append((delta, r_plus, r_minus))
            # vi) Update estimates of normalization parameters
            states = np.array([nn_policy.get_input(state)
                               for state, _, _, _ in sarss_plus + sarss_minus])
            mu_sum += np.sum(states, axis=0)  # per-dimension running sums
            sigma_sq_sum += np.sum(np.square(states), axis=0)
            n_states += len(states)
# Step 3b: Sort deltas
deltas.sort(key=lambda delta: -max(delta[1], delta[2]))
deltas = deltas[:params.n_top_samples]
# Step 3c: Compute the sum of the deltas weighted by their reward differences
delta_sum = [torch.zeros(delta_cur.shape)
for delta_cur in deltas[0][0]]
for j in range(params.n_top_samples):
# i) Unpack values
delta, r_plus, r_minus = deltas[j]
# ii) Add delta to the sum
for k in range(len(delta_sum)):
delta_sum[k] += (r_plus - r_minus) * delta[k]
# Step 3d: Compute standard deviation of rewards
sigma_r = np.std([delta[1] for delta in deltas] +
[delta[2] for delta in deltas])
# Step 3e: Compute step length
delta_step = [(params.lr * params.delta_std / (params.n_top_samples * sigma_r + 1e-8))
* delta_sum_cur
for delta_sum_cur in delta_sum]
# Step 3f: Update policy weights
nn_policy = _get_delta_policy(nn_policy, delta_step, 1.0)
# Step 3g: Update normalization parameters
nn_policy.mu = mu_sum / n_states
nn_policy.sigma_inv = 1.0 / np.sqrt((sigma_sq_sum / n_states))
# Step 3h: Logging
if i % params.log_interval == 0:
exp_cum_reward, success_rate = test_policy(env, nn_policy, 100, use_cum_reward=True)
current_time = time.time() - start_time
print('\nSteps taken after iteration {}: {}'.format(i, num_steps))
print('Reward after iteration {}: {}'.format(i, exp_cum_reward))
print('Success rate after iteration {}: {}'.format(i, success_rate))
print('Time after iteration {}: {} mins'.format(i, current_time/60))
log_info.append([num_steps, current_time/60, exp_cum_reward, success_rate])
# save best policy
if success_rate > best_success_rate or (success_rate == best_success_rate
and exp_cum_reward >= best_reward):
best_policy = nn_policy
best_success_rate = success_rate
best_reward = exp_cum_reward
if success_rate > 80 and exp_cum_reward > 0:
params.lr = max(params.lr/2, params.min_lr)
nn_policy = best_policy
# Step 4: Copy new weights and normalization parameters to original policy
for param, param_orig in zip(nn_policy.parameters(), nn_policy_orig.parameters()):
param_orig.data.copy_(param.data)
nn_policy_orig.mu = nn_policy.mu
nn_policy_orig.sigma_inv = nn_policy.sigma_inv
return log_info
def _sample_delta(nn_policy):
'''
Construct random perturbations to neural network parameters.
nn_policy: NNPolicy or NNPolicySimple
Returns: [torch.tensor] (list of torch tensors that is the same shape as nn_policy.parameters())
'''
delta = []
for param in nn_policy.parameters():
delta.append(torch.normal(torch.zeros(param.shape, dtype=torch.float)))
return delta
def _get_delta_policy(nn_policy, delta, sign):
'''
Construct the policy perturbed by the given delta
Parameters:
nn_policy: NNPolicy or NNPolicySimple
delta: [torch.tensor] (list of torch tensors with same shape as nn_policy.parameters())
sign: float
Returns: NNPolicy or NNPolicySimple
'''
# Step 1: Construct the perturbed policy
    if isinstance(nn_policy, NNPolicySimple):
        nn_policy_delta = NNPolicySimple(nn_policy.params)
    elif isinstance(nn_policy, NNPolicy):
        nn_policy_delta = NNPolicy(nn_policy.params)
    else:
        raise Exception("Unrecognized neural network architecture")
# Step 2: Set normalization of the perturbed policy
nn_policy_delta.mu = nn_policy.mu
nn_policy_delta.sigma_inv = nn_policy.sigma_inv
# Step 3: Set the weights of the perturbed policy
for param, param_delta, delta_cur in zip(nn_policy.parameters(), nn_policy_delta.parameters(),
delta):
param_delta.data.copy_(param.data + sign * delta_cur)
return nn_policy_delta
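# A minimal sketch of how the two helpers above combine in ARS-style
# training: sample one delta, then evaluate the policy perturbed by +delta
# and by -delta and weight the delta by the resulting reward gap.
# `_TinyPolicy` is a hypothetical stand-in for NNPolicy/NNPolicySimple, used
# here only because _sample_delta needs nothing beyond .parameters().
if __name__ == '__main__':
    import torch.nn as nn

    class _TinyPolicy(nn.Module):
        def __init__(self):
            super(_TinyPolicy, self).__init__()
            self.fc = nn.Linear(4, 2)

    policy = _TinyPolicy()
    delta = _sample_delta(policy)  # one Gaussian tensor per parameter
    with torch.no_grad():
        for sign in (1.0, -1.0):   # weights of the +/- rollout policies
            perturbed = [p.data + sign * d
                         for p, d in zip(policy.parameters(), delta)]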
| 12,738 | 3,909 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import subprocess
def get_execution_parallism():
return 1
def do_native_translation_v2(codeset, **kwargs):
kernel_name, in_args, out_args, body = codeset
expand_args = ' '.join([f'{x[0]}* {x[1]} = ({x[0]}*)__args[{i}];' for i, x in enumerate(in_args + out_args)])
full_body = f'''
#include <math.h>
#include <algorithm>
#define rsqrt(x) (1.0f / sqrt(x))
{kwargs['attrs'].blend}
extern "C" void {kernel_name}(int __rank__, void** __args) {{
{expand_args}
using namespace std;
{body.replace('threadIdx.x', '__rank__')}
}}
'''
return full_body
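# A minimal sketch of calling do_native_translation_v2, assuming the codeset
# layout implied above: (kernel_name, in_args, out_args, body) with each
# argument a (c_type, name) pair. `_Attrs` is a hypothetical stand-in for the
# real attrs object; only its `.blend` string is used here.
if __name__ == '__main__':
    class _Attrs:
        blend = ''
    codeset = ('vector_add',
               [('float', 'a'), ('float', 'b')],
               [('float', 'c')],
               'c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];')
    print(do_native_translation_v2(codeset, attrs=_Attrs()))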
| 640 | 253 |
import sys
from pymod import index
from pymod.index import modules
from pymod.mappings import url
out = lambda s: sys.stdout.write(s)
out('{ ')
dom = index.domof('https://docs.python.org/2/library/exceptions.html')
for el in (el for el in dom.findAll('a', {'class': 'headerlink'})
if '-' not in el.attrs['href']):
out("'{}', ".format(el.attrs['href'].split('#exceptions.')[1]))
out('}\n')
| 408 | 148 |
import sqlite3
from collections import namedtuple
from functional import seq
with sqlite3.connect(':memory:') as conn:
conn.execute('CREATE TABLE user (id INT, name TEXT)')
conn.commit()
User = namedtuple('User', 'id name')
seq([(1, 'pedro'), (2, 'fritz')]).to_sqlite3(
conn, 'INSERT INTO user (id, name) VALUES (?, ?)')
seq([(3, 'sam'), (4, 'stan')]).to_sqlite3(conn, 'user')
seq([User(name='tom', id=5), User(name='keiga', id=6)]).to_sqlite3(conn, 'user')
seq([dict(name='david', id=7), User(name='jordan', id=8)]
).to_sqlite3(conn, 'user')
print(list(conn.execute('SELECT * FROM user')))
# [
# (1, 'pedro'), (2, 'fritz'),
# (3, 'sam'), (4, 'stan'),
# (5, 'tom'), (6, 'keiga'),
# (7, 'david'), (8, 'jordan')
# ]
users = seq.sqlite3(conn, 'SELECT * FROM user').to_list()
print(users)
| 878 | 339 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Settings for launch_jobs.py
Test settings for automated tests.
To test run with job scheduler
@author: Matthias Göbel
"""
from run_wrf.configs.test.config_test import *
from copy import deepcopy
params = deepcopy(params)
params["vmem"] = 500
| 298 | 109 |
s1 = "I am a beginner in python \nI will study the concepts to be familiar with this language.\nIt is a very user friendly language"
print("The long string is: \n" + s1) # -- L1
s2 = """The long string is:
I am a beginner in python
I will study the concepts to be familiar with this language.
It is a very user friendly language"""
print(s2) # -- L2
| 362 | 112 |
#!/usr/bin/env python
# $Id: setup.py 8864 2021-10-26 11:46:55Z grubert $
# Copyright: This file has been placed in the public domain.
from __future__ import print_function
import glob
import os
import sys
try:
from setuptools import setup
except ImportError:
print('Error: The "setuptools" module, which is required for the')
print(' installation of Docutils, could not be found.\n')
print(' You may install it with `python -m pip install setuptools`')
print(' or from a package called "python-setuptools" (or similar)')
print(' using your system\'s package manager.\n')
print(' Alternatively, install a release from PyPi with')
print(' `python -m pip install docutils`.')
sys.exit(1)
package_data = {
'name': 'docutils',
'description': 'Docutils -- Python Documentation Utilities',
'long_description': """\
Docutils is a modular system for processing documentation
into useful formats, such as HTML, XML, and LaTeX. For
input Docutils supports reStructuredText, an easy-to-read,
what-you-see-is-what-you-get plaintext markup syntax.""", # wrap at col 60
'url': 'http://docutils.sourceforge.net/',
'version': '0.18',
'author': 'David Goodger',
'author_email': 'goodger@python.org',
'maintainer': 'docutils-develop list',
'maintainer_email': 'docutils-develop@lists.sourceforge.net',
'license': 'public domain, Python, 2-Clause BSD, GPL 3 (see COPYING.txt)',
'platforms': 'OS-independent',
'python_requires': '>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
'include_package_data': True,
'exclude_package_data': {"": ["docutils.conf"]},
'package_dir': {
'docutils': 'docutils',
'docutils.tools': 'tools'
},
'packages': [
'docutils',
'docutils.languages',
'docutils.parsers',
'docutils.parsers.rst',
'docutils.parsers.rst.directives',
'docutils.parsers.rst.languages',
'docutils.readers',
'docutils.transforms',
'docutils.utils',
'docutils.utils.math',
'docutils.writers',
'docutils.writers.html4css1',
'docutils.writers.html5_polyglot',
'docutils.writers.pep_html',
'docutils.writers.s5_html',
'docutils.writers.latex2e',
'docutils.writers.xetex',
'docutils.writers.odf_odt',
],
'scripts': [
'tools/rst2html.py',
'tools/rst2html4.py',
'tools/rst2html5.py',
'tools/rst2s5.py',
'tools/rst2latex.py',
'tools/rst2xetex.py',
'tools/rst2man.py',
'tools/rst2xml.py',
'tools/rst2pseudoxml.py',
'tools/rstpep2html.py',
'tools/rst2odt.py',
'tools/rst2odt_prepstyles.py',
],
'classifiers': [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Other Audience',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: Public Domain',
'License :: OSI Approved :: Python Software Foundation License',
'License :: OSI Approved :: BSD License',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Documentation',
'Topic :: Software Development :: Documentation',
'Topic :: Text Processing',
'Natural Language :: English', # main/default language, keep first
'Natural Language :: Afrikaans',
'Natural Language :: Arabic',
'Natural Language :: Catalan',
'Natural Language :: Chinese (Simplified)',
'Natural Language :: Chinese (Traditional)',
'Natural Language :: Czech',
'Natural Language :: Danish',
'Natural Language :: Dutch',
'Natural Language :: Esperanto',
'Natural Language :: Finnish',
'Natural Language :: French',
'Natural Language :: Galician',
'Natural Language :: German',
'Natural Language :: Hebrew',
'Natural Language :: Italian',
'Natural Language :: Japanese',
'Natural Language :: Korean',
'Natural Language :: Latvian',
'Natural Language :: Lithuanian',
'Natural Language :: Persian',
'Natural Language :: Polish',
'Natural Language :: Portuguese (Brazilian)',
'Natural Language :: Russian',
'Natural Language :: Slovak',
'Natural Language :: Spanish',
'Natural Language :: Swedish',
],
}
"""Distutils setup parameters."""
def do_setup():
# Install data files properly.
dist = setup(**package_data)
return dist
if __name__ == '__main__':
do_setup()
| 5,177 | 1,618 |
"""Tests the ``remove`` plugin."""
from unittest.mock import patch
import pytest
import moe
@pytest.fixture
def mock_rm():
"""Mock the `remove_item()` api call."""
with patch("moe.plugins.remove.remove_item", autospec=True) as mock_rm:
yield mock_rm
@pytest.fixture
def tmp_rm_config(tmp_config):
"""A temporary config for the edit plugin with the cli."""
return tmp_config('default_plugins = ["cli", "remove"]')
class TestCommand:
"""Test the `remove` command."""
def test_track(self, mock_track, mock_query, mock_rm, tmp_rm_config):
"""Tracks are removed from the database with valid query."""
cli_args = ["remove", "*"]
mock_query.return_value = [mock_track]
moe.cli.main(cli_args, tmp_rm_config)
mock_query.assert_called_once_with("*", query_type="track")
mock_rm.assert_called_once_with(mock_track)
def test_album(self, mock_album, mock_query, mock_rm, tmp_rm_config):
"""Albums are removed from the database with valid query."""
cli_args = ["remove", "-a", "*"]
mock_query.return_value = [mock_album]
moe.cli.main(cli_args, tmp_rm_config)
mock_query.assert_called_once_with("*", query_type="album")
mock_rm.assert_called_once_with(mock_album)
def test_extra(self, mock_extra, mock_query, mock_rm, tmp_rm_config):
"""Extras are removed from the database with valid query."""
cli_args = ["remove", "-e", "*"]
mock_query.return_value = [mock_extra]
moe.cli.main(cli_args, tmp_rm_config)
mock_query.assert_called_once_with("*", query_type="extra")
mock_rm.assert_called_once_with(mock_extra)
def test_multiple_items(
self, mock_track_factory, mock_query, mock_rm, tmp_rm_config
):
"""All items returned from the query are removed."""
cli_args = ["remove", "*"]
mock_tracks = [mock_track_factory(), mock_track_factory()]
mock_query.return_value = mock_tracks
moe.cli.main(cli_args, tmp_rm_config)
for mock_track in mock_tracks:
mock_rm.assert_any_call(mock_track)
assert mock_rm.call_count == 2
def test_exit_code(self, mock_query, mock_rm, tmp_rm_config):
"""Return a non-zero exit code if no items are removed."""
cli_args = ["remove", "*"]
mock_query.return_value = []
with pytest.raises(SystemExit) as error:
moe.cli.main(cli_args, tmp_rm_config)
assert error.value.code != 0
mock_rm.assert_not_called()
class TestPluginRegistration:
"""Test the `plugin_registration` hook implementation."""
def test_no_cli(self, tmp_config):
"""Don't enable the remove cli plugin if the `cli` plugin is not enabled."""
config = tmp_config(settings='default_plugins = ["remove"]')
assert not config.plugin_manager.has_plugin("remove_cli")
def test_cli(self, tmp_config):
"""Enable the remove cli plugin if the `cli` plugin is enabled."""
config = tmp_config(settings='default_plugins = ["remove", "cli"]')
assert config.plugin_manager.has_plugin("remove_cli")
| 3,171 | 1,012 |
#Tyler Sorensen
#February 15, 2012
#University of Utah
#PyBool_builder.py
#The interface to build recursive style boolean expressions
#See README.txt for more information
def mk_const_expr(val):
"""
returns a constant expression of value VAL
VAL should be of type boolean
"""
return {"type" : "const",
"value": val }
def mk_var_expr(name):
"""
returns a variable expression of name NAME
where NAME is a string
"""
return {"type" : "var" ,
"name" : (name, 0)}
def mk_neg_expr(expr):
"""
returns a negated expression where EXPR
is the expression to be negated
"""
return {"type" : "neg",
"expr" : expr }
def mk_and_expr(expr1, expr2):
"""
returns an and expression
of the form (EXPR1 /\ EXPR2)
where EXPR1 and EXPR2 are expressions
"""
return {"type" : "and" ,
"expr1" : expr1 ,
"expr2" : expr2 }
def mk_or_expr(expr1, expr2):
"""
returns an or expression
of the form (EXPR1 \/ EXPR2)
where EXPR1 and EXPR2 are expressions
"""
return {"type" : "or" ,
"expr1" : expr1 ,
"expr2" : expr2 }
#NOT NEEDED
def mk_paren_expr(expr):
return {"type" : "paren",
"expr" : expr }
def mk_impl_expr(expr1, expr2):
"""
    returns an implication expression
of the form (EXPR1 -> EXPR2)
where EXPR1 and EXPR2 are expressions
NOTE: Order of expr1 and expr2 matters here
"""
return {"type" : "impl",
"expr1" : expr1 ,
"expr2" : expr2 }
def mk_eqv_expr(expr1, expr2):
"""
    returns an equivalence expression
of the form (EXPR1 <=> EXPR2)
where EXPR1 and EXPR2 are expressions
"""
return {"type" : "eqv" ,
"expr1" : expr1 ,
"expr2" : expr2 }
def mk_xor_expr(expr1, expr2):
"""
    returns an xor (exclusive or) expression
of the form (EXPR1 XOR EXPR2)
where EXPR1 and EXPR2 are expressions
"""
return {"type" : "xor" ,
"expr1" : expr1 ,
"expr2" : expr2 }
| 2,089 | 716 |
#!/usr/bin/env python2
'''
A simple script to get the playback status of spotify.
This script needs ``dbus-python`` for spotify communication
To run simply::
./spotify-monitor.py <command>
Where command is one of the following::
``playback``
``playing``
'''
# pylint: disable=W0703
import dbus
from dbus.mainloop.glib import DBusGMainLoop
import sys
def get_pandora_status(command):
'''
Get status for pithos/pandora
'''
try:
bus = dbus.SessionBus()
pithos_object = bus.get_object("net.kevinmehall.Pithos",
"/net/kevinmehall/Pithos")
pithos = dbus.Interface(pithos_object, "net.kevinmehall.Pithos")
if command == 'playback':
res = 'Playing' if pithos.IsPlaying() else 'Paused'
elif command == 'playing':
info = dict((str(k), str(v)) for k, v in pithos.GetCurrentSong().items())
res = '{0} - {1}'.format(info['title'], info['artist'])
except dbus.exceptions.DBusException:
res = None
return res
def get_status(command):
'''
Get the status.
command
        The command to query spotify with.
Returns the status from spotify.
'''
try:
bus_loop = DBusGMainLoop(set_as_default=True)
session_bus = dbus.SessionBus(mainloop=bus_loop)
spotify_bus = session_bus.get_object('org.mpris.MediaPlayer2.spotify',
'/org/mpris/MediaPlayer2')
spotify = dbus.Interface(spotify_bus,
'org.freedesktop.DBus.Properties')
if command == 'playback':
res = spotify.Get('org.mpris.MediaPlayer2.Player',
'PlaybackStatus')
elif command == 'playing':
meta = spotify.Get('org.mpris.MediaPlayer2.Player',
'Metadata')
artist = meta['xesam:artist'][0].encode('utf-8')
title = meta['xesam:title'].encode('utf-8')
res = '{0} - {1}'.format(title, artist)
except Exception:
res = 'Not Playing'
return res
def main(arg):
'''
Pass the arg to spotify.
'''
if arg == 'playback':
res = get_pandora_status(arg)
if not res or res == 'Not Playing':
res = get_status(arg)
print res
elif arg == 'playing':
res = get_pandora_status(arg)
if not res:
res = get_status(arg)
print res
if __name__ == '__main__':
if len(sys.argv) == 2:
main(sys.argv[1])
else:
exit(101)
| 2,595 | 831 |
import sys
sys.path.insert(0, '../Pyro4-4.17')
import Pyro4
from time import clock
"""
log = open('pyro.log', 'w')
times = []
proxy = Pyro4.Proxy("PYRO:example.service@localhost:54642")
for i in range(100) :
local = []
begin = clock()
for files in proxy.getFiles(proxy.getcwd()) :
for file in files :
local.append(file)
end = clock()
times.append(end - begin)
log.write(str(end - begin) + "\n")
log.write("Average: " + str(reduce(lambda x, y: x+y, times)/len(times)))
"""
proxy = Pyro4.Proxy("PYRO:service@smarmy-pirate.cs.utexas.edu:9975")
begin = clock()
for files in proxy.getFiles(proxy.getcwd()) :
for file in files :
log = open('p' + file, 'w')
log.write(proxy.getFile(file))
end = clock()
print str(end - begin)
| 787 | 303 |
from nose.tools import assert_equal
from tests.fixtures import WebTest
class TestDemoController(WebTest):
pass
| 117 | 34 |
import sys
import matplotlib
import numpy as np
# Avoid errors when running on headless servers.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
if len(sys.argv) != 6:
print "Usage plot.py <data file port 1> <min size> <step size> <max size> <num packets sent>"
sys.exit(1)
width = 20
data_file = sys.argv[1]
min_rate = int(sys.argv[2])
step_size = int(sys.argv[3])
max_rate = int(sys.argv[4])
num_packets_sent = int(sys.argv[5])
x_data = np.arange(min_rate, max_rate + step_size, step_size)
y_data = []
error = []
with open(data_file, 'r') as f:
for data in f.readlines():
if len(data.split(' ')) == 1:
y_data.append(int(data))
error = None
else:
values = []
for value in data.split(' '):
values.append(int(value))
y_data.append(np.mean(values))
error.append(np.std(values))
dropped_counts = []
for data in y_data:
dropped_counts.append(num_packets_sent - data)
plt.title('Number of drops by one port with different sized packets')
plt.xlabel('Packet size (Bytes)')
plt.ylabel('Packets')
plt.bar(x_data, y_data, width, color='blue', label="Number Captured", yerr=error)
plt.bar(x_data, dropped_counts, width, color='red', bottom=y_data, label="Number Dropped")
plt.legend()
plt.savefig('dropped_packets.eps', format='eps')
| 1,358 | 487 |
import os
import ssl
from six.moves import urllib
import torch
import numpy as np
import dgl
from torch.utils.data import Dataset, DataLoader
def download_file(dataset):
print("Start Downloading data: {}".format(dataset))
url = "https://s3.us-west-2.amazonaws.com/dgl-data/dataset/{}".format(
dataset)
print("Start Downloading File....")
context = ssl._create_unverified_context()
data = urllib.request.urlopen(url, context=context)
with open("./data/{}".format(dataset), "wb") as handle:
handle.write(data.read())
class SnapShotDataset(Dataset):
def __init__(self, path, npz_file):
if not os.path.exists(path+'/'+npz_file):
if not os.path.exists(path):
os.mkdir(path)
download_file(npz_file)
zipfile = np.load(path+'/'+npz_file)
self.x = zipfile['x']
self.y = zipfile['y']
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
return self.x[idx, ...], self.y[idx, ...]
def METR_LAGraphDataset():
if not os.path.exists('data/graph_la.bin'):
if not os.path.exists('data'):
os.mkdir('data')
download_file('graph_la.bin')
g, _ = dgl.load_graphs('data/graph_la.bin')
return g[0]
class METR_LATrainDataset(SnapShotDataset):
def __init__(self):
super(METR_LATrainDataset, self).__init__('data', 'metr_la_train.npz')
self.mean = self.x[..., 0].mean()
self.std = self.x[..., 0].std()
class METR_LATestDataset(SnapShotDataset):
def __init__(self):
super(METR_LATestDataset, self).__init__('data', 'metr_la_test.npz')
class METR_LAValidDataset(SnapShotDataset):
def __init__(self):
super(METR_LAValidDataset, self).__init__('data', 'metr_la_valid.npz')
def PEMS_BAYGraphDataset():
if not os.path.exists('data/graph_bay.bin'):
if not os.path.exists('data'):
os.mkdir('data')
download_file('graph_bay.bin')
g, _ = dgl.load_graphs('data/graph_bay.bin')
return g[0]
class PEMS_BAYTrainDataset(SnapShotDataset):
def __init__(self):
super(PEMS_BAYTrainDataset, self).__init__(
'data', 'pems_bay_train.npz')
self.mean = self.x[..., 0].mean()
self.std = self.x[..., 0].std()
class PEMS_BAYTestDataset(SnapShotDataset):
def __init__(self):
super(PEMS_BAYTestDataset, self).__init__('data', 'pems_bay_test.npz')
class PEMS_BAYValidDataset(SnapShotDataset):
def __init__(self):
super(PEMS_BAYValidDataset, self).__init__(
'data', 'pems_bay_valid.npz')
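# A minimal usage sketch: on first run the dataset downloads itself into
# ./data (network access assumed), after which (x, y) snapshot pairs can be
# batched with the standard DataLoader imported above.
if __name__ == '__main__':
    train_data = METR_LATrainDataset()
    loader = DataLoader(train_data, batch_size=32, shuffle=True)
    x, y = next(iter(loader))
    print(x.shape, y.shape, train_data.mean, train_data.std)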
| 2,678 | 990 |
# -*- coding: utf-8 -*-
import string
import random
import logging
import urllib2
from os import path
from django.test import TestCase
from django.core.files.base import ContentFile
from s3 import upload
from s3.storage import S3Storage
from settings import BOTO_S3_BUCKET
logger = logging.getLogger(__name__)
local_path = path.realpath(path.dirname(__file__))
def get_string(lngth):
strn = ''
for i in xrange(lngth):
strn += random.choice(string.letters)
return strn
class BotoTest(TestCase):
"""
Testing Amazon S3.
"""
def test_storage(self):
"""
Storage testing.
"""
text = ''
storage = S3Storage(host='s3.amazonaws.com')
file_length = random.randrange(300, 1300)
text = get_string(file_length)
filename_length = random.randrange(5, 12)
filename = get_string(filename_length)
self.assertFalse(storage.exists(filename))
test_file = ContentFile(text)
test_file.name = filename
uploaded_url = upload(test_file, host='s3.amazonaws.com')
self.assertTrue(storage.exists(filename))
url = 'http://' + BOTO_S3_BUCKET + '.s3.amazonaws.com/' + filename
self.assertEqual(uploaded_url, url)
page = urllib2.urlopen(uploaded_url)
self.assertEqual(text, page.read())
self.assertEqual(len(text), storage.size(filename))
self.assertEqual(url, storage.url(filename))
storage.delete(filename)
self.assertFalse(storage.exists(filename))
| 1,552 | 503 |
# -*- coding: utf-8 -*-
# http://wiki.ros.org/Bags/Format/2.0
__all__ = ("BagPlayer",)
import subprocess
import threading
from types import TracebackType
from typing import Optional, Type
import dockerblade
from loguru import logger
from ... import exceptions
class BagPlayer:
def __init__(
self,
fn_container: str,
shell: dockerblade.Shell,
files: dockerblade.FileSystem,
*,
delete_file_after_use: bool = False,
) -> None:
self.__lock = threading.Lock()
self.__fn_container = fn_container
self.__shell = shell
self.__files = files
self.__delete_file_after_use = delete_file_after_use
self.__started = False
self.__stopped = False
self._process: Optional[dockerblade.popen.Popen] = None
@property
def started(self) -> bool:
"""Indicates whether or not playback has started."""
return self.__started
@property
def stopped(self) -> bool:
"""Indicates whether or not playback has stopped."""
return self.__stopped
def __enter__(self) -> "BagPlayer":
self.start()
return self
def __exit__(
self,
ex_type: Optional[Type[BaseException]],
ex_val: Optional[BaseException],
ex_tb: Optional[TracebackType],
) -> None:
if ex_type is not None:
            logger.opt(exception=(ex_type, ex_val, ex_tb)).error(
                "error occurred during bag playback"
            )
if not self.stopped:
self.stop()
def finished(self) -> bool:
"""Checks whether playback has completed."""
p = self._process
return p.finished if p else False
def wait(self, time_limit: Optional[float] = None) -> None:
"""Blocks until playback has finished.
Parameters
----------
time_limit: Optional[float] = None
an optional time limit.
Raises
------
PlayerTimeout:
if playback did not finish within the provided timeout.
PlayerFailure:
if an unexpected occurred during playback.
"""
assert self._process
try:
self._process.wait(time_limit)
retcode = self._process.returncode
assert retcode is not None
if retcode != 0:
out = "\n".join(self._process.stream) # type: ignore
raise exceptions.PlayerFailure(retcode, out)
except subprocess.TimeoutExpired as error:
raise exceptions.PlayerTimeout from error
def start(self) -> None:
"""Starts playback from the bag.
Raises
------
PlayerAlreadyStarted:
if the player has already started.
"""
logger.debug("starting bag playback")
with self.__lock:
if self.__started:
raise exceptions.PlayerAlreadyStarted
self.__started = True
command: str = f"rosbag play -q {self.__fn_container}"
self._process = self.__shell.popen(
command, stdout=False, stderr=False
)
logger.debug("started bag playback")
def stop(self) -> None:
"""Stops playback from the bag.
Raises
------
PlayerAlreadyStopped:
if the player has already been stopped.
"""
logger.debug("stopping bag playback")
with self.__lock:
if self.__stopped:
raise exceptions.PlayerAlreadyStopped
if not self.__started:
raise exceptions.PlayerNotStarted
assert self._process
self._process.kill()
out = "\n".join(list(self._process.stream)) # type: ignore
logger.debug("player output:\n%s", out)
self._process = None
if self.__delete_file_after_use:
self.__files.remove(self.__fn_container)
self.__stopped = True
logger.debug("stopped bag playback")
| 4,060 | 1,080 |
from functools import wraps
from flask import request, make_response
from .exceptions import ApiError
from .schemas import create_schema, ma_version_lt_300b7
def request_schema(schema_or_dict, extends=None, many=None, cache_schema=True, pass_data=False):
schema_ = create_schema(schema_or_dict, extends)
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
            schema = schema_ if cache_schema else create_schema(schema_or_dict, extends)
if request.json is None:
# NOTE: this should be fixed with marshmallow 3 (and 2.16?)
raise ApiError('JSON data required')
data = schema.load(request.json, many=many)
if ma_version_lt_300b7:
data = data.data
if pass_data:
kwargs.update({'data' if pass_data is True else pass_data: data})
else:
kwargs.update(data)
return func(*args, **kwargs)
return wrapper
return decorator
def request_args_schema(schema_or_dict, extends=None, cache_schema=True, pass_data=False):
schema_ = create_schema(schema_or_dict, extends)
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
            schema = schema_ if cache_schema else create_schema(schema_or_dict, extends)
data = schema.load(request.args)
if ma_version_lt_300b7:
data = data.data
if pass_data:
kwargs.update({'data' if pass_data is True else pass_data: data})
else:
kwargs.update(data)
return func(*args, **kwargs)
return wrapper
return decorator
def response_schema(schema_or_dict, extends=None, many=None, cache_schema=True):
schema_ = create_schema(schema_or_dict, extends)
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
            schema = schema_ if cache_schema else create_schema(schema_or_dict, extends)
result = func(*args, **kwargs)
            data = schema.dump(result, many=many)
if ma_version_lt_300b7:
data = data.data
return data
return wrapper
return decorator
def response_headers(headers=None):
    """
    This decorator adds the headers passed in to the response.
    """
    # http://flask.pocoo.org/snippets/100/
    headers = headers or {}
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
resp = make_response(func(*args, **kwargs))
h = resp.headers
for header, value in headers.items():
h[header] = value
return resp
return wrapper
return decorator
def response_headers_no_cache(func):
@wraps(func)
@response_headers({
'Cache-Control': 'no-store',
'Pragma': 'no-cache',
})
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
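# A minimal usage sketch (not part of the library), assuming `create_schema`
# accepts a plain dict of marshmallow fields, as the `schema_or_dict`
# parameter name suggests; the app and the `name` field are hypothetical.
if __name__ == '__main__':
    from flask import Flask, jsonify
    from marshmallow import fields

    app = Flask(__name__)

    @app.route('/items', methods=['POST'])
    @request_schema({'name': fields.Str(required=True)})
    @response_headers_no_cache
    def create_item(name):
        return jsonify(name=name)

    app.run()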
| 3,120 | 903 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.urls import path
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
from drf_yasg.views import get_schema_view
admin.autodiscover()
from visitors import views
from api.views import schema_view
urlpatterns = [
path('administramelo/', admin.site.urls),
url(r'^accounts/', include('registration.backends.default.urls')),
url(r'^search_date/$', views.search_date),
url(r'^search/', views.search, name='search_view'),
url(r'^api/', include('api.urls')),
url(r'^docs(?P<format>\.json|\.yaml)$', schema_view.without_ui(cache_timeout=0), name='schema-json'),
url(r'^docs/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
url(r'^redocs/$', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
url(r'^statistics/$', views.statistics, name='statistics'),
url(r'^statistics_api/$', views.statistics_api),
url(r'^about/', views.about, name='about'),
path('', include('visitors.urls')),
url(r'^cazador/', include('cazador.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
| 1,447 | 502 |
# -*- coding: utf-8 -*-
"""
datagator.rest.decorators
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2015 by `University of Denver <http://pardee.du.edu/>`_
:license: Apache 2.0, see LICENSE for more details.
"""
import base64
import functools
from django.contrib.auth import authenticate, login
from django.core.exceptions import SuspiciousOperation
__all__ = ['with_authentication', ]
def _basic_auth(request):
if request.user.is_authenticated():
return request
if 'HTTP_AUTHORIZATION' in request.META:
auth = request.META['HTTP_AUTHORIZATION'].split()
if len(auth) == 2:
if auth[0].lower() == "basic":
uname, passwd = base64.b64decode(auth[1]).split(':', 1)
user = authenticate(username=uname, password=passwd)
if user is not None:
if user.is_active:
login(request, user)
request.user = user
return request
raise SuspiciousOperation("Failed authentication.")
return request
def with_authentication(_method=None, **options):
if _method is not None:
return with_authentication()(_method)
allow_unauthorized = options.get("allow_unauthorized", False)
def decorator(method):
@functools.wraps(method)
def wrapper(view, request, *args, **kwargs):
            # authenticate the request via HTTP Basic auth if necessary
try:
request = _basic_auth(request)
except SuspiciousOperation:
# user-submitted authorization header cannot be authenticated
return MessageResponse(403, "Failed authentication.")
if not allow_unauthorized and not request.user.is_authenticated():
response = MessageResponse(401, "Unauthorized access.")
response['WWW-Authenticate'] = "Basic realm=\"DataGator\""
return response
return method(view, request, *args, **kwargs)
return wrapper
return decorator
| 2,043 | 573 |
from getpass import getpass
import socket
COLORS = {"green" : "\33[92m",
"red" : "\33[91m",
"yellow" : "\33[93m",
"endc" : "\33[0m" }
def print_green(msg):
"""Prints msg in green text."""
print("{0}{1}{2}".format(COLORS["green"], msg, COLORS["endc"]))
def print_yellow(msg):
"""Prints msg in yellow text."""
print("{0}{1}{2}".format(COLORS["yellow"], msg, COLORS["endc"]))
def print_red(msg):
"""Prints msg in red text."""
print("{0}{1}{2}".format(COLORS["red"], msg, COLORS["endc"]))
def print_banner():
"""Prints the slyther entry banner."""
print_green("///////////////////")
print_green("// s l y t h e r //")
print_green("///////////////////")
def getpass_handled(prompt):
"""Wrapper for getpass() that handles KeyboardInterrupts."""
try:
return getpass(prompt)
except KeyboardInterrupt:
print_red("\nAborting...")
exit()
def confirm(prompt):
"""Displays the prompt, only returns true with input 'Y' or 'y'."""
confirmation = input(COLORS["yellow"] + prompt + COLORS["endc"]).lower()
return confirmation == "y"
def input_default(prompt, default):
"""Displays the prompt, returns input (default if user enters nothing)."""
response = input("{} [{}]: ".format(prompt, default))
return response if response else default
def get_ip():
"""Prompts the user for and returns a valid IP address string."""
while True:
ip = input("IP: ")
# Check if the ip has 3 "."s. inet_aton does not verify this
if len(ip.split(".")) != 4:
print_red("\nInvalid IP address. Please try again.")
continue
# Check if input creates a valid ip
try:
socket.inet_aton(ip)
except socket.error:
print_red("\nInvalid IP address. Please try again.")
continue
return ip
def get_recipient(contacts):
"""
Prompts a user for a contact. If a valid one is not provided, the user may
create a new one.
Args:
contacts: The contacts dictionary to select from.
Returns:
The contact ID of a valid contact.
"""
while True:
recipient = input("Contact Name: ")
for contact_id in contacts:
if recipient == contacts[contact_id]["name"]:
return contact_id
print_red("Contact not recognized.")
def get_command(commands):
"""Prompts for a command, and returns when the user has chosen a valid one."""
while True:
command = input("> ").lower()
if command in commands:
return command
else:
print_red("Invalid command. Please try again.")
def print_bar(msg):
print("-"*(31 - int(.5 * len(msg))), msg, "-"*(31 - int(.5 * len(msg))))
| 2,836 | 879 |
import pandas as pd
import numpy as np
from collections import Counter
data = pd.read_csv('out/negex_all.txt', sep="\t", header=None)
print(data.shape)
data.columns = ['PAT_DEID','NOTE_DEID','NOTE_DATE','ENCOUNTER_DATE','NOTE_CODE','TEXT_SNIPPET','lower_text','STATUS']
df = data.groupby(['PAT_DEID','NOTE_DEID','NOTE_DATE','ENCOUNTER_DATE','NOTE_CODE'])['STATUS'].apply(','.join).reset_index()
df_text = data.groupby(['PAT_DEID','NOTE_DEID','NOTE_DATE','ENCOUNTER_DATE','NOTE_CODE'])['TEXT_SNIPPET'].apply(' ##### '.join).reset_index()
df_text_required = df_text[['NOTE_DEID','TEXT_SNIPPET']]
df_fin = pd.merge(df, df_text_required, on='NOTE_DEID', how='inner')
def check(row):
    # Majority vote over the per-snippet statuses of a note; ties count as
    # affirmed.
    statuses = row['STATUS'].split(',')
    counts = Counter(statuses)
    if counts['affirmed'] >= counts['negated']:
        return "Affirmed"
    else:
        return "Negated"
def majority_rule(frame, out_col):
    frame[out_col] = frame.apply(check, axis=1)
    return frame
df1 = majority_rule(df, 'STATUS_FINAL')
print(df1.shape)
df2 = pd.merge(df1, df_text_required, on='NOTE_DEID', how='inner')
df2.to_pickle("out/annotated_note_all.pkl")
| 1,244 | 499 |
from flask import Flask
from flask import request
from flask import jsonify
from os import environ
import query
app = Flask(__name__)
if 'MONGODB_HOST' in environ:
mongodb_host = environ['MONGODB_HOST']
else:
mongodb_host = "localhost"
if 'MONGODB_PORT' in environ:
mongodb_port = environ['MONGODB_PORT']
else:
mongodb_port = "27017"
vr = query.VoterRecords(mongodb_host, mongodb_port)
@app.route('/search')
def search():
if request.args and 'q' in request.args:
search_string = request.args['q']
res = vr.determine_query_type(search_string)
resp = app.make_response(res)
resp.mimetype = 'application/json'
        return resp
else:
return "No query data received", 200
if __name__ == '__main__':
app.run(debug=False, host='0.0.0.0')
| 821 | 281 |
from stack.m_decoded_string import DecodeString
class TestDecodeString:
def test_lc_data_1(self):
ds = DecodeString()
ans = ds.valueAtIndex_bf("leet2code3", 15)
assert ans == "e"
ans = ds.valueAtIndex_opm("leet2code3", 15)
assert ans == "e"
ans = ds.valueAtIndex_opm_2("leet2code3", 15)
assert ans == "e"
def test_lc_data_2(self):
ds = DecodeString()
ans = ds.valueAtIndex_bf("ha22", 5)
assert ans == "h"
def test_lc_data_3(self):
ds = DecodeString()
ans = ds.valueAtIndex_bf("a2345678999999999999999", 1)
assert ans == "a"
ans = ds.valueAtIndex_opm("a2345678999999999999999", 18)
assert ans == "a"
ans = ds.valueAtIndex_opm_2("a2345678999999999999999", 18)
assert ans == "a"
def test_lc_data_4(self):
ds = DecodeString()
ans = ds.valueAtIndex_bf("test3code4", 15)
assert ans == "d"
| 972 | 412 |
from pylab import plot, show, legend
from numpy import array
from h5py import File
data = File("data.h5")
iter = 2
R = array(data["/%04d/R" % iter])
rho = array(data["/%04d/rho" % iter])
Vtot = array(data["/%04d/V_tot" % iter])
Zeff = -Vtot * R
#for i in range(1, 19):
# P = array(data["/%04d/P%04d" % (iter, i)])
# plot(R, P, label="P%04d" % i)
for i in range(1, 11):
iter = i
R = array(data["/%04d/R" % iter])
rho = array(data["/%04d/rho" % iter])
plot(R, rho*R**2, label="iter=%d" % iter)
legend()
show()
| 534 | 248 |
# Generated by Django 3.1 on 2020-08-18 13:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='address',
name='default_add',
field=models.BooleanField(default=True),
),
]
| 379 | 124 |
class KACLValidationError():
def __init__(self, line="", line_number=0, start_character_pos=None, end_character_pos=None, error_message=""):
self.__line_number = line_number
self.__start_character_pos = start_character_pos
self.__end_character_pos = end_character_pos
self.__error_message = error_message
self.__line = line
def line_number(self):
return self.__line_number
def position(self):
return self.__start_character_pos, self.__end_character_pos
def line(self):
return self.__line
def error_message(self):
return self.__error_message
class KACLValidation():
def __init__(self):
self.__validation_errors = []
def is_valid(self):
return (len(self.__validation_errors) == 0)
def errors(self):
return self.__validation_errors
def add_error(self, line, line_number, error_message, start_character_pos=None, end_character_pos=None):
self.__validation_errors.append(KACLValidationError(line=line,
line_number=line_number,
start_character_pos=start_character_pos,
end_character_pos=end_character_pos,
error_message=error_message))
def convert_to_dict(self):
validation_map = dict()
validation_map['valid'] = (len(self.__validation_errors) == 0)
errors = []
for error in self.__validation_errors:
error_map = {
"line": error.line(),
"line_number": error.line_number(),
"start_char_pos": error.position()[0],
"end_character_pos": error.position()[1],
"error_message": error.error_message()
}
errors.append(error_map)
validation_map['errors'] = errors
return validation_map
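# A short usage sketch exercising only the API defined above: record one
# error, then inspect validity and the serialized form. The sample line and
# message are hypothetical.
if __name__ == "__main__":
    validation = KACLValidation()
    validation.add_error(line="## [1.0.0]",
                         line_number=7,
                         error_message="missing release date",
                         start_character_pos=3,
                         end_character_pos=10)
    print(validation.is_valid())         # False
    print(validation.convert_to_dict())  # {'valid': False, 'errors': [...]}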
| 2,029 | 528 |
#!/usr/bin/python3
# Fabfile to delete out-of-date archives.
import os
from fabric.api import *
env.hosts = ['104.196.116.233', '54.165.130.77']
def do_clean(number=0):
"""Delete out-of-date archives.
"""
number = 1 if int(number) == 0 else int(number)
archives = sorted(os.listdir("versions"))
[archives.pop() for i in range(number)]
with lcd("versions"):
[local("rm ./{}".format(a)) for a in archives]
with cd("/data/web_static/releases"):
archives = run("ls -tr").split()
archives = [a for a in archives if "web_static_" in a]
[archives.pop() for i in range(number)]
[run("rm -rf ./{}".format(a)) for a in archives]
| 692 | 261 |
from tfrec.utils.model_utils import cross_validate
from tfrec.utils.model_utils import preprocess_and_split
__all__ = [
'cross_validate',
'preprocess_and_split',
]
| 173 | 58 |
"""
A threaded shared-memory scheduler for dask graphs.
This code is experimental and fairly ugly. It should probably be rewritten
before anyone really depends on it. It is very stateful and error-prone.
That being said, it is decently fast.
State
=====
Many functions pass around a ``state`` variable that holds the current state of
the computation. This variable consists of several other dictionaries and
sets, explained below.
Constant state
--------------
1. dependencies: {x: [a, b ,c]} a,b,c, must be run before x
2. dependents: {a: [x, y]} a must run before x or y
Changing state
--------------
### Data
1. cache: available concrete data. {key: actual-data}
2. released: data that we've seen, used, and released because it is no longer
needed
### Jobs
1. ready: A set of ready-to-run tasks
2. running: A set of tasks currently in execution
3. finished: A set of finished tasks
4. waiting: which tasks are still waiting on others :: {key: {keys}}
   Real-time equivalent of dependencies
5. waiting_data: available data to yet-to-be-run-tasks :: {key: {keys}}
Real-time equivalent of dependents
Example
-------
>>> import pprint
>>> dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}
>>> pprint.pprint(start_state_from_dask(dsk)) # doctest: +NORMALIZE_WHITESPACE
{'cache': {'x': 1, 'y': 2},
'dependencies': {'w': set(['y', 'z']),
'x': set([]),
'y': set([]),
'z': set(['x'])},
'dependents': {'w': set([]),
'x': set(['z']),
'y': set(['w']),
'z': set(['w'])},
'finished': set([]),
'ready': set(['z']),
'released': set([]),
'running': set([]),
'waiting': {'w': set(['z'])},
'waiting_data': {'x': set(['z']),
'y': set(['w']),
'z': set(['w'])}}
Optimizations
=============
We build this scheduler with out-of-core array operations in mind. To this end
we have encoded some particular optimizations.
Compute to release data
-----------------------
When we choose a new task to execute we often have many options. Policies at
this stage are cheap and can significantly impact performance. One could
imagine policies that expose parallelism, drive towards a particular output,
etc. Our current policy is to compute tasks that free up data resources.
See the functions ``choose_task`` and ``score`` for more information
Inlining computations
---------------------
We hold on to intermediate computations either in memory or on disk.
For very cheap computations that may emit new copies of the data, like
``np.transpose`` or possibly even ``x + 1``, we choose not to store these as
separate pieces of data / tasks. Instead we combine them with the computations
that require them. This may result in repeated computation but saves
significantly on space and computation complexity.
See the function ``inline`` for more information.
"""
from .core import istask, flatten, reverse_dict, get_dependencies, ishashable
from .utils import deepmap
from operator import add
from toolz import concat, partial
from multiprocessing.pool import ThreadPool
from .compatibility import Queue
from threading import Lock
import psutil
def inc(x):
return x + 1
def double(x):
return x * 2
DEBUG = False
def start_state_from_dask(dsk, cache=None):
""" Start state from a dask
Example
-------
>>> dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}
>>> import pprint
>>> pprint.pprint(start_state_from_dask(dsk)) # doctest: +NORMALIZE_WHITESPACE
{'cache': {'x': 1, 'y': 2},
'dependencies': {'w': set(['y', 'z']),
'x': set([]),
'y': set([]),
'z': set(['x'])},
'dependents': {'w': set([]),
'x': set(['z']),
'y': set(['w']),
'z': set(['w'])},
'finished': set([]),
'ready': set(['z']),
'released': set([]),
'running': set([]),
'waiting': {'w': set(['z'])},
'waiting_data': {'x': set(['z']),
'y': set(['w']),
'z': set(['w'])}}
"""
if cache is None:
cache = dict()
for k, v in dsk.items():
if not istask(v):
cache[k] = v
dependencies = dict((k, get_dependencies(dsk, k)) for k in dsk)
waiting = dict((k, v.copy()) for k, v in dependencies.items() if v)
dependents = reverse_dict(dependencies)
for a in cache:
for b in dependents[a]:
waiting[b].remove(a)
waiting_data = dict((k, v.copy()) for k, v in dependents.items() if v)
ready = set([k for k, v in waiting.items() if not v])
waiting = dict((k, v) for k, v in waiting.items() if v)
state = {'dependencies': dependencies,
'dependents': dependents,
'waiting': waiting,
'waiting_data': waiting_data,
'cache': cache,
'ready': ready,
'running': set(),
'finished': set(),
'released': set()}
return state
'''
Running tasks
-------------
When we execute tasks we both
1. Perform the actual work of collecting the appropriate data and calling the function
2. Manage administrative state to coordinate with the scheduler
'''
def _execute_task(arg, cache, dsk=None):
""" Do the actual work of collecting data and executing a function
Examples
--------
>>> cache = {'x': 1, 'y': 2}
Compute tasks against a cache
>>> _execute_task((add, 'x', 1), cache) # Compute task in naive manner
2
>>> _execute_task((add, (inc, 'x'), 1), cache) # Support nested computation
3
Also grab data from cache
>>> _execute_task('x', cache)
1
Support nested lists
>>> list(_execute_task(['x', 'y'], cache))
[1, 2]
>>> list(map(list, _execute_task([['x', 'y'], ['y', 'x']], cache)))
[[1, 2], [2, 1]]
>>> _execute_task('foo', cache) # Passes through on non-keys
'foo'
"""
dsk = dsk or dict()
if isinstance(arg, list):
return (_execute_task(a, cache) for a in arg)
elif istask(arg):
func, args = arg[0], arg[1:]
args2 = [_execute_task(a, cache, dsk=dsk) for a in args]
return func(*args2)
elif not ishashable(arg):
return arg
elif arg in cache:
return cache[arg]
elif arg in dsk:
raise ValueError("Premature deletion of data. Key: %s" % str(arg))
else:
return arg
def execute_task(dsk, key, state, queue, results, lock):
"""
Compute task and handle all administration
See also:
_execute_task - actually execute task
"""
try:
task = dsk[key]
result = _execute_task(task, state['cache'], dsk=dsk)
with lock:
finish_task(dsk, key, result, state, results)
result = key, task, result, None
except Exception as e:
import sys
exc_type, exc_value, exc_traceback = sys.exc_info()
result = key, task, e, exc_traceback
queue.put(result)
return
def finish_task(dsk, key, result, state, results):
"""
    Update execution state after a task finishes
Mutates. This should run atomically (with a lock).
"""
state['cache'][key] = result
if key in state['ready']:
state['ready'].remove(key)
for dep in state['dependents'][key]:
s = state['waiting'][dep]
s.remove(key)
if not s:
del state['waiting'][dep]
state['ready'].add(dep)
for dep in state['dependencies'][key]:
if dep in state['waiting_data']:
s = state['waiting_data'][dep]
s.remove(key)
if not s and dep not in results:
if DEBUG:
from chest.core import nbytes
print("Key: %s\tDep: %s\t NBytes: %.2f\t Release" % (key, dep,
                        sum(map(nbytes, state['cache'].values())) / 1e6))
assert dep in state['cache']
release_data(dep, state)
assert dep not in state['cache']
elif dep in state['cache'] and dep not in results:
release_data(dep, state)
state['finished'].add(key)
state['running'].remove(key)
return state
def release_data(key, state):
""" Remove data from temporary storage
See Also
finish_task
"""
if key in state['waiting_data']:
assert not state['waiting_data'][key]
del state['waiting_data'][key]
state['released'].add(key)
del state['cache'][key]
def nested_get(ind, coll, lazy=False):
""" Get nested index from collection
Examples
--------
>>> nested_get(1, 'abc')
'b'
>>> nested_get([1, 0], 'abc')
('b', 'a')
>>> nested_get([[1, 0], [0, 1]], 'abc')
(('b', 'a'), ('a', 'b'))
"""
if isinstance(ind, list):
if lazy:
return (nested_get(i, coll, lazy=lazy) for i in ind)
else:
return tuple([nested_get(i, coll, lazy=lazy) for i in ind])
else:
return coll[ind]
'''
Task Selection
--------------
We often have a choice among many tasks to run next. This choice is both
cheap and can significantly impact performance.
Here we choose tasks that immediately free data resources.
'''
def score(key, state):
""" Prefer to run tasks that remove need to hold on to data """
deps = state['dependencies'][key]
wait = state['waiting_data']
return sum([1./len(wait[dep])**2 for dep in deps])
def choose_task(state, score=score):
"""
Select a task that maximizes scoring function
Default scoring function selects tasks that free up the maximum number of
resources.
E.g. for ready tasks a, b with dependencies:
{a: {x, y},
b: {x, w}}
and for data w, x, y, z waiting on the following tasks
{w: {b, c}
x: {a, b, c},
y: {a}}
We choose task a because it will completely free up resource y and
partially free up resource x. Task b only partially frees up resources x
and w and completely frees none so it is given a lower score.
See also:
score
"""
return max(state['ready'], key=partial(score, state=state))
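# A small worked example of the scoring rule above, using a hypothetical toy
# state containing just the keys that ``score`` reads: task 'a' depends on x
# and y, task 'b' on x and w; y is awaited only by 'a', so running 'a' frees
# y completely and outscores 'b'.
_toy_state = {
    'dependencies': {'a': set(['x', 'y']), 'b': set(['x', 'w'])},
    'waiting_data': {'w': set(['b', 'c']),
                     'x': set(['a', 'b', 'c']),
                     'y': set(['a'])},
    'ready': set(['a', 'b']),
}
# score('a', _toy_state) == 1/3**2 + 1/1**2 ~= 1.11
# score('b', _toy_state) == 1/3**2 + 1/2**2 ~= 0.36
# choose_task(_toy_state) == 'a'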
'''
Inlining
--------
We join small cheap tasks on to others to avoid the creation of intermediaries.
'''
def inline(dsk, fast_functions=None):
""" Inline cheap functions into larger operations
>>> dsk = {'out': (add, 'i', 'd'), # doctest: +SKIP
... 'i': (inc, 'x'),
... 'd': (double, 'y'),
... 'x': 1, 'y': 1}
>>> inline(dsk, [inc]) # doctest: +SKIP
{'out': (add, (inc, 'x'), 'd'),
'd': (double, 'y'),
'x': 1, 'y': 1}
"""
if not fast_functions:
return dsk
dependencies = dict((k, get_dependencies(dsk, k)) for k in dsk)
dependents = reverse_dict(dependencies)
def isfast(func):
if hasattr(func, 'func'): # Support partials, curries
return func.func in fast_functions
else:
return func in fast_functions
result = dict((k, expand_value(dsk, fast_functions, k))
for k, v in dsk.items()
if not dependents[k]
or not istask(v)
or not isfast(v[0]))
return result
def expand_key(dsk, fast, key):
"""
>>> dsk = {'out': (sum, ['i', 'd']),
... 'i': (inc, 'x'),
... 'd': (double, 'y'),
... 'x': 1, 'y': 1}
>>> expand_key(dsk, [inc], 'd')
'd'
>>> expand_key(dsk, [inc], 'i') # doctest: +SKIP
(inc, 'x')
>>> expand_key(dsk, [inc], ['i', 'd']) # doctest: +SKIP
[(inc, 'x'), 'd']
"""
if isinstance(key, list):
return [expand_key(dsk, fast, item) for item in key]
def isfast(func):
if hasattr(func, 'func'): # Support partials, curries
return func.func in fast
else:
return func in fast
if not ishashable(key):
return key
if (key in dsk and istask(dsk[key]) and isfast(dsk[key][0])):
task = dsk[key]
return (task[0],) + tuple([expand_key(dsk, fast, k) for k in task[1:]])
else:
return key
def expand_value(dsk, fast, key):
"""
>>> dsk = {'out': (sum, ['i', 'd']),
... 'i': (inc, 'x'),
... 'd': (double, 'y'),
... 'x': 1, 'y': 1}
>>> expand_value(dsk, [inc], 'd') # doctest: +SKIP
(double, 'y')
>>> expand_value(dsk, [inc], 'i') # doctest: +SKIP
(inc, 'x')
>>> expand_value(dsk, [inc], 'out') # doctest: +SKIP
(sum, [(inc, 'x'), 'd'])
"""
task = dsk[key]
if not istask(task):
return task
func, args = task[0], task[1:]
return (func,) + tuple([expand_key(dsk, fast, arg) for arg in args])
'''
`get`
-----
The main function of the scheduler. Get is the main entry point.
'''
def get(dsk, result, nthreads=psutil.NUM_CPUS, cache=None, debug_counts=None, **kwargs):
""" Threaded cached implementation of dask.get
Parameters
----------
dsk: dict
A dask dictionary specifying a workflow
result: key or list of keys
Keys corresponding to desired data
nthreads: integer of thread count
The number of threads to use in the ThreadPool that will actually execute tasks
cache: dict-like (optional)
Temporary storage of results
debug_counts: integer or None
This integer tells how often the scheduler should dump debugging info
Examples
--------
>>> dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}
>>> get(dsk, 'w')
4
>>> get(dsk, ['w', 'y'])
(4, 2)
"""
if isinstance(result, list):
result_flat = set(flatten(result))
else:
result_flat = set([result])
results = set(result_flat)
pool = ThreadPool(nthreads)
state = start_state_from_dask(dsk, cache=cache)
queue = Queue()
#lock for state dict updates
#When a task completes, we need to update several things in the state dict.
#To make sure the scheduler is in a safe state at all times, the state dict
# needs to be updated by only one thread at a time.
lock = Lock()
tick = [0]
if not state['ready']:
raise ValueError("Found no accessible jobs in dask")
def fire_task():
""" Fire off a task to the thread pool """
# Update heartbeat
tick[0] += 1
# Emit visualization if called for
if debug_counts and tick[0] % debug_counts == 0:
visualize(dsk, state, filename='dask_%03d' % tick[0])
# Choose a good task to compute
key = choose_task(state)
state['ready'].remove(key)
state['running'].add(key)
# Submit
pool.apply_async(execute_task, args=[dsk, key, state, queue, results,
lock])
try:
# Seed initial tasks into the thread pool
with lock:
while state['ready'] and len(state['running']) < nthreads:
fire_task()
# Main loop, wait on tasks to finish, insert new ones
while state['waiting'] or state['ready'] or state['running']:
key, finished_task, res, tb = queue.get()
if isinstance(res, Exception):
import traceback
traceback.print_tb(tb)
raise res
with lock:
while state['ready'] and len(state['running']) < nthreads:
fire_task()
finally:
# Clean up thread pool
pool.close()
pool.join()
# Final reporting
while not queue.empty():
key, finished_task, res, tb = queue.get()
# print("Finished %s" % str(finished_task))
if debug_counts:
visualize(dsk, state, filename='dask_end')
return nested_get(result, state['cache'])
'''
Debugging
---------
The threaded nature of this project presents challenges to normal unit-test
and debug workflows. Visualization of the execution state has value.
Our main mechanism is a visualization of the execution state as colors on our
normal dot graphs (see dot module).
'''
def visualize(dsk, state, filename='dask'):
""" Visualize state of compputation as dot graph """
from dask.dot import dot_graph, write_networkx_to_dot
g = state_to_networkx(dsk, state)
write_networkx_to_dot(g, filename=filename)
def color_nodes(dsk, state):
data, func = dict(), dict()
for key in dsk:
func[key] = {'color': 'gray'}
data[key] = {'color': 'gray'}
for key in state['released']:
data[key] = {'color': 'blue'}
for key in state['cache']:
data[key] = {'color': 'red'}
for key in state['finished']:
func[key] = {'color': 'blue'}
for key in state['running']:
func[key] = {'color': 'red'}
for key in dsk:
func[key]['penwidth'] = 4
data[key]['penwidth'] = 4
return data, func
def state_to_networkx(dsk, state):
""" Convert state to networkx for visualization
See Also:
visualize
"""
from .dot import to_networkx
data, func = color_nodes(dsk, state)
return to_networkx(dsk, data_attributes=data, function_attributes=func)
| 17,316 | 5,502 |
# Generated by Django 3.1.1 on 2020-09-01 18:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('todoapp', '0002_auto_20200719_2021'),
]
operations = [
migrations.AlterField(
model_name='todo',
name='todolist',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='todos', to='todoapp.todolist'),
),
]
| 495 | 187 |
from django.core.management.base import LabelCommand
from yachter.courses.utils import export_static_html
class Command(LabelCommand):
help = "Export a static HTML/JSON website for browsing the courses."
args = "exportPath"
label = 'path to export dir'
def handle_label(self, export_path, **options):
export_static_html(export_path)
| 360 | 103 |
###############################################################
# cms set host='juliet.futuresystems.org'
# cms set user=$USER
#
# pytest -v --capture=no tests/test_01_job_cli.py
# pytest -v tests/test_01_job_cli.py
# pytest -v --capture=no tests/test_01_job_cli.py::TestJob::<METHODNAME>
###############################################################
import pytest
from cloudmesh.common.Shell import Shell
from cloudmesh.common.debug import VERBOSE
from cloudmesh.common.util import HEADING
from cloudmesh.common.Benchmark import Benchmark
from cloudmesh.common.variables import Variables
from cloudmesh.configuration.Configuration import Configuration
from textwrap import dedent
from cloudmesh.common.util import path_expand
import oyaml as yaml
import re
import time
import getpass
Benchmark.debug()
variables = Variables()
print(variables)
variables["jobset"] = path_expand("./a.yaml")
configured_jobset = variables["jobset"]
remote_host_ip = variables['host'] or 'juliet.futuresystems.org'
remote_host_user = variables['user'] or getpass.getuser()
@pytest.mark.incremental
class TestJob:
def test_help(self):
HEADING()
Benchmark.Start()
result = Shell.execute("cms job help", shell=True)
Benchmark.Stop()
VERBOSE(result)
assert "Usage" in result
assert "Description" in result
def test_info(self):
HEADING()
Benchmark.Start()
variables = Variables()
configured_jobset = variables["jobset"]
result = Shell.execute("cms job info", shell=True)
Benchmark.Stop()
VERBOSE(result)
assert configured_jobset in result
def test_template(self):
HEADING()
Benchmark.Start()
result = Shell.execute("cms job template --name='job[1-2]'", shell=True)
Benchmark.Stop()
VERBOSE(result)
spec = Configuration(configured_jobset)
assert spec['cloudmesh.jobset.hosts'] is not None
jobs = spec['cloudmesh.jobset.jobs'].keys()
assert 'job1' in jobs
assert 'job2' in jobs
def test_add_file(self):
HEADING()
job_str = dedent("""
pytest_job:
name: pytest_job
directory: .
ip: local
input: ./data
output: ./output/abcd
status: ready
gpu: ' '
user: user
arguments: -lisa
executable: ls
shell: bash
""").strip()
job = yaml.safe_load(job_str)
with open('../tests/other.yaml', 'w') as fo:
yaml.safe_dump(job, fo)
Benchmark.Start()
result = Shell.execute("cms job add 'other.yaml'", shell=True)
Benchmark.Stop()
VERBOSE(result)
time.sleep(10)
spec1 = Configuration(configured_jobset)
jobs1 = spec1['cloudmesh.jobset.jobs'].keys()
assert 'pytest_job' in jobs1
def test_add_cli(self):
HEADING()
Benchmark.Start()
result = Shell.execute("cms job add --name='pytest_job1' "
f"--ip={remote_host_ip} "
"--executable='ls' "
"--arguments='-lisa' "
f"--user='{remote_host_user}' ",
shell=True)
Benchmark.Stop()
VERBOSE(result)
spec = Configuration(configured_jobset)
jobs = spec['cloudmesh.jobset.jobs'].keys()
assert 'pytest_job1' in jobs
def test_list(self):
HEADING()
Benchmark.Start()
result = Shell.execute("cms job list", shell=True)
Benchmark.Stop()
job_count_1 = len(re.findall(r"\|\s\d+\s+\|", result, re.MULTILINE))
VERBOSE(result)
spec = Configuration(configured_jobset)
job_count_2 = len(spec['cloudmesh.jobset.jobs'].keys())
assert job_count_1 == job_count_2
def test_add_host(self):
HEADING()
Benchmark.Start()
result = Shell.execute("cms job hosts add --hostname='juliet' "
f"--ip='{remote_host_ip}' "
"--cpu_count='12'", shell=True)
VERBOSE(result)
spec = Configuration(configured_jobset)
host_list = spec['cloudmesh.jobset.hosts'].keys()
assert 'juliet' in host_list
def test_run(self):
HEADING()
Benchmark.Start()
result = Shell.execute("cms job run --name='pytest_job1'", shell=True)
Benchmark.Stop()
VERBOSE(result)
time.sleep(10)
spec = Configuration(configured_jobset)
job_status = spec['cloudmesh.jobset.jobs.pytest_job1.status']
assert job_status == 'submitted'
assert spec['cloudmesh.jobset.jobs.pytest_job1.submitted_to_ip'] \
is not None
def test_kill(self):
HEADING()
Benchmark.Start()
result = Shell.execute("cms job kill --name='pytest_job1'", shell=True)
Benchmark.Stop()
VERBOSE(result)
time.sleep(10)
spec = Configuration(configured_jobset)
job_status = spec['cloudmesh.jobset.jobs.pytest_job1.status']
assert job_status == 'killed'
def test_reset(self):
HEADING()
Benchmark.Start()
result = Shell.execute("cms job reset --name='pytest_job1'", shell=True)
Benchmark.Stop()
VERBOSE(result)
time.sleep(5)
spec = Configuration(configured_jobset)
job_status = spec['cloudmesh.jobset.jobs.pytest_job1.status']
assert job_status == 'ready'
def test_delete(self):
HEADING()
Benchmark.Start()
result = Shell.execute("cms job delete --name='pytest_job1'",
shell=True)
Benchmark.Stop()
VERBOSE(result)
time.sleep(5)
spec = Configuration(configured_jobset)
jobs = spec['cloudmesh.jobset.jobs'].keys()
assert 'pytest_job1' not in jobs
def test_benchmark(self):
HEADING()
Benchmark.print(csv=True)
| 6,163 | 1,924 |
import argparse
import time
from pathlib import Path
from logger import get_logger
from csv_reader import CSVReader
from utils import infer_type, clear_console
from sql_generator import SQLGenerator
if __name__ == "__main__":
## Clear console
clear_console()
## get logger
logger = get_logger('pysqlizer')
# Parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', type=str, default='', help='Input CSV filename', metavar='infile', required=True)
parser.add_argument('-o', '--output', type=str, default='', help='Output SQL filename', metavar='outfile')
parser.add_argument('-t', '--table_name', type=str, default='', help='SQL table name', metavar='tname')
parser.add_argument('-d', '--db_name', type=str, default='', help='SQL database name', metavar='dbname')
parser.add_argument('-s', '--delimiter', type=str, default='', help='CSV file delimiter', metavar='delimiter')
parser.add_argument('-v', '--version', help='Show the program version', action='version', version='%(prog)s 1.0')
args = parser.parse_args()
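    # Example invocation (illustrative filenames; flags as defined above):
    #   python pysqlizer.py -i data.csv -o out -t users -d mydb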
#print(args)
logger.info('Starting PySQLizer...')
# Get arguments
input_file = args.input
output_file = args.output
table_name = args.table_name
database_name = args.db_name
delimiter = args.delimiter if args.delimiter else ','
## Check input file (type, existence and extension)
infile = Path(input_file)
if infile.is_dir():
logger.error('The file {} is a directory!'.format(input_file))
quit()
if not infile.exists():
        logger.error('The file {} does not exist!'.format(input_file))
quit()
if not infile.suffix.lower() == '.csv':
logger.error('The extension of the file {} is not CSV!'.format(input_file))
quit()
if output_file == '':
output_file = infile.stem
if table_name == '':
table_name = 'tname'
try:
logger.info('Reading CSV file: {}'.format(input_file))
start_time = time.perf_counter()
## Create CSV reader instance
csv_reader = CSVReader(input_file)
csv_reader.read_file(delimiter=delimiter)
csv_reader.extract_header_fields()
csv_reader.check_data_sanity()
end_time = time.perf_counter()
logger.info('Elapsed time: {}s'.format(end_time-start_time))
logger.info('Generating SQL instructions...')
start_time = time.perf_counter()
## Create SQL generator instance
sql_generator = SQLGenerator()
table_query = sql_generator.create_sql_table(table_name=table_name, columns=csv_reader.keys, db_name=database_name)
insert_query = sql_generator.insert_data(tablename=table_name, columns=csv_reader.keys, data=csv_reader.data)
end_time = time.perf_counter()
logger.info('Elapsed time: {}s'.format(end_time-start_time))
logger.info('Saving SQL file: {}'.format(output_file + '.sql'))
start_time = time.perf_counter()
sql_generator.save_sql_file(filename=output_file, table_structure_query=table_query, insert_query=insert_query)
end_time = time.perf_counter()
logger.info('Elapsed time: {}s'.format(end_time-start_time))
except Exception as e:
logger.error('{}'.format(e.args))
| 3,370 | 1,042 |
# Definition for an interval.
class Interval(object):
def __init__(self, s=0, e=0):
self.start = s
self.end = e
class Solution(object):
def merge(self, intervals):
"""
:type intervals: List[Interval]
:rtype: List[Interval]
"""
if intervals is None or len(intervals) == 0:
return []
intervals.sort(key=lambda x: x.start)
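        # after sorting by start, one sweep suffices: each interval either
        # overlaps the last merged interval (extend it) or starts a new group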
ans = [intervals.pop(0)]
last = ans[0]
for interval in intervals:
if interval.start <= last.end:
if interval.end > last.end:
last.end = interval.end
else:
ans.append(interval)
last = interval
return ans
solution = Solution()
ans = solution.merge([Interval(1, 4), Interval(2, 3)])
for i in ans:
print(i.start, i.end)
| 854 | 254 |
from tkinter import *
import mariadb
root = Tk()
root.title('SCHOOL MANAGEMENT')
root.geometry("900x700")
counter=2
for i in range(1, 20):
    entry = Entry(root)  # grid() returns None, so keep the widget reference before placing it
    entry.grid(row=counter, column=0)
    counter += 2
root.mainloop() | 221 | 97 |
import flopy.mt3d as mt
class SftAdapter:
_data = None
def __init__(self, data):
self._data = data
def validate(self):
# should be implemented
# for key in content:
# do something
# return some hints
pass
def is_valid(self):
# should be implemented
# for key in content:
# do something
# return true or false
return True
def merge(self):
default = self.default()
for key in self._data:
if key == 'sf_stress_period_data':
default[key] = self.to_dict(self._data[key])
continue
default[key] = self._data[key]
return default
def to_dict(self, data):
if type(data) == list:
spd_dict = {}
for stress_period, record in enumerate(data):
spd_dict[stress_period] = record
return spd_dict
return data
def get_package(self, _mt):
content = self.merge()
return mt.Mt3dSft(
_mt,
**content
)
@staticmethod
def default():
default = {
"nsfinit": 0,
"mxsfbc": 0,
"icbcsf": 0,
"ioutobs": None,
"ietsfr": 0,
"isfsolv": 1,
"wimp": 0.5,
"wups": 1.0,
"cclosesf": 1e-06,
"mxitersf": 10,
"crntsf": 1.0,
"iprtxmd": 0,
"coldsf": 0.0,
"dispsf": 0.0,
"nobssf": 0,
"obs_sf": None,
"sf_stress_period_data": None,
"unitnumber": None,
"filenames": None,
"dtype": None,
"extension": 'sft'
}
return default
@staticmethod
def read_package(package):
content = {
"nsfinit": package.nsfinit,
"mxsfbc": package.mxsfbc,
"icbcsf": package.icbcsf,
"ioutobs": package.ioutobs,
"ietsfr": package.ietsfr,
"isfsolv": package.isfsolv,
"wimp": package.wimp,
"wups": package.wups,
"cclosesf": package.cclosesf,
"mxitersf": package.mxitersf,
"crntsf": package.crntsf,
"iprtxmd": package.iprtxmd,
"coldsf": package.coldsf,
"dispsf": package.dispsf,
"nobssf": package.nobssf,
"obs_sf": package.obs_sf,
"sf_stress_period_data": package.sf_stress_period_data,
"unitnumber": package.unitnumber,
"filenames": package.filenames,
"dtype": package.dtype,
"extension": package.extension
}
return content
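if __name__ == '__main__':
    # Minimal demonstration (hypothetical data, not from the original project):
    # merge() overlays user values on the defaults, and to_dict() re-keys the
    # per-stress-period list by period index as flopy's Mt3dSft expects.
    demo = SftAdapter({'nsfinit': 3, 'sf_stress_period_data': [[0, 1.0], [0, 2.0]]})
    merged = demo.merge()
    print(merged['nsfinit'])                # -> 3
    print(merged['sf_stress_period_data'])  # -> {0: [0, 1.0], 1: [0, 2.0]}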
| 2,742 | 872 |
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from collections import namedtuple
from typing import Dict
from src.visualization import diminishing_reward_colors, PLOT_DPI
StateAction = namedtuple('StateAction', 'id state action')
def get_all_state_action(state_to_actions):
state_action = []
idx = 1
for state, actions in state_to_actions.items():
if len(actions) > 0:
for action in actions:
state_action.append(StateAction(idx, state, action))
idx += 1
return state_action
def plot_payoff_landscape(payoffs: Dict, rho: float, rho_text_location, plot_filename=None) -> None:
colors = diminishing_reward_colors()
fig, ax = plt.subplots(figsize=(15, 10))
x = range(1, len(payoffs)+1)
for alg in ['ACS2', 'AACS2_v1', 'AACS2_v2', 'Q-Learning', 'R-Learning']:
y = sorted([v[alg] for k, v in payoffs.items()])
plt.scatter(x, y, color=colors[alg])
plt.plot(x, y, label=alg, linewidth=2, color=colors[alg])
# x-axis
ax.xaxis.set_major_locator(MultipleLocator(5))
ax.xaxis.set_minor_locator(MultipleLocator(1))
ax.xaxis.set_major_formatter(FormatStrFormatter('%1.0f'))
ax.xaxis.set_tick_params(which='major', size=10, width=2, direction='in')
ax.xaxis.set_tick_params(which='minor', size=5, width=1, direction='in')
ax.set_xlabel("State-action pairs")
# y-axis
ax.yaxis.set_major_locator(MultipleLocator(250))
ax.yaxis.set_minor_locator(MultipleLocator(50))
ax.yaxis.set_tick_params(which='major', size=10, width=2, direction='in')
ax.yaxis.set_tick_params(which='minor', size=5, width=1, direction='in')
ax.set_ylabel("Payoff value")
# others
ax.set_title(f"Payoff Landscape")
ax.text(**rho_text_location, s=fr'$\rho={rho:.2f}$', color=colors['R-Learning'])
ax.legend(loc='lower right', bbox_to_anchor=(1, 0), frameon=False)
if plot_filename:
plt.savefig(plot_filename, transparent=False, bbox_inches='tight', dpi=PLOT_DPI)
return fig | 2,089 | 771 |
class MOD:
def __init__(self, Globals):
""" This adds additional message categories to the player detection algorithm """
# data transfer variables
self.Globals = Globals
self.G = self.Globals
self.ModData = Globals.ModData["Chatpp"]
self.backend = Globals.ui_backend
self.frontend = Globals.ui_frontend
# set mod data
self.ModData.name = "Chatpp"
self.ModData.version = "0.0.1"
self.ModData.config = {
"chat++-hypixel": True,
"chat++-bedwars practice": False,
}
self.ModData.settings = {
"chat++-hypixel": "Optimise for Hypixel", # config name : displayed name
"chat++-bedwars practice": "Optimise for the Bedwars Practice server", # config name : displayed name
}
self.ModData.scopes = {
"init": self.setup, # this is part of the setup for the backend ui
"config-init": self.ModData.config, # this is a dictionary of all config items which the mod uses
"config-settings": self.ModData.name, # this registers the mod for the settings menu
"on-message": self.on_message, # this is called when a chat message appears
}
def setup(self, frontend, backend):
""" This is the mod setup function """
join_fragment = "\n - "
print(
f"{self.ModData.name} {self.ModData.version} has been loaded with scopes:{join_fragment}{join_fragment.join([scope for scope in self.ModData.scopes.keys()])}",
end="\n\n")
self.frontend = frontend
self.backend = backend
def on_message(self, timestamp, message):
""" This processes a message """
# print(f"{timestamp} : '{message}'")
# Hypixel
if self.G.config["chat++-hypixel"]:
pass
# Bedwars practice
ranks = ["[Master]", "[Adept]", "[Trainee]"]
if self.G.config["chat++-bedwars practice"]:
# ranked users
for rank in ranks:
if f"{rank} " in message:
message = message.split(f"{rank} ")[1]
username = message.split(" ")[0]
self.add_user(username)
# void message
if " was hit into the void by " in message:
if message.endswith(" FINAL KILL!"):
username1 = message.split(" ")[0]
username2 = message.split(" ")[-3]
else:
username1, *_, username2 = message.split(" ")
self.add_user(username1)
self.add_user(username2)
            # fell-into-the-void message
elif message.endswith(" fell into the void."):
username = message.split(" ")[0]
self.add_user(username)
# lives remaining
elif " has " in message and " lives" in message:
username, *_ = message.split(" ")
self.add_user(username)
# elimination
elif " has been eliminated" in message:
username, *_ = message.split(" ")
self.sub_user(username)
# server join message
elif " has joined!" in message:
*_, username, _, _ = message.split(" ")
self.add_user(username)
# server leave message
elif " has left!" in message:
*_, username, _, _ = message.split(" ")
self.sub_user(username)
# game leave message
elif message.endswith(" has left the game!"):
username = message.split(" ")[0]
self.add_user(username)
# game start (connecting to lobby)
elif message.startswith("Connecting to "):
self.G.lobby_players = []
# game start (connection successful)
elif message.startswith("Successfully connected to "):
self.G.lobby_players = []
# sending to lobby
elif message.startswith("Sending you to "):
self.G.lobby_players = []
# remove "at"
elif message == "Join the discord for more info at: ":
self.sub_user("at")
# players in game
elif message.startswith("Players in this game: "):
players = message.split(": ")[-1].split(" ")
for player in players:
self.add_user(player)
# block sumo: gold block
elif message.endswith(" has been on the centre gold block for 5 seconds!"):
username = message.split(" ")[0]
self.add_user(username)
# bedwars
elif message.startswith("BED DESTRUCTION > ") and " was dismantled by " in message:
username = message.split(" ")[-1]
self.add_user(username)
# else:
# for p in self.G.lobby_players:
# if p in message:
# print(f"{timestamp} : '{message}'")
def add_user(self, username):
""" This adds a username to the player list """
if username not in self.G.lobby_players:
self.G.lobby_players.append(username)
def sub_user(self, username):
""" This removes a username from the player list """
if username in self.G.lobby_players:
# remove player
self.G.lobby_players.remove(username)
# run mod actions
self.G.thread_chat_ctx.mod_on_player_leave(username)
| 5,626 | 1,526 |
import time
import sys
import threading
import asyncio
# fly
from .ModelBootstrap import ModelBootstrap
from . import ModelManager
def bootstrap(_filename):
    # Model Bootstrap: build the model, then block forever on an event that is
    # never set, so the bootstrap's background threads keep running
    runForEver = threading.Event()
    mb = ModelBootstrap(filename=_filename)
    runForEver.wait()
#runForEver = threading.Event()
# Expects a .conf for the model. It should be availble in config folder
#modelConf='calculator.conf' #sys.argv[1]
#bootstrap(modelConf)
# This will wait forever.
#
#runForEver.wait()
| 520 | 176 |
import feedparser
def parseRSS(rss_url):
return feedparser.parse(rss_url)
def getHeadLines(rss_url):
headlines = []
feed = parseRSS(rss_url)
for newitem in feed['items']:
headlines.append(newitem['title'])
return headlines
allheadlines = []
newsurls = {'googlenews': 'https://news.google.com/news/rss/?h1=ta&ned=us&gl=IN'}  # 'gl=IN' requests Indian news; swap in your own country code
for key, url in newsurls.items():
allheadlines.extend(getHeadLines(url))
for h in allheadlines:
print(h)
| 569 | 208 |
from behave import when, then
from application.models import Member
@when(u'I request \'{page}\'')
def step_impl(context, page):
context.response = context.test.client.get(page)
@when(u'there are no members')
def step_impl(context):
Member.objects.all().delete()
@then(u'I see \'{content}\'')
def step_impl(context, content):
context.test.assertIn(content, str(context.response.content, 'utf-8'))
@when(u'there are members')
def step_impl(context):
Member.objects.create(name='Test user')
@then(u'I do not see \'{content}\'')
def step_impl(context, content):
context.test.assertNotIn(content, str(context.response.content, 'utf-8'))
| 664 | 228 |
"""
Displaying the fields in an xy cross section of the sphere (x polarized light, z-propagating)
"""
import numpy as np
import matplotlib.pyplot as plt
import miepy
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.cm as cm
Ag = miepy.materials.Ag()
# calculate scattering coefficients, 800 nm illumination
radius = 200e-9 # 200 nm radius
lmax = 5 # Use up to 5 multipoles
sphere = miepy.single_mie_sphere(radius, Ag, 800e-9, lmax)
# create discretized xy plane
x = np.linspace(-2*radius,2*radius,100)
y = np.linspace(-2*radius,2*radius,100)
z = np.array([radius*0.0])
X,Y,Z = np.meshgrid(x,y,z, indexing='xy')
R = (X**2 + Y**2 + Z**2)**0.5
THETA = np.arccos(Z/R)
PHI = np.arctan2(Y,X)
# electric and magnetic field functions
E_func = sphere.E_field(index=0)
E = E_func(R,THETA,PHI).squeeze()
IE = np.sum(np.abs(E)**2, axis=0)
H_func = sphere.H_field(index=0)
H = H_func(R,THETA,PHI).squeeze()
IH = np.sum(np.abs(H)**2, axis=0)
# plot results
fig,axes = plt.subplots(ncols=2, figsize=plt.figaspect(1/2.7))
for i,ax in enumerate(axes):
plt.subplot(ax)
I = IE if i == 0 else IH
plt.pcolormesh(np.squeeze(X)*1e9,np.squeeze(Y)*1e9, I, shading="gouraud", cmap=cm.viridis)
plt.colorbar(label='field intensity')
THETA = np.squeeze(THETA)
PHI = np.squeeze(PHI)
for i,ax in enumerate(axes):
F = E if i == 0 else H
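    # convert the spherical field components (F_r, F_theta, F_phi) into
    # Cartesian x and y components for the streamplot below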
Fx = F[0]*np.sin(THETA)*np.cos(PHI) + F[1]*np.cos(THETA)*np.cos(PHI) - F[2]*np.sin(PHI)
Fy = F[0]*np.sin(THETA)*np.sin(PHI) + F[1]*np.cos(THETA)*np.sin(PHI) + F[2]*np.cos(PHI)
step=10
ax.streamplot(np.squeeze(X)*1e9, np.squeeze(Y)*1e9, np.real(Fx), np.real(Fy), color='white', linewidth=1.0)
for ax in axes:
ax.set(xlim=[-2*radius*1e9, 2*radius*1e9], ylim=[-2*radius*1e9, 2*radius*1e9],
aspect='equal', xlabel="X (nm)", ylabel="Y (nm)")
axes[0].set_title("Electric Field")
axes[1].set_title("Magnetic Field")
plt.show()
# theta = np.linspace(0,np.pi,50)
# phi = np.linspace(0,2*np.pi,50)
# r = np.array([10000])
# R,THETA,PHI = np.meshgrid(r,theta,phi)
# X = R*np.sin(THETA)*np.cos(PHI)
# Y = R*np.sin(THETA)*np.sin(PHI)
# Z = R*np.cos(THETA)
# X = X.squeeze()
# Y = Y.squeeze()
# Z = Z.squeeze()
# E = E_func(R,THETA,PHI)
# I = np.sum(np.abs(E)**2, axis=0)
# I = np.squeeze(I)
# I -= np.min(I)
# I /= np.max(I)
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# shape = X.shape
# C = np.zeros((shape[0], shape[1], 4))
# cmap_3d = cm.viridis
# for i in range(shape[0]):
# for j in range(shape[1]):
# C[i,j,:] = cmap_3d(I[i,j])
# surf = ax.plot_surface(X*1e9, Y*1e9, Z*1e9, rstride=1, cstride=1,shade=False, facecolors=C,linewidth=.0, edgecolors='#000000', antialiased=False)
# m = cm.ScalarMappable(cmap=cmap_3d)
# m.set_array(I)
# plt.colorbar(m)
# surf.set_edgecolor('k')
# ax.set_xlabel('X')
| 2,824 | 1,398 |
import datetime
import dateutil.parser
import xml
import xml.etree.ElementTree
from pya2a.utils import parseRemark
class Entity:
NAMESPACE = {"a2a": "http://Mindbus.nl/A2A"}
class Person(Entity):
"""
"""
def __init__(self, element: xml.etree.ElementTree.Element):
self.id = element.attrib['pid']
self.relations = []
## PersonName
pn = element.find('a2a:PersonName', namespaces=self.NAMESPACE)
self.PersonName = PersonName(pn)
# Gender
if (el := element.find('a2a:Gender',
namespaces=self.NAMESPACE)) is not None:
self.Gender = el.text
# Residence
if (el := element.find('a2a:Residence',
namespaces=self.NAMESPACE)) is not None:
self.Residence = Place(el)
# Religion
if (el := element.find('a2a:Religion',
namespaces=self.NAMESPACE)) is not None:
self.Religion = el.find('a2a:ReligionLiteral',
namespaces=self.NAMESPACE).text
# Origin
if (el := element.find('a2a:Origin',
namespaces=self.NAMESPACE)) is not None:
self.Origin = Place(el)
# Age
# BirthDate
if (el := element.find('a2a:BirthDate',
namespaces=self.NAMESPACE)) is not None:
self.BirthDate = Date(el)
# BirthPlace
if (el := element.find('a2a:BirthPlace',
namespaces=self.NAMESPACE)) is not None:
self.BirthPlace = Place(el)
# Profession
if (el := element.find('a2a:Profession',
namespaces=self.NAMESPACE)) is not None:
self.Profession = el.text
# MaritalStatus
if (el := element.find('a2a:MaritalStatus',
namespaces=self.NAMESPACE)) is not None:
            self.MaritalStatus = el.text
# PersonRemark
        if els := element.findall('a2a:PersonRemark',
                                  namespaces=self.NAMESPACE):
remarks = []
for el in els:
remarkType = el.attrib['Key']
remark = el.find('a2a:Value', namespaces=self.NAMESPACE).text
remarks.append((remarkType, parseRemark(remark)))
self.Remarks = dict(remarks)
def __getattr__(self, attr):
return None
class PersonName(Entity):
"""
A2A:PersonNameAlias, A2A:PersonNameFamilyName, A2A:PersonNameFirstName,
A2A:PersonNameInitials, A2A:PersonNameLastName, A2A:PersonNameLiteral,
A2A:PersonNameNickName, A2A:PersonNamePatronym, A2A:PersonNamePrefixLastName,
A2A:PersonNameRemark, A2A:PersonNameTitle, A2A:PersonNameTitleOfNobility
"""
def __init__(self, element: xml.etree.ElementTree.Element):
for child in element:
key = child.tag.replace(f"{{{self.NAMESPACE['a2a']}}}", '')
value = child.text
self.__setattr__(key, value)
def __iter__(self):
for i in vars(self):
if i.startswith('PersonName'):
yield self.__getattribute__(i)
else:
continue
def __getattr__(self, attr):
return None
class Event(Entity):
def __init__(self, element: xml.etree.ElementTree.Element):
self.id = element.attrib['eid']
self.relations = []
# EventType
self.EventType = element.find('a2a:EventType',
namespaces=self.NAMESPACE).text
# EventDate
if (el := element.find('a2a:EventDate',
namespaces=self.NAMESPACE)) is not None:
self.EventDate = Date(el)
# EventPlace
if (el := element.find('a2a:EventPlace',
namespaces=self.NAMESPACE)) is not None:
self.EventPlace = Place(el)
# EventReligion
if (el := element.find('a2a:EventReligion',
namespaces=self.NAMESPACE)) is not None:
self.EventReligion = el.find('a2a:ReligionLiteral',
namespaces=self.NAMESPACE).text
# EventRemark
        if els := element.findall('a2a:EventRemark',
                                  namespaces=self.NAMESPACE):
remarks = []
for el in els:
remarkType = el.attrib['Key']
remark = el.find('a2a:Value', namespaces=self.NAMESPACE).text
remarks.append((remarkType, parseRemark(remark)))
self.Remarks = dict(remarks)
def __getattr__(self, attr):
return None
class Object(Entity):
def __init__(self, element: xml.etree.ElementTree.Element):
self.id = element.attrib['oid']
self.relations = []
class Source(Entity):
"""
A2A:EAC, A2A:EAD, A2A:RecordGUID, A2A:RecordIdentifier, A2A:SourceAvailableScans, A2A:SourceDate,
A2A:SourceDigitalOriginal, A2A:SourceDigitalizationDate, A2A:SourceIndexDate, A2A:SourceLastChangeDate,
A2A:SourcePlace, A2A:SourceReference, A2A:SourceRemark, A2A:SourceType
"""
def __init__(self, element: xml.etree.ElementTree.Element):
# SourcePlace
self.SourcePlace = Place(
element.find('a2a:SourcePlace', namespaces=self.NAMESPACE))
# SourceIndexDate
date_from = element.find('a2a:SourceIndexDate/a2a:From',
namespaces=self.NAMESPACE).text
self.IndexDateFrom = dateutil.parser.parse(date_from)
date_to = element.find('a2a:SourceIndexDate/a2a:To',
namespaces=self.NAMESPACE).text
self.IndexDateTo = dateutil.parser.parse(date_to)
# SourceDate
if (el := element.find('a2a:SourceDate',
namespaces=self.NAMESPACE)) is not None:
self.SourceDate = Date(el)
# SourceType
self.SourceType = element.find('a2a:SourceType',
namespaces=self.NAMESPACE).text
# EAD
# EAC
# SourceReference
self.SourceReference = SourceReference(
element.find('a2a:SourceReference', namespaces=self.NAMESPACE))
# SourceAvailableScans
if (el := element.find('a2a:SourceAvailableScans',
namespaces=self.NAMESPACE)) is not None:
self.scans = [
Scan(i)
for i in el.findall('a2a:Scan', namespaces=self.NAMESPACE)
]
else:
self.scans = []
# SourceDigitalizationDate
if (el := element.find('a2a:SourceDigitalizationDate',
namespaces=self.NAMESPACE)) is not None:
self.SourceDigitalizationDate = datetime.date.fromisoformat(
el.text)
# SourceLastChangeDate
self.SourceLastChangeDate = datetime.date.fromisoformat(
element.find('a2a:SourceLastChangeDate',
namespaces=self.NAMESPACE).text)
# SourceRetrievalDate
if (el := element.find('a2a:SourceRetrievalDate',
namespaces=self.NAMESPACE)) is not None:
self.SourceRetrievalDate = datetime.date.fromisoformat(el.text)
# SourceDigitalOriginal
# RecordIdentifier
if (el := element.find('a2a:RecordIdentifier',
namespaces=self.NAMESPACE)) is not None:
self.identifier = el.text
# RecordGUID
guid = element.find('a2a:RecordGUID', namespaces=self.NAMESPACE).text
self.guid = guid.replace('{', '').replace('}', '') # m$
# SourceRemark
        if els := element.findall('a2a:SourceRemark',
                                  namespaces=self.NAMESPACE):
remarks = []
for el in els:
remarkType = el.attrib['Key']
remark = el.find('a2a:Value', namespaces=self.NAMESPACE).text
remarks.append((remarkType, parseRemark(remark)))
remarkKeys = [i[0] for i in remarks]
duplicateKeys = set(k for k in remarkKeys
if remarkKeys.count(k) > 1)
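            # remarks whose key occurs more than once (plus the hardcoded
            # 'filename' key) are collected into lists; all other remarks keep
            # a single parsed value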
duplicateKeys.add('filename') # hardcode
remarkDict = dict(
[i for i in remarks if i[0] not in duplicateKeys])
# add the duplicate keys with list value
for key in duplicateKeys:
remarkDict[key] = [
i[1]['Other'] for i in remarks if i[0] == key
]
self.Remarks = remarkDict
class Relation(Entity):
def __init__(self, element: xml.etree.ElementTree.Element):
self.RelationType = element.find('a2a:RelationType',
namespaces=self.NAMESPACE).text
# ExtendedRelationType
if (el := element.find('a2a:ExtendedRelationType',
namespaces=self.NAMESPACE)) is not None:
self.ExtendedRelationType = el.text
def __get__(self, value):
return self.value
class RelationEP(Relation):
def __init__(self, element: xml.etree.ElementTree.Element):
super().__init__(element)
self.person = element.find('a2a:PersonKeyRef',
namespaces=self.NAMESPACE).text
self.event = element.find('a2a:EventKeyRef',
namespaces=self.NAMESPACE).text
class RelationPP(Relation):
def __init__(self, element: xml.etree.ElementTree.Element):
super().__init__(element)
self.persons = [
i.text for i in element.findall('a2a:PersonKeyRef',
namespaces=self.NAMESPACE)
]
class RelationPO(Relation):
def __init__(self, element: xml.etree.ElementTree.Element):
super().__init__(element)
self.person = element.find('a2a:PersonKeyRef',
namespaces=self.NAMESPACE).text
self.object = element.find('a2a:ObjectKeyRef',
namespaces=self.NAMESPACE).text
class RelationP(Relation):
def __init__(self, element: xml.etree.ElementTree.Element):
super().__init__(element)
self.person = element.find('a2a:PersonKeyRef',
namespaces=self.NAMESPACE).text
class RelationOO(Relation):
def __init__(self, element: xml.etree.ElementTree.Element):
super().__init__(element)
self.objects = [
i.text for i in element.findall('a2a:ObjectKeyRef',
namespaces=self.NAMESPACE)
]
class RelationO(Relation):
def __init__(self, element: xml.etree.ElementTree.Element):
super().__init__(element)
self.object = element.find('a2a:ObjectKeyRef',
namespaces=self.NAMESPACE).text
class Place(Entity):
"""
A2A:Block, A2A:Country, A2A:County, A2A:DescriptiveLocationIndicator, A2A:DetailPlaceRemark,
A2A:HouseName, A2A:HouseNumber, A2A:HouseNumberAddition, A2A:Latitude, A2A:Longitude,
A2A:Municipality, A2A:PartMunicipality, A2A:Place, A2A:Province, A2A:Quarter, A2A:State, A2A:Street
"""
def __init__(self, element: xml.etree.ElementTree.Element):
for child in element:
key = child.tag.replace(f"{{{self.NAMESPACE['a2a']}}}", '')
value = child.text
self.__setattr__(key, value)
class SourceReference(Entity):
def __init__(self, element: xml.etree.ElementTree.Element):
for child in element:
key = child.tag.replace(f"{{{self.NAMESPACE['a2a']}}}", '')
value = child.text
self.__setattr__(key, value)
class Scan(Entity):
def __init__(self, element: xml.etree.ElementTree.Element):
for child in element:
key = child.tag.replace(f"{{{self.NAMESPACE['a2a']}}}", '')
value = child.text
self.__setattr__(key, value)
class Date(Entity):
def __init__(self, element: xml.etree.ElementTree.Element):
# Calendar="" IndexDateTime=""
if 'Calendar' in element.attrib:
self.calendar = element.attrib['Calendar']
if 'IndexDateTime' in element.attrib:
self.IndexDateTime = element.attrib['IndexDateTime']
for child in element:
key = child.tag.replace(f"{{{self.NAMESPACE['a2a']}}}", '')
value = child.text
self.__setattr__(key, value)
self.date = self._toISO()
def _toISO(self):
arguments = {
k.lower(): int(v)
for k, v in vars(self).items()
if k.lower() in ('year', 'month', 'day', 'hour', 'minute')
}
if {'year', 'month', 'day', 'hour'}.issubset(arguments):
date = datetime.datetime(**arguments)
#return date.isoformat()
return date
elif {'year', 'month', 'day'}.issubset(arguments):
date = datetime.date(**arguments)
#return date.isoformat()
return date
elif {'year', 'month'}.issubset(arguments):
return f"{arguments['year']}-{arguments['month']}"
elif {'year'}.issubset(arguments):
return f"{arguments['year']}"
else:
return None
def __str__(self):
        return str(self._toISO())
| 13,616 | 4,004 |
import numpy as np
vmin, vmax = -0.8777435, 0.57090986  # avoid shadowing the min/max builtins
M = np.asmatrix([
[0.02355068, -0.50542802, 0.16642167, -0.44872788, -0.05130898, 0.13320047, 0.41464597, -0.55703336, 0.52567458, 0.23784444, 0.15049535, 0.16599870, -0.28757980, 0.22277315, 0.56460077, -0.70838273, -0.61990398, -0.39724344, -0.09969769, 0.45835119, 0.02840372, 0.09637213, 0.04063996, -0.16667950, -0.68209213, -0.09524837, 0.27514741, 0.02957204, -0.11251312, -0.43414843],
[-0.31239739, -0.13213386, -0.59719753, -0.16117097, 0.29835659, -0.21633907, -0.55013347, -0.22406115, -0.47912723, -0.08179668, 0.46718585, 0.38543564, -0.49470344, -0.35172677, -0.23060481, -0.39899889, -0.18135746, -0.54352880, -0.28287631, -0.05576789, 0.20255803, 0.18899839, 0.36582524, 0.43294433, 0.21794824, -0.62954980, -0.52842420, 0.00261285, 0.23226254, 0.27430296],
[-0.12496945, 0.27272177, 0.09565081, -0.19869098, 0.40514281, 0.30038768, -0.13575996, -0.01735646, 0.31392211, -0.34690821, -0.26467761, 0.27735108, 0.25757775, 0.56070799, 0.48236406, -0.16126287, -0.56543708, -0.52047604, 0.31337339, 0.31964961, -0.19712290, 0.29141095, 0.25103137, -0.49437916, -0.00175839, -0.39314604, -0.46974984, -0.24069642, -0.07134162, 0.38584659],
[-0.22494942, -0.23908727, -0.14118181, 0.25917593, -0.46544874, 0.21652603, 0.11955780, -0.08858330, 0.11210553, 0.15425776, 0.35051644, 0.12857421, -0.31161663, -0.10459967, 0.28051424, 0.35245281, 0.21058421, -0.38336727, -0.53721315, -0.45408809, 0.17018577, 0.37464410, 0.25320616, -0.50858176, 0.03510477, 0.28646398, -0.49693882, 0.31466347, 0.34066224, 0.39151987],
[-0.24122262, -0.18464386, -0.50166339, -0.06581594, 0.23343681, -0.28764677, -0.28263095, 0.47374201, -0.14122090, 0.41170570, -0.27171388, -0.76247406, -0.43367779, -0.41885039, -0.58815128, 0.16303478, -0.15360811, 0.40358800, 0.28507465, 0.11577206, -0.05193469, 0.10712312, 0.37356687, 0.17525157, -0.61338550, 0.28956139, 0.04172062, 0.19050168, -0.36498675, -0.48431775],
[0.20951799, -0.57114357, 0.16709965, 0.28986153, -0.48571789, 0.17514014, 0.42663154, -0.58854365, -0.49951825, -0.69118619, -0.12997085, 0.20892869, -0.27441102, 0.25154045, 0.33150116, 0.22571780, 0.00198699, -0.21132891, 0.54626226, -0.39937377, 0.09991331, 0.16465400, -0.31479383, 0.19637901, 0.27371463, -0.35296553, 0.32819411, 0.33079246, 0.09111243, -0.15263695],
[0.23110701, -0.82688808, 0.35345000, -0.63799143, 0.10259465, -0.67562747, 0.06791017, -0.55785728, 0.11328468, 0.03148035, 0.06963930, -0.40473521, 0.15695126, 0.10480986, 0.06786098, 0.05529213, -0.06358500, 0.39808711, -0.46259707, -0.41053730, 0.23919414, 0.06440434, -0.55259717, 0.17278855, -0.26870996, -0.59644037, -0.20437278, -0.15572956, -0.62037915, 0.20436110],
[0.43668377, 0.03184615, -0.79770166, 0.30957624, -0.29246098, 0.41470772, -0.39726156, 0.08003121, 0.32232824, 0.18267424, -0.46286914, -0.52988207, 0.40305007, 0.43693665, 0.57090986, -0.71393168, 0.16701773, -0.01028878, 0.03239791, -0.39907083, 0.20838976, 0.25748143, 0.24718748, -0.05084279, -0.52348840, -0.07115566, -0.33007148, 0.18890919, 0.40487564, 0.28275076],
[0.00545317, 0.05541809, -0.29821581, -0.69852740, 0.23890208, -0.58182591, 0.37835562, -0.12874492, -0.24086623, -0.18621640, 0.20001458, -0.55234039, 0.40093267, 0.19279823, -0.56214923, -0.12595257, -0.13790886, 0.04751531, -0.31666499, 0.33546147, 0.19133377, 0.01450487, -0.69050521, -0.15352796, 0.31702802, 0.13524684, 0.08716883, 0.35998338, 0.36140910, -0.18685688],
[0.13561521, 0.09853959, 0.23551922, -0.37978131, -0.26070073, 0.43132550, -0.10494933, 0.07914228, 0.04663205, -0.41666678, 0.16825140, 0.51182604, 0.13776678, -0.68972874, -0.72430468, -0.10668162, 0.29812980, -0.13480635, -0.66627938, 0.01717626, -0.11104345, 0.31376141, 0.39751169, -0.19769318, -0.28220543, 0.13042673, 0.42700538, 0.08965667, 0.18087055, -0.87774348],
])
S = (vmax - vmin)/127.0
result = np.clip(np.ceil(M/S).astype(int), -128, 127).tolist()
print(
'\n'.join(
' '.join(str(e) for e in row)
for row in result
)
)
| 4,059 | 3,748 |
# Lesson 09 - Manipulating text strings
"""
Slicing technique
frase = Curso em Video Python
frase[9]: a specific character
frase[9:13]: takes indexes 9 through 12 (the end index is excluded)
frase[9:21:2]: steps through the range two characters at a time
frase[:5]: starts at the first character and stops at index 4 (index 5 excluded)
frase[15:]: from index 15 to the end
frase[9::3]: starts at 9 and goes to the end, stepping three at a time
# Analysis
len(frase): returns the length of the string, i.e. the number of characters.
frase.count('o'): counts how many times the chosen character occurs in the string.
frase.count('o',0,13): counts occurrences from index 0 through 12 only.
frase.find('deo'): returns the position where the substring starts.
frase.find('Android'): returns -1, meaning the substring does not exist.
'Curso' in frase: tells whether or not the substring exists in the variable.
# Transformation
frase.replace('Python','Android'): replaces the first substring with the second.
frase.upper(): converts everything to upper case.
frase.lower(): converts everything to lower case.
frase.capitalize(): upper-cases only the first letter and lower-cases the rest.
frase.title(): upper-cases the first letter of each word.
frase.strip(): removes useless whitespace from both ends of the string.
frase.rstrip(): removes useless whitespace from the right end.
frase.lstrip(): removes useless whitespace from the left end.
# Splitting
frase.split(): splits the string into a list, breaking on the spaces.
# Joining
'-'.join(frase): joins the list items produced above back into a single string,
with - instead of the space.
# Tip
To print a large block of text without several print calls, put it all inside a
triple-quoted string. To force the string itself to change you must reassign it:
frase = 'Curso em Vídeo Python'
frase = frase.replace('Python','Android')
print(frase)
print('''
Github: http://github.com/rodrigojackal
Twitter: @RodrigoJackal
Skype: rodrigo.jackal
Linkedin: https://www.linkedin.com/in/rodrigo-ferreira-santos-andrade/
''')
"""
# Challenges
"""
Challenge 022 - Text analyser: write a program that reads a person's full name and shows:
    the name in all upper case;
    the name in all lower case;
    how many letters it has in total (not counting spaces);
    how many letters the first name has.
Challenge 023 - Separating the digits of a number: write a program that reads a number
from 0 to 9999 and shows each of its digits separately.
    E.g. Enter a number: 1834
    Units: 4
    Tens: 3
    Hundreds: 8
    Thousands: 1
Challenge 024 - Checking the first letters of a text: write a program that reads the
name of a city and says whether or not it starts with the word "SANTO".
Challenge 025 - Searching for one string inside another: write a program that reads a
person's name and says whether it contains "SILVA".
Challenge 026 - First and last occurrence of a string: write a program that reads a
sentence from the keyboard and shows:
    how many times the letter "A" appears;
    the position where it appears first;
    the position where it appears last.
Challenge 027 - A person's first and last name: write a program that reads a person's
full name and then shows the first and the last name separately.
    E.g. Ana Maria de Souza
    First: Ana
    Last: Souza
"""
| 3,391 | 1,228 |
import optuna
from {{cookiecutter.repo_name}}.utils import check_args_num, \
read_config, set_random_seed, str_hash, file_hash
from {{cookiecutter.repo_name}}.settings import optuna_db_path
def read_inp_file(filepath):
raise NotImplementedError
def write_output(out, filepath):
raise NotImplementedError
def get_objective(config):
"""
more on optuna objectives:
https://optuna.readthedocs.io/en/stable/faq.html
"""
raise NotImplementedError
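# Illustrative only (an assumed shape, not this project's real objective):
# Optuna expects get_objective to return a callable taking a Trial, e.g.
def _example_objective(trial):
    # suggest one hyperparameter and return the value Optuna should minimise
    x = trial.suggest_float('x', -10.0, 10.0)
    return (x - 2.0) ** 2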
def check_descr_unique(data_descr, data_hash):
"""
raises if database contains a row with the same data description
but different data hash
"""
raise NotImplementedError
def create_predictor():
"""
Creates a predictor object using inference stages and model object
"""
raise NotImplementedError
def measure_inference_time(predictor):
"""
    Measures the inference time of the given predictor object
"""
raise NotImplementedError
if __name__ == "__main__":
_, config_file, X_file, y_file, best_model_path, predictor_file, \
metrics_file, study_name_file = check_args_num(8)
set_random_seed()
data_hash = str_hash(file_hash(X_file) + file_hash(y_file))
config = read_config(config_file)
objective_name = config.get('algo_name')
study_name = str_hash(data_hash + objective_name)
X = read_inp_file(X_file)
y = read_inp_file(y_file)
objective = get_objective(config)
sampler = optuna.samplers.TPESampler(seed=None)
study = optuna.create_study(optuna_db_path, study_name=study_name,
sampler=sampler, load_if_exists=True)
data_descr = config.get('data_descr')
check_descr_unique(data_descr, data_hash)
study.set_user_attr("data_description", data_descr)
study.set_user_attr("data_hash", data_hash)
study.set_user_attr("algo_name", objective_name)
try:
study.optimize(objective, n_trials=config.get('n_trials'))
except KeyboardInterrupt:
pass
write_output('{:6f}\n'.format(study.best_value), metrics_file)
write_output('{}\n'.format(study_name), study_name_file)
if (study.best_value is not None) and (objective.best_result is not None) \
and ((objective.best_result - study.best_value)
< config['metric_precision']):
write_output(objective.best_model, best_model_path)
predictor = create_predictor()
write_output(predictor, predictor_file)
inf_time = measure_inference_time(predictor)
study.set_user_attr("inference_time", inf_time)
| 2,575 | 863 |
# - *- coding: utf- 8 - *-
""" Bot to suggest music from Spotify based on your mood.
"""
import spotipy, os
from spotipy.oauth2 import SpotifyClientCredentials
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
#from access_token import AUTH_TOKEN, CLIENT_ID, CLIENT_SECRET
# Initialise spotipy
client_credentials_manager = SpotifyClientCredentials(client_id=os.environ['CLIENT_ID'], client_secret=os.environ['CLIENT_SECRET'])
sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
# Define command handlers. They usually take two arguments bot and update
# In case of the error handler, they receive a TelegramError object in error
def start(bot, update):
update.message.reply_text("I can help you find the best music from Spotify 😉")
def help(bot, update):
update.message.reply_text("You can control me by sending these commands:\n\n/start - start a conversation with bot\n/new - get new releases from Spotify\n/help - get help from bot")
def new(bot, update):
response = []
results = sp.new_releases(country='US',limit=10)
for i, album in enumerate(results['albums']['items'],1):
response.append(' ' + str(i) + ' ' + album['name'] + ' - ' + album['artists'][0]['name'])
update.message.reply_text('\n\n'.join(response))
def sorry(bot, update):
update.message.reply_text("Sorry, I didn't get you. Type /help to get the list of available commands.")
def main():
"""Start the bot"""
# Create event handler and pass it your bot's token
updater = Updater(os.environ['AUTH_TOKEN'])
# Get dispatcher to register handlers
dispatcher = updater.dispatcher
print("Bot started!")
# On different commands - answer in Telegram
dispatcher.add_handler(CommandHandler('start', start))
dispatcher.add_handler(CommandHandler('help', help))
dispatcher.add_handler(CommandHandler('new', new))
# dispatcher.add_handler(CommandHandler(''))
# On non-command i.e message - echo the message in telegram
dispatcher.add_handler(MessageHandler(Filters.text, sorry))
# Start the Bot
updater.start_polling()
# Run the bot until you press Ctrl-C
updater.idle()
if __name__ == '__main__':
main()
| 2,221 | 696 |
# Copyright (c) 2019, Danish Technological Institute.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# -*- coding: utf-8 -*-
""" Utility code to locate tracker projects
"""
from tracker.tracker_file import TrackerFile
from tracker.utils import cli
from tracker.utils import config
def is_cwd_project(cwd):
raise NotImplementedError
def get_project_names_and_dirs():
trackerfile = TrackerFile()
projects = trackerfile.get("projects", {})
if projects:
data = [
{
"name": name,
"path": r.get("path", ""),
}
for d in projects for name, r in d.items()
]
return data
else:
cli.error("No projects specified in {}".format(
config.get_user_config_path()))
def get_project_names():
"""Searches for Tracker projects at the Tracker home configuration file
Returns:
<list> -- List of project names
"""
trackerfile = TrackerFile()
projects = trackerfile.get("projects", {})
project_names = []
if projects:
for d in projects:
k, _ = list(d.items())[0]
project_names.append(k)
return project_names
def get_project_dir_by_name(name):
trackerfile = TrackerFile()
data = trackerfile.get("projects")
for d in data:
k, _ = list(d.items())[0]
if name in k:
path = d[k]["path"]
return path
| 1,544 | 479 |
"""
Python API for Hacker News.
@author Karan Goel
@email karan@goel.im
"""
__title__ = 'hackernews'
__author__ = 'Karan Goel'
__license__ = 'MIT'
__copyright__ = 'Copyright 2014 Karan Goel'
from .hn import HN, Story
| 220 | 92 |
ACCURACY = 0
MATTHEWS_CORRELATION_COEFFICIENT = 1
AUC = 2 | 57 | 36 |
'''
Advent of Code 2017
Day 6: Memory Reallocation
'''
import unittest
TEST_BANKS = ('0 2 7 0', 5, 4)
INPUT_BANKS = '0 5 10 0 11 14 13 4 11 8 8 7 1 4 12 11'
def findInfiniteLoop(memoryBanks):
'''
Finds the number of iterations required to detect an infinite loop with the given start condition.
memoryBanks is a list of integers, representing a number of memory banks with items in each.
Returns the number of iterations until an infinite loop is detected, and the size of the loop.
'''
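    # e.g. the puzzle's sample banks [0, 2, 7, 0] reach a repeated state after
    # 5 redistributions, and the repeating cycle itself is 4 states long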
nIterations = 0
nBanks = len(memoryBanks)
foundLoop = False
# create a history of known configurations, starting with the current one
    # use a list (not a set) so insertion order is kept and .index() can later
    # recover where a repeated state first appeared (giving the loop size)
    # use strings (not frozensets) because a frozenset would collapse banks
    # holding equal counts into one element and lose positional information
resultList = [' '.join([str(i) for i in memoryBanks]),]
while not foundLoop:
# find the memory bank with the largest quanity
maximumItems = max(memoryBanks)
index = memoryBanks.index(maximumItems)
# Redistribute the items by emptying out the current bank and then
# giving the rest one of them, looping around the banks
nIterations += 1
memoryBanks[index] = 0
for counter in range(maximumItems):
index += 1
if index == nBanks:
index = 0
memoryBanks[index] += 1
# check to see if the current state has been seen before
currentState = ' '.join([str(i) for i in memoryBanks])
if currentState in resultList:
foundLoop = True
sizeOfLoop = nIterations - resultList.index(currentState)
else:
resultList.append(currentState)
return (nIterations, sizeOfLoop)
# Unit tests
class TestLoops(unittest.TestCase):
'''
Tests for Part 1 and Part 2
'''
# Part 1
def test_part1(self):
'''
Part 1 tests
'''
self.assertEqual(findInfiniteLoop([int(i) for i in TEST_BANKS[0].strip().split()])[0], TEST_BANKS[1])
## Part 2
def test_part2(self):
'''
Part 2 tests
'''
self.assertEqual(findInfiniteLoop([int(i) for i in TEST_BANKS[0].strip().split()])[1], TEST_BANKS[2])
if __name__ == '__main__':
print('Advent of Code\nDay 6: Memory Reallocation\n')
(iterations, loopSize) = findInfiniteLoop([int(i) for i in INPUT_BANKS.strip().split()])
print('Part 1: {0:d} iterations to infinite loop'.format(iterations))
print('Part 2: The loop is {0:d} iterations'.format(loopSize))
| 2,580 | 826 |
"""
Utility Methods for Authenticating against and using Indiana University CAS.
"""
import httplib2
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
def validate_cas_ticket(casticket, casurl):
"""
Takes a CAS Ticket and makes the out of bound GET request to
cas.iu.edu to verify the ticket.
"""
validate_url = 'https://%s/cas/validate?cassvc=IU&casurl=%s' % \
(settings.CAS_HOST, casurl,)
if hasattr(settings, 'CAS_HTTP_CERT'):
h = httplib2.Http(ca_certs=settings.CAS_HTTP_CERT)
else:
h = httplib2.Http()
resp, content = h.request(validate_url,"GET")
return content.splitlines()
def get_cas_username(casticket, casurl):
"""
Validates the given casticket and casurl and returns the username of the
logged in user. If the user is not logged in returns None
"""
resp = validate_cas_ticket(casticket, casurl)
if len(resp) == 2 and resp[0] == 'yes':
return resp[1]
else:
return None
class IUCASBackend(object):
"""
IUCAS Authentication Backend for Django
"""
def authenticate(self, ticket, casurl):
resp = validate_cas_ticket(ticket, casurl)
if len(resp) == 2 and resp[0] == 'yes':
username = resp[1]
if not username:
return None
try:
user = User.objects.get(username__iexact=username)
except User.DoesNotExist:
return username
return user
else:
return None
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
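# Usage sketch (assumed project layout): register the backend in Django
# settings so authenticate(ticket, casurl) is consulted before the default, e.g.
# AUTHENTICATION_BACKENDS = ['myapp.auth.IUCASBackend',
#                            'django.contrib.auth.backends.ModelBackend']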
| 1,757 | 530 |
# -*- coding: utf-8 -*-
from django.http import JsonResponse
from decimal import Decimal
from datetime import datetime, timedelta
import re
import logging
from disqusapi import DisqusAPI
from django.contrib import messages
from django.apps import apps
from django.core.mail import mail_admins
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect
from djng.views.mixins import JSONResponseMixin, allow_remote_invocation
from django.core.exceptions import ObjectDoesNotExist
from django.views.generic import (
DetailView,
ListView,
UpdateView,
CreateView,
FormView,
DeleteView,
TemplateView,
)
from django.conf import settings
import blousebrothers.classifier as cl
from blousebrothers.tools import get_disqus_sso
from blousebrothers.auth import (
BBConferencierReqMixin,
ConferenceWritePermissionMixin,
ConferenceReadPermissionMixin,
TestPermissionMixin,
BBLoginRequiredMixin,
)
from blousebrothers.tools import analyse_conf, get_full_url
from blousebrothers.confs.utils import get_or_create_product
from blousebrothers.users.charts import MonthlyLineChart
from blousebrothers.users.models import User
from .models import (
Conference,
Question,
Answer,
AnswerImage,
ConferenceImage,
QuestionImage,
QuestionExplainationImage,
Item,
Test,
TestAnswer,
)
from .forms import ConferenceForm, ConferenceFinalForm, RefundForm, ConferenceFormSimple
logger = logging.getLogger(__name__)
Product = apps.get_model('catalogue', 'Product')
class ConferenceHomeView(LoginRequiredMixin, TemplateView):
template_name = 'confs/conference_home.html'
def get(self, request, *args, **kwargs):
if not request.user.tests.filter(finished=True).count():
return redirect(reverse('catalogue:index'))
else:
return super().get(request, *args, **kwargs)
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(**kwargs)
context['object'] = self.request.user
user = User.objects.prefetch_related("tests__answers").get(pk=self.request.user.pk)
context.update(**user.stats)
monthly_chart = MonthlyLineChart()
monthly_chart.context = context
context['monthly_chart'] = monthly_chart
return context
class ConferenceDetailView(ConferenceReadPermissionMixin, DetailView):
model = Conference
# These next two lines tell the view to index lookups by conf
def get_object(self, queryset=None):
obj = Conference.objects.prefetch_related(
"questions__answers",
"questions__images",
).get(slug=self.kwargs['slug'])
return obj
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['meta'] = self.get_object().as_meta(self.request)
if self.request.user.is_superuser:
            specs = []
            intro = context['object'].statement
            quest = context['object'].questions.all()
            for question in quest:
                if question.explaination:
                    res = cl.classifier(str(intro) + " " + question.question + " " + question.explaination)
                else:
                    res = cl.classifier(str(intro) + " " + question.question)
                specs.append(res)
            context['specialities'] = specs
return context
class ConferenceDeleteView(ConferenceWritePermissionMixin, BBConferencierReqMixin, DeleteView):
"""
View displayed to confirm deletion. Object are just flaged as deleted but are not
removed from db. Need to use admin interface to do so.
"""
template_name = 'confs/conference_delete.html'
model = Conference
def delete(self, request, *args, **kwargs):
"""
Override delete method to simply update object attribute deleted=True.
"""
self.object = self.get_object()
success_url = self.get_success_url()
self.object.deleted = True
self.object.save()
return HttpResponseRedirect(success_url)
def get_success_url(self):
return reverse('confs:list')
class ConferenceUpdateView(ConferenceWritePermissionMixin, JSONResponseMixin, UpdateView):
"""
Main Angular JS interface where you can edit question, images...
"""
template_name = 'confs/conference_update.html'
form_class = ConferenceForm
# send the user back to their own page after a successful update
def get_redirect_url(self):
return reverse('confs:detail',
kwargs={'slug': self.request.conf.slug})
def get_object(self, queryset=None):
obj = Conference.objects.get(slug=self.kwargs['slug'])
return obj
def form_valid(self, form):
context = self.get_context_data()
formset = context['formset']
if form.is_valid():
self.object = form.save(commit=False)
self.object.owner = self.request.user
self.object.save()
else:
return self.render_to_response(self.get_context_data(form=form, formset=formset))
if formset.is_valid():
formset.save()
return redirect(self.object.get_absolute_url())
else:
return self.render_to_response(self.get_context_data(form=form, formset=formset))
@allow_remote_invocation
def sync_data(self, edit_data):
# process in_data
conf, question, answers, images, qimages, ansimages, qexpimages = edit_data
try:
conf.pop('items')
conf.pop('specialities')
        except KeyError:
pass
conf_pk = conf.pop('pk')
Conference.objects.filter(pk=conf_pk).update(**conf)
question.pop('specialities')
question.pop('items')
Question.objects.filter(pk=question.pop('pk')).update(**question)
for answer in answers:
Answer.objects.filter(pk=answer.pop('pk')).update(**answer)
for __, answers_images in ansimages.items():
for answer_image in answers_images:
AnswerImage.objects.filter(pk=answer_image.pop('pk')).update(**answer_image)
for image in images:
ConferenceImage.objects.filter(pk=image.pop('pk')).update(**image)
for image in qimages:
QuestionImage.objects.filter(pk=image.pop('pk')).update(**image)
for image in qexpimages:
QuestionExplainationImage.objects.filter(pk=image.pop('pk')).update(**image)
return analyse_conf(Conference.objects.get(pk=conf_pk))
@allow_remote_invocation
def get_keywords(self, data):
cf = Conference.objects.get(pk=data['pk'])
txt = cf.get_all_txt()
ret = []
for item in Item.objects.all():
for kw in item.kwords.all():
if re.search(r'[^\w]'+kw.value+r'[^\w]', txt):
ret.append("{} => {}".format(kw.value, item.name))
break
return ret
def ajax_switch_correction(request):
"""
Ajax switch correction available.
"""
status = request.GET['state'] == 'true'
conf = request.user.created_confs.get(id=request.GET['conf_id'])
conf.correction_dispo = status
conf.save()
return JsonResponse({'success': True})
def ajax_switch_for_sale(request):
"""
Ajax conf available.
"""
status = request.GET['state'] == 'true'
conf = request.user.created_confs.get(id=request.GET['conf_id'])
conf.for_sale = status
conf.save()
return JsonResponse({'success': True})
class ConferenceListView(ListView):
model = Conference
    # paginate the listing, ten conferences per page
paginate_by = 10
def get_queryset(self):
if self.request.user.is_superuser:
qry = self.model.objects.order_by('-edition_progress')
else:
qry = self.model.objects.filter(owner=self.request.user)
qry = qry.order_by('edition_progress')
if self.request.GET.get('q', False):
qry = qry.filter(title__icontains=self.request.GET['q'])
qry = qry.prefetch_related('products__stats')
qry = qry.prefetch_related('owner__sales')
return qry.all()
class ConferenceCreateView(BBConferencierReqMixin, CreateView, FormView):
template_name = 'confs/conference_form.html'
form_class = ConferenceForm
model = Conference
def get_object(self, queryset=None):
obj = Conference.objects.prefetch_related(
"questions__answers",
"questions__images",
).get(slug=self.kwargs['slug'])
return obj
# send the user back to their own page after a successful update
def get_redirect_url(self):
return reverse('confs:detail',
kwargs={'slug': self.request.conf.slug})
def get_success_url(self):
return reverse('confs:update',
kwargs={'slug': self.object.slug})
def form_valid(self, form):
if form.is_valid():
self.object = form.save(commit=False)
self.object.owner = self.request.user
self.object.save()
# create questions
for i in range(form.cleaned_data['nb_questions']):
q = Question.objects.create(conf=self.object, index=i)
for j in range(5):
Answer.objects.create(question=q, index=j)
self.request.user.status = 'creat_conf_begin'
self.request.user.conf_entam_url = get_full_url(self.request, 'confs:update', args=(self.object.slug,))
self.request.user.save()
return super().form_valid(form)
else:
return self.render_to_response(self.get_context_data(form=form))
class ConferenceFinalView(ConferenceWritePermissionMixin, BBConferencierReqMixin, UpdateView):
template_name = 'confs/conference_final.html'
form_class = ConferenceFinalForm
model = Conference
def get_success_url(self):
return reverse('confs:test',
kwargs={'slug': self.object.slug})
def get_object(self, queryset=None):
"""
Update user status if required.
"""
obj = super().get_object(queryset)
if not obj.for_sale:
self.request.user.status = 'creat_conf_100'
self.request.user.save()
else:
self.request.user.conf_pub_url = get_full_url(self.request, 'confs:update', args=(obj.slug,))
self.request.user.action = "publi"
self.request.user.save()
return obj
def get_context_data(self, **kwargs):
items = []
if self.object.items.count() == 0:
self.object.set_suggested_items()
else:
txt = self.object.get_all_txt()
for item in Item.objects.exclude(
id__in=self.object.items.all()
).all():
for kw in item.kwords.all():
if re.search(r'[^\w]'+kw.value+r'([^\w]|$)', txt):
items.append(item)
break
context = super().get_context_data(**{'items': items})
return context
def form_valid(self, form):
"""
Create a Test instance for user to be able to test is conference,
and create a disqus thread with owner as thread creator.
"""
if not Test.objects.filter(
conf=self.object,
student=self.request.user
).exists():
Test.objects.create(conf=self.object, student=self.request.user)
get_or_create_product(self.object)
if self.object.for_sale:
self.request.user.status = 'conf_publi_ok'
self.request.user.save()
if form.cleaned_data["free"]:
self.object.price = 0
else:
self.object.price = Decimal('0.33')
# Create disqus thread
try:
disqus = DisqusAPI(settings.DISQUS_SECRET_KEY, settings.DISQUS_PUBLIC_KEY)
disqus.get("threads.create",
method='post',
forum='blousebrothers',
remote_auth=get_disqus_sso(self.object.owner),
title=self.object.title,
url=get_full_url(self.request, 'confs:result', args=(self.object.slug,)),
identifier=self.object.slug,
)
except Exception as ex:
if "thread already exists" in ex.message:
pass
else:
logger.exception("PB CREATING THREAD")
return super().form_valid(form)
class ConferenceEditView(ConferenceWritePermissionMixin, BBConferencierReqMixin, UpdateView):
template_name = 'confs/conference_form.html'
form_class = ConferenceFormSimple
model = Conference
def get_redirect_url(self):
return reverse('confs:update',
kwargs={'slug': self.request.conf.slug})
def get_success_url(self):
return reverse('confs:update',
kwargs={'slug': self.object.slug})
class BuyedConferenceListView(LoginRequiredMixin, ListView):
model = Test
    # paginate the listing, ten tests per page
paginate_by = 10
def get_queryset(self):
qry = self.model.objects.filter(student=self.request.user)
qry = qry.order_by('progress')
if self.request.GET.get('q', False):
qry = qry.filter(conf__title__icontains=self.request.GET['q'])
return qry.all()
class TestUpdateView(TestPermissionMixin, JSONResponseMixin, UpdateView):
"""
Main test view.
"""
model = Test
fields = []
def get(self, request, *args, **kwargs):
self.object = self.get_object()
if self.object.finished:
return redirect(
reverse('confs:result', kwargs={'slug': self.object.conf.slug})
)
else:
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
"""
Add time_taken var to context for timer initialization. time_taken units is
milliseconds as angularjs timer needs.
"""
tt = self.object.time_taken
time_taken = (tt.hour * 3600 + tt.minute * 60 + tt.second) * 1000 if tt else 0
return super().get_context_data(time_taken=time_taken, **kwargs)
def get_object(self, queryset=None):
"""
TestAnswers are created here, when user starts his test.
"""
conf = Conference.objects.get(slug=self.kwargs['slug'])
if conf.owner.username == "BlouseBrothers":
test, __ = Test.objects.get_or_create(conf=conf, student=self.request.user)
else:
test = Test.objects.get(conf=conf, student=self.request.user)
if not test.answers.count():
for question in conf.questions.all():
TestAnswer.objects.create(question=question, test=test)
return test
@allow_remote_invocation
def send_answers(self, data):
"""
API to collect test's answers.
:param data: {'answers': [0..4] => list of checked answers indexes,
'millis': time elapsed in milliseconds since test started,
}
"""
answers = data["answers"]
time_taken = datetime.fromtimestamp(data["millis"]/1000.0).time()
question = Question.objects.get(pk=answers[0]['question'])
test = Test.objects.get(conf=question.conf, student=self.request.user)
ta = TestAnswer.objects.get(test=test, question=question)
ta.given_answers = ','.join([str(answer['index']) for answer in answers if answer['correct']])
if not ta.given_answers:
raise Exception("NO ANSWER GIVEN")
if test.time_taken:
last_time = test.time_taken.hour * 3600 + test.time_taken.minute * 60 + test.time_taken.second
this_time = time_taken.hour * 3600 + time_taken.minute * 60 + time_taken.second
ta.time_taken = datetime.fromtimestamp(this_time - last_time)
else:
ta.time_taken = time_taken
ta.save()
test.time_taken = time_taken
test.progress = test.answers.exclude(given_answers='').count()/test.answers.count() * 100
test.save()
return {'success': True}
class TestResult(TestPermissionMixin, DetailView):
model = Test
def get_object(self, queryset=None):
conf = Conference.objects.get(slug=self.kwargs['slug'])
test = Test.objects.prefetch_related(
"answers__question__answers",
"answers__question__images",
).get(
conf=conf, student=self.request.user)
if not test.finished:
self.request.user.status = "give_eval_notok"
self.request.user.last_dossier_url = get_full_url(
self.request,
'confs:detail',
args=(conf.slug,)
)
self.request.user.save()
test.set_score()
try:
disqus = DisqusAPI(settings.DISQUS_SECRET_KEY, settings.DISQUS_PUBLIC_KEY)
thread = disqus.get('threads.details', method='get', forum='blousebrothers',
thread='ident:' + test.conf.slug)
disqus.post('threads.subscribe',
method='post',
thread=thread['id'],
remote_auth=get_disqus_sso(test.student),
)
        except Exception:
logger.exception("Student Disqus thread subscription error")
return test
def get(self, *args, **kwargs):
conf = Conference.objects.get(slug=self.kwargs['slug'])
product = Product.objects.get(conf=conf)
try:
return super().get(*args, **kwargs)
except ObjectDoesNotExist:
return redirect(product.get_absolute_url())
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
try:
product = Product.objects.get(conf=self.object.conf)
ctx.update(product=product)
        except Exception:
ctx.update(product=None)
return ctx
class TestResetView(TestPermissionMixin, UpdateView):
model = Test
fields = ['id']
def form_valid(self, form):
if self.request.user.has_full_access:
self.object.finished = False
self.object.progress = 0
self.object.answers.all().delete()
self.object.save()
return super().form_valid(form)
def get_success_url(self):
if self.request.user.has_full_access:
return reverse('confs:test',
kwargs={'slug': self.object.conf.slug})
else:
messages.info(self.request,
"Merci de souscrire à un abonnement pour pouvoir recommencer un dossier.")
return reverse('users:subscription', kwargs={'sub_id': 0})
def get_object(self, queryset=None):
conf = Conference.objects.get(slug=self.kwargs['slug'])
return Test.objects.get(conf=conf, student=self.request.user)
class RefundView(TestPermissionMixin, UpdateView):
model = Test
form_class = RefundForm
template_name = 'confs/refund_form.html'
email_template = '''
DEMANDE DE REMBOURSEMENT DE CONF
Nom : {}
Email : {}
Lien : {}
Conf : {}
Msg : {}'''
def form_valid(self, form):
msg = self.email_template.format(
self.request.user.username,
self.request.user.email,
get_full_url(self.request, 'dashboard:user-detail', args=(self.request.user.id,)),
get_full_url(self.request, 'confs:detail', args=(self.object.conf.slug,)),
form.cleaned_data['msg'],
)
mail_admins('Demande de remboursement', msg)
return super().form_valid(form)
def get_object(self, queryset=None):
conf = Conference.objects.get(slug=self.kwargs['slug'])
return Test.objects.get(conf=conf, student=self.request.user)
def get_success_url(self):
        messages.success(self.request, "Ta demande a bien été transmise, on te recontacte très vite.")
return reverse('catalogue:index')
| 20,460 | 5,976 |
import serial
import string
import math
from itertools import chain
class robot:
    address = "/dev/cu.HC-05-DevB"
    speed = 0
    current_position = [0, 0, 0]
    target_position = [0, 0]
    distance = 0
    angle_diff = 0
    complement = 0
    colorLower = [0, 0, 0]
    colorUpper = [0, 0, 0]
    ID = 0
# def __init__ (self):
# pass
#
def __init__ (self, colorL, colorU, ID = None):
self.colorLower = colorL
self.colorUpper = colorU
self.ID = ID
#
# set address and target
def initialize_port(self, address, target):
self.address = address
self.target_position = target
self.port = serial.Serial(address, 9600)
# method to move the robot
def move(self):
self.calc_dist_angle()
# print ("angle ", self.angle_diff, "distance", self.distance, "compliment", self.compliment)
if 20 <= abs(self.compliment) <= 160 and self.distance > 100:
print ("orientating")
self.orient()
elif self.distance > 170:
# print ("moving")
if 160 <= abs(self.angle_diff) <= 200:
# print ("should go forward")
self.forward()
            elif math.floor(abs(self.angle_diff)) in list(range(0, 20)) + list(range(340, 360)):
# print ("should go backward")
self.backward()
#
else:
            self.stop()
# method to find the required orientation
def orient(self):
if abs(self.speed) > 0.5:
self.speed = 0
#
left_turn_conditions = chain(range(-90,0),range(90,180),range(-270,-180),range(270,360))
right_turn_conditions = chain(range(0,90),range(-180,-90),range(180, 270),range(-360,-270))
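        # Each chain enumerates the integer angle differences (in degrees) for which
        # a left or right turn, respectively, is the shorter way to face the target.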
if math.floor(self.angle_diff) in left_turn_conditions and (self.speed > -0.5):
print ("left")
self.port.write(bytearray("a","utf-8"))
self.speed = self.speed - 0.5
elif math.floor(self.angle_diff) in right_turn_conditions and (self.speed < 0.5):
print ("right")
self.port.write(bytearray("d","utf-8"))
self.speed = self.speed + 0.5
# method to move the robot forward
def forward(self):
if abs(self.speed) == 0.5:
self.speed = 0
#
ratio = int(math.ceil((self.distance*8)/1000))
if self.speed < 2:
# for i in range(0,ratio):
print ("forward ", ratio, self.speed)
self.port.write(bytearray("w","utf-8"))
            self.speed = self.speed + 1
# method to move the robot backward
def backward(self):
ratio = int(math.ceil((self.distance*8)/1000))
if self.speed > -2:
print ("backward", ratio, self.speed)
# for i in range(0,ratio):
self.port.write(bytearray("s","utf-8"))
            self.speed = self.speed - 1
# method to stop the robot
def stop(self):
self.port.write(bytearray("q","utf-8"))
self.speed = 0
# method to calculate the distance between robot and target and orientation difference
def calc_dist_angle(self):
x_delta = self.target_position[0] - self.current_position[0]
y_delta = self.target_position[1] - self.current_position[1]
self.distance = math.hypot(x_delta, y_delta)
required_orientation = math.atan2(y_delta, x_delta) * 180/math.pi
current_orientation = self.current_position[2]
self.angle_diff = (required_orientation - current_orientation)
        # calculates the complement of the angle in [0, 180] for each quadrant
        self.complement = abs(self.angle_diff) - math.floor(abs(self.angle_diff)/180)*180
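        # e.g. current position (0, 0, 90) and target (100, 0): distance 100,
        # required orientation 0 deg, angle_diff -90, complement 90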
| 3,177 | 1,334 |
from discord import TextChannel, User
from discord.ext.commands import Bot
from .configuration import CONF0
from tqdm.asyncio import tqdm
# class LogMe:
# """This is a complicated logger I came up with.\n
# Feel free to insult me whilst reading it."""
# _std = {
# "LS": "|-----------------Log_ START-------------------|",
# "ES": "|-----------------ERR_ START-------------------|",
# "EE": "|------------------ERR_ END--------------------|",
# "LE": "|------------------Log_ END--------------------|",
# "!?": "Some unprintable error happened...",
# "!!": "Ah for fucks sake something went horribly wrong!",
# }
# def __init__(self, bot: Bot, config: CONF0):
# self.LogAdmin = set([bot.get_user(Admin) for Admin in config.LogAdmin])
# self.LogChan = set([bot.get_channel(Chan) for Chan in config.LogChan])
# async def __call__(self, st, err_: bool = False, tq: bool = True):
# if err_:
# print(self._std["ES"]) if (tq) else tqdm.write(self._std["ES"])
# print(st) if (tq) else tqdm.write(st)
# try:
# with self.bot.get_channel(self.LogChan) as chan:
# await chan.send()
# if self.LogAdmin:
# await chan.send(
# " ".join([str(admin.mention) for admin in self.LogAdmin])
# )
# await chan.send(st)
# await chan.send(self._std["EE"])
# except Exception:
# try:
# with self.bot.get_channel(self.debug) as chan:
# await chan.send(self._std["ES"])
# try:
# if self.LogAdmin:
# await chan.send(
# " ".join(
# [str(admin.mention) for admin in self.LogAdmin]
# )
# )
# await chan.send(str(st))
# except Exception:
# if self.LogAdmin:
# await chan.send(
# " ".join(
# [str(admin.mention) for admin in self.LogAdmin]
# )
# )
# await chan.send("Some unprintable error happened...")
# await chan.send(self._std["EE"])
# except Exception:
# _std = "Ah for hugs sake something went horribly wrong! AGAIN"
# print(_std) if (tq) else tqdm.write(_std)
# print(self._std["EE"]) if (tq) else tqdm.write(self._std["EE"])
# else:
# print(st) if (tq) else tqdm.write(st)
# try:
# with self.bot.get_channel(self.debug) as chan:
# await chan.send(st)
# except Exception:
# try:
# with self.bot.get_channel(self.debug) as chan:
# try:
# try:
# await chan.send(st)
# except Exception:
# await chan.send(str(type(st)))
# await chan.send(str(st))
# except Exception:
# await chan.send(self._std["!?"])
# except Exception:
# await self(self._std["!!"], True)
# def add_LogChan(self, Chan: TextChannel) -> None:
# self.LogChan.add(Chan)
# def del_LogChan(self, Chan: TextChannel) -> None:
# self.LogChan.remove(Chan)
# def add_LogAdmin(self, Admin: User) -> None:
# self.LogAdmin.add(Admin)
# def del_LogAdmin(self, Admin: User) -> None:
# self.LogAdmin.remove(Admin)
| 4,054 | 1,114 |
"""
test_django-oci api
-------------------
Tests for `django-oci` api.
"""
from django.urls import reverse
from django.contrib.auth.models import User
from django_oci import settings
from rest_framework import status
from rest_framework.test import APITestCase
from django.test.utils import override_settings
from time import sleep
from unittest import skipIf
import subprocess
import requests
import hashlib
import base64
import json
import os
import re
here = os.path.abspath(os.path.dirname(__file__))
# Regex to parse key/value fields out of the Www-Authenticate header
auth_regex = re.compile(r'(\w+)[:=] ?"?([^"]+)"?')
# Important: user needs to be created globally to be seen
user, _ = User.objects.get_or_create(username="dinosaur")
token = str(user.auth_token)
def calculate_digest(blob):
"""Given a blob (the body of a response) calculate the sha256 digest"""
hasher = hashlib.sha256()
hasher.update(blob)
return hasher.hexdigest()
def get_auth_header(username, password):
"""django oci requires the user token as the password to generate a longer
auth token that will expire after some number of seconds
"""
auth_str = "%s:%s" % (username, password)
auth_header = base64.b64encode(auth_str.encode("utf-8"))
return {"Authorization": "Basic %s" % auth_header.decode("utf-8")}
def get_authentication_headers(response):
"""Given a requests.Response, assert that it has status code 401 and
provides the Www-Authenticate header that can be parsed for the request
"""
assert response.status_code == 401
assert "Www-Authenticate" in response.headers
matches = dict(auth_regex.findall(response.headers["Www-Authenticate"]))
for key in ["scope", "realm", "service"]:
assert key in matches
# Prepare authentication headers and get token
headers = get_auth_header(user.username, token)
url = "%s?service=%s&scope=%s" % (
matches["realm"],
matches["service"],
matches["scope"],
)
# With proper headers should be 200
auth_response = requests.get(url, headers=headers)
assert auth_response.status_code == 200
body = auth_response.json()
# Make sure we have the expected fields
for key in ["token", "expires_in", "issued_at"]:
assert key in body
# Formulate new auth header
return {"Authorization": "Bearer %s" % body["token"]}
def read_in_chunks(image, chunk_size=1024):
"""Helper function to read file in chunks, with default size 1k."""
while True:
data = image.read(chunk_size)
if not data:
break
yield data
def get_manifest(config_digest, layer_digest):
"""A dummy image manifest with a config and single image layer"""
return json.dumps(
{
"schemaVersion": 2,
"config": {
"mediaType": "application/vnd.oci.image.config.v1+json",
"size": 7023,
"digest": config_digest,
},
"layers": [
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 32654,
"digest": layer_digest,
}
],
"annotations": {"com.example.key1": "peas", "com.example.key2": "carrots"},
}
)
class APIBaseTests(APITestCase):
def setUp(self):
self.process = subprocess.Popen(["python", "manage.py", "runserver"])
sleep(2)
def tearDown(self):
os.kill(self.process.pid, 9)
def test_api_version_check(self):
"""
GET of /v2 should return a 200 response.
"""
url = reverse("django_oci:api_version_check")
response = self.client.get(url, format="json")
self.assertEqual(response.status_code, status.HTTP_200_OK)
class APIPushTests(APITestCase):
def push(
self,
digest,
data,
content_type="application/octet-stream",
test_response=True,
extra_headers={},
):
url = "http://127.0.0.1:8000%s?digest=%s" % (
reverse("django_oci:blob_upload", kwargs={"name": self.repository}),
digest,
)
print("Single Monolithic POST: %s" % url)
headers = {
"Content-Length": str(len(data)),
"Content-Type": content_type,
}
headers.update(extra_headers)
response = requests.post(url, data=data, headers=headers)
if test_response:
self.assertTrue(
response.status_code
in [status.HTTP_202_ACCEPTED, status.HTTP_201_CREATED]
)
return response
def test_push_post_then_put(self):
"""
POST /v2/<name>/blobs/uploads/
PUT /v2/<name>/blobs/uploads/
"""
url = "http://127.0.0.1:8000%s" % (
reverse("django_oci:blob_upload", kwargs={"name": self.repository})
)
print("POST to request session: %s" % url)
headers = {"Content-Type": "application/octet-stream"}
response = requests.post(url, headers=headers)
auth_headers = get_authentication_headers(response)
headers.update(auth_headers)
response = requests.post(url, headers=headers)
# Location must be in response header
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.assertTrue("Location" in response.headers)
blob_url = "http://127.0.0.1:8000%s?digest=%s" % (
response.headers["Location"],
self.digest,
)
# PUT to upload blob url
headers = {
"Content-Length": str(len(self.data)),
"Content-Type": "application/octet-stream",
}
headers.update(auth_headers)
print("PUT to upload: %s" % blob_url)
response = requests.put(blob_url, data=self.data, headers=headers)
# This should allow HTTP_202_ACCEPTED too
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue("Location" in response.headers)
download_url = add_url_prefix(response.headers["Location"])
response = requests.get(download_url, headers=auth_headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Test upload request from another repository
non_standard_name = "conformance-aedf05b6-6996-4dae-ad18-70a4db9e9061"
url = "http://127.0.0.1:8000%s" % (
reverse("django_oci:blob_upload", kwargs={"name": non_standard_name})
)
url = "%s?mount=%s&from=%s" % (url, self.digest, self.repository)
print("POST to request mount from another repository: %s" % url)
headers = {"Content-Type": "application/octet-stream"}
response = requests.post(url, headers=headers)
auth_headers = get_authentication_headers(response)
headers.update(auth_headers)
response = requests.post(url, headers=headers)
assert "Location" in response.headers
assert non_standard_name in response.headers["Location"]
download_url = add_url_prefix(response.headers["Location"])
response = requests.get(download_url, headers=auth_headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_push_chunked(self):
"""
POST /v2/<name>/blobs/uploads/
PATCH <location>
PUT /v2/<name>/blobs/uploads/
"""
url = "http://127.0.0.1:8000%s" % (
reverse("django_oci:blob_upload", kwargs={"name": self.repository})
)
print("POST to request chunked session: %s" % url)
headers = {"Content-Type": "application/octet-stream", "Content-Length": "0"}
response = requests.post(url, headers=headers)
auth_headers = get_authentication_headers(response)
headers.update(auth_headers)
response = requests.post(url, headers=headers)
# Location must be in response header
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.assertTrue("Location" in response.headers)
session_url = "http://127.0.0.1:8000%s" % response.headers["Location"]
# Read the file in chunks, for each do a patch
start = 0
with open(self.image, "rb") as fd:
for chunk in read_in_chunks(fd):
if not chunk:
break
end = start + len(chunk) - 1
content_range = "%s-%s" % (start, end)
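                # e.g. "0-1023" for the first chunk with the default 1 KiB chunk size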
headers = {
"Content-Range": content_range,
"Content-Length": str(len(chunk)),
"Content-Type": "application/octet-stream",
}
headers.update(auth_headers)
start = end + 1
print("PATCH to upload content range: %s" % content_range)
response = requests.patch(session_url, data=chunk, headers=headers)
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.assertTrue("Location" in response.headers)
# Finally, issue a PUT request to close blob
session_url = "%s?digest=%s" % (session_url, self.digest)
response = requests.put(session_url, headers=auth_headers)
# Location must be in response header
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue("Location" in response.headers)
def test_push_view_delete_manifest(self):
"""
PUT /v2/<name>/manifests/<reference>
DELETE /v2/<name>/manifests/<reference>
"""
url = "http://127.0.0.1:8000%s" % (
reverse(
"django_oci:image_manifest",
kwargs={"name": self.repository, "tag": "latest"},
)
)
print("PUT to create image manifest: %s" % url)
# Calculate digest for config (yes, we haven't uploaded the blob, it's ok)
with open(self.config, "r") as fd:
content = fd.read()
config_digest = calculate_digest(content.encode("utf-8"))
# Prepare the manifest (already a text string)
manifest = get_manifest(config_digest, self.digest)
manifest_reference = "sha256:%s" % calculate_digest(manifest.encode("utf-8"))
headers = {
"Content-Type": "application/vnd.oci.image.manifest.v1+json",
"Content-Length": str(len(manifest)),
}
response = requests.put(url, headers=headers, data=manifest)
auth_headers = get_authentication_headers(response)
headers.update(auth_headers)
response = requests.put(url, headers=headers, data=manifest)
# Location must be in response header
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue("Location" in response.headers)
# test manifest download
response = requests.get(url, headers=auth_headers).json()
for key in ["schemaVersion", "config", "layers", "annotations"]:
assert key in response
# Retrieve newly created tag
tags_url = "http://127.0.0.1:8000%s" % (
reverse("django_oci:image_tags", kwargs={"name": self.repository})
)
print("GET to list tags: %s" % tags_url)
tags = requests.get(tags_url, headers=auth_headers)
self.assertEqual(tags.status_code, status.HTTP_200_OK)
tags = tags.json()
for key in ["name", "tags"]:
assert key in tags
# First delete tag (we are allowed to have an untagged manifest)
response = requests.delete(url, headers=auth_headers)
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
# Finally, delete the manifest
url = "http://127.0.0.1:8000%s" % (
reverse(
"django_oci:image_manifest",
kwargs={"name": self.repository, "reference": manifest_reference},
)
)
response = requests.delete(url, headers=auth_headers)
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
def test_push_single_monolithic_post(self):
"""
POST /v2/<name>/blobs/uploads/
"""
# Push the image blob, should return 401 without authentication
response = self.push(digest=self.digest, data=self.data, test_response=False)
headers = get_authentication_headers(response)
response = self.push(
digest=self.digest,
data=self.data,
test_response=False,
extra_headers=headers,
)
assert response.status_code == 201
assert "Location" in response.headers
download_url = add_url_prefix(response.headers["Location"])
response = requests.get(download_url, headers=headers if headers else None)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Upload an image manifest
with open(self.config, "r") as fd:
content = fd.read().encode("utf-8")
config_digest = calculate_digest(content)
self.push(digest=config_digest, data=content, extra_headers=headers)
def setUp(self):
self.repository = "vanessa/container"
self.image = os.path.abspath(
os.path.join(here, "..", "examples", "singularity", "busybox_latest.sif")
)
self.config = os.path.abspath(
os.path.join(here, "..", "examples", "singularity", "config.json")
)
# Read binary data and calculate sha256 digest
with open(self.image, "rb") as fd:
self.data = fd.read()
self._digest = calculate_digest(self.data)
self.digest = "sha256:%s" % self._digest
def add_url_prefix(download_url):
if not download_url.startswith("http"):
download_url = "http://127.0.0.1:8000%s" % download_url
return download_url
| 13,930 | 4,231 |
#LCST Plotter
#Author: ESTC
import numpy
import streamlit
import matplotlib.pyplot as plt
import pandas
def launch_app():
streamlit.title("LCST Plotter")
global cation, anion, mw_cat, mw_an, datafile
cation = streamlit.text_input("Enter the abbreviation of the cation:")
# mw_cat = streamlit.text_input("Enter the molecular weight of the cation:")
    anion = streamlit.text_input("Enter the abbreviation of the anion:")
# mw_an = streamlit.text_input("Enter the molecular weight of the anion:")
T_start = streamlit.text_input("Enter start temperature in °C")
streamlit.text_input("Enter your initials:")
datafile = streamlit.file_uploader("Upload the LCST file:",type="xlsx")
def load_data(datafile):
global T,x1a,x1b,x1
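    # Expected workbook columns (per the lookups below): 'T' in Kelvin,
    # plus "x'1" and 'x"1' for the two coexisting water mole fractions.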
data = pandas.read_excel(datafile)
T = data['T']-273.15
x1a = data["x'1"]
x1b = data['x"1']
streamlit.dataframe(data)
def make_plot(x1a,x1b,T,cation,anion):
fig,ax = plt.subplots()
ax.set_title("Predicted Phase Diagram of Aqueous ["+cation+"]["+anion+"]")
ax.scatter(x1a,T,marker=".",c="blue")
ax.scatter(x1b,T,marker=".",c="blue")
ax.set_xlabel("Water Mole Fraction")
ax.set_xlim([0,1.05])
ax.set_ylabel("Temperature (°C)")
ax.set_ylim([0,150])
plt.savefig(cation+"_"+anion+".png")
streamlit.pyplot(fig)
launch_app()
if datafile is not None:
load_data(datafile)
make_plot(x1a,x1b,T,cation,anion)
| 1,492 | 602 |
#!/usr/bin/env python3
from flask import Flask, render_template, app, url_for,request
import tweepy # To consume Twitter's API
import pandas as pd # To handle data
import numpy as np # For number computing
from textblob import TextBlob
import re
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.corpus import stopwords
import time
import itertools
app=Flask(__name__)
@app.route('/')
@app.route('/index')
def index():
return render_template('index2.html')
@app.route('/index2')
def index2():
return render_template('index.html')
@app.route('/layout')
def layout():
return render_template('layout.html')
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/Sentiment_Search', methods=['POST'])
def Sentiment_Search():
search=request.form['search_Text']
sid = SentimentIntensityAnalyzer()
ss = sid.polarity_scores(search)
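    # polarity_scores returns e.g. {'neg': 0.0, 'neu': 0.323, 'pos': 0.677, 'compound': 0.6369}
    # (illustrative values); each component is scaled to a percentage below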
neg = float(ss['neg']*100)
neu = float(ss['neu']*100)
pos = float(ss['pos']*100)
compound =float(ss['compound']*100)
ok=1
return render_template("home.html",okk=ok,negg=neg,neuu=neu,poss=pos,comm=compound,srch=search)
@app.route('/facebook', methods=['POST'])
def facebook():
try:
driver = webdriver.Firefox()
driver.get("https://www.facebook.com")
wait = WebDriverWait(driver, 600)
u_id = wait.until(EC.presence_of_element_located((By.XPATH,'//div[@class="_1k67 _cy7"]')))
u_id.click()
x=0
while x<1000:
driver.execute_script("window.scrollBy(0,2000)")
time.sleep(1)
x=x+50
status=driver.find_elements_by_xpath('//div[@class="_1dwg _1w_m _q7o"]')
stdetails=[]
for i in status:
stdetails.append(i.text)
status_details=[]
for i in stdetails:
status_details.append(i.split())
tokenized=list(itertools.chain.from_iterable(status_details))
        # remove stopwords from the token list
        tokenized=[i for i in tokenized if i.lower() not in stopwords.words('english')]
sid = SentimentIntensityAnalyzer()
neg=0
neu=0
pos=0
compound=0
for sentence in tokenized:
ss = sid.polarity_scores(sentence)
neg = neg+ float(ss['neg'])
neu = neu +float(ss['neu'])
pos = pos + float(ss['pos'])
compound = compound+float(ss['compound'])
total=neg+neu+pos+compound
negative=(neg/total)*100
neutral=(neu/total)*100
positive=(pos/total)*100
compound=((compound/total)*100)
if negative > neutral and negative > positive and negative > compound:
greatest=negative
great="Highest Polarity is of Negative"
if neutral > positive and neutral > negative and neutral > compound:
greatest=neutral
great="Highest Polarity is of Neutral"
if positive > neutral and positive > negative and positive > compound:
greatest=positive
great="Highest Polarity is of Positive"
if compound > neutral and compound > negative and compound > positive:
            greatest=compound
great="Highest Polarity is of Compound"
greatest= float("{0:.2f}".format(greatest))
driver.close()
return render_template('facebook_output.html',negg=negative,poss=positive,neuu=neutral,compp=compound,great_per=greatest,str_var=great)
    except Exception:
err=1
titleshow="Some Error !! try again ......."
return render_template("whatsapp.html",error=titleshow,condition=err)
@app.route('/whatsappAnalysis', methods=['POST'])
def whatsappAnalysis():
target=request.form['conversation_id']
try:
driver = webdriver.Firefox()
driver.get("https://web.whatsapp.com/")
wait = WebDriverWait(driver, 600)
x_arg = '//span[contains(@title, '+ '"' +target + '"'+ ')]'
person_title = wait.until(EC.presence_of_element_located((By.XPATH, x_arg)))
person_title.click()
x=-50
chat=[]
while x > -2000:
element=driver.find_element_by_xpath("//div[@class='_9tCEa']")
            driver.execute_script("arguments[0].scrollIntoView(500);", element)
x=x-100
time.sleep(1)
textget=driver.find_elements_by_class_name("selectable-text.invisible-space.copyable-text")
print("Number of tweets extracted: {}.\n".format(len(textget)))
for Text in textget:
chat.append(Text.text)
menu=driver.find_elements_by_class_name("rAUz7")
menu[2].click()
        menu_items = driver.find_elements_by_class_name("_10anr.vidHz._28zBA")
        menu_items[5].click()
a=len(chat)
b=int(a/2)
data=chat[b:a]
sid = SentimentIntensityAnalyzer()
neg=0
neu=0
pos=0
compound=0
for sentence in data:
ss = sid.polarity_scores(sentence)
neg = neg+ float(ss['neg'])
neu = neu +float(ss['neu'])
pos = pos + float(ss['pos'])
compound = compound+float(ss['compound'])
total=neg+neu+pos+compound
negative=(neg/total)*100
neutral=(neu/total)*100
positive=(pos/total)*100
compound=((compound/total)*100)
if negative > neutral and negative > positive and negative > compound:
greatest=negative
great="Highest Polarity is of Negative"
if neutral > positive and neutral > negative and neutral > compound:
greatest=neutral
great="Highest Polarity is of Neutral"
if positive > neutral and positive > negative and positive > compound:
greatest=positive
great="Highest Polarity is of Positive"
if compound > neutral and compound > negative and compound > positive:
            greatest=compound
great="Highest Polarity is of Compound"
greatest= float("{0:.2f}".format(greatest))
driver.close()
return render_template('facebook_output.html',negg=negative,poss=positive,neuu=neutral,compp=compound,great_per=greatest,str_var=great)
print("ok")
    except Exception:
err=1
titleshow="Some Error !! try again ......."
return render_template("facebook_output.html",error=titleshow,condition=err)
@app.route('/datacoming_twitter', methods=['POST'])
def data_twitter():
try:
CONSUMER_KEY = '--'
CONSUMER_SECRET = '--'
ACCESS_TOKEN = '--'
ACCESS_SECRET = '--'
def twitter_setup():
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
api = tweepy.API(auth)
return api
# We create an extractor object:
extractor = twitter_setup()
SearchName=request.form['tw_username']
tweets = extractor.user_timeline(screen_name="@"+SearchName, count=200)
length_tweets=str(len(tweets))
data = pd.DataFrame(data=[tweet.text for tweet in tweets], columns=['Tweets'])
data['len'] = np.array([len(tweet.text) for tweet in tweets])
data['ID'] = np.array([tweet.id for tweet in tweets])
data['Date'] = np.array([tweet.created_at for tweet in tweets])
data['Source'] = np.array([tweet.source for tweet in tweets])
data['Likes'] = np.array([tweet.favorite_count for tweet in tweets])
data['RTs'] = np.array([tweet.retweet_count for tweet in tweets])
mean = np.mean(data['len'])
fav_max = np.max(data['Likes'])
rt_max = np.max(data['RTs'])
fav = data[data.Likes == fav_max].index[0]
rt = data[data.RTs == rt_max].index[0]
liked_tweet=data['Tweets'][fav]
retweets=data['Tweets'][rt]
sources = []
for source in data['Source']:
if source not in sources:
sources.append(source)
def clean_tweet(tweet):
"""
Utility function to clean the text in a tweet by removing
links and special characters using regex.
"""
return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())
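        # e.g. clean_tweet("@user check https://t.co/x #cool!") -> "check cool" (illustrative)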
        def analyze_sentiment(tweet):
"""
Utility function to classify the polarity of a tweet
using textblob
"""
analysis = TextBlob(clean_tweet(tweet))
if analysis.sentiment.polarity > 0:
return 1
elif analysis.sentiment.polarity == 0:
return 0
else:
return -1
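        # e.g. analyze_sentiment("great product") -> 1, analyze_sentiment("terrible") -> -1
        # (illustrative; the sign of the TextBlob polarity drives the label)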
        data['SA'] = np.array([ analyze_sentiment(tweet) for tweet in data['Tweets'] ])
pos_tweets = [ tweet for index, tweet in enumerate(data['Tweets']) if data['SA'][index] > 0]
neu_tweets = [ tweet for index, tweet in enumerate(data['Tweets']) if data['SA'][index] == 0]
neg_tweets = [ tweet for index, tweet in enumerate(data['Tweets']) if data['SA'][index] < 0]
pos_Percent=len(pos_tweets)/len(data['Tweets'])*100
neu_Percent=len(neu_tweets)/len(data['Tweets'])*100
neg_Percent=len(neg_tweets)/len(data['Tweets'])*100
if pos_Percent > neu_Percent and pos_Percent > neg_Percent:
greatest=pos_Percent
great="Highest Polarity is of Positive"
if neu_Percent > pos_Percent and neu_Percent > neg_Percent:
greatest=neu_Percent
great="Highest Polarity is of Neutral"
        if neg_Percent > pos_Percent and neg_Percent > neu_Percent:
            greatest=neg_Percent
            great="Highest Polarity is of Negative"
greatest= float("{0:.2f}".format(greatest))
return render_template('twitter_output.html',twit_src=sources,likeTweet=liked_tweet,retweet=retweets,pos=pos_Percent,neg=neg_Percent,neu=neu_Percent,great_per=greatest,str_var=great)
print("ok")
    except Exception:
err=1
titleshow="Some Error !! try again ......."
return render_template("twitter_output.html",error=titleshow,condition=err)
@app.route('/cancer')
def cancer():
return render_template('cancer.html')
@app.route('/cancerPredict', methods=['POST'])
def cancerPredict():
age=float(request.form['age'])
gender=float(request.form['gender'])
air=float(request.form['values'])
alch=float(request.form['values1'])
dust=float(request.form['values2'])
occp=float(request.form['values3'])
gene=float(request.form['values4'])
ldesc=float(request.form['values5'])
diet=float(request.form['values6'])
obsty=float(request.form['values7'])
smoke=float(request.form['values8'])
psmoke=float(request.form['values9'])
chest=float(request.form['values10'])
cough=float(request.form['values11'])
fatig=float(request.form['values12'])
weight=float(request.form['values13'])
breath=float(request.form['values14'])
wheez=float(request.form['values15'])
swallow=float(request.form['values16'])
nails=float(request.form['values17'])
cold=float(request.form['values18'])
dcough=float(request.form['values19'])
snore=float(request.form['values20'])
    data=pd.read_excel("cancer_patient_data_sets .xlsx").values
#print(data)
#print(data[0,1:24])
train_data=data[0:998,1:24]
train_target=data[0:998,24]
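    # Columns 1-23 hold the 23 risk-factor features (matching the order of the
    # form fields collected above); column 24 holds the severity label.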
'''print(train_target)
test_data=data[999:,1:24]
test_target=data[999:,24]
print(test_target)'''
clf=DecisionTreeClassifier()
trained=clf.fit(train_data,train_target)
clf1=SVC()
trained1=clf1.fit(train_data,train_target)
clf2=KNeighborsClassifier(n_neighbors=3)
trained2=clf2.fit(train_data,train_target)
test=[age,gender,air,alch,dust,occp,gene,ldesc,diet,obsty,smoke,psmoke,chest,cough,fatig,weight,breath,wheez,swallow,nails,cold,dcough,snore]
#test=[34,1,2,3,4,5,6,7,6,5,4,3,2,1,2,3,4,5,2,3,5,2,3]
predicted=trained.predict([test])
predicted1=trained1.predict([test])
predicted2=trained2.predict([test])
print(predicted)
print(predicted1)
print(predicted2)
#print(test_target)
'''
acc=accuracy_score(predicted,test_target)
print(acc)
acc1=accuracy_score(predicted1,test_target)
print(acc)
acc2=accuracy_score(predicted2,test_target)
print(acc)
'''
#print(train_target)
#print(age,gender,air,alch,dust,occp,gene,ldesc,diet,obsty,smoke,psmoke,chest,cough,fatig,weight,breath,wheez,swallow,nails,cold,dcough,snore)
    return render_template("cancer.html",predicted=predicted,predicted1=predicted1,predicted2=predicted2)
if __name__ == '__main__':
app.run("127.0.0.1",5000,debug=True)
| 13,039 | 4,388 |
import torch
def save_param(model, pth_path):
'''
save the parameters of the model
Args:
model: the model to which the params belong
pth_path: the path where .pth file is saved
'''
torch.save(model.state_dict(), pth_path)
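# Usage sketch (hypothetical names):
#   save_param(net, "checkpoint.pth")   # write the weights to disk
#   load_param(net, "checkpoint.pth")   # restore them into the same architecture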
def load_param(model, pth_path):
'''
load the parameters of the model
Args:
model: the model where the params go into
pth_path: the path where .pth (to be loaded) is saved
'''
    model.load_state_dict(torch.load(pth_path))
| 529 | 171 |
#Project Euler Problem-77
#Author Tushar Gayan
#Multinomial Theorem
import math
import numpy as np
def mod_list(pow,terms):
m = []
for i in range(terms):
if i%pow == 0:
m.append(1)
else:
m.append(0)
return m[::-1]
def prime_check(num):
    if num < 2:
        return False
    for i in range(2, int(math.sqrt(num)) + 1):
        if num % i == 0:
            return False
    return True
'''prime_list = []
i = 1
while len(prime_list)<200:
if prime_check(i) == True:
prime_list.append(i)
i +=1
print(prime_list)
m = 1
for i in prime_list:
m *= np.poly1d(mod_list(i,30))
#print(i)
print(np.poly1d(m))
#for i in range(480):
# print(m[i])
print(m.c)'''
def partition(n):
if n<4:
return 1
else:
prime_list = []
for i in range(2,n+1):
if prime_check(i)==True:
prime_list.append(i)
#print(prime_list)
poly_list = []
poly = 1
for j in prime_list:
#poly_list.append(np.poly1d(mod_list(j,n+1)))
#print(np.poly1d(mod_list(j,n+1)))
poly *= np.poly1d(mod_list(j,n+1))
return poly[n]
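# Sanity check from the problem statement: 10 has exactly five prime partitions
# (7+3, 5+5, 5+3+2, 3+3+2+2, 2+2+2+2+2), so partition(10) should be 5.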
i = 1
while partition(i) < 5000:
i += 1
print(partition(i), i)
| 1,348 | 516 |