seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
17598457644 | from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from PIL import Image
def make_pages(page, item_list, page_size):
    """Paginate ``item_list`` and return the requested page of items.

    Falls back to the first page when ``page`` is not an integer and to the
    last page when ``page`` is past the end of the paginator.
    """
    paginator = Paginator(item_list, page_size)
    try:
        # Pass the raw value: Paginator.page() itself raises PageNotAnInteger
        # for non-numeric input.  The previous int(page) call raised a bare
        # ValueError for input like "?page=abc", bypassing the fallback below.
        images = paginator.page(page)
    except PageNotAnInteger:
        images = paginator.page(1)
    except EmptyPage:
        images = paginator.page(paginator.num_pages)
    return images
def image_rotate(image_url, angle):
    """Rotate the image stored at the site-relative ``image_url`` in place.

    ``image_url`` is expected to start with '/' (a media URL); the file is
    resolved relative to the current working directory, rotated by ``angle``
    degrees counter-clockwise and written back over the original file.
    Returns the string 'success'.
    """
    path = '.' + image_url
    # Use a context manager so the source file handle is closed even if
    # rotate() raises; the previous version leaked the open file object.
    with Image.open(path) as original:
        rotated = original.rotate(angle)
    rotated.save(path)
    return 'success'
| mr-shubhamsinghal/gallery-app | gallery/utils.py | utils.py | py | 510 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.core.paginator.Paginator",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.PageNotAnInteger",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.EmptyPage",
"line_number": 14,
"usage_type": ... |
7176590579 | import secrets
from eth_keys import (
keys,
)
from eth_utils import (
int_to_big_endian,
)
try:
import factory
except ImportError:
raise ImportError(
"The p2p.tools.factories module requires the `factory_boy` library."
)
def _mk_private_key_bytes() -> bytes:
    """Return 32 cryptographically random bytes for use as a private key."""
    # randbits(256) may yield fewer than 32 significant bytes; left-pad
    # with zero bytes so the result is always exactly 32 bytes long.
    raw = int_to_big_endian(secrets.randbits(256))
    return raw.rjust(32, b"\x00")
class PrivateKeyFactory(factory.Factory):
    """factory_boy factory producing ``eth_keys`` PrivateKey instances."""

    class Meta:
        model = keys.PrivateKey

    # A fresh random 32-byte seed is generated for every instance.
    private_key_bytes = factory.LazyFunction(_mk_private_key_bytes)
def _mk_public_key_bytes() -> bytes:
    """Derive public-key bytes from a freshly generated random private key."""
    return PrivateKeyFactory().public_key.to_bytes()
class PublicKeyFactory(factory.Factory):
    """factory_boy factory producing ``eth_keys`` PublicKey instances."""

    class Meta:
        model = keys.PublicKey

    # Each public key is derived from a brand-new random private key.
    public_key_bytes = factory.LazyFunction(_mk_public_key_bytes)
| ethereum/py-evm | eth/tools/factories/keys.py | keys.py | py | 772 | python | en | code | 2,109 | github-code | 6 | [
{
"api_name": "eth_utils.int_to_big_endian",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "secrets.randbits",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "factory.Factory",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "eth... |
18183301241 | from django.shortcuts import render, redirect
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from .models import Post, Comment
from .forms import CommentForm, ContactForm
# Create your views here.
def home(request):
    """Render the static landing page."""
    return render(request, 'home.html')
def about(request):
    """Render the static about page."""
    return render(request, 'about.html')
def market_index(request):
    """List every post on the market index page."""
    context = {'posts': Post.objects.all()}
    return render(request, 'market/index.html', context)
def market_detail(request, post_id):
    """Show a single post together with an empty comment form."""
    context = {
        'post': Post.objects.get(id=post_id),
        'comment_form': CommentForm(),
    }
    return render(request, 'market/detail.html', context)
def add_comment(request, post_id):
    """Attach a submitted comment to a post, then return to its detail page.

    Invalid forms are silently dropped and the user is redirected anyway.
    """
    form = CommentForm(request.POST)
    if form.is_valid():
        comment = form.save(commit=False)
        comment.post_id = post_id
        comment.save()
    return redirect('detail', post_id=post_id)
def add_contact(request, comment_id, post_id):
    """Attach a submitted contact to a comment, then return to the post page.

    Invalid forms are silently dropped and the user is redirected anyway.
    """
    form = ContactForm(request.POST)
    if form.is_valid():
        contact = form.save(commit=False)
        contact.comment_id = comment_id
        contact.save()
    return redirect('detail', post_id=post_id)
class PostCreate(CreateView):
    """Create a new Post; the owning 'user' is selected via the form."""
    model = Post
    fields = ['item', 'picture', 'description', 'price', 'user']


class PostUpdate(UpdateView):
    """Edit an existing Post (the owner cannot be changed here)."""
    model = Post
    fields = ['item', 'picture', 'description', 'price']


class PostDelete(DeleteView):
    """Delete a Post and return to the market listing afterwards."""
    model = Post
    success_url = '/market'
| gollobc/Meridio-Full-Stack-App | meridio/main_app/views.py | views.py | py | 1,500 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.shortcuts.render",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "models.Post.objects.all",
"line_number": 15,
"usage_type": "call"
},
{
"api_name":... |
34450081054 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.shortcuts import render
from django.views.generic.base import View
from django.http import JsonResponse
from participa.settings import SEFAZ_API_URL
from participa.auth_sefaz.views import ParticipaSefazRequest, BaseView
from participa.auth_sefaz.models import User
from participa.report.models import Report, MonitoredNFe
from rest_framework.renderers import JSONRenderer
import json
class ReportView(BaseView):
    """Receive a report payload, forward it to SEFAZ and persist its id.

    NOTE(review): ``get`` takes no ``request`` argument, unlike a standard
    Django view method — presumably BaseView dispatches differently; confirm.
    """

    def get(self):
        # GET is intentionally a no-op.
        pass

    def post(self, *args, **kwargs):
        # The raw Authorization header is forwarded as-is to send_report.
        get_token = self.request.META['HTTP_AUTHORIZATION']
        data = json.loads(str(self.request.body, "utf_8"))
        # Look up the recipient by CPF taken from the JSON payload.
        user = User.objects.filter(cpf=data.get("cpfDestinatario", None)).first()
        if user:
            # send_report is defined on BaseView (not visible here); it appears
            # to return a response object whose ``text`` is the remote report id.
            return_report = self.send_report(json.dumps(data), get_token)
            if return_report:
                report = Report(id_report=return_report.text, user=user)
                report.save()
            return self.success_recive()
        else:
            return self.error_recive()
class QRCodeMonitorView(BaseView):
    """Register a scanned NFe QR code to be monitored for a given user."""

    def post(self, *args, **kwargs):
        payload = json.loads(str(self.request.body, "utf_8"))
        owner = User.objects.filter(cpf=payload.get("cpf", None)).first()
        qr_data = payload.get("qrcode_data", None)
        # Both a known user and a QR payload are required.
        if not (owner and qr_data):
            return self.error_recive()
        MonitoredNFe(user=owner, qr_code_data=qr_data).save()
        return self.success_recive()
class QRCodeMonitorListView(BaseView):
    """Return every monitored NFe entry belonging to the posted CPF as JSON."""

    def post(self, *args, **kwargs):
        payload = json.loads(str(self.request.body, "utf_8"))
        owner = User.objects.filter(cpf=payload.get("cpf", None)).first()
        if owner is None:
            return self.error_recive()
        entries = MonitoredNFe.objects.filter(user=owner)
        serialized = dict(monitoreds=list(entries.values(
            'pk', 'status', 'qr_code_data', 'created_at', 'updated_at')))
        return JsonResponse(serialized)
| vctrferreira/hackathon-sefaz | participa/report/views.py | views.py | py | 2,135 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "participa.auth_sefaz.views.BaseView",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "participa.auth_sefaz.models.User.objects.filter",
"line_number": 23,
"usage_type": "call"
... |
39915090383 | import numpy as np
import pandas as pd
import math
import requests, json, time
from datetime import datetime
class Processor:
    """Fetch minute-level OHLCV history from the CryptoCompare REST API."""

    def __init__(self, history_length):
        # Kept for interface compatibility; not used by the fetch methods.
        self.history_length = history_length

    def fetchHistoricalDataForTicker(self, fsym, tsym, lim):
        """Return a (lim x 6) numpy array of OHLCV rows indexed-then-dropped
        by timestamp, or None when the API returns no rows.

        fsym/tsym are the from/to ticker symbols, lim the number of minutes.
        """
        df_cols = ['time', 'open', 'high', 'low', 'close', 'volumefrom', 'volumeto']
        curr_ts = str(int(time.time()))
        limit = str(lim)
        histURL = ('https://min-api.cryptocompare.com/data/histominute?fsym=' + fsym
                   + '&tsym=' + tsym + '&limit=' + limit + '&toTs=' + curr_ts
                   + '&aggregate=1' + '&e=Coinbase')  # CCCAGG for aggregated
        resp = requests.get(histURL)
        resp_json = json.loads(resp.content.decode('utf-8'))
        # Collect all rows first and build the frame once: DataFrame.append
        # was removed in pandas 2.x and was O(n^2) row-by-row anyway.
        rows = [[resp_json['Data'][i][col] for col in df_cols] for i in range(lim)]
        df = pd.DataFrame(rows, columns=df_cols)
        if df.empty:
            return
        df = df.rename(index=str, columns={"time": "ts"})
        df.index = pd.to_datetime(df.ts, unit='s')
        df = df.drop('ts', axis=1)
        # as_matrix() was removed in pandas 1.0; to_numpy() is its replacement.
        return df.to_numpy()

    def fetchData(self):
        """Fetch 2000 minutes of ETH/USD data split into train/test arrays."""
        data = self.fetchHistoricalDataForTicker('ETH', 'USD', 2000)
        # Arbitrary 1500/500 split. The original sliced test from 1501 and
        # silently dropped row 1500; start at 1500 so no row is lost.
        train_data = data[:1500, :]
        test_data = data[1500:, :]
        return {'train': train_data, 'test': test_data}
| kwhuo68/rl-btc | processor.py | processor.py | py | 1,365 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "time.time",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number... |
73051866108 | import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
import tensorflow as tf
from tensorflow import keras
# Load a flower-classification model previously trained and saved to disk.
model = tf.keras.models.load_model('C:/AI/model.h5')
# Class order must match the label order used when the model was trained.
class_names = ['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips']
# Input size the model expects.
img_height = 180
img_width = 180
sunflower_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/592px-Red_sunflower.jpg"
# get_file downloads once and caches the file locally for later runs.
sunflower_path = tf.keras.utils.get_file('Red_sunflower', origin=sunflower_url)
img = keras.preprocessing.image.load_img(
    sunflower_path, target_size=(img_height, img_width)
)
img_array = keras.preprocessing.image.img_to_array(img)
img_array = tf.expand_dims(img_array, 0)  # Create a batch
predictions = model.predict(img_array)
# softmax turns the raw prediction vector into per-class probabilities.
score = tf.nn.softmax(predictions[0])
plt.imshow(img)
plt.axis('off')
plt.show()
print(score)
print(class_names[np.argmax(score)])
print(100 * np.max(score))  # confidence of the top class, in percent
| dasfef/PyQt5 | Ex20221202_3(h5 activate).py | Ex20221202_3(h5 activate).py | py | 925 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "tensorflow.keras.models.load_model",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.utils.get_file",
"line_number": 16,
"usage_type": "call"
},
{... |
74147628668 | # coding: utf-8
"""
Pydici billing views. Http request are processed here.
@author: Sébastien Renard (sebastien.renard@digitalfox.org)
@license: AGPL v3 or newer (http://www.gnu.org/licenses/agpl-3.0.html)
"""
from datetime import date, timedelta
import mimetypes
import json
from io import BytesIO
import os
import subprocess
import tempfile
from os.path import basename
from django.shortcuts import render
from django.urls import reverse, reverse_lazy
from django.utils.translation import gettext as _
from django.utils import translation
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.db.models import Sum, Q, F, Min, Max, Count
from django.db.models.functions import TruncMonth
from django.views.generic import TemplateView
from django.views.decorators.cache import cache_page
from django.forms.models import inlineformset_factory
from django.forms.utils import ValidationError
from django.contrib import messages
from django.utils.decorators import method_decorator
from django.template.loader import get_template
from django_weasyprint.views import WeasyTemplateResponse, WeasyTemplateView
from pypdf import PdfMerger, PdfReader
import facturx
from billing.utils import get_billing_info, update_client_bill_from_timesheet, update_client_bill_from_proportion, \
bill_pdf_filename, get_client_billing_control_pivotable_data, generate_bill_pdf
from billing.models import ClientBill, SupplierBill, BillDetail, BillExpense
from leads.models import Lead
from people.models import Consultant
from people.utils import get_team_scopes
from staffing.models import Timesheet, Mission
from staffing.views import MissionTimesheetReportPdf
from crm.models import Subsidiary
from crm.utils import get_subsidiary_from_session
from core.utils import get_fiscal_years_from_qs, get_parameter, user_has_feature
from core.utils import COLORS, nextMonth, previousMonth
from core.decorator import pydici_non_public, PydiciNonPublicdMixin, pydici_feature, PydiciFeatureMixin
from billing.forms import BillDetailInlineFormset, BillExpenseFormSetHelper, BillExpenseInlineFormset, BillExpenseForm
from billing.forms import ClientBillForm, BillDetailForm, BillDetailFormSetHelper, SupplierBillForm
@pydici_non_public
@pydici_feature("reports")
def bill_review(request):
    """Review of clients bills: overdue, due soon, litigious or recently paid,
    plus won leads with recent timesheet activity but no bill yet."""
    today = date.today()
    wait_warning = timedelta(15)  # wait in days used to warn that a bill is due soon
    # Optional subsidiary scope stored in the user's session.
    subsidiary = get_subsidiary_from_session(request)
    # Get bills overdue, due soon, litigious and recently paid
    overdue_bills = ClientBill.objects.filter(state="1_SENT", due_date__lte=today)
    overdue_bills = overdue_bills.prefetch_related("lead__responsible", "lead__subsidiary").select_related("lead__client__contact", "lead__client__organisation__company")
    soondue_bills = ClientBill.objects.filter(state="1_SENT", due_date__gt=today, due_date__lte=(today + wait_warning))
    soondue_bills = soondue_bills.prefetch_related("lead__responsible", "lead__subsidiary").select_related("lead__client__contact", "lead__client__organisation__company")
    recent_bills = ClientBill.objects.filter(state="2_PAID").order_by("-payment_date")
    recent_bills = recent_bills.prefetch_related("lead__responsible", "lead__subsidiary").select_related("lead__client__contact", "lead__client__organisation__company")
    litigious_bills = ClientBill.objects.filter(state="3_LITIGIOUS").select_related()
    # Filter bills on subsidiary if defined
    if subsidiary:
        overdue_bills = overdue_bills.filter(lead__subsidiary=subsidiary)
        soondue_bills = soondue_bills.filter(lead__subsidiary=subsidiary)
        recent_bills = recent_bills.filter(lead__subsidiary=subsidiary)
        litigious_bills = litigious_bills.filter(lead__subsidiary=subsidiary)
    # Limit recent bill to last 20 ones
    recent_bills = recent_bills[: 20]
    # Compute totals
    soondue_bills_total = soondue_bills.aggregate(Sum("amount"))["amount__sum"]
    overdue_bills_total = overdue_bills.aggregate(Sum("amount"))["amount__sum"]
    litigious_bills_total = litigious_bills.aggregate(Sum("amount"))["amount__sum"]
    # amount_with_vat can be missing, hence the filter inside each comprehension.
    soondue_bills_total_with_vat = sum([bill.amount_with_vat for bill in soondue_bills if bill.amount_with_vat])
    overdue_bills_total_with_vat = sum([bill.amount_with_vat for bill in overdue_bills if bill.amount_with_vat])
    litigious_bills_total_with_vat = sum([bill.amount_with_vat for bill in litigious_bills if bill.amount_with_vat])
    # Get leads with done timesheet in past three month that don't have bill yet
    leads_without_bill = Lead.objects.filter(state="WON", mission__timesheet__working_date__gte=(date.today() - timedelta(90)))
    leads_without_bill = leads_without_bill.annotate(Count("clientbill")).filter(clientbill__count=0)
    if subsidiary:
        leads_without_bill = leads_without_bill.filter(subsidiary=subsidiary)
    return render(request, "billing/bill_review.html",
                  {"overdue_bills": overdue_bills,
                   "soondue_bills": soondue_bills,
                   "recent_bills": recent_bills,
                   "litigious_bills": litigious_bills,
                   "soondue_bills_total": soondue_bills_total,
                   "overdue_bills_total": overdue_bills_total,
                   "litigious_bills_total": litigious_bills_total,
                   "soondue_bills_total_with_vat": soondue_bills_total_with_vat,
                   "overdue_bills_total_with_vat": overdue_bills_total_with_vat,
                   "litigious_bills_total_with_vat": litigious_bills_total_with_vat,
                   "leads_without_bill": leads_without_bill,
                   "billing_management": user_has_feature(request.user, "billing_management"),
                   "consultant": Consultant.objects.filter(trigramme__iexact=request.user.username).first(),
                   "user": request.user})
@pydici_non_public
@pydici_feature("billing_request")
def supplier_bills_validation(request):
    """Review and validate suppliers bills"""
    today = date.today()
    subsidiary = get_subsidiary_from_session(request)
    # Pending supplier bills, split by due date relative to today.
    pending = SupplierBill.objects.filter(state__in=("1_RECEIVED", "1_VALIDATED"))
    overdue = pending.filter(due_date__lte=today).prefetch_related("lead").select_related()
    soondue = pending.filter(due_date__gt=today).prefetch_related("lead").select_related()
    # Filter bills on subsidiary if defined
    if subsidiary:
        overdue = overdue.filter(lead__subsidiary=subsidiary)
        soondue = soondue.filter(lead__subsidiary=subsidiary)
    return render(request, "billing/supplier_bills_validation.html",
                  {"supplier_soondue_bills": soondue,
                   "supplier_overdue_bills": overdue,
                   "billing_management": user_has_feature(request.user, "billing_management"),
                   "consultant": Consultant.objects.filter(trigramme__iexact=request.user.username).first(),
                   "user": request.user})
@pydici_non_public
@pydici_feature("reports")
@cache_page(60 * 60 * 24)  # heavy report: cache the rendered page for one day
def bill_delay(request):
    """Report on client bill creation and payment delay"""
    data = []
    subsidiary = get_subsidiary_from_session(request)
    # Only bills from the last two years, sent or paid, with a positive amount.
    bills = ClientBill.objects.filter(creation_date__gt=(date.today() - timedelta(2*365)), state__in=("1_SENT", "2_PAID"),
                                      amount__gt=0)
    if subsidiary:
        bills = bills.filter(lead__subsidiary=subsidiary)
    bills = bills.select_related("lead__responsible", "lead__subsidiary", "lead__client__organisation__company",
                                 "lead__paying_authority__company", "lead__paying_authority__contact")
    bills = bills.prefetch_related("billdetail_set__mission")
    for bill in bills:
        # Missing values are serialised as the literal string "null";
        # presumably the template's charting code expects that — confirm.
        data.append(
            {_("Lead"): bill.lead.deal_id,
             _("Responsible"): bill.lead.responsible.name,
             _("Subsidiary"): bill.lead.subsidiary.name,
             _("client company"): bill.lead.client.organisation.company.name,
             _("Paying authority"): str(bill.lead.paying_authority or "null"),
             _("Billing mode"): ",".join(list(set([d.mission.get_billing_mode_display() or "NA" for d in bill.billdetail_set.all()] or ["NA"]))),
             _("creation lag"): bill.creation_lag() or "null",
             _("payment delay"): bill.payment_delay(),
             _("payment wait"): bill.payment_wait(),
             _("creation date"): bill.creation_date.replace(day=1).isoformat()}
        )
    return render(request, "billing/payment_delay.html",
                  {"data": data,
                   "user": request.user},)
class BillingRequestMixin(PydiciFeatureMixin):
    """Mixin declaring the pydici feature required by billing class-based views."""
    pydici_feature = "billing_request"
@pydici_non_public
@pydici_feature("billing_management")
def mark_bill_paid(request, bill_id):
    """Flag the given client bill as paid and return to the bill review page."""
    paid_bill = ClientBill.objects.get(id=bill_id)
    paid_bill.state = "2_PAID"
    paid_bill.save()
    return HttpResponseRedirect(reverse("billing:bill_review"))
@pydici_non_public
@pydici_feature("management")
def validate_supplier_bill(request, bill_id):
    """Mark the given supplier bill as validated.

    Only the responsible consultant of the bill's lead may validate it, and
    only while the bill is still in the received state.
    """
    consultant = Consultant.objects.filter(trigramme__iexact=request.user.username).first()
    bill = SupplierBill.objects.get(id=bill_id)
    # Guard clause: reject anyone but the lead responsible / wrong state.
    if consultant != bill.lead.responsible or bill.state != "1_RECEIVED":
        return HttpResponseRedirect(reverse("core:forbidden"))
    bill.state = "1_VALIDATED"
    bill.save()
    return HttpResponseRedirect(reverse("billing:supplier_bills_validation"))
@pydici_non_public
@pydici_feature("billing_management")
def mark_supplierbill_paid(request, bill_id):
    """Flag the given supplier bill as paid and return to the validation page."""
    paid_bill = SupplierBill.objects.get(id=bill_id)
    paid_bill.state = "2_PAID"
    paid_bill.save()
    return HttpResponseRedirect(reverse("billing:supplier_bills_validation"))
@pydici_non_public
@pydici_feature("management")
def bill_file(request, bill_id=0, nature="client"):
    """Stream the stored bill file (client or supplier) as an attachment.

    Returns an empty response when the bill or its file cannot be found.
    """
    response = HttpResponse()
    model = ClientBill if nature == "client" else SupplierBill
    try:
        bill = model.objects.get(id=bill_id)
        if bill.bill_file:
            content_type = mimetypes.guess_type(bill.bill_file.name)[0] or "application/stream"
            response["Content-Type"] = content_type
            response["Content-Disposition"] = 'attachment; filename="%s"' % basename(bill.bill_file.name)
            for chunk in bill.bill_file.chunks():
                response.write(chunk)
    except (ClientBill.DoesNotExist, SupplierBill.DoesNotExist, OSError):
        # Best effort: deliberately fall through to an empty response.
        pass
    return response
class Bill(PydiciNonPublicdMixin, TemplateView):
    """HTML rendition of a single client bill; also serves as the base for
    the PDF view below."""
    template_name = 'billing/bill.html'

    def get_context_data(self, **kwargs):
        """Add the bill and its non-PDF (image) expense receipts to the context."""
        context = super(Bill, self).get_context_data(**kwargs)
        try:
            bill = ClientBill.objects.get(id=kwargs.get("bill_id"))
            context["bill"] = bill
            context["expenses_image_receipt"] = []
            for expenseDetail in bill.billexpense_set.all():
                # Only image receipts are inlined here; PDF receipts are
                # merged into the final document elsewhere.
                if expenseDetail.expense and expenseDetail.expense.receipt_content_type() != "application/pdf":
                    context["expenses_image_receipt"].append(expenseDetail.expense.receipt_data())
        except ClientBill.DoesNotExist:
            # Unknown bill id: render without a bill in context.
            bill = None
        return context

    @method_decorator(pydici_feature("billing_request"))
    def dispatch(self, *args, **kwargs):
        return super(Bill, self).dispatch(*args, **kwargs)
class BillAnnexPDFTemplateResponse(WeasyTemplateResponse):
    """TemplateResponse override to merge the rendered bill PDF with its
    annexes (PDF expense receipts, timesheet reports), then post-process the
    result with Ghostscript and optionally embed Factur-X data."""

    @property
    def rendered_content(self):
        # The bill is rendered in its own language; the previously active
        # language is restored in the outer finally.
        old_lang = translation.get_language()
        try:
            target = BytesIO()
            bill = self.context_data["bill"]
            translation.activate(bill.lang)
            # Base bill PDF produced by the parent (WeasyPrint) response.
            bill_pdf = super(BillAnnexPDFTemplateResponse, self).rendered_content
            merger = PdfMerger()
            merger.append(PdfReader(BytesIO(bill_pdf)))
            # Add expense receipt
            for billExpense in bill.billexpense_set.all():
                if billExpense.expense and billExpense.expense.receipt_content_type() == "application/pdf":
                    merger.append(PdfReader(billExpense.expense.receipt.file))
            # Add timesheet
            if bill.include_timesheet:
                # Reuse the current request, forced to GET, to drive the
                # timesheet report view for each billed mission/month range.
                fake_http_request = self._request
                fake_http_request.method = "GET"
                for mission in Mission.objects.filter(billdetail__bill=bill).annotate(Min("billdetail__month"), Max("billdetail__month")).distinct():
                    response = MissionTimesheetReportPdf.as_view()(fake_http_request, mission=mission,
                                                                   start=mission.billdetail__month__min,
                                                                   end=mission.billdetail__month__max)
                    merger.append(BytesIO(response.rendered_content))
            merger.write(target)
            target.seek(0)  # Be kind, rewind
            # Make it PDF/A-3B compliant
            cmd = "gs -q -dPDFA=3 -dBATCH -dNOPAUSE -sColorConversionStrategy=UseDeviceIndependentColor -sDEVICE=pdfwrite -dPDFACompatibilityPolicy=1 -sOutputFile=- -"
            try:
                gs_in = tempfile.TemporaryFile()
                gs_out = tempfile.TemporaryFile()
                gs_in.write(target.getvalue())
                target.close()
                gs_in.seek(0)
                subprocess.run(cmd.split(), stdin=gs_in, stdout=gs_out)
                gs_out.seek(0)
                # Add factur-x information
                if bill.add_facturx_data:
                    facturx_xml = get_template("billing/invoice-factur-x.xml").render({"bill": bill})
                    facturx_xml = facturx_xml.encode("utf-8")
                    pdf_metadata = {
                        "author": "enioka",
                        "keywords": "Factur-X, Invoice, pydici",
                        "title": "enioka Invoice %s" % bill.bill_id,
                        "subject": "Factur-X invoice %s dated %s issued by enioka" % (bill.bill_id, bill.creation_date),
                    }
                    pdf = facturx.generate_from_binary(gs_out.read(), facturx_xml, pdf_metadata=pdf_metadata, lang=bill.lang)
                else:
                    pdf = gs_out.read()
            finally:
                gs_out.close()
                gs_in.close()
        finally:
            translation.activate(old_lang)
        return pdf
class BillPdf(Bill, WeasyTemplateView):
    """PDF rendition of a client bill, with annexes merged in by the
    custom response class."""
    response_class = BillAnnexPDFTemplateResponse

    def get_filename(self):
        # The filename convention is centralised in billing.utils.
        bill = self.get_context_data(**self.kwargs)["bill"]
        return bill_pdf_filename(bill)
@pydici_non_public
@pydici_feature("billing_request")
def client_bill(request, bill_id=None):
    """Add or edit client bill.

    Flows handled:
    - GET with ``bill_id``: edit form; detail/expense formsets are only
      editable while the bill is in a work-in-progress state.
    - GET without ``bill_id``: optionally pre-creates a bill from a lead or
      mission given as query parameters, then shows the form.
    - POST: permission check, validation, save, and bill PDF (re)generation.
    """
    billDetailFormSet = None
    billExpenseFormSet = None
    billing_management_feature = "billing_management"
    # States in which the bill content may still be freely edited.
    wip_status = ("0_DRAFT", "0_PROPOSED")
    forbidden = HttpResponseRedirect(reverse("core:forbidden"))
    if bill_id:
        try:
            bill = ClientBill.objects.get(id=bill_id)
            # Chargeable lead expenses not yet attached to any bill.
            have_expenses = bill.lead.expense_set.filter(chargeable=True, billexpense__isnull=True).exists()
        except ClientBill.DoesNotExist:
            raise Http404
    else:
        bill = None
        have_expenses = False
    BillDetailFormSet = inlineformset_factory(ClientBill, BillDetail, formset=BillDetailInlineFormset, form=BillDetailForm, fields="__all__")
    BillExpenseFormSet = inlineformset_factory(ClientBill, BillExpense, formset=BillExpenseInlineFormset, form=BillExpenseForm, fields="__all__")
    if request.POST:
        form = ClientBillForm(request.POST, request.FILES, instance=bill)
        # First, ensure user is allowed to manipulate the bill
        if bill and bill.state not in wip_status and not user_has_feature(request.user, billing_management_feature):
            return forbidden
        if form.data["state"] not in wip_status and not user_has_feature(request.user, billing_management_feature):
            return forbidden
        # Now, process form
        if bill and bill.state in wip_status:
            billDetailFormSet = BillDetailFormSet(request.POST, instance=bill)
            billExpenseFormSet = BillExpenseFormSet(request.POST, instance=bill)
            # Details cannot change in the same POST that leaves WIP state.
            if form.data["state"] not in wip_status and (billDetailFormSet.has_changed() or billExpenseFormSet.has_changed()):
                form.add_error("state", ValidationError(_("You can't modify bill details in that state")))
        if form.is_valid() and (billDetailFormSet is None or billDetailFormSet.is_valid()) and (billExpenseFormSet is None or billExpenseFormSet.is_valid()):
            bill = form.save()
            if billDetailFormSet:
                billDetailFormSet.save()
            if billExpenseFormSet:
                billExpenseFormSet.save()
            bill.save()  # Again, to take into account modified details.
            if bill.state in wip_status:
                success_url = reverse_lazy("billing:client_bill", args=[bill.id, ])
                # User want to add chargeable expenses ?
                if "Submit-expenses" in request.POST:
                    # compute again because user may add expenses during submit
                    expenses = bill.lead.expense_set.filter(chargeable=True, billexpense__isnull=True)
                    for expense in expenses:
                        BillExpense(bill=bill, expense=expense).save()
            else:
                success_url = request.GET.get('return_to', False) or reverse_lazy("billing:client_bill_detail", args=[bill.id, ])
                if bill.bill_file:
                    if form.changed_data == ["state"] and billDetailFormSet is None and billExpenseFormSet is None:
                        # only state has change. No need to regenerate bill file.
                        messages.add_message(request, messages.INFO, _("Bill state has been updated"))
                    elif "bill_file" in form.changed_data:
                        # a file has been provided by user himself. We must not generate a file and overwrite it.
                        messages.add_message(request, messages.WARNING, _("Using custom user file to replace current bill"))
                    elif bill.billexpense_set.exists() or bill.billdetail_set.exists():
                        # bill file exist but authorized admin change information and do not provide custom file. Let's generate again bill file
                        messages.add_message(request, messages.WARNING, _("A new bill is generated and replace the previous one"))
                        if os.path.exists(bill.bill_file.path):
                            os.remove(bill.bill_file.path)
                        generate_bill_pdf(bill, request)
                else:
                    # Bill file still not exist. Let's create it
                    messages.add_message(request, messages.INFO, _("A new bill file has been generated"))
                    generate_bill_pdf(bill, request)
            return HttpResponseRedirect(success_url)
    else:
        if bill:
            # Create a form to edit the given bill
            form = ClientBillForm(instance=bill)
            if bill.state in wip_status:
                billDetailFormSet = BillDetailFormSet(instance=bill)
                billExpenseFormSet = BillExpenseFormSet(instance=bill)
        else:
            # Still no bill, let's create it with its detail if at least mission or lead has been provided
            missions = []
            if request.GET.get("lead"):
                lead = Lead.objects.get(id=request.GET.get("lead"))
                missions = lead.mission_set.all()  # take all missions
            if request.GET.get("mission"):
                missions = [Mission.objects.get(id=request.GET.get("mission"))]
            if missions:
                bill = ClientBill(lead=missions[0].lead)
                bill.save()
                for mission in missions:
                    if mission.billing_mode == "TIME_SPENT":
                        # Dates arrive as YYYYMM strings; default to previous month.
                        if request.GET.get("start_date") and request.GET.get("end_date"):
                            start_date = date(int(request.GET.get("start_date")[0:4]), int(request.GET.get("start_date")[4:6]), 1)
                            end_date = date(int(request.GET.get("end_date")[0:4]), int(request.GET.get("end_date")[4:6]), 1)
                        else:
                            start_date = previousMonth(date.today())
                            end_date = date.today().replace(day=1)
                        update_client_bill_from_timesheet(bill, mission, start_date, end_date)
                    else:  # FIXED_PRICE mission
                        proportion = request.GET.get("proportion", 0.30)
                        bill = update_client_bill_from_proportion(bill, mission, proportion=proportion)
            if bill:
                form = ClientBillForm(instance=bill)
                billDetailFormSet = BillDetailFormSet(instance=bill)
                billExpenseFormSet = BillExpenseFormSet(instance=bill)
            else:
                # Simple virgin new form
                form = ClientBillForm()
    return render(request, "billing/client_bill_form.html",
                  {"bill_form": form,
                   "detail_formset": billDetailFormSet,
                   "detail_formset_helper": BillDetailFormSetHelper(),
                   "expense_formset": billExpenseFormSet,
                   "expense_formset_helper": BillExpenseFormSetHelper(),
                   "bill_id": bill.id if bill else None,
                   "can_delete": bill.state in wip_status if bill else False,
                   "can_preview": bill.state in wip_status if bill else False,
                   "have_expenses": have_expenses,
                   "user": request.user})
@pydici_non_public
@pydici_feature("billing_request")
def client_bill_detail(request, bill_id):
    """Display detailed bill information, metadata and bill pdf"""
    context = {"bill": ClientBill.objects.get(id=bill_id)}
    return render(request, "billing/client_bill_detail.html", context)
@pydici_non_public
@pydici_feature("billing_request")
def clientbill_delete(request, bill_id):
    """Delete a client bill while it is still in an early stage.

    Draft or proposed bills are deleted; sent bills cannot be removed and the
    user is redirected back to the bill form with a warning.
    """
    redirect_url = reverse("billing:client_bills_in_creation")
    try:
        bill = ClientBill.objects.get(id=bill_id)
    # Narrowed from a blanket ``except Exception`` that could hide real errors.
    except ClientBill.DoesNotExist:
        # Interpolate AFTER gettext so the placeholder string can be found
        # in the translation catalog (the old code translated the already
        # formatted string, which never matched).
        messages.add_message(request, messages.WARNING, _("Can't find bill %s") % bill_id)
        return HttpResponseRedirect(redirect_url)
    if bill.state in ("0_DRAFT", "0_PROPOSED"):
        bill.delete()
        messages.add_message(request, messages.INFO, _("Bill removed successfully"))
    else:
        messages.add_message(request, messages.WARNING, _("Can't remove a bill that have been sent. You may cancel it"))
        redirect_url = reverse_lazy("billing:client_bill", args=[bill.id, ])
    return HttpResponseRedirect(redirect_url)
@pydici_non_public
@pydici_feature("billing_management")
def supplier_bill(request, bill_id=None):
    """Add or edit supplier bill"""
    bill = None
    if bill_id:
        try:
            bill = SupplierBill.objects.get(id=bill_id)
        except SupplierBill.DoesNotExist:
            raise Http404
    lead_id = request.GET.get("lead")
    if request.POST:
        # Bound form: on success redirect, on failure fall through to
        # re-render the form with its validation errors.
        form = SupplierBillForm(request.POST, request.FILES, instance=bill)
        if form.is_valid():
            bill = form.save()
            return HttpResponseRedirect(reverse_lazy("billing:supplier_bills_archive"))
    elif bill:
        form = SupplierBillForm(instance=bill)
    elif lead_id:
        # Pre-select the lead when given as a query parameter.
        form = SupplierBillForm(initial={"lead": lead_id})
    else:
        form = SupplierBillForm()
    return render(request, "billing/supplier_bill_form.html",
                  {"bill_form": form,
                   "bill_id": bill.id if bill else None,
                   "can_delete": bill.state == "1_RECEIVED" if bill else False,
                   "user": request.user})
@pydici_non_public
@pydici_feature("billing_management")
def supplierbill_delete(request, bill_id):
    """Delete a supplier bill while it is still in an early stage.

    Received bills are deleted; bills in any other state cannot be removed and
    the user is redirected back to the bill form with a warning.
    """
    redirect_url = reverse("billing:supplier_bills_archive")
    try:
        bill = SupplierBill.objects.get(id=bill_id)
    # Narrowed from a blanket ``except Exception`` that could hide real errors.
    except SupplierBill.DoesNotExist:
        # Interpolate AFTER gettext so the placeholder string can be found
        # in the translation catalog (the old code translated the already
        # formatted string, which never matched).
        messages.add_message(request, messages.WARNING, _("Can't find bill %s") % bill_id)
        return HttpResponseRedirect(redirect_url)
    if bill.state == "1_RECEIVED":
        bill.delete()
        messages.add_message(request, messages.INFO, _("Bill removed successfully"))
    else:
        messages.add_message(request, messages.WARNING, _("Can't remove a bill in state %s. You may cancel it") % bill.get_state_display())
        redirect_url = reverse_lazy("billing:supplier_bill", args=[bill.id, ])
    return HttpResponseRedirect(redirect_url)
@pydici_non_public
@pydici_feature("billing_request")
def pre_billing(request, start_date=None, end_date=None, mine=False):
"""Pre billing page: help to identify bills to send"""
subsidiary = get_subsidiary_from_session(request)
team = None
team_consultants = None
if end_date is None:
end_date = date.today().replace(day=1)
else:
end_date = date(int(end_date[0:4]), int(end_date[4:6]), 1)
if start_date is None:
start_date = previousMonth(date.today())
else:
start_date = date(int(start_date[0:4]), int(start_date[4:6]), 1)
if end_date - start_date > timedelta(180):
# Prevent excessive window that is useless would lead to deny of service
start_date = (end_date - timedelta(180)).replace(day=1)
if end_date < start_date:
end_date = nextMonth(start_date)
if "team_id" in request.GET:
team = Consultant.objects.get(id=int(request.GET["team_id"]))
team_consultants = Consultant.objects.filter(staffing_manager=team)
mine = False
timeSpentBilling = {} # Key is lead, value is total and dict of mission(total, Mission billingData)
rates = {} # Key is mission, value is Consultant rates dict
internalBilling = {} # Same structure as timeSpentBilling but for billing between internal subsidiaries
try:
billing_consultant = Consultant.objects.get(trigramme__iexact=request.user.username)
except Consultant.DoesNotExist:
billing_consultant = None
mine = False
fixedPriceMissions = Mission.objects.filter(nature="PROD", billing_mode="FIXED_PRICE",
timesheet__working_date__gte=start_date,
timesheet__working_date__lt=end_date)
undefinedBillingModeMissions = Mission.objects.filter(nature="PROD", billing_mode=None,
timesheet__working_date__gte=start_date,
timesheet__working_date__lt=end_date)
timespent_timesheets = Timesheet.objects.filter(working_date__gte=start_date, working_date__lt=end_date,
mission__nature="PROD", mission__billing_mode="TIME_SPENT")
internalBillingTimesheets = Timesheet.objects.filter(working_date__gte=start_date, working_date__lt=end_date,
mission__nature="PROD")
internalBillingTimesheets = internalBillingTimesheets.exclude(Q(consultant__company=F("mission__subsidiary")) & Q(consultant__company=F("mission__lead__subsidiary")))
#TODO: handle fixed price mission fully delegated to a subsidiary
if mine: # Filter on consultant mission/lead as responsible
fixedPriceMissions = fixedPriceMissions.filter(Q(lead__responsible=billing_consultant) | Q(responsible=billing_consultant))
undefinedBillingModeMissions = undefinedBillingModeMissions.filter(Q(lead__responsible=billing_consultant) | Q(responsible=billing_consultant))
timespent_timesheets = timespent_timesheets.filter(Q(mission__lead__responsible=billing_consultant) | Q(mission__responsible=billing_consultant))
internalBillingTimesheets = internalBillingTimesheets.filter(Q(mission__lead__responsible=billing_consultant) | Q(mission__responsible=billing_consultant))
elif team: # Filter on team
fixedPriceMissions = fixedPriceMissions.filter(
Q(lead__responsible__in=team_consultants) | Q(responsible__in=team_consultants))
undefinedBillingModeMissions = undefinedBillingModeMissions.filter(
Q(lead__responsible__in=team_consultants) | Q(responsible__in=team_consultants))
timespent_timesheets = timespent_timesheets.filter(
Q(mission__lead__responsible__in=team_consultants) | Q(mission__responsible__in=team_consultants))
internalBillingTimesheets = internalBillingTimesheets.filter(
Q(mission__lead__responsible__in=team_consultants) | Q(mission__responsible__in=team_consultants))
fixedPriceMissions = fixedPriceMissions.order_by("lead").distinct()
undefinedBillingModeMissions = undefinedBillingModeMissions.order_by("lead").distinct()
if subsidiary: # filter on subsidiary
fixedPriceMissions = fixedPriceMissions.filter(subsidiary=subsidiary)
timespent_timesheets = timespent_timesheets.filter(mission__subsidiary=subsidiary)
undefinedBillingModeMissions = undefinedBillingModeMissions.filter(subsidiary=subsidiary)
timesheet_data = timespent_timesheets.order_by("mission__lead", "consultant").values_list("mission", "consultant").annotate(Sum("charge"))
timeSpentBilling = get_billing_info(timesheet_data)
for internal_subsidiary in Subsidiary.objects.all():
subsidiary_timesheet_data = internalBillingTimesheets.filter(consultant__company=internal_subsidiary)
for target_subsidiary in Subsidiary.objects.exclude(pk=internal_subsidiary.id):
timesheet_data = subsidiary_timesheet_data.filter(mission__lead__subsidiary=target_subsidiary)
timesheet_data = timesheet_data .order_by("mission__lead", "consultant").values_list("mission", "consultant").annotate(Sum("charge"))
billing_info = get_billing_info(timesheet_data)
if billing_info:
internalBilling[(internal_subsidiary,target_subsidiary)] = billing_info
scopes, team_current_filter, team_current_url_filter = get_team_scopes(subsidiary, team)
if team:
team_name = _("team %(manager_name)s") % {"manager_name": team}
else:
team_name = None
return render(request, "billing/pre_billing.html",
{"time_spent_billing": timeSpentBilling,
"fixed_price_missions": fixedPriceMissions,
"undefined_billing_mode_missions": undefinedBillingModeMissions,
"internal_billing": internalBilling,
"start_date": start_date,
"end_date": end_date,
"mine": mine,
"scope": team_name or subsidiary or _("Everybody"),
"team_current_filter": team_current_filter,
"team_current_url_filter": team_current_url_filter,
"scopes": scopes,
"user": request.user})
@pydici_non_public
@pydici_feature("billing_request")
def client_bills_in_creation(request):
"""Review client bill in preparation"""
return render(request, "billing/client_bills_in_creation.html",
{"data_url": reverse('billing:client_bills_in_creation_DT'),
"datatable_options": ''' "order": [[4, "desc"]], "columnDefs": [{ "orderable": false, "targets": [1, 3] }] ''',
"user": request.user})
@pydici_non_public
@pydici_feature("billing_request")
def client_bills_archive(request):
"""Review all client bill """
return render(request, "billing/client_bills_archive.html",
{"data_url": reverse('billing:client_bills_archive_DT'),
"datatable_options": ''' "lengthMenu": [ 10, 25, 50, 100, 500 ], "order": [[4, "desc"]], "columnDefs": [{ "orderable": false, "targets": [1, 2, 10] }] ''',
"user": request.user})
@pydici_non_public
@pydici_feature("billing_request")
def supplier_bills_archive(request):
"""Review all supplier bill """
return render(request, "billing/supplier_bills_archive.html",
{"data_url": reverse('billing:supplier_bills_archive_DT'),
"datatable_options": ''' "order": [[4, "desc"]], "columnDefs": [{ "orderable": false, "targets": [2, 10] }] ''',
"user": request.user})
@pydici_non_public
@pydici_feature("reports")
def lead_billing(request, lead_id):
"""lead / mission billing tab that display billing control and client/supplier bill list"""
lead = Lead.objects.get(id=lead_id)
return render(request, "billing/_lead_billing.html",
{"lead": lead,
"show_supplier_bills": True})
@pydici_non_public
@pydici_feature("reports")
def client_billing_control_pivotable(request, filter_on_subsidiary=None, filter_on_company=None, filter_on_lead=None):
"""Check lead/mission billing."""
subsidiary = get_subsidiary_from_session(request)
month_to_exc_from_my_leads = [date.today().replace(day=1)]
for i in range(6):
month_to_exc_from_my_leads.append(nextMonth(month_to_exc_from_my_leads[-1]))
month_to_exc_from_my_leads = [m.isoformat() for m in month_to_exc_from_my_leads]
data = get_client_billing_control_pivotable_data(filter_on_subsidiary=filter_on_subsidiary or subsidiary,
filter_on_company=filter_on_company,
filter_on_lead=filter_on_lead,
only_active=True)
return render(request, "billing/client_billing_control_pivotable.html",
{"data": data,
"consultant": Consultant.objects.filter(trigramme__iexact=request.user.username).first(),
"month_to_exc_from_my_leads": month_to_exc_from_my_leads,
"derivedAttributes": "{}"})
@pydici_non_public
@pydici_feature("reports")
@cache_page(60 * 60)
def graph_billing(request):
"""Bar graph of client bills by status"""
subsidiary = get_subsidiary_from_session(request)
bills = ClientBill.objects.filter(creation_date__gt=(date.today() - timedelta(3*365)), state__in=("1_SENT", "2_PAID"))
if subsidiary:
bills = bills.filter(lead__subsidiary=subsidiary)
if bills.count() == 0:
return HttpResponse()
bills = bills.annotate(month=TruncMonth("creation_date")).values("month")
bills = bills.annotate(amount_paid=Sum("amount", filter=Q(state="2_PAID")),
amount_sent=Sum("amount", filter=Q(state="1_SENT")))
bills = bills.values("month", "amount_paid", "amount_sent").order_by()
bills = [{"month": b["month"].isoformat(), "amount_paid": float(b["amount_paid"] or 0)/1000, "amount_sent": float(b["amount_sent"] or 0)/1000} for b in bills]
return render(request, "billing/graph_billing.html",
{"graph_data": json.dumps(bills),
"user": request.user})
@pydici_non_public
@pydici_feature("reports")
@cache_page(60 * 10)
def graph_yearly_billing(request):
"""Fiscal year billing per subsidiary"""
bills = ClientBill.objects.filter(state__in=("1_SENT", "2_PAID"))
years = get_fiscal_years_from_qs(bills, "creation_date")
month = int(get_parameter("FISCAL_YEAR_MONTH"))
data = {}
graph_data = []
labels = []
growth = []
subsidiary = get_subsidiary_from_session(request)
if subsidiary:
subsidiaries = [subsidiary,]
else:
subsidiaries = Subsidiary.objects.all()
for subsidiary in subsidiaries:
data[subsidiary.name] = []
for year in years:
turnover = {}
for subsidiary_name, amount in bills.filter(creation_date__gte=date(year, month, 1), creation_date__lt=date(year + 1, month, 1)).values_list("lead__subsidiary__name").annotate(Sum("amount")):
turnover[subsidiary_name] = float(amount)
for subsidiary in subsidiaries:
data[subsidiary.name].append(turnover.get(subsidiary.name, 0))
last_turnover = 0
for current_turnover in [sum(i) for i in zip(*list(data.values()))]: # Total per year
if last_turnover > 0:
growth.append(round(100 * (current_turnover - last_turnover) / last_turnover, 1))
else:
growth.append(None)
last_turnover = current_turnover
if years[-1] == date.today().year:
growth.pop() # Don't compute for on-going year.
graph_data.append(["x"] + years) # X (years) axis
# Add turnover per subsidiary
for key, value in list(data.items()):
if sum(value) == 0:
continue
value.insert(0, key)
graph_data.append(value)
labels.append(key)
# Add growth
graph_data.append([_("growth")] + growth)
labels.append(_("growth"))
return render(request, "billing/graph_yearly_billing.html",
{"graph_data": json.dumps(graph_data),
"years": years,
"subsidiaries_names" : json.dumps(labels),
"series_colors": COLORS,
"user": request.user})
@pydici_non_public
@pydici_feature("reports")
@cache_page(60 * 60 * 4)
def graph_outstanding_billing(request):
"""Graph outstanding billing, including overdue clients bills"""
end = nextMonth(date.today() + timedelta(45))
current = (end - timedelta(30) * 24).replace(day=1)
today = date.today()
months = []
outstanding = []
outstanding_overdue = []
graph_data = []
subsidiary = get_subsidiary_from_session(request)
while current < end:
months.append(current.isoformat())
next_month = nextMonth(current)
bills = ClientBill.objects.filter(due_date__lte=next_month, state__in=("1_SENT", "2_PAID")).exclude(payment_date__lt=current)
if subsidiary:
bills = bills.filter(lead__subsidiary=subsidiary)
overdue_bills = bills.exclude(payment_date__lte=F("due_date")).exclude(payment_date__gt=next_month).exclude(due_date__gt=today)
outstanding.append(float(bills.aggregate(Sum("amount"))["amount__sum"] or 0))
outstanding_overdue.append(float(overdue_bills.aggregate(Sum("amount"))["amount__sum"] or 0))
current = next_month
graph_data.append(["x"] + months)
graph_data.append([_("billing outstanding")] + outstanding)
graph_data.append([_("billing outstanding overdue")] + outstanding_overdue)
return render(request, "billing/graph_outstanding_billing.html",
{"graph_data": json.dumps(graph_data),
"series_colors": COLORS,
"user": request.user}) | digitalfox/pydici | billing/views.py | views.py | py | 39,484 | python | en | code | 122 | github-code | 6 | [
{
"api_name": "datetime.date.today",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "crm.utils.get_su... |
38217350686 | #!/usr/bin/env python3
import argparse
import sys
from typing import List, Union
import uuid
import capstone_gt
import gtirb
from gtirb_capstone.instructions import GtirbInstructionDecoder
def lookup_sym(node: gtirb.Block) -> Union[str, None]:
"""
Find a symbol name that describes the node.
"""
for sym in node.module.symbols:
if sym._payload == node:
return sym.name
def node_str(node: gtirb.Block) -> str:
"""
Generate a string that uniquely identifies the node
"""
if isinstance(node, gtirb.ProxyBlock):
return lookup_sym(node) or node.uuid
else:
return hex(node.address)
def has_undefined_branch(branches: List[gtirb.Edge]) -> bool:
"""
Determine if any of the branches are not resolved to a target.
"""
for branch in branches:
if isinstance(branch.target, gtirb.ProxyBlock) and not lookup_sym(
branch.target
):
return True
return False
def has_symbolic_branch(branches: List[gtirb.Edge]) -> bool:
"""
Determine if any of the branches are to a defined symbol.
"""
for branch in branches:
if lookup_sym(branch.target):
return True
return False
def is_skipped_section(node: gtirb.CodeBlock) -> bool:
"""
Determine if the node is part of an uninteresting section.
"""
skipped_sections = [
".plt",
".init",
".fini",
".MIPS.stubs",
]
for section in node.module.sections:
if section.name not in skipped_sections:
continue
for interval in section.byte_intervals:
start = interval.address
end = interval.address + interval.size
if start <= node.address and node.address < end:
return True
return False
def get_func_entry_name(node: gtirb.CodeBlock) -> Union[str, None]:
"""
If the node is the entry point to a function, return the function name.
Otherwise returns None
"""
for key, value in node.module.aux_data["functionNames"].data.items():
if node in node.module.aux_data["functionEntries"].data[key]:
return value.name
def belongs_to_skipped_func(node: gtirb.CodeBlock) -> bool:
"""
Determine if a CFG node is
"""
skipped_funcs = [
"__do_global_ctors_aux",
"__do_global_dtors_aux",
"__libc_csu_fini",
"__libc_csu_init",
"_dl_relocate_static_pie",
"_start",
"deregister_tm_clones",
"frame_dummy",
"register_tm_clones",
]
for name in skipped_funcs:
for key, value in node.module.aux_data["functionNames"].data.items():
if value.name == name:
if node in node.module.aux_data["functionBlocks"].data[key]:
return True
return is_skipped_section(node)
def is_padding(node: gtirb.CodeBlock) -> bool:
"""
Determine if a CFG node is padding
"""
for key, padding_size in node.module.aux_data["padding"].data.items():
padding_addr = key.element_id.address + key.displacement
if padding_addr == node.address:
return True
return False
def check_unreachable(module: gtirb.Module) -> int:
"""
Check a GTIRB module for unexpected unreachable code
"""
error_count = 0
for node in module.cfg_nodes:
if (
not isinstance(node, gtirb.CodeBlock)
or belongs_to_skipped_func(node)
or is_padding(node)
):
continue
func = get_func_entry_name(node)
if len(list(node.incoming_edges)) == 0 and func != "main":
if func:
# In some cases in our examples, function call sites are
# optimized away, but the function is left in the binary.
# We warn for these - if this code isn't being run, we're not
# testing whether ddisasm disassembled it well, and we may want
# to consider reworking those examples.
print(
'WARNING: unreachable function "{}" at {}'.format(
func, node_str(node)
)
)
else:
# Unreachable code that is not a function entry is likely to
# be an error, such as jump table where not all possible
# targets were discovered.
print("ERROR: unreachable code at", node_str(node))
error_count += 1
return error_count
def check_unresolved_branch(module: gtirb.Module) -> int:
"""
Check a GTIRB module for unresolved branches
"""
error_count = 0
for node in module.cfg_nodes:
if (
not isinstance(node, gtirb.CodeBlock)
or belongs_to_skipped_func(node)
or is_padding(node)
):
continue
branches = []
for edge in node.outgoing_edges:
if edge.label.type not in (
gtirb.Edge.Type.Return,
gtirb.Edge.Type.Fallthrough,
):
branches.append(edge)
# Calls to PLT functions seem to have a branch to a ProxyBlock for
# that symbol and a branch to the original PLT function.
if has_undefined_branch(branches) and not has_symbolic_branch(
branches
):
print("ERROR: unresolved jump in", node_str(node))
error_count += 1
return error_count
def check_cfg_empty(module: gtirb.Module) -> int:
"""
Check if a GTIRB module has an empty CFG
"""
if len(list(module.cfg_nodes)) == 0:
print("ERROR: CFG has no nodes")
return 1
return 0
def check_main_is_code(module: gtirb.Module) -> int:
"""
Check a GTIRB module for a `main` symbol that is not a CodeBlock.
Returns the number of errors found.
"""
error_count = 0
for sym in module.symbols:
if sym.name == "main":
if not isinstance(sym.referent, gtirb.CodeBlock):
print("ERROR: main is not code")
error_count += 1
return error_count
def check_decode_mode_matches_arch(module: gtirb.Module) -> int:
"""
Ensure a GTIRB only uses DecodeMode values that match the architecture
Returns the number of errors found.
"""
error_count = 0
# if a new mode is added, we will raise a KeyError unless it is added
# to this dictionary.
mode_to_arch = {
gtirb.CodeBlock.DecodeMode.Thumb: gtirb.module.Module.ISA.ARM
}
for block in module.code_blocks:
if block.decode_mode == gtirb.CodeBlock.DecodeMode.Default:
# "Default" is correct on every arch
continue
if module.isa != mode_to_arch[block.decode_mode]:
print(f"ERROR: {module.isa} does not support {block.decode_mode}")
error_count += 1
return error_count
def check_outgoing_edges(module: gtirb.Module) -> int:
"""
Check outgoing edges for invalid configurations
"""
error_count = 0
for node in module.cfg_nodes:
fallthrough_count = 0
direct_call_count = 0
direct_jump_count = 0
for edge in node.outgoing_edges:
if edge.label.direct and edge.label.type == gtirb.Edge.Type.Call:
direct_call_count += 1
elif (
edge.label.direct and edge.label.type == gtirb.Edge.Type.Branch
):
direct_jump_count += 1
elif edge.label.type == gtirb.Edge.Type.Fallthrough:
fallthrough_count += 1
if fallthrough_count > 1:
print("ERROR: multiple fallthrough from ", node_str(node))
error_count += 1
if direct_call_count > 1:
print("ERROR: multiple direct call from ", node_str(node))
error_count += 1
if direct_jump_count > 1:
print("ERROR: multiple direct jump from ", node_str(node))
error_count += 1
return error_count
def is_rep_loop(inst: capstone_gt.CsInsn) -> bool:
"""
Check if an instruction is a rep/repe/repne loop
"""
return inst.prefix[0] in [
capstone_gt.x86.X86_PREFIX_REP,
capstone_gt.x86.X86_PREFIX_REPE,
capstone_gt.x86.X86_PREFIX_REPNE,
]
def is_direct(inst: capstone_gt.CsInsn) -> bool:
"""
Check if a call or jump instruction is direct
"""
assert any(
inst.group(grp)
for grp in (
capstone_gt.x86.X86_GRP_CALL,
capstone_gt.x86.X86_GRP_JUMP,
capstone_gt.x86.X86_GRP_BRANCH_RELATIVE,
)
)
target = inst.operands[0]
return target.type == capstone_gt.CS_OP_IMM
def is_pc_relative(inst: capstone_gt.CsInsn) -> bool:
"""
Check if a call or jump instruction is pc-relative
"""
assert any(
inst.group(grp)
for grp in (
capstone_gt.x86.X86_GRP_CALL,
capstone_gt.x86.X86_GRP_JUMP,
capstone_gt.x86.X86_GRP_BRANCH_RELATIVE,
)
)
target = inst.operands[0]
return (
target.type == capstone_gt.CS_OP_MEM
and inst.reg_name(target.mem.base) == "rip"
)
def check_edge_instruction_group(module: gtirb.Module) -> int:
"""
Check edges for valid instruction groups
"""
# TODO: support non-x86 checks
if module.isa not in [gtirb.Module.ISA.X64, gtirb.Module.ISA.IA32]:
return 0
err_count = 0
decoder = GtirbInstructionDecoder(module.isa)
# TODO: there is one more generic capstone group, X86_GRP_PRIVILEGE.
# does it belong in Syscall?
edge_type_groups = {
gtirb.Edge.Type.Branch: set(
(
capstone_gt.x86.X86_GRP_JUMP,
capstone_gt.x86.X86_GRP_BRANCH_RELATIVE,
)
),
gtirb.Edge.Type.Call: set((capstone_gt.x86.X86_GRP_CALL,)),
gtirb.Edge.Type.Return: set((capstone_gt.x86.X86_GRP_RET,)),
gtirb.Edge.Type.Syscall: set((capstone_gt.x86.X86_GRP_INT,)),
gtirb.Edge.Type.Sysret: set((capstone_gt.x86.X86_GRP_IRET,)),
}
for edge in module.ir.cfg:
if edge.label.type == gtirb.Edge.Type.Fallthrough:
# fallthrough edges do not map to a specified instruction group
continue
block = edge.source
# get the last instruction
for instruction in decoder.get_instructions(block):
last_inst = instruction
# ensure instruction can be an edge
# Instructions with rep prefix can have self-edge
if (
edge.label.type == gtirb.Edge.Type.Branch
and is_rep_loop(last_inst)
and edge.target == block
):
continue
valid_groups = edge_type_groups[edge.label.type]
if not any(last_inst.group(grp) for grp in valid_groups):
print(
"ERROR: invalid edge instruction group at 0x{:08x}: {}".format(
last_inst.address, last_inst.groups
)
)
err_count += 1
return err_count
def check_cfg_completeness(module: gtirb.Module) -> int:
"""
Check we have 1 call/branch edge from all direct or
pc-relative calls/jumps.
"""
# TODO: support non-x86 checks
if module.isa not in [gtirb.Module.ISA.X64, gtirb.Module.ISA.IA32]:
return 0
err_count = 0
decoder = GtirbInstructionDecoder(module.isa)
for block in module.code_blocks:
# get the last instruction
for instruction in decoder.get_instructions(block):
last_inst = instruction
if last_inst.group(capstone_gt.x86.X86_GRP_CALL):
call_edges = [
edge
for edge in block.outgoing_edges
if edge.label.type == gtirb.EdgeType.Call
]
if is_direct(last_inst) or is_pc_relative(last_inst):
# do not count if we are using the 'call next; next: pop'
# trick to get the PC value.
if (
is_direct(last_inst)
and module.isa == gtirb.Module.ISA.IA32
and last_inst.operands[0].imm
== last_inst.address + last_inst.size
):
continue
if len(call_edges) != 1:
print(
"ERROR: expected 1 call edge at "
f"0x{last_inst.address:08x} and got {len(call_edges)}"
)
err_count += 1
elif last_inst.group(capstone_gt.x86.X86_GRP_JUMP):
# The first block of plt sections looks like:
# pushq .got.plt+8(%rip)
# jmpq *.got.plt+16(%rip) <----
# And the first 3 entries of .got.plt (or .got) are:
# .quad link-time address of _DYNAMIC # set by linker
# .quad Obj_Entry # set by ld.so
# .quad _rtld_bind_start # set by ld.so
# Currently we don't generate an edge for that
# jump because .got.plt+16 has a 0 and no relocations.
if (
block.section.address == block.address
and block.section.name in [".plt", ".plt.sec", ".plt.got"]
):
continue
branch_edges = [
edge
for edge in block.outgoing_edges
if edge.label.type == gtirb.EdgeType.Branch
]
if is_direct(last_inst) or is_pc_relative(last_inst):
if len(branch_edges) != 1:
print(
"ERROR: expected 1 branch edge at "
f"0x{last_inst.address:08x} and got"
f" {len(branch_edges)}"
)
err_count += 1
return err_count
def check_dangling_auxdata(module: gtirb.Module) -> int:
"""
Check for dangling UUIDs in elfSymbolTabIdxInfo auxdata
"""
err_count = 0
for k, v in module.aux_data["elfSymbolTabIdxInfo"].data.items():
if not isinstance(k, gtirb.Symbol):
if isinstance(k, uuid.UUID):
print(
"ERROR: expected elfSymbolTabInfo key to be Symbol, but "
f"it is a dangling UUID: {k}, {v}"
)
else:
print(
"ERROR: expected elfSymbolTabInfo key to be Symbol, but "
f"it is {type(k)}: {k}, {v}"
)
err_count += 1
return err_count
CHECKS = {
"unreachable": check_unreachable,
"unresolved_branch": check_unresolved_branch,
"cfg_empty": check_cfg_empty,
"main_is_code": check_main_is_code,
"decode_mode_matches_arch": check_decode_mode_matches_arch,
"outgoing_edges": check_outgoing_edges,
"edge_instruction_group": check_edge_instruction_group,
"cfg_completeness": check_cfg_completeness,
"dangling_auxdata": check_dangling_auxdata,
}
class NoSuchCheckError(Exception):
"""Indicates an invalid GTIRB check was specified"""
pass
def run_checks(module: gtirb.Module, selected_checks: List[str]):
"""
Run specified checks
Raises NoSuchCheckError for unexpected names in selected_checks
"""
error_count = 0
for selected_check in selected_checks:
if selected_check not in CHECKS:
raise NoSuchCheckError(f"No such check: {selected_check}")
error_count += CHECKS[selected_check](module)
return error_count
def main():
parser = argparse.ArgumentParser()
parser.add_argument("path")
check_names = list(CHECKS.keys())
check_names.append("all")
parser.add_argument(
"--check",
choices=check_names,
default="all",
help="The name of the check to run",
)
args = parser.parse_args()
module = gtirb.IR.load_protobuf(args.path).modules[0]
checks = list(CHECKS.keys()) if args.check == "all" else [args.check]
error_count = run_checks(module, checks)
sys.exit(error_count)
if __name__ == "__main__":
main()
| GrammaTech/ddisasm | tests/check_gtirb.py | check_gtirb.py | py | 16,220 | python | en | code | 581 | github-code | 6 | [
{
"api_name": "gtirb.Block",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "typing.Union",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "gtirb.Block",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "gtirb.ProxyBlock",
... |
38756128140 | """
Kubernetes server class implementation.
"""
from __future__ import absolute_import
import os
import logging
import uuid
from kubernetes import config
from kubernetes import client as k8sclient
from kubernetes.client.rest import ApiException
from retry import retry
from pytest_server_fixtures import CONFIG
from .common import (ServerClass,
merge_dicts,
ServerFixtureNotRunningException,
ServerFixtureNotTerminatedException)
log = logging.getLogger(__name__)
IN_CLUSTER = os.path.exists('/var/run/secrets/kubernetes.io/namespace')
fixture_namespace = CONFIG.k8s_namespace
if IN_CLUSTER:
config.load_incluster_config()
if not fixture_namespace:
with open('/var/run/secrets/kubernetes.io/namespace', 'r') as f:
fixture_namespace = f.read().strp()
log.info("SERVER_FIXTURES_K8S_NAMESPACE is not set, using current namespace '%s'", fixture_namespace)
if CONFIG.k8s_local_test:
log.info("====== Running K8S Server Class in Test Mode =====")
config.load_kube_config()
fixture_namespace = 'default'
class NotRunningInKubernetesException(Exception):
"""Thrown when code is not running as a Pod inside a Kubernetes cluster."""
pass
class KubernetesServer(ServerClass):
"""Kubernetes server class."""
def __init__(self,
server_type,
cmd,
get_args,
env,
image,
labels={}):
super(KubernetesServer, self).__init__(cmd, get_args, env)
if not fixture_namespace:
raise NotRunningInKubernetesException()
self._image = image
self._labels = merge_dicts(labels, {
'server-fixtures': 'kubernetes-server-fixtures',
'server-fixtures/server-type': server_type,
'server-fixtures/session-id': CONFIG.session_id,
})
self._v1api = k8sclient.CoreV1Api()
def launch(self):
try:
log.debug('%s Launching pod' % self._log_prefix)
self._create_pod()
self._wait_until_running()
log.debug('%s Pod is running' % self._log_prefix)
except ApiException as e:
log.warning('%s Error while launching pod: %s', self._log_prefix, e)
raise
def run(self):
pass
def teardown(self):
self._delete_pod()
# TODO: provide an flag to skip the wait to speed up the tests?
self._wait_until_teardown()
@property
def is_running(self):
try:
return self._get_pod_status().phase == 'Running'
except ApiException as e:
if e.status == 404:
# return false if pod does not exists
return False
raise
@property
def hostname(self):
if not self.is_running:
raise ServerFixtureNotRunningException()
return self._get_pod_status().pod_ip
@property
def namespace(self):
return fixture_namespace
@property
def labels(self):
return self._labels
def _get_pod_spec(self):
container = k8sclient.V1Container(
name='fixture',
image=self._image,
command=self._get_cmd(),
env=[k8sclient.V1EnvVar(name=k, value=v) for k, v in self._env.iteritems()],
)
return k8sclient.V1PodSpec(
containers=[container]
)
def _create_pod(self):
try:
pod = k8sclient.V1Pod()
pod.metadata = k8sclient.V1ObjectMeta(name=self.name, labels=self._labels)
pod.spec = self._get_pod_spec()
self._v1api.create_namespaced_pod(namespace=self.namespace, body=pod)
except ApiException as e:
log.error("%s Failed to create pod: %s", self._log_prefix, e.reason)
raise
def _delete_pod(self):
try:
body = k8sclient.V1DeleteOptions()
# delete the pod without waiting
body.grace_period_seconds = 1
self._v1api.delete_namespaced_pod(namespace=self.namespace, name=self.name, body=body)
except ApiException as e:
log.error("%s Failed to delete pod: %s", self._log_prefix, e.reason)
def _get_pod_status(self):
try:
resp = self._v1api.read_namespaced_pod_status(namespace=self.namespace, name=self.name)
return resp.status
except ApiException as e:
log.error("%s Failed to read pod status: %s", self._log_prefix, e.reason)
raise
@retry(ServerFixtureNotRunningException, tries=28, delay=1, backoff=2, max_delay=10)
def _wait_until_running(self):
log.debug("%s Waiting for pod status to become running", self._log_prefix)
if not self.is_running:
raise ServerFixtureNotRunningException()
@retry(ServerFixtureNotTerminatedException, tries=28, delay=1, backoff=2, max_delay=10)
def _wait_until_teardown(self):
try:
self._get_pod_status()
# waiting for pod to be deleted (expect ApiException with status 404)
raise ServerFixtureNotTerminatedException()
except ApiException as e:
if e.status == 404:
return
raise
@property
def _log_prefix(self):
return "[K8S %s:%s]" % (self.namespace, self.name)
| man-group/pytest-plugins | pytest-server-fixtures/pytest_server_fixtures/serverclass/kubernetes.py | kubernetes.py | py | 5,398 | python | en | code | 526 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "pytest_server_fixtures.... |
72531788029 | # pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
# pylint: disable=unused-variable
# pylint: disable=too-many-arguments
import pytest
from models_library.api_schemas_webserver.projects import (
ProjectCreateNew,
ProjectGet,
ProjectListItem,
ProjectReplace,
TaskProjectGet,
)
from models_library.generics import Envelope
from models_library.rest_pagination import Page
from pydantic import parse_obj_as
from pytest_simcore.simcore_webserver_projects_rest_api import (
CREATE_FROM_SERVICE,
CREATE_FROM_TEMPLATE,
CREATE_FROM_TEMPLATE__TASK_RESULT,
GET_PROJECT,
LIST_PROJECTS,
NEW_PROJECT,
REPLACE_PROJECT,
REPLACE_PROJECT_ON_MODIFIED,
HttpApiCallCapture,
)
@pytest.mark.parametrize(
"api_call",
(NEW_PROJECT, CREATE_FROM_SERVICE, CREATE_FROM_TEMPLATE),
ids=lambda c: c.name,
)
def test_create_project_schemas(api_call: HttpApiCallCapture):
request_payload = ProjectCreateNew.parse_obj(api_call.request_payload)
assert request_payload
response_body = parse_obj_as(
Envelope[ProjectGet] | Envelope[TaskProjectGet], api_call.response_body
)
assert response_body
@pytest.mark.parametrize(
"api_call",
(LIST_PROJECTS,),
ids=lambda c: c.name,
)
def test_list_project_schemas(api_call: HttpApiCallCapture):
assert api_call.request_payload is None
response_body = parse_obj_as(Page[ProjectListItem], api_call.response_body)
assert response_body
@pytest.mark.parametrize(
"api_call",
(GET_PROJECT, CREATE_FROM_TEMPLATE__TASK_RESULT),
ids=lambda c: c.name,
)
def test_get_project_schemas(api_call: HttpApiCallCapture):
# NOTE: that response_body here is the exported values
# and therefore ProjectGet has to be implemented in such a way that
# can also parse exported values! (e.g. Json does not allow that, or ocassionaly exclude_none)
response_body = parse_obj_as(Envelope[ProjectGet], api_call.response_body)
assert response_body
@pytest.mark.parametrize(
"api_call",
(REPLACE_PROJECT, REPLACE_PROJECT_ON_MODIFIED),
ids=lambda c: c.name,
)
def test_replace_project_schemas(api_call: HttpApiCallCapture):
request_payload = parse_obj_as(ProjectReplace, api_call.request_payload)
assert request_payload
response_body = parse_obj_as(Envelope[ProjectGet], api_call.response_body)
assert response_body
| ITISFoundation/osparc-simcore | packages/models-library/tests/test_api_schemas_webserver_projects.py | test_api_schemas_webserver_projects.py | py | 2,403 | python | en | code | 35 | github-code | 6 | [
{
"api_name": "pytest_simcore.simcore_webserver_projects_rest_api.HttpApiCallCapture",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "models_library.api_schemas_webserver.projects.ProjectCreateNew.parse_obj",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "mo... |
30815309971 | from typing import List, Tuple
import cmath
import random
_inv_root2 = 1 / cmath.sqrt(2)
_root2 = cmath.sqrt(2)
bra = List[complex]
ket = List[complex]
def vdot(v1, v2):
return sum(v1[i] * v2[i] for i in range(len(v1)))
def vinv(v):
return [-x for x in v]
def qdot(q1: ket, q2: ket) -> complex:
return sum([q1[i].conjugate() * q2[i] for i in range(len(q1))])
def qmag(q1: ket, q2: ket) -> complex:
return qdot(q1, q2) * qdot(q2, q1)
def qbra(q1: ket) -> bra:
return [q.conjugate() for q in q1]
def qbasis(a: complex, b: complex, basis_a: ket, basis_b: ket) -> ket:
return [a * q1 + b * q2 for q1, q2 in zip(basis_a, basis_b)]
class QSpin:
    """A single spin-1/2 quantum state with measurement along arbitrary axes.

    The state is a two-component complex vector (ket) in the up/down basis;
    coordinate triples (x, y, z) refer to the spin direction in real space.
    """

    # Common basis states expressed in the up/down basis.
    _spin_states = {
        'up': [1, 0],
        'down': [0, 1],
        'right': qbasis(_inv_root2, _inv_root2, [1, 0], [0, 1]),
        'left': qbasis(_inv_root2, -_inv_root2, [1, 0], [0, 1]),
        'in': qbasis(_inv_root2, 1j * _inv_root2, [1, 0], [0, 1]),
        'out': qbasis(_inv_root2, -1j * _inv_root2, [1, 0], [0, 1]),
    }

    spin_state_type = Tuple[complex, complex]
    spin_coords_type = Tuple[float, float, float]

    def __init__(self):
        # Start in the |up> state.
        self.state = QSpin._spin_states['up']

    def set_state(self, new_state: spin_state_type):
        self.state = new_state

    @staticmethod
    def coords_to_state(coords: spin_coords_type):
        """Convert direction coordinates (x, y, z) to (c_up, c_down) amplitudes."""
        x, y, z = coords
        cu = z + _inv_root2 * y + _inv_root2 * x
        cd = _inv_root2 * x + 1j * _inv_root2 * y
        return cu, cd

    @staticmethod
    def state_to_coords(state: spin_state_type):
        """Convert (c_up, c_down) amplitudes back to direction coordinates."""
        cu = qdot(QSpin._spin_states['up'], state)
        cd = qdot(QSpin._spin_states['down'], state)
        y = cd.imag * _root2
        x = cd.real * _root2
        z = cu.real - _inv_root2 * y - _inv_root2 * x
        return x.real, y.real, z.real

    def set_state_with_coords(self, coords: spin_coords_type):
        cu, cd = QSpin.coords_to_state(coords)
        self.state = [cu, cd]

    def state_as_coords(self):
        return QSpin.state_to_coords(self.state)

    def measure_spin(self, coords: spin_coords_type):
        """Measure the spin along `coords`; collapses the state, returns +1 or -1.

        Raises ValueError if `coords` is not (approximately) unit length.
        """
        # Compare against 1 with a tolerance instead of exact equality:
        # directions built from 1/sqrt(2) (e.g. [0, _inv_root2, -_inv_root2])
        # only satisfy x^2 + y^2 + z^2 == 1 up to rounding, so the previous
        # `!= 1` check rejected valid input.
        if abs(coords[0] ** 2 + coords[1] ** 2 + coords[2] ** 2 - 1) > 1e-9:
            raise ValueError('Spin coordinates must be of unit length')
        # Angle between the current spin direction and the measurement axis.
        angle = cmath.acos(vdot(self.state_as_coords(), coords))
        # Born rule: probability of measuring +1.
        p1 = cmath.cos(0.5 * angle).real ** 2
        # pn1 = cmath.sin(0.5 * angle).real ** 2
        if random.random() < p1:
            # Collapse onto the measurement axis.
            self.set_state(QSpin.coords_to_state(coords))
            return 1
        else:
            # Collapse onto the opposite axis.
            self.set_state(QSpin.coords_to_state(vinv(coords)))
            return -1
def matrix_mul(A, v):
    """Matrix-vector product A @ v, returned as a list of row dot products."""
    return [sum(row[j] * v[j] for j in range(len(row))) for row in A]
def test_xyz():
    # Scratch check: build the spin operator M = sigma . n for the direction
    # n = (x, y, z) and apply it to a candidate state vector A.
    x, y, z = [0, _inv_root2, -_inv_root2]
    # Should print ~1: (x, y, z) is meant to be a unit direction
    # (exact equality fails because of float rounding).
    print(x ** 2 + y ** 2 + z ** 2)
    I = 1j
    # [[z, x - iy], [x + iy, -z]] is the standard spin matrix for direction n.
    M = [[z, x-y*I], [x+y*I, -z]]
    A = [
        _inv_root2 * x + _inv_root2 * y + z,
        _inv_root2 * x - _inv_root2 * I * y
    ]
    # If A is a +1 eigenvector of M, the next two prints should match.
    print(matrix_mul(M, A))
    print(A)
if __name__ == '__main__':
    test_xyz()
    # NOTE: exit() makes everything below unreachable; remove it to run the
    # measurement demo that follows.
    exit()
    spin = QSpin()
    # print(QSpin._spin_states['up'])
    # print(QSpin._spin_states['right'])
    # print(qmag(QSpin._spin_states['up'], QSpin._spin_states['right']))
    # print("initial state")
    print(spin.state_as_coords())
    # Repeated measurements along the same axis: the first collapses the
    # state, so subsequent ones should repeat the same result.
    print(spin.measure_spin([0, 1, 0]))
    print(spin.measure_spin([0, 1, 0]))
    print(spin.measure_spin([0, 1, 0]))
    print(spin.measure_spin([0, 1, 0]))
    print(spin.measure_spin([0, 1, 0]))
    # print(spin.measure_spin([1, 0, 0]))
    # print(spin.measure_spin([1, 0, 0]))
    # print(spin.measure_spin([1, 0, 0]))
    # print(spin.measure_spin([1, 0, 0]))
    # print(spin.measure_spin([1, 0, 0]))
    # print(spin.measure_spin([1, 0, 0]))
    # print("final state")
    # print(spin.state_as_coords())
| Wroppy/werry_math | physics/quantum/systems.py | systems.py | py | 3,990 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cmath.sqrt",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cmath.sqrt",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 8,
... |
30357445001 | from traits.api import HasTraits, Code
from traitsui.api import Item, Group, View
# The main demo class:
class CodeEditorDemo(HasTraits):
    """Defines the CodeEditor demo class.

    Displays the same Code trait rendered with each editor style
    (simple, custom, text, readonly) stacked in a single view.
    """
    # Define a trait to view:
    code_sample = Code('import sys\n\nsys.print("hello world!")')
    # Display specification: Item('_') inserts a separator between styles.
    code_group = Group(
        Item('code_sample', style='simple', label='Simple'),
        Item('_'),
        Item('code_sample', style='custom', label='Custom'),
        Item('_'),
        Item('code_sample', style='text', label='Text'),
        Item('_'),
        Item('code_sample', style='readonly', label='ReadOnly'),
    )
    # Demo view:
    traits_view = View(
        code_group, title='CodeEditor', width=600, height=600, buttons=['OK']
    )
# Module-level instance picked up by the traitsui demo framework:
demo = CodeEditorDemo()
# Run the demo standalone (if invoked from the command line):
if __name__ == "__main__":
    demo.configure_traits()
| enthought/traitsui | traitsui/examples/demo/Standard_Editors/CodeEditor_demo.py | CodeEditor_demo.py | py | 922 | python | en | code | 290 | github-code | 6 | [
{
"api_name": "traits.api.HasTraits",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "traits.api.Code",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "traitsui.api.Group",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "traitsui.api.I... |
12096156845 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import requests
from bs4 import BeautifulSoup, Tag
from workflow import Workflow
def main(wf):
    """Alfred workflow entry point: look up wf.args[0] on zdic.net.

    Scrapes the pinyin (used as the result title) and the definition
    paragraphs (used as subtitle / large text) from the search result page,
    then emits a single Alfred feedback item linking to the result URL.
    """
    if not wf.args:
        return
    word = wf.args[0].strip()
    resp = requests.post("http://www.zdic.net/sousuo/", data={"q": word})
    soup = BeautifulSoup(resp.content, "html.parser")
    #soup = BeautifulSoup(open("./test/%s.html"% word, "rb").read(), "html.parser")
    # Pinyin -- used as the item title when available; fall back to the
    # pinyin link text, then to the query word itself.
    title= None
    pinyin = soup.find("span", attrs={"class": "dicpy"})
    if pinyin:
        title = pinyin.string
        if not title and pinyin.find("a"):
            title = pinyin.find("a").string
    if not title:
        title = word
    # Find the definition block: the first <p class="zdct..."> paragraph
    # that contains a pinyin span.
    explain = None
    for py in soup.findAll("span", attrs={"class": "dicpy"}):
        p = py.findParent()
        if p.name == "p" and p.attrs.get("class", [""])[0].startswith("zdct"):
            explain = p
            break
    texts = []
    if explain:
        # Collect the following sibling paragraphs that share the same class;
        # stop at the first paragraph of a different class.
        for e in explain.nextSiblingGenerator():
            if not isinstance(e, Tag): continue
            if e.attrs.get("class", [""])[0] != explain.attrs['class'][0]:
                break
            texts.append(e.text)
    wf.add_item(title, " ".join(texts), arg=resp.url, valid=True, largetext="\n".join(texts))
    wf.send_feedback()
if __name__ == '__main__':
    # Workflow.run wraps main() with error handling and returns its exit code.
    wf = Workflow()
    sys.exit(wf.run(main))
| jinuljt/zdic.alfredworkflow | zdic.py | zdic.py | py | 1,421 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "requests.post",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "bs4.Tag",
"line_number": 43,
"usage_type": "argument"
},
{
"api_name": "workflow.Workflow",
"... |
17079551201 | from gensim.models import KeyedVectors
from anki_corpus_for_gensim import bg_stopwords,en_stopwords
import json, argparse, time
from flask import Flask, request
from flask_cors import CORS
##################################################
# API part
##################################################
app = Flask(__name__)
cors = CORS(app)
def get_wmdist_bg(bg_sent, en_sent):
    """Word Mover's Distance between a Bulgarian and an English token list.

    Tokens are prefixed with 'bg:' / 'en:' to match the multilingual
    embedding vocabulary, and stopwords are dropped first.  Relies on the
    module-level `multilingual_model` loaded in the __main__ block.
    """
    return multilingual_model.wmdistance(
        ['bg:'+x for x in bg_sent if x not in bg_stopwords],
        ['en:'+x for x in en_sent if x not in en_stopwords]
    )
@app.route('/', methods=['POST'])
def predict():
    """Score posted sentence pairs: {'sent': [{'bg': ..., 'en': ...}, ...]}.

    Returns JSON {'results': [...]} with one word-mover distance per pair.
    """
    start = time.time()
    data = request.data.decode("utf-8")
    results = list()
    if data == "":
        # Fallback for form-encoded submissions.
        # NOTE(review): request.form is a MultiDict, not a JSON string, so
        # json.loads(params) looks like it would raise TypeError here --
        # confirm whether this branch is ever exercised.
        params = request.form
        sentences = json.loads(params)
        print(sentences)
        # bg_sent = json.loads(params['bg']).split()
        # en_sent = json.loads(params['en']).split()
    else:
        params = json.loads(data)
        sentences = params
        print(sentences)
        # bg_sent = params['bg'].split()
        # en_sent = params['en'].split()
    for sent in sentences['sent']:
        print(sent)
        # Whitespace tokenization on both sides before scoring.
        bg_sent = sent['bg'].split()
        en_sent = sent['en'].split()
        print(bg_sent)
        print(en_sent)
        match = get_wmdist_bg(bg_sent, en_sent)
        results.append(match)
    print('Returning', results)
    json_data = json.dumps({'results': results})
    print("Time spent handling the request: %f" % (time.time() - start))
    return json_data
##################################################
# END API part
##################################################
if __name__ == "__main__":
    # No CLI options yet; argparse is kept as a hook for future flags.
    parser = argparse.ArgumentParser()
    args = parser.parse_args()
    # Load the shared bg/en embedding space used by get_wmdist_bg().
    multilingual_model = KeyedVectors.load('glove_word_embeddings/multilingual.gensim')
    print('Starting the API')
    app.run(host="0.0.0.0", debug=True)
| teodorToshkov/sentencesimilarity | app.py | app.py | py | 1,897 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "anki_corpus_for_gensim.bg_stopwords",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "anki_... |
29778121292 | import requests, os, json
from bs4 import BeautifulSoup as BS
from random import choice
_BASE_PATH = os.path.dirname(__file__)
_DATA_FILE = os.path.join(_BASE_PATH,'data.txt')
_ACTORS_FILE = os.path.join(_BASE_PATH,'actors.txt')
_DIRECTORS_FILE = os.path.join(_BASE_PATH,'directors.txt')
_YEARS_FILE = os.path.join(_BASE_PATH,'years.txt')
_IMDB_LIST = r"http://www.imdb.com/chart/top?ref_=nv_mv_250_6"
_IMDB_BASE_URL = r"http://www.imdb.com"
_movies = []
_actorsPool = []
_directorsPool = []
_yearsPool = []
def _getActorPool():
    """Collect up to 100 distinct actor names from the scraped movie list.

    Fetches each movie page in turn and stops as soon as the pool is full.
    """
    actors = set()
    for m in _movies:
        for a in _getActorsForMovie(m['link']):
            actors.add(a)
            if len(actors) >= 100:
                break
        # Also stop the outer loop: the original `break` only left the inner
        # loop, so every remaining movie page was still fetched over the
        # network even after the pool was full.
        if len(actors) >= 100:
            break
    return list(actors)
def _getDirectorPool():
    """Collect up to 100 distinct director names from the scraped movies."""
    pool = set()
    for movie in _movies:
        pool.add(_getDirectorForMovie(movie['link']))
        if len(pool) >= 100:
            break
    return list(pool)
def _getYearPool():
    """Distinct release years across all scraped movies."""
    return list({movie['year'] for movie in _movies})
def _init():
    """Populate the module-level data pools, scraping IMDB on first run.

    Each pool is read from its cache file when present; otherwise it is
    built (which hits the network) and written back to disk.  Sets the
    _movies, _actorsPool, _directorsPool and _yearsPool globals.
    """
    global _movies
    global _actorsPool
    global _directorsPool
    global _yearsPool

    def _load_or_build(path, build, post=lambda x: x):
        # NOTE: eval() on the cache file is kept for compatibility with the
        # existing on-disk format, but it executes arbitrary code from disk --
        # consider migrating to json or ast.literal_eval.
        try:
            # `with` guarantees the handle is closed; the previous version
            # leaked open files on several cache-hit paths.
            with open(path, encoding='ISO-8859-1') as f:
                return post(eval(f.read()))
        except FileNotFoundError:
            data = build()
            with open(path, 'w', encoding='ISO-8859-1') as f:
                f.write(str(data))
            return data

    _movies = _load_or_build(_DATA_FILE, _getMovies)
    _actorsPool = _load_or_build(_ACTORS_FILE, _getActorPool, list)
    _directorsPool = _load_or_build(_DIRECTORS_FILE, _getDirectorPool, list)
    _yearsPool = _load_or_build(_YEARS_FILE, _getYearPool, list)
def _getMovies():
    """Scrape IMDB's Top 250 chart into a list of movie dicts.

    Each dict has 'link', 'title', 'year', 'actors' and 'director' keys;
    actors/director require one extra page fetch per movie, so this is slow.
    """
    response = requests.get(_IMDB_LIST, headers = {'accept-language': 'en-US, en'})
    text = response.text
    soup = BS(text, 'html.parser')
    movieList = []
    for line in soup.findAll('td', {"class": "titleColumn"}):
        m = dict()
        m['link'] = _IMDB_BASE_URL + line.find('a').attrs['href']
        m['title'] = line.find('a').text
        # The span holds "(1994)"; [1:-1] strips the parentheses.
        m['year'] = (line.find('span').text)[1:-1]
        m['actors'],m['director'] = _getActorsAndDirector(m['link'])
        movieList.append(m)
    return movieList
def _getActorsForMovie(link):
    """Return the billed actor names scraped from one IMDB title page."""
    soup = BS(requests.get(link).text, 'html.parser')
    return [tag.find('span', {"itemprop": "name"}).text
            for tag in soup.findAll('span', {"itemprop": "actors"})]
def _getDirectorForMovie(link):
    """Return the director's name scraped from one IMDB title page."""
    page_text = requests.get(link).text
    soup = BS(page_text, 'html.parser')
    director_tag = soup.find('span', {"itemprop": "director"})
    return director_tag.find('span', {"itemprop": "name"}).text
def _getActorsAndDirector(link):
    """Scrape one IMDB title page and return (actor names, director name).

    Fetches the page once, unlike calling the two single-purpose helpers.
    """
    soup = BS(requests.get(link).text, 'html.parser')
    actors = [tag.find('span', {"itemprop": "name"}).text
              for tag in soup.findAll('span', {"itemprop": "actors"})]
    director_tag = soup.find('span', {"itemprop": "director"})
    director = director_tag.find('span', {"itemprop": "name"}).text
    return actors, director
def getRandomQuestion(nrOfChoices=4):
    """Build one multiple-choice quiz question as a JSON string.

    Returns a JSON object with 'question', 'choices' (nrOfChoices entries,
    exactly one correct) and 'answer' keys.
    """
    if not (_movies and _actorsPool and _directorsPool and _yearsPool):
        _init()  # Lazily load/scrape the data pools on first use.
    # Get random movie
    movie = choice(_movies)
    # question type -> [movie field, distractor pool, question template]
    group = {'a': ['actors', _actorsPool, 'Who of the following starred in {0}?'],
             'd': ['director', _directorsPool, 'Who was the director of {0}?'],
             'y': ['year', _yearsPool, 'When was the movie {0} premiered?']}
    questionType = choice(['a', 'd', 'y'])  # a for actor, d for director, y for year
    answer_field = movie[group[questionType][0]]
    if questionType == 'a':
        correctAnswer = choice(answer_field)
        # Exclude all of this movie's actors so no distractor is also correct.
        exclude = set(answer_field)
    else:
        correctAnswer = answer_field
        # Wrap the scalar answer in a set: the previous set(string) call split
        # the year/director string into characters, leaving the correct answer
        # in the distractor pool so it could appear twice among the choices.
        exclude = {answer_field}
    pool = list(set(group[questionType][1]) - exclude)
    choices = []
    for _ in range(nrOfChoices - 1):
        filler = choice(pool)
        pool.remove(filler)
        choices.append(filler)
    # Insert the correct answer at any slot; the old range(3) pinned it to
    # the first three positions regardless of nrOfChoices.
    choices.insert(choice(range(nrOfChoices)), correctAnswer)
    jsonObj = json.dumps({'question': group[questionType][2].format(movie['title']),
                          'choices': choices,
                          'answer': correctAnswer})
    return jsonObj
'''
TO BE ABLE TO USE THE MODULE AS A SCRIPT AS WELL
'''
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Y U so stupid!?')
    parser.add_argument('-q', '--questions', help='number of questions to return', type=int, nargs=1, default=1)
    parser.add_argument('-d', '--difficulty', help='difficulty,defined by number of choices', type=int, choices=range(2, 11),default=4)
    parser.add_argument('-k', '--keep-them-coming', help='continue getting questions until input is other than y/Y', action='store_true')
    args = parser.parse_args()
    difficulty = args.difficulty
    questions = args.questions
    # nargs=1 yields a one-element list when the flag is supplied, but the
    # defaults are plain ints -- unwrap the list case.
    if type(difficulty) == type([]):
        difficulty = difficulty[0]
    if type(questions) == type([]):
        questions = questions[0]
    for i in range(questions):
        print(getRandomQuestion(difficulty))
    # Interactive mode: keep printing questions until the user declines.
    if args.keep_them_coming:
        userInput = ""
        while True:
            answer = input('\nKeep them coming? (y):')
            if not(str(answer).lower() == 'y' or answer.lower() == 'yes'):
                exit()
            print(getRandomQuestion(args.difficulty))
| asav13/PRLA-Verk5 | part2/y_u_so_stupid.py | y_u_so_stupid.py | py | 6,583 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.dirname",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7... |
4142566592 | import ast
import networkx as nx
import numpy as np
from collections import defaultdict
import json
from tqdm import tqdm
# nodes = [0, 1, 2, 3, 4]
# graph = [[4, 3, 0.75], [4, 1, 0.81], [4, 2, 0.97], [4, 0, 0.52]]
# page_rank_probs = defaultdict(float)
# DG = nx.DiGraph()
# DG.add_nodes_from(nodes)
# DG.add_weighted_edges_from(graph)
# PAGE_RANK = nx.pagerank(DG, alpha=0.95)
# # for sub_graph in nx.weakly_connected_components(DG):
# # sub_graph_size = len(sub_graph)
# # PAGE_RANK = nx.pagerank(DG.subgraph(list(sub_graph)))
# #
# # normalized_PAGERANK = {k: v * (sub_graph_size) / 5 for k, v in PAGE_RANK.items()}
# # page_rank_probs.update(normalized_PAGERANK)
# # # print ('normalized_PAGERANK', normalized_PAGERANK)
# #
# print(page_rank_probs)
# print(PAGE_RANK)
#
# # def find_ngrams(input_list, n):
# # print ([input_list[i:] for i in range(n)])
# # return zip(*[input_list[i:] for i in range(n)])
# #
# # # for ng in find_ngrams(['I', 'live', 'in', 'kingston'], 2):
# # # print ('ng', ng)
# # print (range(5))
#
# # a = np.array([[1, 4, 2], [3, 5, 6]])
# # b = np.array([[1,1], [2,2], [3, 3], [4, 4], [5, 5], [6, 6]])
# # print (b[a[0]])
# a = np.array([1, 2, 4, 5, 56])
# print ( int(np.sum(a > 5)))
# def argsort(seq):
# # http://stackoverflow.com/questions/3071415/efficient-method-to-calculate-the-rank-vector-of-a-list-in-python
# return sorted(range(len(seq)), key=seq.__getitem__)
def softmax(x):
    """Compute softmax values for each sets of scores in x (numerically stable)."""
    shifted = x - np.max(x)
    exps = np.exp(shifted)
    return exps / exps.sum()
def artificial_nli(cider_data, nli_data, nli_origin, thres):
    """Synthesize an NLI-style relation graph from CIDEr caption scores.

    For each image, captions whose softmaxed score falls below / above a
    threshold band get artificial directed edges whose probabilities are
    sampled around the empirical distribution of real NLI edge probabilities
    (estimated from the first ~500 entries of `nli_origin`).  Writes the
    result to experiment/coco_nli_graph_pg1.json and returns the edge lists.
    """
    d = json.load(open(cider_data, 'r'))
    dn = json.load(open(nli_data, 'r'))
    do = json.load(open(nli_origin, 'r'))['prob_graph']
    # Empirical pool of "entailment" edge probabilities (> 0.5) drawn from
    # the first 500 real graphs; used below as a sampling distribution.
    prob_array = []
    cnt = 0
    for e in do.values():
        if cnt >= 500:
            break
        for ee in e:
            # print ('ee', ee, 'e', e)
            if ee[2] > 0.5:
                prob_array.append(ee[2])
        cnt += 1
    prob_array = np.array(prob_array)
    prob_edges = []
    node_nums = []
    has_edges = 0
    xxx = 0
    for e, en in tqdm(zip(d, dn)):
        tmp_edges = []
        num = len(en)
        new_e = softmax(np.array([ele[1] for ele in e[:num]]))
        # print (new_e)
        arg_new_e = np.argsort(new_e)
        # Band around the uniform probability 1/num, scaled by thres.
        low = 1.0 / len(en) * (thres)
        high = 1.0 / len(en) * (1.0 / thres)
        small_num = int(np.sum(new_e < low))
        # print ('small_num', small_num)
        large_num = int(np.sum(new_e > high))
        # print ('large_num', large_num)
        if small_num > 0:
            # Edges from the best low-scoring caption to all better ones,
            # with jittered probabilities clipped into (0.5, 1).
            # print (np.random.choice(prob_array, size=(num - small_num,)))
            # print (np.random.rand(num - small_num,) * 0.1)
            sampl = np.random.choice(prob_array, size=(num - small_num,)) + (np.random.rand(num - small_num,) - 0.5) * 0.05
            sampl = np.sort(sampl)
            sampl = np.clip(sampl, 0.501, 0.999)
            # sampl = np.sort(np.random.uniform(low=0.5, high=1.0, size=(num - small_num,)))
            for i in range(num - small_num):
                tmp_edges.append([arg_new_e[small_num-1], arg_new_e[small_num + i], sampl[i]])
        if large_num > 0:
            # Edges from the first mid-band caption to each high-scoring one.
            sampl = np.random.choice(prob_array, size=(large_num,)) + (np.random.rand(large_num,) - 0.5) * 0.1
            sampl = np.sort(sampl)
            sampl = np.clip(sampl, 0.501, 0.999)
            for i in range(1, large_num+1):
                if num - i > small_num:
                    tmp_edges.append([arg_new_e[small_num], arg_new_e[-i], sampl[-i]])
                else:
                    break
        if small_num > 0 or large_num > 0:
            has_edges += 1
        # Transitive closure step: add a->c whenever a->b and b->c exist.
        ext_edges = []
        for i in range(len(tmp_edges)):
            cur = tmp_edges[i][1]
            for j in range(i+1, len(tmp_edges)):
                if cur == tmp_edges[j][0]:
                    sampl = np.random.choice(prob_array, size=(1)) + (np.random.rand(1)) * 0.1
                    ext_edges.append([tmp_edges[i][0], tmp_edges[j][1], sampl[0]])
                    xxx += 1
        tmp_edges.extend(ext_edges)
        prob_edges.append(tmp_edges)
        node_nums.append(num)
    print('xxx', xxx)
    json.dump({'edges': prob_edges, 'nodes': node_nums}, open('experiment/coco_nli_graph_pg1.json', 'w'))
    print ('has_edges', has_edges)
    return prob_edges
def minorchanges_nli(cider_data, nli_data, nli_origin, change=True):
    """Nudge NLI graph probabilities toward the CIDEr-based score ranking.

    Where the lowest/highest caption under CIDEr disagrees with the NLI
    ranking, raise edge probabilities out of the lowest caption and lower
    those out of the highest one.  When `change` is True the adjusted graph
    is written to experiment/coco_nli_new.json.
    """
    d = json.load(open(cider_data, 'r'))  # test and val data excluded
    dn = json.load(open(nli_data, 'r'))
    do = json.load(open(nli_origin, 'r'))
    # Counters: edges raised, edges lowered, entries already agreeing,
    # entries where only the top caption agrees.
    h_adj, l_adj, no_change, no_change_hi = 0, 0, 0, 0
    for idx, (e, en) in tqdm(enumerate(zip(d, dn))):
        num = len(en)
        new_e = softmax(np.array([ele[1] for ele in e[:num]]))
        # print (new_e)
        # A uniform distribution means all captions scored equally -- skip.
        if np.array_equal(new_e, np.array([0.2, 0.2, 0.2, 0.2, 0.2])):
            # print ('skip')
            continue
        arg_new_e = np.argsort(new_e)
        lo, hi = arg_new_e[0], arg_new_e[-1]
        nli_e = np.array([ele[1] for ele in en])
        arg_nli_e = np.argsort(nli_e)
        nlo, nhi = arg_nli_e[0], arg_nli_e[-1]
        if lo == nlo and hi == nhi:
            no_change += 1
            continue
        if hi == nhi:
            no_change_hi += 1
        if change:
            current_prob = do['prob_graph'][str(idx)]
            current_edge = do['graph'][str(idx)]
            # Indices of edges leaving the CIDEr-lowest (forward) and
            # CIDEr-highest (backward) captions.
            forward_ix_list, backward_ix_list = [], []
            for ix in range(len(current_prob)):
                if current_prob[ix][0] == lo:
                    forward_ix_list.append(ix)
                if current_prob[ix][0] == hi:
                    backward_ix_list.append(ix)
            assert len(forward_ix_list) > 0 and len(backward_ix_list) > 0, 'has to find the index'
            def samp(a, b):
                # One uniform draw in [a, b).
                return np.random.uniform(low=a, high=b, size=1)[0]
            # sampl2 = np.random.uniform(low=0.01, high=0.05, size=1)[0]
            for f_ix in forward_ix_list:
                if current_prob[f_ix][2] < 0.5:
                    if current_prob[f_ix][1] == hi:
                        # lo -> hi edge: force a strong entailment.
                        current_prob[f_ix][2] = samp(0.89, 0.99)
                        current_edge[f_ix][2] = 1.0
                        h_adj += 1
                    elif np.random.uniform() > 0.85:
                        # Other lo -> * edges: occasionally raise them.
                        current_prob[f_ix][2] = samp(0.51, 0.95)
                        current_edge[f_ix][2] = 1.0
                        h_adj += 1
            for b_ix in backward_ix_list:
                if current_prob[b_ix][2] > 0.5:
                    # hi -> * edges: the best caption should not entail others.
                    current_prob[b_ix][2] = samp(0.01, 0.05)
                    current_edge[b_ix][2] = 0.0
                    l_adj += 1
            do['prob_graph'][str(idx)] = current_prob
            do['graph'][str(idx)] = current_edge
    print('no need to change', no_change)
    print('h_adj', h_adj, 'l_adj', l_adj, 'nochangehi', no_change_hi)
    if change:
        json.dump(do, open('experiment/coco_nli_new.json', 'w'))
    return
def show_lo_and_high(cider_data, nli_data, data_json):
    """Print the lowest- and highest-scoring caption for up to 100 images.

    Useful for eyeballing whether the softmaxed CIDEr ranking matches
    intuition; reads raw caption text from the Karpathy-split data json.
    """
    d = json.load(open(cider_data, 'r'))  # test and val data excluded
    dn = json.load(open(nli_data, 'r'))
    da = json.load(open(data_json, 'r'))['images']
    cnt = 0
    for idx, (e, en, enn) in tqdm(enumerate(zip(d, dn, da))):
        num = len(en)
        new_e = softmax(np.array([ele[1] for ele in e[:num]]))
        # Uniform distribution -> no meaningful ranking, skip this image.
        if np.array_equal(new_e, np.array([0.2, 0.2, 0.2, 0.2, 0.2])):
            continue
        arg_new_e = np.argsort(new_e)
        lo, hi = arg_new_e[0], arg_new_e[-1]
        cnt += 1
        if cnt > 100:
            break
        print ("*"*20)
        print ('low:', enn['sentences'][lo]['raw'])
        print ('high', enn['sentences'][hi]['raw'])
        print ("*"*20)
    return
minorchanges_nli('data/prob_cand_inst_v3', 'data/nli_dist_rl', 'experiment/coco_nli_relation.json')
# show_lo_and_high('data/prob_cand_inst_v3', 'data/nli_weights_v2', 'data/dataset_karparthy.json')
# print (np.array([1.0/5]*5))
| Gitsamshi/Nli-image-caption | playground.py | playground.py | py | 8,056 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "numpy.exp",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 53,
... |
2653030207 | import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
class BiLSTM(nn.Module):
    """Bidirectional single-layer LSTM sentence classifier.

    Embeds word ids, runs a packed BiLSTM over the (sorted) batch, takes
    each sequence's output at its final timestep and feeds the concatenated
    forward/backward state to a linear classifier.
    """
    def __init__(self, args):
        super(BiLSTM, self).__init__()
        self.embedding_size = args.embedding_size
        self.hidden_size = args.hidden_size
        self.vocab_size = args.vocab_size #args.vocab_size
        self.label_size = args.num_classes
        self.batch_size = args.K # batch size is per task here
        self.use_gpu = args.use_gpu
        self.embeddings = nn.Embedding(self.vocab_size, self.embedding_size)
        self.lstm = nn.LSTM(input_size=self.embedding_size,
                            hidden_size=self.hidden_size,
                            bidirectional=True,
                            batch_first=True)
        # NOTE(review): the hidden state is created once here with the initial
        # batch size and reused across forward() calls -- confirm every batch
        # really has exactly K sequences.
        self.hidden = self.init_hidden(self.use_gpu)
        # *2 because the BiLSTM concatenates forward and backward outputs.
        self.classifier = nn.Linear(self.hidden_size*2, self.label_size)
        if self.use_gpu:
            self.embeddings = self.embeddings.cuda()
            self.lstm = self.lstm.cuda()
            self.classifier = self.classifier.cuda()
    def init_hidden(self, use_gpu):
        """Fresh (h_0, c_0) of shape (2 directions, batch, hidden)."""
        # NOTE(review): Variable is a legacy torch API; plain tensors behave
        # the same on modern PyTorch.
        h_0 = Variable(torch.zeros(2, self.batch_size, self.hidden_size), requires_grad=True)
        c_0 = Variable(torch.zeros(2, self.batch_size, self.hidden_size), requires_grad=True)
        if use_gpu:
            return h_0.cuda(), c_0.cuda()
        else:
            return h_0, c_0
    def forward(self, word_ids, lengths):
        """Return logits of shape (batch, label_size) for padded word_ids."""
        # sort by length (pack_padded_sequence requires descending lengths)
        lengths, perm_idx = lengths.sort(0, descending=True)
        word_ids = word_ids[perm_idx]
        # print('word ids', word_ids.size())
        # word_ids = word_ids.permute(1, 0)
        embs = self.embeddings(word_ids)#.view(word_ids.size(1), self.batch_size, -1) # maybe permute instead
        # print('embs', embs.size())
        # print('lengths', lengths.size())
        packed = pack_padded_sequence(embs, lengths, batch_first=True)
        # print('packed', packed.size())
        # embs = embs.permute(1, 0, 2)
        # print(packed.size())
        output, self.hidden = self.lstm(packed, self.hidden)
        output, _ = pad_packed_sequence(output, batch_first=True)
        # unsort (restore the caller's original batch order)
        _, unperm_idx = perm_idx.sort(0)
        output = output[unperm_idx]
        lengths = lengths[unperm_idx]
        # get final output state: gather index (length - 1) per sequence
        last_indices = (lengths - 1).view(-1, 1).expand(len(lengths), output.size(2)).unsqueeze(1) # 1 = time dimension
        if self.use_gpu:
            last_indices = last_indices.cuda()
        last_output = output.gather(1, last_indices).squeeze(1)
        # print(last_output.size())
        logits = self.classifier(last_output)
        return logits
| wbakst/meta-learned-embeddings | lstm.py | lstm.py | py | 2,741 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torch.nn.Embedding",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_n... |
40555963992 | from django.urls import path
from Web.views import Index, Persons, Random, QuotesByPerson, QuotesByCategory, CategoryCreateView, PersonCreateView, QuoteCreateView, LogoutView
from Web.api_views import APIPersons, APICategories, APIQuotes, APIQuotesByPerson, APIQuotesByCategory, APIQuotesRandom
urlpatterns = [
    # HTML pages
    path('', Index.as_view(), name='index'),
    path('persons/', Persons.as_view(), name='persons'),
    # Object-creation forms
    path('create/category', CategoryCreateView.as_view(), name='create_category'),
    path('create/person', PersonCreateView.as_view(), name='create_person'),
    path('create/quote', QuoteCreateView.as_view(), name='create_quote'),
    # Quote listings (qbp = quotes by person, qbc = quotes by category)
    path('qbp/<person_pk>/', QuotesByPerson.as_view(), name='quotes_by_person'),
    path('qbc/<category_pk>/', QuotesByCategory.as_view(), name='quotes_by_category'),
    path('random/', Random.as_view(), name='random'),
    path('account/logout', LogoutView.as_view(), name='logout'),
    # JSON API endpoints mirroring the HTML views above
    path('api/persons/', APIPersons.as_view(), name="api_persons"),
    path('api/categories/', APICategories.as_view(), name="api_categories"),
    path('api/quotes/', APIQuotes.as_view(), name="api_quotes"),
    path('api/qbp/<int:pk>/', APIQuotesByPerson.as_view(), name="api_quotes_by_person"),
    path('api/qbc/<int:pk>/', APIQuotesByCategory.as_view(), name="api_quotes_by_category"),
    path('api/quotes_random/', APIQuotesRandom.as_view(), name="api_quotes_random"),
]
| mavenium/PyQuotes | Web/urls.py | urls.py | py | 1,415 | python | en | code | 27 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "Web.views.Index.as_view",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "Web.views.Index",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.urls.pat... |
36396608095 | """
Continuous Statistics Class
"""
from numbers import Number
from typing import Union, Tuple
from functools import wraps
import inspect
import numpy as np
from gval.statistics.base_statistics import BaseStatistics
import gval.statistics.continuous_stat_funcs as cs
class ContinuousStatistics(BaseStatistics):
    """
    Class for Running Continuous Statistics on Agreement Maps

    Attributes
    ----------
    registered_functions : dict
        Available statistical functions with names as keys and parameters as values
    """

    def __init__(self):
        # Automatically registers every public function from
        # continuous_stat_funcs.py as a method of this instance.
        # A parameter flagged 1 is required; 0 means optional.
        self.required_param = 1
        self.optional_param = 0
        # Heuristic filter: skip dunders, imported helpers ("Number",
        # "convert_output") and very short names.
        self._func_names = [
            fn
            for fn in dir(cs)
            if len(fn) > 5
            and "__" not in fn
            and "Number" not in fn
            and "convert_output" not in fn
        ]
        self._funcs = [getattr(cs, name) for name in self._func_names]

        for name, func in zip(self._func_names, self._funcs):
            setattr(self, name, func)

        # Signature contract that every registered function must satisfy;
        # empty lists mean "no validation" for that aspect.
        self._signature_validation = {
            "names": {
                "error": self.required_param,
                "candidate_map": self.optional_param,
                "benchmark_map": self.optional_param,
            },
            "required": [
                self.required_param,
                self.optional_param,
                self.optional_param,
            ],
            "param_types": [
                "xarray.core.dataset.Dataset",
                "xarray.core.dataarray.DataArray",
                "Union[xarray.core.dataarray.DataArray, xarray.core.dataset.Dataset]",
                "Union[xarray.core.dataset.Dataset, xarray.core.dataarray.DataArray]",
            ],
            "return_type": [float, Number],
            "no_of_args": [1, 2, 3],
        }

        self.registered_functions = {
            name: {"params": [param for param in inspect.signature(func).parameters]}
            for name, func in zip(self._func_names, self._funcs)
        }

    def available_functions(self) -> list:
        """
        Lists all available functions

        Returns
        -------
        List of available functions
        """
        return list(self.registered_functions.keys())

    def get_all_parameters(self):
        """
        Get all the possible arguments

        Returns
        -------
        List of all possible arguments for functions
        """
        return list(self._signature_validation["names"].keys())

    def register_function(self, name: str):
        """
        Register decorator function in statistics class

        Parameters
        ----------
        name: str
            Name of function to register in statistics class

        Returns
        -------
        Decorator function
        """

        def decorator(func):
            # Validate the signature before exposing the function.
            self.function_signature_check(func)

            if name not in self.registered_functions:
                self.registered_functions[name] = {
                    "params": [
                        param
                        for param in inspect.signature(func).parameters
                        if param != "self"
                    ]
                }
                setattr(self, name, func)
            else:
                raise KeyError("This function name already exists")

            @wraps(func)
            def wrapper(*args, **kwargs):  # pragma: no cover
                result = func(*args, **kwargs)
                return result

            return wrapper

        return decorator

    def register_function_class(self):
        """
        Register decorator for an entire class: every function defined on the
        decorated class is signature-checked and registered.

        Returns
        -------
        Decorator that registers all functions of the class it decorates
        """

        def decorator(dec_self: object):
            """
            Decorator for wrapper

            Parameters
            ----------
            dec_self: object
                Class to register stat functions
            """
            for name, func in inspect.getmembers(dec_self, inspect.isfunction):
                if name not in self.registered_functions:
                    self.function_signature_check(func)
                    self.registered_functions[name] = {
                        "params": [
                            param
                            for param in inspect.signature(func).parameters
                            if param != "self"
                        ]
                    }
                    setattr(self, name, func)
                else:
                    raise KeyError("This function name already exists")

        return decorator

    def function_signature_check(self, func):
        """
        Validates signature of registered function

        Parameters
        ----------
        func: function
            Function to check the signature of

        Raises
        ------
        TypeError
            If a parameter name/type, the argument count, or the return
            annotation falls outside the allowed sets.
        """

        signature = inspect.signature(func)
        names = self._signature_validation["names"]
        param_types = self._signature_validation["param_types"]
        return_type = self._signature_validation["return_type"]
        no_of_args = self._signature_validation["no_of_args"]

        # Checks if param names, type, and return type are in valid list
        # Considered no validation if either are empty
        for key, val in signature.parameters.items():
            if (key not in names and len(names) > 0) or (
                not str(val).split(": ")[-1] in param_types and len(param_types) > 0
            ):
                raise TypeError(
                    "Wrong parameters in function: \n"
                    f"Valid Names: {names} \n"
                    f"Valid Types: {param_types} \n"
                )

        if len(no_of_args) > 0 and len(signature.parameters) not in no_of_args:
            raise TypeError(
                "Wrong number of parameters: \n"
                f"Valid number of parameters: {no_of_args}"
            )

        if signature.return_annotation not in return_type and len(return_type) > 0:
            raise TypeError("Wrong return type \n" f"Valid return Type {return_type}")

    def get_parameters(self, func_name: str) -> list:
        """
        Get parameters of registered function

        Parameters
        ----------
        func_name: str

        Returns
        -------
        List of parameter names for the associated function
        """
        if func_name in self.registered_functions:
            return self.registered_functions[func_name]["params"]
        else:
            raise KeyError("Statistic not found in registered functions")

    def process_statistics(
        self, func_names: Union[str, list], **kwargs
    ) -> Tuple[float, str]:
        """
        Run the named registered metric functions ("all" runs every one).

        Parameters
        ----------
        func_names: Union[str, list]
            Name of registered function to run
        **kwargs: dict or keyword arguments
            Dictionary or keyword arguments of to pass to metric functions.

        Returns
        -------
        Tuple[float, str]
            Tuple with metric values and metric names.
        """

        func_names = (
            list(self.registered_functions.keys())
            if func_names == "all"
            else func_names
        )
        func_list = [func_names] if isinstance(func_names, str) else func_names

        return_stats, return_funcs = [], []
        for name in func_list:
            if name in self.registered_functions:
                params = self.get_parameters(name)
                required = self._signature_validation["required"]
                func = getattr(self, name)

                # Necessary for numba functions which cannot accept keyword arguments
                func_args, skip_function, return_nan = [], False, False
                for param, req in zip(params, required):
                    if param in kwargs and kwargs[param] is not None:
                        func_args.append(kwargs[param])
                    # Missing optional parameter: silently skip this metric.
                    elif not self._signature_validation["names"][param]:
                        skip_function = True
                        break
                    # Missing required parameter: report and emit nan.
                    else:
                        print(
                            f"Parameter {param} missing from kwargs of {name}, returning nan"
                        )
                        return_nan = True
                        break

                if skip_function:
                    continue

                stat_val = np.nan if return_nan else func(*func_args)

                def check_value(stat_name: str, stat: Number):
                    # Warn on nan/inf results that were actually computed
                    # (not the deliberate nan placeholder above).
                    if (np.isnan(stat) or np.isinf(stat)) and not return_nan:
                        print(
                            "Warning:",
                            f"Invalid value calculated for {stat_name}:",
                            stat,
                        )

                if isinstance(stat_val, dict):
                    for st_name, val in stat_val.items():
                        check_value(st_name, val)
                else:
                    check_value(name, stat_val)

                return_stats.append(stat_val)
                return_funcs.append(name)
            else:
                raise KeyError(f"Statistic, {name}, not found in registered functions")

        return return_stats, return_funcs
| NOAA-OWP/gval | src/gval/statistics/continuous_statistics.py | continuous_statistics.py | py | 9,420 | python | en | code | 14 | github-code | 6 | [
{
"api_name": "gval.statistics.base_statistics.BaseStatistics",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "gval.statistics.continuous_stat_funcs",
"line_number": 34,
"usage_type": "argument"
},
{
"api_name": "gval.statistics.continuous_stat_funcs",
"line_number... |
19399859119 | # 目标和
# https://leetcode-cn.com/leetbook/read/queue-stack/ga4o2/
from typing import List
import common.arrayCommon as Array
class Solution:
    """LeetCode 494 'Target Sum': count sign assignments of nums summing to S."""

    def findTargetSumWays(self, nums: List[int], S: int) -> int:
        """Return the number of ways to assign +/- to each num so the sum is S.

        DP over shifted sums: dp[i][j] counts assignments of nums[:i+1] whose
        signed sum is j - s, where s = sum(nums) maps [-s, s] onto [0, 2s].
        """
        s = sum(nums)
        # |S| > sum(nums) is unreachable.  The original only checked s < S;
        # a sufficiently negative S made the final index s + S negative and
        # indexed out of range instead of returning 0.
        if S > s or S < -s:
            return 0
        # Empty input: the empty assignment sums to 0 (and S == 0 here).
        if not nums:
            return 1
        n = len(nums)
        r = s * 2 + 1
        dp = [[0] * r for _ in range(n)]
        if nums[0] == 0:
            # +0 and -0 are two distinct assignments of the same sum.
            dp[0][s] = 2
        else:
            dp[0][s - nums[0]] = 1
            dp[0][s + nums[0]] = 1
        for i in range(1, n):
            for j in range(r):
                if j - nums[i] >= 0:
                    dp[i][j] += dp[i - 1][j - nums[i]]
                if j + nums[i] < r:
                    dp[i][j] += dp[i - 1][j + nums[i]]
        # Debug prints removed: they polluted stdout on every call.
        return dp[-1][s + S]
# Smoke test: each of the eight zeros contributes 2 sign choices (2^8 = 256)
# and the trailing 1 must take '+', so this should print 256.
nums = [0, 0, 0, 0, 0, 0, 0, 0, 1]
S = 1
r = Solution().findTargetSumWays(nums, S)
print(r)
# class Solution:
# multiplier = [1, -1]
# ans = 0
#
# def findTargetSumWays(self, nums: List[int], S: int) -> int:
# self.ans = 0
# self.search(S, nums, 0, 0)
# return self.ans
#
# def search(self, target, nums, i, cur):
# if i == len(nums):
# if cur == target:
# self.ans += 1
# return
# for each in self.multiplier:
# c = cur + each * nums[i]
# self.search(target, nums, i + 1, c)
| Yigang0622/LeetCode | findTargetSumWays.py | findTargetSumWays.py | py | 1,411 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 10,
"usage_type": "name"
}
] |
31309035194 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
#Library Imports
from __future__ import print_function, division
from keras.models import Sequential, Model
from keras.layers.core import Dense, Dropout
from keras.layers.recurrent import LSTM, GRU, SimpleRNN
from keras.layers import Input
from keras.utils.data_utils import get_file
from keras.optimizers import Nadam
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers.normalization import BatchNormalization
from collections import Counter
import numpy as np
import random
import sys
import os
import getopt
import copy
import csv
import io
import time
import ipywidgets
import traitlets
from datetime import datetime
from math import log
import pandas as pd
from ATLSTM_layer import ATLSTM_layer
# In[ ]:
def load_data(eventlog, path, sep="|"):
    """Read an event-log CSV from the shared dataset folder and return its
    raw values as a numpy array (rows of [case, activity, timestamp, ...])."""
    csv_path = '../../../dataset/' + path + '/%s' % eventlog
    frame = pd.read_csv(csv_path, sep=sep, error_bad_lines=False)
    return frame.values
# # In[ ]:
def get_divisor(timeseqs):
    """Mean over every value in a list of per-case sequences; used as a
    normalisation divisor for the time features."""
    flat = [value for seq in timeseqs for value in seq]
    return np.mean(flat)
def create_model_folder(name, dirc):
    """Create (if needed) the models directory for *dirc* and a fresh
    versioned sub-folder ``<name>_v<i>``, returning the sub-folder name.

    Picks the first i in 0..99 whose directory does not yet exist.
    """
    # Note: this initial `i = 1` is immediately shadowed by the loop
    # variable below and has no effect.
    i = 1
    path_dir = "../../../results/output_files/models/"+dirc
    if os.path.isdir(path_dir) == False:
        try:
            os.mkdir(path_dir)
        except OSError:
            print ("Creation of the directory %s failed" % path_dir)
        else:
            print ("Successfully created the directory %s " % path_dir)
    for i in range(100):
        new_name = name + "_v" + str(i)
        path_name = path_dir + "/" + new_name
        if os.path.isdir(path_name) == False:
            try:
                # A concurrent mkdir failure just advances to the next version.
                os.mkdir(path_name)
            except OSError:
                continue
            else:
                print ("Successfully created the directory %s " % path_name)
                break
    # NOTE(review): if all 100 versioned directories already exist, this
    # returns "name_v99" without creating anything — confirm intended.
    return new_name
# In[ ]:
def main(argv = None):
    """Train the two-headed ATLSTM model (next activity + next timestamp).

    Pipeline: parse CLI options, load the event log, build per-case
    activity/time sequences, split into 3 folds (folds 1+2 train, fold 3
    is written out for testing), vectorize prefixes into X/y_a/y_t, then
    build and fit the Keras model with early stopping and checkpoints.

    Options: -i <input_file> -d <directory> -s <separator> -n <num_add_feats>
    """
    if argv is None:
        argv = sys.argv
    inputfile = ""
    directory = ""
    sep=""
    num_add_feats = 0
    try:
        # BUG FIX: the optstring was "hi:d:n:", so the -s option handled
        # below always raised GetoptError; "s:" is now included.
        opts, args = getopt.getopt(argv, "hi:d:s:n:")
    except getopt.GetoptError:
        print(os.path.basename(__file__),
            "-i <input_file> -d <directory> -s <separator> -n <num_add_feats>")
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print(os.path.basename(__file__),
                "-i <input_file> -d <directory> -s <separator> -n <num_add_feats>")
            sys.exit()
        elif opt == "-i":
            inputfile = arg
        elif opt == "-d":
            directory = arg
        elif opt == "-s":
            sep = arg
        elif opt == "-n":
            num_add_feats = int(arg)
    begin_time = datetime.now()
    # helper variables (duplicate initialisations removed)
    lines = []      # per-case activity strings (one char per event)
    timeseqs = []   # time since previous event
    timeseqs2 = []  # time since case start
    timeseqs3 = []  # time since midnight
    timeseqs4 = []  # day of week
    add_feats = []  # per-case additional features (row[3:])
    lastcase = ''
    line = ''
    firstLine = True
    times = []
    times2 = []
    times3 = []
    times4 = []
    add_feat = -1
    numlines = 0
    casestarttime = None
    lasteventtime = None
    ascii_offset = 161  # map activity ids onto printable unicode chars
    spamreader = load_data(inputfile, directory, sep)
    for row in spamreader:
        t = time.strptime(row[2], "%Y-%m-%d %H:%M:%S")
        if row[0]!=lastcase:
            # new case: flush the previous one and reset accumulators
            casestarttime = t
            lasteventtime = t
            lastcase = row[0]
            if not firstLine:
                lines.append(line)
                timeseqs.append(times)
                timeseqs2.append(times2)
                timeseqs3.append(times3)
                timeseqs4.append(times4)
                add_feats.append(list(add_feat))
            line = ''
            times = []
            times2 = []
            times3 = []
            times4 = []
            add_feat = row[3:]
            numlines+=1
        line+=chr(int(row[1])+ascii_offset)
        timesincelastevent = datetime.fromtimestamp(time.mktime(t))-datetime.fromtimestamp(time.mktime(lasteventtime))
        timesincecasestart = datetime.fromtimestamp(time.mktime(t))-datetime.fromtimestamp(time.mktime(casestarttime))
        midnight = datetime.fromtimestamp(time.mktime(t)).replace(hour=0, minute=0, second=0, microsecond=0)
        timesincemidnight = datetime.fromtimestamp(time.mktime(t))-midnight
        timediff = 86400 * timesincelastevent.days + timesincelastevent.seconds
        timediff2 = 86400 * timesincecasestart.days + timesincecasestart.seconds
        timediff3 = timesincemidnight.seconds #this leaves only time even occured after midnight
        timediff4 = datetime.fromtimestamp(time.mktime(t)).weekday() #day of the week
        times.append(timediff)
        times2.append(timediff2)
        times3.append(timediff3)
        times4.append(timediff4)
        lasteventtime = t
        firstLine = False
    # flush the trailing case
    lines.append(line)
    timeseqs.append(times)
    timeseqs2.append(times2)
    timeseqs3.append(times3)
    timeseqs4.append(times4)
    # CONSISTENCY FIX: the in-loop flush stores list(add_feat); do the same
    # here so every add_feats entry has the same type.
    add_feats.append(list(add_feat))
    numlines+=1
    divisor = get_divisor(timeseqs) #average time between events
    print('divisor: {}'.format(divisor))
    divisor2 = get_divisor(timeseqs2) #average time between current and first events
    print('divisor2: {}'.format(divisor2))
    # 3-fold split: folds 1+2 are used for training below, fold 3 is held out
    elems_per_fold = int(round(numlines/3))
    fold1 = lines[:elems_per_fold]
    fold1_t = timeseqs[:elems_per_fold]
    fold1_t2 = timeseqs2[:elems_per_fold]
    fold1_t3 = timeseqs3[:elems_per_fold]
    fold1_t4 = timeseqs4[:elems_per_fold]
    fold1_ft = add_feats[:elems_per_fold]
    with open('../../../results/output_files/folds/fold1.csv', 'w') as csvfile:
        spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        for row, timeseq in zip(fold1, fold1_t):
            spamwriter.writerow([s +'#{}'.format(t) for s, t in zip(row, timeseq)])
    fold2 = lines[elems_per_fold:2*elems_per_fold]
    fold2_t = timeseqs[elems_per_fold:2*elems_per_fold]
    fold2_t2 = timeseqs2[elems_per_fold:2*elems_per_fold]
    fold2_t3 = timeseqs3[elems_per_fold:2*elems_per_fold]
    fold2_t4 = timeseqs4[elems_per_fold:2*elems_per_fold]
    fold2_ft = add_feats[elems_per_fold:2*elems_per_fold]
    with open('../../../results/output_files/folds/fold2.csv', 'w') as csvfile:
        spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        for row, timeseq in zip(fold2, fold2_t):
            spamwriter.writerow([s +'#{}'.format(t) for s, t in zip(row, timeseq)])
    fold3 = lines[2*elems_per_fold:]
    fold3_t = timeseqs[2*elems_per_fold:]
    fold3_t2 = timeseqs2[2*elems_per_fold:]
    fold3_t3 = timeseqs3[2*elems_per_fold:]
    fold3_t4 = timeseqs4[2*elems_per_fold:]
    fold3_ft = add_feats[2*elems_per_fold:]
    with open('../../../results/output_files/folds/fold3.csv', 'w') as csvfile:
        spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        for row, timeseq in zip(fold3, fold3_t):
            spamwriter.writerow([s +'#{}'.format(t) for s, t in zip(row, timeseq)])
    lines = fold1 + fold2
    lines_t = fold1_t + fold2_t
    lines_t2 = fold1_t2 + fold2_t2
    lines_t3 = fold1_t3 + fold2_t3
    lines_t4 = fold1_t4 + fold2_t4
    lines_ft = fold1_ft + fold2_ft
    step = 1
    sentences = []
    softness = 0
    next_chars = []
    lines = list(map(lambda x: x+'!',lines)) #put delimiter symbol
    maxlen = max(list(map(lambda x: len(x),lines))) #find maximum line size
    # next lines here to get all possible characters for events and annotate them with numbers
    chars = list(map(lambda x: set(x),lines))
    chars = list(set().union(*chars))
    chars.sort()
    target_chars = copy.copy(chars)
    chars.remove('!')
    print('total chars: {}, target chars: {}'.format(len(chars), len(target_chars)))
    char_indices = dict((c, i) for i, c in enumerate(chars))
    indices_char = dict((i, c) for i, c in enumerate(chars))
    target_char_indices = dict((c, i) for i, c in enumerate(target_chars))
    target_indices_char = dict((i, c) for i, c in enumerate(target_chars))
    print(indices_char)
    # build every non-empty prefix of every case as a training "sentence"
    sentences_t = []
    sentences_t2 = []
    sentences_t3 = []
    sentences_t4 = []
    sentences_ft = []
    next_chars_t = []
    next_chars_t2 = []
    next_chars_t3 = []
    next_chars_t4 = []
    next_chars_ft = []
    for line, line_t, line_t2, line_t3, line_t4, line_ft in zip(lines, lines_t, lines_t2, lines_t3, lines_t4, lines_ft):
        for i in range(0, len(line), step):
            if i==0:
                continue
            #we add iteratively, first symbol of the line, then two first, three...
            sentences.append(line[0: i])
            sentences_t.append(line_t[0:i])
            sentences_t2.append(line_t2[0:i])
            sentences_t3.append(line_t3[0:i])
            sentences_t4.append(line_t4[0:i])
            sentences_ft.append(line_ft)
            next_chars.append(line[i])
            if i==len(line)-1: # special case to deal time of end character
                next_chars_t.append(0)
                next_chars_t2.append(0)
                next_chars_t3.append(0)
                next_chars_t4.append(0)
            else:
                next_chars_t.append(line_t[i])
                next_chars_t2.append(line_t2[i])
                next_chars_t3.append(line_t3[i])
                next_chars_t4.append(line_t4[i])
            next_chars_ft.append(line_ft)
    print('nb sequences:', len(sentences))
    print('Vectorization...')
    num_features = len(chars)+5+num_add_feats+1
    print('num features: {}'.format(num_features))
    X = np.zeros((len(sentences), maxlen, num_features), dtype=np.float32)
    y_a = np.zeros((len(sentences), len(target_chars)), dtype=np.float32)
    y_t = np.zeros((len(sentences)), dtype=np.float32)
    for i, sentence in enumerate(sentences):
        leftpad = maxlen-len(sentence)  # sequences are left-padded to maxlen
        next_t = next_chars_t[i]
        sentence_t = sentences_t[i]
        sentence_t2 = sentences_t2[i]
        sentence_t3 = sentences_t3[i]
        sentence_t4 = sentences_t4[i]
        for t, char in enumerate(sentence):
            # (an unused per-position Counter() of the prefix was removed;
            # it was O(n^2) dead work)
            for c in chars:
                if c==char: #this will encode present events to the right places
                    X[i, t+leftpad, char_indices[c]] = 1
            X[i, t+leftpad, len(chars)] = t+1
            X[i, t+leftpad, len(chars)+1] = sentence_t[t]/divisor
            X[i, t+leftpad, len(chars)+2] = sentence_t2[t]/divisor2
            X[i, t+leftpad, len(chars)+3] = sentence_t3[t]/86400
            X[i, t+leftpad, len(chars)+4] = sentence_t4[t]/7
            if num_add_feats > 0:
                for f in range(num_add_feats):
                    X[i, t+leftpad, len(chars)+f+5] = sentences_ft[i][f]
        for c in target_chars:
            if c==next_chars[i]:
                y_a[i, target_char_indices[c]] = 1-softness
            else:
                y_a[i, target_char_indices[c]] = softness/(len(target_chars)-1)
        y_t[i] = next_t/divisor
    np.set_printoptions(threshold=sys.maxsize)
    # build the model:
    print('Build model...')
    print(X.shape)
    main_input = Input(shape=(maxlen, num_features), name='main_input')
    # train a 2-layer LSTM with one shared layer
    l1 = ATLSTM_layer(128, return_sequences=True)(main_input) # the shared layer
    l2_1 = ATLSTM_layer(128, return_sequences=False)(l1)
    l2_2 = ATLSTM_layer(128, return_sequences=False)(l1)
    # BUG FIX: the original used `keras.layers.Dropout`, but the bare
    # `keras` module is never imported, which raised NameError at runtime;
    # the imported Dropout class is used instead.  The previous
    # `d1 = Dropout(.2)(l1)` tensor was never connected to any output and
    # has been removed.
    d2_1 = Dropout(.2)(l2_1)
    d2_2 = Dropout(.2)(l2_2)
    act_output = Dense(len(target_chars), activation='softmax', kernel_initializer='glorot_uniform', name='act_output')(d2_1)
    time_output = Dense(1, kernel_initializer='glorot_uniform', name='time_output')(d2_2)
    model = Model(inputs=[main_input], outputs=[act_output, time_output])
    model_folder = ""
    if num_add_feats == 0:
        model_folder = create_model_folder("model_nofeat",directory)
    else:
        # BUG FIX: int + str concatenation raised TypeError here.
        model_folder = create_model_folder("model_" + str(num_add_feats) + "_feat", directory)
    opt = Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004, clipvalue=3)
    model.compile(loss={'act_output':'categorical_crossentropy', 'time_output':'mae'}, optimizer=opt)
    early_stopping = EarlyStopping(monitor='val_loss', patience=42)
    model_checkpoint = ModelCheckpoint("../../../results/output_files/models/"+directory+"/"+model_folder+'/model_{epoch:02d}-{val_loss:.2f}.h5',
                                       monitor='val_loss',
                                       verbose=0,
                                       save_best_only=True,
                                       save_weights_only=False,
                                       mode='auto')
    lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, verbose=0, mode='auto', min_delta=0.0001, cooldown=0, min_lr=0)
    model.fit({'main_input': X}, {'act_output':y_a, 'time_output':y_t},
              validation_split=0.2,
              verbose=2,
              callbacks=[early_stopping, model_checkpoint, lr_reducer],
              batch_size=maxlen,
              epochs=200
              )
    print(datetime.now() - begin_time)
print(datetime.now() - begin_time)
# Script entry point: forward CLI arguments (minus the program name) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
| RenatoMAlves/context-aware-time-prediction | code/Context-LSTM/train_addittional_feats_py3.py | train_addittional_feats_py3.py | py | 13,972 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number":... |
3096449689 | from django.shortcuts import render, render_to_response
from django.utils import timezone
from django.http import HttpResponse, Http404
#from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.paginator import EmptyPage, PageNotAnInteger
from flynsarmy_paginator.paginator import FlynsarmyPaginator as Paginator
from .models import foodall, gn
from django.db.models import Q
import random
import pymysql
ITEMS_PER_PAGE = 10
PAGE_GROUP = 10
#-- TemplateView
def home(request):
    """Render the static landing page."""
    return render(request, 'foodle/home.html')
def search(request):
    """Keyword search over foodall, with pagination and a random food quote.

    NOTE(review): the function stores its state (searchwords, foodle_list,
    w_list) as attributes on the function object itself.  That state is
    shared by ALL users and requests of this process and is not
    thread-safe — consider session storage instead.
    """
    # Random "wise sayings" shown on the results page (user-facing Korean
    # strings; left untranslated intentionally).
    wisesaying = [
        '고기 먹다 체하면 냉면으로 쑥 눌러줘라',
        '기분이 저기압일 땐 반드시 고기 앞으로 가라',
        '내가 아는 맛이 가장 맛있는 맛',
        '치킨 뼈를 봤을 때 앙념을 먹었는지 후라이드를 먹었는지 모르게 하라',
        '맛있게 먹으면 0칼로리',
        '탕수육은 부먹도 찍먹도 아닌 쳐먹이 진리',
        '일찍 일어나는 새가 많이 먹는다',
        '먹었던 뼈도 다시보자',
        '인생은 치킨의 연속이다',
        '내가 먹겠다는 의지만 있으면 위가 늘어난다',
        'B(birth)와 D(death)사이에는 C(chicken)이 있다',
        '오늘 먹을 치킨을 내일로 미루지 말자',
        '튀긴 음식은 신발을 튀겨도 맛있다',
        '맛집이 있다면 지옥도 가겠다',
        '현기증 난단 말이에요. 빨리 라면 끓여 주세요',
        '물이 너무 많으면 라면을 더 넣어라',
    ]
    # Lazily initialise the function-attribute "state" on first call.
    if not hasattr(search, "searchwords"):
        search.searchwords = ''
    if not hasattr(search, "foodle_list"):
        search.foodle_list = []
    if not hasattr(search, "w_list"):
        search.w_list = []
    if 'search_words' in request.GET:
        search.searchwords = request.GET['search_words']
        if search.searchwords == '':
            search.foodle_list = []
        else:
            #search.foodle_list = mysqlexport(search.searchwords)
            # Each word must match title, subtitle, or ind (AND across words).
            search.w_list = wordslist(search.searchwords)
            q = Q()
            for wlist in search.w_list:
                q = q & ((Q(title__contains=wlist) | Q(subtitle__contains=wlist)) | Q(ind__contains=wlist))
            search.foodle_list = foodall.objects.filter(q).order_by('-data')
    # Paging
    paginator = Paginator(search.foodle_list, 10, adjacent_pages = 5)
    page = request.GET.get('page')
    try:
        lists = paginator.page(page)
    except PageNotAnInteger:
        lists = paginator.page(1)
    except EmptyPage:
        lists = paginator.page(paginator.num_pages)
    # NOTE(review): render_to_response was removed in Django 3.0 — confirm
    # the project's Django version or migrate to render().
    return render_to_response('foodle/search.html', {"lists": lists, "searchwords": search.searchwords, "wisesaying": wisesaying[random.randint(0, len(wisesaying)-1)]})
#search_list.searchwords=''
def mysqlexport(key):
    """Legacy raw-SQL search over the `gn` table, newest first.

    SECURITY FIX: the previous version interpolated the user-supplied
    search string directly into the SQL text (SQL injection) and also
    omitted the space before ORDER BY, producing invalid SQL.  The query
    is now parameterized while keeping the original clause shape: the
    first word matches the named column and every further word is AND-ed
    against `ind`, with the whole pattern repeated for title / ind /
    subtitle.
    """
    conn = pymysql.connect(host='127.0.0.1', charset='utf8', user='root', passwd='root', db='food')
    conn.query("set character_set_connection=utf8;")
    conn.query("set character_set_server=utf8;")
    conn.query("set character_set_client=utf8;")
    conn.query("set character_set_results=utf8;")
    conn.query("set character_set_database=utf8;")
    curs = conn.cursor(pymysql.cursors.DictCursor)
    # Split like the old searchword() helper did (single spaces, empties kept).
    words = key.split(' ') if key else ['']
    patterns = ['%' + word + '%' for word in words]
    cond = "%s" + " and ind like %s" * (len(patterns) - 1)
    sql = ("SELECT * FROM gn where title like " + cond
           + " or ind like " + cond
           + " or subtitle like " + cond
           + " ORDER BY data DESC")
    curs.execute(sql, patterns * 3)
    rows = curs.fetchall()
    return rows
def searchword(key):
    """Turn a space-separated search string into raw LIKE clause text.

    The first word becomes " '%w1%'" and every following word appends
    " and ind like '%wn%'", e.g. "a b" -> " '%a%' and ind like '%b%'".

    Fixes: the original ended with a dead statement ``sc ==cc`` (a no-op
    comparison where an assignment was presumably intended) and called
    ``key.__len__()`` instead of ``len(key)``.
    """
    word = ''
    start = 0
    for pos, ch in enumerate(key, start=1):
        if ch == ' ':
            # close the current word and open an "and ind like" clause
            word += " '%" + key[start:pos - 1] + "%' and ind like"
            start = pos
        if pos == len(key):
            word += " '%" + key[start:pos] + "%'"
    return word
def wordslist(key):
    """Split *key* on single spaces, keeping empty tokens (so consecutive
    or trailing spaces yield '' entries); an empty key yields []."""
    if not key:
        return []
    return key.split(' ')
#mysqlexport('서울 프뤼엥 ')
| seolakim/reve-web | foodle/views.py | views.py | py | 4,218 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.shortcuts.render",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "models.... |
33805613545 | from typing import Optional, Dict
from fastapi import WebSocket, APIRouter, Cookie, status, Depends
from ws_chat_py.engines.person_engine import PersonEngine
ws_router = APIRouter()


@ws_router.websocket("/ws")
async def ws_chat_handler(websocket: WebSocket):
    """WebSocket chat endpoint: authenticate via cookie, register the
    person, then echo every received JSON payload back as {'msg': ...}.
    Any receive/send failure closes the socket and ends the loop."""
    await websocket.accept()
    token = check_chat_auth(websocket.cookies)
    if not token:
        await websocket.close()
        return
    person = PersonEngine.create_person(token=token, name='name')
    while True:
        try:
            payload = await websocket.receive_json()
            await websocket.send_json({'msg': payload})
        except Exception:
            await websocket.close()
            break
def check_chat_auth(cookies: Dict[str, str]) -> Optional[str]:
chat_auth_token = cookies.get('chat_auth_token')
if not chat_auth_token:
return None
return chat_auth_token
| backcrawler/ws_chat_py | ws_chat_py/handlers/ws_handlers.py | ws_handlers.py | py | 925 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "fastapi.WebSocket",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "ws_chat_py.engines.person_engine.PersonEngine.create_person",
"line_number": 18,
"usage_type": "call"
... |
28990015892 | from sys import platform, version_info
if True:
from PyQt5.QtCore import pyqtSlot, Qt, QSettings, QTimer
from PyQt5.QtGui import QFontMetrics
from PyQt5.QtWidgets import QDialog, QDialogButtonBox, QMessageBox
else:
from PyQt4.QtCore import pyqtSlot, Qt, QSettings, QTimer
from PyQt4.QtGui import QFontMetrics
from PyQt4.QtGui import QDialog, QDialogButtonBox, QMessageBox
# ------------------------------------------------------------------------------------------------------------
# Imports (Custom Stuff)
import ui_settings_jack
# ------------------------------------------------------------------------------------------------------------
# Try Import DBus
try:
import dbus
except:
dbus = None
# ------------------------------------------------------------------------------------------------------------
# Global object
# Module-wide jackdbus handle and "settings changed" flag.  Note: a
# `global` statement at module scope is a no-op; kept for documentation.
global gJackctl, gResetNeeded
gJackctl = None
gResetNeeded = False

# ------------------------------------------------------------------------------------------------------------
# enum jack_timer_type_t (numeric fallback for brokenly-typed clock-source)

JACK_TIMER_SYSTEM_CLOCK  = 0
JACK_TIMER_CYCLE_COUNTER = 1
JACK_TIMER_HPET          = 2

# ------------------------------------------------------------------------------------------------------------
# Set Platform

# getoutput moved from `commands` (py2) to `subprocess` (py3).
if "linux" in platform:
    LINUX = True
    if version_info >= (3, 0):
        from subprocess import getoutput
    else:
        from commands import getoutput
else:
    LINUX = False
# ------------------------------------------------------------------------------------------------------------
# Init DBus
def initBus(bus):
    """Bind gJackctl to the jackdbus Configure interface on *bus*.

    Returns 0 on success, 1 on failure; gJackctl is left as None whenever
    the connection cannot be established.
    """
    global gJackctl
    gJackctl = None
    if not bus:
        return 1
    try:
        controller = bus.get_object("org.jackaudio.service", "/org/jackaudio/Controller")
        gJackctl = dbus.Interface(controller, "org.jackaudio.Configure")
        return 0
    except Exception:  # narrowed from a bare except (kept broad for D-Bus errors)
        gJackctl = None
        return 1
def needsInit():
    """True while initBus() has not yet produced a jackdbus interface."""
    global gJackctl
    return gJackctl is None
def setResetNeeded(yesNo):
    """Record whether driver parameters must be reset before the next save."""
    global gResetNeeded
    gResetNeeded = yesNo
# ------------------------------------------------------------------------------------------------------------
# Helper functions
def getBufferSize():
    """Current period size in frames, or -1 when unavailable."""
    return getDriverParameter("period", -1)

def getSampleRate():
    """Current sample rate in Hz, or -1 when unavailable."""
    return getDriverParameter("rate", -1)

def isRealtime():
    """Whether the engine is configured for realtime mode."""
    return getEngineParameter("realtime", False)

def setBufferSize(bsize):
    """Set the period size; returns True when a new value was written."""
    return setDriverParameter("period", dbus.UInt32(bsize))

def setSampleRate(srate):
    """Set the sample rate; returns True when a new value was written."""
    return setDriverParameter("rate", dbus.UInt32(srate))
# ------------------------------------------------------------------------------------------------------------
# Helper functions (engine)
def engineHasFeature(feature):
    """Return True when the jackdbus engine exposes *feature* as a
    configurable parameter; False when disconnected or on D-Bus failure."""
    if gJackctl is None:
        return False
    try:
        featureList = gJackctl.ReadContainer(["engine"])[1]
    except Exception:  # narrowed from a bare except; treat failure as "none"
        featureList = ()
    return dbus.String(feature) in featureList
def getEngineParameter(parameter, fallback):
    """Read an engine parameter's current value ([2] of the D-Bus reply),
    returning *fallback* when disconnected, unsupported, or on failure."""
    if gJackctl is None or not engineHasFeature(parameter):
        return fallback
    try:
        return gJackctl.GetParameterValue(["engine", parameter])[2]
    except Exception:  # narrowed from a bare except
        return fallback
def setEngineParameter(parameter, value, optional=True):
    """Write an engine parameter.  With optional=True the write is skipped
    (returning False) when the value is unchanged or unreadable; returns
    True only when a new value was actually stored."""
    if not engineHasFeature(parameter):
        return False
    if not optional:
        return bool(gJackctl.SetParameterValue(["engine", parameter], value))
    current = gJackctl.GetParameterValue(["engine", parameter])
    if current is None or value == current[2]:
        return False
    return bool(gJackctl.SetParameterValue(["engine", parameter], value))
# ------------------------------------------------------------------------------------------------------------
# Helper functions (driver)
def driverHasFeature(feature):
    """Return True when the current JACK driver exposes *feature* as a
    configurable parameter; False when disconnected or on D-Bus failure."""
    if gJackctl is None:
        return False
    try:
        featureList = gJackctl.ReadContainer(["driver"])[1]
    except Exception:  # narrowed from a bare except; treat failure as "none"
        featureList = ()
    return dbus.String(feature) in featureList
def getDriverParameter(parameter, fallback):
    """Read a driver parameter's current value ([2] of the D-Bus reply),
    returning *fallback* when disconnected, unsupported, or on failure."""
    if gJackctl is None or not driverHasFeature(parameter):
        return fallback
    try:
        return gJackctl.GetParameterValue(["driver", parameter])[2]
    except Exception:  # narrowed from a bare except
        return fallback
def setDriverParameter(parameter, value, optional=True):
    """Write a driver parameter.  With optional=True the write is skipped
    (returning False) when the value is unchanged or unreadable; returns
    True only when a new value was actually stored.

    ROBUSTNESS FIX: mirrors setEngineParameter's None-guard — the original
    subscripted GetParameterValue() without checking for None, which could
    raise TypeError when jackdbus failed to resolve the parameter.
    """
    if not driverHasFeature(parameter):
        return False
    if not optional:
        return bool(gJackctl.SetParameterValue(["driver", parameter], value))
    current = gJackctl.GetParameterValue(["driver", parameter])
    if current is None or value == current[2]:
        return False
    return bool(gJackctl.SetParameterValue(["driver", parameter], value))
# ------------------------------------------------------------------------------------------------------------
# JACK Settings Dialog
class JackSettingsW(QDialog):
    def __init__(self, parent):
        """Build the settings dialog: verify the jackdbus connection, hide
        unavailable drivers, wire up signals, and load current settings."""
        QDialog.__init__(self, parent)
        self.ui = ui_settings_jack.Ui_JackSettingsW()
        self.ui.setupUi(self)
        # -------------------------------------------------------------
        # Check if we've got valid control interface
        global gJackctl
        if gJackctl is None:
            # NOTE(review): SLOT is not imported in this file (only
            # pyqtSlot is) — this branch would raise NameError; confirm.
            QTimer.singleShot(0, self, SLOT("slot_closeWithError()"))
            return
        # -------------------------------------------------------------
        # Align driver text and hide non available ones
        driverList = gJackctl.ReadContainer(["drivers"])[1]
        fontMetris = QFontMetrics(self.ui.obj_server_driver.font())
        maxWidth = 75
        for i in range(self.ui.obj_server_driver.rowCount()):
            item = self.ui.obj_server_driver.item(i, 0)
            item.setTextAlignment(Qt.AlignCenter)
            itexText = item.text()
            itemWidth = fontMetris.width(itexText)+25
            if itemWidth > maxWidth:
                maxWidth = itemWidth
            if dbus.String(itexText.lower()) not in driverList:
                self.ui.obj_server_driver.hideRow(i)
        self.ui.obj_server_driver.setMinimumWidth(maxWidth)
        self.ui.obj_server_driver.setMaximumWidth(maxWidth)
        # -------------------------------------------------------------
        # Set-up connections
        self.accepted.connect(self.slot_saveJackSettings)
        self.ui.buttonBox.button(QDialogButtonBox.Reset).clicked.connect(self.slot_resetJackSettings)
        self.ui.obj_driver_duplex.clicked.connect(self.slot_checkDuplexSelection)
        self.ui.obj_server_driver.currentCellChanged.connect(self.slot_checkDriverSelection)
        self.ui.obj_driver_capture.currentIndexChanged[int].connect(self.slot_checkALSASelection)
        self.ui.obj_driver_playback.currentIndexChanged[int].connect(self.slot_checkALSASelection)
        # -------------------------------------------------------------
        # Load initial settings
        self.fDriverName = ""
        self.fBrokenServerClockSource = False
        self.checkEngine()
        self.loadServerSettings()
        self.loadDriverSettings(True) # reset because we'll change it below
        # -------------------------------------------------------------
        # Load selected JACK driver
        self.fDriverName = str(gJackctl.GetParameterValue(["engine", "driver"])[2])
        for i in range(self.ui.obj_server_driver.rowCount()):
            if self.ui.obj_server_driver.item(i, 0).text().lower() == self.fDriverName:
                self.ui.obj_server_driver.setCurrentCell(i, 0)
                break
        # Special ALSA check
        self.slot_checkALSASelection()
        # -------------------------------------------------------------
        # Load last GUI settings
        self.loadSettings()
# -----------------------------------------------------------------
# Engine calls
    def checkEngine(self):
        """Enable each server-tab widget only when jackdbus actually
        exposes the matching engine parameter."""
        self.ui.obj_server_realtime.setEnabled(engineHasFeature("realtime"))
        self.ui.obj_server_realtime_priority.setEnabled(engineHasFeature("realtime-priority"))
        self.ui.obj_server_temporary.setEnabled(engineHasFeature("temporary"))
        self.ui.obj_server_verbose.setEnabled(engineHasFeature("verbose"))
        self.ui.obj_server_alias.setEnabled(engineHasFeature("alias"))
        self.ui.obj_server_client_timeout.setEnabled(engineHasFeature("client-timeout"))
        self.ui.obj_server_clock_source.setEnabled(engineHasFeature("clock-source"))
        self.ui.obj_server_port_max.setEnabled(engineHasFeature("port-max"))
        self.ui.obj_server_replace_registry.setEnabled(engineHasFeature("replace-registry"))
        self.ui.obj_server_sync.setEnabled(engineHasFeature("sync"))
        self.ui.obj_server_self_connect_mode.setEnabled(engineHasFeature("self-connect-mode"))
        # Disable clock-source if not on Linux
        if not LINUX:
            self.ui.obj_server_clock_source.setEnabled(False)
# -----------------------------------------------------------------
# Server calls
    def saveServerSettings(self):
        """Push every enabled server-tab widget value to jackdbus via
        setEngineParameter (which skips unchanged values)."""
        # always reset server name
        if engineHasFeature("name"):
            setEngineParameter("name", "default", True)
        if self.ui.obj_server_realtime.isEnabled():
            value = dbus.Boolean(self.ui.obj_server_realtime.isChecked())
            setEngineParameter("realtime", value, True)
        if self.ui.obj_server_realtime_priority.isEnabled():
            value = dbus.Int32(self.ui.obj_server_realtime_priority.value())
            setEngineParameter("realtime-priority", value, True)
        if self.ui.obj_server_temporary.isEnabled():
            value = dbus.Boolean(self.ui.obj_server_temporary.isChecked())
            setEngineParameter("temporary", value, True)
        if self.ui.obj_server_verbose.isEnabled():
            value = dbus.Boolean(self.ui.obj_server_verbose.isChecked())
            setEngineParameter("verbose", value, True)
        if self.ui.obj_server_alias.isEnabled():
            value = dbus.Boolean(self.ui.obj_server_alias.isChecked())
            setEngineParameter("alias", value, True)
        if self.ui.obj_server_client_timeout.isEnabled():
            value = dbus.Int32(int(self.ui.obj_server_client_timeout.currentText()))
            setEngineParameter("client-timeout", value, True)
        # clock-source: older jackdbus expects a UInt32 enum value, newer
        # expects a single-byte character (see fBrokenServerClockSource).
        if self.ui.obj_server_clock_source.isEnabled():
            if self.ui.obj_server_clock_source_system.isChecked():
                if self.fBrokenServerClockSource:
                    value = dbus.UInt32(JACK_TIMER_SYSTEM_CLOCK)
                else:
                    value = dbus.Byte("s".encode("utf-8"))
            elif self.ui.obj_server_clock_source_cycle.isChecked():
                if self.fBrokenServerClockSource:
                    value = dbus.UInt32(JACK_TIMER_CYCLE_COUNTER)
                else:
                    value = dbus.Byte("c".encode("utf-8"))
            elif self.ui.obj_server_clock_source_hpet.isChecked():
                if self.fBrokenServerClockSource:
                    value = dbus.UInt32(JACK_TIMER_HPET)
                else:
                    value = dbus.Byte("h".encode("utf-8"))
            else:
                value = None
                print("JackSettingsW::saveServerSettings() - Cannot save clock-source value")
            if value != None:
                setEngineParameter("clock-source", value, True)
        if self.ui.obj_server_port_max.isEnabled():
            value = dbus.UInt32(int(self.ui.obj_server_port_max.currentText()))
            setEngineParameter("port-max", value, True)
        if self.ui.obj_server_replace_registry.isEnabled():
            value = dbus.Boolean(self.ui.obj_server_replace_registry.isChecked())
            setEngineParameter("replace-registry", value, True)
        if self.ui.obj_server_sync.isEnabled():
            value = dbus.Boolean(self.ui.obj_server_sync.isChecked())
            setEngineParameter("sync", value, True)
        # self-connect-mode is encoded as a single-character byte flag.
        if self.ui.obj_server_self_connect_mode.isEnabled():
            if self.ui.obj_server_self_connect_mode_0.isChecked():
                value = dbus.Byte(" ".encode("utf-8"))
            elif self.ui.obj_server_self_connect_mode_1.isChecked():
                value = dbus.Byte("E".encode("utf-8"))
            elif self.ui.obj_server_self_connect_mode_2.isChecked():
                value = dbus.Byte("e".encode("utf-8"))
            elif self.ui.obj_server_self_connect_mode_3.isChecked():
                value = dbus.Byte("A".encode("utf-8"))
            elif self.ui.obj_server_self_connect_mode_4.isChecked():
                value = dbus.Byte("a".encode("utf-8"))
            else:
                value = None
                print("JackSettingsW::saveServerSettings() - Cannot save self-connect-mode value")
            if value != None:
                setEngineParameter("self-connect-mode", value, True)
    def loadServerSettings(self, reset=False, forceReset=False):
        """Populate the server tab from jackdbus.

        With reset=True the default values ([1] of the D-Bus reply) are
        shown instead of the current ones ([2]); forceReset additionally
        resets every parameter (except "driver") on the server side.
        """
        global gJackctl
        settings = gJackctl.ReadContainer(["engine"])
        for i in range(len(settings[1])):
            attribute = str(settings[1][i])
            if reset:
                valueTry = gJackctl.GetParameterValue(["engine", attribute])
                if valueTry is None:
                    continue
                else:
                    value = valueTry[1]
                if forceReset and attribute != "driver":
                    gJackctl.ResetParameterValue(["engine", attribute])
            else:
                valueTry = gJackctl.GetParameterValue(["engine", attribute])
                if valueTry is None:
                    continue
                else:
                    value = valueTry[2]
            if attribute == "name":
                pass # Don't allow to change this
            elif attribute == "realtime":
                self.ui.obj_server_realtime.setChecked(bool(value))
            elif attribute == "realtime-priority":
                self.ui.obj_server_realtime_priority.setValue(int(value))
            elif attribute == "temporary":
                self.ui.obj_server_temporary.setChecked(bool(value))
            elif attribute == "verbose":
                self.ui.obj_server_verbose.setChecked(bool(value))
            elif attribute == "alias":
                self.ui.obj_server_alias.setChecked(bool(value))
            elif attribute == "client-timeout":
                self.setComboBoxValue(self.ui.obj_server_client_timeout, str(int(value)))
            elif attribute == "clock-source":
                # Two wire formats exist: a one-char string ('s'/'c'/'h')
                # or a broken numeric enum (see fBrokenServerClockSource).
                if len(str(value)) == 1 and not isinstance(value, dbus.UInt32):
                    value = str(value)
                    if value == "c":
                        self.ui.obj_server_clock_source_cycle.setChecked(True)
                    elif value == "h":
                        self.ui.obj_server_clock_source_hpet.setChecked(True)
                    elif value == "s":
                        self.ui.obj_server_clock_source_system.setChecked(True)
                else:
                    value = int(value)
                    self.fBrokenServerClockSource = True
                    if value == JACK_TIMER_SYSTEM_CLOCK:
                        self.ui.obj_server_clock_source_system.setChecked(True)
                    elif value == JACK_TIMER_CYCLE_COUNTER:
                        self.ui.obj_server_clock_source_cycle.setChecked(True)
                    elif value == JACK_TIMER_HPET:
                        self.ui.obj_server_clock_source_hpet.setChecked(True)
                    else:
                        self.ui.obj_server_clock_source.setEnabled(False)
                        print("JackSettingsW::saveServerSettings() - Invalid clock-source value '%s'" % value)
            elif attribute == "port-max":
                self.setComboBoxValue(self.ui.obj_server_port_max, str(int(value)))
            elif attribute == "replace-registry":
                self.ui.obj_server_replace_registry.setChecked(bool(value))
            elif attribute == "sync":
                self.ui.obj_server_sync.setChecked(bool(value))
            elif attribute == "self-connect-mode":
                value = str(value)
                if value == " ":
                    self.ui.obj_server_self_connect_mode_0.setChecked(True)
                elif value == "E":
                    self.ui.obj_server_self_connect_mode_1.setChecked(True)
                elif value == "e":
                    self.ui.obj_server_self_connect_mode_2.setChecked(True)
                elif value == "A":
                    self.ui.obj_server_self_connect_mode_3.setChecked(True)
                elif value == "a":
                    self.ui.obj_server_self_connect_mode_4.setChecked(True)
                else:
                    self.ui.obj_server_self_connect_mode.setEnabled(False)
                    print("JackSettingsW::loadServerSettings() - Invalid self-connect-mode value '%s'" % value)
            elif attribute in ("driver", "slave-drivers"):
                pass
            else:
                print("JackSettingsW::loadServerSettings() - Unimplemented server attribute '%s', value: '%s'" % (attribute, str(value)))
# -----------------------------------------------------------------
# Driver calls
# resetIfNeeded: fix alsa parameter re-order bug in JACK 1.9.8 (reset/remove non-used values)
    def saveDriverSettings(self, resetIfNeeded):
        """Push every enabled driver widget's value to jackdbus.

        Disabled widgets are skipped; when ``resetIfNeeded`` is True (and the
        global gResetNeeded flag is set) the device/capture/playback
        parameters of disabled widgets are reset on the server instead —
        the JACK 1.9.8 ALSA parameter re-order workaround mentioned above.
        """
        global gJackctl, gResetNeeded
        # Only honour the reset request when a reset was actually flagged.
        if resetIfNeeded and not gResetNeeded:
            resetIfNeeded = False
        if self.ui.obj_driver_device.isEnabled():
            # Combo text is "name [description]"; only the name part is sent.
            value = dbus.String(self.ui.obj_driver_device.currentText().split(" [")[0])
            # Skip the dbus write when the value did not change.
            if value != gJackctl.GetParameterValue(["driver", "device"])[2]:
                gJackctl.SetParameterValue(["driver", "device"], value)
        elif resetIfNeeded:
            gJackctl.ResetParameterValue(["driver", "device"])
        if self.ui.obj_driver_capture.isEnabled():
            # The capture parameter type depends on the driver:
            # alsa -> device string, dummy -> port count, firewire -> on/off.
            if self.fDriverName == "alsa":
                value = dbus.String(self.ui.obj_driver_capture.currentText().split(" ")[0])
            elif self.fDriverName == "dummy":
                value = dbus.UInt32(int(self.ui.obj_driver_capture.currentText()))
            elif self.fDriverName == "firewire":
                value = dbus.Boolean(self.ui.obj_driver_capture.currentIndex() == 1)
            else:
                value = None
                print("JackSettingsW::saveDriverSettings() - Cannot save capture value")
            if value != None:
                setDriverParameter("capture", value, True)
        elif resetIfNeeded:
            gJackctl.ResetParameterValue(["driver", "capture"])
        if self.ui.obj_driver_playback.isEnabled():
            # Same per-driver typing as 'capture' above.
            if self.fDriverName == "alsa":
                value = dbus.String(self.ui.obj_driver_playback.currentText().split(" ")[0])
            elif self.fDriverName == "dummy":
                value = dbus.UInt32(int(self.ui.obj_driver_playback.currentText()))
            elif self.fDriverName == "firewire":
                value = dbus.Boolean(self.ui.obj_driver_playback.currentIndex() == 1)
            else:
                value = None
                print("JackSettingsW::saveDriverSettings() - Cannot save playback value")
            if value != None:
                setDriverParameter("playback", value, True)
        elif resetIfNeeded:
            gJackctl.ResetParameterValue(["driver", "playback"])
        if self.ui.obj_driver_rate.isEnabled():
            value = dbus.UInt32(int(self.ui.obj_driver_rate.currentText()))
            setDriverParameter("rate", value, True)
        if self.ui.obj_driver_period.isEnabled():
            value = dbus.UInt32(int(self.ui.obj_driver_period.currentText()))
            setDriverParameter("period", value, True)
        if self.ui.obj_driver_nperiods.isEnabled():
            value = dbus.UInt32(self.ui.obj_driver_nperiods.value())
            setDriverParameter("nperiods", value, True)
        if self.ui.obj_driver_hwmon.isEnabled():
            value = dbus.Boolean(self.ui.obj_driver_hwmon.isChecked())
            setDriverParameter("hwmon", value, True)
        if self.ui.obj_driver_hwmeter.isEnabled():
            value = dbus.Boolean(self.ui.obj_driver_hwmeter.isChecked())
            setDriverParameter("hwmeter", value, True)
        if self.ui.obj_driver_duplex.isEnabled():
            value = dbus.Boolean(self.ui.obj_driver_duplex.isChecked())
            setDriverParameter("duplex", value, True)
        if self.ui.obj_driver_hw_alias.isEnabled():
            value = dbus.Boolean(self.ui.obj_driver_hw_alias.isChecked())
            setDriverParameter("hw-alias", value, True)
        if self.ui.obj_driver_softmode.isEnabled():
            value = dbus.Boolean(self.ui.obj_driver_softmode.isChecked())
            setDriverParameter("softmode", value, True)
        if self.ui.obj_driver_monitor.isEnabled():
            value = dbus.Boolean(self.ui.obj_driver_monitor.isChecked())
            setDriverParameter("monitor", value, True)
        if self.ui.obj_driver_dither.isEnabled():
            # Dither mode is encoded as a single byte:
            # n = none, r = rectangular, s = shaped, t = triangular.
            if self.ui.obj_driver_dither.currentIndex() == 0:
                value = dbus.Byte("n".encode("utf-8"))
            elif self.ui.obj_driver_dither.currentIndex() == 1:
                value = dbus.Byte("r".encode("utf-8"))
            elif self.ui.obj_driver_dither.currentIndex() == 2:
                value = dbus.Byte("s".encode("utf-8"))
            elif self.ui.obj_driver_dither.currentIndex() == 3:
                value = dbus.Byte("t".encode("utf-8"))
            else:
                value = None
                print("JackSettingsW::saveDriverSettings() - Cannot save dither value")
            if value != None:
                setDriverParameter("dither", value, True)
        if self.ui.obj_driver_inchannels.isEnabled():
            value = dbus.UInt32(self.ui.obj_driver_inchannels.value())
            setDriverParameter("inchannels", value, True)
        if self.ui.obj_driver_outchannels.isEnabled():
            value = dbus.UInt32(self.ui.obj_driver_outchannels.value())
            setDriverParameter("outchannels", value, True)
        if self.ui.obj_driver_shorts.isEnabled():
            value = dbus.Boolean(self.ui.obj_driver_shorts.isChecked())
            setDriverParameter("shorts", value, True)
        if self.ui.obj_driver_input_latency.isEnabled():
            value = dbus.UInt32(self.ui.obj_driver_input_latency.value())
            setDriverParameter("input-latency", value, True)
        if self.ui.obj_driver_output_latency.isEnabled():
            value = dbus.UInt32(self.ui.obj_driver_output_latency.value())
            setDriverParameter("output-latency", value, True)
        if self.ui.obj_driver_midi_driver.isEnabled():
            if self.ui.obj_driver_midi_driver.currentIndex() == 0:
                value = dbus.String("none")
            elif self.ui.obj_driver_midi_driver.currentIndex() == 1:
                value = dbus.String("seq")
            elif self.ui.obj_driver_midi_driver.currentIndex() == 2:
                value = dbus.String("raw")
            else:
                value = None
                print("JackSettingsW::saveDriverSettings() - Cannot save midi-driver value")
            if value != None:
                # Newer drivers expose the parameter as "midi", older ones
                # as "midi-driver"; probe the driver's feature set.
                if driverHasFeature("midi"):
                    setDriverParameter("midi", value, True)
                else:
                    setDriverParameter("midi-driver", value, True)
        if self.ui.obj_driver_wait.isEnabled():
            value = dbus.UInt32(self.ui.obj_driver_wait.value())
            setDriverParameter("wait", value, True)
        if self.ui.obj_driver_verbose.isEnabled():
            value = dbus.UInt32(self.ui.obj_driver_verbose.value())
            setDriverParameter("verbose", value, True)
        if self.ui.obj_driver_snoop.isEnabled():
            value = dbus.Boolean(self.ui.obj_driver_snoop.isChecked())
            setDriverParameter("snoop", value, True)
        if self.ui.obj_driver_channels.isEnabled():
            # Note: 'channels' is signed (Int32) unlike the other counters.
            value = dbus.Int32(self.ui.obj_driver_channels.value())
            setDriverParameter("channels", value, True)
    def loadDriverSettings(self, reset=False, forceReset=False):
        """Populate the driver widgets from the jackdbus 'driver' container.

        reset: load index 1 of GetParameterValue instead of index 2 —
               presumably the parameter's default value (matches the reset
               semantics; verify against the jackdbus API).
        forceReset: additionally reset the stored value on the server.
        """
        global gJackctl
        settings = gJackctl.ReadContainer(["driver"])
        for i in range(len(settings[1])):
            attribute = str(settings[1][i])
            if reset:
                value = gJackctl.GetParameterValue(["driver", attribute])[1]
                if forceReset:
                    gJackctl.ResetParameterValue(["driver", attribute])
            else:
                # Index 2 is the currently configured value.
                value = gJackctl.GetParameterValue(["driver", attribute])[2]
            if attribute == "device":
                self.setComboBoxValue(self.ui.obj_driver_device, str(value), True)
            elif attribute == "capture":
                # Widget semantics depend on the driver (see saveDriverSettings).
                if self.fDriverName == "firewire":
                    self.ui.obj_driver_capture.setCurrentIndex(1 if bool(value) else 0)
                elif self.fDriverName == "dummy":
                    self.setComboBoxValue(self.ui.obj_driver_capture, str(int(value)), True)
                else:
                    self.setComboBoxValue(self.ui.obj_driver_capture, str(value), True)
            elif attribute == "playback":
                if self.fDriverName == "firewire":
                    self.ui.obj_driver_playback.setCurrentIndex(1 if bool(value) else 0)
                elif self.fDriverName == "dummy":
                    self.setComboBoxValue(self.ui.obj_driver_playback, str(int(value)), True)
                else:
                    self.setComboBoxValue(self.ui.obj_driver_playback, str(value), True)
            elif attribute == "rate":
                self.setComboBoxValue(self.ui.obj_driver_rate, str(int(value)))
            elif attribute == "period":
                self.setComboBoxValue(self.ui.obj_driver_period, str(int(value)))
            elif attribute == "nperiods":
                self.ui.obj_driver_nperiods.setValue(int(value))
            elif attribute == "hwmon":
                self.ui.obj_driver_hwmon.setChecked(bool(value))
            elif attribute == "hwmeter":
                self.ui.obj_driver_hwmeter.setChecked(bool(value))
            elif attribute == "duplex":
                self.ui.obj_driver_duplex.setChecked(bool(value))
            elif attribute == "hw-alias":
                self.ui.obj_driver_hw_alias.setChecked(bool(value))
            elif attribute == "softmode":
                self.ui.obj_driver_softmode.setChecked(bool(value))
            elif attribute == "monitor":
                self.ui.obj_driver_monitor.setChecked(bool(value))
            elif attribute == "dither":
                # Single-character mode: n=none, r=rectangular, s=shaped, t=triangular.
                value = str(value)
                if value == "n":
                    self.ui.obj_driver_dither.setCurrentIndex(0)
                elif value == "r":
                    self.ui.obj_driver_dither.setCurrentIndex(1)
                elif value == "s":
                    self.ui.obj_driver_dither.setCurrentIndex(2)
                elif value == "t":
                    self.ui.obj_driver_dither.setCurrentIndex(3)
                else:
                    # Unknown value: disable the widget rather than guess.
                    self.ui.obj_driver_dither.setEnabled(False)
                    print("JackSettingsW::loadDriverSettings() - Invalid dither value '%s'" % value)
            elif attribute == "inchannels":
                self.ui.obj_driver_inchannels.setValue(int(value))
            elif attribute == "outchannels":
                self.ui.obj_driver_outchannels.setValue(int(value))
            elif attribute == "shorts":
                self.ui.obj_driver_shorts.setChecked(bool(value))
            elif attribute == "input-latency":
                self.ui.obj_driver_input_latency.setValue(int(value))
            elif attribute == "output-latency":
                self.ui.obj_driver_output_latency.setValue(int(value))
            elif attribute in ("midi", "midi-driver"):
                # "midi" and "midi-driver" are the same setting under
                # different names depending on driver version.
                value = str(value)
                if value == "none":
                    self.ui.obj_driver_midi_driver.setCurrentIndex(0)
                elif value == "seq":
                    self.ui.obj_driver_midi_driver.setCurrentIndex(1)
                elif value == "raw":
                    self.ui.obj_driver_midi_driver.setCurrentIndex(2)
                else:
                    self.ui.obj_driver_midi_driver.setEnabled(False)
                    print("JackSettingsW::loadDriverSettings() - Invalid midi-driver value '%s'" % value)
            elif attribute == "wait":
                self.ui.obj_driver_wait.setValue(int(value))
            elif attribute == "verbose":
                self.ui.obj_driver_verbose.setValue(int(value))
            elif attribute == "snoop":
                self.ui.obj_driver_snoop.setChecked(bool(value))
            elif attribute == "channels":
                self.ui.obj_driver_channels.setValue(int(value))
            else:
                print("JackSettingsW::loadDriverSettings() - Unimplemented driver attribute '%s', value: '%s'" % (attribute, str(value)))
# -----------------------------------------------------------------
# Helper functions
def getAlsaDeviceList(self, playback=True):
alsaDeviceList = []
executable = 'aplay' if playback else 'arecord'
aplay_out = getoutput("env LANG=C LC_ALL=C {} -l".format(executable)).split("\n")
for line in aplay_out:
line = line.strip()
if line.startswith("card "):
cardInfo = line.split(", ", 1)[0].split(": ")
cardIndex = cardInfo[0].replace("card ", "")
cardName = cardInfo[1].split(" [")[0]
deviceInfo = line.split(", ", 1)[1].split(": ")
deviceIndex = deviceInfo[0].replace("device ", "")
deviceName = deviceInfo[1].split(" [")[0]
if cardName != "Loopback":
fullName = "hw:%s,%s [%s]" % (cardName, deviceIndex, deviceName)
alsaDeviceList.append(fullName)
return alsaDeviceList
def setComboBoxValue(self, box, text, split=False):
for i in range(box.count()):
if box.itemText(i) == text or (box.itemText(i).split(" ")[0] == text and split):
box.setCurrentIndex(i)
break
else:
if text:
box.addItem(text)
box.setCurrentIndex(box.count() - 1)
# -----------------------------------------------------------------
# Qt SLOT calls
@pyqtSlot(int)
def slot_checkALSASelection(self, ignored=0):
if self.fDriverName == "alsa":
check = bool(self.ui.obj_driver_duplex.isChecked() and (self.ui.obj_driver_capture.currentIndex() > 0 or self.ui.obj_driver_playback.currentIndex() > 0))
self.ui.obj_driver_device.setEnabled(not check)
@pyqtSlot(bool)
def slot_checkDuplexSelection(self, active):
if driverHasFeature("duplex"):
self.ui.obj_driver_capture.setEnabled(active)
self.ui.obj_driver_capture_label.setEnabled(active)
self.ui.obj_driver_playback.setEnabled(active)
self.ui.obj_driver_playback_label.setEnabled(active)
#self.ui.obj_driver_inchannels.setEnabled(active)
#self.ui.obj_driver_inchannels_label.setEnabled(active)
#self.ui.obj_driver_input_latency.setEnabled(active)
#self.ui.obj_driver_input_latency_label.setEnabled(active)
self.slot_checkALSASelection()
    @pyqtSlot(int)
    def slot_checkDriverSelection(self, row):
        """React to a new driver being selected in the driver table.

        Saves the previous driver's settings, tells jackdbus about the new
        driver, repopulates the device/capture/playback combos, loads the new
        driver's current values and enables exactly the widgets the driver
        supports.
        """
        global gJackctl
        # Save previous settings
        self.saveDriverSettings(False)
        # Set new Jack driver
        self.fDriverName = dbus.String(self.ui.obj_server_driver.item(row, 0).text().lower())
        gJackctl.SetParameterValue(["engine", "driver"], self.fDriverName)
        # Add device list
        self.ui.obj_driver_device.clear()
        if driverHasFeature("device"):
            if LINUX and self.fDriverName == "alsa":
                # On Linux/ALSA, parse aplay output instead of asking jackdbus.
                dev_list = self.getAlsaDeviceList()
                for dev in dev_list:
                    self.ui.obj_driver_device.addItem(dev)
            else:
                # Fall back to the constraint list reported by jackdbus.
                dev_list = gJackctl.GetParameterConstraint(["driver", "device"])[3]
                for i in range(len(dev_list)):
                    self.ui.obj_driver_device.addItem(dev_list[i][0] + " [%s]" % str(dev_list[i][1]))
        # Custom 'playback' and 'capture' values
        self.ui.obj_driver_capture.clear()
        self.ui.obj_driver_playback.clear()
        if self.fDriverName == "alsa":
            self.ui.obj_driver_capture.addItem("none")
            self.ui.obj_driver_playback.addItem("none")
            if LINUX:
                dev_list_playback = self.getAlsaDeviceList(playback=True)
                dev_list_record = self.getAlsaDeviceList(playback=False)
                for dev in dev_list_playback:
                    self.ui.obj_driver_playback.addItem(dev)
                for dev in dev_list_record:
                    self.ui.obj_driver_capture.addItem(dev)
            else:
                dev_list = gJackctl.GetParameterConstraint(["driver", "device"])[3]
                for i in range(len(dev_list)):
                    self.ui.obj_driver_capture.addItem(dev_list[i][0] + " [" + dev_list[i][1] + "]")
                    self.ui.obj_driver_playback.addItem(dev_list[i][0] + " [" + dev_list[i][1] + "]")
        elif self.fDriverName == "dummy":
            # Dummy driver takes a port count: even values 2..32.
            for i in range(16):
                self.ui.obj_driver_capture.addItem("%i" % int((i * 2) + 2))
                self.ui.obj_driver_playback.addItem("%i" % int((i * 2) + 2))
        elif self.fDriverName == "firewire":
            # FireWire capture/playback is a simple on/off choice.
            self.ui.obj_driver_capture.addItem("no")
            self.ui.obj_driver_capture.addItem("yes")
            self.ui.obj_driver_playback.addItem("no")
            self.ui.obj_driver_playback.addItem("yes")
        elif driverHasFeature("playback") or driverHasFeature("capture"):
            print("JackSettingsW::slot_checkDriverSelection() - Custom playback/capture for driver '%s' not implemented yet" % self.fDriverName)
        # Load Driver Settings
        self.loadDriverSettings()
        # Enable widgets according to driver
        self.ui.obj_driver_capture.setEnabled(driverHasFeature("capture"))
        self.ui.obj_driver_capture_label.setEnabled(driverHasFeature("capture"))
        self.ui.obj_driver_playback.setEnabled(driverHasFeature("playback"))
        self.ui.obj_driver_playback_label.setEnabled(driverHasFeature("playback"))
        self.ui.obj_driver_device.setEnabled(driverHasFeature("device"))
        self.ui.obj_driver_device_label.setEnabled(driverHasFeature("device"))
        self.ui.obj_driver_rate.setEnabled(driverHasFeature("rate"))
        self.ui.obj_driver_rate_label.setEnabled(driverHasFeature("rate"))
        self.ui.obj_driver_period.setEnabled(driverHasFeature("period"))
        self.ui.obj_driver_period_label.setEnabled(driverHasFeature("period"))
        self.ui.obj_driver_nperiods.setEnabled(driverHasFeature("nperiods"))
        self.ui.obj_driver_nperiods_label.setEnabled(driverHasFeature("nperiods"))
        self.ui.obj_driver_hwmon.setEnabled(driverHasFeature("hwmon"))
        self.ui.obj_driver_hwmeter.setEnabled(driverHasFeature("hwmeter"))
        self.ui.obj_driver_duplex.setEnabled(driverHasFeature("duplex"))
        self.ui.obj_driver_hw_alias.setEnabled(driverHasFeature("hw-alias"))
        self.ui.obj_driver_softmode.setEnabled(driverHasFeature("softmode"))
        self.ui.obj_driver_monitor.setEnabled(driverHasFeature("monitor"))
        self.ui.obj_driver_dither.setEnabled(driverHasFeature("dither"))
        self.ui.obj_driver_dither_label.setEnabled(driverHasFeature("dither"))
        self.ui.obj_driver_inchannels.setEnabled(driverHasFeature("inchannels"))
        self.ui.obj_driver_inchannels_label.setEnabled(driverHasFeature("inchannels"))
        self.ui.obj_driver_outchannels.setEnabled(driverHasFeature("outchannels"))
        self.ui.obj_driver_outchannels_label.setEnabled(driverHasFeature("outchannels"))
        self.ui.obj_driver_shorts.setEnabled(driverHasFeature("shorts"))
        self.ui.obj_driver_input_latency.setEnabled(driverHasFeature("input-latency"))
        self.ui.obj_driver_input_latency_label.setEnabled(driverHasFeature("input-latency"))
        self.ui.obj_driver_output_latency.setEnabled(driverHasFeature("output-latency"))
        self.ui.obj_driver_output_latency_label.setEnabled(driverHasFeature("output-latency"))
        self.ui.obj_driver_midi_driver.setEnabled(driverHasFeature("midi") or driverHasFeature("midi-driver"))
        self.ui.obj_driver_midi_driver_label.setEnabled(driverHasFeature("midi") or driverHasFeature("midi-driver"))
        self.ui.obj_driver_wait.setEnabled(driverHasFeature("wait"))
        self.ui.obj_driver_wait_label.setEnabled(driverHasFeature("wait"))
        self.ui.obj_driver_verbose.setEnabled(driverHasFeature("verbose"))
        self.ui.obj_driver_verbose_label.setEnabled(driverHasFeature("verbose"))
        self.ui.obj_driver_snoop.setEnabled(driverHasFeature("snoop"))
        self.ui.obj_driver_channels.setEnabled(driverHasFeature("channels"))
        self.ui.obj_driver_channels_label.setEnabled(driverHasFeature("channels"))
        # Misc stuff
        # Switch the misc toolbox page and relabel capture/playback per driver.
        if self.ui.obj_server_driver.item(row, 0).text() == "ALSA":
            self.ui.toolbox_driver_misc.setCurrentIndex(1)
            self.ui.obj_driver_capture_label.setText(self.tr("Input Device:"))
            self.ui.obj_driver_playback_label.setText(self.tr("Output Device:"))
        elif self.ui.obj_server_driver.item(row, 0).text() == "Dummy":
            self.ui.toolbox_driver_misc.setCurrentIndex(2)
            self.ui.obj_driver_capture_label.setText(self.tr("Input Ports:"))
            self.ui.obj_driver_playback_label.setText(self.tr("Output Ports:"))
        elif self.ui.obj_server_driver.item(row, 0).text() == "FireWire":
            self.ui.toolbox_driver_misc.setCurrentIndex(3)
            self.ui.obj_driver_capture_label.setText(self.tr("Capture Ports:"))
            self.ui.obj_driver_playback_label.setText(self.tr("Playback Ports:"))
        elif self.ui.obj_server_driver.item(row, 0).text() == "Loopback":
            self.ui.toolbox_driver_misc.setCurrentIndex(4)
        else:
            self.ui.toolbox_driver_misc.setCurrentIndex(0)
        self.slot_checkDuplexSelection(self.ui.obj_driver_duplex.isChecked())
    @pyqtSlot()
    def slot_saveJackSettings(self):
        """Persist both the engine and the driver parameter pages to jackdbus."""
        self.saveServerSettings()
        # True: allow resetting stale driver parameters (JACK 1.9.8 workaround).
        self.saveDriverSettings(True)
@pyqtSlot()
def slot_resetJackSettings(self):
if self.ui.tabWidget.currentIndex() == 0:
self.loadServerSettings(True, True)
elif self.ui.tabWidget.currentIndex() == 1:
self.loadDriverSettings(True, True)
@pyqtSlot()
def slot_closeWithError(self):
QMessageBox.critical(self, self.tr("Error"), self.tr("jackdbus is not available!\nIt's not possible to configure JACK at this point."))
self.close()
def saveSettings(self):
settings = QSettings("Cadence", "JackSettings")
settings.setValue("Geometry", self.saveGeometry())
settings.setValue("CurrentTab", self.ui.tabWidget.currentIndex())
def loadSettings(self):
settings = QSettings("Cadence", "JackSettings")
self.restoreGeometry(settings.value("Geometry", b""))
self.ui.tabWidget.setCurrentIndex(settings.value("CurrentTab", 0, type=int))
    def closeEvent(self, event):
        """Qt close handler: persist geometry/tab state before the dialog goes away."""
        self.saveSettings()
        QDialog.closeEvent(self, event)
    def done(self, r):
        """QDialog result handler; ensure the window actually closes afterwards."""
        QDialog.done(self, r)
        self.close()
# ------------------------------------------------------------------------------------------------------------
# Allow to use this as a standalone app
if __name__ == '__main__':
    # Additional imports
    import resources_rc
    from sys import argv as sys_argv, exit as sys_exit
    from PyQt5.QtGui import QIcon
    from PyQt5.QtWidgets import QApplication
    # App initialization
    app = QApplication(sys_argv)
    # Connect to DBus
    if dbus:
        # NOTE(review): a truthy return from initBus() apparently signals a
        # failure to connect to jackdbus — confirm against the shared module.
        if initBus(dbus.SessionBus()):
            QMessageBox.critical(None, app.translate("JackSettingsW", "Error"), app.translate("JackSettingsW",
                                 "jackdbus is not available!\n"
                                 "Is not possible to configure JACK at this point."))
            sys_exit(1)
    else:
        # The dbus python bindings themselves are missing.
        QMessageBox.critical(None, app.translate("JackSettingsW", "Error"),
                             app.translate("JackSettingsW", "DBus is not available, cannot continue."))
        sys_exit(1)
    # Show GUI
    gui = JackSettingsW(None)
    gui.setWindowIcon(QIcon(":/scalable/jack.svg"))
    gui.show()
    # App-Loop
    sys_exit(app.exec_())
| falkTX/Cadence | src/jacksettings.py | jacksettings.py | py | 41,004 | python | en | code | 361 | github-code | 6 | [
{
"api_name": "sys.platform",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "sys.version_info",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "dbus.Interface",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "dbus.UInt32",
"line_... |
71923388669 | #!/usr/bin/env python
import argparse
import os
import shlex

# Dump a (possibly remote) PostgreSQL database into a date-stamped gzip file.
parser = argparse.ArgumentParser()
parser.add_argument("-u", required=True, dest="usuario", help="Usuario de PostgreSQL")
parser.add_argument("-H", default="localhost", dest="host", help="IP del equipo remoto")
parser.add_argument("-p", default="5432", dest="puerto", help="Puerto del equipo remoto")
parser.add_argument("-db", required=True, dest="database", help="Nombre de la base de datos")
args = parser.parse_args()

# The previous truthiness checks were redundant: -u/-db are required and
# -H/-p have non-empty defaults. Quote every user-supplied value so it
# cannot inject shell syntax into the os.system() command below.
usuario = "-U " + shlex.quote(args.usuario)
host = "-h " + shlex.quote(args.host)
puerto = "-p " + shlex.quote(args.puerto)
database = shlex.quote(args.database)

# The shell is still needed for the pipe and the $(date ...) substitution.
os.system("pg_dump "+host+" "+usuario+" "+puerto+" "+database+" | gzip > "+database+"_$(date +%Y-%m-%d).sql.gz")
{
"api_name": "argparse.ArgumentParser",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 15,
"usage_type": "call"
}
] |
39172128523 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from flask import json, make_response
from flask_restplus import Api, Resource
from http import HTTPStatus
from .v1.api import api as api_v1
from .v2.api import api as api_v2
from .health import api as api_health
# Root Api object: aggregates every versioned namespace plus the health check.
api = Api()
api.add_namespace(api_v1)
api.add_namespace(api_v2)
api.add_namespace(api_health)
@api.route('/swagger')
class Swagger(Resource):
    """Expose the generated Swagger/OpenAPI schema as a JSON document."""

    @api.doc(
        id='Get swagger JSON',
        responses={200: 'OK'},
        description='''
        Retrieve the swagger JSON object
        '''
    )
    def get(self):
        # Serialize the schema and wrap it in an explicit JSON response.
        payload = json.dumps(api.__schema__, indent=2)
        response = make_response(payload, HTTPStatus.OK)
        response.headers['Content-Type'] = 'application/json'
        return response
@api.route('/postman')
class Postman(Resource):
    """Expose the API as a Postman collection.

    Renamed from ``Swagger``: the module previously defined two classes with
    that name, the second silently shadowing the first at module level.
    """

    @api.doc(
        id='Get Postman representation',
        responses={200: 'OK'},
        description='''
        Retrieve the Postman JSON object
        '''
    )
    def get(self):
        # urlvars/swagger include URL variables and the swagger schema
        # in the exported collection.
        data = api.as_postman(urlvars=True, swagger=True)
        r = json.dumps(data, indent=2)
        r = make_response(r, HTTPStatus.OK)
        r.headers['Content-Type'] = 'application/json'
        return r
| shalomb/terrestrial | apis/__init__.py | __init__.py | py | 1,122 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask_restplus.Api",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "v1.api.api",
"line_number": 16,
"usage_type": "argument"
},
{
"api_name": "v2.api.api",
"line_number": 17,
"usage_type": "argument"
},
{
"api_name": "health.api",
"li... |
33422541294 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, models, transforms
from torchvision.utils import save_image
from torch.utils.data import Dataset
from torchvision import datasets, models, transforms
from Regressor_and_loss import disparityregression
# input = (B,3,256,512)
class baseline_model(nn.Module):
    """Stereo disparity network: shared 2D feature extractor, concatenation
    cost volume, 3D CNN regularisation and soft-argmin disparity regression."""

    def __init__(self, B, C, H, W, newmodel):
        super(baseline_model, self).__init__()
        self.B = B              # batch size
        self.C = C              # feature channels from the shared extractor
                                # (the 3D CNN below expects 2*C == 64 — confirm with caller)
        self.H = H              # input image height
        self.W = W              # input image width
        self.device = "cuda"
        self.max_disp = 192     # maximum disparity considered
        self.cnn_Shared = newmodel  # shared-weight 2D feature extractor
        # 3D CNN that regularises the cost volume down to a single channel.
        self.cnn_3dims = nn.Sequential(
            nn.Conv3d(64, 64, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)),
            nn.BatchNorm3d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
            nn.ReLU(),
            nn.Conv3d(64, 64, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)),
            nn.BatchNorm3d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
            nn.ReLU(),
            nn.Conv3d(64, 128, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)),
            nn.BatchNorm3d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
            nn.ReLU(),
            nn.Conv3d(128, 128, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)),
            nn.BatchNorm3d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
            nn.ReLU(),
            nn.Conv3d(128, 32, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)),
            nn.BatchNorm3d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
            nn.ReLU(),
            nn.Conv3d(32, 32, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)),
            nn.BatchNorm3d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
            nn.ReLU(),
            nn.Conv3d(32, 1, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)),
        )

    def concat_for_3D(self, left_feats, right_feats):
        """Build a (B, 2C, max_disp/4, H/4, W/4) concatenation cost volume.

        Fix: allocate with torch.zeros instead of torch.Tensor so the
        columns the shifted right feature map cannot reach (x < disparity)
        hold zeros rather than uninitialised memory.
        """
        cost = torch.zeros(self.B, self.C * 2, self.max_disp // 4, self.H // 4, self.W // 4,
                           dtype=left_feats.dtype, device=self.device)
        for i in range(self.max_disp // 4):
            if (i == 0):
                cost[:, :self.C, i, :, :] = left_feats
                cost[:, self.C:, i, :, :] = right_feats
            else:
                # Shift the right features by the candidate disparity i.
                cost[:, :self.C, i, :, i:] = left_feats[:, :, :, i:]
                cost[:, self.C:, i, :, i:] = right_feats[:, :, :, :-i]
        return cost

    def forward(self, x_left, x_right):
        """Return the regressed disparity map for a stereo pair."""
        # Shared-weight feature extraction for both views.
        im_left = self.cnn_Shared(x_left)
        im_right = self.cnn_Shared(x_right)
        cost_vol = self.concat_for_3D(im_left, im_right)
        score_volume = self.cnn_3dims(cost_vol)
        # Upsample the (disparity, H, W) volume back to full resolution.
        m = nn.Upsample(scale_factor=4, mode='trilinear')
        score_volume = m(score_volume)
        # Collapse the singleton channel into the disparity dimension.
        # (see https://stackoverflow.com/questions/65465646)
        y = score_volume.flatten(start_dim=1, end_dim=2)
        prob = F.softmax(y, 1)
        # Soft-argmin over disparities (https://github.com/jzhangbs/DSM/blob/master/model.py).
        prob = disparityregression(self.max_disp)(prob)
        return prob
def create_mod():
    """Build the shared 2D feature extractor from a pretrained ResNet-18.

    The stem and the inner layer2/layer3 strides are set to 1 so the trunk
    preserves resolution; an extra stride-2 head then reduces the output to
    32 channels. (See torchvision's resnet.py for the base architecture.)
    """
    # Start from torchvision's pretrained ResNet-18.
    resnet = models.resnet18(pretrained=True)
    # Full-resolution stem: stride 2 -> 1.
    resnet.conv1 = nn.Conv2d(3, 64, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3), bias=False)

    # Keep only the convolutional trunk (drop avgpool / fc).
    extractor = torch.nn.Sequential(*(list(resnet.children())[0:8]))

    # Remove the down-sampling strides in layer2/layer3 while keeping the
    # channel growth 64 -> 128 -> 256.
    extractor[5][0].conv1 = nn.Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
    extractor[5][0].downsample = nn.Conv2d(64, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
    extractor[6][0].conv1 = nn.Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
    extractor[6][0].downsample[0] = nn.Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)

    # Drop layer4 entirely.
    extractor = extractor[:7]

    # Extra head: one stride-2 conv, then channel reduction down to 32.
    extractor.newconv1 = nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
    extractor.newbn1 = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    extractor.newconv2 = nn.Conv2d(256, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
    extractor.newbn2 = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    extractor.newconv3 = nn.Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
    return extractor
{
"api_name": "torch.nn.Module",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"lin... |
41705912087 | import newspaper
# Declare the url of the article to scrape
url = "https://ktechhub.com/tutorials/completely-deploy-your-laravel-application-on-ubuntu-linux-server-60a51098a8bf2"

# Extract web content. Use a separate name for the Article object instead of
# reusing (shadowing) the 'url' string, and drop the pointless "%s" % url
# round-trip formatting.
article = newspaper.Article(url=url, language='en')
article.download()
article.parse()

# Display scraped data
print(article.text)
| Kalkulus1/python_codes | scrape_any_web_article.py | scrape_any_web_article.py | py | 302 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "newspaper.Article",
"line_number": 7,
"usage_type": "call"
}
] |
6176179693 | import os, time, sys
import matplotlib.pyplot as plt
import itertools
import pickle
import imageio
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
import torchvision.utils as vutils
from torchvision.utils import save_image
# Training hyper-parameters
EPOCH=20
LR_G=0.0002        # generator learning rate
LR_D=0.0002        # discriminator learning rate
IMG_SIZE=28        # MNIST images are 28x28
LATENT_DIM=100     # size of the generator noise vector
BATCHSIZE=32
DATA_ROOT='./mnist'
DOWNLOAD=False     # set True on the first run to fetch MNIST
# MNIST training loader; ToTensor yields (1, 28, 28) float images in [0, 1].
dataloader=data.DataLoader(
    datasets.MNIST(
        root=DATA_ROOT,
        train=True,
        transform=transforms.ToTensor(),
        download=DOWNLOAD
    ),
    batch_size=BATCHSIZE,
    shuffle=True
)
class Generator(nn.Module):
    """Fully-connected GAN generator: latent vector -> (1, img_size, img_size) image in [-1, 1].

    Generalized to accept the latent and image sizes as parameters; the
    defaults match the module-level LATENT_DIM / IMG_SIZE constants, so
    ``Generator()`` keeps the original architecture.
    """

    def __init__(self, latent_dim=100, img_size=28):
        super(Generator, self).__init__()
        self.latent_dim = latent_dim
        self.img_size = img_size
        self.layer1 = nn.Sequential(
            nn.Linear(latent_dim, 128),
            nn.LeakyReLU(0.2)
        )
        self.layer2 = nn.Sequential(
            nn.Linear(128, 256),
            nn.LeakyReLU(0.2)
        )
        self.layer3 = nn.Sequential(
            nn.Linear(256, 512),
            nn.LeakyReLU(0.2)
        )
        # Tanh keeps the output pixels in [-1, 1].
        self.layer4 = nn.Sequential(
            nn.Linear(512, img_size * img_size),
            nn.Tanh()
        )

    def forward(self, x):
        """Map a batch of latent vectors to images.

        Args:
            x: tensor whose flattened trailing dimensions equal latent_dim.
        Returns:
            Tensor of shape (batch, 1, img_size, img_size).
        """
        x = x.view(x.size(0), -1)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = x.view(x.size(0), 1, self.img_size, self.img_size)
        return x
class Discrimator(nn.Module):
    """Fully-connected GAN discriminator: image -> probability of being real.

    (Class name kept as-is for compatibility with existing call sites,
    although 'Discriminator' is the conventional spelling.) Generalized to
    accept the image size; the default matches the module-level IMG_SIZE,
    so ``Discrimator()`` keeps the original architecture.
    """

    def __init__(self, img_size=28):
        super(Discrimator, self).__init__()
        self.img_size = img_size
        self.layer1 = nn.Sequential(
            nn.Linear(img_size * img_size, 512),
            nn.LeakyReLU(0.2)
        )
        self.layer2 = nn.Sequential(
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2)
        )
        self.layer3 = nn.Sequential(
            nn.Linear(256, 128),
            nn.LeakyReLU(0.2)
        )
        # Sigmoid squashes the score into (0, 1) for BCE loss.
        self.layer4 = nn.Sequential(
            nn.Linear(128, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        """Flatten the input batch and return per-sample real/fake scores of shape (batch, 1)."""
        x = x.view(-1, self.img_size * self.img_size)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x
# Instantiate the two adversaries and one Adam optimizer each.
G=Generator()
D=Discrimator()
# BCE targets: 1 = real sample, 0 = generated sample.
real_label=1
fake_label=0
optimizer_G=optim.Adam(G.parameters(),lr=LR_G)
optimizer_D=optim.Adam(D.parameters(),lr=LR_D)
loss_func=nn.BCELoss()
# Per-iteration loss histories, plotted after training.
loss_G=[]
loss_D=[]
iters=0
for epoch in range(EPOCH):
    for step,(x,y)in enumerate(dataloader):
        # ---- Discriminator update: real batch then fake batch ----
        optimizer_D.zero_grad()
        # NOTE(review): the label tensor assumes full batches; with MNIST's
        # 60000 training samples and BATCHSIZE=32 (60000/32 = 1875) every
        # batch is full — re-check if BATCHSIZE changes.
        label=torch.full((BATCHSIZE,),real_label,dtype=torch.float)
        # print(x.size())
        output=D(x).view(-1)
        # print(output.size())
        # print(label.size())
        loss_real=loss_func(output,label)
        loss_real.backward()
        noise=torch.randn(BATCHSIZE,LATENT_DIM)
        label=torch.full((BATCHSIZE,),fake_label,dtype=torch.float)
        input=G(noise)
        # detach() keeps the generator out of the discriminator's graph.
        output=D(input.detach()).view(-1)
        loss_fake=loss_func(output,label)
        loss_fake.backward()
        loss_d=loss_real+loss_fake
        optimizer_D.step()
        # ---- Generator update: fool D into labeling fakes as real ----
        optimizer_G.zero_grad()
        label.fill_(real_label)
        output=D(input).view(-1)
        loss_g=loss_func(output,label)
        loss_g.backward()
        optimizer_G.step()
        if step % 300 == 0:
            print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f'
                  % (epoch + 1, EPOCH, step, len(dataloader),
                  loss_d.item(), loss_g.item()))
        # Save Losses for plotting later
        loss_G.append(loss_g.item())
        loss_D.append(loss_d.item())
        # Periodically dump a grid of generated samples to ./img4/.
        if (iters % 500 == 0) or ((epoch == EPOCH - 1) and (step == len(dataloader) - 1)):
            with torch.no_grad():
                test_z = torch.randn(BATCHSIZE, LATENT_DIM)
                generated = G(test_z)
                save_image(generated.view(generated.size(0), 1, 28, 28), './img4/img_' + (str)(iters) + '.png')
        iters += 1
# Plot both loss curves over the whole run.
plt.figure(figsize=(10, 5))
plt.title("Generator and Discriminator Loss During Training")
plt.plot(loss_G, label="G")
plt.plot(loss_D, label="D")
plt.xlabel("iterations")
plt.ylabel("Loss")
plt.legend()
plt.show()
| hqyu/DL_GAN | GAN.py | GAN.py | py | 4,169 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.utils.data",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "torchvision.datasets.MNIST",
"line_number": 26,
"usage_type": "call"
},
{
"api_name"... |
7713955808 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import Flask

myApp = Flask(__name__)


@myApp.route('/')
def bonjour():
    """Root endpoint: return a short greeting."""
    greeting = 'Bonjour, je suis Ramy \n'
    return greeting


if __name__ == '__main__':
    # Bind to all interfaces so the app is reachable from outside the host/container.
    myApp.run(host='0.0.0.0', port=8080)
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
}
] |
16471277711 | """
A row measuring seven units in length has red blocks with a minimum length of
three units placed on it, such that any two red blocks (which are allowed to be
different lengths) are separated by at least one black square. There are
exactly seventeen ways of doing this.
How many ways can a row measuring fifty units in length be filled?
NOTE: Although the example above does not lend itself to the possibility, in
general it is permitted to mix block sizes. For example, on a row measuring
eight units in length you could use red (3), black (1), and red (4).
Solution comment: Fast for Python, ~4 ms. This somehow worked on first
try. Idea was to describe the number of ways to place the blocks with N units,
and use this to build larger solutions. A baseline fact is that there is only
one way to do it if N < 3 (i.e. the trivial solution). Then we can place a
block of ever increasing size (until no more room), and then add the number of
ways to place blocks on the remaining units. We can place the block either at
the start, or at some offset. Trying all blocksizes and all offsets we generate
the solution.
The memoization is essential for building the solution recursively like this.
Could be translated to DP with a simple array, but that would take some more
accurate indexing. The simplicity of this approach is the most appealing part.
And somehow I got the ±1 parts right on the first go.
"""
from time import time
from functools import lru_cache
@lru_cache()
def ways(N):
    """Count the fillings of a row measuring N units (1 for N < 3: empty row).

    For each offset at which a red block could start, try every block length
    of at least three units that still fits, then recurse on the units left
    after the block plus its mandatory one-unit black separator.
    """
    total = 1  # The all-black (trivial) arrangement is always possible.
    for start in range(max(0, N - 2)):  # same as range(N - 3 + 1) when N >= 3
        remaining = N - start
        total += sum(ways(remaining - size - 1) for size in range(3, remaining + 1))
    return total
if __name__ == "__main__":
    # Time the 50-unit computation and report the answer plus elapsed ms.
    t0 = time()
    print('Answer: {}\nExecution time: {:.3f} ms'.format(ways(50), (time() - t0) * 1e3))
| bsamseth/project-euler | 114/114.py | 114.py | py | 1,999 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "functools.lru_cache",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 44,
"usage_type": "call"
}
] |
36668954335 | from abc import ABC, abstractmethod
# enum and constants
from enum import Enum
from uuid import UUID
from time import time
import threading
class VehicleType(Enum):
    """Vehicle categories the parking lot supports; values are lowercase labels."""
    CAR = 'car'
    TRUCK = 'truck'
    VAN = 'van'
    MOTORBIKE = 'motorbike'
class ParkingSpotType(Enum):
    """Kinds of spot available in the parking lot."""
    COMPACT = 'compact'
    LARGE = 'large'
    MOTORBIKE = 'motorbike'
    ELECTRIC = 'electric'
class ParkingSpotStatus(Enum):
    """Occupancy state of a single spot."""
    AVAILABLE = 'available'
    UNAVAILABLE = 'unavailable'
class AccountStatus(Enum):
    """Account status of users."""
    ACTIVE = 'active'
    BLOCKED = 'blocked'
    BANNED = 'banned'
class ParkingTicketStatus(Enum):
    """Lifecycle state of an issued parking ticket."""
    ACTIVE = 'active'
    PAID = 'paid'
    LOST = 'lost'
class ContactType(str, Enum):
    """Kinds of contact information for the parking lot.

    Every other constant group in this file is an ``Enum``; this one was a
    plain class holding bare strings. Subclassing ``str`` alongside ``Enum``
    keeps backward compatibility: members still compare equal to the raw
    strings (``ContactType.PHONE == 'phone'``), so existing callers work.
    """
    PHONE = 'phone'
    EMAIL = 'email'
# Informational class
class Address:
    """Postal address of the parking lot (PLOT)."""

    def __init__(self, state, city, country, zip_code, street=None):
        """Store the address parts; ``street`` is optional."""
        self.street = street
        self.zip_code = zip_code
        self.country = country
        self.city = city
        self.state = state
class Contact:
    """A single piece of contact information for the parking lot (PLOT)."""

    def __init__(self, contact_type, name, value, sequence):
        """Record the contact kind, label, value and display order."""
        self.sequence = sequence
        self.value = value
        self.name = name
        self.type = contact_type
# Parking spot
class ParkingSpot(ABC):
    """Base class for a single parking spot, its tariffs and occupancy state."""

    def __init__(
        self, spot_number, status, base_charge, special_charge,
        spot_type
    ):
        """Initialise billing details and start with no allocated vehicle."""
        self.allocated_vehicle = None
        self.type = spot_type
        self.special_charge_per_hour = special_charge
        self.base_charge = base_charge
        self.status = status
        self.spot_number = spot_number

    def set_spot_status(self, status):
        """Overwrite the availability status."""
        self.status = status

    def get_spot_status(self):
        """Return the current availability status."""
        return self.status

    def allocate_vehicle(self, vehicle):
        """Park ``vehicle`` on this spot and flag the spot unavailable."""
        self.allocated_vehicle = vehicle
        self.set_spot_status(ParkingSpotStatus.UNAVAILABLE)

    def remove_vehicle(self):
        """Free the spot and flag it available again."""
        self.allocated_vehicle = None
        self.set_spot_status(ParkingSpotStatus.AVAILABLE)
class CompactSpot(ParkingSpot):
    """Compact-size spot; created available with the given tariffs."""

    def __init__(self, spot_number, base_charge, special_charge):
        super().__init__(
            spot_number, ParkingSpotStatus.AVAILABLE, base_charge,
            special_charge, ParkingSpotType.COMPACT
        )
class LargeSpot(ParkingSpot):
    """Large-size spot; created available with the given tariffs."""

    def __init__(self, spot_number, base_charge, special_charge):
        # Fixed: this class previously registered itself with
        # ParkingSpotType.COMPACT, so type-based spot searches treated
        # large spots as compact ones.
        super().__init__(
            spot_number, ParkingSpotStatus.AVAILABLE, base_charge,
            special_charge, ParkingSpotType.LARGE
        )
class MotorbikeSpot(ParkingSpot):
    """Motorbike-size spot; created available with the given tariffs."""

    def __init__(self, spot_number, base_charge, special_charge):
        # Fixed: this class previously registered itself with
        # ParkingSpotType.COMPACT instead of MOTORBIKE.
        super().__init__(
            spot_number, ParkingSpotStatus.AVAILABLE, base_charge,
            special_charge, ParkingSpotType.MOTORBIKE
        )
# Vehicle
class Vehicle(ABC):
    """Base class for vehicles; tracks plate number, type, ticket and color."""

    def __init__(
        self, vehicle_number, vehicle_type, ticket=None, color=None
    ):
        """Store the identifying details; ``ticket`` and ``color`` are optional."""
        self.color = color
        self.ticket = ticket
        self.type = vehicle_type
        self.number = vehicle_number

    def assign_ticket(self, ticket):
        """Attach the parking ticket issued for this vehicle."""
        self.ticket = ticket
class Car(Vehicle):
    """Vehicle of type CAR."""

    def __init__(self, vehicle_number, ticket=None):
        super().__init__(vehicle_number, VehicleType.CAR, ticket=ticket)
class Truck(Vehicle):
    """Vehicle of type TRUCK."""

    def __init__(self, vehicle_number, ticket=None):
        super().__init__(vehicle_number, VehicleType.TRUCK, ticket=ticket)
class Motorbike(Vehicle):
    """Vehicle of type MOTORBIKE."""

    def __init__(self, vehicle_number, ticket=None):
        super().__init__(vehicle_number, VehicleType.MOTORBIKE, ticket=ticket)
class VehicleFactory:
    """Factory mapping a VehicleType onto the matching Vehicle subclass."""

    @classmethod
    def get_vehicle(cls, vehicle_number, vehicle_type):
        """Build a vehicle of ``vehicle_type``; raise for unsupported types."""
        constructors = {
            VehicleType.CAR: Car,
            VehicleType.TRUCK: Truck,
            VehicleType.MOTORBIKE: Motorbike,
        }
        constructor = constructors.get(vehicle_type)
        if constructor is None:
            raise Exception("Unsupported vehicle type")
        return constructor(vehicle_number)
# Parking ticket
class Ticket:
    """Ticket issued when a vehicle enters; numbered by epoch second + gate."""

    def __init__(
        self, gate_number=0,
        payment_status=ParkingTicketStatus.ACTIVE,
    ):
        """Create a ticket.

        ``gate_number`` now defaults to 0: ``ParkingLot.generate_ticket``
        calls ``Ticket()`` with no arguments, which previously raised a
        TypeError because the parameter was required.
        """
        # Epoch-second prefix keeps ticket numbers roughly unique per gate.
        self.ticket_number = str(int(time())) + '_' + str(gate_number)
        self.payment_status = payment_status

    def get_payment_status(self):
        """Return the ticket's payment status."""
        return self.payment_status
# Parking floors
class ParkingFloor:
    """One floor of the lot: an ordered, bounded collection of parking spots."""

    def __init__(self, floor_number, spot_limits):
        self.number = floor_number
        self.spots = []
        # Maps spot_number -> index into self.spots for O(1) lookup.
        self.spot_sequence_mapping = {}
        self.spot_limits = spot_limits

    def add_spots(self, spot):
        """Register a spot; reject duplicates and enforce the floor limit."""
        if spot.spot_number in self.spot_sequence_mapping:
            raise Exception('This spot is already present')
        current_len = len(self.spots)
        if current_len == self.spot_limits:
            raise Exception('Maximum limit reached')
        self.spots.append(spot)
        self.spot_sequence_mapping[spot.spot_number] = current_len

    def remove_spot(self, spot):
        """Deregister a spot.

        Fixed: popping an element shifts every later index, so the cached
        indices are rebuilt after removal. Previously a second removal used
        a stale index and could pop the wrong spot or raise IndexError.
        """
        if spot.spot_number not in self.spot_sequence_mapping:
            raise Exception('Invalid spot number')
        spot_index = self.spot_sequence_mapping.pop(spot.spot_number)
        self.spots.pop(spot_index)
        self.spot_sequence_mapping = {
            s.spot_number: i for i, s in enumerate(self.spots)
        }

    def get_total_spots(self):
        """Return the number of registered spots."""
        return len(self.spots)

    def get_available_spots_count(self, spot_types=()):
        """Count AVAILABLE spots, optionally restricted to ``spot_types``.

        The mutable default ``[]`` was replaced by an immutable tuple.
        """
        count = 0
        for spot in self.spots:
            if spot_types and spot.type not in spot_types:
                continue
            if spot.get_spot_status() == ParkingSpotStatus.AVAILABLE:
                count = count + 1
        return count

    def get_unavailable_spots_count(self, spot_types=()):
        """Count UNAVAILABLE spots, optionally restricted to ``spot_types``."""
        count = 0
        for spot in self.spots:
            if spot_types and spot.type not in spot_types:
                continue
            if spot.get_spot_status() == ParkingSpotStatus.UNAVAILABLE:
                count = count + 1
        return count

    def get_first_free_spot(self, spot_type_list=()):
        """Return the first AVAILABLE spot whose type is in ``spot_type_list``.

        Fixed: availability is now checked; previously an occupied spot of a
        matching type was handed out again.
        """
        for spot in self.spots:
            if (spot.type in spot_type_list
                    and spot.get_spot_status() == ParkingSpotStatus.AVAILABLE):
                return spot
        return None

    @property
    def is_full(self):
        """True when no spot on this floor is AVAILABLE."""
        for spot in self.spots:
            if spot.status == ParkingSpotStatus.AVAILABLE:
                return False
        return True
class ParkingLot:
    """Singleton parking lot; all state lives on the inner __OnlyOne object."""

    instance = None

    class __OnlyOne:
        def __init__(self, name, floor_limits):
            self.name = name
            self.addresses = []
            self.contacts = []
            self.floors = []
            # Maps floor number -> index into self.floors.
            self.floor_sequence_mapping = {}
            self.floor_limits = floor_limits
            self.entrance_panels = [1, 2, 3]  # for example
            # Fixed: this used to read ``self.exit_panels = [] = [1, 2, 3]``,
            # which tries to unpack three values into zero targets and raised
            # ValueError as soon as the lot was constructed.
            self.exit_panels = [1, 2, 3]  # for example
            self.lock = threading.Lock()

        def add_plot_address(self, address):
            """Attach an Address record to the lot."""
            self.addresses.append(address)

        def add_contacts(self, contact):
            """Attach a Contact record to the lot."""
            self.contacts.append(contact)

        def add_floor(self, floor):
            """Register a floor; reject duplicates and enforce the limit."""
            if floor.number in self.floor_sequence_mapping:
                raise Exception('This floor is already present')
            curr_floor_size = len(self.floors)
            if curr_floor_size == self.floor_limits:
                raise Exception('Maximum limit reached')
            self.floors.append(floor)
            self.floor_sequence_mapping[floor.number] = curr_floor_size

        def remove_floor(self):
            pass

        def get_free_spot(self, spot_types=()):
            """Return the first free spot of an acceptable type on any floor."""
            for floor in self.floors:
                free_spot = floor.get_first_free_spot(spot_type_list=spot_types)
                if free_spot:
                    return free_spot
            raise Exception("No available slots")

        def generate_ticket(self, vehicle_number, vehicle_type):
            """Issue a ticket and park the vehicle on the first suitable spot.

            Fixes: ``Ticket`` is now given a gate number (the bare ``Ticket()``
            call raised TypeError), the lock is released even when no spot is
            found (via ``with``), and the ticket is returned to the caller.
            """
            if self.is_full:
                raise Exception("Parking full")
            with self.lock:  # released on success *and* on error
                ticket = Ticket(self.entrance_panels[0])
                vehicle = VehicleFactory.get_vehicle(vehicle_number, vehicle_type)
                vehicle.assign_ticket(ticket)
                first_free_spot = self.get_free_spot(
                    self._spot_types(vehicle_type)
                )
                first_free_spot.allocate_vehicle(vehicle)
            return ticket

        def _spot_types(self, vehicle_type):
            """Spot types a given vehicle type is allowed to occupy.

            Fixed: this used to return VehicleType members, which never match
            ``spot.type`` (a ParkingSpotType), so no spot could ever be found;
            it also tested MOTORBIKE twice and never handled TRUCK.
            """
            if vehicle_type == VehicleType.MOTORBIKE:
                return [
                    ParkingSpotType.MOTORBIKE, ParkingSpotType.COMPACT,
                    ParkingSpotType.LARGE,
                ]
            if vehicle_type == VehicleType.CAR:
                return [ParkingSpotType.COMPACT, ParkingSpotType.LARGE]
            if vehicle_type == VehicleType.TRUCK:
                return [ParkingSpotType.LARGE]
            return []

        @property
        def is_full(self):
            """True when every floor reports itself full."""
            for floor in self.floors:
                if not floor.is_full:
                    return False
            return True

    def __init__(self, name, floor_limit):
        # First construction creates the singleton; later ones just rename it.
        if not ParkingLot.instance:
            ParkingLot.instance = ParkingLot.__OnlyOne(name, floor_limit)
        else:
            ParkingLot.instance.name = name
            ParkingLot.instance.floor_limit = floor_limit

    def __getattr__(self, name):
        # Delegate everything else to the shared singleton state object.
        return getattr(self.instance, name)
class UserDetails:
    """Personal details (name, addresses, contacts) of a user."""

    def __init__(self, name, addresses=None, contacts=None):
        # Fixed: the mutable defaults ``[]`` were shared across every
        # instance, so appending to one user's lists mutated all users.
        self.name = name
        self.addresses = [] if addresses is None else addresses
        self.contacts = [] if contacts is None else contacts
class Account:
    """Login credentials plus a handle onto the shared parking lot state."""

    def __init__(self, username, password, user_details, status):
        """Store credentials/status and bind the ParkingLot singleton."""
        self.status = status
        self.user_info = user_details
        self.password = password
        self.username = username
        # Every account shares the one ParkingLot singleton instance.
        self.parking_lot = ParkingLot(name="XXXX", floor_limit=10).instance
class Admin(Account):
    """Account that can extend the lot with new floors and spots."""

    def __init__(
        self, username, password, user_details,
        status=AccountStatus.ACTIVE
    ):
        super().__init__(username, password, user_details, status)

    def add_floor(self, floor_number):
        """Create a 100-spot floor and register it with the lot."""
        floor = ParkingFloor(floor_number, 100)
        self.parking_lot.add_floor(floor)

    def add_spot(
        self, floor, spot_number, base_charge, special_charge_per_hour,
        spot_type
    ):
        """Create an available spot and attach it to ``floor``.

        Fixed: ParkingFloor's method is named ``add_spots`` — the previous
        call to ``floor.add_spot`` raised AttributeError.
        """
        spot = ParkingSpot(
            spot_number, ParkingSpotStatus.AVAILABLE, base_charge,
            special_charge_per_hour, spot_type
        )
        floor.add_spots(spot)
class ParkingAttendant(Account):
    """Account whose job is issuing tickets at the entrance gates."""

    def __init__(
        self, username, password, user_details,
        status=AccountStatus.ACTIVE
    ):
        super().__init__(username, password, user_details, status)

    def generate_ticket(self, vehicle_number, vehicle_type):
        # Delegates to the shared ParkingLot singleton.
        parking_lot = self.parking_lot
        return parking_lot.generate_ticket(vehicle_number, vehicle_type)
| manofsteel-ab/design-patterns | oo_design/parking_lot.py | parking_lot.py | py | 11,034 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "enum.Enum",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 31,
... |
75066660668 | import scipy.io as sio
import numpy as np
class ReadFiles(object):
    """Load the spam dataset from a MATLAB .mat file into feature/label arrays."""

    def __init__(self):
        """Read ../data/spam_data.mat and build row-per-sample arrays."""
        spamData = sio.loadmat('../data/spam_data.mat', struct_as_record=False)

        self.header = spamData['__header__']
        self.version = spamData['__version__']
        self.names = spamData['names']

        # Train and test partitions are concatenated column-wise, then
        # transposed so that rows correspond to samples.
        self.features = np.concatenate(
            (spamData['P_train'], spamData['P_test']), axis=1
        ).transpose()
        self.log("Features Matrix Created and Imported")

        # Labels get the same treatment, then are flattened to 1-D.
        self.labels = np.ravel(
            np.concatenate(
                (spamData['T_train'], spamData['T_test']), axis=1
            ).transpose()
        )
        self.log("Labels Array Created and Imported")

    def getFeatures(self):
        """Return the (n_samples, n_features) feature matrix."""
        return self.features

    def getLabels(self):
        """Return the flat label array aligned with the feature rows."""
        return self.labels

    def log(self, msg):
        """Print a tagged progress message."""
        print('[Reading Files] {}'.format(msg))
| Skalwalker/SpamRecognition | scripts/readfiles.py | readfiles.py | py | 1,011 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "scipy.io.loadmat",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "scipy.io",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"li... |
39399750947 | import logging
import os
import argparse
import json
from itertools import chain
from typing import Dict, List, Tuple, Any
from functools import partial
import s3fs
from hydra import compose, initialize, core
from omegaconf import OmegaConf
import numpy as np
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Nopep8
import tensorflow as tf
from base_trainer import BaseTrainer
# ------------------------------- Trainer class ------------------------------ #
class BaselineTrainer(BaseTrainer):
    """
    This class is used to train an image classification model.

    The network is a plain CNN: five Conv2D/BatchNorm/ReLU/MaxPool blocks
    followed by three Dense/BatchNorm/Dropout/ReLU blocks and a linear
    output layer, optionally trained with multi-worker distribution.
    """
    def __init__(self,
                 hyperparameters: Dict[str, Any],
                 config: Dict[str, Any],
                 job_name: str,
                 train_dataset: tf.data.Dataset,
                 val_dataset: tf.data.Dataset,
                 train_class_weights: Dict[str, float],
                 distributed: bool,
                 strategy: tf.distribute.Strategy,
                 model_dir: str,
                 logger: logging.Logger) -> None:
        """
        Constructor for the BaselineTrainer class.

        Parameters
        ----------
        hyperparameters : Dict[str, Any]
            A dictionary containing the hyperparameters for model training.
        config : Dict[str, Any]
            A dictionary containing the configuration for model training.
        job_name : str
            The name of the job.
        train_dataset : tf.data.Dataset
            A tf.data.Dataset object that contains the training data.
        val_dataset : tf.data.Dataset
            The validation data is recommended to be a repeated dataset.
        train_class_weights : Dict[str, float]
            Class weights for the training data.
        distributed : bool
            A boolean that specifies whether to use distributed training.
        strategy : tf.distribute.Strategy
            A tf.distribute.Strategy object that specifies the strategy for distributed training.
        model_dir : str
            Path to the directory where the model will be saved.
        logger : logging.Logger
            A logger object.

        Returns
        -------
        None
        """
        super().__init__(
            hyperparameters=hyperparameters,
            config=config,
            job_name=job_name,
            train_dataset=train_dataset,
            val_dataset=val_dataset,
            train_class_weights=train_class_weights,
            distributed=distributed,
            strategy=strategy,
            model_dir=model_dir,
            logger=logger
        )

    def _create_model(self) -> tf.keras.Model:
        """
        Function that creates the compiled model.

        Returns
        -------
        tf.keras.Model
            The compiled model.
        """
        # Default convolutional layer: linear activation because a
        # BatchNorm + ReLU pair follows each convolution.
        DefaultConv2D = partial(
            tf.keras.layers.Conv2D,
            kernel_size=self.hyperparameters['conv2d_kernel_size'],
            padding='same',
            activation='linear',
            use_bias=False,  # Not needed if batch normalization is used
            kernel_initializer='he_normal',
            kernel_regularizer=tf.keras.regularizers.l2()
        )

        # Default dense layer, same BatchNorm-follows convention.
        DefaultDense = partial(
            tf.keras.layers.Dense,
            activation='linear',
            use_bias=False,
            kernel_initializer='he_normal',
            kernel_regularizer=tf.keras.regularizers.l2()
        )

        # ---------------------------- Model architecture ---------------------------- #

        # Data augmentation
        # NOTE(review): AugmentationModel is imported only inside the
        # __main__ guard below, so this method currently works only when the
        # file runs as a script — consider a module-level import.
        data_augmentation = AugmentationModel(aug_params={
            'RandomRotation': {'factor': 0.5},
            'RandomContrast': {'factor': 0.3}
        }).build_augmented_model()

        inputs = tf.keras.Input(shape=(self.config['image_size'], self.config['image_size'], self.config['num_channels']), name='input_layer')
        x = data_augmentation(inputs)
        # Map raw pixel values [0, 255] into [0, 1].
        x = tf.keras.layers.Rescaling(scale=1.0/255.0, name='rescaling_layer')(x)

        for i in range(5):
            x = DefaultConv2D(filters=self.hyperparameters[f'conv2d_num_filters_block_{i}'], name=f'conv2d_{i}')(x)
            x = tf.keras.layers.BatchNormalization(name=f'conv2d_batch_norm_{i}')(x)
            x = tf.keras.layers.Activation('relu', name=f'conv2d_relu_{i}')(x)
            x = tf.keras.layers.MaxPooling2D(pool_size=self.hyperparameters['conv2d_pooling_size'], name=f'conv2d_pooling_{i}')(x)

        x = tf.keras.layers.Flatten(name='flatten_layer')(x)

        for i in range(3):
            x = DefaultDense(units=self.hyperparameters[f'dense_num_units_{i}'], name=f'dense_{i}')(x)
            x = tf.keras.layers.BatchNormalization(name=f'dense_batch_norm_{i}')(x)
            # Dropout before activation is the same as after for 'RELU' based on https://sebastianraschka.com/faq/docs/dropout-activation.html
            x = tf.keras.layers.Dropout(rate=self.hyperparameters['dense_dropout_rate'], name=f'dense_drop_out_{i}')(x)
            x = tf.keras.layers.Activation('relu', name=f'dense_relu_{i}')(x)

        # Linear logits; the loss function is expected to apply the softmax.
        outputs = tf.keras.layers.Dense(units=self.config['num_classes'], activation='linear', name='output_layer')(x)

        model = tf.keras.Model(inputs=inputs, outputs=outputs)

        # ---------------------------------- Compile --------------------------------- #

        optimizer = self._create_optimizer(learning_rate=self.hyperparameters['opt_learning_rate'])
        loss_fn = self._create_loss_fn()
        metrics = self._create_metrics()

        model.compile(
            optimizer=optimizer,
            loss=loss_fn,
            metrics=metrics
        )

        return model

    def fit(self) -> None:
        """
        Function that fits the model and saves it to ``model_dir``.

        Returns
        -------
        None
        """
        # ------------------------------- Create model ------------------------------- #

        if self.distributed:
            with self.strategy.scope():
                model = self._create_model()
        else:
            model = self._create_model()

        # --------------------------------- Callbacks -------------------------------- #

        early_stopping = tf.keras.callbacks.EarlyStopping(
            monitor='val_loss',
            patience=3,
            mode='min',
            restore_best_weights=True
        )
        back_and_restore = tf.keras.callbacks.BackupAndRestore(
            backup_dir=os.path.join(os.getcwd(), 'backup')
        )
        callbacks = [early_stopping, back_and_restore]
        if self.distributed:
            tensorboard = tf.keras.callbacks.TensorBoard(
                log_dir=f's3://{self.config["s3_bucket"]}/{self.config["s3_key"]}/tensorboard_logs/{self.job_name}'
            )
            callbacks.append(tensorboard)

        # ------------------------------------ Fit ----------------------------------- #

        model.fit(
            x=self.train_dataset,
            epochs=self.hyperparameters['fit_epochs'],
            validation_data=self.val_dataset,
            callbacks=callbacks,
            # Number of steps (batches of samples) to draw from before stopping validation
            validation_steps=self.hyperparameters['fit_validation_steps'],
            class_weight=self.train_class_weights
        )

        # Fixed: this previously used the module-level ``logger``, which is
        # only defined inside the __main__ guard, so calling fit() from an
        # importing module raised NameError.
        self.logger.info(f'Best validation loss: {early_stopping.best}')

        # ---------------------------------- Save model --------------------------------- #

        if self.distributed:
            # Each worker saves to its own directory; only the chief's copy
            # is the canonical model.
            model_dir = self._create_model_dir(
                self.model_dir,
                self.strategy.cluster_resolver.task_type,
                self.strategy.cluster_resolver.task_id
            )
            model.save(os.path.join(model_dir, '00000000'))
        else:
            model.save(os.path.join(self.model_dir, '00000000'))

        return None
if __name__ == '__main__':

    from custom_utils import get_logger, parser, add_additional_args, AugmentationModel, load_datasets

    # ---------------------------------- Set up ---------------------------------- #

    logger = get_logger(name='baseline_training')

    # Hydra: reset any previous global instance, then load ./config/main.yaml
    core.global_hydra.GlobalHydra.instance().clear()
    initialize(version_base='1.2', config_path='config', job_name='baseline_training')
    config = OmegaConf.to_container(compose(config_name='main'), resolve=True)

    # Parser hyperparameters specified by the SageMaker estimator; each entry
    # maps a CLI argument name to the type it should be parsed as.
    filters = {f'conv2d_num_filters_block_{i}': int for i in range(0, 5)}
    dense_layer_units = {f'dense_num_units_{i}': int for i in range(0, 3)}
    loss_hyperparams = {'loss_alpha': float, 'loss_gamma': float}
    other_hyperparams = {
        'conv2d_pooling_size': int,
        'conv2d_kernel_size': int,
        'dense_dropout_rate': float,
        'opt_learning_rate': float,
        'opt_adam_beta_1': float,
        'opt_adam_beta_2': float,
        'opt_clipnorm': float,
        'fit_epochs': int,
        'use_focal_loss': int
    }
    additional_args = dict(chain(
        filters.items(),
        dense_layer_units.items(),
        loss_hyperparams.items(),
        other_hyperparams.items()
    ))

    args = add_additional_args(parser_func=parser, additional_args=additional_args)()

    job_name = args.training_env['job_name']

    # --------------------------------- Load data -------------------------------- #

    # Test mode runs single-process; otherwise use multi-worker distribution.
    if args.test_mode:
        distributed = False
        strategy = None
    else:
        distributed = True
        strategy = tf.distribute.MultiWorkerMirroredStrategy()

    if not distributed:
        # Sample three batches from the training dataset
        train_dataset = load_datasets(
            dir=args.train,
            batch_size=config['batch_size'],
            val=False
        ).take(3)
        # Sample three batches from the validation dataset
        val_dataset = load_datasets(
            dir=args.val,
            batch_size=config['batch_size'],
            val=True
        ).take(3)
    else:
        # Scale the per-worker batch size up to a global batch size.
        tf_config = json.loads(os.environ['TF_CONFIG'])
        num_workers = len(tf_config['cluster']['worker'])
        global_batch_size = config['batch_size'] * num_workers
        # Load the training dataset
        train_dataset = load_datasets(
            dir=args.train,
            batch_size=global_batch_size,
            val=False
        )
        # Load the validation dataset
        val_dataset = load_datasets(
            dir=args.val,
            batch_size=global_batch_size,
            val=True
        )

    # Load training set weights from S3
    fs = s3fs.S3FileSystem()
    with fs.open(f's3://{config["s3_bucket"]}/{config["s3_key"]}/input-data/train_weights.json', 'rb') as f:
        train_class_weights = json.load(f)
    # Convert all keys to integers (JSON object keys are always strings)
    train_class_weights = {int(k): v for k, v in train_class_weights.items()}

    # --------------------------------- Train model --------------------------------- #

    trainer = BaselineTrainer(
        hyperparameters={
            'conv2d_num_filters_block_0': args.conv2d_num_filters_block_0,
            'conv2d_num_filters_block_1': args.conv2d_num_filters_block_1,
            'conv2d_num_filters_block_2': args.conv2d_num_filters_block_2,
            'conv2d_num_filters_block_3': args.conv2d_num_filters_block_3,
            'conv2d_num_filters_block_4': args.conv2d_num_filters_block_4,
            'conv2d_pooling_size': args.conv2d_pooling_size,
            'conv2d_kernel_size': args.conv2d_kernel_size,
            'dense_num_units_0': args.dense_num_units_0,
            'dense_num_units_1': args.dense_num_units_1,
            'dense_num_units_2': args.dense_num_units_2,
            'dense_dropout_rate': args.dense_dropout_rate,
            'opt_learning_rate': args.opt_learning_rate,
            'opt_adam_beta_1': args.opt_adam_beta_1,
            'opt_adam_beta_2': args.opt_adam_beta_2,
            'opt_clipnorm': args.opt_clipnorm,
            'loss_alpha': args.loss_alpha,
            'loss_gamma': args.loss_gamma,
            'fit_epochs': args.fit_epochs,
            'fit_validation_steps': 1 if args.test_mode else int(config['val_size'] / config['batch_size']),
            'use_focal_loss': args.use_focal_loss
        },
        config=config,
        job_name=job_name,
        train_dataset=train_dataset,
        val_dataset=val_dataset,
        train_class_weights=train_class_weights,
        distributed=distributed,
        strategy=strategy,
        model_dir=args.model_dir,
        logger=logger
    )

    trainer.fit()
del trainer | YangWu1227/python-for-machine-learning | neural_network/projects/cnn_insect_classification_sagemaker/src/baseline_entry.py | baseline_entry.py | py | 12,600 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.environ",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "base_trainer.BaseTrainer",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "typing.Any",
... |
22400134597 | from PyQt5 import QtWidgets
from diz3_2 import * # импорт нашего сгенерированного файла
import sys
from BD import Orm
class Dialog2(QtWidgets.QDialog):
    """Dialog for creating a facility record owned by the entity ``id``."""

    def __init__(self, id):
        self.id = id
        super(Dialog2, self).__init__()
        self.ui = Ui_Dialog()
        self.ui.setupUi(self)
        # Both combo boxes offer the same yes/no choice.
        for choice in ("Да", "Нет"):
            self.ui.comboBox.addItem(choice)
            self.ui.comboBox_2.addItem(choice)
        self.ui.buttonBox.accepted.connect(self.add)
        self.ui.buttonBox.rejected.connect(self.close)
        self.bd = Orm()

    def add(self):
        """Collect form values, persist them via the ORM, and close."""
        owner = self.id
        name = self.ui.lineEdit.text()
        facility = self.ui.lineEdit_2.text()
        reckoning = self.ui.comboBox.currentText() == "Да"
        waybills = self.ui.comboBox_2.currentText() == "Да"
        count = self.ui.spinBox.value()
        self.bd.addfacil(owner, name, facility, reckoning, waybills, count)
        self.close()
# app = QtWidgets.QApplication([])
# application = Dialog()
#
# sys.exit(app.exec())
| Vorlogg/BD | dialog2.py | dialog2.py | py | 1,367 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PyQt5.QtWidgets.QDialog",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "BD.Orm",
"line_number": 22,
"usage_type": "call"
}
] |
19691344827 | """Define custom dataset class extending the Pytorch Dataset class"""
import os
from typing import Dict, List, Tuple
import numpy as np
import pandas as pd
from PIL import Image
import torch
from torch.utils.data import DataLoader, Dataset
import torchvision.transforms as tvt
from utils.utils import Params
class SketchesDataset(Dataset):
    """Custom class for Sketches dataset"""

    def __init__(self, root: str, csv_file: str, transform: "tvt.Compose" = None) -> None:
        """Get the filenames and labels of images from a csv file.

        Args:
            root: Directory containing the data
            csv_file: file containing the data; first column is "Image Id",
                the remaining columns are the per-image labels
            transform: Transformation to apply on images (optional)
        """
        self.root = root
        # Images are expected under <root>/images/<Image Id>.png
        self.data = pd.read_csv(os.path.join(root, csv_file))
        self.transform = transform

    def __len__(self) -> int:
        """Return the size of the dataset.
        """
        return len(self.data)

    def __getitem__(self, idx: int) -> "Tuple[Image.Image, torch.Tensor]":
        """Get an item from the dataset given the index idx.

        Returns the image (a PIL image, or a tensor when ``transform``
        includes ``ToTensor``) and a float32 tensor of its labels.
        """
        row = self.data.iloc[idx]
        im_name = row["Image Id"] + ".png"
        im_path = os.path.join(self.root, "images", im_name)
        img = Image.open(im_path).convert("RGB")
        # All columns after "Image Id" are numeric label values.
        labels = torch.tensor(row[1:], dtype=torch.float32)
        if self.transform is not None:
            img = self.transform(img)
        return img, labels
def get_transform(mode: str, params: Params) -> tvt.Compose:
    """Build the preprocessing / augmentation pipeline.

    Args:
        mode: One of "train", "val", "test"; augmentation applies only to "train"
        params: Hyperparameters (image size, flip probability, jitter strengths, ...)

    Returns:
        Composition of all the data transforms
    """
    trans = [tvt.Resize((params.height, params.width))]
    if mode == "train":
        # Fixed: the augmentations were previously appended *after*
        # ToTensor/Normalize, so ColorJitter ran on mean/std-normalized
        # tensors. PIL-level augmentations must precede tensor conversion
        # and normalization.
        trans += [
            tvt.RandomHorizontalFlip(params.flip),
            tvt.ColorJitter(
                brightness=params.brightness,
                contrast=params.contrast,
                saturation=params.saturation,
                hue=params.hue
            ),
            tvt.RandomRotation(params.degree)
        ]
    trans += [
        tvt.ToTensor(),
        # ImageNet channel statistics.
        tvt.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ]
    return tvt.Compose(trans)
def collate_fn(batch: List[Tuple[torch.tensor, torch.tensor]]) -> Tuple[torch.tensor, torch.tensor]:
    """Stack a list of (image, label) pairs into batched tensors.

    Args:
        batch: List of data generated by dataset

    Returns:
        Batch of images and labels
    """
    imgs, labels = zip(*batch)
    return torch.stack(imgs, 0), torch.stack(labels, 0)
def get_dataloader(
    modes: List[str],
    params: Params,
) -> Dict[str, DataLoader]:
    """Build one DataLoader per requested mode.

    Args:
        modes: Mode of operation i.e. 'train', 'val', 'test'
        params: Hyperparameters

    Returns:
        DataLoader object for each mode
    """
    dataloaders = {}
    for mode in modes:
        dataset = SketchesDataset(
            root=params.data_dir,
            csv_file=mode + "_sketches_" + params.type + ".csv",
            transform=get_transform(mode, params)
        )
        # Only the training split is shuffled.
        dataloaders[mode] = DataLoader(
            dataset,
            batch_size=params.batch_size,
            num_workers=params.num_workers,
            pin_memory=params.pin_memory,
            collate_fn=collate_fn,
            shuffle=(mode == "train")
        )
    return dataloaders
| karanrampal/sketches | src/model/data_loader.py | data_loader.py | py | 3,585 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.pa... |
19399695029 | from typing import List
import copy
class Solution:
    """LeetCode 399 'Evaluate Division' solved as a weighted-graph DFS."""

    def calcEquation(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:
        """Answer each a/b query from the given ratio equations.

        Builds a graph where edge a -> b carries the ratio a/b (and the
        reverse edge carries b/a), then multiplies edge weights along a DFS
        path for every query. Unknown variables or unreachable pairs answer
        -1.0; identical known variables answer 1.0.

        Fixes over the original: the debug ``print(graph)`` is removed, the
        "not found" sentinel is the float -1.0 (consistent with the declared
        List[float] return type), and the DFS backtracks in place instead of
        deep-copying the path/ratio lists on every branch.
        """
        graph = {}
        known = set()
        for i, (num, den) in enumerate(equations):
            known.add(num)
            known.add(den)
            graph.setdefault(num, {})[den] = values[i]
            graph.setdefault(den, {})[num] = 1.0 / values[i]

        ans = []
        for num, den in queries:
            if num not in known or den not in known:
                ans.append(-1.0)
            elif num == den:
                ans.append(1.0)
            else:
                ans.append(self.solve(graph, num, den, [], []))
        return ans

    def solve(self, graph, nominator, denominator, path, ratio):
        """DFS from ``nominator`` towards ``denominator``.

        ``path`` holds the visited nodes and ``ratio`` the edge weights
        accumulated so far; both are mutated and restored (backtracking).
        Returns the product of the edge weights, or -1.0 if unreachable.
        """
        if nominator == denominator:
            result = 1.0
            for factor in ratio:
                result *= factor
            return result
        path.append(nominator)
        for neighbour, weight in graph[nominator].items():
            if neighbour not in path:
                ratio.append(weight)
                found = self.solve(graph, neighbour, denominator, path, ratio)
                ratio.pop()
                if found != -1:
                    return found
        path.pop()
        return -1.0
# Quick manual check: a chain x1..x5 plus queries that hit an unknown
# variable (x9); expected output is [360.0, 1/120, 20.0, 1.0, -1, -1].
equations = [["x1","x2"],["x2","x3"],["x3","x4"],["x4","x5"]]
values = [3.0,4.0,5.0,6.0]
queries =[["x1","x5"],["x5","x2"],["x2","x4"],["x2","x2"],["x2","x9"],["x9","x9"]]
s = Solution()
r = s.calcEquation(equations, values, queries)
print(r)
| Yigang0622/LeetCode | calcEquation.py | calcEquation.py | py | 2,160 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 52,
"usage_type": "call"
}
] |
1512983994 | """
This file contains helper functions for the project
"""
# import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from math import atan2, degrees
import urllib.request
from PIL import Image
# functions
def get_tracking_data():
    """
    Function to read in tracking data and return a dataframe
    """
    tracking_df = pd.read_csv("./data/tracking_data.csv")
    return tracking_df
def gini_coefficient(x):
    """Compute Gini coefficient of array of values.

    Mathematically identical to the pairwise-difference definition
    ``sum_{i<j} |x_i - x_j| / (n**2 * mean(x))`` that the original O(n^2)
    Python loop computed, but derived from the sorted values in
    O(n log n). Also accepts plain lists (the old loop required an ndarray,
    since ``xi - x[i:]`` fails on lists).
    """
    xs = np.sort(np.asarray(x, dtype=float))
    n = xs.size
    total = xs.sum()
    # For ascending xs: sum_{i<j}(xs_j - xs_i) = sum_j (2j - n + 1) * xs_j
    diffsum = np.sum((2 * np.arange(n) - n + 1) * xs)
    return diffsum / (n ** 2 * (total / n))
def create_football_field(
    linenumbers=True,
    endzones=True,
    highlight_line=False,
    highlight_line_number=50,
    highlighted_name="Line of Scrimmage",
    fifty_is_los=False,
    figsize=(12, 6.33),
):
    """
    Function that plots the football field for viewing plays.

    Parameters
    ----------
    linenumbers : bool
        Draw the yard numbers along both sidelines.
    endzones : bool
        Shade the two 10-yard end zones blue.
    highlight_line : bool
        Draw a yellow vertical line at ``highlight_line_number``.
    highlight_line_number : int
        Yardline (0-100) for the highlight line; +10 offsets past the end zone.
    highlighted_name : str
        Label drawn next to the highlight line.
    fifty_is_los : bool
        Mark the 50-yard line as the player yardline at snap.
    figsize : tuple
        Matplotlib figure size in inches.

    Returns
    -------
    (fig, ax) : matplotlib Figure and Axes with the field drawn.
    """
    # credit https://www.kaggle.com/code/robikscube/nfl-big-data-bowl-plotting-player-position/notebook
    # Field background: 120 x 53.3 yards (including both end zones).
    rect = patches.Rectangle(
        (0, 0), 120, 53.3, linewidth=0.1, edgecolor="r", facecolor="darkgreen", zorder=0
    )
    fig, ax = plt.subplots(1, figsize=figsize)
    ax.add_patch(rect)

    # One white polyline tracing every 10-yard line and the field border.
    plt.plot(
        [
            10,
            10,
            10,
            20,
            20,
            30,
            30,
            40,
            40,
            50,
            50,
            60,
            60,
            70,
            70,
            80,
            80,
            90,
            90,
            100,
            100,
            110,
            110,
            120,
            0,
            0,
            120,
            120,
        ],
        [
            0,
            0,
            53.3,
            53.3,
            0,
            0,
            53.3,
            53.3,
            0,
            0,
            53.3,
            53.3,
            0,
            0,
            53.3,
            53.3,
            0,
            0,
            53.3,
            53.3,
            0,
            0,
            53.3,
            53.3,
            53.3,
            0,
            0,
            53.3,
        ],
        color="white",
    )
    if fifty_is_los:
        plt.plot([60, 60], [0, 53.3], color="gold")
        plt.text(62, 50, "<- Player Yardline at Snap", color="gold")
    # Endzones
    if endzones:
        ez1 = patches.Rectangle(
            (0, 0),
            10,
            53.3,
            linewidth=0.1,
            edgecolor="r",
            facecolor="blue",
            alpha=0.2,
            zorder=0,
        )
        # NOTE(review): width 120 here extends past x=120; it is harmless
        # because the axes are clipped at xlim=120, but 10 was probably intended.
        ez2 = patches.Rectangle(
            (110, 0),
            120,
            53.3,
            linewidth=0.1,
            edgecolor="r",
            facecolor="blue",
            alpha=0.2,
            zorder=0,
        )
        ax.add_patch(ez1)
        ax.add_patch(ez2)
    plt.xlim(0, 120)
    plt.ylim(-5, 58.3)
    plt.axis("off")
    if linenumbers:
        # Yard numbers mirror around midfield (e.g. 110 draws as 10).
        for x in range(20, 110, 10):
            numb = x
            if x > 50:
                numb = 120 - x
            plt.text(
                x,
                5,
                str(numb - 10),
                horizontalalignment="center",
                fontsize=20,  # fontname='Arial',
                color="white",
            )
            plt.text(
                x - 0.95,
                53.3 - 5,
                str(numb - 10),
                horizontalalignment="center",
                fontsize=20,  # fontname='Arial',
                color="white",
                rotation=180,
            )
    if endzones:
        hash_range = range(11, 110)
    else:
        hash_range = range(1, 120)

    # Hash marks: short ticks near each sideline and the two inbound rows.
    for x in hash_range:
        ax.plot([x, x], [0.4, 0.7], color="white")
        ax.plot([x, x], [53.0, 52.5], color="white")
        ax.plot([x, x], [22.91, 23.57], color="white")
        ax.plot([x, x], [29.73, 30.39], color="white")

    if highlight_line:
        hl = highlight_line_number + 10
        plt.plot([hl, hl], [0, 53.3], color="yellow")
        plt.text(hl + 2, 50, "<- {}".format(highlighted_name), color="yellow")
    return fig, ax
def calc_angle(x, y, x1, y1):
    """
    function to calculate angle between two sets of x-y coordinates
    """
    # atan2 handles all quadrants (including dx == 0); convert to degrees.
    return degrees(atan2(y1 - y, x1 - x))
def draw_table_image(img_url, ax):
    """
    Draws table image

    Fetches the image at ``img_url`` and renders it on ``ax`` with the
    axis frame hidden; returns the axes for chaining.
    """
    club_icon = Image.open(urllib.request.urlopen(img_url))
    # Fixed: Image.resize returns a *new* image (it is not in-place), so the
    # result must be re-assigned or the resize is silently discarded.
    club_icon = club_icon.resize((100, 100))
    ax.imshow(club_icon)
    ax.axis("off")
    return ax
class BboxLocator:
    """
    A helper class to locate a bbox in a given axes.
    Will be used in our leaderboards.
    """

    def __init__(self, bbox, transform):
        # bbox: target bounding box expressed in ``transform``'s coordinates.
        self._bbox = bbox
        self._transform = transform

    def __call__(self, ax, renderer):
        # Convert the stored bbox into figure-fraction coordinates, the
        # format matplotlib's axes-locator protocol expects.
        _bbox = self._transform.transform_bbox(self._bbox)
        return ax.figure.transFigure.inverted().transform_bbox(_bbox)
| emekaamadi/Milestone-1-NFL-Project | src/functions.py | functions.py | py | 5,059 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 2... |
1307107485 | import functools
import os
import re
from importlib import import_module
from typing import Callable, Pattern
import yaml
from livelossplot.outputs import NeptuneLogger
def unpack_config(func: Callable) -> Callable:
    """Decorator: load parameters from a YAML config file and inject them
    into the wrapped function's keyword arguments.

    When a truthy ``config`` keyword is present, it is removed, the file it
    names is parsed with YAML, and the resulting mapping is merged into the
    remaining keyword arguments before the call.
    """
    @functools.wraps(func)
    def _wrapper(*args, **kwargs):
        config_file = kwargs.get('config')
        if config_file:
            del kwargs['config']
            with open(config_file, 'r') as f:
                kwargs.update(yaml.full_load(f))
        return func(*args, **kwargs)
    return _wrapper
def create_artifacts_dir(runs_dir: str, run_template: Pattern = re.compile(r'(offline-experiment-)([0-9]+)')) -> str:
    """Create and return the next ``offline-experiment-<N>`` directory.

    Scans ``runs_dir`` for entries matching ``run_template`` and creates a
    new sub-directory whose numeric suffix is one greater than the highest
    existing one (starting at 0 when none exist).

    Args:
        runs_dir: Parent directory for experiment runs; created if missing.
        run_template: Compiled pattern with two groups: the name prefix and
            the numeric run id.

    Returns:
        Path of the newly created run directory.
    """
    os.makedirs(runs_dir, exist_ok=True)
    # Match each directory entry once (the original called re.match twice
    # per entry: once in the expression and once in the filter).
    candidates = (re.match(run_template, name) for name in os.listdir(runs_dir))
    runs = [m for m in candidates if m]
    if not runs:
        next_run_dir = 'offline-experiment-0'
    else:
        last_run_match = max(runs, key=lambda r: int(r.group(2)))
        next_run_id = int(last_run_match.group(2)) + 1
        next_run_dir = last_run_match.group(1) + str(next_run_id)
    next_run_dir = os.path.join(runs_dir, next_run_dir)
    os.makedirs(next_run_dir)
    return next_run_dir
def create_experiment(func: Callable) -> Callable:
    """Decorator: set up experiment logging/artifact dirs, then call `func`.

    Consumes `output_dir` (and optionally `neptune_project`) from the
    keyword arguments and injects `logger_outputs` and `run_dir` instead.
    With a Neptune project the run directory is named after the Neptune
    experiment id; otherwise a local offline-experiment-N directory is used.
    """
    def wrapper(*args, **params):
        neptune_project_name = params.get('neptune_project')
        output_dir = params['output_dir']
        del params['output_dir']
        # The list is inserted into `params` first and appended to afterwards,
        # so NeptuneLogger snapshots `params` while the list is still empty.
        logger_outputs = []
        params['logger_outputs'] = logger_outputs
        if neptune_project_name is not None:
            del params['neptune_project']
            neptune_output = NeptuneLogger(
                project_qualified_name=neptune_project_name, params=params, upload_source_files='**/*.py'
            )
            logger_outputs.append(neptune_output)
            params['run_dir'] = os.path.join(output_dir, neptune_output.experiment.id)
            ret = func(*args, **params)
            # Close the Neptune experiment once the run has finished.
            neptune_output.neptune.stop()
        else:
            # Offline mode: console extrema printing + local artifacts dir.
            logger_outputs.append('ExtremaPrinter')
            params['run_dir'] = create_artifacts_dir(output_dir)
            ret = func(*args, **params)
        return ret
    return wrapper
def import_function(class_path: str) -> Callable:
    """Dynamically import and return an attribute by its dotted path.

    Args:
        class_path: Dotted path such as ``"package.module.ClassOrFunc"``;
            everything before the last dot is the module, the final
            component is the attribute to fetch.

    Returns:
        The imported class or function.
    """
    # rsplit partitions in one step instead of the original split/join dance.
    module_str, attr_name = class_path.rsplit('.', 1)
    module = import_module(module_str)
    return getattr(module, attr_name)
| Bartolo1024/RLCarRacing | utils.py | utils.py | py | 2,684 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.Callable",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "yaml.full_load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "typing.Pattern",
"... |
6932656070 |
import gc
import json
import numpy as np
import optuna
import pandas as pd
import sys
import warnings
import xgboost
from glob import glob
from sklearn.model_selection import KFold, StratifiedKFold
from tqdm import tqdm
from utils import FEATS_EXCLUDED, loadpkl, line_notify, to_json
#==============================================================================
# hyper parameter optimization by optuna
# https://github.com/pfnet/optuna/blob/master/examples/lightgbm_simple.py
#==============================================================================
warnings.filterwarnings('ignore')
# load model/feature configuration (the selected feature list lives here)
CONFIGS = json.load(open('../configs/105_xgb.json'))
# load feather feature files and concatenate them column-wise
FILES = sorted(glob('../features/*.feather'))
DF = pd.concat([pd.read_feather(f) for f in tqdm(FILES, mininterval=60)], axis=1)
# split train & test: rows with a known click_mode form the training set
TRAIN_DF = DF[DF['click_mode'].notnull()]
del DF
gc.collect()
# keep only the features selected in the config
TRAIN_DF = TRAIN_DF[CONFIGS['features']]
# set sid (session id) as index
TRAIN_DF.set_index('sid', inplace=True)
# candidate feature columns, minus bookkeeping columns
FEATS = [f for f in TRAIN_DF.columns if f not in FEATS_EXCLUDED]
def objective(trial):
    """Optuna objective: 5-fold CV multiclass logloss for an XGBoost model.

    Samples regularisation and tree-structure hyperparameters from `trial`
    and returns the final mean validation mlogloss (lower is better).
    Reads the module-level TRAIN_DF / FEATS globals.
    """
    xgb_train = xgboost.DMatrix(TRAIN_DF[FEATS],
                                TRAIN_DF['click_mode'])
    # Single source of truth for the fold count. The original passed an
    # undefined NUM_FOLDS global to xgboost.cv, which raised NameError.
    num_folds = 5
    param = {
        'device': 'gpu',
        'objective': 'multi:softmax',
        'tree_method': 'gpu_hist',     # GPU parameter
        'predictor': 'gpu_predictor',  # GPU parameter
        'eval_metric': 'mlogloss',
        'num_class': 12,
        'eta': 0.05,
        'booster': 'gbtree',
        'lambda': trial.suggest_loguniform('lambda', 1e-8, 1.0),
        'alpha': trial.suggest_loguniform('alpha', 1e-8, 1.0),
        'silent': 1,
    }
    # Tree-structure / sampling hyperparameters.
    param['gamma'] = trial.suggest_loguniform('gamma', 1e-8, 1.0)
    param['max_depth'] = trial.suggest_int('max_depth', 1, 12)
    param['min_child_weight'] = trial.suggest_uniform('min_child_weight', 0, 45)
    param['subsample'] = trial.suggest_uniform('subsample', 0.001, 1)
    param['colsample_bytree'] = trial.suggest_uniform('colsample_bytree', 0.001, 1)
    param['colsample_bylevel'] = trial.suggest_uniform('colsample_bylevel', 0.001, 1)
    # Explicit stratified folds (fixed seed) so every trial is comparable.
    folds = StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=326)
    clf = xgboost.cv(params=param,
                     dtrain=xgb_train,
                     metrics=['mlogloss'],
                     nfold=num_folds,
                     folds=list(folds.split(TRAIN_DF[FEATS], TRAIN_DF['click_mode'])),
                     num_boost_round=10000,
                     early_stopping_rounds=200,
                     verbose_eval=100,
                     seed=47
                     )
    gc.collect()
    # Mean validation mlogloss of the last (early-stopped) round.
    return clf['test-mlogloss-mean'].iloc[-1]
if __name__ == '__main__':
    # Run the hyperparameter search (default TPE sampler, minimisation).
    study = optuna.create_study()
    study.optimize(objective, n_trials=100)
    print('Number of finished trials: {}'.format(len(study.trials)))
    print('Best trial:')
    trial = study.best_trial
    print(' Value: {}'.format(trial.value))
    print(' Params: ')
    for key, value in trial.params.items():
        print(' {}: {}'.format(key, value))
    # save the full optimisation history as CSV
    hist_df = study.trials_dataframe()
    hist_df.to_csv("../output/optuna_result_xgb.csv")
    # write the best parameters back into the model config json
    CONFIGS['params'] = trial.params
    to_json(CONFIGS, '../configs/105_xgb.json')
    # push a completion notification (LINE messenger)
    line_notify('{} finished. Value: {}'.format(sys.argv[0],trial.value))
| MitsuruFujiwara/KDD-Cup-2019 | src/804_optimize_xgb_optuna.py | 804_optimize_xgb_optuna.py | py | 3,421 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line... |
6589740252 | import pickle
from flask import Flask, request, render_template, jsonify, send_file
from elasticsearch import Elasticsearch
from transformers import pipeline, AutoTokenizer, AutoModelForQuestionAnswering, AutoModel
import spacy
import json
import time
from pymongo import MongoClient
import os
from sklearn.linear_model import LogisticRegression
app = Flask(__name__)
# Allow non-ASCII (e.g. Hungarian) characters in JSON responses.
app.config["JSON_AS_ASCII"] = False
with open("config.json", "r") as config:
    config_variables = json.load(config)
# model name -> Hugging Face question-answering pipeline
all_models = dict()
# elastic table name -> index name (identity mapping built from config)
all_elastics = dict()
# elastic table name -> in-domain class id for the OOD classifier
all_ood_classes = dict()
for model in config_variables["models"]:
    all_models[model["model"]] = pipeline(
        model["pipeline"],
        tokenizer=model["tokenizer"],
        model=model["model"],
        device=model["device"],
        handle_impossible_answer=bool(model["handle_impossible_answer"]),
        max_answer_len=model["max_answer_len"],
    )
for elastic_table in config_variables["elastics"]:
    all_elastics[elastic_table["elastic_table_name"]] = elastic_table[
        "elastic_table_name"
    ]
    all_ood_classes[elastic_table["elastic_table_name"]] = elastic_table["ood_class"]
# Dense retriever used to embed questions for out-of-domain detection.
contriever_tokenizer = AutoTokenizer.from_pretrained("facebook/mcontriever-msmarco")
contriever_model = AutoModel.from_pretrained("facebook/mcontriever-msmarco")
# Pre-trained scikit-learn OOD classifier over contriever embeddings.
ood_model = pickle.load(open("models/ood_model.pkl", "rb"))
# Hungarian spaCy pipeline for lemmatisation / POS filtering.
nlp_hu = spacy.load("hu_core_news_trf")
MONGO_URL = os.environ.get("MONGO_URL")
ELASTIC_URL = os.environ.get("ELASTIC_URL")
ELASTIC_USER = os.environ.get("ELASTIC_USER")
ELASTIC_PASSWORD = os.environ.get("ELASTIC_PASSWORD")
# NOTE(review): a hard-coded Elasticsearch password was previously committed
# in a comment here; it has been removed — rotate that credential.
DEBUG = os.environ.get("DEBUG", "").lower() == "true"
@app.route("/test")
def test():
return jsonify({"Hello": "world!"}), 200
# @app.route('/query/<query>')
def predict_from_question(query, size, elastic, model_type):
    """Answer `query` with retrieval (Elasticsearch BM25) + extractive QA.

    Parameters
    ----------
    query : str
        User question (Hungarian); lemmatised/POS-filtered before retrieval.
    size : int
        Number of documents to retrieve from Elasticsearch.
    elastic : str
        Key into `all_elastics`, selecting the index to search.
    model_type : str
        Key into `all_models`, selecting the QA pipeline.

    Returns
    -------
    list[dict]
        Single-element list with the answer, contexts, scores and metadata.
    """
    # Keep only content-bearing lemmas for the match query.
    doc_q = nlp_hu(query)
    clean_tokens = []
    for token in doc_q:
        if token.pos_ not in ["DET", "ADV", "PRON", "PUNCT"]:
            clean_tokens.append(token.lemma_)
    clean_question = " ".join(clean_tokens)
    body = {"size": size, "query": {"match": {"document": clean_question}}}
    es = Elasticsearch(
        ELASTIC_URL, http_auth=(ELASTIC_USER, ELASTIC_PASSWORD), verify_certs=False
    )
    s = es.search(index=all_elastics[elastic], body=body)
    # The query only returns the text before the question mark, so we add it here.
    official_question = query if query[-1:] == "?" else query + "?"
    contexts = list(s["hits"]["hits"])
    return_value = []
    # Concatenate all retrieved documents; "\n-\n\n" marks document borders.
    official_all_context = "\n-\n\n".join(
        context["_source"]["official_document"] for context in contexts
    )
    lemmatized_all_context = "\n-\n\n".join(
        context["_source"]["document"] for context in contexts
    )
    app.logger.info(contexts)
    qa_pipeline = all_models[model_type]
    if official_all_context != "":
        prediction = qa_pipeline(
            {"context": official_all_context, "question": official_question}
        )
    else:
        # Nothing retrieved: empty answer with a sentinel score.
        prediction = {"answer": "", "start": 0, "end": 0, "score": -1}
    # An answer must not span a document border; keep the part before one.
    if "\n-\n\n" in prediction["answer"]:
        model_answer = prediction["answer"].split("\n-\n\n")[0]
    else:
        model_answer = prediction["answer"]
    # Locate the retrieved document the answer came from (first match wins).
    relevant_context = ""
    elastic_score = 0
    file_name, h1, h2, h3 = "", "", "", ""
    for context_raw in contexts:
        # `in` replaces the original explicit __contains__ call. Note an
        # empty model_answer matches the first context.
        if model_answer in context_raw["_source"]["official_document"]:
            relevant_context = context_raw["_source"]["official_document"]
            elastic_score = context_raw["_score"]
            file_name = context_raw["_source"]["file_name"]
            h1 = context_raw["_source"]["h1"]
            h2 = context_raw["_source"]["h2"]
            h3 = context_raw["_source"]["h3"]
            break
    return_value.append(
        {
            "lemmatized_context": lemmatized_all_context,
            "official_question": official_question,
            "official_context": official_all_context,
            "relevant_context": relevant_context,
            "answer": prediction["answer"],
            "start": prediction["start"],
            "end": prediction["end"],
            "model_score": prediction["score"],
            "elastic_score": elastic_score,
            "metadata": [
                {"section": h2 + " > " + h3, "filename": file_name, "source": h1}
            ]
        }
    )
    return return_value
@app.route("/qa", methods=["POST"])
def rest_api():
try:
record = json.loads(request.data)
if record["query"] == "":
return jsonify({"answers": [], "system": {}})
record["elapsed_time"] = time.time()
ood_class = ood_model.predict(get_contriever_vector([record["query"]]).detach().numpy())[0].item()
if (ood_class == all_ood_classes[record["elastic"]]):
query = predict_from_question(
record["query"], record["size"], record["elastic"], record["model_type"]
)
query[0]["ood_class"] = ood_class
else:
query = list([{"ood_class": ood_class}])
record["elapsed_time"] = time.time() - record["elapsed_time"]
record["time"] = time.time()
mongo_id = str(
db["qa"].insert_one({"answers": query, "system": record}).inserted_id
)
try:
if not DEBUG:
for answer in query:
del answer["lemmatized_context"]
del answer["official_question"]
del answer["official_context"]
del answer["model_score"]
del answer["elastic_score"]
del answer["ood_class"]
except Exception as e:
app.logger.error(e)
db["errors"].insert_one(
{"error": str(e), "time": time.time(), "type": "qa_delete_ood"}
)
return jsonify({"answers": query, "system": {"id": mongo_id}})
except Exception as e:
app.logger.error(e)
db["errors"].insert_one({"error": str(e), "time": time.time(), "type": "qa"})
return jsonify({}), 418
@app.route("/feedback/like", methods=["POST"])
def feedback_like():
try:
record = json.loads(request.data)
db["likes"].insert_one({"id": record["id"], "time": time.time()})
return jsonify({}), 200
except Exception as e:
app.logger.error(e)
db["errors"].insert_one({"error": str(e), "time": time.time(), "type": "like"})
return jsonify({}), 400
@app.route("/feedback/dislike", methods=["POST"])
def feedback_dislike():
try:
record = json.loads(request.data)
db["dislikes"].insert_one(
{
"id": record["id"],
"what_should_be": record["what_should_be"],
"whats_wrong": record["whats_wrong"],
"anything_else": record["anything_else"],
"was_this_in_the_context": record["was_this_in_the_context"],
"time": time.time(),
}
)
return jsonify({}), 200
except Exception as e:
app.logger.error(e)
db["errors"].insert_one(
{"error": str(e), "time": time.time(), "type": "dislike"}
)
return jsonify({}), 400
def get_contriever_vector(sentences):
    """Embed a list of sentences with the mContriever model.

    Returns mean-pooled token embeddings (one vector per sentence, with
    padding positions excluded via the attention mask).
    """
    inputs = contriever_tokenizer(sentences, padding=True, truncation=True, return_tensors="pt")
    outputs = contriever_model(**inputs)
    def mean_pooling(token_embeddings, mask):
        # Zero out padded positions, then average over the sequence axis.
        # Assumes token_embeddings is (batch, seq, dim) and mask is
        # (batch, seq) — standard HF layout; confirm if the model changes.
        token_embeddings = token_embeddings.masked_fill(~mask[..., None].bool(), 0.0)
        sentence_embeddings = token_embeddings.sum(dim=1) / mask.sum(dim=1)[..., None]
        return sentence_embeddings
    return mean_pooling(outputs[0], inputs["attention_mask"])
if __name__ == "__main__":
client = MongoClient(MONGO_URL)
db = client["shunqa"]
app.run(host="0.0.0.0", port=5000, debug=True)
| szegedai/SHunQA | backend/flask_service.py | flask_service.py | py | 8,073 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "transformers.pipeline",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "transformers.AutoTokenize... |
7796643764 | import numpy as np
import pickle
from engine.sim_functions import calc_local_zodiacal_minimum,Spectrograph
from engine.planet_retrieval import Planet,Star
from engine.main_computer import compute
from itertools import chain
from multiprocessing import Pool
import json
import sys
dR_scale = float(sys.argv[1]) #Reflectance error in beamsplitter (CLI arg 1)
out_file = str(sys.argv[2]) #Path of the JSON results file to write (CLI arg 2)
base_wave = 18 #Baseline-optimising wavelength [microns]
#####################################################
#Secondary parameters
min_wave = 4 #microns
max_wave = 19 #microns
num_channels = 50
#####################################################
#Set up the spectral parameters
spec = Spectrograph(min_wave,max_wave,base_wave,num_channels)
from engine.nullers.five_telescopes_err_2 import get_nuller_response
architecture_verbose = "Five telescope kernel nuller, optimised for diagonal telescopes (K2 alt)"
base_scale_factor = 1.028 #= approx 1.03*0.619 (where 0.619 is the conversion between a side and diagonal of a pentagon)
sz = 1500 #Simulation grid size
mode_verbose = "Search"
fov_scale_factor = 5
dphi_scale = dR_scale #Phase error magnitude mirrors the reflectance error
number_processes = 28 #parallelise? (worker processes for the Pool below)
###########################################################################
#Tau boo coordinates (ecliptic latitude of 26 degrees)
ra = 206.81560195
dec = 17.45690446
z = 1 #exozodi (same as solar system)
#Generate stars of given types
def Beta_Pic(dist):
    """Beta Pictoris analogue (A6V) at distance `dist` (presumably pc).

    Radius/mass presumably in solar units, teff in K — confirm against the
    Star constructor. Uses the module-level ra, dec, spec and z.
    """
    name = "Beta Pic analogue"
    stype = "A6V"
    rad = 1.8
    teff = 8052
    mass = 1.75
    return Star(name,1,dist,stype,rad,teff,mass,ra,dec,spec,z)
def Tau_Boo(dist):
    """Tau Bootis analogue (F7V) star at distance `dist` (presumably pc)."""
    name = "Tau Boo analogue"
    stype = "F7V"
    rad = 1.42
    teff = 6399
    mass = 1.39
    return Star(name,2,dist,stype,rad,teff,mass,ra,dec,spec,z)
def Sun(dist):
    """Solar analogue (G2V) star at distance `dist` (presumably pc)."""
    name = "Solar analogue"
    stype = "G2V"
    rad = 1
    teff = 5772
    mass = 1
    return Star(name,3,dist,stype,rad,teff,mass,ra,dec,spec,z)
def Eps_Eri(dist):
    """Epsilon Eridani analogue (K2V) star at distance `dist` (presumably pc)."""
    name = "Epsilon Eri analogue"
    stype = "K2V"
    rad = 0.735
    teff = 5084
    mass = 0.82
    return Star(name,4,dist,stype,rad,teff,mass,ra,dec,spec,z)
def Prox_Cen(dist):
    """Proxima Centauri analogue (M5V) star at distance `dist` (presumably pc)."""
    name = "Proxima Cen analogue"
    stype = "M5V"
    rad = 0.15
    teff = 3042
    mass = 0.12
    return Star(name,5,dist,stype,rad,teff,mass,ra,dec,spec,z)
#Helper function to generate Earth-twin planets
def myPlanet(star,num,a):
    """Build an Earth-twin Planet at semi-major axis `a` around `star`.

    Period is fixed at 365 days; the remaining zeroed positional arguments
    are presumably orbital angles/eccentricity — confirm against the Planet
    constructor signature.
    """
    #Earth twin
    PRad = 1 # planet radius [Earth radii]
    PMass = 1 # planet mass [Earth masses]
    Temp = 300 # temperature [K]
    Ageom = 0.1 #Rough estimate (geometric albedo)
    AngSep = a/star.Dist # angular separation — a [AU] / Dist [pc], presumably
    lam_ref = 0.318 #From PPop, assuming face on orbit (inc = 0)
    return Planet(star,0,star.SNumber,num,PRad,PMass,365,0,0,0,0,0,0,0,Ageom,a,a,AngSep,0,0,lam_ref,Temp,spec)
#Give each star one planet in the middle of the HZ
def append_planet_list(star):
    """Give `star` a single Earth twin mid habitable zone; mutates and returns it."""
    star.Planets = [myPlanet(star,2,star.HZMid)]
    return star
# Distances at which each analogue star is placed.
dists = np.linspace(1,20,20)
#Make the list of stars at given distances (one HZ Earth-twin each)
star_list = []
for d in dists:
    star_list.append(append_planet_list(Sun(d)))
    star_list.append(append_planet_list(Eps_Eri(d)))
    star_list.append(append_planet_list(Prox_Cen(d)))
#Make errors: random-sign phase (dphi) and reflectance (dR) perturbations.
#Only indices 2,4,5,7,8,9 are perturbed. The two loops draw randoms in the
#same order as the original line-per-index version, so results are
#reproducible under a fixed RNG seed.
PERTURBED_INDICES = (2, 4, 5, 7, 8, 9)
dphi = np.zeros(10)
dR = np.zeros(10)
for i in PERTURBED_INDICES:
    dphi[i] = np.sign(np.random.random()*2-1)*dphi_scale
for i in PERTURBED_INDICES:
    dR[i] = np.sign(np.random.random()*2-1)*dR_scale
def response_func(baseline,fov,sz,base_wavelength):
    """Nuller response with this run's (module-level) dphi/dR errors applied."""
    return get_nuller_response(dphi,dR,baseline,fov,sz,base_wavelength)
###########################################################################
#Get local zodi minimum
local_exozodi = calc_local_zodiacal_minimum(spec)
#RUN!!
#Multiprocessing over stars. NOTE(review): the Pool is created at module
#level with no __main__ guard, which relies on fork start (Linux) — confirm.
def worker_func(star):
    """Run the full simulation for one star using the module-level config."""
    return compute(star,1,response_func,spec,sz,base_scale_factor,fov_scale_factor,local_exozodi)
pool = Pool(processes=number_processes)
ls_star_data = pool.map(worker_func,star_list)
pool.close()
#Flatten the per-star result lists into one list of dictionaries
dict_ls = list(chain.from_iterable(ls_star_data))
#Function to round sig figs (for numerical readability)
def round_sig_figs(x, p):
    """Round ``x`` (scalar or ndarray) to ``p`` significant figures, element-wise."""
    finite_nonzero = np.isfinite(x) & (x != 0)
    # Substitute a harmless magnitude where log10 would be undefined;
    # those entries have scale 1 and round to themselves.
    safe_magnitude = np.where(finite_nonzero, np.abs(x), 10**(p-1))
    scale = 10 ** (p - 1 - np.floor(np.log10(safe_magnitude)))
    return np.round(x * scale) / scale
#Output header: run configuration, followed by the per-planet results list
main_dict = {
    "Architecture":architecture_verbose,
    "Mode":mode_verbose,
    "baseline_wavelength (microns)":round_sig_figs(spec.baseline_wave*1e6,5),
    "sz":sz,
    "fov_scale_factor":fov_scale_factor,
    "min_wavelength (microns)":round_sig_figs(spec.wave_min*1e6,5),
    "max_wavelength (microns)":round_sig_figs(spec.wave_max*1e6,5),
    "num_channels":spec.n_channels,
    "channel_widths (microns)":round_sig_figs(spec.dlambda*1e6,5),
    "channel_centres (microns)":round_sig_figs(spec.channel_centres*1e6,5),
    "results":dict_ls
}
#Needed for writing JSON
class NpEncoder(json.JSONEncoder):
    """JSON encoder that understands NumPy scalars and arrays.

    Maps np.integer -> int, np.floating -> float, np.ndarray -> list;
    everything else is delegated to the base encoder.
    """

    _CONVERTERS = (
        (np.integer, int),
        (np.floating, float),
        (np.ndarray, lambda arr: arr.tolist()),
    )

    def default(self, obj):
        for np_type, convert in self._CONVERTERS:
            if isinstance(obj, np_type):
                return convert(obj)
        return super(NpEncoder, self).default(obj)
#Write file — context manager closes the handle even if json.dump raises.
with open(out_file, "w") as fout:
    json.dump(main_dict, fout, cls=NpEncoder, indent=2)
| JonahHansen/LifeTechSim | error_sim.py | error_sim.py | py | 5,550 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "engine.sim_functions.Spectrograph",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "engine.... |
31974764531 | # -*- coding: utf-8 -*-
import json
import scrapy
from club_activity_friends_details.items import ClubActivityFriendsDetailsItem
from lib.GetCurrentTime import get_current_date
from models.club import StructureStartUrl
class AutoHomeClubActivityFriendsDetailsSpider(scrapy.Spider):
    """Crawl Autohome club "activity friends" lists for every club id.

    Walks each club's paginated JSON endpoint; when a club's last page is
    reached, it moves on to the next club id. Pagination state lives in
    class-level counters (club_index / page_index), so this spider assumes
    a strictly sequential request chain (single callback, dont_filter).
    """
    name = 'auto_home_club_activity_friends_details'
    club_id_list = StructureStartUrl().get_bbs_id()
    club_index = 0  # index of the club currently being crawled
    page_index = 1  # 1-based page within the current club
    base_url = "https://club.app.autohome.com.cn/club_v8.2.0/club/getactivityfriendlist-pm2-b%s-t2-c0-u66230826-p%s-s20.json"
    start_urls = [base_url % (club_id_list[club_index], page_index)]
    def parse(self, response):
        """Yield one item per listed friend, then follow the next page/club."""
        item = ClubActivityFriendsDetailsItem()
        content = json.loads(response.body.decode(), strict=False)
        activity_friend_list = content["result"]["activityfriendlist"]  # active club friends
        club_master_list = content["result"]["clubmasterlist"]  # recommended club friends
        for club_master in club_master_list:
            item["bbs_id"] = self.club_id_list[self.club_index]
            item["user_id"] = club_master["userid"]
            item["recommend"] = 0
            item["time"] = get_current_date()
            yield item
        for activity_friend in activity_friend_list:
            item["bbs_id"] = self.club_id_list[self.club_index]
            item["user_id"] = activity_friend["userid"]
            item["recommend"] = 1
            item["time"] = get_current_date()
            yield item
        self.page_index += 1
        if self.page_index <= content["result"]["pagecount"]:
            # More pages remain for the current club.
            print(self.page_index)
            url = self.base_url % (self.club_id_list[self.club_index], self.page_index)
            print(url)
            yield scrapy.Request(url=url, callback=self.parse, dont_filter=True)
        else:
            # Current club exhausted: restart pagination on the next club.
            self.club_index += 1
            if self.club_index < len(self.club_id_list):
                self.page_index = 1
                url = self.base_url % (self.club_id_list[self.club_index], self.page_index)
                yield scrapy.Request(url=url, callback=self.parse, dont_filter=True)
| CY113/Cars | club_activity_friends_details/club_activity_friends_details/spiders/auto_home_club_activity_friends_details.py | auto_home_club_activity_friends_details.py | py | 2,113 | python | en | code | 10 | github-code | 6 | [
{
"api_name": "scrapy.Spider",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "models.club.StructureStartUrl",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "club_activity_friends_details.items.ClubActivityFriendsDetailsItem",
"line_number": 19,
... |
36413248388 | # ---------------
# ParamCopy - Substance 3D Designer plugin
# (c) 2019-2022 Eyosido Software SARL
# ---------------
import os, weakref
from functools import partial
from PySide2.QtCore import QObject
from PySide2.QtWidgets import QToolBar
import sd
from sd.context import Context
from sd.api.sdapplication import SDApplication
from sd.api.sduimgr import SDUIMgr
from paramcopy.pccore import pclog
from paramcopy.pccore.pcdata import PCData
class PCGraphCustomToolbarMgr(QObject):
    """
    Handles a single custom toolbar per graph view used by a single external component.
    The `callback` given at construction must create, set up and return one
    QToolBar; it is invoked once per newly created graph view.
    """
    def __init__(self, callback, toolbarIcon):
        super().__init__()
        self.sdApp = sd.getContext().getSDApplication()
        self.sdUiMgr = self.sdApp.getQtForPythonUIMgr()
        self.toolbars = {} # key: graphViewId, value: created QToolBar. NOTE(review): stored as a *strong* reference despite the original comment claiming weak refs; entries are pruned via onToolbarDestroyed — confirm intended.
        self.callback = partial(callback) # callback must create/setup a single QToolBar object and return it.
        self.toolbarIcon = toolbarIcon
        self.registerGraphViewCreated()
    # --- Public
    def cleanup(self):
        """Delete all managed toolbars and unregister the graph-view callback."""
        self.removeAllToolbars()
        if self.graphViewCreatedCbId:
            self.sdUiMgr.unregisterCallback(self.graphViewCreatedCbId)
    # --- Private
    def registerGraphViewCreated(self):
        """Subscribe to Designer's graph-view-created notifications."""
        self.graphViewCreatedCbId = self.sdUiMgr.registerGraphViewCreatedCallback( partial(self.onGraphViewCreated, uiMgr=self.sdUiMgr))
    def removeAllToolbars(self):
        """Schedule Qt deletion of every managed toolbar and clear the map."""
        for toolbarRef in self.toolbars.values():
            weakref.proxy(toolbarRef).deleteLater()
        self.toolbars = {}
    def onGraphViewCreated(self, graphViewId, uiMgr):
        """Create and attach a toolbar for a new graph view (once per view)."""
        if not self.toolbars.get(graphViewId):
            toolbar = self.callback() # let user create and setup the QToolBar
            toolbar.destroyed.connect(partial(self.onToolbarDestroyed, graphViewId=graphViewId))
            self.toolbars[graphViewId] = toolbar
            self.sdUiMgr.addToolbarToGraphView(graphViewId, toolbar, icon = self.toolbarIcon, tooltip = toolbar.toolTip())
    def onToolbarDestroyed(self, graphViewId):
        """Drop the mapping entry when Qt destroys the toolbar."""
        # self.sender() is not the toolbar object, so we need to look-up by graphViewId
        if self.toolbars.get(graphViewId):
            del self.toolbars[graphViewId]
| eyosido/ParamCopy | src/paramcopy/pcui/pctoolbar.py | pctoolbar.py | py | 2,317 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "PySide2.QtCore.QObject",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "sd.getContext",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "functools.part... |
5254327339 | # -*- coding: utf-8 -*-
"""
Utility functions
@authors: Álvaro Ramírez Cardona (alramirezca@unal.edu.co)
Vanessa Robledo Delgado (vanessa.robledo@udea.edu.co)
"""
from os import path
import xarray as xr
import numpy as np
import pandas as pd
from scipy import ndimage
import geopandas as gpd
from datetime import timedelta
import rioxarray
import rasterio
from geopy.distance import geodesic
import math
import sys
import glob
import numbers as checknumbers
from shapely.geometry import MultiPolygon, Polygon, shape, Point, MultiPoint, mapping
from shapely.wkt import loads
import uuid
import tqdm
import time
import warnings
import folium
import webbrowser
warnings.filterwarnings("ignore")
#___________________________________Functions______________________________________________________
def readNC(pathTb = None, pathP = None, utc_local_hour = 0, utc_local_sign = "minus"):
    """
    Function for reading and resampling the Tb and P DataArrays.
    The spatial resampling is 0.1° - linear interpolation.
    The temporal resampling is 1 h - nearest original coordinate to up-sampled frequency coordinates.
    Inputs:
    * pathTb: str, path where the Tb raw data are located.
    * pathP: str, path where the P raw data are located.
    The path must have the next structure:
        linux: r"/home....../"
        windows: r"C:/....../"
    * utc_local_hour: int, allows transforming the raw data hour (UTC)
    to a time zone of interest (interest region).
    * utc_local_sign: str (minus, plus, local), sets whether to add or subtract
    in the conversion for the time zone of interest. If set "local" no conversion will be done
    and the time zone will be GMT or UTC.
    (ex: UTC-5 timing is determined by utc_local_hour = 5 and utc_local_sign = "minus".)
    Outputs:
    * xarray.Dataset with the brightness temperature (Tb) and P (Precipitation) data.
    """
    if isinstance(pathTb, str) and isinstance(pathP, str):
        try:
            #Globbing the Tb and P files
            filenamestb = glob.glob(pathTb+'*.nc4')
            filenamesp =glob.glob(pathP+'*.nc4')
            #Reading P data
            ds_p = xr.open_mfdataset(filenamesp)
            ds_p['P'] = ds_p['precipitationCal']; del ds_p['precipitationCal']
            ds_p = ds_p['P'].T
            #Temporal resampling precipitation data
            ds_p = ds_p.resample(time ='1H').mean() #hourly precipitation average
            #Reading Tb data
            ds_t = xr.open_mfdataset(filenamestb); ds_t = ds_t['Tb']
            #Temporal resampling Tb data
            ds_t = ds_t.resample(time="1H").nearest(tolerance="1H")
            #Spatial resampling Tb DataArray. This is based on P coordinates (lat and lon).
            ds_t = ds_t.interp(lat=ds_p.lat.values, lon=ds_p.lon.values)
            #Reorder levels from Tb DataArray for merging with P DataArray.
            ds_t = ds_t.transpose("lat", "lon", "time")
            #Merging DataArrays
            try:
                ds = xr.merge([ds_p, ds_t])
            #NOTE(review): bare except assumes the only failure mode is a
            #CFTimeIndex; any other merge error is retried the same way.
            except:
                #The raw P data from some years does not have a valid datetime index
                #These lines convert CFTimeIndex to DatetimeIndex for merging.
                datetimeindex = ds_p.indexes['time'].to_datetimeindex()
                ds_p['time'] = datetimeindex
                ds = xr.merge([ds_p, ds_t])
            #Closing the raw DataArrays
            ds_p.close()
            ds_t.close()
            #Converting the UTC to local hour
            datex = ds.time.coords.to_index()
            #Replacing the datetimeindex based on UTC_LOCAL_HOUR
            if utc_local_sign == "minus":
                datedt = datex.to_pydatetime() - timedelta(hours=utc_local_hour)
            elif utc_local_sign == "plus":
                datedt = datex.to_pydatetime() + timedelta(hours=utc_local_hour)
            elif utc_local_sign == "local":
                datedt = datex.to_pydatetime()
            else:
                raise TypeError("You must type a valid parameter for utc_local_sign: minus, plus or local. If you use local please enter utc_local_hour = 0")
            dates_64 = [np.datetime64(row) for row in datedt]
            ds = ds.assign_coords({"time": dates_64})
            #Attaching attributes to the merged DataArray.
            ds.Tb.attrs["units"] = "K"
            ds.P.attrs["units"] = "mm/h"
            ds.Tb.attrs["_FillValue"] = 'NaN'
            ds.P.attrs["_FillValue"] = 'NaN'
            ds.lon.attrs['units'] = "degrees_east"
            ds.lat.attrs['units'] = "degrees_north"
            #Extracting dimensions: time, lat and lon
            #NOTE(review): `dates`, `lon` and `lat` are computed but unused here.
            dates = ds.time.values;
            lon, lat = np.float32(np.meshgrid(ds.lon,ds.lat))
            #Establishing EPSG:4326
            ds = ds.rio.set_crs(4326)
            ds.attrs['crs'] = ds.rio.crs
            initial_date_lecture = str(ds.time[0].values)[:16]
            final_date_lecture = str(ds.time[-1].values)[:16]
            print('Complete Tb and P data reading ' + initial_date_lecture + " - " + final_date_lecture)
        #NOTE(review): this bare except masks *any* failure (bad data, dask,
        #projection errors...) as a path problem — consider narrowing.
        except:
            raise FileNotFoundError("Make sure you are complying with the Tb and P paths parameters: /home../")
    elif isinstance(pathTb, str) and pathP is None:
        try:
            #Globbing the Tb files
            filenamestb = glob.glob(pathTb+'*.nc4')
            #Reading Tb data
            ds_t = xr.open_mfdataset(filenamestb);
            #Temporal resampling Tb data
            ds_t = ds_t.resample(time="1H").nearest(tolerance="1H")
            #Reorder levels from Tb DataArray
            ds = ds_t.transpose("lat", "lon", "time")
            #Converting the UTC to local hour
            datex = ds.time.coords.to_index()
            #Replacing the datetimeindex based on UTC_LOCAL_HOUR
            if utc_local_sign == "minus":
                datedt = datex.to_pydatetime() - timedelta(hours=utc_local_hour)
            elif utc_local_sign == "plus":
                datedt = datex.to_pydatetime() + timedelta(hours=utc_local_hour)
            elif utc_local_sign == "local":
                #NOTE(review): unlike the Tb+P branch above, "local" still adds
                #the offset here; harmless only when utc_local_hour == 0 — confirm.
                datedt = datex.to_pydatetime() + timedelta(hours=utc_local_hour)
            else:
                raise TypeError("You must type a valid parameter for utc_local_sign: minus, plus or local. If you use local please enter tc_local_hour = 0")
            dates_64 = [np.datetime64(row) for row in datedt]
            ds = ds.assign_coords({"time": dates_64})
            #Attaching attributes to the DataArray.
            ds.Tb.attrs["units"] = "K"
            ds.Tb.attrs["_FillValue"] = 'NaN'
            ds.lon.attrs['units'] = "degrees_east"
            ds.lat.attrs['units'] = "degrees_north"
            #Extracting dimensions: time, lat and lon (unused here; see NOTE above)
            lon, lat = np.float32(np.meshgrid(ds.lon,ds.lat))
            #Establishing EPSG:4326
            ds = ds.rio.set_crs(4326)
            ds.attrs['crs'] = ds.rio.crs
            initial_date_lecture = str(ds.time[0].values)[:16]
            final_date_lecture = str(ds.time[-1].values)[:16]
            print('Complete Tb data reading ' + initial_date_lecture + " - " + final_date_lecture)
        #NOTE(review): same bare-except masking as in the Tb+P branch.
        except:
            raise FileNotFoundError("Make sure you are complying with the Tb path parameters: /home../")
    else:
        raise FileNotFoundError("There must be at least a valid path for Tb data.")
    return ds
def readTRACKS(path):
    """Load previously saved track/MCS results as a GeoDataFrame.

    Inputs:
    * path: str, CSV file with the tracks and associated MCS results.
    Outputs:
    * geopandas.GeoDataFrame indexed by (belong, id_gdf), with the
      'geometry' and 'centroid_' columns decoded from WKT (EPSG:4326).
    """
    tracks = pd.read_csv(path, index_col = ["belong", "id_gdf"], parse_dates = ["time"])
    for wkt_column in ('geometry', 'centroid_'):
        tracks[wkt_column] = gpd.GeoSeries.from_wkt(tracks[wkt_column])
    return gpd.GeoDataFrame(tracks, geometry='geometry', crs = 4326)
def plot_folium(resume, location, path_save):
    """
    function for plotting tracks results in folium map.
    Inputs:
    * resume: GeoDataFrame, data related with the tracks and MCS's.
    * location list (lat, lon), location for centering the folium map.
    * path_save: str, path where the .html folium map will be saved
    Outputs:
    * the .html folium map will be opened with the "webbrowser" library
    * path_result: str, path where the .html was saved.
    """
    m = folium.Map(location=location, zoom_start=5, tiles='CartoDB positron')
    df = resume.reset_index()
    #One iteration per track id ("belong")
    for i in df.belong.unique():
        #Sorting index by time
        tracks = df.loc[df.belong == i].reset_index()
        tracks = tracks.set_index("time").sort_index()
        tracks = tracks.reset_index()
        for idn, r in tracks.iterrows():
            #Simplify each polygon and add it as a GeoJson layer
            #(note: geo_j is reused for both the json string and the layer)
            sim_geo = gpd.GeoSeries(r['geometry']).simplify(tolerance=0.001)
            geo_j = sim_geo.to_json()
            geo_j = folium.GeoJson(data=geo_j,
                                   style_function=lambda x: {'fillColor': 'orange'})
            folium.Popup(r.index).add_to(geo_j)
            #The Tb+P methodology rows carry a mean_pp column; its absence
            #raises KeyError, which routes to the Tb-only marker below.
            try: #Tb and P methodology
                folium.Marker(location=[r['centroid_'].y, r['centroid_'].x], popup='id_track: {} <br> id_mcs: {} <br> hour_mcs: {} <br> time: {} <br> area[km2]: {} <br> distance_traveled[km]: {} <br> direction[°]: {} <br> intersection_percentage[%]: {} <br> mean_tb[K]: {} <br> mean_p[mm/h]: {} <br> total_distance_traveled[km]: {} <br> total_duration[h]: {} <br>'.format(r['belong'], r["id_gdf"], idn, r["time"], round(r['area_tb'],1), round(r["distance_c"],1), r["direction"], r["intersection_percentage"], round(r["mean_tb"],1), round(r["mean_pp"],1), round(r["total_distance"],1), r["total_duration"])).add_to(m)
                extra_name = "Tb_P_"
            except: #Tb methodology
                folium.Marker(location=[r['centroid_'].y, r['centroid_'].x], popup='id_track: {} <br> id_mcs: {} <br> hour_mcs: {} <br> time: {} <br> area[km2]: {} <br> distance_traveled[km]: {} <br> direction[°]: {} <br> intersection_percentage[%]: {} <br> mean_tb[K]: {} <br> total_distance_traveled[km]: {} <br> total_duration[h]: {} <br>'.format(r['belong'], r["id_gdf"], idn, r["time"], round(r['area_tb'],1), round(r["distance_c"],1), r["direction"], r["intersection_percentage"], round(r["mean_tb"],1), round(r["total_distance"],1), r["total_duration"])).add_to(m)
                extra_name = "Tb_"
            geo_j.add_to(m)
    #Output name carries the methodology prefix and the covered time range.
    #NOTE(review): extra_name is set inside the loops — an empty `resume`
    #would raise NameError here; confirm inputs are never empty.
    min_time = str(resume.time.min())[:-6].replace("-","_").replace(" ","_")
    max_time = str(resume.time.max())[:-6].replace("-","_").replace(" ","_")
    path_result = path_save+'map_'+extra_name+min_time+"_"+max_time+".html"
    m.save(path_result)
    #Best-effort: opening a browser may fail on headless systems.
    try:
        webbrowser.open(path_result)
    except:
        pass
    return path_result
| alramirezca/ATRACKCS | atrackcs/utils/funcs.py | funcs.py | py | 11,183 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "xarray.open_mfdataset",
... |
25441389301 | # -*- coding: utf-8 -*-
from PIL import Image,ImageFont,ImageDraw
import json
import cover
import time
from io import BytesIO
def paste_with_a(base_img_, img_, pos):
    """Paste img_ onto base_img_ at pos, honouring the alpha channel when
    the source image has one (4 bands -> use the alpha band as the mask)."""
    bands = img_.split()
    if len(bands) == 4:
        base_img_.paste(img_, pos, mask=bands[3])
    else:
        base_img_.paste(img_, pos)
def drawRoundRec(drawObject, x, y, w, h, r, fill_color):
    """Draw a filled rounded rectangle at (x, y) with width w, height h and
    corner radius r, using four corner ellipses plus two overlapping
    rectangles on the given PIL ImageDraw object."""
    '''Rounds'''
    # One ellipse of diameter r in each corner provides the rounding.
    drawObject.ellipse((x, y, x + r, y + r), fill=fill_color)
    drawObject.ellipse((x + w - r, y, x + w, y + r), fill=fill_color)
    drawObject.ellipse((x, y + h - r, x + r, y + h), fill=fill_color)
    drawObject.ellipse((x + w - r, y + h - r, x + w, y + h), fill=fill_color)
    '''rec.s'''
    # Two rectangles (inset horizontally / vertically by r/2) fill the body.
    drawObject.rectangle((x + r / 2, y, x + w - (r / 2), y + h), fill=fill_color)
    drawObject.rectangle((x, y + r / 2, x + w, y + h - (r / 2)), fill=fill_color)
def draw_plate_arc_style(img_, dic_):
    """Render one 900x300 score plate onto img_ from a record dict dic_
    (keys used: title, level_label, ra, ds, achievements, rate, type, fc, fs).

    NOTE(review): this function reads the module globals ``i`` (current plate
    rank, used for the "#N" label) and ``diff_color`` — it only works when
    called from the plate loops in the ``__main__`` block; confirm before
    reusing elsewhere.
    """
    draw = ImageDraw.Draw(img_)
    # draw 1
    # draw cover
    # @brief
    #   wiki url will use "_" to replace " "
    #   display " " in mai_rating_img but will use "_" to download cover
    #   however cover.download will save img with "_" (cause ues "_" as param in cover.download())
    wiki_title = dic_["title"].replace(" ", "_")
    # Fall back to the generic "gd" cover when the download fails.
    if 0 < cover.download_cover(wiki_title):
        cover_img = Image.open("./cover/"+wiki_title+".png")
    else:
        cover_img = Image.open("./res/" + "gd" + ".png")
    cover_img = cover_img.resize((250, 250), Image.ANTIALIAS)
    paste_with_a(img_,cover_img,(25,25))
    # draw rating base
    # master 159 81 220
    draw.polygon((275, 25, 525, 25, 575, 75, 275, 75), diff_color[dic_["level_label"]])
    # write dx rating
    draw.text((275 + 20 , 25 + 5), " "+str(dic_["ra"])+" (" + str(dic_["ds"]) + ")", font=ImageFont.truetype('C:/windows/fonts/Dengb.ttf', 40), fill="#ffffff")
    # write b rank
    draw.text((800 , 25 + 8), "#" + str(i+1), font=ImageFont.truetype('C:/windows/fonts/Dengb.ttf', 45), fill="#000000")
    print(str(i))
    # draw 2
    # write title
    draw.text((275 + 20, 25+50+25), dic_["title"], font=ImageFont.truetype('C:/windows/fonts/Deng.ttf', 48), fill="#000000")
    # draw 3
    # write score
    draw.text((275 + 20, 25 + 50 + 25 + 60), str(dic_["achievements"]) + "%", font=ImageFont.truetype('C:/windows/fonts/ALGER.TTF', 58),fill="#000000")
    # draw score rank "rate": "sssp"
    score_rank_img = Image.open("./res/" + dic_["rate"] + ".png")
    paste_with_a(img_, score_rank_img, (625, 25 + 50 + 25 + 60 - 20))
    #draw 4
    #draw type "type": "SD"
    music_type_img = Image.open("./res/" + dic_["type"] + ".png")
    paste_with_a(img_, music_type_img, (275 + 20, 25 + 50 + 25 + 60 + 90))
    #draw fc "fc": "fcp"
    # Empty fc/fs strings get a transparent placeholder image instead.
    if len(dic_["fc"]) > 0:
        fc_img = Image.open("./res/" + dic_["fc"] + ".png")
    else:
        fc_img = Image.open("./res/" + "fc_dummy" + ".png")
    paste_with_a(img_, fc_img, (275 + 20 + 150, 25 + 50 + 25 + 60 + 90 - 8))
    # #draw fs "fs": ""
    if len(dic_["fs"]) > 0:
        fs_img = Image.open("./res/" + dic_["fs"] + ".png")
    else:
        fs_img = Image.open("./res/" + "fs_dummy" + ".png")
    paste_with_a(img_, fs_img, (275 + 20 + 150 + 150 , 25 + 50 + 25 + 60 + 90 - 15))
def draw_name_pad_mai_style(base_img_):
    """Compose the 1800x300 player name pad (avatar, total rating, logo,
    player name, trophy strip) and paste it onto base_img_ at the plate edge.

    NOTE(review): reads the module globals ``user_dic``, ``ra_sum_sd``,
    ``ra_sum_dx21`` and ``plate_edge`` — must be called after the plate
    loops in ``__main__`` have accumulated the rating sums.
    """
    # draw name pad
    # load res
    name_pad_img = Image.open("./res/name_pad/"+user_dic["name_pad"]+".png")
    name_pad_img.convert('RGBA')
    name_pad_img = name_pad_img.resize((1800, 300), Image.ANTIALIAS)
    # draw ava
    ava_img = Image.open("./res/ava/"+user_dic["ava"]+".png")
    ava_img = ava_img.resize((250, 260), Image.ANTIALIAS)
    paste_with_a(name_pad_img, ava_img, (20,20))
    # draw rating base
    rating_base_img = Image.open("./res/rating_base_rainbow.png")
    rating_base_img = rating_base_img.resize((425, 85), Image.ANTIALIAS)
    # write rating
    draw = ImageDraw.Draw(rating_base_img)
    ra_sum = ra_sum_sd + ra_sum_dx21
    ra_sum_list = []
    ra_pos_list = [(364 + 6,18),(321+ 6,18),(275+ 6,18),(228+ 6,18),(188+ 6,18)] # max 99999
    # Peel digits off ra_sum least-significant first; ra_pos_list is ordered
    # right-to-left to match, so digits are printed in their proper columns.
    while 1:
        r = ra_sum%10
        ra_sum_list.append(r)
        ra_sum = int(ra_sum/10)
        if 0 == ra_sum:
            break
    for i in range(len(ra_sum_list)):
        draw.text(ra_pos_list[i], str(ra_sum_list[i]), font=ImageFont.truetype('C:/windows/fonts/BAUHS93.TTF', 42), fill="#eedd00")
    # paste rating base
    paste_with_a(name_pad_img,rating_base_img,(20 + 250 + 10, 20))
    # draw mai_logo
    maimai_img = Image.open("./res/logo.png")
    maimai_img = maimai_img.resize((int(110 * 16 / 9), 110), Image.ANTIALIAS)
    paste_with_a(name_pad_img,maimai_img,(20 + 250 + 10 + 425 + 10, 5))
    # draw name base
    name_base_img = Image.new('RGBA', (900 - 225, 105), (255, 255, 255, 0))
    # write name
    draw = ImageDraw.Draw(name_base_img)
    # Grey rounded rectangle with a 3px white inset acts as a border.
    drawRoundRec(draw,0,0,900 - 225, 105,25,"#666666")
    drawRoundRec(draw, 3, 3, 900 - 225-6, 105-6, 25, "#ffffff")
    draw.text((10 , 10), user_dic["name"], font=ImageFont.truetype('C:/windows/fonts/ALGER.TTF', 72), fill="#000000")
    # paste name base
    paste_with_a(name_pad_img,name_base_img, (20 + 250 + 10, 20 + 85 + 5))
    #draw trophy
    trophy_img = Image.open("./res/trophy.png")
    trophy_img = trophy_img.resize((900 - 225, 60), Image.ANTIALIAS)
    # write rating on trophy
    draw = ImageDraw.Draw(trophy_img)
    # draw.text((20, 7), "Standard:2222 DX2021:3333", font=ImageFont.truetype('C:/windows/fonts/Dengb.ttf', 40), fill="#333333")
    draw.text((20, 7), "Kakinuma/maimai_DX_rating_image", font=ImageFont.truetype('C:/windows/fonts/Dengb.ttf', 38), fill="#333333")
    # paste trophy
    paste_with_a(name_pad_img,trophy_img,(20 + 250 + 10, 20 + 85 + 5 + 105 +5))
    #paste name_pad
    paste_with_a(base_img_,name_pad_img,(plate_edge,plate_edge))
if __name__ == '__main__':
    # Build a "best 25 SD + best 15 DX2021" rating image:
    # records are split by is_new, sorted by rating, and each rendered as a
    # 900x300 plate on a 3-column canvas; finally the name pad is composed.
    # load user
    user_dic = {"name":"rain","ava":"rain","name_pad":"150603"}
    # load json
    with open("./data/"+user_dic["name"]+".json", 'r', encoding='utf-8') as load_f:
        load_dict = json.load(load_f)
    record_list = load_dict["records"]
    record_sd_list = []
    record_dx21_list = []
    # Split records: is_new -> DX2021 bucket, otherwise standard (SD) bucket.
    for r in record_list:
        if r["is_new"]:
            record_dx21_list.append(r)
        else:
            record_sd_list.append(r)
    # Highest rating first.
    record_sd_list = sorted(record_sd_list, key=lambda e: e.__getitem__('ra'), reverse=True)
    record_dx21_list = sorted(record_dx21_list, key=lambda e: e.__getitem__('ra'), reverse=True)
    # Cap at best 25 SD / best 15 DX2021 plates.
    record_sd_num = len(record_sd_list)
    if record_sd_num > 25 :
        record_sd_num = 25
    record_dx21_num = len(record_dx21_list)
    if record_dx21_num > 15 :
        record_dx21_num = 15
    ra_sum_sd = 0
    ra_sum_dx21 = 0
    # define
    template_dic = {"title": "林檎華憐歌", "level": "11+", "level_index": 3, "level_label": "Master", "type": "SD", "dxScore": 1886, "achievements": 100.6206, "rate": "sssp", "fc": "fcp", "fs": "", "ra": 166, "ds": 11.8, "song_id": "322", "is_new": "false"}
    diff_color = {"Master":"#9f51dc","Expert":"#ff7b7b","Advanced":"#00ffff","Re:MASTER":"#c495ea"}
    plate_interval = 60
    plate_edge = 90
    plate_width = 900
    plate_height = 300
    # load base = 3*14 plate
    base_img = Image.new('RGBA', (plate_width*3+plate_interval*2+plate_edge*2, plate_height*14+plate_interval*13+plate_edge*2),(81,188,243,255))
    # merge sd plate to base
    # SD plates fill the left two columns, two per row, below the name pad.
    plate_startX = plate_edge
    plate_startY = plate_edge + plate_height + plate_interval
    for i in range(record_sd_num):
        plate_img = Image.new('RGBA', (900, 300), (255, 255, 255, 0))
        plate_img.convert('RGBA')
        draw = ImageDraw.Draw(plate_img)
        drawRoundRec(draw, 0, 0, 900,300, 50, "#aaaaaa")
        drawRoundRec(draw, 3, 3, 900 - 6, 300 - 6, 50, "#ffffff")
        draw_plate_arc_style(plate_img, record_sd_list[i])
        ra_sum_sd += record_sd_list[i]["ra"]
        x = plate_startX + i%2 * (plate_width + plate_interval)
        y = plate_startY + int(i/2) * (plate_height + plate_interval)
        paste_with_a(base_img,plate_img,(x, y))
        print("SD",i,x,y)
    # merge dx plate to base
    # DX plates stack in the right-most column, one per row.
    plate_startX = plate_edge + plate_width + plate_interval + plate_width + plate_interval
    plate_startY = plate_edge
    for i in range(record_dx21_num):
        plate_img = Image.new('RGBA', (900, 300), (255, 255, 255, 0))
        plate_img.convert('RGBA')
        draw = ImageDraw.Draw(plate_img)
        drawRoundRec(draw, 0, 0, 900, 300, 50, "#aaaaaa")
        drawRoundRec(draw, 3, 3, 900 - 6, 300 - 6, 50, "#ffffff")
        draw_plate_arc_style(plate_img, record_dx21_list[i])
        ra_sum_dx21 += record_dx21_list[i]["ra"]
        x = plate_startX
        y = plate_startY + i * (plate_height + plate_interval)
        if 14==i :
            # DX15 move to left*1 up*1 to align
            x -= (plate_width + plate_interval)
            y -= (plate_height + plate_interval)
        paste_with_a(base_img,plate_img,(x, y))
        print("DX",i,x, y)
    draw_name_pad_mai_style(base_img)
    base_img.save("./out.png")
    print(ra_sum_sd,ra_sum_dx21)
| kakinumaCN/maimai_DX_rating_image | main.py | main.py | py | 9,086 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PIL.ImageDraw.Draw",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "PIL.ImageDraw",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "cover.download_cover",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open"... |
def secondElem(a):
    """Return the second element (index 1) of *a*; used as a sort key."""
    second = a[1]
    return second
def alphasort(a):
    """Sort (char, count) pairs in place: count descending, ties broken
    alphabetically (ascending). Returns the same list.

    Bug fix: the previous implementation sorted by count and then made a
    single adjacent-swap pass, which cannot fully order three or more
    entries with the same count (e.g. [('c',2), ('b',2), ('a',2)]).
    A composite sort key handles the tie-break correctly in one sort.
    """
    a.sort(key=lambda item: (-item[1], item[0]))
    return a
from collections import Counter
# Count character frequencies of one input line, order by count (desc) then
# alphabetically, and print the three most common characters with counts.
name = list(Counter(input()).items())
name = alphasort(name)
# NOTE(review): assumes the input contains at least 3 distinct characters;
# fewer would raise IndexError here.
for i in range(3):
    print(name[i][0],name[i][1])
| t3chcrazy/Hackerrank | company-logo.py | company-logo.py | py | 389 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.Counter",
"line_number": 10,
"usage_type": "call"
}
] |
34200144967 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm as cm
import matplotlib.lines as mlines
# Wine-quality exploration script: correlation heatmaps, a line plot of
# acidity vs quality, and a demo pie chart. Runs top-to-bottom, showing
# each figure interactively with plt.show().
df= pd.read_csv('/home/nishchay/Documents/Arcon/Day7/winequality-red.csv')
X1=df.iloc[:,11].values
Y1=df.iloc[:,0].values
Y2=df.iloc[:,1].values
fig = plt.figure()
ax1 = fig.add_subplot(111)
cmap = cm.get_cmap('jet', 20)
cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
ax1.grid(True)
plt.title('Wine Quality Correlation')
# NOTE(review): only 7 labels (the last a placeholder "asdf") for a
# 12-column correlation matrix — tick labels will not line up with the
# matrix axes; confirm the intended label list.
labels=['fixed acidity', 'volatile acidity', 'citric acid', 'residual sugar','chlorides', 'free sulfur dioxide','asdf']
ax1.set_xticklabels(labels,fontsize=6)
ax1.set_yticklabels(labels,fontsize=6)
fig.colorbar(cax, ticks=[- .6,- .5,- .4,- .3,- .2,- .1,0,.1,.2,.3,.4,.5,.6,.7,.8,.9,1])
plt.show()
#################################################################################
# Second heatmap: pcolor of the (row-reversed) correlation matrix.
col_labels = df.columns[1:]
corMat2 = df.corr().values[::-1]
fig, axes = plt.subplots(nrows=1,ncols=1)
ax0 = axes
ax0.set_xticks(np.linspace(.5,12.5,11))
ax0.set_xticklabels(col_labels,rotation=45)
ax0.set_yticks(np.linspace(.5,12.5,11))
ax0.set_yticklabels(col_labels[::-1],rotation=45)
#ax0.set_yticklabels(col_labels,rotation=45)
#visualize correlations using heatmap
# NOTE(review): this imshow draws on ax1 (the previous, already-shown
# figure), not ax0 — presumably a copy/paste leftover; confirm.
cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
cmap = cm.get_cmap('jet', 20)
fig.colorbar(cax, ticks=[- .6,- .5,- .4,- .3,- .2,- .1,0,.1,.2,.3,.4,.5,.6,.7,.8,.9,1])
plt.pcolor(corMat2,cmap='jet')
plt.show()
############################################################################
# Fixed vs volatile acidity against the first two columns, with a legend.
plt.plot(Y1,X1,'r--',Y2,X1,'bs')
plt.xlabel('Wine Quality')
plt.ylabel('fixed acidity')
red_line = mlines.Line2D(Y1,X1,color='red',marker='_',markersize=10,label='Fixed Acidity')
blue_line=mlines.Line2D(Y2,X1,color='blue',marker='|',markersize=10,label='Volatile Acidity')
plt.legend(handles=[red_line,blue_line])
plt.show()
# Standalone pie-chart demo (unrelated to the wine data).
labels = 'Python', 'C++', 'Ruby', 'Java'
sizes = [215, 130, 245, 210]
colors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue']
explode = (0.1, 0, 0, 0)  # explode 1st slice
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=140)
plt.axis('equal')
plt.show()
| nagrawal63/Neural-Networks | Day7/plot.py | plot.py | py | 2,190 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "matplotlib.... |
10414874563 | import itertools
import operator
import types
from typing import Any, List, Optional, Tuple, Type
import torch
from executorch.exir.dialects.edge._ops import EdgeOpOverload
from executorch.exir.error import ExportError, ExportErrorType
from executorch.exir.lowered_backend_module import LoweredBackendModule
from executorch.exir.verification.arg_validator import (
EdgeOpArgValidator,
RunHigherOrderOperatorError,
)
from torch._export.verifier import SpecViolationError, Verifier
from torch._ops import OpOverload
from torch._subclasses import FakeTensor
from torch.export.exported_program import ExportedProgram
from torch.fx import GraphModule
ALLOWED_META_KEYS = {"spec", "stack_trace"}


def _check_tensors_are_contiguous(gm: GraphModule) -> None:
    """Raise SpecViolationError if any named parameter or buffer of ``gm``
    is a non-contiguous torch.Tensor (Aten dialect requires contiguity)."""
    # Tensors be of contiguous format
    for name, param in itertools.chain(gm.named_parameters(), gm.named_buffers()):
        if isinstance(param, torch.Tensor):
            if not param.is_contiguous():
                raise SpecViolationError(
                    f"Tensors in Aten dialect must be contiguous, {name} is not contiguous"
                )
class EXIRATenDialectVerifierBase(Verifier):
    """Base (checks-disabled) verifier for the legacy EXIR ATen dialect;
    defines which getattr targets and op types a graph may contain."""

    dialect = "OLD_EXIR_ATEN_DISABLED"

    def allowed_getattr_types(self) -> Tuple[Type[Any], ...]:
        # Types a get_attr node may resolve to in this dialect.
        return (
            torch.fx.GraphModule,
            LoweredBackendModule,
            torch.Tensor,
            torch.ScriptObject,
        )

    def allowed_op_types(self):
        # Also accept raw OpOverloadPacket in addition to the base set.
        return super().allowed_op_types() + (torch._ops.OpOverloadPacket,)

    def __call__(self, *args, **kwargs):
        # Dispatch to whichever entry point the installed torch Verifier
        # exposes — presumably to stay compatible across torch versions
        # (older: check_valid, newer: _check_graph_module); confirm.
        if hasattr(self, "_check_graph_module"):
            return self._check_graph_module(*args, **kwargs)
        elif hasattr(self, "check_valid"):
            return self.check_valid(*args, **kwargs)
        else:
            raise RuntimeError("")
class EXIRATenDialectVerifier(EXIRATenDialectVerifierBase):
    """Enabled variant of the ATen dialect verifier: rejects OpOverloads
    that are neither core nor view_copy tagged, with an explicit allow-list
    of namespaces/ops that are exempt."""

    dialect = "OLD_EXIR_ATEN"

    def check_valid_op(self, op):
        if isinstance(op, OpOverload):
            # TODO These special ops should be removable easily.
            if op.namespace in (
                "quantized_decomposed",
                "boltnn_nimble",
                "nimble",
                "quantized",
            ) or op in (
                torch.ops.aten.mkldnn_rnn_layer.default,
                torch.ops.aten._upsample_bilinear2d_aa.default,
                torch.ops.aten.quantize_per_tensor.default,
                torch.ops.aten.dequantize.self,
                torch.ops.aten.max.default,
            ):
                return
            # An op must carry the "core" or "view_copy" tag to be canonical.
            if torch.Tag.core not in op.tags and torch.Tag.view_copy not in op.tags:
                # NOTE(qihan): whether view_copy operators are marked as canonical is still under
                # discussion.
                raise SpecViolationError(
                    f"Operator {op.__module__}.{op.__name__} is not Aten Canonical."
                )
def get_aten_verifier(enable: bool = True):
    """Select the ATen dialect verifier class: the checking variant when
    ``enable`` is True, otherwise the disabled base class."""
    if enable:
        return EXIRATenDialectVerifier
    return EXIRATenDialectVerifierBase
def _get_inputs(graph_module: GraphModule) -> List[Optional[FakeTensor]]:
    """Collect one FakeTensor input (from node.meta["val"]) per placeholder
    node of ``graph_module``; unused placeholders yield None, and a
    placeholder with users but no "val" is a spec violation."""

    def extract_input(node: torch.fx.Node) -> Optional[FakeTensor]:
        if "val" in node.meta:
            return node.meta["val"]

        if len(node.users) == 0:
            return None

        # TODO(ycao): `val` should always exist after we enable shape environment
        # serialization and deserialization.
        raise ExportError(
            ExportErrorType.VIOLATION_OF_SPEC,
            f"Cannot construct an input for graph module: {graph_module}.",
        )

    return [
        extract_input(node)
        for node in graph_module.graph.nodes
        if node.op == "placeholder"
    ]
def _check_tensor_args_matching_op_allowed_dtype(gm: GraphModule) -> None:
    """Interpret ``gm`` with EdgeOpArgValidator to detect ops whose tensor
    arguments carry dtypes the edge op does not allow; raises
    SpecViolationError listing the violating ops."""
    validator = EdgeOpArgValidator(gm)
    inputs = _get_inputs(gm)
    try:
        validator.run(*inputs)
    except RunHigherOrderOperatorError:
        # NB: ignore higher order operator in the graph.
        # If we lower a graph module to delegate and then compose it with some other graph module, retrace it,
        # if we also turn on edge ops and validator (_check_ir_validity=True), we will run
        # into RunHigherOrderOperatorError. The only thing we can do right now is to ignore this error, since
        # by definition it's still a valid Edge dialect. This is not ideal because it ignores possible invalidity
        # later in the graph.
        return

    if validator.violating_ops:
        raise SpecViolationError(
            f"These operators are taking Tensor inputs with mismatched dtypes: {validator.violating_ops}"
        )
def EXIREdgeDialectVerifier(  # noqa: C901
    check_edge_ops: bool = True,
    enable: bool = True,
    class_only: bool = False,
):
    """Factory for the Edge dialect verifier.

    Args:
        check_edge_ops: validate EdgeOpOverload ops (plus tensor
            contiguity/dtype checks); otherwise only ATen canonicality.
        enable: when False every check becomes a no-op.
        class_only: return the verifier class itself instead of an instance.
    """

    class _EXIREdgeDialectVerifier(Verifier):
        dialect = "EDGE"

        def __init__(self) -> None:
            self.check_edge_ops = check_edge_ops

            # Bind the op check once, based on the factory flag.
            if self.check_edge_ops:
                self.check_valid_op = self.check_valid_edge_op
            else:
                self.check_valid_op = self.check_valid_aten_op

        def allowed_getattr_types(self) -> Tuple[Type[Any], ...]:
            return (
                torch.fx.GraphModule,
                LoweredBackendModule,
                torch.Tensor,
                torch.ScriptObject,
            )

        def allowed_op_types(self):
            return super().allowed_op_types() + (EdgeOpOverload, types.FunctionType)

        def check_valid_edge_op(self, op):
            if not enable:
                return
            # getitem and sym_size.int are always permitted.
            if op in [operator.getitem, torch.ops.aten.sym_size.int]:
                return

            if isinstance(op, OpOverload) and not isinstance(op, EdgeOpOverload):
                raise SpecViolationError(
                    "Operator {}.{} is not an Edge operator.".format(
                        op.__module__, op.__name__
                    )
                )
            if isinstance(op, EdgeOpOverload):
                # An edge op must wrap a canonical ATen op.
                self.check_valid_aten_op(op._op)
            if isinstance(op, types.FunctionType):
                assert op.__name__ in ("alloc",)

        def check_valid_aten_op(self, op) -> None:
            if isinstance(op, OpOverload):
                if (
                    torch.Tag.core not in op.tags  # type: ignore[attr-defined]
                    and torch.Tag.view_copy not in op.tags  # type: ignore[attr-defined]
                ):
                    # NOTE(qihan): whether view_copy operators are marked as canonical is still under
                    #             discussion.
                    raise SpecViolationError(
                        "Operator {}.{} is not Aten Canonical.".format(
                            op.__module__, op.__name__
                        )
                    )

        def check_additional(self, gm: GraphModule) -> None:
            if not enable:
                return
            if self.check_edge_ops:
                _check_tensors_are_contiguous(gm)
                _check_tensor_args_matching_op_allowed_dtype(gm)

        def is_valid(self, gm: GraphModule) -> bool:
            # Boolean wrapper: True iff verification raises nothing.
            try:
                self(gm)
                return True
            except SpecViolationError:
                return False

        def __call__(self, ep_or_gm):
            if not enable:
                return
            gm = ep_or_gm
            if isinstance(gm, ExportedProgram):
                gm = ep_or_gm.graph_module
            # Same torch-version dispatch as the ATen verifier above.
            if hasattr(self, "_check_graph_module"):
                return self._check_graph_module(gm)
            elif hasattr(self, "check_valid"):
                return self.check_valid(gm)
            else:
                raise RuntimeError("")

    ret = _EXIREdgeDialectVerifier
    if not class_only:
        ret = ret()
    return ret


# NOTE(review): module-level call whose result is discarded — presumably a
# construction sanity check at import time; confirm it is intentional.
EXIREdgeDialectVerifier()
| pytorch/executorch | exir/verification/verifier.py | verifier.py | py | 7,890 | python | en | code | 479 | github-code | 6 | [
{
"api_name": "torch.fx.GraphModule",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "itertools.chain",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "torch._export.... |
21699610056 | import cv2
# Method: getFrames
# Purpose: Extract a predefined number of frames from a provided video
# Parameters: video_capture: provided video
# frame_num: the desired number of frames
# frame_start: optional value to input for start of frame
def get_frames(video_capture, frame_num, frame_start=0):
    """Extract frames [frame_start, frame_start + frame_num) from a video.

    Frames are converted from OpenCV's BGR ordering to RGB. Reading stops
    early when the capture runs out; if not even the first frame can be
    read, a message is printed and an empty list is returned.
    """
    frames = []
    stop_index = frame_start + frame_num
    index = 0
    while index < stop_index:
        ok, bgr_image = video_capture.read()
        if not ok:
            if index == 0:
                print('Video cannot be read from file')
            break
        if index >= frame_start:
            frames.append(cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB))
        index += 1
    return frames
# Method: getEdges
# Purpose: gets the edges in an image via converting to gray scale than blurring the image
# Parameters: frame_list: list of frames
# line_size: how large the lines should be on edges
# blur_value: how blurred the image should be
def get_edges(frame_list, line_size, blur_value):
    """Return adaptive-threshold edge maps, one per frame in frame_list.

    Each frame is converted to grayscale, median-blurred with blur_value,
    then adaptively thresholded (mean method, block size line_size).
    """
    edge_maps = []
    for frame in frame_list:
        blurred = cv2.medianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), blur_value)
        edge_maps.append(
            cv2.adaptiveThreshold(
                blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,
                line_size, blur_value))
    return edge_maps
# Method: rgb2gray
# Purpose: Algorithm to convert a color image to gray scale in matplotlib
# Parameters: image (The image array to convert)
def rgb2gray(image):
    """Convert an (H, W, 3) RGB array to grayscale via luminance weights
    0.2989 R + 0.5870 G + 0.1140 B."""
    weights = (0.2989, 0.5870, 0.1140)
    return sum(w * image[:, :, channel] for channel, w in enumerate(weights))
| ccranson27/ccr_playground | frame_gathering.py | frame_gathering.py | py | 2,033 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.cvtColor",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",... |
7066069240 | import sys,os,subprocess,string,re
from threading import Timer
import time,socket,json
from os import path
base_dir = path.dirname(path.abspath(sys.path[0]))
print(base_dir)
sys.path.append(base_dir)
class HfsSetup():
    """Deploys a Houdini install on a (Windows) render node: kills running
    Houdini processes, switches the license server via a helper script,
    copies the packaged .7z from the network share and unpacks it locally.

    Log messages for the caller are accumulated in ``self._info_return``.
    """

    def __init__(self,hver='',plants='',adict={}):
        # hver: Houdini version string (e.g. "160557"); plants: platform
        # ("win"/"Linux"); adict: path configuration (see keys below).
        # NOTE(review): mutable default ``adict={}`` plus mandatory key
        # lookups means calling without adict raises KeyError — confirm.
        self._run_code = True
        self._plants = plants
        self._hfs_version = hver
        self._servers = ['10.60.96.203']
        self._server = self._servers[0]
        # Process image names to kill before (re)installing.
        self._Kapps = ["houdinifx.exe","hython.exe"]
        self._folders = ["temp"]
        self.Makerdir()
        self._temp = "%s/temp"%base_dir
        self._code_base_path = adict['codebase'] # C:/script/CG/Houdini
        self._B_plugin_path = adict['plugingase'] # B:/plugins/houdini
        self._D_houdini_path = adict['hfsbase'] # D:/plugins/houdini
        self._info_return = []

    def getcustip(self):
        """Determine this host's IP by matching the numeric suffix of the
        hostname against the octets of each resolved address."""
        self._hostname = socket.gethostname()
##        print(self._hostname)
        _ip_end = re.findall(r"\d+",self._hostname)
        _ip_a = _ip_end[0]
        # Strip leading zeros from the hostname's number.
        z_num = 0
        for i in range(0,len(_ip_a)):
            if not _ip_a[i]=="0":
                z_num=i
                break
        _ip_a =_ip_a[z_num:]
        _ipList = socket.gethostbyname_ex(self._hostname)
        self._this_ip = ''
        for elm in _ipList[-1]:
            _ip_ss = elm.split(".")
            if _ip_a in _ip_ss:
                self._this_ip = elm
                break

    def Makerdir(self):
        """Create the working sub-folders (``temp``) under base_dir."""
        for elm in self._folders:
            _elm = "%s/%s"%(base_dir,elm)
            if not os.path.exists(_elm):
                os.makedirs(_elm)

    def ConfigApp_win(self):
        """Windows deployment: set license server, copy 7-Zip and the
        Houdini archive if missing, then unpack it — all via subprocesses
        whose stdout is logged to files under the temp folder."""
        self._CopyHfs = True
        self._info_return.append("Server tyr to chang to: %s"%self._server)
        ## copy files
        # 7z tools
        localpath = r"D:/plugins/tools/7z"
        z7 = "D:/plugins/tools/7z/7z.exe"
        fromdir = self._B_plugin_path.replace("/plugins/houdini","") + r"/tools/7-Zip"
        h_source = os.path.abspath("%s/apps/win/%s.7z"%(self._B_plugin_path,self._hfs_version))
        h_amd = os.path.abspath("%s/%s.7z"%(self._D_houdini_path,self._hfs_version))
        # Skip the network copy when the archive is already cached locally.
        if os.path.exists(h_amd):
            self._CopyHfs = False
        ## set hfs server
        version_base = self._hfs_version[:2]
        py_path = os.path.abspath("%s/function/HoudiniLibs/hfsserver.py %s"%(self._code_base_path,base_dir))
        # The helper script reads this JSON to know which server to set.
        _lic_info = {"hver":version_base,"server":self._server}
        _lic_info_f = "%s/temp/lic_info.json"%base_dir
        with open(_lic_info_f,"w")as f:
            json.dump(_lic_info,f)
            f.close()
        server_cmds = py_path
        ## unzip houdini
        cmd_un7z = z7 + " x -y -aos "
        cmd_un7z += "%s/%s.7z"%(self._D_houdini_path,self._hfs_version) # D:/plugins/houdini
        cmd_un7z += " -o%s" % ("%s/%s"%(self._D_houdini_path,self._hfs_version))
        ## subprocess
        # creat the handl
        setserver_log = open(r'%s/Server_info.txt'%self._temp,'wt')
        copy7ztool_log = open(r'%s/Copy_ziptool.txt'%self._temp,'wt')
        Uzi_Houdini_log = open(r'%s/Uzip_Houdini.txt'%self._temp,'wt')
        set_server = subprocess.Popen(server_cmds,stdout=setserver_log,shell=True)
        copy7ztool = subprocess.Popen("robocopy /S /NDL /NFL %s %s %s" % (fromdir, localpath, "*"),stdout=copy7ztool_log,shell=True)
        if self._CopyHfs:
            copyhoudini = subprocess.Popen("copy %s %s" % (h_source, h_amd),shell=True)
            copyhoudini.wait()
        copy7ztool.wait()
        UzipHoudini = subprocess.Popen(cmd_un7z,stdout=Uzi_Houdini_log,shell=True)
        _s_result = set_server.wait()
        # Exit code 0 means the license-server switch succeeded.
        if not _s_result:
            # print("License server changed to: %s"%self._server)
            self._info_return.append("License server changed to: %s"%self._server)
        UzipHoudini.wait()
        # finish,close the handl
        setserver_log.close()
        copy7ztool_log.close()
        Uzi_Houdini_log.close()
        _h_result = UzipHoudini.returncode
##        os.remove(h_amd)
        if not _h_result:
            print("Houdini setup finished. ")

    def KillApps(self):
        """Force-kill the Houdini processes listed in self._Kapps.

        NOTE(review): ``cmds`` is only assigned for "win"/"Linux" — any
        other platform value raises UnboundLocalError; confirm intent.
        """
        for app in self._Kapps:
            if self._plants == "win":
                cmds = r'c:\windows\system32\cmd.exe /c c:\windows\system32\TASKKILL.exe /F /IM %s'%app
            elif self._plants == "Linux":
                cmds = ''
            subprocess.call(cmds,shell=True)

    def Extued(self):
        """Entry point: kill running apps (best effort) then run the
        platform-specific deployment."""
        # print("Try to kill the houdinifx and hython before houdini app setup")
        self._info_return.append("Try to kill the houdinifx and hython before houdini app setup")
        try:
            self.KillApps()
        except:
            pass
        # NOTE(review): ConfigApp_Linux is referenced but not defined in
        # this file — the Linux path would raise AttributeError; confirm.
        if self._run_code and self._plants=="win":
            self.ConfigApp_win()
        elif self._run_code and self._plants=="Linux":
            self.ConfigApp_Linux()
def main(version='', plants='win', adict={}):
    """Run one Houdini setup pass, time it, and return the collected log
    messages from the HfsSetup instance."""
    started = time.time()
    setup = HfsSetup(version, plants, adict)
    setup.Extued()
    finished = time.time()
    elapsed_msg = "Times for apps setup: %d s" % (finished - started)
    print(elapsed_msg)
    setup._info_return.append(elapsed_msg)
    return setup._info_return
if __name__ == '__main__':
    # NOTE(review): main() is called without an "adict"; HfsSetup.__init__
    # indexes adict['codebase'] and would raise KeyError here — confirm the
    # intended script-mode arguments.
    main('160557','win')

# Dead code kept as a module-level string literal (no runtime effect).
'''
_info_file = "%s/temp/app_info.json"%base_dir
_plugin_info = {}
if os.path.exists(_info_file):
    with open(_info_file,"r")as f:
        _plugin_info = json.load(f)
    f.close()
'''
''' | kRayvison/Pycharm_python36 | new_render_data/input/p/script/CG/Houdini/function/old/HoudiniMain/HoudiniAppSet.py | HoudiniAppSet.py | py | 5,487 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.path.dirname",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "os.path.abspath",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,... |
25081332800 | # If this is the name of a readable file, the Python commands in that file are
# executed before the first prompt is displayed in interactive mode.
# https://docs.python.org/3/using/cmdline.html#envvar-PYTHONSTARTUP
#
# Sample code which supports concurrent interactive sessions, by only
# appending the new history is taken from
# https://docs.python.org/3/library/readline.html?highlight=readline#example
#
# The goal is to store interactive Python shell history in
# $XDG_STATE_HOME/python/python_history instead of ~/.python_history.
import atexit
import os
import readline
# Resolve the history file under $XDG_STATE_HOME (default ~/.local/state).
# Bug fix: expanduser() is required — os.makedirs("~/...") would otherwise
# create a literal "~" directory relative to the CWD instead of using the
# home directory.
histfile = os.path.join(
    os.path.expanduser(os.getenv("XDG_STATE_HOME", "~/.local/state")),
    "python",
    "python_history",
)
if not os.path.exists(histfile):
    os.makedirs(os.path.dirname(histfile), exist_ok=True)
if not os.path.isfile(histfile):
    # Path exists but is not a regular file (e.g. a stale symlink) — remove
    # it so a fresh history file can be created.
    if os.path.exists(histfile):
        os.remove(histfile)
    # Create an empty file so read_history_file below cannot fail.
    open(histfile, "a").close()

readline.read_history_file(histfile)
h_len = readline.get_current_history_length()


def save(prev_h_len, histfile):
    """Append only the lines typed in this session to the history file.

    Appending (instead of rewriting) keeps concurrent interactive sessions
    from clobbering each other's history.
    """
    new_h_len = readline.get_current_history_length()
    # Make the history file much bigger for relative suggestions
    readline.set_history_length(int(os.getenv("HISTSIZE", 1000000)))
    readline.append_history_file(new_h_len - prev_h_len, histfile)


atexit.register(save, h_len, histfile)

# Map TAB to auto-completion instead of the TAB symbol
readline.parse_and_bind("tab: complete")
| mvshmakov/dotfiles | python/.config/python/pythonstartup.py | pythonstartup.py | py | 1,442 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.getenv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number... |
38736630905 | import datetime
import logging
import matplotlib.pyplot as plt
import numpy as np
import numpy.typing as npt
from backend.data.measurements import MeasurementArray, Measurements
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def xyz2blh(x, y, z):
    """Convert ECEF (x, y, z) arrays to geodetic latitude, longitude and
    ellipsoidal height on the WGS84 ellipsoid.

    Latitude and longitude are returned in radians, height in metres.
    Latitude is solved by fixed-point iteration to a 1e-9 rad tolerance.
    """
    semi_major = 6378137.0
    semi_minor = 6356752.314245
    ecc = np.sqrt(1 - (semi_minor ** 2) / (semi_major ** 2))

    # Longitude is direct.
    lon = np.arctan2(y, x)

    # Iterate latitude until every element converges.
    dist_xy = np.hypot(x, y)
    lat = np.arctan(z / dist_xy)
    lat_prev = np.zeros_like(x)
    while np.any(np.abs(lat - lat_prev) > 1e-9):
        lat_prev = lat
        prime_vertical = semi_major / np.sqrt(1 - ecc ** 2 * np.sin(lat_prev) ** 2)
        lat = np.arctan((z + ecc ** 2 * prime_vertical * np.sin(lat_prev)) / dist_xy)

    # Height, in metres, via a branch chosen for numerical stability.
    prime_vertical = semi_major / np.sqrt(1 - ecc ** 2 * np.sin(lat) ** 2)
    near_equator = np.abs(lat) < np.pi / 4
    height = np.zeros_like(x)
    radius = np.hypot(dist_xy[near_equator], z[near_equator])
    angle = np.arctan(z[near_equator] / dist_xy[near_equator])
    height[near_equator] = radius * np.cos(angle) / np.cos(lat[near_equator]) - prime_vertical[near_equator]
    height[~near_equator] = z[~near_equator] / np.sin(lat[~near_equator]) - prime_vertical[~near_equator] * (1 - ecc ** 2)
    return lat, lon, height
class Position:
    """
    Position class to handle position analysis
    """

    def __init__(
        self,
        data: MeasurementArray = None,
        base: MeasurementArray = None,
        sitelist: list = None,
    ) -> None:
        # When a base is supplied, store the differenced data (data - base).
        self.data = data
        self.base = base
        self.sitelist = sitelist
        if self.base is not None:
            self.data = self.data - self.base

    def __iter__(self):
        # Iterate over the (possibly differenced) measurements.
        return iter(self.data)

    def rotate_enu(self) -> None:
        """
        rotate Rotate the position to the ENU frame from the base

        For each site, the base station's ECEF coordinates (keys 'x_0',
        'x_1', 'x_2' — assumed to be X/Y/Z in metres; TODO confirm) are
        converted to latitude/longitude, and the per-epoch ECEF offsets are
        rotated into East/North/Up components, written back in place.
        """
        for data in self.data:
            # NOTE(review): leftover debug output — prints every data key.
            for k in data.data:
                print(k)
            #locate the base with the same station id
            base = self.base.locate(site=data.id['site'])
            lat, lon, height = xyz2blh(base.data['x_0'], base.data['x_1'], base.data['x_2'])
            # Per-epoch 3x3 ECEF->ENU rotation; columns are the E, N, U
            # unit vectors expressed in ECEF.
            rot = np.zeros((3,3, len(lat)))
            rot[0,0] = -np.sin(lon)
            rot[0,1] = -np.sin(lat)*np.cos(lon)
            rot[0,2] = np.cos(lat)*np.cos(lon)
            rot[1,0] = np.cos(lon)
            rot[1,1] = -np.sin(lat)*np.sin(lon)
            rot[1,2] = np.cos(lat)*np.sin(lon)
            rot[2,0] = 0
            rot[2,1] = np.cos(lat)
            rot[2,2] = np.sin(lat)
            # Stack the x_0..x_2 offsets as (epochs, 3) and apply R^T.
            project = np.empty((len(data.data['x_0']),3))
            for i in range(3):
                project[:, i] = data.data[f'x_{i}']
            enu = np.matmul(rot.transpose(), project[:,:,np.newaxis])[:,:,0]
            # Overwrite in place: x_0/x_1/x_2 now hold E/N/U components.
            for i in range(3):
                data.data[f'x_{i}'] = enu[:, i]
| GeoscienceAustralia/ginan | scripts/GinanEDA/backend/data/position.py | position.py | py | 3,039 | python | en | code | 165 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.arctan2",
"l... |
37822719553 | """Contains the MetaCurriculum class."""
import os
from unitytrainers.curriculum import Curriculum
from unitytrainers.exception import MetaCurriculumError
import logging
logger = logging.getLogger('unitytrainers')
class MetaCurriculum(object):
    """Holds one Curriculum per brain in the environment and delegates
    lesson bookkeeping to them.
    """
    def __init__(self, curriculum_folder, default_reset_parameters):
        """Load every curriculum JSON found in *curriculum_folder*.

        Args:
            curriculum_folder (str): Relative or absolute path to the folder
                containing one JSON file per brain (file name == brain name).
            default_reset_parameters (dict): The environment's default reset
                parameters, handed to every Curriculum.
        """
        seen_parameters = set()
        self._brains_to_curriculums = {}
        try:
            for filename in os.listdir(curriculum_folder):
                brain_name = filename.split('.')[0]
                filepath = os.path.join(curriculum_folder, filename)
                curriculum = Curriculum(filepath, default_reset_parameters)
                config_keys = curriculum.get_config().keys()
                # Two curriculums touching the same reset parameter makes
                # the outcome order-dependent; warn the user.
                if any(parameter in config_keys
                       for parameter in seen_parameters):
                    logger.warning('Two or more curriculums will '
                                   'attempt to change the same reset '
                                   'parameter. The result will be '
                                   'non-deterministic.')
                seen_parameters.update(config_keys)
                self._brains_to_curriculums[brain_name] = curriculum
        except NotADirectoryError:
            raise MetaCurriculumError(curriculum_folder + ' is not a '
                                      'directory. Refer to the ML-Agents '
                                      'curriculum learning docs.')
    @property
    def brains_to_curriculums(self):
        """A dict from brain_name to the brain's curriculum."""
        return self._brains_to_curriculums
    @property
    def lesson_nums(self):
        """A dict from brain name to the brain's curriculum's lesson number."""
        return {brain_name: curriculum.lesson_num
                for brain_name, curriculum in self.brains_to_curriculums.items()}
    @lesson_nums.setter
    def lesson_nums(self, lesson_nums):
        for brain_name, lesson in lesson_nums.items():
            self.brains_to_curriculums[brain_name].lesson_num = lesson
    def increment_lessons(self, progresses):
        """Advance each curriculum according to its brain's progress.

        Args:
            progresses (dict): A dict of brain name to progress.
        """
        for brain_name, progress in progresses.items():
            self.brains_to_curriculums[brain_name].increment_lesson(progress)
    def set_all_curriculums_to_lesson_num(self, lesson_num):
        """Force every curriculum to the given lesson number.

        Args:
            lesson_num (int): The lesson number which all the curriculums
                will be set to.
        """
        for curriculum in self.brains_to_curriculums.values():
            curriculum.lesson_num = lesson_num
    def get_config(self):
        """Merge the configurations of all curriculums into one dict.

        Returns:
            A dict from parameter to value.
        """
        merged = {}
        for curriculum in self.brains_to_curriculums.values():
            merged.update(curriculum.get_config())
        return merged
| Sohojoe/ActiveRagdollAssaultCourse | python/unitytrainers/meta_curriculum.py | meta_curriculum.py | py | 3,968 | python | en | code | 37 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number":... |
57215059 | import pytest
import numpy as np
import os
from netZooPy import dragon
def test_dragon():
    """End-to-end smoke test of the DRAGON pipeline: penalty estimation,
    partial correlations, analytic p-values, and Monte-Carlo p-values.

    NOTE(review): requires network access (fixtures fetched from S3 via curl).
    """
    #1. test1
    print('Start Dragon run ...')
    n = 1000
    p1 = 500
    p2 = 100
    # The simulated X1/X2 below are immediately overwritten by the
    # downloaded fixtures; the call mainly exercises simulate_dragon_data.
    X1, X2, Theta, _ = dragon.simulate_dragon_data(eta11=0.005, eta12=0.005, eta22=0.05,
                                                   p1=100, p2=500, epsilon=[0.1,0.1],
                                                   n=n, seed=123)
    os.system('curl -O https://netzoo.s3.us-east-2.amazonaws.com/netZooPy/unittest_datasets/dragonx1.npy')
    os.system('curl -O https://netzoo.s3.us-east-2.amazonaws.com/netZooPy/unittest_datasets/dragonx2.npy')
    X1=np.load('dragonx1.npy')
    X2=np.load('dragonx2.npy')
    lambdas, lambdas_landscape = dragon.estimate_penalty_parameters_dragon(X1, X2)
    lambdasSingle=tuple([int(10*x)/10 for x in lambdas]) # 3 digit precision
    alamb=lambdas_landscape[1,1]
    assert(lambdasSingle == (0.9, 0.9))
    assert((alamb < 398.7*1.002) & (alamb > 398.7*0.998)) #0.2% of error
    assert(int(X1[1,1]*1000)/1000 ==0.880)
    assert(int(X2[1,1]*1000)/1000 ==0.664)
    #2. test2
    r = dragon.get_partial_correlation_dragon(X1, X2, lambdas)
    adj_p_vals, p_vals = dragon.estimate_p_values_dragon(r, n, p1, p2, lambdas)
    p_valstest=int(p_vals[2,1]*100)/100
    adj_p_valstest=int(adj_p_vals[2,1]*10)/10 # 3 digit precision
    assert(int(np.max(r)*100)/100 == 0.03)
    assert(int(r[1,2]*100000)/100000 == 0.00012)
    assert(p_valstest == 0.96)
    assert(adj_p_valstest == 0.9)
    #3. test monte carlo p-values
    # Hand-built block covariance matrix: test11/test22 are the within-layer
    # blocks, test12/test21 the cross-layer blocks.
    p1 = 3
    p2 = 4
    n = 10
    lam = [0,0] # no shrinkage
    test11 = np.array([[1,1/2.,-1/4.],
                       [1/2.,1,1/8.],
                       [1/4.,1/8.,1]])
    test12 = np.array([[-3/4.,1/2.,1/4.,0],
                       [-3/4.,1/2.,1/4.,0],
                       [-3/4.,1/2.,1/4.,0]])
    test21 = np.transpose(test12)
    test22 = np.array([[1,-3/4.,1/2.,-1/4.],
                       [-3/4.,1,1/8.,1/16.],
                       [1/2.,1/8.,1,1/32.],
                       [-1/4.,1/16.,1/32.,1]])
    test_mc_mat = np.identity(p1+p2)
    test_mc_mat[0:3,0:3] = test11
    test_mc_mat[0:3,3:7] = test12
    test_mc_mat[3:7,0:3] = test21
    test_mc_mat[3:7,3:7] = test22
    dragon_p_mc = dragon.dragon.estimate_p_values_mc(test_mc_mat,n,p1,p2,lam,seed=412)
    ground_truth_mc_p = np.array([[0,0,1/3.,0,1/12.,7/12.,1],
                                  [0,0,2/3.,0,1/12.,7/12.,1],
                                  [1/3.,2/3.,0,0,1/12,7/12.,1],
                                  [0,0,0,0,0,0,5/6.],
                                  [1/12.,1/12.,1/12.,0,0,5/6.,5/6.],
                                  [7/12.,7/12.,7/12.,0,5/6.,0,5/6.],
                                  [1,1,1,5/6.,5/6.,5/6.,0]])
    assert(np.array_equal(dragon_p_mc,ground_truth_mc_p))
    return()
def test_remove_zero_variance_preds():
    """remove_zero_variance_preds drops constant columns, keeps varying ones."""
    constant_first_col = np.array([[1, 2, 3],
                                   [1, 5, 6],
                                   [1, 4, 9],
                                   [1, 10, 11]])
    all_varying = np.array([[1, 2, 3],
                            [2, 5, 6],
                            [3, 4, 9],
                            [4, 10, 11]])
    # Expected: the constant first column is removed, the rest survives.
    expected_without_constant = np.array([[2, 3],
                                          [5, 6],
                                          [4, 9],
                                          [10, 11]])
    cleaned_constant = dragon.dragon.remove_zero_variance_preds(constant_first_col)
    cleaned_varying = dragon.dragon.remove_zero_variance_preds(all_varying)
    assert(np.array_equal(cleaned_constant, expected_without_constant))
    # A matrix with no constant columns must come back unchanged.
    assert(np.array_equal(cleaned_varying, all_varying))
    return()
def test_zero_variance_exception_estimate_penalty_parameters_dragon():
    """estimate_penalty_parameters_dragon must reject zero-variance predictors."""
    constant_layer = np.array([[1, 2, 3],
                               [1, 5, 6],
                               [1, 4, 9],
                               [1, 10, 11]])
    varying_layer = np.array([[1, 2, 3],
                              [2, 5, 6],
                              [3, 4, 9],
                              [4, 10, 11]])
    with pytest.raises(Exception) as exc:
        dragon.dragon.estimate_penalty_parameters_dragon(X1=constant_layer, X2=varying_layer)
    expected = ("[netZooPy.dragon.dragon.estimate_penalty_parameters_dragon] "
                "Found variables with zero variance. These must be removed "
                "before use of DRAGON. Consider use of "
                "`dragon.dragon.remove_zero_variance_preds`.")
    assert(str(exc.value) == expected)
    return()
def test_zero_variance_exception_get_shrunken_covariance_dragon():
    """get_shrunken_covariance_dragon must reject zero-variance predictors."""
    constant_layer = np.array([[1, 2, 3],
                               [1, 5, 6],
                               [1, 4, 9],
                               [1, 10, 11]])
    varying_layer = np.array([[1, 2, 3],
                              [2, 5, 6],
                              [3, 4, 9],
                              [4, 10, 11]])
    with pytest.raises(Exception) as exc:
        dragon.dragon.get_shrunken_covariance_dragon(X1=constant_layer, X2=varying_layer, lambdas=[0.5, 0.5])
    expected = ("[netZooPy.dragon.dragon.get_shrunken_covariance_dragon] "
                "Found variables with zero variance. These must be removed "
                "before use of DRAGON. Consider use of "
                "`dragon.dragon.remove_zero_variance_preds`.")
    assert(str(exc.value) == expected)
    return()
def test_singularity_exception():
    """With no shrinkage and n < p the covariance is singular and must raise."""
    # Two identical 2x3 layers: not enough samples to invert Sigma.
    tiny_layer = np.array([[1, 2, 3],
                           [2, 5, 6]])
    with pytest.raises(Exception) as exc:
        dragon.dragon.get_shrunken_covariance_dragon(X1=tiny_layer, X2=tiny_layer.copy(), lambdas=[0, 0])  # no shrinkage
    expected = ("[dragon.dragon.get_shrunken_covariance_dragon] Sigma is not "
                "invertible for the input values of lambda. Make sure that you "
                "are using `estimate_penalty_parameters_dragon` to select "
                "lambda. You may have variables with very small variance or "
                "highly collinear variables in your data. Consider removing "
                "such variables.")
    assert(str(exc.value) == expected)
    return()
| netZoo/netZooPy | tests/test_dragon.py | test_dragon.py | py | 5,525 | python | en | code | 71 | github-code | 6 | [
{
"api_name": "netZooPy.dragon.simulate_dragon_data",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "netZooPy.dragon",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "os.system",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.sys... |
195263286 | from django.db import models
from core.models import Service, PlCoreBase, Slice, Instance, Tenant, TenantWithContainer, Node, Image, User, Flavor, Subscriber
from core.models.plcorebase import StrippedCharField
import os
from django.db import models, transaction
from django.forms.models import model_to_dict
from django.db.models import Q
from operator import itemgetter, attrgetter, methodcaller
import traceback
from xos.exceptions import *
from core.models import SlicePrivilege, SitePrivilege
from sets import Set
from urlparse import urlparse
CEILOMETER_KIND = "ceilometer"
class CeilometerService(Service):
    """Proxy Service model representing a Ceilometer monitoring deployment."""
    KIND = CEILOMETER_KIND
    class Meta:
        app_label = "ceilometer"
        verbose_name = "Ceilometer Service"
        proxy = True
    @property
    def ceilometer_pub_sub_url(self):
        # URL of the Ceilometer publish/subscribe endpoint, stored as a
        # service attribute; None when unset.
        return self.get_attribute("ceilometer_pub_sub_url", None)
    @ceilometer_pub_sub_url.setter
    def ceilometer_pub_sub_url(self, value):
        self.set_attribute("ceilometer_pub_sub_url", value)
class MonitoringChannel(TenantWithContainer): # aka 'CeilometerTenant'
    """Per-user tenant giving access to the Ceilometer monitoring service.

    A user may own at most one MonitoringChannel (enforced in save()); the
    channel's container exposes a Ceilometer proxy restricted to the
    OpenStack tenant ids the creator is entitled to see (tenant_list).
    """
    class Meta:
        proxy = True
    KIND = CEILOMETER_KIND
    LOOK_FOR_IMAGES=[ #"trusty-server-multi-nic-docker", # CloudLab
                    "ceilometer-trusty-server-multi-nic",
                    #"trusty-server-multi-nic",
                  ]
    # Attributes pushed down to the synchronizer.
    sync_attributes = ("private_ip", "private_mac",
                       "ceilometer_ip", "ceilometer_mac",
                       "nat_ip", "nat_mac", "ceilometer_port",)
    default_attributes = {}
    def __init__(self, *args, **kwargs):
        # Default the provider service to the first Ceilometer service, if any.
        ceilometer_services = CeilometerService.get_service_objects().all()
        if ceilometer_services:
            self._meta.get_field("provider_service").default = ceilometer_services[0].id
        super(MonitoringChannel, self).__init__(*args, **kwargs)
        self.set_attribute("use_same_instance_for_multiple_tenants", True)
    def can_update(self, user):
        #Allow creation of this model instances for non-admin users also
        return True
    def save(self, *args, **kwargs):
        """Validate creator and the one-channel-per-user rule, then save
        and apply the model policy."""
        if not self.creator:
            if not getattr(self, "caller", None):
                # caller must be set when creating a monitoring channel since it creates a slice
                raise XOSProgrammingError("MonitoringChannel's self.caller was not set")
            self.creator = self.caller
            if not self.creator:
                raise XOSProgrammingError("MonitoringChannel's self.creator was not set")
        if self.pk is None:
            #Allow only one monitoring channel per user
            channel_count = sum ( [1 for channel in MonitoringChannel.objects.filter(kind=CEILOMETER_KIND) if (channel.creator == self.creator)] )
            if channel_count > 0:
                raise XOSValidationError("Already %s channels exist for user Can only create max 1 MonitoringChannel instance per user" % str(channel_count))
        super(MonitoringChannel, self).save(*args, **kwargs)
        model_policy_monitoring_channel(self.pk)
    def delete(self, *args, **kwargs):
        # Tear down the backing container before removing the tenant record.
        self.cleanup_container()
        super(MonitoringChannel, self).delete(*args, **kwargs)
    @property
    def addresses(self):
        """Map of network role ("private"/"nat") -> (ip, mac) for the
        channel's instance ports; empty when no instance exists yet."""
        if (not self.id) or (not self.instance):
            return {}
        addresses = {}
        for ns in self.instance.ports.all():
            if "private" in ns.network.name.lower():
                addresses["private"] = (ns.ip, ns.mac)
            elif ("nat" in ns.network.name.lower()) or ("management" in ns.network.name.lower()):
                addresses["nat"] = (ns.ip, ns.mac)
            #TODO: Do we need this client_access_network. Revisit in VTN context
            #elif "ceilometer_client_access" in ns.network.labels.lower():
            #    addresses["ceilometer"] = (ns.ip, ns.mac)
        return addresses
    @property
    def nat_ip(self):
        return self.addresses.get("nat", (None, None))[0]
    @property
    def nat_mac(self):
        return self.addresses.get("nat", (None, None))[1]
    # NOTE(review): private_ip/private_mac return the "nat" entry rather
    # than addresses["private"] -- looks intentional for NAT'd access, but
    # confirm against the synchronizer playbooks.
    @property
    def private_ip(self):
        return self.addresses.get("nat", (None, None))[0]
    @property
    def private_mac(self):
        return self.addresses.get("nat", (None, None))[1]
    @property
    def ceilometer_ip(self):
        return self.addresses.get("ceilometer", (None, None))[0]
    @property
    def ceilometer_mac(self):
        return self.addresses.get("ceilometer", (None, None))[1]
    @property
    def site_tenant_list(self):
        """OpenStack tenant ids of every site the creator has privileges on."""
        tenant_ids = Set()
        for sp in SitePrivilege.objects.filter(user=self.creator):
            site = sp.site
            for cs in site.controllersite.all():
                if cs.tenant_id:
                    tenant_ids.add(cs.tenant_id)
        return tenant_ids
    @property
    def slice_tenant_list(self):
        """OpenStack tenant ids of slices the creator owns or has privileges on."""
        tenant_ids = Set()
        for sp in SlicePrivilege.objects.filter(user=self.creator):
            slice = sp.slice
            for cs in slice.controllerslices.all():
                if cs.tenant_id:
                    tenant_ids.add(cs.tenant_id)
        for slice in Slice.objects.filter(creator=self.creator):
            for cs in slice.controllerslices.all():
                if cs.tenant_id:
                    tenant_ids.add(cs.tenant_id)
        if self.creator.is_admin:
            #TODO: Ceilometer publishes the SDN meters without associating to any tenant IDs.
            #For now, ceilometer code is changed to pusblish all such meters with tenant
            #id as "default_admin_tenant". Here add that default tenant as authroized tenant_id
            #for all admin users.
            tenant_ids.add("default_admin_tenant")
        return tenant_ids
    @property
    def tenant_list(self):
        # Union of slice- and site-derived tenant ids.
        return self.slice_tenant_list | self.site_tenant_list
    @property
    def tenant_list_str(self):
        return ", ".join(self.tenant_list)
    @property
    def ceilometer_port(self):
        # TODO: Find a better logic to choose unique ceilometer port number for each instance
        if not self.id:
            return None
        return 8888+self.id
    @property
    def ceilometer_url(self):
        # Proxy URL for this channel; None until the instance has an address.
        if not self.private_ip:
            return None
        return "http://" + self.private_ip + ":" + str(self.ceilometer_port) + "/"
def model_policy_monitoring_channel(pk):
    """Interim model policy for a MonitoringChannel: lock the row and make
    sure its backing container is (re)created as needed."""
    # TODO: this should be made in to a real model_policy
    with transaction.atomic():
        channels = MonitoringChannel.objects.select_for_update().filter(pk=pk)
        if not channels:
            return
        channels[0].manage_container()
SFLOW_KIND = "sflow"
SFLOW_PORT = 6343
SFLOW_API_PORT = 33333
class SFlowService(Service):
    """Proxy Service model for the sFlow collection service."""
    KIND = SFLOW_KIND
    class Meta:
        app_label = "ceilometer"
        verbose_name = "sFlow Collection Service"
        proxy = True
    default_attributes = {"sflow_port": SFLOW_PORT, "sflow_api_port": SFLOW_API_PORT}
    sync_attributes = ("sflow_port", "sflow_api_port",)
    @property
    def sflow_port(self):
        # UDP port on which sFlow datagrams are collected.
        return self.get_attribute("sflow_port", self.default_attributes["sflow_port"])
    @sflow_port.setter
    def sflow_port(self, value):
        self.set_attribute("sflow_port", value)
    @property
    def sflow_api_port(self):
        # TCP port of the sFlow subscription REST API.
        return self.get_attribute("sflow_api_port", self.default_attributes["sflow_api_port"])
    @sflow_api_port.setter
    def sflow_api_port(self, value):
        self.set_attribute("sflow_api_port", value)
    def get_instance(self):
        """Return the first instance of this service's first slice, or None."""
        if not self.slices.exists():
            return None
        slice = self.slices.all()[0]
        if not slice.instances.exists():
            return None
        return slice.instances.all()[0]
    @property
    def sflow_api_url(self):
        """Base URL of the sFlow API, or None while no instance is running."""
        instance = self.get_instance()
        if not instance:
            return None
        return "http://" + instance.get_ssh_ip() + ":" + str(self.sflow_api_port) + "/"
class SFlowTenant(Tenant):
    """Per-user tenant that subscribes a UDP listening endpoint to sFlow data.

    A user may create at most one SFlowTenant per listening endpoint
    (enforced in save()).
    """
    class Meta:
        proxy = True
    KIND = SFLOW_KIND
    sync_attributes = ("listening_endpoint", )
    default_attributes = {}
    def __init__(self, *args, **kwargs):
        # Default the provider service to the first sFlow service, if any.
        sflow_services = SFlowService.get_service_objects().all()
        if sflow_services:
            self._meta.get_field("provider_service").default = sflow_services[0].id
        super(SFlowTenant, self).__init__(*args, **kwargs)
    @property
    def creator(self):
        """User that created this tenant (cached after the first lookup)."""
        from core.models import User
        if getattr(self, "cached_creator", None):
            return self.cached_creator
        creator_id=self.get_attribute("creator_id")
        if not creator_id:
            return None
        users=User.objects.filter(id=creator_id)
        if not users:
            return None
        user=users[0]
        self.cached_creator = users[0]
        return user
    @creator.setter
    def creator(self, value):
        if value:
            value = value.id
        if (value != self.get_attribute("creator_id", None)):
            # Invalidate the cache when the creator actually changes.
            self.cached_creator=None
            self.set_attribute("creator_id", value)
    @property
    def listening_endpoint(self):
        return self.get_attribute("listening_endpoint", None)
    @listening_endpoint.setter
    def listening_endpoint(self, value):
        # Only UDP endpoints of the form udp://ip:port are accepted.
        if urlparse(value).scheme != 'udp':
            raise XOSProgrammingError("SFlowTenant: Only UDP listening endpoint URLs are accepted...valid syntax is: udp://ip:port")
        self.set_attribute("listening_endpoint", value)
    def save(self, *args, **kwargs):
        """Validate creator/endpoint and enforce one tenant per
        (user, listening_endpoint) pair."""
        if not self.creator:
            if not getattr(self, "caller", None):
                # caller must be set when creating a SFlow tenant since it creates a slice
                raise XOSProgrammingError("SFlowTenant's self.caller was not set")
            self.creator = self.caller
            if not self.creator:
                raise XOSProgrammingError("SFlowTenant's self.creator was not set")
        if not self.listening_endpoint:
            raise XOSProgrammingError("SFlowTenant's self.listening_endpoint was not set")
        if self.pk is None:
            #Allow only one sflow channel per user and listening_endpoint
            channel_count = sum ( [1 for channel in SFlowTenant.objects.filter(kind=SFLOW_KIND) if ((channel.creator == self.creator) and (channel.listening_endpoint == self.listening_endpoint))] )
            if channel_count > 0:
                raise XOSValidationError("Already %s sflow channels exist for user Can only create max 1 tenant per user and listening endpoint" % str(channel_count))
        super(SFlowTenant, self).save(*args, **kwargs)
    def delete(self, *args, **kwargs):
        # Fixed: this previously called super(MonitoringChannel, self).delete(),
        # walking the wrong class's MRO; super() must name this class.
        super(SFlowTenant, self).delete(*args, **kwargs)
    @property
    def authorized_resource_list(self):
        # sFlow tenants are currently authorized for all resources.
        return ['all']
    @property
    def authorized_resource_list_str(self):
        return ", ".join(self.authorized_resource_list)
| xmaruto/mcord | xos/services/ceilometer/models.py | models.py | py | 10,893 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "core.models.Service",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "core.models.TenantWithContainer",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "sets.Set",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "core.mo... |
74632636347 | # -*- coding: utf-8 -*-
"""
Sihoo Celery Worker 模块
@author: AZLisme
@email: helloazl@icloud.com
"""
from celery import Celery
celery_app = Celery('SihooWorker')
def configure(app):
    """Load the Celery configuration and attach the shared app to *app*.

    Settings come from the project settings module first, then may be
    overridden via the SIHOO_CELERY_SETTINGS environment variable.
    """
    # NOTE(review): 'sihoo.settings.celery-setting' contains a hyphen, which
    # is not a valid dotted module path -- confirm the settings module name.
    celery_app.config_from_object('sihoo.settings.celery-setting')
    celery_app.config_from_envvar('SIHOO_CELERY_SETTINGS', silent=True)
    app.config['CELERY'] = celery_app
| AZLisme/sihoo | sihoo/tasks/__init__.py | __init__.py | py | 370 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "celery.Celery",
"line_number": 13,
"usage_type": "call"
}
] |
158477486 | import datetime as dt
from rest_framework import status
from rest_framework.exceptions import NotAuthenticated, PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from bumblebee.core.exceptions import (
MissingFieldsError,
NoneExistenceError,
UrlParameterError,
)
from bumblebee.core.helpers import create_400, create_500
from bumblebee.feeds.api.serializers.feed_serializers import (
FeedBuzzSerializer,
FeedRebuzzSerializer,
)
from bumblebee.feeds.api.serializers.user_serializers import FeedUserSerializer
from bumblebee.feeds.utils import (
get_follow_suggestions_for_user,
get_folowing_buzzes_for_user,
)
from bumblebee.users.utils import DbExistenceChecker
class FeedBuzzListView(APIView):
    """Return the authenticated user's feed: buzzes and rebuzzes from the
    accounts they follow, together with the user's own serialized profile."""
    permission_classes = [IsAuthenticated]
    def get_posts(self, *args, **kwargs):
        """Collect the dict of "buzzes" and "rebuzzes" for the requesting user."""
        return get_folowing_buzzes_for_user(self.request.user)
    def get(self, request, *args, **kwargs):
        """GET /feed: serialize the feed or map known errors to responses."""
        try:
            post_instances = self.get_posts()
            user_serializer = FeedUserSerializer(self.request.user, many=False)
            buzz_serializer = FeedBuzzSerializer(
                post_instances.get("buzzes"), many=True
            )
            rebuzz_serializer = FeedRebuzzSerializer(
                post_instances.get("rebuzzes"), many=True
            )
            return Response(
                data=dict(
                    updated_time=dt.datetime.now(),
                    user=user_serializer.data,
                    post=buzz_serializer.data + rebuzz_serializer.data,
                ),
                status=status.HTTP_200_OK,
            )
        except (MissingFieldsError, UrlParameterError, NoneExistenceError) as error:
            # Domain errors already carry a response-shaped message.
            return Response(error.message, status=error.message.get("status"))
        except (PermissionDenied, NotAuthenticated) as error:
            return Response(
                create_400(
                    error.status_code,
                    error.get_codes(),
                    error.get_full_details().get("message"),
                ),
                status=error.status_code,
            )
        except Exception as error:
            # Fixed: error.args may be empty, so indexing args[0]
            # unconditionally could itself raise IndexError.
            return Response(
                create_500(
                    cause=error.args[0] if error.args else None,
                    verbose=f"Could not get feed due to an unknown error",
                ),
                status=status.HTTP_500_INTERNAL_SERVER_ERROR,
            )
class FeedFollowSuggestionsListView(APIView):
    """Return follow suggestions for the authenticated user, together with
    the user's own serialized profile."""
    permission_classes = [IsAuthenticated]
    def get_suggestions(self, *args, **kwargs):
        """Collect suggested users to follow for the requesting user."""
        return get_follow_suggestions_for_user(self.request.user)
    def get(self, request, *args, **kwargs):
        """GET /suggestions: serialize suggestions or map known errors."""
        try:
            suggestion_instances = self.get_suggestions()
            user_serializer = FeedUserSerializer(self.request.user, many=False)
            suggestion_serializer = FeedUserSerializer(suggestion_instances, many=True)
            return Response(
                data=dict(
                    updated_time=dt.datetime.now(),
                    user=user_serializer.data,
                    suggestions=suggestion_serializer.data,
                ),
                status=status.HTTP_200_OK,
            )
        except (MissingFieldsError, UrlParameterError, NoneExistenceError) as error:
            # Domain errors already carry a response-shaped message.
            return Response(error.message, status=error.message.get("status"))
        except (PermissionDenied, NotAuthenticated) as error:
            return Response(
                create_400(
                    error.status_code,
                    error.get_codes(),
                    error.get_full_details().get("message"),
                ),
                status=error.status_code,
            )
        except Exception as error:
            # Fixed: error.args may be empty, so indexing args[0]
            # unconditionally could itself raise IndexError.
            return Response(
                create_500(
                    cause=error.args[0] if error.args else None,
                    verbose=f"Could not get suggestions due to an unknown error",
                ),
                status=status.HTTP_500_INTERNAL_SERVER_ERROR,
            )
| sthasam2/bumblebee-backend | bumblebee/feeds/api/views/feed_views.py | feed_views.py | py | 4,226 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.views.APIView",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "rest_framework.permissions.IsAuthenticated",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "bumblebee.feeds.utils.get_folowing_buzzes_for_user",
"line_number": 3... |
27577951391 | from django.shortcuts import get_object_or_404, render, redirect, HttpResponseRedirect
from django.views.generic import TemplateView, UpdateView
from django.contrib.auth import get_user_model
from .models import Message
from django.urls import reverse
from django.contrib import messages
from review.models import Review
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .forms import EditProfileForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
# Create your views here.
from django.template import RequestContext
def handler404(request, *args, **argv):
    """Custom 404 handler rendering the project's 404 template.

    Fixed: render() takes the request as its first positional argument, and
    the context_instance keyword was removed in Django 1.10 -- the previous
    call would raise a TypeError instead of rendering the page.
    """
    response = render(request, '404.html', {})
    response.status_code = 404
    return response
class HomePageView(TemplateView):
    """Static landing page."""
    template_name = 'home.html'
def MessageView(request, username):
    """Show *username*'s anonymous messages and accept a new one via POST.

    On POST, stores the submitted compliment for that user and redirects
    the (anonymous) sender to the signup page.
    """
    user_model = get_user_model()
    message_user = get_object_or_404(user_model, username=username)
    all_messages = message_user.messages.all()
    if request.method == 'POST':
        new_message = Message.objects.create(
            customuser=message_user, text=request.POST['message'])
        new_message.save()
        messages.success(
            request, 'Compliments dropped successfully, Create your own Account below to receive and give Anonymous compliments')
        return redirect('account_signup')
    context = {'all_messages': all_messages, 'message_user': message_user}
    return render(request, 'message.html', context)
class UserProfile(LoginRequiredMixin, TemplateView):
    """Profile page for the logged-in user; anonymous visitors are
    redirected to the login page by LoginRequiredMixin."""
    template_name = 'user_profile.html'
CustomUser = get_user_model()
@login_required(login_url='account_login')
def EditProfile(request, username):
    """Edit the profile of *username*; redirect to the profile on success.

    Fixed: on an invalid POST the view previously fell through and rendered
    a fresh unbound form, silently discarding all validation errors; it now
    re-renders the bound form so the errors are displayed.
    """
    user = get_object_or_404(get_user_model(), username=username)
    if request.method == 'POST':
        form = EditProfileForm(request.POST, instance=user)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('user_profile'))
        # Fall through with the bound, invalid form so errors are shown.
    else:
        form = EditProfileForm(instance=user)
    return render(request, 'edit_profile.html', {'form': form})
@login_required(login_url='account_login')
def delete_message(request, m_id):
    """Delete a message; only its owner may do so, everyone else is sent home."""
    msg = get_object_or_404(Message, id=m_id)
    owner = msg.customuser
    if owner != request.user:
        return redirect('home')
    msg.delete()
    return redirect(owner.get_absolute_url())
@login_required(login_url='account_login')
def spam_message(request, m_id):
    """Let a message's owner flag it as spam (replacing its text)."""
    # Fetch the Message identified by the id from the url.
    msg = get_object_or_404(Message, id=m_id)
    owner = msg.customuser
    if owner == request.user:
        msg.text = 'The owner has marked this message as a spam.'
        msg.save()
        return redirect(owner.get_absolute_url())
    # Non-owners are bounced home (or to login if somehow unauthenticated).
    destination = 'home' if request.user.is_authenticated else 'account_login'
    return redirect(destination)
class AbouUs(TemplateView):
    """Static "About us" page."""
    template_name = 'about.html'
class ContactUs(TemplateView):
    """Static "Contact us" page."""
    template_name = 'contact.html'
def ReviewView(request):
    """List all reviews, paginated 5 per page.

    Fixed: Paginator.get_page() never raises PageNotAnInteger/EmptyPage --
    it handles invalid and out-of-range values internally -- so the previous
    try/except was dead code, and its PageNotAnInteger fallback called
    paginator.page(page_number) which would have re-raised on a
    non-integer value anyway.
    """
    reviews = Review.objects.all()
    template_name = "review.html"
    paginator = Paginator(reviews, 5)
    page_number = request.GET.get('page')
    page_obj = paginator.get_page(page_number)
    return render(request, template_name, {'page_obj': page_obj})
@login_required(login_url='account_login')
def SettingsView(request):
    """Render the account settings page for the logged-in user."""
    context = {}
    return render(request, 'settings.html', context)
def AddReview(request):
    """Render the review form; on POST, create the Review and redirect."""
    if request.method != 'POST':
        return render(request, 'add_review.html', {})
    new_review = Review.objects.create(
        name=request.POST['name'],
        review=request.POST['review'],
        occupation=request.POST['occupation'])
    new_review.save()
    messages.success(request, 'Review submitted successfully')
    return redirect('review')
| Afeez1131/Anonymous-v1 | anonymous/views.py | views.py | py | 4,560 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.shortcuts.render",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.template.RequestContext",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.views.generic.TemplateView",
"line_number": 23,
"usage_type": "name"
},
... |
7520101007 | """
Created on Wed Feb 24 12:34:17 2021
@author: Narmin Ghaffari Laleh
"""
##############################################################################
from dataGenerator.dataSetGenerator_ClamMil import Generic_MIL_Dataset
import utils.utils as utils
from extractFeatures import ExtractFeatures
from utils.core_utils import Train_MIL_CLAM
from utils.data_utils import ConcatCohorts_Classic
from eval.eval import CalculatePatientWiseAUC, CalculateTotalROC, MergeResultCSV
from sklearn.model_selection import StratifiedKFold
import numpy as np
import os
import pandas as pd
import random
from sklearn import preprocessing
import torch
##############################################################################
def CLAM_MIL_Training(args):
    """Train CLAM MIL models for every target label in args.target_labels.

    For each (label, repeat) pair a fresh project folder is created; depending
    on args.train_full this either trains on the full cohort (optionally with
    an early-stopping validation split) or runs stratified k-fold cross
    validation, writing split CSVs, per-fold results, and AUC summaries.

    NOTE(review): when a target has fewer than 20 patients the inner loop
    `continue`s without closing reportFile -- potential file-handle leak.
    """
    targetLabels = args.target_labels
    args.feat_dir = args.feat_dir[0]
    for targetLabel in targetLabels:
        for repeat in range(args.repeatExperiment):
            args.target_label = targetLabel
            random.seed(args.seed)
            args.projectFolder = utils.CreateProjectFolder(args.project_name, args.adressExp, targetLabel, args.model_name, repeat+1)
            print(args.projectFolder)
            # Skip experiments whose project folder already exists (resume).
            if os.path.exists(args.projectFolder):
                continue
            else:
                os.mkdir(args.projectFolder)
            args.result_dir = os.path.join(args.projectFolder, 'RESULTS')
            os.makedirs(args.result_dir, exist_ok = True)
            args.split_dir = os.path.join(args.projectFolder, 'SPLITS')
            os.makedirs(args.split_dir, exist_ok = True)
            reportFile = open(os.path.join(args.projectFolder,'Report.txt'), 'a', encoding="utf-8")
            reportFile.write('-' * 30 + '\n')
            reportFile.write(str(args))
            reportFile.write('-' * 30 + '\n')
            if args.extractFeature:
                # Extract tile features once per training data directory.
                imgs = os.listdir(args.datadir_train[0])
                imgs = [os.path.join(args.datadir_train[0], i) for i in imgs]
                ExtractFeatures(data_dir = imgs, feat_dir = args.feat_dir, batch_size = args.batch_size, target_patch_size = -1, filterData = True)
            print('\nLOAD THE DATASET FOR TRAINING...\n')
            patientsList, labelsList, args.csvFile = ConcatCohorts_Classic(imagesPath = args.datadir_train,
                                                                          cliniTablePath = args.clini_dir, slideTablePath = args.slide_dir,
                                                                          label = targetLabel, minNumberOfTiles = args.minNumBlocks,
                                                                          outputPath = args.projectFolder, reportFile = reportFile, csvName = args.csv_name,
                                                                          patientNumber = args.numPatientToUse)
            # Encode string labels to integer class ids.
            yTrueLabel = utils.CheckForTargetType(labelsList)
            le = preprocessing.LabelEncoder()
            yTrue = le.fit_transform(yTrueLabel)
            args.num_classes = len(set(yTrue))
            args.target_labelDict = dict(zip(le.classes_, range(len(le.classes_))))
            utils.Summarize(args, list(yTrue), reportFile)
            print('\nLoad the DataSet...')
            dataset = Generic_MIL_Dataset(csv_path = args.csvFile,
                                          data_dir = args.feat_dir,
                                          shuffle = False,
                                          seed = args.seed,
                                          print_info = True,
                                          label_dict  = args.target_labelDict,
                                          patient_strat = True,
                                          label_col = args.target_label,
                                          ignore = [],
                                          reportFile = reportFile)
            # Too few patients for a meaningful split; skip this target.
            if len(patientsList) < 20:
                continue
            if args.train_full:
                print('-' * 30)
                print('IT IS A FULL TRAINING FOR ' + targetLabel + '!')
                train_data = pd.DataFrame(list(zip(patientsList, yTrue, yTrueLabel)), columns = ['PATIENT', 'yTrue', 'yTrueLabel'])
                if args.early_stopping:
                    # Hold out 10% per class for early stopping.
                    val_data = train_data.groupby('yTrue', group_keys = False).apply(lambda x: x.sample(frac = 0.1))
                    train_data = train_data[~train_data['PATIENT'].isin(list(val_data['PATIENT']))]
                    train_data.reset_index(inplace = True, drop = True)
                    val_data.reset_index(inplace = True, drop = True)
                    df = pd.DataFrame({'train': pd.Series(train_data['PATIENT']), 'test': pd.Series([]), 'val' : pd.Series(val_data['PATIENT'])})
                    df.to_csv(os.path.join(args.split_dir, 'TrainSplit.csv'), index = False)
                    train_dataset, val_dataset, test_dataset = dataset.Return_splits(from_id = False, csv_path = os.path.join(args.split_dir, 'TrainSplit.csv'))
                else:
                    df = pd.DataFrame({'train': pd.Series(train_data['PATIENT']), 'test': pd.Series([]), 'val' : pd.Series([])})
                    df.to_csv(os.path.join(args.split_dir, 'TrainValSplit.csv'), index = False)
                    train_dataset, val_dataset, test_dataset = dataset.Return_splits(from_id = False, csv_path = os.path.join(args.split_dir, 'TrainValSplit.csv'))
                datasets = (train_dataset, val_dataset, test_dataset)
                model, _, _ = Train_MIL_CLAM(datasets = datasets, fold = 'FULL', args = args, trainFull = True)
                torch.save(model.state_dict(), os.path.join(args.projectFolder, 'RESULTS', 'finalModel'))
                print()
                print('-' * 30)
                reportFile.close()
            else:
                print('IT IS A ' + str(args.k) + 'FOLD CROSS VALIDATION TRAINING FOR ' + targetLabel + '!')
                patientID = np.array(patientsList)
                yTrue = np.array(yTrue)
                yTrueLabel = np.array(yTrueLabel)
                folds = args.k
                kf = StratifiedKFold(n_splits = folds, random_state = args.seed, shuffle = True)
                kf.get_n_splits(patientID, yTrue)
                foldcounter = 1
                for train_index, test_index in kf.split(patientID, yTrue):
                    testPatients = patientID[test_index]
                    trainPatients = patientID[train_index]
                    testyTrue = yTrue[test_index]
                    trainyTrue = yTrue[train_index]
                    testyTrueLabel = yTrueLabel[test_index]
                    trainyTrueLabel = yTrueLabel[train_index]
                    print('GENERATE NEW TILES...\n')
                    print('FOR TRAIN SET...\n')
                    train_data = pd.DataFrame(list(zip(trainPatients, trainyTrue, trainyTrueLabel)), columns = ['PATIENT', 'yTrue', 'yTrueLabel'])
                    print('FOR VALIDATION SET...\n')
                    # 10% of the training fold (stratified) becomes validation.
                    val_data = train_data.groupby('yTrue', group_keys = False).apply(lambda x: x.sample(frac = 0.1))
                    train_data = train_data[~train_data['PATIENT'].isin(list(val_data['PATIENT']))]
                    print('FOR TEST SET...\n')
                    test_data = pd.DataFrame(list(zip(testPatients, testyTrue, testyTrueLabel)), columns = ['PATIENT', 'yTrue', 'yTrueLabel'])
                    train_data.reset_index(inplace = True, drop = True)
                    test_data.reset_index(inplace = True, drop = True)
                    val_data.reset_index(inplace = True, drop = True)
                    print('-' * 30)
                    print("K FOLD VALIDATION STEP => {}".format(foldcounter))
                    print('-' * 30)
                    df = pd.DataFrame({'train': pd.Series(train_data['PATIENT']), 'test': pd.Series(test_data['PATIENT']), 'val' : pd.Series(val_data['PATIENT'])})
                    df.to_csv(os.path.join(args.split_dir, 'TrainTestValSplit_{}.csv'.format(foldcounter)), index = False)
                    train_dataset, val_dataset, test_dataset = dataset.Return_splits(from_id = False, csv_path = os.path.join(args.split_dir, 'TrainTestValSplit_{}.csv'.format(foldcounter)))
                    datasets = (train_dataset, val_dataset, test_dataset)
                    model, results, test_auc = Train_MIL_CLAM(datasets = datasets, fold = foldcounter, args = args, trainFull = False)
                    reportFile.write('AUC calculated by CLAM' + '\n')
                    reportFile.write(str(test_auc) + '\n')
                    reportFile.write('-' * 30 + '\n')
                    # Collect per-slide predictions for this fold.
                    patients = []
                    filaNames = []
                    yTrue_test = []
                    yTrueLabe_test = []
                    probs = {}
                    for i_temp in range(args.num_classes):
                        key = utils.get_key_from_value(args.target_labelDict, i_temp)
                        probs[key] = []
                    for item in list(results.keys()):
                        temp = results[item]
                        patients.append(temp['PATIENT'])
                        filaNames.append(temp['FILENAME'])
                        yTrue_test.append(temp['label'])
                        yTrueLabe_test.append(utils.get_key_from_value(args.target_labelDict, temp['label']))
                        for key in list(args.target_labelDict.keys()):
                            probs[key].append(temp['prob'][0][utils.get_value_from_key(args.target_labelDict, key)])
                    probs = pd.DataFrame.from_dict(probs)
                    df = pd.DataFrame(list(zip(patients, filaNames, yTrue_test, yTrueLabe_test)), columns =['PATIENT', 'FILENAME', 'yTrue', 'yTrueLabel'])
                    df = pd.concat([df, probs], axis = 1)
                    testResultsPath = os.path.join(args.result_dir, 'TEST_RESULT_SLIDE_BASED_FOLD_' + str(foldcounter) + '.csv')
                    df.to_csv(testResultsPath, index = False)
                    CalculatePatientWiseAUC(resultCSVPath = testResultsPath, args = args, foldcounter = foldcounter , clamMil = True, reportFile = reportFile)
                    reportFile.write('-' * 30 + '\n')
                    foldcounter += 1
                # Aggregate per-fold results across all folds.
                patientScoreFiles = []
                slideScoreFiles = []
                for i in range(args.k):
                    patientScoreFiles.append('TEST_RESULT_PATIENT_BASED_FOLD_' + str(i + 1) + '.csv')
                    slideScoreFiles.append('TEST_RESULT_SLIDE_BASED_FOLD_' + str(i + 1) + '.csv')
                CalculateTotalROC(resultsPath = args.result_dir, results = patientScoreFiles, target_labelDict = args.target_labelDict, reportFile = reportFile)
                reportFile.write('-' * 30 + '\n')
                MergeResultCSV(args.result_dir, slideScoreFiles, milClam = True)
                reportFile.close()
##############################################################################
| KatherLab/HIA | CLAM_MIL_Training.py | CLAM_MIL_Training.py | py | 12,138 | python | en | code | 76 | github-code | 6 | [
{
"api_name": "random.seed",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "utils.utils.CreateProjectFolder",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "utils.utils",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "os.path.exist... |
73933198267 | from .common import * # NOQA
import pytest
# Module-level shared state, filled in by the create_project_client fixture
# and read by every test in this module.
project_detail = {"project": None, "namespace": None, "cluster": None,
                  "project2": None, "namespace2": None, "cluster2": None}
# One (user, token) pair per RBAC role exercised by the project-scope test.
user_token = {"user_c1_p1_owner": {"user": None, "token": None},
              "user_c1_p1_member": {"user": None, "token": None},
              "user_c1_p2_owner": {"user": None, "token": None},
              "user_standard": {"user": None, "token": None}}
# Source catalog and chart external ids: two mysql versions (used by the
# upgrade/rollback tests) plus wordpress (used by the tiller test).
CATALOG_URL = "https://git.rancher.io/charts"
MYSQL_EXTERNALID_037 = "catalog://?catalog=library&template=mysql" \
                       "&version=0.3.7"
MYSQL_EXTERNALID_038 = "catalog://?catalog=library&template=mysql" \
                       "&version=0.3.8"
WORDPRESS_EXTID = "catalog://?catalog=library&template=wordpress" \
                  "&version=1.0.5"
def cluster_and_client(cluster_id, mgmt_client):
    """Look up a cluster by id and build a cluster-scoped rancher client.

    Returns a (cluster, client) tuple; the client talks to the cluster's
    own ``/schemas`` endpoint re-using the management client's token.
    """
    target_cluster = mgmt_client.by_id_cluster(cluster_id)
    schema_url = target_cluster.links.self + '/schemas'
    scoped_client = rancher.Client(
        url=schema_url, verify=False, token=mgmt_client.token)
    return target_cluster, scoped_client
def wait_for_template_to_be_created(client, name, timeout=45):
    """Poll until catalog *name* exposes at least one template.

    Polls ``client.list_template`` with exponential back-off
    (0.5s, 1s, 2s, ...) and returns as soon as a template appears.

    :param client: management API client providing ``list_template``
    :param name: catalog id to poll for
    :param timeout: seconds to wait before giving up
    :raises AssertionError: if no template shows up within *timeout*

    Bug fix: the previous loop set ``found`` and then still slept one more
    back-off interval before exiting; we now return immediately on success.
    """
    start = time.time()
    interval = 0.5
    while True:
        templates = client.list_template(catalogId=name)
        if len(templates) > 0:
            return
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for templates")
        time.sleep(interval)
        interval *= 2
def check_condition(condition_type, status):
    """Build a predicate for wait_for_condition().

    The returned callable is true when *resource* carries a condition whose
    ``type`` equals *condition_type* and whose ``status`` equals *status*;
    resources without a usable ``conditions`` attribute are never a match.
    """
    def _predicate(resource):
        conditions = getattr(resource, "conditions", None)
        if not conditions:
            return False
        return any(cond.type == condition_type and cond.status == status
                   for cond in conditions)
    return _predicate
def test_tiller():
    """Deploy wordpress into a project whose secrets quota is exhausted by
    the namespace itself, and verify tiller reports the install as failed
    (condition Installed=False) rather than hanging.

    Exercises a live Rancher cluster; cleans up by deleting the project.
    """
    name = random_test_name()
    admin_client = get_user_client()
    clusters = admin_client.list_cluster(name=CLUSTER_NAME).data
    assert len(clusters) > 0
    cluster_id = clusters[0].id
    # Project quota of a single secret: the namespace quota below consumes
    # it, so the chart's own secrets cannot be created and the install fails.
    p = admin_client. \
        create_project(name="test-" + random_str(),
                       clusterId=cluster_id,
                       resourceQuota={
                           "limit": {
                               "secrets": "1"}},
                       namespaceDefaultResourceQuota={
                           "limit": {
                               "secrets": "1"}}
                       )
    p = admin_client.reload(p)
    proj_client = rancher.Client(url=p.links.self +
                                 '/schemas', verify=False,
                                 token=USER_TOKEN)
    # need a cluster scoped client to create a namespace
    _cluster, cluster_client = cluster_and_client(cluster_id, admin_client)
    ns = cluster_client.create_namespace(name=random_str(),
                                        projectId=p.id,
                                        resourceQuota={
                                            "limit": {
                                                "secrets": "1"
                                                }}
                                        )
    wait_for_template_to_be_created(admin_client, "library")
    app = proj_client.create_app(
        name=name,
        externalId=WORDPRESS_EXTID,
        targetNamespace=ns.name,
        projectId=p.id,
        answers=get_defaut_question_answers(admin_client, WORDPRESS_EXTID)
    )
    app = proj_client.reload(app)
    # test for tiller to be stuck on bad installs
    wait_for_condition(proj_client, app, check_condition('Installed', 'False'))
    # cleanup by deleting project
    admin_client.delete(p)
def test_app_deploy():
    """Deploy the mysql 0.3.7 catalog app, verify it goes active, delete it."""
    user_client = get_user_client()
    project_client = get_project_client_for_token(
        project_detail["project"], USER_TOKEN)
    answers = get_defaut_question_answers(
        user_client, MYSQL_EXTERNALID_037)
    wait_for_template_to_be_created(user_client, "library")
    deployed_app = project_client.create_app(
        name=random_test_name(),
        externalId=MYSQL_EXTERNALID_037,
        targetNamespace=project_detail["namespace"].name,
        projectId=project_detail["project"].id,
        answers=answers)
    print("App is active")
    validate_catalog_app(project_client, deployed_app, MYSQL_EXTERNALID_037)
    project_client.delete(deployed_app)
def test_app_delete():
    """Deploy the mysql catalog app, delete it, and verify the app
    resource is actually removed."""
    user_client = get_user_client()
    project_client = get_project_client_for_token(
        project_detail["project"], USER_TOKEN)
    wait_for_template_to_be_created(user_client, "library")
    answers = get_defaut_question_answers(
        user_client, MYSQL_EXTERNALID_037)
    deployed_app = project_client.create_app(
        name=random_test_name(),
        externalId=MYSQL_EXTERNALID_037,
        targetNamespace=project_detail["namespace"].name,
        projectId=project_detail["project"].id,
        answers=answers)
    print("App is active")
    validate_catalog_app(project_client, deployed_app, MYSQL_EXTERNALID_037)
    deployed_app = project_client.delete(deployed_app)
    validate_app_deletion(project_client, deployed_app.id)
def test_app_upgrade_version():
    """Deploy mysql 0.3.7, upgrade it in place to 0.3.8, and verify the
    new chart version is the one running."""
    user_client = get_user_client()
    project_client = get_project_client_for_token(
        project_detail["project"], USER_TOKEN)
    wait_for_template_to_be_created(user_client, "library")
    old_answers = get_defaut_question_answers(
        user_client, MYSQL_EXTERNALID_037)
    app = project_client.create_app(
        name=random_test_name(),
        externalId=MYSQL_EXTERNALID_037,
        targetNamespace=project_detail["namespace"].name,
        projectId=project_detail["project"].id,
        answers=old_answers)
    print("App is active")
    validate_catalog_app(project_client, app, MYSQL_EXTERNALID_037)
    # Upgrade: same app object, new external id and matching answers.
    new_answers = get_defaut_question_answers(
        user_client, MYSQL_EXTERNALID_038)
    app = project_client.update(
        obj=app,
        externalId=MYSQL_EXTERNALID_038,
        targetNamespace=project_detail["namespace"].name,
        projectId=project_detail["project"].id,
        answers=new_answers)
    app = project_client.reload(app)
    validate_catalog_app(project_client, app, MYSQL_EXTERNALID_038)
    assert app.externalId == MYSQL_EXTERNALID_038, "incorrect template version"
    project_client.delete(app)
def test_app_rollback():
    """Deploy mysql 0.3.7, upgrade to 0.3.8, then roll back to the first
    revision and verify the original chart version is active again.

    Exercises a live Rancher project; deletes the app at the end.
    """
    admin_client = get_user_client()
    proj_client = get_project_client_for_token(
        project_detail["project"],
        USER_TOKEN)
    wait_for_template_to_be_created(admin_client, "library")
    answer = get_defaut_question_answers(
        admin_client,
        MYSQL_EXTERNALID_037)
    app = proj_client.create_app(
        name=random_test_name(),
        externalId=MYSQL_EXTERNALID_037,
        targetNamespace=project_detail["namespace"].name,
        projectId=project_detail["project"].id,
        answers=answer)
    print("App is active")
    app = validate_catalog_app(proj_client, app, MYSQL_EXTERNALID_037)
    # Remember the revision of the 0.3.7 install so we can roll back to it.
    rev_id = app.appRevisionId
    new_answer = get_defaut_question_answers(
        admin_client,
        MYSQL_EXTERNALID_038)
    app = proj_client.update(
        obj=app,
        externalId=MYSQL_EXTERNALID_038,
        targetNamespace=project_detail["namespace"].name,
        projectId=project_detail["project"].id,
        answers=new_answer)
    app = proj_client.reload(app)
    app = validate_catalog_app(proj_client, app, MYSQL_EXTERNALID_038)
    assert app.externalId == MYSQL_EXTERNALID_038, "incorrect template version"
    # Roll back to the recorded revision and confirm 0.3.7 is restored.
    proj_client.action(obj=app,
                       action_name='rollback',
                       revisionId=rev_id)
    app = proj_client.reload(app)
    validate_catalog_app(proj_client, app, MYSQL_EXTERNALID_037)
    assert app.externalId == MYSQL_EXTERNALID_037, "incorrect template version"
    proj_client.delete(app)
def test_app_answer_override():
    """Deploy mysql 0.3.7, then upgrade only the answers (mysqlUser) and
    verify the overridden answer is reflected on the app."""
    user_client = get_user_client()
    project_client = get_project_client_for_token(
        project_detail["project"], USER_TOKEN)
    wait_for_template_to_be_created(user_client, "library")
    answers = get_defaut_question_answers(
        user_client, MYSQL_EXTERNALID_037)
    app = project_client.create_app(
        name=random_test_name(),
        externalId=MYSQL_EXTERNALID_037,
        targetNamespace=project_detail["namespace"].name,
        projectId=project_detail["project"].id,
        answers=answers)
    print("App is active")
    app = validate_catalog_app(project_client, app, MYSQL_EXTERNALID_037)
    # Same chart version, one changed answer.
    answers["mysqlUser"] = "admin1234"
    app = project_client.update(
        obj=app,
        externalId=MYSQL_EXTERNALID_037,
        targetNamespace=project_detail["namespace"].name,
        projectId=project_detail["project"].id,
        answers=answers)
    app = project_client.reload(app)
    app = validate_catalog_app(project_client, app, MYSQL_EXTERNALID_037, answers)
    assert app["answers"].mysqlUser == "admin1234", \
        "incorrect answer upgrade"
    project_client.delete(app)
def test_rbac_app_project_scope_deploy():
    """Create a project-scoped catalog, deploy an app from it, and check
    catalog visibility per role: visible to p1 owner/member, hidden from a
    p2 owner and from an unrelated standard user.

    Exercises a live Rancher setup; cleans up the second project p2.
    (The deployed app and catalog are left for module-level teardown.)
    """
    admin_client = get_user_client()
    proj_client = get_project_client_for_token(
        project_detail["project"],
        USER_TOKEN)
    catalog = admin_client.create_projectCatalog(
        name="projectcatalog",
        baseType="projectCatalog",
        branch="master",
        url=CATALOG_URL,
        projectId=project_detail["project"].id)
    # Give the catalog a moment to sync before building templates from it.
    time.sleep(5)
    pId = project_detail["project"].id.split(":")[1]
    catalog_proj_scoped_ext_id = "catalog://?catalog=" + pId + \
                                 "/projectcatalog&type=" \
                                 "projectCatalog&template=" \
                                 "mysql&version=0.3.8"
    answers = get_defaut_question_answers(
        admin_client,
        catalog_proj_scoped_ext_id)
    app = proj_client.create_app(
        name=random_test_name(),
        externalId=catalog_proj_scoped_ext_id,
        answers=answers,
        targetNamespace=project_detail["namespace"].name,
        projectId=project_detail["project"].id)
    validate_catalog_app(proj_client, app, catalog_proj_scoped_ext_id)
    p2, ns2 = create_project_and_ns(
        USER_TOKEN,
        project_detail["cluster"],
        random_test_name("testapp"))
    #Assign role
    assign_members_to_project(admin_client,
                              user_token["user_c1_p2_owner"]["user"],
                              p2,
                              "project-owner")
    #Verify "project-owner" of p1 can list the added catalog
    user1_client = get_client_for_token(
        user_token["user_c1_p1_owner"]["token"])
    catalogs_list = user1_client.list_projectCatalog()
    assert len(catalogs_list) == 1, \
        "Project catalog not found for the user"
    assert catalogs_list["data"][0]["name"] == \
        "projectcatalog", "Incorrect project catalog found"
    # Verify "project-member" of p1 can list the added catalog
    user2_client = get_client_for_token(
        user_token["user_c1_p1_member"]["token"])
    catalogs_list_2 = user2_client.list_projectCatalog()
    assert len(catalogs_list_2) == 1, \
        "Project catalog not found for the user"
    # Verify "project-owner" of p2 CANNOT list the added catalog
    user3_client = get_client_for_token(
        user_token["user_c1_p2_owner"]["token"])
    catalogs_list_3 = user3_client.list_projectCatalog()
    assert len(catalogs_list_3) == 0, \
        "Project catalog found for the user"
    # Verify A standard user CANNOT list the added catalog
    user4_client = get_client_for_token(
        user_token["user_standard"]["token"])
    catalogs_list_4 = user4_client.list_projectCatalog()
    assert len(catalogs_list_4) == 0, \
        "Project catalog found for the user"
    admin_client.delete(p2)
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
    """Module fixture: create the shared project/namespace, the four RBAC
    test users, and assign p1 owner/member roles.

    Populates the module-level ``project_detail`` and ``user_token`` dicts;
    tears the project down again when the module finishes.
    """
    client = get_admin_client()
    clusters = client.list_cluster(name=CLUSTER_NAME).data
    assert len(clusters) > 0
    project_detail["project"], project_detail["namespace"] = \
        create_project_and_ns(USER_TOKEN, clusters[0],
                              random_test_name("testapp"))
    project_detail["cluster"] = clusters[0]
    #create users
    user_token["user_c1_p1_owner"]["user"], \
        user_token["user_c1_p1_owner"]["token"] = create_user(client)
    user_token["user_c1_p1_member"]["user"], \
        user_token["user_c1_p1_member"]["token"] = create_user(client)
    user_token["user_c1_p2_owner"]["user"], \
        user_token["user_c1_p2_owner"]["token"] = create_user(client)
    user_token["user_standard"]["user"], \
        user_token["user_standard"]["token"] = create_user(client)
    #Assign roles to the users
    assign_members_to_project(client,
                              user_token["user_c1_p1_owner"]["user"],
                              project_detail["project"],
                              "project-owner")
    assign_members_to_project(client,
                              user_token["user_c1_p1_member"]["user"],
                              project_detail["project"],
                              "project-member")

    def fin():
        # Teardown: removing the project also removes its namespace/apps.
        client = get_user_client()
        client.delete(project_detail["project"])

    request.addfinalizer(fin)
| jim02468/rancher | tests/validation/tests/v3_api/test_app.py | test_app.py | py | 13,408 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pytest.fixture",
"line_number": 319,
"usage_type": "call"
}
] |
5560963127 | """ this is a mixture of the best #free twitter sentimentanalysis modules on github.
i took the most usable codes and mixed them into one because all of them
where for a linguistical search not usable and did not show a retweet or a full tweet
no output as csv, only few informations of a tweet, switching language
or even to compare linguistic features in tweets of two different langauges and etc. etc ...
special and many many thanks to https://github.com/vprusso/youtube_tutorials who showed on his
page a tutorial on how to do a sentimentanalysis with python
i did this for users with not much skills and linguistical background to help them to get a corpus of twitterdata
and to show them how to do a comparison between sentence based vs document based sentimentanalysis
credits to all AVAILABLE FREE AND SIMPLE sentimentanalysis programms (dec. 2019) on github.
many thanks to everybody and of course to github for making this exchange and usage possible!
cemre koc (Goethe University, Frankfurt) Python3.7
"""
from textblob import TextBlob #Sentimentlexikon FOR GERMAN (TEXTBLOB_DE import textblob_de
import re #modul for regular expressions
from tweepy import API #Twitter API modul for more info: look tweepy doc please!
from tweepy import Cursor
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import tweepy #usage of diffrent feautres of my programm
import sys #only if wanted
import csv ##only if wanted (see rest of programm)
import pandas as pd #pandas for illustration
import authentification #access to twitter
import numpy as np #collection of tweets via numpy
import matplotlib.pyplot as plt #if needed (see below for ploting)
import numpy
#output screen (if you use pycharm for full screen view)
#only if needed
# Pandas console display options: effectively unlimited rows/columns/width
# so full tweets are visible when the DataFrame is printed.
pd.set_option('display.max_rows', 1000000000000)
pd.set_option('display.max_columns', 1000000000)
pd.set_option('display.width', 100000000000)
pd.set_option('display.float_format', '{:20,.2f}'.format)
#for maximal OUTPUT!
#pd.set_option('display.max_colwidth', -1)
#TWITTER AUTHENTIFICTION (Twitter development)
#please fill for that the identification.py with your credentials!
#you need a twitter developer account for getting these informations
class TwitterAuthenticator():
    """Builds a tweepy OAuth handler from the credentials in authentification.py."""

    def authenticate_twitter_app(self):
        """Return an OAuthHandler carrying the consumer and access tokens."""
        auth = OAuthHandler(authentification.CONSUMER_KEY, authentification.CONSUMER_SECRET)
        auth.set_access_token(authentification.ACCESS_TOKEN, authentification.ACCESS_TOKEN_SECRET)
        return auth
#TWITTER CLIENT SERVER
class TwitterClient():
    """Thin wrapper around the tweepy REST API for one (optional) user."""

    def __init__(self, twitter_user=None):
        self.auth = TwitterAuthenticator().authenticate_twitter_app()
        self.twitter_client = API(self.auth)
        self.twitter_user = twitter_user

    def get_twitter_client_api(self):
        """Expose the underlying tweepy API object."""
        return self.twitter_client

    def get_user_timeline_tweets(self, num_tweets):
        """Return up to *num_tweets* tweets from the configured user's timeline."""
        cursor = Cursor(self.twitter_client.user_timeline, id=self.twitter_user)
        return list(cursor.items(num_tweets))

    def get_friend_list(self, num_friends):
        """Return up to *num_friends* accounts the configured user follows."""
        cursor = Cursor(self.twitter_client.friends, id=self.twitter_user)
        return list(cursor.items(num_friends))

    def get_home_timeline_tweets(self, num_tweets):
        """Return up to *num_tweets* tweets from the home timeline."""
        cursor = Cursor(self.twitter_client.home_timeline, id=self.twitter_user)
        return list(cursor.items(num_tweets))
#TWITTER STREAMER FOR STREAMING AND LIVE TWEETS
class TwitterStreamer():
    """Streams live tweets matching a hashtag/keyword list into a file."""

    def __init__(self):
        self.twitter_autenticator = TwitterAuthenticator()

    def stream_tweets(self, fetched_tweets_filename, hash_tag_list):
        """Open an authenticated stream and append matching tweets to
        *fetched_tweets_filename* (blocks until the stream ends)."""
        # AUTHENTIFICATION AND CONNECTION TO API
        listener = TwitterListener(fetched_tweets_filename)
        auth = self.twitter_autenticator.authenticate_twitter_app()
        stream = Stream(auth, listener)
        #you can use the stream.filter for defining the search for words/hasthags!!!!!!
        #same sentimentanalysis works for words or hashtags!!!
        stream.filter(track=hash_tag_list)
#TWITTER STREAM LISTENER FOR PRINTING TWEETS
class TwitterListener(StreamListener):
    """Stream listener that prints each raw tweet and appends it to a file."""

    def __init__(self, fetched_tweets_filename):
        # Target file for the raw JSON payloads (opened in append mode per tweet).
        self.fetched_tweets_filename = fetched_tweets_filename

    def on_data(self, data):
        """Persist one raw tweet; always return True so the stream keeps running."""
        try:
            print(data)
            with open(self.fetched_tweets_filename, 'a') as tf:
                tf.write(data)
            return True
        except BaseException as e:
            # Swallow write/print errors deliberately to keep the stream alive.
            print("Error on_data %s" % str(e))
            return True

    def on_error(self, status):
        """Stop the stream on HTTP 420 (rate limit); otherwise just log."""
        if status == 420:
            #OCCURS IF RATE LIMIT IS PASSED
            return False
        print(status)
#FOR ANALYZING CLEANING TWEETS (TO CONTENT)
class TweetAnalyzer():
#DELTETE ALL UNNECESSARY CHARACTERS
def clean_tweet(self, tweet):
return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())
#SIMPLE SENTIMENTANALYSIS VIA TEXTBLOB (englisch)
def analyze_sentiment(self, tweet):
analysis = TextBlob(self.clean_tweet(tweet))
if analysis.sentiment.polarity > 0:
return 1
elif analysis.sentiment.polarity == 0:
return 0
else:
return -1
#You can use the following classification of polarity for sentence based analysis
#since i am using this programm for document level classification I left it with 1 -1 and 0
# if (polarity == 0):
# print("Neutral")
# elif (polarity > 0 and polarity <= 0.3):
# print("Schwach positiv")
# elif (polarity > 0.3 and polarity <= 0.6):
# print("positiv")
# elif (polarity > 0.6 and polarity <= 1.0):
# print("Stark positiv")
# elif (polarity > -0.3 and polarity <= 0):
# print("schwach negativ")
# elif (polarity > -0.6 and polarity <= -0.3):
# print("Negativ")
# elif (polarity >= -1.0 and polarity <= -0.6):
# print("Stark negativ")
def tweets_to_data_frame(self, tweets):
df = pd.DataFrame(data=[tweet.full_text for tweet in tweets], columns=['tweets'])
#THIS IS FOR RETWEETS OF A CERTAIN TWEET! BUT BE CARFUL ONLY A CERTAIN NUMBER OF TWEETS PER DAY!
#TWITTER RESTRICTION
#remove the """ for usage!
"""replies = []
non_bmp_map = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)
for full_tweets in tweepy.Cursor(api.user_timeline, screen_name='GretaThunberg', timeout=999999).items(20):
for tweet in tweepy.Cursor(api.search, q='to:GretaThunberg', since_id=1203618558267273225,
result_type='recent',
timeout=999999).items(100):
if hasattr(tweet, 'in_reply_to_status_id_str'):
if (tweet.in_reply_to_status_id_str == full_tweets.id_str):
replies.append(tweet.text)
print("Tweet :", full_tweets.text.translate(non_bmp_map))
for elements in replies:
print("Replies :", elements)
# replies.clear()"""
#DATA SET VIA DATAFRAME TO SHOW WITH NUMPY
#YOU CAN PRINT GIVEN DATA LIKE LENGTH RETWEET NUMBER LANGUAGE etc. CHOSSE:
#['__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__',
# '__getattribute__', '__getstate__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '_
# _lt__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '_
# _sizeof__', '__str__', '__subclasshook__', '__weakref__', '_api', '_json', 'author', 'contributors', 'coordinates',
# 'created_at', 'destroy', 'display_text_range', 'entities', 'favorite', 'favorite_count', 'favorited',
# 'full_text', 'geo', 'id', 'id_str', 'in_reply_to_screen_name', 'in_reply_to_status_id', 'in_reply_to_status_id_str',
# 'in_reply_to_user_id', 'in_reply_to_user_id_str', 'is_quote_status', 'lang', 'parse', 'parse_list', 'place',
# 'possibly_sensitive', 'quoted_status', 'quoted_status_id', 'quoted_status_id_str', 'quoted_status_permalink',
# 'retweet', 'retweet_count', 'retweeted', 'retweets', 'source', 'source_url', 'truncated', 'user']
df['id'] = np.array([tweet.id for tweet in tweets])
df['len'] = np.array([len(tweet.full_text) for tweet in tweets])
df['date'] = np.array([tweet.created_at for tweet in tweets])
#df['source'] = np.array([tweet.source for tweet in tweets])
df['likes'] = np.array([tweet.favorite_count for tweet in tweets])
df['retweets'] = np.array([tweet.retweet_count for tweet in tweets])
df["lang"] = ([tweet.lang for tweet in tweets])
#df["in_reply_to_status_id_str"] = ([tweet.replies for tweet in tweets])
return df
#Programm begins here!!
if __name__ == '__main__':
twitter_client = TwitterClient()
tweet_analyzer = TweetAnalyzer()
api = twitter_client.get_twitter_client_api()
#TWEET MODE EXTENDED FOR FULL TWEET OUTPUT!! RETWEETS STAY THE SAME!
#COUNT= LAST TWEET NUMBER OF USER (SCREEN NAME)
#HERE FOR GRETA THUNBERG, JUST DELETE AND TYPE ACCOUNT NAME TO CHANGE
#FOR EXAMPLE rtErdogan (for president of turkey), realDonaldTrump (for Trump) etc...
tweets = api.user_timeline(screen_name="GretaThunberg", count=200, tweet_mode="extended")
#print DATA
print(dir(tweets[0]))
#print(tweets[0].retweet_count) #retweet count print
#sentimentanalysis for printing it in a dataframe with the other informations!
df = tweet_analyzer.tweets_to_data_frame(tweets)
df['sentiment'] = np.array([tweet_analyzer.analyze_sentiment(tweet) for tweet in df['tweets']])
#AVARAGE LENGTH OF ALL TWEETS
#print(np.mean(df['len']))
# GET NUMBER OF LIKES
#print(np.max(df['likes']))
# GET NUMBER OF RETWEETS
#print(np.max(df['retweets']))
#EXAMPLE RETWEET STATUS OF A CERTAIN TWEET ID
#To get ID you need to look on your broswers URL of this CERTAIN TWEET
#print(np.max(df["lang"]))
##print(df.in_reply_to_status_id[1075801005504258061])
#ANYWAY THERE IS A RESTRICTION SINCE 2019 ON ONLY 200 TWEETS
#THANK YOU CAMBRIDGE ANALYTICA
print(df.head(200))
# DO CSV FILE (DELETE OR NAME IT NEW TO MAKE IT SEPRATE)
#df.to_csv('KocSentiment.csv')
#TIME SERIES FOR CHART VIEW!!! DONT FORGET TO TURN ON MATPLOT LIBRARY
#time_likes = pd.Series(data=df['len'].values, index=df['date'])
#time_likes.plot(figsize=(16, 4), color='r')
#plt.show()
#time_favs = pd.Series(data=df['likes'].values, index=df['date'])
#time_favs.plot(figsize=(16, 4), color='r')
#plt.show()
#time_retweets = pd.Series(data=df['retweets'].values, index=df['date'])
#time_retweets.plot(figsize=(16, 4), color='r')
#plt.show()
#LAYERED VIEW! FOR COMPARISON !!
#time_likes = pd.Series(data=df['likes'].values, index=df['date'])
#time_likes.plot(figsize=(16, 4), label="likes", legend=True)
#time_retweets = pd.Series(data=df['retweets'].values, index=df['date'])
#time_retweets.plot(figsize=(16, 4), label="retweets", legend=True)
#plt.show()
| CemFFM/Sentimentanalysis | full_equipt_sentimentanalysis .py | full_equipt_sentimentanalysis .py | py | 11,573 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.set_option",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pandas.set_opti... |
18405188691 | import numpy as np
from astropy import table
from glob import glob
import pandas as pd
from scipy.stats import binned_statistic
def get_outlier_fraction(tbl, suffix='', bins=20):
    """Per-bin fraction of catastrophic outliers (|z_est - z| > 0.15(1+z)).

    Bins rows by the 'z<suffix>' column and averages the 0/1 outlier flag
    within each bin; returns the array of per-bin fractions.
    """
    is_outlier = (np.abs(tbl['z_est'] - tbl['z'])
                  > 0.15 * (1 + tbl['z'])).astype(float)
    stat = binned_statistic(tbl['z%s' % suffix], is_outlier,
                            statistic='mean', bins=bins)
    return stat.statistic
def get_diagnostics(z1, z2):
diff = np.array(z1 - z2) / (1 + np.array((z1)))
outlier_mask = np.abs(diff) < 0.15 # * (1 + z1)
med = np.median(diff)
mad = np.median(np.abs(diff - med))
return 100*np.array((np.mean(diff[outlier_mask]),
np.std(diff[outlier_mask]),
med, mad, 1-outlier_mask.mean()))
def run_for_table_old(name, min=None):
t = table.Table.from_pandas(pd.read_csv(name))
tmax = t['mags'].max()
t = t[t['z_est'] > 0]
if min is None:
max_mag = 2
while max_mag <= max(max_mag, tmax):
t_ = t[t['mags'] <= max_mag]
if len(t_) > 0.9 * len(t):
break
max_mag += 1
diag_old = get_diagnostics(t_['z'], t_['z_est'])
max_outlier_rate = diag_old[-1]
used_fraction = len(t_)*100 / len(t)
i = 2
for i in range(max_mag, tmax + 1):
t_ = t[t['mags'] <= i]
x = t_['z']
y = t_['z_est']
if len(t_) == 0:
break
diag = get_diagnostics(x, y)
print(name, i, '%.3f' % diag[-1], len(t_), i,
'%.3f' % max_outlier_rate)
if diag[-1] > max_outlier_rate:
break
diag_old = diag
used_fraction = len(t_)*100 / len(t)
else:
i = min + 1
t_ = t[t['mags'] <= int(min)]
diag_old = get_diagnostics(t_['z'], t_['z_est'])
used_fraction = len(t_)*100 / len(t)
return len(t_['z']), diag_old, i - 1, used_fraction
def run_for_table(name):
if name.endswith('csv'):
df = pd.read_csv(name)
elif name.endswith('parquet'):
df = pd.read_parquet(name, columns=['mags', 'z', 'z_est'])
else:
return [0, [0]*5]
x = df['z']
y = df['z_est']
diag = get_diagnostics(x, y)
return len(df['z']), diag
def name_to_caption(name):
output = name.split('/')[-1].replace('.csv', '').replace('.parquet', '')
if '-' in output:
output_parts = output.split('-')[1:-1]
output = ' '.join([s.replace('_', ' ').replace('+', '')
for s in output_parts])
output = output.replace(' ', ' ').replace(' ', ', ')
return output
def get_stats_for_file(name, **kwargs):
output = table.Table(names=['Name', 'Mean', 'Std', 'Median',
'MAD', 'Outliers',
'Count'],
dtype=[str, float, float, float, float, float,
int])
row = run_for_table(name, **kwargs)
output.add_row([name_to_caption(name), *row[1], row[0]])
return output
def get_stats_for_folder(folder, **kwargs):
output = table.Table(names=['Name', 'Mean', 'Std', 'Median',
'MAD', 'Outliers',
'Count'],
dtype=[str, float, float, float, float, float,
int])
names = glob('%s/*.csv' % folder) + glob('%s/*.parquet' % folder)
names.sort()
for f in names:
row = run_for_table(f, **kwargs)
output.add_row([name_to_caption(f), *row[1], row[0]])
return output
| minzastro/semiphore_public | utils/stats.py | stats.py | py | 3,666 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.array",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "scipy.stats.binned_statistic",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"l... |
42636165267 |
import netCDF4
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import Normalize
import cartopy.crs as ccrs
import matplotlib.colors as colors
# Land cover
# Load the 0.5-degree land-cover grid and recentre its longitudes by
# swapping the two halves of the array.
lc = netCDF4.Dataset("../data/LandCover_half.nc")
lc.set_auto_mask(True)
lc.variables
land_cover = lc["Land Cover"][:]
# fix landcover
halfway = np.shape(land_cover)[1]//2
first_half = land_cover[:,:halfway]
second_half = land_cover[:,halfway:]
land_cover = np.concatenate((second_half,first_half), axis=1)
#land_cover[land_cover == 0] = np.nan
#land_cover[land_cover > 10] = np.nan
land_cover_int = land_cover.astype(int) # Convert land_cover to int
unique_land_cover = np.unique(land_cover_int)
# plot
# Quick-look map of the integer land-cover classes on a PlateCarree grid.
fig = plt.figure(figsize=(10, 6))
ax = plt.axes(projection=ccrs.PlateCarree())
image = ax.imshow(land_cover_int , origin="lower", extent=[-180, 180, -90, 90], transform=ccrs.PlateCarree())
ax.coastlines()
ax.set_global()
plt.show()
# Load the modelled isoprene result (OMI-filtered monthly means), mask
# implausibly large values, and take the annual-mean climatology.
#ds = netCDF4.Dataset("../results/iso_result_mon_mean.nc")
ds = netCDF4.Dataset("../results/iso_result_OMI_filterd_mon_mean.nc")
ds.set_auto_mask(True)
dar = ds["J_times_IspS"][:]
dar[dar > 1000] = np.nan
dar_mean = np.nanmean(dar, axis=0)
dar_mean.shape
# Subset everything above 30 degrees North
def calculate_mean_subset(latitude_threshold, latitude, data):
    """Monthly mean standardised anomaly for cells north of a latitude.

    *data* is indexed (month, lat, lon) with 12 months.  Cells with
    latitude > *latitude_threshold* are kept, each month is expressed as
    (value - annual mean) / annual mean, and the spatial nan-mean is taken
    per month.  Returns a (12, 2) array of (month index, mean anomaly).
    """
    northern = np.where(latitude > latitude_threshold)[0]
    subset = data[:, northern, :]
    climatology = np.nanmean(subset, axis=0)
    standardised = (subset - climatology) / climatology
    monthly_means = [np.nanmean(standardised[month, :, :])
                     for month in range(12)]
    return np.column_stack((np.arange(12), monthly_means))
# NOTE(review): ``latitude_iso`` is never defined in this script — running
# it top-to-bottom raises NameError here; presumably it comes from the
# iso dataset's latitude variable in an interactive session. Confirm.
mean_iso_subset_30 = calculate_mean_subset(30, latitude_iso, dar)
print(mean_iso_subset_30)
calculate_mean_subset(30, latitude_iso, dar)
# Normalize the data between the maximum and 0
anomaly = np.subtract(dar, dar_mean)
norm = np.divide(anomaly, dar_mean)
np.nanmean(norm[5,:,:])
np.nanmin(norm[5,:,:])
np.nanmax(norm[5,:,:])
# Create a figure with multiple subplots
fig, axs = plt.subplots(3, 4, figsize=(16, 12), subplot_kw={'projection': ccrs.PlateCarree()})
# Iterate over the range of plots you want to display (0 to 11)
for i in range(12):
    # Calculate the subplot indices based on the iteration index
    row = i // 4 # Row index
    col = i % 4 # Column index
    # Plot the normalized data in the current subplot
    im = axs[row, col].imshow(norm[i, :, :], origin="lower", extent=[-180, 180, -90, 90], transform=ccrs.PlateCarree(), cmap='RdYlGn', vmin=-4, vmax=4, )
    axs[row, col].set_title("Iso_{:02d}".format(i)) # Format the title with leading zeros
    # Add coastlines
    axs[row, col].coastlines()
# Create a colorbar for the figure
fig.colorbar(im, ax=axs, fraction=0.022, pad=0.03, location='bottom')
# Adjust the spacing between subplots
plt.tight_layout()
# Move the colorbar below the subplots
fig.subplots_adjust(bottom=0.15)
# Display the plot
plt.show()
ds.close()
# Group the June anomaly field by land-cover class and report class means.
np.where(land_cover_int == 1)
norm[5, np.where(land_cover_int == 1)]
mean_norm_values = []
for lc in unique_land_cover:
    indices = np.where(land_cover_int == lc) # Find indices of matching land_cover values
    mean_norm = np.mean(norm[indices]) # Calculate mean norm for the current land_cover value
    mean_norm_values.append(mean_norm)
print("Mean norm values for each land_cover group:")
for lc, mean_norm in zip(unique_land_cover, mean_norm_values):
    print(f"Land Cover {lc}: {mean_norm}")
#### Compare to the HCHO
HCHO = netCDF4.Dataset("../data/OMI_iso_estimate/mon_average_OMI.nc")
HCHO.set_auto_mask(False)
## Conveert isoprene values from the units kg/gridcell/month to kg/m2/month
def gridcell_to_m2(latitude):
    """Area in m^2 of a 0.5 x 0.5 degree grid cell at the given latitude.

    One degree of latitude is taken as a constant 111111.1 m; the
    longitudinal side shrinks with cos(latitude).
    """
    half_degree_m = 111111.1 / 2
    lat_side_m = half_degree_m
    lon_side_m = np.cos(np.deg2rad(latitude)) * half_degree_m
    return lat_side_m * lon_side_m
# Convert the OMI isoprene estimate from kg/gridcell/month to kg/m2/month
# by dividing each cell by its area (area varies only with latitude).
latitudes = [float(lat) for lat in HCHO['lat'][:]] #get each latitude degree as a float
no_m2_in_grid = [gridcell_to_m2(lat) for lat in latitudes] #gets the number of m2 blocks in each grid cell
tiled = np.tile(no_m2_in_grid, (len(HCHO['lon'][:]), 1)).T #repeat across each longitude as distance remains the same across each latitude degree
# Get the isoprene emmited per m2
isoprene_per_m2 = (HCHO["EMworldC5H8"][:])/(tiled)
# Calculate the threshold value for the top 5%
# (despite the comment, 99.98 masks only the top 0.02% of values)
threshold = np.percentile(isoprene_per_m2, 99.98)
isoprene_per_m2[isoprene_per_m2 == 0] = np.nan
# Set values above the threshold to NaN
isoprene_per_m2[isoprene_per_m2 > threshold] = np.nan
# Standardised anomaly of the per-m2 emissions, as done for the model field.
HCHO_mean = np.nanmean(isoprene_per_m2, axis=0)
HCHO_anomaly = np.subtract(isoprene_per_m2, HCHO_mean)
HCHO_norm = np.divide(HCHO_anomaly, HCHO_mean)
i = 6
np.nanmean(HCHO_norm[i,:,:])
np.nanmin(HCHO_norm[i,:,:])
# Subset everything above 30 degrees North
latitude_HCHO = HCHO.variables["lat"][:]
def HCHO_std_anomoly_subset(latitude_threshold, latitude, data):
    """Monthly mean standardised anomaly of the OMI field north of a latitude.

    Like calculate_mean_subset, but the data array is passed through
    np.flipud first.  Returns a (12, 2) array of (month index, mean anomaly).

    NOTE(review): np.flipud on a 3-D (month, lat, lon) array flips the
    month axis (axis 0), not latitude — confirm this is the intent.
    """
    northern = np.where(latitude > latitude_threshold)[0]
    subset = np.flipud(data)[:, northern, :]
    climatology = np.nanmean(subset, axis=0)
    standardised = (subset - climatology) / climatology
    monthly_means = [np.nanmean(standardised[month, :, :])
                     for month in range(12)]
    return np.column_stack((np.arange(12), monthly_means))
# Compare the OMI-derived and modelled monthly anomalies north of 30N.
mean_HCHO_subset_30 = HCHO_std_anomoly_subset(30, latitude_HCHO, isoprene_per_m2)
print(mean_HCHO_subset_30)
HCHO_std_anomoly_subset(0, latitude_HCHO, isoprene_per_m2)
# Create a figure and a single subplot
fig, ax = plt.subplots(figsize=(10, 4))
# Plot the first line with a red color
ax.plot(mean_HCHO_subset_30[:, 0], mean_HCHO_subset_30[:, 1], 'r-', label='HCHO')
# Plot the second line with a blue color
ax.plot(mean_iso_subset_30[:, 0], mean_iso_subset_30[:, 1], 'b-', label='Model')
# Set the title and legend
ax.set_title('Average anomaly in the Northern temperate zone (>30 degrees)')
ax.legend()
# Display the plot
plt.show()
np.nanmin(HCHO_norm)
# Create a figure with multiple subplots
# Monthly OMI anomaly maps (flipped vertically so north is up).
fig, axs = plt.subplots(3, 4, figsize=(16, 12), subplot_kw={'projection': ccrs.PlateCarree()})
# Iterate over the range of plots you want to display (0 to 11)
for i in range(12):
    # Calculate the subplot indices based on the iteration index
    row = i // 4 # Row index
    col = i % 4 # Column index
    # Plot the normalized data in the current subplot
    im = axs[row, col].imshow(np.flipud(HCHO_norm[i, :, :]), origin="lower", extent=[-180, 180, -90, 90], transform=ccrs.PlateCarree(), cmap='RdYlGn')#, vmin=-4, vmax=4, )
    axs[row, col].set_title("OMI_{:02d}".format(i)) # Format the title with leading zeros
    # Add coastlines
    axs[row, col].coastlines()
# Create a colorbar for the figure
fig.colorbar(im, ax=axs, fraction=0.022, pad=0.03, location='bottom')
# Adjust the spacing between subplots
plt.tight_layout()
# Move the colorbar below the subplots
fig.subplots_adjust(bottom=0.15)
# Display the plot
plt.show()
HCHO.close() | bikempastine/Isoprene_PModel | exploration/anomaly_mapping.py | anomaly_mapping.py | py | 7,350 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "netCDF4.Dataset",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"lin... |
15790344554 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 28 23:15:17 2020
@author: malrawi
"""
import dicttoxml
import ruamel.yaml # https://pypi.org/project/ruamel.yaml/
import json
def read_yaml_as_dict(fname):
"""
A function used to read the ddoif dictionary in yaml format and return it as a python dictionary.
This functiona makes use of ruamel.yaml https://pypi.org/project/ruamel.yaml/
...
Input arguments
----------
- in_f: the name of yaml file
Output arguments:
- ddoif_dict: All attributes the ddoif dictionary
Methods
-------
ddoif_dict = ddoif_read(in_f='ATest.ddof', check_CRC=True)
"""
with open(fname) as fp:
# The FullLoader parameter handles the conversion from YAML
# scalar values to Python the dictionary format
# ddoif_dict = yaml.load(fp, Loader=yaml.FullLoader)
# ddoif_dict = yaml.safe_load(fp)
yaml = ruamel.yaml.YAML(typ='safe') # this is claimed to be the safest way to load yaml https://stackoverflow.com/questions/50846431/converting-a-yaml-file-to-python-json-object
ddoif_dict = yaml.load(fp)
return ddoif_dict
def yaml_to_xml(yaml_fname, xml_fname, ids=False):
"""
A function used to read the ddoif dictionary in yaml format and save it into an xml file.
This function makes use of dicttoxml package.
...
Input arguments
----------
- yaml_fname: the name of yaml file as input
- xml_fname: the name of output xml file
Output arguments:
Methods
-------
yaml_to_xml('ddoif_dictionary.yaml', 'ddoif_dictionary.xml')
"""
yaml_dict = read_yaml_as_dict(yaml_fname)
xml_obj = dicttoxml.dicttoxml(yaml_dict, custom_root='ddoif', attr_type=False, ids=ids)
print('Converting to xml using dicttoxml version -- ', dicttoxml.__version__)
# print_xml(xml_obj)
with open(xml_fname, "wb") as fp:
fp.write(xml_obj)
def yaml_to_json(yaml_fname, json_fname):
"""
A function used to read the ddoif dictionary in yaml format and save it into a json file.
This functiona makes use of json package.
...
Input arguments:
----------
- yaml_fname: the name of yaml file as input
- json_fname: the name of ouput json file
Output arguments:
Methods:
-------
yaml_to_xml('ddoif_dictionary.yaml', 'ddoif_dictionary.xml')
"""
yaml_dict = read_yaml_as_dict(yaml_fname)
with open(json_fname, 'w') as fp:
json.dump(yaml_dict, fp, indent=True, )
def print_xml(xml_obj): # xml_obj = dicttoxml.dicttoxml(yaml_dict, custom_root='ddoif', attr_type=False)
from xml.dom.minidom import parseString
dom = parseString(xml_obj)
print(dom.toprettyxml())
| morawi/ddoif | ddoif_utils.py | ddoif_utils.py | py | 3,011 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "ruamel.yaml.yaml.YAML",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "ruamel.yaml.yaml",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "ruamel.yaml",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "dicttoxml.dic... |
25009445303 | import datetime
import re
from random import shuffle
from collections import defaultdict
from django.utils.translation import (
activate,
get_language_info,
get_language,
)
from django import http
from django.shortcuts import render
from django.core.cache import cache
from django.conf import settings
from django.utils.translation import get_language
from django.utils.translation import ugettext as _
from django.utils import timezone
try:
from nltk.corpus import wordnet
except ImportError:
wordnet = None
from kl.search.models import Word, Search
SEARCH_SUMMARY_SKIPS = (
'crossword', 'korsord', 'fuck', 'peter', 'motherfucker',
)
def niceboolean(value):
if isinstance(value, bool):
return value
falseness = ('', 'no', 'off', 'false', 'none', '0', 'f')
return str(value).lower().strip() not in falseness
def uniqify(seq, idfun=None): # Alex Martelli ******* order preserving
if idfun is None:
def idfun(x): return x
seen = set()
result = []
for item in seq:
marker = idfun(item)
# in old Python versions:
# if seen.has_key(marker)
# but in new ones:
##if marker in seen: continue
if marker in seen:
continue
seen.add(marker)
result.append(item)
return result
class SearchResult(object):
def __init__(self, word, definition='', by_clue=None):
self.word = word
self.definition = definition
self.by_clue = by_clue
def home(request, json=False, record_search=True):
# By default we are set to record the search in our stats
# This can be overwritten by a CGI variable called 'r'
# E.g. r=0 or r=no
if request.GET.get('r'):
record_search = niceboolean(request.GET.get('r'))
language = request.GET.get('lang', get_language()).lower()
slots = None
if request.GET.get('l'):
try:
length = int(request.GET.get('l'))
except ValueError:
return http.HttpResponseRedirect('/?error=length')
slots = request.GET.getlist('s')
# if not type(slots) is list:
if not isinstance(slots, list):
return http.HttpResponseRedirect('/?error=slots')
notletters = request.GET.get('notletters', '').upper()
notletters = [x.strip() for x in notletters.split(',')
if len(x.strip()) == 1 and not x.strip().isdigit()]
if not len(slots) >= length:
return http.HttpResponseRedirect('/?error=slots&error=length')
if not [x for x in slots if x.strip()]:
# all blank
return http.HttpResponseRedirect('/?error=slots')
clues = request.GET.get('clues', '')
if clues and ' ' in clues and ',' not in clues:
clues = clues.replace(' ',', ')
clues = [
x.strip() for x in clues.split(',')
if (
x.strip() and
x.strip().lower() not in STOPWORDS and
not x.count(' ')
)
]
search_results = [] # the final simple list that is sent back
for clue in clues:
alternatives = _find_alternative_synonyms(
clue,
slots[:length],
language,
notletters=notletters,
request=request
)
search_results.extend([
SearchResult(x, by_clue=clue)
for x in alternatives
])
# find some alternatives
search = ''.join([x and x.lower() or ' ' for x in slots[:length]])
cache_key = '_find_alternatives_%s_%s' % (search, language)
if notletters:
cache_key += '__not' + ''.join(notletters)
cache_key = cache_key.replace(' ','_')
if re.findall('\s', cache_key):
raise ValueError(
'invalid cache_key search=%r, language=%r' % (search, language)
)
alternatives = cache.get(cache_key)
if alternatives is None:
alternatives = _find_alternatives(
slots[:length],
language,
notletters=notletters
)
cache.set(cache_key, alternatives, 60 * 60 * 24)
alternatives_count = len(alternatives)
alternatives_truncated = False
if alternatives_count > 100:
alternatives = alternatives[:100]
alternatives_truncated = True
result = dict(
length=length,
search=search,
word_count=alternatives_count,
alternatives_truncated=alternatives_truncated,
)
already_found = [x.word for x in search_results]
search_results.extend([
SearchResult(each.word, definition=each.definition)
for each in alternatives
if each.word not in already_found
])
match_points = None
match_points = []
if search_results:
first_word = search_results[0].word
for i, letter in enumerate(first_word):
if letter.lower() == search[i]:
match_points.append(1)
else:
match_points.append(0)
result['match_points'] = match_points
result['words'] = []
for search_result in search_results:
v = dict(word=search_result.word)
if search_result.definition:
v['definition'] = search_result.definition
if search_result.by_clue:
v['by_clue'] = search_result.by_clue
result['words'].append(v)
if alternatives_count == 1:
result['match_text'] = _("1 match found")
elif alternatives_count:
if alternatives_truncated:
result['match_text'] = _(
"%(count)s matches found but only showing first 100"
) % dict(count=alternatives_count)
else:
result['match_text'] = _("%(count)s matches found") % dict(
count=alternatives_count
)
else:
result['match_text'] = _("No matches found unfortunately :(")
found_word = None
if len(search_results) == 1:
try:
found_word = Word.objects.get(
word=search_results[0].word,
language=language
)
except Word.DoesNotExist:
# this it was probably not from the database but
# from the wordnet stuff
found_word = None
if record_search:
_record_search(
search,
user_agent=request.META.get('HTTP_USER_AGENT',''),
ip_address=request.META.get('REMOTE_ADDR',''),
found_word=found_word,
language=language
)
request.session['has_searched'] = True
if json:
return http.JsonResponse(result)
# return _render_json(result)
else:
length = ''
show_example_search = not bool(request.session.get('has_searched'))
most_recent_search_word = None
if not show_example_search:
most_recent_search_word = _get_recent_search_word(request)
lang = get_language()
accept_clues = (
wordnet is not None and lang.lower() in ('en', 'en-gb', 'en-us')
)
context = {
'length': length,
'slots': slots,
'accept_clues': accept_clues,
'show_example_search': show_example_search,
'most_recent_search_word': most_recent_search_word,
}
return render(request, 'search/home.html', context)
def _find_alternatives(slots, language, notletters=[]):
length = len(slots)
if length == 1:
return Word.objects.filter(length=1, word=slots[0], language=language)
filter_ = dict(length=length, language=language)
slots = [x and x.lower() or ' ' for x in slots]
search = ''.join(slots)
start = ''
end = ''
try:
start = re.findall('^\w+', search)[0]
if len(start) > 1:
filter_['first2'] = start[:2].lower()
if len(start) > 2:
filter_['word__istartswith'] = start
else:
filter_['first1'] = start.lower()
except IndexError:
pass
try:
end = re.findall('\w+$', search)[0]
if len(end) > 1:
filter_['last2'] = end[-2:].lower()
if len(end) > 2:
filter_['word__iendswith'] = end
else:
filter_['last1'] = end.lower()
except IndexError:
pass
def filter_match(match):
if end:
matchable_string = search[len(start):-len(end)]
found_string = match.word[len(start):-len(end)]
else:
matchable_string = search[len(start):]
found_string = match.word[len(start):]
assert len(matchable_string) == len(found_string), \
"matchable_string=%r, found_string=%r" % (matchable_string, found_string)
for i, each in enumerate(matchable_string):
if each != ' ' and each != found_string[i]:
# can't be match
return False
return True
search_base = Word.objects
limit = 10000
# if the filter is really vague and the length is high we're going to get
# too many objects and we need to cut our losses.
if filter_['length'] > 5:
if filter_.get('word__istartswith') and filter_.get('word__iendswith'):
# It's long but has a startswith and an endswith, increase the limit
limit = 5000
elif filter_.get('word__istartswith') or filter_.get('word__iendswith'):
# we're going to get less than above but still many
limit = 2500
else:
limit = 1000
# if there's neither a start or a end (e.g. '_E_E_A_') it will get all words
# that are of that length then end truncate the result set then filter them
# as a string operation. Then there's a chance it might not ever test word we
# are looking for.
if not start and not end:
# must come up with some other crazy icontains filter
# Look for the longest lump of letter. For example in '_E_ERA_' 'era' is
# the longest lump
#lumps = re.findall('\w+', search)
lumps = search.split()
longest = sorted(lumps, lambda x,y: cmp(len(y), len(x)))[0]
if len(longest) > 1:
filter_['word__icontains'] = longest
else:
for each in uniqify(lumps):
search_base = search_base.filter(word__icontains=each)
limit = search_base.filter(**filter_).order_by('word').count()
elif (start and len(start) <= 2) or (end and len(end) <= 2):
# If you search for somethin like "___TAM__T"
# We so far only know it's 9 characters long (french as 21k 9 characters long
# words).
# We also have one tiny little 't' at the end but there's still
# 4086 options
for lump in re.findall(r'\s(\w+)\s', search):
filter_['word__icontains'] = lump
search_qs = search_base.filter(**filter_)
for notletter in notletters:
search_qs = search_qs.exclude(word__icontains=notletter)
all_matches = [x for x
in search_qs.order_by('word')[:limit]
if filter_match(x)]
return uniqify(all_matches, lambda x: x.word.lower())
def _find_alternative_synonyms(
word,
slots,
language,
notletters=None,
request=None
):
length = len(slots)
if notletters is None:
notletters = []
slots = [x and x.lower() or ' ' for x in slots]
search = ''.join(slots)
start = ''
end = ''
try:
start = re.findall('^\w+', search)[0]
except IndexError:
pass
try:
end = re.findall('\w+$', search)[0]
except IndexError:
pass
def filter_match(word):
if end and not word.endswith(end):
# Don't even bother
return False
elif start and not word.startswith(start):
# Don't even bother
return False
if end:
matchable_string = search[len(start):-len(end)]
found_string = word[len(start):-len(end)]
else:
matchable_string = search[len(start):]
found_string = word[len(start):]
assert len(matchable_string) == len(found_string)
for i, each in enumerate(matchable_string):
if each != ' ' and each != found_string[i]:
# can't be match
return False
return True
def test(word):
if len(word) == length:
if not notletters:
for letter in word:
if letter.upper() in notletters:
return False
return filter_match(word)
for variation in _get_variations(word, greedy=True, request=request):
if test(variation):
yield variation
def _get_variations(word, greedy=False,
store_definitions=True,
request=None):
a = _get_variations_wordnet(
word,
greedy=greedy,
store_definitions=store_definitions
)
return a
# b = _get_variations_synonym_dot_com(
# word,
# greedy=greedy,
# store_definitions=store_definitions,
# request=request
# )
# return a + b
def _record_search(
search_word,
user_agent='',
ip_address='',
found_word=None,
language=None,
search_type='',
):
if len(user_agent) > 200:
user_agent = user_agent[:200]
if len(ip_address) > 15:
import warnings
warnings.warn("ip_address too long (%r)" % ip_address)
ip_address = ''
elif ip_address == '127.0.0.1' and settings.DEBUG:
# because 127.0.0.1 can't be looked up, use a random other one
examples = '125.239.15.42,114.199.97.224,68.190.165.25,208.75.100.212,'\
'61.29.84.154,72.49.16.234,66.57.228.64,196.25.255.250,'\
'141.117.6.97,85.68.18.183,90.157.186.202'.split(',')
shuffle(examples)
ip_address = examples[0]
Search.objects.create(
search_word=search_word,
user_agent=user_agent.strip(),
ip_address=ip_address.strip(),
found_word=found_word,
language=language,
search_type=search_type,
)
def _get_recent_search_word(request):
# _today = datetime.datetime.today()
_today = timezone.now()
_since = datetime.datetime(_today.year, _today.month, 1)
_extra_exclude = dict(found_word__word__in=list(SEARCH_SUMMARY_SKIPS))
if request.META.get('HTTP_USER_AGENT'):
_extra_exclude['user_agent'] = request.META.get('HTTP_USER_AGENT')
if request.META.get('REMOTE_ADDR'):
_extra_exclude['ip_address'] = request.META.get('REMOTE_ADDR')
_extra_filter = dict()
# Special hack! Since the search summary has a cache of 1 hour,
# don't include things that are too recent
_extra_filter['add_date__lt'] = _today - datetime.timedelta(hours=1)
return _find_recent_search_word(
get_language(),
since=_since,
random=True,
extra_exclude=_extra_exclude,
**_extra_filter,
)
def _find_recent_search_word(
language,
since=None,
random=False,
extra_exclude={},
**extra_filter
):
searches = Search.objects.filter(
language=language,
found_word__isnull=False,
**extra_filter
).select_related('found_word')
if since:
searches = searches.filter(add_date__gte=since)
searches = searches.exclude(**extra_exclude)
if random:
# For some bizzare reason it seems that even if the exclude above
# has found_word__word__in=SEARCH_SUMMARY_SKIPS it still returns
# words from that list!!!!
# Hence this list comprehension.
found_words = [x.found_word for x in searches
if x.found_word.word not in SEARCH_SUMMARY_SKIPS]
shuffle(found_words)
try:
return found_words[0]
except IndexError:
return None
else:
searches = searches.order_by('-add_date')
return searches[0].found_word
return None
def get_search_stats(language):
# Total no words in our database
cache_key = 'no_total_words_%s' % language
no_total_words = cache.get(cache_key)
if no_total_words is None:
no_total_words = Word.objects.filter(language=language).count()
cache.set(cache_key, no_total_words, 60 * 60 * 24 * 30)
today = timezone.now()
# Searches today
# today_midnight = datetime.datetime(
# today.year,
# today.month,
# today.day, 0, 0, 0)
today_midnight = today - datetime.timedelta(days=1)
cache_key = 'no_searches_today_%s' % language
no_searches_today = cache.get(cache_key)
if no_searches_today is None:
no_searches_today = Search.objects.filter(
language=language,
add_date__gte=today_midnight
).count()
cache.set(cache_key, no_searches_today, 60 * 60)
# Searches yesterday
cache_key = 'no_searches_yesterday_%s' % language
no_searches_yesterday = cache.get(cache_key)
if no_searches_yesterday is None:
yesterday_midnight = today_midnight - datetime.timedelta(days=1)
no_searches_yesterday = Search.objects.filter(language=language,
add_date__range=(yesterday_midnight, today_midnight)
).count()
cache.set(cache_key, no_searches_yesterday, 60 * 60 * 24)
# Searches this week
cache_key = 'no_searches_this_week_%s' % language
no_searches_this_week = cache.get(cache_key)
if no_searches_this_week is None:
# find the first monday
monday_midnight = today_midnight
while monday_midnight.strftime('%A') != 'Monday':
monday_midnight = monday_midnight - datetime.timedelta(days=1)
no_searches_this_week = Search.objects.filter(
language=language,
add_date__gt=monday_midnight
).count()
cache.set(cache_key, no_searches_this_week, 60 * 60 * 24)
# Searches this month
cache_key = 'no_searches_this_month_%s' % language
no_searches_this_month = cache.get(cache_key)
if no_searches_this_month is None:
first_day_month = today.replace(day=1)
no_searches_this_month = Search.objects.filter(
language=language,
add_date__gte=first_day_month
).count()
cache.set(cache_key, no_searches_this_month, 60 * 60)
# Searches this year
cache_key = 'no_searches_this_year_%s' % language
no_searches_this_year = cache.get(cache_key)
if no_searches_this_year is None:
# first_day_year = datetime.datetime(today.year, 1, 1, 0, 0, 0)
first_day_year = today.replace(month=1, day=1)
no_searches_this_year = Search.objects.filter(
language=language,
add_date__gte=first_day_year
).count()
cache.set(cache_key, no_searches_this_year, 60 * 60)
return {
'no_total_words': no_total_words,
'no_searches_today': no_searches_today,
'no_searches_yesterday': no_searches_yesterday,
'no_searches_this_week': no_searches_this_week,
'no_searches_this_month': no_searches_this_month,
'no_searches_this_year': no_searches_this_year,
}
MONTH_NAMES = []
for i in range(1, 13):
d = datetime.date(2009, i, 1)
MONTH_NAMES.append(d.strftime('%B'))
def searches_summary(request, year, month, atleast_count=2,
lookup_definitions=False):
first_search_date = Search.objects.all().order_by('add_date')[0].add_date
last_search_date = Search.objects.all().order_by('-add_date')[0].add_date
year = int(year)
try:
month_nr = [x.lower() for x in MONTH_NAMES].index(month.lower()) + 1
except ValueError:
raise http.Http404("Unrecognized month name")
# turn that into a date
since = datetime.date(year, month_nr, 1)
if (month_nr + 1) > 12:
since_month_later = datetime.date(year+1, 1, 1)
else:
since_month_later = datetime.date(year, month_nr+1, 1)
today = timezone.now()
since_month_later_datetime = today.replace(
year=since_month_later.year,
month=since_month_later.month,
day=since_month_later.day
)
next_month_link = None
if since_month_later_datetime < first_search_date:
raise http.Http404("Too far back in time")
if since_month_later_datetime < last_search_date:
next_month_link = since_month_later.strftime("/searches/%Y/%B/")
since_datetime = today.replace(
year=since.year,
month=since.month,
day=since.day
)
previous_month_link = None
if since_datetime > last_search_date:
raise http.Http404("Too far into the future")
elif since_datetime > first_search_date:
if (month_nr - 1) < 1:
since_month_earlier = datetime.date(year-1, 12, 1)
else:
since_month_earlier = datetime.date(year, month_nr-1, 1)
previous_month_link = since_month_earlier.strftime("/searches/%Y/%B/")
base_searches = Search.objects.filter(
add_date__gte=since,
add_date__lt=since_month_later
)
found_searches = base_searches.exclude(
found_word=None
).select_related(
'found_word'
).exclude(
found_word__word__in=list(SEARCH_SUMMARY_SKIPS)
)
found_words = defaultdict(list)
definitions = {}
for each in found_searches:
found_words[each.language].append(each.found_word.word)
if each.language not in definitions:
definitions[each.found_word.language] = {}
if each.found_word.definition:
definitions[each.found_word.language][each.found_word.word.lower()]\
= each.found_word.definition.splitlines()
found_words = dict(found_words)
found_words_repeats = {}
for language, words in found_words.items():
counts = defaultdict(int)
for word in words:
if len(word) < 2:
# don't want to find single character words
# It's a bug that they're even in there
continue
counts[word.lower()] += 1
found_words_repeats[language] = sorted(
[k for (k, v) in counts.items()
if v >= atleast_count],
key=lambda x: x[1]
)
if lookup_definitions:
for lang, words in found_words_repeats.items():
for word in words:
try:
definitions[lang][word]
except KeyError:
if lang in ('en-us','en-gb'):
# wordnet
definition = _get_word_definition(word, language=lang)
else:
definition = None
if not definition:
definition = _get_word_definition_scrape(word, language=lang)
if definition:
add_word_definition(word, definition, language=lang)
# bake the definitions into found_words_repeats
for lang, words in found_words_repeats.items():
for i, word in enumerate(words):
words_dict = dict(word=word)
if lang in definitions:
if word in definitions[lang]:
words_dict = dict(words_dict, definitions=definitions[lang][word])
found_words_repeats[lang][i] = words_dict
all_words_plain = set()
for records in found_words_repeats.values():
for record in records:
all_words_plain.add(record['word'].lower())
all_words_plain = list(all_words_plain)
context = {
# 'language': language,
'month': month,
'year': year,
'all_words_plain': all_words_plain,
'found_words_repeats': found_words_repeats,
'previous_month_link': previous_month_link,
'next_month_link': next_month_link,
}
return render(request, 'search/searches_summary.html', context)
def about_crosstips(request):
return render(request, 'search/about-crosstips.html')
| peterbe/kl2 | kl/search/views.py | views.py | py | 24,492 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "nltk.corpus.wordnet",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.get_language",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 78,
"usage_type": "call"
},... |
19501436322 | """
This file (test_youbit.py) contains unit tests for the encode.py and decode.py files.
"""
from pathlib import Path
import os
import time
from yt_dlp.utils import DownloadError
from tests.conftest import uploads
from youbit import Encoder, download_and_decode
from youbit.settings import Settings, Browser
from youbit.download import Downloader
from youbit.util import get_md5
@uploads
def test_youbit_round_trip(browser: Browser, tempdir: Path):
test_file = Path(os.path.dirname(__file__)) / "testdata" / "files" / "test_file.jpg"
encoder = Encoder(test_file, Settings(browser=browser))
url = encoder.encode_and_upload()
time.sleep(
10
) # YouTube needs time to process the video before we can download the correct resolution
timeout = 0
while timeout < 60:
try:
downloader = Downloader(url)
if downloader.best_vbr > 6000:
break
except DownloadError:
time.sleep(5)
timeout += 5
continue
if timeout >= 60:
assert False, "Timeout"
output_path = download_and_decode(url, tempdir)
original_md5 = get_md5(test_file)
output_md5 = get_md5(output_path)
assert original_md5 == output_md5
| mevimo/youbit | tests/unit/test_youbit.py | test_youbit.py | py | 1,245 | python | en | code | 651 | github-code | 6 | [
{
"api_name": "youbit.settings.Browser",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
... |
4643605876 | import os
import shutil
import pickle
import glob
import cv2
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('TkAgg')
from cam import create_dataset, Camera
def save_data(path, data):
with open(path, 'wb') as handle:
pickle.dump(data, handle)
print("Saved")
def load_data(path):
with open(path, 'rb') as handle:
data = pickle.load(handle)
return data
def camera_calibrate(images_folder='./img',
board_size=(6, 9),
world_scaling=1.,
debug=False):
images_names = sorted(glob.glob(images_folder))
images = []
for imname in images_names:
im = cv2.imread(imname, 1)
images.append(im)
# критерии, используемые детектором шахматной доски.
# Измените это, если код не может найти шахматную доску
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
objp = np.zeros((1, board_size[0] * board_size[1], 3), np.float32)
objp[0, :, :2] = np.mgrid[0:board_size[0], 0:board_size[1]].T.reshape(-1, 2)
objp = world_scaling * objp
width = images[0].shape[1]
height = images[0].shape[0]
imgpoints = []
objpoints = []
for frame in images:
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (board_size[0], board_size[1]), None)
if ret:
corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
cv2.drawChessboardCorners(frame, (board_size[0], board_size[1]), corners, ret)
if debug:
cv2.imshow('img', frame)
k = cv2.waitKey(500)
objpoints.append(objp)
imgpoints.append(corners)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (width, height), None, None)
return mtx, dist
def stereo_camera_calibrate(images_folder1='./img',
images_folder2='./img',
board_size=(6, 9),
world_scaling=1.,
cameraMatrix1=None,
distCoeffs1=None,
cameraMatrix2=None,
distCoeffs2=None,
debug=False):
cam1_path = sorted(glob.glob(images_folder1))
cam2_path = sorted(glob.glob(images_folder2))
c1_images = []
c2_images = []
for im1, im2 in zip(cam1_path, cam2_path):
im = cv2.imread(im1, 1)
c1_images.append(im)
im = cv2.imread(im2, 1)
c2_images.append(im)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.0001)
objp = np.zeros((board_size[0] * board_size[1], 3), np.float32)
objp[:, :2] = np.mgrid[0:board_size[0], 0:board_size[1]].T.reshape(-1, 2)
objp = world_scaling * objp
width = c1_images[0].shape[1]
height = c1_images[0].shape[0]
imgpoints_left = []
imgpoints_right = []
objpoints = []
for frame1, frame2 in zip(c1_images, c2_images):
gray1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
c_ret1, corners1 = cv2.findChessboardCorners(gray1, board_size, None)
c_ret2, corners2 = cv2.findChessboardCorners(gray2, board_size, None)
if c_ret1 == True and c_ret2 == True:
corners1 = cv2.cornerSubPix(gray1, corners1, (11, 11), (-1, -1), criteria)
corners2 = cv2.cornerSubPix(gray2, corners2, (11, 11), (-1, -1), criteria)
if debug:
cv2.drawChessboardCorners(frame1, board_size, corners1, c_ret1)
cv2.imshow('img', frame1)
cv2.drawChessboardCorners(frame2, board_size, corners2, c_ret2)
cv2.imshow('img2', frame2)
cv2.waitKey(500)
objpoints.append(objp)
imgpoints_left.append(corners1)
imgpoints_right.append(corners2)
stereocalibration_flags = cv2.CALIB_FIX_INTRINSIC
ret, CM1, dist1, CM2, dist2, R, T, E, F = cv2.stereoCalibrate(objectPoints=objpoints,
imagePoints1=imgpoints_left,
imagePoints2=imgpoints_right,
cameraMatrix1=cameraMatrix1,
distCoeffs1=distCoeffs1,
cameraMatrix2=cameraMatrix2,
distCoeffs2=distCoeffs2,
imageSize=(width, height),
criteria=criteria,
flags=stereocalibration_flags)
return R, T
def mousePoint(event, x, y, flag, params):
if event == cv2.EVENT_LBUTTONDOWN:
print(f"[{x}, {y}]")
def DLT(P1, P2, point1, point2):
A = [point1[1] * P1[2, :] - P1[1, :],
P1[0, :] - point1[0] * P1[2, :],
point2[1] * P2[2, :] - P2[1, :],
P2[0, :] - point2[0] * P2[2, :]
]
A = np.array(A).reshape((4, 4))
# print('A: ')
# print(A)
B = A.transpose() @ A
# from scipy import linalg
U, s, Vh = np.linalg.svd(B, full_matrices=False)
# print('Triangulated point: ')
# print(Vh[3, 0:3] / Vh[3, 3])
return Vh[3, 0:3] / Vh[3, 3]
if __name__ == "__main__":
camera1 = Camera(camera_id=0, show_frame=False, vertical_flip=True, save_video=False)
camera2 = Camera(camera_id=1, show_frame=False, vertical_flip=True, save_video=False)
#
# create_screen(0)
# create_screen(1)
#
# camera1.initialize()
# camera2.initialize()
# create_dataset([camera1, camera2], './img/split/')
# ========================================== КАЛИБРОВКА КАМЕРЫ =====================================================
# mtx1, dist1 = camera_calibrate('./img/split/camera 1/*.jpg', debug=False)
# mtx2, dist2 = camera_calibrate('./img/split/camera 2/*.jpg', debug=False)
#
# R, T = stereo_camera_calibrate(images_folder1="./img/split/camera 1/*.jpg",
# images_folder2="./img/split/camera 2/*.jpg",
# cameraMatrix1=mtx1,
# cameraMatrix2=mtx2,
# distCoeffs1=dist1,
# distCoeffs2=dist2,
# debug=False)
# ==================================== СОХРАНЕНИЕ ДАННЫХ ===========================================================
# save_data('./data/matrix_camera_1080.pickle', mtx1)
# save_data('./data/matrix_camera.pickle', mtx2)
#
# save_data('./data/dist_camera_1080.pickle', dist1)
# save_data('./data/dist_camera.pickle', dist2)
#
# save_data('./data/stereo_R.pickle', R)
# save_data('./data/stereo_T.pickle', T)
# ============================================== ЗАГРУЗКА ДАННЫХ ===================================================
mtx2 = load_data('./data/matrix_camera_1080.pickle')
mtx1 = load_data('./data/matrix_camera.pickle')
dist2 = load_data('./data/dist_camera_1080.pickle')
dist1 = load_data('./data/dist_camera.pickle')
R = load_data('./data/stereo_R.pickle')
T = load_data('./data/stereo_T.pickle')
print(f"Camera matrix 0:\n {mtx1}")
print(f"Camera matrix 1:\n {mtx2}")
print(f"Camera dist 0:\n {dist1}")
print(f"Camera dist 1:\n {dist2}")
print(f"R:\n {R}")
print(f"T:\n {T}")
# board_size = (6, 9)
# world_scaling = 1.
# =============================================== РУЧНАЯ РАЗМЕТКА ДАННЫХ ===========================================
# count = 0
# while True:
#
# if not count:
# path = './img/1.jpg'
# else:
# path = './img/2.jpg'
#
# img = cv2.imread(path, 1)
#
# cv2.imshow("Img", img)
# cv2.setMouseCallback('Img', mousePoint)
#
# if cv2.waitKey(0) & 0xFF == ord('q'):
# if not count:
# count += 1
# continue
#
# cv2.destroyAllWindows()
# break
# # Право #Середина # лево
# uvs1 = np.array([[249, 175], [187, 177], [106, 166],
# [67, 296], [163, 409], [257, 289],
# [267, 408], [190, 405]])
#
# uvs2 = np.array([[506, 50], [408, 52], [321, 53],
# [286, 196], [355, 320], [503, 189],
# [494, 329], [398, 321]])
# frame1 = cv2.imread('./img/1.jpg')
# frame2 = cv2.imread('./img/2.jpg')
#
# plt.imshow(frame1[:, :, [2, 1, 0]])
# plt.scatter(uvs1[:, 0], uvs1[:, 1])
# plt.show()
#
# plt.imshow(frame2[:, :, [2, 1, 0]])
# plt.scatter(uvs2[:, 0], uvs2[:, 1])
# plt.show()
# #
# RT1 = np.concatenate([np.eye(3), [[0], [0], [0]]], axis=-1)
# P1 = mtx1 @ RT1
#
# RT2 = np.concatenate([R, T], axis=-1)
# P2 = mtx2 @ RT2
#
# from mpl_toolkits.mplot3d import Axes3D
#
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.set_xlim3d(0, -40)
# ax.set_ylim3d(-20, 20)
# ax.set_zlim3d(50, 100)
#
# p3ds = []
# for uv1, uv2 in zip(uvs1, uvs2):
# _p3d = DLT(P1, P2, uv1, uv2)
# p3ds.append(_p3d)
# p3ds = np.array(p3ds)
#
# connections = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [1, 7]]
# for _c in connections:
# # print(p3ds[_c[0]])
# # print(p3ds[_c[1]])
# ax.plot(xs=[p3ds[_c[0], 0], p3ds[_c[1], 0]], ys=[p3ds[_c[0], 1], p3ds[_c[1], 1]],
# zs=[p3ds[_c[0], 2], p3ds[_c[1], 2]], c='red')
#
# plt.show()
# =============================================== НАХОЖДЕНИЕ АВТО =================================================
import mediapipe as mp
from mpl_toolkits.mplot3d import Axes3D
def get_frame_keypoints(landmarks, frame):
frame_keypoints = []
print(landmarks)
for face_landmarks in landmarks:
for p in range(21):
pxl_x = int(round(frame.shape[1] * face_landmarks.landmark[p].x))
pxl_y = int(round(frame.shape[0] * face_landmarks.landmark[p].y))
kpts = [pxl_x, pxl_y]
frame_keypoints.append(kpts)
return frame_keypoints
mp_drawing = mp.solutions.drawing_utils
# mp_face = mp.solutions.face_mesh
mp_face = mp.solutions.hands
# face1 = mp_face.FaceMesh(max_num_faces=1,
# refine_landmarks=True,
# min_detection_confidence=0.5,
# min_tracking_confidence=0.5)
# face2 = mp_face.FaceMesh(max_num_faces=1,
# refine_landmarks=True,
# min_detection_confidence=0.5,
# min_tracking_confidence=0.5)
face1 = mp_face.Hands(max_num_hands=1,
model_complexity=0,
min_detection_confidence=0.5,
min_tracking_confidence=0.5)
face2 = mp_face.Hands(max_num_hands=1,
model_complexity=0,
min_detection_confidence=0.5,
min_tracking_confidence=0.5)
camera1.initialize()
camera2.initialize()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# connections = [[i, i+1] for i in range(467)]
ax.view_init(-90, -90)
mp_pose = mp.solutions.pose
connections = mp_face.HAND_CONNECTIONS
counter = 0
global_kps1 = []
global_kps2 = []
while True:
frame1 = camera1.read_frame()
frame2 = camera2.read_frame()
frame1_copy = cv2.cvtColor(frame1, cv2.COLOR_BGR2RGB)
frame2_copy = cv2.cvtColor(frame2, cv2.COLOR_BGR2RGB)
frame1_copy.flags.writeable = False
frame2_copy.flags.writeable = False
results1 = face1.process(frame1_copy)
results2 = face2.process(frame2_copy)
# if results1.multi_face_landmarks:
if results1.multi_hand_landmarks:
frame1_keypoints = get_frame_keypoints(results1.multi_hand_landmarks,
frame1)
else:
# frame1_keypoints = [[-1, -1]] * 468
frame1_keypoints = [[-1, -1]] * 21
if results2.multi_hand_landmarks:
frame2_keypoints = get_frame_keypoints(results2.multi_hand_landmarks,
frame2)
else:
frame2_keypoints = [[-1, -1]] * 21
global_kps1.append(frame1_keypoints)
global_kps2.append(frame2_keypoints)
# print("Frame kp 1:\n", frame1_keypoints)
# print("Frame kp 2:\n", frame2_keypoints)
for points1, points2 in zip(frame1_keypoints, frame2_keypoints):
cv2.circle(frame1, points1, 1, (255, 0, 0), cv2.FILLED)
cv2.circle(frame2, points2, 1, (255, 0, 0), cv2.FILLED)
frames = Camera().stack_images(0.8, [[frame1, frame2]])
cv2.imshow('Check', frames)
if cv2.waitKey(1) & 0xFF == ord('q'):
cv2.destroyAllWindows()
camera1.release()
camera2.release()
save_data('data/glob1_kps.pickle', global_kps1)
save_data('data/glob2_kps.pickle', global_kps2)
break
# @ - матричное умножение
RT1 = np.concatenate([np.eye(3), [[0], [0], [0]]], axis=-1)
P1 = mtx1 @ RT1
RT2 = np.concatenate([R, T], axis=-1)
P2 = mtx2 @ RT2
p3ds = []
for uv1, uv2 in zip(frame1_keypoints, frame2_keypoints):
_p3d = DLT(P1, P2, uv1, uv2)
p3ds.append(_p3d)
p3ds = np.array(p3ds)
for _c in connections:
ax.plot(xs=[p3ds[_c[0], 0], p3ds[_c[1], 0]],
ys=[p3ds[_c[0], 1], p3ds[_c[1], 1]],
zs=[p3ds[_c[0], 2], p3ds[_c[1], 2]],
c='red')
ax.scatter(xs=[p3ds[:, 0], p3ds[:, 0]],
ys=[p3ds[:, 1], p3ds[:, 1]],
zs=[p3ds[:, 2], p3ds[:, 2]],
c='green')
# ax.set_axis_off()
# ax.set_xticks([])
# ax.set_yticks([])
# ax.set_zticks([])
plt.draw()
plt.pause(.001)
ax.clear()
# save_data('./data/glob1_kps.pickle', global_kps1)
# save_data('./data/glob2_kps.pickle', global_kps2)
# # ax.set_xlim3d(-14, -24)
# # ax.set_ylim3d(-5, 5)
# # ax.set_zlim3d(-500, 500)
#
#
# connections = [[0, 1], [1, 2], [2, 3], [3, 4],
# [0,5], [5,6], [6,7], [7,8],
# [5,9], [9,10], [10,11], [11,12],
# [9,13], [13,14], [14,15], [15,16],
# [13,17], [17,18], [18,19], [19,20], [17, 0]]
#
# for _c in connections:
# # print(p3ds[_c[0]])
# # print(p3ds[_c[1]])
# ax.plot(xs=[p3ds[_c[0], 0], p3ds[_c[1], 0]], ys=[p3ds[_c[0], 1], p3ds[_c[1], 1]],
# zs=[p3ds[_c[0], 2], p3ds[_c[1], 2]], c='red')
# ax.scatter(xs=[p3ds[_c[0], 0], p3ds[_c[1], 0]], ys=[p3ds[_c[0], 1], p3ds[_c[1], 1]],
# zs=[p3ds[_c[0], 2], p3ds[_c[1], 2]], c='green')
#
# def animate(i):
# print(i/360 * 100, "%")
# line = ax.view_init(210, i)
# return line
#
# import matplotlib.animation as animation
#
# # Создаем объект анимации:
# sin_animation = animation.FuncAnimation(fig,
# animate,
# frames=np.linspace(0, 360, 360),
# interval = 10,
# repeat = False)
#
# # Сохраняем анимацию в виде gif файла:
# sin_animation.save('моя анимация.gif',
# writer='imagemagick',
# fps=30)
# for angle in range(0, 360):
# ax.view_init(210, angle)
# plt.draw()
# plt.pause(.001)
# %%
| EvilFis/MultiCamVision | test_method.py | test_method.py | py | 16,755 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.use",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number":... |
2087116071 | from bson import ObjectId
import Usuario.search as buscarUsuario
import Produto.search as buscarProduto
from datetime import date
def inserir_compra(mydb):
compra = mydb.Compra
usuario = mydb.Usuario
lista_produtos = []
usuarios = buscarUsuario.userByID(mydb,ObjectId)
data_atual = date.today()
data_formatada = data_atual.strftime('%d/%m/%Y')
execucao = True
while execucao:
opcao = input(str("Deseja comprar um produto ? "))
if opcao.lower() == "sim":
produto = buscarProduto.produtoByID(mydb,ObjectId)
lista_produtos.append(produto)
else:
execucao = False
mydict = {
"data_compra":data_formatada,
'usuario':usuarios,
"produtos":lista_produtos
}
print(type(usuarios))
compra_id = compra.insert_one(mydict)
compra_realizada = compra.find_one({"_id":ObjectId(compra_id.inserted_id)})
usuario.update_one({"_id":ObjectId(usuarios["_id"])},{ "$push": { "compras":compra_realizada }})
print("\nCompra realizada com sucesso")
print(f'Id da compra {compra_id.inserted_id}') | Raniel-Santos/Banco-NoSQL-Python_MongoDB | Compra/insertCompra.py | insertCompra.py | py | 1,170 | python | pt | code | 1 | github-code | 6 | [
{
"api_name": "Usuario.search.userByID",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "bson.ObjectId",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "Usuario.search",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "datetime.dat... |
11898315364 | #!/usr/bin/env python3
""" basic Flask app """
from flask import Flask, render_template, request, g
from flask_babel import Babel
import pytz
app = Flask(__name__)
babel = Babel(app)
users = {
1: {"name": "Balou", "locale": "fr", "timezone": "Europe/Paris"},
2: {"name": "Beyonce", "locale": "en", "timezone": "US/Central"},
3: {"name": "Spock", "locale": "kg", "timezone": "Vulcan"},
4: {"name": "Teletubby", "locale": None, "timezone": "Europe/London"},
}
class Config(object):
""" Babel configuration class """
LANGUAGES = ["en", "fr"]
BABEL_DEFAULT_LOCALE = 'en'
BABEL_DEFAULT_TIMEZONE = 'UTC'
def get_user() -> dict:
""" returns a user dictionary or None """
user_logged = request.args.get('login_as')
if user_logged and int(user_logged) in users:
return users[int(user_logged)]
return None
@app.before_request
def before_request():
""" find a user if any """
user = get_user()
g.user = user
@babel.localeselector
def get_locale():
""" determine the best match """
locale = request.args.get('locale')
if locale and locale in Config.LANGUAGES:
return locale
if g.user:
locale = g.user.get('locale')
if locale and locale in Config.LANGUAGES:
return locale
locale = request.headers.get('locale')
if locale and locale in Config.LANGUAGES:
return locale
return request.accept_languages.best_match(Config.LANGUAGES)
@babel.timezoneselector
def get_timezone():
""" get timezone selector """
try:
if request.args.get('timezone'):
time_zone = request.args.get('timezone')
pytz.timezone(time_zone)
elif g.user and g.user.get('timezone'):
time_zone = g.user.get('timezone')
pytz.timezone(time_zone)
else:
time_zone = app.config["BABEL_DEFAULT_TIMEZONE"]
pytz.timezone(time_zone)
except pytz.exceptions.UnknownTimeZoneError:
time_zone = "UTC"
return time_zone
app.config.from_object(Config)
@app.route("/", methods=["GET"])
def index():
""" returns the index """
return render_template('7-index.html')
if __name__ == "__main__":
app.run(host="0.0.0.0", port="5000")
| jeanpierreba/holbertonschool-web_back_end | 0x0A-i18n/7-app.py | 7-app.py | py | 2,161 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask_babel.Babel",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "flask.request.arg... |
21321917983 | #! /usr/bin/env python3
import datetime
import AutoPrimer as ntp
import os
class Worker(object):
def __init__(self, name, command, options, channel, poster):
# date time stamp from scheduler
self.name = name
self.status = 'init' # init, running, done, expired
# which command this worker should execute
self.command = command
# what options to pipe to the command
self.options = options
self.channel = channel
self.poster = poster
# defines possible commands
self.comms = {
'find_primers' : self.find_primers,
'connection_status' : self.connection_status,
'hello' : self.hello
}
# all commands from comms should go into one of these two categories
self.message_commands = ['connection_status', 'hello']
self.action_commands = ['find_primers']
def start_message(self):
# start and stop message commands
if self.command in self.action_commands:
now = datetime.datetime.now()
mess = f"Starting {self.command} at " + now.strftime("%Y-%m-%d.%H-%M")
return mess
# start only message commands
if self.command in self.message_commands:
return self.comms[self.command]()
def run(self):
if self.command in self.action_commands:
self.comms[self.command]()
def done_message(self):
if self.command in self.action_commands:
now = datetime.datetime.now()
mess = f"Finshed {self.command} main at " + now.strftime("%Y-%m-%d.%H-%M")
common_dict[self.name]['status'] = 'closed'
self.status = 'expired'
return mess
#######
##### Commands
#######
def find_primers(self):
"""
Initiates AutoPrimer
"""
self.status = 'running'
if not self.options:
self.options = '/Volumes/i_bio/Crispr_F0_Screens/0-Genes_for_design/Genes_for_autoprimer'
find_folder = os.path.isdir('/Volumes/i_bio/Crispr_F0_Screens/0-Genes_for_design/Genes_for_autoprimer')
if find_folder:
ntp.submit_folder(self.options)
self.status = 'done'
else:
self.status = 'failed'
def connection_status(self):
"""
Checks the connection status to a given folder
"""
if self.options:
folder = self.options
find_folder = os.path.isdir(self.options)
else:
folder = '/Volumes/i_bio/Crispr_F0_Screens/0-Genes_for_design/Genes_for_autoprimer'
find_folder = os.path.isdir('/Volumes/i_bio/Crispr_F0_Screens/0-Genes_for_design/Genes_for_autoprimer')
if find_folder:
response = f"Connection established with {folder}. Everything looks good, ready to run"
else:
response = f"I can't find the {folder} - it is possible that the connection is bad"
self.status = 'expired'
return response
def hello(self):
"""
Replies to the user with a friendly hello message.
"""
response = f"Hello <@{self.poster}>! Looking forward to designing some primers for you."
self.status = 'expired'
return response | jcooper036/autoprimer | AutoPrimer/autobot/Worker.py | Worker.py | py | 3,361 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "da... |
2739075186 | """Given the html files for each of the language families,
build language tree for each and write them in json object
"""
from bs4 import BeautifulSoup
from tqdm import tqdm
import json
import sys
import os
link = 'html/indo-european.html'
link = 'html/mongolic.html'
link = 'html/bororoan.html'
def get_list(html):
if html.name == 'div' and 'item-list' in html.attrs['class']:
# Already inputted item list
item_list = html
else:
# Extract item list
result1 = html.find_all('div', {'class': 'item-list'}, recursive=False)
result2 = html.find_all('div', {'class': 'item-list'}, recursive=True)
if len(result1) == 1:
# Item list found in the top next level
item_list = result1[0]
elif len(result2) == 1:
# Item list not found in the first children level
# But there is only one list further on in the tree
item_list = result2[0]
ul = item_list.find_all('ul', recursive=False)
if len(ul) != 1:
# Failed
return
elements = ul[0].find_all('li', recursive=False)
return elements
def strip(html):
divs = html.find_all('div', recursive=False)
#if any('item-list' in tag.attrs['class'] for tag in divs):
if 'class' in html.attrs:
if 'first' in html.attrs['class'] or 'last' in html.attrs['class']:
name = html.find_next('a').text
elems = get_list(html)
elif 'lang-indent' in html.attrs['class']:
name = html.text
elems = None
else:
print(html)
assert False
else:
name = html.find_next('a').text
elems = get_list(html)
return name, elems
def unravel(tag):
name, elems = strip(tag)
if elems is not None:
return (name, [unravel(elem) for elem in elems])
else:
return (name, [])
def parse_file(path):
family = os.path.split(path)[-1].replace('.html', '')
with open(path, 'r') as f:
soup = BeautifulSoup(f.read(), 'html.parser')
root = soup.find_all('div', {"class": "views-field views-field-name-1"})
assert len(root) == 1, "Too many root candidates!"
root = root[0]
#top = root.find_parent().find_parent().find_parent().find_parent().find_next_sibling()
##blocks = top.find_all('li', {'class': 'first'})
top = root.find_next('div', {"class": "view-content"})
res = [unravel(el) for el in get_list(top)]
return {family: res}
def parse_all():
tree = {}
errcount = 0
for file in tqdm(os.listdir('html')):
if file == '.html':
continue
path = os.path.join('html', file)
try:
tree.update(parse_file(path))
except Exception as e:
print('ERROR IN', file)
errcount += 1
raise e
continue
print("Error count:", errcount)
return tree
if __name__ == '__main__':
res = parse_all()
with open('data/language_families.json', 'w') as f:
json.dump(res, f)
| ialsina/LangTree | parse_html.py | parse_html.py | py | 3,111 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.split",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_nu... |
30509139595 | """Add sessions
Revision ID: 821a722fb6c5
Revises: 371a1b269d3f
Create Date: 2017-05-04 14:38:19.372886
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '821a722fb6c5'
down_revision = '371a1b269d3f'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('session',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('token', sa.String(length=32), nullable=True),
sa.Column('expires', sa.DateTime(), nullable=True),
sa.Column('awaiting_mfa', sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('token')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('session')
# ### end Alembic commands ###
| UltrosBot/Ultros-site | migrations/versions/821a722fb6c5_add_sessions.py | 821a722fb6c5_add_sessions.py | py | 1,008 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
20793244215 | import numpy as np
from jax import numpy as jnp
from flax import struct
from flax.traverse_util import flatten_dict, unflatten_dict
from flax.core import Scope, lift, freeze, unfreeze
from commplax import comm, xcomm, xop, adaptive_filter as af
from commplax.util import wrapped_partial as wpartial
from typing import Any, NamedTuple, Iterable, Callable, Optional
Array = Any
# related: https://github.com/google/jax/issues/6853
@struct.dataclass
class SigTime:
start: int = struct.field(pytree_node=False)
stop: int = struct.field(pytree_node=False)
sps: int = struct.field(pytree_node=False)
class Signal(NamedTuple):
val: Array
t: Any = SigTime(0, 0, 2)
def taxis(self):
return self.t[0].shape[0], -self.t[0].shape[1]
def __mul__(self, other):
Signal._check_type(other)
return Signal(self.val * other, self.t)
def __add__(self, other):
Signal._check_type(other)
return Signal(self.val + other, self.t)
def __sub__(self, other):
Signal._check_type(other)
return Signal(self.val - other, self.t)
def __truediv__(self, other):
Signal._check_type(other)
return Signal(self.val / other, self.t)
def __floordiv__(self, other):
Signal._check_type(other)
return Signal(self.val // other, self.t)
def __imul__(self, other):
return self * other
def __iadd__(self, other):
return self + other
def __isub__(self, other):
return self - other
def __itruediv__(self, other):
return self / other
def __ifloordiv__(self, other):
return self // other
@classmethod
def _check_type(cls, other):
assert not isinstance(other, cls), 'not implemented'
def zeros(key, shape, dtype=jnp.float32): return jnp.zeros(shape, dtype)
def ones(key, shape, dtype=jnp.float32): return jnp.ones(shape, dtype)
def delta(key, shape, dtype=jnp.float32):
k1d = comm.delta(shape[0], dtype=dtype)
return jnp.tile(np.expand_dims(k1d, axis=list(range(1, len(shape)))), (1,) + shape[1:])
def gauss(key, shape, dtype=jnp.float32):
taps = shape[0]
k1d = comm.gauss(comm.gauss_minbw(taps), taps=taps, dtype=dtype)
return jnp.tile(np.expand_dims(k1d, axis=list(range(1, len(shape)))), (1,) + shape[1:])
def dict_replace(col, target, leaf_only=True):
col_flat = flatten_dict(unfreeze(col))
diff = {}
for keys_flat in col_flat.keys():
for tar_key, tar_val in target.items():
if (keys_flat[-1] == tar_key if leaf_only else (tar_key in keys_flat)):
diff[keys_flat] = tar_val
col_flat.update(diff)
col = unflatten_dict(col_flat)
return col
def update_varcolbykey(var, col_name, target, leaf_only=True):
wocol, col = var.pop(col_name)
col = dict_replace(col, target, leaf_only=leaf_only)
del var
return freeze({**wocol, col_name: col})
def update_aux(var, tar):
return update_varcolbykey(var, 'aux_inputs', tar, leaf_only=True)
def conv1d_t(t, taps, rtap, stride, mode):
assert t.sps >= stride, f'sps of input SigTime must be >= stride: {stride}, got {t.sps} instead'
if rtap is None:
rtap = (taps - 1) // 2
delay = -(-(rtap + 1) // stride) - 1
if mode == 'full':
tslice = (-delay * stride, taps - stride * (rtap + 1)) #TODO: think more about this
elif mode == 'same':
tslice = (0, 0)
elif mode == 'valid':
tslice = (delay * stride, (delay + 1) * stride - taps)
else:
raise ValueError('invalid mode {}'.format(mode))
return SigTime((t.start + tslice[0]) // stride, (t.stop + tslice[1]) // stride, t.sps // stride)
def conv1d_slicer(taps, rtap=None, stride=1, mode='valid'):
def slicer(signal):
x, xt = signal
yt = conv1d_t(xt, taps, rtap, stride, mode)
D = xt.sps // yt.sps
zt = SigTime(yt.start * D, yt.stop * D, xt.sps)
x = x[zt.start - xt.start: x.shape[0] + zt.stop - xt.stop]
return Signal(x, zt)
return slicer
def fullsigval(inputs: Signal, fill_value=1):
x, t = inputs
full_shape = (x.shape[0] + t.start - t.stop,) + x.shape[1:]
return jnp.full(full_shape, fill_value, dtype=x.dtype)
def vmap(f,
variable_axes={
'params': -1,
'const': None
},
split_rngs={
'params': True,
},
in_axes=(Signal(-1, None),), out_axes=Signal(-1, None)):
# in_axes needs to be wrapped by a tuple, see Flax's lifted vmap implemetation:
# https://github.com/google/flax/blob/82e9798274c927286878c4600b4b09650d1e7935/flax/core/lift.py#L395
vf = lift.vmap(f,
variable_axes=variable_axes, split_rngs=split_rngs,
in_axes=in_axes, out_axes=out_axes)
vf.__name__ = 'vmapped_' + f.__name__ # [Workaround]: lifted transformation does not keep the original name
return vf
def scan(f, in_axes=0, out_axes=0):
sf = lift.scan(f, in_axes=in_axes, out_axes=out_axes)
sf.__name__ = 'scanned' + f.__name__
return sf
def simplefn(scope, signal, fn=None, aux_inputs=None):
assert fn is not None, 'simple function cannot be None'
aux = ()
if aux_inputs is not None:
aux_name, aux_init = aux_inputs
aux += scope.variable('aux_inputs', aux_name, aux_init, signal).value,
return fn(signal, *aux)
def batchpowernorm(scope, signal, momentum=0.999, mode='train'):
running_mean = scope.variable('norm', 'running_mean',
lambda *_: 0. + jnp.ones(signal.val.shape[-1]), ())
if mode == 'train':
mean = jnp.mean(jnp.abs(signal.val)**2, axis=0)
running_mean.value = momentum * running_mean.value + (1 - momentum) * mean
else:
mean = running_mean.value
return signal / jnp.sqrt(mean)
def conv1d(
scope: Scope,
signal,
taps=31,
rtap=None,
mode='valid',
kernel_init=delta,
conv_fn = xop.convolve):
x, t = signal
t = scope.variable('const', 't', conv1d_t, t, taps, rtap, 1, mode).value
h = scope.param('kernel',
kernel_init,
(taps,), np.complex64)
x = conv_fn(x, h, mode=mode)
return Signal(x, t)
def mimoconv1d(
scope: Scope,
signal,
taps=31,
rtap=None,
dims=2,
mode='valid',
kernel_init=zeros,
conv_fn=xop.convolve):
x, t = signal
t = scope.variable('const', 't', conv1d_t, t, taps, rtap, 1, mode).value
h = scope.param('kernel', kernel_init, (taps, dims, dims), np.float32)
y = xcomm.mimoconv(x, h, mode=mode, conv=conv_fn)
return Signal(y, t)
def mimofoeaf(scope: Scope,
signal,
framesize=100,
w0=0,
train=False,
preslicer=lambda x: x,
foekwargs={},
mimofn=af.rde,
mimokwargs={},
mimoinitargs={}):
sps = 2
dims = 2
tx = signal.t
# MIMO
slisig = preslicer(signal)
auxsig = scope.child(mimoaf,
mimofn=mimofn,
train=train,
mimokwargs=mimokwargs,
mimoinitargs=mimoinitargs,
name='MIMO4FOE')(slisig)
y, ty = auxsig # assume y is continuous in time
yf = xop.frame(y, framesize, framesize)
foe_init, foe_update, _ = af.array(af.frame_cpr_kf, dims)(**foekwargs)
state = scope.variable('af_state', 'framefoeaf',
lambda *_: (0., 0, foe_init(w0)), ())
phi, af_step, af_stats = state.value
af_step, (af_stats, (wf, _)) = af.iterate(foe_update, af_step, af_stats, yf)
wp = wf.reshape((-1, dims)).mean(axis=-1)
w = jnp.interp(jnp.arange(y.shape[0] * sps) / sps,
jnp.arange(wp.shape[0]) * framesize + (framesize - 1) / 2, wp) / sps
psi = phi + jnp.cumsum(w)
state.value = (psi[-1], af_step, af_stats)
# apply FOE to original input signal via linear extrapolation
psi_ext = jnp.concatenate([w[0] * jnp.arange(tx.start - ty.start * sps, 0) + phi,
psi,
w[-1] * jnp.arange(tx.stop - ty.stop * sps) + psi[-1]])
signal = signal * jnp.exp(-1j * psi_ext)[:, None]
return signal
def mimoaf(
scope: Scope,
signal,
taps=32,
rtap=None,
dims=2,
sps=2,
train=False,
mimofn=af.ddlms,
mimokwargs={},
mimoinitargs={}):
x, t = signal
t = scope.variable('const', 't', conv1d_t, t, taps, rtap, 2, 'valid').value
x = xop.frame(x, taps, sps)
mimo_init, mimo_update, mimo_apply = mimofn(train=train, **mimokwargs)
state = scope.variable('af_state', 'mimoaf',
lambda *_: (0, mimo_init(dims=dims, taps=taps, **mimoinitargs)), ())
truth_var = scope.variable('aux_inputs', 'truth',
lambda *_: None, ())
truth = truth_var.value
if truth is not None:
truth = truth[t.start: truth.shape[0] + t.stop]
af_step, af_stats = state.value
af_step, (af_stats, (af_weights, _)) = af.iterate(mimo_update, af_step, af_stats, x, truth)
y = mimo_apply(af_weights, x)
state.value = (af_step, af_stats)
return Signal(y, t)
def fdbp(
scope: Scope,
signal,
steps=3,
dtaps=261,
ntaps=41,
sps=2,
d_init=delta,
n_init=gauss):
x, t = signal
dconv = vmap(wpartial(conv1d, taps=dtaps, kernel_init=d_init))
for i in range(steps):
x, td = scope.child(dconv, name='DConv_%d' % i)(Signal(x, t))
c, t = scope.child(mimoconv1d, name='NConv_%d' % i)(Signal(jnp.abs(x)**2, td),
taps=ntaps,
kernel_init=n_init)
x = jnp.exp(1j * c) * x[t.start - td.start: t.stop - td.stop + x.shape[0]]
return Signal(x, t)
def identity(scope, inputs):
return inputs
def fanout(scope, inputs, num):
return (inputs,) * num
# compositors
def serial(*fs):
def _serial(scope, inputs, **kwargs):
for f in fs:
if isinstance(f, tuple) or isinstance(f, list):
name, f = f
else:
name = None
inputs = scope.child(f, name=name)(inputs, **kwargs)
return inputs
return _serial
def parallel(*fs):
def _parallel(scope, inputs, **kwargs):
outputs = []
for f, inp in zip(fs, inputs):
if isinstance(f, tuple) or isinstance(f, list):
name, f = f
else:
name = None
out = scope.child(f, name=name)(inp, **kwargs)
outputs.append(out)
return outputs
return _parallel
| remifan/commplax | commplax/module/core.py | core.py | py | 10,764 | python | en | code | 49 | github-code | 6 | [
{
"api_name": "typing.Any",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "flax.struct.field",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flax.struct",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "flax.struct.field",
"lin... |
72498342269 |
from nltk.corpus import wordnet as wn
from nltk.corpus.reader.wordnet import WordNetError
from numpy import dot
from numpy.linalg import norm
import numpy as np
import pdb
class BaseModel:
def __init__(self, subject, predicate, _object):
#subjectFamily.getBaseRanking()[0], predicateFamily.getBaseRanking()[0], objectFamily.getBaseRanking()[0]
self.natural_model = (subject.word,predicate.word,_object.word)
self.model = (subject.getBaseRanking()[0], predicate.getBaseRanking()[0],_object.getBaseRanking()[0])
self.subject = wn.synset(self.model[0])
self.predicate = wn.synset(self.model[1])
self.object = wn.synset(self.model[2])
self.subjSim = 1.0
self.objSim = 1.0
self.predSim = 1.0
def synCompare(self,syn1,syn2):
return syn1[-4:-3] == syn2[-4:-3]
def rank(self,relation, embedder = None,w2vModel = None):
subject = wn.synset(relation[0])
predicate = wn.synset(relation[1])
_object = wn.synset(relation[2])
subjSimilarity, objSimilarity,predSimilarity = 1.,1.,1.
zero_v = np.zeros(shape=(300,))
#so we will adjust this now to perform cos_sim. For this, we need the dictionary...
if self.synCompare(self.model[0],relation[0]):
if embedder is not None and w2vModel is not None:
#perform cos_sim between self.natural_model <-- nlp and subject <-- wn
src=embedder[str(subject)[8:-2]] if str(subject)[8:-2] in embedder else zero_v
tgt=w2vModel[self.natural_model[0]] if self.natural_model[0] in w2vModel else zero_v
try:
subjSimilarity = (1.0-self.cos_sim(src,tgt))/2.
subjSimilarity = subjSimilarity[0]
except RuntimeWarning:
pass
else:
subjSimilarity = self.subject.lch_similarity(subject)
if self.synCompare(self.model[2],relation[2]):
if embedder is not None and w2vModel is not None:
#perform cos_sim between self.natural_model <-- nlp and subject <-- wn
src=embedder[str(_object)[8:-2]] if str(_object)[8:-2] in embedder else zero_v
tgt=w2vModel[self.natural_model[2]] if self.natural_model[2] in w2vModel else zero_v
try:
objSimilarity = (1.0-self.cos_sim(src,tgt))/2.
objSimilarity=objSimilarity[0]
except RuntimeWarning:
pass
else:
objSimilarity = self.object.lch_similarity(_object)
if self.synCompare(self.model[1],relation[1]):
if embedder is not None and w2vModel is not None:
#perform cos_sim between self.natural_model <-- nlp and subject <-- wn
src=embedder[str(predicate)[8:-2]] if str(predicate)[8:-2] in embedder else zero_v
tgt=w2vModel[self.natural_model[1]] if self.natural_model[1] in w2vModel else zero_v
try:
predSimilarity = (1.0-self.cos_sim(src,tgt))/2.
predSimilarity=predSimilarity[0]
except RuntimeWarning:
pass
else:
predSimilarity = self.predicate.lch_similarity(predicate)
if not predSimilarity:
predSimilarity=1.0
self.subjSim = subjSimilarity
self.objSim = objSimilarity
self.predSim = predSimilarity
self.netSim = (self.subjSim+self.predSim+self.objSim)/3.0
return (self.subjSim,self.objSim,self.predSim,self.netSim)
def getModel(self):
return self.model
def cos_sim(self,a,b):
return dot(a, b)/(norm(a)*norm(b)) | asuprem/imag-s | utils/baseModel.py | baseModel.py | py | 3,739 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "nltk.corpus.wordnet.synset",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.wordnet",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "nltk.corpus.wordnet.synset",
"line_number": 15,
"usage_type": "call"
},
{
"api_nam... |
74800916026 | import openai
import uvicorn
from fastapi import FastAPI, Request, Form
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import CSVLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.vectorstores import DocArrayInMemorySearch
import os
import datetime
import random
#import IPython.display
from PIL import Image
import base64
import requests
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import openai
import tkinter as tk
from langchain.llms import OpenAI
from langchain.document_loaders import (
DataFrameLoader,
TextLoader,
PyPDFLoader
)
from langchain.text_splitter import (
RecursiveCharacterTextSplitter,
CharacterTextSplitter
)
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import (
DocArrayInMemorySearch,
Chroma
)
from langchain.chains import (
RetrievalQA,
ConversationalRetrievalChain
)
from langchain.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import RetrievalQA
#from dotenv import load_dotenv, find_dotenv
#from dotenv import load_dotenv, find_dotenv
# Set your OpenAI API key here
def set_openai_api_key(api_key):
openai.api_key = api_key
# function to convert data and load it into panda format
# load data and preprocess it
def squad_json_to_dataframe(file_path, record_path=['data','paragraphs','qas','answers']):
"""
input_file_path: path to the squad json file.
record_path: path to deepest level in json file default value is
['data','paragraphs','qas','answers']
"""
file = json.loads(open(file_path).read())
# parsing different level's in the json file
js = pd.json_normalize(file, record_path)
m = pd.json_normalize(file, record_path[:-1])
r = pd.json_normalize(file,record_path[:-2])
# combining it into single dataframe
idx = np.repeat(r['context'].values, r.qas.str.len())
m['context'] = idx
data = m[['id','question','context','answers']].set_index('id').reset_index()
data['c_id'] = data['context'].factorize()[0]
return data
def preprocess(data):
data['answers'] = data['answers'].apply(lambda x: x[0]['text'] if x else None)
# create a new data structure combine questions and answers
# add $ at then end so its going to be easier to chunking later
data['qa'] = data['question'] +data['answers']+'$'
return data
def data_loader(data):
# load the dataframe into loader
# context
loader = DataFrameLoader(data, page_content_column="qa")
doc = loader.load()
doc = doc[:6000]
return doc
def create_text_splits(doc):
# splitting text into the specific chunck sizes
# defining the overlap size for each chunck
#from langchain.text_splitter import CharacterTextSplitter
text_splitter = CharacterTextSplitter(
separator = "$",
chunk_size = 125,
chunk_overlap = 20,
length_function = len,
is_separator_regex = False,
)
splits = text_splitter.split_documents(doc)
return splits
def initialize_openai_embeddings():
embedding = OpenAIEmbeddings(request_timeout=60)
return embedding
def get_gpt_model():
# get the specific gpt model
current_date = datetime.datetime.now().date()
if current_date < datetime.date(2023, 9, 2):
llm_name = "gpt-3.5-turbo-0301"
else:
llm_name = "gpt-3.5-turbo"
print(llm_name)
return llm_name
def create_docarray_in_memory_search(data, embedding):
db = DocArrayInMemorySearch.from_documents(data, embedding)
return db
def create_vectordb(splits, embedding):
vectordb = Chroma.from_documents(
documents=splits,
embedding=embedding,
)
# EXAMPLES:
#question = "What are major topics for this class?"
#docs = vectordb.similarity_search(question,k=4)
#print(docs[0].metadata['answers'])
return vectordb
def initialize_llm_chatbot(llm_name, temperature=0):
# create chatbot
llm = ChatOpenAI(model_name=llm_name, temperature=temperature)
# define chatbot memory
memory = ConversationBufferMemory(
memory_key="chat_history",
return_messages=True
)
return llm, memory
def create_prompt_template(input_variables):
# Build prompt
template = """
start by greeting to the Stanfor chatbot.\n
if user say hi/hello respond like hello and welcome to the stanford chatbot, how can i assist you today?\n
try to ask the user Name, and remember it and when you respons back say the user Name as well.\n
Also, try to memorize the converstation, and act like you are a human and responding.\n
You are like an QA agent that you suppose to answer the question that you know.\n
You will always gretting every one at the beging, also you can ask for their name so you will respond back with their name to be more polit.\n
Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Keep the answer as concise as possible.\n
Also, if you answered any question say something like "Do you have any other question that I can help with?".\n
If the person says no, thank you orI don't have any furthur questions, or any similar sentence to it. just say something like: bye, I am always here to help you with any questions that you may have.\n
{context}\n
Question: {question}
Helpful Answer:"""
QA_CHAIN_PROMPT = PromptTemplate(input_variables=input_variables,template=template,)
return QA_CHAIN_PROMPT
def initialize_qa_chain(llm, vectordb, QA_CHAIN_PROMPT):
# Run chain
#retriever = db.as_retriever(search_type="similarity_score_threshold", search_kwargs={"score_threshold": .5})
qa_chain = RetrievalQA.from_chain_type(llm,
retriever=vectordb.as_retriever(search_type="similarity_score_threshold", search_kwargs={"score_threshold": .5}),
return_source_documents=True,
chain_type_kwargs={"prompt": QA_CHAIN_PROMPT})
qa = ConversationalRetrievalChain.from_llm(
llm,
retriever=vectordb.as_retriever(search_type="similarity_score_threshold", search_kwargs={"score_threshold": .3}),
memory=memory,
combine_docs_chain_kwargs={"prompt": QA_CHAIN_PROMPT}
)
# Examples
# test topics
#question = "Is probability a class topic?"
#result = qa_chain({"query": question})
#result["result"]
return qa_chain, qa
# Set your OpenAI API key here
set_openai_api_key("sk-xxxxxxxxxxxxx")
data_df = squad_json_to_dataframe("data/train-v1.1.json") # convert json to dataframe
data_df = preprocess(data_df)
data_loader = data_loader(data_df)
splits = create_text_splits(data_loader)
embedding = initialize_openai_embeddings()
llm_name = get_gpt_model()
db = create_docarray_in_memory_search(data_loader, embedding)
vectordb = create_vectordb(splits, embedding)
llm, memory = initialize_llm_chatbot(llm_name, temperature=0)
QA_CHAIN_PROMPT = create_prompt_template(["context", "question"])
qa_chain, qa = initialize_qa_chain(llm, vectordb, QA_CHAIN_PROMPT)
def get_bot_response(user_message):
result = qa({"question": user_message})
response = result["answer"]
#result = qa_chain({"query": user_message})
#response = result["result"]
return str(response)
| carson-edmonds/AAI-520-Chatbot-Project | openai_fastapi/llm.py | llm.py | py | 7,658 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "openai.api_key",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "pandas.json_normalize",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "pandas.json_norm... |
8385121611 | from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import optparse
import collections
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
import sumolib # noqa
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
def get_options(args=None):
optParser = optparse.OptionParser()
optParser.add_option("-n", "--net-file", dest="netfile",
help="define the net file (mandatory)")
optParser.add_option("-o", "--output-file", dest="outfile",
default="tlsAdaptation.add.xml", help="define the output filename")
optParser.add_option("-r", "--route-files", dest="routefiles",
help="define the route file seperated by comma(mandatory)")
optParser.add_option("-b", "--begin", dest="begin", type="int",
default=0, help="begin time of the optimization period with unit second")
optParser.add_option("-y", "--yellow-time", dest="yellowtime", type="int",
default=4, help="yellow time")
optParser.add_option("-a", "--all-red", dest="allred", type="int",
default=0, help="all-red time")
optParser.add_option("-l", "--lost-time", dest="losttime", type="int",
default=4, help="lost time for start-up and clearance in each phase")
optParser.add_option("-g", "--min-green", dest="mingreen", type="int",
default=4, help=" minimal green time when there is no traffic volume")
optParser.add_option("--green-filter-time", dest="greenFilter", type="int", default=0,
help="when computing critical flows, do not count phases with a green time below INT")
optParser.add_option("-c", "--min-cycle", dest="mincycle", type="int",
default=20, help="minimal cycle length")
optParser.add_option("-C", "--max-cycle", dest="maxcycle", type="int",
default=120, help="maximal cycle length")
optParser.add_option("-e", "--existing-cycle", dest="existcycle", action="store_true",
default=False, help="use the existing cycle length")
optParser.add_option("--write-critical-flows", dest="write_critical_flows", action="store_true",
default=False, help="print critical flows for each tls and phase")
optParser.add_option("-p", "--program", dest="program", default="a",
help="save new definitions with this program id")
optParser.add_option("-H", "--saturation-headway", dest="satheadway", type="float", default=2,
help="saturation headway in seconds for calculating hourly saturation flows")
optParser.add_option("-R", "--restrict-cyclelength", dest="restrict", action="store_true",
default=False, help="restrict the max. cycle length as the given one")
optParser.add_option("-u", "--unified-cycle", dest="unicycle", action="store_true", default=False,
help=" use the calculated max cycle length as the cycle length for all intersections")
optParser.add_option("-v", "--verbose", dest="verbose", action="store_true",
default=False, help="tell me what you are doing")
(options, args) = optParser.parse_args(args=args)
if not options.netfile or not options.routefiles:
optParser.print_help()
sys.exit()
return options
def getFlows(net, routeFiles, tlsList, begin, verbose):
tlsFlowsMap = {}
end = begin + 3600
for tls in tlsList:
tlsFlowsMap[tls._id] = collections.defaultdict(lambda: collections.defaultdict(int))
for file in routeFiles.split(','):
if verbose:
print("route file:%s" % file)
for veh in sumolib.output.parse(file, 'vehicle'):
if float(veh.depart) >= end:
break
if float(veh.depart) >= begin:
edgeList = veh.route[0].edges.split()
for tls in tlsList:
# c: [[inLane, outLane, linkNo],[],..]
for c in tls.getConnections():
inEdge = c[0].getEdge().getID()
outEdge = c[1].getEdge().getID()
if inEdge in edgeList:
beginIndex = edgeList.index(inEdge)
if beginIndex < len(edgeList) - 1 and edgeList[beginIndex + 1] == outEdge:
pce = 1.
if veh.type == "bicycle":
pce = 0.2
elif veh.type in ["moped", "motorcycle"]:
pce = 0.5
elif veh.type in ["truck", "trailer", "bus", "coach"]:
pce = 3.5
tlsFlowsMap[tls._id][inEdge + " " + outEdge][c[2]] += pce
# remove the doubled counts
connFlowsMap = {}
for t in tlsList:
connFlowsMap[t.getID()] = {}
for subRoute in tlsFlowsMap[t.getID()]:
totalConns = len(tlsFlowsMap[t.getID()][subRoute])
for conn in tlsFlowsMap[t.getID()][subRoute]:
tlsFlowsMap[t.getID()][subRoute][conn] /= totalConns
connFlowsMap[t.getID()][conn] = tlsFlowsMap[t.getID()][subRoute][conn]
# remove the redundant connection flows
connFlowsMap = removeRedundantFlows(t, connFlowsMap)
return connFlowsMap
def getEffectiveTlsList(tlsList, connFlowsMap, verbose):
effectiveTlsList = []
for tl in tlsList:
if len(tl.getPrograms()) == 0:
continue
valid = True
for program in tl.getPrograms().values():
for phase in program.getPhases():
if len(phase.state) > len(tl.getConnections()):
print("Skipping TLS '%s' due to unused states (%s states, %s connections)" % (
tl.getID(), len(phase.state), len(tl.getConnections())))
valid = False
break
if valid:
for conn in connFlowsMap[tl.getID()]:
if connFlowsMap[tl.getID()][conn] > 0:
effectiveTlsList.append(tl)
break
return effectiveTlsList
def removeRedundantFlows(t, connFlowsMap):
# if two or more intersections share the lane-lane connection indices together,
# the redundant connection flows will set to zero.
connsList = t.getConnections()
connsList = sorted(connsList, key=lambda connsList: connsList[2])
redundantConnsList = []
identical = True
for c1 in connsList:
for c2 in connsList:
if c1[2] != c2[2]:
if c1[1]._edge == c2[0]._edge:
identical = identityCheck(c1[0]._edge, c2[0]._edge._incoming, identical)
if identical:
for toEdge in c2[0]._edge._outgoing:
for c in c2[0]._edge._outgoing[toEdge]:
if c._tlLink not in redundantConnsList:
redundantConnsList.append(c._tlLink)
else:
for conn_1 in c1[0]._edge._outgoing[c2[0]._edge]:
if conn_1._direction == 's':
for toEdge in c2[0]._edge._outgoing:
for conn_2 in c2[0]._edge._outgoing[toEdge]:
if conn_2._tlLink not in redundantConnsList:
redundantConnsList.append(conn_2._tlLink)
for conn in redundantConnsList:
if conn in connFlowsMap[t._id]:
connFlowsMap[t._id][conn] = 0.
return connFlowsMap
def identityCheck(e1, incomingLinks, identical):
for i in incomingLinks:
if i != e1:
identical = False
break
return identical
def getLaneGroupFlows(tl, connFlowsMap, phases, greenFilter):
connsList = tl.getConnections()
groupFlowsMap = {} # i(phase): duration, laneGroup1, laneGroup2, ...
connsList = sorted(connsList, key=lambda connsList: connsList[2])
# check if there are shared lane groups, i.e. some lane groups have only "g" (no "G")
ownGreenConnsList = []
for i, p in enumerate(phases):
for j, control in enumerate(p.state):
if control == "G" and j not in ownGreenConnsList:
ownGreenConnsList.append(j)
yellowRedTime = 0
greenTime = 0
currentLength = 0
phaseLaneIndexMap = collections.defaultdict(list)
for i, p in enumerate(phases):
currentLength += p.duration
if 'G' in p.state and 'y' not in p.state and p.duration >= greenFilter:
greenTime += p.duration
groupFlowsMap[i] = [p.duration]
groupFlows = 0
laneIndexList = []
for j, control in enumerate(p.state):
inEdge = connsList[j][0]._edge._id
if j == 0:
exEdge = inEdge
if (inEdge == exEdge and control == 'G') or (inEdge == exEdge and
control == 'g' and j not in ownGreenConnsList):
if j in connFlowsMap[tl._id]:
groupFlows += connFlowsMap[tl._id][j]
if connsList[j][0].getIndex() not in laneIndexList:
laneIndexList.append(connsList[j][0].getIndex())
if exEdge != inEdge or j == len(p.state) - 1:
if laneIndexList:
phaseLaneIndexMap[i].append(laneIndexList)
groupFlowsMap[i].append(groupFlows)
laneIndexList = []
groupFlows = 0
if control == "G":
if j in connFlowsMap[tl._id]:
groupFlows = connFlowsMap[tl._id][j]
if connsList[j][0].getIndex() not in laneIndexList:
laneIndexList.append(connsList[j][0].getIndex())
exEdge = inEdge
elif 'G' not in p.state and 'g' in p.state and 'y' not in p.state and 'r' not in p.state:
print("Check: only g for all connections:%s in phase %s" % (tl._id, i))
elif ('G' not in p.state and 'g' not in p.state) or ('G' not in p.state and 'y' in p.state and 'r' in p.state):
yellowRedTime += int(p.duration)
if options.verbose and i in groupFlowsMap:
print("phase: %s" % i)
print("group flows: %s" % groupFlowsMap[i])
print("The used lanes: %s" % phaseLaneIndexMap[i])
if options.verbose:
print("the current cycle length:%s sec" % currentLength)
return groupFlowsMap, phaseLaneIndexMap, currentLength
def getMaxOptimizedCycle(groupFlowsMap, phaseLaneIndexMap, currentLength, cycleList, options):
lostTime = len(groupFlowsMap) * options.losttime + options.allred
satFlows = 3600. / options.satheadway
# calculate the critical flow ratios and the respective sum
criticalFlowRateMap = {}
for i in groupFlowsMap: # [duration. groupFlow1, groupFlow2...]
criticalFlowRateMap[i] = 0.
maxFlow = 0
index = None
if len(groupFlowsMap[i][1:]) > 0:
for j, f in enumerate(groupFlowsMap[i][1:]):
if f >= maxFlow:
maxFlow = f
index = j
criticalFlowRateMap[i] = (maxFlow / float((len(phaseLaneIndexMap[i][index])))) / satFlows
else:
criticalFlowRateMap[i] = 0.
sumCriticalFlows = sum(criticalFlowRateMap.values())
if options.existcycle:
optCycle = currentLength
elif sumCriticalFlows >= 1.:
optCycle = options.maxcycle
if options.verbose:
print("Warning: the sum of the critical flows >= 1:%s" % sumCriticalFlows)
else:
optCycle = int(round((1.5 * lostTime + 5.) / (1. - sumCriticalFlows)))
if not options.existcycle and optCycle < options.mincycle:
optCycle = options.mincycle
elif not options.existcycle and optCycle > options.maxcycle:
optCycle = options.maxcycle
cycleList.append(optCycle)
return cycleList
def optimizeGreenTime(tl, groupFlowsMap, phaseLaneIndexMap, currentLength, options):
lostTime = len(groupFlowsMap) * options.losttime + options.allred
satFlows = 3600. / options.satheadway
# calculate the critical flow ratios and the respective sum
criticalFlowRateMap = {}
for i in groupFlowsMap: # [duration. groupFlow1, groupFlow2...]
criticalFlowRateMap[i] = 0.
maxFlow = 0
index = None
if len(groupFlowsMap[i][1:]) > 0:
for j, f in enumerate(groupFlowsMap[i][1:]):
if f >= maxFlow:
maxFlow = f
index = j
criticalFlowRateMap[i] = (maxFlow / float((len(phaseLaneIndexMap[i][index])))) / satFlows
else:
criticalFlowRateMap[i] = 0.
sumCriticalFlows = sum(criticalFlowRateMap.values())
if options.write_critical_flows:
print(tl.getID(), criticalFlowRateMap)
if options.existcycle:
optCycle = currentLength
elif sumCriticalFlows >= 1.:
optCycle = options.maxcycle
if options.verbose:
print("Warning: the sum of the critical flows >= 1:%s" % sumCriticalFlows)
else:
optCycle = int(round((1.5 * lostTime + 5.) / (1. - sumCriticalFlows)))
if not options.existcycle and optCycle < options.mincycle:
optCycle = options.mincycle
elif not options.existcycle and optCycle > options.maxcycle:
optCycle = options.maxcycle
# calculate the green time for each critical group
effGreenTime = optCycle - lostTime
totalLength = lostTime
minGreenPhasesList = []
adjustGreenTimes = 0
totalGreenTimes = 0
subtotalGreenTimes = 0
for i in criticalFlowRateMap:
groupFlowsMap[i][0] = effGreenTime * \
(criticalFlowRateMap[i] / sum(criticalFlowRateMap.values())) - options.yellowtime + options.losttime
groupFlowsMap[i][0] = int(round(groupFlowsMap[i][0]))
totalGreenTimes += groupFlowsMap[i][0]
if groupFlowsMap[i][0] < options.mingreen:
groupFlowsMap[i][0] = options.mingreen
minGreenPhasesList.append(i)
else:
subtotalGreenTimes += groupFlowsMap[i][0]
totalLength += groupFlowsMap[i][0]
# adjust the green times if minimal green times are applied for keeping the defined maximal cycle length.
if minGreenPhasesList and totalLength > options.maxcycle and options.restrict:
if options.verbose:
print("Re-allocate the green splits!")
adjustGreenTimes = totalGreenTimes - len(minGreenPhasesList) * options.mingreen
for i in groupFlowsMap:
if i not in minGreenPhasesList:
groupFlowsMap[i][0] = int((groupFlowsMap[i][0] / float(subtotalGreenTimes)) * adjustGreenTimes)
if options.verbose:
totalLength = lostTime
for i in groupFlowsMap:
totalLength += groupFlowsMap[i][0]
print("Green time for phase %s: %s" % (i, groupFlowsMap[i][0]))
print("the optimal cycle length:%s" % totalLength)
return groupFlowsMap
def main(options):
net = sumolib.net.readNet(options.netfile, withPrograms=True, withPedestrianConnections=True)
tlsList = net.getTrafficLights()
if options.verbose:
print("the total number of tls: %s" % len(tlsList))
print("Begin time:%s" % options.begin)
# get traffic flows for each connection at each TL
connFlowsMap = getFlows(net, options.routefiles, tlsList, options.begin, options.verbose)
# remove the tls where no traffic volumes exist
effectiveTlsList = getEffectiveTlsList(tlsList, connFlowsMap, options.verbose)
with open(options.outfile, 'w') as outf:
outf.write('<?xml version="1.0" encoding="UTF-8"?>\n')
outf.write('<additional>\n')
if len(effectiveTlsList) > 0:
if options.unicycle:
cycleList = []
if options.verbose:
print("Firstly only calculate the maximal optimized cycle length! ")
for tl in effectiveTlsList:
if options.verbose:
print("tl-logic ID: %s" % tl._id)
programs = tl.getPrograms()
for pro in programs:
phases = programs[pro].getPhases()
# get the connection flows and group flows
groupFlowsMap, phaseLaneIndexMap, currentLength = getLaneGroupFlows(tl, connFlowsMap, phases, 0)
# only optimize the cycle length
cycleList = getMaxOptimizedCycle(groupFlowsMap, phaseLaneIndexMap,
currentLength, cycleList, options)
options.maxcycle = max(cycleList)
options.mincycle = max(cycleList)
options.restrict = True
if options.verbose:
print("The maximal optimized cycle length is %s." % max(cycleList))
print(" It will be used for calculating the green splits for all intersections.")
# calculate the green splits; the optimal length will be also calculate if options.unicycle is set as false.
for tl in effectiveTlsList:
if options.verbose:
print("tl-logic ID: %s" % tl._id)
programs = tl.getPrograms()
for pro in programs:
phases = programs[pro].getPhases()
# get the connection flows and group flows
groupFlowsMap, phaseLaneIndexMap, currentLength = getLaneGroupFlows(
tl, connFlowsMap, phases, options.greenFilter)
# optimize the cycle length and calculate the respective green splits
groupFlowsMap = optimizeGreenTime(tl, groupFlowsMap, phaseLaneIndexMap, currentLength, options)
# write output
outf.write(' <tlLogic id="%s" type="%s" programID="%s" offset="%i">\n' %
(tl._id, programs[pro]._type, options.program, programs[pro]._offset))
phases = programs[pro].getPhases()
for i, p in enumerate(phases):
duration = p.duration
if i in groupFlowsMap:
duration = groupFlowsMap[i][0]
outf.write(' <phase duration="%s" state="%s"/>\n' % (duration, p.state))
outf.write(' </tlLogic>\n')
else:
print("There are no flows at the given intersections. No green time optimization is done.")
outf.write('</additional>\n')
if __name__ == "__main__":
options = get_options(sys.argv)
main(options)
| ngctnnnn/DRL_Traffic-Signal-Control | sumo-rl/sumo/tools/tlsCycleAdaptation.py | tlsCycleAdaptation.py | py | 19,264 | python | en | code | 17 | github-code | 6 | [
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_numbe... |
10418282793 | from __future__ import annotations
import platform
import dolphin_memory_engine
import pid
from randovania.game_connection.executor.memory_operation import (
MemoryOperation,
MemoryOperationException,
MemoryOperationExecutor,
)
MEM1_START = 0x80000000
MEM1_END = 0x81800000
def _validate_range(address: int, size: int):
if address < MEM1_START or address + size > MEM1_END:
raise MemoryOperationException(
f"Range {address:x} -> {address + size:x} is outside of the GameCube memory range."
)
class DolphinExecutor(MemoryOperationExecutor):
def __init__(self):
super().__init__()
self.dolphin = dolphin_memory_engine
self._pid = pid.PidFile("randovania-dolphin-backend")
@property
def lock_identifier(self) -> str | None:
return "randovania-dolphin-backend"
async def connect(self) -> str | None:
if platform.system() == "Darwin":
return "macOS is not supported"
if not self.dolphin.is_hooked():
self.dolphin.hook()
if not self.dolphin.is_hooked():
return "Unable to connect to Dolphin"
try:
self._pid.create()
except pid.PidFileError:
return "Another Randovania is connected to Dolphin already"
return None
def disconnect(self):
self._pid.close()
self.dolphin.un_hook()
def _test_still_hooked(self):
try:
if len(self.dolphin.read_bytes(0x0, 4)) != 4:
raise RuntimeError("Dolphin hook didn't read the correct byte count")
except RuntimeError as e:
self.logger.warning(f"Test read for Dolphin hook didn't work: {e}")
self.dolphin.un_hook()
def is_connected(self) -> bool:
if self.dolphin.is_hooked():
self._test_still_hooked()
return self.dolphin.is_hooked()
# Game Backend Stuff
def _memory_operation(self, op: MemoryOperation, pointers: dict[int, int | None]) -> bytes | None:
op.validate_byte_sizes()
address = op.address
if op.offset is not None:
if address not in pointers:
raise MemoryOperationException(f"Invalid op: {address:x} is not in pointers")
new_address = pointers[address]
if new_address is None:
return None
address = new_address + op.offset
_validate_range(address, op.byte_count)
if not self.dolphin.is_hooked():
raise MemoryOperationException("Lost connection do Dolphin")
try:
result = None
if op.read_byte_count is not None:
result = self.dolphin.read_bytes(address, op.read_byte_count)
if op.write_bytes is not None:
self.dolphin.write_bytes(address, op.write_bytes)
self.logger.debug(f"Wrote {op.write_bytes.hex()} to {address:x}")
except RuntimeError as e:
raise MemoryOperationException(f"Lost connection do Dolphin: {e}")
return result
async def perform_memory_operations(self, ops: list[MemoryOperation]) -> dict[MemoryOperation, bytes]:
pointers_to_read = set()
for op in ops:
if op.offset is not None:
pointers_to_read.add(op.address)
pointers = {}
for pointer in pointers_to_read:
if not self.dolphin.is_hooked():
raise MemoryOperationException("Lost connection do Dolphin")
try:
pointers[pointer] = self.dolphin.follow_pointers(pointer, [0])
except RuntimeError:
pointers[pointer] = None
self.logger.debug(f"Failed to read a valid pointer from {pointer:x}")
self._test_still_hooked()
if not self.dolphin.is_hooked():
raise MemoryOperationException("Lost connection do Dolphin")
result = {}
for op in ops:
op_result = self._memory_operation(op, pointers)
if op_result is not None:
result[op] = op_result
return result
| randovania/randovania | randovania/game_connection/executor/dolphin_executor.py | dolphin_executor.py | py | 4,135 | python | en | code | 165 | github-code | 6 | [
{
"api_name": "randovania.game_connection.executor.memory_operation.MemoryOperationException",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "randovania.game_connection.executor.memory_operation.MemoryOperationExecutor",
"line_number": 25,
"usage_type": "name"
},
{
"ap... |
28634572744 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import math
from torch.autograd import Variable
pi = 0.01
class Recommend(nn.Module):
"""A model to build Recommendation system
"""
def __init__(self, past_observations, n_factors, output_dim):
super().__init__()
self.past_observations = past_observations
self.n_factors = n_factors
self.output_dim = output_dim
self.embedding = torch.nn.Embedding(self.output_dim, self.n_factors)
self.n1 = nn.Linear(self.n_factors * self.past_observations, 100)
self.n2 = nn.Linear(100, 50)
self.output = nn.Linear(50, self.output_dim)
init.constant(self.output.bias, -math.log((1-pi)/pi))
def forward(self, x):
""" We will have one Embedding matrix.
"""
k = []
for i in x:
val = self.embedding(i)
k.append(val.view(1, -1))
x = torch.cat(k)
x = self.n1(x)
x = F.relu(x)
x = self.n2(x)
x = F.relu(x)
x = self.output(x)
return x
class DataLoader():
def __init__(self, inputs, output, embed):
self.inputs = inputs
self.output = output
self.embed = embed
def __getitem__(self, idx):
o_in = torch.from_numpy(self.inputs[idx, :])
o_out = torch.from_numpy(self.output[idx, :])
return o_in, o_out
def __len__(self):
return self.inputs.shape[0]
class FocalLoss(nn.Module):
def __init__(self,
classes,
focusing_param=2.0,
balance_param=0.25,
use_gpu=False):
super().__init__()
self.focusing_param = focusing_param
self.balance_param = balance_param
self.classes = classes
self.use_gpu = use_gpu
def forward(self, x, y):
batch_size, next_best = y.size()[0], y.size()[1]
t = torch.FloatTensor(batch_size, self.classes)
t.zero_()
t.scatter_(1, y.data.cpu(), 1)
t = Variable(t)
sigmoid_p = F.sigmoid(x)
zeros = Variable(torch.zeros(sigmoid_p.size()))
if self.use_gpu:
zeros = zeros.cuda()
t = t.cuda()
pos_p_sub = ((t >= sigmoid_p).float() * (t-sigmoid_p)) + ((t < sigmoid_p).float() * zeros)
neg_p_sub = ((t >= zeros).float() * zeros) + ((t <= zeros).float() * sigmoid_p)
ce = (-1) * self.balance_param * (pos_p_sub ** self.focusing_param) * torch.log(torch.clamp(sigmoid_p, 1e-4, 1.0)) -(1-self.balance_param) * (neg_p_sub ** self.focusing_param) * torch.log(torch.clamp(1.0-sigmoid_p, 1e-4, 1.0))
pos_samples = float(batch_size * next_best)
return ce.sum()/pos_samples
| prakashjayy/av_mckinesy_recommendation_challenge | func.py | func.py | py | 2,764 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torch.nn.Embedding",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_n... |
73227662588 | import pandas as pd
import imp
import QR_Code_VCard_WC_copy
import imp
from tkinter import *
from tkinter.ttk import *
from tkinter.filedialog import askopenfile
import time
import os
from pathlib import Path
global current_path
current_path=Path.cwd()
def open_file():
global file_path
file_path = askopenfile(mode='r', filetypes=[('Excel', '*csv')])
#print(file_path)
file.set(os.path.basename(file_path.name))
percent.set("")
adhar.pack_forget()
window.update_idletasks()
if file_path or logo is not None:
pass
def open_logo():
global logo
logo = askopenfile(mode='r')
#print(file_path)
image.set(os.path.basename(logo.name))
percent.set("")
adhar.pack_forget()
button_status.set("Create QR Code")
button.pack()
window.update_idletasks()
if logo or file_path is not None:
pass
def close():
window.quit()
exit()
def start():
#Path location
if file_path == "":
path=r"{current_path}/BR+A Bussiness Card/01_CSV/Employee Information.csv".format(current_path=current_path)
else:
path=r"{}".format(file_path.name)
#Workbook Path
global employee_data
employee_data = pd.read_csv(path)
#Creating QR Code
global Employee_info
global x
x=0
bar['value'] = 0
percent.set("")
for i in employee_data.itertuples():
Employee_info=i
x=x+1
percentage = round((x/len(employee_data.index))*100)
#print(percentage)
file_total.set("QR Code(s) created: "+str(x))
imp.reload(QR_Code_VCard_WC_copy)
if percentage >= bar['value']+2:
bar['value'] = percentage
window.update_idletasks()
try:
bar['value'] = 100
percent.set("QR Code(s) Created and CSV file Updated Succesfully !!!")
window.update_idletasks()
employee_data.to_csv(r"{current_path}/01_CSV/Employee Information.csv".format(current_path=current_path), index=False, sep=',')
#print("QR Code(s) Created and CSV file Updated Succesfully !!!")
button.pack_forget()
button2.pack()
button_status.set("Close")
window.update_idletasks()
except:
#print("CSV file opened by another user and not updated!!! Please close csv file and restart script.")
percent.set("CSV file opened by another user and not updated!!! Please close csv file and restart script.")
button_status.set("Try again")
window.update_idletasks()
#exit()
#Window Interface
window = Tk()
window.title("BR+A-Virtual QR Code Generator")
window.geometry("400x200")
percent = StringVar()
file = StringVar()
image = StringVar()
file_total = StringVar()
button_status = StringVar()
file.set('Choose File')
image.set('Choose Background')
button_status.set("Create QR Code(s)")
Title_label = Label(window, text="Virtual QR Code Generator").pack()
bar = Progressbar(window, orient=HORIZONTAL, length=300)
bar.pack(pady=10,padx=10)
percent_label = Label(window, textvariable=percent)
percent_label.pack()
file_label = Label(window, textvariable=file_total).pack()
adhar = Label(window,text='Upload Excel')
adhar.pack()
adharbtn = Button(window,textvariable=file,command=lambda: open_file())
adharbtn.pack()
adharbtn = Button(window,textvariable=image,command=lambda: open_logo())
adharbtn.pack()
button = Button(window,textvariable=button_status, command=start)
button2 = Button(window,textvariable=button_status, command=close)
window.mainloop()
window.quit()
exit()
| JonJones98/Virtual-Business-Card-Generator | 06_Scripts/Excel_connection_csv.py | Excel_connection_csv.py | py | 3,698 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pathlib.Path.cwd",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "tkinter.filedialog.askopenfile",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.... |
17651609361 | from builtins import print, input, int
import mariadb
import sqlite3
import psycopg2
print("Indique en que base de datos quiere realizar las gestiones:")
print("1. PostgreSQL\n2. MariaDB\n3. SQLite3")
lectura = input()
lectura = int(lectura)
while True:
if lectura == 1:
# Creamos la conexión
conn = psycopg2.connect(
host="localhost",
database="bdpython",
user="openpg",
password="openpgpwd"
)
cursor = conn.cursor()
# Borramos la tabla en caso de que exista
cursor.execute("DROP TABLE IF EXISTS ejemplo_python;")
# Creamos la tabla de ejemplo
cursor.execute("""
CREATE TABLE PEDROPUERTAS (
id serial PRIMARY KEY,
nombre varchar(50),
salario real,
fecha_alta date,
inscrito boolean
);
""")
# Hacemos el insert de algunas filas
cursor.execute("""
INSERT INTO PEDROPUERTAS (nombre, salario, fecha_alta, inscrito)
VALUES
('Juan', 5000, '2022-01-01', True),
('María', 6000, '2022-02-01', True),
('Pedro', 7000, '2022-03-01', False),
('Ana', 8000, '2022-04-01', True),
('Lucía', 9000, '2022-05-01', False);
""")
print("Filas añadidas.\n")
# Guardamos los cambios
conn.commit()
cursor.execute("SELECT * FROM PEDROPUERTAS;")
# Guardamos en la variable rows todas las filas seleccionadas
rows = cursor.fetchall()
for i in rows:
print(i)
cursor.execute("DELETE FROM PEDROPUERTAS WHERE ID = 1;")
cursor.execute("DELETE FROM PEDROPUERTAS WHERE ID = 2;")
print("\nFilas borradas.\n")
# Guardamos los cambios
conn.commit()
# Hacemos el select de las filas y mostramos con el bucle for
cursor.execute("SELECT * FROM PEDROPUERTAS;")
rows = cursor.fetchall()
for i in rows:
print(i)
# Cerramos la conexión
conn.close()
break
elif lectura == 2:
# Creamos la conexión
conn = mariadb.connect(
host="localhost",
user="root",
password="usuario",
database="bdpython"
)
cursor = conn.cursor()
# Borramos la tabla si existe
cursor.execute("DROP TABLE IF EXISTS PEDROPUERTAS")
# Creamos la tabla
cursor.execute("""
CREATE TABLE PEDROPUERTAS(
id INT AUTO_INCREMENT PRIMARY KEY,
nombre TEXT,
salario FLOAT,
fecha_alta DATE,
inscrito BOOLEAN
);
""")
# Insertamos datos
cursor.execute("""
INSERT INTO PEDROPUERTAS (nombre, salario, fecha_alta, inscrito)
VALUES
('Juan', 5000, '2022-01-01', True),
('María', 6000, '2022-02-01', True),
('Pedro', 7000, '2022-03-01', False),
('Ana', 8000, '2022-04-01', True),
('Lucía', 9000, '2022-05-01', False);
""")
print("Filas añadidas.\n")
# Guardamos los cambios
conn.commit()
# Hacemos el select para mostrar los datos
cursor.execute("SELECT * FROM PEDROPUERTAS")
# Mostramos las filas por pantalla
rows = cursor.fetchall()
for i in rows:
print(i)
# Borramos filas
cursor.execute("DELETE FROM PEDROPUERTAS WHERE ID = 1")
cursor.execute("DELETE FROM PEDROPUERTAS WHERE ID = 2")
print("\nFilas borradas.\n")
# Guardamos los cambios
conn.commit()
# Mostramos todas las filas restantes
cursor.execute("SELECT * FROM PEDROPUERTAS")
rows = cursor.fetchall()
for i in rows:
print(i)
# Cerramos la conexión
conn.close()
break
elif lectura == 3:
# Creamos la conexión en memoria
conn = sqlite3.connect('bdpython.db')
cursor = conn.cursor()
# Creamos la tabla
cursor.execute('''CREATE TABLE IF NOT EXISTS PEDROPUERTAS (id INTEGER PRIMARY KEY, nombre TEXT,
salario REAL, fecha_alta DATE, inscrito BOOLEAN)''')
# Insertamos algunos datos
cursor.execute(
"INSERT INTO PEDROPUERTAS (nombre, salario, fecha_alta, inscrito) "
"VALUES ('Juan', 5000.0, '2022-01-01', 1)")
cursor.execute(
"INSERT INTO PEDROPUERTAS (nombre, salario, fecha_alta, inscrito) "
"VALUES ('Ana', 6000.0, '2022-02-01', 0)")
cursor.execute(
"INSERT INTO PEDROPUERTAS (nombre, salario, fecha_alta, inscrito) "
"VALUES ('Pedro', 7000.0, '2022-03-01', 1)")
cursor.execute(
"INSERT INTO PEDROPUERTAS (nombre, salario, fecha_alta, inscrito) "
"VALUES ('Sofia', 8000.0, '2022-04-01', 0)")
cursor.execute(
"INSERT INTO PEDROPUERTAS (nombre, salario, fecha_alta, inscrito) "
"VALUES ('Lucas', 9000.0, '2022-05-01', 1)")
conn.commit()
print("Filas añadidas.\n")
# Hacemos el select de las filas
cursor.execute("SELECT * FROM PEDROPUERTAS")
# Las guardamos en rows y las mostramos con el bucle for
rows = cursor.fetchall()
for i in rows:
print(i)
# Borramos algunas filas
cursor.execute("DELETE FROM PEDROPUERTAS WHERE id = 1")
cursor.execute("DELETE FROM PEDROPUERTAS WHERE id = 2")
print("\nFilas borradas.\n")
# Hacemos el select y las mostramos con el for
cursor.execute("SELECT * FROM PEDROPUERTAS")
rows = cursor.fetchall()
for i in rows:
print(i)
conn.close()
break
else:
print("Seleccione una opción correcta:")
lectura = input()
lectura = int(lectura)
| PedroPuertasR/2DAM | 2 Trimestre/SGE/ConexionBD/main.py | main.py | py | 6,179 | python | es | code | 0 | github-code | 6 | [
{
"api_name": "builtins.print",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "builtins.print",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "builtins.input",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "builtins.int",
"line_n... |
19981905247 | import json
import os
from aiogram import Bot, Dispatcher, executor, types
from aiogram.dispatcher.filters import Text
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.dispatcher.filters.state import State, StatesGroup
from aiogram.dispatcher import FSMContext
from aiogram.utils.markdown import hbold, hlink
from aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, KeyboardButton
from dotenv import load_dotenv
from sbermarket import get_data
load_dotenv()
TOKEN = str(os.environ.get('TOKEN'))
bot = Bot(token=TOKEN, parse_mode=types.ParseMode.HTML)
storage = MemoryStorage()
dp = Dispatcher(bot, storage=storage)
async def on_startup(_dispatcher):
    """Print a startup banner once the dispatcher begins polling."""
    print('BOT STARTED')
class Form(StatesGroup):
    """Finite-state-machine states for the search dialogue."""
    # Waiting for the user's free-text search query.
    search = State()
    # Waiting for a shop choice (declared but not referenced elsewhere in this file).
    resource = State()
def main_menu_keyboard():
    """Build the persistent main-menu reply keyboard.

    Bug fix: ``search_button`` was assigned twice in a row, so the
    'Выбрать ресурс для поиска' button was silently overwritten and its
    handler was unreachable from the menu.  Each button now has its own
    variable and both are added.
    """
    keyboard = ReplyKeyboardMarkup(resize_keyboard=True)
    last_attempt = KeyboardButton(text='Получить предыдущий запрос')
    help_button = KeyboardButton(text='Справка')
    description_button = KeyboardButton(text='Описание')
    resource_button = KeyboardButton(text='Выбрать ресурс для поиска')
    search_button = KeyboardButton(text='Ввести поисковый запрос')
    keyboard.add(last_attempt).add(description_button, help_button).add(resource_button).add(search_button)
    return keyboard
def resource_keyboard():
    """Inline keyboard listing the supported shops plus a back-to-menu row."""
    options = [
        ('МВидео', 'res_mvideo'),
        ('СберМаркет', 'res_sbermarket'),
        ('DNS', 'res_dns'),
        ('Корпорация Центр', 'res_kcent'),
        ('Вернуться в главное меню', 'main_menu'),
    ]
    rows = [[InlineKeyboardButton(text=label, callback_data=data)] for label, data in options]
    return InlineKeyboardMarkup(inline_keyboard=rows)
@dp.message_handler(commands='start')
async def start_command(message: types.Message):
    """Greet the user on /start and show the main reply keyboard."""
    greeting = (
        '<b>Добро пожаловать!</b>\n'
        'Этот бот позволит найти интересующее Вас товары на СберМаркете со скидкой!'
    )
    await message.answer(greeting, reply_markup=main_menu_keyboard())
    await message.delete()
@dp.message_handler(Text(equals='Выбрать ресурс для поиска'))
async def resource_command(message: types.Message):
    """Hide the reply keyboard and present the inline shop-selection menu."""
    transition_text = 'Вы перешли в меню выбора сайта!'
    prompt_text = 'Пожалуйста выберете сайт для поиска!'
    await message.answer(text=transition_text, reply_markup=ReplyKeyboardRemove())
    await message.answer(text=prompt_text, reply_markup=resource_keyboard())
    await message.delete()
@dp.message_handler(Text(equals='Ввести поисковый запрос'))
async def get_discount_search(message: types.Message):
    """Switch the FSM into the search state and prompt for a query.

    NOTE(review): a second handler below reuses this exact function name;
    registration still works (decorators run at definition time), but one
    of the two should be renamed for clarity.
    """
    await Form.search.set()
    await message.reply("Вводите поисковый запрос:")
@dp.callback_query_handler(lambda callback: callback.data.startswith('res'), state=Form.search)
async def get_resource(callback: types.CallbackQuery, state: FSMContext):
    """Remember which shop was picked while the search state is active.

    Bug fixes: the filter previously called ``callback.startswith`` (the
    CallbackQuery object has no such method — the string lives in
    ``callback.data``), and the body used ``state`` without declaring it
    as a handler parameter, which raised NameError at runtime.
    """
    if callback.data.endswith('mvideo'):
        async with state.proxy() as data:
            # FIXME (kept from original): storing into 'search' clobbers the
            # user's query; the shop choice needs its own key.
            data['search'] = callback.data
@dp.callback_query_handler()
async def main_menu(callback: types.CallbackQuery):
    """Catch-all callback handler: return the user to the main menu."""
    menu_markup = main_menu_keyboard()
    await callback.message.answer('Возврат в главное меню!', reply_markup=menu_markup)
@dp.message_handler(state=Form.search)
async def get_discount_search(message: types.Message, state: FSMContext):
    """Scrape SberMarket for the user's query and send back up to six offers.

    NOTE(review): this reuses the function name of the prompt handler above —
    rename one of them.
    """
    # Remember the query in the FSM storage.
    async with state.proxy() as data:
        data['search'] = message.text
    await message.answer('Идет поиск. Примерное время ожидания: 30 секунд\nОжидайте...')
    # Scrapes results into data/sbermarket-<user id>.json for this user.
    get_data(message.text, message.from_user.id)
    # NOTE(review): uses attribute access (.id) above but item access (["id"])
    # here — presumably equivalent for aiogram's User type, but confirm.
    with open(f'data/sbermarket-{message.from_user["id"]}.json', encoding='utf-8') as file:
        data = json.load(file)
    # Send at most the first six offers, one message per item.
    for item in data[:6]:
        card = f'{hlink(item.get("item_name"), item.get("url"))}\n' \
               f'{hbold("Старая цена")} {item.get("old_price")}\n' \
               f'👩🏿‍🎓👩🏿‍🎓{hbold("Новая цена")} -{item.get("discount")}%: {item.get("item_price")}👩🏿‍🎓👩🏿‍🎓\n'
        await message.answer(card)
    # NOTE(review): debug leftover — prints the whole proxy dict once per key.
    async with state.proxy() as data:
        for i in data:
            print(data)
    await state.finish()
def main():
    """Start long polling; ``on_startup`` runs once the dispatcher is ready."""
    executor.start_polling(dp, skip_updates=True, on_startup=on_startup)
if __name__ == '__main__':
    main()
| Baradys/scrappers | scrappers/sbermarket/sbermarket_bot.py | sbermarket_bot.py | py | 4,624 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "aiogram.Bot",
"... |
29929387988 | #!/bin/python
import xml.etree.ElementTree as ET
import sys

# Parse the deck file given as the first command-line argument.
tree = ET.parse(sys.argv[1])
root = tree.getroot()

# Children 2 and 3 of the root are treated as the main-deck and sideboard
# <zone> elements (presumably following <deckname> and comment children —
# confirm against the deck-file schema).  The previously retained
# commented-out prototype code was removed.
for c in root[2]:
    print(c.get('number') + ' ' + c.get('name'))
for c in root[3]:
    print('SB: ' + c.get('number') + ' ' + c.get('name'))
| nikisix/dex | xml_parser.py | xml_parser.py | py | 398 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 5,
"usage_type": "attribute"
}
] |
10205108247 | from bson.objectid import ObjectId
from pyramid.httpexceptions import HTTPFound
from pyramid.security import remember, forget
from pyramid.url import route_url
from pyramid.view import view_config
from .forms import TaskForm, TaskUpdateForm
@view_config(route_name='home', renderer='templates/home.jinja2')
def task_list(request):
    """Render the home template with every task stored in MongoDB."""
    return {
        'tasks': request.db['tasks'].find(),
        'project': 'task_manager',
    }
@view_config(route_name='tadd', renderer='templates/add.jinja2', permission='create')
def task_add(request):
    """Create a task from a valid POST; otherwise re-render the empty form."""
    form = TaskForm(request.POST, None)
    if not (request.POST and form.validate()):
        return {'form': form}
    request.db['tasks'].save(form.data)
    return HTTPFound(route_url('home', request))
@view_config(route_name='tedit', renderer='templates/edit.jinja2', permission='edit')
def task_edit(request):
    """Edit the task selected by the ``id`` route segment."""
    task_id = request.matchdict.get('id', None)
    item = request.db['tasks'].find_one({'_id': ObjectId(task_id)})
    form = TaskUpdateForm(request.POST,
                          id=task_id, name=item['name'],
                          active=item['active'])
    if request.method != 'POST' or not form.validate():
        return {'form': form}
    entry = form.data
    entry['_id'] = ObjectId(entry.pop('id'))  # map the form's 'id' field back to Mongo's _id
    request.db['tasks'].save(entry)
    return HTTPFound(route_url('home', request))
@view_config(route_name='tdelete', permission='delete')
def task_delete(request):
    """Delete the task named in the route (if any), then redirect home."""
    task_id = request.matchdict.get('id', None)
    if task_id:
        request.db['tasks'].remove({'_id': ObjectId(task_id)})
    return HTTPFound(route_url('home', request))
@view_config(route_name='auth', match_param='action=in', renderer='string', request_method='POST')
@view_config(route_name='auth', match_param='action=out', renderer='string')
def sign_in_out(request):
    """Sign the user in on matching credentials; otherwise clear the session.

    NOTE(security): passwords are compared in plain text here; they should be
    stored hashed and verified with a constant-time comparison.
    """
    username = request.POST.get('username')
    user = request.db['users'].find_one({'name': username}) if username else None
    if user and user['password'] == request.POST.get('password'):
        headers = remember(request, user['name'])
    else:
        headers = forget(request)
    return HTTPFound(location=request.route_url('home'), headers=headers)
| albertosdneto/tutorial_pyramid_mongo | task_manager/views.py | views.py | py | 2,297 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pyramid.view.view_config",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "forms.TaskForm",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pyramid.httpexceptions.HTTPFound",
"line_number": 26,
"usage_type": "call"
},
{
"api_name... |
7126327995 | from django.urls import path
from . import views
# Maps each URL pattern to the blog view that handles it.
urlpatterns = [
    path('', views.post_list, name='post_list'),
    path('post/<int:pk>/', views.post_detail, name='post_detail'),
    path('post/new/', views.post_new, name='post_new'),
    path('post/<int:pk>/edit/', views.post_edit, name='post_edit'),
    path('post/t/', views.post_t, name='post_t'),
]
| x2wing/django_l2 | blog/urls.py | urls.py | py | 424 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
75131926908 | """
在python中,只有函数才是Callable(可Call的对象才是Callable)。但是tuple是一个数据类型,当然是不能Call(翻译成:使唤,hhh可能会比较容易理解)
"""
import cv2 as cv
import numpy as np
def negation_pixels(image):
    """Invert every pixel value of ``image`` in place and show the result.

    Performance fix: the original iterated every pixel of every channel in
    pure Python (O(H*W*C) interpreter loops); a single vectorised numpy
    operation computes the identical result.
    """
    print(image.shape)
    height = image.shape[0]
    width = image.shape[1]
    channels = image.shape[2]
    print("width: %s height: %s channels: %s " % (width, height, channels))
    # 255 - v inverts each 8-bit channel value; assigning through [:] keeps
    # the mutation in place, exactly as the original per-pixel loop did.
    image[:] = 255 - image
    cv.imshow("negation pixels_1", image)
scr = cv.imread(r"beautyful_view.jpg")
cv.imshow("before", scr)
negation_pixels(scr)  # negates `scr` in place and opens window 1
# NOTE(review): the return value of bitwise_not is discarded, so `scr` is
# unchanged here and window 2 shows the same (already negated) image as
# window 1 — likely `scr = cv.bitwise_not(scr)` was intended.
cv.bitwise_not(scr)
cv.imshow("negation pixels_2", scr)
cv.waitKey(0)
cv.destroyAllWindows()
{
"api_name": "cv2.imshow",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cv2.bitwise_not",
"line_number":... |
31872746206 | from lxml import html
import requests
import re

MainPage = requests.get("https://www.carvezine.com/stories/")
tree = html.fromstring(MainPage.content)
links = tree.xpath('//a[@class="summary-title-link"]/@href')

# Fetch each story page and collect its paragraph text; joining once at the
# end replaces the original quadratic `text += ...` string building.  A
# dangling no-op line (`text.encode('utf-8').strip()` with the result
# discarded) was removed.
story_texts = []
for link in links:
    testURL = "https://www.carvezine.com" + link
    story = requests.get(testURL)
    storyTree = html.fromstring(story.content)
    storyList = storyTree.xpath('//*[@class="sqs-block-content"]/p//text()')
    story_texts.append(' '.join(storyList))
text = ''.join(story_texts)

# Replace everything except basic ASCII letters, digits and punctuation.
new_txt = re.sub(r'[^a-zA-Z0-9\'\.\,\!\?\:\;\(\)\"\$\#]', ' ', text)
# Resource-leak fix: close the output file deterministically.
with open('collections.txt', 'w') as out_file:
    out_file.write(new_txt)
{
"api_name": "requests.get",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "lxml.html.fromstring",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_num... |
12989554153 | from PIL import Image, ImageDraw, ImageFont
import calendar, datetime, holidays
def get_image_calendar(dates, year, month):
    """Render a 500x500 month calendar to ``calendar.png``.

    Day numbers listed in ``dates`` are omitted.  Past days of the current
    month are grey; Saturdays, Sundays and Belarusian public holidays are
    red; everything else is black.
    """
    width, height = 500, 500
    img = Image.new('RGB', (width, height), color='white')
    draw = ImageDraw.Draw(img)
    font = ImageFont.truetype('arial.ttf', size=30)
    # Russian month names keyed by the English names from calendar.month_name.
    dict_for_month = {"January": "Январь", "February": "Февраль", "March": "Март", "April": "Апрель", "May": "Май",
                      "June":
                          "Июнь", "July": "Июль", "August": "Август", "September": "Сентябрь", "October": "Октябрь",
                      "November": "Ноябрь",
                      "December": "Декабрь"}
    title = dict_for_month[calendar.month_name[month]] + ' ' + str(year)
    draw.text(((width - 100) // 2, 20), title, font=font, fill='black')
    cal = calendar.monthcalendar(year, month)
    now = datetime.datetime.now()
    cell_width = (width - 40) // 7
    cell_height = (height - 100) // len(cal)
    # Weekday header row.
    days = ["Пн", "Вт", "Ср", "Чт", "Пт", "Сб", "Вс"]
    for i, day in enumerate(days):
        day_width = draw.textlength(day, font=font)
        # Bug fix: the original compared against upper-case "СБ"/"ВС", which
        # never matched the title-case labels above, so weekend headers were
        # always drawn black.
        if day in ("Сб", "Вс"):
            draw.text((20 + i * cell_width + (cell_width - day_width) // 2, 60), day, font=font, fill="red")
        else:
            draw.text((20 + i * cell_width + (cell_width - day_width) // 2, 60), day, font=font, fill="black")
    # Map month number -> list of Belarusian public-holiday day numbers.
    Belarus_holidays_list = holidays.BY(years=year)
    bel_holidays = [(d.month, d.day) for d in Belarus_holidays_list]
    hol_dict = {m: [] for m in range(1, 13)}
    for hol_month, hol_day in bel_holidays:
        hol_dict[hol_month].append(hol_day)
    for i, row in enumerate(cal):
        for j, day in enumerate(row):
            if day != 0 and day not in dates:
                day_width, day_height = draw.textlength(str(day), font=font), 20
                if now.year == year and now.month == month and day < now.day:
                    # Already past in the current month -> grey.
                    draw.text((20 + j * cell_width + (cell_width - day_width) // 2,
                               100 + i * cell_height + (cell_height - day_height) // 2), str(day), font=font,
                              fill="gray")
                elif row[-1] == day or row[-2] == day or day in hol_dict[month]:
                    # Saturday/Sunday column or a public holiday -> red.
                    draw.text((20 + j * cell_width + (cell_width - day_width) // 2,
                               100 + i * cell_height + (cell_height - day_height) // 2), str(day), font=font,
                              fill="red")
                else:
                    draw.text((20 + j * cell_width + (cell_width - day_width) // 2,
                               100 + i * cell_height + (cell_height - day_height) // 2), str(day), font=font,
                              fill="black")
    img.save('calendar.png')
{
"api_name": "PIL.Image.new",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "PIL.ImageDraw.Draw",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "PIL.ImageDraw",
"line_num... |
5414745490 | #!flask/bin/python
"""Alternative version of the ToDo RESTful server implemented using the
Flask-RESTful extension."""
from flask import Flask, jsonify, abort, make_response, make_response, request, current_app
from flask.ext.restful import Api, Resource, reqparse, fields, marshal
from flask.ext.httpauth import HTTPBasicAuth
from datetime import timedelta
from functools import update_wrapper
from py2neo import Graph
# NOTE(security): database credentials are hard-coded in this connection URL;
# move them into environment variables or a config file outside version control.
graph = Graph("http://PyBase:sZzmKcoKKjG1pnUhjitl@pybase.sb04.stations.graphenedb.com:24789/db/data/")
app = Flask(__name__, static_url_path="")
api = Api(app)
auth = HTTPBasicAuth()
def crossdomain(origin=None, methods=None, headers=None,
                max_age=21600, attach_to_all=True,
                automatic_options=True):
    """Decorator factory that adds CORS (Access-Control-*) headers to a view.

    NOTE(review): uses the Python 2 ``basestring`` builtin, so this module is
    Python 2 only as written.
    """
    # Normalise list arguments into comma-separated header strings.
    if methods is not None:
        methods = ', '.join(sorted(x.upper() for x in methods))
    if headers is not None and not isinstance(headers, basestring):
        headers = ', '.join(x.upper() for x in headers)
    if not isinstance(origin, basestring):
        origin = ', '.join(origin)
    if isinstance(max_age, timedelta):
        max_age = max_age.total_seconds()
    def get_methods():
        # Use the explicit method list when given; otherwise advertise
        # whatever Flask's default OPTIONS response allows.
        if methods is not None:
            return methods
        options_resp = current_app.make_default_options_response()
        return options_resp.headers['allow']
    def decorator(f):
        def wrapped_function(*args, **kwargs):
            # Answer OPTIONS preflights with the default response when enabled.
            if automatic_options and request.method == 'OPTIONS':
                resp = current_app.make_default_options_response()
            else:
                resp = make_response(f(*args, **kwargs))
            if not attach_to_all and request.method != 'OPTIONS':
                return resp
            h = resp.headers
            h['Access-Control-Allow-Origin'] = origin
            h['Access-Control-Allow-Methods'] = get_methods()
            h['Access-Control-Max-Age'] = str(max_age)
            if headers is not None:
                h['Access-Control-Allow-Headers'] = headers
            return resp
        # Disable Flask's automatic OPTIONS so the wrapper handles it itself.
        f.provide_automatic_options = False
        return update_wrapper(wrapped_function, f)
    return decorator
@app.route('/')
def hello_world():
    """Smoke-test route for the API root."""
    greeting = 'Hello crule World!'
    return greeting
@auth.get_password
def get_password(username):
    """Return the HTTP-auth password for ``username``; None rejects the user."""
    return 'python' if username == 'miguel' else None
@auth.error_handler
def unauthorized():
    """Answer failed auth with 403 (401 would pop the browser's auth dialog)."""
    body = jsonify({'message': 'Unauthorized access'})
    return make_response(body, 403)
# In-memory task store (module-level; not persisted and not thread-safe).
tasks = [
    {
        'id': 1,
        'title': u'Buy groceries',
        'description': u'Milk, Cheese, Pizza, Fruit, Tylenol',
        'done': False
    },
    {
        'id': 2,
        'title': u'Learn Python',
        'description': u'Need to find a good Python tutorial on the web',
        'done': False
    }
]
# flask-restful marshalling schema for task responses.
task_fields = {
    'title': fields.String,
    'description': fields.String,
    'done': fields.Boolean,
    'uri': fields.Url('task')
}
# Marshalling schema for person records fetched from Neo4j.
person_fields = {
    'userName': fields.String,
    'playerID': fields.String,
    'firstName': fields.String,
    'lastName': fields.String,
    'city':fields.String,
    'email': fields.String,
    'bankRef': fields.String,
    'gender':fields.String,
    'role': fields.String,
    'active': fields.String,
    'img': fields.String
}
class TaskListAPI(Resource):
    """Collection resource for tasks: list them all or create a new one."""
    decorators = [auth.login_required]

    def __init__(self):
        self.reqparse = reqparse.RequestParser()
        self.reqparse.add_argument('title', type=str, required=True,
                                   help='No task title provided',
                                   location='json')
        self.reqparse.add_argument('description', type=str, default="",
                                   location='json')
        super(TaskListAPI, self).__init__()

    def get(self):
        # Marshal every in-memory task through the public field map.
        serialized = [marshal(task, task_fields) for task in tasks]
        return {'tasks': serialized}

    def post(self):
        args = self.reqparse.parse_args()
        new_task = {
            'id': tasks[-1]['id'] + 1,
            'title': args['title'],
            'description': args['description'],
            'done': False,
        }
        tasks.append(new_task)
        return {'task': marshal(new_task, task_fields)}, 201
class PeopleListAPI(Resource):
    """Read-only resource listing PERSON nodes from the Neo4j graph.

    NOTE(review): ``people`` is a *class-level* list used as a cache — it is
    shared by every instance and filled once on the first GET, and it is
    never invalidated for the lifetime of the process.
    """
    # decorators = [auth.login_required]
    people = []
    def __init__(self):
        super(PeopleListAPI, self).__init__()
    @crossdomain(origin='*')
    def get(self):
        # Populate the cache from Neo4j on the first request only.
        if self.people == []:
            qry = "MATCH (n :PERSON) RETURN n"
            ans = graph.cypher.execute(qry)
            for x in ans:
                # x[0] is the returned node; copy its properties into a dict.
                person = {
                    'userName': x[0]['userName'],
                    'playerID': x[0]['playerID'],
                    'firstName': x[0]['firstName'],
                    'lastName': x[0]['lastName'],
                    'city': x[0]['city'],
                    'email': x[0]['email'],
                    'bankRef': x[0]['bankRef'],
                    'gender':x[0]['gender'],
                    'role': x[0]['role'],
                    'active': x[0]['active'],
                    'img': x[0]['img']
                }
                self.people.append(person)
        return jsonify({'people': self.people})
class TaskAPI(Resource):
    """Single-task resource: fetch, update or delete one task by id.

    The lookup-or-404 logic previously duplicated in get/put/delete is
    factored into the private ``_find_task`` helper.
    """
    decorators = [auth.login_required]

    def __init__(self):
        self.reqparse = reqparse.RequestParser()
        self.reqparse.add_argument('title', type=str, location='json')
        self.reqparse.add_argument('description', type=str, location='json')
        self.reqparse.add_argument('done', type=bool, location='json')
        super(TaskAPI, self).__init__()

    def _find_task(self, id):
        # Return the task dict with the given id, or abort the request with 404.
        matches = [task for task in tasks if task['id'] == id]
        if len(matches) == 0:
            abort(404)
        return matches[0]

    def get(self, id):
        task = self._find_task(id)
        return {'task': marshal(task, task_fields)}

    def put(self, id):
        task = self._find_task(id)
        args = self.reqparse.parse_args()
        # Only overwrite fields that were actually supplied in the JSON body.
        for k, v in args.items():
            if v is not None:
                task[k] = v
        return {'task': marshal(task, task_fields)}

    def delete(self, id):
        task = self._find_task(id)
        tasks.remove(task)
        return {'result': True}
# URL routing for the REST resources.
api.add_resource(TaskListAPI, '/todo/api/v1.0/tasks', endpoint='tasks')
api.add_resource(PeopleListAPI, '/todo/api/v1.0/people', endpoint='people')
api.add_resource(TaskAPI, '/todo/api/v1.0/tasks/<int:id>', endpoint='task')
if __name__ == '__main__':
    app.run(debug=True)
{
"api_name": "py2neo.Graph",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.ext.restful.Api",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.ext.httpauth.HTT... |
33008999633 | '''
Script to process nbn coverage map csv files, transform and load into a MongoDB
Author: Rommel Poggenberg (29860571)
Date created: 19th April 2021 (FIT5147 TP2 2021)
'''
import csv
import pymongo
import pprint
import sys
import datetime
pp = pprint.PrettyPrinter(indent=4)
state_lookup={2:'New South Wales',3:'Victoria',4:'Queensland',5:'South Australia',6:'Western Australia',7:'Tasmania',8:'Northern Territory',9:'Australian Capital Territory'}
filter_tech={'Fibre to the Basement':'fttb', 'Fibre to the Curb':'fttc', 'Fibre to the Node':'fttn', 'Fibre to the Premises':'fttp', 'Fixed Wireless':'fixed_wireless', 'Hybrid Fibre Coaxial (HFC)':'hfc'}
filter_state={'Australian Capital Territory':'act', 'New South Wales':'nsw', 'Northern Territory':'nt', 'Queensland':'ql','South Australia':'sa', 'Tasmania':'tas', 'Victoria':'vic', 'Western Australia': 'wa'}
nbn_map_data={}
#Cut off date which nbn declared to be built 30th June 2020
nbn_build_deadline=datetime.datetime.strptime('30/06/2020 00:00:00', '%d/%m/%Y %H:%M:%S')
all_techs=['ALL_FixedWireless','ALL_FTTB','ALL_FTTC','ALL_FTTN','ALL_FTTP','ALL_HFC']
#Read CSV files
for tech in all_techs:
with open('data\\'+tech+'.csv', newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
#print(row)
data={}
data['Technology_Type']=row['Technology_Type']
data['Ready_for_Service_Date']=row['Ready_for_Service_Date']
try:
if datetime.datetime.strptime(row['Ready_for_Service_Date']+' 00:00:00', '%d/%m/%Y %H:%M:%S') <= nbn_build_deadline:
data['RFS_On_Schedule']=True
else:
data['RFS_On_Schedule']=False
except:
data['RFS_On_Schedule']=False
data['Area_ID']=row['Area_ID']
data['Service_Status']=row['Service_Status']
data['state']=state_lookup[int(row['Area_ID'][0])]
data['longitude']=row['longitude']
data['latitude']=row['latitude']
data['markerscale']=1
if data['state'] in nbn_map_data.keys():
if data['Technology_Type'] in nbn_map_data[data['state']].keys():
map_records=nbn_map_data[data['state']][data['Technology_Type']]
map_records.append(data)
nbn_map_data[data['state']][data['Technology_Type']]=map_records
else:
nbn_map_data[data['state']][data['Technology_Type']]=[data]
else:
nbn_map_data[data['state']]={}
nbn_map_data[data['state']][data['Technology_Type']]=[data]
#pp.pprint(nbn_map_data)
#sys.exit(0)
bar_chart_all={}
bar_chart_ontime={}
bar_chart_after={}
#Calculate areas within original build deadline
for state in nbn_map_data.keys():
for tech in nbn_map_data[state].keys():
for record in nbn_map_data[state][tech]:
try:
year=int(record['Ready_for_Service_Date'].split('/')[2])
except:
year=2024
year=int(year)
technology=record['Technology_Type']
#Schedule of all areas in all time
if year in bar_chart_all.keys():
if technology in bar_chart_all[year].keys():
bar_chart_all[year][technology]=bar_chart_all[year][technology]+1
else:
bar_chart_all[year]= {
"Fibre to the Basement": 0,
"Fibre to the Curb": 0,
"Fibre to the Node": 0,
"Fibre to the Premises": 0,
"Fixed Wireless": 0,
"Hybrid Fibre Coaxial (HFC)": 0,
"year": str(year)
}
bar_chart_all[year][technology]=1
#Find areas which were build on schedule
if record['RFS_On_Schedule']==True:
if year in bar_chart_ontime.keys():
if technology in bar_chart_ontime[year].keys():
bar_chart_ontime[year][technology]=bar_chart_ontime[year][technology]+1
else:
bar_chart_ontime[year]= {
"Fibre to the Basement": 0,
"Fibre to the Curb": 0,
"Fibre to the Node": 0,
"Fibre to the Premises": 0,
"Fixed Wireless": 0,
"Hybrid Fibre Coaxial (HFC)": 0,
"year": str(year)
}
bar_chart_ontime[year][technology]=1
#Find areas which will be built after the deadline
if record['RFS_On_Schedule']==False:
if year in bar_chart_after.keys():
if technology in bar_chart_after[year].keys():
bar_chart_after[year][technology]=bar_chart_after[year][technology]+1
else:
bar_chart_after[year]= {
"Fibre to the Basement": 0,
"Fibre to the Curb": 0,
"Fibre to the Node": 0,
"Fibre to the Premises": 0,
"Fixed Wireless": 0,
"Hybrid Fibre Coaxial (HFC)": 0,
"year": str(year)
}
bar_chart_after[year][technology]=1
#Get all schedules in a dictionary
raw_values={'all':bar_chart_all,'ontime':bar_chart_ontime,'after':bar_chart_after}
rollout_schedule={}
for key in raw_values:
for year in sorted(raw_values[key].keys()):
if key in rollout_schedule.keys():
values=rollout_schedule[key]
values.append(raw_values[key][year])
rollout_schedule[key]=values
else:
rollout_schedule[key]=[raw_values[key][year]]
#Write dictionaries to mongodb
client = pymongo.MongoClient("mongodb://localhost:27017/")
db = client["nbn"]
col = db["map"]
col2 = db["chart"]
for state in nbn_map_data.keys():
for tech in nbn_map_data[state].keys():
print(state, tech)
col.insert_one({'technology_type':filter_tech[tech],'state':filter_state[state],'results':nbn_map_data[state][tech]})
for timeline in rollout_schedule.keys():
print(timeline)
col2.insert_one({'schedule':timeline,'results':rollout_schedule[timeline]})
| rommjp/NBN_Rollout_Visualisation | write_nbn_data_to_mongodb.py | write_nbn_data_to_mongodb.py | py | 5,360 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pprint.PrettyPrinter",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name":... |
11370340934 | import torch
import torchvision
import random
import torch.nn as nn
import torch
from torch import tanh
import torch.nn.functional as F
# custom weights initialization
def weights_init_1st(m):
    """First init scheme: draw Linear-layer weights from N(0, 0.15).

    Intended for ``nn.Module.apply``; non-Linear modules are left untouched.
    """
    if 'Linear' in type(m).__name__:
        m.weight.data.normal_(0.0, 0.15)
def weights_init_2nd(m):
    """Second init scheme: draw Linear-layer weights from N(-0.3, 0.3).

    NOTE(review): a mean of -0.3 is unusual for weight init — confirm it is
    intentional and not a typo for ``uniform_(-0.3, 0.3)``.
    """
    if 'Linear' in type(m).__name__:
        m.weight.data.normal_(-0.3, 0.3)
def print_net(model):
    """Print the name and values of every trainable parameter in ``model``."""
    trainable = ((name, p) for name, p in model.named_parameters() if p.requires_grad)
    for name, param in trainable:
        print(name, param.data.numpy())
def get_pi_net():
    """Construct a ``pi_net`` with the first weight-init scheme applied."""
    network = pi_net()
    network.apply(weights_init_1st)
    return network
class pi_net(nn.Module):
    """Three-layer MLP (36 -> 64 -> 64 -> 36) with hand-rolled activation
    normalisation and tanh between layers.

    NOTE(review): the normalisation divides sums by 20 and 40 even though the
    hidden layers have 64 units, and it sums over the *whole* batch rather
    than per-row — confirm these divisors and the reduction axis are
    intentional.
    """
    def __init__(self):
        super(pi_net, self).__init__()
        bias_on = True
        self.linear1 = nn.Linear(36, 64, bias=bias_on)
        self.linear2 = nn.Linear(64, 64, bias=bias_on)
        self.linear3 = nn.Linear(64, 36, bias=bias_on)
        #torch.nn.init.xavier_uniform_(self.linear1)
        #torch.nn.init.xavier_uniform_(self.linear2)
    def forward(self, x):
        """Return logits of shape (-1, 36) for input ``x`` of width 36."""
        # --- 0000 ---- 0000 >>> z-score normalization
        x = self.linear1(x)
        # print("AFTER linear1 = = = = = = = = = =")
        # print(x)
        # print("AFTER linear1 = = = = = = = = = =")
        # Manual z-score over all elements (divisor 20 — see class note).
        x_avg = torch.sum(x) / 20
        # print("AVG " + str(x_avg) )
        # print("x - x_avg ~~~~~~~~~~~~~~")
        x_minus_x_avg = x - x_avg
        # print(x_minus_x_avg)
        # print("x - x_avg ~~~~~~~~~~~~~~")
        x_std = torch.sum(torch.pow(x_minus_x_avg, 2)) / 20
        # print("VAR " + str(x_std))
        # epsilon guards against division by a zero std.
        epsilon = 0.0000001
        # print("STD " + str(torch.sqrt(x_std)))
        x_norm = (x_minus_x_avg) / (torch.sqrt(x_std) + epsilon)
        # print("BEFORE sigmoid = = = = = = = = = =")
        # print(x_norm)
        # print("BEFORE sigmoid = = = = = = = = = =")
        #x = F.sigmoid(x_norm)
        x = tanh(x_norm)
        x = self.linear2(x)
        # Same normalisation after the second layer (divisor 40).
        x_avg = torch.sum(x) / 40
        x_minus_x_avg = x - x_avg
        x_std = torch.sum(torch.pow(x_minus_x_avg, 2)) / 40
        x_norm = (x_minus_x_avg) / (torch.sqrt(x_std) + epsilon)
        x = tanh(x_norm)
        # print("AFTER sigmoid = = = = = = = = = =")
        # print(x)
        # print("AFTER sigmoid = = = = = = = = = =")
        x = self.linear3(x)
        return x.view(-1, 36)
        # --- 0000 ---- 0000 >>> feature scaling (retained alternative)
        # x = self.linear1(x)
        # print("AFTER linear1 = = = = = = = = = =")
        # print(x)
        # print("AFTER linear1 = = = = = = = = = =")
        # x_max = torch.max(x)
        # x_min = torch.min(x)
        # epsilon = 0.00001
        # x_norm = ((x - x_min) / (x_max - x_min + epsilon))
        # print("BEFORE sigmoid = = = = = = = = = =")
        # print(x_norm)
        # print("BEFORE sigmoid = = = = = = = = = =")
        # x = F.sigmoid(x_norm)
        # print("AFTER sigmoid = = = = = = = = = =")
        # print(x)
        # print("AFTER sigmoid = = = = = = = = = =")
        # x = self.linear2(x)
        # return x.view(-1, 4)
| ssainz/reinforcement_learning_algorithms | fleet_simulator/Models.py | Models.py | py | 3,261 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_nu... |
19114657314 | import pandas as pd
import torch
import torch.nn as nn
import math
import download
import pickle
import random
max_seq_len=34  # maximum decoded caption length, in tokens
pd.set_option('display.max_colwidth', None)  # show full column text when printing frames
print("here")  # startup breadcrumb
# Importing flask module in the project is mandatory
# An object of Flask class is our WSGI application.
from flask import Flask, request, jsonify
import json
#creating model template
class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding with dropout, added to token embeddings."""
    def __init__(self, d_model, dropout=0.1, max_len=max_seq_len):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        # Precompute the (1, max_len, d_model) sin/cos table once.
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)  # leading axis of size 1
        self.register_buffer('pe', pe)
    def forward(self, x):
        # NOTE(review): this *reassigns* the registered buffer based on the
        # incoming batch, permanently repeating/truncating it across calls —
        # confirm batch sizes never grow after the first truncation.
        if self.pe.size(0) < x.size(0):
            self.pe = self.pe.repeat(x.size(0), 1, 1)
        self.pe = self.pe[:x.size(0), :, :]
        x = x + self.pe
        return self.dropout(x)
class ImageCaptionModel(nn.Module):
    """Transformer-decoder caption model conditioned on an image encoding."""
    def __init__(self, n_head, n_decoder_layer, vocab_size, embedding_size):
        super(ImageCaptionModel, self).__init__()
        self.pos_encoder = PositionalEncoding(embedding_size, 0.1)
        self.TransformerDecoderLayer = nn.TransformerDecoderLayer(d_model=embedding_size, nhead=n_head)
        self.TransformerDecoder = nn.TransformerDecoder(decoder_layer=self.TransformerDecoderLayer,
                                                        num_layers=n_decoder_layer)
        self.embedding_size = embedding_size
        self.embedding = nn.Embedding(vocab_size, embedding_size)
        self.last_linear_layer = nn.Linear(embedding_size, vocab_size)
        self.init_weights()
    def init_weights(self):
        # Small uniform init for the embedding and the output projection.
        initrange = 0.1
        self.embedding.weight.data.uniform_(-initrange, initrange)
        self.last_linear_layer.bias.data.zero_()
        self.last_linear_layer.weight.data.uniform_(-initrange, initrange)
    def generate_Mask(self, size, decoder_inp):
        """Build the causal mask plus padding masks for the decoder input.

        Token id 0 is treated as padding throughout.
        """
        # Lower-triangular causal mask: -inf above the diagonal, 0 elsewhere.
        decoder_input_mask = (torch.triu(torch.ones(size, size)) == 1).transpose(0, 1)
        decoder_input_mask = decoder_input_mask.float().masked_fill(decoder_input_mask == 0, float('-inf')).masked_fill(
            decoder_input_mask == 1, float(0.0))
        # Float padding mask (1 for real tokens, 0 for pads).
        decoder_input_pad_mask = decoder_inp.float().masked_fill(decoder_inp == 0, float(0.0)).masked_fill(
            decoder_inp > 0, float(1.0))
        # Boolean pad mask in the form nn.TransformerDecoder expects.
        decoder_input_pad_mask_bool = decoder_inp == 0
        return decoder_input_mask, decoder_input_pad_mask, decoder_input_pad_mask_bool
    def forward(self, encoded_image, decoder_inp):
        """Return (vocab logits, float pad mask) for the given image/tokens."""
        # Move the sequence axis first, as nn.Transformer* expects.
        encoded_image = encoded_image.permute(1, 0, 2)
        decoder_inp_embed = self.embedding(decoder_inp) * math.sqrt(self.embedding_size)
        decoder_inp_embed = self.pos_encoder(decoder_inp_embed)
        decoder_inp_embed = decoder_inp_embed.permute(1, 0, 2)
        decoder_input_mask, decoder_input_pad_mask, decoder_input_pad_mask_bool = self.generate_Mask(
            decoder_inp.size(1), decoder_inp)
        # NOTE(review): the three self-assignments below are no-ops — likely
        # leftovers from removed .to(device) calls.
        decoder_input_mask = decoder_input_mask
        decoder_input_pad_mask = decoder_input_pad_mask
        decoder_input_pad_mask_bool = decoder_input_pad_mask_bool
        decoder_output = self.TransformerDecoder(tgt=decoder_inp_embed, memory=encoded_image,
                                                 tgt_mask=decoder_input_mask,
                                                 tgt_key_padding_mask=decoder_input_pad_mask_bool)
        final_output = self.last_linear_layer(decoder_output)
        return final_output, decoder_input_pad_mask
# Flask constructor takes the name of
# current module (__name__) as argument.
app = Flask(__name__)
# @app.route('/init')
# def function_to_run_only_once():
# Load the vocabulary lookup tables produced at training time.
# NOTE(security): these pickles are trusted local artefacts — never unpickle
# data from untrusted sources.
dbfile = open('index_to_word', 'rb')
index_to_word = pickle.load(dbfile)
print('loading indextoword')
dbfile.close()
dbfile = open('word_to_index', 'rb')
word_to_index = pickle.load(dbfile)
print('loading wordtoindex')
dbfile.close()
# Download the model weights from Google Drive (see the download module).
download.download_from_drive()
print('downloading model')
# Build the caption model and restore its trained weights on CPU.
model = ImageCaptionModel(16, 4, 8812, 512)
model.load_state_dict(torch.load("model_state.pth", map_location=torch.device('cpu')) )
model.eval()
# model = torch.load('./BestModel1', map_location=torch.device('cpu'))
print('loading model')
# Special token ids used by the decoder loop in /foo.
start_token = word_to_index['<start>']
end_token = word_to_index['<end>']
pad_token = word_to_index['<pad>']
print(start_token, end_token, pad_token)
K = 1  # top-K sampling width; with K=1 decoding is effectively greedy argmax
# The route() function of the Flask class is a decorator,
# which tells the application which URL should call
# the associated function.
@app.route('/')
def hello_world():
    """Liveness endpoint for the caption service."""
    payload = {'status': 'Server 2 is UP ...'}
    return jsonify(payload)
@app.route('/foo', methods=['POST'])
def foo():
    """Decode a caption for the POSTed image embedding and return it as JSON.

    Expects a JSON body with an 'image_embedding' array shaped like CNN
    feature maps (batch, channels, H, W) — TODO confirm against the caller.
    """
    data = request.json
    image_data_torch = torch.tensor(data['image_embedding'])
    print(image_data_torch.shape)
    # Flatten the spatial grid into a sequence of feature vectors.
    img_embed = image_data_torch.permute(0, 2, 3, 1)
    img_embed = img_embed.view(img_embed.size(0), -1, img_embed.size(3))
    # Seed the decoder input: <start> followed by padding.
    input_seq = [pad_token] * max_seq_len
    input_seq[0] = start_token
    input_seq = torch.tensor(input_seq).unsqueeze(0)
    predicted_sentence = []
    # return {'tt':"ok"}
    with torch.no_grad():
        for eval_iter in range(0, max_seq_len):
            output, padding_mask = model.forward(img_embed, input_seq)
            # Logits for the position currently being predicted.
            output = output[eval_iter, 0, :]
            values = torch.topk(output, K).values.tolist()
            indices = torch.topk(output, K).indices.tolist()
            # NOTE(review): raw logits are used as random.choices weights —
            # negative logits would raise; presumably safe only because K=1.
            next_word_index = random.choices(indices, values, k=1)[0]
            next_word = index_to_word[next_word_index]
            # NOTE(review): on the last iteration eval_iter + 1 == max_seq_len,
            # which indexes past the end of input_seq and raises IndexError if
            # <end> was not produced earlier.
            input_seq[:, eval_iter + 1] = next_word_index
            if next_word == '<end>':
                break
            predicted_sentence.append(next_word)
    print("\n")
    print("Predicted caption : ")
    # Capitalise the first word (fails on an empty caption — see notes above).
    predicted_sentence[0]=predicted_sentence[0][0].upper()+predicted_sentence[0][1:]
    sentence = " ".join(predicted_sentence + ['.'])
    print(sentence)
    return {'prediction': f'{sentence}'}
# main driver function
if __name__ == '__main__':
    # run() method of Flask class runs the application
    # on the local development server.
    # Reloader disabled so the model/pickle loading above runs only once.
    app.run(port=5001,use_reloader=False)
| razerspeed/Image-Caption-Generation | server2.py | server2.py | py | 6,750 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "pandas.set_option",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
... |
26040960706 | from __future__ import annotations
from textwrap import dedent
from typing import Callable
import pytest
from pants.backend.python.goals.publish import (
PublishPythonPackageFieldSet,
PublishPythonPackageRequest,
rules,
)
from pants.backend.python.macros.python_artifact import PythonArtifact
from pants.backend.python.target_types import PythonDistribution, PythonSourcesGeneratorTarget
from pants.backend.python.util_rules import pex_from_targets
from pants.core.goals.package import BuiltPackage, BuiltPackageArtifact
from pants.core.goals.publish import PublishPackages, PublishProcesses
from pants.core.util_rules.config_files import rules as config_files_rules
from pants.engine.addresses import Address
from pants.engine.fs import EMPTY_DIGEST
from pants.engine.process import Process
from pants.testutil.process_util import process_assertion
from pants.testutil.python_rule_runner import PythonRuleRunner
from pants.testutil.rule_runner import QueryRule
from pants.util.frozendict import FrozenDict
@pytest.fixture
def rule_runner() -> PythonRuleRunner:
    """Rule runner wired with everything needed to compute twine publish processes."""
    rule_runner = PythonRuleRunner(
        preserve_tmpdirs=True,
        rules=[
            *config_files_rules(),
            *pex_from_targets.rules(),
            *rules(),
            # Allow tests to query PublishProcesses for a publish request.
            QueryRule(PublishProcesses, [PublishPythonPackageRequest]),
        ],
        target_types=[PythonSourcesGeneratorTarget, PythonDistribution],
        objects={"python_artifact": PythonArtifact},
    )
    return set_options(rule_runner)
def set_options(rule_runner: PythonRuleRunner, options: list | None = None) -> PythonRuleRunner:
    """Apply *options* plus the twine credential env vars; returns the runner.

    The env sets a default TWINE_USERNAME and a @pypi-specific
    username/password override, which the tests assert on.
    """
    rule_runner.set_options(
        options or [],
        env_inherit={"PATH", "PYENV_ROOT", "HOME"},
        env={
            "TWINE_USERNAME": "whoami",
            "TWINE_USERNAME_PYPI": "whoareyou",
            "TWINE_PASSWORD_PYPI": "secret",
        },
    )
    return rule_runner
@pytest.fixture
def packages():
    """A single built package carrying an sdist and a wheel artifact."""
    return (
        BuiltPackage(
            EMPTY_DIGEST,
            (
                BuiltPackageArtifact("my-package-0.1.0.tar.gz"),
                BuiltPackageArtifact("my_package-0.1.0-py3-none-any.whl"),
            ),
        ),
    )
def project_files(
    skip_twine: bool = False, repositories: list[str] | None = None
) -> dict[str, str]:
    """Return the files of a small in-memory project with one publishable dist.

    Args:
        skip_twine: Value for the `skip_twine` field on the python_distribution.
        repositories: Repositories for the dist; defaults to @pypi and @private.

    Returns:
        Mapping of relative path -> file content, suitable for
        ``rule_runner.write_files``.
    """
    # Use a None sentinel instead of a mutable list default, which would be
    # shared across calls (flake8-bugbear B006). The rendered BUILD content
    # is unchanged because the same list repr is interpolated below.
    if repositories is None:
        repositories = ["@pypi", "@private"]
    return {
        "src/BUILD": dedent(
            f"""\
            python_sources()
            python_distribution(
                name="dist",
                provides=python_artifact(
                    name="my-package",
                    version="0.1.0",
                ),
                repositories={repositories!r},
                skip_twine={skip_twine},
            )
            """
        ),
        "src/hello.py": """print("hello")""",
        ".pypirc": "",
    }
def request_publish_processes(rule_runner: PythonRuleRunner, packages) -> PublishProcesses:
    """Build a publish request for the src:dist target and resolve its processes."""
    tgt = rule_runner.get_target(Address("src", target_name="dist"))
    fs = PublishPythonPackageFieldSet.create(tgt)
    return rule_runner.request(PublishProcesses, [fs._request(packages)])
def assert_package(
    package: PublishPackages,
    expect_names: tuple[str, ...],
    expect_description: str,
    expect_process: Callable[[Process], None] | None,
) -> None:
    """Verify one publish package: artifact names, description, and process.

    When ``expect_process`` is None the package must carry no process at all;
    otherwise the callable is applied to the wrapped ``Process`` so the caller
    can assert on argv/env.
    """
    assert package.names == expect_names
    assert package.description == expect_description
    # Guard clause: nothing expected means nothing may be present.
    if not expect_process:
        assert package.process is None
        return
    assert package.process
    expect_process(package.process.process)
def test_twine_upload(rule_runner, packages) -> None:
    """One twine upload process per configured repository, with per-repo creds."""
    rule_runner.write_files(project_files(skip_twine=False))
    result = request_publish_processes(rule_runner, packages)
    assert len(result) == 2
    # @pypi: username/password come from the _PYPI-suffixed env overrides.
    assert_package(
        result[0],
        expect_names=(
            "my-package-0.1.0.tar.gz",
            "my_package-0.1.0-py3-none-any.whl",
        ),
        expect_description="@pypi",
        expect_process=process_assertion(
            argv=(
                "./twine.pex_pex_shim.sh",
                "upload",
                "--non-interactive",
                "--config-file=.pypirc",
                "--repository=pypi",
                "my-package-0.1.0.tar.gz",
                "my_package-0.1.0-py3-none-any.whl",
            ),
            env=FrozenDict({"TWINE_USERNAME": "whoareyou", "TWINE_PASSWORD": "secret"}),
        ),
    )
    # @private: only the default username is set, no password.
    assert_package(
        result[1],
        expect_names=(
            "my-package-0.1.0.tar.gz",
            "my_package-0.1.0-py3-none-any.whl",
        ),
        expect_description="@private",
        expect_process=process_assertion(
            argv=(
                "./twine.pex_pex_shim.sh",
                "upload",
                "--non-interactive",
                "--config-file=.pypirc",
                "--repository=private",
                "my-package-0.1.0.tar.gz",
                "my_package-0.1.0-py3-none-any.whl",
            ),
            env=FrozenDict({"TWINE_USERNAME": "whoami"}),
        ),
    )
def test_skip_twine(rule_runner, packages) -> None:
    """`skip_twine` on the target yields a process-less entry; the global option drops it."""
    rule_runner.write_files(project_files(skip_twine=True))
    result = request_publish_processes(rule_runner, packages)
    assert len(result) == 1
    assert_package(
        result[0],
        expect_names=(
            "my-package-0.1.0.tar.gz",
            "my_package-0.1.0-py3-none-any.whl",
        ),
        expect_description="(by `skip_twine` on src:dist)",
        expect_process=None,
    )
    # Skip twine globally from config option.
    # NOTE(review): this resets options without re-applying set_options' env;
    # fine here because no process is expected at all.
    rule_runner.set_options(["--twine-skip"])
    result = request_publish_processes(rule_runner, packages)
    assert len(result) == 0
@pytest.mark.parametrize(
    "options, cert_arg",
    [
        pytest.param(
            [],
            None,
            id="No ca cert",
        ),
        pytest.param(
            ["--twine-ca-certs-path={}"],
            "--cert=ca_certs.pem",
            id="[twine].ca_certs_path",
        ),
        # This test needs a working ca bundle to work. Verified manually for now.
        # pytest.param(
        #     ["--ca-certs-path={}"],
        #     "--cert=ca_certs.pem",
        #     id="[GLOBAL].ca_certs_path",
        # ),
    ],
)
def test_twine_cert_arg(rule_runner, packages, options, cert_arg) -> None:
    """The configured CA-certs path is forwarded to twine as --cert (or omitted)."""
    # write_files returns the created paths; grab the cert file's path to
    # interpolate into the option template.
    ca_cert_path = rule_runner.write_files({"conf/ca_certs.pem": ""})[0]
    rule_runner.write_files(project_files(repositories=["@private"]))
    set_options(rule_runner, [opt.format(ca_cert_path) for opt in options])
    result = request_publish_processes(rule_runner, packages)
    assert len(result) == 1
    process = result[0].process
    assert process
    if cert_arg:
        assert cert_arg in process.process.argv
    else:
        assert not any(arg.startswith("--cert") for arg in process.process.argv)
| pantsbuild/pants | src/python/pants/backend/python/goals/publish_test.py | publish_test.py | py | 6,774 | python | en | code | 2,896 | github-code | 6 | [
{
"api_name": "pants.testutil.python_rule_runner.PythonRuleRunner",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pants.core.util_rules.config_files.rules",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pants.backend.python.util_rules.pex_from_targets.rule... |
585614447 | from pathlib import Path
# Project root.
# NOTE(review): Path("...").resolve() is relative to the *current working
# directory*, so this only points at the repo root when run from inside it.
ROOT_FOLDER = Path("STOCK_VOLATILITY_NEW").resolve().parent
# Raw and processed dataset files.
DATASET_DIR = ROOT_FOLDER / "data"
ALL_DATA_DIR = DATASET_DIR / "all_data.csv"
ALL_DATA_NEW_DIR = DATASET_DIR / "all_data_new.csv"
UNPROCESSED_DATA = DATASET_DIR / "index_funds_data.csv"
FORMATTED_DATA = DATASET_DIR / "formatted_data.csv"
DATA_FOR_ANALYSIS = DATASET_DIR / "new_formatted_data.csv"
EARNINGS_DATA = DATASET_DIR / "earnings.csv"
CPI_DATA = DATASET_DIR / "cpi.csv"
MARKET_SCHEDULE = DATASET_DIR / "nasdaq_schedule.csv"
# Train/validation/test splits.
TRAIN_DATA_PATH = DATASET_DIR / "train.csv"
VALID_DATA_PATH = DATASET_DIR / "valid.csv"
TEST_DATA_PATH = DATASET_DIR / "test.csv"
# Sector ETF tickers covered by the model.
TICKERS = ["XLK", "XLP", "XLF", "XLV", "XLE", "XLI", "XLU"]
# Forecast horizon labels (1..5 trading days ahead).
FORECAST_HORIZONS = ["one_days", "two_days", "three_days", "four_days", "five_days"]
ADDITIONAL_OUTPUT_COLS = ["volatility_target", "date", "ticker", "idx"]
# Training artifacts.
LIGHTNING_LOGS_DIR = ROOT_FOLDER / "lightning_logs"
IMAGE_PATH = ROOT_FOLDER / "img"
MODEL_DATA = ROOT_FOLDER / "model_data"
BEST_MODEL_PATH = MODEL_DATA / "checkpoint.ckpt"
TIMESERIES_DATASET_PARAMS = MODEL_DATA / "ts_dataset_params.joblib"
# US presidential election dates (ISO format) used as calendar features.
PRES_ELECTION_DATES = [
    "2004-11-02",
    "2008-11-04",
    "2012-11-06",
    "2016-11-08",
    "2020-11-03",
]
# US midterm election dates.
MIDTERM_ELECTION_DATES = [
    "2006-11-07",
    "2010-11-02",
    "2014-11-04",
    "2018-11-06",
    "2022-11-08",
]
ELECTION_DATES = PRES_ELECTION_DATES + MIDTERM_ELECTION_DATES
| vladkramarov/index_fund_volatility | core.py | core.py | py | 1,412 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 3,
"usage_type": "call"
}
] |
71483369148 | """
Created by kevin-desktop, on the 18/02/2023
ADD sentiment columns to a sample Excel sheet.
"""
import numpy as np
import pandas as pd
import tqdm
from asba_model import run_models
# Load the pickled sample dataframe.
path = "data/marco_sample.pkl"
df = pd.read_pickle(path)
# Per-row sentiment results keyed by dataframe index; track the widest row so
# the result columns can be named Term1..TermN.
dic = {}
max_nb_terms = 0
for row in tqdm.tqdm(df.itertuples(name=None)):
    # Positional tuple fields: 0 = index, 1 = title?, 4 = abstract?,
    # 13 = comma-separated keyword terms.
    # NOTE(review): column positions inferred from usage -- confirm against
    # the pickled dataframe's schema. `ti` is currently unused.
    ind = row[0]
    ti = row[1]
    ab = row[4]
    dtkey = row[13].split(", ")
    # One sentiment per keyword term, computed from the abstract.
    sentiments = run_models(ab, dtkey)
    if max_nb_terms < len(sentiments):
        max_nb_terms = len(sentiments)
    dic[ind] = sentiments
# Expand the per-row sentiment lists into TermN columns aligned on the index.
df_sent = pd.DataFrame.from_dict(dic, orient="index")
df_sent.rename(columns={i: f'Term{i + 1}' for i in range(0, max_nb_terms)}, inplace=True)
df = pd.concat([df, df_sent], axis=1)
# NOTE(review): this list is computed but never used.
lst_columns_w_term = [col for col in df.columns if 'Term' in col]
# Fill missing term slots with the string '0' (not numeric zero).
df.fillna('0', inplace=True)
df.to_excel("data/sample_marco.xlsx")
| KnuxV/SentA | add_sentiment_to_dataframe.py | add_sentiment_to_dataframe.py | py | 868 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_pickle",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "asba_model.run_models",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.f... |
39430188225 | import json
import pandas as pd
import yfinance as yf
import pytz
from datetime import datetime, timedelta
import time
# Read forex pairs from JSON file
with open('forex_pairs.json', 'r') as file:
    forex_pairs = json.load(file)

# Chart time frame and reporting time zone.
timeframe = '1h'
timezone = 'Africa/Nairobi'

# Number of hourly candles to look back over when downloading history.
period = 500

# Get the current time in the Kenya time zone.
kenya_tz = pytz.timezone(timezone)
current_time = datetime.now(kenya_tz)

# Data retrieval window: the most recent `period` hours.
end_time = current_time
start_time = current_time - timedelta(hours=period)

# Save the signals to a file. The per-pair loop runs *inside* this `with`
# block: previously it sat outside, so every `file.write(...)` below was
# issued against an already-closed handle and would raise ValueError.
filename = 'MA_signals.txt'
with open(filename, 'w') as file:
    file.write(f"Today: {end_time}\n\n")

    # Generate MA signals for each forex pair.
    for pair in forex_pairs:
        symbol = pair + '=X'  # Yahoo Finance FX ticker convention.

        # Download historical price data using yfinance, retrying on errors.
        # NOTE(review): a persistently failing download loops forever;
        # consider a bounded retry count with backoff.
        data = None
        while data is None:
            try:
                data = yf.download(symbol, start=start_time, end=end_time, interval=timeframe)
            except Exception as e:
                print("Error occurred:", e)
                print("Retrying...")

        if not data.empty and len(data) >= 1:
            # Closing prices, oldest first.
            close_prices = data['Close'].tolist()

            # Moving-average window sizes.
            window_short = 20  # Short-term moving average window size
            window_long = 50  # Long-term moving average window size

            # Simple moving averages over the trailing windows.
            moving_avg_short = sum(close_prices[-window_short:]) / window_short
            moving_avg_long = sum(close_prices[-window_long:]) / window_long

            # Get the latest closing price.
            latest_price = close_prices[-1]

            # Take-profit distance as a fraction of price (0.2%).
            take_profit_ratio = 0.002

            # Signal: trade in the direction of the short/long MA crossover,
            # confirmed by price relative to the short MA.
            if moving_avg_short > moving_avg_long and latest_price > moving_avg_short:
                signal = "Buy"
            elif moving_avg_short < moving_avg_long and latest_price < moving_avg_short:
                signal = "Sell"
            else:
                signal = "Hold"

            # Take-profit level relative to the latest price.
            if signal == "Sell":
                take_profit = latest_price - (latest_price * take_profit_ratio)
            elif signal == "Buy":
                take_profit = latest_price + (latest_price * take_profit_ratio)
            else:
                take_profit = 0.0

            rounded_number = round(take_profit, 6)
            file.write(f"MA Signals for {pair} ({timeframe} timeframe): {signal} take_profit: {rounded_number}\n")
        else:
            print("No data available to generate a signal.")
| Nurain313/N1l8w5f9s2g5 | Trash/ma.py | ma.py | py | 3,229 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pytz.timezone",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
... |
71396397949 | # Import packages
import gpxpy
import numpy as np
# Read gpx-file
gpxFile = "yourfile.gpx"
gpx_file = open(gpxFile, 'r')
gpx = gpxpy.parse(gpx_file)
# Calculate speeds between points
speed = []
for track in gpx.tracks:
for segment in track.segments:
for point_no, point in enumerate(segment.points):
speed.append(point.speed_between(segment.points[point_no - 1]))
# Upper limit is defined as 3x the 75% quantile, this can be tweaked according to the GPS errors encountered
upperLimit = 3*(np.quantile(speed, q = 0.75))
# Find elements above the threshold
indices = [
index for index, item in enumerate(speed)
if item > upperLimit
]
pointsRemoved = 0
while len(indices) > 0:
gpxpy.gpx.GPXTrackSegment.remove_point(gpx,indices[0])
pointsRemoved = pointsRemoved + 1
# Calculate speeds between points
speed = []
for track in gpx.tracks:
for segment in track.segments:
for point_no, point in enumerate(segment.points):
speed.append(point.speed_between(segment.points[point_no - 1]))
indices = [
index for index, item in enumerate(speed)
if item > upperLimit
]
print(pointsRemoved)
# Write the corrected GPX file
outputFile = gpxFile[:-4] + "_corrected.gpx"
with open(outputFile, "w") as f:
f.write( gpx.to_xml()) | Haukiii/simpleGpxRunCorrector | simpleGPXrunCorrector.py | simpleGPXrunCorrector.py | py | 1,329 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "gpxpy.parse",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.quantile",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "gpxpy.gpx.GPXTrackSegment.remove_point",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "gpxp... |
22758733002 | # (C) StackState 2020
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
@pytest.fixture(scope='session')
def sts_environment():
    """Session-wide check config pointing at the deployed CSV health file."""
    return {
        'type': 'csv',
        'health_file': '/home/static_health/health.csv',
        'delimiter': ',',
        'collection_interval': 15
    }
@pytest.fixture(scope="class")
def instance(request):
cfg = {
'type': 'csv',
'health_file': 'health.csv',
'delimiter': ',',
'collection_interval': 15
}
request.cls.instance = cfg
| StackVista/stackstate-agent-integrations | static_health/tests/conftest.py | conftest.py | py | 560 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "pytest.fixture",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 17,
"usage_type": "call"
}
] |
71236766909 | import struct
from enum import Enum
import typing as ty
from mate.net.nao_data import Data, DebugValue, DebugImage
# Sentinel key meaning "not subscribed to anything".
NO_SUBSCRIBE_KEY = "none"
# Generic key type variable used in typing annotations below.
K = ty.TypeVar('K')
def split(predicate: ty.Callable[[K], bool], dictionary: ty.Dict[K, dict]):
    """Partition *dictionary* by applying *predicate* to each key.

    Returns a pair ``(matching, rest)``: the entries whose key satisfies the
    predicate, and all remaining entries, preserving insertion order.
    """
    matching = {}
    rest = {}
    for key, value in dictionary.items():
        bucket = matching if predicate(key) else rest
        bucket[key] = value
    return matching, rest
class DebugMsgType(Enum):
    """Wire opcodes for the debug protocol."""
    subscribe = 0
    unsubscribe = 1
    update = 2
    request_list = 3
    list = 4
    subscribe_bulk = 5
    image = 6
class ConfigMsgType(Enum):
    """Wire opcodes for the configuration protocol."""
    set = 0  # Sets a given key to a given value (at runtime)
    get_mounts = 1  # ask for send_mounts, containing all mounts
    get_keys = 2  # ask for send_keys of a given key
    save = 3  # saves the current config
    send_keys = 4  # containing key, value
    send_mounts = 5  # containing filename, key
class ConnectionStatusType(Enum):
    """High-level states of the connection to the robot."""
    disconnected = 0
    connected = 1
    connection_lost = 2
    connection_refused = 3
class Message:
    """Base container for a protocol message: type, body, length and version."""

    def __init__(self,
                 type: DebugMsgType,
                 body: str = "",
                 length: int = None,
                 version: int = 1):
        self.type = type
        self.body = body
        # When no explicit length is supplied, default it to the body size.
        if length is None:
            self.length = max(0, len(body))
        else:
            self.length = length
        self.version = version

    def __str__(self):
        # Human-readable form: ClassName|vVERSION|TYPE|LENGTH|BODY
        cls_name = type(self).__name__
        return f"{cls_name}|v{self.version}|{self.type.name}|{self.length}|{self.body}"
class ConfigMessage(Message):
    """Config-protocol message framed as b'CONF' + version + type + length + body."""
    def __init__(self,
                 type: DebugMsgType,
                 body: str = "",
                 length: int = None,
                 version: int = 1):
        # NOTE(review): the annotation says DebugMsgType, but config messages
        # carry ConfigMsgType values -- confirm the intended annotation.
        super(ConfigMessage, self).__init__(type, body, length, version)

    @staticmethod
    def header_from_bytes(msg):
        """Parse the 8-byte header; returns (magic, version, type, size).

        Implicitly returns None when fewer than 8 bytes are available --
        callers must handle that case.
        """
        if len(msg) >= 8:
            # Little-endian: 4-byte magic, uint8 version, uint8 type,
            # uint16 payload size.
            fmt = "<4sBBH"
            (msg_head, raw_version, raw_type, msg_size) = struct.unpack(
                fmt, msg[:8])
            return (msg_head, raw_version, ConfigMsgType(raw_type), msg_size)

    def toBytes(self):
        """Serialize to the 8-byte wire header followed by the UTF-8 body.

        NOTE(review): the format sizes the body by len(self.body) but packs
        self.length into the header -- the two can disagree when an explicit
        length was passed to the constructor.
        """
        msg_format = "<4sBBH{}s".format(len(self.body))
        return struct.pack(msg_format, b'CONF', self.version, self.type.value,
                           self.length, self.body.encode())
class DebugMessage(Message):
    """Debug-protocol message framed as b'DMSG' + 16-byte header + body."""
    def __init__(self,
                 type: DebugMsgType,
                 body: str = "",
                 length: int = None,
                 version: int = 1):
        super(DebugMessage, self).__init__(type, body, length, version)

    def toBytes(self):
        """Serialize to the 16-byte wire header followed by the UTF-8 body."""
        # Little-endian: 4-byte magic, int8 version, int8 type, 2 pad bytes,
        # uint32 payload length, 4 pad bytes.
        fmt = "<4sbbxxIxxxx{}s".format(self.length)
        return struct.pack(fmt, b'DMSG', self.version, self.type.value,
                           self.length, self.body.encode())

    @staticmethod
    def header_from_bytes(msg):
        """Parse the 16-byte header; returns (magic, version, type, size).

        Implicitly returns None when fewer than 16 bytes are available --
        callers must handle that case.
        """
        if len(msg) >= 16:
            fmt = "<4sbbxxIxxxx"
            (msg_head, raw_version, raw_type, msg_size) = struct.unpack(
                fmt, msg[:16])
            return (msg_head, raw_version, DebugMsgType(raw_type), msg_size)

    @staticmethod
    def to_body(type, msg):
        """Return the payload: raw bytes for image messages, decoded text otherwise."""
        if type == DebugMsgType.image:
            return msg
        else:
            return msg.decode(errors='ignore')

    @staticmethod
    def get_image(body):
        """Unpack an image payload into (key, width, height, pixel bytes)."""
        # Payload prefix: uint16 width, uint16 height, uint16 key length,
        # followed by the key string and then the raw image data.
        fmt = "<HHH"
        (width, height, key_length) = struct.unpack(fmt, body[:6])
        return body[6:6 + key_length].decode(), width, height, body[
            6 + key_length:]

    @staticmethod
    def parse_data(d: dict) -> Data:
        """Build a DebugImage or DebugValue from a decoded message dict."""
        if d.get("isImage", False):
            return DebugImage(
                d["key"],
                d.get("width", 0),
                d.get("height", 0),
                d.get("value", b'')
            )
        else:
            return DebugValue(
                d["key"],
                d.get("timestamp", 0),
                d.get("value", 0)
            )
| humanoid-robotics-htl-leonding/robo-ducks-core | tools/mate/mate/net/utils.py | utils.py | py | 3,976 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "typing.TypeVar",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "typing.Callable",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "typing.Dict",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "enum.Enum",
"... |
38053163844 | from django.conf.urls import patterns, include, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib.sitemaps.views import sitemap
from django.contrib.sitemaps import Sitemap
from django.contrib import admin
from django.conf import settings
# Register all installed apps' admin modules (legacy Django admin setup).
admin.autodiscover()
# Locale-prefixed URLs (e.g. /en/, /de/) via i18n_patterns.
urlpatterns = i18n_patterns('',
    url(r'^$', 'core.views.home', name='home'),
    url(r'^new/$', 'core.views.new', name='new'),
    url(r'^statistics/$', 'core.views.statistics', name='statistics'),
    # Tip lookup by key; the no-trailing-slash variants redirect.
    url(r'^(?P<key>\w+-\w+-\w+-\w+-\w+)$', 'core.views.tip_redir'),
    url(r'^(?P<key>\w+-\w+-\w+-\w+-\w+)/$', 'core.views.tip'),
    url(r'^(?P<key>\w+-\w+-\w+)$', 'core.views.tip_redir', name='tip_redir'),
    url(r'^(?P<key>\w+-\w+-\w+)/$', 'core.views.tip', name='tip'),
    url(r'^gratuity-example/$', 'core.views.tips_example', name='tips_example'),
    # Wallet views plus downloads in several formats (all share one view).
    url(r'^w/(?P<key>\w+)/$', 'core.views.wallet', name='wallet'),
    url(r'^w/(?P<key>\w+)/comments/$', 'core.views.comments', name='comments'),
    url(r'^w/(?P<key>\w+)/pdf/$', 'core.views.download', {'format': "pdf"}, name='download'),
    url(r'^w/(?P<key>\w+)/pdf-us/$', 'core.views.download', {'format': "pdf", "page_size":"US"}, name='download'),
    url(r'^w/(?P<key>\w+)/odt/$', 'core.views.download', {'format': "odt"}, name='download'),
    url(r'^w/(?P<key>\w+)/png/$', 'core.views.download', {'format': "png"}, name='download'),
    url(r'^w/(?P<key>\w+)/wajax/$', 'core.views.wajax', name='wajax'),
)
# Non-localized URLs: admin and QR-code rendering.
urlpatterns += patterns('',
    url(r'^admin/', include(admin.site.urls)),
    # NOTE(review): this pattern lacks a leading '^' anchor, so it matches
    # anywhere in the path -- confirm whether that is intended.
    url(r'qrcode/(?P<key>\w+)/$','core.views.qrcode_view', name='qrcode'),
)
if settings.BCTIP_MOD:
import bctip.urls_custom
urlpatterns += bctip.urls_custom.urlpatterns | norn/bctip | bctip/urls.py | urls.py | py | 1,711 | python | en | code | 13 | github-code | 6 | [
{
"api_name": "django.contrib.admin.autodiscover",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.i18n.i18n_patterns",
"line_number": 9,
"usage_type": "call"
},
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.