seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
10478855527 | from PIL import Image as im
# Split 'cave.jpg' into two images by the parity of each pixel's coordinate
# product (Python Challenge level 11 style "look hard" puzzle).
o = im.open('cave.jpg')
# Open two new images of the same size.
n1 = im.new(o.mode,o.size)
n2 = im.new(o.mode,o.size)
# Grab the x and y maximum coords of the original
xmax, ymax = o.size[0], o.size[1]
# Iterate over every pixel in the image, and decide if the product of the
# coordinates is even or odd. If it is even, add the pixel to one of the
# images. Otherwise, add it to the other.
for i in range(xmax):
    for j in range(ymax):
        if i*j % 2 == 0:
            n1.putpixel((i,j),o.getpixel((i,j)))
        if i*j % 2 == 1:
            n2.putpixel((i,j),o.getpixel((i,j)))
# Show both images (and look hard!)
n1.show()
n2.show() | nancejk/PythonChallenge | 11.py | 11.py | py | 653 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "PIL.Image.open",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 2,
"usage_type": "name"
},
{
"api_name": "PIL.Image.new",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 5,... |
74536701223 | from django.shortcuts import render
from .forms import DataFrameForm
from main.utils import ContractAlternatives
import pandas as pd
from .utils import convert_date_format
# Create your views here.
def homepage(request):
    """Render the static landing page."""
    return render(request, 'main/homepage.html')
def calculo_rapido(request):
    """Quick-calculation view.

    GET (or an invalid POST) renders the input form; a valid POST builds a
    ``ContractAlternatives`` from the cleaned form data and renders its
    resume table as a Bootstrap-styled HTML table.
    """
    if request.method == 'POST':
        form = DataFrameForm(request.POST)
        if form.is_valid():
            cleaned = form.cleaned_data
            date = str(cleaned['date'])
            # The BCB code field is "<code> <description>"; keep the numeric code.
            CODE = int(str(cleaned['bcb_code']).split()[0])
            contract = ContractAlternatives(
                cleaned['months'],
                cleaned['interest_rate'],
                cleaned['loan_amount'],
                cleaned['monthly_payment'],
                CODE,
                date,
            )
            print(date)
            table_html = contract.resume_table().to_html(
                classes='table table-striped table-hover', index=False, border=0, justify='center')
            return render(request, 'main/calculo-rapido-result.html', {'df': table_html})
    # Fresh form for GET requests and for invalid submissions.
    return render(request, 'main/calculo-rapido-form.html', {'form': DataFrameForm()})
| cmichellbs/sistema | main/views.py | views.py | py | 1,430 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "forms.DataFrameForm",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "main.utils.ContractAlternatives",
"line_number": 25,
"usage_type": "call"
},
{
"api_n... |
25323402636 | import json
import pandas as pd
from django.conf import settings
from django.http.response import JsonResponse
from django_celery_results.models import TaskResult
from rest_framework import mixins, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import NotFound, PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from ml.permissions import HasAccess, IsOwner
from .models import AlgorithmData, Clustering
from .serializers import (
AlgorithmDataListSerializer,
AlgorithmDataSerializer,
ClusteringSerializer,
)
from .tasks import gaussian_mixture, kmeans, spectral_clustering
class ClusteringViewset(
    mixins.RetrieveModelMixin, mixins.ListModelMixin, mixins.CreateModelMixin, viewsets.GenericViewSet
):
    """Retrieve/list/create endpoints for Clustering objects.

    Requires an authenticated user who also passes the HasAccess check.
    """
    queryset = Clustering.objects.all()
    serializer_class = ClusteringSerializer
    permission_classes = [IsAuthenticated & HasAccess]
    # def perform_create(self, serializer):
    #     serializer.save(creator=self.request.user)
class AlgorithmDataViewset(
    mixins.RetrieveModelMixin,
    mixins.DestroyModelMixin,
    mixins.ListModelMixin,
    mixins.CreateModelMixin,
    viewsets.GenericViewSet,
):
    """CRUD endpoints for AlgorithmData nested under a Clustering, plus a
    ``start`` action that dispatches the configured algorithm to Celery.
    """

    queryset = AlgorithmData.objects.all()
    serializer_class = AlgorithmDataSerializer
    permission_classes = [IsAuthenticated & IsOwner]

    def get_clustering(self):
        """Resolve the parent Clustering from the nested URL kwarg, or 404."""
        try:
            clustering = Clustering.objects.get(pk=self.kwargs["clustering_pk"])
        except (Clustering.DoesNotExist, ValueError):
            # Was a bare `except:`; narrowed so unrelated errors (including
            # KeyboardInterrupt/SystemExit) are no longer swallowed.
            raise NotFound({"clustering": "Not found."})
        return clustering

    def perform_create(self, serializer):
        # Attach new AlgorithmData to the Clustering taken from the URL.
        serializer.save(clustering=self.get_clustering())

    def get_queryset(self):
        # Listing is scoped to the parent clustering; detail actions use the
        # unfiltered queryset.
        if self.action == "list":
            return self.get_clustering().algorithmdata_set.all()
        return self.queryset

    def list(self, request, *args, **kwargs):
        """List algorithm data; ``?ids=...`` filters by pk and skips pagination."""
        ids = request.query_params.getlist("ids", None)
        if ids:  # getlist returns [] when absent, so a truthiness check suffices
            queryset = self.get_queryset().filter(pk__in=ids)
            serializer = self.serializer_class(queryset, many=True)
        else:
            queryset = self.filter_queryset(self.get_queryset())
            page = self.paginate_queryset(queryset)
            if page is not None:
                serializer = self.get_serializer(page, many=True)
                return self.get_paginated_response(serializer.data)
            serializer = self.get_serializer(queryset, many=True)
        return Response(serializer.data)

    def get_serializer_class(self):
        # Lists use the lighter serializer.
        if self.action == "list":
            return AlgorithmDataListSerializer
        return self.serializer_class

    @action(detail=True, methods=["post"])
    def start(self, request, pk, *args, **kwargs):
        """Queue the instance's algorithm as a Celery task.

        Re-dispatching is only allowed when the previous task failed.
        Raises PermissionDenied otherwise.
        """
        instance = self.get_object()
        if instance.task_id is not None:
            try:
                task_instance = TaskResult.objects.get(task_id=instance.task_id)
            except TaskResult.DoesNotExist:
                # Task was queued but no result row exists yet.
                raise PermissionDenied("Cannot start. Task has been sent already.")
            if task_instance.status != "FAILURE":
                raise PermissionDenied("Cannot start. Task is finished successfully.")
        # Map the algorithm code to its Celery task. task_id is pre-initialised
        # so an unknown code no longer raises NameError below.
        task_id = None
        if instance.algorithm == 0:
            task_id = kmeans.delay(pk)
        elif instance.algorithm == 1:
            task_id = spectral_clustering.delay(pk)
        elif instance.algorithm == 2:
            task_id = gaussian_mixture.delay(pk)
        if task_id:
            # NOTE: .delay() returns an AsyncResult; it is coerced to its id
            # string when the model field is saved (matches original behavior).
            instance.task_id = task_id
            instance.save()
        return Response("Started")
| mateusz28011/ml-api | ml/views.py | views.py | py | 4,070 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "rest_framework.mixins.RetrieveModelMixin",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.mixins",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "rest_framework.mixins.ListModelMixin",
"line_number": 25,
"usage_type... |
21141929383 | import socket
import time
from threading import Thread
from cryptography.fernet import Fernet
# NOTE(review): this rebinds the module name `socket` to the listening server
# socket; the rest of the file relies on that binding, so the socket module's
# other attributes are unreachable after this line.
socket = socket.socket()
socket.bind(('', 9885))  # listen on all interfaces, port 9885
socket.listen(50)  # backlog of 50 pending connections
clients = []  # list of [connection, login] pairs for every connected client
# Load the Fernet key from disk, generating and persisting a new one on the
# first run. Context managers fix the original's leaked file handles (both
# the read and the write handle were opened and never closed).
try:
    with open("key.txt", "r", encoding="utf-8") as key_file:
        key = key_file.read().encode()
except FileNotFoundError:
    key = Fernet.generate_key()
    with open("key.txt", "w", encoding="utf-8") as key_file:
        key_file.write(key.decode("utf-8"))
cipher = Fernet(key)
print(f"Ключ шифрования сервера: {key.decode('utf-8')}")
def accept_clients():
    """Accept loop: register each new client and spawn its handler thread.

    Protocol: the client first sends its encrypted login; the server replies
    with the encrypted current client count. The login "server" is reserved
    and such connections are dropped immediately.
    """
    while True:
        client = socket.accept()  # (connection, address) tuple
        login = cipher.decrypt(client[0].recv(5120)).decode("utf-8")
        if login == "server":
            # Reserved login used for server-originated broadcasts.
            client[0].close()
            continue
        # Announce the newcomer to everyone already connected.
        Thread(target=send_to_other, args=("server", f"Новый пользователь в чате: {login}", "")).start()
        client[0].send(cipher.encrypt(str(len(clients)).encode()))
        print(f"Новый пользователь в чате: {client[1]} ({login})")
        client = [client[0], login]  # re-shape to [connection, login]
        clients.append(client)
        Thread(target=client_handler, args=(client,)).start()
        time.sleep(0.5)  # throttle the accept loop
def send_to_other(login, message, sender):
    """Broadcast ``login|message`` (encrypted) to every client except ``sender``.

    ``sender`` is the login of the originating client; pass "" to reach all.
    """
    encoded = f"{login}|{message}".encode()
    for conn, client_login in clients:
        if client_login == sender:
            continue
        conn.send(cipher.encrypt(encoded))
def client_handler(client):
    """Per-client receive loop: relay each decrypted message to the others.

    ``client`` is a [connection, login] pair from ``clients``. The client is
    removed from the roster when the socket errors out or sends EOF.
    """
    conn = client[0]
    while True:
        try:
            data = conn.recv(5120)
        except Exception:
            # Socket failed: drop the client and announce the departure.
            clients.remove(client)
            Thread(target=send_to_other, args=("server", f"Пользователь {client[1]} вышел из чата", ""),
                   daemon=True).start()
            print(f"Потерял соединение с {client[1]}")
            break
        if not data:
            # Orderly shutdown from the peer.
            clients.remove(client)
            break
        # Wire format: "<login>|<message>"; the message itself may contain '|'.
        data = cipher.decrypt(data).decode("utf-8").split("|")
        login = data[0]
        msg = "|".join(data[1:])
        # print(login, msg)
        Thread(target=send_to_other, args=(login, msg, client[1])).start()
Thread(target=accept_clients).start()
| miniusercoder/irc | server.py | server.py | py | 2,119 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "socket.socket",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "socket.bind",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "socket.listen",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cryptography.fernet.Fernet.gener... |
74133895143 | from django.urls import path
from . import views
app_name = 'mainapp'

# Route table for the bookstore's main app; templates reverse these as
# "mainapp:<name>".
urlpatterns = [
    path('',views.mainpage,name='main-page'),
    path('book_detail/<int:pk>',views.bookdetailpage,name='book-detail'),
    path('author_detail/<int:pk>',views.authordetailpage,name='author-detail'),
    path('book_list/',views.booklistpage,name='book-list'),
    path('search/',views.searchbar,name='searchbar'),
    path('category/<str:category>',views.category,name='category'),
    path('item/<int:pk>',views.add_to_wishlist,name='wishlist-add'),
    path('wishlist/',views.wishlist,name='wishlist'),
    path('wishlist/remove/<int:pk>/',views.remove_from_wishlist,name='wishlist-remove'),
    path('cart/',views.cart,name='cart'),
    path('update_item/',views.updateitem,name='update_item'),
    path('checkout/',views.checkout,name='checkout'),
    path('process_order/',views.processOrder,name='process_order'),
    path('contact/',views.contactus,name='contact')
]
| Chouaib-Djerdi/Fennec-Bookstore | backend/bookstore/mainapp/urls.py | urls.py | py | 962 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
15891318383 | import json
import asyncio
from aioredis import Channel
from websockets import WebSocketCommonProtocol
from app.queue.pubsub import ConsumerHandler, ProducerHandler
from app.queue.redis import queue_conn_sub, queue_conn_pub
CONNECTED = set()
async def web_socket_chat(_, websocket: WebSocketCommonProtocol):
    """Bridge one websocket client onto a Redis pub/sub chat room.

    The first frame from the client is a JSON payload whose 'room_id' names
    the Redis channel. Consumer and producer run concurrently; when either
    finishes, the other is cancelled.
    """
    CONNECTED.add(websocket)
    channel_name = await websocket.recv()
    channel_data = json.loads(channel_name)
    print('channel_data', channel_data)
    channel = Channel(channel_data['room_id'], is_pattern=False)
    # NOTE(review): the consumer is initialised with queue_conn_pub and the
    # producer with queue_conn_sub — the pairing looks swapped; confirm
    # against ConsumerHandler/ProducerHandler before changing.
    consumer_handler = await ConsumerHandler.initialize(channel,
                                                        queue_conn_pub)
    producer_handler = await ProducerHandler.initialize(channel,
                                                        queue_conn_sub)
    consumer_task = asyncio.ensure_future(consumer_handler.handle(websocket))
    producer_task = asyncio.ensure_future(
        producer_handler.broadcast(websocket))
    # Wait for the first side to finish, then cancel the survivor.
    done, pending = await asyncio.wait(
        [consumer_task, producer_task],
        return_when=asyncio.FIRST_COMPLETED,
    )
    for task in pending:
        task.cancel()
| Arthur264/music-new.chat | app/websockets.py | websockets.py | py | 1,155 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "websockets.WebSocketCommonProtocol",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "aioredis.Channel",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "app.qu... |
34106559298 | __author__ = 'Shuo Yu'
import pymysql
import glob
import h5py
import numpy as np
def db_connect():
    """Open a MySQL connection to the local 'silverlink' DB and return a cursor.

    NOTE(review): credentials are hard-coded in source; consider moving them
    to environment variables or a config file.
    """
    return pymysql.connect(host="127.0.0.1",
                           user="shuoyu",
                           passwd="qoowpyep",
                           db="silverlink",
                           charset='utf8',
                           autocommit=True).cursor()
dict_name_id = {
}
def db_write(cur, ins):
    """Insert one accelerometer sample into test_data_stage_1b.

    ``ins`` is a CSV row: [timestamp, subject, label, sensor_id, freq,
    x_accel, y_accel, z_accel]. On a failed insert (typically a duplicate
    timestamp for the same key) the timestamp is bumped by 1 ms and retried.

    The query is parameterized (driver-side escaping) instead of the original
    string interpolation, which both quoted values incorrectly and was open
    to SQL injection.
    """
    sql = '''
        INSERT INTO test_data_stage_1b (sensor_id, subject_id, label_id, freq, timestamp, x_accel, y_accel, z_accel)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
    '''
    # 1457307771365,null,0,E8BD107D58B4,25.0,-119,163,990
    sensor_id = ins[3]
    subject_id = ins[1][-1:]  # last character of the subject field
    label_id = int(ins[2])
    freq = ins[4]
    timestamp = int(ins[0])
    x_accel = ins[5]
    y_accel = ins[6]
    z_accel = ins[7]
    while True:
        try:
            cur.execute(sql, (sensor_id, subject_id, label_id, freq,
                              timestamp, x_accel, y_accel, z_accel))
            break
        except Exception as e:
            # NOTE: retries on ANY error, as the original did; only duplicate
            # keys are actually resolved by bumping the timestamp.
            print(e)
            timestamp += 1
def csv_to_db_for_fall_1(pattern, cur):
    """Load every fall-test CSV matching ``pattern`` into the DB via db_write.

    Lines longer than 80 chars are assumed to be two records fused by a
    missing newline ("async issue"): the shared field is split on the 1e13
    boundary and both halves are written. Short lines are skipped; normal
    lines drop their second column before insertion.
    """
    for file in glob.glob(pattern):
        # file = 'C:/_test_space/6_tests_Shuo_Yu_12.5.csv'
        with open(file, 'r') as fh:
            print('Current file: %s' % file)
            for line in fh:
                if len(line) > 80: # \n async issue
                    print('async issue for %s' % line)
                    ins_1 = line.split(',')[:8]
                    # Field 7 holds "<z_accel><timestamp>" fused; split on 1e13.
                    ins_1[7] = int(ins_1[7]) // 1e13
                    ins_2 = line.split(',')[7:]
                    ins_2[0] = int(ins_2[0]) % 1e13
                    db_write(cur, ins_1)
                    db_write(cur, ins_2)
                else:
                    if len(line) < 10:
                        # Too short to be a record.
                        continue
                    else:
                        if len(line.split(',')) >= 9:
                            temp = line.split(',')
                            temp.pop(1)  # drop the extra second column
                            db_write(cur, temp)
def csv_to_db(pattern, cur):
    """Load every sample CSV matching ``pattern`` into the DB via db_write.

    Same fused-line handling as csv_to_db_for_fall_1, but normal rows are
    written as-is and rows whose second field is the literal 'null' are
    skipped instead of dropping a column.
    """
    for file in glob.glob(pattern):
        # file = 'C:/_test_space/2016-4-23_14_2_23.csv'
        with open(file, 'r') as fh:
            print('Current file: %s' % file)
            for line in fh:
                if len(line) > 80: # \n async issue
                    print('async issue for %s' % line)
                    ins_1 = line.split(',')[:8]
                    # Field 7 holds "<z_accel><timestamp>" fused; split on 1e13.
                    ins_1[7] = int(ins_1[7]) // 1e13
                    ins_2 = line.split(',')[7:]
                    ins_2[0] = int(ins_2[0]) % 1e13
                    db_write(cur, ins_1)
                    db_write(cur, ins_2)
                else:
                    if len(line) < 10:
                        # Too short to be a record.
                        continue
                    else:
                        temp = line.split(',')
                        if temp[1] != 'null':
                            db_write(cur, temp)
def matlab_to_db(pattern, cur):
    """Import FARSEEING MATLAB (.mat/HDF5) files into test_data_farseeing.

    Each file's 'tmp' matrix is transposed and columns [0, 2, 3, 4, -1] are
    taken as (time, x, y, z, is_fall); accelerations are scaled x100 and
    times converted to ms before insertion. Subject ids are assigned by file
    order, starting at 1.

    NOTE(review): the SQL is built by %-interpolation (values come from local
    files, so injection risk is low, but parameterized queries would still be
    safer) and the h5py.File handle is never closed.
    """
    sql = '''
        INSERT INTO test_data_farseeing (subject_id, label_id, timestamp, x_accel, y_accel, z_accel)
        VALUES ('%s', '%s', '%s', '%s', '%s', '%s')
    '''
    # label_id refers to is_fall in the mat file
    subject_id = 0
    for file in glob.glob(pattern):
        subject_id += 1
        print('%s: %s' % (subject_id, file))
        d = h5py.File(file)
        rows = np.matrix(d['tmp']).T[:, [0, 2, 3, 4, -1]].tolist()
        for row in rows:
            label_id = row[-1]
            x_accel = round(float(row[1]) * 100)
            y_accel = round(float(row[2]) * 100)
            z_accel = round(float(row[3]) * 100)
            timestamp = round(row[0] * 1000)  # seconds -> milliseconds
            try:
                cur.execute(sql % (subject_id, label_id, timestamp, x_accel, y_accel, z_accel))
            except Exception as e:
                print(e)
if __name__ == '__main__':
cur = db_connect()
# matlab_to_db('C:/_test_space/Fall examples/Signal files/*.mat', cur)
csv_to_db('C:/_test_space/new_samples_0115/2017-1-15_21_15_20.csv', cur) | Platinays/SilverLinkResearch | parse.py | parse.py | py | 4,222 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymysql.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 106... |
13996961200 | from numpy import zeros
from dividexp import db
from dividexp.models import Users, Teams, Trips, Expenses
from math import fabs
from datetime import datetime
class TripManager:
    """Stateful helper that aggregates one trip's members, expenses and the
    pairwise debt ("expense table") between members.

    NOTE(review): instances carry mutable shared state (users_ids,
    expense_table, team, expenses) across requests — presumably used as a
    per-session/module singleton; not safe for concurrent use. Confirm
    against the callers.
    """
    def __init__(self):
        # Current trip id (0 = none selected).
        self.id = 0
        # Maps user id -> row/column index in expense_table.
        self.users_ids = {}
        # Number of team members (matrix dimension).
        self.size = 1
        # Cached expense dicts for templates.
        self.expenses = []
        # Cached member dicts for templates.
        self.team = []
    def set_id(self, trip_id):
        """Select the trip this manager operates on."""
        self.id = trip_id
    def clear(self):
        """Reset all cached state back to the freshly-constructed values."""
        self.id = 0
        self.users_ids = {}
        self.size = 1
        self.expenses = []
        self.team = []
    def update_trip_date(self):
        """Touch the current trip's last_update_date and commit."""
        trip = Trips.query.get(self.id)
        trip.last_update_date = datetime.utcnow()
        db.session.commit()
    def collect_trips(self, user_id):
        """Return template-ready dicts for every trip the user belongs to."""
        trips = []
        # get list of user's teams
        teams = Teams.query.filter_by(user_id=user_id).all()
        for each_team in teams:
            # get trip of each team
            trip = Trips.query.filter_by(id=each_team.trip_id).first()
            trips.append({
                'id': trip.id,
                'route': trip.route,
                'create_date': trip.create_date.strftime('%d/%m/%Y'),
                'last_update_date': trip.last_update_date.strftime('%d/%m/%Y'),
                'total_spendings': trip.total_spendings
            })
        return trips
    def enumerate_members(self, team):
        """Assign each team member a stable index into the expense table."""
        for member in team:
            print(member.user_id)
            if member.user_id in self.users_ids:
                continue
            self.users_ids[member.user_id] = len(self.users_ids)
        print(self.users_ids)
    def edit_expense_table(self, row, sum):
        """Apply a joint expense paid by member ``row``, split evenly.

        Diagonal cell [row, row] accumulates the payer's own spending; an
        off-diagonal cell [i, j] holds how much member i owes member j.
        Existing debts owed *to* the payer are netted against the new credit.
        """
        credit = sum / self.size  # everyone's equal share
        for column in range(0, self.size):
            if(row == column):
                # Payer's own total spending.
                self.expense_table[row, column] = self.expense_table[row, column] + sum
                continue
            if (self.expense_table[column, row] != 0):
                # The payer already owed this member: net the debts.
                new_credit = self.expense_table[column, row] - credit
                if new_credit > 0:
                    self.expense_table[column, row] = new_credit
                else:
                    # Debt flips direction.
                    self.expense_table[row, column] = fabs(new_credit)
                    self.expense_table[column, row] = 0
            else:
                # No prior debt: this member now owes the payer a share.
                self.expense_table[row, column] = self.expense_table[row, column] + credit
    def fill_table(self, team, expenses):
        """(Re)build the expense table from a team and its expense history."""
        self.size = len(team)
        self.expense_table = zeros((self.size, self.size))
        self.enumerate_members(team)
        for e in expenses:
            # get row num
            row = self.users_ids.get(e.user_id)
            self.edit_expense_table(row, e.sum)
    def recount_user_budget(self, user_id, col):
        """Recompute one member's balance and total credit from the table.

        ``user_id`` is the Teams row pk; ``col`` is the member's table index.
        Commits the updated Teams row.
        """
        # team member id
        user = Teams.query.get(user_id)
        credit = 0.0
        for row in range(0, self.size):
            if(row == col):
                # Balance = budget minus the member's own spending.
                user.balance = user.budget - self.expense_table[col, col]
            else:
                # Sum of what others owe this member.
                credit = credit + self.expense_table[row, col]
        user.credit = credit
        self.update_trip_date()
        db.session.commit()
    def get_credits_info(self, column):
        """Return {username: amount} owed to the member at ``column``."""
        credits_info = {}
        users_list = list(self.users_ids.keys())
        for row in range(0, self.size):
            user = Users.query.filter_by(id=users_list[row]).first()
            if(row == column):
                continue
            credits_info[user.username] = round(self.expense_table[row, column], 2)
        return credits_info
    def collect_users(self):
        """Rebuild self.team with fresh balances/credits for every member."""
        self.team.clear()
        # get team members of the trip
        trip = Trips.query.get(self.id)
        team_members = trip.team_members
        for each_member in team_members:
            user = Users.query.filter_by(id=each_member.user_id).first()
            self.recount_user_budget(each_member.id, self.users_ids.get(user.id))
            credit_info = self.get_credits_info(self.users_ids.get(user.id))
            self.team.append({
                'id': each_member.id,
                'username': user.username,
                'name': user.name,
                'email': user.email,
                'image_file': user.image_file,
                'budget': round(each_member.balance, 2),
                'credit': round(each_member.credit, 2),
                'progress_bar_value': int(100.0 * each_member.balance / each_member.budget),
                'credits': credit_info
            })
    def collect_expenses(self):
        """Rebuild self.expenses from the current trip's expense rows."""
        self.expenses.clear()
        all_expenses = Trips.query.filter_by(id=self.id).first().expenses
        for each_expense in all_expenses:
            user = Users.query.get(each_expense.user_id)
            self.expenses.append({
                'category': each_expense.category,
                'sum': each_expense.sum,
                'name': user.name,
                'timestamp': each_expense.timestamp.strftime('%I:%M %p'),
            })
    def add_joint_expense(self, username, category, sum, notes):
        """Record an expense split among the whole team, paid by ``username``."""
        # commit changes to the database
        user = Users.query.filter_by(username=username).first()
        team_member = Users.query.join(Users.teams).filter(
            Users.username == username, Teams.trip_id == self.id).first()
        expense = Expenses(trip_id=self.id, user_id=user.id, team_member_id=team_member.id,
                           sum=sum, category=category, notes=notes)
        db.session.add(expense)
        trip = Trips.query.get(self.id)
        trip.total_spendings = trip.total_spendings + expense.sum
        self.update_trip_date()
        db.session.commit()
        new_expense = {
            'category': expense.category,
            'sum': expense.sum,
            'name': user.name,
            'timestamp': expense.timestamp.strftime('%I:%M %p'),
        }
        self.expenses.append(new_expense)
        # Joint expenses are split across the table.
        self.edit_expense_table(self.users_ids.get(user.id), sum)
        self.collect_users()
    def add_expense(self, username, category, sum, notes):
        """Record a personal expense (not split) paid by ``username``."""
        # commit changes to the database
        user = Users.query.filter_by(username=username).first()
        team_member = Users.query.join(Users.teams).filter(
            Users.username == username, Teams.trip_id == self.id).first()
        expense = Expenses(trip_id=self.id, user_id=user.id, team_member_id=team_member.id,
                           sum=sum, category=category, notes=notes)
        db.session.add(expense)
        trip = Trips.query.get(self.id)
        trip.total_spendings = trip.total_spendings + expense.sum
        self.update_trip_date()
        db.session.commit()
        new_expense = {
            'category': expense.category,
            'sum': expense.sum,
            'name': user.name,
            'timestamp': expense.timestamp.strftime('%I:%M %p'),
        }
        self.expenses.append(new_expense)
        # get user column, row in expense table
        # Personal expenses only grow the payer's diagonal cell.
        row = self.users_ids.get(user.id)
        self.expense_table[row][row] = self.expense_table[row][row] + sum
        self.collect_users()
    def add_team_member(self, username, budget):
        """Add an existing user to the trip's team; returns False if unknown."""
        user = Users.query.filter_by(username=username).first()
        if user:
            # user is in a database
            team_member = Teams(trip_id=self.id, user_id=user.id,
                                budget=budget, balance=budget)
            db.session.add(team_member)
            self.update_trip_date()
            db.session.commit()
            new_user = {
                'id': team_member.id,
                'username': user.username,
                'name': user.name,
                'email': user.email,
                'image_file': user.image_file,
                'budget': round(team_member.balance),
                'credit': round(team_member.credit),
                'progress_bar_value': int(100.0 * team_member.balance / team_member.budget)
            }
            self.team.append(new_user)
            return True
        else:
            return False
    def get_chart_items(self):
        """Return (labels, values) of spending totals per category.

        Returns two empty lists when there are no expenses; otherwise a zip
        over the aggregated {category: total} mapping.
        """
        expenses_chart = {}
        print(self.expenses)
        for expense in self.expenses:
            if expense['category'] in expenses_chart.keys():
                current_value = expenses_chart.get(expense['category'])
                expenses_chart[expense['category']] = current_value + expense['sum']
            else:
                expenses_chart[expense['category']] = expense['sum']
        if len(expenses_chart) == 0:
            return [], []
        else:
            return zip(*expenses_chart.items())
| veronika-suprunovich/dividexp | dividexp/manager.py | manager.py | py | 8,591 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "dividexp.models.Trips.query.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "dividexp.models.Trips.query",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "dividexp.models.Trips",
"line_number": 27,
"usage_type": "name"
},
{... |
3512694431 | import discord
import responses
import pymongo
import datetime
client = pymongo.MongoClient("mongodb+srv://discordroll:check@discordroll.ej9jzg7.mongodb.net/?retryWrites=true&w=majority")
db = client.users
print(db)
async def send_message(message, user_message, is_private):
    """Create or refresh the Discord author's balance document in MongoDB.

    First-time authors get a document seeded with ``responses.balance``;
    returning authors have their balance bumped via ``cashing``. All errors
    are logged at this top-level boundary rather than propagated.

    Fixes: the user document was fetched up to three times per call (now
    fetched once) and compared with ``== None`` (now ``is None``); debug
    prints of intermediate types were removed.
    """
    try:
        now = datetime.datetime.utcnow()
        name = str(message.author)
        user_doc = db.users.find_one({'name': name})
        if user_doc is None:
            # New user: seed the balance document.
            print(f'New user {name} Added')
            db.users.insert_one({"name": name, "balance": responses.balance, "CreatedAt": now})
        else:
            # Existing user: recompute and persist the balance.
            print(f'Balance of user {name} was updated')
            wallet = user_doc.get('balance')
            db.users.update_one({"name": name}, {"$set": {"balance": cashing(wallet, message), "updatedAt": now}})
    except Exception as e:
        print(e)
def cashing(bal, message: str):
    """Return ``bal`` plus the flat per-message reward of 5.

    ``message`` is accepted for signature compatibility but unused.
    """
    print('Im here')
    return bal + 5
def run_discord_bot():
TOKEN = ''
intents = discord.Intents.default()
intents.message_content = True
client = discord.Client(intents=intents)
@client.event
async def on_ready():
print(f'{client.user} is now running!')
@client.event
async def on_message(message):
if message.author == client.user:
return
username = str(message.author)
user_message = str(message.content)
channel = str(message.channel)
print(f'{username} said: "{user_message}" ({channel})')
if user_message[0] == '?':
user_message = user_message[1:]
await send_message(message, user_message, is_private=True)
else:
await send_message(message, user_message, is_private=False)
client.run(TOKEN) | Tadjikistan/Proekt_2023_VG | Discord_bot/bot.py | bot.py | py | 2,257 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "re... |
71700208104 |
from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name='home'),
path('fitness', views.fitness, name='fitness'),
path('barbells', views.barbells, name='barbells'),
path('dumbbells', views.dumbbells, name='dumbbells'),
path('cart', views.cart, name='cart'),
path('product/', views.product, name="pics2"),
] | fravila08/fitness_store | ecom_app/urls.py | urls.py | py | 366 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
38040183892 | import numpy as np
import argparse
import cv2
import colorsys # 提取图片中主要颜色
from PIL import Image # python imaging library,已经是python平台事实上的图像处理标准库
import numpy as np
from skimage import draw
image = cv2.imread('F:\\maomi\\0.png')
def color_Handle():
    """Segment the module-level ``image`` by the BGR ranges in ``color``.

    Each (lower, upper) pair is masked out in turn and shown next to the
    original; returns the last masked output. Blocks on cv2.waitKey.
    """
    color = [
        # Hand-tuned BGR range (original note: a yellow range the author
        # experimented with is kept commented below). Values are [b, g, r].
        # ([0, 70, 70], [100, 255, 255])
        ([0, 0, 200], [90, 80, 255])
    ]
    # Every color interval defined in `color` can be segmented out in turn.
    for (lower, upper) in color:
        # Build NumPy bound arrays.
        lower = np.array(lower, dtype="uint8")  # lower color bound
        upper = np.array(upper, dtype="uint8")  # upper color bound
        # Mask of pixels that fall inside the range.
        mask = cv2.inRange(image, lower, upper)
        output = cv2.bitwise_and(image, image, mask=mask)
        # Show original and extraction side by side.
        cv2.imshow("images", np.hstack([image, output]))
        # cv2.imshow("haha",output)
        cv2.waitKey(0)
    return output
#提取图片中的主要颜色
def get_dominant_color(image):
    """Return the dominant saturated color of a PIL image.

    Returns ``(dominant_color, red, green, blue)`` where ``dominant_color``
    is an ``(r, g, b)`` tuple — or the original sentinel ``0`` when no pixel
    qualifies — and red/green/blue are its components (0 when none qualify).

    Bug fix: red/green/blue were previously unbound when the loop never
    found a qualifying color, so the return raised NameError.
    """
    # Convert so getcolors() yields RGBA tuples, to read rgb values out.
    image = image.convert('RGBA')
    # Thumbnail to bound the work / CPU load.
    image.thumbnail((200, 200))
    max_score = 0
    dominant_color = 0  # original sentinel kept for backward compatibility
    red = green = blue = 0
    for count, (r, g, b, a) in image.getcolors(image.size[0] * image.size[1]):
        # Skip fully transparent pixels.
        if a == 0:
            continue
        saturation = colorsys.rgb_to_hsv(r / 255.0, g / 255.0, b / 255.0)[1]
        # Integer approximation of the pixel's luma, clamped to video range,
        # then normalized to [0, 1].
        y = min(abs(r * 2104 + g * 4130 + b * 802 + 4096 + 131072) >> 13, 235)
        y = (y - 16.0) / (235 - 16)
        # Ignore very bright (near-white) colors.
        if y > 0.9:
            continue
        # Calculate the score, preferring highly saturated colors.
        # Add 0.1 to the saturation so we don't completely ignore grayscale
        # colors by multiplying the count by zero, but still give them a low
        # weight.
        score = (saturation + 0.1) * count
        if score > max_score:
            max_score = score
            dominant_color = (r, g, b)
            red, green, blue = r, g, b
    return dominant_color, red, green, blue
def area():
    """Draw a rectangle over a detected region, crop and save it, and return
    its area relative to (half-frame area) scaled by ``rule``.

    NOTE(review): ``a``, ``b``, ``c``, ``d``, ``img`` and ``dest`` are never
    defined in this module — as written this function raises NameError.
    They were presumably globals in an earlier revision (a/b = row bounds,
    c/d = column bounds, dest = output path); verify before use. ``x`` is
    also computed but unused.
    """
    sp = image.shape # obtain the image shape
    sz1 = sp[0] # height(rows) of image
    sz2 = sp[1] # width(colums) of image
    x = sz1 / 2
    y = sz2 / 2
    rule = 2
    Y=np.array([a,b,a,b])
    X=np.array([c,c,d,d])
    rr, cc = draw.polygon(Y, X)
    draw.set_color(img, [rr, cc], [255, 0, 0])  # draw the reference rectangle
    # Area computation.
    area = (b - a) * (d - c)
    total_area = a * y
    distance = (area / total_area) * rule
    cropImg = image[a:b, c:d] # crop the image
    cv2.imwrite(dest, cropImg) # write in destination path
    return distance
#找到四个目标点的函数
def aim_point(img):
    """Return the corner points (A, B, C, D) of the bounding box of all
    "bright" pixels (red/first channel > 200) in ``img``.

    ``img`` is indexed ``img[x, y, channel]`` (numpy-style). Corners are
    returned counter-clockwise from the min corner:
    A=(x_min, y_min), B=(x_max, y_min), C=(x_max, y_max), D=(x_min, y_max).

    Bug fix: the original ``x_min, y_min,x_max,y_max = 0`` raised TypeError
    (an int cannot be unpacked), and its if/elif scan could never update both
    a min and a max from the same pixel. This version seeds the bounds from
    the image extent and tracks min and max independently.
    """
    x_min, y_min = img.shape[0] - 1, img.shape[1] - 1
    x_max, y_max = 0, 0
    # Scan every pixel and grow the bounding box around bright ones.
    for x in range(img.shape[0]):
        for y in range(img.shape[1]):
            if img[x, y, 0] > 200:
                x_min = min(x_min, x)
                x_max = max(x_max, x)
                y_min = min(y_min, y)
                y_max = max(y_max, y)
    A = (x_min, y_min)
    B = (x_max, y_min)
    C = (x_max, y_max)
    D = (x_min, y_max)
    return A, B, C, D
#找到距离中心点最近的函数
#def key_point(a,b,c,d):
color_Handle()
| MrLeedom/colorRecognition | test1/test4.py | test4.py | py | 4,153 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.inRange",
"line_number": 2... |
28068849012 | # 거리두기 확인하기
# https://programmers.co.kr/learn/courses/30/lessons/81302
from collections import deque
def bfs(place, x, y):
    """Check that the examinee at (x, y) keeps social distance.

    ``place`` is a 5x5 grid of 'P' (person), 'O' (empty) and 'X' (partition).
    Returns False when another 'P' is reachable within Manhattan distance 2
    without a partition in between, True otherwise.
    """
    offsets = ((-1, 0), (1, 0), (0, -1), (0, 1))
    for dx, dy in offsets:
        nx, ny = x + dx, y + dy
        # Ignore neighbors outside the waiting room.
        if not (0 <= nx < 5 and 0 <= ny < 5):
            continue
        cell = place[nx][ny]
        # A partition blocks this direction entirely.
        if cell == 'X':
            continue
        # An adjacent examinee violates distancing immediately.
        if cell == 'P':
            return False
        # An empty seat: anyone adjacent to it is at distance 2 from (x, y).
        if cell == 'O':
            for ddx, ddy in offsets:
                mx, my = nx + ddx, ny + ddy
                # Skip the original seat itself.
                if mx == x and my == y:
                    continue
                if not (0 <= mx < 5 and 0 <= my < 5):
                    continue
                if place[mx][my] == 'P':
                    return False
    return True
def solution(places):
    """For each 5x5 waiting room, append 1 when every examinee ('P') keeps
    distance (per ``bfs``) and 0 otherwise."""
    answer = []
    for place in places:
        ok = all(
            bfs(place, i, j)
            for i in range(5)
            for j in range(5)
            if place[i][j] == 'P'
        )
        answer.append(1 if ok else 0)
    return answer
places = [["POOOP", "OXXOX", "OPXPX", "OOXOX", "POXXP"], ["POOPX", "OXPXP", "PXXXO", "OXXXO", "OOOPP"], ["PXOPX", "OXOXP", "OXPOX", "OXXOP", "PXPOX"], ["OOOXX", "XOOOX", "OOOXX", "OXOOX", "OOOOO"], ["PXPXP", "XPXPX", "PXPXP", "XPXPX", "PXPXP"]]
print(solution(places))
| hwanginbeom/algorithm_study | 2.algorithm_test/21.07.25/21.07.25_wooseok.py | 21.07.25_wooseok.py | py | 2,440 | python | ko | code | 3 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 10,
"usage_type": "call"
}
] |
8786955819 | from .fund import Fund
import glob
import re
import csv
import datetime
class FundLog:
    """CSV-backed cache of a fund's daily price history.

    Each fund ``id`` maps to a file ``fundlog-<id>.csv`` in the working
    directory, holding ``date,price`` rows. ``log`` loads lazily from disk;
    ``update`` refreshes from the remote Fund source and rewrites the cache.
    """

    @classmethod
    def list_ids(cls):
        """Return the fund ids of every fundlog-*.csv in the working directory."""
        result = []
        for path in glob.glob("fundlog-*.csv"):
            # Raw string fixes the invalid escape sequences (\S, \.) the
            # original pattern relied on; skip names the pattern rejects
            # instead of raising AttributeError on a None match.
            m = re.match(r'fundlog-(\S+)\.csv', path)
            if m:
                result.append(m.groups()[0])
        return result

    def __init__(self, id):
        self.__id = id
        self.__filename = 'fundlog-%s.csv' % id
        self.__log = []

    @property
    def id(self):
        """The fund identifier this log belongs to."""
        return self.__id

    @property
    def log(self):
        """Price history as [{'date': datetime.date, 'price': int}, ...].

        Loaded lazily from the cache file on first access; empty when no
        cache file exists yet.
        """
        if len(self.__log) == 0:
            self.__load()
        return self.__log

    def __load(self):
        """Populate the in-memory log from the CSV cache; a missing file
        simply leaves the log empty."""
        self.__log = []
        try:
            with open(self.__filename, 'r') as f:
                # QUOTE_NONNUMERIC parses unquoted fields (the price) as float.
                reader = csv.reader(f, quoting = csv.QUOTE_NONNUMERIC)
                for i in reader:
                    date = i[0].split('-')  # ISO "YYYY-MM-DD"
                    item = {'date': datetime.date(int(date[0]), int(date[1]), int(date[2])),
                            'price': int(i[1])}
                    self.__log.append(item)
        except FileNotFoundError:
            pass

    def update(self):
        """Fetch the latest price log from the Fund source and rewrite the
        cache file; an empty fetch leaves everything untouched."""
        fund = Fund()
        log = fund.price_log(self.__id)
        if len(log) == 0:
            return
        self.__log = log
        with open(self.__filename, 'w') as f:
            writer = csv.writer(f, quoting = csv.QUOTE_NONNUMERIC)
            for i in log:
                writer.writerow([i['date'], i['price']])
if __name__ == '__main__':
    # Smoke test: list the cached fund ids, then refresh one fund's log and
    # show it before and after the update.
    print(FundLog.list_ids())
    f = FundLog("JP90C0003PR7")
    print(f.log)
    f.update()
    print(f.log)
| t-bucchi/accagg | accagg/fundlog.py | fundlog.py | py | 1,641 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "glob.glob",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "csv.QUOTE_NONNUMERIC",
"line_number... |
11751989424 | import logging
import jwt
from django.conf import settings
from django.contrib.auth import login
from django.views.generic import TemplateView
from expirybot.apps.blacklist.models import EmailAddress
from ..forms import MonitorEmailAddressForm
from ..models import EmailAddressOwnershipProof, UserProfile
from ..utils import make_user_permanent
LOG = logging.getLogger(__name__)
class AddEmailAddressView(TemplateView):
    """Confirm-and-attach flow for adding a verified email to a profile.

    The URL carries a signed JSON web token (claims: 'a' action, 'e' email,
    'u' profile uuid). GET shows a confirmation form; POST attaches the email
    to the profile, upgrading temporary users to permanent.
    """
    template_name = 'users/add_email_address.html'

    class AddEmailError(ValueError):
        # User-presentable failure; its str() is rendered as error_message.
        pass

    def get(self, request, *args, **kwargs):
        """Validate the token and render the confirmation form (or an error)."""
        try:
            (email, profile) = self._validate_jwt(
                self.kwargs['json_web_token']
            )
        except self.AddEmailError as e:
            return self.render_to_response({'error_message': str(e)})
        else:
            return self.render_to_response({
                'show_confirm_form': True,
                'email_address': email,
                'user': profile.user,
            })

    def post(self, request, *args, **kwargs):
        """Re-validate the token, attach the email, and log the browser in."""
        try:
            (email, profile) = self._validate_jwt(
                self.kwargs['json_web_token']
            )
            self._add_email_address_to_profile(email, profile)
        except self.AddEmailError as e:
            return self.render_to_response({'error_message': str(e)})
        else:
            self._login_user_if_not_logged_in(profile.user)
            return self.render_to_response({
                'show_confirm_form': False,
                'email_address': email,
                'user': profile.user,
                'form': MonitorEmailAddressForm()
            })

    def _validate_jwt(self, json_web_token):
        """Decode and sanity-check the token; return (email, profile).

        Raises AddEmailError for expired/invalid tokens, a wrong action
        claim, or an unknown profile uuid.

        NOTE(review): jwt.decode is called without an explicit `algorithms`
        list — PyJWT >= 2.0 requires it and older versions accept any
        algorithm, which is a known risk. Confirm the pinned PyJWT version.
        """
        try:
            data = jwt.decode(json_web_token, settings.SECRET_KEY)
        except jwt.ExpiredSignatureError:
            LOG.warn("Got expired JSON web token for user {}".format(
                self.request.user.username))
            raise self.AddEmailError('The link has expired')
        except jwt.DecodeError:
            LOG.error("Got invalid JSON web token for user {}: {}".format(
                self.request.user.username, json_web_token))
            raise self.AddEmailError('The link appears to be invalid')
        else:
            # The token must have been minted for the add-email action.
            if data['a'] != 'add-email':
                LOG.error(
                    "Got suspicious JSON web token on add-email for "
                    "user {}: {}".format(self.request.user.username, data)
                )
                raise self.AddEmailError('The link appears to be invalid')
            email = data['e']
            try:
                profile = UserProfile.objects.get(uuid=data['u'])
            except UserProfile.DoesNotExist:
                raise self.AddEmailError(
                    'The user was not found')
            return (email, profile)

    def _add_email_address_to_profile(self, email_address, profile):
        """Create the ownership proof for the email, refusing takeovers.

        An email already proven to belong to a *different* profile raises
        AddEmailError; re-proving one's own email is a no-op. Temporary
        users are made permanent once they own a verified address.
        """
        user = profile.user
        (email_model, _) = EmailAddress.objects.get_or_create(
            email_address=email_address
        )
        try:
            existing_proof = EmailAddressOwnershipProof.objects.get(
                email_address=email_model
            )
        except EmailAddressOwnershipProof.DoesNotExist:
            existing_proof = None
        else:
            if existing_proof.profile != profile:
                LOG.warn(
                    "Prevented change of ownership of email address {} "
                    "from {} to {}".format(
                        email_address, email_model.owner_profile, profile)
                )
                raise self.AddEmailError(
                    '{} is already being monitored'.format(email_address)
                )
        if existing_proof is None:
            EmailAddressOwnershipProof.objects.create(
                profile=profile,
                email_address=email_model
            )
        if user.profile.is_temporary:
            make_user_permanent(user, email_address)

    def _login_user_if_not_logged_in(self, user):
        """
        If the user opens the verification link on another device or browser
        profile, automatically login that browser, until the browser is closed.

        NOTE(review): `is_anonymous` is called as a method — it became a
        property in Django 1.10+; confirm the pinned Django version.
        """
        if self.request.user.is_anonymous():
            login(self.request, user)
            self.request.session.set_expiry(0)  # expire on browser close
| fawkesley/expirybot-web | expirybot/apps/users/views/add_email_address_view.py | add_email_address_view.py | py | 4,351 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.views.generic.TemplateView",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "forms.MonitorEmailAddressForm",
"line_number": 59,
"usage_type": "call"
},
{
... |
10189099421 | import pytest
from auroraapi.interpret import Interpret
class TestInterpret(object):
    """Unit tests for the Interpret response wrapper."""
    def test_create_no_arguments(self):
        # Constructing without the raw response dict must fail loudly.
        with pytest.raises(TypeError):
            Interpret()
    def test_create_wrong_type(self):
        # A bare string is not an acceptable response payload.
        with pytest.raises(TypeError):
            Interpret("test")
    def test_create(self):
        # An empty entities mapping in the payload stays empty on the object.
        payload = {"intent": "test", "entities": {}}
        parsed = Interpret(payload)
        assert isinstance(parsed, Interpret)
        assert parsed.intent == "test"
        assert len(parsed.entities) == 0
        # A single entity must be exposed verbatim.
        payload = {"intent": "test", "entities": {"abc": "123"}}
        parsed = Interpret(payload)
        assert isinstance(parsed, Interpret)
        assert parsed.intent == "test"
        assert len(parsed.entities) == 1
        assert parsed.entities["abc"] == "123"
{
"api_name": "pytest.raises",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "auroraapi.interpret.Interpret",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "auroraapi.int... |
26383114619 | from typing import Protocol
import numpy as np
from sklearn.metrics import f1_score, accuracy_score, balanced_accuracy_score
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import LinearSVC
from sklearn.dummy import DummyClassifier
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
class ScikitModel(Protocol):
    """Structural type for the scikit-learn estimator interface used below.

    Any object exposing fit/predict/score/set_params (e.g. sklearn, xgboost
    or lightgbm estimators) satisfies this protocol.
    """
    def fit(self, X, y, sample_weight=None) -> 'ScikitModel': ...
    def predict(self, X) -> np.ndarray: ...
    def score(self, X, y, sample_weight=None) -> float: ...
    def set_params(self, **params) -> 'ScikitModel': ...
def create_models():
    """Return the default roster of classifiers to benchmark.

    The majority-class DummyClassifier is included first as a baseline;
    the remaining entries cover boosting, bagging, linear and MLP models.
    """
    return [
        DummyClassifier(strategy="most_frequent"),
        GradientBoostingClassifier(),
        LGBMClassifier(),
        XGBClassifier(),
        RandomForestClassifier(),
        LinearSVC(),
        MLPClassifier(hidden_layer_sizes=100, solver='sgd', alpha=0.5),
    ]
def run_classification(train_df, test_df, target_column, models=None):
    """Fit each model on *train_df* and score it on *test_df*.

    Parameters
    ----------
    train_df, test_df : pandas.DataFrame
        Feature frames that also contain the label column *target_column*.
    target_column : str
        Name of the binary label column.
    models : list, optional
        Non-empty list of estimators implementing ``ScikitModel``;
        defaults to ``create_models()``.

    Returns
    -------
    list[tuple] | None
        One ``(model_name, accuracy, balanced_accuracy, f1)`` tuple per
        model, or ``None`` when the training labels contain a single class
        (not a classification problem).

    Raises
    ------
    ValueError
        If *models* is given but is not a non-empty list.
    """
    if models is None:
        models = create_models()
    elif not isinstance(models, list) or not models:
        # Explicit validation instead of `assert`, which is stripped under
        # `python -O`; isinstance() replaces the fragile `type(...) == list`.
        raise ValueError("models must be a non-empty list of estimators")
    X_train, y_train = train_df.drop(columns=[target_column]), train_df[target_column]
    X_test, y_test = test_df.drop(columns=[target_column]), test_df[target_column]
    if len(y_train.unique()) == 1:
        # If there is only one class, the dataset is not suitable for classification
        return None
    scores = []
    for model in models:
        model = model.fit(X_train, y_train)
        # `predict` returns hard labels; thresholding at 0.5 keeps the
        # original behaviour and coerces 0/1 labels to booleans.
        predictions = model.predict(X_test) >= 0.5
        accuracy = accuracy_score(y_test, predictions)
        balanced_accuracy = balanced_accuracy_score(y_test, predictions)
        f1 = f1_score(y_test, predictions)
        scores.append((type(model).__name__, accuracy, balanced_accuracy, f1))
        print(
            f'Model: {type(model).__name__} \t Accuracy: {np.mean(accuracy):.3f} ({np.std(accuracy):.3f}), Balanced accuracy: {np.mean(balanced_accuracy):.3f} ({np.std(balanced_accuracy):.3f}), F1: {np.mean(f1):.3f} ({np.std(f1):.3f})')
    return scores
| jarsba/gradu | scripts/base_clf.py | base_clf.py | py | 2,198 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.Protocol",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "sklearn.dummy.DummyClassifier",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "skle... |
26358980006 | #!/usr/bin/env python
import base64
import logging
import os
import sys
import toml
from prometheus_client import CollectorRegistry, Gauge, push_to_gateway
from prometheus_client.exposition import basic_auth_handler
from git_metrics import SaasGitMetrics, SaasConfigReadError, GitCommandError
from gql import GqlApi
import vault_client
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
# TODO: For some reason it's faster if pool size is 0
POOL_SIZE = os.getenv('POOL_SIZE', 0)
# Clone the repos to this path
CACHE_DIR = os.getenv('CACHE_DIR', '.cache')
def get_saas_repos(config):
    """Query the GraphQL server and return URLs of 'saasrepo' code components."""
    gql_client = GqlApi(config['graphql']['server'], config['graphql']['token'])
    query = """
    {
      apps: apps_v1 {
        codeComponents {
          name
          resource
          url
        }
      }
    }
    """
    apps = gql_client.query(query)['apps']
    urls = []
    for app in apps:
        # codeComponents may come back as None; normalise to an empty iterable
        components = app.get('codeComponents', {}) or {}
        for component in components:
            if component['resource'] == "saasrepo":
                urls.append(component['url'])
    return urls
def init_vault_client(config):
    """Initialise the global vault client from the 'vault' config section."""
    vault_conf = config['vault']
    vault_client.init(
        vault_conf['server'],
        vault_conf['role_id'],
        vault_conf['secret_id'],
    )
def pgw_auth_handler(pgw_config):
    """Return a push_to_gateway handler that adds basic-auth credentials.

    Credentials are looked up in *pgw_config* ('username'/'password') each
    time the handler fires, matching the original lazy behaviour.
    """
    def handler(url, method, timeout, headers, data):
        return basic_auth_handler(
            url, method, timeout, headers, data,
            pgw_config['username'], pgw_config['password'],
        )
    return handler
if __name__ == "__main__":
    # CONFIG_TOML holds a base64-encoded TOML document.
    # NOTE(review): base64.b64decode returns bytes while toml.loads expects
    # str on Python 3 — confirm the interpreter/toml version this runs under.
    config = toml.loads(base64.b64decode(os.environ['CONFIG_TOML']))
    init_vault_client(config)
    # pushgateway credentials live in vault, not in the config itself
    pgw_config = vault_client.read_all(config['pushgateway']['secret_path'])
    # a single registry collects every gauge pushed in one shot at the end
    registry = CollectorRegistry()
    labels = ['saas_context', 'saas_service']
    invalid_labels = ['saas_repo_url']
    g_upstream_commits = Gauge('saas_upstream_commits',
                               'number of commits in the upstream repo',
                               labels, registry=registry)
    g_commit_index = Gauge('saas_commit_index',
                           'commit number in upstream of the last promoted to '
                           'prod commit',
                           labels, registry=registry)
    g_commit_ts = Gauge('saas_commit_ts',
                        'timestamp of the last promoted to prod commit '
                        '(in upstream)',
                        labels, registry=registry)
    g_invalid_repos = Gauge('saas_invalid_repos',
                            'repos that could not be processed',
                            invalid_labels, registry=registry)
    error = False
    for saas_repo in get_saas_repos(config):
        logging.info(['processing', saas_repo])
        try:
            sgm_repo = SaasGitMetrics(saas_repo, POOL_SIZE, cache=CACHE_DIR)
            services = sgm_repo.services_hash_history()
        except Exception as e:
            # classify the failure for the log, but keep processing the rest
            if isinstance(e, SaasConfigReadError):
                logging.error("cannot read config.yaml")
            elif isinstance(e, GitCommandError):
                logging.error("git command error")
                logging.error(str(e))
            else:
                logging.error("generic error")
                logging.error(str(e))
            # report error saas url
            g_invalid_repos.labels(
                saas_repo_url=saas_repo,
            ).set(1)
            # mark build as error
            error = True
            continue
        for s in services:
            context = s['context']
            name = s['name']
            g_upstream_commits.labels(
                saas_context=context,
                saas_service=name
            ).set(s['upstream_commits'])
            g_commit_index.labels(
                saas_context=context,
                saas_service=name
            ).set(s['upstream_saas_commit_index'])
            g_commit_ts.labels(
                saas_context=context,
                saas_service=name
            ).set(s['commit_ts'])
    # push all collected metrics in one authenticated request
    push_to_gateway(pgw_config['server'],
                    job='saas_metrics',
                    registry=registry,
                    handler=pgw_auth_handler(pgw_config))
    if error:
        # non-zero exit so the scheduled job run is marked failed
        sys.exit(1)
| app-sre/push-saas-metrics | push-saas-metrics.py | push-saas-metrics.py | py | 4,496 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.getenv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line... |
993280779 | #! /usr/bin/python3
import logging
from binascii import hexlify
from struct import unpack
from time import strftime
from collections import OrderedDict
from . import commands_helpers, attributes_helpers
from .parameters import *
from .conversions import zgt_encode, zgt_decode, zgt_checksum, zgt_decode_struct
from .responses import RESPONSES
ZGT_LOG = logging.getLogger('zigate')
class ZiGate():
    """Frame-level driver for the ZiGate Zigbee gateway.

    This class only knows the ZiGate serial protocol: it splits and decodes
    incoming frames (``read_data`` -> ``interpret_response``) and builds
    outgoing frames (``send_data``).  The actual byte transport and the
    reaction to decoded messages are delegated to ``send_to_transport`` and
    ``set_external_command``, which the embedding program must provide.
    """
    def __init__(self):
        self._buffer = b''        # unparsed bytes received so far
        self._devices_info = {}   # per-device properties, see set_device_property
    # Store interesting (i.e. non technical) properties for future use
    def set_device_property(self, addr, endpoint, property_id, property_data):
        """
        log property / attribute value in a device based dictionnary
        please note that short addr is not stable if device is reset
        (still have to find the unique ID)
        all data stored must be directly usable (i.e no bytes)
        """
        # Key is "<short_addr>_<endpoint>", or "<short_addr>_x" when the
        # property is not tied to a specific endpoint.
        if endpoint:
            str_addr = '{}_{}'.format(addr.decode(), endpoint.decode())
        else:
            str_addr = '{}_x'.format(addr.decode())
        if str_addr not in self._devices_info:
            self._devices_info[str_addr] = {}
        self._devices_info[str_addr][property_id] = property_data
    # Must be overridden by external program
    def set_external_command(self, command_type, **kwargs):
        pass
    # Must be defined and assigned in the transport object
    @staticmethod
    def send_to_transport(data):
        pass
    def read_data(self, data):
        """Read ZiGate output and split messages
        Must be called from a thread loop or asyncio event loop"""
        self._buffer += data
        # 0x01 / 0x03 bytes delimit a frame; process every complete frame
        # currently buffered, keeping any trailing partial frame for later.
        endpos = self._buffer.find(b'\x03')
        while endpos != -1:
            startpos = self._buffer.find(b'\x01')
            # stripping starting 0x01 & ending 0x03
            data_to_decode = zgt_decode(self._buffer[startpos + 1:endpos])
            self.interpret_response(data_to_decode)
            ZGT_LOG.debug('--------------------------------------')
            ZGT_LOG.debug(' # encoded : {}'.format(hexlify(self._buffer[startpos:endpos + 1])))
            ZGT_LOG.debug(' # decoded : 01{}03'.format( ' '.join([format(x, '02x') for x in data_to_decode]).upper()))
            ZGT_LOG.debug(' (@timestamp : {})'.format(strftime("%H:%M:%S")))
            self._buffer = self._buffer[endpos + 1:]
            endpos = self._buffer.find(b'\x03')
    def interpret_response(self, data):
        """Interpret responses attributes"""
        ZGT_LOG.info('RESPONSE DATA {}'.format(data))
        # Decoded frame layout: type(2) | length(2) | crc(1) | payload(n) | rssi(1)
        msg_data_length = len(data)-6
        msg_type, msg_length, msg_crc, msg_data, msg_rssi = unpack('!HHB{}sB'.format(msg_data_length), data)
        # the length field counts payload + rssi byte, hence the -1
        if msg_length-1 != len(msg_data):
            ZGT_LOG.error('BAD LENGTH {0} != {1}'.format(msg_length,len(msg_data)))
            return
        computed_crc = zgt_checksum(data[0:2], data[2:4], data[5:])
        if msg_crc != computed_crc:
            ZGT_LOG.error('BAD CRC {0} != {1}'.format(msg_crc,computed_crc))
            return
        # Do different things based on MsgType
        ZGT_LOG.debug('--------------------------------------')
        if RESPONSES.get(msg_type):
            # Analyze response data and show logs
            resp = RESPONSES[msg_type](data)
            resp.show_log()
            ZGT_LOG.debug(' - MsgType : {:04x}'.format(msg_type))
            ZGT_LOG.debug(' - MsgLength : {}'.format(msg_length))
            ZGT_LOG.debug(' - ChkSum : {}'.format(msg_crc))
            ZGT_LOG.debug(' - Data : {}'.format(hexlify(msg_data)))
            ZGT_LOG.debug(' - RSSI : {}'.format(msg_rssi))
            # If any command related to the response needs to be triggered, the do it
            commands = resp.get_external_commands()
            while commands:
                cmd, params = commands.popitem(last=False)
                self.set_external_command(cmd, **params)
        else:
            ZGT_LOG.debug('RESPONSE {:04x} : Unknown Message'.format(msg_type))
            ZGT_LOG.debug(' - After decoding : {}'.format(hexlify(data)))
            ZGT_LOG.debug(' - MsgType : {:04x}'.format(msg_type))
            # bug fix: this line used to log msg_type under the MsgLength label
            ZGT_LOG.debug(' - MsgLength : {}'.format(msg_length))
            ZGT_LOG.debug(' - ChkSum : {}'.format(msg_crc))
            ZGT_LOG.debug(' - Data : {}'.format(hexlify(msg_data)))
            ZGT_LOG.debug(' - RSSI : {}'.format(msg_rssi))
    def send_data(self, cmd, data=""):
        """send data through ZiGate
        Calls "transport_write" which must be defined
        in a serial connection or pyserial_asyncio transport"""
        byte_cmd = bytes.fromhex(cmd)
        byte_data = bytes.fromhex(data)
        length = int(len(data)/2)
        byte_length = length.to_bytes(2, 'big')
        # --- non encoded version --- (built for logging only)
        std_msg = [0x01]
        std_msg.extend(byte_cmd)
        std_msg.extend(byte_length)
        std_msg.append(zgt_checksum(byte_cmd, byte_length, byte_data))
        if data != "":
            std_msg.extend(byte_data)
        std_msg.append(0x03)
        # --- encoded version --- (this is what is actually sent)
        enc_msg = [0x01]
        enc_msg.extend(zgt_encode(byte_cmd))
        enc_msg.extend(zgt_encode(byte_length))
        enc_msg.append(zgt_checksum(byte_cmd, byte_length, byte_data))
        if data != "":
            enc_msg.extend(zgt_encode(byte_data))
        enc_msg.append(0x03)
        std_output = b''.join([bytes([x]) for x in std_msg])
        encoded_output = b''.join([bytes([x]) for x in enc_msg])
        ZGT_LOG.debug('--------------------------------------')
        ZGT_LOG.debug('REQUEST : {} {}'.format(cmd, data))
        ZGT_LOG.debug(' # standard : {}'.format(' '.join([format(x, '02x') for x in std_output]).upper()))
        ZGT_LOG.debug(' # encoded : {}'.format(hexlify(encoded_output)))
        ZGT_LOG.debug('(timestamp : {})'.format(strftime("%H:%M:%S")))
        ZGT_LOG.debug('--------------------------------------')
        self.send_to_transport(encoded_output)
    def read_attribute(self, device_address, device_endpoint, cluster_id, attribute_id):
        """
        Sends read attribute command to device
        :type self: Zigate
        :param str device_address: length 4. Example "AB01"
        :param str device_endpoint: length 2. Example "01"
        :param str cluster_id: length 4. Example "0000"
        :param str attribute_id: length 4. Example "0005"
        Examples:
        ========
        Replace device_address AB01 with your devices address.
        All clusters and parameters are not available on every device.
        - Get device manufacturer name: read_attribute('AB01', '01', '0000', '0004')
        - Get device name: read_attribute('AB01', '01', '0000', '0005')
        - Get device battery voltage: read_attribute('AB01', '01', '0001', '0006')
        """
        # spaces inside the hex payload are tolerated by bytes.fromhex
        cmd = '02' + device_address + '01' + device_endpoint + cluster_id + '00 00 0000 01' + attribute_id
        self.send_data('0100', cmd)
    def read_multiple_attributes(self, device_address, device_endpoint, cluster_id, first_attribute_id, attributes):
        """
        Constructs read_attribute command with multiple attributes and sends it
        :type self: Zigate
        :param str device_address: length 4. E
        :param str device_endpoint: length 2.
        :param str cluster_id: length 4.
        :param str first_attribute_id: length 4
        :param int attributes: How many attributes are requested. Max value 255
        Examples:
        ========
        Replace device_address AB01 with your devices address.
        All clusters and parameters are not available on every device.
        - Get five first attributes from "General: Basic" cluster:
            read_multiple_attributes('AB01', '01', '0000', '0000', 5)
        """
        cmd = '02' + device_address + '01' + device_endpoint + cluster_id + '00 00 0000' + '{:02x}'.format(attributes)
        # attribute ids are consecutive, starting at first_attribute_id
        for i in range(attributes):
            cmd += '{:04x}'.format(int(first_attribute_id, 16) + i)
        self.send_data('0100', cmd)
    def permit_join(self):
        """
        permit join for 30 secs (1E)
        :type self: Zigate
        """
        self.send_data("0049", "FFFC1E00")
    # NOTE: several hundred lines of commented-out legacy inline frame parsers
    # (device announce 0x004d, status 0x8000, log 0x8001, versions, device
    # lists, node/power descriptors, endpoint lists, zone status 0x8401, APS
    # failures 0x8702, ...) used to live here.  That logic has been superseded
    # by the RESPONSES handler table imported from .responses; the dead
    # reference copy was removed — see git history if it is ever needed again.
| elric91/ZiGate | pyzigate/interface.py | interface.py | py | 28,853 | python | en | code | 18 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "conversions.zgt_decode",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "binascii.hexlify",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "time.strfti... |
854328737 | #!/usr/bin/env python
"""Groot Object Protection Report for python"""
from pyhesity import *
from fnmatch import fnmatch
import psycopg2
from datetime import datetime
import codecs
import smtplib
from email.mime.multipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email import Encoders
# command line arguments
import argparse
parser = argparse.ArgumentParser()
# Cohesity cluster connection
parser.add_argument('-v', '--vip', type=str, required=True)
parser.add_argument('-u', '--username', type=str, required=True)
parser.add_argument('-d', '--domain', type=str, default='local')
# report scope: how far back to look and an optional name filter
parser.add_argument('-n', '--numdays', type=int, default=31)
parser.add_argument('-f', '--filter', type=str, default=None)
# optional email delivery of the report
parser.add_argument('-s', '--mailserver', type=str)
parser.add_argument('-p', '--mailport', type=int, default=25)
parser.add_argument('-t', '--sendto', action='append', type=str)
parser.add_argument('-r', '--sendfrom', type=str)
args = parser.parse_args()
vip = args.vip
username = args.username
domain = args.domain
numdays = args.numdays
namefilter = args.filter
mailserver = args.mailserver
mailport = args.mailport
sendto = args.sendto
sendfrom = args.sendfrom
# authenticate
apiauth(vip, username, domain)
print('Collecting report data...')
# get groot connection info from cluster
reporting = api('get', 'postgres', quiet=True)
if 'errorCode' in reporting:
    print('statistics DB not found on %s' % vip)
    exit()
cluster = api('get', 'cluster')
# limit query to numdays
startUsecs = timeAgo(numdays, 'days')
# connect to groot
# NOTE(review): connects with cluster-reported default credentials; the first
# entry of `reporting` is assumed to host the statistics DB — confirm.
conn = psycopg2.connect(host=reporting[0]['nodeIp'], port=reporting[0]['port'], database="postgres", user=reporting[0]['defaultUsername'], password=reporting[0]['defaultPassword'])
cur = conn.cursor()
sql_query = """
select
pj.job_name as "Job Name",
le.entity_name AS "Object Name",
TRIM (
leading 'k'
from
et.env_name
) as "Source Type",
rs.source_name as "Source Name",
jrs.status_name as "Job Status",
'Backup' as task_type,
ppolicy.name as "Policy Name",
CASE
WHEN jre.is_full_backup is True then 'Full'
ELSE 'Incremental'
END as "Full/Incremental",
jre.source_delta_size_bytes as "Data Read",
TO_CHAR(
(TRUNC(jre.duration_usecs / 6e+7, 2) || ' minute') :: interval,
'HH24:MI:SS'
) AS "Duration",
to_char(to_timestamp(jre.start_time_usecs / 1000000), 'YYYY-MM-DD HH12:mmAM') as "Start Time",
to_char(to_timestamp(jre.end_time_usecs / 1000000), 'YYYY-MM-DD HH12:mmAM') as "End Time",
to_char(to_timestamp(pjr.snapshot_expiry_time_usecs / 1000000), 'YYYY-MM-DD HH12:mmAM') as "Expiry Date",
CASE
WHEN pjr.sla_violated is True then 'Yes'
ELSE 'No'
END as "SLA Violation"
from
reporting.protection_job_run_entities jre,
reporting.protection_jobs pj,
reporting.leaf_entities le,
reporting.job_run_status jrs,
reporting.environment_types et,
reporting.protection_job_runs pjr,
reporting.registered_sources rs,
reporting.protection_policy ppolicy
where
jre.is_latest_attempt = true
and jre.job_id = pj.job_id
and jre.entity_id = le.entity_id
and jre.status = jrs.status_id
and jre.entity_env_type = et.env_id
and jre.job_run_id = pjr.job_run_id
and jre.parent_source_id = rs.source_id
and pj.policy_id = ppolicy.id
and jre.start_time_usecs > %s
order by
to_timestamp(jre.start_time_usecs / 1000000) desc;""" % startUsecs
now = datetime.now()
date = now.strftime("%m/%d/%Y %H:%M:%S")
csv = 'Job Name,Object Name,Source Type,Source Name,Job Status,Policy Name,Full/Incremental,Data Read,Duration,Start Time,End Time,Expiry Date,SLA Violation\n'
# get failures
cur.execute(sql_query)
rows = cur.fetchall()
for row in rows:
(jobName, objectName, sourceType, sourceName, jobStatus, taskType, policyName, fullincr, dataread, duration, startTime, endTime, expiryDate, slaviolated) = row
if namefilter is None or fnmatch(objectName.lower(), namefilter.lower()) or fnmatch(sourceName.lower(), namefilter.lower()) or fnmatch(jobName.lower(), namefilter.lower()):
csv += '%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n' % (jobName, objectName, sourceType, sourceName, jobStatus, policyName, fullincr, "%s MB" % (dataread / (1024 * 1024)), duration, startTime, endTime, expiryDate, slaviolated)
cur.close()
if namefilter is not None:
namefilterencoded = namefilter.replace('*', '_').replace('?', '_')
outfileName = 'soxReport-%s-%s.csv' % (cluster['name'], namefilterencoded)
subject = 'Cohesity Sox Report (%s) %s' % (cluster['name'], namefilterencoded)
else:
outfileName = 'soxReport-%s.csv' % cluster['name']
subject = 'Cohesity Sox Report (%s)' % cluster['name']
print('saving report as %s' % outfileName)
f = codecs.open(outfileName, 'w', 'utf-8')
f.write(csv)
f.close()
# email report
if mailserver is not None:
print('Sending report to %s...' % ', '.join(sendto))
msg = MIMEMultipart('alternative')
msg['Subject'] = subject
msg['From'] = sendfrom
msg['To'] = ','.join(sendto)
part = MIMEBase('application', "octet-stream")
part.set_payload(open(outfileName, "rb").read())
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % outfileName)
msg.attach(part)
smtpserver = smtplib.SMTP(mailserver, mailport)
smtpserver.sendmail(sendfrom, sendto, msg.as_string())
smtpserver.quit()
| bseltz-cohesity/scripts | groot/python/grootSoxReport/grootSoxReport.py | grootSoxReport.py | py | 5,406 | python | en | code | 85 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "psycopg2.connect",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "datet... |
73326550503 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Python爬虫初探
from urllib import request
import chardet #检测html编码方式的模块
if __name__ == '__main__':
response = request.urlopen("http://www.baidu.com")
html = response.read()
htmlCharset = chardet.detect(html) #会得到一个字典 类似{'encoding': 'utf-8', 'confidence': 0.99, 'language': ''}
#print(htmlCharset['encoding'])
html = html.decode(htmlCharset['encoding'])
print(html) | BobXGY/PythonStudy | python_weather_spider/pc0.py | pc0.py | py | 482 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "urllib.request.urlopen",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "chardet.detect",
"line_number": 12,
"usage_type": "call"
}
] |
8606037106 | from bson.objectid import ObjectId
from bson.json_util import dumps
from json import loads
from mongoengine import Document
from serializers.common_serializer import default
from common.common_module import parse_pages
from common.mongodb_module import build_lookup_filter
def get_all_generic(model: any, mongo_filter: dict, query_params: dict, model_name: str = '') -> list:
"""
:param model: MongoDB collection
:param mongo_filter: The specific document criteria
:param query_params: Dictionary with query params of request
:param model_name: Model name
:return: Documents
"""
pages = parse_pages(query_params)
sort = loads(query_params.get('sort')) if query_params.get('sort') else {'created_at': -1}
reference = query_params.get('with')
query_filter = loads(query_params.get('filter')) if query_params.get('filter') else {}
filter = mongo_filter if mongo_filter else query_filter
pipeline = build_lookup_filter(reference, model_name)
pipeline.append({'$sort': sort})
pipeline.append({'$skip': pages['page']})
pipeline.append({'$limit': pages['page_size']})
pipeline.append({'$match': filter})
docs = model.objects().aggregate(*pipeline)
return loads(dumps(docs), object_hook=default)
def get_by_id_generic(model: any, id: str, query_params: dict = {}, model_name: str = '') -> Document:
"""
:param model: MongoDB collection
:param id: The specific document ID to retrieve
:param query_params: Dictionary with query params of request
:param model_name: Model name
:return: Document
"""
reference = query_params.get('with')
pipeline = build_lookup_filter(reference, model_name)
pipeline.append({'$match': {'_id': ObjectId(id)}})
doc = model.objects().aggregate(*pipeline)
return loads(dumps(doc), object_hook=default)
def get_one_generic(model: any, filter: dict, query_params: dict = {}, model_name: str = '') -> Document:
"""
:param model: MongoDB collection
:param filter: The specific document criteria to retrieve
:param query_params: Dictionary with query params of request
:param model_name: Model name
:return: Document
"""
reference = query_params.get('with')
pipeline = build_lookup_filter(reference, model_name)
pipeline.append({'$match': filter})
doc = model.objects().aggregate(*pipeline)
return loads(dumps(doc), object_hook=default)
def count_generic(model: any, filter: dict = {}) -> int:
"""
:param model: MongoDB collection
:param filter: The specific document criteria to count
:return: Number of documents
"""
return model.objects(__raw__=filter).count()
| carlos-herrera-cervantes/todo-api-python-sanic | source/repositories/base_repository.py | base_repository.py | py | 2,688 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "common.common_module.parse_pages",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "common.mongodb... |
16217747254 | from jinja2 import Environment, FileSystemLoader
from os import walk
from shutil import copytree, rmtree, ignore_patterns
try: rmtree("./build")
except Exception as e: print("Creating build folder", e, "Occured")
copytree("./", "./build", ignore=ignore_patterns('*.py', "templates", "*.git"))
ENV = Environment(loader=FileSystemLoader('./templates'))
# List of pages to be rendered -- MUST be listed according to their
# order in the navigation bar
_, _, PAGE_LIST = next(walk('./templates'))
for file_name in PAGE_LIST:
template = ENV.get_template(file_name)
html = template.render()
# Write output in the corresponding HTML file
print('Writing', file_name)
with open(str("./build/" + file_name), 'w+') as out_file:
out_file.write(html) | Just-Moh-it/assignment-nerd | compiler.py | compiler.py | py | 773 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "shutil.rmtree",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "shutil.copytree",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "shutil.ignore_patterns",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "jinja2.Environment"... |
74962642022 | import tensorflow as tf
import numpy as np
import keras
import librosa
import os
# Indexes for the model:
# 0: Benjamin
# 1: Stromberg
# 2: Julia
# 3: Margaret
# 4: Nelson
model_path = "E:\\Python_Projects\\StrombergAI\\src\\speaker_recognition\\model\\stromberg-recognizer-model.keras"
def audio_to_fft(audio):
# Since tf.signal.fft applies FFT on the innermost dimension,
# we need to squeeze the dimensions and then expand them again
# after FFT
audio = tf.squeeze(audio, axis=-1)
fft = tf.signal.fft(
tf.cast(tf.complex(real=audio, imag=tf.zeros_like(audio)), tf.complex64)
)
fft = tf.expand_dims(fft, axis=-1)
# Return the absolute value of the first half of the FFT
# which represents the positive frequencies
return tf.math.abs(fft[:, : (audio.shape[1] // 2), :])
def audio_to_logmel(audio, sample_rate=16000, n_mels=128):
# Compute STFT
audio = tf.squeeze(audio, axis=-1)
stft = tf.signal.stft(audio,
frame_length=400, # window length. The FFT will be done on windowed frames of this length.
frame_step=160, # frame step. This is the distance to slide along the window.
fft_length=512) # fft length. This parameter determines the number length of the FFT.
spectrogram = tf.abs(stft)
# Compute mel spectrogram: Create mel filter and apply it
num_spectrogram_bins = stft.shape[-1]
linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
num_mel_bins=n_mels,
num_spectrogram_bins=num_spectrogram_bins,
sample_rate=sample_rate,
lower_edge_hertz=0.0,
upper_edge_hertz=sample_rate // 2)
mel_spectrogram = tf.tensordot(spectrogram, linear_to_mel_weight_matrix, 1)
# Compute log mel spectrogram
log_mel_spectrogram = tf.math.log(mel_spectrogram + 1e-6)
# Add a channel dimension
log_mel_spectrogram = tf.expand_dims(log_mel_spectrogram, -1)
return log_mel_spectrogram
if __name__ == "__main__":
print("Loading the model...")
model = keras.models.load_model(model_path)
tf.keras.utils.plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=True)
print("Model loaded")
# We do the recognizition of the voices by folders and each folder is one episode
episode_folder = "E:\\Python_Projects\\StrombergAI\\audio\\tests"
audios = []
directory = os.fsencode(episode_folder)
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.endswith(".wav"):
# Here we load the audio file into a tf tensor
full_path = os.path.join(episode_folder, filename)
audio, sampling_rate = librosa.load(full_path, mono=True, sr=16000)
audios.append((tf.convert_to_tensor(audio, dtype=tf.float32), filename))
# Now let the model predict the speaker by its index.
for audio_tensor, file_name in audios:
# Add the batch and feature dimensions to the tensor
audio_tensor = tf.expand_dims(audio_tensor, axis=0) # Add batch dimension
audio_tensor = tf.expand_dims(audio_tensor, axis=-1) # Add feature dimension
# Get the signal FFT
ffts = audio_to_logmel(audio_tensor)
# Predict
y_pred = model.predict(ffts)
print(file_name)
print(np.argmax(y_pred))
print(y_pred)
print(y_pred.item((0, 0)))
print("")
| TheItCrOw/Ask-Stromberg | src/speaker_recognition/test.py | test.py | py | 3,461 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.squeeze",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tensorflow.signal.fft",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "tensorflow.signal",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "tenso... |
1155657087 | from django.http import HttpResponseNotFound, JsonResponse
from django.shortcuts import get_object_or_404, render, redirect
from django.contrib.auth.decorators import login_required
from django.views.generic import CreateView, UpdateView, DeleteView, ListView, DetailView
from datetime import datetime,timedelta
from django.utils import timezone
from django.urls import reverse_lazy
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.mixins import LoginRequiredMixin
from trash.forms import TrashForm
from food.models import Food
from trash.models import Trash
from user.models import Profile
# Create your views here.
@login_required(login_url="/login")
def home(request):
past_process = Trash.objects.all().filter(user=request.user)
profile = request.user.profile
food_list = Food.objects.all().filter(category=7)
query = Trash.objects.all().filter(user=request.user, on_process=True)
if query:
current_process = query[0]
else:
current_process = False
if request.method == 'POST' and not current_process:
form = TrashForm(request.POST)
if form.is_valid():
trash = form.save(commit=False)
trash.user = request.user
trash.on_process = True
trash.start_date = datetime.now() - timedelta(hours=6)
trash.start_date_formated = datetime.now().strftime(("%m/%d/%Y %H:%M:%S"))
aux_size = request.POST['user_size']
aux_duration = request.POST['user_duration']
if aux_duration and aux_size:
trash.user_size = float(aux_size)
trash.user_duration = timedelta(hours=int(aux_duration.split(":")[0]), minutes=int(aux_duration.split(":")[1]),seconds=int(aux_duration.split(":")[2]))
trash.duration = trash.user_duration
if trash.user_size < 200:
trash.size = "S"
if 200 < trash.user_size < 350:
trash.size = "M"
if 350 < trash.user_size:
trash.size = "L"
if aux_duration and not aux_size:
trash.user_duration = timedelta(hours=int(aux_duration.split(":")[0]), minutes=int(aux_duration.split(":")[1]),seconds=int(aux_duration.split(":")[2]))
trash.duration = trash.user_duration
if trash.size == "S":
trash.user_size = 150.00
if trash.size == "M":
trash.user_size = 275.00
if trash.size == "L":
trash.user_size = 425.00
if not aux_duration and aux_size:
trash.user_size = float(aux_size)
if trash.user_size < 200:
trash.size = "S"
trash.duration = timedelta(hours=6)
if 200 < trash.user_size < 350:
trash.size = "M"
trash.duration = timedelta(hours=8)
if 350 < trash.user_size:
trash.size = "L"
trash.duration = timedelta(hours=10)
trash.user_duration = trash.duration
if not aux_duration and not aux_size:
if trash.size == "S":
trash.duration = timedelta(hours=6)
trash.user_size = 150.00
if trash.size == "M":
trash.duration = timedelta(hours=8)
trash.user_size = 275.00
if trash.size == "L":
trash.user_size = 425.00
trash.duration = timedelta(hours=10)
trash.user_duration = trash.duration
trash.end_date = trash.start_date + trash.duration
trash.processed_size = trash.user_size * 0.4214
trash.save()
profile.processes.add(trash)
form.save_m2m()
duration = trash.user_duration
seconds = duration.seconds
return redirect('trash:process', pk=trash.id, seconds=seconds)
else:
form = TrashForm()
return render(request, 'trash/home.html', {'form': form, 'not_foods':food_list, 'past_processes':past_process, 'current_process': current_process},)
@login_required(login_url="/login")
def process(request, pk, seconds):
trash = get_object_or_404(Trash, id=pk, user=request.user)
return render(request, 'trash/trash_process.html', {'trash':trash})
@login_required(login_url="/login")
def process_completed(request, pk):
trash = get_object_or_404(Trash, id=pk, user=request.user)
trash.on_process = False
trash.save()
return render(request, 'trash/trash_process.html', {'trash':trash})
class TrashList(LoginRequiredMixin, ListView):
model = Trash
queryset = Trash.objects.order_by('id')
context_object_name = 'trashes'
def get_queryset(self):
filter_val = self.request.GET.get('filter')
order = self.request.GET.get('order')
if filter_val:
if order == "asc":
new_context = Trash.objects.filter(user=self.request.user).order_by(filter_val)
else:
query = "-" + filter_val
new_context = Trash.objects.filter(user=self.request.user).order_by(query)
else:
new_context = Trash.objects.filter(user=self.request.user)
return new_context
class TrashDetail(LoginRequiredMixin, DetailView):
model = Trash
class TrashDelete(LoginRequiredMixin, DeleteView):
model = Trash
success_url = reverse_lazy('trash:all')
class TrashUpdate(LoginRequiredMixin, UpdateView):
model = Trash
fields = ["end_date","duration","size", "foods"]
template_name_suffix = "_update_form"
def jsonProcess(request):
query = Trash.objects.all().filter(on_process=True)
if query:
print(query)
current_process = query[0]
duration = current_process.user_duration
seconds = duration.seconds
size = current_process.size
print(seconds)
data = {
'ID': current_process.id,
'Duration': seconds,
'Size': size,
}
response = JsonResponse(data, status=200)
return response
else:
return HttpResponseNotFound("PAGE NOT FOUND") | AndoniWadgymar/greenbin | greenbin/trash/views.py | views.py | py | 6,306 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "trash.models.Trash.objects.all",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "trash.models.Trash.objects",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "trash.models.Trash",
"line_number": 19,
"usage_type": "name"
},
{
... |
16789541655 | #Online Gradient Descent
import numpy as np
from random import *
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#Formatting content.......
print("Reading data.............")
def read_file(filename):
with open(filename) as f:
f.readline()
f.readline()
f.readline()
"""
Creating a list of lists containing the entire dataset
"""
content = f.readlines()
content = ([x.strip() for x in content])
data = []
for i in range(0,len(content)):
content[i] = content[i].split(' ')
data.append(list(map(float,content[i])))
data = np.array(data)
return data
data = read_file('oakland_part3_am_rf.node_features')
data[:,4] = data[:,4].astype(int)
test = read_file('oakland_part3_an_rf.node_features')
test[:,4] = test[:,4].astype(int)
###########################################################################################
print("Training.............")
labels = [1004, 1100, 1103, 1200, 1400]
def calc_label(y_o, i):
y = y_o.copy()
y[y == labels[i]] = 1
y[y!= 1] = 0
return y
#Create a vector of classifiers.. one for each class
w_t = []
for i in range(5):
w = []
for j in range(10):
w.append(uniform(0,1)*0.5)
w_t.append(w)
w_t = np.array(w_t)
#Initialize classifiers, labels and step_size
print("Hyper parameters.............")
alpha = 0.01 #Step_size
num_class = len(labels)
num_pass = 100
p = 0
lamb = 0
batch_size = 50
print("alpha: {}".format(alpha))
print("num of passes: {}".format(num_pass))
print("lambda: {}".format(lamb))
label_conv = {0:1004, 1:1100, 2:1103, 3:1200, 4:1400}
#3D array to store the weights
w = np.zeros(((int(len(data)/batch_size) + 1)*num_pass,5,10))
cross_entropy_train = []
iteration_batch = 0
while(p < num_pass): # Infinite passes over the dataset --> to mimic online learning with continuous stream of data
"""
Shuffle data..
"""
np.random.shuffle(data)
#Extract X and Y from the shuffled data
y_original = data[:,4].copy()
x = data[:,5:]
Y = np.zeros((len(data), num_class))
for c in range(num_class):
Y[:,c] = calc_label(y_original, c)
predict_train = []
index = 0
while(index + batch_size < len(data)): #Passing each data one at at time.. like online learning with sequential data streaming
x_batch = x[index: index+batch_size,:]
y_batch = Y[index: index+batch_size,:]
z = w_t.dot(x_batch.T).T
softmax = np.exp(z)
softmax = softmax/np.sum(softmax, axis = 1).reshape(-1,1)
predict = np.argmax(softmax, axis =1)
l = (-1/batch_size)*np.trace(y_batch.dot(np.log(softmax).T)) + 0.5*lamb*np.sum(w_t*w_t)
w[iteration_batch,:,:] = w_t
w_t = w_t - alpha*((1/batch_size)*(softmax - y_batch).T.dot(x_batch) + lamb*w_t)
predict_train.extend(predict.tolist())
cross_entropy_train.append(l)
index += batch_size
iteration_batch+=1
if(index < len(data)):
bs = len(data) - index
x_batch = x[index: index+bs,:]
y_batch = Y[index: index+bs,:]
z = w_t.dot(x_batch.T).T
softmax = np.exp(z)
softmax = softmax/np.sum(softmax, axis = 1).reshape(-1,1)
predict = np.argmax(softmax, axis =1)
l = (-1/bs)*np.trace(y_batch.dot(np.log(softmax).T))
w[iteration_batch,:,:] = w_t
w_t = w_t - alpha*(1/bs)*(softmax- y_batch).T.dot(x_batch)
predict_train.extend(predict.tolist())
cross_entropy_train.append(l)
iteration_batch+=1
print('current pass: {}'.format(p))
p = p + 1
predict_train = [label_conv[x] for x in predict_train]
predict_train = np.array(predict_train)
print('Train_predict{}'.format(np.shape(predict_train)))
####################################################################################################################################
#Plotting training cross entropy pass wise
print('Plotting training cross entropy pass wise')
f,ax = plt.subplots()
ax.plot(range(iteration_batch), cross_entropy_train)
plt.title('Cross Entropy training error batch wise')
plt.xlabel('Batch Iterations')
plt.ylabel('Softmax cross entropy error')
plt.show()
#####################################################################################################################################
#TESTING
print("Testing Calculations.............")
#Try on testing data....... (Calculating test loss)
#Testing data
y_test = test[:,4]
x = test[:,5:]
Y_test = np.zeros((len(test), num_class))
for c in range(num_class):
Y_test[:,c] = calc_label(y_test, c)
#Average weights
w_avg = np.mean(w, axis = 0)
index = 0
predict_test = []
cross_entropy_test = []
while(index + batch_size < len(test)):
x_batch = x[index: index+batch_size, :]
y_batch = Y_test[index: index+batch_size,:]
z = w_avg.dot(x_batch.T).T
softmax = np.exp(z)
softmax = softmax/np.sum(softmax, axis =1).reshape(-1,1)
predict = np.argmax(softmax, axis =1)
l = (-1/batch_size)*np.trace(y_batch.dot(np.log(softmax).T))
predict_test.extend(predict.tolist())
cross_entropy_test.append(l)
index+=batch_size
if(index < len(test)):
bs = len(test) - index
x_batch = x[index: index+bs,:]
y_batch = Y_test[index: index+bs,:]
z = w_avg.dot(x_batch.T).T
softmax = np.exp(z)
softmax = softmax/np.sum(softmax, axis =1).reshape(-1,1)
predict = np.argmax(softmax, axis =1)
l = (-1/bs)*np.trace(y_batch.dot(np.log(softmax).T))
predict_test.extend(predict.tolist())
cross_entropy_test.append(l)
#
#Plotting testing cross entropy loss for each classifier
f,ax = plt.subplots()
ax.plot(range(len(cross_entropy_test)), cross_entropy_test)
plt.legend()
plt.title('Testing cross entropy loss (with average weights) vs iterations (batch)')
plt.ylabel('Testing loss')
plt.xlabel('Iterations')
plt.show()
predict_test = np.array(predict_test)
#Testing 3D cloud
fig = plt.figure()
ax = fig.add_subplot(111, projection = '3d')
colors = ['green','black','cyan', 'brown','yellow']
labels = [1004, 1100, 1103, 1200, 1400]
label_conv = {0:1004, 1:1100, 2:1103, 3:1200, 4:1400}
for i in range(len(predict_test)):
predict_test[i] = label_conv[predict_test[i]]
predict_test = np.array(predict_test)
print('Testing cross entropy loss: {}'.format(len(y_test[predict_test!=y_test])/(len(y_test))))
for index, label in enumerate(labels):
test_label = test[(predict_test.reshape(-1,) == label) ]
ax.scatter(test_label[:,0], test_label[:,1], test_label[:,2], c = colors[index])
plt.show()
# ################################################################################################################
# #TRAINING
# print("Training loss calculations.............")
# #Training loss and point cloud
# y_train = data[:,4]
# x_train = data[:,5:]
# #Encoding
# y_encoded_train = np.zeros((len(data), num_class))
# for c in range(num_class):
# y_encoded_train[:,c] = calc_label(y_train, c)
# #Variables
# train_loss = []
# train_predict = []
# #Calculating predicted values, cross entropy error and loss for training data
# batch = 100
# index =0
# while(index+batch < len(data)):
# l = np.zeros(num_class)
# hypothesis = np.zeros((batch,num_class))
# for c in range(num_class):
# l[c] = (1/batch)*0.5*np.sum((avg_weights[c].dot(x_train[index:index+batch,:].T).T - y_encoded_train[index:index+batch,c])**2)
# hypothesis[:,c] = avg_weights[c].dot(x_train[index:index+batch,:].T).T
# train_predict.extend([label_conv[x] for x in np.argmax(hypothesis, axis = 1)])
# train_loss.append(l)
# index+=batch
# if(index < len(data)):
# batch = len(data)-index
# l = np.zeros(num_class)
# hypothesis = np.zeros((batch, num_class))
# for c in range(num_class):
# l[c] = (1/batch)*0.5*np.sum((avg_weights[c].dot(x_train[index:index+batch,:].T).T - y_encoded_train[index:index+batch,c])**2)
# hypothesis[:,c] = avg_weights[c].dot(x_train[index:index+batch,:].T).T
# train_predict.extend([label_conv[x] for x in np.argmax(hypothesis, axis = 1)])
# train_loss.append(l)
# train_predict = np.array(train_predict)
# cross_entropy_error = (len(train_predict[train_predict != y_train]))/len(train_predict)
# print("Cross entropy training error : {}".format(cross_entropy_error))
# train_loss = np.array(train_loss)
# #Plotting training loss for each classifier
# label_ = ['classifier1', 'classifier2', 'classifier3', 'classifier4', 'classifier5']
# f,ax = plt.subplots()
# for c in range(0,1):
# ax.plot(range(len(train_loss[:,c])), train_loss[:,c], label = label_[c])
# plt.legend()
# plt.title('Training loss (with average weights) vs iterations')
# plt.ylabel('Training loss')
# plt.xlabel('Iterations')
# plt.show()
# #Training 3D cloud
# fig = plt.figure()
# ax = fig.add_subplot(111, projection = '3d')
# colors = ['green','black','cyan', 'brown','yellow']
# for index, label in enumerate(labels):
# train_label = data[(train_predict.reshape(-1,) == label) ]
# ax.scatter(train_label[:,0], train_label[:,1], train_label[:,2], c = colors[index])
# plt.show()
| ManishaNatarajan/Online-Learning | log_reg.py | log_reg.py | py | 9,140 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.random.shuffle",
"line_... |
33635193286 | import sys
import pypyodbc
from PyQt5.QtWidgets import *#QApplication, QMainWindow,QDialog,QMessageBox
from PyQt5 import uic
from PyQt5.QtGui import QIcon
import time
import random
from tkinter import *
from PyQt5.QtCore import pyqtSlot
class Elim(QDialog):
def __init__(self):
# lista=[["Dt1234","Jose","Gomez","Ramirez","19","el Parian","Perturbar el orden publico","18/04/17","06:39","Desempleado","Jorge ","Masculino","Chepe",]
# ,["Dt1235","Diego","Ramos","Rosas","21","el pasaje","Perturbar el orden publico","18/03/17","06:39","Desempleado","Jorge ","Masculino","go diego",],
# ["Dt1235", "Diego", "Ramos", "Rosas", "21", "el pasaje", "Perturbar el orden publico", "18/03/17","06:39", "Desempleado", "Jorge ", "Masculino", "go diego",]]
QMainWindow.__init__(self)
uic.loadUi('TABLA.ui',self)
self.tableWidget.setRowCount(4)
""" for c in range(3):
for i in range (13):
self.tableWidget.setItem(c, i, QTableWidgetItem(lista[c][i]))#"hola"+str(c)+str(i)))
"""
###############################
connection = pypyodbc.connect(
'Driver={SQL Server};Server=LAPTOP-M7FQT57H;Database=Detenidosp;uid=sa;pwd=1234' )
# Creating Cursor
cursor = connection.cursor()
# SQL Query
SQLCommand = ("select * from infractor")
# Processing Query
cursor.execute(SQLCommand)
i = 0
rowsbd = 100
fielddb = 13
self.tableWidget.setRowCount(8)
self.tableWidget.clearContents()
for rows in cursor.fetchall():
print("------------Employee %d-----------------" % i)
f = 0
for field in rows:
print(str(field))
#if f==5:
if f==8 :
numale=random.randint(10000,99999)
self.tableWidget.setItem(i, f, QTableWidgetItem('43710'+str(numale)))
else:
self.tableWidget.setItem(i, f, QTableWidgetItem(str(field)))
f += 1
print("---------------------------------------")
print('')
i = i + 1
connection.close()
class ven:
def __init__(self):
ven = Tk()
Button(ven, text="TAbLa", command=self.boton).pack()
ven.mainloop()
def boton(self):
app = QApplication(sys.argv)
# Crear un objeto de la clase
_ventana = Elim()
_ventana.show()
# Mostra la ventana
# _ventana.show()
# Ejecutar la aplicaci
app.exec_()
| whitoutpieces/Unknow | Creatabla.py | Creatabla.py | py | 2,633 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.uic.loadUi",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "PyQt5.uic",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "pypyodbc.connect",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "random.randint",
"lin... |
35515269259 | import argparse
import matplotlib.pyplot as plt
import numpy as np
from textwrap import wrap
def moving_avg_filter(data_arr, w):
data_arr_cumsum = np.cumsum(data_arr)
data_arr_cumsum[w:] = (data_arr_cumsum[w:] - data_arr_cumsum[:-w])
data_arr_filtered = data_arr_cumsum[w-1:]/w
return data_arr_filtered
# Plot validation precision/recall curves (bbox + mask) from a tab-separated
# metrics file produced during training; saves a PNG next to the data file.
parser = argparse.ArgumentParser('Plot the loss vs iteraation and accuracy vs iteration for givern data file')
parser.add_argument('--data_file', default='', help='Data file filepath', dest='data_file') # 'loss_data/step_valid_metrics__2021_06_08__11_23_14 (copy).txt'
parser.add_argument('--step_size', default=1, type=int, help='Step size', dest='step_size')
parser.add_argument('--filter_size', default=1, type=int, help='Filter size', dest='filter_size')
FLAGS = parser.parse_args()
w = FLAGS.filter_size
data_arr = np.loadtxt(FLAGS.data_file, dtype='float', comments='#', delimiter='\t')
metric_list = ['average precision @ IoU=0.50 | area=all | maxDets=100', 'average recall @ IoU=0.50 | area=all | maxDets=100']
# Column indices into data_arr for [precision, recall]; presumably columns
# 2/13 are bbox AP/AR and 15/26 are mask AP/AR -- TODO confirm file layout.
ind_dict = {'Bounding boxes': [2,13], 'Segmentation masks': [15,26]}
color_list = ['r','g']
fig, ax = plt.subplots(1,2, figsize=(8,3), sharey=True)
for i, (key, value) in enumerate(ind_dict.items()):
    temp_ind_list = value
    for j, metric in enumerate(metric_list):
        temp_ind = temp_ind_list[j]
        # steps = np.arange(len(data_arr[:, 0]))+1
        # Column 0 holds the epoch/step axis.
        steps = data_arr[:, 0]
        temp_valid = data_arr[:, temp_ind]
        if w > 1:
            # Smoothing shortens the series by w-1; trim the x-axis to match.
            steps = steps[w-1:]
            temp_valid = moving_avg_filter(temp_valid, w)
        ind_start = 0
        ind_step = FLAGS.step_size
        ind_end = len(steps) + 1
        # ind_end = 600
        ax[i].plot(steps[ind_start:ind_end:ind_step], temp_valid[ind_start:ind_end:ind_step], color=color_list[j], linestyle='-', label='\n'.join(wrap(metric, 20)))
    # set formatting to be the same for both subplots
    # ax[i].tick_params(axis='both', which='both', labelsize=10)
    # set x-axis ticks to be visible for second subplot
    ax[i].yaxis.set_tick_params(labelleft=True)
    ax[i].set_title(key)
    ax[i].set_xlabel('epoch')
    ax[i].grid(linestyle='--')
    ax[i].legend(loc='lower right', fontsize='small')
#ax[0].set_ylabel('value')
ax[0].set_ylabel('Precision or Recall')
ax[1].set_ylabel('Precision or Recall')
# fig.suptitle('Validation Metrics for Mask RCNN')
# plt.tight_layout()
fig.subplots_adjust(left=0.07, bottom=0.15, right=0.99, top=0.91, wspace=0.20 ,hspace=0.20 )
# Output PNG shares the data file's name (extension stripped).
fig.savefig('{}.png'.format(FLAGS.data_file[:-4]),dpi=200,transparent=True)
plt.show()
| onermustafaumit/MLNM | gland_segmentation/mask_rcnn/plot_valid_metrics.py | plot_valid_metrics.py | py | 2,635 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "numpy.cumsum",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.s... |
13909904792 | """Module with lagrangian decomposition methods."""
# Python packages
import gurobipy
import logging as log
import pandas as pd
import copy
# Package modules
from firedecomp.classes import solution
from firedecomp import config
from firedecomp.AL import ARPP
# Subproblem ------------------------------------------------------------------
class DecomposedPrimalProblem(ARPP.RelaxedPrimalProblem):
    """Per-resource primal subproblem of the augmented-Lagrangian decomposition.

    The contention variables ``y`` are fixed to ``list_y``; at any moment only
    the resource selected through :meth:`change_resource` is free, while every
    other resource is pinned to the values stored in the ``*_original`` dicts.
    """
    def __init__(self, problem_data, lambda1, beta1, list_y, nproblems, init_sol_dict=None, init_sol_prob=None, relaxed=False,
                 min_res_penalty=1000000, valid_constraints=None):
        """Initialize the DecomposedPrimalProblem.
        Args:
            problem_data (:obj:`Problem`): problem data.
            lambda1 (:obj:`list`): numeric values of the Lagrange multipliers.
            beta1 (:obj:`list`): penalty coefficients, one per relaxed constraint.
            list_y (:obj:`list`): fixed 0/1 values for the contention variables.
            nproblems (:obj:`int`): number of subproblems in the decomposition.
            init_sol_dict (:obj:`dict`): optional warm-start values (currently unused).
            init_sol_prob: optional warm-start problem (currently unused).
            relaxed (:obj:`bool`): relax integrality. Defaults to False.
            min_res_penalty (:obj:`int`): penalty for unmet minimum resources.
                Defaults to 1000000.
        """
        self.list_y = list_y.copy()
        self.nproblems = nproblems
        self.NL = len(lambda1)
        self.resource_i = 0
        super().__init__(problem_data, lambda1, beta1, relaxed, min_res_penalty, valid_constraints)
        self.__create_constraints_aux__()
        self.copy_original_bounds()
        # Solve once quietly (5s cap) so every variable has a value that
        # set_init_value_prob can store as the frozen state of the resources.
        solver_options = {
            'OutputFlag': 0,
            'LogToConsole': 0,
            'TimeLimit': 5,
        }
        self.solve(solver_options=solver_options)
        self.set_init_value_prob(self)
        #if init_sol_dict is not None:
        #    self.set_init_value_dict(init_sol_dict)
        #if init_sol_prob is not None:
        #    self.set_init_value_prob(init_sol_prob)
    ##########################################################################################
    # PRIVATE METHOD: __create_gurobi_model__ ()
    # OVERWRITE RelaxedPrimalProblem.__create_gurobi_model__()
    def __create_gurobi_model__(self):
        """Create the gurobi model for this subproblem."""
        self.m = gurobipy.Model("Decomposed_Primal_Problem")
    ################################################################################
    # PRIVATE METHOD: __create_var_y_and_aux_vars__
    # OVERWRITE RelaxedPrimalProblem.__create_var_y_and_aux_vars__()
    ################################################################################
    def __create_var_y_and_aux_vars__(self):
        """Create the (fixed) contention variables and the AL auxiliary variables."""
        self.sizey = len(self.T + [self.min_t - 1])
        self.y = self.m.addVars(self.T + [self.min_t - 1], vtype=self.vtype,
                                lb=self.lb, ub=self.ub, name="contention")
        self.aux_total = self.m.addVars(self.NL, vtype=gurobipy.GRB.CONTINUOUS, name="aux_total_AL")
        # Fix y to the values received from the master by collapsing its bounds.
        for i in range(0, self.sizey):
            self.y[i].UB = self.list_y[i]
            self.y[i].LB = self.list_y[i]
            self.y[i].start = self.list_y[i]
    ################################################################################
    # PRIVATE METHOD: __create_objfunc__()
    ################################################################################
    def __create_objfunc__(self):
        """Build the augmented-Lagrangian objective for the active resource."""
        # Wildfire Containment (2) and (3)
        Constr1 = []
        Constr1.append(sum([self.PER[t] * self.y[t - 1] for t in self.T]) -
                       sum([self.PR[i, t] * self.w[i, t]
                            for i in self.I for t in self.T]))
        list_Constr2 = list(
            -1.0 * self.M * self.y[t] + sum([self.PER[t1] for t1 in self.T_int.get_names(p_max=t)]) * self.y[t - 1]
            - sum([self.PR[i, t1] * self.w[i, t1] for i in self.I for t1 in self.T_int.get_names(p_max=t)])
            for t in self.T)
        # Non-Negligence of Fronts (14) and (15)
        #list_Constr3 = list(
        #    (-1.0 * sum([self.w[i, t] for i in self.Ig[g]])) - (self.nMin[g, t] * self.y[t - 1] + self.mu[g, t])
        #    for g in self.G for t in self.T)
        #list_Constr4 = list(sum([self.w[i, t] for i in self.Ig[g]]) - self.nMax[g, t] * self.y[t - 1]
        #                    for g in self.G for t in self.T)
        self.list_Constr = Constr1 + list_Constr2 #+ list_Constr3 + list_Constr4
        # Objective
        # =========
        self.function_obj_total = (sum([self.C[i] * self.u[i, t] for i in self.I for t in self.T]) +
                                   sum([self.P[i] * self.z[i] for i in self.I]) +
                                   sum([self.NVC[t] * self.y[t - 1] for t in self.T]) +
                                   sum([self.Mp * self.mu[g, t] for g in self.G for t in self.T]) +
                                   0.001 * self.y[self.max_t])
        # Resource 0 also carries the shared (y-dependent) cost terms; the other
        # resources only contribute their own usage and penalty costs.
        if self.resource_i == 0:
            self.function_obj = (sum([self.C[self.I[self.resource_i]]*self.u[self.I[self.resource_i], t] for t in self.T]) +
                                 sum([self.P[self.I[self.resource_i]] * self.z[self.I[self.resource_i]]]) +
                                 sum([self.NVC[t] * self.y[t-1] for t in self.T]) +
                                 sum([self.Mp*self.mu[g, t] for g in self.G for t in self.T]) +
                                 0.001*self.y[self.max_t])
        else:
            self.function_obj = (sum([self.C[self.I[self.resource_i]]*self.u[self.I[self.resource_i], t] for t in self.T]) +
                                 sum([self.P[self.I[self.resource_i]] * self.z[self.I[self.resource_i]]]))
        # Augmented-Lagrangian penalty term, quadratic in the auxiliary variables.
        self.LR_obj = 0
        self.LR_obj_const = []
        for i in range(0, len(self.list_Constr)):
            Constr1 = self.list_Constr[i]
            self.LR_obj = self.LR_obj + 1.0 / (2.0 * self.beta[i]) * (
                self.aux_total[i] * self.aux_total[i] - self.lambda1[i] * self.lambda1[i])
            self.LR_obj_const.append(Constr1)
        self.function_obj_total_pen = self.function_obj_total + self.LR_obj
        self.m.setObjective(self.function_obj + self.LR_obj, self.sense_opt)
    ################################################################################
    # METHOD: set_init_value_dict
    ################################################################################
    def set_init_value_dict(self, dict_update):
        """Store rounded solution values from *dict_update* as the frozen state
        used for the non-active resources.
        Args:
            dict_update (:obj:`dict`): solution values keyed by variable name.
        """
        Tlen = self.T
        Glen = self.G
        for i in range(0, len(self.I)):
            res = self.I[i]
            for tt in Tlen:
                self.s_original[res, tt] = round(dict_update.get('s')[res, tt])
                self.tr_original[res, tt] = round(dict_update.get('tr')[res, tt])
                self.r_original[res, tt] = round(dict_update.get('r')[res, tt])
                self.er_original[res, tt] = round(dict_update.get('er')[res, tt])
                self.e_original[res, tt] = round(dict_update.get('e')[res, tt])
        for grp in Glen:
            for tt in Tlen:
                self.mu_original[grp, tt] = dict_update.get('mu')[grp, tt]
    ################################################################################
    # METHOD: set_init_value_prob
    ################################################################################
    def set_init_value_prob(self, problem_update):
        """Store rounded variable values from the solved *problem_update* as the
        frozen state used for the non-active resources.
        Args:
            problem_update: a solved problem exposing the s/tr/r/er/e/mu variables.
        """
        Tlen = self.T
        Glen = self.G
        for i in range(0, len(self.I)):
            res = self.I[i]
            for tt in Tlen:
                self.s_original[res, tt] = round(problem_update.s[res, tt].X)
                self.tr_original[res, tt] = round(problem_update.tr[res, tt].X)
                self.r_original[res, tt] = round(problem_update.r[res, tt].X)
                self.er_original[res, tt] = round(problem_update.er[res, tt].X)
                self.e_original[res, tt] = round(problem_update.e[res, tt].X)
        for grp in Glen:
            for tt in Tlen:
                self.mu_original[grp, tt] = problem_update.mu[grp, tt].X
    ################################################################################
    # METHOD: copy_original_bounds
    ################################################################################
    def copy_original_bounds(self):
        """Create the empty dictionaries that hold the frozen variable values."""
        self.s_original = {}
        self.tr_original = {}
        self.r_original = {}
        self.er_original = {}
        self.e_original = {}
        self.mu_original = {}
    ################################################################################
    # METHOD: change_resource
    ################################################################################
    def change_resource(self, resource_i, lambda1, beta1, v):
        """Make *resource_i* the free resource and rebuild the objective.
        Every other resource is pinned to its stored value by collapsing both
        variable bounds; the auxiliary constraints are rebuilt with the new
        multipliers/penalties.
        Args:
            resource_i (:obj:`int`): index of the resource to optimize.
            lambda1 (:obj:`list`): updated Lagrange multipliers.
            beta1 (:obj:`list`): updated penalty coefficients.
            v: scaling factor used in the auxiliary constraints.
        """
        self.lambda1 = lambda1.copy()
        self.beta = beta1.copy()
        self.resource_i = resource_i
        self.v = v
        Tlen = self.T
        Glen = self.G
        self.m.reset()
        for i in range(0, len(self.I)):
            res = self.I[i]
            for tt in Tlen:
                if self.resource_i != i:
                    # Pin the variable: ub == lb == stored value.
                    self.s[res, tt].setAttr("ub", round(self.s_original[res, tt]))
                    self.tr[res, tt].setAttr("ub", round(self.tr_original[res, tt]))
                    #self.r[res, tt].setAttr("ub", round(self.r_original[res, tt]))
                    self.er[res, tt].setAttr("ub", round(self.er_original[res, tt]))
                    self.e[res, tt].setAttr("ub", round(self.e_original[res, tt]))
                    self.s[res, tt].setAttr("lb", round(self.s_original[res, tt]))
                    self.tr[res, tt].setAttr("lb", round(self.tr_original[res, tt]))
                    #self.r[res, tt].setAttr("lb", round(self.r_original[res, tt]))
                    self.er[res, tt].setAttr("lb", round(self.er_original[res, tt]))
                    self.e[res, tt].setAttr("lb", round(self.e_original[res, tt]))
                else:
                    # Restore the full domain for the resource being optimized.
                    self.s[res, tt].setAttr("ub", self.ub)
                    self.tr[res, tt].setAttr("ub", self.ub)
                    self.r[res, tt].setAttr("ub", self.ub)
                    self.er[res, tt].setAttr("ub", self.ub)
                    self.e[res, tt].setAttr("ub", self.ub)
                    self.s[res, tt].setAttr("lb", self.lb)
                    self.tr[res, tt].setAttr("lb", self.lb)
                    self.r[res, tt].setAttr("lb", self.lb)
                    self.er[res, tt].setAttr("lb", self.lb)
                    self.e[res, tt].setAttr("lb", self.lb)
        self.__create_objfunc__()
        self.__modify_constraints_aux__()
        self.m.update()
    def return_best_candidate(self, total_obj_function, total_unfeasibility):
        """Return the index of the best candidate solution.
        Prefers the feasible candidate (unfeasibility <= 0) with the lowest
        objective; if none is feasible, returns the least infeasible one.
        """
        candidate_list = []
        counter = 0
        for i in range(0, len(total_unfeasibility)):
            if total_unfeasibility[i] <= 0:
                candidate_list.append(i)
        if len(candidate_list) > 0:
            counter = candidate_list[0]
            for i in range(0, len(candidate_list)):
                if total_obj_function[counter] > total_obj_function[candidate_list[i]]:
                    counter = candidate_list[i]
        else:
            counter = total_unfeasibility.index(min(total_unfeasibility))
        return counter
    ################################################################################
    # METHOD: update_original_values
    ################################################################################
    def update_original_values(self, DPP, change):
        """Refresh the frozen values from the solved subproblem *DPP* when
        *change* is 1; otherwise do nothing."""
        if change == 1:
            Tlen = self.T
            Glen = self.G
            for res in self.problem_data.get_names("resources"):
                for tt in Tlen:
                    self.s_original[res, tt] = DPP.get_variables().get_variable('s')[res, tt].X
                    self.tr_original[res, tt] = DPP.get_variables().get_variable('tr')[res, tt].X
                    self.r_original[res, tt] = DPP.get_variables().get_variable('r')[res, tt].X
                    self.er_original[res, tt] = DPP.get_variables().get_variable('er')[res, tt].X
                    # Bug fix: the 'e' values were previously assigned into
                    # r_original, clobbering 'r' and leaving e_original stale.
                    self.e_original[res, tt] = DPP.get_variables().get_variable('e')[res, tt].X
            for grp in Glen:
                for tt in Tlen:
                    self.mu_original[grp, tt] = DPP.get_variables().get_variable('mu')[grp, tt].X
    ################################################################################
    # PRIVATE METHOD: __modify_constraints_aux__()
    ################################################################################
    def __modify_constraints_aux__(self):
        """Rebuild the auxiliary constraints linking each relaxed constraint to
        its auxiliary variable using the current multipliers and penalties."""
        for i in range(0, len(self.list_Constr)):
            Constr1 = self.list_Constr[i]
            self.m.remove(self.m.getConstrByName("aux2_const"+str(i)))
            self.m.addConstr(self.lambda1[i] + self.beta[i] * Constr1 / self.v <= self.aux_total[i],
                             name="aux2_const" + str(i))
        self.m.update()
| jorgerodriguezveiga/firedecomp | firedecomp/AL/ADPP.py | ADPP.py | py | 13,091 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "firedecomp.AL.ARPP.RelaxedPrimalProblem",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "firedecomp.AL.ARPP",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "gurobipy.Model",
"line_number": 54,
"usage_type": "call"
},
{
"ap... |
19257210633 | import pytest
from app import create_app
from app.models import TaskEntity
from app.services import tasks
@pytest.fixture()
def client():
    """Yield a Flask test client for a fresh app; reset service state afterwards."""
    test_app = create_app()
    test_app.config.update({'TESTING': True})
    yield test_app.test_client()
    # Teardown: clear stored tasks and reset the entity id counter so that
    # each test starts from a clean slate.
    tasks.clear()
    TaskEntity.reset_id()
| macoyshev/to_Do_list | tests/app/conftest.py | conftest.py | py | 316 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "app.create_app",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "app.config.update",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "app.config",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "app.test_client",
... |
27102810736 | # -*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext_lazy as _
from .models import Pedido
class PedidoForm(forms.ModelForm):
    """ModelForm for creating/editing a Pedido (order).

    Exposes description, date, supplier, user and total, with Spanish
    labels and per-field 'required' error messages.
    """
    class Meta:
        model = Pedido
        fields = ('descripcion', 'fecha', 'proveedor', 'usuario','total',)
        labels = {
            'descripcion': _(u'Descripción'),
            'fecha': _(u'Fecha Pedido'),
            'proveedor': _(u'*Proveedor'),
            'usuario': _(u'Usuario'),
            'total': _(u'*Total'),
            # NOTE(review): 'id_materia_prima' has a label but is not listed in
            # `fields`, so it is never rendered -- confirm whether it should be.
            'id_materia_prima': _(u'*Materia'),
        }
        error_messages = {
            'fecha': {
                'required': _("El campo fecha es requerido"),
            },
            'proveedor': {
                'required': _("El campo proveedor es requerido"),
            },
            'usuario': {
                'required': _("El campo usuario es requerido"),
            },
            'total': {
                'required': _("El campo total es requerido"),
            },
        }
| IvanVilla1585/RefrescosChupiFlum | ChupiFlum/pedido/forms.py | forms.py | py | 1,012 | python | es | code | 1 | github-code | 36 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "models.Pedido",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.utils.tran... |
36687194652 | from osgeo import gdal, gdal_array
import numpy as np
# "Before" image
im1 = "D:\\Python36\\testdata\\before.tif"
# "After" image
im2 = "D:\\Python36\\testdata\\after.tif"
#Output image name
output = "D:\\Python36\\testdata\\change.tif"
# Load before and after into arrays
ar1 = gdal_array.LoadFile(im1).astype(np.int8)
ar2 = gdal_array.LoadFile(im2)[1].astype(np.int8)
# Perform a simple array difference on the images
diff = ar2 - ar1
# Set up our classification scheme to try
# and isolate significant changes
classes = np.histogram(diff, bins=5)[1]
# The color black is repeated to mask insignificant changes
lut = [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 0], [255, 0, 0]]
# Starting value for classification
start = 1
# Set up the output image
rgb = np.zeros((3, diff.shape[0], diff.shape[1], ), np.int8)
# Process all classes and assign colors
for i in range(len(classes)):
mask = np.logical_and(start <= diff, diff <= classes[i])
for j in range(len(lut[i])):
rgb[j] = np.choose(mask, (rgb[j], lut[i][j]))
start = classes[i]+1
# Save the output image
gdal_array.SaveArray(rgb, output, format="GTiff",
prototype=im2)
output = None
| Jinunmeng/Python-ImageProcess | change_detection.py | change_detection.py | py | 1,207 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "osgeo.gdal_array.LoadFile",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "osgeo.gdal_array",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "numpy.int8",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "osgeo.gdal... |
14812101209 | import numpy as np
import matplotlib.pyplot as plt
import os
# Sample f(x) = -sin(x) * sin(x^2/pi)^(2A) on [0, pi], write the table to a
# text file, and plot the curve.
n = 100
A = 10
x = np.linspace(0, np.pi, 100)  # sample grid: 100 points on [0, pi]
fx = (-np.sin(x)*((np.sin((x**2)/np.pi))**(2*A)))
# Create the output directory; exist_ok replaces the old try/except OSError idiom.
os.makedirs('results', exist_ok=True)
complete_file = os.path.join('results', 'task_01_307B_Pogudin_16.txt')
# Write the sampled table; the with-statement guarantees the file is closed
# even if a write fails (the original open/close pair leaked on error).
with open(complete_file, 'w') as f:
    f.write(' x f(x)\n')
    for i in range(n):
        f.write(str("%.4f" % x[i])+' '+str("%.4f" % fx[i])+"\n")
# Plot the curve.
fig, ax = plt.subplots()
ax.plot(x, fx, linewidth = 2)
ax.set_xlim(0, 3);
ax.set_ylim(-1, 0.25);
ax.grid(linewidth = 1)
plt.xlabel('x')
plt.ylabel('f(x)')
# NOTE(review): the curve is drawn a second time in red over the first plot --
# presumably intentional styling; confirm.
plt.plot(x, fx, color = 'red')
plt.show()
plt.show() | AlexPogudin/wor1 | Pogudin_16_PY.py | Pogudin_16_PY.py | py | 784 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "numpy.linspace",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "numpy.sin",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 7,
... |
74752000104 | import unittest
from unittest.mock import patch, MagicMock, call, DEFAULT
from requests.exceptions import ConnectionError
from lib.marcParse import (
parseMARC,
transformMARC,
extractAgentValue,
extractHoldingsLinks,
extractSubjects,
extractSubfieldValue,
parseHoldingURI
)
from lib.dataModel import WorkRecord, InstanceRecord
from lib.linkParser import LinkParser
from helpers.errorHelpers import DataError
class TestMARC(unittest.TestCase):
    """Unit and functional tests for lib.marcParse.

    Covers record iteration (parseMARC), record transformation (transformMARC),
    agent/holdings/subject extraction and subfield handling, exercising both
    success paths and the IndexError/ConnectionError fallbacks.
    """
    ############
    # Unit Tests
    ############
    @patch('lib.marcParse.transformMARC', side_effect=['test1', None, 'test2'])
    def test_parse_list(self, mock_marc):
        # None results from transformMARC are dropped from the returned list.
        res = parseMARC([1, 2, 3], 'test_rels')
        self.assertEqual(res, ['test1', 'test2'])
    # NOTE(review): @patch decorators apply bottom-up, so mock_subfield is the
    # extractSubjects mock, mock_subjects the extractSubfieldValue mock, etc.
    # The names are mislabeled but harmless since none is asserted on here.
    @patch('lib.marcParse.extractSubfieldValue')
    @patch('lib.marcParse.extractAgentValue')
    @patch('lib.marcParse.extractHoldingsLinks')
    @patch('lib.marcParse.extractSubjects')
    def test_transform(self, mock_subfield, mock_agent, mock_links, mock_subjects):
        mock_marc = MagicMock()
        lang_field = MagicMock()
        mock_eng = MagicMock()
        mock_eng.value = 'English'
        mock_und = MagicMock()
        mock_und.value = 'undefined'
        lang_field.subfield.return_value = [mock_eng, mock_und]
        mockDict = {'546': [lang_field], '856': None}
        def lclgetitem(name):
            return mockDict[name]
        mock_marc.__getitem__.side_effect = lclgetitem
        testWork, testID = transformMARC(
            ('doab:test', '2019-01-01', mock_marc),
            'test_rels'
        )
        self.assertEqual(testWork.identifiers[0].identifier, 'doab:test')
        self.assertEqual(testWork.instances[0].rights[0].source, 'doab')
        self.assertEqual(testWork.language[0].iso_3, 'eng')
        self.assertEqual(testID, 'doab:test')
    def test_agent_create(self):
        # A 100 field with name ($a) and role code ($4) yields one agent whose
        # role is resolved through the relator dict.
        testRec = MagicMock()
        testRec.agents = []
        mock_name = MagicMock()
        mock_name.value = 'Test, Tester'
        mock_role = MagicMock()
        mock_role.value = 'tst'
        testData = {
            '100': [{
                'a': [mock_name],
                '4': [mock_role]
            }]
        }
        testRels = {
            'tst': 'testing'
        }
        extractAgentValue(testData, testRec, '100', testRels)
        self.assertEqual(testRec.agents[0].name, 'Test, Tester')
        self.assertEqual(testRec.agents[0].roles[0], 'testing')
    @patch('lib.marcParse.parseHoldingURI', side_effect=[
        ('uri1', 'text/html'),
        ('uri2', 'application/pdf')
    ])
    @patch.multiple(LinkParser, selectParser=DEFAULT, createLinks=DEFAULT)
    def test_add_links(self, mock_parse, selectParser, createLinks):
        testRec = MagicMock()
        testItem = MagicMock()
        # ind1 != '4' (mock_bad) and a missing $u (mock_missing) must be skipped;
        # only the html and pdf holdings reach the LinkParser.
        mock_bad = MagicMock()
        mock_bad.ind1 = 'err'
        mock_html = MagicMock()
        mock_html.ind1 = '4'
        mock_pdf = MagicMock()
        mock_pdf.ind1 = '4'
        mock_url = MagicMock()
        mock_url.value = 'general/url'
        mock_note = MagicMock()
        mock_note.value = 'DOAB Note'
        mock_missing = MagicMock()
        mock_missing.ind1 = '4'
        mock_missing.subfield.side_effect = IndexError
        def side_effect(subfield):
            subs = {
                'u': [mock_url],
                'z': [mock_note]
            }
            return subs[subfield]
        mock_pdf.subfield.side_effect = side_effect
        mock_html.subfield.side_effect = side_effect
        testHoldings = [
            mock_bad,
            mock_html,
            mock_pdf,
            mock_missing
        ]
        extractHoldingsLinks(testHoldings, testRec, testItem)
        selectParser.assert_has_calls([call(), call()])
        createLinks.assert_has_calls([call(), call()])
    @patch('lib.marcParse.requests')
    def test_parse_holding_success(self, mock_req):
        # A 302 redirect is followed once, then the Content-Type is read.
        mock_redirect = MagicMock()
        mock_redirect.status_code = 302
        mock_redirect.headers = {'Location': 'testURI'}
        mock_head = MagicMock()
        mock_head.headers = {'Content-Type': 'text/testing'}
        mock_req.head.side_effect = [mock_redirect, mock_head]
        outURI, contentType = parseHoldingURI('testURI')
        mock_req.head.assert_has_calls([
            call('testURI', allow_redirects=False),
            call('testURI', allow_redirects=False)
        ])
        self.assertEqual(outURI, 'testURI')
        self.assertEqual(contentType, 'text/testing')
    @patch('lib.marcParse.requests.head', side_effect=ConnectionError)
    def test_parse_holding_error(self, mock_head):
        # Network failures surface as the project's DataError.
        with self.assertRaises(DataError):
            parseHoldingURI('errorURI')
    @patch('lib.marcParse.requests.head')
    def test_parse_holding_no_type(self, mock_head):
        # Missing Content-Type falls back to text/html.
        mock_header = MagicMock()
        mock_header.headers = {}
        mock_head.return_value = mock_header
        testURI, contentType = parseHoldingURI('noContentURI')
        self.assertEqual(testURI, 'noContentURI')
        self.assertEqual(contentType, 'text/html')
    def test_600_subjects(self):
        testSubj = MagicMock()
        testSubfield = MagicMock()
        testSubfield.value = 'test'
        testSubj.subfield.return_value = [testSubfield]
        testData = {
            '600': [testSubj]
        }
        testRec = MagicMock()
        extractSubjects(testData, testRec, '600')
        testRec.addClassItem.assert_called_once()
    def test_610_subjects(self):
        testSubj = MagicMock()
        testSubj.ind2 = '7'
        testSubfield = MagicMock()
        testSubfield.value = 'test'
        testSubj.subfield.return_value = [testSubfield]
        testData = {
            '610': [testSubj]
        }
        testRec = MagicMock()
        extractSubjects(testData, testRec, '610')
        testRec.addClassItem.assert_called_once()
    def test_655_subjects(self):
        # Even when every subfield lookup raises IndexError, a class item is
        # still added for the 655 field.
        testSubj = MagicMock()
        testSubfield = MagicMock()
        testSubfield.value = 'test'
        testSubfield.side_effect = IndexError
        testSubj.subfield.side_effect = IndexError
        testData = {
            '655': [testSubj]
        }
        testRec = MagicMock()
        extractSubjects(testData, testRec, '655')
        testRec.addClassItem.assert_called_once()
    def test_subfield_general(self):
        # Exercises every branch of extractSubfieldValue: None target, list
        # target, string target (appended with '; '), IndexError swallow,
        # agents, identifiers and pub_date handling.
        recDict = {'other': None, 'array': [], 'str': 'str'}
        def getitem(name):
            return recDict[name]
        def setitem(name, val):
            recDict[name] = val
        testRec = MagicMock()
        testRec.__getitem__.side_effect = getitem
        testRec.__setitem__.side_effect = setitem
        testRec.agents = []
        mock_field = MagicMock()
        mock_sub = MagicMock()
        mock_sub.value = 'testing'
        mock_field.subfield.return_value = [mock_sub]
        testData = {
            'test': [mock_field]
        }
        extractSubfieldValue(testData, testRec, ('test', 'other', 't'))
        mock_field = MagicMock()
        mock_sub = MagicMock()
        mock_sub.value = 'testing'
        mock_field.subfield.return_value = [mock_sub]
        testData = {
            'test': [mock_field]
        }
        extractSubfieldValue(testData, testRec, ('test', 'array', 't'))
        mock_field = MagicMock()
        mock_sub = MagicMock()
        mock_sub.value = 'testing'
        mock_field.subfield.return_value = [mock_sub]
        testData = {
            'test': [mock_field]
        }
        extractSubfieldValue(testData, testRec, ('test', 'str', 't'))
        mock_field = MagicMock()
        mock_field.subfield.side_effect = IndexError
        testData = {
            'test': [mock_field]
        }
        extractSubfieldValue(testData, testRec, ('test', 'err', 'e'))
        mock_field = MagicMock()
        mock_sub = MagicMock()
        mock_sub.value = 'test_agent'
        mock_field.subfield.return_value = [mock_sub]
        testData = {
            'test': [mock_field]
        }
        extractSubfieldValue(testData, testRec, ('test', 'agents', 'a', 'test'))
        mock_field = MagicMock()
        mock_sub = MagicMock()
        mock_sub.value = 'test_id'
        mock_field.subfield.return_value = [mock_sub]
        testData = {
            'test': [mock_field]
        }
        extractSubfieldValue(testData, testRec, ('test', 'identifiers', 'i', 'test'))
        mock_field = MagicMock()
        mock_sub = MagicMock()
        mock_sub.value = 'test_id'
        mock_field.subfield.return_value = [mock_sub]
        testData = {
            'test': [mock_field]
        }
        extractSubfieldValue(testData, testRec, ('test', 'pub_date', 'd'))
        self.assertEqual(testRec.agents[0].name, 'test_agent')
        self.assertEqual(testRec['str'], 'str; testing')
        self.assertEqual(testRec['array'], ['testing'])
        self.assertEqual(testRec['other'], 'testing')
        self.assertEqual(2, testRec.addClassItem.call_count)
    ###################
    # Functional Tests
    ###################
    def test_transformMARC_5XX_fields(self):
        # End-to-end: 505 feeds table_of_contents, 520 feeds summary.
        mockMarc = MagicMock()
        mockMarc.name = 'testing_5XX'
        testRec = (
            1,
            '2019-01-01',
            mockMarc
        )
        def mockField(fieldCode):
            mockField = MagicMock()
            mockValue = MagicMock()
            if fieldCode == '505':
                mockValue.value = 'table of contents'
            elif fieldCode == '520':
                mockValue.value = 'summary'
            else:
                mockValue.value = 'test'
            mockField.subfield.return_value = [mockValue]
            return [mockField]
        mockMarc.__getitem__.side_effect = lambda field: mockField(field)
        testWork, testID = transformMARC(testRec, {})
        self.assertIsInstance(testWork, WorkRecord)
        self.assertIsInstance(testWork.instances[0], InstanceRecord)
        self.assertEqual(testWork.instances[0].summary, 'summary')
        self.assertEqual(testWork.instances[0].table_of_contents, 'table of contents')
| NYPL/sfr-ingest-pipeline | lambda/sfr-doab-reader/tests/test_marc.py | test_marc.py | py | 10,255 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "lib.marcParse.parseMARC",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "unittest.mock.patch",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "un... |
29875713843 | # Copyright (c) 2023 Dawn
# Operations Research Calculator is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
import PyQt6.QtWidgets as QtWidgets
from PyQt6.QtCore import QParallelAnimationGroup, QPoint
from UI.Animation.utils import Utils
class IndexAnimation:
    """Animated highlight for a vertical index/side menu of 60px-tall labels.

    Two background QLabels (a gradient strip and a solid strip) are moved and
    scaled behind the currently selected label; the other labels are restored
    to their original geometry.
    """
    def __init__(self, parent:QtWidgets.QWidget, label_list:list) -> None:
        """
        Args:
            parent: widget that owns the background labels and the menu.
            label_list: the menu's QLabel entries, in display order.
        """
        self.parent = parent
        self.indexAnimation = None
        self.labelList = label_list
        self.initIndexAnimation()
    def initIndexAnimation(self):
        """Create the two background strips and record each label's geometry."""
        self.aniBackground = QtWidgets.QLabel(self.parent)
        self.aniBackground.setStyleSheet("background:qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, "
                                         "stop:0.8 #0099cc, stop:1 #ffffff);")
        self.background = QtWidgets.QLabel(self.parent)
        self.background.setStyleSheet("background:#0099cc")
        # Mark the original position to restore the position later.
        # (Attribute name kept as-is for compatibility with external callers.)
        self.sacleResumeList = [label.geometry() for label in self.labelList]
    def updateIndexAnimation(self, index:int, parent):
        """Move the highlight to the label at *index* and run the animations.

        Args:
            index: zero-based position of the newly selected label.
            parent: animation parent passed through to the Utils helpers.
        """
        # Stop the previous looping shift animation before starting a new one.
        # (Fixed: identity comparison with None instead of '!=', per PEP 8.)
        if self.indexAnimation is not None:
            self.indexAnimation.stop()
        self.aniBackground.setGeometry(0, 60 * index, 300, 60)
        self.background.setGeometry(0, 60 * index, 222, 60)
        # Animation 1: Change of color.
        for i in range(len(self.labelList)):
            if i == index:
                self.labelList[i].setStyleSheet("background:transparent;color:white")
            elif i == index + 1:
                self.labelList[i].setStyleSheet("background:qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, "
                                                "stop:0.8 #006699, stop:1 #3b3b3b);color:white")
            else:
                self.labelList[i].setStyleSheet("background:#006699;color:white")
        # Animation 2: Scale Animation.
        group = QParallelAnimationGroup(self.parent)
        for i in range(len(self.labelList)):
            if i == index:
                scaleAnimation0 = Utils(self.labelList[i], parent).scale(1.1)
                group.addAnimation(scaleAnimation0)
                scaleAnimation1 = Utils(self.background, parent).scale(1.1)
                group.addAnimation(scaleAnimation1)
                scaleAnimation2 = Utils(self.aniBackground, parent).scale(1.1)
                group.addAnimation(scaleAnimation2)
            else:
                scaleResumeAnimation = Utils(self.labelList[i], parent).scaleResume(self.sacleResumeList[i])
                group.addAnimation(scaleResumeAnimation)
        # Animation 3: Shift Animation.
        # Stacking order (bottom to top): devide < background < aniBackground
        # < selected label < stackedWidget.
        self.labelList[index].stackUnder(self.parent.stackedWidget)
        self.aniBackground.stackUnder(self.labelList[index])
        self.background.stackUnder(self.aniBackground)
        self.parent.devide.stackUnder(self.background)
        self.indexAnimation = Utils(self.aniBackground, parent).shift(QPoint(-400, self.aniBackground.y()), QPoint(self.aniBackground.x(), self.aniBackground.y()), 3000)
        self.indexAnimation.setLoopCount(-1)
        group.addAnimation(self.indexAnimation)
        group.start()
group.start() | Dawn-of-Time/Operations-Research-Calculator | UI/Animation/indexAnimation.py | indexAnimation.py | py | 3,489 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt6.QtWidgets.QWidget",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "PyQt6.QtWidgets",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "PyQt6.QtWidgets.QLabel",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "P... |
42830439112 | import requests
from requests import utils
session = requests.session()
number_list = []
for i in range(1,6):
headers = {
'Host':'match.yuanrenxue.com',
'Connection':'keep-alive',
'Content-Length':'0',
'Pragma':'no-cache',
'Cache-Control':'no-cache',
'sec-ch-ua':'" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
'sec-ch-ua-mobile':'?0',
'User-Agent':'yuanrenxue.project',
'sec-ch-ua-platform':'"Windows"',
'Accept':'*/*',
'Origin':'https://match.yuanrenxue.com',
'Sec-Fetch-Site':'same-origin',
'Sec-Fetch-Mode':'cors',
'Sec-Fetch-Dest':'empty',
'Referer':'https://match.yuanrenxue.com/match/3',
'Accept-Encoding':'gzip, deflate, br',
'Accept-Language':'zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7,en-US;q=0.6',
# 定期更新cookie
'Cookie':'sessionid=3wnl7zqp05d6i8lildtfxyrft4n20ax1; Hm_lvt_9bcbda9cbf86757998a2339a0437208e=1642744575,1642815874,1642819847,1642856073; Hm_lvt_c99546cf032aaa5a679230de9a95c7db=1642765423,1642815874,1642819848,1642856073; m=pua; tk=1672229442160709740; Hm_lpvt_9bcbda9cbf86757998a2339a0437208e=1642856926; Hm_lpvt_c99546cf032aaa5a679230de9a95c7db=1642856999',
}
session.headers = headers
response = session.post('https://match.yuanrenxue.com/jssm')
resp = session.get(f'https://match.yuanrenxue.com/api/match/3?page={i}').json()
datas = resp['data']
for data in datas:
print(data['value'])
number_list.append(data['value'])
print('答案为:',max(number_list,key=number_list.count))
| zqtz/yuanrenxue | exam03/exam03.py | exam03.py | py | 1,631 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.session",
"line_number": 3,
"usage_type": "call"
}
] |
45138682316 | from flask import Flask, send_from_directory
from flask_sqlalchemy import SQLAlchemy
import os
# Serve the pre-built front-end bundle; static assets live under ./build/static.
app = Flask(__name__, static_folder='./build/static')
# Disable static-file caching so rebuilt assets are picked up immediately.
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
'''
db = SQLAlchemy(app)
class User(db.Model):
__tablename__ = 'user'
token_id = db.Column(db.String(255), primary_key=True, unique=True)
email = db.Column(db.String(255), unique=True, nullable=False)
companyName = db.Column(db.String(255), unique=True
def __init__(self, token_id, email, companyName, solutionsCompleted, userOnboardingCompleted):
self.token_id = token_id
self.email = email
self.companyName = companyName
'''
@app.route('/', defaults={"filename": "index.html"})
@app.route('/<path:filename>')
def index(filename):
    """Serve files from ./build; the bare '/' route falls back to index.html."""
    return send_from_directory('./build', filename)
#Function that sends a full route for POSTing data to your Flask API
'''
@app.route('/api/login', methods=['GET', 'POST'])
@cross_origin()
def login():
if request.method == 'POST':
# Returns User object if exists
requestData = request.get_json()
token_id = requestData['user']['sub'][6:]
email = requestData['user']['email']
if token_id and email:
userExists = User.query.filter_by(token_id=token_id).first()
if userExists is None:
# Create new user and store in db
userExists = User(token_id, email, None, None, None)
db.session.add(userExists)
db.session.commit()
return json.jsonify(
token_id=userExists.token_id,
email=userExists.email,
solutionsCompleted=userExists.solutionsCompleted,
userOnboardingCompleted=userExists.userOnboardingCompleted,
)
return {'Success': False, 'Response': 'bad request'}
'''
# Launch the development server; host/port fall back to 0.0.0.0:8081 when the
# IP/PORT environment variables are unset.
app.run(
    host=os.getenv('IP',"0.0.0.0"),
    port=int(os.getenv("PORT",8081)),
    debug=True,
)
| hb275/DPP_Agency | app.py | app.py | py | 2,006 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.send_from_directory",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_... |
24912437981 | # -*- coding: utf-8 -*-
__author__ = 'neo'
import json
import tornado.web
from common.mongo_utils import *
from common.api_tagdef import *
from common.iot_msg import *
from OpenAPI_server.http_utils import http_client_pool, http_client_post
from common.eventno import *
from common.iot_procdef import *
from OpenAPI_server.serv_zmq_cfg import stream_procs
from common.msgtypedef import *
from common.settings import PORTAL_API_URL
from common.auth_utils import resource_auth
from common.resouce_type import RESOURCE_CAMERA
from common.iot_request_handler import IotRequestHandler
# Portal API endpoint URL for camera operations (add/edit/delete).
PORTAL_API_CAMERA_URL = PORTAL_API_URL + 'camera'
# Camera collection endpoints (list / register).
class CamerasUtils(IotRequestHandler):
    """Camera collection handler: GET lists cameras, POST registers a new one."""
    def data_received(self, chunk):
        # Streamed request bodies are not used by this handler.
        pass
    # Async decorator: the connection is finished only after the async response is handled.
    @tornado.web.asynchronous
    def get(self):
        """List cameras for the caller's appkey, filtered by name with offset/limit paging."""
        try:
            # Resource-access authorization check.
            searched_appkey = resource_auth(self.request.headers.get(TAG_APPKEY, ''), RESOURCE_CAMERA, None,
                                            self.get_argument(TAG_APPID, default=''))
            if searched_appkey is None:
                self.set_status(401)
                self.write({"status": 1000, "status_text": "Resource access request is not authorized"})
                self.finish()
                return
            condition = {
                TAG_CAMERA_NAME: self.get_argument(TAG_CAMERA_NAME, default=''),
                Q_OFFSET: self.get_argument(Q_OFFSET, default='0'),
                Q_LIMIT: self.get_argument(Q_LIMIT, default='10'),
                TAG_APPKEY: searched_appkey,
            }
            resp = query_cameras(self, condition)
            logging.info('query_cameras, condition = %s, result = %s', condition, resp)
            self.write(resp)
            self.finish()
        except Exception as err:
            self.set_status(501)
            self.write({"status": 1000, "status_text": "Internal system error"})
            self.finish()
            logging.error('query_cameras fail! err = %s', err)
    @tornado.web.asynchronous
    def post(self):
        """
        Register a new camera.
        """
        try:
            # Read and parse the request body.
            req_body = self.request.body.decode()
            camera_info = json.loads(req_body)
            # Attach the caller's appkey to the payload.
            camera_info[TAG_APPKEY] = self.request.headers.get(TAG_APPKEY)
            # Validate the camera payload.
            ret, err = camera_info_check(self, camera_info)
            if ret is not True:
                self.write({"status": 1000, "status_text": err})
                self.finish()
                logging.error('reader_info_check fail! err = %s', err)
            else:
                # Delegate the actual operation to the portal API; http_client_post
                # writes the response and closes the connection itself.
                http_client_post(camera_info, "add", self, PORTAL_API_CAMERA_URL)
        except Exception as err:
            self.set_status(501)
            self.write({"status": 1000, "status_text": "Internal system error"})
            self.finish()
            logging.error('Prog TAG fail! err = %s', err)
    def on_finish(self):
        try:
            del http_client_pool[self._linkid]
        except Exception:
            # The link may not have been added to http_client_pool yet when the
            # request finishes, so the del may raise; nothing to handle.
            # NOTE(review): setting 501 on this benign path looks unintended — confirm.
            self.set_status(501)
            pass
# Single-camera endpoints (detail / edit / delete).
class CameraInfoReq(IotRequestHandler):
    """Single camera handler: GET detail, POST edit, DELETE remove."""
    def data_received(self, chunk):
        # Streamed request bodies are not used by this handler.
        pass
    def get(self, camera_code):
        """Return the full record of the camera identified by camera_code."""
        try:
            # Resource-access authorization check.
            ret = resource_auth(self.request.headers.get(TAG_APPKEY, ''), RESOURCE_CAMERA, camera_code,
                                self.get_argument(TAG_APPID, default=''))
            if ret is None:
                self.set_status(401)
                self.write({"status": 1000, "status_text": "Resource access request is not authorized"})
                self.finish()
                return
            resp = query_camera_detail(self, camera_code)
            logging.info('query_camera_detail, camera_code = %s, result = %s', camera_code, resp)
            self.write(resp)
        except Exception as err:
            self.set_status(501)
            self.write({"status": 1000, "status_text": "Internal system error"})
            logging.error('query_camera_detail fail! err = %s', err)
    @tornado.web.asynchronous
    def post(self, camera_code):
        """
        Modify a single camera record.
        :param camera_code: identifier of the camera to edit
        """
        try:
            # Read and parse the request body.
            req_body = self.request.body.decode()
            camera_info = json.loads(req_body)
            # Attach the caller's appkey to the payload.
            camera_info[TAG_APPKEY] = self.request.headers.get(TAG_APPKEY)
            # Add camera_code from the URL to the payload.
            camera_info[TAG_CAMERA_CODE] = camera_code
            # Check that camera_code refers to a registered camera.
            ret, err = check_camera_code(self, camera_info[TAG_CAMERA_CODE])
            if ret is not True:
                self.write({"status": 1000, "status_text": err})
                self.finish()
                logging.error('reader_info_check fail! err = %s', err)
            else:
                # Validate the camera payload itself.
                ret, err = camera_info_check(self, camera_info)
                if ret is not True:
                    self.write({"status": 1000, "status_text": err})
                    self.finish()
                    logging.error('reader_info_check fail! err = %s', err)
                else:
                    # Delegate to the portal API; http_client_post writes the
                    # response and closes the connection itself.
                    http_client_post(camera_info, "edit", self, PORTAL_API_CAMERA_URL)
        except Exception as err:
            self.set_status(501)
            self.write({"status": 1000, "status_text": "Internal system error"})
            self.finish()
            logging.error('Prog TAG fail! err = %s', err)
    @tornado.web.asynchronous
    def delete(self, camera_code):
        """
        Delete a single camera record.
        :param camera_code: identifier of the camera to delete
        """
        try:
            # Build the request payload, including the caller's appkey.
            camera_info = {'camera_code': camera_code, TAG_APPKEY: self.request.headers.get(TAG_APPKEY)}
            # Delegate to the portal API; http_client_post writes the response
            # and closes the connection itself.
            http_client_post(camera_info, "delete", self, PORTAL_API_CAMERA_URL)
        except Exception as err:
            self.set_status(501)
            self.write({"status": 1000, "status_text": "Internal system error"})
            self.finish()
            logging.error('Prog TAG fail! err = %s', err)
    def on_finish(self):
        try:
            del http_client_pool[self._linkid]
        except Exception:
            # The link may not have been added to http_client_pool yet when the
            # request finishes, so the del may raise; nothing to handle.
            self.set_status(501)
            pass
# Camera snapshot (capture) request.
class CameraCaptureReq(IotRequestHandler):
    """Asks the camera-control process to take a picture with the given camera."""
    def data_received(self, chunk):
        # Streamed request bodies are not used by this handler.
        pass
    # Async decorator: the connection is finished only after the async response is handled.
    @tornado.web.asynchronous
    def get(self, camera_code):
        try:
            # Resource-access authorization check.
            ret = resource_auth(self.request.headers.get(TAG_APPKEY, ''), RESOURCE_CAMERA, camera_code,
                                self.get_argument(TAG_APPID, default=''))
            if ret is None:
                self.set_status(401)
                self.write({"status": 1000, "status_text": "Resource access request is not authorized"})
                self.finish()
                return
            ret, msg = pack_camera_take_picture(self, self._linkid, camera_code, self.request.headers.get(TAG_IOT_H_TRANSID, ''))
            if ret is True:
                # Cache this http client so the async reply can find the connection.
                http_client_pool[self._linkid] = self
                # Forward the message to the camera-control process over zmq.
                if IOT_PROC_CAMERA_CTRL in stream_procs.keys():
                    stream_procs[IOT_PROC_CAMERA_CTRL].send(msg)
                    logging.debug('Send Camera Capture Req Succ! linkid = %d, camera_code = %s', self._linkid, camera_code)
                else:
                    logging.error("Can't find process = %s", IOT_PROC_CAMERA_CTRL)
                    self.write({"status": 1000, "status_text": "Internal system error"})
                    self.finish()
            else:
                self.write({"status": 1000, "status_text": msg})
                self.finish()
                logging.error('pack_get_camera_tag_list_req fail! linkid = %d, camera_code = %s', self._linkid, camera_code)
        except Exception as err:
            self.set_status(501)
            self.write({"status": 1000, "status_text": "Internal system error"})
            self.finish()
            logging.error('CameraCaptureReq fail! err = %s', err)
    def on_finish(self):
        try:
            del http_client_pool[self._linkid]
        except Exception:
            # The link may not have been added to http_client_pool yet when the
            # request finishes, so the del may raise; nothing to handle.
            pass
def get_term_code_by_camera_code(handler, db_name, collection, camera_code):
    """
    Look up the terminal model code (term_code) of a registered camera.
    :param handler: request handler, used to flag a 501 on failure
    :param db_name: mongo database name
    :param collection: mongo collection name
    :param camera_code: camera identifier
    :return: (True, term_code) on success, (False, reason) otherwise
    """
    try:
        # find_one returns None for an unknown camera; the subscript below then
        # raises and is reported through the except path.
        document = mongo_cli[db_name][collection].find_one({TAG_CAMERA_CODE: camera_code})
        return True, document[TAG_TERM_CODE]
    except Exception as err:
        handler.set_status(501)
        logging.error("Can't find term_code by camera_code! db = %s.%s, camera_code = %s, err = %s", db_name, collection, camera_code, err)
        return False, "unregistered camera"
def pack_camera_take_picture(handler, linkid, camera_code, trans_id):
    """
    Build the "take picture" request message for the camera-control process.
    :param handler: request handler, used to flag a 501 on failure
    :param linkid: id of the http connection awaiting the reply
    :param camera_code: camera identifier
    :param trans_id: transaction id propagated from the request headers
    :return: (True, msg) on success, (False, reason) otherwise
    """
    try:
        # Resolve the terminal model code for this camera.
        ret, term_code = get_term_code_by_camera_code(handler, DB_IOT, 'camera_info', camera_code)
        logging.debug('camera_code = %s, term_code = %s', camera_code, term_code)
        if ret is False:
            return False, "unregistered camera"
        req_dict = {TAG_CAMERA_CODE: camera_code}
        msg = pack_full_msg(linkid, MSGTYPE_CAMERA_REQ_CAPTURE, json.dumps(req_dict),
                            IOT_PROC_CAMERA_CTRL, IOT_TCPINTF_TO_PF_REQ, term_code, camera_code, trans_id)
        return True, msg
    except Exception as err:
        handler.set_status(501)
        # NOTE(review): log label mentions tag_list — looks like a copy/paste leftover.
        logging.error('pack_get_camera_tag_list_req fail! err = %s', err)
        return False, "Internal system error"
def query_cameras(handler, condition):
    """
    Query the list of cameras matching `condition`.
    :param handler: request handler, used to flag a 501 on DB errors
    :param condition: dict with optional camera name plus offset/limit/appkey
    :return: response payload for the client (dict with paging info, or '' on DB error)
    """
    resp = ''
    qc = {}
    if condition[TAG_CAMERA_NAME] != '':
        qc[TAG_CAMERA_NAME] = condition[TAG_CAMERA_NAME]
    qc[TAG_APPKEY] = condition[TAG_APPKEY]
    count = 0
    try:
        # Total number of matching cameras (for the paging envelope).
        count = mongo_cli[DB_IOT]['camera_info'].find(qc, {TAG_CAMERA_NAME: 1, TAG_CAMERA_CODE: 1}).count()
    except Exception as err:
        handler.set_status(501)
        logging.info("Can't find reader_info, condition = %s, err = %s", condition, err)
        # Bug fix: this path used to `return count, resp`, handing a tuple to
        # callers that pass the result straight to RequestHandler.write().
        # Return the plain response payload like every other path.
        return resp
    if count > 0:
        try:
            # Fetch the requested page of results, newest first.
            results = mongo_cli[DB_IOT]['camera_info'].find(qc, {TAG_CAMERA_NAME: 1, TAG_CAMERA_CODE: 1}).sort("_id", SORT_DESC)\
                .skip(int(condition[Q_OFFSET])).limit(int(condition[Q_LIMIT]))
            camera_list = []
            for one_camera in results.__iter__():
                if "_id" in one_camera.keys():
                    del one_camera["_id"]
                one_camera['herf'] = '/cameras/' + one_camera[TAG_CAMERA_CODE]
                camera_list.append(one_camera)
            resp = {
                Q_LIMIT: int(condition[Q_LIMIT]),
                Q_OFFSET: int(condition[Q_OFFSET]),
                Q_TOTAL: count,
                TAG_CAMERAS: camera_list
            }
        except Exception as err:
            handler.set_status(501)
            logging.error('query_c = %s, Exception = %s', condition, err)
    else:
        # No matches: return an empty paging envelope.
        resp = {
            Q_LIMIT: int(condition[Q_LIMIT]),
            Q_OFFSET: int(condition[Q_OFFSET]),
            Q_TOTAL: count
        }
    return resp
def query_camera_detail(handler, camera_code):
    """
    Fetch the detailed record of a single camera.
    :param handler: request handler, used to flag a 501 on DB errors
    :param camera_code: camera identifier
    :return: camera document, error dict for unknown cameras, or '' on DB error
    """
    response = ""
    try:
        record = mongo_cli[DB_IOT]['camera_info'].find_one({TAG_CAMERA_CODE: camera_code}, {"_id": 0})
        if record is not None:
            response = record
        else:
            response = {"status": 1000, "status_text": "unregistered camera"}
    except Exception as err:
        handler.set_status(501)
        logging.error('query_camera_detail fail!, reader_code = %s, e = %s', camera_code, err)
    return response
def camera_info_check(handler, camera_info):
    """
    Validate a camera payload: status, vendor (company) and terminal model
    must all be present and exist in the reference collections.
    :param handler: request handler, used to flag a 501 on DB errors
    :param camera_info: camera payload to validate
    :return: (True, "") when valid, otherwise (False, reason)
    """
    # Validate the status value against the dictionary collection.
    if TAG_STATUS in camera_info.keys():
        try:
            ret = mongo_cli[DB_IOT]['dictionary'].find({TAG_DICT_VALUE: str(camera_info[TAG_STATUS]), TAG_DICT_TYPE: TAG_STATUS}).count()
            if ret == 0:
                return False, "status is invalid"
        except Exception as err:
            handler.set_status(501)
            logging.error('check status fail!, status = %s, e = %s', camera_info[TAG_STATUS], err)
            return False, "Internal System Error"
    else:
        return False, TAG_STATUS + " is None"
    # Validate the device vendor (company) code.
    if TAG_COMPANY_CODE in camera_info.keys():
        try:
            ret = mongo_cli[DB_IOT]['company_info'].find({TAG_COMPANY_CODE: camera_info[TAG_COMPANY_CODE]}).count()
            if ret == 0:
                return False, "unsupported company"
        except Exception as err:
            handler.set_status(501)
            logging.error('check company fail!, company_code = %s, e = %s', camera_info['company_code'], err)
            return False, "Internal System Error"
    else:
        return False, TAG_COMPANY_CODE + " is None"
    # Validate the terminal model code for that vendor.
    if TAG_TERM_CODE in camera_info.keys():
        try:
            ret = mongo_cli[DB_IOT]['term_model_info'].find({TAG_TERM_CODE: camera_info[TAG_TERM_CODE], TAG_COMPANY_CODE: camera_info[TAG_COMPANY_CODE]}).count()
            if ret == 0:
                return False, "term_code is invalid"
        except Exception as err:
            handler.set_status(501)
            logging.error('check term_code fail!, term_code = %s, e = %s', camera_info[TAG_TERM_CODE], err)
            return False, "Internal System Error"
    else:
        return False, TAG_TERM_CODE + " is None"
    return True, ""
# Validate that camera_code refers to a registered camera.
def check_camera_code(handler, camera_code):
    """
    Check that camera_code exists in camera_info.
    :param handler: request handler, used to flag a 500 on DB errors
    :param camera_code: camera identifier
    :return: (True, "") when registered, otherwise (False, reason)
    """
    try:
        ret = mongo_cli[DB_IOT]['camera_info'].find({TAG_CAMERA_CODE: camera_code}).count()
        if ret == 0:
            return False, "camera_code is invalid"
        else:
            return True, ""
    except Exception as err:
        # NOTE(review): sibling functions use 501 for internal errors — confirm 500 is intended.
        handler.set_status(500)
        # Bug fix: log label said 'check_reader_code' (copy/paste leftover from the reader module).
        logging.error('check_camera_code fail!, camera_code = %s, e = %s', camera_code, err)
        return False, "Internal System Error"
| ennismar/python | OpenAPI/OpenAPI_server/camera_proc.py | camera_proc.py | py | 15,655 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "common.settings.PORTAL_API_URL",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "common.iot_request_handler.IotRequestHandler",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "common.auth_utils.resource_auth",
"line_number": 34,
"usage_t... |
12381823391 | from captcha.image import ImageCaptcha
import os
import random
import string
import asyncio
import re
import datetime
import discord
from discord.ext import commands
def randomStr(n=5):
    """Return a random string of length n drawn from digits and ASCII letters."""
    alphabet = string.digits + string.ascii_lowercase + string.ascii_uppercase
    return ''.join(random.choice(alphabet) for _ in range(n))
def captcha(id, n=5, word=None):
    """Render a captcha image to '<id>.png' and return the captcha text.

    :param id: used as the output filename stem (the Discord user id here)
    :param n: length of the random text when no word is given
    :param word: optional fixed captcha text
    """
    # Renamed from `string`, which shadowed the stdlib module of the same name.
    text = word if word is not None else randomStr(n)
    print(text)  # debug output: reveals the answer on the console
    image = ImageCaptcha()
    # ImageCaptcha.write() renders the image itself, so the previous separate
    # generate() call (whose result was discarded) was redundant and removed.
    image.write(text, f'{id}.png')
    return text
TOKEN = ''  # Discord bot token — must be filled in before running.
intents = discord.Intents.all()
intents.members = True  # Intents.all() should already include members; kept for explicitness.
bot = commands.Bot(command_prefix='.', intents=intents)
@bot.event
async def on_ready():
    # Fired once the gateway connection is established and the bot is ready.
    print('start!')
@bot.command()
async def test(ctx, arg=None):
    """Debug command: run the captcha flow in the fixed test channel.

    `arg`, when given, forces the captcha word instead of a random one.
    """
    channel = bot.get_channel(864881388766887987)
    await channel.send(ctx.author.mention)
    # Bug fix: captchaMsg_forCmd() has no `title` parameter, so the previous
    # title='認証してください。' kwarg raised TypeError on every invocation.
    await captchaMsg_forCmd(ctx, channel, word=arg)
@bot.command()
async def a(ctx):
    """Level-1 challenge: 5 uppercase letters, restricted to its dedicated channel."""
    channel = bot.get_channel(1017815727182454784)
    # Bug fix: `is not` compares object identity; for large ints such as
    # Discord snowflake ids it can be True even when the values are equal.
    if ctx.channel.id != channel.id:
        return
    words = ''.join([random.choice(string.ascii_uppercase) for i in range(5)])
    await captchaMsg_forCmd(ctx, channel, word=words, level=1)
@bot.command()
async def b(ctx):
    """Level-2 challenge: 6 digits/uppercase letters, restricted to its channel."""
    channel = bot.get_channel(1017815321735856209)
    # Bug fix: `is not` compares object identity; use != for int ids.
    if ctx.channel.id != channel.id:
        return
    words = ''.join([random.choice(string.digits + string.ascii_uppercase) for i in range(6)])
    await captchaMsg_forCmd(ctx, channel, word=words, level=2)
@bot.command()
async def c(ctx):
    """Level-3 challenge: 7 mixed-case alphanumerics, restricted to its channel."""
    channel = bot.get_channel(1017815388932821053)
    # Bug fix: `is not` compares object identity; use != for int ids.
    if ctx.channel.id != channel.id:
        return
    words = randomStr(7)
    await captchaMsg_forCmd(ctx, channel, word=words, level=3)
async def captchaMsg_forCmd(ctx, ch, word=None, check=None, level=None):
    """Post a captcha image to `ch`, wait up to 30s for the author's answer,
    and report the solve time to the score channel on success.

    NOTE(review): the `check` parameter is immediately overwritten by the
    lambda below, so callers cannot supply their own predicate — confirm
    whether it should be honoured or removed.
    """
    db_channel = bot.get_channel(1017817332493582356)  # score/reporting channel
    captcha_string = captcha(ctx.author.id, word=word)
    # Message revealing the answer behind a spoiler, sent after a miss/timeout.
    false_text = f'正解は || {captcha_string} || です。\n(黒い部分を押すと答えが見えるよ!)'
    embed = discord.Embed(description='画像に表示された文字を入力してください。')
    await ch.send(embed=embed)
    await ch.send(file=discord.File(f'./{ctx.author.id}.png'))
    start = datetime.datetime.now()
    # The image file is only needed for the upload above; remove it right away.
    os.remove(f'./{ctx.author.id}.png')
    # Accept only alphanumeric messages from the same author in the same channel.
    check = lambda msg: msg.author.id == ctx.author.id and re.match('^[A-Za-z0-9]*$', msg.content) and msg.channel.id == ch.id
    try:
        msg = await bot.wait_for('message', check=check, timeout=30)
        if msg.content == captcha_string:
            diff = datetime.datetime.now() - start
            score = diff.total_seconds()
            await ch.send(f'{msg.author.mention} 大正解!!おめでとう~!')
            await db_channel.send(f'レベル{level}が{score}秒でクリアされました!')
        elif msg.content != captcha_string:
            await ch.send(f'{msg.author.mention} 不正解です。\nまたチャレンジしたい場合はコマンドをもう一度使ってください。')
            await ch.send(false_text)
    except asyncio.TimeoutError:
        await ch.send('時間切れです。\nまたチャレンジしたい場合はコマンドをもう一度使ってください。')
        await ch.send(false_text)
    else:
        # No-exception path: nothing further to do (redundant but harmless).
        return
bot.run(TOKEN)
| PriestessSakuraka/discord.py-captcha-bot | bot.py | bot.py | py | 3,460 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "random.choice",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "string.digits",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "string.ascii_lowercase",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "string.a... |
26304669003 | import sys
import pandas as pd
from sqlalchemy import create_engine
import copy
def split_categories_columns(df):
    """Expand the 'categories' column (e.g. 'related-1;request-0') into one
    integer column per category.

    Column names are taken from the first row. Returns the expanded frame
    (with 'categories' dropped) and the list of new column names.
    """
    first_row = df['categories'][0]
    column_names = [entry.split('-')[0] for entry in first_row.split(';')]
    rename_dict = dict(enumerate(column_names))
    expanded = df['categories'].str.split(';', expand=True).rename(columns=rename_dict)
    df = df.join(expanded).drop(columns=['categories'])
    for name in column_names:
        # Keep only the numeric flag after the dash.
        df[name] = df[name].apply(lambda value: int(value.split('-')[1]))
    return df, column_names
def clean_data(df1, categories_column_names):
    """
    Clean the category columns and convert them to booleans.

    input:
    df1 - data frame
    categories_column_names - list of column names that should be cleaned

    Rules per column: one unique value -> drop the column; more than two
    unique values -> remove bad rows (<10% bad), replace bad values with the
    column mode (<20% bad), or drop the column (>=20% bad); otherwise just
    cast to bool. Returns a cleaned copy; `df1` is not modified.
    """
    df = copy.deepcopy(df1)
    for col_name in categories_column_names:
        if df[col_name].nunique() < 2:
            print(f'Dropping column {col_name} - {df[col_name].unique()} - only one unique value\n')
            df = df.drop(columns = col_name)
        elif df[col_name].nunique() > 2:
            print(f"Column '{col_name}' - {df[col_name].unique()} have to many unique values - {df[col_name].nunique()} should have only 2 [0, 1]\n")
            error_df = df[~df[col_name].isin([0, 1])]
            if len(error_df) / len(df) < 0.1:
                print(f"Removing rows with unvalid values - {error_df[col_name].unique()} that are less then 10% of dataset\n")
                df = df[df[col_name].isin([0, 1])]
                df[col_name] = df[col_name].apply(lambda x: bool(x))
            elif len(error_df) / len(df) < 0.2:
                print(f'Replacing unvalid values - {error_df[col_name].unique()} with mode of column - {df[col_name].mode()} that populates less then 20% of column values\n')
                # Bug fix: Series.mode() returns a Series; take the first mode
                # as a scalar replacement value.
                mode_value = df[col_name].mode()[0]
                # Bug fix: scalars have no .isin(); use a plain membership test.
                df[col_name] = df[col_name].apply(lambda x: x if x in (0, 1) else mode_value)
                df[col_name] = df[col_name].apply(lambda x: bool(x))
            else:
                print(f"Dropping column '{col_name}' - {df[col_name].unique()} - more then 20% of unvalid values\n")
                df.drop(columns = [col_name], inplace = True)
        else:
            df[col_name] = df[col_name].apply(lambda x: bool(x))
    return df
def save_to_database(df, database_path):
    """Write `df` into the 'disaster' table of the SQLite database at `database_path`,
    replacing the table if it already exists."""
    connection_url = f'sqlite:///{database_path}'
    db_engine = create_engine(connection_url)
    df.to_sql('disaster', db_engine, index=False, if_exists='replace')
def main():
    """CLI entry point: load, merge, clean, deduplicate and store the data."""
    # Paths may be given on the command line; otherwise fall back to defaults.
    if len(sys.argv) == 4:
        messages_path, categories_path, datbase_path, = sys.argv[1:]
    else:
        messages_path = '..\\data\\messages.csv'
        categories_path = '..\\data\\categories.csv'
        datbase_path = '..\\disaster.db'
    print('Loading messages data...\n')
    messages = pd.read_csv(messages_path)
    print('Loading categories data...\n')
    categories = pd.read_csv(categories_path)
    print('Merging messages and categories on id...\n')
    df = messages.merge(on = 'id', right = categories)
    print('Spliting vaules in categories column into seperate columns...\n')
    df, categories_column_names = split_categories_columns(df)
    print('Cleaning data from unvalid valies != [0, 1] and turning them to bool type values...\n')
    df = clean_data(df, categories_column_names)
    print('Deduplication...\n')
    # keep=False drops *all* rows sharing a duplicated id/message, not just the extras.
    df = df.drop_duplicates(subset = ['id'], keep = False)
    df = df.drop_duplicates(subset = ['message'], keep = False)
    print('Saving to database...\n')
    save_to_database(df, datbase_path)
    print('Success !!!\n')
    print(f'Output: {datbase_path}\n')
    print(df.head())
if __name__ == '__main__':
main() | chrapkus/disaster_repository_project | scripts/process_data.py | process_data.py | py | 3,803 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "copy.deepcopy",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"... |
42925904616 | import logging
import os
import shutil
from typing import List
import numpy as np
import pandas as pd
import pysam
from hmnfusion import bed as ibed
from hmnfusion import graph, region
# Helper functions
def cigar2position(cigars: List[List[int]], start: int) -> dict:
    """Construct from a cigar and a position, a position/operation map.

    Parameters
    ----------
    cigars:
        A cigar coming from pysam (list of [operation, length] pairs).
    start:
        Genomic coordinate of the first aligned (reference-consuming) base.

    Returns
    -------
    dict
        A dictionary with:
        - key: genomic coordinate
        - value: cigar operation code (0 = M, 4 = S, 5 = H, ...)
    """
    data = {}
    # Work on a copy: the previous version mutated the caller's list (del cigars[0]).
    ops = list(cigars)
    # A leading soft/hard clip (S=4, H=5) covers the bases just *before* start.
    if ops and ops[0][0] in (4, 5):
        clip_op, clip_len = ops[0]
        for i in range(1, clip_len + 1):
            data[start - i] = clip_op
        ops = ops[1:]
    for op, nb in ops:
        if op in (4, 5):
            # Non-leading clips map forward from the current position.
            for i in range(nb):
                data[start + i] = op
        elif op == 0:
            # Aligned match (M): record each base and advance along the reference.
            for i in range(nb):
                data[start] = op
                start += 1
        elif op == 1:
            # Insertion (I) consumes the query only: reference position unchanged.
            pass
        else:
            # Deletion (D=2) / reference skip (N=3) consume `nb` reference bases.
            # Bug fix: the previous code advanced by 1 regardless of length.
            start += nb
    return data
def run(params: dict, bed: ibed.Bed, g: graph.Graph) -> None:
    """Main function to quantify fusion, by updating the Graph object in place.

    For every fusion node of interest, locate which breakpoint side falls in
    the bed, then scan the alignment around it and accumulate evidence counts
    (depth, split reads, inter-chromosome mates, clipped reads).

    Parameters
    ----------
    params: dict
        Parameters: alignment file path/mode and clipped-read thresholds.
    bed: hmnfusion.bed.Bed
        A Bed object.
    g: hmnfusion.graph.Graph
        An object containing fusion data; mutated in place.

    Return
    ------
    None
    """
    alignment = pysam.AlignmentFile(
        params["falignment"]["path"], params["falignment"]["mode"]
    )
    nodes = g.graph.nodes
    for n in nodes:
        g.graph.nodes[n]["is_skip"] = False
        if not g.graph.nodes[n]["is_interest"]:
            continue
        # Check fusion against bed: select rows matching each breakpoint side.
        sub_first = pd.DataFrame(columns=ibed.Bed.HEADER)
        sub_second = pd.DataFrame(columns=ibed.Bed.HEADER)
        if g.graph.nodes[n]["fusion"].first.is_init():
            sel = bed.df.apply(
                ibed.Bed.select_bed, axis=1, args=(g.graph.nodes[n]["fusion"].first,)
            )
            sub_first = bed.df[sel]
        if g.graph.nodes[n]["fusion"].second.is_init():
            sel = bed.df.apply(
                ibed.Bed.select_bed, axis=1, args=(g.graph.nodes[n]["fusion"].second,)
            )
            sub_second = bed.df[sel]
        # Skip ambiguous fusions: a usable fusion must match exactly one bed
        # row on exactly one side of the breakpoint.
        if len(sub_first) > 1 or len(sub_second) > 1:
            logging.warning(
                "Fusion %s is found multiple times in bed -> skipping"
                % (g.graph.nodes[n]["fusion"],)
            )
            g.graph.nodes[n]["is_skip"] = True
        if len(sub_first) + len(sub_second) == 2:
            logging.warning(
                "Fusion %s is found on left and right of breakpoint in the bed -> skipping"
                % (g.graph.nodes[n]["fusion"],)
            )
            g.graph.nodes[n]["is_skip"] = True
        if len(sub_first) + len(sub_second) == 0:
            logging.warning(
                'Fusion %s isn"t found on left or right of breakpoint in the bed -> skipping'
                % (g.graph.nodes[n]["fusion"],)
            )
            g.graph.nodes[n]["is_skip"] = True
        if g.graph.nodes[n]["is_skip"]:
            continue
        # Init: pick the bed row and the breakpoint region to quantify.
        bed_sel = pd.DataFrame(columns=ibed.Bed.HEADER)
        r = region.Region()
        if len(sub_first) == 1:
            bed_sel = sub_first
            r = g.graph.nodes[n]["fusion"].first
        elif len(sub_second) == 1:
            bed_sel = sub_second
            r = g.graph.nodes[n]["fusion"].second
            # Swap so the matched side becomes the first region.
            g.graph.nodes[n]["fusion"].swap_region()
        else:
            # Should be unreachable given the checks above.
            logging.warning(
                "Fusion %s, something bad happened -> skipping"
                % (g.graph.nodes[n]["fusion"],)
            )
        # Run: scan reads overlapping the selected bed interval.
        for aligned_segment in alignment.fetch(
            bed_sel.iloc[0, 0], bed_sel.iloc[0, 1], bed_sel.iloc[0, 2]
        ):
            # Filtering: keep only primary, mapped, non-duplicate reads.
            if (
                aligned_segment.is_unmapped
                or aligned_segment.is_duplicate
                or aligned_segment.is_supplementary
            ):
                continue
            cigar2pos = cigar2position(
                aligned_segment.cigartuples, aligned_segment.reference_start
            )
            # Only count reads whose cigar covers the breakpoint position.
            if r.position not in cigar2pos.keys():
                continue
            g.graph.nodes[n]["fusion"].evidence.depth += 1
            # Count split reads (reads with a supplementary-alignment SA tag).
            if aligned_segment.has_tag("SA"):
                g.graph.nodes[n]["fusion"].evidence.split += 1
                continue
            # Count mates mapped to another chromosome.
            if aligned_segment.is_paired:
                if (
                    not aligned_segment.mate_is_unmapped
                    and not aligned_segment.is_unmapped
                ):
                    if (
                        aligned_segment.next_reference_id
                        != aligned_segment.reference_id
                    ):
                        g.graph.nodes[n]["fusion"].evidence.mate += 1
                        continue
            # Count clipped reads: look for soft/hard clips (ops 4/5) within
            # `interval` bases on either side of the breakpoint; the read
            # counts when one side has at least `count` clipped positions.
            count_clipped = np.zeros((2, params["clipped"]["interval"]))
            for i in range(params["clipped"]["interval"]):
                if cigar2pos.get(r.position - i - 1, 0) in [4, 5]:
                    count_clipped[0][i] = 1
                if cigar2pos.get(r.position + i + 1, 0) in [4, 5]:
                    count_clipped[1][i] = 1
            if np.max(np.sum(count_clipped, axis=1)) >= params["clipped"]["count"]:
                g.graph.nodes[n]["fusion"].evidence.clipped += 1
def write(filename: str, name: str, g: graph.Graph) -> None:
    """Write a vcf file from a list of Fusion.

    Copies the bundled VCF 4.2 header template into `filename`, then appends
    one pair of records per fusion node (one line per breakpoint side).

    Parameters
    ----------
    filename: str
        A filename to write fusion.
    name: str
        Name of sample (used as the genotype column header).
    g: hmnfusion.graph.Graph
        A graph object to extract data.

    Return
    ------
    None
    """
    # Header: copy the packaged VCF header template.
    shutil.copyfile(
        src=os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            "templates",
            "vcf",
            "vcf.header.4-2.txt",
        ),
        dst=filename,
    )
    # Fusions.
    columns = ["#CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT"]
    columns.append(name)
    df = pd.DataFrame(columns=columns)
    nodes = g.graph.nodes
    for n in nodes:
        logging.debug(g.graph.nodes[n]["fusion"])
        # First breakpoint record; suffix ids with -1/-2 when both sides exist.
        ident = g.graph.nodes[n]["fusion"].get_name()
        ident_1 = ident
        if g.graph.nodes[n]["fusion"].second.is_init():
            ident_1 += "-1"
        infos = ["SVTYPE=FUS"]
        infos += ["SOFT=%s" % (g.graph.nodes[n]["fusion"].get_software(),)]
        infos += ["FROM=%s" % ("-".join(g.label_build_from(n)),)]
        infos += ["CONS=%s" % (g.graph.nodes[n]["is_consensus"],)]
        infos += ["VAF=%s" % (g.graph.nodes[n]["fusion"].evidence.get_vaf(),)]
        infos += ["DP=%s" % (g.graph.nodes[n]["fusion"].evidence.depth,)]
        infos += ["SU=%s" % (g.graph.nodes[n]["fusion"].evidence.get_sum(),)]
        infos += ["SR=%s" % (g.graph.nodes[n]["fusion"].evidence.split,)]
        infos += ["PE=%s" % (g.graph.nodes[n]["fusion"].evidence.mate,)]
        infos += ["SC=%s" % (g.graph.nodes[n]["fusion"].evidence.clipped,)]
        # NOTE(review): INFO fields are joined with ':' here but ';' for the
        # second record below (VCF normally uses ';') — confirm intended.
        sinfos = ":".join(infos)
        values = [
            g.graph.nodes[n]["fusion"].first.chrom,
            g.graph.nodes[n]["fusion"].first.position,
        ]
        values += [ident_1, "N", "<FUS>", ".", ".", sinfos]
        values += ["GT:VAF:DP:SU:SR:PE:SC"]
        infos_values = []
        for x in [
            "./.",
            g.graph.nodes[n]["fusion"].evidence.get_vaf(),
            g.graph.nodes[n]["fusion"].evidence.depth,
            g.graph.nodes[n]["fusion"].evidence.get_sum(),
            g.graph.nodes[n]["fusion"].evidence.split,
            g.graph.nodes[n]["fusion"].evidence.mate,
            g.graph.nodes[n]["fusion"].evidence.clipped,
        ]:
            infos_values.append(str(x))
        values.append(":".join(infos_values))
        df = pd.concat([df, pd.DataFrame([values], columns=columns)])
        ident_2 = ident
        if g.graph.nodes[n]["fusion"].second.is_init():
            ident_2 += "-2"
        # Second breakpoint record: evidence is only reported on the first side.
        sinfos = ";".join(["SVTYPE=FUS", "DP=.", "SU=."])
        values = [
            g.graph.nodes[n]["fusion"].second.chrom,
            g.graph.nodes[n]["fusion"].second.position,
            ident_2,
            "N",
            "<FUS>",
            ".",
            ".",
            sinfos,
            "GT:VAF:DP:SU:SR:PE:SC",
            "./.:.:.:.:.:.:.",
        ]
        df = pd.concat([df, pd.DataFrame([values], columns=columns)])
    # Append the records after the copied header.
    df.to_csv(filename, mode="a", sep="\t", index=False)
| guillaume-gricourt/HmnFusion | src/hmnfusion/quantification.py | quantification.py | py | 8,947 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "hmnfusion.bed.Bed",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "hmnfusion.bed",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "hmnfusion.graph.Grap... |
27770016752 | import numpy as np
from sklearn.impute import SimpleImputer
# Demo 1: replace NaNs with the per-column mean.
X=[[np.nan,2,3],[4,np.nan,6],[10,np.nan,9]]
imputer = SimpleImputer(missing_values=np.nan,strategy='mean')
# allowed_strategies = ["mean", "median", "most_frequent", "constant"]
xx= imputer.fit_transform(X)
print(xx)
from sklearn.impute import KNNImputer
# Demo 2: replace NaNs using the mean of the 2 nearest-neighbour rows.
X = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]]
imputer = KNNImputer(n_neighbors=2)
xx=imputer.fit_transform(X)
print(X)
print(xx)
| kshsky/PycharmProjects | ml-case/01-集装箱危险品瞒报预测/fillna_from_knn.py | fillna_from_knn.py | py | 459 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.nan",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "sklearn.impute.SimpleImputer",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
... |
12427518361 | from setuptools import setup, find_packages
# Package metadata for croneval. (Removed the unused `pkg_vars` placeholder.)
setup(
    name='croneval',
    description='cron schedule expression evaluator',
    author='Selçuk Karakayalı',
    author_email='skarakayali@gmail.com',
    maintainer='Selçuk Karakayalı',
    url='http://github.com/karakays/croneval/',
    packages=find_packages(),
    python_requires='>=3.8',
    license='MIT',
    scripts=['bin/croneval'],
    # Read the README with an explicit encoding: the author fields contain
    # non-ASCII characters and the platform default encoding may not be UTF-8.
    long_description=open('README.md', encoding='utf-8').read(),
    # Tell PyPI the long description is Markdown so it renders correctly.
    long_description_content_type='text/markdown',
    classifiers=["Programming Language :: Python :: 3",
                 "License :: OSI Approved :: MIT License"
                 ]
)
| karakays/croneval | setup.py | setup.py | py | 575 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 14,
"usage_type": "call"
}
] |
17795545171 | # Definition for a binary tree node.
from typing import List
class TreeNode:
    """Binary-tree node holding a value and optional left/right children."""
    def __init__(self, x):
        self.val = x
        self.left = None  # left child (TreeNode or None), set by the caller
        self.right = None  # right child (TreeNode or None), set by the caller
class Solution:
    def inorderTraversal(self, root: "TreeNode") -> "List[int]":
        """Return node values in in-order (left subtree, node, right subtree)."""
        def visit(node, out):
            if node is not None:
                visit(node.left, out)
                out.append(node.val)
                visit(node.right, out)
        values = []
        visit(root, values)
        return values
| fastso/learning-python | leetcode_cn/solved/pg_94.py | pg_94.py | py | 578 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 13,
"usage_type": "name"
}
] |
29889532379 | # coding: utf-8
import requests
import json
def fetch_weather(location):
    """Query the Seniverse 'weather now' API for `location` and return the parsed JSON."""
    query = {
        'key': 'glgqeom9bcm7swqq',
        'location': location,
        'language': 'zh-Hans',
        'unit': 'c',
    }
    response = requests.get(
        'https://api.seniverse.com/v3/weather/now.json',
        params=query,
        timeout=60)
    return response.json()
user_location = 'nanchang'  # hard-coded query city (pinyin)
w = fetch_weather(user_location)
# print(w)
print('\n------------------查询结果------------------')
print("The city name is:\t", w['results'][0]['location']['name'])
print("Today's weather is:\t", w['results'][0]['now']['text'])
print("The temperature is:\t", w['results'][0]['now']['temperature'], '摄氏度')
# Strip the trailing offset and the 'T' separator from last_update —
# assumes an ISO-8601 timestamp like '2017-01-01T12:00:00+08:00'; TODO confirm.
print("Weather updated at:\t", w['results'][0]['last_update'][:-6].replace(
    'T', ' '))
| AIHackerTest/Leon-Huang_Py101-004 | Chap2/project/weather-api-mvp.py | weather-api-mvp.py | py | 803 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 8,
"usage_type": "call"
}
] |
16776613582 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pip install twine
import io
import os
from setuptools import setup, find_packages
# Directory containing this setup.py; used to locate the README.
here = os.path.abspath(os.path.dirname(__file__))
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = '\n' + f.read()
setup(
    name='django-multiplefilefield',
    # Version is read from the package's __version__ attribute at build time.
    version=__import__('multiplefilefield').__version__,
    description='Django Model and Form fields which can store multiple files',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/Inokinoki/django-multiplefilefield',
    author='Weixuan XIAO(BNBLord)',
    author_email='veyx.shaw@gmail.com',
    zip_safe=False,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    packages=find_packages(),
    # Ship the compiled translations alongside the package code.
    package_data={
        'multiplefilefield': [
            'locale/*/LC_MESSAGES/*',
        ],
    },
)
{
"api_name": "os.path.abspath",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "io.open",
"line_numb... |
26739483197 | from youtube_dl import YoutubeDL
import re
# Matches YouTube watch/embed/short URLs; capture groups cover scheme, host,
# path, video id and any trailing query string (e.g. '&list=...').
yt_regex = re.compile(r"^((?:https?:)?\/\/)?((?:www|m)\.)?((?:youtube\.com|youtu.be))(\/(?:[\w\-]+\?v=|embed\/|v\/)?)([\w\-]+)(\S+)?$")
# youtube-dl options: download into ./downloads with safe filenames, allow
# playlists, stay quiet, and fall back to search for non-URL queries.
ytdlopts = {
    'outtmpl': './downloads/%(id)s-%(title)s.%(ext)s',
    'restrictfilenames': True,
    'noplaylist': False,
    'nocheckcertificate': True,
    'ignoreerrors': False,
    'logtostderr': False,
    'quiet': True,
    'no_warnings': True,
    'default_search': 'auto',
    'source_address': '0.0.0.0'
}
# FFmpeg streaming options (not used anywhere in this script).
ffmpegopts = {
    'before_options': '-reconnect 1 -reconnect_streamed 1 -cwix -reconnect_delay_max 5',
    'options': '-vn'
}
ytdl = YoutubeDL(ytdlopts)
def url_checker(url):
    """Return True when `url` looks like a YouTube playlist link, False otherwise.

    A URL counts as a playlist when more than one captured regex group
    contains 'list' (e.g. the path plus an '&list=' query tail). URLs that
    do not match the YouTube pattern at all return False.
    """
    matches = yt_regex.findall(url)
    if not matches:
        # No regex match: not a YouTube URL.
        return False
    # '"&list" in k' implied '"list" in k', so a single test suffices.
    list_groups = sum(1 for group in matches[0] if "list" in group)
    # The original ended with `if value > 1: return True` followed by an
    # exhaustive `if value <= 1: return False`; collapsed into one expression.
    return list_groups > 1
def yt_search(download=True):
    """Prompt for a query/URL and download it with youtube-dl.

    Playlist URLs download every entry; anything else downloads the first
    extracted/search result.
    """
    search = str(input("Input a search query: "))
    # url_checker always returns True or False, so exactly one branch below
    # runs and the loop never actually iterates a second time.
    while True:
        if url_checker(search) is True:
            data = ytdl.extract_info(url=search, download=download)
            total = 0
            for data in data['entries']:
                # prepare_filename() result is unused; the loop only counts entries.
                file = ytdl.prepare_filename(data)
                total += 1
            return print(str(total) + " Video downloaded.")
        if url_checker(search) is False:
            # NOTE(review): assumes extract_info returns an 'entries' list here
            # (search results); a bare single-video URL may not have one — confirm.
            file = ytdl.prepare_filename(ytdl.extract_info(url=search, download=download)['entries'][0])
            return print("Video downloaded.")
if __name__ == "__main__":
yt_search()
| ArpitKhandelwal-developer/Yt-Download | main.py | main.py | py | 1,649 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "youtube_dl.YoutubeDL",
"line_number": 24,
"usage_type": "call"
}
] |
42324347897 | """
使用模型性能评价指标
"""
from deepepochs import Trainer, rename, metrics as dm
import torch
from torch import nn
from torch.nn import functional as F
from torchvision.datasets import MNIST
from torchvision import transforms
from torch.utils.data import DataLoader, random_split
# datasets: MNIST with the commonly-used mean/std normalization constants.
data_dir = './datasets'
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
mnist_full = MNIST(data_dir, train=True, transform=transform, download=True)
# Use small 5k/5k subsets for train/val; the remaining 50k images are discarded.
train_ds, val_ds, _ = random_split(mnist_full, [5000, 5000, 50000])
test_ds = MNIST(data_dir, train=False, transform=transform, download=True)
# dataloaders
train_dl = DataLoader(train_ds, batch_size=32)
val_dl = DataLoader(val_ds, batch_size=32)
test_dl = DataLoader(test_ds, batch_size=32)
# pytorch model: small MLP classifier over flattened 28x28 grayscale images.
channels, width, height = (1, 28, 28)
model = nn.Sequential(
    nn.Flatten(),
    nn.Linear(channels * width * height, 64),
    nn.ReLU(),
    nn.Dropout(0.1),
    nn.Linear(64, 64),
    nn.ReLU(),
    nn.Dropout(0.1),
    nn.Linear(64, 10)
)
# Metric function 1
def acc(preds, targets):
    """Accuracy for one mini-batch.

    Metric functions take (model predictions, labels) and return the mean
    metric value over the current mini-batch.
    """
    batch_accuracy = dm.accuracy(preds, targets)
    return batch_accuracy
# Metric function 2
def recall(preds, targets):
    """Macro-averaged recall for one mini-batch."""
    batch_recall = dm.recall(preds, targets, average='macro')
    return batch_recall
# Metric function 3
@rename('')
def multi_metrics(preds, targets):
    """Return several metric values at once, as a dict.

    1. A metric function may return multiple values via a dict.
    2. Reported metric names are composed of the function name and each key.
    3. The function name can be changed with ``rename`` (or by assigning
       ``__name__``); here it is renamed to '' so only the keys appear.
    """
    values = {
        'p': dm.precision(preds, targets, average='macro'),
        'r': dm.recall(preds, targets, average='macro'),
    }
    return values
opt = torch.optim.Adam(model.parameters(), lr=2e-4)
trainer = Trainer(model, F.cross_entropy, opt=opt, epochs=2,
                  metrics=[acc],                 # 1. metrics used in training, validation and testing
                  )
progress = trainer.fit(train_dl, val_dl,
                       metrics=[multi_metrics],          # 2. metrics used in training and validation
                       train_metrics=[multi_metrics],    # 3. metrics used only in training
                       val_metrics=[multi_metrics]       # 4. metrics used only in validation
                       )
test_rst = trainer.test(test_dl,
                        metrics=[recall]                 # 5. metrics used only in testing
                        )
| hitlic/deepepochs | examples/3-metrics.py | 3-metrics.py | py | 2,620 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torchvision.transforms.Compose",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 15,
"usage_type": "call"
},
{
... |
39169556179 | from PIL import Image
def convertToSeamless(originalImg, outputPath):
    """Build a seamlessly tileable texture from *originalImg* and save it.

    The image is cropped to a square of side ``min(width, height)``; the
    square and its mirror images are pasted into a canvas twice that size,
    so the result tiles without visible seams.

    Fix: PIL's ``Image.size`` is ``(width, height)``, but the original
    unpacked it as ``imgH, imgW``. Behavior was unaffected (only the
    minimum was used, which is symmetric), but the names are now correct.
    """
    width, height = originalImg.size
    dim = min(width, height)
    tile = originalImg.crop((0, 0, dim, dim))
    seamlessImg = Image.new("RGB", (dim * 2, dim * 2), "white")
    # Top row: original and its horizontal mirror.
    seamlessImg.paste(tile, (0, 0))
    seamlessImg.paste(tile.transpose(Image.FLIP_LEFT_RIGHT), (dim, 0))
    # Bottom row: vertical mirror and the doubly-mirrored copy.
    flipped = tile.transpose(Image.FLIP_TOP_BOTTOM)
    seamlessImg.paste(flipped, (0, dim))
    seamlessImg.paste(flipped.transpose(Image.FLIP_LEFT_RIGHT), (dim, dim))
    seamlessImg.save(outputPath)
| Ktlas/Seamless-Noise-Generator | ConvertToSeamless.py | ConvertToSeamless.py | py | 644 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PIL.Image.new",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "PIL.Image.FLIP_LEFT_RIGHT",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
... |
37979061712 | import csv
import random
import sqlite3
import sys
from PyQt5 import QtWidgets, QtGui
from PyQt5.QtWidgets import QWidget, QApplication, QTextEdit, QScrollArea, QPushButton
from StatistikWidjet_form import Ui_Statistic
import datetime as dt
from CONST import NOTES_DB, STATISTIC_DB, ARITHMETIC_DB, FUNNY_TEXTS
class StatisticWidget(QWidget, Ui_Statistic):
    """Calendar-driven statistics window.

    For the date selected in the calendar it lists recorded work sessions
    and their total duration, the number of solved arithmetic exercises,
    and the notes written that day (editable and persisted back).
    """

    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.setWindowIcon(QtGui.QIcon('Pi_.ico'))
        self.save_btn = None  # created lazily, the first time a note is edited
        self.con = sqlite3.connect(STATISTIC_DB)
        self.calendarWidget.clicked.connect(self.show_info)
        self.sum_time = dt.timedelta()
        self.show_info()

    def show_info(self):
        """Rebuild the whole view for the currently selected calendar date."""
        self.listWidget.clear()
        # Drop a stale save button left over from the previous date.
        self.verticalLayout.removeWidget(self.save_btn)
        if self.save_btn is not None:
            self.save_btn.deleteLater()
            self.save_btn = None
        d = self.calendarWidget.selectedDate().getDate()
        self.d = dt.datetime(year=d[0], month=d[1], day=d[2]).date()
        sql = """SELECT time, time_end, date_end FROM Activity
                 WHERE date = ?"""
        cur = self.con.cursor()
        items = cur.execute(sql, (self.d,)).fetchall()
        self.sum_time = dt.timedelta()
        for item in items:
            duration = self.find_delta(item)
            self.sum_time += duration
            # Sessions lasting a day or more are counted in the total
            # but not listed individually.
            if duration.days == 0:
                self.listWidget.addItem(' - '.join(item[:2]) + '; Длительность сессии: ' + str(duration))
        self.listWidget.addItem('Всего за день: ' + str(self.sum_time))
        math_count = self.arithmetic_count()
        self.listWidget.addItem('Кол-во решенных примеров: ' + str(math_count))
        self.change_label()
        self.show_notes()

    def change_label(self):
        """Show a random text from FUNNY_TEXTS, re-wrapped to 6 words per line."""
        # 'with' guarantees the file handle is closed (the original leaked it).
        with open(FUNNY_TEXTS, 'rt', encoding='utf-8') as f:
            data = f.readlines()
        text = random.choice(data)
        text = text.split(' ')
        result = '\n'.join([' '.join(text[i:min(i + 6, len(text))]) for i in range(0, len(text), 6)])
        self.label.setText(result)

    def show_notes(self):
        """Load the selected day's notes into editable text boxes."""
        con = sqlite3.connect(NOTES_DB)
        cur = con.cursor()
        sql = """SELECT text, id FROM Notes
                 WHERE date = ?"""
        notes = cur.execute(sql, (self.d,)).fetchall()
        con.close()  # read-only access: close promptly instead of leaking

        def clearLayout(layout):
            # Remove and destroy every widget currently in the layout.
            while layout.count():
                child = layout.takeAt(0)
                if child.widget():
                    child.widget().deleteLater()

        clearLayout(self.verticalLayout_2)
        self.TextEditGroup = []
        for text, Id in notes:
            lineEdit = QTextEdit(text)
            lineEdit.setStyleSheet("background-color: rgb(255, 255, 127);")
            self.TextEditGroup.append([lineEdit, Id])
            self.verticalLayout_2.addWidget(lineEdit)
        for el in self.TextEditGroup:
            el[0].textChanged.connect(self.add_save_btn)

    def add_save_btn(self):
        """Create the save button the first time any note is modified."""
        if self.save_btn is None:
            self.save_btn = QPushButton('Сохранить изменения')
            self.verticalLayout.addWidget(self.save_btn)
            self.save_btn.clicked.connect(self.save_note_changed)

    def save_note_changed(self):
        """Persist edited notes; notes cleared to whitespace are deleted."""
        # NOTE(review): notes are read from NOTES_DB but written through
        # self.con (STATISTIC_DB) -- confirm both point at the same database.
        cur = self.con.cursor()
        sql_update = """UPDATE Notes SET text = ?
                        WHERE id = ?"""
        sql_delete = """DELETE FROM Notes
                        WHERE id = ?"""
        for text_edit, Id in self.TextEditGroup:
            if text_edit.toPlainText().strip() == '':
                cur.execute(sql_delete, (Id,))
            else:
                cur.execute(sql_update, (text_edit.toPlainText(), Id))
        self.con.commit()

    def find_delta(self, item):
        """Return the duration of one (time, time_end, date_end) session row."""
        t1 = dt.time.fromisoformat(item[0])
        t2 = dt.time.fromisoformat(item[1])
        dt1 = dt.datetime.combine(self.d, t1)
        d2 = dt.date.fromisoformat(item[2])
        dt2 = dt.datetime.combine(d2, t2)
        return dt2 - dt1

    def arithmetic_count(self):
        """Return how many arithmetic exercises were solved on the day."""
        con = sqlite3.connect(ARITHMETIC_DB)
        cur = con.cursor()
        sql = """SELECT count FROM Arithmetic
                 WHERE date = ?"""
        count = cur.execute(sql, (self.d,)).fetchall()
        con.close()  # read-only access: close promptly instead of leaking
        return sum(row[0] for row in count)
class Statistic:
    """Tracks one activity session from construction until end()."""

    def __init__(self):
        now = dt.datetime.today()
        # Truncate to whole minutes; seconds/microseconds are irrelevant here.
        self.start_dt = dt.datetime(now.year, now.month, now.day, now.hour, now.minute)

    def end(self):
        """Close the session and store it if it lasted at least one minute."""
        now = dt.datetime.today()
        self.end_dt = dt.datetime(now.year, now.month, now.day, now.hour, now.minute)
        con = sqlite3.connect(STATISTIC_DB)
        cur = con.cursor()
        if self.end_dt - self.start_dt >= dt.timedelta(minutes=1):
            sql = """INSERT INTO Activity(date, time, date_end, time_end)
                     VALUES (?, ?, ?, ?)"""
            cur.execute(sql, (self.start_dt.date(), self.start_dt.strftime("%H:%M"),
                              self.end_dt.date(), self.end_dt.strftime("%H:%M")))
            con.commit()
if __name__ == '__main__':
    # Standalone launch: open the statistics window in its own Qt event loop.
    app = QApplication(sys.argv)
    ex = StatisticWidget()
    ex.show()
    sys.exit(app.exec())
| samsadlonka/yandex_lyceum_qt_project | Statistic.py | Statistic.py | py | 5,184 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "StatistikWidjet_form.Ui_Statistic",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtGui.QIcon",
"line_number": 19,
"usage_type": "call"
},
{
"api_n... |
495364467 | import contextlib
import datetime
import errno
import inspect
import multiprocessing
import os
import re
import signal
import subprocess
import sys
import tempfile
import threading
from collections import namedtuple
from enum import Enum
import yaml
from six.moves import configparser
from dagster import check
from dagster.core.errors import DagsterInvariantViolationError
from dagster.seven import IS_WINDOWS, thread
from dagster.seven.abc import Mapping
from .subprocess_pdb import ForkedPdb
from .yaml_utils import load_yaml_from_glob_list, load_yaml_from_globs, load_yaml_from_path
if sys.version_info > (3,):
from pathlib import Path # pylint: disable=import-error
else:
from pathlib2 import Path # pylint: disable=import-error
EPOCH = datetime.datetime.utcfromtimestamp(0)  # naive datetime for the Unix epoch (UTC)
PICKLE_PROTOCOL = 2  # NOTE(review): presumably pinned for Python 2 interop -- confirm
DEFAULT_REPOSITORY_YAML_FILENAME = 'repository.yaml'  # default repository config filename
def file_relative_path(dunderfile, relative_path):
    '''Resolve *relative_path* against the directory containing *dunderfile*.

    Useful when a file must be located relative to the current source file
    (e.g. a config path embedded in code) independently of the working
    directory:

        file_relative_path(__file__, 'path/relative/to/file')
    '''
    check.str_param(dunderfile, 'dunderfile')
    check.str_param(relative_path, 'relative_path')
    base_dir = os.path.dirname(dunderfile)
    return os.path.join(base_dir, relative_path)
def script_relative_path(file_path):
    '''Return the absolute path of *file_path*, relative to the caller's file.

    Handy in tests that load fixtures sitting next to the test module.

    Note: very expensive (~1ms per call -- it inspects the interpreter
    stack), so prefer file_relative_path anywhere performance matters.
    '''
    # from http://bit.ly/2snyC6s
    check.str_param(file_path, 'file_path')
    caller_file = inspect.stack()[1][1]
    caller_dir = os.path.dirname(os.path.abspath(caller_file))
    return os.path.abspath(os.path.join(caller_dir, file_path))
# Adapted from https://github.com/okunishinishi/python-stringcase/blob/master/stringcase.py
def camelcase(string):
    """Convert a snake/kebab/dot-case *string* to CamelCase."""
    check.str_param(string, 'string')
    stripped = re.sub(r'^[\-_\.]', '', str(string))
    if not stripped:
        return stripped

    def _capitalize(matched):
        return str(matched.group(1)).upper()

    return str(stripped[0]).upper() + re.sub(r'[\-_\.\s]([a-z])', _capitalize, stripped[1:])
def ensure_single_item(ddict):
    """Return the (key, value) pair of a dict that must contain exactly one entry."""
    check.dict_param(ddict, 'ddict')
    check.param_invariant(len(ddict) == 1, 'ddict', 'Expected dict with single item')
    (item,) = ddict.items()
    return item
@contextlib.contextmanager
def pushd(path):
    """Temporarily chdir into *path*; always restore the previous cwd on exit."""
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield path
    finally:
        os.chdir(previous)
def safe_isfile(path):
    """os.path.isfile that returns False instead of raising ValueError.

    Backports the Python 3.8 os.path behavior: on older versions, paths
    such as ones containing an embedded null byte make isfile raise
    ValueError. Treat those as "not a file" rather than letting an
    unexpected ValueError escape from deep inside our logic.
    """
    try:
        return os.path.isfile(path)
    except ValueError:
        return False
def mkdir_p(path):
    """Create *path* (with parents) if needed; tolerate an existing directory.

    Returns the path when it was created, None when it already existed.
    """
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        # Re-raise anything other than "directory already exists".
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
        return None
    return path
def merge_dicts(left, right):
    """Return a new dict with *right*'s entries overriding *left*'s."""
    check.dict_param(left, 'left')
    check.dict_param(right, 'right')
    merged = dict(left)
    merged.update(right)
    return merged
class frozendict(dict):
    """An immutable dict: every mutating method raises RuntimeError."""

    def __readonly__(self, *args, **kwargs):
        raise RuntimeError("Cannot modify ReadOnlyDict")
    # https://docs.python.org/3/library/pickle.html#object.__reduce__
    #
    # For a dict, the default behavior for pickle is to iteratively call __setitem__ (see 5th item
    # in __reduce__ tuple). Since we want to disable __setitem__ and still inherit dict, we
    # override this behavior by defining __reduce__. We return the 3rd item in the tuple, which is
    # passed to __setstate__, allowing us to restore the frozendict.
    def __reduce__(self):
        return (frozendict, (), dict(self))
    def __setstate__(self, state):
        self.__init__(state)
    # Point every mutator at the raising stub, then remove the stub's own
    # name from the class namespace so only the aliases remain.
    __setitem__ = __readonly__
    __delitem__ = __readonly__
    pop = __readonly__
    popitem = __readonly__
    clear = __readonly__
    update = __readonly__
    setdefault = __readonly__
    del __readonly__
class frozenlist(list):
    """An immutable list: every mutating method raises RuntimeError."""

    def __readonly__(self, *args, **kwargs):
        raise RuntimeError("Cannot modify ReadOnlyList")
    # Alias every mutator to the raising stub.
    __setitem__ = __readonly__
    __delitem__ = __readonly__
    append = __readonly__
    clear = __readonly__
    extend = __readonly__
    insert = __readonly__
    pop = __readonly__
    remove = __readonly__
    reverse = __readonly__
    sort = __readonly__
def make_readonly_value(value):
    """Recursively wrap lists and dicts in their frozen, read-only counterparts."""
    if isinstance(value, dict):
        return frozendict({k: make_readonly_value(v) for k, v in value.items()})
    if isinstance(value, list):
        return frozenlist([make_readonly_value(item) for item in value])
    return value
def get_prop_or_key(elem, key):
    """Fetch *key* from a mapping via .get(), otherwise as an attribute."""
    return elem.get(key) if isinstance(elem, Mapping) else getattr(elem, key)
def list_pull(alist, key):
    """Project *key* out of every element (mapping or object) of *alist*."""
    return [get_prop_or_key(elem, key) for elem in alist]
def get_multiprocessing_context():
    """Return a multiprocessing context using the 'spawn' start method.

    'spawn' avoids fork() and gives identical behavior across platforms.
    Older Pythons without get_context fall back to the module default
    (fork on Unix-like systems, spawn on Windows).

    https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context
    """
    if not hasattr(multiprocessing, 'get_context'):
        return multiprocessing
    return multiprocessing.get_context('spawn')
def all_none(kwargs):
    """True when every value in *kwargs* is None (vacuously True when empty)."""
    return all(value is None for value in kwargs.values())
def check_script(path, return_code=0):
    """Run *path* with `python` and check its exit code.

    With the default return_code of 0, any failure propagates as
    CalledProcessError. With a nonzero expectation, a matching failure is
    swallowed and a mismatching one re-raised.
    """
    try:
        subprocess.check_output(['python', path])
    except subprocess.CalledProcessError as exc:
        if return_code != 0:
            if exc.returncode == return_code:
                return
        raise
def check_cli_execute_file_pipeline(path, pipeline_fn_name, env_file=None):
    """Execute a pipeline via the dagster CLI; re-raise (after printing) on failure."""
    cli_cmd = ['python', '-m', 'dagster', 'pipeline', 'execute', '-f', path, '-n', pipeline_fn_name]
    if env_file:
        cli_cmd.append('-e')
        cli_cmd.append(env_file)
    try:
        subprocess.check_output(cli_cmd)
    except subprocess.CalledProcessError as cpe:
        # Surface the CLI failure before propagating it to the caller.
        print(cpe)
        raise cpe
@contextlib.contextmanager
def safe_tempfile_path():
    # This gets a valid temporary file path in the safest possible way, although there is still no
    # guarantee that another process will not create a file at this path. The NamedTemporaryFile is
    # deleted when the context manager exits and the file object is closed.
    #
    # This is preferable to using NamedTemporaryFile as a context manager and passing the name
    # attribute of the file object around because NamedTemporaryFiles cannot be opened a second time
    # if already open on Windows NT or later:
    # https://docs.python.org/3.8/library/tempfile.html#tempfile.NamedTemporaryFile
    # https://github.com/dagster-io/dagster/issues/1582
    with tempfile.NamedTemporaryFile() as fd:
        path = fd.name
    # The with-block above has already closed (and thereby deleted) the file:
    # only the unique *name* survives to be yielded below.
    try:
        yield Path(path).as_posix()
    finally:
        # Clean up anything the caller may have created at the reserved path.
        if os.path.exists(path):
            os.unlink(path)
def ensure_gen(thing_or_gen):
    """Return *thing_or_gen* unchanged if it is a generator, else a one-item generator."""
    if inspect.isgenerator(thing_or_gen):
        return thing_or_gen

    def _single():
        yield thing_or_gen

    return _single()
def ensure_dir(file_path):
    """Create directory *file_path* (with parents); tolerate its existence."""
    try:
        os.makedirs(file_path)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        raise
def ensure_file(path):
    """Guarantee that a (possibly empty) file exists at *path*."""
    ensure_dir(os.path.dirname(path))
    if os.path.exists(path):
        return
    touch_file(path)
def touch_file(path):
    """Unix-style touch: create *path* if missing and refresh its timestamps."""
    ensure_dir(os.path.dirname(path))
    with open(path, 'a'):
        os.utime(path, None)
pdb = ForkedPdb()  # shared ForkedPdb instance (from .subprocess_pdb) for debugging in subprocesses
def _kill_on_event(termination_event):
    """Block until *termination_event* fires, then interrupt this process."""
    termination_event.wait()
    if IS_WINDOWS:
        # This will raise a KeyboardInterrupt in python land - meaning this wont be able to
        # interrupt things like sleep()
        thread.interrupt_main()
    else:
        # If on unix send an os level signal to interrupt any situation we may be stuck in
        os.kill(os.getpid(), signal.SIGINT)
# Function to be invoked by daemon thread in processes which seek to be cancellable.
# The motivation for this approach is to be able to exit cleanly on Windows. An alternative
# path is to change how the processes are opened and send CTRL_BREAK signals, which at
# the time of authoring seemed a more costly approach.
#
# Reading for the curious:
# * https://stackoverflow.com/questions/35772001/how-to-handle-the-signal-in-python-on-windows-machine
# * https://stefan.sofa-rockers.org/2013/08/15/handling-sub-process-hierarchies-python-linux-os-x/
def start_termination_thread(termination_event):
    """Spawn a daemon thread that interrupts this process when the event fires."""
    check.inst_param(
        termination_event, 'termination_event', ttype=type(get_multiprocessing_context().Event())
    )
    # Daemon thread: must not keep the process alive on normal exit.
    int_thread = threading.Thread(target=_kill_on_event, args=(termination_event,))
    int_thread.daemon = True
    int_thread.start()
def datetime_as_float(dt):
    """Return *dt* as seconds since the Unix epoch, as a float."""
    check.inst_param(dt, 'dt', datetime.datetime)
    # timedelta.total_seconds() already returns a float; the extra float()
    # wrapper in the original was redundant.
    return (dt - EPOCH).total_seconds()
| helloworld/continuous-dagster | deploy/dagster_modules/dagster/dagster/utils/__init__.py | __init__.py | py | 9,956 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "sys.version_info",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.utcfromtimestamp",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 32,
"usage_type": "attribute"
},
{
"a... |
11424600276 | import abc
import datetime
import logging
import multiprocessing
import signal
import socket
import threading
import six
import edera.helpers
from edera.consumers import InterProcessConsumer
from edera.flags import InterProcessFlag
from edera.flags import InterThreadFlag
from edera.helpers import Sasha
from edera.helpers import SimpleBox
from edera.invokers import MultiProcessInvoker
from edera.invokers import MultiThreadedInvoker
from edera.invokers import PersistentInvoker
from edera.managers import CascadeManager
from edera.monitoring import MonitoringAgent
from edera.monitoring import MonitorWatcher
from edera.routine import deferrable
from edera.routine import routine
from edera.workflow.builder import WorkflowBuilder
from edera.workflow.executors import BasicWorkflowExecutor
from edera.workflow.executors import ManagedWorkflowExecutor
from edera.workflow.executors import MonitoringWorkflowExecutor
from edera.workflow.processors import TagFilter
from edera.workflow.processors import TargetCacher
from edera.workflow.processors import TargetLocker
from edera.workflow.processors import TargetPostChecker
from edera.workflow.processors import TaskFreezer
from edera.workflow.processors import TaskRanker
from edera.workflow.processors import WorkflowNormalizer
from edera.workflow.processors import WorkflowTrimmer
@six.add_metaclass(abc.ABCMeta)
class Daemon(object):
    """
    A daemon that builds and executes workflows on regular basis.
    The daemon consists of three modules: $prelude, $main, and $support.
    The $support module runs in parallel with the other two.
    The $prelude module must finish successfully in order for the $main module to start running.
    Each module provides a seed function that generates a root task of the workflow to be executed
    at a particular moment in time.
    It also specifies schedules for different tags (along with $None).
    Each specified tag is handled in a separate process.
    The $builder persistently re-builds the workflow and $executor instances repeatedly attempt
    to execute it.
    Only $prelude's workflows can actually finish.
    The daemon handles SIGINT/SIGTERM signals and knows how to stop gracefully.
    However, you should avoid having active background threads while running the daemon.
    This may lead to a deadlock (see documentation for $ProcessWorker for details).
    Attributes:
        autotester (Optional[DaemonAutoTester]) - the auto-tester used to testify workflows
            Only the $main module will be auto-tested.
        builder (WorkflowBuilder)
        cache (Optional[Storage]) - the storage used to cache targets
        executor (WorkflowExecutor)
        interruption_timeout (TimeDelta) - time to wait for interrupted executors to terminate
        locker (Optional[Locker]) - the locker used to deduplicate task execution
        main (DaemonModule)
        manager (ContextManager) - the context manager that controls both building and execution
        monitor (Optional[Storage]) - the storage used for monitoring purposes
        postprocessors (Iterable[WorkflowProcessor]) - the sequence of processors applied after
            filtering by tag
        prelude (Optional[StaticDaemonModule])
        preprocessors (Iterable[WorkflowProcessor]) - the sequence of processors applied before
            filtering by tag
        support (Optional[DaemonModule])
    Constants:
        CONSUMER_CAPACITY (Integer) - the capacity of the consumer that serves monitoring agents
        CONSUMER_BACKOFF (TimeDelta) - the backoff of the consumer that serves monitoring agents
    See also:
        $DaemonAutoTester
        $DaemonModule
        $DaemonSchedule
    WARNING!
        Avoid having active background threads when starting the daemon.
        See $ProcessWorker's documentation for details.
    """

    CONSUMER_CAPACITY = 1000
    CONSUMER_BACKOFF = datetime.timedelta(seconds=1)
    def __init__(self):
        # The consumer forwards (key, value) pairs to the monitor storage
        # across process boundaries.
        self.__consumer = InterProcessConsumer(
            lambda kv: self.monitor.put(*kv),
            self.CONSUMER_CAPACITY,
            self.CONSUMER_BACKOFF)
    @property
    def autotester(self):
        """Optional auto-tester for the $main module; None by default."""
        pass
    @property
    def builder(self):
        """Workflow builder used to expand a root task into a workflow."""
        return WorkflowBuilder()
    @property
    def cache(self):
        """Optional target-caching storage; None by default."""
        pass
    @property
    def executor(self):
        """Workflow executor, wrapped for monitoring (if enabled) and management."""
        result = BasicWorkflowExecutor()
        if self.monitor is not None:
            # Agent name identifies host/process/thread of this executor.
            host_name = socket.getfqdn()
            process_name = multiprocessing.current_process().name
            thread_name = threading.current_thread().name
            agent_name = ":".join([host_name, process_name, thread_name])
            agent = MonitoringAgent(agent_name, self.monitor, self.__consumer)
            result = MonitoringWorkflowExecutor(result, agent)
        result = ManagedWorkflowExecutor(result, self.manager)
        return result
    @property
    def interruption_timeout(self):
        """Time to wait for interrupted executors to terminate."""
        return datetime.timedelta(minutes=1)
    @property
    def locker(self):
        """Optional locker for task-execution deduplication; None by default."""
        pass
    @abc.abstractproperty
    def main(self):
        """The mandatory $main module; subclasses must provide it."""
        pass
    @property
    def manager(self):
        """Context manager wrapped around building and execution; empty cascade by default."""
        return CascadeManager([])
    @property
    def monitor(self):
        """Optional monitoring storage; None disables monitoring."""
        pass
    @property
    def postprocessors(self):
        """Yield the processors applied after tag filtering."""
        if self.cache is not None:
            yield TargetCacher(self.cache)
        yield WorkflowTrimmer()
        yield TargetPostChecker()
        if self.locker is not None:
            yield TargetLocker(self.locker)
        yield TaskRanker()
    @property
    def prelude(self):
        """Optional $prelude module; None by default."""
        pass
    @property
    def preprocessors(self):
        """Yield the processors applied before tag filtering."""
        yield TaskFreezer()
        yield WorkflowNormalizer()
    @routine
    def run(self):
        """
        Start the daemon.
        """
        def check_interruption_flag():
            # Raised flag means SIGINT/SIGTERM arrived: stop gracefully.
            if interruption_flag.raised:
                raise SystemExit("SIGINT/SIGTERM received")
        multiprocessing.current_process().name = "-"
        threading.current_thread().name = "-"
        interruption_flag = InterProcessFlag()
        # Sasha installs the signal handlers for the duration of the run.
        with Sasha({signal.SIGINT: interruption_flag.up, signal.SIGTERM: interruption_flag.up}):
            logging.getLogger(__name__).info("Daemon starting")
            try:
                yield self.__run[check_interruption_flag].defer()
            except SystemExit as error:
                logging.getLogger(__name__).info("Daemon stopped: %s", error)
    @property
    def support(self):
        """Optional $support module; None by default."""
        pass
    @routine
    def __build(self, seeder, testable, tag, box):
        """Build one workflow for *tag* from *seeder* and publish it into *box*."""
        if testable and self.autotester is not None:
            root = self.autotester.seed(seeder)
        else:
            root = seeder(edera.helpers.now())
        workflow = self.builder.build(root)
        with self.manager:
            if testable and self.autotester is not None:
                yield
                self.autotester.testify(workflow)
            for processor in self.preprocessors:
                yield deferrable(processor.process).defer(workflow)
            TagFilter(tag).process(workflow)
            for processor in self.postprocessors:
                yield deferrable(processor.process).defer(workflow)
        box.put(workflow)
    @routine
    def __execute(self, box, completion_flag):
        """Wait for a workflow in *box*, execute it, then raise the completion flag."""
        while True:
            workflow = box.get()
            if workflow is not None:
                break
            # No workflow built yet: back off briefly before re-checking.
            yield edera.helpers.sleep.defer(datetime.timedelta(seconds=1))
        yield deferrable(self.executor.execute).defer(workflow)
        completion_flag.up()
    @routine
    def __run(self):
        """Launch the consumer, watcher, and module launchers in subprocesses."""
        @routine
        def run_consumer():
            yield self.__consumer.run.defer()
        @routine
        def run_watcher():
            if self.monitor is not None:
                yield MonitorWatcher(self.monitor).run.defer(delay=datetime.timedelta(seconds=1))
        @routine
        def run_support():
            if self.support is not None:
                yield self.__run_module.defer("support", self.support)
        @routine
        def run_main():
            if self.prelude is not None:
                yield self.__run_module.defer("prelude", self.prelude, sustain=False)
            regular = self.autotester is None
            yield self.__run_module.defer("main", self.main, sustain=regular, testable=True)
            # NOTE(review): reached only when the main module terminates
            # (sustain=False, i.e. auto-testing mode) -- confirm, since with
            # autotester None the module is sustained indefinitely.
            self.autotester.finish()
        timeout = 2 * self.interruption_timeout
        yield MultiProcessInvoker(
            {
                "consumer": run_consumer,
                "watcher": run_watcher,
                "launcher#support": run_support,
                "launcher#main": run_main,
            },
            interruption_timeout=timeout).invoke.defer()
    @routine
    def __run_module(self, name, module, sustain=True, testable=False):
        """Run one module: a subprocess per scheduled tag."""
        timeout = 2 * self.interruption_timeout
        yield MultiProcessInvoker(
            {
                (name if tag is None else name + "#" + tag): self.__run_module_branch.fix(
                    tag,
                    module.seed,
                    module.scheduling[tag],
                    sustain,
                    testable)
                for tag in module.scheduling
            },
            interruption_timeout=timeout).invoke.defer()
    @routine
    def __run_module_branch(self, tag, seeder, schedule, sustain, testable):
        """Run one tag branch: a persistent builder thread plus executor threads."""
        def check_completion_flag():
            # A non-sustained branch exits once an executor finishes a workflow.
            if not sustain and completion_flag.raised:
                raise SystemExit("finished successfully")
        box = SimpleBox()
        completion_flag = InterThreadFlag()
        timeout = 2 * self.interruption_timeout
        yield MultiThreadedInvoker(
            {
                "builder": PersistentInvoker(
                    self.__build.fix(seeder, testable, tag, box),
                    delay=schedule.building_delay).invoke,
                "executor": MultiThreadedInvoker.replicate(
                    PersistentInvoker(
                        self.__execute.fix(box, completion_flag),
                        delay=schedule.execution_delay).invoke,
                    schedule.executor_count,
                    prefix="executor-",
                    interruption_timeout=self.interruption_timeout).invoke,
            },
            interruption_timeout=timeout).invoke[check_completion_flag].defer()
| thoughteer/edera | edera/daemon/daemon.py | daemon.py | py | 10,308 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "datetime.timedelta",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "edera.consumers.InterProcessConsumer",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "edera.workflow.builder.WorkflowBuilder",
"line_number": 108,
"usage_type": "call"... |
29618692349 | from datetime import datetime, timedelta
from flask import Blueprint, send_from_directory, jsonify, request, session, redirect, url_for
from playhouse.shortcuts import dict_to_model, model_to_dict
from app.model import QueryType
from app.model import Product, ProductRegion, ProductCategory, ProductImage
from app.model import QueryLog, DetailLog, PurchaseLog, QueryBackLog, DetailBackLog
from app.utils import models_to_dict
from app.resp import suc, err
product = Blueprint('product', __name__)  # HTML page routes (templates)
product_api = Blueprint('product_api', __name__)  # JSON API routes
@product.route('/list/')
def html_product_list():
    """Serve the product-list page; bounce anonymous visitors to login."""
    logged_in = 'user_id' in session
    if not logged_in:
        return redirect(url_for('user.html_login'))
    return send_from_directory('templates/product/', 'list.html')
@product.route('/<int:product_id>')
def html_product_detail(product_id):
    """Serve the product-detail page; bounce anonymous visitors to login."""
    if 'user_id' in session:
        return send_from_directory('templates/product', 'detail.html')
    return redirect(url_for('user.html_login'))
@product_api.route('/product')
def get_product():
    """List products by region or category, logging the query.

    Exactly one of ``region_id`` / ``category_id`` is expected in the query
    string; ``region_id`` wins when both are supplied (as before).
    """
    region_id = request.args.get('region_id')
    category_id = request.args.get('category_id')
    # NOTE(review): assert is stripped under `python -O`; consider returning
    # an explicit 400 via err()/abort() instead.
    assert region_id or category_id
    # The two branches of the original were identical except for these three
    # values, so they are deduplicated here.
    if region_id:
        query_type, query_id, column = QueryType.REGION.value, region_id, Product.region_id
    else:
        query_type, query_id, column = QueryType.CATEGORY.value, category_id, Product.category_id
    QueryLog.insert({
        'user_id': session['user_id'],
        'query_type': query_type,
        'query_id': query_id
    }).execute()
    return suc(models_to_dict(Product.select().where(column == query_id)))
@product_api.route('/region')
def get_product_region():
    """List every product region."""
    regions = ProductRegion.select()
    return suc(models_to_dict(regions))
@product_api.route('/category')
def get_product_category():
    """List every product category."""
    categories = ProductCategory.select()
    return suc(models_to_dict(categories))
@product_api.route('/<int:product_id>')
def product_detail(product_id):
    """Record a detail-page view and return the product as a dict."""
    DetailLog.insert({
        'user_id': session['user_id'],
        'product_id': product_id,
    }).execute()
    detail = model_to_dict(Product.get(product_id))
    return suc(detail)
@product_api.route('/image/<int:product_id>')
def get_product_images(product_id):
    """Return the filenames of every image attached to the product."""
    images = ProductImage.select().where(ProductImage.product_id == product_id)
    return suc([image.filename for image in images])
@product_api.route('/repr_image/<int:product_id>')
def get_product_repr_image(product_id):
    """Return the filename of the product's representative image."""
    image = ProductImage.get(ProductImage.product_id == product_id)
    return suc(image.filename)
@product_api.route('/purchase/<int:product_id>', methods=["POST"])
def purchase(product_id):
    """Record that the current user purchased the product."""
    PurchaseLog.insert({
        'user_id': session['user_id'],
        'product_id': product_id,
    }).execute()
    return suc()
@product_api.route('/back', methods=['POST'])
def back():
    """Log a browser 'back' navigation from the list or detail page."""
    pathname = request.json['pathname']
    query = request.json['search'].removeprefix('?')
    if pathname == '/product/list/':
        query_key, query_id = query.split('=')
        is_region = query_key == 'region_id'
        QueryBackLog.insert({
            'user_id': session['user_id'],
            'query_type': QueryType.REGION.value if is_region else QueryType.CATEGORY.value,
            'query_id': query_id
        }).execute()
    else:
        DetailBackLog.insert({
            'user_id': session['user_id'],
            'product_id': pathname.split('/')[-1]
        }).execute()
    return suc()
| revectores/online-shopping-simulator | src/app/handler/product/product.py | product.py | py | 3,502 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.Blueprint",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"l... |
17411563971 | # import os
# os.environ["NUMBA_ENABLE_CUDASIM"] = "1" dont do this ...
import numpy as np
from numba import cuda
from numba.core.errors import NumbaPerformanceWarning
import warnings
warnings.simplefilter('ignore', category=NumbaPerformanceWarning)
import gc
import time
import threading
cuda.select_device(0)
# constants
filedir = 'C:/Resin/articulated-dragon-mcgybeer20211115-748-6f8p9f/mcgybeer/articulated-dragon-mcgybeer/Dragon_v2/' #'C:/VSCode/PythonTest/' #
filename = 'Dragon_v2.stl' #'test.stl'# 'cube.stl'#
stop_loop = False
# end constants
from parallel_prefix_sum import *
from cuda_functions import *
from py_functions import *
# Load and scale the mesh; voxel_bounds is indexed as [x, y, z] below,
# presumably the voxel-grid extents -- confirm against initMesh in py_functions.
mesh, voxel_bounds = initMesh(filedir + filename, scale = 0.5, scale_rel_to_buildplate = False, rotZ=0)
d_img = cuda.device_array((int(voxel_bounds[0]),int(voxel_bounds[1])), np.int32) # final per-layer slice image (GPU)
d_img_1d = cuda.device_array((int(voxel_bounds[0]*voxel_bounds[1])), np.int32)  # flattened scratch buffer for the sparse conversion
d_image_to_index = cuda.device_array((int(voxel_bounds[0]),int(voxel_bounds[1])), np.int32)  # pixel -> sparse-index map, presumably -- confirm
def run_loop(mesh, voxel_bounds, d_img, d_img_1d, d_image_to_index):
    """Compute and save one exposure image per z layer of the mesh.

    For each layer: rasterise the slice, locate the surface points each lit
    pixel influences, optimise per-pixel exposure near the surface, merge with
    full-exposure infill, and write the layer PNG. Device buffers are dropped
    and garbage-collected every layer to keep GPU memory bounded. The loop
    aborts between layers once the input thread sets ``stop_loop``.
    """
    global stop_loop
    for z_layer in range(voxel_bounds[2]):
        if (stop_loop):
            print('Loop aborted by user')
            break
        if (z_layer == 0): # first layer has height != zero
            continue
        # if (z_layer > 1):
        #     continue
        print(z_layer, voxel_bounds[2]) # progress report
        # get a 2D svg vector path at z_layer
        slice_path = getPath(mesh, z_layer)
        # filledImg = slice_path.rasterize(pitch=1.0, origin=[0,0], resolution=voxel_bounds[0:2], fill=True, width=0)
        # plt.imsave(fname='output/debug_' + str(z_layer).zfill(4) + '.png', arr=filledImg, cmap='gray_r', format='png')
        # break
        # create a pixel image of sclice
        getDeviceSliceImg(d_img, slice_path)
        # saveImage(d_img, 'debug_' + str(z_layer).zfill(4) + '.png')
        # break
        # transform slice img to a sparse form mat[x,y] -> (x,y,1)
        d_sparseImg = convertToSparseForm(d_img, d_img_1d)
        # d_out_img = cuda.device_array((int(d_img.shape[0]),int(d_img.shape[1])), np.uint8)
        # sparseImg_to_img(d_out_img, d_sparseImg)
        # saveImage(d_out_img, 'debug_' + str(z_layer).zfill(4) + '.png')
        # break
        # get a thin part of the mesh. We need part of the mesh that is influenced by the exposure. Direction -z -> This is the part that has already been slices/exposed.
        d_faces, d_vertices = getThinSlice(mesh, z_layer)
        # find the closest point on the surface (only thin slice) for each pixel in d_sparseImg
        # filter by distance
        d_points_on_surface = getSurfacePoints(d_sparseImg, z_layer, d_faces, d_vertices)
        # release the thin-slice geometry as soon as it has been consumed
        d_faces = None
        d_vertices = None
        gc.collect()
        # all points with large distance are infill and get full exposure
        markInfill(d_img, d_points_on_surface, d_sparseImg)
        # saveImage(d_img, 'img_infill_' + str(z_layer).zfill(4) + '.png')
        # create new list without infill points and points that where to close
        # stream = cuda.stream()
        # sparseImg_dump = d_sparseImg.copy_to_host(stream=stream)
        # points_on_surface_dump = d_points_on_surface.copy_to_host(stream=stream)
        # image_to_index_dump = d_image_to_index.copy_to_host(stream=stream)
        # stream.synchronize()
        # try:
        d_reducedSparseImg, d_reduced_points_on_surface = reducePoints(d_sparseImg, d_points_on_surface, d_image_to_index)
        # except Exception as inst:
        #     # dump relevant data to files
        #     np.savetxt("output/sparseImg_dump.txt", sparseImg_dump, delimiter =", ")
        #     np.savetxt("output/points_on_surface_dump.txt", points_on_surface_dump, delimiter =", ")
        #     np.savetxt("output/image_to_index_dump.txt", image_to_index_dump, delimiter =", ")
        #     print('Error in Layer ',z_layer)
        #     print(type(inst))
        #     break
        d_sparseImg = None
        d_points_on_surface = None
        gc.collect()
        d_out_img = cuda.device_array((int(d_img.shape[0]),int(d_img.shape[1])), np.uint8)
        sparseImg_to_img(d_out_img, d_reducedSparseImg)
        # saveImage(d_out_img, 'debug_' + str(z_layer).zfill(4) + '.png')
        # break
        # we need to know which surface points get light from which pixels and vice versa. For each connection we calculate the distance. This sets memory < recalculation.
        d_pixel_to_surface_points, d_pixel_to_surface_points_distances, d_surface_point_to_pixels, d_surface_point_to_pixels_distances = createSparseDependencies(d_reducedSparseImg, d_reduced_points_on_surface, d_image_to_index, z_layer)
        # find good exposure for pixels near surface.
        d_sparse_pixel_exposure, d_sparse_surface_exposure, d_steps = initExposure(d_reducedSparseImg, d_reduced_points_on_surface, d_pixel_to_surface_points, d_pixel_to_surface_points_distances, d_surface_point_to_pixels, d_surface_point_to_pixels_distances)
        # increase in small weighted steps until desired expusure for surface points is reached
        optimizeExposure(d_sparse_pixel_exposure, d_sparse_surface_exposure, d_steps, d_pixel_to_surface_points, d_pixel_to_surface_points_distances, d_surface_point_to_pixels, d_surface_point_to_pixels_distances)
        # combine with infill
        copyExposureToFinalImg(d_img, d_sparse_pixel_exposure, d_reducedSparseImg)
        # export result
        saveImage(d_img, 'img_' + str(z_layer).zfill(4) + '.png')
        # collect garbage
        d_reducedSparseImg = None
        d_reduced_points_on_surface = None
        d_sparse_pixel_exposure = None
        d_steps = None
        d_sparse_surface_exposure = None
        d_pixel_to_surface_points = None
        d_pixel_to_surface_points_distances = None
        d_surface_point_to_pixels = None
        d_surface_point_to_pixels_distances = None
        gc.collect()
        # print(numba.core.runtime.rtsys.get_allocation_stats())
        meminfo = cuda.current_context().get_memory_info()
        print("free: %s bytes, total, %s bytes" % (meminfo[0], meminfo[1]))
        time.sleep(0.001)
    # final cleanup after the loop: drop all remaining device references
    # (NOTE(review): placement reconstructed from the mangled paste; these
    # rebinds only clear the local names, the caller's buffers are unaffected)
    d_img = None
    d_img_1d = None
    d_image_to_index = None
    d_reducedSparseImg = None
    d_reduced_points_on_surface = None
    d_sparse_pixel_exposure = None
    d_steps = None
    d_sparse_surface_exposure = None
    d_pixel_to_surface_points = None
    d_pixel_to_surface_points_distances = None
    d_surface_point_to_pixels = None
    d_surface_point_to_pixels_distances = None
    gc.collect()
def get_input():
    """Wait for any console input, then raise the shared stop flag.

    Runs on its own thread; run_loop polls ``stop_loop`` between layers.
    """
    global stop_loop
    pressed = input('Press a key \n')  # blocks this thread until input arrives
    print('You pressed: ', pressed)
    stop_loop = True
    print('flag is now:', stop_loop)
# worker thread runs the slicing loop; a second thread waits for a key press
# so the loop can be aborted via the shared stop_loop flag
n=threading.Thread(target=run_loop, args=[mesh, voxel_bounds, d_img, d_img_1d, d_image_to_index])
i=threading.Thread(target=get_input)
i.start()
n.start()
{
"api_name": "warnings.simplefilter",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numba.core.errors.NumbaPerformanceWarning",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "numba.cuda.select_device",
"line_number": 13,
"usage_type": "call"
},
{... |
74260921062 | import numpy as np
from nuscenes import NuScenes
from typing import Dict, List, Set
from pathlib import Path
import pickle
from tqdm import tqdm
import json
import copy
import argparse
from easydict import EasyDict
import yaml
from nuscenes.eval.detection.config import config_factory
from ..dataset import DatasetTemplate
from pcdet.datasets.nuscenes import nuscenes_utils
from pcdet.utils import common_utils
from pcdet.datasets.v2x_sim.v2x_sim_utils import get_points_and_boxes_of_1lidar, get_nuscenes_sensor_pose_in_global, get_pseudo_sweeps_of_1lidar
from pcdet.datasets.v2x_sim.v2x_sim_eval_utils import transform_det_annos_to_nusc_annos, V2XSimDetectionEval
class V2XSimDataset_RSU(DatasetTemplate):
    """V2X-Sim dataset restricted to the road-side-unit LiDAR (LIDAR_TOP_id_0).

    Wraps a NuScenes-format V2X-Sim dump: builds per-sample info pickles,
    serves (points, gt boxes, instance transforms) samples for training, and
    evaluates detections with a V2X-Sim variant of the nuScenes detection eval.
    """

    def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, nusc=None):
        """Load pre-built infos (if present) and the NuScenes handle.

        `nusc` may be passed in to share one NuScenes instance across datasets.
        """
        root_path = (root_path if root_path is not None else Path(dataset_cfg.DATA_PATH)) / dataset_cfg.VERSION
        super().__init__(dataset_cfg, class_names, training, root_path, logger)
        self.infos: List[Dict] = list()
        # 'mini' vs 'full' prefix distinguishes info files of the two versions
        self._prefix = 'mini' if 'mini' in self.dataset_cfg.VERSION else 'full'
        if nusc is None:
            self.nusc = NuScenes(dataroot=root_path, version=dataset_cfg.VERSION, verbose=False)
        else:
            self.nusc = nusc
        self.point_cloud_range = np.array(dataset_cfg.POINT_CLOUD_RANGE)
        self.classes_of_interest = set(dataset_cfg.get('CLASSES_OF_INTEREST', ['car', 'pedestrian']))
        # num_sweeps counts the current sweep plus the historical ones
        self.num_sweeps = dataset_cfg.get('NUM_HISTORICAL_SWEEPS', 10) + 1
        self.num_historical_sweeps = dataset_cfg.get('NUM_HISTORICAL_SWEEPS', 10)
        path_train_infos = self.root_path / f"{self._prefix}_{self.dataset_cfg.INFO_PATH['train'][0]}"
        print('path_train_infos: ', path_train_infos)
        if not path_train_infos.exists():
            self.logger.warn('dataset infos do not exist, call build_v2x_sim_info')
        else:
            self.include_v2x_sim_data(self.mode)
        self.all_sample_data_tokens = [_info['lidar_token'] for _info in self.infos]  # for evaluation

    def __len__(self):
        """Number of samples (repeated `total_epochs` times when epochs are merged)."""
        if self._merge_all_iters_to_one_epoch:
            return len(self.infos) * self.total_epochs
        return len(self.infos)

    def include_v2x_sim_data(self, mode):
        """Load the info pickle(s) for `mode` into self.infos, sorted by timestamp."""
        self.logger.info('Loading V2X-Sim dataset')
        v2x_infos = []
        for info_path in self.dataset_cfg.INFO_PATH[mode]:
            info_path = f"{self._prefix}_{info_path}"
            info_path = self.root_path / info_path
            if not info_path.exists():
                continue
            with open(info_path, 'rb') as f:
                infos = pickle.load(f)
            # keep only point-cloud lidars; skip semantic-segmentation channels
            for _info in infos:
                lidar_rec = self.nusc.get('sample_data', _info['lidar_token'])
                if 'SEM' not in lidar_rec['channel']:
                    v2x_infos.append(_info)
        self.infos.extend(v2x_infos)
        self.infos.sort(key=lambda e: e['timestamp'])
        if self.training and self.dataset_cfg.get('MINI_TRAINVAL_STRIDE', 1) > 1:
            self.infos = self.infos[::self.dataset_cfg.MINI_TRAINVAL_STRIDE]  # use 1/4th of the trainval data
        self.logger.info('Total samples for V2X-Sim dataset: %d' % (len(v2x_infos)))

    def _build_train_val_split(self):
        """Partition scenes into train/val by town and pickle the split."""
        # town 4, 5 for train
        # town 3 for val
        train_locs = set([4, 5])
        val_locs = set([3,])
        train_scenes_token, val_scenes_token = list(), list()
        for scene in self.nusc.scene:
            log = self.nusc.get('log', scene['log_token'])
            # NOTE(review): train_locs holds ints -- confirm log['location'] is
            # an int in V2X-Sim (vanilla nuScenes stores a string here).
            # val_locs is never consulted: every non-train scene falls into val.
            if log['location'] in train_locs:
                train_scenes_token.append(scene['token'])
            else:
                val_scenes_token.append(scene['token'])
        if 'mini' not in self.dataset_cfg.VERSION:
            # full
            trainval_split = {
                'train': set(train_scenes_token),
                'val': val_scenes_token
            }
        else:
            # mini -> just for testing functionalities
            split_tokens = train_scenes_token if len(train_scenes_token) > 0 else val_scenes_token
            trainval_split = {
                'train': set(split_tokens),
                'val': split_tokens
            }
        path_ = self.root_path / Path(f"{self._prefix}_trainval_split.pkl")
        with open(path_, 'wb') as f:
            pickle.dump(trainval_split, f)

    def build_v2x_sim_info(self) -> None:
        """Create train/val info pickles (gt boxes, names, point counts) for the RSU lidar."""
        path_trainval_split = self.root_path / Path(f"{self._prefix}_trainval_split.pkl")
        if not path_trainval_split.exists():
            self._build_train_val_split()
        with open(path_trainval_split, 'rb') as f:
            trainval_split = pickle.load(f)
        lidar_name = 'LIDAR_TOP_id_0'
        train_infos, val_infos = list(), list()
        for sample in tqdm(self.nusc.sample, total=len(self.nusc.sample), desc='create_info', dynamic_ncols=True):
            # skip samples that do not carry the RSU lidar channel
            if lidar_name not in sample['data']:
                continue
            stuff = get_points_and_boxes_of_1lidar(self.nusc,
                                                   sample['data'][lidar_name],
                                                   self.classes_of_interest,
                                                   self.dataset_cfg.get('POINTS_IN_BOXES_GPU', False),
                                                   self.dataset_cfg.get('THRESHOLD_BOXES_BY_POINTS', 5))
            gt_boxes = stuff['boxes_in_lidar']  # (N_gt, 7)
            gt_names = stuff['boxes_name']  # (N_gt,)
            num_points_in_boxes = stuff['num_points_in_boxes']  # (N_gt,)
            assert gt_boxes.shape[0] == gt_names.shape[0] == num_points_in_boxes.shape[0]
            info = dict()
            info['token'] = sample['token']
            info['lidar_token'] = sample['data'][lidar_name]
            # for evaluation
            info['glob_se3_lidar'] = get_nuscenes_sensor_pose_in_global(self.nusc, info['lidar_token'])
            info['gt_boxes'] = gt_boxes  # (N_gt, 7)
            info['gt_names'] = gt_names  # (N_gt,)
            info['num_points_in_boxes'] = num_points_in_boxes  # (N_gt,)
            info['lidar_path'] = self.nusc.get_sample_data_path(info['lidar_token'])  # legacy from nuscenes_dataset
            # get timestamp
            sample_data_record = self.nusc.get('sample_data', sample['data'][lidar_name])
            info['timestamp'] = sample_data_record['timestamp']
            if sample['scene_token'] in trainval_split['train']:
                train_infos.append(info)
            else:
                val_infos.append(info)
        if len(train_infos) > 0:
            path_train_infos = self.root_path / f"{self._prefix}_v2x_sim_infos_{self.num_historical_sweeps}sweeps_train.pkl"
            with open(path_train_infos, 'wb') as f:
                pickle.dump(train_infos, f)
            self.logger.info(f"v2x-sim {self.dataset_cfg.VERSION} | num samples for training: {len(train_infos)}")
        if len(val_infos) > 0:
            path_val_infos = self.root_path / f"{self._prefix}_v2x_sim_infos_{self.num_historical_sweeps}sweeps_val.pkl"
            with open(path_val_infos, 'wb') as f:
                pickle.dump(val_infos, f)
            self.logger.info(f"v2x-sim {self.dataset_cfg.VERSION} | num samples for val: {len(val_infos)}")

    def evaluation(self, det_annos, class_names, **kwargs):
        """Dispatch to the requested evaluation metric (only 'nuscenes' is supported)."""
        if kwargs['eval_metric'] == 'kitti':
            raise NotImplementedError
        elif kwargs['eval_metric'] == 'nuscenes':
            return self.nuscenes_eval(det_annos, class_names, **kwargs)
        else:
            raise NotImplementedError

    def nuscenes_eval(self, det_annos: List[Dict], class_names, **kwargs):
        """
        Args:
            det_annos: each dict is
                {
                    'metadata': {
                        'token': sample token
                        'lidar_token'
                    }
                    'boxes_lidar': (N, 7) - x, y, z, dx, dy, dz, heading | in LiDAR
                    'score': (N,)
                    'pred_labels': (N,) | int, start from 1
                    'name': (N,) str
                }
        Returns:
            (result_str, result_dict) formatted nuScenes-style metrics.
        """
        nusc_annos = {
            'meta': {'use_camera': False, 'use_lidar': True, 'use_radar': False,
                     'use_map': False, 'use_external': False},
            'results': {}
        }
        # pre-seed every sample so samples without detections still get scored
        for info in self.infos:
            nusc_annos['results'].update(
                {info['lidar_token']: list()}  # NOTE: workaround to eval w.r.t lidar_token
            )
        transform_det_annos_to_nusc_annos(det_annos, nusc_annos)
        output_path = Path(kwargs['output_path'])
        output_path.mkdir(exist_ok=True, parents=True)
        res_path = str(output_path / 'results_nusc.json')
        with open(res_path, 'wb') as f:
            # NOTE(review): predictions are pickled into a file named *.json;
            # nuScenes tooling normally json.dump's here -- confirm that
            # V2XSimDetectionEval really loads this file with pickle.
            pickle.dump(nusc_annos, f)
        self.logger.info(f'The predictions of NuScenes have been saved to {res_path}')
        # config name differs between nuscenes-devkit releases
        try:
            eval_version = 'detection_cvpr_2019'
            eval_config = config_factory(eval_version)
        except:
            eval_version = 'cvpr_2019'
            eval_config = config_factory(eval_version)
        nusc_eval = V2XSimDetectionEval(nusc=self.nusc,
                                        config=eval_config,
                                        result_path=res_path,
                                        eval_set='',
                                        output_dir=output_path,
                                        verbose=True,
                                        dataset_infos=self.infos)
        metrics_summary = nusc_eval.main(plot_examples=0, render_curves=False)
        with open(output_path / 'metrics_summary.json', 'r') as f:
            metrics = json.load(f)
        result_str, result_dict = nuscenes_utils.format_nuscene_results(metrics, self.class_names, version=eval_version)
        return result_str, result_dict

    def __getitem__(self, index):
        """Return one sample dict (points, gt boxes, instance transforms, metadata)."""
        if self._merge_all_iters_to_one_epoch:
            index = index % len(self.infos)
        info = copy.deepcopy(self.infos[index])
        stuff = get_pseudo_sweeps_of_1lidar(self.nusc,
                                            info['lidar_token'],
                                            self.num_historical_sweeps,
                                            self.classes_of_interest,
                                            points_in_boxes_by_gpu=self.dataset_cfg.get('POINTS_IN_BOXES_GPU', False),
                                            threshold_boxes_by_points=self.dataset_cfg.get('THRESHOLD_BOXES_BY_POINTS', 5))
        points = stuff['points']  # (N_pts, 5 + 2) - point-5, sweep_idx, inst_idx
        gt_boxes = stuff['gt_boxes']  # (N_inst, 7)
        gt_names = stuff['gt_names']  # (N_inst,)
        instances_tf = stuff['instances_tf']  # (N_inst, N_sweep, 4, 4)
        # get lidar_id
        lidar_id = self.nusc.get('sample_data', info['lidar_token'])['channel'].split('_')[-1]
        input_dict = {
            'points': points,  # (N_pts, 5 + 2) - point-5, sweep_idx, inst_idx
            'gt_boxes': gt_boxes,  # (N_inst, 7)
            'gt_names': gt_names,  # (N_inst,)
            'instances_tf': instances_tf,
            'frame_id': Path(info['lidar_path']).stem,
            'metadata': {
                'lidar_token': info['lidar_token'],
                'num_sweeps_target': self.num_sweeps,
                'sample_token': info['token'],
                'lidar_id': lidar_id,
            }
        }
        # data augmentation & other stuff
        data_dict = self.prepare_data(data_dict=input_dict)
        return data_dict
if __name__ == '__main__':
    # CLI entry point: build the RSU info pickles for a given dataset version
    cfg_file = './tools/cfgs/dataset_configs/v2x_sim_dataset_rsu.yaml' # NOTE: launch this from OpenPCDet directory
    dataset_cfg = EasyDict(yaml.safe_load(open(cfg_file)))  # NOTE(review): file handle is never closed
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--create_v2x_sim_rsu_infos', action='store_true')
    parser.add_argument('--no-create_v2x_sim_rsu_infos', dest='create_v2x_sim_rsu_infos', action='store_false')
    parser.set_defaults(create_v2x_sim_rsu_infos=False)
    parser.add_argument('--training', action='store_true')
    parser.add_argument('--no-training', dest='training', action='store_false')
    parser.set_defaults(training=True)
    parser.add_argument('--version', type=str, default='v2.0-trainval', help='')
    args = parser.parse_args()
    # repository root, resolved relative to this file's location in the package
    ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
    dataset_cfg.VERSION = args.version
    if args.create_v2x_sim_rsu_infos:
        v2x_dataset = V2XSimDataset_RSU(dataset_cfg,
                                        class_names=dataset_cfg.CLASSES_OF_INTEREST,
                                        training=args.training,
                                        root_path=ROOT_DIR / 'data' / 'v2x-sim',
                                        logger=common_utils.create_logger())
        v2x_dataset.build_v2x_sim_info()

# python -m pcdet.datasets.nuscenes.v2x_sim_dataset --create_v2x_sim_rsu_infos --training --version v2.0-mini
{
"api_name": "dataset.DatasetTemplate",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"l... |
3640824514 | import math
from typing import List
import flair
import torch
import torch.nn as nn
import torch.nn.functional as F
from .data import tokenize, tokenize_batch
from .two_hot_encoding import NGramsEmbedding
class RNNModel(nn.Module):
    """Container module with an encoder, a recurrent module, and a decoder.

    Character n-gram language model: an NGramsEmbedding encoder feeds a
    (multi-layer) LSTM whose hidden states are decoded -- optionally through a
    linear projection -- into logits over the dictionary.
    """

    def __init__(
        self,
        dictionary,
        nlayers: int,
        ngrams: int,
        hidden_size: int,
        unk_t: int,
        nout=None,
        embedding_size: int = 100,
        is_forward_lm=True,
        document_delimiter: str = "\n",
        dropout=0.1,
    ):
        """Build the language model.

        Args:
            dictionary: token dictionary; len(dictionary) is the vocab size.
            nlayers: number of LSTM layers.
            ngrams: n-gram order used by the tokenizer/embedding.
            hidden_size: LSTM hidden size.
            unk_t: unknown-token threshold (stored for tokenization).
            nout: optional projection size between LSTM output and decoder.
            embedding_size: embedding dimension.
            is_forward_lm: whether the LM reads text left-to-right.
            document_delimiter: delimiter inserted between documents.
            dropout: dropout probability.
        """
        super(RNNModel, self).__init__()
        self.ntoken = len(dictionary)
        self.encoder = NGramsEmbedding(len(dictionary), embedding_size)
        self.ngrams = ngrams
        self.unk_t = unk_t
        self.dictionary = dictionary
        self.nlayers = nlayers
        self.is_forward_lm = is_forward_lm
        self.nout = nout
        self.document_delimiter = document_delimiter
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.dropout = dropout
        # nn.LSTM warns when dropout is given for a single layer, so only
        # pass it for stacked LSTMs.
        if nlayers == 1:
            self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers)
        else:
            self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers, dropout=dropout)
        self.drop = nn.Dropout(dropout)
        # Decode from a projection of the hidden state when nout is given,
        # otherwise directly from the hidden state. (A redundant interim
        # `self.decoder = nn.Linear(...)` that was immediately overwritten
        # has been removed.)
        if nout is not None:
            self.proj = nn.Linear(hidden_size, nout)
            self.initialize(self.proj.weight)
            self.decoder = nn.Linear(nout, len(dictionary))
        else:
            self.proj = None
            self.decoder = nn.Linear(hidden_size, len(dictionary))
        self.init_weights()

    @staticmethod
    def initialize(matrix):
        """Fill `matrix` in place with a uniform Glorot-style initialisation."""
        in_, out_ = matrix.size()
        stdv = math.sqrt(3.0 / (in_ + out_))
        matrix.detach().uniform_(-stdv, stdv)

    def init_weights(self):
        """Uniformly initialise embedding/decoder weights; zero the decoder bias."""
        initrange = 0.1
        nn.init.uniform_(self.encoder.weight, -initrange, initrange)
        # Fix: zero the decoder *bias*, not the weight. Previously the weight
        # was zeroed and then immediately re-initialised uniformly (a no-op),
        # leaving the bias at its default init. This now matches
        # TransformerModel.init_weights in this module.
        nn.init.zeros_(self.decoder.bias)
        nn.init.uniform_(self.decoder.weight, -initrange, initrange)

    def forward(self, input, hidden):
        """One LM step.

        Args:
            input: n-gram indices, shape [#ngram, #seq_len, #batch_size].
            hidden: LSTM (h, c) state tuple.
        Returns:
            (logits flattened to [seq_len * batch, ntoken], new hidden state)
        """
        emb = self.encoder(input)
        emb = self.drop(emb)
        output, hidden = self.rnn(emb, hidden)
        decoded = self.decoder(output)
        decoded = decoded.view(-1, self.ntoken)
        return decoded, hidden

    def forward2(self, input, hidden, ordered_sequence_lengths=None):
        """Like forward(), but also returns the (optionally projected) RNN output."""
        encoded = self.encoder(input)
        encoded = self.drop(encoded)
        self.rnn.flatten_parameters()
        output, hidden = self.rnn(encoded, hidden)
        if self.proj is not None:
            output = self.proj(output)
        decoded = self.decoder(
            output.view(output.size(0) * output.size(1), output.size(2))
        )
        return (
            decoded.view(output.size(0), output.size(1), decoded.size(1)),
            output,
            hidden,
        )

    def init_hidden(self, bsz):
        """Return a zeroed (h, c) state for batch size `bsz` on the model's device/dtype."""
        weight = next(self.parameters()).detach()
        return (
            weight.new(self.nlayers, bsz, self.hidden_size).zero_().clone().detach(),
            weight.new(self.nlayers, bsz, self.hidden_size).zero_().clone().detach(),
        )

    def _model_state(self):
        """Serializable dict of the weights plus all constructor arguments."""
        return {
            "state_dict": self.state_dict(),
            "dictionary": self.dictionary,
            "is_forward_lm": self.is_forward_lm,
            "hidden_size": self.hidden_size,
            "nlayers": self.nlayers,
            "embedding_size": self.embedding_size,
            "nout": self.nout,
            "document_delimiter": self.document_delimiter,
            "dropout": self.dropout,
            "ngrams": self.ngrams,
            "unk_t": self.unk_t,
        }

    def __getstate__(self):
        # serialize the language model and the constructor arguments (but nothing else)
        return self._model_state()

    def __setstate__(self, d):
        # special handling for deserializing language models
        if "state_dict" in d:
            # re-initialize language model with constructor arguments
            language_model = RNNModel(
                dictionary=d["dictionary"],
                nlayers=d["nlayers"],
                ngrams=d["ngrams"],
                hidden_size=d["hidden_size"],
                unk_t=d["unk_t"],
                nout=d["nout"],
                embedding_size=d["embedding_size"],
                is_forward_lm=d["is_forward_lm"],
                document_delimiter=d["document_delimiter"],
                dropout=d["dropout"],
            )
            language_model.load_state_dict(d["state_dict"])
            # copy over state dictionary to self
            for key in language_model.__dict__.keys():
                self.__dict__[key] = language_model.__dict__[key]
            # set the language model to eval() by default (this is necessary since FlairEmbeddings "protect" the LM
            # in their "self.train()" method)
            self.eval()
        else:
            self.__dict__ = d

    def save(self, file):
        """Persist the model (weights + constructor args) to `file`."""
        torch.save(self._model_state(), str(file), pickle_protocol=4)

    def get_representation(
        self,
        strings: List[str],
        start_marker: str,
        end_marker: str,
        chars_per_chunk: int = 512,
    ):
        """Return RNN hidden representations for a batch of strings.

        Strings are padded to equal length, wrapped in start/end markers,
        processed in chunks of at most `chars_per_chunk` characters (carrying
        the hidden state across chunks), and the outputs of all chunks are
        concatenated along the sequence axis.
        """
        len_longest_str: int = len(max(strings, key=len))
        # pad strings with whitespaces to longest sentence
        padded_strings: List[str] = []
        for string in strings:
            if not self.is_forward_lm:
                string = string[::-1]  # backward LMs read reversed text
            padded = f"{start_marker}{string}{end_marker}"
            padded_strings.append(padded)
        # cut up the input into chunks of max charlength = chunk_size
        chunks = []
        splice_begin = 0
        longest_padded_str: int = len_longest_str + len(start_marker) + len(end_marker)
        for splice_end in range(chars_per_chunk, longest_padded_str, chars_per_chunk):
            chunks.append([text[splice_begin:splice_end] for text in padded_strings])
            splice_begin = splice_end
        chunks.append(
            [text[splice_begin:longest_padded_str] for text in padded_strings]
        )
        hidden = self.init_hidden(len(chunks[0]))
        batches: List[torch.Tensor] = []
        # push each chunk through the RNN language model
        for chunk in chunks:
            len_longest_chunk: int = len(max(chunk, key=len))
            sequences_as_char_indices: List[torch.Tensor] = []
            for string in chunk:
                chars = list(string) + [" "] * (len_longest_chunk - len(string))
                chars = ["".join(chars)]
                # [ngram, 1, sequence]
                n_gram_char_indices = tokenize_batch(
                    self.dictionary, chars, self.ngrams, otf=True).unsqueeze(dim=1)
                sequences_as_char_indices.append(n_gram_char_indices)
            # [ngram, batch_size, sequence]
            batches.append(torch.cat(sequences_as_char_indices, dim=1))
        output_parts = []
        for batch in batches:
            # [ngram, sequence, batch_size]
            batch = batch.transpose(1, 2).to(flair.device)
            _, rnn_output, hidden = self.forward2(batch, hidden)
            output_parts.append(rnn_output)
        # concatenate all chunks to make final output
        output = torch.cat(output_parts)
        return output
# Temporarily kept in this module; to be relocated later.
class PositionalEncoding(nn.Module):
    r"""Add fixed sinusoidal position information to a sequence of embeddings.

    A non-learned table ``pe`` of shape [max_len, 1, d_model] is precomputed
    once: even embedding indices carry sine values, odd indices carry cosine
    values of geometrically spaced frequencies,

    .. math:
        \text{PosEncoder}(pos, 2i) = sin(pos/10000^(2i/d_model))
        \text{PosEncoder}(pos, 2i+1) = cos(pos/10000^(2i/d_model))

    where ``pos`` is the token position and ``i`` the embedding index.

    Args:
        d_model: the embed dim (required).
        dropout: the dropout value (default=0.1).
        max_len: the max. length of the incoming sequence (default=5000).

    Examples:
        >>> pos_encoder = PositionalEncoding(d_model)
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        frequencies = torch.exp(
            torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)
        )
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * frequencies)
        table[:, 1::2] = torch.cos(positions * frequencies)
        # registered as a buffer so it moves with the module but is not trained
        self.register_buffer("pe", table.unsqueeze(1))

    def forward(self, x):
        r"""Add positional encodings to ``x`` and apply dropout.

        Shape:
            x: [sequence length, batch size, embed dim]
            output: [sequence length, batch size, embed dim]

        Examples:
            >>> output = pos_encoder(x)
        """
        return self.dropout(x + self.pe[: x.size(0), :])
class TransformerModel(nn.Module):
    """Container module with an encoder, a recurrent or transformer module, and a decoder.

    Transformer-encoder character n-gram language model: NGramsEmbedding ->
    sinusoidal positional encoding -> TransformerEncoder (with an optional
    causal mask) -> linear decoder over the dictionary.
    """

    def __init__(
        self,
        dictionary,
        embedding_size,
        nhead,
        nhid,
        nlayers,
        ngrams,
        unk_t,
        is_forward_lm=True,
        document_delimiter="\n",
        dropout=0.5,
    ):
        """Build the transformer LM; `nhid` is the encoder feed-forward size."""
        super(TransformerModel, self).__init__()
        try:
            from torch.nn import TransformerEncoder, TransformerEncoderLayer
        except:
            raise ImportError(
                "TransformerEncoder module does not exist in PyTorch 1.1 or lower."
            )
        self.ntoken = len(dictionary)
        self.dictionary = dictionary
        self.is_forward_lm = is_forward_lm
        self.ngrams = ngrams
        self.unk_t = unk_t
        self.nlayers = nlayers
        self.document_delimiter = document_delimiter
        self.dropout = dropout
        self.hidden_size = nhid
        self.nhead = nhead
        self.model_type = "Transformer"
        # causal mask, built lazily on the first forward pass with has_mask=True
        self.src_mask = None
        self.pos_encoder = PositionalEncoding(embedding_size, dropout)
        encoder_layers = TransformerEncoderLayer(
            embedding_size, nhead, self.hidden_size, dropout
        )
        self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
        self.encoder = NGramsEmbedding(self.ntoken, embedding_size)
        self.embedding_size = embedding_size
        self.decoder = nn.Linear(embedding_size, self.ntoken)
        self.init_weights()

    def _generate_square_subsequent_mask(self, sz):
        """Return an additive causal mask [sz, sz]: 0 on/below the diagonal, -inf above."""
        mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
        mask = (
            mask.float()
            .masked_fill(mask == 0, float("-inf"))
            .masked_fill(mask == 1, float(0.0))
        )
        return mask

    def init_weights(self):
        """Uniformly initialise embedding/decoder weights and zero the decoder bias."""
        initrange = 0.1
        nn.init.uniform_(self.encoder.weight, -initrange, initrange)
        nn.init.zeros_(self.decoder.bias)
        nn.init.uniform_(self.decoder.weight, -initrange, initrange)

    def forward(self, src, has_mask=True):
        """Run the transformer LM and return log-probabilities over the dictionary."""
        if has_mask:
            device = src.device
            # NOTE(review): the mask is sized with src.size(1) but cached/
            # re-checked against len(src) (= src.size(0)); confirm which axis
            # is the sequence dimension here.
            if self.src_mask is None or self.src_mask.size(0) != len(src):
                mask = self._generate_square_subsequent_mask(src.size(1)).to(device)
                self.src_mask = mask
        else:
            self.src_mask = None

        # scale embeddings by sqrt(d_model) before adding positional encodings
        src = self.encoder(src) * math.sqrt(self.embedding_size)
        src = self.pos_encoder(src)
        output = self.transformer_encoder(src, self.src_mask)
        output = self.decoder(output)
        return F.log_softmax(output, dim=-1)

    def __getstate__(self):
        # serialize the language models and the constructor arguments (but nothing else)
        model_state = {
            "state_dict": self.state_dict(),
            "dictionary": self.dictionary,
            "is_forward_lm": self.is_forward_lm,
            "hidden_size": self.hidden_size,
            "nlayers": self.nlayers,
            "embedding_size": self.embedding_size,
            "document_delimiter": self.document_delimiter,
            "dropout": self.dropout,
            "ngrams": self.ngrams,
            "unk_t": self.unk_t,
            "nhead": self.nhead
        }

        return model_state

    def __setstate__(self, d):
        # special handling for deserializing language models
        if "state_dict" in d:

            # re-initialize language model with constructor arguments
            language_model = TransformerModel(
                dictionary=d["dictionary"],
                nlayers=d["nlayers"],
                ngrams=d["ngrams"],
                nhid=d["hidden_size"],
                unk_t=d["unk_t"],
                # nout=d["nout"],
                embedding_size=d["embedding_size"],
                is_forward_lm=d["is_forward_lm"],
                document_delimiter=d["document_delimiter"],
                dropout=d["dropout"],
                nhead=d["nhead"]
            )

            language_model.load_state_dict(d["state_dict"])

            # copy over state dictionary to self
            for key in language_model.__dict__.keys():
                self.__dict__[key] = language_model.__dict__[key]

            # set the language model to eval() by default (this is necessary since FlairEmbeddings "protect" the LM
            # in their "self.train()" method)
            self.eval()

        else:
            self.__dict__ = d

    def save(self, file):
        """Persist the model (weights + constructor args) to `file`."""
        # TODO: Can we make this flair compatible?
        model_state = {
            "state_dict": self.state_dict(),
            "dictionary": self.dictionary,
            "is_forward_lm": self.is_forward_lm,
            "hidden_size": self.hidden_size,
            "nlayers": self.nlayers,
            "embedding_size": self.embedding_size,
            # "nout": self.nout,
            "document_delimiter": self.document_delimiter,
            "dropout": self.dropout,
            "ngrams": self.ngrams,
            "unk_t": self.unk_t,
            "nhead": self.nhead
        }
        torch.save(model_state, str(file), pickle_protocol=4)

    def forward2(self, input, ordered_sequence_lengths=None):
        """Like forward(), but returns (decoded logits, encoder output) without log-softmax."""
        encoded = self.encoder(input) * math.sqrt(self.embedding_size)
        encoded = self.pos_encoder(encoded)
        output = self.transformer_encoder(encoded, self.src_mask)
        decoded = self.decoder(output)
        # if self.proj is not None:
        #     output = self.proj(output)

        # decoded = self.decoder(
        #     output.view(output.size(0) * output.size(1), output.size(2))
        # )
        return decoded, output

    def get_representation(
        self,
        strings: List[str],
        start_marker: str,
        end_marker: str,
        chars_per_chunk: int = 512,
    ):
        """Return transformer outputs for a batch of strings.

        Strings are padded to equal length, wrapped in start/end markers,
        processed in chunks of at most `chars_per_chunk` characters, and the
        outputs of all chunks are concatenated along the sequence axis.
        """
        len_longest_str: int = len(max(strings, key=len))

        # pad strings with whitespaces to longest sentence
        padded_strings: List[str] = []

        for string in strings:
            if not self.is_forward_lm:
                string = string[::-1]

            padded = f"{start_marker}{string}{end_marker}"
            padded_strings.append(padded)

        # cut up the input into chunks of max charlength = chunk_size
        chunks = []
        splice_begin = 0
        longest_padded_str: int = len_longest_str + len(start_marker) + len(end_marker)
        for splice_end in range(chars_per_chunk, longest_padded_str, chars_per_chunk):
            chunks.append([text[splice_begin:splice_end] for text in padded_strings])
            splice_begin = splice_end

        chunks.append(
            [text[splice_begin:longest_padded_str] for text in padded_strings]
        )

        batches: List[torch.Tensor] = []

        # push each chunk through the RNN language model
        for chunk in chunks:
            len_longest_chunk: int = len(max(chunk, key=len))
            sequences_as_char_indices: List[torch.Tensor] = []
            for string in chunk:
                chars = list(string) + [" "] * (len_longest_chunk - len(string))
                chars = ["".join(chars)]

                # [ngram, 1, sequence]
                n_gram_char_indices = tokenize_batch(
                    self.dictionary, chars, self.ngrams, otf=True, device=flair.device
                ).unsqueeze(dim=1)

                sequences_as_char_indices.append(n_gram_char_indices)

            # [ngram, batch_size, sequence]
            batches.append(torch.cat(sequences_as_char_indices, dim=1))

        output_parts = []
        for batch in batches:
            # [ngram, sequence, batch_size]
            batch = batch.transpose(1, 2)

            _, transformer_output = self.forward2(batch)
            output_parts.append(transformer_output)

        # concatenate all chunks to make final output
        output = torch.cat(output_parts)

        return output
| HallerPatrick/two_hot_encoding | multihot/model.py | model.py | py | 17,797 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "two_hot_encoding.NGramsEmbedding",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torch.... |
import discord

# NOTE(review): discord.Client() is created without arguments; newer
# discord.py versions require an `intents=` argument -- confirm the pinned
# library version before upgrading.
client = discord.Client()
@client.event
async def on_ready():
    """After login, list every visible channel, then repeatedly dump the full
    message history of a channel chosen by id on stdin (oldest first)."""
    print('We have logged in as {0.user}'.format(client))
    for guild in client.guilds:
        for channel in guild.channels:
            print(channel, channel.id)
    # NOTE(review): input() is a blocking call inside the async event handler;
    # the bot cannot process other gateway events while waiting here.
    while True:
        channel = client.get_channel(int(input("Entrez l'id du channel...")))
        print("\n\n===============================================\n\n")
        print("MESSAGES DU SALON", channel.name)
        # fetch the channel history; reversed below so oldest messages print first
        messages = await channel.history().flatten()
        messages.reverse()
        for mes in messages:
            print("[" + mes.author.name, mes.created_at.strftime("%d/%m/%Y, %H:%M:%S") + "] ", mes.content)
        print("\n\n===============================================\n\n")
client.run('SECRET-BOT_TOCKEN')  # placeholder: replace with the real bot token before running
| antoinech2/DiscordMessageLogBot | bot.py | bot.py | py | 789 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "discord.Client",
"line_number": 3,
"usage_type": "call"
}
] |
25619711613 | # importing necessary libraries
import numpy as np
from scipy import linalg
import pickle
import sys
import getopt
# defining a function to run the Randomized Kaczmarz (RK) algorithm
def run_RK(A, E, F, b, e, n_run, n_iter, eta, sigma_A, sigma_b, folder):
    """
    Run the Randomized Kaczmarz (RK) algorithm on the noisy system and pickle
    the per-iteration squared distances to x_ls plus the theoretical bound.

    The arguments for the run_RK() function are the following:
    A : The non noisy matrix
    E : The multiplicative left-hand noise to the matrix A
    F : The multiplicative right-hand noise to the matrix A
    b : The non noisy right-hand side vector (Ax=b)
    e : The noise added to the vector b
    n_run : number of runs of the RK algorithm to be performed
    n_iter : number of iterations in each run
    eta : The learning rate
    sigma_A : The left-hand noise magnitude
    sigma_b : The right-hand noise magnitude
    folder : Folder to store results

    Relies on the module-level globals m, n (problem dimensions) and x_ls
    (the least-squares solution), which main() sets before calling this.
    """
    noiseE = sigma_A*E
    noiseF = sigma_A*F
    noiseb = sigma_b*e
    # A_tld = (I + sigma_A*E) A (I + sigma_A*F)
    A_tld = np.linalg.multi_dot([np.identity(m)+noiseE, A, np.identity(n)+noiseF]) # The noisy matrix
    b_tld = b+noiseb # The noisy right-hand side vector
    print("cond(A_tld): ",np.linalg.cond(A_tld))
    # computing R_tld (drives the (1 - 1/R)^k contraction factor below)
    R = (np.linalg.norm(np.linalg.pinv(A_tld), ord=2)*np.linalg.norm(A_tld, 'fro'))**2
    # compute the min singular value of A_tld: lambda_min
    U, s, Vt = np.linalg.svd(A_tld)
    lambda_min = min(s)
    # computing the horizon of the theoretical bound
    horizon = (np.linalg.norm( (noiseE @ A + A @ noiseF + noiseE @ A @ noiseF) @ x_ls - noiseb, ord=2) / lambda_min)**2
    # generating probabilities of choosing the rows
    # (each row sampled proportionally to its squared norm)
    probas = []
    frob_norm_Atld = np.linalg.norm(A_tld, ord='fro')
    for i in range(A_tld.shape[0]):
        probas.append((np.linalg.norm(A_tld[i], ord=2)**2)/(frob_norm_Atld**2))
    # lists to store results
    distance_to_x_ls = [[] for i in range(int(n_run))]
    bound = [[] for i in range(int(n_run))]
    # generate starting point x_0, must be in the column space of Atld^T
    x_0 = np.transpose(A_tld) @ np.random.normal(size=(m,))
    # runing the RK algorithm
    for r in range(int(n_run)):
        # starting point (shared by all runs)
        x = x_0
        distance_to_x_ls[r].append(np.linalg.norm(x - x_ls)**2)
        if r==0:
            bound[r].append(np.linalg.norm(x_0 - x_ls, ord=2)**2+ horizon)
        for i in range(int(n_iter)):
            #row_idx = np.random.randint(0,m)
            row_idx = int(np.random.choice(m, 1, p=probas))
            # skip zero rows to avoid dividing by a zero norm
            if np.linalg.norm(A_tld[row_idx,:])==0:
                continue
            else:
                x = x + eta*(b_tld[row_idx] - np.dot(A_tld[row_idx,:],x))/((np.linalg.norm(A_tld[row_idx,:], ord=2))**2)*A_tld[row_idx,:]
                distance_to_x_ls[r].append(np.linalg.norm(x - x_ls)**2)
                # the theoretical bound only needs to be traced once
                if r==0:
                    bound[r].append(((1-1/R)**(i+1))*(np.linalg.norm(x_0 - x_ls, ord=2)**2) + horizon)
        print("end run", r)
    # saving results
    name = folder + 'sigmaA_{}_sigmab_{}'.format(sigma_A,sigma_b)
    with open(name+'_distance_to_x_ls', "wb") as fp: pickle.dump(distance_to_x_ls,fp)
    with open(name+'_bound', "wb") as fp: pickle.dump(bound,fp)
def main(argv):
    """Build a synthetic 500x300 least-squares problem, generate the noises,
    and run the RK experiments selected on the command line.

    Options: --n_iter N  --n_run N  --eta F  --zeroF True|False  --zeroE True|False
    """
    n_iter = None
    n_run = None
    eta = None
    zeroF = False
    zeroE = False
    opts = []  # keep defined even if option parsing fails below
    try:
        opts, args = getopt.getopt(argv[1:], '', ["n_iter=", "n_run=", "eta=", "zeroF=", "zeroE="])
    except getopt.GetoptError:
        print("Error")
    for opt, arg in opts:
        if opt == '--n_iter':
            n_iter = arg
        elif opt == '--n_run':
            n_run = arg
        elif opt == '--eta':
            eta = float(arg)
        elif opt == '--zeroF':
            # getopt yields option values as strings; the original compared
            # them with the boolean True, which can never match, leaving the
            # ZeroF/ZeroE branches below unreachable.
            zeroF = arg.lower() in ('true', '1', 'yes')
        elif opt == '--zeroE':
            zeroE = arg.lower() in ('true', '1', 'yes')
    # generating matrix A with a prescribed spectrum: A = U diag(s) V^T
    global m,n
    m = 500 # number of rows of A
    n = 300 # number of columns of A
    r = 300 # rank of A
    sigma_min = 1 # minimum singular value of A
    sigma_max = 10 # maximum singular value of A
    singular_values = np.linspace(sigma_min,sigma_max,r)
    U = linalg.orth(np.random.randn(m,r))
    V = linalg.orth(np.random.randn(n,r))
    D = np.diag(singular_values)
    A = np.linalg.multi_dot([U, D, np.transpose(V)])
    print("cond(A): ",np.linalg.cond(A))
    # generating vector b so the noiseless system A x = b is consistent
    x_opt = np.random.randn(n,)
    b = np.dot(A,x_opt)
    # checking consistency of noise free system
    print("rank(A) ",np.linalg.matrix_rank(A))
    aug = np.column_stack((A,b))
    print("rank(A|b) ",np.linalg.matrix_rank(aug))
    print("Noisless linear system is consistent: ", np.linalg.matrix_rank(A)==np.linalg.matrix_rank(aug))
    # compute x_ls (read globally by run_RK)
    global x_ls
    x_ls = np.dot(np.linalg.pinv(A),b)
    # generating the noises
    E = np.random.normal(loc=[0.]*m, scale=[1.]*m, size=(m,m))
    F = np.random.normal(loc=[0.]*n, scale=[1.]*n, size=(n,n))
    e = np.random.randn(m)
    # save generated data and noise
    with open("Results/multiplicative_noise/Matrix_A", "wb") as fp: pickle.dump(A,fp)
    with open("Results/multiplicative_noise/vector_b", "wb") as fp: pickle.dump(b,fp)
    with open("Results/multiplicative_noise/noiseE", "wb") as fp: pickle.dump(E,fp)
    with open("Results/multiplicative_noise/noiseF", "wb") as fp: pickle.dump(F,fp)
    with open("Results/multiplicative_noise/e", "wb") as fp: pickle.dump(e,fp)
    # running RK with different noise magnitudes; the invalid combination is
    # rejected first (in the original it sat in an unreachable elif branch)
    if zeroF and zeroE:
        print("ERROR, E or F should not be zero")
    elif zeroF: # case F = 0
        run_RK(A, E, np.zeros((n,n)), b, e, n_run, n_iter, eta, 1, 1, 'Results/multiplicative_noise/ZeroF/')
        run_RK(A, E, np.zeros((n,n)), b, e, n_run, n_iter, eta, 0.5, 0.5, 'Results/multiplicative_noise/ZeroF/')
        run_RK(A, E, np.zeros((n,n)), b, e, n_run, n_iter, eta, 0.05, 0.05, 'Results/multiplicative_noise/ZeroF/')
        run_RK(A, E, np.zeros((n,n)), b, e, n_run, n_iter, eta, 0.1, 0.1, 'Results/multiplicative_noise/ZeroF/')
        run_RK(A, E, np.zeros((n,n)), b, e, n_run, n_iter, eta, 10, 10, 'Results/multiplicative_noise/ZeroF/')
        run_RK(A, E, np.zeros((n,n)), b, e, n_run, n_iter, eta, 0.005, 0.005, 'Results/multiplicative_noise/ZeroF/')
    elif zeroE: # case E = 0
        run_RK(A, np.zeros((m,m)), F, b, e, n_run, n_iter, eta, 0.005, 0.005, 'Results/multiplicative_noise/ZeroE/')
        run_RK(A, np.zeros((m,m)), F, b, e, n_run, n_iter, eta, 0.01, 0.01, 'Results/multiplicative_noise/ZeroE/')
        run_RK(A, np.zeros((m,m)), F, b, e, n_run, n_iter, eta, 0.5, 0.5, 'Results/multiplicative_noise/ZeroE/')
    else: # general case
        run_RK(A, E, F, b, e, n_run, n_iter, eta, 0.005, 0.005, 'Results/multiplicative_noise/GeneralCase/')
        run_RK(A, E, F, b, e, n_run, n_iter, eta, 0.01, 0.01, 'Results/multiplicative_noise/GeneralCase/')
        run_RK(A, E, F, b, e, n_run, n_iter, eta, 0.1, 0.1, 'Results/multiplicative_noise/GeneralCase/')
if __name__ == "__main__":
main(sys.argv)
| SoumiaBouch/doubly_Noisy_Randomized_Kaczmarz | RK_multiplicative_noise.py | RK_multiplicative_noise.py | py | 7,176 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.linalg.multi_dot",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "numpy.identity",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.... |
25547237634 | from django.db import models
class Sauceproduct(models.Model):
    """Join table linking a sauce to a product, with an optional description.

    Both foreign keys are nullable and survive deletion of the related row
    (SET_NULL), so either side of the link may be missing.
    """
    sauces = models.ForeignKey(
        'sauce.Sauce', on_delete=models.SET_NULL, null=True, blank=True)
    product = models.ForeignKey(
        'products.Product', on_delete=models.SET_NULL, null=True, blank=True)
    description = models.CharField(max_length=100, null=True, blank=True)
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        # __str__ must return a str; the original returned the related Sauce
        # instance itself (or None, since the FK is nullable), which raises
        # TypeError wherever Django stringifies the object (admin, shell...).
        return str(self.sauces)
| Jeffer-UAO/backend-menuinteractivo | saucesproduct/models.py | models.py | py | 464 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.models.Model",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": ... |
37371783451 | # Send a heartbeat every 10 minutes to Slack
# This uses the slack-cli library: https://github.com/rockymadden/slack-cli
from requests import get
import time
import subprocess
import json
import datetime
def get_date_time():
    """Return the current local date and time as a human-readable string."""
    now = datetime.datetime.now()
    return str(now)
def send_slack():
    """Post the initial heartbeat message to the #pi channel via the slack CLI.

    Returns:
        (timestamp, channel): identifiers of the posted message, needed later
        to update that same message in place.

    Raises:
        subprocess.CalledProcessError: if the slack CLI exits non-zero.
        AssertionError: if the Slack API response is not "ok".
    """
    query = ["../../slack", "chat", "send", "-tx", f'Heartbeat start: {get_date_time()}', "-ch", '#pi']
    try:
        res_json = subprocess.check_output(query)  # Call slack process
        res = json.loads(res_json)  # Parse json
        print(res)
        assert res["ok"]  # Make sure we got ok as response
        timestamp = res["ts"]
        channel = res["channel"]
        return timestamp, channel
    except subprocess.CalledProcessError:
        print("Subprocess call failed!")
        print(f"Query: {' '.join(query)}")
        # Re-raise the caught exception. The original did
        # `raise subprocess.CalledProcessError()`, which itself raises
        # TypeError because the constructor requires (returncode, cmd).
        raise
    except AssertionError:
        print("Assertion 'ok == True' failed...")
        print(f"Query: {' '.join(query)}")
        print(res)
        raise
def update_slack(timestamp, channel):
    """Edit the previously posted heartbeat message in place.

    Args:
        timestamp: the "ts" of the message returned by send_slack().
        channel: the channel id the message was posted to.

    Raises:
        subprocess.CalledProcessError: if the slack CLI exits non-zero.
        AssertionError: if the Slack API response is not "ok".
    """
    query = ["../../slack", "chat", "update", "-tx", f'Heartbeat update: {get_date_time()}', "-ch", f"{channel}", "-ts", f'{timestamp}']
    try:
        res_json = subprocess.check_output(query)  # Call slack process
        res = json.loads(res_json)  # Parse json
        assert res["ok"]  # Make sure we got ok as response
    except subprocess.CalledProcessError:
        print("Subprocess call failed!")
        print(f"Query: {' '.join(query)}")
        # Re-raise the caught exception; CalledProcessError() with no
        # arguments would itself raise TypeError (see send_slack).
        raise
    except AssertionError:
        print("Assertion 'ok == True' failed...")
        print(f"Query: {' '.join(query)}")
        print(res)
        raise
def main():
    """Post one heartbeat message, then keep refreshing it forever."""
    print("Sending first message to Slack...")
    ts, ch = send_slack()
    print("Done sending first message. Sleeping...")
    while True:
        # Ten minutes between refreshes of the same Slack message.
        time.sleep(600)
        print("Updating Slack...")
        update_slack(ts, ch)
        print("Done updating Slack! Sleeping...")
if __name__ == "__main__":
main() | Kladdy/pi-python | heartbeat/heartbeat.py | heartbeat.py | py | 2,175 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "subprocess.check_output",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "... |
73518710823 | import sys
import socket
import time
from variables import ACTION, PRESENCE, TIME, USER, ACCOUNT_NAME, RESPONSE
from utils import port_check, addres, get_message, send_message
def create_presence(name: str = 'Guest'):
    """Build a JIM "presence" service message for the given account name."""
    return {
        ACTION: PRESENCE,
        TIME: time.time(),
        USER: {ACCOUNT_NAME: name},
    }
def answer(message):
    """Translate a server response dict into a human-readable status line.

    Raises:
        ValueError: if the message carries no RESPONSE field, or the
        response code is not one of the known ones.
    """
    if RESPONSE not in message:
        # The original fell through here and implicitly returned None,
        # silently accepting malformed server replies.
        raise ValueError
    if message[RESPONSE] == 200:
        return '200 : OK'
    if message[RESPONSE] == 400:
        return '400 : BAD REQUEST'
    raise ValueError
def main():
    """Connect to the server, send a presence message and print the reply."""
    addr = addres(sys.argv)
    port = port_check(sys.argv)
    messenger = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    messenger.connect((addr, port))
    send_message(messenger, create_presence())
    try:
        ans = get_message(messenger)
        mes = answer(ans)
        print(mes)
    except ValueError:
        # answer() raises ValueError for unknown/unsupported response codes.
        print('Не удалось прочитать сообщение сервера')
if __name__ == '__main__':
main()
| Kederly84/async_chat_python | HomeWork4/client.py | client.py | py | 1,068 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "variables.ACTION",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "variables.TIME",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "variables.USER",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "variables.PRESENCE",
... |
37529358270 | # Clase en vídeo: https://youtu.be/_y9qQZXE24A?t=12475
### Products API ###
from fastapi import APIRouter
# Router for all /products endpoints.
router = APIRouter(prefix="/products",
                   tags=["products"],
                   responses={404: {"message": "No encontrado"}})

# In-memory stand-in for a product store.
products_list = ["Producto 1", "Producto 2",
                 "Producto 3", "Producto 4", "Producto 5"]


@router.get("/")
async def products():
    """Return the full list of products."""
    return products_list


@router.get("/{id}")
async def product_by_id(id: int):
    """Return a single product by its zero-based index.

    The original defined this handler with the same Python name as the one
    above, shadowing it at module level; FastAPI routes by decorator, so
    giving it a distinct name changes nothing for clients.
    Note: an out-of-range id raises IndexError (answered as a 500).
    """
    return products_list[id]
| mouredev/Hello-Python | Backend/FastAPI/routers/products.py | products.py | py | 505 | python | es | code | 17,209 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 7,
"usage_type": "call"
}
] |
72854617705 | import gzip
import json
import networkx as nx
def writeobj(filename, obj):
    """Serialize *obj* as pretty-printed JSON into a gzip-compressed file."""
    # The file is opened in binary mode, so the JSON text must be encoded;
    # writing a str to a 'wb' gzip handle raises TypeError on Python 3.
    with gzip.open(filename, 'wb') as f:
        f.write(json.dumps(obj, indent=4).encode('utf-8'))
def readobj(filename):
    """Load and return a JSON value from a gzip-compressed file."""
    with gzip.open(filename) as handle:
        raw = handle.read()
    return json.loads(raw)
def mkdict(obj, key='id'):
    """Index a sequence of mappings by the value each holds under *key*."""
    return {item[key]: item for item in obj}
def cosponsors():
    """Build an undirected graph of bill cosponsorship.

    Nodes are legislator names; an edge (cosponsor, author) carries a
    'weight' counting how many bills of that author the person cosponsored.
    Reads bills-113.json.gz, people.json.gz and cosponsorship.json.gz from
    the current directory.
    """
    ret = nx.Graph()
    bills = mkdict(readobj('bills-113.json.gz'))
    people = mkdict(readobj('people.json.gz'))
    cosponsorships = readobj('cosponsorship.json.gz')
    for bill in cosponsorships:
        bill_id = bill['bill']
        # skip cosponsorship records for bills we have no data on
        if bill_id not in bills:
            continue
        author_id = bills[bill_id]['sponsor']['id']
        author = people[author_id]['name']
        cosponsor = people[bill['person']]['name']
        # accumulate a weight per (cosponsor, author) pair
        if ret.has_edge(cosponsor, author):
            ret[cosponsor][author]['weight'] += 1
        else:
            ret.add_edge(cosponsor, author, weight=1)
    return ret
| maksim2042/PyCon2013_SNA | src/govtrack/net.py | net.py | py | 1,002 | python | en | code | 18 | github-code | 36 | [
{
"api_name": "gzip.open",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "gzip.open",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 12,
... |
9262854569 | import time, yaml
from operator import itemgetter
from .keys import *
from .helpers import *
def flags_compile(flags):
    """Fold a run of consecutive flag actions into one combined 'flags' action.

    The combined action keeps the 'from' of the first flag and carries the
    whole original list under 'flags'.
    """
    return {
        "id": f"compile_flags_{len(flags)}",
        "do": "flags",
        "from": flags[0]["from"],
        "flags": flags,
    }
class ActionTimer:
    """Accumulates per-action wall-clock durations into a shared dict."""

    def __init__(self, times):
        # `times` maps action id -> elapsed seconds; shared with the caller.
        self.times = times
        self.reset()

    def reset(self):
        """Restart timing from the current instant."""
        now = time.perf_counter()
        self.start = now
        self.end = now

    def update(self, action):
        """Record the time elapsed since the last mark under the action's id."""
        now = time.perf_counter()
        self.times[action['id']] = now - self.start
        self.end = now
        self.start = now
#
# CQML Virtual Machine
#
class VM:
    """Executes a compiled list of CQML actions against a Spark session.

    The constructor receives a parsed YAML document ({"actions": {...}});
    compile() linearizes it into `cactions`, expanding macros and folding
    consecutive flag actions. perform() dispatches each action to a
    `do_<name>` method (provided elsewhere, e.g. by the helpers module).
    """
    def __init__(self, yaml, spark):
        # NOTE: the `yaml` parameter is the parsed document (a dict), not the
        # yaml module; it only shadows the module name inside __init__.
        self.spark = spark
        self.yaml = yaml
        self.actions = yaml["actions"]
        self.cactions = []
        self.macros = {}
        self.compile(self.actions)
        self.last = self.cactions[0]
        self.debug = False
        self.skip_errors = False
        self.df = {}     # action id -> resulting frame (or dict)
        self.sizes = {}  # action id -> row count
        self.times = {}  # action id -> seconds spent
        self.pkg = None
    def key_actions(self, key):
        """Map action id -> action[key] for non-skipped actions that have `key`."""
        return {a['id']: a[key] for a in self.cactions if (key in a) and (kSkip not in a)}
    def saveable(self):
        """Return the non-skipped actions that request saving."""
        return self.key_actions(kSave)
    def save(self, id, df, type):
        """Register `df` under `id` and prepend a (skipped) save action for it."""
        size = df.count()
        action = {
            "id": id,
            "do": kSave,
            "size": size,
            kSkip: True
        }
        # Only mark a save type when there is actually something to save.
        if size > 0:
            action[kSave] = type
        self.cactions[:0] = [action]
        self.df[id] = df
        return df
    def log(self, str, name=False):
        # `str` kept as the parameter name for interface compatibility,
        # even though it shadows the builtin.
        if self.debug:
            if name: print(name)
            print(str)
    def macro(self, todo, action):
        """Define ('macro|<do>') or expand ('macro.<name>') a YAML macro."""
        mdef = todo.split("|")
        mcall = todo.split(".")
        if len(mdef) > 1:
            # Definition: store the action (with its real 'do') as a template.
            action['do'] = mdef[1]
            self.macros[action['id']] = yaml.dump(action)
            return 0
        # Expansion: format the stored template with this action's fields.
        key = mcall[1]
        template = self.macros[key]
        expanded = template.format(**action)
        parsed = yaml.safe_load(expanded)
        parsed['id'] = action['id']
        self.cactions.append(parsed)
        return 1
    def reload(self, yaml_file):
        """Re-read actions from a YAML file and compile them in."""
        with open(yaml_file) as data:
            raw_yaml = yaml.full_load(data)
        self.actions = raw_yaml["actions"]
        self.log(self.actions)
        self.compile(self.actions)
    def compile(self, action_dict):
        """Linearize the action dict into cactions, folding '$id' flag runs."""
        flags = []
        for id, action in action_dict.items():
            action['id'] = id
            todo = action['do']
            if (todo=='flag') and (action['from'] == "$id"):
                flags.append(action)
            elif len(flags) > 0:
                # A non-flag action terminates the pending run of flags.
                self.cactions.append(flags_compile(flags))
                self.cactions.append(action)
                flags = []
            elif 'macro' in todo:
                self.macro(todo, action)
            else:
                self.cactions.append(action)
        # Flush a trailing run of flags, if any.
        if len(flags) > 0:
            self.cactions.append(flags_compile(flags))
        return self.cactions
    def run(self, only=None):
        """Perform every non-skipped compiled action; return the last result."""
        out = None
        timer = ActionTimer(self.times)
        for action in self.cactions:
            # Fixed: the original f-string contained the literal text
            # "action['do']" instead of interpolating it.
            self.log(f"run[{action['id']}]: {action['do']}")
            if kSkip not in action:
                out = self.perform(action)
                timer.update(action)
        return out
    def get_key(self, name):
        """Resolve '$field' indirections through the last performed action."""
        key = self.last[name[1:]] if name[0] == '$' else name
        return key
    def get_frame(self, name):
        key = self.get_key(name)
        return self.df[key]
    def set_frame(self, name, frame):
        key = self.get_key(name)
        self.df[key] = frame
    def test_action(self, n, show=False):
        """Perform the n-th compiled action by index (debug helper)."""
        action = self.cactions[n]
        id = action['id']
        # Fixed: the original called the module-level name `test_id(self, ...)`,
        # which does not exist and raised NameError; the method was intended.
        return self.test_id(id, show)
    def test_id(self, id, show=False):
        """Perform the action with the given id and return its frame (debug helper)."""
        action = self.actions[id]
        self.log(f"# {id}: {action}")
        self.perform(action)
        df = self.df[id]
        if show and not isinstance(df, dict):
            df.show()
        return df
    def ensure_unique(self, df, key):
        """Raise (unless skip_errors) if `df` has duplicate values in `key`."""
        selected = df.select(key)
        unique = selected.distinct()
        n_dupes = selected.count() - unique.count()
        if (n_dupes != 0):
            msg = f"ensure_unique.{key}[{n_dupes}] = {selected.count()} - {unique.count()}"
            self.log(msg)
            if not self.skip_errors:
                raise Exception("FAIL."+msg)
        return df
    def perform(self, action):
        """Dispatch one action to its do_<name> method and post-process the frame."""
        id, do = itemgetter('id', 'do')(action)
        print(f'*perform[{do}]: {id}')
        method = getattr(self, f'do_{do}')
        df = method(action)
        # dict results (e.g. metadata) bypass the DataFrame post-processing.
        if not isinstance(df, dict):
            self.sizes[id] = df.count()
            df = df if kKeepIndistinct in action else df.distinct()
            df = self.ensure_unique(df, action[kUniq]) if kUniq in action else df
            df = df.sort(get_sort(action), ascending=False)
        self.set_frame(id, df)
        self.last = action
        return df
| TheSwanFactory/cqml | src/cqml/vm.py | vm.py | py | 5,297 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.perf_counter",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "yaml.dump",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "yaml.safe_load",
"l... |
40370154292 | import pyautogui
import socket
import logging
#import sys
import threading
import time
#todo: might need refactoring
def log(msg):
    """Emit *msg* at INFO level through the shared "log" logger."""
    # getLogger returns the same named logger instance on every call.
    logging.getLogger("log").info(msg)
class MySocket(object):
    """Blocking TCP server that turns length-prefixed text commands of the
    form "<len>,<command>" into media-key presses via pyautogui."""
    def __init__(self, host, port, maxlen):
        log("Initializing socket")
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server_address = (host, port)
        self.max_connections = 5
        # maximum number of bytes read per recv() call
        self.MAXLEN = maxlen
        #this is used in Loops to indicate
        #if the program should stop or keep listening
        self.listen=True
    def start_listening(self):
        """Bind, listen, and serve connections one at a time until stopped."""
        log("Starting up on %s port %s" % self.server_address)
        self.sock.bind(self.server_address)
        self.sock.listen(self.max_connections)
        #main loop
        while self.listen:
            log('waiting for a new connection')
            clientsocket, address= self.sock.accept()
            log( 'connection from %s at %s '% address)
            self.process_connection(clientsocket, address)
            log('finished processing connection :)')
        log('listening stopped!')
    # Reads one command from the client, presses the matching media key and
    # replies with an ACK ("0"); the connection is always closed on exit.
    def process_connection(self, connection, addr):
        try:
            msg = ""
            # NOTE(review): this method appears to target Python 2 — on
            # Python 3, recv() returns bytes, so `msg += data` and
            # sendall("0") would both raise TypeError. Confirm the runtime.
            while True:
                data = connection.recv(self.MAXLEN)
                msg+=data
                log('received "%s"' % data)
                # NOTE(review): `pass` means incomplete messages still fall
                # through to process_msg below; `continue` was probably
                # intended here to keep reading until the message is whole.
                if not MySocket.msg_received(msg):
                    pass
                processed_msg = MySocket.process_msg(msg)
                if processed_msg is None:
                    log('unsupported message type!')
                    return
                log('no more data from %s at %s' % addr)
                log('the full message is "%s"' % msg)
                log('the extracted message is "%s"' % processed_msg)
                log('sending ACK to client...')
                log('pressing "playpause"...')
                pyautogui.press(processed_msg)
                #todo: put this into a variable
                connection.sendall("0")
                return
        except KeyboardInterrupt:
            log('Interrupted!')
        finally:
            connection.close()
    #removes the length indicator from the received message
    @staticmethod
    def process_msg(msg):
        """Return the command part of "<len>,<command>", or None when it is
        not one of the supported media-key names."""
        extracted_msg = msg.split(',')[1]
        if extracted_msg not in ['playpause','prevtrack', 'nexttrack', 'volumeup', 'volumedown' ]:
            return None
        return extracted_msg
    #checks the length of message and returns True if
    #it's been delivered completely. The msg would be
    #in the format: "7,message"
    @staticmethod
    def msg_received(msg):
        #we wait until we get a comma because that's the separator
        if not ',' in msg:
            return False
        split = msg.split(',')
        return int(split[0]) == len(split[1])
    def stop_listening(self):
        """Ask the accept loop to exit after the current connection."""
        log('stopping listening on connections...')
        self.listen=False
if __name__ == '__main__':
# setting logging message level
logging.basicConfig(level=logging.INFO)
s=MySocket('',10000, 64)
#t=threading.Thread(target=s.start_listening)
s.start_listening()
| gpuma/karu | karu-server/prueba.py | prueba.py | py | 3,327 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREA... |
9020601076 | import os
from matplotlib.pyplot import imread
import random
import numpy as np
# set seed:
seed_value = 1
os.environ['PYTHONHASHSEED']=str(seed_value)
random.seed(seed_value)
np.random.seed(seed_value)
def getListOfFiles(dirName):
    """Recursively collect the paths of all .png files under *dirName*."""
    collected = []
    for entry in os.listdir(dirName):
        path = os.path.join(dirName, entry)
        if os.path.isdir(path):
            # Descend into subdirectories and keep whatever they yield.
            collected.extend(getListOfFiles(path))
        elif path.endswith('.png'):
            collected.append(path)
    return collected
def main(dir):
    """Load a random one-third subsample of the .png images under *dir*.

    NOTE(review): `dir` shadows the builtin; the shuffle is reproducible only
    because of the module-level random.seed(...) above.
    """
    all_files = getListOfFiles(dir)
    random.shuffle(all_files)
    n = len(all_files)
    data = []
    # sub sample 1/3 of the database
    for i in range(n//3):
        # keep only the first three channels (drops an alpha channel if present)
        im = imread(all_files[i])[:,:,:3]
        data.append(im)
return data | MeitalRann/Music-Genre-Detection-using-Deep-CNN-Architecture | lib/get_images.py | get_images.py | py | 1,088 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "random.seed",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_... |
42191776626 | from flask import Flask, render_template
from flask import url_for
app = Flask(__name__)
@app.route('/')
@app.route('/list_prof/<list>')
def list_prof(list):
    """Render the professions page; the <list> URL part selects the list style."""
    professions = ['Капитан', 'Штурман', 'Врач', 'Солдат', 'Гей']
    return render_template('list_prof.html', list=list, jobs=professions)
if __name__ == '__main__':
app.run(port=8080, host='127.0.0.1')
# http://127.0.0.1:8080/list_prof/ol | Xanderstp/html | pr/flask2-3.py | flask2-3.py | py | 451 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 11,
"usage_type": "call"
}
] |
72493576743 | import re;
import collections;
warnings = [];
classStack = [];
def arrayJoin(arr, delim = " "):
    """Join the non-empty strings in *arr* with *delim*."""
    return delim.join(line for line in arr if len(line) != 0)
# Record a warning, prefixed with the dotted path of whatever is currently
# being parsed (tracked in the module-level classStack list).
def warning(message):
    warnings.append(arrayJoin(classStack, ".") + message);
class FunctionParam:
    """One parameter (or return value) of a documented Lua function."""

    def __init__(self, type, name):
        self.type = type
        self.name = name
        self.comment = ""
        # Comment fragments are accumulated here until finished() is called.
        self.commentJoin = []
        self.optional = None

    def finished(self):
        """Collapse the accumulated fragments into the final comment string."""
        if not self.commentJoin and self.type != "nil":
            warning(f"{self} has no comment")
        self.comment = arrayJoin(self.commentJoin)
        self.commentJoin = None

    def __str__(self):
        text = "(" + self.type
        if self.name:
            text += f" {self.name}"
        if self.optional:
            text += f" [{self.optional}]"
        text += ")"
        if self.comment:
            text += f" : {self.comment}"
        return text
# Represents a single function signature for a potentially overloaded function.
# Contains a comment block, list of input paramters and returns.
# As well as if the signature is for a static function and a list of 'see also's.
class FunctionInstance:
    """One overload signature of a Lua-exposed function: its comment block,
    parameters, returns, @see references and static/private flags."""
    # Splits a line into the part after '/**' or after a leading ' * '.
    commentLineSplit = re.compile('/\*\*+(.*)|\s*\*?/?(.*)');
    # Matches metatags such as @param, @returns, @see, @static, @private.
    metaMatch = re.compile('@\S+');
    whitespaceSplit = re.compile('\s+');
    def __init__(self):
        self.comment = "";
        self.params = [];
        self.returns = [];
        self.isStatic = False;
        # The FunctionParam whose comment lines are currently being collected.
        self.parsing = None;
        self.alsoSee = [];
        # True while inside a <pre> block (newlines are then preserved).
        self.inCode = False;
        self.isPrivate = False;
    def addParam(self, param):
        self.params.append(param)
    def idList(self):
        """Build an identifier string: param type_name pairs, '__', return types.
        Empty sides are rendered as 'Void'."""
        s = "";
        first = True;
        if len(self.params) == 0:
            s = "Void";
        else:
            for param in self.params:
                if first:
                    first = False;
                else:
                    s += "_";
                s += param.type + "_" + param.name;
        s += "__";
        if len(self.returns) == 0:
            s += "Void";
        else:
            first = True;
            for ret in self.returns:
                if first:
                    first = False;
                else:
                    s += "_";
                s += ret.type;
        return s;
    # Parses a single comment line.
    # Deals with looking for metatags such as @static, @param, @returns and @see.
    # Also replaces <br>'s with newlines.
    def parseLine(self, line):
        """Handle the metatags on one comment line; return the text that
        should be kept as part of the current comment ('' to drop it)."""
        matchIter = FunctionInstance.metaMatch.finditer(line);
        for match in matchIter:
            meta = str(match.group()).lower();
            line = line.strip();
            if meta == "@static":
                self.isStatic = True;
                return "";
            if meta == "@private":
                self.isPrivate = True;
                return "";
            if meta == "@param":
                # A param line can be in either format of
                # @param Type Name [Optional] Comment (Splits into 4)
                # or
                # @param Type Name Comment (Splits into 3)
                # So we need to split it into 4 and then combine 3 and 4
                # to make the whole comment if there is no optional.
                split = FunctionInstance.whitespaceSplit.split(line, 4);
                optional = None;
                type = split[1];
                comment = "";
                name = split[2];
                # Check for optional
                if name[0] == "[" and name[len(name) - 1] == "]":
                    # Grab the optional from the square brackets.
                    optional = name[1:len(name) - 1];
                    name = split[3];
                    if len(split) > 4:
                        comment = split[4];
                else:
                    # There is no optional so combine 3 and 4 if they are available.
                    if len(split) > 3:
                        comment = split[3];
                    # NOTE(review): this second check looks like it should be
                    # len(split) > 4 — as written, split[4] raises IndexError
                    # when the line splits into exactly 4 fields.
                    if len(split) > 3:
                        comment += " " + split[4];
                param = FunctionParam(type, name);
                self.params.append(param);
                param.optional = optional;
                self.parsing = param;
                if len(split) > 3:
                    return self.parseLine(comment);
                else:
                    return "";
            if meta == "@returns":
                # Return tag has the format
                # @return Type Comment
                # So splitting is fairly simple.
                split = FunctionInstance.whitespaceSplit.split(line, 2);
                returnParam = FunctionParam(split[1], None);
                self.returns.append(returnParam);
                self.parsing = returnParam;
                if len(split) > 2:
                    return self.parseLine(split[2]);
                else:
                    return "";
            if meta == "@see":
                # See tag has the format
                # @see Type
                split = FunctionInstance.whitespaceSplit.split(line, 2);
                self.alsoSee.append(split[1]);
                # We don't need to include this line in the comments
                return "";
        code = line.find("<pre>");
        if code >= 0:
            self.inCode = True;
        code = line.find("</pre>");
        if code >= 0:
            self.inCode = False;
        if self.inCode:
            line = line + "\n";
        return line.replace("<br>", "\n");
    # Takes an array of unparsed comment lines and parses them into
    # the main comment block and the function parameters and returns.
    def parseComment(self, lines):
        """Parse raw comment lines into self.comment and the param/return
        objects, warning when the function has no comment at all."""
        commentJoin = [];
        for line in lines:
            for match in FunctionInstance.commentLineSplit.finditer(line):
                group = match.group(1) if match.group(1) else match.group(2);
                if group:
                    s = self.parseLine(str(group));
                    if len(s) > 0:
                        # Text goes to whichever param/return is being parsed,
                        # or to the function's own comment otherwise.
                        if not self.parsing:
                            commentJoin.append(s);
                        else:
                            self.parsing.commentJoin.append(s);
        self.comment = arrayJoin(commentJoin);
        for param in self.params:
            param.finished();
        for returnParam in self.returns:
            returnParam.finished();
        if len(self.comment) == 0 and not self.isPrivate:
            warning("." + str(self) + " has no comment");
        self.parsing = None;
    def __str__(self):
        """Render the signature as '(type name, ...) -> (type, ...)'."""
        s = "(";
        first = True;
        for param in self.params:
            if not first:
                s += ", ";
            first = False;
            s += param.type + " " + param.name;
            if param.optional:
                s += " [" + param.optional + "]";
        s += ") -> (";
        first = True;
        for returnParam in self.returns:
            if not first:
                s += ", ";
            first = False;
            s += returnParam.type;
        s += ")";
        return s;
# Represents a function with it's Lua name, C++ Name and all the overloaded
# function signatures it can have.
# Also stores if it's a special function like a constructor, destructor or operator.
class FunctionDescription:
    """A Lua-visible function: its Lua name, C++ wrapper name and overloads."""

    # Names that denote constructors, destructors and operators.
    specialFuncNames = {"new": 1, "__gc": 1, "__eq": 1}

    def __init__(self, name, cppName):
        self.name = name
        self.cppName = cppName
        self.instances = []
        self.isSpecial = name in FunctionDescription.specialFuncNames

    def addInstance(self, instance):
        self.instances.append(instance)

    def parseComment(self, comment):
        """Parse one comment block into a new overload signature."""
        instance = FunctionInstance()
        classStack.append(self.name)
        # Everything in the block applies to this single function instance.
        instance.parseComment(comment.splitlines(False))
        self.instances.append(instance)
        classStack.pop()

    def __str__(self):
        return f"{self.name} ({self.cppName})"
# Stores a representation of a class.
# This class can have a comment block, a name, a namespace and a list
# of all the functions and operators that this class has.
class ClassDescription:
    """A wrapped C++ class: its class comment plus its special and normal
    Lua functions, parsed out of a C++ wrapper source file."""
    # Matches one '"luaName", cppName' entry of the luaL_Reg table.
    wrapperRegex = re.compile('"([^"]+)"[^,]*,\s*([^}]+)}');
    # Matches either a /** ... */ comment or an 'int name(lua_State...)' signature.
    commentSplitter = re.compile("(/\*\*.*?\*/)|(int \w+\(lua_State[^\)]+\))", re.S);
    # Extracts the C++ function name from a wrapper signature.
    charParse = re.compile("int (\w+)\(lua_State");
    def __init__(self, name):
        self.name = name;
        self.classComment = "";
        self.namespace = None;
        # Special functions refer to ctor, dtor and operators
        self.specialFuncs = collections.OrderedDict();
        self.funcs = collections.OrderedDict();
        # C++ wrapper name -> FunctionDescription, for comment attachment.
        self.cppLookups = collections.OrderedDict();
        self.fileContents = "";
        self.wrappers = "";
        self.inCode = False;
    def addFunction(self, funcDesc):
        """Register a function under its C++ name and Lua name."""
        self.cppLookups[funcDesc.cppName] = funcDesc;
        if funcDesc.isSpecial:
            self.specialFuncs[funcDesc.name] = funcDesc;
        else:
            self.funcs[funcDesc.name] = funcDesc;
    # Parses a C++ wrapper file.
    # Looks for the luaL_Reg structure which contains all the mappings between
    # Lua functions and the C++ function names.
    # Then splits the file up into comments and function signatures and parses
    # comments to functions.
    def parseFile(self, filename):
        # NOTE(review): the file handle opened here is never closed.
        classStack.append(self.name);
        file = open(filename);
        self.fileContents = file.read();
        luaRegIndex = re.search("luaL_Reg", self.fileContents).start();
        bracketStart = self.fileContents.find("{", luaRegIndex);
        bracketEnd = self.fileContents.find("};", luaRegIndex);
        self.wrappers = self.fileContents[bracketStart + 1:bracketEnd];
        self.parseWrappers(self.wrappers);
        self.parseComments();
        classStack.pop();
    # Parses the luaL_Reg structure for mappings between the Lua function names
    # and the C++ function names.
    def parseWrappers(self, content):
        for match in ClassDescription.wrapperRegex.finditer(content):
            func = FunctionDescription(match.group(1), match.group(2).strip())
            self.addFunction(func);
    def parseClassLine(self, line):
        """Handle one class-comment line; drops the @class tag line itself."""
        matchIter = FunctionInstance.metaMatch.finditer(line);
        for match in matchIter:
            meta = str(match.group()).lower();
            if meta == "@class":
                return "";
        code = line.find("<pre>");
        if code >= 0:
            self.inCode = True;
        code = line.find("</pre>");
        if code >= 0:
            self.inCode = False;
        if self.inCode:
            line = line + "\n";
        return line.replace("<br>", "\n");
    def parseClassComment(self, comment):
        """Assemble the class-level comment from an @class comment block."""
        commentJoin = [];
        for line in comment.splitlines(False):
            for match in FunctionInstance.commentLineSplit.finditer(line):
                group = match.group(1) if match.group(1) else match.group(2);
                if group:
                    commentJoin.append(self.parseClassLine(str(group)));
        self.classComment = arrayJoin(commentJoin);
    # Splits the file into comments and function signatures.
    def parseComments(self):
        """Walk the file's comments/signatures in order, attaching each run of
        comments to the next wrapper function that follows them."""
        comments = [];
        for match in ClassDescription.commentSplitter.finditer(self.fileContents):
            line = match.group();
            index = line.find("/*");
            if index == 0:
                # A comment block: either the class comment or a pending
                # function comment.
                if line.find("@class") > 0:
                    self.parseClassComment(line);
                else:
                    comments.append(line);
            else:
                # A wrapper signature: flush the pending comments onto it.
                cppName = ClassDescription.charParse.match(line).group(1);
                if not cppName in self.cppLookups:
                    continue
                func = self.cppLookups[cppName];
                if len(comments) == 0:
                    warning("." + func.name + " has no comments");
                for comment in comments:
                    func.parseComment(comment);
                comments = [];
        if len(self.classComment) == 0:
            warning(" has not class comment");
    def __str__(self):
        s = "Special functions\n"
        for k,v in self.specialFuncs.items():
            s += str(v) + "\n"
        s += "Normal functions\n"
        for k,v in self.funcs.items():
            s += str(v) + "\n"
        return s
class Documentation:
    """Top-level container mapping class names to parsed ClassDescriptions."""

    whitespaceSplit = re.compile('\s+')

    def __init__(self):
        self.classes = collections.OrderedDict()

    def parseClass(self, className, filepath):
        """Parse a single wrapper file and register it under *className*."""
        desc = ClassDescription(className)
        desc.parseFile(filepath)
        self.classes[className] = desc

    def parseList(self, listPath):
        """Parse a list file of 'ClassName path' entries.

        A '__basePath <dir>' entry sets the prefix for subsequent paths;
        blank lines and lines starting with // are skipped.
        """
        basePath = ""
        handle = open(listPath)
        content = handle.read()
        for line in content.splitlines(False):
            if not line or line.startswith("//"):
                continue
            fields = Documentation.whitespaceSplit.split(line, 2)
            if fields[0] == "__basePath":
                basePath = fields[1]
            else:
                desc = ClassDescription(fields[0])
                desc.parseFile(basePath + fields[1])
                self.classes[fields[0]] = desc
| astrellon/Rouge | python/luaDocs.py | luaDocs.py | py | 11,114 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 257,... |
# -*- coding: utf-8 -*-
"""
Flatten the Zurich public-lighting GeoJSON into a CSV of coordinates.

@author: Florian Koch
@license: All rights reserved
"""
import pandas as pd
import json

# Explicit encoding: the path and file content are UTF-8; relying on the
# locale default can fail on other machines.
with open('../../data/Öffentliche Beleuchtung der Stadt Zürich/beleuchtung.json',
          encoding='utf-8') as data_file:
    data = json.load(data_file)
# One row per lamp coordinate, keeping the feature's name as metadata.
# pd.io.json.json_normalize is deprecated; pd.json_normalize is the supported
# spelling with identical behavior.
df = pd.json_normalize(data, ['features', ['geometry', 'coordinates']], ['name', ])
df.columns = ['E', 'N', 'name']
# NOTE(review): this sets the DataFrame's `name` *attribute*, not the 'name'
# column — confirm whether df['name'] = 'illumination' was intended.
df.name = 'illumination'
df.to_csv('../../data/prepared/illumination.csv')
| limo1996/ETH-DataScience | src/preprocess/beleuchtung.py | beleuchtung.py | py | 450 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.io.json.json_normalize",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.io",
"line_number": 11,
"usage_type": "attribute"
}
] |
27036725509 | # =========================================================================
# Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from collections import OrderedDict
import io
import os
import logging
import json
class FeatureMap(object):
    """Schema of a CTR dataset: per-feature specs, label names, and the
    mapping from each feature to its column index in the processed arrays."""

    def __init__(self, dataset_id, data_dir):
        self.data_dir = data_dir # must keep to be used in embedding layer for pretrained emb
        self.dataset_id = dataset_id
        self.num_fields = 0
        self.total_features = 0
        self.input_length = 0
        self.features = OrderedDict()   # feature name -> spec dict
        self.labels = []
        self.column_index = dict()      # feature/label name -> column idx (or list of idx)
        self.group_id = None
        self.default_emb_dim = None     # fallback when a spec has no embedding_dim

    def load(self, json_file, params):
        """Populate this map from a feature_map JSON file.

        Raises:
            RuntimeError: if the file belongs to a different dataset_id.
        """
        logging.info("Load feature_map from json: " + json_file)
        with io.open(json_file, "r", encoding="utf-8") as fd:
            feature_map = json.load(fd)
        if feature_map["dataset_id"] != self.dataset_id:
            raise RuntimeError("dataset_id={} does not match feature_map!".format(self.dataset_id))
        self.num_fields = feature_map["num_fields"]
        self.labels = feature_map.get("labels", [])
        self.total_features = feature_map.get("total_features", 0)
        self.input_length = feature_map.get("input_length", 0)
        self.group_id = feature_map.get("group_id", None)
        self.default_emb_dim = params.get("embedding_dim", None)
        # "features" is stored as a list of single-key dicts; flatten it.
        self.features = OrderedDict((k, v) for x in feature_map["features"] for k, v in x.items())
        if params.get("use_features", None):
            # Restrict (and reorder) to the explicitly requested features.
            self.features = OrderedDict((x, self.features[x]) for x in params["use_features"])
        if params.get("feature_specs", None):
            self.update_feature_specs(params["feature_specs"])
        self.set_column_index()

    def update_feature_specs(self, feature_specs):
        """Apply per-feature overrides; a spec's "name" may be one feature
        name or a list of names sharing the same overrides."""
        for col in feature_specs:
            namelist = col["name"]
            if not isinstance(namelist, list):
                namelist = [namelist]
            for name in namelist:
                for k, v in col.items():
                    if k != "name":
                        self.features[name][k] = v

    def save(self, json_file):
        """Serialize this feature map to `json_file` as UTF-8 JSON."""
        logging.info("Save feature_map to json: " + json_file)
        os.makedirs(os.path.dirname(json_file), exist_ok=True)
        feature_map = OrderedDict()
        feature_map["dataset_id"] = self.dataset_id
        feature_map["num_fields"] = self.num_fields
        feature_map["total_features"] = self.total_features
        feature_map["input_length"] = self.input_length
        feature_map["labels"] = self.labels
        if self.group_id is not None:
            feature_map["group_id"] = self.group_id
        # Keep the list-of-single-key-dicts layout expected by load().
        feature_map["features"] = [{k: v} for k, v in self.features.items()]
        with open(json_file, "w", encoding="utf-8") as fd:
            json.dump(feature_map, fd, indent=4)

    def get_num_fields(self, feature_source=None):
        """Number of non-meta fields, optionally filtered by source(s).

        `feature_source` may be None/[] (no filter), a single source value,
        or a list of sources. (The mutable default `[]` was replaced by None
        to avoid the shared-default-argument pitfall.)
        """
        if feature_source is None:
            feature_source = []
        elif not isinstance(feature_source, list):
            feature_source = [feature_source]
        num_fields = 0
        for feature, feature_spec in self.features.items():
            if feature_spec["type"] == "meta":
                continue  # meta columns are bookkeeping, not model inputs
            if len(feature_source) == 0 or feature_spec.get("source") in feature_source:
                num_fields += 1
        return num_fields

    def sum_emb_out_dim(self, feature_source=None):
        """Total embedding output dimension over the selected fields.

        Per-feature precedence: emb_output_dim > embedding_dim > the global
        default supplied via params at load time.
        """
        if feature_source is None:
            feature_source = []
        elif not isinstance(feature_source, list):
            feature_source = [feature_source]
        total_dim = 0
        for feature, feature_spec in self.features.items():
            if feature_spec["type"] == "meta":
                continue
            if len(feature_source) == 0 or feature_spec.get("source") in feature_source:
                total_dim += feature_spec.get("emb_output_dim",
                                              feature_spec.get("embedding_dim",
                                                               self.default_emb_dim))
        return total_dim

    def set_column_index(self):
        """Assign consecutive column indices to features (a sequence feature
        with max_len occupies max_len columns), then to labels."""
        logging.info("Set column index...")
        idx = 0
        for feature, feature_spec in self.features.items():
            if "max_len" in feature_spec:
                col_indexes = [i + idx for i in range(feature_spec["max_len"])]
                self.column_index[feature] = col_indexes
                idx += feature_spec["max_len"]
            else:
                self.column_index[feature] = idx
                idx += 1
        self.input_length = idx
        for label in self.labels:
            self.column_index[label] = idx
            idx += 1

    def get_column_index(self, feature):
        """Column index (or list of indices) of `feature`, building lazily."""
        if feature not in self.column_index:
            self.set_column_index()
        return self.column_index[feature]
| xue-pai/FuxiCTR | fuxictr/features.py | features.py | py | 5,503 | python | en | code | 671 | github-code | 36 | [
{
"api_name": "collections.OrderedDict",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "io.open",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_nu... |
18363748651 | import argparse
from pwm import main as pwm_main
from pwm import *
'''
-m pour le fichier contenant la matrice (option longue --pfm, pas de valeur par défaut) ;
-t pour le seuil de score de matrice (option longue --threshold, pas de valeur par défaut) ;
-l pour la longueur du promoteur (option longue --promotor-length, par défaut 1000) ;
-w pour la longueur de la fenêtre glissante (option longue --window-size, par défaut 40) ;
-s pour le seuil de score de fenêtre (option longue --window-threshold, pas de valeur par défaut) ;
-p pour la valeur du pseudo-poids (option longue --pseudocount, valeur par défaut 0.1).
'''
def main(args):
    # NOTE(review): this builds a fresh, argument-free parser and re-parses
    # sys.argv while ignoring `args` — it looks like placeholder/dead code;
    # the real argument handling lives in the __main__ block below.
    parser = argparse.ArgumentParser()
    parser.parse_args()
if __name__ == "__main__":
    # Command-line entry point: parse options, then run the PWM pipeline.
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--pfm", help="le fichier contenant la matrice")
    # Score thresholds are real-valued: parse them as floats (int strings
    # like "5" still parse, so this is backward compatible).
    parser.add_argument("-t", "--threshold", help="The threshold of the results", type=float)
    parser.add_argument("-l", "--promotor-length", help="The lengh of the promotor", type=int, default=1000)
    parser.add_argument("-w", "--window-size", help="the size of the sliding window", type=int, default=40)
    parser.add_argument("-s", "--window-threshold", type=float)
    # BUG FIX: was type=int with a float default (0.1) — passing "-p 0.5"
    # crashed, and any integer value silently lost the fractional weight.
    parser.add_argument("-p", "--pseudocount", help="le seuil de poids", type=float, default=0.1)
    parser.add_argument("-v", "--verbosity", help="increases the verbosity", action="store_true")
    args = parser.parse_args()
    if args.verbosity:
        print("verbosity turned on")
    nb_occurences = scan_sequence()
    score_de_fenetre = score_window()
    pwm_main()
| fayssalElAnsari/Bioinformatics-python-sequence-analyser | app/src/putative_TFBS.py | putative_TFBS.py | py | 1,622 | python | fr | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pwm.main",
"line_number": 35,
"usage_type": "call"
}
] |
38628061872 | #!/usr/bin/env python
import os
from posixpath import split
import pygame
import numpy as np
from eden.core import Eden
import platform
if platform.system() == 'Windows':
import ctypes
ctypes.windll.user32.SetProcessDPIAware()
from pygame.transform import scale as surf_scale
from typing import List, Tuple
# Location of bundled art/fonts, relative to this file.
asset_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', 'assets')
FONT = os.path.join(asset_dir, 'consola.ttf')
FONT_SIZE = 16
# RGB color constants used when rendering text.
FONT_WHITE = (255, 255, 255)
FONT_BLACK = (0, 0, 0)
FONT_RED = (200, 0, 0)
FONT_GREEN = (0, 200, 0)
FONT_BLUE = (0, 0, 200)
# Native pixel size of one UI cell (assets are scaled from this).
PIC_SIZE= 32
# UI region codes stored per-cell in Render.block.
BLOCK_BORDER = 0
BLOCK_SYNTHESIZE = 1
BLOCK_BACKPACK = 2
BLOCK_EQUIPMENT = 3
BLOCK_MAP = 4
BLOCK_ATTRIBUTE = 5
# Background asset (by name) drawn for each region code.
BLOCK_ASSET_NAME = {
    BLOCK_BORDER: 'border',
    BLOCK_SYNTHESIZE: 'synthesize',
    BLOCK_BACKPACK: 'slot_bp',
    BLOCK_EQUIPMENT: 'slot_eq',
    BLOCK_MAP: 'background',
    BLOCK_ATTRIBUTE: 'background'
}
# Fog overlays: index 0 = night, index 1 = day (see Render._draw_map).
FOG_ASSET_NAME = ['fog_black', 'fog_white']
# Object type names, indexed by the type code coming from the backend.
TYPE_NAME = ["agent", "being", "item", "resource", "buff", "weather", "landform", "attribute"]
# Action codes sent back to the environment on mouse clicks.
ACTION_IDLE = 0
ACTION_ATTACK = 1
ACTION_COLLECT = 2
ACTION_PICKUP = 3
ACTION_CONSUME = 4
ACTION_EQUIP = 5
ACTION_SYNTHESIZE = 6
ACTION_DISCARD = 7
ACTION_MOVE = 8
class Render:
    def __init__(self, env:Eden, display_size:int=20) -> None:
        """Build the UI state and layout for `env` and open the window.

        Args:
            env: the Eden environment to visualize/control.
            display_size: side length (in cells) of the visible map viewport.
        """
        assert display_size >= 16, "display_size is supposed to be >= 16"
        self.env = env
        self.n_agent = env.backend.agent_count
        self.current_agent = 0
        # NOTE(review): all entries initially alias the same list; harmless
        # because fresh() always reassigns whole elements, never mutates them.
        self.action = [[]] * self.n_agent
        self.display_size = display_size
        self.info_linewidth = display_size - 7
        # Rows (of 10 slots each) needed for each panel.
        self.backpack_line_n = int(np.ceil(self.env.backpack_size / 10))
        self.equipment_line_n = int(np.ceil(self.env.equipment_size / 10))
        self.synthesize_line_n = int(np.ceil(len(self.env.synthesize_list) / 10))
        self.attribute_line_n = int(np.ceil(len(env.attribute_name) / 2))
        # Window size in cells; height grows if the slot panels need more rows.
        self.window_x = int(1 + self.display_size + 1 + 10 + 1) # border map border slot border
        self.window_y = int(1 + self.display_size + 1 + 6 + 1) # border map border info border
        self.window_y = max(
            self.window_y,
            1 + self.backpack_line_n + # backpack
            1 + self.equipment_line_n + # equipment
            1 + self.synthesize_line_n + 1 # synthesize
        )
        self.is_daytime = True
        self.current_vision_range = 0
        self.ui_info = None
        self.assets = {}
        self.block_size = PIC_SIZE
        self.block = np.zeros((self.window_x, self.window_y), dtype=int) # record block_type of every block
        self.nameint = np.zeros_like(self.block, dtype=int) - 1 # used in click support for [backpack, equipment, synthesize]
        self.mouse_info = ""
        # Cell currently under the mouse (-1 until the first motion event).
        self.block_x = -1
        self.block_y = -1
        self._init_display()
def ui_run(self):
while self.fresh():
self.clock.tick(15)
pygame.quit()
    def fresh(self) -> bool:
        '''
        Redraw the UI and process pending events (clicks, motion, resize).
        Returns False once the window has been closed, True otherwise.
        '''
        self._draw_ui()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                return False
            elif event.type == pygame.MOUSEBUTTONDOWN:
                # A click may produce an action for the current agent; once
                # every agent has an action, step the environment.
                action = self._handle_mouse_click(event)
                if len(action) > 0:
                    self.action[self.current_agent] = action
                    self.current_agent += 1
                    if self.current_agent >= self.n_agent:
                        self.current_agent = 0
                        self.env.step(self.action)
            elif event.type == pygame.MOUSEMOTION:
                self._handle_mouse_motion(event)
            elif event.type == pygame.VIDEORESIZE:
                # Rescale cells and fonts to keep the grid inside the window.
                self.block_size = np.floor(min(event.w / self.window_x, event.h / self.window_y))
                self.font = pygame.font.Font(FONT, int(self.block_size // 2))
                self.attr_font = pygame.font.Font(FONT, int(self.block_size // 4))
        return True
    def _draw_ui(self):
        """Redraw the whole frame for the agent whose turn it is."""
        # All _draw_* helpers below read self.ui_info via a shared cursor.
        self.ui_info = self.env.backend.ui(self.current_agent)
        self._draw_border()
        self._draw_info()
        self._draw_synthesize_list()
        # _draw_map returns a cursor into ui_info that the following panels
        # continue consuming from, so this call order matters.
        cursor = self._draw_map()
        cursor = self._draw_attribute(cursor)
        cursor = self._draw_backpack(cursor)
        cursor = self._draw_equipment(cursor)
        pygame.display.flip()
    def _handle_mouse_motion(self, event):
        """Track the hovered cell and refresh self.mouse_info accordingly."""
        cursor_x, cursor_y = pygame.mouse.get_pos()
        self.block_x = int(cursor_x // self.block_size)
        self.block_y = int(cursor_y // self.block_size)
        # mouse info
        x = self.block_x
        y = self.block_y
        if x < 0 or x >= self.window_x:
            return []
        if y < 0 or y >= self.window_y:
            return []
        target_0, target_1 = self._get_target_param()
        if self.block[x, y] == BLOCK_MAP:
            # 6 ui_info entries per map tile, offset 1 for the weather id.
            cursor = 1 + (target_0 * self.env.map_size_x + target_1) * 6
            if self.ui_info[cursor + 1] < 0: # not occupied, record landform name
                self.mouse_info = self.env.run_script(f"get.landform.{target_0}-{target_1}")
            else: # occupied, record object info
                self.mouse_info = self.env.run_script(f"get.map.{target_0}-{target_1}|get.info.$0")
        elif self.block[x, y] == BLOCK_BACKPACK:
            # Convert the hovered cell back into a backpack slot index.
            row = y - 1
            col = x - 2 - self.display_size
            slot = row * 10 + col
            name_id = self.env.run_script(f"get.agent.{self.current_agent}|get.backpack.$0.{slot}")
            if name_id != "":
                self.mouse_info = self.env.run_script(f"get.info.{name_id}")
            else:
                self.mouse_info = "Empty Backpack Slot"
        elif self.block[x, y] == BLOCK_EQUIPMENT:
            row = y - 2 - self.backpack_line_n
            col = x - 2 - self.display_size
            slot = row * 10 + col
            name_id = self.env.run_script(f"get.agent.{self.current_agent}|get.equipment.$0.{slot}")
            if name_id != "":
                self.mouse_info = self.env.run_script(f"get.info.{name_id}")
            else:
                self.mouse_info = "Empty Equipment Slot"
        elif self.block[x, y] == BLOCK_SYNTHESIZE:
            row = y - 3 - self.backpack_line_n - self.equipment_line_n
            col = x - 2 - self.display_size
            slot = row * 10 + col
            # Show the item name plus its synthesize recipe.
            name = self.env.backend_cfg.typeid2name[f"item:{self.nameint[x, y]}"]
            self.mouse_info = f"[{name}] " + self.env.run_script(f"get.synthesize_table.{name}")
    def _handle_mouse_click(self, event) -> List[float]:
        """Translate a mouse click on the hovered cell into an action triple
        [action_code, target_0, target_1]; [] means "no action"."""
        if event.button not in [pygame.BUTTON_LEFT, pygame.BUTTON_RIGHT]:
            return []
        x = self.block_x
        y = self.block_y
        if x < 0 or x >= self.window_x:
            return []
        if y < 0 or y >= self.window_y:
            return []
        target_0, target_1 = self._get_target_param()
        if self.block[x, y] == BLOCK_MAP:
            cursor = 1 + (target_0 * self.env.map_size_x + target_1) * 6
            if self.ui_info[cursor + 1] < 0: # not occupied, move here
                return [ACTION_MOVE, target_0, target_1]
            else: # occupied, take action accordingly
                type_name = TYPE_NAME[int(self.ui_info[cursor + 1])]
                if type_name == 'agent':
                    return [ACTION_IDLE, -1, -1]
                elif type_name == 'being':
                    # Left-click collects from a being, right-click attacks it.
                    return [
                        ACTION_COLLECT if event.button == pygame.BUTTON_LEFT else ACTION_ATTACK,
                        target_0, target_1
                    ]
                elif type_name == 'resource':
                    return [ACTION_COLLECT, target_0, target_1]
                elif type_name == 'item':
                    return [ACTION_PICKUP, target_0, target_1]
        elif self.block[x, y] == BLOCK_BACKPACK:
            if self.nameint[x, y] < 0: # empty, return
                return []
            if event.button == pygame.BUTTON_RIGHT:
                return [ACTION_DISCARD, target_0, target_1]
            # Left click: equip equippables, consume consumables.
            if self.nameint[x, y] in [int(x.split(':')[-1]) for x in self.env.backend_cfg.equip_list]:
                return [ACTION_EQUIP, target_0, target_1]
            elif self.nameint[x, y] in self.env.backend_cfg.consume_list:
                return [ACTION_CONSUME, target_0, target_1]
        elif self.block[x, y] == BLOCK_EQUIPMENT:
            if self.nameint[x, y] < 0: # empty, return
                return []
            # Clicking a worn piece toggles/re-equips it.
            return [ACTION_EQUIP, target_0, target_1]
        elif self.block[x, y] == BLOCK_SYNTHESIZE:
            return [ACTION_SYNTHESIZE, target_0, target_1]
        return []
def _get_target_param(self):
target_0 = -1
target_1 = -1
x = self.block_x
y = self.block_y
if self.block[x, y] == BLOCK_SYNTHESIZE:
target_0 = self.nameint[x, y]
target_1 = 1
elif self.block[x, y] == BLOCK_BACKPACK:
target_0 = self.nameint[x, y]
target_1 = 1
elif self.block[x, y] == BLOCK_EQUIPMENT:
# equipment
target_0 = self.nameint[x, y]
target_1 = 1
elif self.block[x, y] == BLOCK_MAP:
# map
lt_x, lt_y = self._get_display_rect()
target_0 = x - 1 + lt_x
target_1 = y - 1 + lt_y
return int(target_0), int(target_1)
def _init_display(self):
pygame.init()
self.screen = pygame.display.set_mode(
(self.window_x * PIC_SIZE, self.window_y * PIC_SIZE),
pygame.RESIZABLE
)
self.clock = pygame.time.Clock()
self.font = pygame.font.Font(FONT, int(self.block_size // 2))
self.attr_font = pygame.font.Font(FONT, int(self.block_size // 4))
self._init_assets()
self._init_block()
for name in BLOCK_ASSET_NAME.values():
assert name in self.assets.keys(), "Could not find {name} in assets"
for name in FOG_ASSET_NAME:
assert name in self.assets.keys(), "Could not find {name} in assets"
def _init_assets(self):
for file in os.listdir(asset_dir):
if '.png' != file[-4:]:
continue
surf = pygame.image.load(os.path.join(asset_dir, file)).convert_alpha()
self.assets[file[:-4]] = surf
def _init_block(self):
# map
for x in range(1, 1 + self.display_size):
for y in range(1, 1 + self.display_size):
self.block[x, y] = BLOCK_MAP
# attribute
for x in range(1, 1 + 6):
for y in range(2 + self.display_size, self.window_y - 1):
self.block[x, y] = BLOCK_ATTRIBUTE
# info
shift_x = 8
for y in range(2 + self.display_size, self.window_y - 1):
for x in range(8, 1 + self.display_size):
self.block[x, y] = BLOCK_ATTRIBUTE
# backpack
shift_x = 2 + self.display_size
shift_y = 1
for i in range(self.env.backpack_size):
y = int(i // 10)
x = i - y * 10
self.block[x + shift_x, y + shift_y] = BLOCK_BACKPACK
# equipment
shift_y += self.backpack_line_n + 1
for i in range(self.env.equipment_size):
y = int(i // 10)
x = i - y * 10
self.block[x + shift_x, y + shift_y] = BLOCK_EQUIPMENT
# synthesize list
shift_y += self.equipment_line_n + 1
for i in range(len(self.env.synthesize_list)):
y = int(i // 10)
x = i - y * 10
self.block[x + shift_x, y + shift_y] = BLOCK_SYNTHESIZE
def _draw_border(self):
for x in range(self.window_x):
for y in range(self.window_y):
name = BLOCK_ASSET_NAME[self.block[x, y]]
self.screen.blit(
surf_scale(self._get_asset(name), [self.block_size] * 2),
(x * self.block_size, y * self.block_size)
)
    def _draw_map(self):
        """Render the visible map window (landform, weather, objects, fog).

        Consumes the map portion of ui_info (6 values per tile, preceded by
        the weather id and followed by the daytime flag and agent position)
        and returns the cursor where the attribute section begins.
        """
        lt_x, lt_y = self._get_display_rect()
        # weather
        weather_id = int(self.ui_info[0])
        weather_name = self.env.backend_cfg.typeid2name[f"weather:{weather_id}"]
        # map
        self.screen.blit(
            self.font.render("(X)".center(4), True, FONT_WHITE),
            ((1 + min(self.display_size, self.env.map_size_x)) * self.block_size, int(0.25 * self.block_size))
        )
        self.screen.blit(
            self.font.render("(Y)".center(4), True, FONT_WHITE),
            (0, (1 + min(self.display_size, self.env.map_size_y)) * self.block_size)
        )
        cursor = 1
        for x in range(self.env.map_size_x):
            for y in range(self.env.map_size_y):
                # Tile position relative to the visible window.
                block_x = x - lt_x
                block_y = y - lt_y
                if block_x >=0 and block_y >= 0 and block_x < self.display_size and block_y < self.display_size:
                    # Axis labels along the first visible row/column.
                    if block_x == 0:
                        self.screen.blit(
                            self.font.render(str(y).center(4), True, FONT_WHITE),
                            (0, int((1.25 + block_y) * self.block_size))
                        )
                    if block_y == 0:
                        self.screen.blit(
                            self.font.render(str(x).center(4), True, FONT_WHITE),
                            ((1 + block_x) * self.block_size, int(0.25 * self.block_size))
                        )
                    # Landform layer, then the weather overlay.
                    name = self.env.backend_cfg.typeid2name[f"landform:{int(self.ui_info[cursor])}"]
                    self.screen.blit(
                        surf_scale(self._get_asset(name), [self.block_size] * 2),
                        ((1 + block_x) * self.block_size, (1 + block_y) * self.block_size)
                    )
                    self.screen.blit(
                        surf_scale(self.assets[weather_name], [self.block_size] * 2),
                        ((1 + block_x) * self.block_size, (1 + block_y) * self.block_size)
                    )
                    # Occupying object (agent/being/item/...), if any.
                    if int(self.ui_info[cursor+1]) >= 0:
                        type_name = TYPE_NAME[int(self.ui_info[cursor+1])]
                        name = self.env.backend_cfg.typeid2name[
                            f"{type_name}:{int(self.ui_info[cursor+2])}"
                        ]
                        self.screen.blit(
                            surf_scale(self._get_asset(name), [self.block_size] * 2),
                            ((1 + block_x) * self.block_size, (1 + block_y) * self.block_size)
                        )
                        # if type_name == 'agent':
                        #     self.screen.blit(
                        #         self.attr_font.render(f"{int(self.ui_info[cursor+3])}".center(5), True, FONT_WHITE, FONT_RED),
                        #         (int((1.125 + block_x) * self.block_size), (1 + block_y) * self.block_size)
                        #     )
                        #     self.screen.blit(
                        #         self.attr_font.render(f"{int(self.ui_info[cursor+4])}".center(3), True, FONT_WHITE, FONT_GREEN),
                        #         ((1 + block_x) * self.block_size, int((1.75 + block_y) * self.block_size))
                        #     )
                        #     self.screen.blit(
                        #         self.attr_font.render(f"{int(self.ui_info[cursor+5])}".center(3), True, FONT_WHITE, FONT_BLUE),
                        #         (int((1.5 + block_x) * self.block_size), int((1.75 + block_y) * self.block_size))
                        #     )
                        # elif type_name == 'being':
                        #     self.screen.blit(
                        #         self.attr_font.render(f"{int(self.ui_info[cursor+3])}".center(5), True, FONT_WHITE, FONT_RED),
                        #         (int((1.125 + block_x) * self.block_size), (1 + block_y) * self.block_size)
                        #     )
                cursor += 6
        # is daytime
        self.is_daytime = (self.ui_info[cursor] > 1e-4)
        cursor += 1
        # position
        agent_x = int(self.ui_info[cursor])
        agent_y = int(self.ui_info[cursor+1])
        # fog: cover tiles beyond the agent's (Manhattan) vision range.
        for x in range(self.env.map_size_x):
            for y in range(self.env.map_size_y):
                block_x = x - lt_x
                block_y = y - lt_y
                if block_x < 0 or block_y < 0:
                    continue
                if block_x >= self.display_size or block_y >= self.display_size:
                    continue
                if abs(x - agent_x) + abs(y - agent_y) <= self.current_vision_range:
                    continue
                self.screen.blit(
                    surf_scale(self.assets[FOG_ASSET_NAME[int(self.is_daytime)]], [self.block_size] * 2),
                    ((1 + block_x) * self.block_size, (1 + block_y) * self.block_size)
                )
        cursor += 2
        return cursor
    def _draw_attribute(self, cursor):
        """Render the agent's attribute panel; also caches the vision range
        matching the current time of day. Returns the advanced cursor."""
        # attribute
        attribute_num = int(self.ui_info[cursor])
        cursor += 1
        shift_x = 1
        shift_y = 2 + self.display_size
        for i in range(attribute_num):
            # Two attribute lines per cell row (i / 2 is intentional).
            y = int((shift_y + i / 2) * self.block_size)
            x = int(shift_x * self.block_size)
            self.screen.blit(
                self.font.render(f"{self.env.attribute_name[i]:15s}{int(self.ui_info[cursor]):5d}", True, FONT_WHITE),
                (x, y)
            )
            # Remember the active vision range for the fog pass in _draw_map.
            if self.env.attribute_name[i] == 'Vision' and self.is_daytime:
                self.current_vision_range = int(self.ui_info[cursor])
            elif self.env.attribute_name[i] == 'NightVision' and not self.is_daytime:
                self.current_vision_range = int(self.ui_info[cursor])
            cursor += 1
        self.screen.blit(
            self.font.render("Attribute", True, FONT_WHITE),
            [int((shift_x + .25) * self.block_size), int((shift_y - .75) * self.block_size)]
        )
        return cursor
    def _draw_backpack(self, cursor):
        """Render the backpack grid (item icon + count per occupied slot),
        recording item ids in self.nameint for click handling. Returns the
        advanced cursor (2 ui_info values consumed per slot)."""
        backpack_size = int(self.ui_info[cursor])
        cursor += 1
        shift_x = 2 + self.display_size
        shift_y = 1
        for i in range(backpack_size):
            y = i // 10
            x = i - y * 10
            x += shift_x
            y += shift_y
            self.screen.blit(
                surf_scale(self.assets['slot_bp'], [self.block_size] * 2),
                (x * self.block_size, y * self.block_size)
            )
            if int(self.ui_info[cursor]) >= 0:
                name = self.env.backend_cfg.typeid2name[f"item:{int(self.ui_info[cursor])}"]
                self.screen.blit(
                    surf_scale(self._get_asset(name), [self.block_size] * 2),
                    (x * self.block_size, y * self.block_size)
                )
                # Stack count in the slot's top-left corner.
                self.screen.blit(
                    self.font.render(str(int(self.ui_info[cursor+1])), True, FONT_WHITE),
                    (int((x + 0.15) * self.block_size), int((y + 0.15) * self.block_size))
                )
                self.nameint[x, y] = int(self.ui_info[cursor])
            cursor += 2
        self.screen.blit(
            self.font.render("Backpack", True, FONT_WHITE),
            [int((shift_x + .25) * self.block_size), int((shift_y - .75) * self.block_size)]
        )
        return cursor
    def _draw_equipment(self, cursor):
        """Render the equipment grid (icon per worn item), recording item ids
        in self.nameint for click handling. Returns the advanced cursor
        (1 ui_info value consumed per slot)."""
        equipment_size = int(self.ui_info[cursor])
        cursor += 1
        shift_x = 2 + self.display_size
        shift_y = 2 + self.backpack_line_n
        for i in range(equipment_size):
            y = i // 10
            x = i - y * 10
            x += shift_x
            y += shift_y
            self.screen.blit(
                surf_scale(self.assets['slot_eq'], [self.block_size] * 2),
                (x * self.block_size, y * self.block_size)
            )
            if int(self.ui_info[cursor]) >= 0:
                name = self.env.backend_cfg.typeid2name[f"item:{int(self.ui_info[cursor])}"]
                self.screen.blit(
                    surf_scale(self._get_asset(name), [self.block_size] * 2),
                    (x * self.block_size, y * self.block_size)
                )
                self.nameint[x, y] = int(self.ui_info[cursor])
            cursor += 1
        self.screen.blit(
            self.font.render("Equipment", True, FONT_WHITE),
            [int((shift_x + .25) * self.block_size), int((shift_y - .75) * self.block_size)]
        )
        return cursor
    def _draw_synthesize_list(self):
        """Render the grid of synthesizable items (static: comes from the
        environment config, not from ui_info), recording item ids in
        self.nameint for click handling."""
        shift_x = 2 + self.display_size
        shift_y = 3 + self.backpack_line_n + self.equipment_line_n
        for i in range(len(self.env.synthesize_list)):
            y = i // 10
            x = i - y * 10
            x += shift_x
            y += shift_y
            name = self.env.backend_cfg.typeid2name[f"item:{self.env.synthesize_list[i]}"]
            self.screen.blit(
                surf_scale(self._get_asset(name), [self.block_size] * 2),
                (x * self.block_size, y * self.block_size)
            )
            self.nameint[x, y] = self.env.synthesize_list[i]
        self.screen.blit(
            self.font.render("Synthesize List", True, FONT_WHITE),
            [int((shift_x + .25) * self.block_size), int((shift_y - .75) * self.block_size)]
        )
    def _draw_info(self):
        """Render the hover-info text panel, word-wrapping self.mouse_info
        to the panel width."""
        x = 8
        y = 2 + self.display_size
        self.screen.blit(
            self.font.render(f"Information", True, FONT_WHITE),
            [int((x + .25) * self.block_size), int((y - .75) * self.block_size)]
        )
        shift_pixel_x = 0
        shift_pixel_y = 0
        for word in self.mouse_info.split(' '):
            word_surf = self.font.render(word + ' ', True, FONT_WHITE)
            # Wrap to the next line before overflowing the map's right edge.
            if x * self.block_size + shift_pixel_x + word_surf.get_width() >= (1 + self.display_size) * self.block_size:
                shift_pixel_y += self.block_size // 2
                shift_pixel_x = 0
            self.screen.blit(word_surf, [x * self.block_size + shift_pixel_x, y * self.block_size + shift_pixel_y])
            shift_pixel_x += word_surf.get_width()
def _get_display_rect(self):
cursor = 1 + self.env.map_size_x * self.env.map_size_y * 6 + 1 # weather, map, daytime
agent_x = self.ui_info[cursor]
agent_y = self.ui_info[cursor+1]
left_top_x = max(0, min(self.env.map_size_x - self.display_size, agent_x - self.display_size // 2))
left_top_y = max(0, min(self.env.map_size_y - self.display_size, agent_y - self.display_size // 2))
return int(left_top_x), int(left_top_y)
def _get_asset(self, name):
assert name in self.assets.keys(), "Could not find {name} in assets"
return self.assets[name]
if __name__ == "__main__":
    import gym, eden
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--load_history', '-l', action='store_true', help='whether to load actions from history.log')
    parser.add_argument('--display_size', '-d', type=int, default=20, help='the map display size, should be no less than 16')
    args = parser.parse_args()
    env = gym.make('eden-v0')
    env.reset()
    if args.load_history:
        # Replay previously logged multi-agent actions before opening the UI.
        # Lines look like "[[a, t0, t1], [a, t0, t1], ...]"; anything without
        # "[[" is skipped.
        action_history = []
        with open('history.log', 'r') as file:
            lines = file.readlines()
            for line in lines:
                if '[[' not in line:
                    continue
                multi_action = line[:-1].strip('[]').split('], [')
                action_history.append([[int(x) for x in single_action.split(', ')] for single_action in multi_action])
        for action in action_history:
            env.step(action)
    render = Render(env, display_size=args.display_size)
    render.ui_run()
| DouPiChen/Eden-v0 | python/eden/interactive.py | interactive.py | py | 23,947 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "platform.system",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "ctypes.windll.user32.SetProcessDPIAware",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "ctypes.windll",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_nam... |
32413575452 | import math
import os
from pathlib import Path
from typing import Any, Optional, Tuple, Union
from warnings import warn
import torch
from renate.types import NestedTensors
def mmap_tensor(
    filename: str, size: Union[int, Tuple[int, ...]], dtype: torch.dtype
) -> torch.Tensor:
    """Create (or reattach to) a shared, memory-mapped CPU tensor backed by
    `filename`, reshaped to `size`."""
    if isinstance(size, tuple):
        numel = math.prod(size)
    else:
        numel = size
    flat = torch.from_file(
        filename,
        shared=True,
        size=numel,
        dtype=dtype,
        device="cpu",
    )
    return flat.view(size)
class Storage(torch.utils.data.Dataset):
    """An abstract class for permanent storage of datasets.

    Subclasses must set ``self._length`` (e.g. in ``dump_dataset`` or
    ``load_dataset``) before ``__len__`` is called.
    """
    def __init__(self, directory: str) -> None:
        # Directory under which the concrete storage keeps its files.
        self._directory = directory
    def __len__(self) -> int:
        return self._length
    def __getitem__(self, idx: int) -> Any:
        # Return the stored data point at index `idx`.
        raise NotImplementedError()
    def dump_dataset(self, ds: torch.utils.data.Dataset) -> None:
        # Persist every item of `ds` into this storage.
        raise NotImplementedError()
    def load_dataset(self, directory: Union[str, Path]):
        # Prepare the storage for reading previously dumped data.
        raise NotImplementedError()
class MemoryMappedTensorStorage(Storage):
    """Deprecated memory-mapped, on-disk storage for nested tensor datasets.

    Stores data points consisting of (possibly nested tuples/dicts of)
    tensors of fixed dtypes and shapes as memory-mapped files under the
    given directory, creating them on `dump_dataset` or reattaching if they
    already exist. Implements `__getitem__` (and inherits `__len__`), so it
    can be used as a torch `Dataset`. It does _not_ keep track of which
    slots have or have not been populated.

    Args:
        directory: Path to a directory holding the memory-mapped files.
    """
    def __init__(self, directory: str) -> None:
        warn(
            f"""{self.__class__.__name__} will be deprecated very soon. Use FileTensorStorage
            instead. {self.__class__.__name__} is currently not fully functional, as some of the
            necessary parts of the interface have been modified and simplified. """,
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(directory)
        # Nested tuple/dict of memory-mapped tensors; created lazily by
        # dump_dataset from the first data point's structure.
        self._storage: Optional[NestedTensors] = None
    @staticmethod
    def _create_mmap_tensors(path: str, data_point: NestedTensors, length: int) -> NestedTensors:
        # Mirror the nested structure of `data_point` with mmap-backed
        # tensors of leading dimension `length`; leaf tensors become files,
        # tuples/dicts become directories.
        if isinstance(data_point, torch.Tensor):
            os.makedirs(os.path.dirname(path), exist_ok=True)
            filename = f"{path}.pt"
            return mmap_tensor(filename, size=(length, *data_point.size()), dtype=data_point.dtype)
        elif isinstance(data_point, tuple):
            return tuple(
                MemoryMappedTensorStorage._create_mmap_tensors(
                    os.path.join(path, f"{i}.pt"), data_point[i], length
                )
                for i in range(len(data_point))
            )
        elif isinstance(data_point, dict):
            return {
                key: MemoryMappedTensorStorage._create_mmap_tensors(
                    os.path.join(path, f"{key}.pt"), data_point[key], length
                )
                for key in data_point
            }
        else:
            raise TypeError(f"Expected nested tuple/dict of tensors, found {type(data_point)}.")
    @staticmethod
    def _get(storage: NestedTensors, idx: int) -> NestedTensors:
        # Recursively slice index `idx` out of every leaf tensor.
        if isinstance(storage, torch.Tensor):
            return storage[idx]
        elif isinstance(storage, tuple):
            return tuple(MemoryMappedTensorStorage._get(t, idx) for t in storage)
        elif isinstance(storage, dict):
            return {key: MemoryMappedTensorStorage._get(t, idx) for key, t in storage.items()}
        else:
            raise TypeError(f"Expected nested tuple/dict of tensors, found {type(storage)}.")
    def __getitem__(self, idx: int) -> NestedTensors:
        """Read the item stored at index `idx`."""
        return self._get(self._storage, idx)
    @staticmethod
    def _set(storage: NestedTensors, idx: int, data_point: NestedTensors) -> None:
        # Recursively write `data_point` into slot `idx`, checking that its
        # structure and dtypes match the storage exactly.
        if isinstance(storage, torch.Tensor):
            assert isinstance(data_point, torch.Tensor)
            assert data_point.dtype is storage.dtype
            storage[idx] = data_point
        elif isinstance(storage, tuple):
            assert isinstance(data_point, tuple)
            assert len(data_point) == len(storage)
            for i in range(len(storage)):
                MemoryMappedTensorStorage._set(storage[i], idx, data_point[i])
        elif isinstance(storage, dict):
            assert isinstance(data_point, dict)
            assert set(data_point.keys()) == set(storage.keys())
            for key in storage:
                MemoryMappedTensorStorage._set(storage[key], idx, data_point[key])
        else:
            raise TypeError(f"Expected nested tuple/dict of tensors, found {type(storage)}.")
    def dump_dataset(self, ds):
        # Create the mmap files from the first item's structure, then copy
        # every data point into its slot.
        self._length = len(ds)
        self._storage = self._create_mmap_tensors(self._directory, ds[0], self._length)
        for idx in range(len(self)):
            self._set(self._storage, idx, ds[idx])
class FileTensorStorage(Storage):
    """Permanent storage of nested tensor datasets as one file per item.

    Each data point ``i`` is serialized with ``torch.save`` to ``<dir>/i.pt``.
    Implements ``__getitem__`` (and inherits ``__len__``), so it can be used
    as a torch ``Dataset``; ``dump_dataset`` populates the directory. It does
    _not_ keep track of which slots have or have not been populated.

    Args:
        directory: Path to the directory holding the per-item files.
    """

    def __init__(self, directory: str) -> None:
        super().__init__(directory)

    def dump_dataset(self, ds: torch.utils.data.Dataset) -> None:
        """Write every item of `ds` to its own .pt file."""
        for index in range(len(ds)):
            torch.save(ds[index], self._compose_file_path_from_index(index))

    def __getitem__(self, idx: int) -> Any:
        # Lazily discover the dataset length on first access.
        if not hasattr(self, "_length"):
            self.load_dataset(None)
        return torch.load(self._compose_file_path_from_index(idx))

    def load_dataset(self, directory: Union[str, Path]):
        """Count the stored .pt files to recover the dataset length."""
        self._length = sum(
            1 for entry in os.listdir(self._directory) if entry.endswith(".pt")
        )

    def _compose_file_path_from_index(self, idx: int) -> str:
        """File path backing data point `idx`."""
        return os.path.join(self._directory, f"{idx}.pt")
| awslabs/Renate | src/renate/memory/storage.py | storage.py | py | 6,650 | python | en | code | 251 | github-code | 36 | [
{
"api_name": "typing.Union",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torch.dtype",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.from_file",
"lin... |
10933710251 | from django.shortcuts import render, get_object_or_404
from django.shortcuts import redirect
from allauth.socialaccount.models import SocialAccount
from .models import Repository, Milestone, Task
import requests
from .scripts import sync_repos
from .forms import MilestoneForm, TaskForm
def homepage(request):
    """Render the roadmap landing page."""
    template_name = 'roadmap/home.html'
    return render(request, template_name)
def create_new_roadmap_subsite(request):
    """List the user's GitHub repositories so one can be picked for a roadmap.

    Anonymous users get the bare template; for authenticated users the GitHub
    API is queried and the local Repository table is synced.
    """
    if not request.user.is_authenticated:
        return render(request, 'roadmap/create_new_roadmap.html')
    # NOTE(review): this fetches the SocialAccount object and relies on its
    # __str__ producing the GitHub login -- confirm for the allauth version.
    username = SocialAccount.objects.get(user=request.user)
    url = f"https://api.github.com/users/{username}/repos"
    # Timeout added: without one, a stalled GitHub response hangs the view
    # (and its worker) indefinitely.
    response = requests.get(url, timeout=10)
    if response.status_code == 200:
        repos = response.json()
        sync_repos.sync(repos)
    else:
        repos = []
    context = {"repos": repos}
    return render(request, 'roadmap/create_new_roadmap.html', context)
def create_new_roadmap(request, repository_id):
    # Roadmap editor: renders existing milestones/tasks for a repository and
    # handles two independent forms (milestone + task) posted from one page.
    repository = get_object_or_404(Repository, git_id=repository_id)
    milestones = Milestone.objects.filter(repository=repository)
    tasks = Task.objects.filter(repository=repository)
    milestone_form = MilestoneForm()
    task_form = TaskForm(repository=repository)
    context = {
        'milestone_form': milestone_form,
        'milestones': milestones,
        'task_form': task_form,
        'tasks': tasks,
        'repository': repository,
    }
    if request.method == 'POST':
        milestone_form = MilestoneForm(request.POST)
        task_form = TaskForm(request.POST, repository=repository)
        # The POSTed payload names ('milestoneform'/'taskform') identify
        # which of the two forms was submitted.
        if 'milestoneform' in request.POST:
            if milestone_form.is_valid():
                milestone = milestone_form.save(commit=False)
                milestone.repository = repository
                milestone.save()
        if 'taskform' in request.POST:
            if task_form.is_valid():
                task = task_form.save(commit=False)
                task.repository = repository
                task.save()
        # NOTE(review): no redirect after a successful POST -- refreshing the
        # page resubmits the form (PRG pattern not applied).
    return render(request, 'roadmap/create_roadmap.html', context)
def my_roadmaps_subsite(request):
    """List the user's GitHub repositories on the "my roadmaps" page.

    Anonymous users get the bare template; for authenticated users the GitHub
    API is queried and the local Repository table is synced.
    """
    if not request.user.is_authenticated:
        return render(request, 'roadmap/my_roadmaps.html')
    # NOTE(review): relies on SocialAccount.__str__ producing the GitHub
    # login -- confirm for the installed allauth version.
    username = SocialAccount.objects.get(user=request.user)
    url = f"https://api.github.com/users/{username}/repos"
    # Timeout added: without one, a stalled GitHub response hangs the view.
    response = requests.get(url, timeout=10)
    if response.status_code == 200:
        repos = response.json()
        sync_repos.sync(repos)
    else:
        repos = []
    context = {"repos": repos}
    return render(request, 'roadmap/my_roadmaps.html', context)
def roadmap(request, repository_id):
    """Read-only roadmap view for a single repository."""
    repository = get_object_or_404(Repository, git_id=repository_id)
    context = {
        'milestones': Milestone.objects.filter(repository=repository),
        'repository': repository,
    }
    return render(request, 'roadmap/roadmap.html', context)
def mark_task_done(request, task_id):
    """Flag a task as completed, then return to its repository's roadmap."""
    task = get_object_or_404(Task, pk=task_id)
    task.isdone = True
    task.save()
    repository_git_id = task.repository.git_id
    # Relative redirect instead of the hard-coded http://localhost:8000 host,
    # so the view also works outside the development server.
    return redirect(f'/roadmaps/{repository_git_id}/')
def mark_task_undone(request, task_id):
    """Clear a task's done flag, then return to its repository's roadmap."""
    task = get_object_or_404(Task, pk=task_id)
    task.isdone = False
    task.save()
    repository_git_id = task.repository.git_id
    # Relative redirect instead of the hard-coded http://localhost:8000 host,
    # so the view also works outside the development server.
    return redirect(f'/roadmaps/{repository_git_id}/')
def delete_task(request, task_id):
    """Delete a task, then return to its repository's roadmap editor."""
    task = get_object_or_404(Task, id=task_id)
    repository_git_id = task.repository.git_id
    task.delete()
    # Relative redirect instead of the hard-coded localhost:8000 host.
    return redirect(f'/create_new_roadmap/{repository_git_id}/')
def delete_milestone(request, milestone_id):
milestone = get_object_or_404(Milestone, id=milestone_id)
repository_git_id = milestone.repository.git_id
milestone.delete()
return redirect(f'http://localhost:8000/create_new_roadmap/{repository_git_id}/') | MichalKozlowskii/git-roadmap | roadmap/views.py | views.py | py | 3,907 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "allauth.socialaccount.models.SocialAccount.objects.get",
"line_number": 16,
"usage_type":... |
38715613362 | #!/usr/bin/env python3
import json
def sum_numbers(data, ignore_red=False):
    """Recursively sum all numbers in a JSON-like structure.

    Args:
        data: Any value produced by ``json.load`` (dict/list/str/number/None).
        ignore_red: If True, any dict that has the *value* 'red' contributes
            nothing to the sum (its whole subtree is skipped).

    Returns:
        The total of all numeric values found.
    """
    if isinstance(data, list):
        return sum(sum_numbers(element, ignore_red) for element in data)
    if isinstance(data, str):
        return 0
    # Accept floats too: JSON numbers are not necessarily integers (the
    # original returned 0 for floats via the "Unknown type" fallback).
    if isinstance(data, (int, float)):
        return data
    if isinstance(data, dict):
        # A 'red' value poisons the whole object when ignore_red is set.
        if ignore_red and 'red' in data.values():
            return 0
        return sum(
            sum_numbers(key, ignore_red) + sum_numbers(value, ignore_red)
            for key, value in data.items()
        )
    if data is None:
        return 0
    print("Unknown type", type(data))
    return 0
# Advent of Code 2015 day 12: load the puzzle JSON and print the part-1 total.
with open('input.txt', 'r') as f:
    data = json.load(f)
print(sum_numbers(data))
print(sum_numbers(data, True)) | lvaughn/advent | 2015/12/accounting.py | accounting.py | py | 778 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 28,
"usage_type": "call"
}
] |
5558299385 | from unidecode import unidecode
import unittest
class Event:
    """A single fantasy-football event (e.g. a goal, or a keeper's saves).

    Equality is attribute-based so events can be diffed between scrapes.
    """

    def __init__(self, name, player, quantity):
        self.name = name
        self.player = player
        self.quantity = quantity

    def __str__(self):
        return ', '.join([self.name, unidecode(self.player), str(self.quantity)])

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    @staticmethod
    def GetEventUpdates(oldEvents, newEvents):
        """Return the events from newEvents that are genuinely new.

        Non-"Saves" events count when they did not appear in oldEvents.
        "Saves" events count only when the keeper completed another full
        block of 3 saves (points are awarded per 3 saves), or when the
        keeper had no saves recorded before.
        """
        result = []
        # Materialize: filter() is a one-shot iterator in Python 3, so the
        # original len(filter(...)) raised TypeError.
        newSavesEvents = [e for e in newEvents if e.name == "Saves"]
        if len(newSavesEvents) > 0:
            oldSavesEvents = [e for e in oldEvents if e.name == "Saves"]
            if len(oldSavesEvents) > 0:
                for newSavesEvent in newSavesEvents:
                    oldSavesEvent = next((x for x in oldSavesEvents if x.player == newSavesEvent.player), None)
                    # Integer division restores the Python 2 semantics: an
                    # update matters only when another multiple of 3 saves
                    # has been crossed (true division made 3->5 look new).
                    if not oldSavesEvent or oldSavesEvent.quantity // 3 < newSavesEvent.quantity // 3:
                        result.append(newSavesEvent)
            else:
                result.extend(newSavesEvents)
        result.extend(e for e in newEvents if e not in oldEvents and e.name != "Saves")
        return result
class TestEvent(unittest.TestCase):
    """Tests for Event.GetEventUpdates: per-3-saves thresholds and passthrough."""
    def test_onePlayerNoSavesIncrement(self):
        e1 = Event("Saves", u"Cech", 3)
        e2 = Event("Saves", u"Cech", 3)
        events = Event.GetEventUpdates([e1], [e2])
        self.assertEqual(len(events), 0)
    def test_onePlayerMinorSavesIncrement(self):
        e1 = Event("Saves", u"Cech", 3)
        e2 = Event("Saves", u"Cech", 5)
        events = Event.GetEventUpdates([e1], [e2])
        self.assertEqual(len(events), 0)
    def test_onePlayerSavesDecrement(self):
        e1 = Event("Saves", u"Cech", 3)
        e2 = Event("Saves", u"Cech", 2)
        events = Event.GetEventUpdates([e1], [e2])
        self.assertEqual(len(events), 0)
    def test_onePlayerValidSavesIncrement(self):
        e1 = Event("Saves", u"Cech", 3)
        e2 = Event("Saves", u"Cech", 6)
        events = Event.GetEventUpdates([e1], [e2])
        self.assertEqual(len(events), 1)
        self.assertEqual(events[0], e2)
    def test_onePlayerLargeValidSavesIncrement(self):
        # Renamed: this method previously duplicated the name of the test
        # above, silently shadowing it so the first variant never ran.
        e1 = Event("Saves", u"Cech", 3)
        e2 = Event("Saves", u"Cech", 14)
        events = Event.GetEventUpdates([e1], [e2])
        self.assertEqual(len(events), 1)
        self.assertEqual(events[0], e2)
    def test_onePlayerNewSaves(self):
        e1 = Event("Foo", u"Cech", 0)
        e2 = Event("Saves", u"Cech", 14)
        events = Event.GetEventUpdates([e1], [e2])
        self.assertEqual(len(events), 1)
        self.assertEqual(events[0], e2)
    def test_onePlayerOldSaves(self):
        e1 = Event("Saves", u"Cech", 3)
        e2 = Event("Foo", u"Cech", 14)
        events = Event.GetEventUpdates([e1], [e2])
        self.assertEqual(len(events), 1)
        self.assertEqual(events[0].name, "Foo")
    def test_twoPlayersNoSavesIncrement(self):
        e1 = Event("Saves", u"Cech", 3)
        e2 = Event("Saves", u"Cech", 3)
        e3 = Event("Saves", u"Guzan", 5)
        e4 = Event("Saves", u"Guzan", 5)
        events = Event.GetEventUpdates([e1, e3], [e2, e4])
        self.assertEqual(len(events), 0)
    def test_twoPlayersMinorSavesIncrement(self):
        e1 = Event("Saves", u"Cech", 3)
        e2 = Event("Saves", u"Cech", 5)
        e3 = Event("Saves", u"Guzan", 6)
        e4 = Event("Saves", u"Guzan", 8)
        events = Event.GetEventUpdates([e1, e3], [e2, e4])
        self.assertEqual(len(events), 0)
    def test_twoPlayersValidSavesIncrement(self):
        e1 = Event("Saves", u"Cech", 3)
        e2 = Event("Saves", u"Cech", 6)
        e3 = Event("Saves", u"Guzan", 7)
        e4 = Event("Saves", u"Guzan", 9)
        events = Event.GetEventUpdates([e1, e3], [e2, e4])
        self.assertEqual(len(events), 2)
        self.assertEqual(events[0], e2)
        self.assertEqual(events[1], e4)
    def test_twoPlayersOnePlayerValidSavesIncrement(self):
        e1 = Event("Saves", u"Cech", 3)
        e2 = Event("Saves", u"Cech", 5)
        e3 = Event("Saves", u"Guzan", 7)
        e4 = Event("Saves", u"Guzan", 9)
        events = Event.GetEventUpdates([e1, e3], [e2, e4])
        self.assertEqual(len(events), 1)
        self.assertEqual(events[0], e4)
    def test_twoPlayersNewSaves(self):
        e1 = Event("Foo", u"Cech", 0)
        e2 = Event("Saves", u"Cech", 14)
        e3 = Event("Saves", u"Guzan", 7)
        events = Event.GetEventUpdates([e1], [e2, e3])
        self.assertEqual(len(events), 2)
        self.assertEqual(events[0], e2)
        self.assertEqual(events[1], e3)
    def test_twoPlayersOnePlayerNewSaves(self):
        e1 = Event("Saves", u"Cech", 14)
        e2 = Event("Saves", u"Cech", 14)
        e3 = Event("Saves", u"Guzan", 7)
        events = Event.GetEventUpdates([e1], [e2, e3])
        self.assertEqual(len(events), 1)
        self.assertEqual(events[0], e3)
# Run the unit-test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| arxoclay/fpl-updates | event.py | event.py | py | 5,007 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unidecode.unidecode",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "unittest.main",
"line_number": 133,
"usage_type": "call"
}
] |
24524063847 | from django.core.paginator import Paginator
from django.shortcuts import get_object_or_404, render
from .models import News, Tag
def page_list(object_list, request, per_page=10):
    """Paginate a queryset/sequence using the ``page`` GET parameter.

    Args:
        object_list: Queryset or sequence to paginate. (Renamed from ``set``,
            which shadowed the builtin.)
        request: Current HttpRequest; the page number is read from
            ``request.GET['page']``.
        per_page: Items per page; defaults to 10 as before.

    Returns:
        The ``Page`` object for the requested page.
    """
    paginator = Paginator(object_list, per_page)
    page_number = request.GET.get('page')
    # get_page() tolerates missing/invalid page numbers by clamping.
    page_obj = paginator.get_page(page_number)
    return page_obj
# Front page
def index(request):
    """Front page: every news item, paginated 10 per page."""
    context = {
        'page_obj': page_list(News.objects.all(), request),
    }
    return render(request, 'news/index.html', context)
# Page listing news filtered by a single tag
def news_tag(request, slug):
    """News list restricted to the tag identified by ``slug`` (404 if unknown)."""
    tag = get_object_or_404(Tag, slug=slug)
    tagged_news = News.objects.filter(tag=tag)
    context = {
        'page_obj': page_list(tagged_news, request),
        'tag': tag,
    }
    return render(request, 'news/news_tag.html', context)
def news_detail(request, pk):
    """Detail page for a single news item (404 if the pk is unknown)."""
    new = get_object_or_404(News, pk=pk)
    return render(request, 'news/news_detail.html', {'new': new})
| Gabrie1002/zavodnews | zavodnews/news/views.py | views.py | py | 1,127 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.core.paginator.Paginator",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "models.News.objects.all",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "models.News.objects",
"line_number": 16,
"usage_type": "attribute"
},
{
"a... |
3770442163 | from fastai.vision.widgets import *
from fastai.vision.all import *
from pathlib import Path
import PIL
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import color
# Pickled fastai learner that classifies a photo's wallpaper *style*.
file_name='style.pkl'
learn_inference = load_learner(Path()/file_name)
#defining wallpaper class for testing
class Wallpaper():
    """A catalogue entry: a wallpaper plus its dominant color and style label."""

    def __init__(self, id, color, style):
        # Identifier used to locate the wallpaper image on disk.
        self.id = id
        # Dominant color label, e.g. "green" or "darkblue".
        self.color = color
        # Style category, e.g. "texture", "stripe", "nature".
        self.style = style
# Style labels the classifier distinguishes.
categories = ['texture', 'geometric', 'stripe', 'nature']
# Sample list of wallpapers used as the recommendation catalogue.
wallpapers = [Wallpaper("26", "green", "texture"), Wallpaper("24", "darkblue", "stripe"), Wallpaper("30", "gray", "texture"), Wallpaper("7", "darkblue", "geometric"), Wallpaper("31", "darkblue", "nature"), Wallpaper("22", "gray", "nature")]
def give_recommendation(image_path):
    """Recommend up to six wallpapers matching a room photo.

    The photo's style is predicted by the fastai learner and its dominant
    color by the ``color`` module. Wallpapers matching both color and style
    are preferred; the list is then topped up with style-only matches.

    Args:
        image_path: Path to the query photo.

    Returns:
        A list of at most six ``Wallpaper`` objects (no duplicates).
    """
    # Context manager so the file handle is always closed (the original
    # leaked it).
    with open(image_path, 'rb') as image:
        pred, pred_idx, probs = learn_inference.predict(PILImage.create(image))
    style = pred
    image_color = color.compare(image_path)
    matches = []
    # First preference: same color AND same style.
    for wallpaper in wallpapers:
        if wallpaper.color == image_color and wallpaper.style == style:
            matches.append(wallpaper)
            if len(matches) == 6:
                return matches
    # Top up with style-only matches, skipping wallpapers already chosen
    # (the original re-added color+style matches, producing duplicates).
    for wallpaper in wallpapers:
        if wallpaper.style == style and wallpaper not in matches:
            matches.append(wallpaper)
            if len(matches) == 6:
                return matches
    return matches
# test
# Smoke check against a local sample image; adjust the path for your machine.
print(give_recommendation("/Users/Leah/boho/images/test/1.jpg"))
| leahuriarte/recfromimage | style.py | style.py | py | 1,926 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "color.compare",
"line_number": 42,
"usage_type": "call"
}
] |
6508824338 | import numpy as np
import argparse
import csv
import os

import chainer
import chainer.functions as F
import chainer.links as L
from chainer import cuda
from chainer import serializers

from fix_data import OneDimSensorData
"""
LSTMを書いて, 訓練するクラス
"""
class LSTM(chainer.Chain):
    # 1-D sensor sequence model: per-step next-value regression head
    # (l1 -> l2 -> l3) plus a 2-class classification head (l4).
    def __init__(self):
        super().__init__(
            l1=L.Linear(1, 5),
            l2=L.LSTM(5, 30),
            l3=L.Linear(30, 1),
            l4=L.Linear(1, 2)
        )
    def predictor(self, x):
        # For evaluation on test data: feed the sequence step by step and
        # return hard 0/1 labels from the final classification logits.
        self.l2.reset_state()
        row = x.shape[0]
        col = x.shape[1]
        for i in range(col):
            h = self.l1(xp.array(x[:, i].reshape(row, 1), dtype=xp.float32))
            h = self.l2(h)
            h = self.l3(h)
        h = self.l4(h)
        return [0 if data[0] > data[1] else 1 for data in h.data] # WATCH: rewrite with map later
    def __call__(self, x, t):
        # Compute the loss for each data point: next-step MSE at every step
        # plus classification cross-entropy.
        self.l2.reset_state()
        row = x.shape[0]
        col = x.shape[1]
        accum_loss = None
        for i in range(col):
            h = self.l1(xp.array(x[:, i].reshape(row, 1), dtype=xp.float32))
            h = self.l2(h)
            h = self.l3(h)
            if i != col-1:
                loss = F.mean_squared_error(h, xp.array(x[:, i+1].reshape(row, 1), dtype=xp.float32))
                accum_loss = loss if accum_loss is None else accum_loss + loss
            # NOTE(review): indentation was lost in extraction -- the
            # cross-entropy term is reconstructed as per-step; it assumes
            # col > 1, otherwise accum_loss is still None here.
            h = self.l4(h)
            accum_loss += F.softmax_cross_entropy(h, xp.array(t, dtype=xp.int32))
            # print(loss.data)
        return accum_loss
# gpu
# Pass a non-negative GPU id to train on CUDA via cupy; -1 keeps NumPy/CPU.
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', '-g', default=-1, type=int,
                    help='GPU ID')
args = parser.parse_args()
# Training and test data.
one_dim_sensor = OneDimSensorData()
one_dim_sensor.load_csv()
one_dim_sensor.shuffle()
train, train_label, test, test_label, one_label_counts = one_dim_sensor.divide_train_and_test()
# model
model = LSTM()
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
# for cuda: xp is the array module used everywhere (cupy on GPU, numpy on CPU)
xp = cuda.cupy if args.gpu >= 0 else np
if args.gpu >= 0:
    cuda.get_device(args.gpu).use()
    model.to_gpu()
# training loop
display = 1000
total_loss = 0
epoch = 500
n_size = len(train)
batch_size = 1000
# question_num = len(test)
loss_plt_ary = []
accuracy_plt_ary = []
recall_plt_ary = []
for i in range(epoch):
    # Fresh random permutation each epoch for mini-batch sampling.
    sffindx = np.random.permutation(n_size)
    for j in range(0, n_size, batch_size):
        x = train[sffindx[j:(j+batch_size) if (j+batch_size) < n_size else n_size]]
        y = train_label[sffindx[j:(j+batch_size) if (j+batch_size) < n_size else n_size]]
        loss = model(x, y)
        # print(loss.data)
        model.zerograds()
        loss.backward()
        optimizer.update()
        if j%display == 0:
            print("{} epoch {} number, loss {}".format(i, j, loss.data))
    last_loss = loss.data
    # Evaluate on the test data after each epoch.
    answer_num = 0
    recall = 0
    for t_d, t_l in zip(test, test_label):
        answers = model.predictor(t_d)
        bool_ans = (answers==t_l)
        for bool in bool_ans:
            if bool:
                answer_num += 1
        for answer, tl in zip(answers, t_l):
            if answer == tl and answer:
                recall += 1
    # NOTE(review): 7410 appears to be the hard-coded test-set size -- verify.
    print('main/loss {}, accuracy rate {}, recall rate {}, one_counts {}'.format(last_loss, answer_num/7410, recall/one_label_counts, one_label_counts))
    loss_plt_ary.append(last_loss)
    accuracy_plt_ary.append(answer_num/7410)
    recall_plt_ary.append(recall/one_label_counts)
serializers.save_npz("fixed_lstm_model_2", model)
def write_csv(path, ary):
    """Dump ``ary`` as a single CSV row to ``results/lstm_fixed_result/<path>.csv``.

    Creates the output directory if it is missing -- the original raised
    FileNotFoundError on a fresh checkout.
    """
    out_dir = 'results/lstm_fixed_result'
    os.makedirs(out_dir, exist_ok=True)
    with open(os.path.join(out_dir, '%s.csv' % path), 'w') as f:
        writer = csv.writer(f, lineterminator='\n')
        writer.writerow(ary)
# Persist the per-epoch training curves for later plotting.
write_csv('loss', loss_plt_ary)
write_csv('accuracy', accuracy_plt_ary)
write_csv('recall', recall_plt_ary)
# Write the data to csv
with open('results/lstm_fixed_result/loss.csv', 'w') as f:
writer = csv.writer(f, lineterminator='\n')
writer
| awkrail/laugh_maker | lstm_new.py | lstm_new.py | py | 4,159 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "chainer.Chain",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "chainer.links.Linear",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "chainer.links",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "chainer.links.L... |
23603010540 | # -*- coding: utf-8 -*-
# NOTES
# All mask-related mathematics happens assuming (and only after conversion to) the mask image as a 2D array [... x ...]
# No channels=1 are used for the mask. Of course, while reading, it is read as [.. x .. x 1];
import os, cv2, pickle as pk, numpy as np
import params
from clustering import dominant_clusters
###########################################################################################################
def simply_erode_opencv(inp_mask, adj_labels_to_erode):
    """Return a copy of ``inp_mask`` with every listed label eroded.

    Each label is lifted into a binary mask, eroded with a 3x3 elliptical
    kernel (2 iterations), and pasted back in place.
    """
    result = inp_mask.copy()
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    for label in adj_labels_to_erode:
        binary = np.zeros(result.shape)
        binary[result == label] = 1
        # Clear the label, then re-stamp only its eroded footprint.
        result[result == label] = 0
        shrunk = cv2.erode(binary, kernel, iterations=2)
        result[shrunk == 1] = label
    return result
def get_bw_mask(mask_path, k_gap_erode=4):
    """Combine all partial mask files under ``mask_path`` into one 2-D
    black/white mask, eroding any masks that come within ``k_gap_erode``
    pixels of another mask.

    Returns a 2-D array with 255 on mask pixels, 0 elsewhere.
    """
    mask_files = os.listdir(mask_path)
    # Fix: the accumulator was initialized as a Python list ([]), so
    # ``mask += partial_mask`` silently *extended a list* with array rows
    # and the 2-D window slicing below raised TypeError. Use None and
    # assign the first array explicitly.
    mask = None
    partial_mask_label = 1
    adj_labels_to_erode, k_gap = [], k_gap_erode
    for mask_file in mask_files:
        partial_mask = cv2.imread(os.path.join(mask_path, mask_file), cv2.IMREAD_GRAYSCALE)
        partial_mask[partial_mask > 0] = partial_mask_label
        partial_mask_inds = np.where(partial_mask == partial_mask_label)
        if mask is None:
            mask = partial_mask
        else:
            # NOTE(review): overlapping masks sum their labels here -- the
            # adjacency scan below assumes labels stay distinct.
            mask += partial_mask
        found_something_close_by = 0
        for p in range(partial_mask_inds[0].shape[0]):
            row = partial_mask_inds[0][p]; col = partial_mask_inds[1][p]
            if row-k_gap >= 0 and row+k_gap < partial_mask.shape[0] and col-k_gap >= 0 and col+k_gap < partial_mask.shape[1]:
                window = mask[row-k_gap:row+k_gap, col-k_gap:col+k_gap]
                other_labels = window[(window > 0) & (window != partial_mask_label)]
                for label in other_labels:
                    if label not in adj_labels_to_erode:
                        adj_labels_to_erode.append(label)
                        found_something_close_by = 1
        if found_something_close_by:
            adj_labels_to_erode.append(partial_mask_label)
        # Continue adding more partial masks.
        partial_mask_label += 1
    for ind, lab in enumerate(adj_labels_to_erode):
        adj_labels_to_erode[ind] = int(lab)
    # Simply erode the label clusters that touched each other.
    new_mask = simply_erode_opencv(mask, adj_labels_to_erode)
    bw_mask = new_mask.copy(); bw_mask[bw_mask > 0] = 255
    if len(adj_labels_to_erode) > 0:
        print('These many masks were found closeby; Eroded Brutally!!', len(adj_labels_to_erode))
    return bw_mask
#############################################################################################################
def thisPatch(image, bw_mask, curr_row, curr_col, base_name, vertical_flip=True, horizontal_flip=True, rotation_90s=True, zoomIn=True):
    """Cut one patch at (curr_row, curr_col) and randomly augment it.

    Defaults were added for the four augmentation flags so callers that only
    pass the patch location (e.g. generate_train_data_only_resize, whose
    comment asks for flips/rotations/zoom) work; the original signature made
    such calls raise TypeError. Existing callers pass the flags explicitly
    and are unaffected.

    Returns (patch_image, patch_mask, patch_name); the name records which
    augmentations fired.
    """
    # Desired patch geometry from the project config.
    row_length, col_length, channels = params.image_patch_rows, params.image_patch_cols, params.image_patch_channels
    # Crop the patch from image and mask.
    new_image = image[curr_row:curr_row+row_length, curr_col:curr_col+col_length, :channels].copy()
    new_mask = bw_mask[curr_row:curr_row+row_length, curr_col:curr_col+col_length].copy()
    new_name = base_name
    # Each enabled augmentation fires independently with probability 0.5.
    if vertical_flip and np.random.uniform(0, 1) >= 0.5:
        new_image = np.flip(new_image, axis=0)
        new_mask = np.flip(new_mask, axis=0)
        new_name = new_name + '_vflip'
    if horizontal_flip and np.random.uniform(0, 1) >= 0.5:
        new_image = np.flip(new_image, axis=1)
        new_mask = np.flip(new_mask, axis=1)
        new_name = new_name + '_hflip'
    if rotation_90s and np.random.uniform(0, 1) >= 0.5 and new_image.shape[0] == new_image.shape[1]:
        # Using OpenCV, input must be SQUARE else bizarre results can be seen.
        rows, cols, _ = new_image.shape
        M = cv2.getRotationMatrix2D((cols/2, rows/2), 90*(np.random.randint(3)+1), 1)
        new_image = cv2.warpAffine(new_image, M, (cols, rows))
        new_mask = cv2.warpAffine(new_mask, M, (cols, rows))
        new_name = new_name + '_r90'
    if zoomIn and np.random.uniform(0, 1) >= 0.5:
        # Upscale 2x, then capture a random crop of the target patch size.
        new_image_big = cv2.resize(new_image, (2*new_image.shape[1], 2*new_image.shape[0]), interpolation=cv2.INTER_LINEAR)
        new_mask_big = cv2.resize(new_mask, (2*new_mask.shape[1], 2*new_mask.shape[0]), interpolation=cv2.INTER_LINEAR)
        start_row = np.random.randint(new_image_big.shape[0]-row_length)
        start_col = np.random.randint(new_image_big.shape[1]-col_length)
        new_image = new_image_big[start_row:start_row+row_length, start_col:start_col+col_length, :]
        new_mask = new_mask_big[start_row:start_row+row_length, start_col:start_col+col_length]
        new_name = new_name + '_zoomin'
    return new_image, new_mask, new_name
def extract_patches_and_save(item, image, bw_mask, vertical_flip, horizontal_flip, rotation_90s, zoomIn):
    # Slide a window (stride params.win_shift_rows/cols) over the full image
    # and collect augmented patches; the extra if-blocks cover the right and
    # bottom borders when the stride does not divide the image size evenly.
    # Desired values
    row_length, col_length = params.image_patch_rows, params.image_patch_cols;
    # Other Initializations
    extracted_images, extracted_masks, extracted_names = [], [], [];
    img_rows, img_cols, img_channels = image.shape;
    # raise Exception('Channels<Desired Channels') if(img_channels<channels) else print('Shapes of Input Read: ', image.shape, bw_mask.shape)
    # Generate
    curr_gen_count=1;
    curr_row = 0;
    while(curr_row+row_length<=img_rows):
        curr_col = 0;
        while(curr_col+col_length<=img_cols):
            base_name = str(item)+'_'+str(row_length)+'x'+str(col_length)+'_'+str(curr_gen_count);
            new_image, new_mask, new_gen_name = thisPatch(image, bw_mask, curr_row, curr_col, base_name, vertical_flip, horizontal_flip, rotation_90s, zoomIn);
            # Keep a patch only if it contains enough mask pixels (the first
            # patch is always kept so the result is never empty).
            if (np.sum(new_mask==0)/(new_mask.shape[0]*new_mask.shape[1]))<params.max_non_mask_pixel_percent or len(extracted_names)<1:
                extracted_images.append(new_image); extracted_masks.append(new_mask); extracted_names.append(new_gen_name);
                curr_gen_count+=1;
            curr_col+=params.win_shift_cols;
        # Saving a last patch in the given set of fixed rows and varying col number
        if curr_col>0 and curr_col<img_cols and curr_col+col_length>img_cols and (img_cols-col_length)%params.win_shift_cols!=0: #Dont go to one of earlier indices
            curr_col = img_cols-col_length;
            base_name = str(item)+'_'+str(row_length)+'x'+str(col_length)+'_'+str(curr_gen_count);
            new_image, new_mask, new_gen_name = thisPatch(image, bw_mask, curr_row, curr_col, base_name, vertical_flip, horizontal_flip, rotation_90s, zoomIn);
            if (np.sum(new_mask==0)/(new_mask.shape[0]*new_mask.shape[1]))<params.max_non_mask_pixel_percent or len(extracted_names)<1:
                extracted_images.append(new_image); extracted_masks.append(new_mask); extracted_names.append(new_gen_name);
                curr_gen_count+=1;
            curr_col+=params.win_shift_cols;
        curr_row+=params.win_shift_rows;
    # Saving a last patch in the given set of varying row number and varying col number
    if curr_row>0 and curr_row<img_rows and curr_row+row_length>img_rows and (img_rows-row_length)%params.win_shift_rows!=0: #Dont go to one of earlier indices
        curr_row = img_rows-row_length;
        curr_col = 0;
        while(curr_col+col_length<=img_cols):
            base_name = str(item)+'_'+str(row_length)+'x'+str(col_length)+'_'+str(curr_gen_count);
            new_image, new_mask, new_gen_name = thisPatch(image, bw_mask, curr_row, curr_col, base_name, vertical_flip, horizontal_flip, rotation_90s, zoomIn);
            if (np.sum(new_mask==0)/(new_mask.shape[0]*new_mask.shape[1]))<params.max_non_mask_pixel_percent or len(extracted_names)<1:
                extracted_images.append(new_image); extracted_masks.append(new_mask); extracted_names.append(new_gen_name);
                curr_gen_count+=1;
            curr_col+=params.win_shift_cols;
        # Saving a last patch in the given set of fixed rows and varying col number
        if curr_col>0 and curr_col<img_cols and curr_col+col_length>img_cols and (img_cols-col_length)%params.win_shift_cols!=0: #Dont go to one of earlier indices
            curr_col = img_cols-col_length;
            base_name = str(item)+'_'+str(row_length)+'x'+str(col_length)+'_'+str(curr_gen_count);
            new_image, new_mask, new_gen_name = thisPatch(image, bw_mask, curr_row, curr_col, base_name, vertical_flip, horizontal_flip, rotation_90s, zoomIn);
            if (np.sum(new_mask==0)/(new_mask.shape[0]*new_mask.shape[1]))<params.max_non_mask_pixel_percent or len(extracted_names)<1:
                extracted_images.append(new_image); extracted_masks.append(new_mask); extracted_names.append(new_gen_name);
                curr_gen_count+=1;
            curr_col+=params.win_shift_cols;
    return extracted_images, extracted_masks, extracted_names
#############################################################################################################
def generate_train_data_with_augmentation(target_folder): # Patches with FLIPS, ROTATIONS, ZOOMIN
    # For every training sample: build its binary mask, decide the HSV color
    # cluster it belongs to, and extract (possibly several rounds of)
    # augmented patches into target_folder. Name lists are pickled alongside.
    # Sanity check for files of requirement
    with open(params.hsv_clustering_data,'rb') as openFile:
        hsv_clustering = pk.load(openFile);
    clt_model = hsv_clustering['clt']
    print("HSV Cluster Centers: ", np.array(hsv_clustering['cluster_centers_'], dtype='int'));
    del hsv_clustering;
    # Other initializations
    org_names, gen_names, gen_names_ind, gen_names_ind_cnt = [], [], [], []; #gen_names_individual
    for item in os.listdir(params.train_folder_org):
        # Get Paths & Load image,masks
        org_names+=[item]; print(len(org_names));
        image_org_path = os.path.join(params.train_folder_org,item+'/images');
        mask_org_path = os.path.join(params.train_folder_org,item+'/masks');
        image = cv2.imread(os.path.join(image_org_path,os.listdir(image_org_path)[0]));
        bw_mask = get_bw_mask(mask_org_path);
        # Determine which HSV cluster this image belongs to.
        hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV);
        thisCenter, _, _ = dominant_clusters(hsv_image, n_clusters=1);
        centerMapped = clt_model.predict(thisCenter);
        # Clusters 1 and 3 are over-sampled: 4 augmentation rounds + zoom.
        n_iters = 1; vertical_flip=True; horizontal_flip=True; rotation_90s=True; zoomIn=False;
        if centerMapped==1 or centerMapped==3:
            n_iters = 4; zoomIn=True;
        for _ in range(n_iters):
            # Patches with FLIPS, ROTATIONS, ZOOMIN
            extracted_images, extracted_masks, extracted_names = extract_patches_and_save(item, image, bw_mask,
                                        vertical_flip, horizontal_flip, rotation_90s, zoomIn);
            # Write images to folder of interest
            print('No.of Files Generated: {}'.format(len(extracted_names)))
            for ind, name in enumerate(extracted_names):
                cv2.imwrite(os.path.join(target_folder, name+'.png'), extracted_images[ind]);
                cv2.imwrite(os.path.join(target_folder, name+'_mask.png'), extracted_masks[ind]);
            # Save data for future reference and proceed
            gen_names+=extracted_names;
            gen_names_ind+=[extracted_names];
            gen_names_ind_cnt+=[len(extracted_names)];
    with open(os.path.join(target_folder,'org_names.pickle'),'wb') as opfile:
        pk.dump(org_names, opfile); opfile.close();
    with open(os.path.join(target_folder,'gen_names.pickle'),'wb') as opfile:
        pk.dump(gen_names, opfile); opfile.close();
    print('Training Data generated success...');
    return org_names, gen_names, gen_names_ind, gen_names_ind_cnt
def generate_train_data_only_resize(target_folder): # Rotations+Patches
    """Resize each training image/mask to the patch size and save two randomly
    augmented copies each, plus a pickle of the original sample ids.

    Fix: ``thisPatch`` requires the four augmentation flags, but the original
    call passed only (image, mask, 0, 0, item) and raised TypeError on the
    very first item. The flags are now passed explicitly (all enabled, per
    the "Do FLIPS, ROTATIONS, ZOOMIN" intent below).
    """
    org_names, gen_names = [], []  # gen_names_individual
    for item in os.listdir(params.train_folder_org):
        # Get paths & load image, masks.
        org_names += [item]; print(len(org_names))
        image_org_path = os.path.join(params.train_folder_org, item+'/images')
        mask_org_path = os.path.join(params.train_folder_org, item+'/masks')
        image = cv2.imread(os.path.join(image_org_path, os.listdir(image_org_path)[0]))
        bw_mask = get_bw_mask(mask_org_path)
        # Desired values
        row_length, col_length = params.image_patch_rows, params.image_patch_cols
        # Resize to desired size
        image = cv2.resize(image, (col_length, row_length), interpolation=cv2.INTER_LINEAR)
        bw_mask = cv2.resize(bw_mask, (col_length, row_length), interpolation=cv2.INTER_LINEAR)
        for _ in range(2):
            # Do FLIPS, ROTATIONS, ZOOMIN
            # NOTE(review): if both rounds draw the same augmentations the
            # second copy overwrites the first (same name) -- as before.
            new_image, new_mask, new_name = thisPatch(image, bw_mask, 0, 0, item, True, True, True, True)
            # Write images to folder of interest
            cv2.imwrite(os.path.join(target_folder, new_name+'.png'), new_image)
            cv2.imwrite(os.path.join(target_folder, new_name+'_mask.png'), new_mask)
            # Save data for future reference and proceed
            gen_names += [new_name]
    with open(os.path.join(target_folder, 'org_names.pickle'), 'wb') as opfile:
        pk.dump(org_names, opfile)
    with open(os.path.join(target_folder, 'gen_names.pickle'), 'wb') as opfile:
        pk.dump(gen_names, opfile)
    print('Training Data generated success...')
    return org_names, gen_names
############################################################################################################
def load_train_list_with_augmentation(target_folder):
    """Load the pickled list of generated training patch names from ``target_folder``."""
    pickle_path = os.path.join(target_folder, 'gen_names.pickle')
    with open(pickle_path, 'rb') as handle:
        tr_list = pk.load(handle)
    return tr_list
def load_test_list(target_folder):
    """Load the pickled list of original sample ids from ``target_folder``."""
    pickle_path = os.path.join(target_folder, 'org_names.pickle')
    with open(pickle_path, 'rb') as handle:
        ts_list = pk.load(handle)
    return ts_list
############################################################################################################
def generate_original_copies_train_data(source_folder, target_folder):
    """Copy each training sample's first image into ``target_folder`` as
    ``<id>.png`` and pickle the list of sample ids alongside."""
    org_names = []
    for item in os.listdir(source_folder):
        org_names.append(item)
        print(len(org_names))
        # Each sample keeps its image(s) under <id>/images; take the first one.
        images_dir = os.path.join(source_folder, item + '/images')
        first_image = os.listdir(images_dir)[0]
        frame = cv2.imread(os.path.join(images_dir, first_image))
        cv2.imwrite(os.path.join(target_folder, item + '.png'), frame)
    with open(os.path.join(target_folder, 'org_names.pickle'), 'wb') as opfile:
        pk.dump(org_names, opfile)
    print('Training Data generated success...')
    return org_names
def generate_original_copies_test_data(source_folder, target_folder):
    """Copy each test sample's first image into ``target_folder`` as
    ``<id>.png`` and pickle the list of sample ids alongside."""
    org_names = []
    for item in os.listdir(source_folder):
        org_names.append(item)
        print(len(org_names))
        # Each sample keeps its image(s) under <id>/images; take the first one.
        images_dir = os.path.join(source_folder, item + '/images')
        first_image = os.listdir(images_dir)[0]
        frame = cv2.imread(os.path.join(images_dir, first_image))
        cv2.imwrite(os.path.join(target_folder, item + '.png'), frame)
    with open(os.path.join(target_folder, 'org_names.pickle'), 'wb') as opfile:
        pk.dump(org_names, opfile)
    # NOTE(review): the message says "Training" although this handles test
    # data -- kept byte-identical to preserve behavior.
    print('Training Data generated success...')
    return org_names
############################################################################################################
#from train_m3 import prep_image_to_unet
#def make_patches_and_predict(model, bgr_image, threshold_type='global', print_images=False): #'global','otsu','adap+otsu'
# # Create patch->Predict->Save in Mask->Increment each_pixel_cnt for resp. patch
# img_rows, img_cols, channels = bgr_image.shape;
# row_length, col_length = params.image_patch_rows, params.image_patch_cols; # Desired values
# cnt_matrix = np.zeros((bgr_image.shape[0],bgr_image.shape[1],1));
# mask_matrix = np.zeros((bgr_image.shape[0],bgr_image.shape[1],1));
# if(channels<3):
# raise Exception('Channels<Desired Channels')
# curr_gen_count = 1;
# #Patches
# curr_row = 0;
# while(curr_row+row_length<=img_rows):
# curr_col = 0;
# while(curr_col+col_length<=img_cols):
# # newImage and predict\
# #new_bgr_image = bgr_image[curr_row:curr_row+row_length,curr_col:curr_col+col_length,:channels].copy();
# new_bgr_image = bgr_image;
# new_bgr_image, new_ycrcb_image = prep_image_to_unet(None, new_bgr_image);
# new_bgr_image = np.expand_dims(new_bgr_image, axis=0);
# new_ycrcb_image = np.expand_dims(new_ycrcb_image, axis=0)
# pred_mask = model.predict([new_ycrcb_image,new_bgr_image])[0];
# # save details
# mask_matrix[curr_row:curr_row+row_length,curr_col:curr_col+col_length,:1]+= pred_mask;
# cnt_matrix[curr_row:curr_row+row_length,curr_col:curr_col+col_length,:1]+= 1;
# # Increment count and continue
# curr_gen_count+=1;
# curr_col+=params.win_shift_cols;
# # Saving a last patch in the given set of fixed rows and varying col number
# if curr_col>0 and curr_col<img_cols and curr_col+col_length>img_cols and (img_cols-col_length)%params.win_shift_cols!=0: #Dont go to one of earlier indices
# curr_col = img_cols-col_length;
# # newImage and predict\
# new_bgr_image = bgr_image[curr_row:curr_row+row_length,curr_col:curr_col+col_length,:channels].copy();
# new_bgr_image, new_ycrcb_image = prep_image_to_unet(None, new_bgr_image);
# new_bgr_image = np.expand_dims(new_bgr_image, axis=0);
# new_ycrcb_image = np.expand_dims(new_ycrcb_image, axis=0)
# pred_mask = model.predict([new_ycrcb_image,new_bgr_image])[0];
# # save details
# mask_matrix[curr_row:curr_row+row_length,curr_col:curr_col+col_length,:1]+= pred_mask;
# cnt_matrix[curr_row:curr_row+row_length,curr_col:curr_col+col_length,:1]+= 1;
# # Increment count and continue
# curr_gen_count+=1;
# curr_row+=params.win_shift_rows;
# # Saving a last patch in the given set of varying row number and varying col number
# if curr_row>0 and curr_row<img_rows and curr_row+row_length>img_rows and (img_rows-row_length)%params.win_shift_rows!=0: #Dont go to one of earlier indices
# curr_row = img_rows-row_length;
# curr_col = 0;
# while(curr_col+col_length<=img_cols):
# # newImage and predict\
# new_bgr_image = bgr_image[curr_row:curr_row+row_length,curr_col:curr_col+col_length,:channels].copy();
# new_bgr_image, new_ycrcb_image = prep_image_to_unet(None, new_bgr_image);
# new_bgr_image = np.expand_dims(new_bgr_image, axis=0);
# new_ycrcb_image = np.expand_dims(new_ycrcb_image, axis=0)
# pred_mask = model.predict([new_ycrcb_image,new_bgr_image])[0];
# # save details
# mask_matrix[curr_row:curr_row+row_length,curr_col:curr_col+col_length,:1]+= pred_mask;
# cnt_matrix[curr_row:curr_row+row_length,curr_col:curr_col+col_length,:1]+= 1;
# # Increment count and continue
# curr_gen_count+=1;
# curr_col+=params.win_shift_cols;
# # Saving a last patch in the given set of fixed rows and varying col number
# if curr_col>0 and curr_col<img_cols and curr_col+col_length>img_cols and (img_cols-col_length)%params.win_shift_cols!=0: #Dont go to one of earlier indices
# curr_col = img_cols-col_length;
# # newImage and predict\
# new_bgr_image = bgr_image[curr_row:curr_row+row_length,curr_col:curr_col+col_length,:channels].copy();
# new_bgr_image, new_ycrcb_image = prep_image_to_unet(None, new_bgr_image);
# new_bgr_image = np.expand_dims(new_bgr_image, axis=0);
# new_ycrcb_image = np.expand_dims(new_ycrcb_image, axis=0)
# pred_mask = model.predict([new_ycrcb_image,new_bgr_image])[0];
# # save details
# mask_matrix[curr_row:curr_row+row_length,curr_col:curr_col+col_length,:1]+= pred_mask;
# cnt_matrix[curr_row:curr_row+row_length,curr_col:curr_col+col_length,:1]+= 1;
# # Increment count and continue
# curr_gen_count+=1;
# if np.min(cnt_matrix)==0: # Safe Check
# raise Exception('At least one pixel is not in any of the patches! Use np.where() to find it.')
# # Threshold the pixels
# mask_matrix/=cnt_matrix; # Average ##### mask_matrix = (mask_matrix > 0.5).astype(np.uint8) ######
# if threshold_type=='global':
# temp = mask_matrix.copy();
# pred_image_thres = 255*(temp > 0.5);
# pred_image_thres = pred_image_thres.astype(np.uint8);
# elif threshold_type=='otsu':
# temp = mask_matrix.copy();
# pred_image_255 = 255*temp;
# pred_image_255_uint8 = pred_image_255.astype(np.uint8)
# _ , pred_image_thres = cv2.threshold(pred_image_255_uint8, 127, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# elif threshold_type=='adap+otsu':
# #OTSU
# temp = mask_matrix.copy();
# pred_image_255 = 255*temp;
# pred_image_255_uint8 = pred_image_255.astype(np.uint8)
# _ , pred_image_thres1 = cv2.threshold(pred_image_255_uint8, 127, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# # Adaptive Thresholding
# temp = mask_matrix.copy();
# pred_image_255 = 255*temp;
# pred_image_255_uint8 = pred_image_255.astype(np.uint8)
# pred_image_thres2 = cv2.adaptiveThreshold(pred_image_255_uint8, 255,
# adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
# thresholdType=cv2.THRESH_BINARY, blockSize=13, C=0)
# # Global Thresholding AND Adaptive Thresholding
# pred_image_thres = np.logical_or(pred_image_thres1==255,pred_image_thres2==255);
# pred_image_thres = pred_image_thres.astype(np.uint8)
# pred_image_thres = 255*pred_image_thres;
# else:
# print('Required threshold_type unavailable!');
# # Print thye images
# if print_images:
# cv2.imshow( "{0}".format('ThisImage'), bgr_image);
# cv2.imshow( "{0}".format('ThisMask'), pred_image_thres);
# cv2.waitKey(0);
# cv2.destroyAllWindows();
# return pred_image_thres #, mask_matrix
if __name__=="__main__":
    # Data-generation entry points are toggled manually by (un)commenting the
    # calls below; paths come from the shared 'params' module.
    #generate_original_copies_train_data(params.train_folder_org, params.train_folder_gen);
    #generate_original_copies_test_data(params.test_folder_org, params.test_folder_gen);
    #generate_original_copies_test_data(params.test2_folder_org, params.test2_folder_gen);
    #org_names, gen_names, gen_names_ind, gen_names_ind_cnt = generate_train_data_with_augmentation(params.train_folder_gen_m3); #Uses global variable
    #org_names, gen_names = generate_train_data_only_resize(params.train_folder_gen_resize)
    #tr_list = load_train_list_with_augmentation(params.train_folder_gen);
    #ts_list = load_test_list(params.test_folder_gen)
    print('Execution Complete. Please check respective folders as mentioned in params.')
{
"api_name": "cv2.getStructuringElement",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.MORPH_ELLIPSE",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.erod... |
69826559463 | import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, pathpatch_2d_to_3d
import seaborn as sns
from scipy import spatial
from utils import update_matplotlib_rc_parameters
from utils import sph2cart
from utils import sph_normals
def main():
    """Render a 3D figure of a square control-evaluation plane together with
    a spherical averaging surface (integration grid plus unit normals).

    All geometry is built in metres; axis tick labels are shown in mm.
    """
    # averaging surface configuration
    edge_length = 0.02  # edge of the square target area, in m
    target_area_origin = (-edge_length/2, -edge_length/2)
    N = 11  # number of grid points per direction
    # control evaluation plane coordinates (square in the y=0 plane)
    x = np.linspace(-edge_length/2, edge_length/2, N)
    y = 0
    z = np.linspace(-edge_length/2, edge_length/2, N)
    Xt, Zt = np.meshgrid(x, z)
    x_pln = Xt.ravel()
    z_pln = Zt.ravel()
    # spherical averaging surface coordinates
    r = 0.05  # sphere radius, in m
    alpha = 2 * np.arcsin(edge_length/2/r)  # opening angle from secant
    theta = np.linspace(np.pi/2-alpha/2, np.pi/2+alpha/2, N)
    phi = np.linspace(np.pi-alpha/2, np.pi+alpha/2, N)
    Theta, Phi = np.meshgrid(theta, phi)
    # note the (y, x, z) unpack order: the sphere cap faces the plane along y
    y_sph, x_sph, z_sph = sph2cart(r, Theta.ravel(), Phi.ravel())
    # centre the cap on y=0 so it straddles the evaluation plane
    y_sph -= y_sph.min() + y_sph.ptp() / 2
    ny_sph, nx_sph, nz_sph = sph_normals(r, Theta.ravel(), Phi.ravel())
    # visualize
    update_matplotlib_rc_parameters(is_3d=True)
    cs = sns.color_palette('rocket', 2)
    fig = plt.figure()
    ax = plt.axes(projection ='3d')
    plane = Rectangle(target_area_origin,
                      width=edge_length, height=edge_length,
                      ec=cs[0], fc=cs[1], alpha=0.25,
                      label='control evaluation plane')
    ax.add_patch(plane)
    # lift the 2D rectangle into 3D at y=0
    pathpatch_2d_to_3d(plane, z=y, zdir='y')
    ax.scatter(x_sph, y_sph, z_sph, color=cs[0], depthshade=True,
               label='integration grid')
    ax.quiver(x_sph, y_sph, z_sph,
              nx_sph, ny_sph, nz_sph,
              normalize=True, arrow_length_ratio=0.33, length=0.75/1000,
              lw=1.25, color=cs[0], label='unit normal vector')
    ax.set_box_aspect([1, 1, 1])
    # tick labels converted from m to mm for readability
    ax.set(xlabel='$x$ [mm]', ylabel='$y$ [mm]', zlabel='$z$ [mm]',
           xticks=[x_pln.min(), 0.0, x_pln.max()],
           yticks=[y_sph.min(), 0.0, y_sph.max()],
           zticks=[z_pln.min(), 0.0, z_pln.max()],
           xticklabels=[round(x_pln.min()*1000), 0, round(x_pln.max()*1000)],
           yticklabels=[round(y_sph.min()*1000, 2), 0, round(y_sph.max()*1000, 2)],
           zticklabels=[round(z_pln.min()*1000), 0, round(z_pln.max()*1000)],
           xlim=[x_pln.min()*1.5, x_pln.max()*1.5],
           ylim=[y_sph.min()-abs(y_sph.max()*0.5),
                 y_sph.max()+abs(y_sph.max()*0.5)],
           zlim=[z_pln.min()*1.5, z_pln.max()*1.5]
           )
    # hide the grey background panes
    ax.xaxis.pane.fill = False
    ax.yaxis.pane.fill = False
    ax.zaxis.pane.fill = False
    ax.view_init(20, -70)
    fig.legend()
    fig.tight_layout()
    # fig.savefig(os.path.join(
    #     os.pardir, 'artwork', 'spherical_surface.pdf'
    # ))
    plt.show()
if __name__ == '__main__':
    # Render the figure when executed as a script.
    main()
| akapet00/phd-qualifying-exam | code/spherical_surface.py | spherical_surface.py | py | 2,736 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.linspace",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.arcsin",
"line... |
40213018002 | from flask import Flask, request, render_template
from selenium import webdriver
from bs4 import BeautifulSoup
import pandas as pd
from utils import scroll_to_page_end, extract_videos_data
app = Flask(__name__)
@app.route("/")
def index():
    """Render the landing page containing the channel-URL submission form."""
    return render_template("index.html")
@app.route("/result", methods=["POST"])
def result():
    """Scrape the /videos tab of the submitted YouTube channel URL and
    render the extracted video data.

    Expects a POST form field 'url'; returns the rendered result template.
    """
    url = request.form["url"]
    # Normalise the channel URL so it always points at the /videos tab.
    if "/videos" not in url:
        url = url.rstrip("/") + "/videos"
    driver = webdriver.Chrome()
    try:
        driver.get(url)
        # Scroll to the bottom so lazily-loaded videos appear in the DOM.
        scroll_to_page_end(driver)
        html = driver.page_source
    finally:
        # BUG FIX: the original never quit the driver, leaking one Chrome
        # process per request; quit it even when scraping fails.
        driver.quit()
    soup = BeautifulSoup(html, "html.parser")
    data = extract_videos_data(soup)
    # (the original also computed an unused 'channel_name' local here)
    df = pd.DataFrame(data)
    videos = df.to_dict("records")
    return render_template("result.html", videos=videos)
if __name__ == "__main__":
    # Debug mode enables auto-reload and in-browser tracebacks; disable in production.
    app.run(debug=True)
| aftabgit786/youtube-videos-data-with-selenium | app.py | app.py | py | 862 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "flask.reques... |
1818949289 | import fileinput
from multiprocessing import Process
import os
import shutil
import sys
from node import Node
from observer import Observer
from utils import pipeName, Pipes
def createNode(node_id, money):
    # Child-process entry point: build the Node with its starting balance and
    # block on its listen loop. NOTE: Master.createNode (below) shares this
    # name; the bare reference in Process(target=createNode) inside that
    # method still resolves to this module-level function, not the method.
    n = Node(node_id, money)
    n.listen()
def createObserver():
    """Child-process entry point: build the Observer and block on its
    listen loop. (A stray trailing 'pass' statement was removed.)"""
    obs = Observer()
    obs.listen()
class Master:
    """Coordinator for a Chandy-Lamport snapshot simulation.

    Spawns one Observer process and any number of Node processes, wiring
    every pair together with named FIFOs, and drives them by exchanging
    text commands ('CreateNode', 'Send', ...) that must be acknowledged
    with 'ack'. The message ordering below is significant: each command is
    sent and its ack awaited before the next pipe is created or used.
    """
    def __init__(self):
        # node_id -> multiprocessing.Process for every spawned node
        self.nodes = dict()
        # Pipes registry for the FIFOs the master itself reads/writes
        self.pipes = Pipes()
        self.observer = None
    def startMaster(self):
        """Reset the ./pipes directory and start the observer process."""
        # Setup pipes
        shutil.rmtree('./pipes', ignore_errors=True)
        os.mkdir('./pipes')
        # Create pipe between master and observer
        os.mkfifo(pipeName('master', 'observer'))
        os.mkfifo(pipeName('observer', 'master'))
        self.pipes.createPipe('master', 'observer', write=True, blocking=True)
        self.pipes.createPipe('observer', 'master', write=False, blocking=True)
        # Start observer
        self.observer = Process(target=createObserver)
        self.observer.start()
    def killAll(self):
        """Terminate every child process, remove the FIFOs, and exit."""
        self.observer.terminate()
        for node in self.nodes.values():
            node.terminate()
        shutil.rmtree('./pipes', ignore_errors=True)
        sys.exit()
    def createNode(self, node_id, money):
        """Spawn a node process with a starting balance and connect it to
        the master, the observer, and every existing node."""
        # Create pipe between master and node
        os.mkfifo(pipeName('master', node_id))
        os.mkfifo(pipeName(node_id, 'master'))
        self.pipes.createPipe('master', node_id, write=True, blocking=True)
        self.pipes.createPipe(node_id, 'master', write=False, blocking=True)
        # Create pipe from observer to node
        os.mkfifo(pipeName('observer', node_id))
        os.mkfifo(pipeName(node_id, 'observer')) # TODO not needed but need to change createPipe to be asymmetric
        # Start process (module-level createNode, not this method)
        p = Process(target=createNode, args=(node_id, money))
        p.start()
        # Create inter-node pipes: tell the observer first, then introduce
        # the new node and each existing neighbor to one another, awaiting
        # an 'ack' after every message.
        msg = 'CreateNode {}'.format(node_id)
        self.pipes.sendMessage('master', 'observer', msg)
        response = self.pipes.receiveMessage('observer', 'master')
        if (response != 'ack'):
            raise RuntimeError('Expected \'ack\', received {}'.format(response))
        for neighbor_id in self.nodes.keys():
            os.mkfifo(pipeName(node_id, neighbor_id))
            os.mkfifo(pipeName(neighbor_id, node_id))
            msg = 'CreateNode {}'.format(node_id)
            self.pipes.sendMessage('master', neighbor_id, msg)
            response = self.pipes.receiveMessage(neighbor_id, 'master')
            if (response != 'ack'):
                raise RuntimeError('Expected \'ack\', received {}'.format(response))
            msg = 'CreateNode {}'.format(neighbor_id)
            self.pipes.sendMessage('master', node_id, msg)
            response = self.pipes.receiveMessage(node_id, 'master')
            if (response != 'ack'):
                raise RuntimeError('Expected \'ack\', received {}'.format(response))
        self.nodes[node_id] = p
    def send(self, send_id, recv_id, val):
        """Tell node send_id to transfer val to node recv_id."""
        msg = 'Send {} {}'.format(recv_id, val)
        self.pipes.sendMessage('master', send_id, msg)
        response = self.pipes.receiveMessage(send_id, 'master')
        if (response != 'ack'):
            raise RuntimeError('Expected \'ack\', received {}'.format(response))
    def receive(self, recv_id, send_id=''):
        """Tell node recv_id to process one pending message (optionally
        restricted to messages from send_id)."""
        msg = 'Receive {}'.format(send_id)
        self.pipes.sendMessage('master', recv_id, msg)
        response = self.pipes.receiveMessage(recv_id, 'master')
        if (response != 'ack'):
            raise RuntimeError('Expected \'ack\', received {}'.format(response))
    def receiveAll(self):
        """Drain every node's queue: keep polling in sorted node order until
        a full pass delivers nothing (nodes reply 'ack True'/'ack False')."""
        msg = 'ReceiveAll'
        received = True
        while received:
            received = False
            for node_id in sorted(self.nodes.keys()):
                self.pipes.sendMessage('master', node_id, msg)
                response = self.pipes.receiveMessage(node_id, 'master')
                if ('ack' not in response):
                    raise RuntimeError('Expected \'ack\', received {}'.format(response))
                if 'True' in response:
                    received = True
    def beginSnapshot(self, node_id):
        """Ask the observer to start a snapshot initiated at node_id, then
        have that node consume the observer's marker message."""
        msg = 'BeginSnapshot {}'.format(node_id)
        self.pipes.sendMessage('master', 'observer', msg)
        response = self.pipes.receiveMessage('observer', 'master')
        if (response != 'ack'):
            raise RuntimeError('Expected \'ack\', received {}'.format(response))
        self.receive(node_id, send_id='observer')
    def collectState(self):
        """Have the observer gather recorded state from every node; the
        observer acks once per node (in sorted order) and once at the end."""
        msg = 'CollectState'
        self.pipes.sendMessage('master', 'observer', msg)
        # TODO sort?
        for node_id in sorted(self.nodes.keys()):
            response = self.pipes.receiveMessage('observer', 'master')
            if (response != f'ack {node_id}'):
                raise RuntimeError('Expected \'ack {}\', received {}'.format(node_id, response))
            self.receive(node_id, send_id='observer')
        response = self.pipes.receiveMessage('observer', 'master')
        if (response != 'ack'):
            raise RuntimeError('Expected \'ack\', received {}'.format(response))
    def printSnapshot(self):
        """Ask the observer to print the last collected snapshot."""
        msg = 'PrintSnapshot'
        self.pipes.sendMessage('master', 'observer', msg)
        response = self.pipes.receiveMessage('observer', 'master')
        if (response != 'ack'):
            raise RuntimeError('Expected \'ack\', received {}'.format(response))
def run(master):
    """Read commands from stdin/files (via fileinput) and dispatch them to
    the given Master. Each line is a command name followed by integer args.
    Raises ValueError on an unknown command."""
    for line in fileinput.input():
        args = line.split()
        cmd = args[0]
        # All arguments after the command name are integers (node ids, money).
        for idx in range(1, len(args)):
            args[idx] = int(args[idx])
        if cmd == 'StartMaster':
            master.startMaster()
        elif cmd == 'KillAll':
            master.killAll()
        elif cmd == 'CreateNode':
            node_id = args[1]
            money = args[2]
            master.createNode(node_id, money)
        elif cmd == 'Send':
            send_id = args[1]
            recv_id = args[2]
            money = args[3]
            master.send(send_id, recv_id, money)
        elif cmd == 'Receive':
            recv_id = args[1]
            # Optional second argument restricts which sender to receive from.
            send_id = args[2] if len(args) > 2 else ''
            master.receive(recv_id, send_id)
        elif cmd == 'ReceiveAll':
            master.receiveAll()
        elif cmd == 'BeginSnapshot':
            node_id = args[1]
            master.beginSnapshot(node_id)
        elif cmd == 'CollectState':
            master.collectState()
        elif cmd == 'PrintSnapshot':
            master.printSnapshot()
        else:
            raise ValueError('Command not supported: ' + line)
if __name__ == '__main__':
    # Build the coordinator and process commands from stdin or file args.
    master = Master()
    run(master)
| a-yun/distributed-snapshot | master.py | master.py | py | 6,692 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "node.Node",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "observer.Observer",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "utils.Pipes",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_num... |
42591766267 | from re import M
from pyrailbaron.map.svg import MapSvg, transform_lcc, transform_dxf
from pyrailbaron.map.datamodel import Map, Coordinate
from pyrailbaron.map.states import get_border_data
from pathlib import Path
from typing import List
ROOT_DIR = (Path(__file__) / '../../..').resolve()
border_data = get_border_data(ROOT_DIR / 'data')
BORDERS_TO_DRAW = [
[["MINNESOTA",0,[175,0]],
["M",[49,-98]],
["M",[49,-99]],
["M",[49,-100]],
["M",[49,-101]],
["M",[49,-102]],
["M",[40.5,-102]],
["M",[40.5,-108]],
["M",[37,-108]],
["COLORADO",0,[65,45]],
["OKLAHOMA",0,[200,33]],
["ARKANSAS",0,[0,8]],
["M",[36.5,-92.5]],
["M",[40.6,-92.5]],
["IOWA",0,[124,25]],
["WISCONSIN",0,[265,-1]]],
[["M",[40.5,-108]],
["M",[40.5,-109]],
["M",[40.5,-110]],
["M",[40.5,-111]],
["M",[40.5,-112]],
["M",[40.5,-113]],
["M",[40.5,-114]],
["NEVADA",0,[12,0]],
["NEVADA",0,[-1,173]],
["CALIFORNIA",0,[15,0]],
["CALIFORNIA",0,[-1,158]],
["ARIZONA",0,[154,113]],
["NEW MEXICO",0,[65,52]],
["TEXAS",0,[700,606]],
["OKLAHOMA",0,[196,200]],
["COLORADO",0,[45,65]],
["M",[37,-108]]],
[["TEXAS",0,[606,172]],
["LOUISIANA",0,[470,100]],
["MISSISSIPPI",0,[70,40]],
["M", [32.97, -88.47]],
["M", [32.97, -85.50]],
["KENTUCKY", 0, [263,150]],
["MISSOURI",0,[105,173]],
["OKLAHOMA",0,[33,196]]]
]
# Load the serialized map model produced by an earlier build step.
with (ROOT_DIR/'output/map.json').open('rt') as map_json:
    map: Map = Map.from_json(map_json.read())
# Start from a fresh SVG output file each run.
(ROOT_DIR / 'output/border_test.svg').unlink(missing_ok=True)
svg = MapSvg(ROOT_DIR / 'output/border_test.svg')
# Coordinate pipeline: lat/lon -> LCC projection -> map fit -> DXF space.
svg.transforms = [transform_lcc, map.map_transform, transform_dxf]
colors = ['black', 'red', 'blue']
l = svg.layer('main')
# Draw each closed border path: rows are either explicit points ('M', [lat, lon])
# or (state, border_index, range) segments taken from the state border data.
for row_idx, rows in enumerate(BORDERS_TO_DRAW):
    pts: List[Coordinate] = []
    for i, row in enumerate(rows):
        if row[0] == 'M':
            pts.append(row[1])
            print(f'  Added point {pts[-1]}')
        else:
            state, border_idx, idx_range = row  # renamed from 'range' (shadowed builtin)
            border = border_data[state][border_idx]
            print(f'BORDER {state} #{border_idx} = {len(border)} points')
            segment = None
            # BUG FIX: do_reverse was only assigned inside the list branch, so
            # the 'all' path raised NameError (or silently reused a stale value
            # from a previous iteration) when 'if do_reverse:' ran below.
            do_reverse = False
            if idx_range == 'all':
                segment = border
            elif isinstance(idx_range, list):
                start, end = idx_range
                # Normalize so start <= end; remember to reverse the segment
                # when the caller listed the endpoints backwards.
                if (end >= 0 and start > end) or start < 0:
                    start, end = end, start
                    do_reverse = True
                if end == -1:
                    segment = border[start:]
                else:
                    segment = border[start:(end + 1)]
            if segment:
                if do_reverse:
                    segment = segment.copy()
                    segment.reverse()
                if len(pts) == 0:
                    l.circle(segment[0], 2, fill='black')
                    l.text('start', segment[0])
                pts += segment
                l.circle(pts[-1], 2, fill='black')
                l.text(str(i), pts[-1])
                print(f'  Added segment from {segment[0]} to {segment[-1]}')
    # Close the path back to its first point before drawing.
    pts.append(pts[0])
    l.path(pts, stroke=colors[row_idx % 3], stroke_width=0.5, fill='none')
svg.save()
{
"api_name": "pathlib.Path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pyrailbaron.map.states.get_border_data",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pyrailbaron.map.datamodel.Map",
"line_number": 57,
"usage_type": "name"
},
{
"... |
32961827103 | import numpy as np
import cv2
from scipy.misc import imresize
from imageio import imread
from tensorflow.keras.preprocessing import image
from keras.applications import imagenet_utils
import os
def get_bounding_box_coordinates(projection):
    """Return (col_min, col_max, row_min, row_max) of the nonzero region of
    projection[0], after summing its channels."""
    channel_sum = np.sum(projection[0], axis=2)
    nonzero_positions = np.argwhere(channel_sum)
    row_min, col_min = nonzero_positions.min(0)
    row_max, col_max = nonzero_positions.max(0)
    # Note: columns first, then rows — matches the original tuple order.
    return (col_min, col_max, row_min, row_max)
def draw_bounding_box_cv2(image, bounding_boxes):
    """Draw each bounding box on the image (in place) with a small 'b<N>'
    label inside its top-left corner, and return the image.

    NOTE(review): boxes are unpacked as (xstart, xstop, ystart, ystop), which
    differs from the (ystart, ystop, xstart, xstop) order returned by
    get_bounding_box_coordinates above — confirm callers reorder accordingly.
    """
    count = 0
    for xstart,xstop,ystart,ystop in bounding_boxes:
        count += 1
        # Blue (BGR 255,0,0) rectangle with 1-px border, labelled b1, b2, ...
        cv2.rectangle(image, (xstart,ystart), (xstop,ystop), (255, 0, 0), 1 )
        cv2.putText(image, "b%d"%(count), (xstart+10,ystart+10), 1, 1, (255, 0, 0), 1)
    return image
def get_strongest_filter(activation_img):
    """Return the 1-based index of the filter with the largest peak activation.

    Expects a (1, height, width, filters) activation tensor with square
    spatial dimensions.
    """
    # Spatial dims (indices 1 and 2) must be square.
    assert activation_img.shape[1] == activation_img.shape[2], "Index ordering incorrect"
    # Collapse both spatial axes down to each filter's peak activation.
    per_filter = np.nanmax(np.nanmax(activation_img, axis=2), axis=1)
    # Drop the (asserted singleton) batch dimension.
    assert per_filter.shape[0] == 1
    per_filter = per_filter.sum(0)
    # Prepend a zero so filter indices become 1-based.
    per_filter = np.insert(per_filter, 0, 0.0)
    return per_filter.argmax()
def get_strongest_filters(activation_img, N=2):
    """Return the 1-based indices of the N filters with the largest peak
    activations, strongest first.

    Expects a (1, height, width, filters) activation tensor with square
    spatial dimensions.
    """
    # Spatial dims (indices 1 and 2) must be square.
    assert activation_img.shape[1] == activation_img.shape[2], "Index ordering incorrect"
    # Collapse both spatial axes down to each filter's peak activation.
    per_filter = np.nanmax(np.nanmax(activation_img, axis=2), axis=1)
    # Drop the (asserted singleton) batch dimension.
    assert per_filter.shape[0] == 1
    per_filter = per_filter.sum(0)
    # Prepend a zero so filter indices become 1-based.
    per_filter = np.insert(per_filter, 0, 0.0)
    # Top-N indices ordered from strongest to weakest.
    return per_filter.argsort()[-N:][::-1]
def preprocess_image_batch(image_paths, img_size=(256, 256), crop_size=(224, 224), color_mode="rgb", out=None):
    """
    Resize, crop and normalize colors of images
    to make them suitable for alexnet_model (if default parameter values are chosen)
    This function is also from
    https://github.com/heuritech/convnets-keras/blob/master/convnetskeras/convnets.py
    with only some minor changes

    :param image_paths: a single path or a list of paths
    :param out: optional list-like sink; if it has .append, the batch is
        appended to it and nothing is returned
    :return: (batch, ...) float32 array, unless 'out' is used
    :raises ValueError: if images with differing shapes cannot be stacked
    """
    # Make function callable with single image instead of list
    if isinstance(image_paths, str):
        image_paths = [image_paths]
    img_list = []
    for im_path in image_paths:
        # NOTE(review): mode='RGB' is the old scipy.misc.imread signature;
        # imageio.imread may not accept it (its keyword is 'pilmode') — verify.
        img = imread(im_path, mode='RGB')
        # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 — verify
        # the pinned SciPy version, or migrate to PIL/skimage resizing.
        img = imresize(img, img_size)
        img = img.astype('float32')
        # Normalize the colors (in RGB space) with the empirical means on the training set
        img[:, :, 0] -= 123.68
        img[:, :, 1] -= 116.779
        img[:, :, 2] -= 103.939
        # We permute the colors to get them in the BGR order
        if color_mode == "bgr":
            img[:, :, [0, 1, 2]] = img[:, :, [2, 1, 0]]
            # channels-first layout only in the BGR branch (as in the original)
            img = img.transpose((2, 0, 1))
        if crop_size:
            # Central crop of crop_size out of img_size
            img = img[:, (img_size[0] - crop_size[0]) // 2:(img_size[0] + crop_size[0]) // 2
                  , (img_size[1] - crop_size[1]) // 2:(img_size[1] + crop_size[1]) // 2]
        img_list.append(img)
    try:
        img_batch = np.stack(img_list, axis=0)
    except ValueError as exc:
        # BUG FIX: the original used a bare 'except:' which also swallowed
        # unrelated errors; np.stack raises ValueError on shape mismatch.
        raise ValueError('when img_size and crop_size are None, images'
                         ' in image_paths must have the same shapes.') from exc
    if out is not None and hasattr(out, 'append'):
        out.append(img_batch)
    else:
        return img_batch
def grey_square(img, x, y, radius = 10):
    """Zero out (in place) a (2*radius+1) x (2*radius+1) square centred on
    (x, y) across all batch entries and channels, and return img.

    :param img: (batch, height, width, channels) array, modified in place
    :param x, y: centre of the square (row, column)
    """
    h = img.shape[1]
    w = img.shape[2]
    assert radius <= x < h - radius
    assert radius <= y < w - radius
    # BUG FIX: include both endpoints so the square is symmetric around (x, y);
    # the original stop values x+radius / y+radius dropped the last row and
    # column, producing a 2*radius square instead of the documented 2*radius+1.
    img[:, x-radius:x+radius+1, y-radius:y+radius+1, :] = 0
    return img
def set_zero_except_maximum(filter, x):
    """Return a copy of x that is zero everywhere except at the single
    largest activation of the given filter (1-based index)."""
    assert filter >= 1
    # Keep only the requested filter's channel; everything else is zeroed.
    single_channel = np.zeros_like(x)
    single_channel[0, :, :, filter - 1] = x[0, :, :, filter - 1]
    # Locate the peak activation within that channel.
    peak = np.unravel_index(np.nanargmax(single_channel), single_channel.shape)
    # Build the output with just the peak value preserved.
    out = np.zeros_like(single_channel)
    out[peak] = single_channel[peak]
    return out
def get_path_from_id(img_id):
    '''
    Build the absolute path of an ImageNet (ILSVRC2012) validation image
    from its numeric id.
    :param img_id: integer id of the validation image
    :return: full Windows path string of the JPEG file
    '''
    # Zero-pad the id to five digits, as the ILSVRC2012 naming scheme requires.
    padded_id = str(img_id).zfill(5)
    folder = 'D:\data\imagenet-data\ILSVRC2012_img_val\\'
    file = 'ILSVRC2012_val_000' + padded_id + '.JPEG'
    return folder + file
def load_image(img_path):
    """Load the image at img_path as a (1, 224, 224, 3) batch preprocessed
    with Keras' imagenet_utils (mean subtraction / channel handling)."""
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    # Add the batch dimension expected by model.predict.
    x = np.expand_dims(x, axis=0)
    x = imagenet_utils.preprocess_input(x)
    return x
def deprocess_image(x, format='cl'):
    '''
    Rescale an activation/gradient tensor into a displayable uint8 image.
    Same normalization as in:
    https://github.com/fchollet/keras/blob/master/examples/conv_filter_visualization.py
    :param format: 'cl' for channels-last input, 'cf' for channels-first
        (which gets transposed to channels-last for display)
    '''
    # Drop any leading singleton dimensions (e.g. the batch axis).
    if np.ndim(x) > 3:
        x = np.squeeze(x)
    # Centre on 0 and force std to ~0.1 (in place, like the reference code).
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1
    # Shift into [0, 1] and clip.
    x += 0.5
    x = np.clip(x, 0, 1)
    # Scale up to the 8-bit range.
    x *= 255
    if format == 'cf':
        # channels-first input: move channels to the last axis for display
        x = x.transpose((1, 2, 0))
    return np.clip(x, 0, 255).astype('uint8')
def to_small_pitch(path, pitch_size=(50,50), input_shape=(224,224,3)):
    """Downsize the image at *path* to pitch_size and paste it into a black
    canvas of input_shape, saving the result as 'small_pitch_<basename>'.

    NOTE(review): the patch is pasted at a fixed (50, 50) offset, so this
    assumes 50 + pitch_size fits inside input_shape — confirm for large patches.
    """
    img = image.load_img(path, target_size = pitch_size)
    x = image.img_to_array(img)
    out = np.zeros(input_shape)
    out[50:50+pitch_size[0],50:50+pitch_size[1],:] = x
    fname = os.path.basename(path)
    image.save_img("small_pitch_{}".format(fname), out)
if __name__ == '__main__':
    # Ad-hoc manual check: embed a 100x100 patch of a sample validation image.
    to_small_pitch('D:\data\imagenet-data\ILSVRC2012_img_val\ILSVRC2012_val_00000003.JPEG',pitch_size=(100,100))
| benygood/deconv | base/image_ops.py | image_ops.py | py | 6,496 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.sum",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.argwhere",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.preprocessing.... |
23331707809 | from size_based_ecosystem import *
import matplotlib.animation as animation
# Tableau's categorical 20-colour palette as raw 8-bit RGB triples.
_tableau20_rgb = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
                  (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
                  (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
                  (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
                  (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
tableau20 = [(r / 255., g / 255., b / 255.) for r, g, b in _tableau20_rgb]
from size_based_ecosystem import *
import copy as copy
import scipy.stats as stats
import pickle as pkl
# --- Simulation configuration (depths in model units; see size_based_ecosystem) ---
depth = 20
layers = 100
size_classes = 1
lam = 1
simulate = True
verbose = False
daily_cycle = 365*2*np.pi
obj = spectral_method(depth, layers-1) #This is the old off-by-one error... Now we have added another fucked up error!
#norm_dist = stats.norm.pdf(obj.x, loc = 3, scale = 3)
#print(norm_dist)
# Resource distribution: a normal profile peaked at depth 3 on the grid obj.x.
norm_dist = stats.norm.pdf(obj.x, loc = 3)
res_start = 3*norm_dist #0.1*(1-obj.x/depth)
res_max = 10*norm_dist
water_start = water_column(obj, res_start, layers = layers, resource_max = res_max, replacement = lam, advection = 0, diffusion = 0)
# Two size classes: small (20) and large (8000) body mass.
list_of_sizes = np.array([20, 8000]) #, 1, 400, 1600, 40000])
l2 = False
size_classes = 2
m_v_t = list_of_sizes #list_of_sizes[0:size_classes]
params = ecosystem_parameters(m_v_t, obj)
eco = ecosystem_optimization(m_v_t, layers, params, obj, water_start, l2 = l2, output_level = 5, movement_cost = 1)
#OG_layered_attack = np.copy(eco.parameters.layered_attack)
eco.population_setter(np.array([20, 0.1]) )#, 1, 1, 1, 0.1]))
# Disable handling times so the functional response is purely attack-limited.
eco.parameters.handling_times = 0 * eco.parameters.handling_times
OG_layered_attack = np.copy(params.layered_attack)
frozen_ecos = []
stability = False
time_step = 10**(-4)
#max_err = time_step*1/10
# Solve for the Nash-equilibrium strategies at the initial populations.
x_res = sequential_nash(eco, verbose=True, l2=l2, max_its_seq = 20)
eco.strategy_setter(x_res)
# Plot each class's depth strategy smoothed by its heat kernel.
for i in range(2):
    plt.plot(obj.x, x_res[i]@eco.heat_kernels[i])
plt.show()
# Hill-climb: gradually increase the large class's population and re-solve.
for i in range(100):
    eco.population_setter(np.array([20, 0.1+0.01*(i+1)]) )#, 1, 1, 1, 0.1]))
    x_res = sequential_nash(eco, verbose=True, l2=l2, max_its_seq=20)
    eco.strategy_setter(x_res)
    print("Current iteration", i)
| jemff/food_web | old_sims/eco_HD_hillclimb.py | eco_HD_hillclimb.py | py | 2,353 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scipy.stats.norm.pdf",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "scipy.stats.norm",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "scipy.stats",
"line_number": 31,
"usage_type": "name"
}
] |
import difflib
import distutils.file_util
import os
import shutil
import subprocess
import sys
import unittest

import getnose
import nose.plugins
import nose.plugins.builtin

import makeloadertests
class JsTest(nose.plugins.Plugin):
    """Nose plugin that discovers test_*.js files and wraps each one in a
    JsTestCase so JavaScript tests run alongside the Python suite."""
    def options(self, parser, env=os.environ):
        """Register the --revalidate-logs command-line option."""
        nose.plugins.Plugin.options(self, parser, env)
        parser.add_option("--revalidate-logs", action="store_true",
                          dest="revalidate_logs",
                          default=False,
                          help="For all tests run, makes their logs "
                          "the expected, or canonical, log file to "
                          "which all future runs of the tests are "
                          "compared to.")
    def configure(self, options, config):
        """Record whether expected logs should be overwritten this run."""
        nose.plugins.Plugin.configure(self, options, config)
        self.revalidate_logs = options.revalidate_logs
    def wantFile(self, file):
        """Claim files named test_*.js for this plugin.

        (The parameter name 'file' shadows the builtin, but renaming it would
        change nose's keyword interface, so it is kept.)"""
        basename = os.path.basename(file)
        ext = os.path.splitext(file)[1]
        if (basename.startswith("test_") and ext == ".js"):
            return True
        # Oddly enough, if we return 'False' here instead of 'None',
        # none of the other plugins get a chance to test the file.
        return None
    def loadTestsFromFile(self, filename):
        """Wrap the claimed .js file in a single JsTestCase."""
        return [JsTestCase(filename, self.revalidate_logs)]
class JsTestCase(unittest.TestCase):
def __init__(self, test, revalidate_logs):
self.__test = test
self.__revalidate_logs = revalidate_logs
unittest.TestCase.__init__(self)
def shortDescription(self):
return os.path.basename(os.path.splitext(self.__test)[0])
def runTest(self):
test = self.__test
dirname = os.path.dirname(test)
testname = os.path.splitext(os.path.basename(test))[0]
result = subprocess.call(
["make",
"-C", dirname,
"-f", "../harness/Makefile",
testname],
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT
)
logfile_name = os.path.join(dirname, testname + ".log")
if result != 0:
self.fail(open(logfile_name, "r").read())
else:
expected_logfile_name = logfile_name + ".expected"
if self.__revalidate_logs:
distutils.file_util.copy_file(logfile_name,
expected_logfile_name)
if os.path.exists(expected_logfile_name):
expected = open(expected_logfile_name, "r").read()
actual = open(logfile_name, "r").read()
if expected != actual:
diff = "Expected results differ from actual results.\n\n"
diff += "\n".join(difflib.unified_diff(
expected.splitlines(), actual.splitlines(),
"expected results", "actual results"
))
diff += ("\n\nIf you believe that these changes are valid "
"(i.e., that they don't represent malfunctioning "
"code), you may want to re-validate the expected "
"results by running the following command:\n\n")
diff += "python %s %s --revalidate-logs\n" % (
sys.argv[0],
self.__test
)
self.fail(diff)
if __name__ == "__main__":
    # Regenerate the auto-created loader tests, then run nose with the
    # JsTest plugin force-enabled via the injected command-line flag.
    makeloadertests.remove_old_loader_tests()
    makeloadertests.make_loader_tests()
    sys.argv.append("--with-jstest")
    nose.main(defaultTest=["scripts",
                           "tests/unit",
                           "tests/system"],
              plugins=[JsTest()])
| mozillalabs-syncer/weave-backup | tools/scripts/runtests.py | runtests.py | py | 3,767 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "nose.plugins.plugins",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "nose.plugins",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "os.environ",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "nose.plugins.p... |
13682438400 | from PIL import Image
import numpy as np
import pandas as pd
from shutil import copyfile
import os
# Prepare the train/test image sets: download the raw images from GCS when
# missing, resize everything to 256x256, promote grayscale images to RGB,
# and copy the label CSV next to the processed images.

if not os.path.isdir("./data/"):
    print('Creating Data Directory')
    os.mkdir("./data/")


def _prepare_split(raw_dirname, csv_file, dest_dir, label):
    """Download, resize and normalise one data split.

    raw_dirname: folder under gs://cs229-gap-data/RawData/ and ./data/.
    csv_file:    CSV whose first column holds one image filename per row.
    dest_dir:    output directory for 256x256 RGB images plus the CSV copy.
    label:       human-readable split name used in progress messages.
    """
    source_csv_path = "./"
    source_img_path = "./data/%s/" % raw_dirname
    if os.path.isdir(source_img_path):
        print("You already have raw %s images, so I'm using those\n"
              % label.lower())
    else:
        print("Downloading Raw %s Images\n" % label)
        os.system('gsutil -m cp -r gs://cs229-gap-data/RawData/%s ./data/'
                  % raw_dirname)
        print("Raw %s Images Downloaded\n" % label)
    print("Processing %s Images" % label)
    if not os.path.isdir(dest_dir):
        os.mkdir(dest_dir)
    labels = pd.read_csv(source_csv_path + csv_file, header=0)
    # DataFrame.ix was removed in pandas 1.0; .iloc is the positional
    # equivalent for "first column, all rows".
    img_names = np.array(labels.iloc[:, 0:1])
    copyfile(source_csv_path + csv_file, dest_dir + csv_file)
    # First pass: resize every image to 256x256.
    for img_name in img_names:
        img = Image.open(source_img_path + img_name[0])
        # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same
        # filter under its proper name (alias since Pillow 2.7).
        img = img.resize((256, 256), Image.LANCZOS)
        img.save(dest_dir + img_name[0])
    # Second pass: any image that is not (256, 256, 3) is grayscale;
    # stack it into three identical channels and re-save.
    for img_name in img_names:
        img = np.array(Image.open(dest_dir + img_name[0]))
        if img.shape != (256, 256, 3):
            img = np.stack((img,) * 3, axis=2)
            img = Image.fromarray(img)
            img.save(dest_dir + img_name[0])


_prepare_split("smiles_valset", "gender_fex_valset.csv",
               "data/test_face/", "Test")
_prepare_split("smiles_trset", "gender_fex_trset.csv",
               "data/train_face/", "Training")
| ssanchez2000/CS229-Project | preprocessing.py | preprocessing.py | py | 2,462 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.isdir",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": ... |
19764321263 | from __future__ import annotations
import json
import os
import requests
from typing import Tuple
from .common import *
from .jws import sign, rsa, x509
# Valid values for PayinTransaction.payin_type.
INTERACTIVE_PAYMENT = "interactive_payment"
AUTOMATED_PAYMENT = "automated_payment"
EXPECTED_PAYMENT = "expected_payment"
class PayinDebtor:
    """The debtor (origin) of a payin.

    All fields are optional:
    - name: the debtor's name
    - identification: the debtor's ID (a PersonId)
    - financial_institution: the debtor's financial institution
    - account: the debtor's account
    - account_type: the type of the debtor's account
    - email: the debtor's email
    """

    def __init__(self, name: str = None, identification: PersonId = None,
                 financial_institution: FinancialInstitution = None,
                 account: str = None, account_type: str = None,
                 email: str = None) -> None:
        # Store every constructor argument verbatim as a public attribute.
        self.name = name
        self.identification = identification
        self.financial_institution = financial_institution
        self.account = account
        self.account_type = account_type
        self.email = email
class PayinCreditor:
    """The creditor (destination) of a payin.

    Required fields:
    - name: the creditor's name
    - identification: the creditor's ID (a PersonId)
    - financial_institution: the creditor's financial institution
    - account: the creditor's account
    - account_type: the type of the creditor's account
    - email: the creditor's email
    """

    def __init__(self, name: str, identification: PersonId,
                 financial_institution: FinancialInstitution,
                 account: str, account_type: str, email: str) -> None:
        # Identity of the destination party:
        self.name = name
        self.identification = identification
        # Where the funds are delivered:
        self.financial_institution = financial_institution
        self.account = account
        self.account_type = account_type
        self.email = email
class PayinTransaction:
    """A single payin transaction.

    Fields:
    - payin_type: "interactive_payment", "automated_payment" or
      "expected_payment"
    - interactive_payment_provider: provider for interactive payments
      (optional)
    - interactive_payment_success_redirect_url /
      interactive_payment_failure_redirect_url: redirect targets after an
      interactive payment succeeds/fails (required only for interactive
      payments)
    - transaction_id: UUID string; auto-generated when omitted
    - currency: currency of the transaction
    - amount: amount as a string (optional)
    - description: free-text description (optional)
    - expiration_date: last date the transaction can be executed, after
      which the payment is considered failed (optional)
    - debtor: origin of the funds (optional)
    - creditor: destination of the funds
    """

    def __init__(
        self,
        payin_type: str,
        currency: str,
        creditor: PayinCreditor,
        interactive_payment_provider: str = None,
        interactive_payment_success_redirect_url: str = None,
        interactive_payment_failure_redirect_url: str = None,
        transaction_id: str = None,
        amount: str = None,
        description: str = None,
        expiration_date: str = None,
        debtor: PayinDebtor = None,
    ) -> None:
        # Every payin shares the same top-level transaction type marker.
        self.transaction_type = "payin"
        self.payin_type = payin_type
        self.interactive_payment_provider = interactive_payment_provider
        success_url = interactive_payment_success_redirect_url
        failure_url = interactive_payment_failure_redirect_url
        self.interactive_payment_success_redirect_url = success_url
        self.interactive_payment_failure_redirect_url = failure_url
        # Mint a fresh UUID when the caller did not supply an id.
        self.transaction_id = transaction_id if transaction_id else random_uuid()
        self.currency = currency
        self.amount = amount
        self.description = description
        self.expiration_date = expiration_date
        self.debtor = debtor
        self.creditor = creditor
class PayinHttpResponseError:
    """One error entry from a payin sync response.

    - error_code: machine-readable error code
    - error_message: human-readable error message
    """

    def __init__(self, error_code: str, error_message: str) -> None:
        self.error_code = error_code
        self.error_message = error_message

    def __str__(self) -> str:
        return "{0} ({1})".format(self.error_code, self.error_message)

    def __repr__(self) -> str:
        return "PayinHttpResponseError({0}, {1})".format(
            self.error_code, self.error_message
        )
class PayinHttpResponse:
    """The http response received right after posting a payin message, with:

    - http_status_code: The HTTP code of the response
    - transaction_ids: A dict mapping transaction ids to Shinkansen's
      transaction id
    - interactive_payment_urls: A dict mapping transaction ids to
      interactive payment URLs
    - errors: A list of `PayinHttpResponseError`s
    """

    def __init__(
        self,
        http_status_code: int,
        transaction_ids: dict[str, str],
        interactive_payment_urls: dict[str, str],
        errors: list[PayinHttpResponseError],
    ) -> None:
        self.http_status_code = http_status_code
        self.transaction_ids = transaction_ids
        self.interactive_payment_urls = interactive_payment_urls
        self.errors = errors

    def __str__(self) -> str:
        return f"{self.http_status_code} transaction_ids={self.transaction_ids} interactive_payment_urls={self.interactive_payment_urls} errors={self.errors}"

    def __repr__(self) -> str:
        return f"PayinHttpResponse({self.http_status_code}, {self.transaction_ids}, {self.interactive_payment_urls} {self.errors})"

    @classmethod
    def from_http_response(cls, response: requests.Response) -> PayinHttpResponse:
        """Return a PayinHttpResponse from a HTTP response.

        200/409 responses carry accepted transactions; a 400 carries a list
        of errors.  Anything else (including a 400 without a JSON body)
        produces an empty response object holding only the status code.
        """
        if response.status_code in (200, 409):
            # Parse the body once — the original called response.json()
            # twice, decoding the payload a second time for no reason.
            payload = response.json()
            return PayinHttpResponse(
                http_status_code=response.status_code,
                errors=[],
                transaction_ids={
                    t["transaction_id"]: t["shinkansen_transaction_id"]
                    for t in payload["transactions"]
                },
                interactive_payment_urls={
                    t["transaction_id"]: t["interactive_payment_url"]
                    for t in payload["transactions"]
                    if "interactive_payment_url" in t
                },
            )
        elif response.status_code == 400:
            try:
                return PayinHttpResponse(
                    http_status_code=response.status_code,
                    transaction_ids={},
                    interactive_payment_urls={},
                    errors=[
                        PayinHttpResponseError(e["error_code"], e["error_message"])
                        for e in response.json()["errors"]
                    ],
                )
            except requests.exceptions.JSONDecodeError:
                pass  # non-JSON 400 body: fall through to the catch-all
        # Catch all:
        return PayinHttpResponse(
            http_status_code=response.status_code,
            transaction_ids={},
            interactive_payment_urls={},
            errors=[],
        )
class PayinMessage:
    """A payin message with:

    - header: The header of the message
    - transactions: A list of transactions contained in the message
    """

    def __init__(
        self, header: MessageHeader, transactions: list[PayinTransaction]
    ) -> None:
        self.header = header
        self.transactions = transactions

    def as_json(self) -> str:
        """Returns the message as a JSON object"""
        # Serialize each object via its __dict__, dropping None-valued
        # fields so optional attributes are omitted from the payload.
        return json.dumps(
            {"document": self},
            default=lambda o: {k: v for (k, v) in o.__dict__.items() if v is not None},
        )

    @classmethod
    def from_json(cls, json_string: str) -> "PayinMessage":
        """Return a message from a JSON string"""
        json_dict = json.loads(json_string)
        # Rebuild the object graph field by field.  The `x and Ctor(...)`
        # idiom maps a missing/None optional sub-dict to None instead of
        # constructing the nested object.
        return cls(
            header=MessageHeader.from_json_dict(json_dict["document"]["header"]),
            transactions=[
                PayinTransaction(
                    payin_type=t["payin_type"],
                    interactive_payment_provider=t.get("interactive_payment_provider"),
                    interactive_payment_success_redirect_url=t.get(
                        "interactive_payment_success_redirect_url"
                    ),
                    interactive_payment_failure_redirect_url=t.get(
                        "interactive_payment_failure_redirect_url"
                    ),
                    transaction_id=t["transaction_id"],
                    currency=t["currency"],
                    amount=t.get("amount"),
                    description=t.get("description"),
                    expiration_date=t.get("expiration_date"),
                    debtor=t.get("debtor")
                    and PayinDebtor(
                        name=t["debtor"].get("name"),
                        identification=t["debtor"].get("identification")
                        and PersonId(
                            id_schema=t["debtor"]["identification"]["id_schema"],
                            id=t["debtor"]["identification"]["id"],
                        ),
                        financial_institution=t["debtor"].get("financial_institution")
                        and FinancialInstitution(
                            fin_id_schema=t["debtor"]["financial_institution"][
                                "fin_id_schema"
                            ],
                            fin_id=t["debtor"]["financial_institution"]["fin_id"],
                        ),
                        account=t["debtor"].get("account"),
                        account_type=t["debtor"].get("account_type"),
                        email=t["debtor"].get("email"),
                    ),
                    # The creditor and its sub-objects are mandatory, so
                    # plain indexing (KeyError on absence) is intentional.
                    creditor=PayinCreditor(
                        name=t["creditor"]["name"],
                        identification=PersonId(
                            id_schema=t["creditor"]["identification"]["id_schema"],
                            id=t["creditor"]["identification"]["id"],
                        ),
                        financial_institution=FinancialInstitution(
                            fin_id_schema=t["creditor"]["financial_institution"][
                                "fin_id_schema"
                            ],
                            fin_id=t["creditor"]["financial_institution"]["fin_id"],
                        ),
                        account=t["creditor"]["account"],
                        account_type=t["creditor"]["account_type"],
                        email=t["creditor"]["email"],
                    ),
                )
                for t in json_dict["document"]["transactions"]
            ],
        )

    @property
    def id(self) -> str:
        """Returns the ID of the message"""
        return self.header.message_id

    def signature(
        self,
        certificate_private_key: rsa.RSAPrivateKey,
        certificate: x509.Certificate,
    ) -> str:
        # JWS signature computed over the JSON serialization of the message.
        return sign(self.as_json(), certificate_private_key, certificate)

    def sign_and_send(
        self,
        certificate_private_key: rsa.RSAPrivateKey,
        certificate: x509.Certificate,
        api_key: str = None,
        base_url: str = None,
    ) -> Tuple[str, PayinHttpResponse]:
        """Signs the message and sends it to the Shinkansen API"""
        signature = self.signature(certificate_private_key, certificate)
        return (signature, self.send(signature, api_key, base_url))

    def send(
        self, signature: str, api_key: str = None, base_url: str = None
    ) -> PayinHttpResponse:
        """Sends the message to the Shinkansen API"""
        # Fall back to the library default URL and the SHINKANSEN_API_KEY
        # environment variable when the caller omits them.
        base_url = base_url or SHINKANSEN_API_V1_BASE_URL
        api_key = api_key or os.environ.get("SHINKANSEN_API_KEY")
        if not api_key:
            raise ShinkansenException(
                "No api_key argument and SHINKANSEN_API_KEY not found in env"
            )
        response = requests.post(
            url=f"{base_url}/messages/payins",
            data=self.as_json(),
            headers={
                "Content-Type": "application/json",
                "Shinkansen-Api-Key": api_key,
                "Shinkansen-JWS-Signature": signature,
            },
        )
        return PayinHttpResponse.from_http_response(response)
| shinkansenfinance/python-shinkansen | shinkansen/payins.py | payins.py | py | 12,652 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "requests.Response",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "requests.exceptions",
"line_number": 190,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "json.lo... |
8446754298 | import os
import re
import shutil
import tempfile
import unittest
from cupy import testing
from example_tests import example_test
# Run matplotlib headless so the example can save its plot with no display.
os.environ['MPLBACKEND'] = 'Agg'


@testing.with_requires('matplotlib')
class TestKmeans(unittest.TestCase):
    """Smoke tests that run examples/kmeans/kmeans.py as a subprocess."""

    def test_default(self):
        """A default run prints CPU and GPU timing lines."""
        raw_output = example_test.run_example(
            'kmeans/kmeans.py', '-m', '1', '--num', '10')
        timing_pattern = r' CPU : [0-9\.]+ sec\s+GPU : [0-9\.]+ sec'
        assert re.search(timing_pattern, raw_output.decode('utf-8'))

    def test_custom_kernel(self):
        """A run with --use-custom-kernel also prints the timing lines."""
        raw_output = example_test.run_example(
            'kmeans/kmeans.py', '-m', '1', '--num', '10',
            '--use-custom-kernel')
        timing_pattern = r' CPU : [0-9\.]+ sec\s+GPU : [0-9\.]+ sec'
        assert re.search(timing_pattern, raw_output.decode('utf-8'))

    def test_result_image(self):
        """Passing -o makes the example write the output image file."""
        tmp_dir = tempfile.mkdtemp()
        try:
            image_path = os.path.join(tmp_dir, 'kmeans.png')
            example_test.run_example(
                'kmeans/kmeans.py', '-m', '1', '--num', '10', '-o', image_path)
            assert os.path.exists(image_path)
        finally:
            shutil.rmtree(tmp_dir, ignore_errors=True)
| cupy/cupy | tests/example_tests/test_kmeans.py | test_kmeans.py | py | 1,190 | python | en | code | 7,341 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "unittest.TestCase",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "example_tests.example_test.run_example",
"line_number": 19,
"usage_type": "call"
},
{
"api... |
329200388 | # -*- coding: UTF-8 -*-
from flask import Flask, jsonify, current_app, render_template, request
import os
import sys
import json # http query responses handled as json
# (in python requests/grequests and ajax calls)
import requests # for http get/post calls
from time import time # to measure elapsed time for debugging purpose
''' 'yelp2' module is my contribution for extracting photos (biz-photos) '''
from yelp2.url_params import Url_Params # set up parameters to use Yelp search API
from yelp2.config import DEFAULT_TERM # term/category for yelp search query in case term input field is empty
from yelp2.config import DEFAULT_LOCATION # default location for yelp search query
from yelp2.visual_yelp import Visual_Yelp # wrapper for yelp client object
import logging
from logging.handlers import RotatingFileHandler
app = Flask(__name__)

# Enable Flask debug mode unless PYTHON_ENV=production is set.
IS_PRODUCTION = (os.getenv('PYTHON_ENV', False) == "production")
if not IS_PRODUCTION:
    app.debug = True
''' landing page '''
@app.route('/')
def index():
    """Render the landing page template."""
    return render_template('kd.html')
''' "visual" presentation of yelp search results '''
@app.route('/yelp')
def yelp():
# business photos (biz-photos) are extracted from yelp search results,
# and displayed one business per row.
# biz-photos are found on each business' photo-box page. Formatted results
# are appended to "search-results" div tag in body.
# Yelp query string is '/yelp/?term=category&location=region'
# business term or category and location are taken from input fields
# visitor first sees yelp results with default term/category and
# ip-obtained geolocation displayed on page
# browser ip used to obtain geolocation
client_ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
# util functions to generate search query parameters
# in case of error default term/category & location values used
util = Url_Params()
location = util.get_location(client_ip)
# pass location info to yelp page
return render_template('yelp.html', term=DEFAULT_TERM, loc=location)
@app.route('/kd')
def kd():
    """Render the kd.html template (same page as the '/' route)."""
    return render_template('kd.html')
# @app.route('/home')
# def home():
# link_id_1 = 'YelpPhotos'
# link_url_1 = '/yelp/'
# link_id_2 = 'LinkedIn'
# link_url_2 = 'https://www.linkedin.com/in/gilkwak'
# return render_template('indexhtml.html',
# link_id_1=link_id_1,
# link_url_1=link_url_1,
# link_id_2=link_id_2,
# link_url_2=link_url_2)
""" Displays results of Yelp http get request """
@app.route("/search/", methods=["GET"])
def main():
# decorator for Yelp client for search query & process response for
# visual display of results (biz-photos for each businesses)
yelp = Visual_Yelp()
biz_photos = yelp.biz_photos()
# json containing status, html for biz-photos,
# longitudes and latitudes of businesses for google map
return biz_photos
''' for google map '''
@app.route('/googlemap/')
def googlemap():
    """Render the Google Map page."""
    return render_template('googlemap.html')
if __name__ == "__main__":
    # handler = RotatingFileHandler('foo.log', maxBytes=10000, backupCount=1)
    # handler.setLevel(logging.INFO)
    # app.logger.addHandler(handler)
    if app.debug:
        # Development: bind host/port from the environment (defaults suit
        # container/cloud-IDE setups).
        app.run(
            host=os.getenv('IP', '0.0.0.0'),
            port=int(os.getenv('PORT', '8080'))
        )
    else:
        # Production: rely on Flask's defaults (typically behind a WSGI server).
        app.run()
| gil-k/yelp | atitan.py | atitan.py | py | 3,587 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "flask.request.environ.get... |
74059454504 | from ansible.module_utils.basic import AnsibleModule
from PyPowerStore.utils.exception import PowerStoreException
from ansible.module_utils import dellemc_ansible_utils as utils
import logging
from uuid import UUID
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: dellemc_powerstore_volumegroup
version_added: '2.6'
short_description: Manage volume groups on PowerStore Storage System
description:
- Managing volume group on PowerStore Storage System includes
creating new volume group, adding volumes to volume
group, removing volumes from volume group, renaming volume group,
modifying volume group and deleting volume group.
author:
- Akash Shendge (akash.shendge@dell.com)
- Arindam Datta (arindam.datta@dell.com)
extends_documentation_fragment:
- dellemc.dellemc_powerstore
options:
vg_name:
description:
- The name of the volume group.
required: False
vg_id:
description:
- The id of the volume group.
- It can be used only for Modify, Add/Remove or Delete operation.
required: False
volumes:
description:
- This is a list of volumes.
- Either the volume ID or name must be provided for adding/removing
existing volumes from volume group.
- If volumes are given, then vol_state should also be specified.
vol_state:
description:
- String variable , describes the state of volumes inside VG
- If volume are given, then vol_state should also be specified.
choices: [present-in-group , absent-in-group]
new_vg_name:
description:
- The new name of the volume group.
description:
description:
- Description about the volume group.
protection_policy:
description:
- String variable, represents Protection policy id or name
used for volume group.
- Specifying an empty string or "" removes the existing
protection policy from Volume Group.
required: false
is_write_order_consistent:
description:
- A boolean flag to indicate whether snapshot sets of the volume group will be write-order consistent.
- If this parameter is not specified, the array by default sets it to true.
required: false
state:
description:
- Define whether the volume group should exist or not.
choices: [absent, present]
required: true
Notes:
- vol_state is mandatory if volumes are provided.
- A protection policy can be specified either for an volume group, or
for the individual volumes inside the volume group.
- A volume can be a member of at most one volume group.
- Specifying "protection_policy" as empty string or "" removes the existing
protection policy from a Volume Group.
'''
EXAMPLES = r'''
- name: Create volume group without protection policy
dellemc_powerstore_volumegroup:
array_ip: "{{array_ip}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
vg_name: "{{vg_name}}"
description: "This volume group is for ansible"
state: "present"
- name: Get details of volume group
dellemc_powerstore_volumegroup:
array_ip: "{{array_ip}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
vg_name: "{{vg_name}}"
state: "present"
- name: Add volumes to volume group
dellemc_powerstore_volumegroup:
array_ip: "{{array_ip}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
vg_name: "{{vg_name}}"
state: "present"
volumes:
- "7f879569-676c-4749-a06f-c2c30e09b295"
- "68e4dad5-5de5-4644-a98f-6d4fb916e169"
- "Ansible_Testing"
vol_state: "present-in-group"
- name: Remove volumes from volume group
dellemc_powerstore_volumegroup:
array_ip: "{{array_ip}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
vg_name: "{{vg_name}}"
state: "present"
volumes:
- "7f879569-676c-4749-a06f-c2c30e09b295"
- "Ansible_Testing"
vol_state: "absent-in-group"
- name: Rename volume group and change is_write_order_consistent flag
dellemc_powerstore_volumegroup:
array_ip: "{{array_ip}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
vg_name: "{{vg_name}}"
new_vg_name: "{{new_vg_name}}"
is_write_order_consistent: False
state: "present"
- name: Get details of volume group by ID
dellemc_powerstore_volumegroup:
array_ip: "{{array_ip}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
vg_id: "{{vg_id}}"
state: "present"
- name: Delete volume group
dellemc_powerstore_volumegroup:
array_ip: "{{array_ip}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
name: "{{new_vg_name}}"
state: "absent"
'''
RETURN = r'''
'''
# Module-level logger for this Ansible module.
LOG = utils.get_logger('dellemc_powerstore_volumegroup',
                       log_devel=logging.INFO)
# SDK availability and version support, resolved once at import time and
# enforced in PowerStoreVolumeGroup.__init__.
py4ps_sdk = utils.has_pyu4ps_sdk()
HAS_PY4PS = py4ps_sdk['HAS_Py4PS']
IMPORT_ERROR = py4ps_sdk['Error_message']
py4ps_version = utils.py4ps_version_check()
IS_SUPPORTED_PY4PS_VERSION = py4ps_version['supported_version']
VERSION_ERROR = py4ps_version['unsupported_version_message']
# Application type passed to get_powerstore_connection().
APPLICATION_TYPE = 'Ansible/1.0'
class PowerStoreVolumeGroup(object):
"""Class with Volume Group operations"""
    def __init__(self):
        """Define all the parameters required by this module"""
        self.module_params = utils.get_powerstore_management_host_parameters()
        self.module_params.update(get_powerstore_volume_group_parameters())

        # initialize the Ansible module
        self.module = AnsibleModule(
            argument_spec=self.module_params,
            supports_check_mode=True
        )
        LOG.info('HAS_PY4PS = {0} , IMPORT_ERROR = {1}'.format(
            HAS_PY4PS, IMPORT_ERROR))
        # Fail fast when the PyPowerStore SDK is missing or unsupported.
        if HAS_PY4PS is False:
            self.module.fail_json(msg=IMPORT_ERROR)
        LOG.info('IS_SUPPORTED_PY4PS_VERSION = {0} , '
                 'VERSION_ERROR = {1}'.format(
                     IS_SUPPORTED_PY4PS_VERSION, VERSION_ERROR))
        if IS_SUPPORTED_PY4PS_VERSION is False:
            self.module.fail_json(msg=VERSION_ERROR)
        # Open the array connection and keep both API facets used below.
        self.conn = utils.get_powerstore_connection(self.module.params,
                                                    application_type=APPLICATION_TYPE)
        self.provisioning = self.conn.provisioning
        LOG.info('Got Py4ps instance for provisioning on PowerStore {0}'.
                 format(self.conn))
        self.protection = self.conn.protection
        LOG.info('Got Py4ps instance for protection on PowerStore {0}'.
                 format(self.protection))
    def get_volume_group_details(self, vg_id=None, name=None):
        """Get volume group details by id or by name.

        Returns the details dict, or None when the group is not found.
        NOTE(review): exceptions here are only logged -- the method then
        falls through and implicitly returns None, so callers see lookup
        errors as "not found"; confirm that is intentional.
        """
        try:
            LOG.info("Getting VG Details {0} {1}".format(vg_id, name))
            if vg_id:
                resp = self.provisioning.get_volume_group_details(vg_id)
                LOG.info("Successfully Got VG with id {0}".format(vg_id))
                if len(resp) > 0:
                    return resp
                else:
                    return None
            if name:
                # Name lookup returns a list; re-fetch full details by id.
                resp = self.provisioning.get_volume_group_by_name(name)
                if resp and len(resp) > 0:
                    id = resp[0]['id']
                    vg_details = self.provisioning.\
                        get_volume_group_details(id)
                    LOG.info("Successfully Got VG with name {0}".format(name))
                    return vg_details
                return None
        except Exception as e:
            id_or_name = vg_id if vg_id else name
            errormsg = "Failed to get volume group {0} with error {1}". \
                format(id_or_name, str(e))
            LOG.error(errormsg)
def delete_volume_group(self, volume_group_id):
"""Delete volume group"""
try:
self.provisioning.delete_volume_group(volume_group_id)
return True
except PowerStoreException as pe:
errormsg = "Failed to delete volume group {0} with error {1}". \
format(volume_group_id, str(pe))
LOG.error(errormsg)
self.module.fail_json(msg=errormsg)
except Exception as e:
errormsg = "Failed to delete volume group {0} with error {1}".\
format(volume_group_id, str(e))
LOG.error(errormsg)
self.module.fail_json(msg=errormsg)
def get_volume_id_by_name(self, vol_name):
"""Get the details of a volume by name"""
try:
resp = self.provisioning.get_volume_by_name(vol_name)
if len(resp) > 0:
return resp[0]['id']
else:
return None
except Exception as e:
msg = "Failed to get the volume with name {0} with " \
"error {1}".format(vol_name, str(e))
LOG.error(msg)
def get_volume_details_by_id(self, volume_id):
"""Get the details of a volume by name"""
try:
LOG.info("getting volume with id {0}".format(volume_id))
resp = self.provisioning.get_volume_details(volume_id)
if len(resp) > 0:
LOG.info("got volume with id {0} is {1}".format(volume_id, resp))
return resp['id']
else:
return None
except Exception as e:
msg = "Failed to get the volume with id {0} with " \
"error {1}".format(volume_id, str(e))
LOG.error(msg)
    def remove_volumes_from_volume_group(self, vg_id, vol_list):
        """Remove volumes (given by name or id) from volume group vg_id.

        Returns True when at least one volume was removed, False when there
        was nothing to remove.  Fails the Ansible module if any requested
        volume is not currently a member of the group.
        """
        vol_group_details = self.get_volume_group_details(vg_id=vg_id)
        existing_volumes_in_vg = vol_group_details['volumes']
        LOG.debug("Existing Volumes: {0}".format(existing_volumes_in_vg))
        existing_vol_ids = []
        for vol in existing_volumes_in_vg:
            if vol:
                existing_vol_ids.append(vol['id'])
        LOG.debug("Existing Volume IDs {0}".format(existing_vol_ids))
        ids_to_remove = []
        vol_name_list = []
        vol_id_list = []
        # Split the caller-supplied identifiers into names and UUIDs,
        # de-duplicating as we go.
        for each_vol in vol_list:
            if each_vol:
                identifier_type = self.name_or_id(each_vol)
                if identifier_type == "ID" and not (each_vol in vol_id_list):
                    vol_id_list.append(each_vol)
                elif identifier_type == "NAME" and not (each_vol in vol_name_list):
                    vol_name_list.append(each_vol)
        """remove by name"""
        for vol in vol_name_list:
            id = self.get_volume_id_by_name(vol)
            if id and (id in existing_vol_ids):
                if id not in ids_to_remove:
                    ids_to_remove.append(id)
            else:
                msg = "Unable to remove volume name {0} since is not " \
                      "present in volume group {1}".format(vol, vg_id)
                LOG.warn(msg)
                self.module.fail_json(msg=msg)
        """remove by id"""
        for vol in vol_id_list:
            if vol in existing_vol_ids:
                if vol not in ids_to_remove:
                    ids_to_remove.append(vol)
            else:
                msg = "Unable to remove volume id {0} since is not " \
                      "present in volume group {1}".format(vol, vg_id)
                LOG.warn(msg)
                self.module.fail_json(msg=msg)
        LOG.debug("Volume IDs to Remove {0}".format(ids_to_remove))
        if len(ids_to_remove) == 0:
            return False
        try:
            self.provisioning.remove_members_from_volume_group(
                vg_id, ids_to_remove)
            return True
        except PowerStoreException as pe:
            errormsg = "Remove existing volume(s) from volume group {0} " \
                       "failed with error {1}".format(vg_id, str(pe))
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)
        except Exception as e:
            errormsg = "Remove existing volume(s) from volume group {0} " \
                       "failed with error {1}".format(vg_id, str(e))
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)
def add_volumes_to_volume_group(self,vg_id, vol_list):
"""adds volumes to volume group"""
vol_group_details = self.get_volume_group_details(vg_id=vg_id)
existing_volumes_in_vg = vol_group_details['volumes']
LOG.debug("Existing Volumes: {0}".format(existing_volumes_in_vg))
existing_vol_ids = []
for vol in existing_volumes_in_vg:
if vol:
existing_vol_ids.append(vol['id'])
LOG.debug("Existing Volume IDs {0}".format(existing_vol_ids))
ids_to_add = []
vol_name_list = []
vol_id_list = []
for each_vol in vol_list:
if each_vol:
identifier_type = self.name_or_id(each_vol)
if identifier_type == "ID" and not (each_vol in vol_id_list):
vol_id_list.append(each_vol)
elif identifier_type == "NAME" and not (each_vol in vol_name_list):
vol_name_list.append(each_vol)
"""add volume by name"""
for vol in vol_name_list:
id = self.get_volume_id_by_name(vol)
if id and (id not in existing_vol_ids):
if id not in ids_to_add:
ids_to_add.append(id)
else:
msg = "Unable to add volume name {0}, either it doesn't" \
" exist or already in volume group ".format(vol)
LOG.warn(msg)
self.module.fail_json(msg=msg)
"""add volume by id"""
for vol in vol_id_list:
"""verifying if volume id exists in array"""
vol_by_id = self.get_volume_details_by_id(volume_id=vol)
if vol_by_id not in existing_vol_ids:
if vol_by_id not in ids_to_add:
ids_to_add.append(vol_by_id)
else:
msg = "Unable to add volume id {0}, either it doesn't" \
" exist or already in volume group ".format(vol)
LOG.warn(msg)
self.module.fail_json(msg=msg)
LOG.debug("Volume IDs to add {0}".format(ids_to_add))
if len(ids_to_add) == 0:
return False
try:
self.provisioning.add_members_to_volume_group(
vg_id, ids_to_add)
return True
except PowerStoreException as pe:
errormsg = "Add existing volumes to volume group {0} " \
"failed with error {1}".format(vg_id, str(pe))
LOG.error(errormsg)
self.module.fail_json(msg=errormsg)
except Exception as e:
errormsg = "Add existing volumes to volume group {0} " \
"failed with error {1}".format(vg_id, str(e))
LOG.error(errormsg)
self.module.fail_json(msg=errormsg)
def modify_volume_group(self, vg_id, vg_name, description,
is_write_order_consistent,
protection_policy_id):
"""Modify volume group"""
try:
LOG.info("Modifying volume group: {0}".format(
vg_id))
self.provisioning.modify_volume_group(
vg_id, vg_name, description, is_write_order_consistent,
protection_policy_id)
LOG.info("Successfully modified volume group: {0}".format(
vg_id))
return True
except PowerStoreException as pe:
errormsg = "Modify volume group {0} failed with error {1}". \
format(vg_id, str(pe))
LOG.error(errormsg)
self.module.fail_json(msg=errormsg)
except Exception as e:
errormsg = "Modify volume group {0} failed with error {1}".\
format(vg_id, str(e))
LOG.error(errormsg)
self.module.fail_json(msg=errormsg)
def is_volume_group_modified(self, volume_group,protection_policy):
"""Check if the desired volume group state is different from existing
volume group"""
modified = False
if(('name' in volume_group and self.module.params['new_vg_name']
is not None) and (volume_group['name'].lower() !=
self.module.params['new_vg_name'].lower())):
modified = True
elif((volume_group['description'] is not None and self.module.params[
'description'] is not None and volume_group['description'].lower()
!= self.module.params['description'].lower()) or
(volume_group['description'] is None and
self.module.params['description'] is not None and
self.module.params['description'].lower() != 'none')):
modified = True
elif(('protection_policy_id' in volume_group and
protection_policy is not None and
volume_group['protection_policy_id'] !=
protection_policy) or
('protection_policy_id' not in volume_group and
protection_policy is not None and
protection_policy.lower() != 'none')):
modified = True
elif('is_write_order_consistent' in volume_group and
self.module.params['is_write_order_consistent'] is not None
and volume_group['is_write_order_consistent'] !=
self.module.params['is_write_order_consistent']):
modified = True
return modified
def create_volume_group(self, vg_name,
description,
protection_policy_id,
is_write_order_consistent):
"""Create a volume group"""
try:
LOG.info('Creating empty volume group {0} '.format(vg_name))
resp = self.provisioning.create_volume_group(
vg_name, description,
protection_policy_id=protection_policy_id,
is_write_order_consistent=is_write_order_consistent)
return True, resp
except PowerStoreException as pe:
errormsg = "Failed to create volume group {0} with error {1}". \
format(vg_name, str(pe))
LOG.error(errormsg)
self.module.fail_json(msg=errormsg)
except Exception as e:
errormsg = "Failed to create volume group {0} with error {1}".\
format(vg_name, str(e))
LOG.error(errormsg)
self.module.fail_json(msg=errormsg)
def get_protection_policy_id_by_name(self, name):
    """Look up a protection policy by name and return its id.

    Fails the module when the policy is missing or the lookup errors out.
    """
    try:
        LOG.info('Getting the details of protection policy '
                 '{0}'.format(name))
        policies = self.protection.get_protection_policy_by_name(name)
        if policies:
            LOG.info('Successfully got the protection policy '
                     'name {0}'.format(name))
            return policies[0]['id']
        msg = 'No protection policy present with name {0}'.format(name)
        LOG.error(msg)
        self.module.fail_json(msg=msg)
    except Exception as exc:
        msg = ('Get details of protection policy name: {0} failed'
               ' with error : {1} '.format(name, str(exc)))
        LOG.error(msg)
        self.module.fail_json(msg=msg)
def get_protection_policy_details_by_id(self, policy_id):
    """Validate a protection policy id.

    Despite the name, only the policy id is returned (not the full
    details dict); the call primarily confirms the policy exists.
    """
    try:
        LOG.info('Getting the details of protection policy by id'
                 ' {0}'.format(policy_id))
        details = self.protection.get_protection_policy_details(
            policy_id=policy_id)
        if details:
            LOG.info('Successfully got the details of '
                     'protection policy id {0}'.format(policy_id))
            return details['id']
        msg = 'No protection policy present with id {0}'.format(policy_id)
        LOG.error(msg)
        self.module.fail_json(msg=msg)
    except Exception as exc:
        msg = ('Get details of protection policy id: {0} failed'
               ' with error {1}'.format(policy_id, str(exc)))
        LOG.error(msg)
        self.module.fail_json(msg=msg)
def name_or_id(self, val):
    """Classify *val*: "ID" when it parses as a UUID, otherwise "NAME"."""
    try:
        UUID(str(val))
        LOG.info('{0} is a ID'.format(val))
        return "ID"
    except ValueError:
        LOG.info('{0} is a NAME'.format(val))
        return "NAME"
def perform_module_operation(self):
    """
    Perform different actions on volume group based on user parameter
    chosen in playbook
    """
    vg_id = self.module.params['vg_id']
    state = self.module.params['state']
    vg_name = self.module.params['vg_name']
    volumes = self.module.params['volumes']
    vol_state = self.module.params['vol_state']
    new_vg_name = self.module.params['new_vg_name']
    description = self.module.params['description']
    protection_policy = self.module.params['protection_policy']
    is_write_order_consistent = self.module.params[
        'is_write_order_consistent']
    volume_group = None
    # Exactly one of vg_name / vg_id must be supplied.
    if not vg_name and not vg_id:
        self.module.fail_json(msg='Specify volume group name or '
                                  'volume group id')
    elif vg_name and vg_id:
        self.module.fail_json(msg='Specify volume group name or '
                                  'volume group id, not both')
    volume_group = self.get_volume_group_details(vg_id=vg_id, name=vg_name)
    LOG.debug('volume_group details: {0}'.format(volume_group))
    # Resolve the protection policy (given either as a name or an id)
    # down to a policy id before any comparison or modification.
    if protection_policy:
        prot_pol_identifier_type = self.name_or_id(protection_policy)
        if prot_pol_identifier_type == "ID":
            protection_policy = self.get_protection_policy_details_by_id(
                protection_policy)
        if prot_pol_identifier_type == "NAME":
            protection_policy = self.get_protection_policy_id_by_name(
                protection_policy)
    modified = False
    if volume_group:
        modified = self.is_volume_group_modified(volume_group,
                                                 protection_policy)
        LOG.debug('Modified Flag: {0}'.format(modified))
    else:
        # Group does not exist yet: creation needs a name, and renaming
        # a non-existent group makes no sense.
        if not vg_name:
            self.module.fail_json(msg="vg_name is required to "
                                      "create a Volume Group")
        if new_vg_name:
            self.module.fail_json(msg="Invalid argument, "
                                      "new_vg_name is not required")
    if volumes and not vol_state:
        msg = "vol_state must also be specified along with Volumes"
        LOG.error(msg)
        self.module.fail_json(msg=msg)
    # Fill whichever identifier was not supplied from the lookup result.
    if vg_id is None and volume_group:
        vg_id = volume_group['id']
    if vg_name is None and volume_group:
        vg_name = volume_group['name']
    result = dict(
        changed=False,
        create_vg='',
        modify_vg='',
        add_vols_to_vg='',
        remove_vols_from_vg='',
        delete_vg='',
        volume_group_details='',
    )
    # Create or delete, depending on desired state vs. current existence.
    if state == 'present' and not volume_group:
        LOG.info('Creating volume group {0}'.format(vg_name))
        result['create_vg'], resp = self.\
            create_volume_group(vg_name, description,
                                protection_policy,
                                is_write_order_consistent)
        result['volume_group_details'] = resp
        volume_group = self.get_volume_group_details(vg_id=resp['id'])
        vg_id = volume_group['id']
    elif state == 'absent' and volume_group:
        LOG.info('Deleting volume group {0}'.format(vg_id))
        result['delete_vg'] = self.delete_volume_group(vg_id)
    # Volume membership changes within an existing group.
    if state == 'present' and vol_state == 'present-in-group' and \
            volume_group and volumes:
        result['add_vols_to_vg'] = self.add_volumes_to_volume_group(
            vg_id, volumes)
    elif state == 'present' and vol_state == 'absent-in-group' and \
            volume_group and volumes:
        LOG.info('Remove existing volume(s) from volume group {0}'.
                 format(vg_id))
        result['remove_vols_from_vg'] = self.\
            remove_volumes_from_volume_group(vg_id, volumes)
    # Apply attribute changes detected by is_volume_group_modified().
    if state == 'present' and volume_group and modified:
        LOG.info("From Modify : {0}".format(protection_policy))
        result['modify_vg'] = self.modify_volume_group(
            vg_id, new_vg_name, description, is_write_order_consistent,
            protection_policy)
    # Refresh the details so the caller sees the post-change state.
    if state == 'present' and volume_group:
        updated_vg = self.get_volume_group_details(vg_id=vg_id)
        result['volume_group_details'] = updated_vg
    if result['create_vg'] or result['modify_vg'] or result[
            'add_vols_to_vg'] or result['remove_vols_from_vg'] or \
            result['delete_vg']:
        result['changed'] = True
    self.module.exit_json(**result)
def get_powerstore_volume_group_parameters():
    """Argument spec accepted by the PowerStore volume group module."""
    return {
        'vg_name': dict(required=False, type='str'),
        'vg_id': dict(required=False, type='str'),
        'new_vg_name': dict(required=False, type='str'),
        'volumes': dict(required=False, type='list'),
        'vol_state': dict(required=False, type='str',
                          choices=['absent-in-group', 'present-in-group']),
        'state': dict(required=True, type='str',
                      choices=['present', 'absent']),
        'description': dict(required=False, type='str'),
        'is_write_order_consistent': dict(required=False, type='bool'),
        'protection_policy': dict(required=False, type='str'),
    }
def main():
    """Create PowerStore volume group object and perform action on it
    based on user input from playbook"""
    obj = PowerStoreVolumeGroup()
    obj.perform_module_operation()


# Module entry point when executed by Ansible.
if __name__ == '__main__':
    main()
| avs6/ansible-powerstore | dellemc_ansible/powerstore/library/dellemc_powerstore_volumegroup.py | dellemc_powerstore_volumegroup.py | py | 26,850 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ansible.module_utils.dellemc_ansible_utils.get_logger",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "ansible.module_utils.dellemc_ansible_utils",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "logging.INFO",
"line_number": 164,
"us... |
1967983354 | # do some analysis of variance
import sys
import csv
from tabulate import tabulate
from scipy import stats
ANOVA_HEADERS = ['src', 'DF', 'SS', 'MS', 'F', 'p']
def sum_x(data):
    """Return the grand total of every observation in *data*.

    Args:
        data: 2-D iterable of numbers (list of rows).
    """
    # Builtin sum over a generator replaces the manual accumulator loop.
    return sum(sum(row) for row in data)
def sum_xsq(data):
    """Return the sum of squared observations over all of *data*.

    Args:
        data: 2-D iterable of numbers (list of rows).
    """
    # Flattening generator replaces the nested accumulator loops.
    return sum(item ** 2 for row in data for item in row)
def prep_data(file, reps):
    """Read a CSV of numbers and pre-compute the sums the ANOVA routines need.

    Each run of `reps` consecutive rows is collapsed into one row of sums
    ("grid"). Returns (raw data, grid, column sums, grid row sums,
    grand total, observation count, column count, row count).
    """
    raw = [[float(cell) for cell in line] for line in csv.reader(file)]
    n_rows = len(raw)
    n_cols = len(raw[0])
    n = n_cols * n_rows
    grid = [
        [sum(raw[k][j] for k in range(i, i + reps)) for j in range(n_cols)]
        for i in range(0, n_rows, reps)
    ]
    row_sums = [sum(row) for row in grid]
    col_sums = [sum(column) for column in zip(*grid)]
    grand_total = sum(col_sums)
    return raw, grid, col_sums, row_sums, grand_total, n, n_cols, n_rows
def pretty_anova_tbl(anova, sources):
    """Print the ANOVA results dict as a formatted table, one row per
    variation source; missing statistics are rendered as blanks."""
    table = [
        [source] + [anova[source].get(attr) for attr in ANOVA_HEADERS[1:]]
        for source in sources
    ]
    print(tabulate(table, headers=ANOVA_HEADERS))
def simple_anova(file, reps):
    """One-way ANOVA: partitions total variation into treatments
    (columns of the CSV) and error, then prints the table."""
    data, _, treat_sums, _, grand_total, n, treat_n, obs_per_treat = prep_data(file, reps)
    anova = {}; sources = ['treats', 'err', 'total']
    for item in sources:
        anova[item] = {}
    # DF's
    anova['treats']['DF'] = treat_n-1
    anova['total']['DF'] = n-1
    anova['err']['DF'] = anova['total']['DF'] - anova['treats']['DF']
    # SS's
    anova['total']['SS'] = sum_xsq(data) - ((sum_x(data)**2)/n)
    ssTreats = 0
    for i in range(treat_n):
        ssTreats += (treat_sums[i]**2)/obs_per_treat
    ssTreats -= (grand_total**2)/n
    anova['treats']['SS'] = ssTreats
    anova['err']['SS'] = anova['total']['SS'] - anova['treats']['SS']
    # MS's
    anova['treats']['MS'] = anova['treats']['SS']/anova['treats']['DF']
    anova['err']['MS'] = anova['err']['SS']/anova['err']['DF']
    # F
    anova['treats']['F'] = anova['treats']['MS']/anova['err']['MS']
    # p-value from the survival function of the F distribution
    anova['treats']['p'] = stats.f(anova['treats']['DF'], anova['err']['DF']).sf(anova['treats']['F'])
    pretty_anova_tbl(anova, sources)
def blocked_anova(file, reps):
    """Randomised complete block ANOVA: treatments are the CSV columns and
    each group of `reps` consecutive rows forms one block."""
    data, _, treat_sums, block_sums, grand_total, n, n_treats, obs_per_treat = prep_data(file, reps)
    n_blocks = obs_per_treat//reps
    obs_per_block = n // n_blocks
    anova = {}; sources = ['treats', 'blocks', 'err', 'total']
    for item in sources:
        anova[item] = {}
    # DF's
    anova['total']['DF'] = n-1
    anova['treats']['DF'] = n_treats-1
    anova['blocks']['DF'] = n_blocks-1
    anova['err']['DF'] = anova['total']['DF'] - anova['treats']['DF'] - anova['blocks']['DF']
    # SS's
    anova['total']['SS'] = sum_xsq(data) - ((sum_x(data) ** 2) / n)
    ssTreats = 0
    for i in range(n_treats):
        ssTreats += (treat_sums[i] ** 2) / obs_per_treat
    ssTreats -= (grand_total ** 2) / n
    anova['treats']['SS'] = ssTreats
    ssBlocks = 0
    for i in range(n_blocks):
        ssBlocks += (block_sums[i]**2) / obs_per_block
    ssBlocks -= (grand_total**2) / n
    anova['blocks']['SS'] = ssBlocks
    anova['err']['SS'] = anova['total']['SS'] - anova['treats']['SS'] - anova['blocks']['SS']
    # MS's
    anova['treats']['MS'] = anova['treats']['SS'] / anova['treats']['DF']
    anova['blocks']['MS'] = anova['blocks']['SS'] / anova['blocks']['DF']
    anova['err']['MS'] = anova['err']['SS'] / anova['err']['DF']
    # F statistics for both treatments and blocks against the error MS
    anova['treats']['F'] = anova['treats']['MS'] / anova['err']['MS']
    anova['blocks']['F'] = anova['blocks']['MS'] / anova['err']['MS']
    # p-values from the survival function of the F distribution
    anova['treats']['p'] = stats.f(anova['treats']['DF'], anova['err']['DF']).sf(anova['treats']['F'])
    anova['blocks']['p'] = stats.f(anova['blocks']['DF'], anova['err']['DF']).sf(anova['blocks']['F'])
    pretty_anova_tbl(anova, sources)
def twoway_anova(file, reps):
    """Two-way ANOVA with interaction.

    Factor 1 levels are the CSV columns, groups of `reps` rows are
    replicates, and each row-group is one level of factor 2.
    """
    data, gridsum_data, fac1_sums, fac2_sums, grand_total, n, n_fac1s, obs_per_fac1 = prep_data(file, reps)
    n_fac2s = obs_per_fac1 // reps
    obs_per_fac2 = n // n_fac2s
    obs_per_square = obs_per_fac1 // n_fac2s
    anova = {}
    sources = ['faccol', 'facrow', 'inter', 'err', 'total']
    for item in sources:
        anova[item] = {}
    # Degrees of freedom
    anova['total']['DF'] = n - 1
    anova['faccol']['DF'] = n_fac1s - 1
    anova['facrow']['DF'] = n_fac2s - 1
    anova['inter']['DF'] = anova['faccol']['DF'] * anova['facrow']['DF']
    anova['err']['DF'] = (anova['total']['DF'] - anova['faccol']['DF']
                          - anova['facrow']['DF'] - anova['inter']['DF'])
    # Sums of squares
    anova['total']['SS'] = sum_xsq(data) - ((sum_x(data) ** 2) / n)
    # Loop variables renamed: the original used `sum` as the loop variable,
    # shadowing the builtin.
    ss_fac1 = 0
    for col_total in fac1_sums:
        ss_fac1 += (col_total ** 2) / obs_per_fac1
    ss_fac1 -= grand_total ** 2 / n
    anova['faccol']['SS'] = ss_fac1
    ss_fac2 = 0
    for row_total in fac2_sums:
        ss_fac2 += (row_total ** 2) / obs_per_fac2
    ss_fac2 -= grand_total ** 2 / n
    anova['facrow']['SS'] = ss_fac2
    ss_inter = 0
    for i in range(n_fac2s):  # rows
        for j in range(n_fac1s):  # cols
            ss_inter += gridsum_data[i][j] ** 2 / obs_per_square
    ss_inter -= grand_total ** 2 / n
    ss_inter -= ss_fac1 + ss_fac2
    anova['inter']['SS'] = ss_inter
    anova['err']['SS'] = (anova['total']['SS'] - anova['faccol']['SS']
                          - anova['facrow']['SS'] - anova['inter']['SS'])
    # Mean squares
    anova['faccol']['MS'] = anova['faccol']['SS'] / anova['faccol']['DF']
    anova['facrow']['MS'] = anova['facrow']['SS'] / anova['facrow']['DF']
    anova['inter']['MS'] = anova['inter']['SS'] / anova['inter']['DF']
    anova['err']['MS'] = anova['err']['SS'] / anova['err']['DF']
    # F statistics against the error mean square
    anova['faccol']['F'] = anova['faccol']['MS'] / anova['err']['MS']
    anova['facrow']['F'] = anova['facrow']['MS'] / anova['err']['MS']
    anova['inter']['F'] = anova['inter']['MS'] / anova['err']['MS']
    # p-values from the survival function of the F distribution
    anova['faccol']['p'] = stats.f(anova['faccol']['DF'], anova['err']['DF']).sf(anova['faccol']['F'])
    anova['facrow']['p'] = stats.f(anova['facrow']['DF'], anova['err']['DF']).sf(anova['facrow']['F'])
    anova['inter']['p'] = stats.f(anova['inter']['DF'], anova['err']['DF']).sf(anova['inter']['F'])
    pretty_anova_tbl(anova, sources)
if __name__ == '__main__':
    # CLI: anova.py [simple|blocked|twoway] [csv_file] [reps]
    if len(sys.argv) < 4:
        print('Usage: python3', sys.argv[0], '[simple|blocked|twoway]', '[csv_file]', '[reps]')
        exit()
    method = sys.argv[1]
    reps = int(sys.argv[3])
    with open(sys.argv[2]) as file:
        if method == 'simple':
            simple_anova(file, reps)
        elif method == 'blocked':
            blocked_anova(file, reps)
        elif method == 'twoway':
            twoway_anova(file, reps)
        else:
            print('Unknown method:', method)
            print('Allowed methods: simple, blocked, twoway')
| Koellewe/anova | anova.py | anova.py | py | 7,268 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "csv.reader",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "tabulate.tabulate",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "scipy.stats.f",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_n... |
19686747044 |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
import csv
import numpy as np
import matplotlib.mlab
def plotDecisionRegionQDA(sampleMean, CV_, training, data):
    """Visualise the QDA decision regions: classify every point of a
    100x100 grid over [0,8]x[0,8] with the per-class covariances CV_[j]
    and scatter-plot the predicted labels."""
    all_res = []
    minX, maxX, minY, maxY = 0., 8., 0., 8.
    # create one-dimensional arrays for x and y
    x = np.linspace(minX, maxX, 100, endpoint=True)
    y = np.linspace(minY, maxY, 100, endpoint=True)
    cnt = 0
    for i in range(len(x) - 1):
        for k in range(len(y) - 1):
            # Pick the class minimising the discriminant score.
            classIndex = 0
            logLikelihood = float("inf")
            for j in range(len(data.species.unique())):
                Nk = training.groupby('species').size()[data.species.unique()[j]]
                newLikelihood = logLikelihoodCalc([x[i], y[k]], sampleMean[j], CV_[j], Nk)
                if (newLikelihood < logLikelihood):
                    logLikelihood = newLikelihood
                    classIndex = j
            test = [x[i], y[k], data.species.unique()[classIndex]]
            test = pd.DataFrame([test], columns=['pet_length', 'pet_width', 'species'])
            cnt = cnt + 1
            all_res.append(test)
    plotPairwiseScatterPlot(pd.concat(all_res))
def plotDecisionRegionLDA(sampleMean, CV_, training, data):
    """Visualise the LDA decision regions: same grid sweep as the QDA
    variant, but every class shares the single pooled covariance CV_."""
    all_res = []
    minX, maxX, minY, maxY = 0., 8., 0., 8.
    # create one-dimensional arrays for x and y
    x = np.linspace(minX, maxX, 100, endpoint=True)
    y = np.linspace(minY, maxY, 100, endpoint=True)
    cnt = 0
    for i in range(len(x) - 1):
        for k in range(len(y) - 1):
            # Pick the class minimising the discriminant score.
            classIndex = 0
            logLikelihood = float("inf")
            for j in range(len(data.species.unique())):
                Nk = training.groupby('species').size()[data.species.unique()[j]]
                newLikelihood = logLikelihoodCalc([x[i], y[k]], sampleMean[j], CV_, Nk)
                if (newLikelihood < logLikelihood):
                    logLikelihood = newLikelihood
                    classIndex = j
            test = [x[i], y[k], data.species.unique()[classIndex]]
            test = pd.DataFrame([test], columns=['pet_length', 'pet_width', 'species'])
            cnt = cnt + 1
            all_res.append(test)
    plotPairwiseScatterPlot(pd.concat(all_res))
def plotPairwiseScatterPlot(dataframe):
    """Scatter petal length against petal width, coloured by species."""
    sns.scatterplot(data=dataframe, x="pet_length", y="pet_width",
                    hue="species")
def plotDecisionRegion(C, mu):
    """Draw contour lines of the bivariate normal N(mu, C) over a fixed grid.

    ``matplotlib.mlab.bivariate_normal`` was removed in Matplotlib 3.1, so
    the density is now computed directly from the covariance matrix C
    (the old call also passed variances where mlab expected standard
    deviations, and the product of the off-diagonals as the covariance).
    """
    x = np.arange(-5, 10.0, 0.005)
    y = np.arange(-5, 10.0, 0.005)
    X, Y = np.meshgrid(x, y)
    # Deviations from the mean, stacked as (..., 2) vectors.
    pos = np.dstack((X - mu[0], Y - mu[1]))
    inv_C = np.linalg.inv(C)
    det_C = np.linalg.det(C)
    # Quadratic form (p - mu)^T C^-1 (p - mu) evaluated over the whole grid.
    quad = np.einsum('...i,ij,...j->...', pos, inv_C, pos)
    Z = np.exp(-0.5 * quad) / (2 * np.pi * np.sqrt(det_C))
    plt.contour(X, Y, Z, levels=10)
def Mahalonobis(x, C):
    """Squared Mahalanobis norm x^T C^-1 x.

    Uses a linear solve instead of explicitly inverting C, which is
    cheaper and numerically better conditioned.
    """
    return x.dot(np.linalg.solve(C, np.transpose(x)))
# -log likelihood (discriminant score: smaller is better)
def logLikelihoodCalc(data, mu, C, Nk):
    # NOTE(review): this takes the log of the Mahalanobis distance rather
    # than the distance itself, which is not the standard Gaussian
    # log-likelihood (0.5 * d^2). Presumably intentional — verify against
    # the intended discriminant before changing.
    return (1/2 * np.log(np.linalg.det(C)) + np.log(Mahalonobis(data-mu, C)) - np.log(Nk))
def generateSampleMean(data):
    """Return [mean petal length, mean petal width] of *data*."""
    return [data["pet_length"].mean(), data["pet_width"].mean()]
def generateClassSampleMeans(data):
    """Per-class feature means, one row per species (in unique() order)."""
    class_frames = (data.loc[data['species'] == s]
                    for s in data.species.unique())
    means = [[frame["pet_length"].mean(), frame["pet_width"].mean()]
             for frame in class_frames]
    return np.asarray(means)
def generateClassCovarianceMatrix(data, mu):
    """One covariance estimate per species, aligned with the rows of *mu*."""
    mu = np.asarray(mu)
    covariances = []
    for idx, species in enumerate(data.species.unique()):
        class_frame = data.loc[data['species'] == species]
        covariances.append(generatePooledCovariance(class_frame, mu[idx]))
    return np.asarray(covariances)
def generatePooledCovariance(data, mean):
    """Pooled covariance: average outer product of per-class deviations.

    *mean* holds one mean vector per species, in the same order as
    ``data.species.unique()``.

    Bug fix: the accumulator used to be created with ``np.ndarray(...)``,
    which is UNINITIALISED memory, so the result silently included
    whatever garbage happened to be in that buffer; it is now a zero
    matrix.

    NOTE(review): when this is called per class (single mean vector),
    ``mean[j]`` indexes a scalar component rather than a vector — verify
    that indexing against the QDA call site.
    """
    data = pd.DataFrame(data)
    data.reset_index(inplace=True, drop=True)
    mean = np.asarray(mean)
    C = np.zeros((2, 2), dtype=float, order='F')
    classes = data.species.unique()  # hoisted out of the loops
    n = len(data.index)
    for i in range(n):
        x_ = [data["pet_length"][i], data["pet_width"][i]]
        for j in range(len(classes)):
            if classes[j] == data["species"][i]:
                C = C + 1 / n * np.outer(x_ - mean[j],
                                         np.transpose(x_ - mean[j]))
    return C
def LOOCVQDA(data, mode):
    """Leave-one-out cross-validation error of QDA on the petal features.

    mode: 0 -> general per-class covariance, 1 -> diagonal (independent
    features), 2 -> isotropic (scaled identity).
    """
    LOOCV_Error = 0.0;
    data = data.reset_index()
    for i in range(len(data.index)):
        # Hold out row i; refit on the remaining rows.
        training = data.copy(deep=True)
        training.drop(training.index[i], inplace=True)
        test = data.iloc[i]
        classIndex = 0
        logLikelihood = float("inf")
        sampleMean = generateClassSampleMeans(training)
        CV_ = generateClassCovarianceMatrix(training, sampleMean)
        # Restrict each class covariance according to the requested mode.
        for j in range(CV_.shape[0]):
            if mode > 0:
                CV_[j] = np.diag(np.diag(CV_[j]))
            if mode > 1:
                CV_[j] = (np.trace(CV_[j]) / CV_[j].shape[1]) * \
                    np.identity(CV_[j].shape[1])
        # Classify the held-out point by the minimum discriminant score.
        for j in range(len(data.species.unique())):
            Nk = training.groupby('species').size()[data.species.unique()[j]]
            newLikelihood = logLikelihoodCalc([test['pet_length'], test['pet_width']], sampleMean[j], CV_[j], Nk)
            if (newLikelihood < logLikelihood):
                logLikelihood = newLikelihood
                classIndex = j
        # Plot the decision regions once, on the final fold.
        if (i == len(data.index) - 1):
            #plotPairwiseScatterPlot(training)
            plotDecisionRegionQDA(sampleMean, CV_, training, data)
        if data.species.unique()[classIndex] != test["species"]:
            LOOCV_Error = LOOCV_Error + 1 / len(data.index);
    return LOOCV_Error
def LOOCVLDA(data, mode):
    """Leave-one-out cross-validation error of LDA (pooled covariance).

    mode: 0 -> general covariance, 1 -> diagonal (independent features),
    2 -> isotropic (scaled identity).
    """
    LOOCV_Error = 0.0;
    data = data.reset_index()
    for i in range(len(data.index)):
        # Hold out row i; refit on the remaining rows.
        training = data.copy(deep=True)
        training.drop(training.index[i], inplace=True)
        test = data.iloc[i]
        classIndex = 0
        logLikelihood = float("inf")
        sampleMean = generateClassSampleMeans(training)
        CV_ = generatePooledCovariance(training, sampleMean)
        # Restrict the shared covariance according to the requested mode.
        if mode > 0:
            CV_ = np.diag(np.diag(CV_))
        if mode > 1:
            CV_ = (np.trace(CV_) / CV_.shape[0]) * \
                np.identity(CV_.shape[0])
        # Classify the held-out point by the minimum discriminant score.
        for j in range(len(data.species.unique())):
            #if(i == len(data.index) - 1):
                #plotDecisionRegion(CV_, sampleMean[j])
            Nk = training.groupby('species').size()[data.species.unique()[j]]
            newLikelihood = logLikelihoodCalc([test['pet_length'], test['pet_width']], sampleMean[j], CV_, Nk)
            if (newLikelihood < logLikelihood):
                logLikelihood = newLikelihood
                classIndex = j
        # Plot the decision regions once, on the final fold.
        if (i == len(data.index) - 1):
            plotPairwiseScatterPlot(training)
            plotDecisionRegionLDA(sampleMean, CV_, training, data)
        if data.species.unique()[classIndex] != test["species"]:
            LOOCV_Error = LOOCV_Error + 1/len(data.index);
    return LOOCV_Error
# Script entry: load the iris data set, keep only the petal features, and
# report leave-one-out error for the selected classifier variants.
with open("iris.data.txt", 'rt') as csvfile:
    iris = csv.reader(csvfile, delimiter=',', quotechar='|')
    IrisReduced = pd.DataFrame(list(iris))
    IrisReduced.columns = ['sep_length', 'sep_width', 'pet_length', 'pet_width', 'species']
    IrisReduced.drop(['sep_length'], axis=1, inplace=True)
    IrisReduced.drop(['sep_width'], axis=1, inplace=True)
    IrisReduced[['pet_length', 'pet_width']] = IrisReduced[['pet_length', 'pet_width']].astype(float)
# LDA mode: 0-general, 1-independent, 2- isotropic
# NOTE(review): the label says "general case" but mode 1 is passed, which
# per the mode comment above is the independent case — confirm which was
# intended.
print("Error for LDA: general case")
print(LOOCVLDA(IrisReduced, 1))
#print("Error for LDA: independent case")
#print(LOOCVLDA(IrisReduced, 1))
#print("Error for LDA: isotropic case")
#print(LOOCVLDA(IrisReduced, 2))
#QDA mode: 0-general, 1-independent, 2- isotropic
#print("Error for QDA: general case")
#print(LOOCVQDA(IrisReduced, 0))
#print("Error for QDA: in independent case")
#print(LOOCVQDA(IrisReduced, 1))
#print("Error for QDA: isotropic case")
#print(LOOCVQDA(IrisReduced, 2))
plt.show()
| aaravamudan2014/DiscriminantAnalysisClassifier | classifier.py | classifier.py | py | 7,883 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "seaborn.set",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line... |
36683242978 | import config
import telebot
import requests
from telebot import types
bot = telebot.TeleBot(config.token)
# Декодую json
response = requests.get(config.url).json()
@bot.message_handler(commands=['start', 'help'])
def send_welcome(message):
    """Show a USD/EUR reply keyboard and wait for the user's choice."""
    keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True, row_width=2)
    keyboard.add(types.KeyboardButton('USD'), types.KeyboardButton('EUR'))
    prompt = bot.send_message(message.chat.id,
                              "Дізнатися курс ПриватБанка ",
                              reply_markup=keyboard)
    # The next message from this chat goes to process_coin_step.
    bot.register_next_step_handler(prompt, process_coin_step)
def process_coin_step(message):
    """Reply with buy/sell rates for the currency the user picked."""
    try:
        remove_keyboard = types.ReplyKeyboardRemove(selective=False)
        matches = (coin for coin in response if message.text == coin['ccy'])
        for coin in matches:
            bot.send_message(message.chat.id,
                             printCoin(coin['buy'], coin['sale']),
                             reply_markup=remove_keyboard,
                             parse_mode="Markdown")
    except Exception:
        bot.reply_to(message, 'ooops!')
def printCoin(buy, sale):
    """Format the buy/sell rates as a Markdown string for the user."""
    return f"💰 *Курс купівлі:* {buy!s}\n💰 *Курс продажу:* {sale!s}"
# Enable saving next step handlers to file "./.handlers-saves/step.save".
# Delay=2 means that after any change in next step handlers (e.g. calling register_next_step_handler())
# saving will happen after a delay of 2 seconds.
bot.enable_save_next_step_handlers(delay=2)
# Load next_step_handlers from save file (default "./.handlers-saves/step.save")
# WARNING It will work only if enable_save_next_step_handlers was called!
bot.load_next_step_handlers()
if __name__ == '__main__':
    # Start long-polling; none_stop keeps polling through transient errors.
    bot.polling(none_stop=True)
{
"api_name": "telebot.TeleBot",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "config.token",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "config.url",
"line_nu... |
37936161747 | import apache_beam as beam
with beam.Pipeline() as pipeline:
    # Values() drops the key from each (emoji, name) pair, leaving only the
    # plant names, which are then printed element by element.
    plants = (
        pipeline
        | 'Garden plants' >> beam.Create([
            ('🍓', 'Strawberry'),
            ('🥕', 'Carrot'),
            ('🍆', 'Eggplant'),
            ('🍅', 'Tomato'),
            ('🥔', 'Potato'),
        ])
        | 'Values' >> beam.Values()
        | beam.Map(print))
{
"api_name": "apache_beam.Pipeline",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "apache_beam.Create",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "apache_beam.Values",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "apache_beam.... |
2769915489 | # coding= utf-8
import json
import mitmproxy.http
# NOTE: Chinese text may display garbled in some terminals (original note).
class GetJson:
    """mitmproxy addon that captures the xueqiu batch-quote API response
    and dumps it to disk for later use as mock data."""

    def response(self, flow: mitmproxy.http.HTTPFlow):
        url = flow.request.url
        if ("https://stock.xueqiu.com/v5/stock/batch/quote.json?_t" in url
                and "x=" in url):
            # Persist the decoded payload so it can be replayed as a mock.
            payload = json.loads(flow.response.text)
            with open("./data/quote.json", "w", encoding="UTF-8") as out:
                json.dump(payload, out, indent=2, ensure_ascii=False)
            print("加载入文件完成...")
# Addon instances picked up by mitmproxy when run with `mitmdump -s`.
addons = [
    GetJson()
]
if __name__ == '__main__':
    # Allow running the script directly: start mitmdump on port 8080 with
    # this file registered as the addon script.
    from mitmproxy.tools.main import mitmdump
    mitmdump(['-p', '8080', "-s", __file__])
| liwanli123/HogwartProjectPractice | test_mock/get_json.py | get_json.py | py | 677 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mitmproxy.http.http",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "mitmproxy.http",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "json.dump",
"lin... |
2926069987 | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 29 21:18:31 2017
@author: jagan
"""
#Import Library
import matplotlib.pyplot as plt
import numpy as np
# Main Function
def main():
    """Render a grouped bar chart comparing men's and women's scores."""
    N = 5
    width = 0.35
    # Data for the bar chart
    men_score = (75, 70, 98, 62, 55)
    wom_score = (65, 77, 98, 85, 63)
    ind = np.arange(N)
    # Offset the second series by one bar width so the groups sit side by
    # side; drawing both at `ind` made the taller bar hide the shorter one.
    p1 = plt.bar(ind, men_score, width)
    p2 = plt.bar(ind + width, wom_score, width)
    # Label axes and add a legend mapping colours to series.
    plt.title("Matplot Class - Bar Chart")  # Adds title to graph
    plt.xlabel("Number")
    plt.ylabel("Score")
    plt.legend((p1[0], p2[0]), ('Men', 'Women'))
    plt.show()


if __name__ == "__main__":
    main()
{
"api_name": "numpy.arange",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.bar",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplo... |
19420197734 | import logging.handlers
import re
import sys
import tqdm
class Utilities(object):
    """
    Utility functions.
    """

    # Browser-like request headers used for outgoing HTTP requests.
    HEADERS = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate',
        'Connection': 'close'
    }

    # Regular expressions used to parse URLs and fingerprint Domino pages.
    URL_REGEX = re.compile(r'(https?:\/\/[\d\w.:-]+)', re.I)
    FORM_REGEX = re.compile(r'method[\'\"= ]{1,4}post[\'\"]?', re.I)
    OPEN_REGEX = re.compile(r'name[\'\"= ]{1,4}notesview[\'\"]?', re.I)
    ACCOUNT_REGEX = re.compile(r'/([a-f0-9]{32}/[a-f0-9]{32})', re.I)
    USER_FIELD_REGEX = re.compile(r'user.+', re.I)
    REDIRECT_FIELD_REGEX = re.compile(r'redirect.+', re.I)
    NAMES_REGEX = re.compile(r'name[\'\"= ]{1,4}notesview[\'\"]?', re.I)
    WEBADMIN_REGEX = re.compile(r'<title>.*administration</title>', re.I)
    RESTRICTED_REGEX = re.compile(r'(notes exception|not authorized)', re.I)
    VERSION_REGEX = re.compile(r'(?:version|domino administrator|domino|release)[=":\s]{0,4}([\d.]+)(?:\s|\")?', re.I)
    LINUX_USER_REGEX = re.compile(r'([a-z0-9-_].+):(.+)', re.I)
    WINDOWS_USER_REGEX = re.compile(r'(.+)\\(.+)', re.I)
    PATH_REGEX = re.compile(r'DataDirectory\s*=\s*\'(.+)\';', re.I)

    def set_logging(self):
        """
        Configure the basic logging environment for the application.
        """
        # NOTE(review): every call attaches another StreamHandler to the
        # 'DomiOwned' logger, so calling this more than once would
        # duplicate output lines.
        logger = logging.getLogger('DomiOwned')
        logger.setLevel(logging.DEBUG)
        custom_format = CustomLoggingFormatter()
        handler = logging.StreamHandler()
        handler.setFormatter(custom_format)
        logger.addHandler(handler)
        return logger

    def parse_credentials(self, value):
        """
        Handle credentials if value is None.
        """
        return '' if value is None else value

    def check_url(self, url):
        """
        Check for valid base URL.

        Returns the scheme://host[:port] portion; terminates the process
        on input that does not look like an HTTP(S) URL.
        """
        # NOTE(review): relies on self.logger being assigned elsewhere —
        # set_logging() returns a logger but does not set the attribute.
        if self.URL_REGEX.search(url):
            return self.URL_REGEX.search(url).group(1)
        else:
            self.logger.error('Invalid URL provided')
            sys.exit()

    def setup_progress(self, total):
        """
        Setup progress bar.
        """
        progress_bar = tqdm.tqdm(
            total=total,
            desc="Progress",
            smoothing=0.5,
            bar_format='{desc}{percentage:3.0f}%|{bar}|({n_fmt}/{total_fmt})|{elapsed} '
        )
        return progress_bar
class CustomLoggingFormatter(logging.Formatter):
    """
    Logging formatter that prefixes each message with a coloured status
    tag ([*], [+], [!], [-]) chosen by the record's level.
    """

    DEBUG_FORMAT = "\033[1m\033[34m[*]\033[0m %(msg)s"
    INFO_FORMAT = "\033[1m\033[32m[+]\033[0m %(msg)s"
    WARN_FORMAT = "\033[1m\033[33m[!]\033[0m %(msg)s"
    ERROR_FORMAT = "\033[1m\033[31m[-]\033[0m %(msg)s"

    def __init__(self):
        super().__init__(fmt="%(levelno)d: %(msg)s", datefmt=None, style='%')

    def format(self, record):
        # Temporarily swap in the level-specific format string, render the
        # record, then restore the default so later records are unaffected.
        saved_fmt = self._style._fmt
        per_level = {
            logging.DEBUG: CustomLoggingFormatter.DEBUG_FORMAT,
            logging.INFO: CustomLoggingFormatter.INFO_FORMAT,
            logging.WARN: CustomLoggingFormatter.WARN_FORMAT,
            logging.ERROR: CustomLoggingFormatter.ERROR_FORMAT,
        }
        self._style._fmt = per_level.get(record.levelno, saved_fmt)
        try:
            return logging.Formatter.format(self, record)
        finally:
            self._style._fmt = saved_fmt
class Banner(object):
"""
Domi-Owned visual banner.
"""
SHOW = """
__________ __________ __________
| |\| | |\\
| * * ||| * * * | * ||
| * * ||| | * ||
| * * ||| * * * | * ||
|__________|||__________|__________||
| || `---------------------`
| * * ||
| ||
| * * ||
|__________||
`----------`
IBM/Lotus Domino OWNage
"""
| coldfusion39/domi-owned | domi_owned/utilities.py | utilities.py | py | 3,743 | python | en | code | 114 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "re.I",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "re.I",
"line_number": 20,
"us... |
32521250931 | # _*_ coding: utf-8 _*_
from flask import render_template, redirect, url_for, flash, request
from ..models import User, News, Weather
from . import main
from .. import HWMonitor
from flask_login import login_required, current_user, login_user
from sqlalchemy import desc
from random import sample
# Home page
@main.route('/', methods=['GET', 'POST'])
def index():
    """Render the home page: news slider, paginated articles, latest
    weather for Tianjin, and system hardware status."""
    # If nobody is logged in, sign in with the shared demo account (id 100).
    if not current_user.is_authenticated:
        user = User.query.filter_by(id=100).first()
        login_user(user)
    # Slider: pick 5 random articles out of the 10 most recent BBC items.
    news_count = News.query.filter_by(news_agency='BBC').order_by(desc(News.id)).first().id  # id of the newest BBC article
    slider = []
    if news_count >= 20:
        # NOTE(review): ids are sampled by arithmetic on the max id, so a
        # deleted row in that range yields a None slide — verify.
        selected_news = sample(range(news_count - 10, news_count), 5)  # 5 random article ids
        for news in selected_news:
            slider.append(News.query.filter_by(id=news).first())
    # Pagination and news: 12 articles per page, newest first.
    page = request.args.get('page', 1, type=int)
    pagination = News.query.order_by(desc(News.id)).paginate(page, per_page=12, error_out=False)
    articles = pagination.items
    # Weather: latest snapshot stored for Tianjin.
    weather = Weather.query.filter_by(city="Tianjin").order_by(desc(Weather.id)).first()
    # System status.
    hw_status = HWMonitor()
    return render_template("index.html",
                           articles=articles,
                           weather=weather,
                           pagination=pagination,
                           slider=slider,
                           hw=hw_status)
# News-fetching endpoint; grabs 50 items by default.
@main.route('/fetchnews', methods=['GET', 'POST'])
@login_required
def fetch_news():
    """Pull the latest 50 news items, then redirect back to the home page."""
    News.fetch_news(50)
    return redirect(url_for('.index'))
| panshuo/News-Weather | app/main/views.py | views.py | py | 1,752 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask_login.current_user.is_authenticated",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "flask_login.current_user",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "models.User.query.filter_by",
"line_number": 17,
"usage_type": "c... |
3247799211 | import datetime
# Types of fields (single-letter codes used in DBF field descriptors).
CHAR = "C"     # character / text field
NUMERAL = "N"  # numeric field
DATE = "D"     # date field (YYYYMMDD)
LOGICAL = "L"  # boolean field

# System encoding which is used to convert field names between bytes and string.
SYSTEM_ENCODING = "ascii"
# Reference data
ENCODINGS = {
# got from dbf description [dbfspec]
# id name description
0x00: ("ascii", "ASCII"), # internal use
0x01: ("cp437", "DOS USA"),
0x02: ("cp850", "DOS Multilingual"),
0x03: ("cp1252", "Windows ANSI"),
0x04: ("mac_roman", "Standard Macintosh"), # NOT SHURE
0x64: ("cp852", "EE MS-DOS"),
0x65: ("cp866", "Russian MS-DOS"),
0x66: ("cp865", "Nordic MS-DOS"),
0x67: ("cp861", "Icelandic MS-DOS"),
0x6A: ("cp737", "Greek MS-DOS (437G)"),
0x6B: ("cp857", "Turkish MS-DOS"),
0x96: ("mac_cyrillic", "Russian Macintosh"),
0x97: ("mac_latin2", "Eastern Europe Macintosh"), # NOT SHURE
0x98: ("mac_greek", "Greek Macinstosh"),
0xC8: ("cp1250", "Windows EE"),
0xC9: ("cp1251", "Russian Windows"),
0xCA: ("cp1254", "Turkish Windows"),
0xCB: ("cp1253", "Greek Windows"),
## These encodings are not supported by Python but a part of DBF spec.
# 0x68: ('cp895', 'Kamenicky (Czech) MS-DOS'),
# 0x69: ('cp790', 'Mazovia (Polish) MS-DOS'),
}
REVERSE_ENCODINGS = dict(
[(value[0], (code, value[1])) for code, value in ENCODINGS.items()]
)
SIGNATURES = {
0x02: "FoxBase",
0x03: "dBASE III",
0x04: "dBASE IV",
0x05: "dBASE V",
0x30: "Visual FoxPro",
0x31: "Visual FoxPro with AutoIncrement field",
0x43: "dBASE IV with SQL table and memo file",
0x7B: "dBASE IV with memo file",
0x83: "dBASE III with memo file",
0x8B: "dBASE IV with memo file",
0x8E: "dBASE IV with SQL table",
0xB3: ".dbv and .dbt memo (Flagship)",
0xCB: "dBASE IV with SQL table and memo file",
0xE5: "Clipper SIX driver with SMT memo field",
0xF5: "FoxPro with memo field",
0xFB: "FoxPro",
}
SUPPORTED_SIGNATURES = (0x03, 0x04, 0x05)
# < -- little endian
# B -- version number (signature)
# 3B -- last update (YY, MM, DD)
# L -- number of records
# H -- length of header
# H -- length of each record
# 17x -- pad (2B -- reserved,
# B -- incomplete transaction,
# B -- encryption flag,
# 4B -- free record thread (reserved for LAN)
# 8B -- reserved for multiuser dBASE
# B -- MDX flag)
# B -- language driver
# 2x -- pad (2B -- reserved)
HEADER_FORMAT = "<B3BLHH17xB2x"
# < -- little endian
# 11s -- field name in ASCII (terminated by 0x00)
# c -- field type (ASCII)
# 4x -- field data address ( 2B -- address in memory (for dBASE)
# OR 4B -- offset of field from
# beginning of record (for FoxPro)
# B -- field length
# B -- decimal count
# 14x -- pad (2B -- reserved for multi-user dBASE,
# B -- work area id ()
# 2B -- reserved for multi-user dBASE,
# B -- flag for SET FIELDS
# 7B -- reserved
# B -- index field flag)
# B -- language driver
# 2x -- pad (2B -- reserved)
FIELD_DESCRIPTION_FORMAT = "<11sc4xBB14x"
# Common functions
def dbf2date(dbf_str):
    """
    Convert a DBF date to ``datetime.date``.

    Args:
        `dbf_str`:
            string in format YYYYMMDD

    Returns ``None`` for ``None`` input or anything that is not exactly
    eight digits.
    """
    if dbf_str is None or len(dbf_str) != 8 or not dbf_str.isdigit():
        return None
    year, month, day = dbf_str[:4], dbf_str[4:6], dbf_str[6:8]
    return datetime.date(int(year), int(month), int(day))
def date2dbf(dt):
    """
    Convert a ``datetime.date`` to a DBF date (bytes ``YYYYMMDD``).

    Args:
        `dt`:
            datetime.date instance

    Raises:
        TypeError: if *dt* is not a ``datetime.date`` instance.
    """
    if not isinstance(dt, datetime.date):
        # Fixed typo in the error message ("Espects" -> "Expects").
        raise TypeError("Expects datetime.date instead of %s" % type(dt))
    return b"%04d%02d%02d" % (dt.year, dt.month, dt.day)
def dbf2str(dbf_str):
    """
    Convert a DBF date to a display string ``DD.MM.YYYY``.

    Args:
        `dbf_str`:
            dbf-date (bytes in format YYYYMMDD)

    Returns ``None`` for ``None`` input or anything that is not exactly
    eight digits.
    """
    if dbf_str is None or len(dbf_str) != 8 or not dbf_str.isdigit():
        return None
    text = dbf_str.decode(SYSTEM_ENCODING)
    return "%s.%s.%s" % (text[6:8], text[4:6], text[:4])
def str2dbf(dt_str):
    """
    Convert a display date string to a DBF date (bytes ``YYYYMMDD``).

    Args:
        `dt_str`:
            string in format DD.MM.YYYY

    Raises:
        TypeError: if *dt_str* is not a ``str``.
        ValueError: if the string is not exactly 10 characters long.
    """
    if not isinstance(dt_str, str):
        # Fixed typo in the error message ("Espects" -> "Expects").
        raise TypeError("Expects string or unicode instead of %s" % type(dt_str))
    str_l = len(dt_str)
    if str_l != 10:
        raise ValueError(
            "Datestring must be 10 symbols (DD.MM.YYYY) "
            "length instead of %d" % str_l
        )
    day, month, year = dt_str.split(".")
    return "".join((year, month, day)).encode(SYSTEM_ENCODING)
# References:
# [dbfspec]: http://www.clicketyclick.dk/databases/xbase/format/index.html
| y10h/ydbf | ydbf/lib.py | lib.py | py | 5,038 | python | en | code | 15 | github-code | 36 | [
{
"api_name": "datetime.date",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 125,
"usage_type": "attribute"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.