seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5744062882 | import argparse
import GitPyService as gps
parser = argparse.ArgumentParser(description="Arguments Description")
parser.add_argument('--repo', nargs='?', default='https://github.com/takenet/lime-csharp', help='Repo to use')
parser.add_argument('--folder', nargs='?', default='', help='Folder to use')
# Bug fix: every dispatch branch below compares args.option against string
# literals ("1".."9"), so the default must be the string '1' — the old int
# default of 1 matched no branch at all.
parser.add_argument('--option', nargs='?', default='1', help='Option selected from list')
parser.add_argument('--yearTo', nargs='?', default="2019", help='Year to check up to')
parser.add_argument('--yearFrom', nargs='?', default="1969", help='Year to check from')
# Bug fix: a search string should default to a string, not the int 1.
parser.add_argument('--searchString', nargs='?', default='', help='String to search for')
parser.add_argument('--count', nargs='?', default=10, help='Positions returned')
parser.add_argument('--fileType', nargs='?', default='.cs', help='File extension to search for')
args = parser.parse_args()
gitPyService = gps.GitPyService(args.folder, args.repo)
# number of files in the first and last commits
if args.option == "1":
    gitPyService.GetCountFilesFirstAndLastCommits(args.folder)
# number of files with the given extension in the first and last commits
elif args.option == "2":
    gitPyService.GetCountFilesFirstAndLastCommits(args.folder, args.fileType)
# number of files in each commit
elif args.option == "3":
    gitPyService.GetCountFilesByCommit(args.folder)
# number of files with the given extension in each commit
elif args.option == "4":
    gitPyService.GetCountFilesByCommit(args.folder, args.fileType)
# lines of code for the given extension in each commit
elif args.option == "5":
    gitPyService.GetLinesByFile(folderPath=args.folder, fileType=args.fileType)
# number of files per year
elif args.option == "6":
    gitPyService.GetCountFilesByYear(args.folder)
# number of files with the given extension per year
elif args.option == "7":
    gitPyService.GetCountFilesByYear(folderPath=args.folder, fileType=args.fileType)
# lines of code for the given extension per year
elif args.option == "8":
    gitPyService.GetCommitedFilesByYear(folderPath=args.folder, fileType=args.fileType)
elif args.option == "9":
    gitPyService.GetAllFilesByYear(folderPath=args.folder, fileType=args.fileType)
| rafaatsouza/ufmg-practical-assignments | software-repositories-mining/exercices/exercise-2.py | exercise-2.py | py | 2,147 | python | pt | code | 1 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "GitPyService.GitPyService",
"line_number": 16,
"usage_type": "call"
}
] |
13076195782 | import serial
import serial.tools.list_ports
import re
import sys
import pymysql
from time import sleep
db = pymysql.connect(host="twofast-RPi3-0", # your host
user="writer", # username
passwd="heiko", # password
db="NG_twofast_DB") # name of the database
def saveDB(counts_WS, counts_BS):
    """Insert one pair of counter readings into the HBox_Due table.

    Uses the module-level `db` connection: commits on success, rolls the
    connection back (best-effort, no re-raise) on failure, and always
    closes the cursor.
    """
    # Create a Cursor object to execute queries.
    cur = db.cursor()
    try:
        cur.execute("""INSERT INTO HBox_Due (counts_WS, counts_BS) VALUES (%s, %s)""", (counts_WS, counts_BS))
        db.commit()
    except Exception:
        # Bug fix: transactions are rolled back on the connection, not the
        # cursor — the old `cur.rollback()` raised AttributeError inside
        # the handler and masked the original database error.
        db.rollback()
    finally:
        cur.close()
def fun_read_serial_ports():
    """Return the serial ports currently visible to pyserial, as a list."""
    available = serial.tools.list_ports.comports()
    return list(available)
def pi_flush(serial_port):
    """Open `serial_port` at 9600 baud and drop everything pending.

    Discards any unread input bytes and aborts/clears any pending output.
    """
    arduino = serial.Serial(port=serial_port, baudrate=9600)
    arduino.flushInput()
    arduino.flushOutput()
def pi_read(serial_port):
    """Block until a line arrives on `serial_port` and return it as text.

    Returns the stripped UTF-8 decoded line, or '-1' when the received
    bytes are not valid UTF-8.
    """
    arduino = serial.Serial(port=serial_port, baudrate=9600)
    # Busy-wait for incoming data.
    while arduino.inWaiting() == 0:
        pass
    raw = arduino.readline(500)
    try:
        return raw.decode('utf-8').strip()
    except UnicodeDecodeError:
        return '-1'
arduinoPort = None
ports = fun_read_serial_ports()
print(ports)
for port in ports:
# print(port)
t = re.findall(r'(/dev/\S+).+Arduino Due', str(port))
if len(t) > 0:
arduinoPort = t[0]
print('Arduino Due port found: ', str(port))
break
if arduinoPort == None:
print('No Arduino connected on serial port. Exiting.')
sys.exit(1)
counts_WS = 0
counts_BS = 0
# readout of the arduino
pi_flush(arduinoPort)
while True:
try:
ardRead = pi_read(arduinoPort)
s = ardRead.rstrip().split()
if len(s) == 2: # WS BS2
print(s)
counts_WS = float(s[0])
counts_BS = float(s[1])
saveDB(counts_WS, counts_BS)
sleep(0.1)
except KeyboardInterrupt:
print('Ctrl + C. Exiting. Flushing serial connection.')
pi_flush(arduinoPort)
sys.exit(1)
finally:
pi_flush(arduinoPort)
| kromerh/phd_python | 01_neutron_generator_contol/HBoxDueReadout_V0.py | HBoxDueReadout_V0.py | py | 2,299 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymysql.connect",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "serial.tools.list_ports.comports",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "serial.tools",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "ser... |
24140152426 | from django.test import TestCase
import graphene
from api.query.story import Query
from api.tests.util import request_with_loaders
from story.factories import StoryFactory
class TestStoriesQuery(TestCase):
    """End-to-end tests for the top-level `stories` GraphQL query."""

    def setUp(self):
        self.schema = graphene.Schema(query=Query)
        self.request = request_with_loaders()

    def build_query_with_fields(self, *fields):
        """Build a `stories` query selecting the given field names."""
        selection = ' '.join(fields)
        query = '''
            query getStories {
                stories {
                    %s
                }
            }
        ''' % selection
        return query

    def test_stories_query__returns_list_of_stories(self):
        StoryFactory.create(id=2)
        StoryFactory.create(id=5)
        gql = self.build_query_with_fields('id')
        outcome = self.schema.execute(gql, context=self.request)
        expected = [
            {'id': '2'},
            {'id': '5'},
        ]
        self.assertListEqual(outcome.data['stories'], expected)

    def test_story_node_query__returns_empty_field_when_id_does_not_exist(self):
        gql = self.build_query_with_fields('id')
        outcome = self.schema.execute(gql, context=self.request)
        self.assertIsNone(outcome.errors)
        self.assertDictEqual(outcome.data, {'stories': []})

    def test_story_node_query__returns_model_fields(self):
        StoryFactory.create(
            id=2,
            title='Hello world',
            subtitle='Hello GraphQL',
        )
        gql = self.build_query_with_fields('id', 'title', 'subtitle')
        outcome = self.schema.execute(gql, context=self.request)
        self.assertIsNone(outcome.errors)
        self.assertDictEqual(outcome.data['stories'][0], {
            'id': '2',
            'title': 'Hello world',
            'subtitle': 'Hello GraphQL',
        })
| dvndrsn/graphql-python-tutorial | api/tests/query/test_story.py | test_story.py | py | 1,833 | python | en | code | 16 | github-code | 36 | [
{
"api_name": "django.test.TestCase",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "graphene.Schema",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "api.query.story.Query",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "api.tests.u... |
24614942714 | from typing import Union
import numpy as np
from src.det import det, build_cofactor_matrix
# with extra space
# def transpose(matrix, size):
# transpose_matrix = matrix.copy()
#
# for i in range(size):
# for j in range(size):
# transpose_matrix[i][j] = matrix[j][i]
#
# return transpose_matrix
# optimization of both space and time
def transpose(matrix, size):
    """Transpose a square `size` x `size` matrix in place and return it.

    Only the upper triangle is visited; each element is swapped with its
    mirror below the diagonal, so no extra matrix is allocated.
    """
    for row in range(size):
        for col in range(row, size):
            matrix[row][col], matrix[col][row] = matrix[col][row], matrix[row][col]
    return matrix
def adjoint(matrix, size):
    """Return the adjugate (transposed cofactor matrix) of `matrix`."""
    cofactors = [[0] * size for _ in range(size)]
    for row in range(size):
        for col in range(size):
            minor = build_cofactor_matrix(matrix, row, col, size)
            sign = pow(-1, row + col)
            cofactors[row][col] = det(minor, size - 1) * sign
    return transpose(cofactors, size)
def inverse(matrix: Union[np.array, list], size):
    """Return the inverse of `matrix` as adj(matrix) / det(matrix).

    Assumes the matrix is non-singular; a zero determinant raises
    ZeroDivisionError, matching the direct division below.
    """
    determinant = det(matrix, size)
    adjugate = adjoint(matrix, size)
    return [[entry / determinant for entry in row] for row in adjugate]
| Lakshmikanth2001/LinearAlgebra | src/inverse.py | inverse.py | py | 1,128 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "src.det.build_cofactor_matrix",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "src.det.det",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "numpy.array",
... |
72791842345 | from collections import deque
"""
Binary Search Tree — ia binary tree with the constraint:
- left subtree < currNode < right subtree
The left and right subtree each must also be a binary search tree.
"""
class Node:
    """A single BST node holding a value and optional child links."""

    def __init__(self, value, left=None, right=None):
        self.value = value
        self.left = left
        self.right = right


class BST:
    """Iterative binary search tree: left subtree < node < right subtree.

    Duplicate values are silently ignored by add().  All traversals are
    implemented iteratively (no recursion).
    """

    def __init__(self):
        self.root = None

    def add(self, value):
        """Insert `value`; duplicates are dropped without error."""
        if self.root is None:
            self.root = Node(value)
            return
        currNode = self.root
        while True:
            if value < currNode.value:
                if currNode.left is None:
                    currNode.left = Node(value)
                    return
                currNode = currNode.left
            elif value > currNode.value:
                if currNode.right is None:
                    currNode.right = Node(value)
                    return
                currNode = currNode.right
            else:
                return  # duplicate value: keep the tree unchanged

    def contains(self, value):
        """Return True if `value` is stored in the tree."""
        currNode = self.root
        while currNode is not None:
            if value == currNode.value:
                return True
            currNode = currNode.left if value < currNode.value else currNode.right
        return False

    def findMin(self):
        """Return the smallest value, or float('-inf') for an empty tree."""
        if self.root is None:
            return float("-inf")
        currNode = self.root
        while currNode.left is not None:
            currNode = currNode.left
        return currNode.value

    def findMax(self):
        """Return the largest value, or float('inf') for an empty tree."""
        if self.root is None:
            return float("inf")
        currNode = self.root
        while currNode.right is not None:
            currNode = currNode.right
        return currNode.value

    def preOrder(self):
        """Root-left-right traversal; returns None for an empty tree."""
        if self.root is None:
            return
        allValues = []
        stack = [self.root]
        while stack:
            currNode = stack.pop()
            allValues.append(currNode.value)
            # Bug fix: push the RIGHT child first so the left subtree is
            # popped (visited) before the right one; the original pushed
            # left first and therefore visited the right subtree first.
            if currNode.right is not None:
                stack.append(currNode.right)
            if currNode.left is not None:
                stack.append(currNode.left)
        return allValues

    def inOrder(self):
        """Left-root-right (DFS) traversal; sorted order for a BST."""
        stack = []
        currNode = self.root
        allValues = []
        while stack or currNode:
            if currNode:
                stack.append(currNode)
                currNode = currNode.left
            else:
                currNode = stack.pop()
                allValues.append(currNode.value)
                currNode = currNode.right
        return allValues

    def postOrder(self):
        """Left-right-root traversal; returns None for an empty tree."""
        if self.root is None:
            return
        allValues = deque()
        stack = [self.root]
        while stack:
            currNode = stack.pop()
            allValues.appendleft(currNode.value)
            # Bug fix: push LEFT before right so the visit order is
            # root-right-left, which appendleft reverses into the correct
            # left-right-root postorder.  The original pushed right first
            # and produced a reversed preorder instead.
            if currNode.left is not None:
                stack.append(currNode.left)
            if currNode.right is not None:
                stack.append(currNode.right)
        return list(allValues)

    def levelOrder(self):
        """Breadth-first traversal; returns [] for an empty tree."""
        if self.root is None:
            return []
        queue = deque([self.root])
        results = []
        while queue:
            # deque.popleft() is O(1); the original list.pop(0) was O(n).
            currNode = queue.popleft()
            results.append(currNode.value)
            if currNode.left is not None:
                queue.append(currNode.left)
            if currNode.right is not None:
                queue.append(currNode.right)
        return results
# Demo: build a tree from a fixed value list and print each traversal order.
bst = BST()
nums = [25, 20, 36, 10, 22, 30, 40, 5, 12, 28, 38, 48, 1, 8, 15, 45, 50]
for value in nums:
    bst.add(value)
print("\nPREORDER\n", bst.preOrder())
print("\nINORDER\n", bst.inOrder())
print("\nPOSTORDER\n", bst.postOrder())
print("\nLEVELORDER\n", bst.levelOrder())
| cs50victor/dsa | dsa/non-linear/implement/Graphs/BinaryTrees/BinarySearchTrees.py | BinarySearchTrees.py | py | 3,330 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 128,
"usage_type": "call"
}
] |
74470691304 | import os
import logging
import numpy
import random
from gensim.models import ldaseqmodel
analyze_topics_static = __import__('4a_analyze_topics_static')
config = __import__('0_config')
try:
import pyLDAvis
CAN_VISUALIZE = True
except ImportError:
CAN_VISUALIZE = False
if __name__ == "__main__":
    logging.getLogger().setLevel(logging.INFO)
    # Seed every RNG used downstream so runs are reproducible.
    numpy.random.seed(config.SEED)
    random.seed(config.SEED)
    if not os.path.exists(config.OUTPUT_FOLDER):
        os.makedirs(config.OUTPUT_FOLDER)
    if not os.path.exists(config.MODEL_FOLDER):
        os.makedirs(config.MODEL_FOLDER)
    sections_to_analyze = [config.DATA_1A_FOLDER]
    for section in sections_to_analyze:
        # Clean and tokenise the section's documents, then build the corpus
        # plus the per-year time slices used by the sequential model.
        data = analyze_topics_static.load_and_clean_data(section)
        data, lemma_to_idx, idx_to_lemma = analyze_topics_static.preprocess(section, data)
        corpus, dictionary, texts, time_slices, _ = analyze_topics_static.preprocessing_topic(data, idx_to_lemma)
        # Load model
        model_file = config.TRAIN_PARAMETERS[section][1]
        assert os.path.exists(model_file)
        num_topics = config.TRAIN_PARAMETERS[section][0]
        model, c_v, u_mass = analyze_topics_static.train_topic_model_or_load(corpus, dictionary, texts, num_topics=num_topics, alpha='symmetric', eta='symmetric', chunksize=2000, decay=0.6, offset=0.8, passes=10, iterations=400, eval_every=10, model_file=model_file, only_viz=config.DO_NOT_COMPUTE_COHERENCE)
        print('topic:' + str(num_topics), ', time_slice' + ' '.join([str(i) + ':' + str(j) for i,j in enumerate(time_slices)]) + ', c_v:' + str(round(c_v, 4)) + ', cu:' + str(round(u_mass, 4)))
        # Fit the dynamic (sequential) LDA initialised from the static model.
        dyn_model = ldaseqmodel.LdaSeqModel(initialize='ldamodel', lda_model=model, time_slice=time_slices, corpus=corpus, id2word=dictionary, num_topics=num_topics, passes=10, random_state=config.SEED)
        filename = config.TRAIN_PARAMETERS[section][3]
        dyn_model.save(filename)
        # One pyLDAvis snapshot per time slice (assumes one slice per year,
        # starting at config.START_YEAR — confirm against preprocessing).
        for t in range(0, len(time_slices)):
            doc_topic, topic_term, doc_lengths, term_frequency, vocab = dyn_model.dtm_vis(time=t, corpus=corpus)
            prepared = pyLDAvis.prepare(topic_term_dists=topic_term, doc_topic_dists=doc_topic, doc_lengths=doc_lengths, vocab=vocab, term_frequency=term_frequency)
            loc_dot_ext = filename.rfind('.')
            year = config.START_YEAR + t
            # e.g. "model.bin" -> "model_2012.bin"
            filename_dyn = filename[:loc_dot_ext] + '_{}'.format(year) + filename[filename.rfind('.'):]
pyLDAvis.save_html(prepared, filename_dyn + '.html') | Diego999/Risk-Analysis-using-Topic-Models-on-Annual-Reports | 4c_analyze_topics_through_time.py | 4c_analyze_topics_through_time.py | py | 2,519 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.seed",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.random",
... |
555988903 | import requests
import time
import numpy as np
from bs4 import BeautifulSoup as bs
def dict_vacancy():
    """Return a fresh vacancy record with every field initialised to None."""
    fields = ('Вакансия', 'Зарплата мин', 'Зарплата мкс',
              'Валюта', 'Ссылка', 'Сайт')
    return {field: None for field in fields}
def salary_tpl(salary_str, dlm='—', spl='\xa0'):
    """Parse a scraped salary string into (min, max, currency_code).

    Each element is None when the string does not carry that piece of
    information — e.g. an empty string or the "negotiable" case, which
    starts with 'По'.  `dlm` is the range delimiter, `spl` the separator
    between number groups and the currency token.
    """
    currency_codes = {'₽': 'RUB',
                      'USD': 'USD',
                      'руб.': 'RUB',
                      'бел.\xa0руб.': 'BYN',
                      'KZT': 'KZT',
                      'RUB': 'RUB',
                      'EUR': 'EUR',
                      'грн.': 'UAH'}
    low = high = code = None
    if salary_str and not salary_str.startswith('По'):
        # The currency token is whatever follows the last separator.
        tail = salary_str.split(spl)[-1]
        code = currency_codes[tail]
        tail_len = len(tail)
        if salary_str[:2].isdigit():
            # "<min> dlm <max> <currency>" range form.
            parts = salary_str[:-tail_len].replace(' ', '').replace('\xa0', '').split(dlm)
            low = int(parts[0])
            high = int(parts[1])
        elif salary_str[:2] == 'от':
            low = int(salary_str.replace(' ', '').replace('\xa0', '')[2:-tail_len])
        elif salary_str[:2] == 'до':
            high = int(salary_str.replace(' ', '').replace('\xa0', '')[2:-tail_len])
    return low, high, code
def parse_sj(vacancy_search):
    """Scrape superjob.ru for `vacancy_search`, walking every result page.

    Returns a tuple (list of vacancy dicts, last HTTP status code).
    CSS class names used below are site-internal and brittle.
    """
    main_link_sj = 'https://www.superjob.ru'
    vacancy_lst = []
    page_sj = 1
    while True:
        response_sj = requests.get(f'{main_link_sj}/vacancy/search/?keywords={vacancy_search}'
                                   f'&geo%5Bc%5D%5B0%5D=1&page={page_sj}')
        if response_sj.ok:
            html_parsed_sj = bs(response_sj.text, 'lxml')
            # NOTE(review): `{'class': '_3dPok'} and {'class': '_1QIBo'}`
            # evaluates to just the second dict, so only class _1QIBo is
            # matched — confirm whether both classes were intended.
            vacancy_on_page_sj = html_parsed_sj.findAll('a', {'class': '_3dPok'} and {'class': '_1QIBo'})
            for vac in vacancy_on_page_sj:
                vacancy = dict_vacancy()
                vacancy['Вакансия'] = vac.getText()
                vacancy['Ссылка'] = main_link_sj + vac['href']
                vacancy['Сайт'] = main_link_sj
                salary = vac.findParent().findParent().find('span', {'class': 'f-test-text-company-item-salary'})\
                    .getText()
                vacancy['Зарплата мин'], vacancy['Зарплата мкс'], vacancy['Валюта'] = salary_tpl(salary)
                vacancy_lst.append(vacancy)
            # Keep paging while a "next" button is present.
            if html_parsed_sj.find('a', {'class': 'f-test-button-dalshe'}):
                page_sj += 1
            else:
                break
        else:
            return vacancy_lst, response_sj.status_code
    return vacancy_lst, response_sj.status_code
def parse_hh(vacancy_search):
    """Scrape hh.ru for `vacancy_search`, walking every result page.

    Returns a tuple (list of vacancy dicts, last HTTP status code).
    A desktop User-Agent is sent because hh.ru rejects the default
    requests user agent.
    """
    main_link_hh = 'https://spb.hh.ru'
    vacancy_lst = []
    page_hh = 0
    while True:
        response_hh = requests.get(f'{main_link_hh}/search/vacancy?L_is_autosearch=false&area=113&'
                                   f'clusters=true&enable_snippets=true&text={vacancy_search}&page={page_hh}',
                                   headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; '
                                            'Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                                            'Chrome/78.0.3904.108 Safari/537.36'})
        if response_hh.ok:
            html_parsed_hh = bs(response_hh.text, 'lxml')
            vacancy_on_page_hh = html_parsed_hh.findAll('a', {'data-qa': 'vacancy-serp__vacancy-title'})
            for vac in vacancy_on_page_hh:
                vacancy = dict_vacancy()
                vacancy['Вакансия'] = vac.getText()
                vacancy['Ссылка'] = vac['href']
                vacancy['Сайт'] = main_link_hh
                # The salary node sits four levels up from the title link.
                compensation = vac.findParent().findParent().findParent().findParent()\
                    .find('div', {'data-qa': 'vacancy-serp__vacancy-compensation'})
                if compensation:
                    salary = compensation.getText()
                    # hh.ru uses '-' as the range delimiter and plain spaces
                    # as group separators (superjob uses '—' and '\xa0').
                    vacancy['Зарплата мин'], vacancy['Зарплата мкс'], vacancy['Валюта'] = \
                        salary_tpl(salary, dlm='-', spl=' ')
                vacancy_lst.append(vacancy)
            if html_parsed_hh.find('a', {'data-qa': 'pager-next'}):
                page_hh += 1
                # Random polite delay before the next page request.
                time.sleep(np.random.sample()*3)
            else:
                break
        else:
            return vacancy_lst, response_hh.status_code
    return vacancy_lst, response_hh.status_code
if __name__ == "__main__":
print(parse_hh('java'), parse_sj('java'))
| GruXsqK/Methods_scraping | Lesson_3/Scraper.py | Scraper.py | py | 4,763 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
... |
25209669435 | from django.shortcuts import render
from django.http import HttpResponse
from django.shortcuts import redirect
from django.db.models import Q
from django.db.models import Count,Sum,Avg,Min,Max
from django.core.paginator import Paginator
from django.shortcuts import get_object_or_404, redirect, render
from django.views import generic
from django import forms
from .models import Message, Comment
from .forms import MessageForm, FindForm
def index(request, page=1):
    """Top page: paginated message list; a POST appends a new message."""
    if (request.method == 'POST'):
        obj = Message()
        form = MessageForm(request.POST, instance=obj)
        # Bug fix: calling save() on an unvalidated ModelForm raises
        # ValueError for invalid input; validate first and silently skip
        # invalid posts (the list is re-rendered either way).
        if form.is_valid():
            form.save()
    data = Message.objects.all().reverse()
    paginator = Paginator(data, 5)  # five messages per page
    params = {
        'title': 'Top',
        'data': paginator.get_page(page),
    }
    return render(request, 'hello3/index.html', params)
def edit(request, num):
    """Edit message `num`, with a read-only preview step.

    POST saves the change and redirects to the index; a GET carrying valid
    form data renders the preview; a plain GET renders the edit form.
    """
    obj = Message.objects.get(id=num)
    if (request.method == 'POST'):
        # NOTE(review): save() is called without is_valid(); invalid POST
        # data raises ValueError here — confirm and validate first.
        message = MessageForm(request.POST, instance=obj)
        message.save()
        return redirect(to='index')
    form = MessageForm(request.GET)  # when moving to the preview the form data is passed via GET (not POSTed yet)
    if form.is_valid():  # runs on preview, i.e. when the GET data validates through MessageForm
        # pass only the form in the context
        context = {'form': form }
        return render(
            request,
            "hello3/preview.html",
            context=context
        )
    params = {  # runs on the first (plain) GET
        'title': 'Edit',
        'id': num,
        'form': MessageForm(instance=obj),
    }
    return render(request, 'hello3/edit.html', params)
def create(request):
    """Render the creation form; on a valid POST store the new message."""
    params = {
        'title': 'Create',
        'form': MessageForm(),
    }
    if (request.method == 'POST'):
        obj = Message()
        message = MessageForm(request.POST, instance=obj)
        # Bug fix: validate before save — an unvalidated ModelForm.save()
        # raises ValueError on bad input.  Invalid posts now re-render the
        # form instead of crashing; valid ones redirect as before.
        if message.is_valid():
            message.save()
            return redirect(to='index')
    return render(request, 'hello3/create.html', params)
# Form used for both comments and replies
CommentForm = forms.modelform_factory(Comment, fields=('text', ))
class PostList(generic.ListView):
    """Article list view: renders every Message."""
    template_name = 'hello3/post_list.html'
    model = Message
class PostDetail(generic.DetailView):
    """Article detail view for a single Message."""
    template_name = 'hello3/post_detail.html'
    model = Message

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Comments with no parent are comments on the article itself
        # (replies point at their parent comment instead).
        context['comment_list'] = self.object.comment_set.filter(parent__isnull=True)
        return context
def comment_create(request, post_pk):
    """Create a comment attached directly to the article `post_pk`."""
    post = get_object_or_404(Message, pk=post_pk)
    form = CommentForm(request.POST or None)
    if request.method == 'POST':
        # Attach the comment to its article before the actual save.
        comment = form.save(commit=False)
        comment.post = post
        comment.save()
        return redirect(to='post_detail', pk=post.pk)
    context = {
        'form': form,
        'post': post
    }
    return render(request, 'hello3/comment_form.html', context)
def reply_create(request, comment_pk):
"""コメントへの返信"""
comment = get_object_or_404(Comment, pk=comment_pk)
post = comment.post
form = CommentForm(request.POST or None)
if request.method == 'POST':
reply = form.save(commit=False)
reply.parent = comment
reply.post = post
reply.save()
return redirect(to='post_detail', pk=post.pk)
context = {
'form': form,
'post': post,
'comment': comment,
}
return render(request, 'hello3/comment_form.html', context) | yamachanyama/Django_app | hello3/views.py | views.py | py | 3,978 | python | ja | code | 0 | github-code | 36 | [
{
"api_name": "models.Message",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "forms.MessageForm",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "models.Message.objects.all",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "models.Me... |
25842561969 | from sqlalchemy import ForeignKey, Table, Column
from sqlalchemy.sql.sqltypes import Integer, String, Float, Boolean, Date
from config.db import meta, engine
# "castings" table: one casting session per row; castingDirector and
# director are both foreign keys into people.id.
castings = Table("castings", meta,
                 Column("id", Integer, primary_key=True),
                 Column("castingDate", Date),
                 Column("name", String(255)),
                 Column("castingDirector", Integer, ForeignKey("people.id")),
                 Column("director", Integer, ForeignKey("people.id")),
                 Column("inPerson", Boolean),
                 Column("inProcess", Boolean),
                 # NOTE(review): String(355) looks like a typo for 255 — confirm.
                 Column("notes", String(355)))
meta.create_all(engine) | Lorea13/Profesionales-del-Arte | backend/models/casting.py | casting.py | py | 554 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlalchemy.Table",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "config.db.meta",
"line_number": 5,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.sql.sql... |
23971685802 | from __future__ import unicode_literals
import os
import re
import json
from contextlib import contextmanager
from collections import defaultdict
from functools import wraps, partial
import psycopg2
from PyQt4.QtCore import Qt, QSettings, QRect
from PyQt4.QtGui import (
QIcon, QMessageBox, QDialog, QStandardItem, QMenu, QAction,
QStandardItemModel, QTreeView, QAbstractItemView,
QDockWidget, QWidget, QVBoxLayout, QSizePolicy,
QSortFilterProxyModel, QLineEdit, QDialogButtonBox
)
from qgis.core import (
QgsMapLayerRegistry, QgsBrowserModel, QgsDataSourceURI,
QgsCredentials, QgsVectorLayer, QgsMimeDataUtils, QgsRasterLayer
)
from menu_builder_dialog_base import Ui_Dialog
QGIS_MIMETYPE = 'application/x-vnd.qgis.qgis.uri'
ICON_MAPPER = {
'postgres': ":/plugins/MenuBuilder/resources/postgis.svg",
'WMS': ":/plugins/MenuBuilder/resources/wms.svg",
'WFS': ":/plugins/MenuBuilder/resources/wfs.svg",
'OWS': ":/plugins/MenuBuilder/resources/ows.svg",
'spatialite': ":/plugins/MenuBuilder/resources/spatialite.svg",
'mssql': ":/plugins/MenuBuilder/resources/mssql.svg",
'gdal': ":/plugins/MenuBuilder/resources/gdal.svg",
'ogr': ":/plugins/MenuBuilder/resources/ogr.svg",
}
class MenuBuilderDialog(QDialog, Ui_Dialog):
def __init__(self, uiparent):
super(MenuBuilderDialog, self).__init__()
self.setupUi(self)
# reference to caller
self.uiparent = uiparent
self.combo_profile.lineEdit().setPlaceholderText(self.tr("Profile name"))
# add icons
self.button_add_menu.setIcon(QIcon(":/plugins/MenuBuilder/resources/plus.svg"))
self.button_delete_profile.setIcon(QIcon(":/plugins/MenuBuilder/resources/delete.svg"))
# custom qtreeview
self.target = CustomQtTreeView(self)
self.target.setGeometry(QRect(440, 150, 371, 451))
self.target.setAcceptDrops(True)
self.target.setDragEnabled(True)
self.target.setDragDropMode(QAbstractItemView.DragDrop)
self.target.setObjectName("target")
self.target.setDropIndicatorShown(True)
self.target.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.target.setHeaderHidden(True)
sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.target.sizePolicy().hasHeightForWidth())
self.target.setSizePolicy(sizePolicy)
self.target.setAutoFillBackground(True)
self.verticalLayout_2.addWidget(self.target)
self.browser = QgsBrowserModel()
self.source.setModel(self.browser)
self.source.setHeaderHidden(True)
self.source.setDragEnabled(True)
self.source.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.menumodel = MenuTreeModel(self)
self.target.setModel(self.menumodel)
self.target.setAnimated(True)
# add a dock widget
self.dock_widget = QDockWidget("Menus")
self.dock_widget.resize(400, 300)
self.dock_widget.setFloating(True)
self.dock_widget.setObjectName(self.tr("Menu Tree"))
self.dock_widget_content = QWidget()
self.dock_widget.setWidget(self.dock_widget_content)
dock_layout = QVBoxLayout()
self.dock_widget_content.setLayout(dock_layout)
self.dock_view = DockQtTreeView(self.dock_widget_content)
self.dock_view.setDragDropMode(QAbstractItemView.DragOnly)
self.dock_menu_filter = QLineEdit()
self.dock_menu_filter.setPlaceholderText(self.tr("Filter by table description (postgis only)"))
dock_layout.addWidget(self.dock_menu_filter)
dock_layout.addWidget(self.dock_view)
self.dock_view.setHeaderHidden(True)
self.dock_view.setDragEnabled(True)
self.dock_view.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.dock_view.setAnimated(True)
self.dock_view.setObjectName("treeView")
self.proxy_model = LeafFilterProxyModel(self)
self.proxy_model.setFilterRole(Qt.ToolTipRole)
self.proxy_model.setFilterCaseSensitivity(Qt.CaseInsensitive)
self.profile_list = []
self.table = 'qgis_menubuilder_metadata'
self.layer_handler = {
'vector': self.load_vector,
'raster': self.load_raster
}
# connect signals and handlers
self.combo_database.activated.connect(partial(self.set_connection, dbname=None))
self.combo_schema.activated.connect(self.update_profile_list)
self.combo_profile.activated.connect(partial(self.update_model_idx, self.menumodel))
self.button_add_menu.released.connect(self.add_menu)
self.button_delete_profile.released.connect(self.delete_profile)
self.dock_menu_filter.textEdited.connect(self.filter_update)
self.dock_view.doubleClicked.connect(self.load_from_index)
self.buttonBox.rejected.connect(self.reject)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.button(QDialogButtonBox.Apply).clicked.connect(self.apply)
def filter_update(self):
text = self.dock_menu_filter.displayText()
self.proxy_model.setFilterRegExp(text)
def show_dock(self, state, profile=None, schema=None):
if not state:
# just hide widget
self.dock_widget.setVisible(state)
return
# dock must be read only and deepcopy of model is not supported (c++ inside!)
self.dock_model = MenuTreeModel(self)
if profile:
# bypass combobox
self.update_model(self.dock_model, schema, profile)
else:
self.update_model_idx(self.dock_model, self.combo_profile.currentIndex())
self.dock_model.setHorizontalHeaderLabels(["Menus"])
self.dock_view.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.proxy_model.setSourceModel(self.dock_model)
self.dock_view.setModel(self.proxy_model)
self.dock_widget.setVisible(state)
    def show_menus(self, state, profile=None, schema=None):
        """Toggle the profile's menus in the QGIS main menu bar.

        When `state` is truthy the menus are (re)loaded; otherwise every
        previously added menu is removed from the menu bar.
        """
        if state:
            self.load_menus(profile=profile, schema=schema)
            return
        # remove menus
        for menu in self.uiparent.menus:
            self.uiparent.iface.mainWindow().menuBar().removeAction(menu.menuAction())
    def add_menu(self):
        """
        Insert a new 'NewMenu' item into the target tree.

        The item is appended under the parent of the current selection
        (making it a sibling); with no usable selection it goes to the
        top level.
        """
        item = QStandardItem('NewMenu')
        item.setIcon(QIcon(':/plugins/MenuBuilder/resources/menu.svg'))
        # select current index selected and insert as a sibling
        brother = self.target.selectedIndexes()
        # NOTE(review): `not brother[0].parent()` relies on QModelIndex
        # truthiness; `not brother[0].parent().isValid()` would be the
        # explicit check — confirm PyQt4 behaviour here.
        if not brother or not brother[0].parent():
            # no selection, add menu at the top level
            self.menumodel.insertRow(self.menumodel.rowCount(), item)
            return
        parent = self.menumodel.itemFromIndex(brother[0].parent())
        if not parent:
            self.menumodel.insertRow(self.menumodel.rowCount(), item)
            return
        parent.appendRow(item)
def update_database_list(self):
"""update list of defined postgres connections"""
settings = QSettings()
settings.beginGroup("/PostgreSQL/connections")
keys = settings.childGroups()
self.combo_database.clear()
self.combo_schema.clear()
self.menumodel.clear()
self.combo_database.addItems(keys)
self.combo_database.setCurrentIndex(-1)
settings.endGroup()
# clear profile list
self.combo_profile.clear()
self.combo_profile.setCurrentIndex(-1)
def set_connection(self, databaseidx, dbname=None):
"""
Connect to selected postgresql database
"""
selected = self.combo_database.itemText(databaseidx) or dbname
if not selected:
return
settings = QSettings()
settings.beginGroup("/PostgreSQL/connections/{}".format(selected))
if not settings.contains("database"):
# no entry?
QMessageBox.critical(self, "Error", "There is no defined database connection")
return
uri = QgsDataSourceURI()
settingsList = ["service", "host", "port", "database", "username", "password"]
service, host, port, database, username, password = map(
lambda x: settings.value(x, "", type=str), settingsList)
useEstimatedMetadata = settings.value("estimatedMetadata", False, type=bool)
sslmode = settings.value("sslmode", QgsDataSourceURI.SSLprefer, type=int)
settings.endGroup()
if service:
uri.setConnection(service, database, username, password, sslmode)
else:
uri.setConnection(host, port, database, username, password, sslmode)
uri.setUseEstimatedMetadata(useEstimatedMetadata)
# connect to db
self.connect_to_uri(uri)
# update schema list
self.update_schema_list()
    @contextmanager
    def transaction(self):
        """Context manager wrapping a DB transaction: commit on success,
        roll back and re-raise on any recognised psycopg2 error."""
        try:
            yield
            self.connection.commit()
        except self.pg_error_types() as e:
            self.connection.rollback()
            raise e
    def check_connected(func):
        """
        Decorator that checks if a database connection is active before executing function

        Pops a warning dialog and aborts the wrapped call when no
        connection has ever been opened, or when it has been closed.
        """
        @wraps(func)
        def wrapped(inst, *args, **kwargs):
            # Never connected: the attribute does not exist yet.
            if not getattr(inst, 'connection', False):
                QMessageBox(
                    QMessageBox.Warning,
                    "Menu Builder",
                    inst.tr("Not connected to any database, please select one"),
                    QMessageBox.Ok,
                    inst
                ).exec_()
                return
            # Connection object exists but was closed in the meantime.
            if inst.connection.closed:
                QMessageBox(
                    QMessageBox.Warning,
                    "Menu Builder",
                    inst.tr("Not connected to any database, please select one"),
                    QMessageBox.Ok,
                    inst
                ).exec_()
                return
            return func(inst, *args, **kwargs)
        return wrapped
def connect_to_uri(self, uri):
self.close_connection()
self.host = uri.host() or os.environ.get('PGHOST')
self.port = uri.port() or os.environ.get('PGPORT')
username = uri.username() or os.environ.get('PGUSER') or os.environ.get('USER')
password = uri.password() or os.environ.get('PGPASSWORD')
try:
self.connection = psycopg2.connect(uri.connectionInfo())
except self.pg_error_types() as e:
err = str(e)
conninfo = uri.connectionInfo()
ok, username, password = QgsCredentials.instance().get(
conninfo, username, password, err)
if not ok:
raise Exception(e)
if username:
uri.setUsername(username)
if password:
uri.setPassword(password)
self.connection = psycopg2.connect(uri.connectionInfo())
self.pgencoding = self.connection.encoding
return True
def pg_error_types(self):
return (
psycopg2.InterfaceError,
psycopg2.OperationalError,
psycopg2.ProgrammingError
)
@check_connected
def update_schema_list(self):
self.combo_schema.clear()
with self.transaction():
cur = self.connection.cursor()
cur.execute("""
select nspname
from pg_namespace
where nspname not ilike 'pg_%'
and nspname not in ('pg_catalog', 'information_schema')
""")
schemas = [row[0] for row in cur.fetchall()]
self.combo_schema.addItems(schemas)
@check_connected
def update_profile_list(self, schemaidx):
    """
    update profile list from database

    Checks that the profile table exists in the selected schema (offering to
    create it when absent), then repopulates the profile combo box, trying to
    keep the previously selected profile.
    """
    schema = self.combo_schema.itemText(schemaidx)
    with self.transaction():
        cur = self.connection.cursor()
        # NOTE(review): schema/table are interpolated via str.format; assumes
        # they never contain quotes — consider quoting identifiers.
        cur.execute("""
        select 1
        from pg_tables
        where schemaname = '{0}'
        and tablename = '{1}'
        union
        select 1
        from pg_matviews
        where schemaname = '{0}'
        and matviewname = '{1}'
        """.format(schema, self.table))
        tables = cur.fetchone()
        if not tables:
            # profile table missing: offer to create it now
            box = QMessageBox(
                QMessageBox.Warning,
                "Menu Builder",
                self.tr("Table '{}.{}' not found in this database, "
                        "would you like to create it now ?")
                .format(schema, self.table),
                QMessageBox.Cancel | QMessageBox.Yes,
                self
            )
            ret = box.exec_()
            if ret == QMessageBox.Cancel:
                return False
            elif ret == QMessageBox.Yes:
                cur.execute("""
                create table {}.{} (
                id serial,
                name varchar,
                profile varchar,
                model_index varchar,
                datasource_uri text
                )
                """.format(schema, self.table))
                self.connection.commit()
            # table was absent (possibly just created): nothing to list yet
            return False
        cur.execute("""
        select distinct(profile) from {}.{}
        """.format(schema, self.table))
        profiles = [row[0] for row in cur.fetchall()]
    # restore the previous selection if the profile still exists (-1 otherwise)
    saved_profile = self.combo_profile.currentText()
    self.combo_profile.clear()
    self.combo_profile.addItems(profiles)
    self.combo_profile.setCurrentIndex(self.combo_profile.findText(saved_profile))
@check_connected
def delete_profile(self):
    """
    Delete profile currently selected

    Asks for confirmation, removes the profile entry from the combo box and
    deletes its rows from the profile table. Returns False on cancellation.
    """
    idx = self.combo_profile.currentIndex()
    schema = self.combo_schema.currentText()
    profile = self.combo_profile.itemText(idx)
    box = QMessageBox(
        QMessageBox.Warning,
        "Menu Builder",
        self.tr("Delete '{}' profile ?").format(profile),
        QMessageBox.Cancel | QMessageBox.Yes,
        self
    )
    ret = box.exec_()
    if ret == QMessageBox.Cancel:
        return False
    elif ret == QMessageBox.Yes:
        self.combo_profile.removeItem(idx)
        with self.transaction():
            cur = self.connection.cursor()
            # Pass the profile as a bound parameter (as save_changes already
            # does for inserts) so a name containing a quote can neither
            # break the statement nor inject SQL.
            cur.execute("""
            delete from {}.{}
            where profile = %s
            """.format(schema, self.table), (profile,))
        self.menumodel.clear()
        self.combo_profile.setCurrentIndex(-1)
def update_model_idx(self, model, profile_index):
    """
    wrapper that checks combobox
    """
    selected_profile = self.combo_profile.itemText(profile_index)
    current_schema = self.combo_schema.currentText()
    self.update_model(model, current_schema, selected_profile)
def sortby_modelindex(self, rows):
    """Order rows by their serialized model_index path (column 2).

    Each model_index is a JSON list of [position, label] pairs; rows are
    sorted lexicographically on the zero-padded positions so that menu
    entries come out in tree order.
    """
    def path_key(row):
        pairs = json.loads(row[2])
        return '/'.join('{:04}'.format(pair[0]) for pair in pairs)
    return sorted(rows, key=path_key)
@check_connected
def update_model(self, model, schema, profile):
    """
    Update the model by retrieving the profile given in database

    Rebuilds *model* as a tree: intermediate [index, name] pairs of each
    row's model_index become menu items, the last pair becomes a layer leaf.
    """
    # accessor path ('0-menu/1-submenu/') -> already-created QStandardItem
    menudict = {}
    with self.transaction():
        cur = self.connection.cursor()
        select = """
        select name, profile, model_index, datasource_uri
        from {}.{}
        where profile = '{}'
        """.format(schema, self.table, profile)
        cur.execute(select)
        rows = cur.fetchall()
    model.clear()
    for name, profile, model_index, datasource_uri in self.sortby_modelindex(rows):
        menu = model.invisibleRootItem()
        indexes = json.loads(model_index)
        parent = ''
        # walk the intermediate pairs, creating missing menu items on the way
        for idx, subname in indexes[:-1]:
            parent += '{}-{}/'.format(idx, subname)
            if parent in menudict:
                # already created entry
                menu = menudict[parent]
                continue
            # create menu
            item = QStandardItem(subname)
            uri_struct = QgsMimeDataUtils.Uri(datasource_uri)
            item.setData(uri_struct)
            item.setIcon(QIcon(':/plugins/MenuBuilder/resources/menu.svg'))
            item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsUserCheckable |
                          Qt.ItemIsEnabled | Qt.ItemIsDropEnabled |
                          Qt.ItemIsEditable)
            item.setWhatsThis("menu")
            menu.appendRow(item)
            menudict[parent] = item
            # set current menu to the new created item
            menu = item
        # add leaf (layer item)
        item = QStandardItem(name)
        uri_struct = QgsMimeDataUtils.Uri(datasource_uri)
        # fix layer name instead of table name
        # usefull when the layer has been renamed in menu
        uri_struct.name = name
        if uri_struct.providerKey in ICON_MAPPER:
            item.setIcon(QIcon(ICON_MAPPER[uri_struct.providerKey]))
        item.setData(uri_struct)
        # avoid placing dragged layers on it
        item.setDropEnabled(False)
        if uri_struct.providerKey == 'postgres':
            # set tooltip to postgres comment
            comment = self.get_table_comment(uri_struct.uri)
            item.setToolTip(comment)
        # NOTE(review): assumes every model_index has at least one parent pair;
        # a single-pair index would leave parent='' unmapped — confirm upstream.
        menudict[parent].appendRow(item)
@check_connected
def save_changes(self, save_to_db=True):
    """
    Save changes in the postgres table

    Replaces the whole profile (delete + re-insert each tree row), then
    persists session settings and refreshes the dock/menubar display.
    Returns False on validation or DB failure, True on success.
    """
    schema = self.combo_schema.currentText()
    profile = self.combo_profile.currentText()
    if not profile:
        QMessageBox(
            QMessageBox.Warning,
            "Menu Builder",
            self.tr("Profile cannot be empty"),
            QMessageBox.Ok,
            self
        ).exec_()
        return False
    if save_to_db:
        try:
            with self.transaction():
                cur = self.connection.cursor()
                # replace the whole profile: delete then re-insert every row
                cur.execute("delete from {}.{} where profile = '{}'".format(
                    schema, self.table, profile))
                for item, data in self.target.iteritems():
                    if not data:
                        continue
                    cur.execute("""
                    insert into {}.{} (name,profile,model_index,datasource_uri)
                    values (%s, %s, %s, %s)
                    """.format(schema, self.table), (
                        item[-1][1],
                        profile,
                        json.dumps(item),
                        data.data())
                    )
        except Exception as exc:
            # NOTE(review): exc.message is Python-2 only; decoding uses the
            # connection encoding captured at connect time.
            QMessageBox(
                QMessageBox.Warning,
                "Menu Builder",
                exc.message.decode(self.pgencoding),
                QMessageBox.Ok,
                self
            ).exec_()
            return False
    self.save_session(
        self.combo_database.currentText(),
        schema,
        profile,
        self.activate_dock.isChecked(),
        self.activate_menubar.isChecked()
    )
    self.update_profile_list(self.combo_schema.currentIndex())
    self.show_dock(self.activate_dock.isChecked())
    self.show_menus(self.activate_menubar.isChecked())
    return True
@check_connected
def load_menus(self, profile=None, schema=None):
    """
    Load menus in the main windows qgis bar

    Rebuilds the QGIS menubar entries for *profile*: root pairs become
    top-level menus, intermediate pairs submenus, and the last pair a
    QAction that loads the corresponding layer when triggered.
    """
    if not schema:
        schema = self.combo_schema.currentText()
    if not profile:
        profile = self.combo_profile.currentText()
    # remove previous menus
    for menu in self.uiparent.menus:
        self.uiparent.iface.mainWindow().menuBar().removeAction(menu.menuAction())
    with self.transaction():
        cur = self.connection.cursor()
        select = """
        select name, profile, model_index, datasource_uri
        from {}.{}
        where profile = '{}'
        """.format(schema, self.table, profile)
        cur.execute(select)
        rows = cur.fetchall()
    # item accessor ex: '0-menu/0-submenu/1-item/'
    menudict = {}
    # reference to parent item
    parent = ''
    # reference to qgis main menu bar
    menubar = self.uiparent.iface.mainWindow().menuBar()
    for name, profile, model_index, datasource_uri in self.sortby_modelindex(rows):
        uri_struct = QgsMimeDataUtils.Uri(datasource_uri)
        indexes = json.loads(model_index)
        # root menu
        parent = '{}-{}/'.format(indexes[0][0], indexes[0][1])
        if parent not in menudict:
            menu = QMenu(self.uiparent.iface.mainWindow())
            self.uiparent.menus.append(menu)
            menu.setObjectName(indexes[0][1])
            menu.setTitle(indexes[0][1])
            # insert just before the first right-aligned standard QGIS menu
            menubar.insertMenu(
                self.uiparent.iface.firstRightStandardMenu().menuAction(),
                menu)
            menudict[parent] = menu
        else:
            # menu already there
            menu = menudict[parent]
        for idx, subname in indexes[1:-1]:
            # intermediate submenus
            parent += '{}-{}/'.format(idx, subname)
            if parent not in menudict:
                submenu = menu.addMenu(subname)
                submenu.setObjectName(subname)
                submenu.setTitle(subname)
                menu = submenu
                # store it for later use
                menudict[parent] = menu
                continue
            # already treated
            menu = menudict[parent]
        # last item = layer
        layer = QAction(name, self.uiparent.iface.mainWindow())
        if uri_struct.providerKey in ICON_MAPPER:
            layer.setIcon(QIcon(ICON_MAPPER[uri_struct.providerKey]))
        if uri_struct.providerKey == 'postgres':
            # set tooltip to postgres comment
            comment = self.get_table_comment(uri_struct.uri)
            layer.setStatusTip(comment)
            layer.setToolTip(comment)
        layer.setData(uri_struct.uri)
        layer.setWhatsThis(uri_struct.providerKey)
        # dispatch to the vector/raster handler on click
        layer.triggered.connect(self.layer_handler[uri_struct.layerType])
        menu.addAction(layer)
def get_table_comment(self, uri):
    """Return the PostgreSQL comment of the table referenced by *uri*.

    The uri is expected to contain a `table="schema"."table"` fragment;
    returns '' when the table carries no comment.
    """
    # split the quoted 'schema'.'table' pair out of the datasource uri
    schema, table = re.match(r'.*table=(".*"\.".*")', uri) \
        .group(1) \
        .strip() \
        .replace('"', '') \
        .split('.')
    with self.transaction():
        cur = self.connection.cursor()
        select = """
        select description from pg_description
        join pg_class on pg_description.objoid = pg_class.oid
        join pg_namespace on pg_class.relnamespace = pg_namespace.oid
        where relname = '{}' and nspname='{}'
        """.format(table, schema)
        cur.execute(select)
        row = cur.fetchone()
        if row:
            return row[0]
    return ''
def load_from_index(self, index):
    """Load layers from selected item index

    Menu items are ignored; vector/raster leaves are instantiated and added
    to the QGIS layer registry. Unknown layer types are silently skipped.
    """
    item = self.dock_model.itemFromIndex(self.proxy_model.mapToSource(index))
    if item.whatsThis() == 'menu':
        # menu entries themselves are not loadable layers
        return
    # Initialize so an unknown layerType falls through cleanly; previously
    # the `if not layer` check below raised NameError in that case.
    layer = None
    if item.data().layerType == 'vector':
        layer = QgsVectorLayer(
            item.data().uri,          # uri
            item.text(),              # layer name
            item.data().providerKey   # provider name
        )
    elif item.data().layerType == 'raster':
        layer = QgsRasterLayer(
            item.data().uri,          # uri
            item.text(),              # layer name
            item.data().providerKey   # provider name
        )
    if not layer:
        return
    QgsMapLayerRegistry.instance().addMapLayer(layer)
def load_vector(self):
    """Slot: add the vector layer described by the triggering QAction."""
    source = self.sender()
    uri, name, provider = source.data(), source.text(), source.whatsThis()
    QgsMapLayerRegistry.instance().addMapLayer(QgsVectorLayer(uri, name, provider))
def load_raster(self):
    """Slot: add the raster layer described by the triggering QAction."""
    source = self.sender()
    uri, name, provider = source.data(), source.text(), source.whatsThis()
    QgsMapLayerRegistry.instance().addMapLayer(QgsRasterLayer(uri, name, provider))
def accept(self):
    """Dialog OK: persist changes, then close the dialog and the connection."""
    if self.save_changes():
        # reject() is used here merely to dismiss the dialog window
        QDialog.reject(self)
        self.close_connection()
def apply(self):
    """Apply session/display settings without writing to the database."""
    if self.save_changes(save_to_db=False):
        QDialog.reject(self)
def reject(self):
    """Dialog Cancel: drop the DB connection and close the dialog."""
    self.close_connection()
    QDialog.reject(self)
def close_connection(self):
    """close current pg connection if exists"""
    connection = getattr(self, 'connection', False)
    if connection and not connection.closed:
        connection.close()
def save_session(self, database, schema, profile, dock, menubar):
    """save current profile for next session"""
    settings = QSettings()
    session = (
        ("database", database),
        ("schema", schema),
        ("profile", profile),
        ("dock", dock),
        ("menubar", menubar),
    )
    for key, value in session:
        settings.setValue("MenuBuilder/%s" % key, value)
def restore_session(self):
    """Restore database/schema/profile and dock/menubar state from QSettings."""
    settings = QSettings()
    database = settings.value("MenuBuilder/database", False)
    schema = settings.value("MenuBuilder/schema", 'public')
    profile = settings.value("MenuBuilder/profile", False)
    dock = settings.value("MenuBuilder/dock", False)
    menubar = settings.value("MenuBuilder/menubar", False)
    if not any([database, profile]):
        # nothing saved from a previous session
        return
    self.set_connection(0, dbname=database)
    self.show_dock(bool(dock), profile=profile, schema=schema)
    if bool(dock):
        self.uiparent.iface.addDockWidget(Qt.LeftDockWidgetArea, self.dock_widget)
    self.show_menus(bool(menubar), profile=profile, schema=schema)
class CustomQtTreeView(QTreeView):
    """Tree view accepting QGIS layer drags and supporting Delete-key removal."""

    def __init__(self, *args, **kwargs):
        super(CustomQtTreeView, self).__init__(*args, **kwargs)

    def dragMoveEvent(self, event):
        # accept any move so the drop indicator is drawn while dragging
        event.acceptProposedAction()

    def dragEnterEvent(self, event):
        if not event.mimeData():
            # don't drag menu entry
            return False
        # refuse if it's not a qgis mimetype
        if event.mimeData().hasFormat(QGIS_MIMETYPE):
            event.acceptProposedAction()

    def keyPressEvent(self, event):
        # Delete removes the currently selected rows
        if event.key() == Qt.Key_Delete:
            self.dropItem()

    def dropItem(self):
        """Remove all selected rows, grouped by parent to keep indexes valid."""
        model = self.selectionModel().model()
        parents = defaultdict(list)
        for idx in self.selectedIndexes():
            parents[idx.parent()].append(idx)
        for parent, idx_list in parents.items():
            for diff, index in enumerate(idx_list):
                # each prior removal shifts later rows up by one, hence `- diff`
                model.removeRow(index.row() - diff, parent)

    def iteritems(self, level=0):
        """
        Dump model to store in database.
        Generates each level recursively
        """
        rowcount = self.model().rowCount()
        for itemidx in range(rowcount):
            # iterate over parents
            parent = self.model().itemFromIndex(self.model().index(itemidx, 0))
            for item, uri in self.traverse_tree(parent, []):
                yield item, uri

    def traverse_tree(self, parent, identifier):
        """
        Iterate over childs, recursively

        Yields (path, uri) pairs where path is the list of [row, text] pairs
        from the root down to the leaf.
        """
        identifier.append([parent.row(), parent.text()])
        for row in range(parent.rowCount()):
            child = parent.child(row)
            if child.hasChildren():
                # child is a menu ?
                for item in self.traverse_tree(child, identifier):
                    yield item
                # pop the submenu entry once its subtree has been visited
                identifier.pop()
            else:
                # add leaf
                sibling = list(identifier)
                sibling.append([child.row(), child.text()])
                yield sibling, child.data()
class DockQtTreeView(CustomQtTreeView):
    """Read-only variant of CustomQtTreeView used inside the dock widget."""

    def __init__(self, *args, **kwargs):
        super(DockQtTreeView, self).__init__(*args, **kwargs)

    def keyPressEvent(self, event):
        """override keyevent to avoid deletion of items in the dock"""
        pass
class MenuTreeModel(QStandardItemModel):
    """Item model that (de)serializes QGIS uri-list mime data for drag & drop."""

    def __init__(self, *args, **kwargs):
        super(MenuTreeModel, self).__init__(*args, **kwargs)

    def dropMimeData(self, mimedata, action, row, column, parentIndex):
        """
        Handles the dropping of an item onto the model.
        De-serializes the data and inserts it into the model.
        """
        # decode data using qgis helpers
        uri_list = QgsMimeDataUtils.decodeUriList(mimedata)
        if not uri_list:
            return False
        # find parent item
        dropParent = self.itemFromIndex(parentIndex)
        if not dropParent:
            return False
        # each uri will become a new item
        for uri in uri_list:
            item = QStandardItem(uri.name)
            item.setData(uri)
            # avoid placing dragged layers on it
            item.setDropEnabled(False)
            if uri.providerKey in ICON_MAPPER:
                item.setIcon(QIcon(ICON_MAPPER[uri.providerKey]))
            dropParent.appendRow(item)
        dropParent.emitDataChanged()
        return True

    def mimeData(self, indexes):
        """
        Used to serialize data
        """
        if not indexes:
            return
        items = [self.itemFromIndex(idx) for idx in indexes]
        if not items:
            return
        # refuse to serialize a selection containing items without uri data
        if not all(it.data() for it in items):
            return
        # reencode items
        mimedata = QgsMimeDataUtils.encodeUriList([item.data() for item in items])
        return mimedata

    def mimeTypes(self):
        # only the QGIS uri-list mimetype is handled
        return [QGIS_MIMETYPE]

    def supportedDropActions(self):
        return Qt.CopyAction | Qt.MoveAction
class LeafFilterProxyModel(QSortFilterProxyModel):
    """
    Class to override the following behaviour:
    If a parent item doesn't match the filter,
    none of its children will be shown.
    This Model matches items which are descendants
    or ascendants of matching items.
    """

    def filterAcceptsRow(self, row_num, source_parent):
        """Overriding the parent function"""
        # Check if the current row matches
        if self.filter_accepts_row_itself(row_num, source_parent):
            return True
        # Traverse up all the way to root and check if any of them match
        if self.filter_accepts_any_parent(source_parent):
            return True
        # Finally, check if any of the children match
        return self.has_accepted_children(row_num, source_parent)

    def filter_accepts_row_itself(self, row_num, parent):
        # delegate to the default single-row matching
        return super(LeafFilterProxyModel, self).filterAcceptsRow(row_num, parent)

    def filter_accepts_any_parent(self, parent):
        """
        Traverse to the root node and check if any of the
        ancestors match the filter
        """
        while parent.isValid():
            if self.filter_accepts_row_itself(parent.row(), parent.parent()):
                return True
            parent = parent.parent()
        return False

    def has_accepted_children(self, row_num, parent):
        """
        Starting from the current node as root, traverse all
        the descendants and test if any of the children match
        """
        model = self.sourceModel()
        source_index = model.index(row_num, 0, parent)
        children_count = model.rowCount(source_index)
        for i in range(children_count):
            # recursion re-applies the full ancestor/descendant logic
            if self.filterAcceptsRow(i, source_index):
                return True
        return False
| Oslandia/qgis-menu-builder | menu_builder_dialog.py | menu_builder_dialog.py | py | 32,979 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "PyQt4.QtGui.QDialog",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "menu_builder_dialog_base.Ui_Dialog",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QIcon",
"line_number": 53,
"usage_type": "call"
},
{
"api_name... |
3599441390 | from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """Qt-Designer-style layout: a path line edit, an open button and a graphics view."""

    def setupUi(self, Dialog):
        """Create and position all widgets on *Dialog* (fixed geometry)."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(766, 569)
        self.label = QtWidgets.QLabel(Dialog)
        self.label.setGeometry(QtCore.QRect(30, 20, 61, 21))
        self.label.setObjectName("label")
        self.lineEdit = QtWidgets.QLineEdit(Dialog)
        self.lineEdit.setGeometry(QtCore.QRect(100, 20, 641, 20))
        self.lineEdit.setObjectName("lineEdit")
        self.graphicsView = QtWidgets.QGraphicsView(Dialog)
        self.graphicsView.setGeometry(QtCore.QRect(25, 71, 721, 491))
        self.graphicsView.setSizeIncrement(QtCore.QSize(0, 0))
        self.graphicsView.setFrameShadow(QtWidgets.QFrame.Raised)
        self.graphicsView.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContentsOnFirstShow)
        self.graphicsView.setAlignment(QtCore.Qt.AlignJustify|QtCore.Qt.AlignVCenter)
        self.graphicsView.setObjectName("graphicsView")
        self.pushButton = QtWidgets.QPushButton(Dialog)
        self.pushButton.setGeometry(QtCore.QRect(670, 40, 75, 23))
        self.pushButton.setObjectName("pushButton")
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Set user-visible strings, routed through Qt's translation system."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Image Viewer"))
        self.label.setText(_translate("Dialog", "Image Path"))
        self.pushButton.setText(_translate("Dialog", "open"))
#from image_viewer import *
from PyQt5.QtWidgets import QApplication, QDialog
from PyQt5.QtGui import QPixmap
import os
import sys
class My_Application(QDialog):
    """Main dialog: displays the image whose path is typed in the line edit."""

    def __init__(self):
        super().__init__()
        self.ui = Ui_Dialog()
        self.ui.setupUi(self)
        # "open" button triggers loading of the typed path
        self.ui.pushButton.clicked.connect(self.checkPath)

    def checkPath(self):
        """Load the image at the typed path into the graphics view.

        Non-existent paths are silently ignored (no error dialog).
        """
        image_path = self.ui.lineEdit.text()
        if os.path.isfile(image_path):
            scene = QtWidgets.QGraphicsScene(self)
            pixmap = QPixmap(image_path)
            item = QtWidgets.QGraphicsPixmapItem(pixmap)
            scene.addItem(item)
            self.ui.graphicsView.setScene(scene)
if __name__ == '__main__':
    # create the Qt application, show the dialog and run the event loop,
    # propagating its exit status to the shell
    app = QApplication(sys.argv)
    class_instance = My_Application()
    class_instance.show()
    sys.exit(app.exec_())
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.QRect",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore",... |
24402887263 | import argparse, libmarusoftware, libtools, os
# package metadata, shown in the About dialog (version method)
__version__="Marueditor b1.0.0"
__revision__="4"
__author__="Marusoftware"
__license__="MIT"
class DefaultArgv:
    """Fallback command-line options used when no parsed arguments are supplied."""
    log_level = 20   # numeric logging level (20 == INFO)
    filepath = None  # no file opened at startup
class Editor():
    """Main Marueditor application: window, tabs, addon dispatch and settings.

    NOTE(review): this block's indentation was reconstructed from a
    whitespace-stripped source; structurally ambiguous spots are flagged
    inline with NOTE(review) comments.
    """

    def __init__(self, argv=DefaultArgv):
        """Store command-line options; real initialisation happens in Setup()."""
        self.argv = argv
        self.opening = {}

    def Setup(self, appinfo=None):
        """Load config/translations/logging and build the main window + notebook."""
        self.LoadConfig()
        self.Loadl10n()
        self.LoadLogger()
        if not appinfo is None:
            self.appinfo.update(**appinfo)
        libmarusoftware.core.adjustEnv(self.logger.getChild("AdjustEnv"), self.appinfo)
        self.addon = libtools.Addon(self.logger.getChild("Addon"), self.appinfo)
        self.addon.loadAll(self.appinfo["addons"], "editor")
        self.logger.info("Starting...")
        self.ui = libmarusoftware.UI.UI(self.config, self.logger.getChild("UI"))
        self.ui.changeIcon(os.path.join(self.appinfo["image"], "marueditor.png"))
        self.ui.setcallback("close", self.exit)
        self.ui.changeSize('500x500')
        self.ui.notebook = self.ui.Notebook(close=True, command=self.close)
        self.ui.notebook.pack(fill="both", expand=True)

    def mainloop(self):
        """Enter the UI event loop (blocks until the window closes)."""
        self.ui.mainloop()

    def LoadConfig(self):
        """Build the Config object, defaulting the UI language to the system locale."""
        import platform, locale
        default_conf = {"welcome": 1, "lang": None, "open_as": True, "gtk": True, "theme": None}
        # NOTE(review): branch condition looks inverted (uses getlocale() when
        # it contains None) — confirm intended locale fallback.
        if None in locale.getlocale():
            default_conf.update([("lang", locale.getlocale()[0]), ("encode", locale.getlocale()[1])])
        else:
            default_conf.update([("lang", locale.getdefaultlocale()[0]), ("encode", locale.getdefaultlocale()[1])])
        self.config = libmarusoftware.Config("Marueditor", default_conf=default_conf)
        self.appinfo = self.config.appinfo

    def Loadl10n(self, language=None):
        """Load the translation table for *language* (config language by default)."""
        if language is None:
            language = self.config["lang"]
        # every message key the editor UI uses
        req = ['file', 'new', 'open', 'open_as', 'save', 'save_as', 'close_tab', 'close_all', 'edit', 'window',
               'full_screen', 'open_window', 'setting', 'help', 'about', 'welcome', 'welcome_tab',
               'dnd_area', 'select_file_type', 'ok', 'cancel', 'error', 'error_cant_open',
               'new_main', 'next', 'new_sub1', 'new_sub2', 'check', 'save_check', 'st_open_from',
               'st_dnd', 'marueditor', 'exit', 'addon', 'file_addon', 'delete', 'all', 'were_sorry',
               'back', 'dir_name', 'choose_dir', 'file_name', 'new_check', 'wait', 'done', 'new_e1',
               'new_e2', 'new_e3', 'done_msg', 'new_e1_msg', 'chk_upd', 'style', 'lang', 'new_check2',
               'version', 'licence', 'marueditor_file', "appearance"]
        self.lang = libmarusoftware.Lang(self.appinfo, req)
        self.txt = self.lang.getText(language)

    def LoadLogger(self):
        """Create the application logger, honouring a configured log_dir."""
        # logging
        if "log_dir" in self.config:
            self.appinfo["log"] = self.config["log_dir"]
        log_dir = self.appinfo["log"]
        self.logger = libmarusoftware.core.Logger(log_dir=log_dir, log_level=self.argv.log_level, name="")

    def CreateMenu(self):
        """Build the menu bar (macOS apple menu plus File/Edit/Window/Help)."""
        self.ui.menu = self.ui.Menu(type="bar")
        self.ui.menu.apple = self.ui.menu.add_category("apple", name="apple")  # Macos apple menu
        if not self.ui.menu.apple is None:
            self.ui.menu.apple.add_item(type="button", label="About Marueditor")
            self.ui.menu.apple.add_item(type="separator")
        # self.ui.menu.system=self.ui.menu.add_category("system", name="system")#Windows icon menu(Disabled)
        # if not self.ui.menu.system is None:
        #     self.ui.menu.system.add_item(type="checkbutton", label="Fullscreen", command=self.ui.fullscreen)
        #     self.ui.menu.system.add_item(type="separator")
        self.ui.menu.file = self.ui.menu.add_category(self.txt["file"])  # File
        self.ui.menu.file.add_item(type="button", label=self.txt["new"], command=self.new)
        self.ui.menu.file.add_item(type="button", label=self.txt["open"], command=self.open)
        if self.config["open_as"]:
            self.ui.menu.file.add_item(type="button", label=self.txt["open_as"], command=lambda: self.open(force_select=True))
        self.ui.menu.file.add_item(type="button", label=self.txt["save"], command=self.save)
        self.ui.menu.file.add_item(type="button", label=self.txt["save_as"], command=lambda: self.save(as_other=True))
        self.ui.menu.file.add_item(type="button", label=self.txt["close_tab"], command=self.close)
        self.ui.menu.file.add_item(type="button", label=self.txt["close_all"], command=self.exit)
        self.ui.menu.edit = self.ui.menu.add_category(self.txt["edit"], name="edit")  # Edit
        self.ui.menu.window = self.ui.menu.add_category(self.txt["window"], name="window")  # Window
        self.ui.menu.window.add_item(type="checkbutton", label=self.txt["full_screen"], command=self.ui.fullscreen)
        self.ui.menu.window.add_item(type="button", label=self.txt["open_window"], command=lambda: run(argv=self.argv))
        self.ui.menu.settings = self.ui.menu.add_item(type="button", label=self.txt["setting"], command=self.setting)  # Settings
        self.ui.menu.help = self.ui.menu.add_category(self.txt["help"], name="help")  # Help
        self.ui.menu.help.add_item(type="button", label=self.txt["about"], command=self.version)

    def welcome(self):
        """Populate the welcome tab with New/Open buttons and a drag-and-drop area."""
        self.welcome_tab = self.ui.notebook.add_tab(label=self.txt["welcome"]).frame
        self.welcome_tab.label = self.welcome_tab.Label(label=self.txt["welcome_tab"])
        self.welcome_tab.label.pack()
        self.welcome_tab.new = self.welcome_tab.Input.Button(label=self.txt["new"], command=self.new)
        self.welcome_tab.new.pack(fill="x", side="top")
        self.welcome_tab.open = self.welcome_tab.Input.Button(label=self.txt["open"], command=lambda: self.open(as_diff_type=True))
        self.welcome_tab.open.pack(fill="x", side="top")
        if self.welcome_tab.backend == "tkinter":
            if self.welcome_tab.dnd:
                def dnd_process(event):
                    # open every file dropped onto the area
                    for file in event.data:
                        self.open(file=file, as_diff_type=True)
                self.welcome_tab.frame = self.welcome_tab.Frame(label=self.txt["dnd_area"])
                self.welcome_tab.frame.pack(fill="both", expand=True)
                self.welcome_tab.frame.setup_dnd(dnd_process, "file")

    def open(self, file=None, as_diff_type=False, force_select=False):  # TODO: mime and directory
        """Open *file* in a new tab, resolving which addon handles its extension.

        force_select always asks the user for the addon; as_diff_type asks
        only when the extension is unknown, otherwise an error is shown.
        """
        def select_addon(exts, file, recom=None):
            # Modal addon/extension picker; returns (addon, ext) or None.
            root = self.ui.makeSubWindow(dialog=True)
            root.title = root.Label(label=f"{self.txt['select_file_type']}\n{self.txt['file']}:{file}")
            root.title.pack()
            root.list = root.Input.List()
            root.list.pack(expand=True, fill="both")
            def cancel():
                root.list.set_selection([])
                root.close()
            def ok():
                if len(root.list.value) == 1:
                    root.close()
            root.ok = root.Input.Button(label=self.txt["ok"], command=ok)
            root.ok.pack(expand=True, fill="both", side="bottom")
            root.cancel = root.Input.Button(label=self.txt["cancel"], command=cancel)
            root.cancel.pack(expand=True, fill="both", side="bottom")
            for ext, addons in exts.items():
                for addon in addons:
                    if not root.list.exist_item(addon):
                        root.list.add_item(label=addon, id=addon)
                    root.list.add_item(label=ext, id=addon + "." + ext, parent=addon)
            root.wait()
            if len(root.list.value) == 1:
                value = root.list.value[0].split(".")
                # selecting the addon itself (no ".ext" suffix) falls back to
                # the addon's first declared filetype
                return value[0], (value[1] if len(value) == 2 else self.addon.loaded_addon_info[value[0]]["filetypes"][0])
            else:
                return
        if file is None:
            file = self.ui.Dialog.askfile()
        if not os.path.exists(file):
            return
        ext = os.path.splitext(file)[1].lstrip(".")
        if force_select:
            selected = select_addon(self.addon.extdict, file=file, recom=(self.addon.extdict[ext] if ext in self.addon.extdict else None))
            if selected is None:
                return
            addon, ext = selected
        else:
            if ext in self.addon.extdict:
                # default: first addon registered for this extension
                addon = self.addon.extdict[ext][0]
            else:
                if as_diff_type:
                    selected = select_addon(self.addon.extdict, file=file)
                    if selected is None:
                        return
                    addon, ext = selected
                else:
                    self.ui.Dialog.error(title=self.txt["error"], message=self.txt["error_cant_open"])
                    return
        # tab label shows "[ext]" only when it differs from the file suffix
        label = f'{os.path.basename(file)} {f"[{ext}]" if os.path.splitext(file)[1]!="."+ext else ""}'
        tab = self.ui.notebook.add_tab(label=label)
        self.ui.notebook.select_tab(tab)
        ctx = self.addon.getAddon(addon, file, ext, tab.frame, self, self.update_state)
        tab.addon = ctx
        ctx.saved = False

    def save(self, tab=None, as_other=False):
        """Save the addon document of *tab* (current tab by default)."""
        if tab is None:
            tab = self.ui.notebook.value
        if not hasattr(tab, "addon"):
            return
        if as_other:
            file = self.ui.Dialog.askfile()
            # NOTE(review): 'save as' refuses paths that don't exist yet — confirm
            if not os.path.exists(file):
                return
            tab.addon.addon.save(file)
        else:
            tab.addon.addon.save()

    def new(self, **options):
        """Create a new document; missing file/filetype are asked via a wizard dialog."""
        def dialog():
            # Two-step wizard: pick the target file path, then the addon/filetype.
            def close():
                buttons.next.release()
                root.close()
            root = self.ui.makeSubWindow(dialog=True)
            root.setcallback("close", close)
            root.changeSize('300x300')
            body = root.Frame()
            body.pack(side="top", fill="x")
            body.title = body.Label(label=self.txt["new_main"])
            body.title.pack()
            buttons = root.Frame()
            buttons.pack(side="bottom", fill="x")
            buttons.cancel = buttons.Input.Button(label=self.txt["cancel"], command=close)
            buttons.cancel.pack(side="left", fill="x")
            buttons.next = buttons.Input.Button(label=self.txt["next"])
            buttons.next.pack(side="right", fill="x")
            options = {"file": None, "filetype": None}
            buttons.next.wait()
            if not body.exist():
                return options
            while 1:
                # pick the next still-unanswered option; for/else exits when done
                for i in options:
                    if options[i] is None:
                        break
                else:
                    break
                if i == "file":
                    body.title.configure(text=self.txt["new_sub1"])
                    body.file = body.Input.Form(type="filesave", filetypes=[(key, "."+" .".join(addon["filetypes"])) for key, addon in self.addon.loaded_addon_info.items()])
                    body.file.pack(fill="both", expand=True)
                elif i == "filetype":
                    body.title.configure(text=self.txt["new_sub2"])
                    body.filetype = body.Input.List()
                    body.filetype.pack(fill="both")
                    for ext, addons in self.addon.extdict.items():
                        for addon in addons:
                            if not body.filetype.exist_item(addon):
                                body.filetype.add_item(label=addon, id=addon)
                            body.filetype.add_item(label=ext, id=addon + "." + ext, parent=addon)
                buttons.next.wait()
                if not body.exist():
                    break
                if i == "file":
                    if body.file.value != "":
                        options["file"] = body.file.value
                        body.file.destroy()
                elif i == "filetype":
                    if len(body.filetype.value) == 1:
                        filetype = body.filetype.value[0].split(".")
                        options["filetype"] = filetype
                        options["addon"] = filetype[0]
                        if len(filetype) == 1:
                            # NOTE(review): `addon` here is the leaked loop
                            # variable from the list-building loop above —
                            # probably should be filetype[0]; confirm.
                            options["ext"] = self.addon.loaded_addon_info[addon]["filetypes"][0]
                        else:
                            options["ext"] = filetype[1]
                        body.filetype.destroy()
            root.close()
            return options
        if not "file" in options or not "filetype" in options:
            options = dialog()
        if None in options.values():
            return
        file = options["file"]
        addon = options["addon"]
        ext = options["ext"]
        label = f'{os.path.basename(file)} {f"[{ext}]" if os.path.splitext(file)[1]!="."+ext else ""}'
        tab = self.ui.notebook.add_tab(label=label)
        self.ui.notebook.select_tab(tab)
        ctx = self.addon.getAddon(addon, file, ext, tab.frame, self, self.update_state)
        tab.addon = ctx
        ctx.saved = False
        ctx.addon.new()

    def close(self, tab=None, question=-1, autodelete=True):
        """Close *tab* (current by default), asking about unsaved changes.

        question: -1 asks interactively, True saves first, False discards.
        autodelete closes the whole window once no tab remains.
        """
        if tab is None:
            tab = self.ui.notebook.value
        if not hasattr(tab, "addon"):
            self.ui.notebook.del_tab(tab)
        else:
            self.logger.info(f"Closing file '{tab.addon.filepath}'")
            if not tab.addon.saved:
                if question == -1:
                    question = self.ui.Dialog.question("yesnocancel", self.txt["check"], f"{self.txt['save_check']}\n{self.txt['file']}:{tab.addon.filepath}")
                    # NOTE(review): indentation of this del_tab is ambiguous in
                    # the stripped source — placed with the interactive branch.
                    self.ui.notebook.del_tab(tab)
                if question == True:
                    self.save(tab)
                    self.ui.notebook.del_tab(tab)
                elif question != False:
                    # cancel: keep the tab open
                    pass
                else:
                    self.ui.notebook.del_tab(tab)
            tab.addon.addon.close()
        if autodelete and len(self.ui.notebook.list_tab()) == 0:
            self.ui.close()

    def exit(self):
        """Close every open tab (prompting for unsaved changes) and quit."""
        self.logger.info("Exiting...")
        # iterate over a copy: close() mutates the tab list
        for tab in self.ui.notebook.list_tab().copy():
            self.close(tab)

    def update_state(self, addon):
        """Refresh the tab label for *addon*: '*' prefix marks unsaved changes."""
        for tab in self.ui.notebook.list_tab():
            if hasattr(tab, "addon"):
                if tab.addon is addon:
                    if addon.saved:
                        tab.label = os.path.basename(addon.filepath)
                    else:
                        tab.label = "*" + os.path.basename(addon.filepath)
                    break

    def version(self):
        """Show the About dialog (version + licence tabs)."""
        root = self.ui.makeSubWindow(dialog=True)
        root.changeTitle(self.appinfo["appname"] + " - " + self.txt["about"])
        root.note = root.Notebook()
        root.note.pack(fill="both", expand=True)
        version = root.note.add_tab(label="Version").frame
        version.title = version.Image(image="init.png")
        version.title.pack()
        version.text = version.Label(label=f"{__version__} {__revision__} -2023 Marusoftware")
        version.text.pack()
        licence = root.note.add_tab(label="Licence").frame
        licence.text = licence.Input.Text(scroll=True, readonly=True)
        licence.text.pack(fill="both", expand=True)
        with open(os.path.join(self.appinfo["cd"], "LICENCE")) as f:
            licence.text.insert("end", f.read())

    def setting(self):
        """Open the settings dialog (editor, appearance and addon tabs)."""
        root = self.ui.makeSubWindow(dialog=True)
        root.changeTitle(self.appinfo["appname"] + " - " + self.txt["marueditor"])
        root.changeSize('300x200')
        root.note = root.Notebook()
        root.note.pack(fill="both", expand=True)
        editor = root.note.add_tab(self.txt["marueditor"]).frame
        editor.open_as = editor.Input.CheckButton(label=self.txt["st_open_from"], default=self.config["open_as"], command=lambda: self.config.update(open_as=editor.open_as.value))
        editor.open_as.pack(fill="x")
        def setlang():
            # persist and immediately apply the newly selected UI language
            self.config["lang"] = editor.lang.value
            self.Loadl10n(language=editor.lang.value)
        editor.lang = editor.Input.Select(values=self.lang.lang_list, inline=True, default=self.lang.lang, command=setlang, label=self.txt["lang"] + ":")
        editor.lang.pack(fill="both")
        self.ui.uisetting(root.note.add_tab(self.txt["appearance"]).frame, self.txt)
        addon = root.note.add_tab(self.txt["addon"]).frame
        addon.addbt = addon.Input.Button(label="Install Addon")
        addon.addbt.pack()
        addon.list = addon.Input.List(columns=["ftype"], header=True)
        addon.list.pack(fill="both")
        addon.list.set_header("#0", "Addon name")
        addon.list.set_header("ftype", "Editable File type")
        for name, info in self.addon.loaded_addon_info.items():
            addon.list.add_item(label=name, values=[info["filetypes"]])
class EasyEditor():
    """Placeholder for a simplified editor front-end (not yet implemented)."""

    def __init__(self):
        # no state yet; reserved for future use
        pass
# NOTE(review): dead, stubbed-out code kept alive as a bare string literal;
# it has no runtime effect and could be removed entirely.
"""
#help
class hlp():
#show help
def help():
pass
def update():
pass
"""
def run(argv=DefaultArgv):
    """Create an Editor for *argv*, show the welcome tab and start the UI loop."""
    editor = Editor(argv)
    editor.Setup()
    editor.CreateMenu()
    editor.welcome()
    if argv.filepath is not None:
        editor.open(file=argv.filepath, as_diff_type=True)
    editor.mainloop()
if __name__ == "__main__":
    """INIT"""
    # argvParse — command-line interface matching DefaultArgv's defaults
    argv_parser = argparse.ArgumentParser("marueditor", description="Marueditor. The best editor.")
    argv_parser.add_argument("--shell", dest="shell", help="Start in shell mode.", action="store_true")
    argv_parser.add_argument("--debug", dest="debug", help="Start in debug mode.", action="store_true")
    argv_parser.add_argument("-log_level", action="store", type=int, dest="log_level", default=DefaultArgv.log_level ,help="set Log level.(0-50)")
    argv_parser.add_argument("filepath", action="store", type=str, default=DefaultArgv.filepath ,help="Open file path.", nargs='?')
    argv = argv_parser.parse_args()
    run(argv)
{
"api_name": "libmarusoftware.core.adjustEnv",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "libmarusoftware.core",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "libtools.Addon",
"line_number": 23,
"usage_type": "call"
},
{
"api_name"... |
18424838554 |
# NOTE(review): Python-2 era module (urllib.urlencode, wsgiref CGIHandler).
import sys
from datetime import datetime, timedelta
from hashlib import sha1
from hmac import new as hmac
from os.path import dirname, join as join_path
from random import getrandbits
from time import time
from urllib import urlencode, quote as urlquote
from uuid import uuid4
from wsgiref.handlers import CGIHandler

# make the bundled ./lib directory importable before third-party imports
sys.path.insert(0, join_path(dirname(__file__), 'lib')) # extend sys.path

from demjson import decode as decode_json

from google.appengine.api.urlfetch import fetch as urlfetch, GET, POST
from google.appengine.ext import db
from google.appengine.ext.webapp import RequestHandler, WSGIApplication

from django.http import HttpResponse, HttpResponseRedirect
# ------------------------------------------------------------------------------
# configuration -- SET THESE TO SUIT YOUR APP!!
# ------------------------------------------------------------------------------
# Per-service OAuth 1.0 endpoints and consumer credentials.
# SECURITY NOTE(review): a real Twitter consumer key/secret pair is committed
# here in source control; it should be rotated and supplied via config.py
# (see the `from config import OAUTH_APP_SETTINGS` override below) instead.
OAUTH_APP_SETTINGS = {
    'twitter': {
        'consumer_key': 'FUja3lLCyC4zWe5PmGdfQ',
        'consumer_secret': '4vjeFWOYWF3eGqHzoVvhzJSZdkhaHTcbJsfFz3qUBnY',
        'request_token_url': 'https://twitter.com/oauth/request_token',
        'access_token_url': 'https://twitter.com/oauth/access_token',
        'user_auth_url': 'http://twitter.com/oauth/authorize',
        'default_api_prefix': 'http://twitter.com',
        'default_api_suffix': '.json',
    },
    'google': {
        'consumer_key': '',
        'consumer_secret': '',
        'request_token_url': 'https://www.google.com/accounts/OAuthGetRequestToken',
        'access_token_url': 'https://www.google.com/accounts/OAuthGetAccessToken',
        'user_auth_url': 'https://www.google.com/accounts/OAuthAuthorizeToken',
    },
}
# Tunables for the request-token cleanup job and tweet-length validation.
CLEANUP_BATCH_SIZE = 100
EXPIRATION_WINDOW = timedelta(seconds=60*60*1)  # request tokens expire after 1 hour
CHARACTER_LIMIT = 140  # classic Twitter status length limit

# Allow a local config.py to override the checked-in app settings.
# Catch only ImportError: a missing config module is fine, but a config
# module that exists and fails to import should not be silently swallowed
# (the original bare `except:` hid such errors).
try:
    from config import OAUTH_APP_SETTINGS
except ImportError:
    pass

STATIC_OAUTH_TIMESTAMP = 12345 # a workaround for clock skew/network lag
# ------------------------------------------------------------------------------
# utility functions
# ------------------------------------------------------------------------------
def get_service_key(service, consumer_secret, cache={}):
    """Return (and memoise) the HMAC key prefix ``"<encoded secret>&"`` for *service*.

    The mutable default ``cache`` is intentional here: it acts as a
    per-process memo table that persists across calls. Callers append the
    token secret themselves (see BaseOAuthClient.get_signed_body).
    """
    if service in cache: return cache[service]
    return cache.setdefault(
        service, "%s&" % encode(consumer_secret)
    )
def create_uuid():
    """Return a fresh datastore key name of the form ``id-<uuid4>``."""
    return 'id-{}'.format(uuid4())
def encode(text):
    """Percent-encode *text* for OAuth signing; no characters are left safe."""
    return urlquote(str(text), safe='')
def twitter_specifier_handler(client):
    """Return the authenticated account's screen name (the unique per-user specifier)."""
    return client.get('/account/verify_credentials')['screen_name']

# Twitter access tokens are deduplicated by screen name via this handler.
OAUTH_APP_SETTINGS['twitter']['specifier_handler'] = twitter_specifier_handler
# ------------------------------------------------------------------------------
# db entities
# ------------------------------------------------------------------------------
class OAuthRequestToken(db.Model):
    """OAuth Request Token.

    Temporary token issued at the start of the OAuth dance; rows older than
    EXPIRATION_WINDOW are purged by BaseOAuthClient.cleanup().
    """
    service = db.StringProperty()
    oauth_token = db.StringProperty()
    oauth_token_secret = db.StringProperty()
    created = db.DateTimeProperty(auto_now_add=True)
class OAuthAccessToken(db.Model):
    """OAuth Access Token.

    Long-lived token; stored under a generated uuid key_name. ``specifier``
    is a service-specific unique handle (for Twitter: the screen name).
    """
    consumer_key = db.StringProperty()
    service = db.StringProperty()
    specifier = db.StringProperty()
    oauth_token = db.StringProperty()
    oauth_token_secret = db.StringProperty()
    created = db.DateTimeProperty(auto_now_add=True)
class TwitterNotAuthorisedError(Exception):
    """Raised when an operation requires a Twitter access token that is missing."""
# ------------------------------------------------------------------------------
# oauth client
# ------------------------------------------------------------------------------
class BaseOAuthClient(object):
    """Signs and issues OAuth 1.0 (HMAC-SHA1) requests for one configured service.

    NOTE(review): this is Python 2-era code — ``urlencode`` imported from
    ``urllib`` and ``.encode('base64')`` (in get_signed_body) do not exist as
    written on Python 3.
    """

    def __init__(self, service, request, response, consumer_key, consumer_secret, oauth_callback=None, **request_params):
        self.service = service
        self.service_info = OAUTH_APP_SETTINGS[service]
        self.service_key = None  # lazily built "<encoded secret>&" HMAC key prefix
        self.request = request
        self.response = response
        self.request_params = request_params  # extra params for the request-token call
        self.oauth_callback = oauth_callback
        self.token = None  # OAuthAccessToken, loaded on first API call
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret

    def get(self, api_method, http_method='GET', expected_status=(200,), **extra_params):
        """Signed GET against the service API; returns the decoded JSON body.

        Raises ValueError when the HTTP status is not in *expected_status*.
        """
        # Bare method names are expanded with the service's API prefix/suffix.
        if not (api_method.startswith('http://') or api_method.startswith('https://')):
            api_method = '%s%s%s' % (
                self.service_info['default_api_prefix'], api_method,
                self.service_info['default_api_suffix']
            )
        if self.token is None:
            self.token = OAuthAccessToken.get_by_key_name(self.get_cookie())
        fetch = urlfetch(self.get_signed_url(
            api_method, self.token, http_method, **extra_params
        ))
        if fetch.status_code not in expected_status:
            raise ValueError(
                "Error calling... Got return status: %i [%r]" %
                (fetch.status_code, fetch.content)
            )
        return decode_json(fetch.content)

    def post(self, api_method, http_method='POST', expected_status=(200,), **extra_params):
        """Signed POST against the service API; returns the decoded JSON body.

        Raises ValueError when the HTTP status is not in *expected_status*.
        """
        if not (api_method.startswith('http://') or api_method.startswith('https://')):
            api_method = '%s%s%s' % (
                self.service_info['default_api_prefix'], api_method,
                self.service_info['default_api_suffix']
            )
        if self.token is None:
            self.token = OAuthAccessToken.get_by_key_name(self.get_cookie())
        # Unlike get(), the signed parameters travel in the POST payload.
        fetch = urlfetch(url=api_method, payload=self.get_signed_body(
            api_method, self.token, http_method, **extra_params
        ), method=http_method)
        if fetch.status_code not in expected_status:
            raise ValueError(
                "Error calling... Got return status: %i [%r]" %
                (fetch.status_code, fetch.content)
            )
        return decode_json(fetch.content)

    # oauth workflow
    def get_request_token(self):
        """Step 1 of the dance: fetch a request token, persist it, and set a
        redirect response sending the user to the service's authorise page."""
        token_info = self.get_data_from_signed_url(
            self.service_info['request_token_url'], **self.request_params
        )
        # Response body is form-encoded: oauth_token=...&oauth_token_secret=...
        token = OAuthRequestToken(
            service=self.service,
            **dict(token.split('=') for token in token_info.split('&'))
        )
        token.put()
        if self.oauth_callback:
            oauth_callback = {'oauth_callback': self.oauth_callback}
        else:
            oauth_callback = {}
        self.response = HttpResponseRedirect(self.get_signed_url(
            self.service_info['user_auth_url'], token, **oauth_callback))

    def cleanup(self):
        """Delete up to CLEANUP_BATCH_SIZE expired request tokens; return a summary string."""
        query = OAuthRequestToken.all().filter(
            'created <', datetime.now() - EXPIRATION_WINDOW
        )
        count = query.count(CLEANUP_BATCH_SIZE)
        db.delete(query.fetch(CLEANUP_BATCH_SIZE))
        return "Cleaned %i entries" % count

    # request marshalling
    def get_data_from_signed_url(self, __url, __token=None, __meth='GET', **extra_params):
        """Fetch a signed URL and return the raw response body."""
        return urlfetch(self.get_signed_url(
            __url, __token, __meth, **extra_params
        )).content

    def get_signed_url(self, __url, __token=None, __meth='GET',**extra_params):
        """Return *__url* with the signed OAuth query string appended."""
        return '%s?%s'%(__url, self.get_signed_body(__url, __token, __meth, **extra_params))

    def get_signed_body(self, __url, __token=None, __meth='GET',**extra_params):
        """Build the url-encoded OAuth parameter string, including the HMAC-SHA1 signature."""
        service_info = self.service_info
        kwargs = {
            'oauth_consumer_key': self.consumer_key,
            'oauth_signature_method': 'HMAC-SHA1',
            'oauth_version': '1.0',
            'oauth_timestamp': int(time()),
            'oauth_nonce': getrandbits(64),
        }
        kwargs.update(extra_params)
        if self.service_key is None:
            self.service_key = get_service_key(self.service, self.consumer_secret)
        if __token is not None:
            kwargs['oauth_token'] = __token.oauth_token
            # Signing key is "<consumer secret>&<token secret>".
            key = self.service_key + encode(__token.oauth_token_secret)
        else:
            key = self.service_key
        # Signature base string: METHOD&url&sorted-percent-encoded-params.
        message = '&'.join(map(encode, [
            __meth.upper(), __url, '&'.join(
                '%s=%s' % (encode(k), encode(kwargs[k])) for k in sorted(kwargs)
            )
        ]))
        # NOTE(review): str.encode('base64') is Python 2 only; on Python 3
        # this needs base64.b64encode.
        kwargs['oauth_signature'] = hmac(
            key, message, sha1
        ).digest().encode('base64')[:-1]
        return urlencode(kwargs)

    # who stole the cookie from the cookie jar?
    def get_cookie(self):
        """Return the per-service session cookie value ('' when absent).

        NOTE(review): ``self.handler`` is never assigned anywhere in this
        class (``__init__`` stores ``self.request``/``self.response``), so
        this and the two methods below raise AttributeError at runtime —
        looks half-ported from webapp RequestHandlers to Django. Confirm
        against the callers.
        """
        return self.handler.request.cookies.get(
            'oauth.%s' % self.service, ''
        )

    def set_cookie(self, value, path='/'):
        # NOTE(review): see get_cookie — self.handler is never set.
        self.handler.response.headers.add_header(
            'Set-Cookie',
            '%s=%s; path=%s; expires="Fri, 31-Dec-2021 23:59:59 GMT"' %
            ('oauth.%s' % self.service, value, path)
        )

    def expire_cookie(self, path='/'):
        # Expire the cookie by setting a date in the past.
        # NOTE(review): see get_cookie — self.handler is never set.
        self.handler.response.headers.add_header(
            'Set-Cookie',
            '%s=; path=%s; expires="Fri, 31-Dec-1999 23:59:59 GMT"' %
            ('oauth.%s' % self.service, path)
        )
class TwitterClient(BaseOAuthClient):
    """Twitter-specific OAuth client plus a status-update helper."""
    # Methods exposed to the request dispatcher.
    __public__ = ('callback', 'cleanup', 'login', 'logout')

    # public methods
    def login(self):
        """Start the OAuth dance, or short-circuit when a session cookie exists."""
        proxy_id = self.get_cookie()
        if proxy_id:
            # NOTE(review): debug-looking placeholder response left in.
            return "FOO%rFF" % proxy_id
        self.expire_cookie()
        return self.get_request_token()

    def logout(self, return_to='/'):
        """Clear the session cookie and redirect.

        NOTE(review): uses ``self.handler``, which is never set — see the
        base-class note on get_cookie.
        """
        self.expire_cookie()
        self.handler.redirect(self.handler.request.get("return_to", return_to))

    def callback(self, return_to='/'):
        """OAuth callback: exchange the request token for an access token and store it."""
        oauth_token = self.request.GET.get("oauth_token", None)
        if not oauth_token:
            return self.get_request_token()
        # NOTE(review): fetch(1)[0] raises IndexError for an unknown or
        # already-cleaned-up request token.
        oauth_token = OAuthRequestToken.all().filter(
            'oauth_token =', oauth_token).filter(
            'service =', self.service).fetch(1)[0]
        token_info = self.get_data_from_signed_url(
            self.service_info['access_token_url'], oauth_token
        )
        key_name = create_uuid()
        self.token = OAuthAccessToken(
            key_name=key_name, service=self.service, consumer_key=self.consumer_key,
            **dict(token.split('=') for token in token_info.split('&'))
        )
        if 'specifier_handler' in self.service_info:
            # Deduplicate: drop any older tokens stored for the same account.
            specifier = self.token.specifier = self.service_info['specifier_handler'](self)
            old = OAuthAccessToken.all().filter(
                'specifier =', specifier).filter(
                'service =', self.service)
            try:
                db.delete(old)
            except IndexError:
                # NOTE(review): db.delete does not raise IndexError; this
                # looks like a dead branch.
                s = ''
        self.response = HttpResponse('Authorisation successful')
        self.token.put()
        #self.set_cookie(key_name)

    # posts an update to Twitter
    def post_update(self, access_token, status):
        """POST *status* as the authenticated account; return the decoded JSON.

        Raises TwitterNotAuthorisedError when no token is supplied, and
        ValueError on a non-200 response.
        """
        if access_token is None:
            raise TwitterNotAuthorisedError("You need to authorise this Twitter account")
        url = 'http://api.twitter.com/1/statuses/update.json'
        if len(status) > CHARACTER_LIMIT:
            # NOTE(review): TwitterError is not defined anywhere in this
            # module, so this branch actually raises NameError.
            raise TwitterError("Text must be less than or equal to %d characters. "
                               "Consider using PostUpdates." % CHARACTER_LIMIT)
        data = {'status': status}
        fetch = urlfetch(url, payload=self.get_signed_body(url, access_token, 'POST', **data), method=POST)
        expected_status =(200,)
        if fetch.status_code not in expected_status:
            raise ValueError(
                "Error calling... Got return status: %i [%r]" %
                (fetch.status_code, fetch.content)
            )
        return decode_json(fetch.content)
return decode_json(fetch.content) | jaredwy/taskoverflow | to-site/taskoverflow/twitteroauth.py | twitteroauth.py | py | 11,846 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "sys.path.insert",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"lin... |
39458596841 | from django.urls.resolvers import URLPattern
from django.urls import path
from . import views
# REST-style routes: list/create at the collection root, detail (retrieve/
# update/delete) at /<int:pk>. NOTE(review): the "transection" spelling is
# part of the public URL and matches the view class names, so it is kept.
urlpatterns = [
    path('customer/', views.CustomerList.as_view()),
    path('customer/<int:pk>', views.CustomerDetail.as_view()),
    path('product/', views.ProductList.as_view()),
    path('product/<int:pk>', views.ProductDetail.as_view()),
    path('transection/', views.TransectionList.as_view()),
    path('transection/<int:pk>', views.TransectionDetail.as_view()),
]
| ankitaggarwal1986/DjangoWorkforce | emart/products/urls.py | urls.py | py | 469 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
18875650180 | from __future__ import print_function
from stone import lexer, mytoken
import tempfile
# Write a small Stone program to a temporary file and tokenize it with the
# lexer, printing every token until EOF.
# Open the temp file in text mode: TemporaryFile() defaults to 'w+b', which
# rejects str writes on Python 3 — that is the bug this fixes.
f = tempfile.TemporaryFile(mode='w+')
f.write("""if (i==1) {
a=1
}
while i < 10 {
sum = sum + i
i = i + 1
}
sum
""")
f.seek(0)

l = lexer.Lexer(f)
while True:
    t = l.read()
    # EOF is a sentinel token object, hence the identity comparison.
    if t is mytoken.Token.EOF:
        print('break')
        break
    print("=> " + t.get_text())
| planset/stone-py | chap3_lexer_runner.py | chap3_lexer_runner.py | py | 367 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "tempfile.TemporaryFile",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "stone.lexer.Lexer",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "stone.lexer",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "stone.mytoken.Tok... |
9815980344 | import sys
import requests
import random
# Positional CLI arguments: account token, invite code, and "True"/"False"
# for proxy usage. NOTE(review): no argument-count validation — missing
# arguments raise IndexError.
token = sys.argv[1]
link = sys.argv[2]
useproxies = sys.argv[3]

if useproxies == 'True':
    proxy_list = open("proxies.txt").read().splitlines()

def proxyjoin():
    """Keep retrying the join through random proxies until one succeeds.

    Rewritten as a loop: the original recursed on every failure, which can
    exhaust Python's recursion limit when many proxies are dead.
    """
    while True:
        try:
            proxy = random.choice(proxy_list)
            requests.post(apilink, headers=headers, proxies={"http": proxy, "https": proxy})
            return
        except Exception:
            continue

apilink = "https://discordapp.com/api/v6/invite/" + str(link)
headers = {
    'Authorization': token
}
if useproxies == 'True':
    proxyjoin()
else:
    requests.post(apilink, headers=headers)
| X-Nozi/NoziandNiggarr24Toolbox | spammer/joiner.py | joiner.py | py | 612 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "random.choice",
"line_numb... |
12038245832 | # SSCR Server - Simple Socket Chat Room Server
# Version: Alpha 0.9
# This is the server app of the Simple Socket Chat Room which handles all client connections
# todo: 1. [DONE] Clean up server side prints to only whats relevant to the eye in realtime and implement a log for later debugging/analysing
# 2. [DONE] More bot commands!
# 3. [DONE] Permission system for bot commands
# 4. [DONE] Generate Token on server lunch for admin to authenticate with when they connect with the client for bots permmissions
# 5. [DONE] kick command
# 7. Implement a timeout or fail counter for failed sends to client for dropping unnecessary connections
import socket
import chatbot
import config
import sys
import _thread
import datetime
import random
try:
    client_list = {} # dict of {client(id) -> connections from socket.accept()}
    id_counter = 1 # counter that will increment to keep unique ID for every joined user
    server = socket.socket() # the global server socket variable
    logfile = open('sscr-server.log', 'a') # used for log
except Exception as e:
    # NOTE(review): on failure (e.g. the log file cannot be opened) the error
    # is only printed and the module continues with some globals undefined,
    # which surfaces later as NameError.
    print(e)
# logs a message to the logfile
def log(logMessage):
    """Append a timestamped line to the global log file and flush immediately."""
    global logfile
    logfile.write(f"{datetime.datetime.now()}: {logMessage}\n")
    logfile.flush()
# generates a unique token
def token_gen(length):
    """Return a random admin token of *length* alphanumeric characters.

    Uses the ``secrets`` module rather than ``random``: this token gates the
    admin bot commands, so it must come from a cryptographically strong
    source (the stdlib ``random`` PRNG is predictable).
    """
    import secrets
    import string
    alphabet = string.ascii_letters + string.digits
    return "".join(secrets.choice(alphabet) for _ in range(length))
# joins a member and return his ID if successful, on fail return -1
def join(conn, name):
    """Register *conn* under the next free id in the global client tables.

    Returns the assigned user id, or -1 if registration failed.
    """
    try:
        global client_list
        global id_counter
        client_list[id_counter] = conn
        config.name_list[id_counter] = name
        print(f"Added: {name}, ID: {id_counter}")
        log(f"Added: {name}, ID: {id_counter}")
        log(f"Updated client_list: {client_list}")
        id_counter += 1
        # The counter was already advanced; return the id just assigned.
        return id_counter - 1
    except Exception as e:
        print(e)
        return -1
# greet a user on connection and asks for his name then calls join to add him to the list and get the user id
# returns the user id or -1 on fail
def greet(conn, addr):
    """Welcome a new connection, ask for a display name, and register the client.

    Returns the new user id, or -1 on any failure. (The original returned
    ``False`` from the except branch, but the caller checks ``== -1``, and
    ``False == -1`` is False — so failed greets were never closed. Returning
    -1 makes the caller's cleanup path actually fire.)
    """
    try:
        log(f"New thread: {conn}")
        conn.send(f"Welcome to {config.room_name}\nYour address will not be shared with anyone\nPlease Type in your name: ".encode())
        user_name = conn.recv(1024).decode().replace("\n", "")
        user_id = join(conn, user_name)
        conn.send(f"Your user ID: {user_id}\n".encode())
        # join() already returns -1 on failure, so just propagate its result.
        return user_id
    except Exception as e:
        print(e)
        log(e)
        return -1
# sends message to all connections in client_list
def broadcast(message, name):
    """Send ``"<name>: <message>"`` to every connected client and log it.

    Failed sends are logged but the client is kept connected; see the
    file-top TODO about dropping repeatedly failing connections.
    """
    message = f"{name}: " + message
    print(message)
    log(message)
    message += "\n"
    for client in client_list:
        try:
            client_list[client].send(message.encode())
            log(f"Sent message successfully to {client_list[client]}")
        except Exception as e:
            log(f"\nFailed to send message to {config.name_list[client]} ({client})")
            log(e)
def bot_reply_handl(command, userId, conn):
    """Translate a chatbot command result into socket replies/broadcasts.

    ``command`` strings from chatbot.evalCommand are prefixed with
    "private:", "fail:", "public:" or "kick=<name>:"; any error is written
    to the log only.
    """
    try:
        name = config.name_list[userId]
        log(f"command: {command} userID: {userId} name: {name}")
        if command.startswith("private:"):
            conn.send("-Command successfully executed-\n".encode())
        elif command.startswith("fail:"):
            conn.send("-Command failed to execute-\n".encode())
        elif command.startswith("public:"):
            broadcast(command.split(":")[1], "ChatBot")
        elif command.startswith("kick="):
            userToKick = command.split(":")[0].split("=")[1]
            for id, username in config.name_list.items():
                if username == userToKick:
                    userToKick = id # get a userID by name so the associated connection can be closed
            broadcast(f"{config.name_list[userToKick]} has been kicked from the room", "ChatBot")
            # Fix: encode the payload, not the return value of send() — the
            # original called `.send("...").encode()`, which passed a str to
            # send() (TypeError) so the kick never completed.
            client_list[userToKick].send("you have been kicked...\n".encode())
            client_list[userToKick].close()
            return
    except Exception as e:
        log(e)
# handles each connection thread
def client_handle(conn, addr):
    """Per-connection thread body: greet the client, then relay messages and
    bot commands forever."""
    # if couldn't get the user to supply name and add him to the list close connection and give up on him
    if greet(conn, addr) == -1:
        conn.close()
        return
    # find name and userID associated with the socket
    # NOTE(review): if the socket is somehow absent from client_list, `name`
    # and `userId` stay unbound and the lines below raise NameError.
    for id, sock in client_list.items():
        if sock == conn:
            name = config.name_list[id]
            userId = id
    broadcast(f"{name} has joined the room", 'Server')
    # command has been executed and this is not a message if this is True
    while True:
        message = conn.recv(2048).decode().replace("\n", "")
        command = chatbot.evalCommand(message, userId) # command has been executed and this is not a message if this is True
        if command:
            bot_reply_handl(command, userId, conn)
        else:
            name = config.name_list[userId] # update name before sending message
            broadcast(message, name)
# main first binds the server to the port then starts a new thread for each connection received.
# each client connection thread goes though the following route: client_handle() => greet() => join() <= greet() =>
def main(port):
    """Bind on all interfaces at *port*, print the one-time admin token, and
    spawn a thread per accepted connection."""
    try:
        global server
        server.bind(('0.0.0.0', port))
        server.listen(20)
        # The admin token is shown once on the console; clients authenticate
        # with it to unlock privileged bot commands.
        config.token = token_gen(12)
        print(f"Admin token is: {config.token}")
        print("Server is up, waiting for connections...")
        while True:
            conn, addr = server.accept()
            print(f"{addr} Connected")
            log(f"{addr} Connected")
            _thread.start_new_thread(client_handle, (conn, addr))
    except Exception as e:
        print(e)
# Simple sanity check before calling main with port number
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: sscr-server.py port")
        # sys.exit with a non-zero status instead of the site-provided
        # exit() builtin, which is not guaranteed to exist in all runs.
        sys.exit(1)
    else:
        try:
            port = int(sys.argv[1])
            main(port)
        except Exception as e:
            print("Fatal error: failed to launch")
            print(e)
    # main(8000)
| Argentix03/Simple-Socket-Chat-Room | sscr-server.py | sscr-server.py | py | 6,620 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "socket.socket",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "random.ran... |
34766714821 | import os
import gc
import time
import random
import warnings
import cv2
import hydra
from tqdm import tqdm
import numpy as np
import torch
import torch.optim as torch_optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from losses import DBLoss
from lr_schedulers import WarmupPolyLR
from models import DBTextModel
from text_metrics import (cal_text_score, RunningScore, QuadMetric)
from utils import (setup_determinism, setup_logger, dict_to_device,
visualize_tfb, to_device)
from postprocess import SegDetectorRepresenter
warnings.filterwarnings('ignore')
# https://github.com/pytorch/pytorch/issues/1355
cv2.setNumThreads(0)
def get_data_loaders(cfg):
    """Build the train/test DataLoaders for the dataset named in ``cfg.dataset.name``.

    The dataset-specific iterator class is imported lazily so only the
    selected dataset's loader module needs to be importable.

    Raises:
        NotImplementedError: for an unknown dataset name.
    """
    dataset_name = cfg.dataset.name
    ignore_tags = cfg.data[dataset_name].ignore_tags
    train_dir = cfg.data[dataset_name].train_dir
    test_dir = cfg.data[dataset_name].test_dir
    train_gt_dir = cfg.data[dataset_name].train_gt_dir
    test_gt_dir = cfg.data[dataset_name].test_gt_dir
    if dataset_name == 'totaltext':
        from data_loaders import TotalTextDatasetIter
        TextDatasetIter = TotalTextDatasetIter
    elif dataset_name == 'ctw1500':
        from data_loaders import CTW1500DatasetIter
        TextDatasetIter = CTW1500DatasetIter
    elif dataset_name == 'icdar2015':
        from data_loaders import ICDAR2015DatasetIter
        TextDatasetIter = ICDAR2015DatasetIter
    elif dataset_name == 'msra_td500':
        from data_loaders import MSRATD500DatasetIter
        TextDatasetIter = MSRATD500DatasetIter
    else:
        raise NotImplementedError("Pls provide valid dataset name!")
    train_iter = TextDatasetIter(train_dir,
                                 train_gt_dir,
                                 ignore_tags,
                                 image_size=cfg.hps.img_size,
                                 is_training=True,
                                 debug=False)
    test_iter = TextDatasetIter(test_dir,
                                test_gt_dir,
                                ignore_tags,
                                image_size=cfg.hps.img_size,
                                is_training=False,
                                debug=False)
    train_loader = DataLoader(dataset=train_iter,
                              batch_size=cfg.hps.batch_size,
                              shuffle=True,
                              num_workers=1)
    # Test loader keeps dataset order and uses the main process only.
    test_loader = DataLoader(dataset=test_iter,
                             batch_size=cfg.hps.test_batch_size,
                             shuffle=False,
                             num_workers=0)
    return train_loader, test_loader
def main(cfg):
    """Train DBNet end-to-end per the hydra config.

    Per epoch: run a full training pass, then a full evaluation pass
    (loss, pixel acc/IoU, and P/R/Hmean over decoded boxes), logging to
    TensorBoard and checkpointing the best-hmean and best-loss weights.
    """
    # set determinism
    setup_determinism(42)
    # setup logger
    logger = setup_logger(
        os.path.join(cfg.meta.root_dir, cfg.logging.logger_file))
    # setup log folder
    log_dir_path = os.path.join(cfg.meta.root_dir, "logs")
    if not os.path.exists(log_dir_path):
        os.makedirs(log_dir_path)
    # Each run gets a timestamped TensorBoard directory.
    tfb_log_dir = os.path.join(log_dir_path, str(int(time.time())))
    logger.info(tfb_log_dir)
    if not os.path.exists(tfb_log_dir):
        os.makedirs(tfb_log_dir)
    tfb_writer = SummaryWriter(tfb_log_dir)
    device = cfg.meta.device
    logger.info(device)
    dbnet = DBTextModel().to(device)
    lr_optim = cfg.optimizer.lr
    # load best cp
    if cfg.model.finetune_cp_path:
        cp_path = os.path.join(cfg.meta.root_dir, cfg.model.finetune_cp_path)
        if os.path.exists(cp_path) and cp_path.endswith('.pth'):
            # Fine-tuning runs use their own (typically smaller) LR.
            lr_optim = cfg.optimizer.lr_finetune
            logger.info("Loading best checkpoint: {}".format(cp_path))
            dbnet.load_state_dict(torch.load(cp_path, map_location=device))
    dbnet.train()
    criterion = DBLoss(alpha=cfg.optimizer.alpha,
                       beta=cfg.optimizer.beta,
                       negative_ratio=cfg.optimizer.negative_ratio,
                       reduction=cfg.optimizer.reduction).to(device)
    db_optimizer = torch_optim.Adam(dbnet.parameters(),
                                    lr=lr_optim,
                                    weight_decay=cfg.optimizer.weight_decay,
                                    amsgrad=cfg.optimizer.amsgrad)
    # setup model checkpoint
    best_test_loss = np.inf
    best_train_loss = np.inf
    best_hmean = 0
    db_scheduler = None
    lrs_mode = cfg.lrs.mode
    logger.info("Learning rate scheduler: {}".format(lrs_mode))
    if lrs_mode == 'poly':
        db_scheduler = WarmupPolyLR(db_optimizer,
                                    warmup_iters=cfg.lrs.warmup_iters)
    elif lrs_mode == 'reduce':
        db_scheduler = torch_optim.lr_scheduler.ReduceLROnPlateau(
            optimizer=db_optimizer,
            mode='min',
            factor=cfg.lrs.factor,
            patience=cfg.lrs.patience,
            verbose=True)
    # get data loaders
    dataset_name = cfg.dataset.name
    logger.info("Dataset name: {}".format(dataset_name))
    logger.info("Ignore tags: {}".format(cfg.data[dataset_name].ignore_tags))
    totaltext_train_loader, totaltext_test_loader = get_data_loaders(cfg)
    # train model
    logger.info("Start training!")
    torch.cuda.empty_cache()
    gc.collect()
    global_steps = 0
    for epoch in range(cfg.hps.no_epochs):
        # TRAINING
        dbnet.train()
        train_loss = 0
        running_metric_text = RunningScore(cfg.hps.no_classes)
        for batch_index, batch in enumerate(totaltext_train_loader):
            lr = db_optimizer.param_groups[0]['lr']
            global_steps += 1
            batch = dict_to_device(batch, device=device)
            preds = dbnet(batch['img'])
            # Training forward pass yields 3 channels (prob/thresh/binary);
            # eval mode below yields 2 (see the matching assert).
            assert preds.size(1) == 3
            _batch = torch.stack([
                batch['prob_map'], batch['supervision_mask'],
                batch['thresh_map'], batch['text_area_map']
            ])
            prob_loss, threshold_loss, binary_loss, prob_threshold_loss, total_loss = criterion( # noqa
                preds, _batch)
            db_optimizer.zero_grad()
            total_loss.backward()
            db_optimizer.step()
            if lrs_mode == 'poly':
                # Poly scheduler steps per iteration; 'reduce' steps per epoch.
                db_scheduler.step()
            score_shrink_map = cal_text_score(
                preds[:, 0, :, :],
                batch['prob_map'],
                batch['supervision_mask'],
                running_metric_text,
                thresh=cfg.metric.thred_text_score)
            # NOTE(review): accumulates the loss *tensor* (not .item()),
            # which keeps tensors alive for the whole epoch — confirm this
            # is intended.
            train_loss += total_loss
            acc = score_shrink_map['Mean Acc']
            iou_shrink_map = score_shrink_map['Mean IoU']
            # tf-board
            tfb_writer.add_scalar('TRAIN/LOSS/total_loss', total_loss,
                                  global_steps)
            tfb_writer.add_scalar('TRAIN/LOSS/loss', prob_threshold_loss,
                                  global_steps)
            tfb_writer.add_scalar('TRAIN/LOSS/prob_loss', prob_loss,
                                  global_steps)
            tfb_writer.add_scalar('TRAIN/LOSS/threshold_loss', threshold_loss,
                                  global_steps)
            tfb_writer.add_scalar('TRAIN/LOSS/binary_loss', binary_loss,
                                  global_steps)
            tfb_writer.add_scalar('TRAIN/ACC_IOU/acc', acc, global_steps)
            tfb_writer.add_scalar('TRAIN/ACC_IOU/iou_shrink_map',
                                  iou_shrink_map, global_steps)
            tfb_writer.add_scalar('TRAIN/HPs/lr', lr, global_steps)
            if global_steps % cfg.hps.log_iter == 0:
                logger.info(
                    "[{}-{}] - lr: {} - total_loss: {} - loss: {} - acc: {} - iou: {}" # noqa
                    .format(epoch + 1, global_steps, lr, total_loss,
                            prob_threshold_loss, acc, iou_shrink_map))
        end_epoch_loss = train_loss / len(totaltext_train_loader)
        logger.info("Train loss: {}".format(end_epoch_loss))
        gc.collect()
        # TFB IMGs
        # shuffle = True
        visualize_tfb(tfb_writer,
                      batch['img'],
                      preds,
                      global_steps=global_steps,
                      thresh=cfg.metric.thred_text_score,
                      mode="TRAIN")
        seg_obj = SegDetectorRepresenter(thresh=cfg.metric.thred_text_score,
                                         box_thresh=cfg.metric.prob_threshold,
                                         unclip_ratio=cfg.metric.unclip_ratio)
        metric_cls = QuadMetric()
        # EVAL
        dbnet.eval()
        test_running_metric_text = RunningScore(cfg.hps.no_classes)
        test_loss = 0
        raw_metrics = []
        # Pick one random test batch per epoch to visualize in TensorBoard.
        test_visualize_index = random.choice(range(len(totaltext_test_loader)))
        for test_batch_index, test_batch in tqdm(
                enumerate(totaltext_test_loader),
                total=len(totaltext_test_loader)):
            with torch.no_grad():
                test_batch = dict_to_device(test_batch, device)
                test_preds = dbnet(test_batch['img'])
                # Eval forward pass omits the binary map (2 channels).
                assert test_preds.size(1) == 2
                _batch = torch.stack([
                    test_batch['prob_map'], test_batch['supervision_mask'],
                    test_batch['thresh_map'], test_batch['text_area_map']
                ])
                test_total_loss = criterion(test_preds, _batch)
                test_loss += test_total_loss
                # visualize predicted image with tfb
                if test_batch_index == test_visualize_index:
                    visualize_tfb(tfb_writer,
                                  test_batch['img'],
                                  test_preds,
                                  global_steps=global_steps,
                                  thresh=cfg.metric.thred_text_score,
                                  mode="TEST")
                test_score_shrink_map = cal_text_score(
                    test_preds[:, 0, :, :],
                    test_batch['prob_map'],
                    test_batch['supervision_mask'],
                    test_running_metric_text,
                    thresh=cfg.metric.thred_text_score)
                test_acc = test_score_shrink_map['Mean Acc']
                test_iou_shrink_map = test_score_shrink_map['Mean IoU']
                tfb_writer.add_scalar('TEST/LOSS/val_loss', test_total_loss,
                                      global_steps)
                tfb_writer.add_scalar('TEST/ACC_IOU/val_acc', test_acc,
                                      global_steps)
                tfb_writer.add_scalar('TEST/ACC_IOU/val_iou_shrink_map',
                                      test_iou_shrink_map, global_steps)
                # Cal P/R/Hmean
                batch_shape = {'shape': [(cfg.hps.img_size, cfg.hps.img_size)]}
                box_list, score_list = seg_obj(
                    batch_shape,
                    test_preds,
                    is_output_polygon=cfg.metric.is_output_polygon)
                raw_metric = metric_cls.validate_measure(
                    test_batch, (box_list, score_list))
                raw_metrics.append(raw_metric)
        metrics = metric_cls.gather_measure(raw_metrics)
        recall = metrics['recall'].avg
        precision = metrics['precision'].avg
        hmean = metrics['fmeasure'].avg
        if hmean >= best_hmean:
            best_hmean = hmean
            torch.save(
                dbnet.state_dict(),
                os.path.join(cfg.meta.root_dir, cfg.model.best_hmean_cp_path))
        logger.info(
            "TEST/Recall: {} - TEST/Precision: {} - TEST/HMean: {}".format(
                recall, precision, hmean))
        tfb_writer.add_scalar('TEST/recall', recall, global_steps)
        tfb_writer.add_scalar('TEST/precision', precision, global_steps)
        tfb_writer.add_scalar('TEST/hmean', hmean, global_steps)
        test_loss = test_loss / len(totaltext_test_loader)
        logger.info("[{}] - test_loss: {}".format(global_steps, test_loss))
        # Best-loss checkpoint requires BOTH train and test loss to improve.
        if test_loss <= best_test_loss and train_loss <= best_train_loss:
            best_test_loss = test_loss
            best_train_loss = train_loss
            torch.save(dbnet.state_dict(),
                       os.path.join(cfg.meta.root_dir, cfg.model.best_cp_path))
        if lrs_mode == 'reduce':
            db_scheduler.step(test_loss)
        torch.cuda.empty_cache()
        gc.collect()
    logger.info("Training completed")
    torch.save(dbnet.state_dict(),
               os.path.join(cfg.meta.root_dir, cfg.model.last_cp_path))
    logger.info("Saved model")
@hydra.main(config_path="../config.yaml", strict=False)
def run(cfg):
    """Hydra entry point: forward the composed config to :func:`main`."""
    main(cfg)
if __name__ == '__main__':
run()
| huyhoang17/DB_text_minimal | src/train.py | train.py | py | 12,725 | python | en | code | 34 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cv2.setNumThreads",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "data_loaders.TotalTextDatasetIter",
"line_number": 40,
"usage_type": "name"
},
{
"api_n... |
37290284583 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from .models import Image, Comment, Like
from .forms import userRegForm, logForm, addImageForm, commentForm
from django.contrib.auth.decorators import login_required
from datetime import datetime
# Create your views here.
def home(request):
    """Landing page: six most recent and six most liked images."""
    context = {}
    context['images_latest'] = Image.objects.all().order_by('-uploaded')[:6]
    context['images_popular'] = Image.objects.all().order_by('-likes')[:6]
    # 'all' selects the "all images" variant of the shared template.
    context['all'] = True
    return render(request, 'home.html',context)
def logIn(request):
    """Render the login form; authenticate and redirect home on success."""
    form = logForm(request.POST or None)
    if request.method == "POST":
        if form.is_valid():
            user = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password'])
            if user is not None:
                login(request, user)
                return redirect(home)
            else:
                # Generic error: do not reveal which field was wrong.
                return render(request,'login.html',{'form':form, 'err':"Invalid username or password"})
    return render(request, 'login.html', {'form':form})
def register(request):
    """Sign-up view: create the user with a hashed password and log them in.

    The password/confirmation match is checked here (not in the form); on
    mismatch the form is re-rendered with ``perr`` set.
    """
    form = userRegForm(request.POST or None)
    if request.method == "POST":
        if form.is_valid():
            if form.cleaned_data['password'] == form.cleaned_data['password2']:
                user = form.save(commit=False)
                # set_password stores the hash, never the raw password.
                user.set_password(form.cleaned_data['password'])
                user.save()
                login(request, user)
                return redirect(home)
            else:
                return render(request, 'register.html', {'form':form, 'perr':"Passwords didn't match"})
    return render(request, 'register.html', {'form':form})
@login_required(login_url='/login/')
def addImage(request):
    """Image upload form: attach the current user and start the like counter at 0."""
    form = addImageForm(request.POST or None, request.FILES or None)
    if request.method=="POST":
        if form.is_valid():
            image = form.save(commit=False)
            image.user = request.user
            image.likes = 0
            image.save()
            return redirect(home)
    return render(request, 'addImage.html', {'form':form})
def allImages(request):
    """Gallery of every uploaded image, reusing the profile template."""
    images = Image.objects.all()
    return render(request, 'profile.html', {'images':images, 'all':True})
def viewImage(request, image_id):
    """Image detail page: comments, like counts, and (if logged in) like state."""
    context = {}
    context['image'] = Image.objects.get(id=image_id)
    context['form'] = commentForm()
    context['comments'] = Comment.objects.filter(image=context['image'])
    context['likes'] = Like.objects.filter(image_id=image_id).count()
    context['totalcomments'] = Comment.objects.filter(image_id=image_id).count()
    # is_authenticated is called here — pre-1.10 Django style in this codebase.
    if request.user.is_authenticated():
        context['is_liked'] = Like.objects.filter(user=request.user, image_id=image_id).exists()
    return render(request, 'viewimage.html', context)
@login_required(login_url='/login/')
def comment(request, image_id):
    """Attach a new comment by the current user to the given image."""
    image = Image.objects.get(id=image_id)
    form = commentForm(request.POST)
    if form.is_valid():
        new_comment = form.save(commit=False)
        new_comment.user = request.user
        new_comment.image = image
        new_comment.save()
    return redirect(viewImage, image_id)
@login_required(login_url='/login/')
def editcomment(request, image_id, comment_id):
    """Replace a comment's text and bump its timestamp, then return to the image.

    Only the comment's author may edit it — mirrors the ownership check in
    ``deletecomment``. (Previously any logged-in user could edit anyone's
    comment.)
    """
    to_change = Comment.objects.get(id=comment_id)
    if to_change.user == request.user:
        to_change.cmnt = request.POST['edited-cmnt']
        to_change.commented = datetime.now()
        to_change.save()
    return redirect(viewImage, image_id)
@login_required(login_url='/login/')
def deletecomment(request, image_id, comment_id):
    """Delete a comment if the requester is its author or owns the image."""
    comment = Comment.objects.get(id=comment_id)
    if comment.user == request.user or Image.objects.get(id=image_id).user == request.user:
        Comment.objects.get(id=comment_id).delete()
    return redirect(viewImage, image_id)
@login_required(login_url='/login/')
def like(request, image_id):
    """Add a like from the current user (no-op if already liked) and bump the counter.

    NOTE(review): the exists-check plus read-modify-write increment is not
    atomic, so concurrent requests can double-count; an F() expression
    update would be safer.
    """
    if not Like.objects.filter(user=request.user, image_id=image_id).exists():
        image = Image.objects.get(id=image_id)
        image.likes += 1
        image.save()
        new_like = Like(user=request.user, image=image)
        new_like.save()
    return redirect(viewImage, image_id)
@login_required(login_url='/login/')
def dislike(request, image_id):
    """Remove the current user's like and decrement the cached counter.

    Previously only the Like row was deleted, so ``Image.likes`` (which
    drives the "popular" ordering on the home page) drifted upward and out
    of sync with the real like count; decrement it to mirror ``like``.
    """
    if Like.objects.filter(user=request.user, image_id=image_id).exists():
        Like.objects.filter(user=request.user, image_id=image_id).delete()
        image = Image.objects.get(id=image_id)
        image.likes -= 1
        image.save()
    return redirect(viewImage, image_id)
def profile(request, username=''):
    """Public profile page listing the named user's uploads.

    NOTE(review): an empty or unknown username makes ``User.objects.get``
    raise DoesNotExist — there is no 404 handling here.
    """
    context = {}
    context['this_user'] = User.objects.get(username=username)
    context['images'] = Image.objects.filter(user=context['this_user'])
    return render(request, 'profile.html', context)
@login_required(login_url='/login/')
def deleteImg(request, image_id):
    """Delete an image, but only when the requester is its owner."""
    image = Image.objects.get(id=image_id)
    if image.user == request.user:
        image.delete()
    # NOTE(review): redirect(profile) passes no username, yet profile() looks
    # a User up by username (default '') — verify against the URLconf;
    # redirecting to request.user's own profile may be what was intended.
    return redirect(profile)
| RAHUL-ALAM/Cookpad-Assignment | image/views.py | views.py | py | 4,575 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "models.Image.objects.all",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "models.Image.objects",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "models.Image",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "model... |
38031987622 | import csv
from ipaddress import ip_address
from .report import Host, Port
ALLOWED_COLUMNS = [
'ipv4',
'hostname',
'os_name',
'port',
'state',
'protocol',
'service',
'software_banner',
'version',
'cpe',
'other_info',
]
class CSVFileParser:
    """Parses a flat CSV export (one row per host/port pair) into Host objects."""

    def load_hosts(self, file):
        """Read *file* with csv.DictReader and group consecutive rows by IP.

        Rows are assumed to be pre-sorted so all ports of a host are
        contiguous: a new Host is started whenever the 'ipv4' value changes
        from the previous row.  Rows with an empty 'port' contribute only the
        host entry.  Returns the list of Host objects.
        """
        reader = csv.DictReader(file)
        hosts = []
        for row in reader:
            # First row, or the IP changed -> start a new host.
            # (Removed the unused `current_host` variable from the original.)
            if not hosts or hosts[-1].ipv4.exploded != row['ipv4']:
                hosts.append(Host(ipv4=ip_address(row['ipv4']),
                                  hostname=row.get('hostname'),
                                  os_info={'name': row.get('os_name')}))
            port_value = row.get('port')
            if port_value not in ('', None):
                # Ports may be exported as floats (e.g. "80.0"); normalize to int.
                hosts[-1].ports.append(Port(
                    port=int(float(port_value)),
                    protocol=row.get('protocol'),
                    state=row.get('state'),
                    service=row.get('service'),
                    software=row.get('software_banner'),
                    version=row.get('version'),
                    cpe=row.get('cpe')
                ))
        return hosts
| delvelabs/batea | batea/core/csv_parser.py | csv_parser.py | py | 1,260 | python | en | code | 287 | github-code | 36 | [
{
"api_name": "csv.DictReader",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "report.Host",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "ipaddress.ip_address",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "report.Port",
"li... |
31050296898 | import unittest
import hypothesis.strategies as st
from django.test import RequestFactory
from hypothesis import assume, example, given
from ...serializers import HeaderSerializer
from ..payload_factories import PayloadRequestFactory
class TestHeaderSerializer(unittest.TestCase):
    """Unit tests for HeaderSerializer's handling of the X-GitHub-Event header."""

    def setUp(self):
        # Every test starts from a bare POST with no GitHub headers attached.
        request_factory = RequestFactory()
        self.request = request_factory.post('/api/github/')

    def test_default_missing(self):
        """
        HeaderSerializer is invalid given request with no X-GitHub-Event
        """
        serializer = HeaderSerializer(self.request)
        result = serializer.is_valid()
        self.assertFalse(result)

    @given(st.sampled_from(('ping', 'pull_request')))
    def test_valid(self, event):
        """
        HeaderSerializer is valid with pull_request or ping
        """
        # Django exposes the X-GitHub-Event HTTP header under this META key.
        self.request.META['HTTP_X_GITHUB_EVENT'] = event
        serializer = HeaderSerializer(self.request)
        result = serializer.is_valid()
        self.assertTrue(result)
        self.assertEqual(serializer.event, event)

    @given(st.text())
    @example(' pull_request ')
    def test_invalid(self, event):
        """
        HeaderSerializer is invalid with event strings not pull_request or ping
        """
        # Hypothesis may generate the two valid values; discard those runs.
        assume(event not in ('pull_request', 'ping'))
        self.request.META['HTTP_X_GITHUB_EVENT'] = event
        serializer = HeaderSerializer(self.request)
        result = serializer.is_valid()
        self.assertFalse(result)

    def test_invalid_event_raises(self):
        """
        HeaderSerializer raises AssertionError when invalid and event is loaded
        """
        serializer = HeaderSerializer(self.request)
        with self.assertRaises(AssertionError):
            serializer.event

    @given(st.sampled_from(('ping', 'pull_request')))
    def test_request_factory_serializable(self, event):
        """
        HeaderSerializer is valid for Requests built by PayloadRequestFactory
        """
        request = PayloadRequestFactory({}, event)
        serializer = HeaderSerializer(request)
        result = serializer.is_valid()
        self.assertTrue(result, serializer.errors)
| ryankask/prlint | prlint/github/tests/serializers/test_header_serializer.py | test_header_serializer.py | py | 2,183 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.test.RequestFactory",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "serializers.HeaderSerializer",
"line_number": 21,
"usage_type": "call"
},
{
"ap... |
73508165543 | import pygame
from typing import Literal
# Init
pygame.init()
pygame.font.init()
# Window (fixed 1280x720 playfield)
display = pygame.display.set_mode((1280, 720))
# Part 3 - shapes - players
# position and shape as a rectangle
# (0, 0) is the top-left corner
#player1 = pygame.Rect(0, 0, 30, 150)
player1_img = pygame.image.load("assets/player1.png")
player1 = player1_img.get_rect()
player1_velocidade = 6
player1_score = 0
#player2 = pygame.Rect(1250, 0, 30, 150)
player2_img = pygame.image.load("assets/player2.png")
player2 = player2_img.get_rect(right=1280)
player2_score = 0
#ball = pygame.Rect(600, 350, 15, 15)
ball_img = pygame.image.load("assets/ball.png")
ball = ball_img.get_rect(center=[1280 / 2, 720 / 2])
campo_img = pygame.image.load("assets/bg.png")
campo = campo_img.get_rect()
# Ball velocity components (pixels per frame).
ball_dir_x, ball_dir_y = 6, 6
# Scoreboard
font = pygame.font.Font(None, 50)
placar_player1 = font.render(str(player1_score), True, "white")
placar_player2 = font.render(str(player2_score), True, "white")
menu_img = pygame.image.load("assets/menu.png")
menu = menu_img.get_rect()
gameover_img = pygame.image.load("assets/gameover.png")
# NOTE(review): rect taken from menu_img, not gameover_img — only correct if
# both images share the same size; verify the assets.
gameover = menu_img.get_rect()
# Full-screen black surface used for fade-in transitions between scenes.
fade_img = pygame.Surface((1280, 720)).convert_alpha()
fade = fade_img.get_rect()
fade_img.fill("black")
fade_alpha = 255
music = pygame.mixer.Sound("assets/music.ogg")
music.play(-1)
def redirect_ball(axis:Literal["x", "y"]):
    """Bounce the ball along *axis*; "x" also re-centres it horizontally.

    Bug fix: the original reassigned the module-level direction variables
    with ``*=`` but never declared them ``global``, so any call raised
    UnboundLocalError on the augmented assignment.
    """
    global ball_dir_x, ball_dir_y
    if axis == "x":
        ball.x = 600
        ball_dir_x *= -1
    if axis == "y":
        ball_dir_y *= -1
# main game loop: a tiny scene machine cycling menu -> jogo -> gameover
cena:Literal["menu", "jogo", "gameover"] = "menu"
fps = pygame.time.Clock()
loop = True
while loop:
    if cena == "jogo":
        # Part 2 - events
        for event in pygame.event.get():
            # window close (X) event
            if event.type == pygame.QUIT:
                loop = False
            if event.type == pygame.KEYDOWN:
                # S resets downward speed; each W press subtracts 6, so
                # repeated presses move the paddle up ever faster.
                if event.key == pygame.K_s:
                    player1_velocidade = 6
                if event.key == pygame.K_w:
                    player1_velocidade -= 6
                # Cheat keys: SPACE grows your paddle, 0 shrinks the opponent's.
                if event.key == pygame.K_SPACE:
                    player1.height += 50
                if event.key == pygame.K_0:
                    player2.height -= 10
        # First to 3 points ends the match.
        if player2_score >= 3 or player1_score >= 3:
            cena = "gameover"
            fade_alpha = 255
        # part 6 - collision and ball movement
        if ball.colliderect(player1) or ball.colliderect(player2):
            hit = pygame.mixer.Sound('assets/pong.wav')
            hit.play()
            ball_dir_x *= -1
        # Clamp player 1 inside the 720px-tall window (paddle is 150px).
        if player1.y <= 0:
            player1.y = 0
        elif player1.y >= 720-150:
            player1.y = 720 - 150
        player1.y += player1_velocidade
        # Scoring: ball leaving the left edge scores for player 2 and
        # vice versa; the ball is re-centred and reversed.
        if ball.x <= 0:
            player2_score += 1
            placar_player2 = font.render(str(player2_score), True, "white")
            ball.x = 600
            ball_dir_x *= -1
        elif ball.x >= 1280:
            player1_score += 1
            placar_player1 = font.render(str(player1_score), True, "white")
            ball.x = 600
            ball_dir_x *= -1
        # Bounce off the top and bottom walls.
        if ball.y <= 0:
            ball_dir_y *= -1
        elif ball.y >= 720 - 15:
            ball_dir_y *= -1
        ball.x += ball_dir_x
        ball.y += ball_dir_y
        # "AI": player 2 simply tracks the ball's vertical position.
        player2.y = ball.y - 75
        # redraw the frame
        display.fill((0, 0, 0))
        display.blit(campo_img, campo)
        display.blit(player1_img, player1)
        display.blit(player2_img, player2)
        display.blit(ball_img, ball)
        display.blit(placar_player1, (500, 50))
        display.blit(placar_player2, (780, 50))
    elif cena == "gameover":
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                loop = False
            if event.type == pygame.KEYDOWN:
                # RETURN resets the match state and goes back to the menu.
                if event.key == pygame.K_RETURN:
                    player1_score = 0
                    player2_score = 0
                    placar_player1 = font.render(str(player1_score), True, "white")
                    placar_player2 = font.render(str(player2_score), True, "white")
                    ball.x = 640
                    ball.y = 320
                    player1.y, player2.y = 0, 0
                    cena = "menu"
                    fade_alpha = 255
                if event.key == pygame.K_q:
                    loop = False
        # Fade the black overlay out over ~25 frames.
        if fade_alpha > 0:
            fade_alpha -= 10
            fade_img.set_alpha(fade_alpha)
        display.fill((0, 0, 0))
        display.blit(gameover_img, gameover)
        display.blit(fade_img, fade)
    elif cena == "menu":
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                loop = False
            if event.type == pygame.KEYDOWN:
                # RETURN starts the game with a start jingle and a fade-in.
                if event.key == pygame.K_RETURN:
                    cena = "jogo"
                    fade_alpha = 255
                    start = pygame.mixer.Sound("assets/start.wav")
                    start.play()
                if event.key == pygame.K_q:
                    loop = False
        if fade_alpha > 0:
            fade_alpha -= 10
            fade_img.set_alpha(fade_alpha)
        display.fill((0, 0, 0))
        display.blit(menu_img, menu)
        display.blit(fade_img, fade)
    fps.tick(60)
    # update the screen
    pygame.display.flip()
| Gwynbleidd203/gameli_2 | main.py | main.py | py | 5,398 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.font.init",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_mode",
... |
import sys

from PyQt5.QtWidgets import QApplication, QDesktopWidget, QMainWindow
class CenterForm(QMainWindow):
    """Main window that can centre itself on the primary screen."""

    def __init__(self):
        super(CenterForm, self).__init__()
        # Window title (kept verbatim).
        self.setWindowTitle('深度废物')
        # Initial window size.
        self.resize(400, 300)

    def center(self):
        """Move the window to the centre of the screen.

        Bug fixes: the original called ``self.QDesktopWidget()``, which is
        not an attribute of QMainWindow and raised AttributeError; it also
        used true division, producing float coordinates that QWidget.move()
        rejects.  QDesktopWidget is now imported from PyQt5.QtWidgets and
        integer division is used.
        """
        # Geometry of the desktop (primary screen).
        screen = QDesktopWidget().screenGeometry()
        # Current window geometry.
        size = self.geometry()
        # Integer coordinates: move() expects ints.
        newleft = (screen.width() - size.width()) // 2
        newtop = (screen.height() - size.height()) // 2
        self.move(newleft, newtop)
if __name__ == '__main__':
    # Standard PyQt bootstrap: build the app, show the window, enter the loop.
    app = QApplication(sys.argv)
    # app.setWindowIcon(QIcon())  # set the application icon
    main = CenterForm()
    main.show()
    sys.exit(app.exec_())
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": ... |
14509567601 | # Import necessary libraries
from dash import Dash, dcc, html
from dash.dependencies import Input, Output, State
import dash_bootstrap_components as dbc
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
import numpy as np
import datetime
from bcb import sgs
from bcb import currency
from bcb import Expectativas
# Estilo
external_stylesheets = [dbc.themes.BOOTSTRAP, 'seg-style.css']
# Font and background colors associated with each theme
text_color = {"dark": "#95969A", "light": "#595959"}
card_color = {"dark": "#2D3038", "light": "#FFFFFF"}
DIV_11_STYLE = {
'position' : 'fixed',
'top' : 100,
'left' : 288,
'bottom' : 412,
'width' : '54rem',
'background-color' : '#FFFFFF',
'display' : 'inline-block',
}
DIV_12_STYLE = {
'position' : 'fixed',
'top' : 100,
'left' : 1085,
'bottom' : 412,
'width' : '54rem',
'background-color' : '#FFFFFF',
'display' : 'inline-block',
}
DIV_21_STYLE = {
'position' : 'fixed',
'top' : 520,
'left' : 288,
'bottom' : 0,
'width' : '54rem',
'background-color' : '#FFFFFF',
'display' : 'inline-block',
}
DIV_22_STYLE = {
'position' : 'fixed',
'top' : 520,
'left' : 1085,
'bottom' : 0,
'width' : '54rem',
'background-color' : '#FFFFFF',
'display' : 'inline-block',
}
# Informacoes #
expec = Expectativas()
entidade_anuais = 'ExpectativasMercadoAnuais'
def obtendo_expectativas(indicador, entidade, respondentes):
    """Fetch BCB Focus market expectations for *indicador*.

    Restricts the query to survey answers published in the last 90 days and
    to rows with at least *respondentes* respondents, selecting only the
    columns the aggregation below needs.  Returns the collected DataFrame.
    """
    # Only keep forecasts from the last three months.
    cutoff = (datetime.date.today() - datetime.timedelta(days=90)).strftime('%Y-%m-%d')

    endpoint = expec.get_endpoint(entidade)
    query = endpoint.query().filter(
        endpoint.Indicador == indicador,
        endpoint.Data >= cutoff,
        endpoint.numeroRespondentes >= respondentes,
    ).select(
        endpoint.Data, endpoint.DataReferencia,
        endpoint.Media, endpoint.Minimo, endpoint.Maximo,
        endpoint.Indicador,
    )
    return query.collect()
# Divida Bruta do Governo #
div_bruta_total = obtendo_expectativas('Dívida bruta do governo geral', entidade_anuais, 10)
def unindo_gov(x):
    """Aggregate Focus expectations per reference year.

    Keeps only reference years 2023/2024 and averages the Media/Maximo/Minimo
    columns within each year.  Returns a long-format DataFrame with columns
    ['Anos', 'Métrica', 'Valor'] (values rounded to 2 decimals), ordered
    year-by-year as Média, Máximo, Mínimo — the layout the plotly bar charts
    below expect.

    Fixes the previous implementation, which re-filtered the frame once per
    ROW (quadratic, overwriting identical dict keys each pass), reassigned
    the frame it was iterating, and crashed on an empty selection.
    """
    anos = ['2023', '2024']
    filtrado = x[x['DataReferencia'].isin(anos)]

    # Output labels for the three aggregated columns, in display order.
    rotulos = {'Media': 'Média', 'Maximo': 'Máximo', 'Minimo': 'Mínimo'}
    registros = []
    # .unique() preserves order of first appearance, matching the original
    # dict-insertion order of the rows.
    for ano in filtrado['DataReferencia'].unique():
        por_ano = filtrado[filtrado['DataReferencia'] == ano]
        for coluna, rotulo in rotulos.items():
            registros.append({
                'Anos': ano,
                'Métrica': rotulo,
                'Valor': round(por_ano[coluna].mean(), 2),
            })
    return pd.DataFrame(registros, columns=['Anos', 'Métrica', 'Valor'])
div_bruta_df = unindo_gov(div_bruta_total)
fig_div_bruta = px.bar(div_bruta_df, x = 'Anos', y = 'Valor', color = 'Métrica', barmode = 'group',
color_discrete_sequence = ['#560699', '#080699', '#E41006'], template = 'plotly_white')
fig_div_bruta.update_layout(title = '<br><sup>Valores em Porcentagem do PIB</sup>', title_x = 0.0, xaxis_title = '', yaxis_title = '',
legend_title = 'Métricas', yaxis = dict(tick0 = 0, dtick = 10))
fig_div_bruta.update_yaxes(range = (0, 100), constrain = 'domain', ticksuffix = '%')
# Finalizando Divida Bruta do Governo #
# Dívida Líquida do Governo #
div_liquida_total = obtendo_expectativas('Dívida líquida do setor público', entidade_anuais, 10)
div_liquida_df = unindo_gov(div_liquida_total)
fig_div_liquida = px.bar(div_liquida_df, x = 'Anos', y = 'Valor', color = 'Métrica', barmode = 'group',
color_discrete_sequence = ['#560699', '#080699', '#E41006'], template = 'plotly_white')
fig_div_liquida.update_layout(title = '<br><sup>Valores em Porcentagem do PIB</sup>', title_x = 0.0, xaxis_title = '', yaxis_title = '',
legend_title = 'Métricas', yaxis = dict(tick0 = 0, dtick = 10))
fig_div_liquida.update_yaxes(range = (0, 100), constrain = 'domain', ticksuffix = '%')
# Finalizando Dívida Líquida do Governo #
# Resultado Primario #
primario_total = obtendo_expectativas('Resultado primário', entidade_anuais, 10)
primario_df = unindo_gov(primario_total)
fig_primario = px.bar(primario_df, x = 'Anos', y = 'Valor', color = 'Métrica', barmode = 'group',
color_discrete_sequence = ['#560699', '#080699', '#E41006'], template = 'plotly_white')
fig_primario.update_layout(title = '<br><sup>Valores em Porcentagem do PIB</sup>', title_x = 0.0, xaxis_title = '', yaxis_title = '',
legend_title = 'Métricas', yaxis = dict(dtick = 0.5))
fig_primario.update_yaxes(range = (-3, 1.5), constrain = 'domain', ticksuffix = '%')
# Finalizado Resultado Primario #
# Resultado Nominal #
nominal_total = obtendo_expectativas('Resultado nominal', entidade_anuais, 10)
nominal_df = unindo_gov(nominal_total)
fig_nominal = px.bar(nominal_df, x = 'Anos', y = 'Valor', color = 'Métrica', barmode = 'group',
color_discrete_sequence = ['#560699', '#080699', '#E41006'], template = 'plotly_white')
fig_nominal.update_layout(title = '<br><sup>Valores em Porcentagem do PIB</sup>', title_x = 0.0, xaxis_title = '', yaxis_title = '',
legend_title = 'Métricas', yaxis = dict(dtick = 1))
fig_nominal.update_yaxes(range = (-12, 0), constrain = 'domain', ticksuffix = '%')
# Finalizado Resultado Nominal #
# Investimento Direto no Pais #
invest_total = obtendo_expectativas('Investimento direto no país', entidade_anuais, 10)
invest_total_df = unindo_gov(invest_total)
fig_invest = px.bar(invest_total_df, x = 'Anos', y = 'Valor', color = 'Métrica', barmode = 'group',
color_discrete_sequence = ['#560699', '#080699', '#E41006'], template = 'plotly_white')
fig_invest.update_layout(title = '<br><sup>Valores em Bilhões</sup>', title_x = 0.0, xaxis_title = '', yaxis_title = '',
legend_title = 'Métricas', yaxis = dict(tick0 = 0, dtick = 10))
fig_invest.update_yaxes(range = (0, 100), constrain = 'domain', tickprefix = 'R$')
# Finalizado Investimento Direto no Pais #
# Conta Corrente #
conta_corrente = obtendo_expectativas('Conta corrente', entidade_anuais, 10)
conta_corrente_df = unindo_gov(conta_corrente)
fig_conta = px.bar(conta_corrente_df, x = 'Anos', y = 'Valor', color = 'Métrica', barmode = 'group',
color_discrete_sequence = ['#560699', '#080699', '#E41006'], template = 'plotly_white')
fig_conta.update_layout(title = '<br><sup>Valores em Bilhões</sup>', title_x = 0.0, xaxis_title = '', yaxis_title = '',
legend_title = 'Métricas')
fig_conta.update_yaxes(range = (-90, 0), constrain = 'domain', tickprefix = 'R$')
# Finalizado Conta Corrente #
# Plot das Informacoes
divida_governo = [
dbc.CardHeader('Previsões para a Dívida do Governo'),
dbc.Tabs(
[
dbc.Tab(
dbc.CardBody(
dcc.Graph(id = 'div-bruta',
figure = fig_div_bruta,
style = {'height' : 360, 'width' : 700}),
), label = 'Dívida Bruta do Governo'
),
dbc.Tab(
dbc.CardBody(
dcc.Graph(id = 'div-liquida',
figure = fig_div_liquida,
style = {'height' : 360, 'width' : 700}),
), label = 'Dívida Líquida do Setor Público'
),
]
)
]
resultado_governo = [
dbc.CardHeader('Previsões para o Resultado Primário e Nominal'),
dbc.Tabs(
[
dbc.Tab(
dbc.CardBody(
dcc.Graph(id = 'resul-prim',
figure = fig_primario,
style = {'height' : 360, 'width' : 700}),
), label = 'Resultado Primário do Governo'
),
dbc.Tab(
dbc.CardBody(
dcc.Graph(id = 'resul-nom',
figure = fig_nominal,
style = {'height' : 360, 'width' : 700}),
), label = 'Resultado Nominal do Governo'
),
]
)
]
invest_direto = [
dbc.CardHeader('Previsões do Investimento Direto no País'),
dbc.CardBody(
[
dcc.Graph(id = 'invest-prev',
figure = fig_invest,
style = {'height' : 360, 'width' : 750}),
],
),
]
conta_corr = [
dbc.CardHeader('Previsões da Conta Corrente do País'),
dbc.CardBody(
[
dcc.Graph(id = 'conta-prev',
figure = fig_conta,
style = {'height' : 360, 'width' : 750}),
],
),
]
area_informacoes = html.Div(
[
html.Div(children = [
html.Div(children =[
dbc.Card(divida_governo),
],
style = DIV_11_STYLE
),
html.Div(children = [
dbc.Card(resultado_governo),
],
style = DIV_12_STYLE
),
],
),
html.Div(children = [
html.Div(children = [
dbc.Card(invest_direto),
],
style = DIV_21_STYLE
),
html.Div(children = [
dbc.Card(conta_corr),
],
style = DIV_22_STYLE
),
],
),
],
)
# Layout
layout = html.Div(
[
area_informacoes,
]
)
| tharikf/WebApp_Cenario_Macroeconomico_Brasileiro | pages/page4.py | page4.py | py | 10,262 | python | pt | code | 1 | github-code | 36 | [
{
"api_name": "dash_bootstrap_components.themes",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "bcb.Expectativas",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 67,
"usage_type": "call"
},
{
"api_na... |
11357570925 | import json
import asyncio
import time
from nats.aio.client import Client as NATS
async def sleep():
    """Yield control for ~10 ms between publishes to pace the stream."""
    await asyncio.sleep(0.01)
async def pub_random(loop):
    """Connect to a local NATS server and publish an endless counter stream.

    Payloads are JSON objects of the form {"i": n} published on subject
    "p1.s0" roughly every 10 ms.  Runs forever; cancel the task to stop.
    """
    nc = NATS()
    await nc.connect("localhost:4222", loop=loop)
    if nc.last_error:
        print("ERROR received from NATS: ", nc.last_error)
    else:
        print('Submitting random requests')
        i = 0
        while True:
            i += 1
            jdata = {"i": i}
            # Throttle so the subscriber (and the event loop) are not flooded.
            await sleep()
            await nc.publish("p1.s0", json.dumps(jdata).encode('utf-8'))
if __name__ == '__main__':
    # Drive the publisher on the default asyncio loop until interrupted.
    event_loop = asyncio.get_event_loop()
    event_loop.run_until_complete(pub_random(event_loop))
{
"api_name": "asyncio.sleep",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "nats.aio.client.Client",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "asyncio.get_event_loop... |
42443818863 | from datetime import date
# Count, among seven birth years read from the user, how many people are
# already of age (18+) and how many are still minors, based on the current year.
maior = 0
menor = 0
for cont in range(1,8):
    ano = int(input(f'Em que ano a {cont}° pessoa nasceu? '))
    # Age is approximated by year difference only (birthdays are ignored).
    if (date.today().year - ano) >= 18:
        maior += 1
    else:
        menor += 1
print(f'Ao todo tivemos {maior} pessoas maiores de idade')
print(f'E também tivemos {menor} pessoas menores de idade.')
{
"api_name": "datetime.date.today",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 6,
"usage_type": "name"
}
] |
73603651945 | from django.contrib import admin
from django.urls import path, include
from . import views
from gcsemaths.exam_questions import exam_non_calc_views
from gcsemaths.a_number import aa_ordering_and_comparative_views, ab_ops_with_int_frac_dec_views
from gcsemaths.b_algebra import algebra_views
from gcsemaths.e_geometry_and_measure import e_geometry_and_measure_views
def _module_patterns(views_module):
    """Build one path() per view exposed by a generated views module.

    Replaces the previous eval()-on-f-string construction: getattr() resolves
    the same view callables directly, without executing built code strings —
    safer and far easier to debug.
    """
    return [path(f'{module}/', getattr(views_module, module), name=module)
            for module in views_module.modulesList()]


exam_non_calc_patterns = _module_patterns(exam_non_calc_views)

a_number_patterns = _module_patterns(aa_ordering_and_comparative_views)
a_number_patterns += _module_patterns(ab_ops_with_int_frac_dec_views)
a_number_patterns += [path('random/', views.number_random, name='number random')]

b_algebra_patterns = _module_patterns(algebra_views)
b_algebra_patterns += [path('random/', views.algebra_random, name='algebra random')]

e_geometry_patterns = _module_patterns(e_geometry_and_measure_views)
e_geometry_patterns += [path('random/', views.geometry_random, name='geometry random')]

urlpatterns = [
    path('', views.home, name="gcsemaths-home"),
    path('exam_non_calc/', views.home_exam_non_calc, name="exam_non_calc"),
    path('exam_non_calc/', include(exam_non_calc_patterns)),
    path('a_number/', views.home_number, name="number_exam_questions"),
    path('a_number/', include(a_number_patterns)),
    path('b_algebra/', views.home_algebra, name="algebra_exam_questions"),
    path('b_algebra/', include(b_algebra_patterns)),
    # NOTE(review): this name duplicates the b_algebra entry above — probably
    # meant to be "geometry_exam_questions"; kept as-is so reverse() callers
    # do not break.
    path('e_geometry/', views.home_geometry, name="algebra_exam_questions"),
    path('e_geometry/', include(e_geometry_patterns))
]
| devjolt/eqg | gcsemaths/urls.py | urls.py | py | 2,107 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "gcsemaths.exam_questions.exam_non_calc_views.modulesList",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "gcsemaths.exam_questions.exam_non_calc_views",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "gcsemaths.a_number.aa_ordering_and_comparat... |
36708187417 | """
Animate the prediction changes in a scene over training time by reading the predictions at different time steps during
the training process
Input:
- Path to a pickle file containing the points of a scene
- List of paths to pickle files containing the labels at different time steps of the training process
"""
import os
import pickle
import time
from typing import List
import pptk
files = [os.path.join('/tmp/to_visualize1562169522_19.pickle'),
os.path.join('/tmp/to_visualize1562169522_29.pickle'),
os.path.join('/tmp/to_visualize1562169522_74.pickle')]
scene_points = os.path.join('/tmp/to_visualize1562169522_74.pickle')
g_label_names = ['unannotated', 'wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',
'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet',
'sink', 'bathtub', 'otherfurniture']
g_label_colors = [
[0, 0, 0], [173, 198, 232], [151, 223, 137], [30, 120, 180], [255, 188, 119], [188, 189, 36], [140, 86, 74],
[255, 152, 151], [213, 38, 40], [197, 175, 213], [148, 103, 188], [197, 156, 148], [24, 190, 208], [247, 183, 210],
[218, 219, 141], [254, 127, 11], [158, 218, 229], [43, 160, 45], [111, 128, 144], [227, 120, 193], [82, 82, 163]
]
def normalize_colors():
    """
    Scale g_label_colors in place from 0-255 integers to the [0, 1] floats
    that the pptk viewer expects.
    :return:
    """
    # Slice assignment mutates the existing module-level list object.
    g_label_colors[:] = [
        [channel / 255.0 for channel in color] for color in g_label_colors
    ]
def animate_prediction_changes(points_path: str, files_to_visualize: List[str]):
    """
    :param points_path: Path to pickle file containing the points of a scene
    :param files_to_visualize: List of paths to pickle files containing the labels at different time steps
    of the training process
    :return:
    """
    # Read the points from the scene-pickle file
    with open(points_path, 'rb') as points_fp:
        points = pickle.load(points_fp, encoding='latin1')
    # Read the labels from the pickle files: each file holds two pickled
    # objects in sequence and the labels are the second one (first discarded).
    labels = []
    for file in files_to_visualize:
        with open(file, 'rb') as labels_fp:
            _ = pickle.load(labels_fp, encoding='latin1')
            labels.append(pickle.load(labels_fp, encoding='latin1'))
    # Animate the different labels for each list of labels: show the first
    # label set, then swap the per-point colors every 3 seconds.
    v2 = pptk.viewer(points, labels[0])
    v2.set(point_size=0.005)
    for i in range(len(labels)):
        time.sleep(3)
        # Map each integer label to its RGB color (normalized elsewhere).
        colors = list(map(lambda label: g_label_colors[label], labels[i]))
        v2.attributes(colors)
if __name__ == '__main__':
    # Colors must be normalized to [0, 1] before pptk renders them.
    normalize_colors()
    animate_prediction_changes(scene_points, files)
| tpfeifle/pointcloud-segmentation-attention | attention_points/visualization/labels_during_training.py | labels_during_training.py | py | 2,740 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 1... |
12433436430 | from datetime import datetime
"""GPS class. Responsible for retrieving GPS data given connection and returning GPS data including lat, long, and time
Attributes: mav_connection:MAVLinkConnection
"""
class GPS:
    """Tracks the vehicle position from a MAVLink GLOBAL_POSITION_INT stream.

    Attributes:
        latitude/longitude: decimal degrees of the latest fix
        altitude: altitude of the latest fix
        time: datetime at which the latest fix was received
    """

    def __init__(self, mavlink):
        """Register the position handler and zero out the current fix.

        Args:
            mavlink: MAVLinkConnection to subscribe on
        """
        self.register_handlers(mavlink)
        self.altitude = 0
        self.longitude = 0
        self.latitude = 0
        self.time = datetime.now()

    def register_handlers(self, mavlink):
        """Subscribe to GLOBAL_POSITION_INT messages on the connection.

        Args:
            mavlink: MAVLinkConnection
        """
        mavlink.push_handler('GLOBAL_POSITION_INT',
                             self.global_position_int_handler)

    def global_position_int_handler(self, mavlink, message):
        """Store the latest fix; raw lat/lon arrive as degrees * 1e7.

        Args:
            mavlink: MAVLinkConnection the message arrived on
            message: GLOBAL_POSITION_INT message
        """
        self.latitude = float(message.lat) / 10000000
        self.longitude = float(message.lon) / 10000000
        self.altitude = float(message.alt)
        self.time = datetime.now()
        self.record()

    def record(self):
        """Snapshot the current fix.

        Returns:
            GPSRecord built from the stored time/latitude/longitude/altitude.
        """
        return GPSRecord(self.time, self.latitude, self.longitude,
                         self.altitude)
class GPSRecord:
    """Value object holding one GPS fix.

    Attributes:
        time: timestamp of the fix
        latitude (float)
        longitude (float)
        altitude: altitude of the fix
    """

    def __init__(self, time, latitude, longitude, altitude):
        """Initialize a record from its four components."""
        self.time = time
        self.latitude = latitude
        self.longitude = longitude
        self.altitude = altitude

    def _key(self):
        # Single source of truth for comparisons and repr.
        return (self.time, self.latitude, self.longitude, self.altitude)

    def __repr__(self):
        return ('GPSRecord(time={!r}, latitude={!r}, longitude={!r}, '
                'altitude={!r})'.format(*self._key()))

    def __eq__(self, other):
        # Robustness fix: comparing against a non-GPSRecord previously
        # raised AttributeError; NotImplemented lets Python fall back to
        # its default (unequal) handling instead.
        if not isinstance(other, GPSRecord):
            return NotImplemented
        return self._key() == other._key()

    def __ne__(self, other):
        result = self.__eq__(other)
        return result if result is NotImplemented else not result
| shamuproject/mavimage | mavimage/gps.py | gps.py | py | 2,385 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "datetim... |
18729010139 | import spex_common.modules.omeroweb as omeroweb
from flask_restx import Namespace, Resource
from flask import request, abort, Response
from .models import responses, omero
from flask_jwt_extended import jwt_required, get_jwt_identity
from os import getenv
from urllib.parse import unquote
from spex_common.services.Utils import download_file
namespace = Namespace('Omero', description='Omero operations')
namespace.add_model(omero.omero_download_model.name, omero.omero_download_model)
namespace.add_model(omero.login_model.name, omero.login_model)
namespace.add_model(omero.login_responce.name, omero.login_responce)
namespace.add_model(responses.error_response.name, responses.error_response)
excluded_headers = [
# 'content-encoding',
# 'content-length',
# 'transfer-encoding',
# 'connection',
'set-cookie',
'authorization'
]
def _request(path, method='get', **kwargs):
    """Proxy *path* to the OMERO web server using the caller's session.

    Looks up the omeroweb client cached for the JWT identity, forwards the
    request with the given HTTP *method*, refreshes the session TTL and
    returns a Flask Response mirroring the upstream reply minus the headers
    listed in ``excluded_headers``.
    """
    current_user = get_jwt_identity()
    client = omeroweb.get(current_user['login'])
    path = unquote(path)
    if client is None:
        abort(401, 'Unauthorized')
    try:
        # Re-extend the path with the original query string by taking
        # everything in the full request URL from where *path* begins.
        # NOTE(review): assumes the unquoted path appears verbatim in
        # request.url — confirm for paths containing encoded characters.
        index = request.url.index(path) - 1
        path = request.url[index:]
    except ValueError:
        pass
    # Resolve e.g. client.get / client.post and perform the upstream call.
    method = getattr(client, method)
    response = method(path, **kwargs)
    headers = [
        (name, value)
        for (name, value) in response.raw.headers.items()
        if name.lower() not in excluded_headers
    ]
    omeroweb.update_ttl(current_user['login'])
    return Response(response.content, response.status_code, headers)
@namespace.route('/<path:path>')
class WebGateway(Resource):
    """Catch-all proxy forwarding authenticated GET/POST calls to OMERO web."""

    @namespace.doc('omero', security='Bearer')
    @namespace.response(404, 'Connect problems', responses.error_response)
    @namespace.response(401, 'Unauthorized', responses.error_response)
    @jwt_required(locations=['headers', 'cookies'])
    def get(self, path):
        # _request resolves the OMERO session from the JWT identity.
        return _request(path)

    @namespace.doc('omero', security='Bearer')
    @namespace.response(404, 'Connect problems', responses.error_response)
    @namespace.response(401, 'Unauthorized', responses.error_response)
    @jwt_required(locations=['headers', 'cookies'])
    def post(self, path):
        return _request(path, 'post')
@namespace.route('/<string:imageId>')
class DownloadImageReturnPath(Resource):
    """Downloads an OMERO image server-side and returns its local path."""

    @namespace.doc('omero/ImageDownload', security='Bearer')
    @namespace.response(404, 'Connect problems', responses.error_response)
    @namespace.response(401, 'Unauthorized', responses.error_response)
    @jwt_required()
    def get(self, imageId, format='tif'):
        """Fetch the rendered image via the caller's OMERO web session."""
        author = get_jwt_identity()['login']
        session = omeroweb.get(author)
        path = getenv('OMERO_WEB') + '/webclient/render_image_download/' + str(imageId) + '/?format=' + format
        relativePath = download_file(path, client=session, imgId=imageId)
        if relativePath is not None:
            return {'success': True, 'path': relativePath}, 200
        # Bug fix: previously fell through returning None, which Flask turns
        # into a 500; now the failure is reported explicitly.
        return {'success': False, 'error': 'image could not be downloaded'}, 404
| Genentech/spex_backend | routes/api/v1/omero.py | omero.py | py | 2,937 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask_restx.Namespace",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "models.omero.omero_download_model",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "models.omero",
"line_number": 12,
"usage_type": "name"
},
{
"api_nam... |
16743362426 | from aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup
def settings():
    """Top-level control panel keyboard: utilities row, then power row."""
    markup = InlineKeyboardMarkup()
    # Row 1: media/display utilities.
    markup.add(
        InlineKeyboardButton(text='🔆 Brightness', callback_data='set-br'),
        InlineKeyboardButton(text='🔊 Volume', callback_data='set-vol'),
        InlineKeyboardButton(text="📺 Screenshot", callback_data='screenshot'),
    )
    # Row 2: power/session actions.
    markup.add(
        InlineKeyboardButton(text='🔽 Shutdown', callback_data='shutdown'),
        InlineKeyboardButton(text='🔄 Restart', callback_data='restart'),
        InlineKeyboardButton(text='🔒 Lock', callback_data='lock'),
    )
    return markup
def brightness():
    """Keyboard with 10%-step brightness presets plus a Home button."""
    markup = InlineKeyboardMarkup()
    level_buttons = [
        InlineKeyboardButton(f"▫️ {level}", callback_data=f"{level}-brightness")
        for level in range(10, 101, 10)
    ]
    markup.add(*level_buttons)
    markup.add(InlineKeyboardButton(text="🏘 Home", callback_data="home"))
    return markup
def volume():
    """Keyboard with 10%-step volume presets, a Mute button and a Home button."""
    buttons = []
    lvlvs = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
    markup = InlineKeyboardMarkup()
    for item in lvlvs:
        button_text = f"▫️ {item}"
        button = InlineKeyboardButton(button_text, callback_data=f"{item}-volume")
        buttons.append(button)
    # Fix: was `mute = button = InlineKeyboardButton(...)`, a leftover double
    # assignment that pointlessly rebound the loop variable.
    mute = InlineKeyboardButton("🔇 Mute", callback_data="mute-volume")
    markup.add(*buttons)
    home = InlineKeyboardButton(text="🏘 Home",callback_data="home")
    markup.add(mute ,home)
    return markup
def confirmation(what):
    """Yes/Cancel keyboard; the Yes button carries the pending action name."""
    keyboard = InlineKeyboardMarkup()
    keyboard.add(
        InlineKeyboardButton(text='☑️ Yes ', callback_data=f'{what}-yes'),
        InlineKeyboardButton(text='🚫 Cancel', callback_data='close'),
    )
    keyboard.add(InlineKeyboardButton(text="🏘 Home", callback_data="home"))
    return keyboard
def enter_or_delete(what):
    """Yes/Delete keyboard; the Yes button carries the pending action name."""
    keyboard = InlineKeyboardMarkup()
    keyboard.add(
        InlineKeyboardButton(text='☑️ Yes ', callback_data=f'{what}-yes'),
        InlineKeyboardButton(text='🚫 Delete', callback_data='delete'),
    )
    keyboard.add(InlineKeyboardButton(text="🏘 Home", callback_data="home"))
    return keyboard
{
"api_name": "aiogram.types.InlineKeyboardMarkup",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "aiogram.types.InlineKeyboardButton",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "aiogram.types.InlineKeyboardButton",
"line_number": 6,
"usage_type": ... |
31309873828 | import sys
from ply import lex
from ply.lex import TOKEN
class Lexer:
    """PLY-based lexer for the toy language.

    Token rules live on the class as ``t_*`` attributes/methods so that
    ``lex.lex(object=self)`` can discover them by reflection.
    """
    def __init__(self, error_func):
        # error_func(msg, line, column) is invoked on illegal input.
        self.error_func = error_func
    def run(self):
        """Build the underlying PLY lexer from this object's t_* rules."""
        self.lexer = lex.lex(object=self)
    def reset_lineno(self):
        """Reset the line counter (e.g. before lexing a new input)."""
        self.lexer.lineno = 1
    def input(self, text):
        """Feed *text* to the lexer."""
        self.lexer.input(text)
    def find_tok_column(self, token):
        """Return the 1-based column of *token* within its line."""
        last_cr = self.lexer.lexdata.rfind('\n', 0, token.lexpos)
        return token.lexpos - last_cr
    def _error(self, msg, token):
        # Report through the user callback, then skip one character so that
        # lexing can continue past the bad input.
        location = self._make_tok_location(token)
        self.error_func(msg, location[0], location[1])
        self.lexer.skip(1)
    def _make_tok_location(self, token):
        # (line, column) pair used for error reporting.
        return (token.lineno, self.find_tok_column(token))
    # Keyword tokens; each keyword maps to a token of the same name.
    tok = (
        'IF', 'WHILE', 'OR', 'INC', 'DEC', 'TRUE', 'FALSE', 'ELSE',
        'NOT', 'DO', 'UINT', 'BOOLEAN', 'CUINT', 'CBOOLEAN', 'FUNCTION',
        'DARRAYOFUINT_1', 'DARRAYOFBOOLEAN_1', 'SIZE1', 'EXTEND1',
        'DARRAYOFUINT_2', 'DARRAYOFBOOLEAN_2', 'SIZE2', 'EXTEND2',
        'FORW', 'BACK','RIGHT','LEFT','GETF','GETB','GETR','GETL',
        'PUSHF', 'PUSHB', 'PUSHR', 'PUSHL', 'UNDO'
    )
    keyword_map = {}
    for keyword in tok:
        keyword_map[keyword] = keyword
    # Full token list: keywords plus punctuation/operator/literal tokens.
    tokens = tok + (
        'MINUS', 'PLUS',
        'LPAREN', 'RPAREN', # ( )
        'LBRACKET', 'RBRACKET', # [ ], .
        'LBRACE', 'RBRACE', # { }
        'COMMA',
        'ID', 'NEWLINE', 'DIGIT',
        'LT', 'GT',
        'EQUALS',
        'EQUAL',
        'SEMICOLON',
    )
    identifier = r'[a-zA-Z_][0-9a-zA-Z_]*'
    t_ignore = ' \t'
    newline = r'\n+'
    @TOKEN(newline)
    def t_NEWLINE(self, t):
        # Track line numbers; the NEWLINE token itself is also returned.
        t.lexer.lineno += t.value.count("\n")
        return t
    # Simple single-regex rules. PLY sorts string rules by decreasing regex
    # length, so '==' (EQUAL) is tried before '=' (EQUALS).
    t_DIGIT = r'0|([1-9][0-9]*)'
    t_LT = r'<'
    t_GT = r'>'
    t_PLUS = r'\+'
    t_MINUS = r'\-'
    t_EQUALS = r'='
    t_EQUAL = r'=='
    t_LPAREN = r'\('
    t_RPAREN = r'\)'
    t_LBRACKET = r'\['
    t_RBRACKET = r'\]'
    t_LBRACE = r'\{'
    t_RBRACE = r'\}'
    t_COMMA = r','
    t_SEMICOLON = r';'
    @TOKEN(identifier)
    def t_ID(self, t):
        # Reclassify identifiers that are actually keywords.
        t.type = self.keyword_map.get(t.value, "ID")
        return t
    def t_error(self, t):
        msg = 'Illegal character %s' % repr(t.value[0])
        self._error(msg, t)
| EmilGrigoryan/Interpreter | lexer.py | lexer.py | py | 2,505 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ply.lex.lex",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "ply.lex",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "ply.lex.TOKEN",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "ply.lex.TOKEN",
"line_number": ... |
69980575463 | import re
from bs4 import BeautifulSoup
def parse(html):
    """Extract a recipe (title, yield, ingredients, instructions) from an
    Epicurious recipe page given its raw HTML."""
    soup = BeautifulSoup(html, "html5lib")
    heading = soup.find(attrs={'data-testid': "ContentHeaderHed"}).get_text()
    # The yield is a sibling of the ingredient list, so grab the wrapper
    # element first and drill down from there.
    wrapper = soup.find(attrs={'data-testid': "IngredientList"})
    servings = wrapper.find(class_=re.compile('Yield-')).get_text()
    item_tags = wrapper.find(class_=re.compile('List-'))\
        .find_all(class_=re.compile('Description-'))
    step_tags = soup.find(class_=re.compile('InstructionGroupWrapper-'))\
        .find_all(class_=re.compile('InstructionBody-'))
    return {
        'title': heading,
        'yield': servings,
        'ingredients': [tag.get_text() for tag in item_tags],
        'instructions': [tag.get_text() for tag in step_tags],
    }
| GarrettGeorge/Destijl | parsers/epicurious.py | epicurious.py | py | 1,080 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "bs4.BeautifulSoup",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number"... |
34623870443 | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from minisom import MiniSom
from matplotlib.gridspec import GridSpec
import time
from pylab import plot, axis, show, pcolor, colorbar, bone
def load_data():
    """Load precomputed document feature vectors and their category labels.

    Reads 'feature_vector.npy' (one row per article) and 'bbc-text.csv',
    mapping the five BBC category names onto integer codes 0-4.

    Returns:
        (data, label): the feature matrix and the list of (encoded) labels
        taken from the first CSV column.
    """
    data = np.load('feature_vector.npy')
    df = pd.read_csv('bbc-text.csv')
    # Encode the category names as stable integer codes.
    df.loc[df['category'] == 'business', 'category'] = 0
    df.loc[df['category'] == 'entertainment', 'category'] = 1
    df.loc[df['category'] == 'politics', 'category'] = 2
    df.loc[df['category'] == 'sport', 'category'] = 3
    df.loc[df['category'] == 'tech', 'category'] = 4
    label = list(df.iloc[:, 0])
    # NOTE: the original computed np.unique(label, return_counts=True) here
    # and discarded the result; that dead computation has been removed.
    return data, label
def on_center(data, label, k):
    """Train a k x k SOM on the feature vectors and report mapping quality.

    Prints the hit matrix, plots per-neuron hit counts and majority
    categories, and prints the summed Euclidean distance of every sample to
    its winning neuron's weight vector (lower = tighter mapping).
    """
    som = MiniSom(x=k, y=k, input_len=data.shape[1], sigma=1.0, learning_rate=0.2, neighborhood_function='gaussian',
                  topology='rectangular', activation_distance='euclidean')
    som.random_weights_init(data)
    print('training...')
    som.train_random(data, num_iteration=1000)
    print('...ready!')
    winners = []             # winning-neuron coordinate per sample
    matrix = np.zeros((k, k))    # hit counts per neuron
    category_dic = {}        # (row, col) -> labels of samples mapped there
    for i, x in enumerate(data):
        win = som.winner(x)
        winners.append(win)
        matrix[win[0], win[1]] += 1
        if (win[0], win[1]) not in category_dic.keys():
            category_dic[(win[0], win[1])] = []
        category_dic[(win[0], win[1])].append(label[i])
    plot_category(category_dic, k)
    print(f'SOM hits for {k}x{k} topologies: \n', matrix)
    plot_hits(matrix, k)
    # Euclidean distance of each sample to its winning neuron's weights.
    weight = som.get_weights()
    w_win = np.zeros((data.shape))
    for j in range(data.shape[0]):
        w_win[j] = weight[winners[j][0], winners[j][1]]
    dis = np.linalg.norm(data - w_win, axis=1)
    sum_dis = np.sum(dis)
    print('Sum of the nearlablity of each data to winning neuron: ', sum_dis)
def plot_hits(matrix, k):
    """Draw a k x k heat map of SOM hit counts, annotating every cell with
    its count."""
    figure, axes = plt.subplots()
    axes.imshow(matrix, cmap='PuBu')
    for row in range(k):
        for col in range(k):
            axes.text(col, row, matrix[row, col], ha='center', va='center', color='k')
    figure.tight_layout()
    axes.set_title(f'SOM Hits Plot {k}x{k}')
    plt.show()
def plot_category(category_dic, k):
    """Show the majority category of each SOM cell as a colored marker.

    category_dic maps a winning-neuron coordinate (row, col) to the list of
    labels of the samples mapped onto that neuron; k is the map side length.
    """
    marker = ['♚', '✣', '♠', '♣', '★']
    colors = ['red', 'blue', 'orangered', 'lime', 'yellow']
    m_matrix = np.zeros((k, k), int)
    for d in category_dic.keys():
        val, count = np.unique(category_dic[d], return_counts=True)
        # Majority vote: the most frequent label wins the cell.
        m_matrix[d[0], d[1]] = val[np.argmax(count)]
    print('similarity matrix for show categories:\n', m_matrix)
    fig, ax = plt.subplots()
    ax.imshow(m_matrix, cmap='Blues')
    for i in range(k):
        for j in range(k):
            ax.text(j, i, marker[m_matrix[i, j]], ha='center', va='center', color=colors[m_matrix[i, j]],
                    fontsize=20)
    fig.tight_layout()
    ax.set_title(f'SOM Nearlably Category Plot {k}x{k}')
    # Consistency fix: use plt.show() like plot_hits does; the original
    # called the bare pylab alias show().
    plt.show()
def _neuron(data, y):
    """Run the SOM experiment for each topology size and report timings."""
    for side in (3, 4, 5):
        start_time = time.time()
        on_center(data, y, side)
        end_time = time.time()
        print(f'Time for topology {side} :{end_time - start_time} \n')
        print('--------------------------------------------')
# Script entry point: load the BBC-text feature vectors and labels, then run
# the SOM experiment over the 3x3, 4x4 and 5x5 topologies.
if __name__ == '__main__':
    data, y = load_data()
    _neuron(data, y)
| elahesalari/Self-Organizing-Map-SOM | SOM-on-center library.py | SOM-on-center library.py | py | 3,545 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "minisom.MiniSom",
"line_n... |
7804091778 | from scrapy import signals
from .logger import logger as lg
from time import sleep
from datetime import datetime as dt
from scrapy.http import HtmlResponse
from scrapy.utils.python import to_bytes
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from .pipelines import Afreecacreators
import requests, re
from .configController import ConfigController
import os, subprocess
chatAllData = []
class SeleniumMiddleware(object):
    """Scrapy downloader middleware that drives a logged-in headless Chrome
    session to scrape AfreecaTV live-chat messages for a creator.

    The browser is opened/closed with the spider (see ``from_crawler``);
    ``process_request`` polls the chat DOM until the broadcast ends, then
    returns the final page as an HtmlResponse.
    """
    @classmethod
    def from_crawler(cls, crawler):
        # Tie the browser's lifetime to the spider via Scrapy signals.
        middleware = cls()
        crawler.signals.connect(
            middleware.spider_opened, signals.spider_opened)
        crawler.signals.connect(
            middleware.spider_closed, signals.spider_closed)
        return middleware
    def spider_opened(self, spider):
        """Start a headless Chrome session and log in to AfreecaTV with the
        credentials loaded from the config."""
        self.config = ConfigController()
        self.config.load()
        CHROMEDRIVER_PATH = r'C:\Users\WHILETRUESECOND\Desktop\tp-mvp\collectors\afreecatv\crawler\crawler\drivers\chromedriver.exe'
        WINDOW_SIZE = "1920, 1080"
        chrome_options = Options()
        chrome_options.add_argument("--headless") # run without opening a Chrome window
        chrome_options.add_argument("--no-sandbox") # needed where no GUI is available (Linux, Docker, ...)
        chrome_options.add_argument("--disable-gpu") # needed where no GUI is available (Linux, Docker, ...)
        chrome_options.add_argument(f"--window-size={ WINDOW_SIZE }")
        chrome_options.add_argument("--mute-audio") # mute the browser
        # Preference tweaks to improve selenium performance (disable images,
        # plugins, popups, notifications, media devices, etc.).
        prefs = {'profile.default_content_setting_values':
                    {'cookies' : 2, 'images': 2, 'plugins' : 2, 'popups': 2, 'geolocation': 2, 'notifications' : 2,
                    'auto_select_certificate': 2, 'fullscreen' : 2, 'mouselock' : 2, 'mixed_script': 2,
                    'media_stream' : 2, 'media_stream_mic' : 2, 'media_stream_camera': 2, 'protocol_handlers' : 2,
                    'ppapi_broker' : 2, 'automatic_downloads': 2, 'midi_sysex' : 2, 'push_messaging' : 2,
                    'ssl_cert_decisions': 2, 'metro_switch_to_desktop' : 2, 'protected_media_identifier': 2,
                    'app_banner': 2, 'site_engagement' : 2, 'durable_storage' : 2}}
        chrome_options.add_experimental_option('prefs', prefs)
        self.driver = webdriver.Chrome( executable_path=CHROMEDRIVER_PATH, chrome_options=chrome_options )
        self.driver.get("https://login.afreecatv.com/afreeca/login.php?szFrom=full&request_uri=http%3A%2F%2Fafreecatv.com%2F")
        sleep(3)
        elem_login = self.driver.find_element_by_id("uid")
        elem_login.clear()
        elem_login.send_keys(self.config.AFREECA_ID)
        elem_login = self.driver.find_element_by_id("password")
        elem_login.clear()
        elem_login.send_keys(self.config.AFREECA_PASSWORD, Keys.ENTER)
        lg.info('크롬브라우저로 아프리카 로그인')
        sleep(3)
    def process_request( self, request, spider ):
        """Open the creator's live page and poll chat messages into the
        module-level chatAllData list until the broadcast ends."""
        self.driver.get( request.url )
        sleep(3)
        self.driver.find_element_by_xpath('//*[@id="stop_screen"]/dl/dd[2]/a').click()
        # Slice extracts the creator id from the play URL — assumes the URL
        # shape 'http://play.afreecatv.com/<id>/'; TODO confirm.
        self.creatorId = request.url[26:-1]
        # Initial values for the chat-scraping control variables.
        chatNum = 1
        liveEndPoint = 0 # becomes non-zero once the broadcast has ended
        tryTime = 0 # number of chat wait attempts before a refresh
        def liveEndCheck(self, chatNum, tryTime, liveEndPoint):
            # Wait for the next chat element; on timeout, classify why
            # (broadcast ended / blinded / adults-only / password / flaky).
            response = requests.get(request.url)
            title = re.search('"twitter:title" content=".*"', response.text, re.I)
            getTitle = title.group()[-12:-2]
            try:
                next_chating_present = EC.presence_of_element_located((By.ID, f'{chatNum+1}'))
                WebDriverWait(self.driver, 10, poll_frequency=0.5).until(next_chating_present)
            except:
                if getTitle == '방송중이지 않습니다':
                    lg.info(f'{self.creatorId} 방송 종료')
                    liveEndPoint = liveEndPoint + 1
                    return liveEndPoint
                elif self.driver.find_element_by_xpath('//*[@id="afreecatv_player"]/div[12]/div/div/div[8]').get_attribute("style") == '':
                    lg.warning(f'{self.creatorId} 블라인드 처리')
                    liveEndPoint = liveEndPoint + 1
                    return liveEndPoint
                elif self.driver.find_element_by_xpath('//*[@id="afreecatv_player"]/div[12]/div/div/div[3]').get_attribute("style") == '':
                    lg.warning(f'{self.creatorId} 19세 방송중')
                    try:
                        self.driver.find_element_by_xpath('//*[@id="afreecatv_player"]/div[12]/div/div/div[3]/div/button[1]').click()
                    except:
                        pass
                    tryTime = tryTime + 1
                    if tryTime == 13:
                        lg.warning(f'{self.creatorId}님의 19세 방송중 상태 불안정에 따른 새로고침을 실시: 2분 채팅대기')
                        self.driver.refresh()
                        sleep(2)
                        try:
                            self.driver.find_element_by_xpath('//*[@id="stop_screen"]/dl/dd[2]/a').click()
                        except:
                            pass
                        chatNum = 1
                        tryTime = 0
                    # NOTE(review): the recursive result is discarded here, so
                    # this branch implicitly returns None — confirm intended.
                    liveEndCheck(self, chatNum, tryTime, liveEndPoint)
                elif self.driver.find_element_by_xpath('//*[@id="afreecatv_player"]/div[12]/div/div/div[7]').get_attribute("style") == '':
                    lg.warning(f'{self.creatorId} 비밀번호 설정')
                    liveEndPoint = liveEndPoint + 1
                    return liveEndPoint
                else:
                    tryTime = tryTime + 1
                    if tryTime == 13:
                        lg.warning(f'{self.creatorId}님의 방송 상태 불안정에 따른 새로고침을 실시: 2분 채팅대기')
                        self.driver.refresh()
                        sleep(2)
                        try:
                            self.driver.find_element_by_xpath('//*[@id="stop_screen"]/dl/dd[2]/a').click()
                        except:
                            pass
                        chatNum = 1
                        tryTime = 0
                    # NOTE(review): recursive result discarded — see above.
                    liveEndCheck(self, chatNum, tryTime, liveEndPoint)
        while True:
            # Stop once liveEndCheck reports the broadcast has ended.
            if liveEndCheck(self, chatNum, tryTime, liveEndPoint) == 1:
                break
            chatEachData = {}
            atTime = dt.now().strftime('%Y-%m-%d %H:%M:%S')
            chatIdNum = f'//*[@id=\"{chatNum}\"]'
            user = f'//*[@id=\"{chatNum}\"]/preceding-sibling::dt'
            userId = f'//*[@id=\"{chatNum}\"]/preceding-sibling::dt/a'
            try:
                # Scrape the chat line plus broadcast metadata at this moment.
                chatEachData['userId'] = self.driver.find_element_by_xpath(userId).get_attribute("user_id")
                chatEachData['is_mobile'] = self.driver.find_element_by_xpath(userId).get_attribute("is_mobile")
                chatEachData['category'] = self.driver.find_element_by_xpath('//*[@id="player_area"]/div[2]/div[2]/ul/li[4]/span').text
                chatEachData['videoTitle'] = self.driver.find_element_by_xpath('//*[@id="player_area"]/div[2]/div[2]/div[4]/span').text
                chatEachData['like'] = self.driver.find_element_by_xpath('//*[@id="player_area"]/div[2]/div[2]/div[6]/ul/li[1]/span').text
                chatEachData['bookmark'] = self.driver.find_element_by_xpath('//*[@id="player_area"]/div[2]/div[2]/div[6]/ul/li[2]/span').text
                chatEachData['viewer'] = self.driver.find_element_by_xpath('//*[@id="nAllViewer"]').text
                chatEachData['grade'] = self.driver.find_element_by_xpath(user).get_attribute("class").split('_')[0]
                chatEachData['sex'] = self.driver.find_element_by_xpath(user).get_attribute("class").split('_')[1]
                chatEachData['text'] = self.driver.find_element_by_xpath(chatIdNum).text
                chatEachData['creatorId'] = self.creatorId
                chatEachData['chattime'] = atTime
                chatAllData.append(chatEachData)
            except:
                lg.warning(f'{self.creatorId}님 방송의 채팅량이 많아 다시 주기를 갱신')
                self.driver.refresh()
                sleep(2)
                try:
                    self.driver.find_element_by_xpath('//*[@id="stop_screen"]/dl/dd[2]/a').click()
                except:
                    pass
                chatNum = 1
            chatNum = chatNum + 1
        body = to_bytes( text=self.driver.page_source )
        return HtmlResponse( url=request.url, body=body, encoding='utf-8', request=request )
    def spider_closed(self, spider):
        """Mark the creator offline and shut the browser down."""
        lg.info(f'{self.creatorId} 타겟방송 크롬브라우저 종료 및 프로세스 킬')
        afreecaCreator = Afreecacreators()
        afreecaCreator.updateLiveCreator([self.creatorId],'turn-off')
        self.driver.close()
        self.driver.quit()
| uniqon-truepoint/afreecatvCrawler | afreecatv/crawler/crawler/middlewares.py | middlewares.py | py | 9,541 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "scrapy.signals.spider_opened",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "scrapy.signals",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "scrapy.signals.spider_closed",
"line_number": 28,
"usage_type": "attribute"
},
{
... |
6789009521 | from rest_framework import serializers
from django.conf import settings
from django.contrib.auth import get_user_model
from actstream import action
from utils.mail import handlers
from custom_auth.helpers import UserProfileWrapper
class RequestContactSerializer(serializers.Serializer):
    """Validates a "request contact" action from one user towards another.

    On ``create`` it publishes an activity-stream action and emails the
    target user with the requester's contact details.
    """
    # Optional free-text message shown to the contacted user.
    comment = serializers.CharField(
        required=False,
        allow_null=True,
        allow_blank=True)
    # The user being contacted; only active users are valid targets.
    user = serializers.PrimaryKeyRelatedField(
        queryset=get_user_model().objects.filter(
            is_active=True))
    def create(self, validated_data):
        """Record the contact-request action and notify the target by mail.

        Expects ``user_from`` (the requester) in the serializer context.
        Returns ``validated_data`` unchanged.
        """
        user_from = self.context.get('user_from')
        user_to = validated_data.get('user')
        data = {
            'verb': settings.USER_PROFILE_ACTION_REQUEST_CONTACT,
            'action_object': user_to,
            'description': validated_data.get('comment'),
        }
        # Publish to the activity stream (django-activity-stream).
        action.send(
            user_from,
            **data
        )
        handlers.mail_handler.send_mail(
            'request_user_contact',
            recipients=[user_to.email],
            short_name=user_to.short_name,
            requester_name=user_from.get_full_name(),
            requester_short_name=user_from.short_name,
            requester_email=user_from.email,
            user_title=user_from.user_title,
            requester_profile_picture=user_from.profile_picture.get_thumbnail_url(),
            requester_profile_url=UserProfileWrapper(user_from).profile_slug_url,
            comment=validated_data.get('comment'),
        )
        return validated_data
| tomasgarzon/exo-services | service-exo-core/custom_auth/api/serializers/request_contact.py | request_contact.py | py | 1,545 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.serializers.Serializer",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.CharField",
"line_number": 14,
"usage_ty... |
29981226502 | from __future__ import absolute_import, print_function
import os.path
import collections
from . import hub, protocols, error, reader, http_ffi, logging, compat
from .hub import switchpoint
from .util import objref, docfrom
from ._version import __version__
try:
from urllib.parse import urlsplit
except ImportError:
from urlparse import urlsplit
__all__ = ['HttpError', 'HttpClient', 'HttpServer', 'HttpResponse',
'geturlinfo']
# The "Hop by Hop" headers as defined in RFC 2616. These may not be set by the
# WSGI application.
hop_by_hop = frozenset(('Connection', 'Keep-Alive', 'Proxy-Authenticate',
'Proxy-Authorization', 'TE', 'Trailers',
'Transfer-Encoding', 'Upgrade'))
def geturlinfo(url):
    """Return connection information for a url.

    The *url* parameter must be a string.

    The return value is a (host, port, ssl, path) tuple. The port defaults
    to 443 for https and 80 otherwise when the URL has no explicit port.
    """
    parsed = urlsplit(url)
    try:
        # BUG FIX: this branch bound the misspelled name "hort", so any URL
        # with an explicit port raised NameError when "host" was returned.
        host, port = parsed.netloc.split(':')
        port = int(port)
    except ValueError:
        host = parsed.netloc
        port = 443 if parsed.scheme == 'https' else 80
    ssl = parsed.scheme == 'https'
    # NOTE(review): the query is appended without a '?' separator; preserved
    # as-is since callers may depend on it — confirm intent.
    path = (parsed.path + parsed.query) or '/'
    return (host, port, ssl, path)
def _s2b(s):
"""Convert a string *s* to bytes in the ISO-8859-1 encoding.
ISO-8859-1 is the default encoding used in HTTP.
"""
if type(s) is not bytes:
s = s.encode('iso-8859-1')
return s
def get_header(headers, name, default=None):
    """Case-insensitively look up *name* in a (name, value) header list;
    return *default* when absent."""
    wanted = name.lower()
    matches = (entry[1] for entry in headers if entry[0].lower() == wanted)
    return next(matches, default)
def create_chunk(buf):
    """Wrap *buf* in a chunk for the HTTP "chunked" transfer encoding:
    hex length line, payload, CRLF."""
    out = bytearray()
    out += _s2b('{0:X}\r\n'.format(len(buf)))
    out += buf
    out += b'\r\n'
    return out
def last_chunk(trailers):
    """Build the terminating zero-length chunk, including any trailers."""
    out = bytearray(b'0\r\n')
    for name, value in trailers:
        out += _s2b('{0}: {1}\r\n'.format(name, value))
    out += b'\r\n'
    return out
def create_request(method, url, headers):
    """Serialize an HTTP/1.1 request line plus headers (no body)."""
    lines = ['{0} {1} HTTP/1.1\r\n'.format(method, url)]
    lines.extend('{0}: {1}\r\n'.format(name, value) for name, value in headers)
    lines.append('\r\n')
    message = bytearray()
    for line in lines:
        message.extend(_s2b(line))
    return message
def create_response(version, status, headers):
    """Serialize an HTTP status line plus headers (no body).

    *version* is a (major, minor) tuple; *status* the full status string.
    """
    message = bytearray(_s2b('HTTP/{0[0]}.{0[1]} {1}\r\n'.format(version, status)))
    for name, value in headers:
        message.extend(_s2b('{0}: {1}\r\n'.format(name, value)))
    message.extend(b'\r\n')
    return message
def _ba2str(ba):
    """Convert a byte-array to the native "str" type (decoding on Py3)."""
    return ba.decode('iso-8859-1') if compat.PY3 else bytes(ba)
def _cp2str(cd):
    """Convert a cffi cdata('char *') to a str."""
    raw = http_ffi.ffi.string(cd)
    return raw.decode('iso-8859-1') if compat.PY3 else raw
class HttpError(error.Error):
    """Exception that is raised in case of HTTP protocol errors.

    Used as the ``_exception`` type of both :class:`HttpClient` and
    :class:`HttpServer`.
    """
class HttpMessage(object):
    """A HTTP message (request or response). Used by the parser.

    Attributes are filled in incrementally by :class:`HttpParser` callbacks
    as the message is parsed off the wire.
    """
    def __init__(self):
        self.message_type = None      # HttpParser.HTTP_REQUEST or HTTP_RESPONSE
        self.version = None           # (major, minor) tuple
        self.status_code = None       # responses only
        self.method = None            # requests only
        self.url = None               # requests only: raw request target
        self.is_upgrade = None
        self.should_keep_alive = None
        self.parsed_url = None        # [path, query], see HttpParser._parse_url
        self.headers = []             # list of (name, value) pairs
        self.trailers = []            # chunked-encoding trailers, if any
        self.body = reader.Reader()   # streaming message body
    def __len__(self):
        # Use a fixed header size of 400. This is for flow control purposes
        # only, this does not need to be exact.
        return 400 + self.body._buffer_size
    def get_wsgi_environ(self):
        """Return a WSGI environment dictionary for the current message."""
        if self.message_type != HttpParser.HTTP_REQUEST:
            raise ValueError('expecting an HTTP request')
        env = {}
        env['REQUEST_METHOD'] = self.method
        env['SERVER_PROTOCOL'] = 'HTTP/{0}.{1}'.format(*self.version)
        env['REQUEST_URI'] = self.url
        env['SCRIPT_NAME'] = ''
        env['PATH_INFO'] = self.parsed_url[0]
        env['QUERY_STRING'] = self.parsed_url[1]
        for field,value in self.headers:
            # Content-Length/Content-Type get dedicated keys per the WSGI
            # spec; everything else is exposed as an HTTP_* key.
            if field.title() == 'Content-Length':
                env['CONTENT_LENGTH'] = value
            elif field.title() == 'Content-Type':
                env['CONTENT_TYPE'] = value
            else:
                env['HTTP_{0}'.format(field.upper().replace('-', '_'))] = value
        env['wsgi.input'] = self.body
        return env
class HttpResponse(object):
    """An HTTP response as returned by :meth:`HttpClient.getresponse`."""
    def __init__(self, message):
        # *message* is the HttpMessage produced by the response parser.
        self._message = message
    @property
    def version(self):
        """The HTTP version as a (major, minor) tuple."""
        return self._message.version
    @property
    def status(self):
        """The HTTP status code, as an integer."""
        return self._message.status_code
    @property
    def headers(self):
        """The response headers, as a list of (name, value) pairs."""
        return self._message.headers
    @property
    def trailers(self):
        """The response trailers, as a list of (name, value) pairs.
        The trailers will only be available after the entire response has been
        read. Most servers do not generate trailers.
        """
        return self._message.trailers
    def get_header(self, name, default=None):
        """Return one HTTP header *name*. If no such header exists, *default*
        is returned."""
        return get_header(self._message.headers, name, default)
    @switchpoint
    @docfrom(reader.Reader.read)
    def read(self, size=None):
        return self._message.body.read(size)
    @switchpoint
    @docfrom(reader.Reader.readline)
    def readline(self, limit=-1):
        return self._message.body.readline(limit)
    @switchpoint
    @docfrom(reader.Reader.readlines)
    def readlines(self, hint=-1):
        return self._message.body.readlines(hint)
    def __iter__(self):
        # BUG FIX: this was a @property returning ``self._mesasge.body.__iter__``
        # -- the misspelled attribute raised AttributeError as soon as the
        # response was iterated. Delegate to the body's iterator instead.
        return iter(self._message.body)
class HttpParser(protocols.Parser):
    """A HTTP parser.

    Thin object-oriented wrapper around the C "http-parser" library exposed
    via cffi (``http_ffi``). Parsed messages are assembled into
    :class:`HttpMessage` instances by the ``_on_*`` callbacks.
    """
    # States for reassembling header names/values that may arrive split
    # across multiple C callbacks.
    s_header_field, s_header_value = range(2)
    HTTP_REQUEST = http_ffi.lib.HTTP_REQUEST
    HTTP_RESPONSE = http_ffi.lib.HTTP_RESPONSE
    HTTP_BOTH = http_ffi.lib.HTTP_BOTH
    def __init__(self, kind=None):
        # *kind* selects request parsing, response parsing, or both.
        super(HttpParser, self).__init__()
        if kind is None:
            kind = self.HTTP_BOTH
        self._kind = kind
        self._parser = http_ffi.ffi.new('http_parser *')
        http_ffi.lib.http_parser_init(self._parser, self._kind)
        self._setup_callbacks()
        self._requests = collections.deque()
    @property
    def requests(self):
        # Methods of sent requests whose responses are still outstanding.
        return self._requests
    def push_request(self, method):
        """Record the *method* of a sent request (response parsers only).

        Needed so that responses to HEAD requests can be parsed correctly
        (see ``_on_headers_complete``).
        """
        if self._kind == self.HTTP_REQUEST:
            raise RuntimeError('push_request() is for response parsers only')
        self._requests.append(method)
    def feed(self, s):
        """Feed *s* to the C parser; raises ValueError on a parse error."""
        nbytes = http_ffi.lib.http_parser_execute(self._parser, self._settings,
                                                  s, len(s))
        self.bytes_parsed = nbytes
        if nbytes != len(s):
            errno = http_ffi.lib.http_errno(self._parser)
            errname = _cp2str(http_ffi.lib.http_errno_name(errno))
            raise ValueError('http-parser error {0} ({1})'
                             .format(errno, errname))
        return nbytes
    def is_partial(self):
        # NOTE(review): this wraps http_body_is_final(), whose sense looks
        # inverted relative to the method name -- confirm against callers.
        return http_ffi.lib.http_body_is_final(self._parser)
    def _setup_callbacks(self):
        # Bind each _on_* method as a cffi callback on the settings struct.
        settings = http_ffi.ffi.new('http_parser_settings *')
        callback_refs = {} # prevent garbage collection of cffi callbacks
        names = [ name for name in dir(self) if name.startswith('_on_') ]
        for name in names:
            # message begin/complete callbacks take no data pointer.
            cbtype = 'http_cb' if 'complete' in name or 'begin' in name \
                            else 'http_data_cb'
            cb = http_ffi.ffi.callback(cbtype, getattr(self, name))
            callback_refs[name] = cb
            setattr(settings, name[1:], cb)
        self._settings = settings
        self._callback_refs = callback_refs
    def _reinit(self):
        # Reset the per-message parse state.
        self._url_data = bytearray()
        self._header_state = self.s_header_field
        self._header_name = None
        self._header_data = bytearray()
        self._message = HttpMessage()
        self._headers_complete = False
    def _on_message_begin(self, parser):
        self._reinit()
        return 0
    def _on_url(self, parser, at, length):
        # The URL may arrive in multiple fragments; accumulate them.
        buf = http_ffi.ffi.buffer(at, length)
        self._url_data.extend(buf)
        return 0
    def _on_header_field(self, parser, at, length):
        buf = http_ffi.ffi.buffer(at, length)
        if self._header_state == self.s_header_field:
            self._header_data.extend(buf)
        else:
            # A new field starts: flush the previous (name, value) pair.
            # After the headers are complete, pairs belong to the trailers.
            header_value = _ba2str(self._header_data)
            dest = self._message.trailers if self._headers_complete \
                        else self._message.headers
            dest.append((self._header_name, header_value))
            self._header_data[:] = buf
            self._header_state = self.s_header_field
        return 0
    def _on_header_value(self, parser, at, length):
        buf = http_ffi.ffi.buffer(at, length)
        if self._header_state == self.s_header_value:
            self._header_data.extend(buf)
        else:
            # Field -> value transition: remember the completed field name.
            self._header_name = _ba2str(self._header_data)
            self._header_data[:] = buf
            self._header_state = self.s_header_value
        return 0
    def _parse_url(self, url):
        # Split the raw request target into [path, query] with the C helper.
        # CONNECT targets use authority-form and are parsed differently.
        msg = self._message
        result = http_ffi.ffi.new('struct http_parser_url *')
        is_connect = msg.method == 'CONNECT'
        error = http_ffi.lib.http_parser_parse_url(bytes(url), len(url),
                                                   is_connect, result)
        if error:
            raise ValueError('url parse error')
        parsed_url = []
        for field in (http_ffi.lib.UF_PATH, http_ffi.lib.UF_QUERY):
            if result.field_set & field:
                data = result.field_data[field]
                component = msg.url[data.off:data.off+data.len]
            else:
                component = ''
            parsed_url.append(component)
        return parsed_url
    def _on_headers_complete(self, parser):
        if self._header_state == self.s_header_value:
            # Flush the final header pair.
            header_value = _ba2str(self._header_data)
            self._message.headers.append((self._header_name, header_value))
            self._header_state = self.s_header_field
            del self._header_data[:]
        msg = self._message
        msg.message_type = http_ffi.lib.http_message_type(parser)
        msg.version = (parser.http_major, parser.http_minor)
        if msg.message_type == self.HTTP_REQUEST:
            msg.method = _cp2str(http_ffi.lib.http_method_str(parser.method))
            msg.url = _ba2str(self._url_data)
            try:
                msg.parsed_url = self._parse_url(self._url_data)
            except ValueError:
                return 2
            msg.is_upgrade = http_ffi.lib.http_is_upgrade(parser)
        else:
            msg.status_code = parser.status_code
        msg.should_keep_alive = http_ffi.lib.http_should_keep_alive(parser)
        # _messages is presumably provided by the protocols.Parser base class;
        # it is not defined in this class -- TODO confirm.
        self._messages.append(msg)
        self._headers_complete = True
        # Returning 1 tells http-parser that the response carries no body
        # (a response to a HEAD request).
        request_method = self._requests and self._requests.popleft()
        return 1 if request_method == 'HEAD' else 0
    def _on_body(self, parser, at, length):
        buf = http_ffi.ffi.buffer(at, length)[:] # -> bytes
        self._message.body._feed(buf)
        return 0
    def _on_message_complete(self, parser):
        if self._header_state == self.s_header_value:
            # last trailer in a chunked messages
            header_value = _ba2str(self._header_data)
            dest = self._message.trailers if self._headers_complete \
                        else self._message.headers
            dest.append((self._header_name, header_value))
        # An empty feed signals end-of-body to readers of the message body.
        self._message.body._feed(b'')
        return 0
class ErrorStream(object):
    """Passed to the WSGI application as environ['wsgi.errors'].

    Forwards messages to the Python logging facility.
    """
    def __init__(self):
        self._log = logging.get_logger(objref(self))
    def flush(self):
        # Nothing is buffered locally; entries go straight to the logger.
        pass
    def write(self, data):
        self._log.error(data)
    def writelines(self, seq):
        for entry in seq:
            self.write(entry)
class HttpClient(protocols.RequestResponseProtocol):
    """An HTTP/1.1 client."""
    _exception = HttpError
    user_agent = 'gruvi.http/{0}'.format(__version__)
    def __init__(self, timeout=None):
        """The optional *timeout* argument can be used to specify a timeout for
        the various network operations used within the client."""
        def parser_factory():
            # A client only ever parses responses.
            return HttpParser(HttpParser.HTTP_RESPONSE)
        super(HttpClient, self).__init__(parser_factory, timeout=timeout)
        self._default_host = None
    transport = protocols.Protocol.transport  # Have Sphinx document it
    @switchpoint
    @docfrom(protocols.Protocol._connect)
    def connect(self, address, ssl=False, local_address=None,
                **transport_args):
        self._connect(address, ssl, local_address, **transport_args)
        if isinstance(address, tuple):
            # Remember the host so a default "Host" header can be added.
            self._default_host = address[0]
    def _init_transport(self, transport):
        super(HttpClient, self)._init_transport(transport)
        if hasattr(transport, 'nodelay'):
            # Disable Nagle: HTTP requests are latency sensitive.
            transport.nodelay(True)
    def _dispatch_fast_path(self, transport, message):
        transport._queue.put(message)
        def on_size_change(oldsize, newsize):
            # Keep the queue's flow-control size in sync with the body.
            transport._queue._adjust_size(newsize-oldsize)
        message.body._on_size_change = on_size_change
        return True
    @switchpoint
    def request(self, method, url, headers=None, body=None):
        """Make a new HTTP request.

        The *method* argument is the HTTP method to be used. It must be
        specified as a string, for example ``'GET'`` or ``'POST'``. The *url*
        argument must be a string containing the URL.

        The optional *headers* argument specifies extra HTTP headers to use in
        the request. It must be a list of (name, value) tuples, with name and
        value a string.

        The optional *body* argument may be used to specify a body to include
        in the request. It must be a ``bytes`` or ``str`` instance, a file-like
        object, or an iterable producing ``bytes`` or ``str`` instances. See
        the notes at the top about the use of strings in HTTP bodies.

        This method sends the request and waits for it to be complete sent out.
        It does now however wait for the response. The response can be obtained
        using :meth:`getresponse`.

        You may make multiple requests before reading a response. This is
        called pipelining. According to the HTTP RFC, you should not use the
        POST method when doing this. This restriction is not enforced by this
        method.
        """
        if self._transport is None or self._transport.closed:
            raise RuntimeError('not connected')
        if headers is None:
            headers = []
        for name,value in headers:
            if name in hop_by_hop:
                raise ValueError('header {0} is hop-by-hop'.format(name))
        agent = get_header(headers, 'User-Agent')
        if agent is None:
            headers.append(('User-Agent', self.user_agent))
        host = get_header(headers, 'Host')
        if host is None and self._default_host:
            headers.append(('Host', self._default_host))
        if body is None:
            body = b''
        if not isinstance(body, (compat.binary_type, compat.text_type)) \
                    and not hasattr(body, 'read') \
                    and not hasattr(body, '__iter__'):
            raise TypeError('body: expecting a bytes or str instance, ' \
                            'a file-like object, or an iterable')
        header = create_request(method, url, headers)
        self._transport.write(header)
        # BUG FIX: the original dispatched file-like bodies on
        # hasattr(body, 'write') and read with an undefined name "chunksize",
        # so file-like bodies always raised NameError. It also let str/bytes
        # bodies fall into the __iter__ branch (iterating bytes yields ints
        # on Python 3). Test str/bytes first, then file-like, then iterables.
        if isinstance(body, (compat.binary_type, compat.text_type)):
            self._write(self._transport, body)
        elif hasattr(body, 'read'):
            chunksize = 65536
            while True:
                chunk = body.read(chunksize)
                if not chunk:
                    break
                self._write(self._transport, chunk)
        else:
            for chunk in body:
                self._write(self._transport, chunk)
        self._flush(self._transport)
        # Remember the method so the parser can special-case HEAD responses.
        self._transport._parser.push_request(method)
    @switchpoint
    def getresponse(self):
        """Get a new response from the connection.

        This method will wait until the reponse header is fully received. It
        will then parse the response header, store the result in a
        :class:`HttpResponse` instance, and return that. The rest of the body
        may be read through the response object.

        When using HTTP pipelining, this method will return the fist response
        header that is received, which will correspond to the oldest request
        that is still pending.
        """
        if not self._transport._parser.requests and not self._transport._queue:
            raise RuntimeError('there are no outstanding requests')
        message = self._transport._queue.get()
        response = HttpResponse(message)
        return response
class HttpServer(protocols.RequestResponseProtocol):
    """An HTTP/1.1 server that dispatches requests to a WSGI application."""

    # Exception class used for protocol-level errors.
    _exception = HttpError
    # Default value for the "Server" response header.
    server_id = 'gruvi.http/{0}'.format(__version__)

    def __init__(self, wsgi_handler, server_name=None, timeout=None):
        """The constructor takes the following arguments. The *wsgi_handler*
        argument must be a WSGI callable. See `PEP 333
        <http://www.python.org/dev/peps/pep-0333/>`_.
        The optional *server_name* argument can be used to specify a server
        name. This might be needed by the WSGI application to construct
        absolute URLs. If not provided, then the host portion of the address
        passed to :meth:`listen` will be used.
        The optional *timeout* argument can be used to specify a timeout for
        the various network operations used within the server.
        """
        # Each connection gets its own request parser instance.
        def parser_factory():
            return HttpParser(HttpParser.HTTP_REQUEST)
        super(HttpServer, self).__init__(parser_factory, timeout)
        self._wsgi_handler = wsgi_handler
        self._server_name = server_name

    transport = protocols.Protocol.transport  # Have Sphinx document it

    @property
    def clients(self):
        """A set containing the transports of the currently connected
        clients."""
        return self._clients

    @switchpoint
    @docfrom(protocols.Protocol._listen)
    def listen(self, address, ssl=False, **transport_args):
        # Public wrapper around the base-class listen implementation.
        self._listen(address, ssl, **transport_args)

    def _init_transport(self, transport):
        # Per-connection setup: disable Nagle where supported and reset the
        # per-request state kept on the transport.
        super(HttpServer, self)._init_transport(transport)
        if hasattr(transport, 'nodelay'):
            transport.nodelay(True)
        self._reinit_request(transport)

    def _reinit_request(self, transport):
        # Reset all per-request state stored on the transport.
        transport._version = None        # HTTP version of the current request
        transport._status = None         # status set via start_response()
        transport._headers = []          # response headers
        transport._trailers = []         # trailers for chunked responses
        transport._headers_sent = False  # True once the header block went out
        transport._chunked = None        # whether chunked framing is used
        transport._keepalive = None      # whether to keep the connection open

    def _dispatch_fast_path(self, transport, message):
        # Propagate body buffer size changes into the transport queue so flow
        # control stays accurate; always fall through to normal dispatch.
        def on_size_change(oldsize, newsize):
            transport._queue._adjust_size(newsize-oldsize)
        message.body._on_size_change = on_size_change
        return False

    def _close_transport(self, transport, error=None):
        # If an error occurred before any header was written, emit a plain
        # 500 response first so the client gets *something* back.
        if not transport.closed and not transport._headers_sent and error:
            transport._status = '500 Internal Server Error'
            transport._headers = [('Content-Type', 'text/plain')]
            transport._chunked = False
            transport._keepalive = False
            self._write(transport, 'Internal Server Error ({0})'
                        .format(error.args[0]))
        super(HttpServer, self)._close_transport(transport)

    def _get_environ(self, transport, message):
        # Build the WSGI environ dictionary for *message*.
        env = message.get_wsgi_environ()
        env['SERVER_NAME'] = self._server_name or self._local_address[0]
        # NOTE(review): PEP 3333 specifies SERVER_PORT as a string; this may
        # assign an int — confirm against _local_address producers.
        env['SERVER_PORT'] = self._local_address[1]
        env['wsgi.version'] = (1, 0)
        errors = env['wsgi.errors'] = ErrorStream()
        transport._log.debug('logging to {0}', objref(errors))
        env['wsgi.multithread'] = True
        env['wsgi.multiprocess'] = True
        env['wsgi.run_once'] = False
        return env

    def _send_headers(self, transport):
        # Decide framing (chunked vs. content-length) and connection
        # persistence, then write out the response header block.
        clen = get_header(transport._headers, 'Content-Length')
        # Chunked framing only for HTTP/1.1 responses without a known length.
        transport._chunked = clen is None and transport._version == (1, 1)
        if transport._chunked:
            transport._headers.append(('Transfer-Encoding', 'chunked'))
        # HTTP/1.0 cannot keep the connection open without a content length.
        if not clen and transport._version == (1, 0):
            transport._keepalive = False
        if transport._version == (1, 1) and not transport._keepalive:
            transport._headers.append(('Connection', 'close'))
        elif transport._version == (1, 0) and transport._keepalive:
            transport._headers.append(('Connection', 'keep-alive'))
        server = get_header(transport._headers, 'Server')
        if server is None:
            transport._headers.append(('Server', self.server_id))
        header = create_response(transport._version, transport._status,
                                 transport._headers)
        transport.write(header)
        transport._headers_sent = True

    @switchpoint
    def _write(self, transport, data, last=False):
        # Write one piece of the response body; text is encoded as latin-1.
        if isinstance(data, compat.text_type):
            data = data.encode('iso-8859-1')
        elif not isinstance(data, compat.binary_type):
            raise TypeError('data: expecting bytes or str instance')
        if not data and not last:
            return
        if transport._error:
            raise transport._error
        # Headers are written lazily on the first body write.
        if not transport._headers_sent:
            self._send_headers(transport)
        if transport._chunked:
            if data:
                data = create_chunk(data)
            if last:
                data += last_chunk(transport._trailers)
        super(HttpServer, self)._write(transport, data)

    def _start_response(self, transport, status, headers, exc_info=None):
        # WSGI start_response callable (PEP 3333 semantics).
        if exc_info:
            try:
                # Re-raise only if headers have already been sent; otherwise
                # the new status/headers simply replace the previous ones.
                if transport._headers_sent:
                    compat.reraise(*exc_info)
            finally:
                exc_info = None  # break the traceback reference cycle
        elif transport._status is not None:
            raise RuntimeError('response already started')
        for name, value in headers:
            if name in hop_by_hop:
                raise ValueError('header {0} is hop-by-hop'.format(name))
        transport._status = status
        transport._headers = headers
        def write(data):
            return self._write(transport, data, last=False)
        return write

    def _dispatch_message(self, transport, message):
        # Run the WSGI application for one request and stream its response.
        transport._log.info('request: {0} {1}', message.method, message.url)
        transport._version = message.version
        transport._keepalive = message.should_keep_alive
        environ = self._get_environ(transport, message)
        def start_response(status, headers, exc_info=None):
            return self._start_response(transport, status, headers, exc_info)
        result = self._wsgi_handler(environ, start_response)
        try:
            if not transport._status:
                raise RuntimeError('start_response() not called')
            for chunk in result:
                if transport.closed:
                    break
                if chunk:
                    self._write(transport, chunk)
            # Emit the terminating (possibly empty/last) chunk and flush.
            self._write(transport, b'', last=True)
            self._flush(transport)
        finally:
            # PEP 3333: close() must be called on the result if present.
            if hasattr(result, 'close'):
                result.close()
        transport._log.info('response: {0}', transport._status)
        if transport._keepalive:
            transport._log.debug('keeping connection alive')
            self._reinit_request(transport)
        else:
            self._close_transport(transport)
| cocagne/gruvi | gruvi/http.py | http.py | py | 24,754 | python | en | code | null | github-code | 36 | [
{
"api_name": "urlparse.urlsplit",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "hub.switchpoint",
"line_number": 202,
"usage_type": "name"
},
{
"api_name": "util.docfrom",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "hub.switchpoint",
... |
31964896901 | import cv2
import time
import sys
from PIL import Image
from multiprocessing import Process
import os
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
import pygame
import fpstimer
import moviepy.editor as mp
# Output file for the generated ASCII frames (appended, tab separated).
savefile = open('data.txt', 'a', encoding='utf-8')

# NOTE(review): this first palette is dead code — it is immediately
# overwritten by the smaller palette below.  Kept as-is.
emojies = {
    "🤍": [255, 255, 255],
    "🐚": [220, 220, 220],
    "💿": [192, 192, 192],
    "🦝": [128, 128, 128],
    "🌑": [60, 60, 60],
    "🖤": [0, 0, 0],
    "🟠": [225, 100, 0],
    "❤️": [255, 0, 0],
    "🍒": [128, 0, 0],
    "💜": [150, 0, 255],
    "🌸": [255, 0, 230],
    "🫓": [220, 170, 110],
    "🥪": [220, 190, 190],
    "🌳": [30, 100, 30],
    "📗": [30, 230, 60],
    "🦖": [25, 140, 45],
    "🍋": [230, 230, 0],
    "🔵": [0, 0, 255],
    "📘": [0, 160, 255],
    "🐋": [0, 200, 255]
}
# Palette actually used: emoji -> representative [R, G, B] colour.
emojies = {
    "🤍": [255, 255, 255],
    "💿": [192, 192, 192],
    "🌑": [60, 60, 60],
    "🖤": [0, 0, 0],
    "❤️": [255, 0, 0],
    "🍒": [128, 0, 0],
    "💜": [150, 0, 255],
    "🌸": [255, 0, 230],
    "🌳": [30, 100, 30],
    "📗": [30, 230, 60],
    "🦖": [25, 140, 45],
    "🍋": [230, 230, 0],
    "🔵": [0, 0, 255],
    "📘": [0, 160, 255],
    "🐋": [0, 200, 255]
}
frame_size = 61  # emoji columns per frame; change to emojies that can fit (60 for terminal)
frame_height = 30  # emoji rows per frame; 32 for terminal
frame_interval = 1.0 / 30.75  # seconds per frame for playback pacing
ASCII_LIST = []  # accumulated ASCII frames, filled by extract_transform_generate()
# Extract frames from video
def extract_transform_generate(video_path, start_frame, number_of_frames=1000):
    """Decode frames from *video_path* and append their emoji-art rendering
    to the module-level ``ASCII_LIST``.

    video_path       -- path to the video file.
    start_frame      -- frame index to start decoding from.
    number_of_frames -- maximum number of frames to convert.

    Bug fixes vs. the original:
    * the first frame was read before the loop and immediately overwritten,
      so frame ``start_frame`` was silently skipped;
    * ``continue`` inside the except clause skipped the counter increments,
      so a persistently failing frame stalled progress and made the loop
      process more reads than requested.
    """
    capture = cv2.VideoCapture(video_path)
    capture.set(1, start_frame)  # 1 == CAP_PROP_POS_FRAMES: seek to start_frame
    current_frame = start_frame
    frame_count = 1
    while frame_count <= number_of_frames:
        ret, image_frame = capture.read()
        if not ret:  # end of stream or decode failure
            break
        try:
            image = Image.fromarray(image_frame)
            ascii_characters = pixels_to_ascii(resize_image(image))
            pixel_count = len(ascii_characters)
            ascii_image = "\n".join(
                ascii_characters[index:(index + frame_size)]
                for index in range(0, pixel_count, frame_size))
            ASCII_LIST.append(ascii_image)
        except Exception:
            # Skip a bad frame but still advance the counters below so the
            # loop is guaranteed to terminate.
            pass
        progress_bar(frame_count, number_of_frames)
        frame_count += 1    # frames handled in this call
        current_frame += 1  # absolute position in the video
    capture.release()
# Progress bar code is courtesy of StackOverflow user: Aravind Voggu.
# Link to thread: https://stackoverflow.com/questions/6169217/replace-console-output-in-python
def progress_bar(current, total, barLength=25):
    """Render an in-place textual progress bar on stdout.

    current   -- number of items processed so far.
    total     -- total number of items.
    barLength -- width of the bar in characters.
    """
    percent = current * 100.0 / total
    filled = '#' * int(percent / 100 * barLength - 1)
    padding = ' ' * (barLength - len(filled))
    sys.stdout.write('\rProgress: [%s%s] %d%% Frame %d of %d frames'
                     % (filled, padding, percent, current, total))
# Resize image
def resize_image(image_frame):
    """Resize a PIL image to the fixed frame dimensions.

    The target size is ``frame_size`` columns by ``frame_height`` rows
    (module-level constants).  The aspect ratio is intentionally ignored:
    the original computed an aspect-corrected height and then immediately
    overwrote it with ``frame_height`` — that dead computation is removed
    here, behavior is unchanged.
    """
    return image_frame.resize((frame_size, frame_height))
# Greyscale
def toemoji(pixel, palette=None):
    """Return the emoji whose palette colour is closest to *pixel*.

    pixel   -- an (r, g, b) triple (extra components are ignored).
    palette -- optional mapping emoji -> [r, g, b]; defaults to the
               module-level ``emojies`` palette.

    Closeness is the squared Euclidean distance in RGB space (the square
    root is omitted because it does not change the argmin).  Bug fix: due
    to operator precedence, the original applied ``** (1 / 2)`` to the blue
    term only, which biased the match toward blue differences.
    """
    if palette is None:
        palette = emojies
    r, g, b = pixel[0], pixel[1], pixel[2]
    closestint = float("inf")
    closestcolour = ""
    for emoji, colour in palette.items():
        totalint = (
            (r - colour[0]) ** 2
            + (g - colour[1]) ** 2
            + (b - colour[2]) ** 2
        )
        if totalint < closestint:
            closestint = totalint
            closestcolour = emoji
    return closestcolour
# Convert pixels to ascii
def pixels_to_ascii(image_frame):
    """Map every pixel of *image_frame* to its nearest emoji and join the
    result into a single string (row-major, no line breaks)."""
    return "".join(toemoji(pixel) for pixel in image_frame.getdata())
# Open image => Resize => Greyscale => Convert to ASCII => Store in text file
def ascii_generator(image_path, start_frame, number_of_frames):
    # Convert pre-extracted frame images (BadApple_<n>.jpg) to ASCII art.
    # NOTE(review): `ascii_image` is built and then discarded — this function
    # currently has no observable effect beyond opening the images; a
    # return/append was presumably lost.  Kept as-is.
    current_frame = start_frame
    while current_frame <= number_of_frames:
        path_to_image = image_path + '/BadApple_' + str(current_frame) + '.jpg'
        image = Image.open(path_to_image)
        ascii_characters = pixels_to_ascii(resize_image(image))  # get ascii characters
        pixel_count = len(ascii_characters)
        ascii_image = "\n".join(
            [ascii_characters[index:(index + frame_size)] for index in range(0, pixel_count, frame_size)])
        current_frame += 1
def preflight_operations(path):
    # Validate the video path, run ASCII generation over (almost) all frames
    # and persist the frames, tab separated, through the module-level
    # `savefile` handle.  Returns the total frame count, or None (implicitly)
    # when the file does not exist.
    if os.path.exists(path):
        path_to_video = path.strip()
        cap = cv2.VideoCapture(path_to_video)
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        cap.release()
        # NOTE(review): the per-process frame boundaries below look like
        # leftovers from a multiprocessing split; only process4_end_frame
        # is actually used.
        frames_per_process = int(total_frames / 4)
        process1_end_frame = frames_per_process
        process2_start_frame = process1_end_frame + 1
        process2_end_frame = process2_start_frame + frames_per_process
        process3_start_frame = process2_end_frame + 1
        process3_end_frame = process3_start_frame + frames_per_process
        process4_start_frame = process3_end_frame + 1
        process4_end_frame = total_frames - 1
        start_time = time.time()
        sys.stdout.write('Beginning ASCII generation...\n')
        extract_transform_generate(path_to_video, 1, process4_end_frame)
        savefile.write('\t'.join(ASCII_LIST))
        # NOTE(review): closing the module-level handle means a second call
        # to this function fails on the closed file.
        savefile.close()
        execution_time = time.time() - start_time
        sys.stdout.write('ASCII generation completed! ASCII generation time: ' + str(execution_time))
        return total_frames
    else:
        sys.stdout.write('Warning file not found!\n')
def main():
    """Interactive menu loop: ask the user for a video file and generate
    ASCII frames for it, or exit."""
    while True:
        sys.stdout.write('==============================================================\n')
        sys.stdout.write('Select option: \n')
        sys.stdout.write('1) Play\n')
        sys.stdout.write('2) Exit\n')
        sys.stdout.write('==============================================================\n')
        user_input = str(input("Your option: "))
        # Bug fix: str.strip() returns a new string; the original discarded
        # the result, so trailing whitespace defeated the option matching.
        user_input = user_input.strip()
        if user_input == '1':
            user_input = str(input("Please enter the video file name (file must be in root!): "))
            preflight_operations(user_input)
        elif user_input == '2':
            exit()
        else:
            sys.stdout.write('Unknown input!\n')
            continue


if __name__ == '__main__':
    main()
{
"api_name": "os.environ",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"... |
34788460936 | from aiogram import Router
from aiogram.types import CallbackQuery
from bot.keyboards.inline.raffle import back_to_raffle_menu
# Router grouping the crypto-payment callback handlers.
crypto_payment = Router()


@crypto_payment.callback_query(lambda call: call.data.split(":")[0] == "Crypto")
async def send_payment_methods(call: CallbackQuery) -> None:
    """Handle "Crypto:*" callback queries.

    Crypto payments are currently disabled, so the message is replaced with
    a "temporarily unavailable" notice and a back-to-menu keyboard.
    """
    await call.message.edit_text(
        "<b>Временно недоступно</b>",
        reply_markup=await back_to_raffle_menu()
    )
| lowfie/LotteryBot | bot/routers/raffle/crypto_payment.py | crypto_payment.py | py | 445 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "aiogram.Router",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "aiogram.types.CallbackQuery",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "bot.keyboards.inline.raffle.back_to_raffle_menu",
"line_number": 14,
"usage_type": "call"
}
] |
40319289149 | import numpy as np
from scipy import ndimage
from progress.bar import Bar
import argparse
import cv2
import os
import shutil
# Sub-folders of the TUB CrowdFlow dataset root.
ImgFolderPathDict = {
    "estimate": "estimate/",
    "gt_flow": "gt_flow/",
    "gt_traj": "gt_trajectories/",
    "images": "images/",
    "masks": "masks/"
}
# Scene folders; the "_hDyn" entries are the high-dynamics variants.
SceneFolderNameLis = ["IM01/", "IM01_hDyn/",
                      "IM02/", "IM02_hDyn/",
                      "IM03/", "IM03_hDyn/",
                      "IM04/", "IM04_hDyn/",
                      "IM05/", "IM05_hDyn/"]
# Ground-truth trajectory CSV file per label kind.
GTTrajTXTFiles = {
    "bgTraj": "bgTrajectories.csv",
    "denseTraj": "denseTrajectories.csv",
    "personTraj": "personTrajectories.csv"
}
# Output folder per label kind.
SaveFolder = {
    "bgTraj": "bgTrajectories/",
    "denseTraj": "denseTrajectories/",
    "personTraj": "PersonTrajectories/",
}
def person_annotate_img_generator(txtfile, frame):
    """
    Gaussian Spot Annotation
    ------
    Build a (720, 1280) uint8 density map for one frame: each person
    position becomes a Gaussian spot (sigma=3) whose peak is scaled to 128;
    overlapping spots add up and the sum is clipped to 255.

    txtfile -- CSV (path or file-like) of per-person trajectories; columns
               2*frame and 2*frame+1 hold the (row, col) position.
    frame   -- frame index to annotate.
    """
    init_img = np.zeros((720, 1280))
    person_pos = np.loadtxt(txtfile, delimiter=",")
    frame_per_pos = np.array(
        person_pos[:, 2 * frame: 2 * (frame + 1)], dtype=np.uint32)
    shape = frame_per_pos.shape
    for i in range(shape[0]):
        pos = frame_per_pos[i, :]
        # (0, 0) marks "person not present in this frame"; also drop
        # out-of-bounds positions (negative values wrap to huge uint32
        # numbers and are caught by the upper-bound checks).
        if pos[0] == 0 and pos[1] == 0:
            continue
        elif pos[0] >= 720 or pos[1] >= 1280:
            continue
        elif pos[0] < 0 or pos[1] < 0:
            continue
        # Allocate the scratch image only for positions we actually keep.
        tmp_img = np.zeros((720, 1280))
        tmp_img[pos[0], pos[1]] = 1.0
        # Bug fix: scipy.ndimage.filters is deprecated/removed in newer
        # SciPy; call scipy.ndimage.gaussian_filter directly.
        tmp_img = ndimage.gaussian_filter(tmp_img, 3)
        max_num = tmp_img.max()
        tmp_img = np.array(tmp_img * (128 / max_num), dtype=np.uint8)
        init_img += tmp_img
    ret_img = np.where(init_img > 255, 255, init_img)
    return np.array(ret_img, dtype=np.uint8)
if __name__ == "__main__":
    # CLI entry point: for every scene, render one Gaussian label image per
    # frame (skipping files that already exist).
    parser = argparse.ArgumentParser(description="""T
    This code is to make
    bgTrajecotries/denseTrajectories/personTrajectoris
    label images
    """)
    parser.add_argument('-p', '--path', default="E:/Dataset/TUBCrowdFlow/")
    parser.add_argument('-k', '--img_kind', choices=["bgTraj", "denseTraj", "personTraj"], default="personTraj")
    args = parser.parse_args()

    CrowdFlowPath = args.path  # Your dataset path
    ImgKind = args.img_kind
    # Frames per scene; each count serves a scene and its _hDyn variant.
    frame_num_list = [300, 300, 250, 300, 450]
    for i, scene in enumerate(SceneFolderNameLis):
        frame_num = frame_num_list[int(i / 2)]
        path = CrowdFlowPath + \
            ImgFolderPathDict['gt_traj'] + scene + GTTrajTXTFiles[ImgKind]
        SaveFolderpath = CrowdFlowPath + \
            ImgFolderPathDict['gt_traj'] + scene + SaveFolder[ImgKind]
        if not os.path.isfile(path):
            # shutil.copy(path[:-3]+"txt", path)
            print("No csv file")
            break
        if not os.path.isdir(SaveFolderpath):
            # os.mkdir(SaveFolderpath)
            print("No such directory")
            break
        bar = Bar('Makeing Label... : {}'.format(scene), max=frame_num)
        # NOTE(review): this loop reuses `i`, shadowing the scene index;
        # harmless here because the outer `i` is not used after this point.
        for i in range(frame_num):
            full_save_path = SaveFolderpath + \
                SaveFolder[ImgKind][:-1] + "_frame_{:0=4}.png".format(i)
            if os.path.isfile(full_save_path):
                bar.next()  # already rendered, skip
                continue
            img = person_annotate_img_generator(path, i)
            cv2.imwrite(full_save_path, img)
            bar.next()
        bar.finish()
| hanebarla/CrowdCounting_with_Flow | src/utils/make_human_gaussian.py | make_human_gaussian.py | py | 3,717 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.uint32",
"line_number... |
41921068479 | from selenium import webdriver
from bs4 import BeautifulSoup
import requests
from time import sleep
from selenium.webdriver.common.keys import Keys
def getNews():
    # Fetch the top five Google News headlines, shorten their links, and
    # type them into the currently open WhatsApp Web chat.
    # NOTE(review): `browser`, `url` and `root` are not defined at module
    # scope in this file (`browser` is local to main()), so calling this
    # raises NameError unless those globals are created elsewhere — needs
    # fixing before this handler can work.
    print('getting news')
    text_box = browser.find_element_by_class_name("_2wP_Y")
    response = "Let me fetch and send top 5 latest news:\n"
    text_box.send_keys(response)
    soup = BeautifulSoup(requests.get(url).content, "html5lib")
    articles = soup.find_all('article', class_="MQsxIb xTewfe R7GTQ keNKEd j7vNaf Cc0Z5d YKEnGe EyNMab t6ttFe Fm1jeb EjqUne")
    news = [i.find_all('a', class_="ipQwMb Q7tWef")[0].text for i in articles[:5]]
    print('got here')
    links = [root + i.find('a')['href'][1:] for i in articles[:5]]
    # Shorten each link through the thelink.la API.
    links = [requests.get("http://thelink.la/api-shorten.php?url=" + link).content.decode() for link in links]
    for i in range(5):
        text_box.send_keys(news[i] + "==>" + links[i] + "\n")
def main():
    # Drive WhatsApp Web with Selenium: wait for a manual login, then poll
    # for unread chats and react to simple text commands ("go" activates the
    # bot for a contact, "show ... news" sends headlines, "deactivate"
    # removes the contact).
    browser = webdriver.Chrome()
    browser.get('https://web.whatsapp.com')
    browser.maximize_window()
    # sleep(20) A 2 second pause
    paragraphs = []
    while True:
        x = input('Please sign in first and then enter "done" : ')
        if x: break
    print('starting')
    bot_users = {}  # A dictionary that stores all the users that sent activate bot
    while True:
        unread = browser.find_elements_by_class_name("OUeyt")  # The green dot tells us that the message is new
        name, message = '', ''
        if len(unread) > 0:
            ele = unread[-1]
            action = webdriver.common.action_chains.ActionChains(browser)
            action.move_to_element_with_offset(ele, 0, -20)  # move a bit to the left from the green dot
            # Clicking couple of times because sometimes whatsapp web responds after two clicks
            try:
                action.click()
                action.perform()
                action.click()
                action.perform()
            except Exception as e:
                pass
            try:
                name = browser.find_element_by_class_name("_3TEwt").text  # Contact name
                print("Contact name : ", name)
                message = browser.find_elements_by_class_name("_3zb-j")[-1]  # the message content
                print("message : ", message.text.lower())
                if 'go' in message.text.lower():
                    if name not in bot_users:
                        # First activation: stream the contents of text.txt
                        # word by word into the chat.
                        paragraphs = words()
                        bot_users[name] = True
                        text_box = browser.find_element_by_class_name("_1Plpp")
                        # response = "Hi "+name+". Wassim's Bot here :). Now I am activated for you\n"
                        for i in paragraphs:
                            for j in i.split():
                                text_box.send_keys(j)
                                text_box.send_keys(Keys.ENTER)
                                sleep(1)
                if name in bot_users:
                    # NOTE(review): getNews() depends on globals that this
                    # function does not set (see its definition).
                    if 'show' in message.text.lower() and 'news' in message.text.lower():
                        getNews()
                if 'deactivate' in message.text.lower():
                    if name in bot_users:
                        text_box = browser.find_element_by_class_name("_1Plpp")
                        response = "Bye " + name + ".\n"
                        text_box.send_keys(response)
                        del bot_users[name]
            except Exception as e:
                print(e)
                pass
        sleep(2)  # A 2 second pause so that the program doesn't run too fast
def words():
    """Read ``text.txt`` (UTF-8) from the working directory and return the
    lines that are longer than three characters, in file order.

    Bug fixes vs. the original: the file handle was never closed (now a
    ``with`` block) and it was opened "r+" although only read access is
    needed; the intermediate list-of-lists buffering is also gone.
    """
    paragraphs = []
    with open("text.txt", "r", encoding="utf8") as handle:
        for parag in handle.readlines():
            # splitting on "\n" reproduces the original's per-line pieces
            for piece in parag.split("\n"):
                if len(piece) > 3:
                    paragraphs.append(piece)
    return paragraphs
# Script entry point.
if __name__ == "__main__":
    main()
| Nijaoui-Wassim/Whatsapp-Bot | main.py | main.py | py | 4,682 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "bs4.BeautifulSoup",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome... |
71491780585 | # -*- coding: utf-8 -*-
# HelenのデータセットからXMLファイルを構築してDlibに読み込ませられるXMLファイルを作成する
# 前準備
# 一つのディレクトリ下に以下のファイルを用意する
# 1. facebox のデータが入ったxmlファイル
# 2. annotationの点が入ったファイル(annotation ディレクトリ下に配置する)
#
# Helen_Dataset
# │─ helen_facebox.xml
# └─ annotation
# └─ annotation_data1.txt
# └─ annotation_data2.txt
# │
# └─ annotation_dataxx.txt
import numpy as np
import xml.etree.ElementTree as ET
import linecache
import glob
xml_path = ''
while xml_path[-4:] != '.xml':
print("[1/3]face_boxの位置情報が入ったXMLファイルへのパスを入力してください。")
xml_path = raw_input()
if xml_path[-4:] != '.xml':
print("hoge.xmlの形のパスを再入力してください")
print("[2/3] annotationファイルが入ったディレクトリへのパスを入力してください。")
annotation_dir = raw_input()
if annotation_dir[-1] != '/':
annotation_dir += '/'
txt = ''
annotation_file_names = glob.glob(annotation_dir + '*.txt')
tree = ET.parse(xml_path)
root = tree.getroot()
for image in root.findall('.//image'):
img_name = image.attrib['file']
if '.' in img_name:
img_original = img_name.split('.')[0]
i = 0
while img_original != txt:
annotation_file_name = annotation_file_names[i]
txt = linecache.getline(annotation_file_name, 1)[:-1]
i += 1
annotation = open(annotation_file_name).read().split('\r\n')
if ',' not in annotation[0]:
annotation.pop(0)
for i, txt in enumerate(annotation):
if txt == '':
annotation.pop(i)
for i, anno in enumerate(annotation):
annotation[i] = anno.split(' , ')
annotation[i][0] = str(int(round(float(annotation[i][0]))))
annotation[i][1] = str(int(round(float(annotation[i][1]))))
box = image.find('.//box')
i = 0
for anno in annotation:
ET.SubElement(box, 'part',
attrib = {'name': str(i),
'x': anno[0],
'y': anno[1]
}
)
i += 1
def indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
indent(root)
out_path = ''
while out_path[-4:] != '.xml':
print("[3/3]保存したい学習データのパス+ファイル名を入力してください")
out_path = raw_input()
if out_path[-4:] != '.xml':
print("/aa/bb/hoge.xmlの形のパスを再入力してください")
tree.write(out_path, encoding='utf-8')
| chicn/render-dots | create_xml/helen_create_xml.py | helen_create_xml.py | py | 3,149 | python | ja | code | 0 | github-code | 36 | [
{
"api_name": "glob.glob",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "linecache... |
25657398507 | from django.urls import path, include
from rest_framework import routers
from src.api.views import UserUpdateView, UserImageView, CaloriesConsumptionListView, CaptureListView
# Namespace for URL reversing, e.g. reverse("api:capture-list-view").
app_name = 'api'

urlpatterns = [
    path('capture/', CaptureListView.as_view(), name='capture-list-view'),
    path('calories-consumption/', CaloriesConsumptionListView.as_view(), name='calories-consumption-list-view'),
    # NOTE(review): the two names below look swapped relative to their
    # paths ("my/image/" is named "my-profile" and vice versa) — confirm
    # against reverse()/template callers before changing.
    path('my/image/', UserImageView.as_view(), name="my-profile"),
    path('my/profile/', UserUpdateView.as_view(), name="my-profile-image"),
]
| IkramKhan-DevOps/exsapp-healthcare | src/api/urls.py | urls.py | py | 544 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "src.api.views.CaptureListView.as_view",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "src.api.views.CaptureListView",
"line_number": 9,
"usage_type": "name"
},
{
... |
21184237697 | from flask import request
from flask_restplus import Resource
from ..util.dto import TaskDto
from ..service.task_service import save_new_task, get_all_user_tasks, get_a_user_task, update_task, delete_task, get_all_expired_tasks
# Namespace and serialization models generated by TaskDto.
api = TaskDto.api
_task = TaskDto.task
_task_update = TaskDto.task_update

# Shared request parser: every endpoint documents/requires an
# Authorization header carrying the auth token.
parser = api.parser()
parser.add_argument('Authorization', type=str, location='headers', required=True, help='Authorization Token')
@api.route('/')
class Tasks(Resource):
    # Collection endpoint: list the current user's tasks and create new ones.

    @api.response(200, 'Task fetched successfully.')
    @api.doc(parser=parser)
    @api.marshal_list_with(_task, envelope='data')
    def get(self):
        """List all user tasks"""
        return get_all_user_tasks()

    @api.response(201, 'Tasks successfully created.')
    @api.doc(body=_task_update, parser=parser)
    def post(self):
        """Creates a new user Task """
        # Request body shape is documented by the _task_update model.
        data = request.json
        return save_new_task(data=data)
@api.route('/<public_task_id>')
@api.param('public_task_id', 'The Task identifier')
@api.response(404, 'Task not found.')
class Task(Resource):
    # Item endpoint: fetch, update or delete a single task by its public id.

    @api.response(200, 'Task fetched successfully.')
    @api.doc(parser=parser)
    @api.marshal_with(_task)
    def get(self, public_task_id):
        """Get a user task given its identifier"""
        resp = get_a_user_task(public_task_id)
        return resp

    @api.response(200, 'Task successfully updated.')
    @api.doc(body=_task_update, parser=parser)
    @api.expect(_task_update, validate=True)
    def patch(self, public_task_id):
        """Update a user task given its identifier"""
        # Fetch the task first so update_task() receives the model instance.
        resp = get_a_user_task(public_task_id)
        data = request.json
        return update_task(task=resp, data=data)

    @api.response(200, 'Task successfully deleted.')
    @api.doc(parser=parser)
    def delete(self, public_task_id):
        """Delete a user task given its identifier"""
        resp = get_a_user_task(public_task_id)
        return delete_task(task=resp)
| dvdhinesh/task_management_rest_api | app/main/controller/task_controller.py | task_controller.py | py | 1,959 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "util.dto.TaskDto.api",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "util.dto.TaskDto",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "util.dto.TaskDto.task",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "ut... |
28764221918 | from tweepy import API, OAuthHandler
from tweepy.models import Status
from typing import Dict
def init_twitter_api(consumer_key : str,
                     consumer_secret : str,
                     access_token : str,
                     access_token_secret : str) -> API:
    r"""Build an authenticated Twitter API client.

    Args:
        consumer_key (str): Consumer key of Twitter API.
        consumer_secret (str): Consumer secret of Twitter API.
        access_token (str): Access token of Twitter API.
        access_token_secret (str): Access token secret of Twitter API.
    Returns:
        API: Client of twitter API.
    """
    # OAuth handshake: app credentials first, then the user access token.
    oauth = OAuthHandler(consumer_key, consumer_secret)
    oauth.set_access_token(access_token, access_token_secret)
    return API(oauth)
def status_parser(status: Status) -> Dict[str, str]:
    r"""Flatten a tweepy Status into a small dictionary.

    Args:
        status (Status): Object responsed from twitter API.
    Returns:
        Dict[str, str]: Parsed dictionary with keys ``username``,
        ``message``, ``url``, ``retweet_count`` and ``favorite_count``
        (missing fields map to ``None``).
    """
    # NOTE: relies on tweepy's private ``_json`` attribute.
    json_body = status._json
    return_body = dict()
    return_body["username"] = json_body.get("user", {}).get("name")
    return_body["message"] = json_body.get("text")
    # Bug fix: ``.get("urls", [{}])`` only covers a *missing* key; an
    # existing but empty "urls" list raised IndexError.  ``or [{}]`` also
    # covers the empty-list case, yielding url=None.
    urls = json_body.get("entities", {}).get("urls") or [{}]
    return_body["url"] = urls[0].get("url")
    return_body["retweet_count"] = json_body.get("retweet_count")
    return_body["favorite_count"] = json_body.get("favorite_count")
    return return_body
| p768lwy3/medium-telegram-tutorial | Ch. 1: Write a telegram bot to get twitter message/src/utils/twitter.py | twitter.py | py | 1,745 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tweepy.OAuthHandler",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "tweepy.API",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "tweepy.API",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "tweepy.models.Status",
"... |
36709812106 | from Genetic.selection import tournamentSelect,getRouletteWheel,rouletteWheelSelect,Individual
from django.shortcuts import render,get_object_or_404,redirect
from django.views.generic import TemplateView,DetailView,ListView
from .forms import FormularzPoczatkowy
from .models import Epoka,PojedynczaWartoscWyniku,Ustawienia,Wynik
from django.contrib import messages
from django.utils import timezone
import statistics,math,random
import heapq
# Create your views here.
class MainView(TemplateView):
    # Landing page: GET renders the GA configuration form; POST validates
    # it, persists an Ustawienia row and redirects to the computed result.

    def get(self, request, *args, **kwargs):
        formularz = FormularzPoczatkowy()
        contex = {
            'formularz': formularz
        }
        return render(self.request, "Strona_glowna.html", contex)

    def post(self, *args, **kwargs):
        formularz = FormularzPoczatkowy(self.request.POST or None)
        if formularz.is_valid():
            zakres1 = formularz.cleaned_data.get('zakres1')
            zakres2 = formularz.cleaned_data.get('zakres2')
            # NOTE(review): chromosome precision is read but never used.
            dokladnosc_reprezentacji_chromsomu = formularz.cleaned_data.get('dokladnosc_reprezentacji_chromsomu')
            wielkosc_populacji = formularz.cleaned_data.get('wielkosc_populacji')
            liczba_epok = formularz.cleaned_data.get('liczba_epok')
            metoda_Selekcji = formularz.cleaned_data.get('metoda_Selekcji')
            implementacja_Krzyzowania = formularz.cleaned_data.get('implementacja_Krzyzowania')
            prawdopodbienstwo_Krzyzowania = formularz.cleaned_data.get('prawdopodbienstwo_Krzyzowania')
            implementacja_MutacjiBrzegowej = formularz.cleaned_data.get('implementacja_MutacjiBrzegowej')
            prawdopodbienstwo_MutacjiBrzegowej = formularz.cleaned_data.get('prawdopodbienstwo_MutacjiBrzegowej')
            ile_Przechodzi = formularz.cleaned_data.get('ile_Przechodzi')
            zakresMutacji2 = formularz.cleaned_data.get('zakresMutacji2')
            zakresMutacji1 = formularz.cleaned_data.get('zakresMutacji1')
            # Optional fields fall back to built-in defaults.
            if ile_Przechodzi is None:
                ile_Przechodzi = 30
            if zakresMutacji2 is None:
                zakresMutacji2 = 3
            if zakresMutacji1 is None:
                zakresMutacji1 = -3
            rodzaj_Optymalizacj = formularz.cleaned_data.get('rodzaj_Optymalizacj')
            wielkosc_turnieju = formularz.cleaned_data.get('wielkosc_turnieju')
            elita = formularz.cleaned_data.get('elita')
            if wielkosc_turnieju is None:
                wielkosc_turnieju = 3
            # Shrink the elite when it would not fit next to the selected
            # survivor percentage of the population.
            if wielkosc_populacji - elita <= round(wielkosc_populacji * (ile_Przechodzi / 100)):
                elita = elita - round(wielkosc_populacji * (ile_Przechodzi / 100))
            # ID_Wyniku= formularz.cleaned_data.get('ID_Wyniku')
            ustawienia = Ustawienia.objects.create(
                zakres1=zakres1,
                zakres2=zakres2,
                wielkoscPopulacji=wielkosc_populacji,
                liczbaepok=liczba_epok,
                metodaSelekcji=metoda_Selekcji,
                implementacjaKrzyzowania=implementacja_Krzyzowania,
                prawdobodobienstwoKrzyzowania=prawdopodbienstwo_Krzyzowania,
                implementacjaMutowania=implementacja_MutacjiBrzegowej,
                prawdobodobienstwoMutowania=prawdopodbienstwo_MutacjiBrzegowej,
                ileprzechodzi=ile_Przechodzi,
                rodzaj_Optymalizacj=rodzaj_Optymalizacj,
                wielkosc_turnieju=wielkosc_turnieju,
                elita=elita,
                zakresMutacji1=zakresMutacji1,
                zakresMutacji2=zakresMutacji2
                #
            )
            # `licz` runs the GA and returns the result id — presumably
            # defined elsewhere in this module; verify it is importable here.
            return redirect('Main:wynik', licz(ustawienia))
        else:
            messages.warning(self.request, "Błędnie uzupełniony formularz")
            return redirect("Main:main")
class WynikDzialania(ListView):
    # Result overview: aggregates per-epoch averages, standard deviations
    # and run time for the Wynik whose id ends the request path.
    model = Epoka

    def get_context_data(self, **kwargs):
        context = super().get_context_data()
        # The result id is the last path segment of the current URL.
        id_wyniku = self.request.path.rsplit("/")
        id_wyniku = id_wyniku[-1]
        Wyniki = Wynik.objects.filter(id=id_wyniku)
        ustawienia = Wyniki[0].epoka_set.all()[0]
        lista_srednich = []
        lista_odchylen = []
        iteracja = []
        czas = 0
        for x in Wyniki[0].epoka_set.all():
            lista_srednich.append(float(x.sredniWynik))
            lista_odchylen.append(str(x.odchylenieStandardowe))
            czas += float(x.czas)
            # "n/m" -> keep only the part before the slash.
            tmp = x.iteracja.rsplit("/")
            tmp = tmp[0]
            iteracja.append(tmp)
        # Best epoch: lowest average for minimisation, highest otherwise.
        if (ustawienia.ustawienia.rodzaj_Optymalizacj == 'Min'):
            maks = min(lista_srednich)
            najelpsza_epoka = lista_srednich.index(maks) + 1
        else:
            maks = max(lista_srednich)
            najelpsza_epoka = lista_srednich.index(maks) + 1
        context = {"srednie": lista_srednich,
                   "odchylenie": lista_odchylen,
                   "Epoka": Wyniki[0].epoka_set.all(),
                   "iteracja": iteracja,
                   "czas": czas,
                   "ustawienia": ustawienia,
                   "maks": maks,
                   "najelpsza_epoka": najelpsza_epoka
                   }
        return context

    template_name = "Wynik.html"
class DetaleEpoki(DetailView):
    # Detail page for a single epoch: collects every individual's
    # (x1, x2, value) point for plotting plus the best individual.
    model = Epoka

    def get_context_data(self, **kwargs):
        context = super().get_context_data()
        lista_x = []
        lista_y = []
        lista_z = []
        for x in self.object.pojedynczawartoscwyniku_set.all():
            lista_x.append(x.x1)
            lista_y.append(x.x2)
            lista_z.append(x.wartosc)
        # Best individual depends on the optimisation direction.
        if self.object.ustawienia.rodzaj_Optymalizacj == "Min":
            najelpszyWyniki = self.object.pojedynczawartoscwyniku_set.all()[lista_z.index(min(lista_z))]
        else:
            najelpszyWyniki = self.object.pojedynczawartoscwyniku_set.all()[lista_z.index(max(lista_z))]
        context['lista_x'] = lista_x
        context['lista_y'] = lista_y
        context['lista_z'] = lista_z
        context['zakres1'] = self.object.ustawienia.zakres1
        context['zakres2'] = self.object.ustawienia.zakres2
        context['min'] = min(lista_z)
        context['max'] = max(lista_z)
        context['najlepszy'] = najelpszyWyniki
        return context

    template_name = "Epoka_detale.html"
class individual():
    """A single GA population member: two features and a fitness value.

    The original used shared class attributes as pseudo-defaults; proper
    instance attributes avoid any cross-instance state while the default
    values keep ``individual()`` working unchanged for existing callers.
    """

    def __init__(self, cecha1=0, cecha2=0, wynik=0):
        self.cecha1 = cecha1  # first decision variable (x1)
        self.cecha2 = cecha2  # second decision variable (x2)
        self.wynik = wynik    # objective-function value (fitness)
def funkcjaCelu(x1, x2):
    """Beale's function — the GA objective; global minimum 0 at (3, 0.5)."""
    term1 = 1.5 - x1 + x1 * x2
    term2 = 2.25 - x1 + x1 * x2 ** 2
    term3 = 2.625 - x1 + x1 * x2 ** 3
    return term1 ** 2 + term2 ** 2 + term3 ** 2
def poczatkoweWartosci(populacja, zakres1, zakres2):
    """Create the initial population.

    Returns *populacja* individuals whose two features are drawn uniformly
    from [zakres1, zakres2] and whose fitness is pre-computed.
    """
    osobniki = []
    for _ in range(populacja):
        osobnik = individual()
        osobnik.cecha1 = random.uniform(zakres1, zakres2)
        osobnik.cecha2 = random.uniform(zakres1, zakres2)
        osobnik.wynik = funkcjaCelu(osobnik.cecha1, osobnik.cecha2)
        osobniki.append(osobnik)
    return osobniki
def selekcjaNajelpszychMAX(ustawienia, populacja=[]):
    """Best-percentage selection for maximisation (highest wynik is best).

    Returns ``(selected, elite)``: ``elite`` is the top ``ustawienia.elita``
    individuals verbatim, and ``selected`` fills the remaining population
    by sampling (with repetition) from the next ``ileprzechodzi`` percent.

    Bug fix: the random index upper bound is clamped to the last valid
    index — the original ``elita + licznik`` could run past the end of the
    population and raise IndexError.
    NOTE(review): the mutable default ``populacja=[]`` is kept only for
    interface compatibility; callers always pass a list.
    """
    licznik = round((ustawienia.ileprzechodzi / 100) * len(populacja))
    najelpsze = []
    Populacja_elity = []
    populacja = sorted(populacja, key=lambda osobnik: osobnik.wynik, reverse=True)
    for i in range(0, ustawienia.elita):
        Populacja_elity.append(populacja[i])
    upper = min(ustawienia.elita + licznik, len(populacja) - 1)
    while len(najelpsze) != ustawienia.wielkoscPopulacji - ustawienia.elita:
        najelpsze.append(populacja[random.randint(ustawienia.elita, upper)])
    return najelpsze, Populacja_elity
def selekcjaNajelpszychMIN(ustawienia, populacja=None):
    """Elitist truncation selection for minimisation.

    Sorts the population by fitness (ascending), copies the first
    `ustawienia.elita` individuals unchanged, then fills the remaining
    `wielkoscPopulacji - elita` slots by sampling (with replacement) from the
    best `ustawienia.ileprzechodzi` percent of the ranked population.

    Returns:
        (najelpsze, Populacja_elity) - the selected parents and the elites.

    Fixes over the original: no mutable default argument, len() instead of
    __len__(), and the random index is clamped so a 100% survivor fraction
    cannot raise IndexError.
    """
    populacja = list(populacja or [])
    licznik = round((ustawienia.ileprzechodzi / 100) * len(populacja))
    populacja.sort(key=lambda osobnik: osobnik.wynik)
    Populacja_elity = populacja[:ustawienia.elita]
    # Highest index we may draw from: elita + licznik, clamped to the end.
    gorny = min(ustawienia.elita + licznik, len(populacja) - 1)
    najelpsze = []
    while len(najelpsze) != ustawienia.wielkoscPopulacji - ustawienia.elita:
        najelpsze.append(populacja[random.randint(ustawienia.elita, gorny)])
    return najelpsze, Populacja_elity
# Tournament selection with elitism, for both optimisation directions.
# Returns (answer, Populacja_elity). NOTE: mutates the caller's `populacja`
# (removes the elites) and uses a mutable default argument.
def selecjaTurniejowa(ustawienia, populacja=[]):
answer = []
lista_wynikow = []
Populacja_elity = []
if ustawienia.rodzaj_Optymalizacj=='Min':
# elite extraction
for x in populacja:
lista_wynikow.append(x.wynik)
elita = heapq.nsmallest(ustawienia.elita, lista_wynikow)
for i in elita:
# NOTE(review): `elita.index(i)` is the position inside the elite list, not
# inside `populacja`; `lista_wynikow.index(i)` was presumably intended. As
# written this copies/removes individuals at indices 0..elita-1 - confirm.
x = populacja[elita.index(i)]
Populacja_elity.append(x)
populacja.remove(x)
lista_wynikow=[]
# rebuild the fitness list after the elites were removed
for x in populacja:
lista_wynikow.append(x.wynik)
while len(answer) < ustawienia.wielkoscPopulacji:
# draw a tournament of `wielkosc_turnieju` distinct individuals and keep
# the smallest fitness seen among them
tmp = random.sample(populacja, ustawienia.wielkosc_turnieju)
best = max(lista_wynikow)
for i in tmp:
if i.wynik < best:
best = i.wynik
answer.append(populacja[lista_wynikow.index(best)])
else:
# elite extraction (maximisation)
for x in populacja:
lista_wynikow.append(x.wynik)
elita = heapq.nlargest(ustawienia.elita, lista_wynikow)
for i in elita:
# NOTE(review): same elite-index concern as in the Min branch above.
x = populacja[elita.index(i)]
Populacja_elity.append(x)
populacja.remove(x)
lista_wynikow = []
# rebuild the fitness list after the elites were removed
for x in populacja:
lista_wynikow.append(x.wynik)
while len(answer) < ustawienia.wielkoscPopulacji:
tmp=random.sample(populacja, ustawienia.wielkosc_turnieju)
# NOTE(review): best=0 assumes non-negative fitness (true for Beale's
# function used in this module, but not in general) - confirm.
best=0
for i in tmp:
if i.wynik >best:
best=i.wynik
answer.append(populacja[lista_wynikow.index(best)])
return answer,Populacja_elity
# Roulette-wheel selection with elitism. For minimisation the score is the
# reciprocal fitness so smaller values get a larger slice of the wheel.
# Relies on getRouletteWheel()/rouletteWheelSelect() - presumably defined
# elsewhere in this file; verify they exist before reuse.
def selekcjaKolemRuletki(ustawienia,populacja=[]):
score={}
lista_wynikow=[]
Populacja_elity=[]
najlepsi=[]
if ustawienia.rodzaj_Optymalizacj=="Max":
# elite extraction
for x in populacja:
lista_wynikow.append(x.wynik)
elita = heapq.nlargest(ustawienia.elita, lista_wynikow)
for i in elita:
# NOTE(review): `elita.index(i)` indexes the elite list, not `populacja`;
# `lista_wynikow.index(i)` was presumably intended - confirm.
x = populacja[elita.index(i)]
Populacja_elity.append(x)
populacja.remove(x)
lista_wynikow = []
# score each remaining individual by raw fitness
for indiv in populacja:
score.update({indiv:indiv.wynik})
for x in range(0,populacja.__len__()):
najlepsi.append(rouletteWheelSelect(getRouletteWheel(populacja,score)))
else:
if ustawienia.rodzaj_Optymalizacj == "Min":
# elite extraction (same index concern as above)
for x in populacja:
lista_wynikow.append(x.wynik)
elita = heapq.nsmallest(ustawienia.elita, lista_wynikow)
for i in elita:
x = populacja[elita.index(i)]
Populacja_elity.append(x)
populacja.remove(x)
lista_wynikow=[]
# reciprocal fitness; NOTE(review): raises ZeroDivisionError when an
# individual sits exactly on the optimum (wynik == 0) - confirm handling.
for indiv in populacja:
score.update({indiv: 1/indiv.wynik})
for x in range(0, populacja.__len__()):
najlepsi.append(rouletteWheelSelect(getRouletteWheel(populacja, score)))
return najlepsi,Populacja_elity
# Crossover step: builds the next generation (without the elites) from the
# selected parents. typ == "KA" -> arithmetic crossover, "KH" -> heuristic.
def implementacjaKrzyzowania(typ,ustawienia, populacja=[]):
nowePokolenie=[]
# Arithmetic crossover ("Krzyzowanie aarytmetnyczne")
if typ=="KA":
# NOTE(review): one crossover point is drawn for the whole generation
# (outside the loop); per-pair draws may have been intended - confirm.
punktKrzyzowania = random.uniform(0, 1)
while nowePokolenie.__len__() < ustawienia.wielkoscPopulacji-ustawienia.elita:
# Two parents drawn uniformly (may be the same individual).
firstIndiv=random.randint(0,ustawienia.wielkoscPopulacji-ustawienia.elita-1)
secondIndiv=random.randint(0,ustawienia.wielkoscPopulacji-ustawienia.elita-1)
chromosom1cecha1bin = populacja[firstIndiv].cecha1
chromosom1cecha2bin = populacja[firstIndiv].cecha2
chromosom2cecha1bin = populacja[secondIndiv].cecha1
chromosom2cecha2bin = populacja[secondIndiv].cecha2
# Children are complementary convex combinations of the parents.
potomek1cecha1 = (punktKrzyzowania * chromosom1cecha1bin) + (1 - punktKrzyzowania) * chromosom2cecha1bin
potomek1cecha2 = (punktKrzyzowania * chromosom1cecha2bin) + (1 - punktKrzyzowania) * chromosom2cecha2bin
potomek2cecha1 = (1 - punktKrzyzowania) * chromosom1cecha1bin + punktKrzyzowania * chromosom2cecha1bin
potomek2cecha2 = (1 - punktKrzyzowania) * chromosom1cecha2bin + punktKrzyzowania * chromosom2cecha2bin
tmp = individual()
tmp.cecha1 = potomek1cecha1
tmp.cecha2 = potomek1cecha2
tmp.wynik = funkcjaCelu(tmp.cecha1, tmp.cecha2)
nowePokolenie.append(tmp)
tmp = individual()
tmp.cecha1 = potomek2cecha1
tmp.cecha2 = potomek2cecha2
tmp.wynik = funkcjaCelu(tmp.cecha1, tmp.cecha2)
nowePokolenie.append(tmp)
# NOTE(review): two children are appended per pass while the guard uses `<`,
# so the generation can overshoot the target size by one when it is odd.
# Heuristic crossover ("krzyzowanie heurstyczne")
if typ=="KH":
# NOTE(review): a child is only produced when parent genes satisfy the
# ordering condition below, so this loop can spin for a long time.
while nowePokolenie.__len__() != ustawienia.wielkoscPopulacji- ustawienia.elita:
punktKrzyzowania = random.uniform(0, 1)
i=random.randint(0,populacja.__len__()-1)
j = random.randint(0, populacja.__len__() - 1)
chromosom1cecha1bin = populacja[i].cecha1
chromosom1cecha2bin = populacja[i].cecha2
chromosom2cecha1bin = populacja[j].cecha1
chromosom2cecha2bin = populacja[j].cecha2
if chromosom2cecha1bin > chromosom1cecha1bin and chromosom1cecha2bin < chromosom2cecha2bin:
# Child placed between the parents, biased toward parent 2.
potomek1cecha1 = punktKrzyzowania * (chromosom2cecha1bin - chromosom1cecha1bin) + chromosom1cecha1bin
potomek1cecha2 = punktKrzyzowania * (chromosom2cecha2bin - chromosom1cecha2bin) + chromosom1cecha2bin
tmp = individual()
tmp.cecha1 = potomek1cecha1
tmp.cecha2 = potomek1cecha2
tmp.wynik = funkcjaCelu(tmp.cecha1, tmp.cecha2)
nowePokolenie.append(tmp)
return nowePokolenie
def implementacjaMutacji(typ, ustawienia, populacja=None):
    """Apply the configured mutation operator and return the new generation.

    typ == "MR" (uniform mutation): each individual mutates with probability
    `ustawienia.prawdobodobienstwoMutowania` percent; a fair coin then decides
    which of the two genes is redrawn from [zakresMutacji1, zakresMutacji2].
    typ == "MZ" (swap mutation): with the same probability the two genes of
    an individual are exchanged.
    Untouched individuals are carried over unchanged.

    Bug fix: the original gene choice used `random.random() == 0`, which is
    (almost) never true for a float in [0, 1), so uniform mutation effectively
    always mutated cecha2 and never cecha1. A fair 50/50 coin is used instead.
    """
    populacja = populacja or []
    nowePokolenie = []
    if typ == "MR":
        for x in populacja:
            if random.uniform(0, 100) <= ustawienia.prawdobodobienstwoMutowania:
                if random.random() < 0.5:  # fair coin: redraw cecha1...
                    new_x = random.uniform(ustawienia.zakresMutacji1, ustawienia.zakresMutacji2)
                    new_y = x.cecha2
                else:  # ...or redraw cecha2
                    new_x = x.cecha1
                    new_y = random.uniform(ustawienia.zakresMutacji1, ustawienia.zakresMutacji2)
                tmp = individual()
                tmp.cecha1 = new_x
                tmp.cecha2 = new_y
                tmp.wynik = funkcjaCelu(tmp.cecha1, tmp.cecha2)
                nowePokolenie.append(tmp)
            else:
                nowePokolenie.append(x)
    if typ == "MZ":
        for x in populacja:
            if random.uniform(0, 100) <= ustawienia.prawdobodobienstwoMutowania:
                tmp = individual()
                tmp.cecha1 = x.cecha2
                tmp.cecha2 = x.cecha1
                tmp.wynik = funkcjaCelu(tmp.cecha1, tmp.cecha2)
                nowePokolenie.append(tmp)
            else:
                nowePokolenie.append(x)
    return nowePokolenie
# Runs the whole genetic algorithm for `ustawienia.liczbaepok` epochs,
# persisting per-epoch statistics and every individual into the Django models
# (Wynik, Epoka, PojedynczaWartoscWyniku). Returns the Wynik row id.
# NOTE(review): the four selection branches below differ only in the selection
# operator - good candidates for extraction into one parameterised helper.
def licz(ustawienia):
# NOTE(review): saving_time is never used.
saving_time=0
wynikFinalny=Wynik.objects.create()
populacja=poczatkoweWartosci(ustawienia.wielkoscPopulacji,ustawienia.zakres1,ustawienia.zakres2)
for i in range(ustawienia.liczbaepok):
startime2 = timezone.now()
czasselekcji=0
czasmutowania=0
czaskrzyzowania=0
# "SN" = truncation ("najlepsi") selection, split by optimisation direction.
if ustawienia.metodaSelekcji== "SN":
if(ustawienia.rodzaj_Optymalizacj=="Min"):
startime= timezone.now()
populacja,elita=selekcjaNajelpszychMIN(ustawienia,populacja)
czasselekcji=timezone.now()-startime
# Crossover runs only with the configured probability; when skipped the
# selected parents become the next generation directly.
if random.uniform(0,100)<=ustawienia.prawdobodobienstwoKrzyzowania:
startime = timezone.now()
populacja = implementacjaKrzyzowania(ustawienia.implementacjaKrzyzowania,ustawienia,populacja)
czaskrzyzowania = timezone.now() - startime
startime = timezone.now()
populacja = implementacjaMutacji(ustawienia.implementacjaMutowania,ustawienia,populacja)
czasmutowania = timezone.now() - startime
# Elites re-enter the population untouched (elitism).
populacja=populacja+elita
else:
startime = timezone.now()
populacja,elita = selekcjaNajelpszychMAX(ustawienia, populacja)
czasselekcji = timezone.now() - startime
if random.uniform(0, 100) <= ustawienia.prawdobodobienstwoKrzyzowania:
startime = timezone.now()
populacja = implementacjaKrzyzowania(ustawienia.implementacjaKrzyzowania,ustawienia,populacja)
czaskrzyzowania = timezone.now() - startime
startime = timezone.now()
populacja = implementacjaMutacji(ustawienia.implementacjaMutowania,ustawienia,populacja)
czasmutowania = timezone.now() - startime
populacja=populacja+elita
else:
# "SR" = roulette-wheel selection.
if ustawienia.metodaSelekcji== "SR":
startime = timezone.now()
populacja,elita = selekcjaKolemRuletki(ustawienia,populacja)
czasselekcji = timezone.now() - startime
if random.uniform(0, 100) <= ustawienia.prawdobodobienstwoKrzyzowania:
startime = timezone.now()
populacja = implementacjaKrzyzowania(ustawienia.implementacjaKrzyzowania,ustawienia,populacja)
czaskrzyzowania = timezone.now() - startime
startime = timezone.now()
populacja = implementacjaMutacji(ustawienia.implementacjaMutowania, ustawienia, populacja)
czasmutowania = timezone.now() - startime
populacja=populacja+elita
else:
# "ST" = tournament selection.
if ustawienia.metodaSelekcji == "ST":
startime = timezone.now()
populacja,elita = selecjaTurniejowa(ustawienia, populacja)
czasselekcji = timezone.now() - startime
if random.uniform(0, 100) <= ustawienia.prawdobodobienstwoKrzyzowania:
startime = timezone.now()
populacja = implementacjaKrzyzowania(ustawienia.implementacjaKrzyzowania,ustawienia,populacja)
czaskrzyzowania = timezone.now() - startime
startime = timezone.now()
populacja = implementacjaMutacji(ustawienia.implementacjaMutowania,ustawienia,populacja)
czasmutowania = timezone.now() - startime
populacja=populacja+elita
# Per-epoch statistics over the new population.
sredniwynik = 0
listawynikow=[]
listax=[]
listay=[]
listaz=[]
for x in populacja:
sredniwynik += x.wynik
listawynikow.append(x.wynik)
listax.append(x.cecha1)
listay.append(x.cecha2)
listaz.append(x.wynik)
ustawienia.save()
czas=timezone.now()-startime2
# NOTE(review): the timedelta -> "seconds.fraction" conversion below relies
# on str(timedelta) formatting and string splitting; timedelta.total_seconds()
# would be far less fragile.
czasselekcji=str(czasselekcji).split(".")
czasmutowania = str(czasmutowania).split(".")
czaskrzyzowania = str(czaskrzyzowania).split(".")
sekundys = str(czasselekcji[0].rsplit(":")[-1])
sekundym = str(czasmutowania[0].rsplit(":")[-1])
sekundyk = str(czaskrzyzowania[0].rsplit(":")[-1])
setness = czasselekcji[-1]
setnesm = czasmutowania[-1]
setnesk = czaskrzyzowania[-1]
print("Czas selekcji:" +sekundys +"."+setness)
print("Czas Krzyzowania:"+ sekundyk+"."+setnesk)
print("Czas mutowania:"+sekundym+"."+setnesm)
czas= str(czas).split(".")
sekundy=str(czas[0].rsplit(":")[-1])
setnes=czas[-1]
czas=sekundy+"."+setnes
wyniki_epoki = Epoka.objects.create(
czas=czas,
iteracja=str(i + 1) + "/" + str(ustawienia.liczbaepok),
sredniWynik=str(sredniwynik / populacja.__len__()),
odchylenieStandardowe=statistics.stdev(listawynikow),
rezultaty=wynikFinalny,
ustawienia=ustawienia
)
# Bulk-insert every individual of this epoch in a single query.
ZapisWyników=[]
startime=timezone.now()
for x in populacja:
ZapisWyników.append(PojedynczaWartoscWyniku(
wartosc = x.wynik,
x1 = x.cecha1,
x2 = x.cecha2,
Wynik= wyniki_epoki
))
PojedynczaWartoscWyniku.objects.bulk_create(ZapisWyników)
czaszapisu=timezone.now()-startime
czaszapisu=str(czaszapisu).split(".")
sekundys = str(czaszapisu[0].rsplit(":")[-1])
setness = czaszapisu[-1]
print("Czas zapisu:" + sekundys + "." + setness)
# NOTE(review): nalezy_do is overwritten every epoch, so it ends up pointing
# at the last epoch only - confirm that is intended.
ustawienia.nalezy_do=wyniki_epoki
wyniki_epoki.save()
print(wyniki_epoki.iteracja)
return wynikFinalny.id | SJaskowski/OE_2 | Main/views.py | views.py | py | 21,028 | python | pl | code | 0 | github-code | 36 | [
{
"api_name": "django.views.generic.TemplateView",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "forms.FormularzPoczatkowy",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 23,
"usage_type": "call"
},
{
... |
43296898564 | from __future__ import with_statement
"""
This file is OBSCURE. Really. The purpose is to avoid copying and changing
'test_c.py' from cffi/c/ in the original CFFI repository:
https://foss.heptapod.net/pypy/cffi/
Adding a test here involves:
1. add a test to cffi/c/test.py
- if you need a C function to call, add it into _cffi_backend.c
as a testfuncNN().
2. have it pass when you run 'py.test test_c.py' in cffi
3. check in and (if you can) push the changes
4. copy test_c.py into _backend_test.py here, killing the few lines of header
- if you added a C function, it goes into _test_lib.c here
- if you could complete step 3, try running 'python2 pytest.py test_file.py' here
5. make the test pass in pypy ('python2 pytest.py test_c.py')
"""
import py, sys, ctypes
from rpython.tool.udir import udir
from pypy.interpreter import gateway
from pypy.module._cffi_backend.moduledef import Module
from pypy.module._cffi_backend.newtype import _clean_cache, UniqueCache
from rpython.translator import cdir
from rpython.translator.platform import host
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from .. import VERSION as TEST_VERSION
# Test class whose test_* methods are grafted on at import time by the module
# code below (generated from _backend_test_c.py). setup_class compiles the C
# helper library and injects the loader/testfunc hooks into the app-level
# namespace via space.appexec.
class AppTestC(object):
"""Populated below, hack hack hack."""
spaceconfig = dict(usemodules=('_cffi_backend', 'cStringIO', 'array'))
def setup_class(cls):
# When running app-direct, require a matching installed _cffi_backend.
if cls.runappdirect:
_cffi_backend = py.test.importorskip('_cffi_backend')
if _cffi_backend.__version__ != TEST_VERSION:
py.test.skip(
"These tests are for cffi version %s, this Python "
"has version %s installed" %
(TEST_VERSION, _cffi_backend.__version__))
testfuncs_w = []
keepalive_funcs = []
UniqueCache.for_testing = True
# App-level helper: resolve a library name to a path and load it.
def find_and_load_library_for_test(space, w_name, w_is_global=None):
if w_is_global is None:
w_is_global = space.wrap(0)
if space.is_w(w_name, space.w_None):
path = None
else:
import ctypes.util
path = ctypes.util.find_library(space.str_w(w_name))
if path is None:
py.test.skip("cannot find library '%s'" % (space.str_w(w_name),))
return space.appexec([space.wrap(path), w_is_global],
"""(path, is_global):
import _cffi_backend
return _cffi_backend.load_library(path, is_global)""")
# Compile _test_lib.c once into a shared library and load it via ctypes.
test_lib_c = tmpdir.join('_test_lib.c')
src_test_lib_c = py.path.local(__file__).dirpath().join('_test_lib.c')
src_test_lib_c.copy(test_lib_c)
eci = ExternalCompilationInfo(include_dirs=[cdir])
test_lib = host.compile([test_lib_c], eci, standalone=False)
cdll = ctypes.CDLL(str(test_lib))
cdll.gettestfunc.restype = ctypes.c_void_p
# App-level helper: return the address of testfuncNN() from the C library.
def testfunc_for_test(space, w_num):
if hasattr(space, 'int_w'):
w_num = space.int_w(w_num)
addr = cdll.gettestfunc(w_num)
return space.wrap(addr)
space = cls.space
# App-direct runs call helpers directly; interpreted runs wrap them.
if cls.runappdirect:
def interp2app(func):
def run(*args):
return func(space, *args)
return run
else:
interp2app = gateway.interp2app
w_func = space.wrap(interp2app(find_and_load_library_for_test))
w_testfunc = space.wrap(interp2app(testfunc_for_test))
# Make _all_test_c importable and patch the hooks into it.
space.appexec([space.wrap(str(tmpdir)), w_func, w_testfunc,
space.wrap(sys.version[:3])],
"""(path, func, testfunc, underlying_version):
import sys
sys.path.append(path)
is_musl = False
import _all_test_c
_all_test_c.PY_DOT_PY = underlying_version
_all_test_c.find_and_load_library = func
_all_test_c._testfunc = testfunc
""")
def teardown_method(self, method):
_clean_cache(self.space)
def teardown_class(cls):
UniqueCache.for_testing = False
# Module-level generator (Python 2 syntax: `print >> f`): scans the copied
# _backend_test_c.py for test function names, writes a _test_c.py with one
# thin wrapper method per test, writes _all_test_c.py containing the real
# test bodies, then pyimports the wrappers and grafts them onto AppTestC.
all_names = ', '.join(Module.interpleveldefs.keys())
backend_test_c = py.path.local(__file__).join('..', '_backend_test_c.py')
lst = []
# Collect the name of every `def test_...():` in the backend test file.
with backend_test_c.open('r') as f:
for line in f:
if line.startswith('def test_'):
line = line[4:]
line = line[:line.index('():')]
lst.append(line)
tmpdir = udir.join('test_c').ensure(dir=1)
tmpname = tmpdir.join('_test_c.py')
# Each generated method just delegates to the same-named _all_test_c function.
with tmpname.open('w') as f:
for func in lst:
print >> f, 'def %s(self):' % (func,)
print >> f, ' import _all_test_c'
print >> f, ' _all_test_c.%s()' % (func,)
tmpname2 = tmpdir.join('_all_test_c.py')
# _all_test_c.py = a small py.test shim + the full backend test source.
with tmpname2.open('w') as f:
print >> f, 'import sys'
print >> f, 'from _cffi_backend import %s' % all_names
print >> f, 'is_musl = False'
print >> f, 'class py:'
print >> f, ' class test:'
print >> f, ' raises = staticmethod(raises)'
print >> f, ' skip = staticmethod(skip)'
print >> f, 'pytest = py.test'
print >> f, backend_test_c.read()
mod = tmpname.pyimport()
# Graft every generated wrapper onto AppTestC so pytest discovers them.
for key, value in mod.__dict__.items():
if key.startswith('test_'):
setattr(AppTestC, key, value)
| mozillazg/pypy | pypy/module/_cffi_backend/test/test_c.py | test_c.py | py | 5,268 | python | en | code | 430 | github-code | 36 | [
{
"api_name": "py.test.importorskip",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "py.test",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "py.test.skip",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "py.test",
"line_nu... |
20456729952 | #!/usr/bin/env python
"""
Entry point for the ledfx controller. To run this script for development
purposes use:
[console_scripts]
python setup.py develop
ledfx
For non-development purposes run:
[console_scripts]
python setup.py install
ledfx
"""
import argparse
import importlib
import logging
import os
import subprocess
import sys
from logging.handlers import RotatingFileHandler
# Optional dependencies: probe for psutil / yappi / pyupdater once at import
# time and record availability flags so features degrade gracefully instead
# of crashing at import.
try:
import psutil
have_psutil = True
except ImportError:
have_psutil = False
try:
import yappi
have_yappi = True
except ImportError:
have_yappi = False
try:
from pyupdater.client import Client
have_updater = True
except ImportError:
have_updater = False
import ledfx.config as config_helpers
from ledfx.consts import (
PROJECT_NAME,
PROJECT_VERSION,
REQUIRED_PYTHON_STRING,
REQUIRED_PYTHON_VERSION,
)
from ledfx.core import LedFxCore
from ledfx.utils import currently_frozen
# Logger Variables
# Custom log level used for pyupdater progress (between WARNING=30, ERROR=40).
PYUPDATERLOGLEVEL = 35
def validate_python() -> None:
    """Exit with status 1 when the interpreter is older than the minimum
    Python version LedFx supports (used for manual, non-packaged runs)."""
    if sys.version_info[:3] >= REQUIRED_PYTHON_VERSION:
        return
    print("Python {} is required.".format(REQUIRED_PYTHON_STRING))
    sys.exit(1)
def reset_logging():
    """Return the logging subsystem to a pristine state.

    Re-enables the root manager, then resets every registered Logger:
    level back to NOTSET, propagation on, filters dropped, and each handler
    flushed, closed (mirroring `logging.shutdown`) and detached.
    """
    root_manager = logging.root.manager
    root_manager.disabled = logging.NOTSET
    for entry in root_manager.loggerDict.values():
        if not isinstance(entry, logging.Logger):
            continue  # skip PlaceHolder entries
        entry.setLevel(logging.NOTSET)
        entry.propagate = True
        entry.disabled = False
        entry.filters.clear()
        for handler in list(entry.handlers):
            # Flush/close mirrors `logging.shutdown`; tolerate dead streams.
            try:
                handler.acquire()
                handler.flush()
                handler.close()
            except (OSError, ValueError):
                pass
            finally:
                handler.release()
            entry.removeHandler(handler)
# Configure logging from scratch: a rotating file handler at INFO in the
# config directory plus a console handler at `loglevel` (default WARNING),
# both attached to the root logger. Also registers the custom "Updater"
# level and rebinds the module-global _LOGGER.
def setup_logging(loglevel, config_dir):
# Create a custom logging level to virtual pyupdater progress
reset_logging()
console_loglevel = loglevel or logging.WARNING
console_logformat = "[%(levelname)-8s] %(name)-30s : %(message)s"
file_loglevel = logging.INFO
file_logformat = "%(asctime)-8s %(name)-30s %(levelname)-8s %(message)s"
root_logger = logging.getLogger()
file_handler = RotatingFileHandler(
config_helpers.get_log_file_location(config_dir),
mode="a", # append
maxBytes=0.5 * 1000 * 1000, # 512kB
encoding="utf8",
backupCount=5, # once it hits 2.5MB total, start removing logs.
)
file_handler.setLevel(file_loglevel) # set loglevel
file_formatter = logging.Formatter(file_logformat)
file_handler.setFormatter(file_formatter)
console_handler = logging.StreamHandler()
console_handler.setLevel(console_loglevel) # set loglevel
console_formatter = logging.Formatter(
console_logformat
) # a simple console format
console_handler.setFormatter(
console_formatter
) # tell the console_handler to use this format
# add the handlers to the root logger
# Root stays at DEBUG; the handlers do the per-destination filtering.
root_logger.setLevel(logging.DEBUG)
root_logger.addHandler(console_handler)
root_logger.addHandler(file_handler)
logging.addLevelName(PYUPDATERLOGLEVEL, "Updater")
# Suppress some of the overly verbose logs
logging.getLogger("sacn").setLevel(logging.WARNING)
logging.getLogger("aiohttp.access").setLevel(logging.WARNING)
logging.getLogger("pyupdater").setLevel(logging.WARNING)
logging.getLogger("zeroconf").setLevel(logging.WARNING)
# Rebind the module-level logger now that handlers exist.
global _LOGGER
_LOGGER = logging.getLogger(__name__)
# Build and evaluate the LedFx command-line interface. Returns the parsed
# argparse.Namespace. Note the defaults: config directory is computed at
# call time via config_helpers (not at import), and loglevel stays None
# unless -v/-vv is given (setup_logging treats None as WARNING).
def parse_args():
parser = argparse.ArgumentParser(
description="A Networked LED Effect Controller"
)
parser.add_argument(
"--version",
action="version",
version=f"ledfx {PROJECT_VERSION}",
)
parser.add_argument(
"-c",
"--config",
dest="config",
help="Directory that contains the configuration files",
default=config_helpers.get_default_config_directory(),
type=str,
)
parser.add_argument(
"--open-ui",
dest="open_ui",
action="store_true",
help="Automatically open the webinterface",
)
# -v / -vv share one dest; the last flag on the command line wins.
parser.add_argument(
"-v",
"--verbose",
dest="loglevel",
help="set loglevel to INFO",
action="store_const",
const=logging.INFO,
)
parser.add_argument(
"-vv",
"--very-verbose",
dest="loglevel",
help="set loglevel to DEBUG",
action="store_const",
const=logging.DEBUG,
)
parser.add_argument(
"-p",
"--port",
dest="port",
help="Web interface port (HTTP)",
default=None,
type=int,
)
# NOTE(review): "-p_s" is an unconventional multi-character short option;
# kept for backward compatibility with existing scripts.
parser.add_argument(
"-p_s",
"--port_secure",
dest="port_s",
help="Web interface port (HTTPS)",
default=None,
type=int,
)
parser.add_argument(
"--host",
dest="host",
help="The address to host LedFx web interface",
default=None,
type=str,
)
parser.add_argument(
"--tray",
dest="tray",
action="store_true",
help="Hide LedFx console to the system tray",
)
parser.add_argument(
"--performance",
dest="performance",
action="store_true",
help="Profile LedFx's performance. A developer can use this to diagnose performance issues.",
)
parser.add_argument(
"--offline",
dest="offline_mode",
action="store_true",
help="Disable automated updates and sentry crash logger",
)
parser.add_argument(
"--sentry-crash-test",
dest="sentry_test",
action="store_true",
help="This crashes LedFx to test the sentry crash logger",
)
return parser.parse_args()
def installed_via_pip():
    """Check to see if LedFx is installed via pip.

    Returns:
        bool: True when `pip freeze` run under the current interpreter lists
        the `ledfx` distribution; False otherwise, including when pip itself
        is not importable.
    """
    if importlib.util.find_spec("pip") is None:
        return False
    pip_package_command = subprocess.check_output(
        [sys.executable, "-m", "pip", "freeze"]
    )
    # `pip freeze` emits one "name==version" entry per line; keep the names.
    installed_packages = {
        entry.decode().split("==")[0] for entry in pip_package_command.split()
    }
    # Direct membership test replaces the original if/return True/False.
    return "ledfx" in installed_packages
# Check for, download and apply a LedFx update via pyupdater. `icon` is an
# optional pystray icon used to surface desktop notifications. On a
# successful download the process restarts via extract_restart() and does
# not return.
def update_ledfx(icon=None):
# initialize & refresh in one update, check client
def notify(msg):
# Desktop notification when the tray supports it; always logged too.
if icon and icon.HAS_NOTIFICATION:
icon.remove_notification()
icon.notify(msg)
_LOGGER.log(PYUPDATERLOGLEVEL, msg)
def log_status_info(info):
# pyupdater progress hook: log download progress at the Updater level.
total = info.get("total")
downloaded = info.get("downloaded")
percent_complete = info.get("percent_complete")
time = info.get("time")
_LOGGER.log(
PYUPDATERLOGLEVEL,
f"{downloaded} of {total} [{percent_complete} complete, {time} remaining]",
)
# Static pyupdater client configuration (signing key, update URL, retries).
class ClientConfig:
PUBLIC_KEY = "Txce3TE9BUixsBtqzDba6V5vBYltt/0pw5oKL8ueCDg"
APP_NAME = PROJECT_NAME
COMPANY_NAME = "LedFx Developers"
HTTP_TIMEOUT = 5
MAX_DOWNLOAD_RETRIES = 2
UPDATE_URLS = ["https://ledfx.app/downloads/"]
client = Client(ClientConfig(), refresh=True)
_LOGGER.log(PYUPDATERLOGLEVEL, "Checking for updates...")
# First we check for updates.
# If an update is found, an update object will be returned
# If no updates are available, None will be returned
ledfx_update = client.update_check(PROJECT_NAME, PROJECT_VERSION)
# Download the update
if ledfx_update is not None:
client.add_progress_hook(log_status_info)
_LOGGER.log(PYUPDATERLOGLEVEL, "Update found!")
notify(
"Downloading update, please wait... LedFx will restart when complete."
)
ledfx_update.download()
# Install and restart
if ledfx_update.is_downloaded():
notify("Download complete. Restarting LedFx...")
ledfx_update.extract_restart()
else:
notify("Unable to download update.")
else:
# No Updates, into main we go
_LOGGER.log(
PYUPDATERLOGLEVEL,
"You're all up to date, enjoy the light show!",
)
def main():
"""Main entry point allowing external calls"""
args = parse_args()
config_helpers.ensure_config_directory(args.config)
setup_logging(args.loglevel, config_dir=args.config)
config_helpers.load_logger()
# Set some process priority optimisations
if have_psutil:
p = psutil.Process(os.getpid())
if psutil.WINDOWS:
try:
p.nice(psutil.HIGH_PRIORITY_CLASS)
except psutil.Error:
_LOGGER.info(
"Unable to set priority, please run as Administrator if you are experiencing frame rate issues"
)
# p.ionice(psutil.IOPRIO_HIGH)
elif psutil.LINUX:
# NOTE(review): nice(15) LOWERS CPU priority on POSIX while the I/O class
# is raised to realtime - confirm that mix is what was intended here.
try:
p.nice(15)
p.ionice(psutil.IOPRIO_CLASS_RT, value=7)
except psutil.Error:
_LOGGER.info(
"Unable to set priority, please run as root or sudo if you are experiencing frame rate issues",
)
else:
p.nice(15)
# Sentry crash reporting only for source checkouts running online.
if (
not (currently_frozen() or installed_via_pip())
and args.offline_mode is False
):
import ledfx.sentry_config # noqa: F401
if args.sentry_test:
"""This will crash LedFx and submit a Sentry error if Sentry is configured"""
# Intentional ZeroDivisionError - do not "fix" this.
_LOGGER.warning("Steering LedFx into a brick wall")
div_by_zero = 1 / 0
if args.tray:
# If pystray is imported on a device that can't display it, it explodes. Catch it
try:
import pystray
except Exception as Error:
msg = f"Error: Unable to virtual tray icon. Shutting down. Error: {Error}"
_LOGGER.critical(msg)
raise Exception(msg)
# NOTE(review): unreachable - the raise above always exits this branch.
sys.exit(0)
from PIL import Image
# Frozen builds bundle tray.png next to the executable; source checkouts
# keep it in the repository's icons/ directory.
if currently_frozen():
current_directory = os.path.dirname(__file__)
icon_location = os.path.join(current_directory, "tray.png")
else:
current_directory = os.path.dirname(__file__)
icon_location = os.path.join(
current_directory, "..", "icons/" "tray.png"
)
icon = pystray.Icon(
"LedFx", icon=Image.open(icon_location), title="LedFx"
)
icon.visible = True
else:
icon = None
# icon = None
# if have_updater and not args.offline_mode and currently_frozen():
# update_ledfx(icon)
# With a tray icon, pystray owns the main loop and calls entry_point.
if icon:
icon.run(setup=entry_point)
else:
entry_point()
def entry_point(icon=None):
    """Run LedFxCore until it exits; exit code 4 requests a restart.

    Args:
        icon: optional pystray icon, stopped once the core loop finishes.

    Fixes over the original: the post-run performance branch now also checks
    `have_yappi` (previously `--performance` without yappi installed raised a
    NameError on `yappi.stop()`), and the "Saved performance data" message
    interpolates the actual dump path (the literal had lost its placeholder).
    """
    # have to re-parse args here :/ no way to pass them through pysicon's setup
    args = parse_args()
    exit_code = 4
    while exit_code == 4:
        _LOGGER.info("LedFx Core is initializing")
        if args.performance and have_yappi:
            print("Collecting performance data...")
            yappi.start()
        ledfx = LedFxCore(
            config_dir=args.config,
            host=args.host,
            port=args.port,
            port_s=args.port_s,
            icon=icon,
        )
        exit_code = ledfx.start(open_ui=args.open_ui)
        if args.performance and have_yappi:
            print("Finished collecting performance data")
            filename = config_helpers.get_profile_dump_location(
                config_dir=args.config
            )
            yappi.stop()
            stats = yappi.get_func_stats()
            yappi.get_thread_stats().print_all()
            stats.save(filename, type="pstat")
            print(f"Saved performance data to config directory: {filename}")
            print(
                "Please send the performance data to a developer : https://ledfx.app/contact/"
            )
    if icon:
        icon.stop()
# Script entry point: propagate LedFx's return code as the process exit code.
if __name__ == "__main__":
sys.exit(main())
| apophisnow/sub-backend | __main__.py | __main__.py | py | 12,252 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.version_info",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "ledfx.consts.REQUIRED_PYTHON_VERSION",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "ledfx.consts.REQUIRED_PYTHON_STRING",
"line_number": 63,
"usage_type": "argume... |
74249580265 | """"
Controls ECS Services
"""
import boto3
import logging
import os
DBTABLEENV = "ECSDYNTABLE"
DBREGION = "ECSDBREGION"
class ecsController:
def __init__(self, region, searchTag):
    """Create a controller bound to one AWS region.

    Args:
        region: AWS region whose ECS clusters will be managed.
        searchTag: tag key (matched case-insensitively) marking services
            that take part in developer-day scheduling.
    """
    self.region = region
    self.client = boto3.client('ecs', region_name=region)
    self.searchTag = searchTag.lower()
    self.logger = logging.getLogger(__name__)
    self.enabledServices = {}
    # DynamoDB table name and region are overridable via the environment.
    environment = os.environ
    self.ecsTable = environment.get(DBTABLEENV, "ecsStatev2")
    self.dbregion = environment.get(DBREGION, "eu-west-2")
"""
Main entry point to be called from ResourceFinder - finds all ECS Services that have a Task running
Returns a MAP[ClusterARN] = [ list of Service ARNS] or []
"""
def findResourcesForECS(self):
    """Entry point used by ResourceFinder.

    Returns a map of cluster ARN -> list of service ARNs (possibly empty)
    covering every ECS service discovered in this region.
    """
    return self.findServices()
"""
Main entry point to signal a STOP of developer day event
The current running ECS services will have their current desired count stored into a database and then their desired count will be set to 0 in the ecs clusters
"""
# Developer-day STOP: snapshot the current desired counts of every running
# service into DynamoDB, then scale every service down to 0 tasks.
# Returns True when nothing was running or every update succeeded.
def stopDayEvent(self):
clusterServiceMap = self.findServices() # Get all ECS running
clusterServiceStateMap = self.getDesiredState(clusterServiceMap) # find current desired Map Levels
if len(clusterServiceStateMap) ==0:
self.logger.info("There are currently no active ECS Services - they all seemed stopped or do not exist")
return True
# clean out what is currenty in the database
clusterStoredMap = self.loadState()
self._deleteState(clusterStoredMap)
self.storeState(clusterServiceStateMap) # store in the db the current levels
# Scale everything to zero; False when any update_service call failed.
result= self.setState(clusterServiceStateMap,overrideDesiredCount=0)
if result:
self.logger.info("All the running services have been updated to have 0 desired state - all stopping")
else:
self.logger.warning("Not all services could have their desired state updated")
return result
"""
Main entry point to signal a START of developer day event
"""
def startDayEvent(self):
    """Developer-day START: restore every service's persisted desired count
    from DynamoDB back onto its ECS cluster.

    Returns True when there was nothing to restore or every update
    succeeded, False otherwise.
    """
    stored = self.loadState()
    if not stored:
        self.logger.info("There was no stored desired Stated for ECS Services in the database - so nothing requires starting")
        return True
    restored = self.setState(stored)
    if restored:
        self.logger.info("All the running services have been updated to their original desired State levels")
    else:
        self.logger.warning("Not all services could have their desired state updated")
    return restored
"""
Checks the SERVICE ARN for the special searchTag - and see if the Tag is set to TRUE
return True or False
"""
def _checkforTag(self, serviceArn):
    """Return True when the service carries the search tag set to 'true'
    (both the key and the value are compared case-insensitively)."""
    response = self.client.list_tags_for_resource(resourceArn=serviceArn)
    for tag in response.get('tags'):
        enabled = tag['value'].lower() == 'true'
        if tag['key'].lower() == self.searchTag and enabled:
            self.logger.info(f"---------- Service {serviceArn} has {self.searchTag} enabled")
            return True
    return False
"""
Gets the current Desired number of Running tasks for a given service List - providing a service as atleast one running task
Pass in a MAP of clusters --> [Service Arns]
Returns a MAP of clusters --- [ [Service Arns,Service Name, desired state]]
"""
# For every cluster, describe its services in batches of 10 (the ECS API
# limit) and record each service that has at least one running task.
# Input: map of cluster ARN -> [service ARNs].
# Output: map of cluster ARN -> [[service ARN, service name, desired count]].
def getDesiredState(self,serviceArnClusterMap):
resultMap = {}
for cluster in serviceArnClusterMap:
serviceArnList = serviceArnClusterMap[cluster]
# NOTE(review): itemCount is never used.
itemCount= len(serviceArnList)
partialList = serviceArnList[0:10]
# need to take the ecs list 10 at a time
while len(partialList) >0:
# remove the current 10 from the list
serviceArnList=serviceArnList[10:]
response = self.client.describe_services(
cluster=cluster,
services=partialList
)
partialList = serviceArnList[0:10]
serviceList = response.get("services" ,[])
for serviceMap in serviceList:
sarn = serviceMap["serviceArn"]
runningCount = serviceMap["runningCount"]
sname = serviceMap["serviceName"]
if runningCount >0:
# Ok there is something running so we had better make a results entry for this cluster
if cluster not in resultMap:
resultMap[cluster] = []
desiredCount = serviceMap["desiredCount"]
self.logger.info(f"Cluster {cluster} - Service Name {sname} ARN {sarn} has Task desired Count {desiredCount} ")
resultMap[cluster].append([sarn,sname, desiredCount])
else:
self.logger.warning(f"Service Name {sname} has no task running - so ignoring")
return resultMap
"""
Delete all records passed from the database
expects a MAP of clusters --- [ [Service Arns, service Name, desired state]]
"""
# Delete every passed record from the DynamoDB state table.
# Expects a map of cluster ARN -> [[service ARN, service name, desired count]].
def _deleteState(self, clusterMap):
client = boto3.client('dynamodb', region_name=self.dbregion)
for cluster in clusterMap:
serviceList = clusterMap[cluster]
for service in serviceList:
sarn = service[0]
sname = service[1]
self.logger.info(f"Clearing out DB Records {sname} --> service arn {sarn}")
# Composite key is (ecsArn, region).
# NOTE(review): verify the 'region' value here matches what storeState
# writes for the same item, or deletes will silently miss.
response = client.delete_item(
TableName=self.ecsTable,
Key={
'ecsArn': {'S': sarn},
'region' : {'S' : self.region}
})
""""
Writes the cluster/service desired state into the dynamoDB table.
expects a MAP of clusters --- [ [Service Arns, service Name, desired state]]
returns True if was all stored ok
"""
def storeState(self, clusterMap):
    """Persist the desired-count snapshot into the DynamoDB state table.

    Expects a map of cluster ARN -> [[service ARN, service name, desired
    count], ...]; each service becomes one item keyed by (ecsArn, region).

    Returns:
        bool: True when every item was written; False when the map is empty
        or any put failed (the failure is logged).

    Bug fix: the 'region' attribute is now written as self.region (the ECS
    region being managed). The original wrote self.dbregion, so whenever the
    DynamoDB region differed from the ECS region, loadState()'s filter on
    self.region never matched and the saved state could not be restored
    (and _deleteState, which keys on self.region, could not remove it).
    """
    if not clusterMap:
        self.logger.warning("There are no ClusterMap is empty - meaning nothing is running - so there is nothing to store")
        return False
    client = boto3.client('dynamodb', region_name=self.dbregion)
    try:
        for cluster, serviceList in clusterMap.items():
            for sarn, sname, desired in serviceList:
                client.put_item(
                    TableName=self.ecsTable,
                    Item={
                        'ecsArn': {'S': sarn},
                        'region': {'S': self.region},
                        'servicename': {'S': sname},
                        'clusterArn': {'S': cluster},
                        'desiredCount': {'N': f'{desired}'}
                    })
    except Exception as e:
        self.logger.exception(f"Failed to store state inside dynamodb. \nPayload was {clusterMap}")
        self.logger.exception(e)
        return False
    return True
"""
Loads the stored state relating to cluster --> Service desired count numbers
Returns: a MAP of clusters --- [ [Service Arns, service Name, desired state]]
"""
# Load the stored desired-count snapshot for this controller's region.
# Returns a map of cluster ARN -> [[service ARN, service name, desired count]].
def loadState(self):
client = boto3.client('dynamodb', region_name=self.dbregion)
# NOTE(review): a single scan page with Limit=999 - no pagination; state
# sets larger than one page (or 999 items) would be silently truncated.
response = client.scan(
TableName=self.ecsTable,
AttributesToGet=[
'ecsArn','region', 'servicename', 'clusterArn' , 'desiredCount'
],
Limit=999)
items = response.get("Items",[])
clusterMap = {}
for item in items:
region = item['region']['S']
clusterARN = item["clusterArn"]['S']
serviceArn = item["ecsArn"]['S']
sname = item["servicename"]['S']
# Only restore items recorded for the region this controller manages.
if region == self.region:
desiredCount = int(item["desiredCount"]['N'])
self.logger.info(f"restoring state: cluster Arn: {clusterARN}, Service Name {sname}--> desired Count {desiredCount}")
if clusterARN not in clusterMap:
clusterMap[clusterARN] = []
clusterMap[clusterARN].append([serviceArn, sname, desiredCount])
else:
self.logger.info(f"loading data Ignoring region {region} ")
return clusterMap
"""
Takes a cluster map and sets the desired count for all the services to the value in the map
This will cause a scaling value
There is an override value that will set all listed serviced to the override value. used if to set them all to zero
The clustermap has format: a MAP of clusters --- [ [Service Arns, service Name, desired state]]
Returns Boolean if all changes were made ok to the all the services in the cluster map.
"""
def setState(self,clusterMap, overrideDesiredCount=None):
passed = True
self.logger.info(f"----------Setting the State of the cluster map: overrideDesiredCount {overrideDesiredCount}---------- \n {clusterMap} \n------------------------")
for cluster in clusterMap:
self.logger.info(f"Setting state for cluster {cluster}")
serviceList = clusterMap[cluster]
for service in serviceList:
serArn = "none"
try:
serArn = service[0]
sername = service[1]
if overrideDesiredCount is None:
desiredCount = service[2]
else:
desiredCount = overrideDesiredCount
self.logger.info(f"Setting desired count level to {desiredCount} for cluster {cluster}, service Name {sername}")
response = self.client.update_service(
cluster=cluster,
service=serArn,
desiredCount=desiredCount)
except Exception as e:
self.logger.error(f"Failed to set the service ARN {serArn} for cluster {cluster} to the desired state level")
self.logger.exception(e)
passed = False
return passed
"""
Get the Services that have an enabled DEVDAY TAG. This service will search across all clusters in the given region
Returns a MAP[ClusterARN] = [ list of Service ARNS] or []
"""
def findServices(self):
clusterList = []
try:
response = self.client.list_clusters()
nextToken = "A"
# iterate through the clusters with pagination - just incase there are many
while nextToken is not None:
nextToken = response.get(nextToken)
self.logger.debug(f"cluster reponse> {response}")
cl = response.get('clusterArns',[])
clusterList = clusterList + cl
if nextToken is not None:
response = self.client.list_clusters(nextToken=nextToken)
except Exception as e:
self.logger.warning(f"could not access the clusters for region {self.region}")
self.enabledServices={}
return self.enabledServices
# for each cluster get the list of the services
for cluster in clusterList:
self.logger.info(f"Examining ECS Cluster {cluster}")
self.enabledServices[cluster]=[]
response = self.client.list_services(
cluster=cluster,
maxResults=100
)
serviceArnList = response.get("serviceArns")
for serviceArn in serviceArnList:
self.logger.info(f"----- Looking at Service {serviceArn}")
if serviceArn is not None and self._checkforTag(serviceArn):
self.enabledServices[cluster].append(serviceArn)
return self.enabledServices
| evoraglobal/SleepSaver | ecsController.py | ecsController.py | py | 12,645 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "boto3.client",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "boto3.client",
"li... |
20976916712 | import datetime
import psycopg2
from psycopg2 import Error
connection = None
cursor = None
def romanToInt(s):
    """
    Convert a Roman numeral string to its integer value.

    Handles the six subtractive pairs (IV, IX, XL, XC, CD, CM) by trying a
    two-character lookup before falling back to a single character.

    :type s: str
    :rtype: int
    """
    values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000,
              'IV': 4, 'IX': 9, 'XL': 40, 'XC': 90, 'CD': 400, 'CM': 900}
    total = 0
    pos = 0
    while pos < len(s):
        pair = s[pos:pos + 2]
        if len(pair) == 2 and pair in values:
            total += values[pair]
            pos += 2
        else:
            total += values[s[pos]]
            pos += 1
    return total
try:
    # Connect to an existing database
    connection = psycopg2.connect(user="postgres",
                                  password="root",
                                  host="127.0.0.1",
                                  port="5432",
                                  database="xmldb")
    # Create a cursor to perform database operations
    cursor = connection.cursor()
    # Read the column metadata of the cause_list table.
    cursor.execute(
        "SELECT * FROM information_schema.columns WHERE table_schema = 'public' AND table_name = 'cause_list';")
    # BUG FIX: fetch the SELECT's rows *before* executing another statement.
    # Previously fetchall() ran after the INSERT below, which has no result
    # set, so psycopg2 raised "no results to fetch".
    record = cursor.fetchall()
    cursor.execute("INSERT INTO public.cause_list (causelist_date, court_no, cino, ctype, case_no, cause_reg_year, reg_dt, causelist_type, sr_no, purpose_priority, case_remark) values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)", (datetime.date(2023, 5, 1), 0, 'ODHC0111222023', 1, 123, 2050, '2022-11-24', 1, 110011, 1, 'case_remark'))
    # BUG FIX: commit the INSERT; psycopg2 opens an implicit transaction and
    # the new row would otherwise be rolled back when the connection closes.
    connection.commit()
    # Column names are at index 3 of each information_schema.columns row.
    arrColumns = []
    for r in record:
        arrColumns.append(r[3])
    print(arrColumns)
except (Exception, Error) as error:
    print("Error while connecting to PostgreSQL", error)
finally:
    # cursor is only created after a successful connect, so guarding on
    # connection is sufficient here.
    if connection:
        cursor.close()
        connection.close()
        print("PostgreSQL connection is closed")
| kiranfreelancer87/AWARDL_BACKEND | PostgretoMySql.py | PostgretoMySql.py | py | 1,875 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "psycopg2.connect",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "psycopg2.Error",
"line_number": 54,
"usage_type": "name"
}
] |
29124638883 | from select import select
import sqlite3
from sqlite3 import Error
def create_connection(db_file):
    """Open a connection to the SQLite database at *db_file*.

    :param db_file: database file path (or ":memory:")
    :return: sqlite3.Connection on success, None on failure
    """
    try:
        return sqlite3.connect(db_file)
    except Error as e:
        # Report and fall through: callers test the result for None.
        print(e)
        return None
def update(conn, contacts, last_name, **kwargs):
    """Update columns of rows in table *contacts* matching *last_name*.

    :param conn: sqlite3 connection
    :param contacts: table name (interpolated into the SQL)
    :param last_name: value matched against the last_name column
    :param kwargs: column=value pairs to set
    """
    assignments = ", ".join(f"{column} = ?" for column in kwargs)
    bind_values = tuple(kwargs.values()) + (last_name,)
    sql = f''' UPDATE {contacts}
              SET {assignments}
              WHERE last_name = ?'''
    try:
        cursor = conn.cursor()
        cursor.execute(sql, bind_values)
        conn.commit()
        print("OK")
    except sqlite3.OperationalError as e:
        # e.g. missing table or bad column name in kwargs
        print(e)
def delete_where(conn, contacts, **kwargs):
    """Delete rows from table *contacts* matching ALL given column values.

    :param conn: Connection to the SQLite database
    :param contacts: table name (interpolated into the SQL)
    :param kwargs: column=value pairs ANDed together in the WHERE clause
    """
    conditions = " AND ".join(f"{column}=?" for column in kwargs)
    bind_values = tuple(kwargs.values())
    sql = f'DELETE FROM {contacts} WHERE {conditions}'
    conn.cursor().execute(sql, bind_values)
    conn.commit()
    print("Deleted")
def execute_sql(conn, sql):
    """Execute a single SQL statement, printing (not raising) any sqlite error.

    :param conn: Connection object
    :param sql: a SQL statement string
    """
    try:
        conn.cursor().execute(sql)
    except Error as e:
        print(e)
if __name__ == '__main__':
    # BUG FIX: a bare create_connection("database.db") call used to run here,
    # opening a connection whose handle was immediately discarded and never
    # closed (resource leak).  The connection opened below is sufficient.
    create_contacts_sql = """
   --contacts table
   CREATE TABLE IF NOT EXISTS contacts(
       id INTEGER PRIMARY KEY,
       first_name TEXT NOT NULL,
       last_name TEXT NOT NULL,
       email TEXT NOT NULL UNIQUE,
       phone TEXT NOT NULL UNIQUE
   );
   """
    db_file = "database.db"
    conn = create_connection(db_file)
    if conn is not None:
        execute_sql(conn, create_contacts_sql)
        # Close inside the guard: conn is None when the connect failed, and
        # the old unguarded close would have raised AttributeError.
        conn.close()
def add_contact(conn, contact):
    """Insert (or replace, on email/phone conflict) a row in contacts.

    :param conn: sqlite3 connection
    :param contact: (first_name, last_name, email, phone) tuple
    :return: rowid of the inserted row
    """
    insert_sql = '''INSERT OR REPLACE INTO contacts(first_name, last_name, email, phone) VALUES(?,?,?,?)'''
    cursor = conn.cursor()
    cursor.execute(insert_sql, contact)
    conn.commit()
    return cursor.lastrowid
conn=create_connection("database.db")
contact=("Jakub","Pazderski","jak.pazderski@gmail.com,","573313216")
cont_id=add_contact(conn,contact)
add_contact(conn,contact)
contact=("Bogdan","Kowalski","b.kow@wp.pl","764896320")
add_contact(conn,contact)
contact=("Adam","Nowak","adam.n@o2.pl","856342890")
add_contact(conn,contact)
contact=("Andrzej","Janda","janda89@gmail.com","554786901")
add_contact(conn,contact)
contact=("Tymon","Kałużny","tym.kalu@interia.pl","857420709")
add_contact(conn,contact)
def select_by_name(conn, first_name):
    """Return all contact rows whose first_name equals *first_name*.

    :param conn: the Connection object
    :param first_name: value matched against the first_name column
    :return: list of matching row tuples
    """
    cursor = conn.execute("SELECT * FROM contacts WHERE first_name=?", (first_name,))
    matches = cursor.fetchall()
    conn.commit()
    return matches
| JakubPazderski/6-subject-1-task | 6-subject-1-task.py | 6-subject-1-task.py | py | 3,331 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sqlite3.Error",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "sqlite3.OperationalError",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.E... |
20070029006 | from itertools import compress
def bool_code(number):
    """Return the binary digits of *number* as booleans, least significant bit
    first (an empty list for 0)."""
    bits = []
    while number:
        bits.append(number & 1 == 1)
        number >>= 1
    return bits
def bit_not(n, num_bits):
    """Bitwise NOT of *n* restricted to a field of *num_bits* bits."""
    mask = (1 << num_bits) - 1
    return mask - n
def translate_code(items, mask):
    """Join with spaces the elements of *items* selected by the set bits of
    the integer *mask* (bit i selects items[i])."""
    selected = compress(items, bool_code(mask))
    return ' '.join(selected)
def print_closed_frequent_itemsets(closed_frequent_itemsets, l1):
    """Print every closed frequent itemset (decoded through the item list
    *l1*) together with its support count."""
    for code, support in closed_frequent_itemsets.items():
        print(translate_code(l1, code), ' #SUP:', support)
def bit_map_code(transaction, f_1, l_1):
    """Encode *transaction* as a bitmap over the global item ordering *l_1*.

    Updates, in place, the item frequency map *f_1* and appends any new items
    to *l_1*; the bit for an item is 2**(its index in l_1).

    :return: (bitmap, f_1, l_1)
    """
    bitmap = 0
    for item in transaction:
        if item not in f_1:
            # First sighting: register the item and fix its bit position.
            f_1[item] = 0
            l_1.append(item)
        f_1[item] += 1
        bitmap += 2 ** l_1.index(item)
    return bitmap, f_1, l_1
| mghorbani2357/TT-Miner-Topology-Transaction-Miner-for-Mining-Closed-Itemset | utils/tools.py | tools.py | py | 897 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "itertools.compress",
"line_number": 20,
"usage_type": "call"
}
] |
1917766531 | import numpy as np
import matplotlib.pyplot as plt
def load_bin_file(samplerate=2e6, type="complex", bfile="../data/file_source_test", plot=False, start_us=0, end_us=0):
    """Load interleaved float32 samples from a GNU Radio binary file sink.

    :param samplerate: sample rate in Hz, used to build the time axis for plotting
    :param type: "complex" (even indices = I, odd = Q) or "real"
               (name shadows the builtin but is kept for caller compatibility)
    :param bfile: path of the binary file to read
    :param plot: when True, plot |data| between start_us and end_us
    :param start_us: plot window start, in microseconds
    :param end_us: plot window end in microseconds; 0 means "until the end"
    :return: 1-D numpy array of float32 (real) or complex samples
    :raises ValueError: if *type* is neither "complex" nor "real"
    """
    if type not in ["complex", "real"]:
        # BUG FIX: raise instead of print+exit() so library callers can
        # handle the error; exit() killed the whole interpreter.
        raise ValueError("data type must be complex or real.")
    with open(bfile, "rb") as f:
        data = np.fromfile(f, dtype=np.float32)
    if type == "complex":
        # Interleaved I/Q: even indices are real parts, odd are imaginary.
        data = data[::2] + 1j * data[1:][::2]
    if plot:
        # Only build the time axis / window indices when actually plotting,
        # so a bad window or empty file cannot break a plain load.
        acq_time_us = np.linspace(1, len(data), len(data)) / samplerate * 1e6
        start_index = np.where(acq_time_us > start_us)[0][0]
        if end_us == 0:
            end_index = len(acq_time_us) - 1
        else:
            end_index = np.where(acq_time_us > end_us)[0][0]
        plt.plot(acq_time_us[start_index:end_index], abs(data[start_index:end_index]), color="k")
        plt.grid()
        plt.ylabel("Amplitude")
        plt.xlabel("Time(us)")
        plt.show()
    return data
if __name__ == "__main__":
data = load_bin_file(plot=True, start_us=26000, end_us=43000)
| HelloKevin07/RAScatter | reader/gr-rfid/misc/code/plot_signal.py | plot_signal.py | py | 1,008 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.fromfile",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "numpy.linspace",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"lin... |
18903346352 | from pathlib import Path
from logging import getLogger
from pypairtree.utils import identifier_to_path
from uchicagoldrtoolsuite import log_aware
from uchicagoldrtoolsuite.core.lib.convenience import log_init_attempt, \
log_init_success
from .abc.materialsuiteserializationreader import \
MaterialSuiteSerializationReader
from ..ldritems.ldrpath import LDRPath
__author__ = "Brian Balsamo"
__email__ = "balsamo@uchicago.edu"
__company__ = "The University of Chicago Library"
__copyright__ = "Copyright University of Chicago, 2016"
__publication__ = ""
__version__ = "0.0.1dev"
log = getLogger(__name__)
class FileSystemMaterialSuiteReader(MaterialSuiteSerializationReader):
"""
The packager for pairtree based MaterialSuite serializations
Given the path where the MaterialSuite is stored, the identifier, and the
pairtree encapsulation string, packages a MaterialSuite
"""
@log_aware(log)
def __init__(self, root, target_identifier, **kwargs):
"""
Create a new FileSystemMaterialSuiteReader
__Args__
1. root_path (str): The path to the location where the MaterialSuite
is stored
2. identifier (str): The identifier of the MaterialSuite
__KWArgs__ (That this reader looks for)
* encapsulation (str): The pairtree encapsulation utilized by the
serializer. Defaults to arf.
"""
log_init_attempt(self, log, locals())
super().__init__(root, target_identifier)
self.encapsulation = kwargs.get('encapsulation', 'arf')
self.path = Path(self.root, identifier_to_path(self.target_identifier),
self.encapsulation)
log_init_success(self, log)
@log_aware(log)
def get_content(self):
log.debug('Searching for content')
p = Path(self.path, 'content.file')
if p.is_file():
log.debug("content located")
return LDRPath(str(p))
log.debug("Content not found")
@log_aware(log)
def get_premis(self):
p = Path(self.path, 'premis.xml')
log.debug("Searching for PREMIS @ {}".format(str(p)))
if p.is_file():
log.debug("PREMIS located")
return LDRPath(str(p))
log.warn(
"Premis not found for materialsuite @ {}".format(
self.target_identifier
)
)
| uchicago-library/uchicagoldr-toolsuite | uchicagoldrtoolsuite/bit_level/lib/readers/filesystemmaterialsuitereader.py | filesystemmaterialsuitereader.py | py | 2,400 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "abc.materialsuiteserializationreader.MaterialSuiteSerializationReader",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "uchicagoldrtoolsuite.core.lib.convenience.log_init_attemp... |
5222380259 | # -*- coding: utf-8 -*-
"""
@author: 葛怡梦
@Remark: 人脸识别
@inset: 陈佳婧
"""
import os
import numpy as np
import cv2
import face_recognition
from dvadmin.utils.mail import send_email_demo
from django.conf import settings
# Threshold = 0.65 # 人脸置信度阈值
'''
功能:计算两张图片的相似度,范围:[0,1]
输入:
1)人脸A的特征向量
2)人脸B的特征向量
输出:
1)sim:AB的相似度
'''
def simcos(A, B):
A = np.array(A)
B = np.array(B)
dist = np.linalg.norm(A - B) # 二范数
sim = 1.0 / (1.0 + dist) #
return sim
'''
功能:
输入:
1)x:人脸库向量(n维)
2)y:被测人脸的特征向量(1维)
输出:
1)match:与人脸库匹配列表,如[False,True,True,False]
表示被测人脸y与人脸库x的第2,3张图片匹配,与1,4不匹配
2)max(ressim):最大相似度
'''
def compare_faces(x, y, Threshold):
ressim = []
match = [False]*len(x)
for fet in x:
sim = simcos(fet, y)
ressim.append(sim)
print(ressim)
print(max(ressim))
if max(ressim) > Threshold: # 置信度阈值
match[ressim.index(max(ressim))] = True
print('complete compare')
return match, max(ressim)
def initialize_npy(path):
'''
# 列表npy文件初始化
'''
# 判断是否存在,不存在就创建,并初始化
if not os.path.exists(path):
# 保存
known_face_file = []
known_face_file = np.array(known_face_file)
np.save(path, known_face_file)
def known_face_save(path, new_info):
# 先存储特征向量
# 读取原有列表
known_face_file = np.load(path, allow_pickle=True)
known_face_file = known_face_file.tolist()
# 改变列表
known_face_file.append(new_info)
# 重新储存npy文件
known_face_file = np.array(known_face_file)
np.save(path, known_face_file)
def img_adress(image_url):
# 处理图片地址
image_url1 = image_url[21:]
BASE_DIR = str(settings.BASE_DIR).replace("\\","/")
image_path = BASE_DIR + image_url1
return image_path
'''
注册身份
输入:
1)libpath:人脸库地址
输出:
1)known_face_encodings:人脸库特征向量
2)known_face_names:人脸库名字标签
'''
def registeredIdentity(image_url, name, course):
'''
先运行一遍 初始化npy文件 不用写进程序里
'''
image_path = img_adress(image_url)
image = face_recognition.load_image_file(image_path)
# 读出照片
face_locations = face_recognition.face_locations(image)
# 得到特征向量
face_encoding = face_recognition.face_encodings(image, face_locations)[0]
# face_encoding = face_recognition.face_encodings(image, face_locations)
# 写入对应课程文件
# 路径及文件名
known_face_path = str(settings.BASE_DIR).replace("\\","/") + '/identity/known_face/'
course_encodings = 'known_face_encodings_' + course + '.npy'
course_names = 'known_face_names_' + course + '.npy'
# 得到文件最终的路径
path_encodings = known_face_path + course_encodings
path_names = known_face_path + course_names
# 初始化
initialize_npy(path_encodings)
initialize_npy(path_names)
# 存储
known_face_save(path_encodings, face_encoding)
known_face_save(path_names, name)
print ('complete register')
'''
输入:
1)testimg:测试图片
2)known_face_encodings:人脸库特征向量
3)known_face_names:人脸库名字标签
输出:
1)name_list:预测的名字
2)score_list:相似度得分
3)face_locations:人脸位置坐标
'''
def identityRecognition(testimg, course, Threshold):
##########
# 地址
known_face_path = str(settings.BASE_DIR).replace("\\","/") + '/identity/known_face/'
course_encodings = known_face_path + 'known_face_encodings_' + course + '.npy'
course_names = known_face_path + 'known_face_names_' + course + '.npy'
# 读取列表
known_face_encodings = np.load(course_encodings, allow_pickle=True)
known_face_encodings = known_face_encodings.tolist()
known_face_names = np.load(course_names, allow_pickle=True)
known_face_names = known_face_names.tolist()
############
face_locations = face_recognition.face_locations(testimg)
# face_locations = face_recognition.face_locations(testimg, model="cnn")
face_encodings = face_recognition.face_encodings(testimg, face_locations)
# print(face_encodings)
faceNum = len(face_locations)
name_list, score_list = [], []
retname, retscore = "Noface", 0
for face_encoding in face_encodings:
matches, score = compare_faces(known_face_encodings, face_encoding, Threshold)
retname, retscore = "Unknow", 0
if True in matches:
first_match_index = matches.index(True)
name = known_face_names[first_match_index]
if score > retscore:
retname = name
retscore = score
name_list.append(retname)
score_list.append(retscore)
print('complete recongnize')
return name_list, score_list, face_locations, faceNum
'''
输入:
1)img:未裁剪图片
2)face_locations:人脸位置坐标
3) name_list:预测的名字
输出:
img:加框加年龄备注之后的画面
'''
def name_show(img, face_locations, name_list, score_list):
index = 0
for face_location in face_locations:
y0 = face_location[0]
x1 = face_location[1]
y1 = face_location[2]
x0 = face_location[3]
cv2.rectangle(img, (x0, y0), (x1, y1), (0, 0, 255), 2)
info = str(name_list[index])
print('识别为:%s\n相似度:%2f' % (str(name_list[index]), score_list[index]))
index = index + 1
t_size = cv2.getTextSize(str(info), cv2.FONT_HERSHEY_PLAIN, 1, 2)[0]
x2, y2 = x0 + t_size[0] + 3, y0 + t_size[1] + 4
cv2.rectangle(img, (x0, y0), (x2, y2), (0, 0, 255), -1) # -1填充作为文字框底色
cv2.putText(img, info, (x0, y0 + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 1)
return img
'''
功能:统计、校验名单
输入:
1)需到课的名单
2)检测到的名单
输出:
1)到课名单
2)缺课名单
'''
def statistic_name(faceNum, name_list, absence_list):
print("到课人数:", faceNum)
print("到课名单:", name_list)
print("缺课名单:", absence_list)
'''
功能:识别image身份
输入:
1)image:被测图片
2)libpath:人脸库地址
3)save_dir:图片保存地址
4)Threshold:人脸相似度阈值,Threshold越高识别越精准,但是检出率越低
'''
def pic(image_url, course, name_original, Threshold=0.68):
BASE_DIR = str(settings.BASE_DIR)
BASE_DIR = BASE_DIR.replace("\\","/")
image_url1 = image_url[21:]
imagepath = BASE_DIR + image_url1
save_dir = BASE_DIR + '/identity/' + course + '.jpg'
# 测试完记得改
name_original = name_original
print ("课程名单:", name_original)
image_original = cv2.imread(imagepath)
name_list, score_list, face_locations, faceNum = identityRecognition(image_original, course, Threshold=Threshold)
image = name_show(image_original, face_locations, name_list, score_list)
absence_list = set(name_original)-set(name_list)
statistic_name(faceNum, name_list, absence_list)
cv2.imwrite(save_dir, image)
return faceNum, ",".join(absence_list), absence_list
# if __name__ == '__main__':
# name_list = ['ROSE','JISOO','LISA','JENNIE']
# pic(imagepath = 'E:/face/bp3.jpg', libpath='E:/face/facelib/', save_dir='E:/face/tt_bp.jpg', name_original=name_list)
| Applied-Energetic/Intelligent-classroom-management-system | django-vue-admin-main/backend/dvadmin/utils/face_identification2.py | face_identification2.py | py | 7,218 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_nu... |
75174330665 | from math import hypot, pi, cos, sin
from PIL import Image
import numpy as np
import cv2
def hough(image, theta_x=600, rho_y=600):
"Calculate Hough transform."
print(image.shape)
height, width = image.shape
rho_y = int(rho_y/2)*2 #Make sure that this is even
him = np.zeros((theta_x, rho_y))
rmax = hypot(width, height)
dr = rmax / (rho_y/2)
dth = pi / theta_x
frame = 0
spincnt = 0
modval = 50
fast = False
print("drho", dr, "dtheta", dth)
for x in range(height):
for y in range(width):
col = image[x, y]
# set up a frame for drawing on.
frame += 1
imagename = "img{:07}.png".format(frame)
new = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
if col == 255:
if frame % modval * 3 == 0 and not fast:
new = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
cv2.circle(new,(y, x), 3, (0,255,0), -1)
new = cv2.resize(new, (600,600))
a = np.expand_dims(him, axis = 2)
newhough = np.concatenate((a,a,a), axis = 2)
vis = np.concatenate((new, newhough), axis=1)
cv2.imwrite(imagename, vis)
continue
if fast:
if frame % (modval/5) == 0:
cv2.circle(new,(y, x), 3, (255,0, 0), -1)
new = cv2.resize(new, (600,600))
a = np.expand_dims(him, axis = 2)
newhough = np.concatenate((a,a,a), axis = 2)
vis = np.concatenate((new, newhough), axis=1)
cv2.imwrite(imagename, vis)
for tx in range(theta_x):
th = dth * tx
r = x*cos(th) + y*sin(th)
x1,y1,x2,y2 = findline(image, y,x, th)
#
#cv2.waitKey(5)
iry = rho_y/2 + int(r/dr+0.5)
him[int(iry),int(tx)] += 5
frame += 1
if (spincnt == 0 and frame % 3 == 0) or (spincnt > 1 and frame % modval == 0):
if spincnt > 20:
fast = True
continue
imagename = "img{:07}.png".format(frame)
new = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
cv2.circle(new,(y, x), 3, (255,0, 0), -1)
cv2.line(new, (x1, y1), (x2, y2), (255, 0, 0), 1)
a = np.expand_dims(him, axis = 2)
newhough = np.concatenate((a,a,a), axis = 2)
new = cv2.resize(new, (600,600))
cv2.circle(newhough, (int(tx),int(iry)), 3, (0,0,255), -1)
vis = np.concatenate((new, newhough), axis=1)
cv2.imwrite(imagename, vis)
print(imagename)
spincnt += 1
#exit()
return him
def findline(image, x,y, th):
x2 = int(x + (300 * cos(th)))
y2 = int(y + (300 * sin(th)))
x1 = x + (x - x2)
y1 = y + (y - y2)
return x1,y1,x2,y2
def test():
"Test Hough transform with pentagon."
im = cv2.imread("pentagon2.png", 0)
him = hough(im)
cv2.imwrite("houghspace.png", him)
if __name__ == "__main__": test()
| squeakus/bitsandbytes | opencv/hough_transform.py | hough_transform.py | py | 3,381 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "math.hypot",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "cv2.cvtColor",
"line_number": 27,
... |
7396903634 | """
Based on:
https://devcenter.heroku.com/articles/getting-started-with-django
"""
from settings import *
import os
# Production overrides: debug off, everything sensitive pulled from the
# environment (Heroku-style deployment).
DEBUG = False
# Parse database configuration from $DATABASE_URL
import dj_database_url
DATABASES = {'default': dj_database_url.config() }
WSGI_APPLICATION = 'pdxtrees.wsgi_deploy.application'
# Secrets come from environment variables, never from source control.
SECRET_KEY = os.environ.get('SECRET_KEY')
AWS_S3_ACCESS_KEY_ID = os.environ.get('AWS_S3_ACCESS_KEY_ID')
AWS_S3_SECRET_ACCESS_KEY = os.environ.get('AWS_S3_SECRET_ACCESS_KEY')
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
# NOTE(review): '*' disables Django's Host-header validation; presumably the
# platform router filters hosts — confirm before keeping this in production.
ALLOWED_HOSTS = ['*']
# Static asset configuration
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = 'static'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
) | mattblair/pdx-trees-django | pdxtrees/settings_production.py | settings_production.py | py | 860 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dj_database_url.config",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get"... |
7450851742 | from django.conf import settings
from django.conf.urls import url, include
from django.contrib import admin
# Project URL routing: admin, the per-app includes, and a homepage catch-all.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^user_profile/', include('user_profile.urls')),
    url(r'^wd2csv/', include('wd2csv.urls')),
    url(r'', include('homepage.urls')),  # empty pattern matches everything: keep last
]
if settings.DEBUG:
    # django-debug-toolbar is only wired up (and imported) in development.
    import debug_toolbar
    urlpatterns = [
        url(r'^__debug__/', include(debug_toolbar.urls)),
    ] + urlpatterns
| Ash-Crow/ash-django | ash-django_app/urls.py | urls.py | py | 456 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": ... |
16703859938 | # Copyright 2020 University of Basel, Center for medical Image Analysis and Navigation
#
# Licensed under the Apache License, Version 2.0 (the “License”);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an “AS IS” BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
default_dataset = './dataset/warp_set/'
def main(config):
    """Dispatch to training or evaluation based on the parsed CLI config.

    config.mode selects training ('train') vs. evaluation; during
    evaluation, config.dataset selects which evaluation Solver is imported.
    """
    dataset = config.dataset
    mode=config.mode
    print(mode)
    if mode=='train':
        # Training always uses the DeScarGAN model's Solver.
        from model.DeScarGAN import Solver
        solver = Solver(config.dataset_path, config.dataset)
        solver.train()
    else:
        # Evaluation: each dataset has its own Solver implementation.
        if dataset == 'Synthetic':
            from Evaluation.Evaluation_Synthetic_Dataset import Solver
        else:
            from Evaluation.Evaluation_Chexpert import Solver
        # Evaluation Solvers take the saved-network folder, not the dataset name.
        solver = Solver(config.dataset_path, config.choose_net)
        solver.test()
if __name__ == '__main__':
    # Command-line interface for training/evaluating DeScarGAN.
    parser = argparse.ArgumentParser()
    #
    # # Training configuration.
    # NOTE(review): the default dataset_path is a developer-specific absolute
    # path; pass --dataset_path explicitly on other machines.
    parser.add_argument('--dataset_path', type=str,
                        default='/home/juliawolleb/PycharmProjects/Python_Tutorials/Reversible/Chexpert/2classes_effusion')
    # default='/home/juliawolleb/PycharmProjects/Python_Tutorials/warp/warp_set')
    parser.add_argument('--dataset', type=str, default='Chexpert')
    parser.add_argument('--mode',type=str, default='train')
    parser.add_argument('--choose_net',type=str, default='./save_nets')
    #
    #
    #
    config = parser.parse_args()
    print(config)
    main(config)
#
| JuliaWolleb/DeScarGAN | main.py | main.py | py | 1,954 | python | en | code | 31 | github-code | 36 | [
{
"api_name": "model.DeScarGAN.Solver",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "Evaluation.Evaluation_Chexpert.Solver",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 41,
"usage_type": "call"
}
] |
21008760995 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess as sp
from pwn import *
import re
import sys
import os
import tempfile
import argparse
def to_8bit(d9):
    """Repack a stream of 9-bit values (one per input character, padded to a
    whole number of 9-bit groups) and strip trailing NULs.

    Python 2 code: characters map to ord() values 0-255 on input.
    """
    bits = ''.join(bin(ord(ch))[2:].rjust(8, '0') for ch in d9)
    log.debug(bits)
    out = []
    for i in range(0, len(bits), 9):
        chunk = bits[i:i + 9].ljust(9, '0')
        log.debug('%s : %s' % (chunk, hex(int(chunk, 2))))
        out.append(chr(int(chunk, 2)))
    return ''.join(out).strip('\x00')
def dump_io(pcap, cid, split=False):
    """Extract TCP conversation *cid* from *pcap* using tshark follow,tcp,raw.

    In tshark's raw follow output, server->client payload lines are prefixed
    with a TAB; those are tagged 'out: ', the rest 'in : '.  With split=True a
    list of hex-decoded strings is returned; otherwise every payload is also
    run through to_8bit() (9-bit repacking) and a single newline-joined string
    is returned.  Returns None when the conversation has no payload.
    (Python 2 code: str.decode('hex') is used.)
    """
    fol = []
    p = sp.Popen('tshark -r %s -z follow,tcp,raw,%d' % (pcap, cid), shell=True, stdout=sp.PIPE)
    o = p.stdout.read()
    # Keep only the section between the '====' separators, then drop the
    # 4-line header tshark prints before the payload lines.
    o = o[o.find('========'):].strip('=\n')
    o = '\n'.join(o.split('\n')[4:]).strip()
    if len(o) == 0:
        return None
    if split:
        for l in o.split('\n'):
            if re.search('^\t', l):
                fol.append('out: ' + l[1:].decode('hex'))
            else:
                fol.append('in : ' + l.decode('hex'))
        return fol
    else:
        for l in o.split('\n'):
            if re.search('^\t', l):
                log.debug((l[1:]))
                fol.append('out: ' + to_8bit(l[1:].decode('hex')))
            else:
                fol.append('in : ' + to_8bit(l.decode('hex')))
        result = ''
        for _ in fol:
            result += _ + '\n'
        return result
def dump(pcap, cid):
    """Print conversation *cid* of *pcap* as 8-bit data, or every
    conversation when *cid* is None."""
    if cid is not None:
        result = dump_io(pcap, cid)
        if result:
            sys.stdout.write(result)
        else:
            log.warning('No data')
        return
    # No id given: count the TCP conversations and dump them all.
    p = sp.Popen('tshark -r %s -z conv,tcp | grep "<->" | wc -l' % pcap, shell=True, stdout=sp.PIPE)
    count = int(p.stdout.read())
    log.debug('tcp conversion: %d' % count)
    for i in xrange(count):
        log.info('%s conversation %d' % (pcap, i))
        log.info(dump_io(pcap, i))
def search(pcap, pattern, is_hex):
    """Scan every TCP conversation in *pcap* for *pattern* (hex-decoded first
    when *is_hex* is true) and print the conversations that contain it."""
    p = sp.Popen('tshark -r %s -z conv,tcp | grep "<->" | wc -l' % pcap, shell=True, stdout=sp.PIPE)
    count = int(p.stdout.read())
    log.debug('tcp conversion: %d' % count)
    data = pattern.decode('hex') if is_hex else pattern
    for i in xrange(count):
        result = dump_io(pcap, i)
        if data in result:
            log.info('Find in %s conversation %d' % (pcap, i))
            log.info(result)
        else:
            log.warning('Not found')
def batch(in_f, out_f):
    """Dump every TCP conversation of every pcap under *in_f* into a per-pcap
    folder beneath *out_f* (a temp folder is created when *out_f* is falsy)."""
    if not out_f:
        out_f = tempfile.mkdtemp()
    pcaps = sp.Popen('ls %s/*.pcap' % in_f, shell=True, stdout=sp.PIPE).stdout.read().split()
    for pcap in pcaps:
        out_dir = '%s/%s' % (out_f, pcap.split('/')[-1])
        log.info('Dump to %s ...' % out_dir)
        os.mkdir(out_dir)
        count = int(sp.Popen('tshark -r %s -z conv,tcp | grep "<->" | wc -l' % pcap, shell=True, stdout=sp.PIPE).stdout.read())
        for i in xrange(count):
            with open('%s/%d' % (out_dir, i), 'w') as f:
                # BUG FIX: dump_io takes (pcap, cid) — it was called with only
                # the conversation id, which raised a TypeError.  Also guard
                # against dump_io returning None for empty conversations.
                f.write(dump_io(pcap, i) or '')
def replay(pcap, cid, host, port):
    """Replay the client ('in') side of conversation *cid* against host:port,
    logging whatever the server sends back where the pcap had 'out' data."""
    r = remote(host, port)
    for st in dump_io(pcap, cid, split=True):
        direct, _, data = st.partition(':')
        direct = direct.strip()
        data = data.strip()
        if 'in' in direct:
            r.send(data)
        elif 'out' in direct:
            if len(data) > 0:
                data = r.recv()
                log.info(to_8bit(data))
        else:
            log.error('error happend')
    r.close()
# Command-line interface: four subcommands (search / dump / batch / replay),
# each dispatching to the function stored via set_defaults(func=...).
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help='avaiable subcommands')
parser_search = subparsers.add_parser('search', help='tranfer pattern to 9 bit data and search in pcap')
parser_search.add_argument('pcap', type=str, help='search target')
parser_search.add_argument('pattern', type=str, help='search pattern')
parser_search.add_argument('--hex', action='store_true', help='search hex pattern')
parser_search.set_defaults(func=search)
parser_dump = subparsers.add_parser('dump', help='dump pcap to 8 bit data.')
parser_dump.add_argument('pcap', type=str, help='dump target')
# cid is optional: omitting it dumps every conversation.
parser_dump.add_argument('cid', type=int, help='tcp conversation id', nargs='?')
parser_dump.set_defaults(func=dump)
parser_batch = subparsers.add_parser('batch', help='batch dump pcap to 8 bit data.')
parser_batch.add_argument('indir', type=str, help='input pcap folder')
# outdir is optional: batch() falls back to a fresh temp directory.
parser_batch.add_argument('outdir', type=str, help='output data folder', nargs='?')
parser_batch.set_defaults(func=batch)
parser_replay = subparsers.add_parser('replay', help='replay pcap data')
parser_replay.add_argument('pcap', type=str, help='replay target')
parser_replay.add_argument('cid', type=int, help='tcp conversation id')
parser_replay.add_argument('host', type=str, help='replay target host')
parser_replay.add_argument('port', type=str, help='replay target port')
parser_replay.set_defaults(func=replay)
args = parser.parse_args()
# Dispatch on the selected subcommand's function, forwarding its arguments.
if args.func == batch:
    args.func(args.indir, args.outdir)
elif args.func == dump:
    args.func(args.pcap, args.cid)
elif args.func == search:
    args.func(args.pcap, args.pattern, args.hex)
elif args.func == replay:
    args.func(args.pcap, args.cid, args.host, args.port)
| david942j/defcon-2017-tools | pcap/pcap_tool.py | pcap_tool.py | py | 5,202 | python | en | code | 92 | github-code | 36 | [
{
"api_name": "subprocess.Popen",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "re.search",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "re.search",
"line... |
3110641490 | """
Creator:
Dhruuv Agarwal
Github: Dhr11
Reference used for iou calculation:
https://github.com/warmspringwinds/pytorch-segmentation-detection/blob/master/pytorch_segmentation_detection/metrics.py
"""
import numpy as np
from sklearn.metrics import confusion_matrix
class custom_conf_matrix():
    """Accumulating confusion matrix with mean-IoU computation for
    semantic-segmentation evaluation."""

    def __init__(self, lbl, n_class):
        # lbl: label values forwarded to sklearn's confusion_matrix;
        # n_class: number of classes (matrix shape is n_class x n_class).
        self.lbl = lbl
        self.n_class = n_class
        self.conf_mat = np.zeros((self.n_class, self.n_class))

    def update_step(self, truth_lbl, pred_lbl):
        """Accumulate one batch; batches that are entirely label 255
        (ignore value) are skipped."""
        if (truth_lbl == 255).all():
            return
        self.conf_mat += confusion_matrix(y_true=truth_lbl,
                                          y_pred=pred_lbl,
                                          labels=self.lbl)

    def compute_mean_iou(self):
        """Return the mean intersection-over-union across all classes."""
        tp = np.diag(self.conf_mat)
        # union = ground-truth pixels + predicted pixels - intersection
        union = self.conf_mat.sum(axis=1) + self.conf_mat.sum(axis=0) - tp
        return np.mean(tp / union.astype(np.float32))

    def reset(self):
        """Zero the accumulated confusion matrix."""
        self.conf_mat = np.zeros((self.n_class, self.n_class))
| Dhr11/Semantic_Segmentation | metrics.py | metrics.py | py | 1,203 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.confusion_matrix",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.diag",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
... |
20860418193 | # Databricks notebook source
# MAGIC %md
# MAGIC ### Ingest qualifying folder

# COMMAND ----------

# MAGIC %run "../includes/configuration"

# COMMAND ----------

# MAGIC %run "../includes/common_funcs"

# COMMAND ----------

# Widget: source-system name stamped onto every output row
dbutils.widgets.text("p_data_source","")
v_data_source = dbutils.widgets.get("p_data_source")

# COMMAND ----------

# Widget: ingestion date partition being processed
dbutils.widgets.text("p_file_date", "2021-03-21")
v_file_date = dbutils.widgets.get("p_file_date")

# COMMAND ----------

# MAGIC %md
# MAGIC #### Step1 - Read the folder using the spark dataframe reader

# COMMAND ----------

from pyspark.sql.types import StructField, StructType, StringType, IntegerType

# COMMAND ----------

# Explicit schema: avoids a full scan for inference and pins column types
qualifying_schema = StructType(fields=[StructField('qualifyId', IntegerType(), False),
                                       StructField('raceId', IntegerType(), True),
                                       StructField('driverId', IntegerType(), True),
                                       StructField('constructorId', IntegerType(), True),
                                       StructField('number', IntegerType(), True),
                                       StructField('position', IntegerType(), True),
                                       StructField('q1', StringType(), True),
                                       StructField('q2', StringType(), True),
                                       StructField('q3', StringType(), True)
])

# COMMAND ----------

# MAGIC %fs
# MAGIC ls mnt/myformula1projectdl/raw

# COMMAND ----------

# multiLine: the qualifying JSON files span multiple lines per record
qualifying_df = spark.read.schema(qualifying_schema).option('multiLine', True).json(f"{raw_folder_path}/{v_file_date}/qualifying/")

# COMMAND ----------

display(qualifying_df)

# COMMAND ----------

# MAGIC %md
# MAGIC #### Step2 - Rename & add new column

# COMMAND ----------

from pyspark.sql.functions import lit

# COMMAND ----------

# Snake-case the id columns and stamp audit columns from the widgets
final_df = qualifying_df.withColumnRenamed('qualifyId', 'qualify_id')\
    .withColumnRenamed('raceId', 'race_id')\
    .withColumnRenamed('driverId', 'driver_id')\
    .withColumnRenamed('constructorId', 'constructor_id')\
    .withColumn('data_source', lit(v_data_source))\
    .withColumn('file_date', lit(v_file_date))

# COMMAND ----------

final_df = add_ingestion_date(final_df)

# COMMAND ----------

# MAGIC %md
# MAGIC #### Step3 - Write output as parquet file

# COMMAND ----------

# final_df.write.mode('overwrite').format("parquet").saveAsTable("f1_processed.qualifying")

# COMMAND ----------

# overwrite_partition(final_df,'f1_processed', 'qualifying', 'race_id')

# COMMAND ----------

# Delta merge (upsert) keyed on qualify_id + race_id, partitioned by race_id
merge_cond = 'tgt.qualify_id = src.qualify_id AND tgt.race_id = src.race_id'
merge_delta_data(final_df, 'f1_processed', 'qualifying', processed_folder_path, merge_cond, 'race_id')

# COMMAND ----------

# MAGIC %sql
# MAGIC SELECT race_id, COUNT(1)
# MAGIC FROM f1_processed.qualifying
# MAGIC GROUP BY race_id;

# COMMAND ----------

dbutils.notebook.exit("Done!")
| hdh997/f1-project | ingestion/8.ingest_qualifying_file.py | 8.ingest_qualifying_file.py | py | 3,035 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyspark.sql.types.StructType",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.IntegerType",
"line_number": 34,
"usage_type": "call"
},
... |
def openfile():
    """Load and return the JSON content of 'bonus.txt'.

    Fixed: the file handle is now closed via a context manager; the
    original opened the file and never closed it.
    """
    import json
    with open('bonus.txt', 'r') as f:
        return json.load(f)
def viewfile(content):
    """Display the previously loaded file content on stdout."""
    print(content)
def binarySearch(alist, item):
    """Sort alist in place, then binary-search it for item.

    Prints whether the item was found (as before) and now also returns
    the boolean result so callers can use it programmatically.
    Fixed: removed the stray debug print of the whole list.

    NOTE: sorting mutates the caller's list — preserved original behavior.
    """
    alist.sort()
    first = 0
    last = len(alist) - 1
    found = False
    while first <= last and not found:
        middle = (first + last) // 2
        if alist[middle] == item:
            found = True
        elif item < alist[middle]:
            # Target is in the lower half
            last = middle - 1
        else:
            # Target is in the upper half
            first = middle + 1
    if found:
        print('Item found : ', item)
    else:
        print("Item not found!")
    return found
| adnanhf/Basic-Programming-Algorithm | Modul-7-Sequential-and-Binary-Search/Bonus/module.py | module.py | py | 500 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 4,
"usage_type": "call"
}
] |
12491747552 | from pathlib import Path
import pandas as pd
import numpy as np
# Container-local mount points used by the evaluation harness
ROOT_DIRECTORY = Path("/code_execution")
DATA_DIRECTORY = Path("/data")
# Folder holding the raw query videos to describe
QRY_VIDEOS_DIRECTORY = DATA_DIRECTORY / "query"
# Compressed archive of generated descriptors, consumed by the scorer
OUTPUT_FILE = ROOT_DIRECTORY / "subset_query_descriptors.npz"
# CSV listing the subset of query video ids to process
QUERY_SUBSET_FILE = DATA_DIRECTORY / "query_subset.csv"
def generate_query_descriptors(query_video_ids) -> np.ndarray:
    """Compute descriptors for the given query video ids.

    Template stub: always raises until replaced with a real
    implementation. The lines after the raise are intentionally
    unreachable — they sketch the three values an implementation
    must return (ids, descriptor matrix, timestamp intervals).
    """
    raise NotImplementedError(
        "This script is just a template. You should adapt it with your own code."
    )
    # Unreachable placeholders documenting the expected return shape
    video_ids = ...
    descriptors = ...
    timestamp_intervals = ...
    return video_ids, descriptors, timestamp_intervals
def main():
    """Generate descriptors for the query subset and save them to OUTPUT_FILE."""
    # Loading subset of query images
    query_subset = pd.read_csv(QUERY_SUBSET_FILE)
    # Coerce ids to numpy unicode strings for the .npz output
    query_subset_video_ids = query_subset.video_id.values.astype("U")
    # Generation of query descriptors happens here
    query_video_ids, query_descriptors, query_timestamps = generate_query_descriptors(
        query_subset_video_ids
    )
    # Bundle everything into a single archive keyed by field name
    np.savez(
        OUTPUT_FILE,
        video_ids=query_video_ids,
        features=query_descriptors,
        timestamps=query_timestamps,
    )


if __name__ == "__main__":
    main()
| drivendataorg/meta-vsc-descriptor-runtime | submission_src/main.py | main.py | py | 1,151 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"lin... |
27545046731 | from tir import Webapp
import unittest
class OGAA580(unittest.TestCase):
    """TIR UI test: register and verify an Index Table entry (SIGAAGR)."""

    @classmethod
    def setUpClass(inst):
        # Log into module SIGAADV dated today, branch 'D MG 01 ', group 67
        from datetime import datetime
        DateSystem = datetime.today().strftime('%d/%m/%Y')
        inst.oHelper = Webapp()
        inst.oHelper.Setup('SIGAADV',DateSystem,'T1','D MG 01 ','67')

    def test_OGAA580_CT001(self):
        # Open the Index Table registration routine from the side menu
        self.oHelper.SetLateralMenu("Atualizações > Comercialização > Cadastros Básicos > Tabela de Índice")
        # Create a new index record (harvest 1920, route AC -> AL)
        self.oHelper.SetButton("Incluir")
        self.oHelper.SetValue("N9H_INDICE", "INDICE000123212",name_attr=True)
        self.oHelper.SetValue("N9H_PROD", "AGR-SOJA GRANEL",name_attr=True)
        self.oHelper.SetValue("N9H_CODSAF", "1920",name_attr=True)
        self.oHelper.SetValue("N9H_UFORIG", "AC",name_attr=True)
        self.oHelper.SetValue("N9H_UFDEST", "AL",name_attr=True)
        self.oHelper.SetValue("N9H_DTINVG", "31/10/2019",name_attr=True)
        self.oHelper.SetValue("N9H_DTFNVG", "31/10/2021",name_attr=True)
        self.oHelper.SetButton("Confirmar")
        self.oHelper.SetButton("Sim")
        self.oHelper.SetButton("Fechar")
        # Locate the record just created (browse key = index + product)
        self.oHelper.SearchBrowse("INDICE000123212"+"AGR-SOJA GRANEL")
        self.oHelper.SetButton("Visualizar")
        # Confirm the stored index code matches what was entered
        self.oHelper.CheckResult("N9H_INDICE", user_value = "INDICE000123212")
        self.oHelper.SetButton("Fechar")
        self.oHelper.AssertTrue()

    @classmethod
    def tearDownClass(inst):
        inst.oHelper.TearDown()
if __name__ == '__main__':
unittest.main()
| ccpn1988/TIR | Modules/SIGAAGR/OGAA580TestCase.py | OGAA580TestCase.py | py | 1,561 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.today",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "tir.W... |
7237082725 | #!/usr/bin/python3
# This Red Panda Lineage dataset management tool is useful for doing sweeping dataset
# revisions, such as ensuring that a field exists in each panda or zoo file, or removing
# photos taken by a specific credited author.
import git
import json
import os
import re
import sys
from shared import MEDIA_PATH, PANDA_PATH, ZOO_PATH, CommitError, PhotoFile, SectionNameError, datetime_to_unixtime, fetch_photo, get_max_entity_count, update_ig_link
from unidiff import PatchSet
def find_commit_of_removed_photos(author, repo):
    """
    Iterate through the Git repo and find the most recent commit where an
    author's photos were removed.

    Walks master from HEAD backwards, diffing each commit against the
    previously inspected one, and counts removed data-file lines ending
    in ": <author>". Returns the first commit where such removals appear.
    Returns None implicitly if no commit ever matches.
    """
    counter = 0
    compare = repo.commit("HEAD")
    for commit in repo.iter_commits('master'):
        diff_raw = repo.git.diff(compare,
                                 commit,
                                 ignore_blank_lines=True,
                                 ignore_space_at_eol=True)
        patch = PatchSet(diff_raw)
        for change in patch:
            filename = change.path
            if filename.find(".txt") == -1:
                # Don't care about non-data files
                continue
            elif change.removed <= 0:
                # No lines were removed, so we don't care
                continue
            else:
                for hunk in change:
                    for line in hunk:
                        if line.is_removed:
                            # author must be at the end of a line, prepended with ": "
                            line = line.value.strip()
                            search_string = ": " + author
                            if len(line) <= len(search_string):
                                continue
                            expected_location = len(line) - len(search_string)
                            if line.find(search_string) == expected_location:
                                counter = counter + 1
        if counter > 0:
            return commit
        else:
            # Prepare for the next iteration
            compare = commit
def remove_author_from_lineage(author):
    """
    Strip every photo credited to the given author (typically an IG
    username) out of all panda, zoo, and media data files.

    Useful when a contributor's source photos disappear online: the
    entries are removed now and can be re-added later.
    """
    for dataset_root in [PANDA_PATH, ZOO_PATH, MEDIA_PATH]:
        # Derive the config section name ("media"/"zoo"/"panda") from the path
        section = None
        for plural in ["media", "zoos", "pandas"]:
            if plural in dataset_root.split("/"):
                section = plural.split("s")[0]  # HACK: crude singularization
        # Visit every data file under this dataset root
        for parent, _subdirs, entries in os.walk(dataset_root):
            for entry in entries:
                data_file = parent + os.sep + entry
                photos = PhotoFile(section, data_file)
                photos.remove_author(author)
                # Persist the pruned photo list back to disk
                photos.update_file()
def remove_photo_from_file(path, photo_id):
    """
    Delete one photo (by index id) from the data file at *path*, then
    renumber the remaining photos so indices stay dense.

    The config section header ("wild"/"media"/"zoo"/"panda") is inferred
    from the file's location on disk.
    """
    section = None
    for plural in ["wild", "media", "zoos", "pandas"]:
        if plural in path.split("/"):
            section = plural.split("s")[0]  # HACK: crude singularization
    photos = PhotoFile(section, path)
    if photos.delete_photo(photo_id) == True:
        # A photo was actually removed: compact the numbering and save.
        # Read max from an existing photo
        highest = int(get_max_entity_count())
        photos.renumber_photos(highest)
        photos.update_file()
def remove_duplicate_photo_uris_per_file():
    """
    If a file has the same photo URI multiple times, make a new photo entry
    with a union of the tags for each one, and the earlier commitdate.
    TODO: support media duplicates
    """
    max = int(get_max_entity_count())
    for file_path in [PANDA_PATH, ZOO_PATH]:
        section = None
        for section_name in ["zoos", "pandas"]:
            if section_name in file_path.split("/"):
                section = section_name.split("s")[0]  # HACK
        # Enter the pandas subdirectories
        for root, dirs, files in os.walk(file_path):
            for filename in files:
                path = root + os.sep + filename
                # print(path)
                photo_list = PhotoFile(section, path)
                photo_count = photo_list.photo_count()
                photo_index = 1
                # seen: first sighting of each URI; duplicates: URIs seen 2+ times
                seen = {}
                duplicates = {}
                while (photo_index <= photo_count):
                    # Pull all fields for the photo at this index
                    current_option = "photo." + str(photo_index)
                    current_uri = photo_list.get_field(current_option)
                    current_author_option = current_option + ".author"
                    current_author = photo_list.get_field(current_author_option)
                    current_date_option = current_option + ".commitdate"
                    current_date = photo_list.get_field(current_date_option)
                    current_date_value = datetime_to_unixtime(current_date)
                    current_link_option = current_option + ".link"
                    current_link = photo_list.get_field(current_link_option)
                    current_tags_option = current_option + ".tags"
                    current_tags = photo_list.get_field(current_tags_option)
                    if current_uri in seen:
                        # We have a duplicate
                        seen_date_value = datetime_to_unixtime(seen[current_uri]["commitdate"])
                        seen_tags = seen[current_uri]["tags"]
                        # Resolve dates and tags: keep the earlier date
                        if (current_date_value < seen_date_value):
                            seen[current_uri]["commitdate"] = current_date_value
                        # Handle when either of the duplicates have no tags
                        if seen_tags == None and current_tags != None:
                            seen[current_uri]["tags"] = current_tags
                        if seen_tags != None and current_tags != None:
                            # Union of both tag lists, deduplicated and sorted
                            tag_list = current_tags.split(", ") + seen_tags.split(", ")
                            tag_list = sorted(list(dict.fromkeys(tag_list)))  # deduplicate tags
                            seen[current_uri]["tags"] = ", ".join(tag_list)
                        # Add to duplicates list in its current form
                        duplicates[current_uri] = seen[current_uri]
                        # Remove both copies from the photo list
                        photo_list.delete_photo(photo_index)
                        photo_list.delete_photo(seen[current_uri]["old_index"])
                    elif current_uri in duplicates:
                        # We have something duplicated more than once
                        seen_date_value = datetime_to_unixtime(duplicates[current_uri]["commitdate"])
                        seen_tags = duplicates[current_uri]["tags"]
                        # Resolve dates and tags
                        if (current_date_value < seen_date_value):
                            duplicates[current_uri]["commitdate"] = current_date_value
                        # Handle when either of the duplicates have no tags
                        if seen_tags == None and current_tags != None:
                            seen[current_uri]["tags"] = current_tags
                        if seen_tags != None and current_tags != None:
                            tag_list = current_tags.split(", ") + seen_tags.split(", ")
                            tag_list = sorted(list(dict.fromkeys(tag_list)))  # deduplicate tags
                            duplicates[current_uri]["tags"] = ", ".join(tag_list)
                        # Remove from the photo list
                        photo_list.delete_photo(photo_index)
                    else:
                        # First sighting of this URI: remember all its fields
                        seen[current_uri] = {}
                        seen[current_uri]["old_index"] = photo_index
                        seen[current_uri]["author"] = current_author
                        seen[current_uri]["commitdate"] = current_date
                        seen[current_uri]["link"] = current_link
                        seen[current_uri]["tags"] = current_tags
                    photo_index = photo_index + 1
                for photo_uri in duplicates.keys():
                    # Add merged duplicates back, starting at the newest index
                    photo_option = "photo." + str(photo_index)
                    author_option = photo_option + ".author"
                    author = duplicates[photo_uri]["author"]
                    date_option = photo_option + ".commitdate"
                    date = duplicates[photo_uri]["commitdate"]
                    link_option = photo_option + ".link"
                    link = duplicates[photo_uri]["link"]
                    tags_option = photo_option + ".tags"
                    tags = duplicates[photo_uri]["tags"]
                    photo_list.set_field(photo_option, photo_uri)
                    photo_list.set_field(author_option, author)
                    photo_list.set_field(date_option, date)
                    photo_list.set_field(link_option, link)
                    if (tags != None):
                        photo_list.set_field(tags_option, tags)
                    photo_index = photo_index + 1
                # Update the file if there were any changes, and re-sort the hashes
                duplicate_count = len(duplicates.keys())
                if duplicate_count > 0:
                    print("deduplicated: %s (%s duplicated)" % (path, duplicate_count))
                    photo_list.renumber_photos(max)
                    photo_list.update_file()
                    sort_ig_hashes(path)
def restore_author_to_lineage(author, prior_commit=None):
    """
    Find the most recent commit where photos by an author were removed.
    Re-add them to the pandas they were removed from. For any panda that
    had photos restored, sort their photo hashes.
    """
    repo = git.Repo(".")
    if prior_commit == None:
        prior_commit = find_commit_of_removed_photos(author, repo)
    # Go back one more from this commit, so the diff shows the removals
    current_commit = prior_commit
    prior_commit = str(prior_commit) + "~1"
    diff_raw = repo.git.diff(prior_commit,
                             current_commit,
                             ignore_blank_lines=True,
                             ignore_space_at_eol=True)
    # Make list of removed lines per filename, and convert.
    # Handjam this just by iterating on file lines
    path_to_photo_index = {}
    patch = PatchSet(diff_raw)
    for change in patch:
        filename = change.path
        if filename.find(".txt") == -1:
            # Don't care about non-data files
            continue
        elif change.removed <= 0:
            # No lines were removed, so we don't care
            continue
        else:
            # Prepare to add lines
            path_to_photo_index[filename] = {}
            for hunk in change:
                for line in hunk:
                    if line.is_removed:
                        if line.value.find("photo.") != 0:
                            continue
                        [key, value] = line.value.strip().split(": ")
                        path_to_photo_index[filename][key] = value
    # Delete any items where the author isn't the given
    for path in path_to_photo_index.keys():
        for option in list(path_to_photo_index[path].keys()):
            index = option.split(".")[1]
            if path_to_photo_index[path].get("photo." + index + ".author") != author:
                path_to_photo_index[path].pop(option)
    # Iterate through files that are getting photos back.
    # Add the photos to the ends of the files
    for path in path_to_photo_index.keys():
        if not os.path.exists(path):
            # File may have been moved.
            print("%s:\nfile no longer exists, so where do I put this?" % path)
            for key in path_to_photo_index[path].keys():
                print("%s: %s" % (key, value))
            continue
        section = None
        for section_name in ["wild", "media", "zoos", "pandas"]:
            if section_name in path.split("/"):
                section = section_name.split("s")[0]  # HACK
        photo_list = PhotoFile(section, path)
        photo_count = photo_list.photo_count()
        photo_index = photo_count + 1
        index_map = {}
        # Swap the old index to one that's not currently in the file
        for key in path_to_photo_index[path].keys():
            index = key.split(".")[1]
            if index_map.get(index) == None:
                index_map[index] = photo_index
                photo_index = photo_index + 1
            value = path_to_photo_index[path][key]
            key = key.replace("photo." + index, "photo." + str(index_map[index]))
            photo_list.set_field(key, value)
            # print("%s: %s" % (key, value))
        # Update the list of photos
        photo_list.update_file()
    # Finally, sort the photo files
    for path in path_to_photo_index.keys():
        sort_ig_hashes(path)
def sort_ig_hashes(path):
    """
    Take a zoo/panda file, and sort all photos by their IG hashes.
    This makes the photos appear in the order they were uploaded to IG,
    oldest to newest.
    If a photo does not use an IG URI, keep its index unchanged.
    """
    # IG alphabet for hashes, time ordering oldest to newest
    print("sorting: %s" % path)
    hash_order = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"
    section = None
    for section_name in ["wild", "zoos", "media", "pandas"]:
        if section_name in path.split("/"):
            section = section_name.split("s")[0]  # HACK
    photo_list = PhotoFile(section, path)
    photo_count = photo_list.photo_count()
    # Scan up to at least the global max index, in case of gaps
    max = int(get_max_entity_count()) + 1
    if photo_count >= max:
        max = photo_count + 1
    non_ig_indices = []
    ig_photos = []
    # Build photo indices of IG photos and non-IG photos
    start_index = 1
    stop_point = max
    photo_index = start_index
    while photo_index <= stop_point:
        photo_option = "photo." + str(photo_index)
        photo = photo_list.get_field(photo_option)
        if photo == None:
            # Missing photo at this index, continue
            photo_index = photo_index + 1
            continue
        # Convert IG photo formats to use new event handler
        photo = update_ig_link(photo)
        photo_list.set_field(photo_option, photo)
        # If our updated photo link has an ig:// uri, do the moving
        if "ig://" in photo:
            # Track the photo and index as a tuple
            ig_photos.append([photo, photo_index])
            # Rename all photo fields as "old_photo_field"
            photo_list.move_field("old." + photo_option, photo_option)
            photo_list.move_field("old." + photo_option + ".author", photo_option + ".author")
            photo_list.move_field("old." + photo_option + ".commitdate", photo_option + ".commitdate")
            photo_list.move_field("old." + photo_option + ".link", photo_option + ".link")
            photo_list.move_field("old." + photo_option + ".tags", photo_option + ".tags")
            if section == "media":
                # Media files carry per-panda location tags too
                panda_tags = photo_list.get_field("panda.tags").split(", ")
                for panda_id in panda_tags:
                    photo_item = photo_option + ".tags." + panda_id + ".location"
                    photo_list.move_field("old." + photo_item, photo_item)
        else:
            # Track the non-ig index, so we can avoid it
            # Don't need to rename these photos
            non_ig_indices.append(photo_index)
        photo_index = photo_index + 1
    # Sort the list of ig photo tuples by photo URL
    # (the 0th item in each tuple is the url)
    # (the 4th item in each URL is the ig photo hash)
    ig_photos = sorted(
        ig_photos,
        key=lambda x:
            [hash_order.index(char) for char in x[0].split("/")[-2]])
    # Shorter hashes are older; sort by hash length second (stable sort)
    ig_photos = sorted(ig_photos, key=lambda x: len(x[0].split("/")[-2]))
    # Now, re-distribute the photos, iterating down the ig
    # photos, moving "old_photo_field" to "photo_field" but with
    # updated indices
    list_index = start_index
    photo_index = start_index
    used_indices = []
    while photo_index <= stop_point:
        if list_index - 1 == len(ig_photos):
            # No more photos, for certain
            break
        [photo, old_index] = ig_photos[list_index - 1]
        photo_index = list_index
        while photo_index in non_ig_indices:
            photo_index = photo_index + 1  # Avoid indices for non-IG photos
        while photo_index in used_indices:
            photo_index = photo_index + 1  # Avoid indices we already used
        used_indices.append(photo_index)
        current_option = "photo." + str(photo_index)
        old_option = "old.photo." + str(old_index)
        photo_list.move_field(current_option, old_option)
        photo_list.move_field(current_option + ".author", old_option + ".author")
        photo_list.move_field(current_option + ".commitdate", old_option + ".commitdate")
        photo_list.move_field(current_option + ".link", old_option + ".link")
        photo_list.move_field(current_option + ".tags", old_option + ".tags")
        if section == "media":
            panda_tags = photo_list.get_field("panda.tags").split(", ")
            for panda_id in panda_tags:
                current_loc_tag = current_option + ".tags." + panda_id + ".location"
                old_loc_tag = old_option + ".tags." + panda_id + ".location"
                photo_list.move_field(current_loc_tag, old_loc_tag)
        list_index = list_index + 1
    # We're done. Update the photo file
    photo_list.update_file()
def sort_ig_updates():
    """
    Any data file that was updated in the last commit, do a sort operation for the
    IG hashes, leaving non-IG files unchanged. Also add commit dates for any photos
    that don't have them.
    """
    repo = git.Repo(".")
    prior_commit = repo.commit("HEAD~1")
    current_commit = repo.commit("HEAD")
    diff_raw = repo.git.diff(prior_commit,
                             current_commit,
                             ignore_blank_lines=True,
                             ignore_space_at_eol=True)
    # Start by adding entity and photo commit dates, since this process can
    # change the URIs for doing tracking in the commits
    update_entity_commit_dates(prior_commit.hexsha)
    update_photo_commit_dates(prior_commit.hexsha)
    # Now do the sorting and rewriting
    patch = PatchSet(diff_raw)
    for change in patch:
        filename = change.path
        if filename.find("links") == 0:
            # Don't care about links files
            continue
        if filename.find(".txt") == -1:
            # Don't care about non-data files
            continue
        elif change.added > 0:
            sort_ig_hashes(filename)
def update_entity_commit_dates(starting_commit, force=False):
    """
    Record a "commitdate" on every panda/zoo/media data file.

    The old redpandafinder updater considered any new file in a location a
    "new" animal, so an animal that moved zoos looked new again. To fix
    that, walk the git history from starting_commit (or the whole history
    when None), note the commit date at which each data file was added,
    and write that date into files lacking a commitdate (or always, when
    force=True and the file's commitdate field is still unset).

    Raises CommitError when starting_commit is not a commit in this repo.
    """
    filename_to_commit_date = {}
    type_id_to_commit_date = {}
    repo = git.Repo(".")
    # List of sha1-name commits from the repo, oldest to newest
    commit_list = list(reversed(list(map(lambda x: x.hexsha, repo.iter_commits()))))
    if starting_commit != None:
        try:
            index = commit_list.index(starting_commit)
        except ValueError:
            # BUGFIX: list.index() raises ValueError (not IndexError) when the
            # value is absent, so the old "except IndexError" never fired and
            # an invalid commit escaped as an unhandled ValueError.
            raise CommitError("%s not a valid commit in this repo." % starting_commit)
        commit_list = commit_list[index:]  # All after, and including the given commit
    for index, commitish in enumerate(commit_list):
        # End of the commit list? Call it a day
        if commitish == commit_list[len(commit_list) - 1]:
            break
        # Diff each commit against its immediate successor
        start = commitish
        end = commit_list[index + 1]
        diff_raw = repo.git.diff(start, end,
                                 ignore_blank_lines=True,
                                 ignore_space_at_eol=True)
        patch = PatchSet(diff_raw)
        for change in patch:
            filename = change.path
            if filename.find(".txt") == -1:
                # Don't care about non-data files
                continue
            elif change.is_added_file == True:
                compare = "./" + filename
                dt = repo.commit(end).committed_datetime
                date = str(dt.year) + "/" + str(dt.month) + "/" + str(dt.day)
                just_file = filename.split("/").pop()
                just_type = None
                just_id = None
                if compare.find(PANDA_PATH) == 0:
                    just_type = "panda"
                    just_id = just_file.split("_")[0]
                elif compare.find(ZOO_PATH) == 0:
                    just_type = "zoo"
                    just_id = just_file.split("_")[0]
                elif compare.find(MEDIA_PATH) == 0:
                    just_type = "media"
                    just_id = filename  # Need full path for media files
                else:
                    continue  # Not a file we're tracking commitdates for
                # Index the add-date both by bare filename and by type+id,
                # so renamed files can still be matched below
                filename_to_commit_date[just_file] = date
                type_id_to_commit_date[just_type + "_" + just_id] = date
            else:
                continue
    # Now walk the repo, find all files without commit dates,
    # and add a commitdate to each one that needs it
    for file_path in [MEDIA_PATH, PANDA_PATH, ZOO_PATH]:
        section = None
        for section_name in ["media", "zoos", "pandas"]:
            if section_name in file_path.split("/"):
                section = section_name.split("s")[0]  # HACK
        for root, dirs, files in os.walk(file_path):
            for filename in files:
                path = root + os.sep + filename
                photo_list = PhotoFile(section, path)
                if photo_list.get_field("commitdate") == None:
                    if filename not in filename_to_commit_date:
                        # file's name was changed at some point; fall back
                        # to the type+id key recorded at add time
                        just_file = filename.split("/").pop()
                        just_type = None
                        just_id = None
                        if path.find(PANDA_PATH) == 0:
                            just_type = "panda"
                            just_id = just_file.split("_")[0]
                        elif path.find(ZOO_PATH) == 0:
                            just_type = "zoo"
                            just_id = just_file.split("_")[0]
                        elif path.find(MEDIA_PATH) == 0:
                            just_type = "media"
                            just_id = path  # Need full path for media files
                        else:
                            continue  # Not a file we're tracking commitdates for
                        just_key = just_type + "_" + just_id
                        if just_key not in type_id_to_commit_date:
                            print("warning: %s commitdate undetermined" % filename)
                            continue
                        else:
                            date = type_id_to_commit_date[just_key]
                            old_date = photo_list.get_field("commitdate")
                            if ((old_date == None) or (force == True)):
                                photo_list.set_field("commitdate", date)
                    else:
                        date = filename_to_commit_date[filename]
                        old_date = photo_list.get_field("commitdate")
                        if ((old_date == None) or (force == True)):
                            photo_list.set_field("commitdate", date)
                photo_list.update_file()
def update_photo_commit_dates(starting_commit, force=False):
    """
    Track each photo URI by the date of the commit that first added it.

    The old redpandafinder update logic only looked at the last week or so
    of commits; when files are re-sorted, added, or removed over time it
    becomes meaningful to search the whole history, find when a photo URI
    first appeared, and stamp that date onto the photo entry.

    Raises CommitError when starting_commit is not a commit in this repo.
    """
    uri_to_commit_date = {}
    repo = git.Repo(".")
    # List of sha1-name commits from the repo, oldest to newest
    commit_list = list(reversed(list(map(lambda x: x.hexsha, repo.iter_commits()))))
    if starting_commit != None:
        try:
            index = commit_list.index(starting_commit)
        except ValueError:
            # BUGFIX: list.index() raises ValueError (not IndexError) when the
            # value is absent, so the old "except IndexError" never fired.
            raise CommitError("%s not a valid commit in this repo." % starting_commit)
        commit_list = commit_list[index:]  # All after, and including the given commit
    for index, commitish in enumerate(commit_list):
        # End of the commit list? Call it a day
        if commitish == commit_list[len(commit_list) - 1]:
            break
        # Diff each commit against its immediate successor
        start = commitish
        end = commit_list[index + 1]
        diff_raw = repo.git.diff(start, end,
                                 ignore_blank_lines=True,
                                 ignore_space_at_eol=True)
        patch = PatchSet(diff_raw)
        for change in patch:
            filename = change.path
            if filename.find(".txt") == -1:
                # Don't care about non-data files
                continue
            elif change.added <= 0:
                # No lines were added, so we don't care
                continue
            else:
                for hunk in change:
                    for line in hunk:
                        if line.is_added:
                            # raw string: "\d" was an invalid escape sequence
                            if re.match(r"photo.\d+:", line.value) == None:
                                # Not a photo line
                                continue
                            if line.value.find(": ") == -1:
                                # No correct delimiter, which we see in old commits
                                continue
                            if len(line.value.strip().split(": ")) != 2:
                                # Probably bad linebreaks
                                continue
                            [key, value] = line.value.strip().split(": ")
                            if (value in uri_to_commit_date):
                                # Photo we've already seen
                                continue
                            if (value.find("http") != 0) and (value.find("ig://") != 0):
                                # Not a URI, so not a photo reference
                                continue
                            dt = repo.commit(end).committed_datetime
                            date = str(dt.year) + "/" + str(dt.month) + "/" + str(dt.day)
                            if value not in uri_to_commit_date:
                                # Only insert a commit date once
                                uri_to_commit_date[value] = date
    # Now walk the repo, find all files with photo lines that have no commit
    # dates, and add a commitdate to each photo that needs one
    for file_path in [PANDA_PATH, ZOO_PATH, MEDIA_PATH]:
        section = None
        for section_name in ["media", "zoos", "pandas"]:
            if section_name in file_path.split("/"):
                section = section_name.split("s")[0]  # HACK
        for root, dirs, files in os.walk(file_path):
            for filename in files:
                path = root + os.sep + filename
                photo_list = PhotoFile(section, path)
                photo_count = photo_list.photo_count()
                photo_index = 1
                while (photo_index <= photo_count):
                    photo_option = "photo." + str(photo_index)
                    photo_uri = photo_list.get_field(photo_option)
                    date_option = photo_option + ".commitdate"
                    if photo_uri not in uri_to_commit_date:
                        photo_index = photo_index + 1
                        continue
                    date_value = uri_to_commit_date[photo_uri]
                    old_date_value = photo_list.get_field(date_option)
                    if ((old_date_value == None) or (force == True)):
                        photo_list.set_field(date_option, date_value)
                    photo_index = photo_index + 1
                photo_list.update_file()
if __name__ == '__main__':
    """Choose a utility function."""
    # Flag-only utilities (no extra argument)
    if len(sys.argv) == 2:
        if sys.argv[1] == "--deduplicate-photo-uris":
            remove_duplicate_photo_uris_per_file()
        if sys.argv[1] == "--rewrite-all-commit-dates":
            update_entity_commit_dates(None, force=True)
            update_photo_commit_dates(None, force=True)
        if sys.argv[1] == "--sort-instagram-updates":
            sort_ig_updates()
    # Utilities taking one argument
    if len(sys.argv) == 3:
        if sys.argv[1] == "--fetch-photo":
            photo = sys.argv[2]
            fetch_photo(photo)
        if sys.argv[1] == "--remove-author":
            author = sys.argv[2]
            remove_author_from_lineage(author)
        if sys.argv[1] == "--restore-author":
            author = sys.argv[2]
            restore_author_to_lineage(author)
        if sys.argv[1] == "--sort-instagram-hashes":
            file_path = sys.argv[2]
            sort_ig_hashes(file_path)
        if sys.argv[1] == "--update-commit-dates":
            commitish = sys.argv[2]
            update_entity_commit_dates(commitish)
            update_photo_commit_dates(commitish)
    # Utilities taking two arguments
    if len(sys.argv) == 4:
        if sys.argv[1] == "--remove-photo":
            file_path = sys.argv[2]
            photo_id = sys.argv[3]
            remove_photo_from_file(file_path, photo_id)
        if sys.argv[1] == "--restore-author":
            author = sys.argv[2]
            commit = sys.argv[3]
            restore_author_to_lineage(author, commit)
| wwoast/redpanda-lineage | manage.py | manage.py | py | 30,773 | python | en | code | 22 | github-code | 36 | [
{
"api_name": "unidiff.PatchSet",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "shared.PANDA_PATH",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "shared.ZOO_PATH",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "shared.MEDIA_PATH"... |
15655149893 | import os
import pickle
import csv
import json
from copy import copy
import numpy as np
import datetime
from scipy.spatial.distance import jaccard, euclidean
def get_files(root_path):
    """
    Recursively collect the paths of all files under root_path.

    Returns a list of unique file paths. Fixed: paths are now built with
    os.path.join(root, file) instead of root_path + file, which produced
    wrong paths for files in subdirectories (and when root_path lacked a
    trailing separator). Unchanged for the flat, trailing-slash
    directories this script uses.
    """
    dat_files = set()
    for (root, dirs, files) in os.walk(root_path, topdown=True):
        for file in files:
            # Join against the directory actually containing the file
            dat_files.add(os.path.join(root, file))
    return list(dat_files)
# Recommendation-policy keys used throughout the simulation output
OMNI = 'omni'
PARTIAL = 'partial'
NO_REC = 'no_rec'
rec_policy_keys = [OMNI, PARTIAL, NO_REC]
#WORKING_DIR = '/Users/guyaridor/Desktop/ExAnteFilterBubble/data/'
WORKING_DIR = '/media/IntentMedia/rec_data/merged_results/'  # merged CSV output location
DATA_DIR = '/media/IntentMedia/rec_data/sim_results/'  # raw simulation result files
# Every simulation result file found under DATA_DIR
data_files = get_files(DATA_DIR)
def d(i, j, N):
    """Circular (ring) distance between positions i and j on a ring of N items."""
    straight = abs(i - j)
    # Compare the direct gap with the two wrap-around alternatives
    return min(straight, abs(j - i - N), abs(i - j - N))
def div_fun(CiT, T, N):
    """Mean pairwise ring distance of the consumption path CiT, scaled by 1/N."""
    total = 0.0
    for a in range(len(CiT)):
        for b in range(len(CiT)):
            if a == b:
                continue
            # Accumulate the circular distance over every ordered pair
            total = total + d(CiT[a], CiT[b], N)
    # Average over the T*(T-1) ordered pairs, then normalize by N
    return total * (1.0 / (T * (T - 1))) / N
def homogeneity(C1, C2, type_sim="jaccard"):
    """Similarity of two consumption vectors; only 'jaccard' is supported."""
    if type_sim != "jaccard":
        # Unknown similarity type: preserve the original implicit None
        return None
    # scipy's jaccard returns a dissimilarity, so flip it
    return 1 - jaccard(C1, C2)
def follow_rec(Ci, Rec, N, T):
    """Fraction of the T periods in which consumption Ci matched the recommendation Rec.

    N is accepted for signature parity with the other metrics but unused.
    """
    matches = sum(1 for t in range(len(Ci)) if Ci[t] == Rec[t])
    return matches / T
def parse_pickle_key(key):
    """Parse a result-dict key of the form "(N, T, rho, beta, sigma, alpha, epsilon)".

    Returns a dict of the simulation parameters, with N and T coerced to float.
    """
    # literal_eval only evaluates Python literals; the previous eval() would
    # execute arbitrary code embedded in the externally-produced JSON keys.
    key = ast.literal_eval(key)
    dat = {
        'N': float(key[0]),
        'T': float(key[1]),
        'rho': key[2],
        'beta': key[3],
        'sigma': key[4],
        'alpha': key[5],
        'epsilon': key[6]
        #'pop_idx': pop_idx
    }
    return dat
print("STARTING TIME PATH")
INDIVIDUAL_FIELD_NAMES =['pop_idx', 'regime', 'rho', 'beta', 'epsilon', 'alpha', 'N', 'T', 'sigma', 't', 'follow_recommendation', 'mean_consumption_dist', 'median_consumption_dist', 'sd_consumption_dist', 'cur_utility', 'average_cumulative_utility', 'utility_difference', 'local_move_05', 'local_move_025', 'local_move_10', 'instantaneous_welfare_average']
with open(WORKING_DIR + 'time_path.csv', 'w') as rec_csv:
data_writer = csv.DictWriter(rec_csv, fieldnames=INDIVIDUAL_FIELD_NAMES)
data_writer.writeheader()
for file in data_files:
print(file)
print("@ {}".format(datetime.datetime.now()))
df = None
with open(file, 'r') as fp:
df = json.load(fp)
for key, value in df.items():
for pop_idx in range(len(value)):
dat = parse_pickle_key(key)
N = dat['N']
dat['pop_idx'] = pop_idx
cur = value[pop_idx]
consumption = cur['Consumption']
welfare = cur['Welfare']
for policy in rec_policy_keys:
consumption_arr = np.array(consumption[policy])
welfare_arr = np.array(welfare[policy])
dat['regime'] = policy
if policy == PARTIAL:
follow_rec_arr = np.array(cur['Rec'][policy])
cum_welfare = 0
for t in range(int(dat['T'])) :
dat['t'] = t
# consumption dist average
l = len(consumption_arr[t, :])
if t != 0:
distance = [d(consumption_arr[t, cur], consumption_arr[t-1, cur], N) for cur in range(l)]
dat['mean_consumption_dist'] = np.mean(distance)
dat['median_consumption_dist'] = np.median(distance)
dat['sd_consumption_dist'] = np.std(distance)
dat['local_move_05'] = np.mean([dist < (dat['N'] * 0.05) for dist in distance])
dat['local_move_025'] = np.mean([dist < (dat['N'] * 0.025) for dist in distance])
dat['local_move_10'] = np.mean([dist < (dat['N'] * 0.1) for dist in distance])
# cumulative welfare avg
dat['instantaneous_welfare_average'] = np.mean(welfare_arr[t, :])
# instantaneous (at time t) welfare avg
cum_welfare += np.mean(welfare_arr[t, :])
dat['average_cumulative_utility'] = float(cum_welfare)/dat['T']
cur_dat = copy(dat)
data_writer.writerow(cur_dat)
INDIVIDUAL_FIELD_NAMES =['pop_idx', 'indiv_idx', 'regime', 'welfare', 'diversity_score', 'rho', 'beta', 'epsilon', 'follow_recommendation', 'N', 'T', 'sigma', 'alpha', 'nr_pop', 'nr_ind']
with open(WORKING_DIR + 'rec_data.csv', 'w') as rec_csv:
data_writer = csv.DictWriter(rec_csv, fieldnames=INDIVIDUAL_FIELD_NAMES)
data_writer.writeheader()
for file in data_files:
print(file)
print("@ {}".format(datetime.datetime.now()))
df = None
with open(file, 'r') as fp:
df = json.load(fp)
for key, value in df.items():
for pop_idx in range(len(value)):
dat = parse_pickle_key(key)
T = dat['T']
N = dat['N']
dat['pop_idx'] = pop_idx
cur = value[pop_idx]
welfare = cur['Welfare']
consumption = cur['Consumption']
for policy in rec_policy_keys:
dat['regime'] = policy
for indiv_idx in range(len(welfare[policy])):
dat['indiv_idx'] = indiv_idx
welfare_arr = np.array(welfare[policy])
dat['welfare'] = np.sum(welfare_arr[:,indiv_idx]) / T
consumption_arr = np.array(consumption[policy])
dat['diversity_score'] = div_fun(consumption_arr[:,indiv_idx], T, N)
dat['follow_recommendation'] = False
if policy == PARTIAL:
follow_rec_arr = np.array(cur['Rec'][policy])
dat['follow_recommendation'] = follow_rec(consumption_arr[:,indiv_idx], follow_rec_arr[:,indiv_idx], T, N)
data_writer.writerow(copy(dat))
print("STARTING HOMOGENEITY")
INDIVIDUAL_FIELD_NAMES =['pop_idx', 'regime', 'rho', 'beta', 'N', 'T', 'sigma', 'jaccard', 'beta', 'alpha', 'epsilon', 'nr_pop', 'nr_ind']
with open(WORKING_DIR + 'homogeneity_data.csv', 'w') as rec_csv:
data_writer = csv.DictWriter(rec_csv, fieldnames=INDIVIDUAL_FIELD_NAMES)
data_writer.writeheader()
for file in data_files:
print(file)
print("@ {}".format(datetime.datetime.now()))
df = None
with open(file, 'r') as fp:
df = json.load(fp)
for key, value in df.items():
for pop_idx in range(len(value)):
dat = parse_pickle_key(key)
T = dat['T']
N = dat['N']
cur = value[pop_idx]
consumption = cur['Consumption']
for policy in rec_policy_keys:
dat['regime'] = policy
consumption_arr = np.array(consumption[policy])
iter_size = len(consumption_arr[0,:])
tot = 0.0
for indiv_idx1 in range(iter_size):
for indiv_idx2 in range(iter_size):
if indiv_idx1 == indiv_idx2: continue
c1 = consumption_arr[:,indiv_idx1]
c2 = consumption_arr[:,indiv_idx2]
dist = homogeneity(c1, c2, "jaccard")
tot += dist
tot /= (iter_size * (iter_size - 1))
dat['jaccard'] = tot
cur_dat = copy(dat)
data_writer.writerow(cur_dat)
| ssikdar1/DeconstructingTheFilterBubble | replication_files/dump_to_dataset.py | dump_to_dataset.py | py | 7,917 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "os.walk",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "scipy.spatial.distance.jaccard",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "csv.DictWriter",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "datetime.dateti... |
35076409139 | #!/bin/env python3.9
import sys
from base45 import b45decode
import zlib
from cbor2 import loads
from cose.messages import Sign1Message
from pyzbar.pyzbar import decode
from PIL import Image
import argparse
# Initialize components
CLI_PARSER = argparse.ArgumentParser()
def unpack_qr(qr_text):
    """Decode an EU Digital Covid Certificate QR payload.

    The first 4 characters (the scheme prefix, presumably "HC1:" — confirm)
    are stripped, the remainder is base45-decoded, zlib-decompressed, and
    parsed as a COSE Sign1 message whose payload is CBOR.

    Returns a dict with the COSE message and the certificate JSON claims.
    """
    compressed_bytes = b45decode(qr_text[4:])
    cose_bytes = zlib.decompress(compressed_bytes)
    cose_message = Sign1Message.decode(cose_bytes)
    cbor_message = loads(cose_message.payload)
    return {
        "COSE": cose_message,
        # -260 / 1 index into the CWT claims to extract the hcert payload.
        "JSON": cbor_message[-260][1]
    }
def read_qr_pyzbar(file):
    """Read the first barcode found in the image at *file* and return its text."""
    # decode() returns a list of detected symbols; only the first is used.
    barcode = decode(Image.open(file))[0]
    return barcode.data.decode("utf-8")
CLI_PARSER.add_argument(
'--file',
type=str,
help='QR file')
args = CLI_PARSER.parse_args()
if args.file is None:
CLI_PARSER.print_help()
sys.exit()
print()
print()
data = read_qr_pyzbar(args.file)
print("Raw QR data")
print(data)
json = unpack_qr(data)
print()
print("Hcert")
print(data)
print()
print("JSON")
print(json["JSON"])
print()
print("COSE")
print(json["COSE"])
print()
| ryanbnl/eu-dcc-diagnostics | print_payload_qr.py | print_payload_qr.py | py | 1,097 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "base45.b45decode",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "zlib.decompress",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cose.message... |
29112759837 | import uuid
from personal_okrs import db
from personal_okrs.data_model.objective import Objective
from personal_okrs.data_model.goal import Goal
# id = db.Column(db.String(255), primary_key=True)
# title = db.Column(db.String(512))
# description = db.Column(db.Text())
# type = db.Column(db.Enum(GoalType))
# direction_of_progress = db.Column(db.Enum(ProgressDirection))
# due_date = db.Column(db.DateTime())
# current_value = db.Column(db.Numeric())
# start_value = db.Column(db.Numeric())
class ObjectiveRepo(object):
    """Repository for creating and listing Objective records."""

    def __init__(self):
        # In-memory cache of objectives created through this repo instance.
        # Previously never initialized, so list() raised AttributeError.
        self.objectives = []

    def create(self, data):
        """Persist a new Objective (with its underlying Goal) and return it as a dict.

        *data* must contain: title, description, type, direction_of_progress,
        due_date, current_value and start_value.
        """
        goal_id = uuid.uuid4()
        objective_id = uuid.uuid4()
        goal = Goal(
            id=goal_id,
            title=data['title'],
            description=data['description'],
            type=data['type'],
            direction_of_progress=data['direction_of_progress'],
            due_date=data['due_date'],
            current_value=data['current_value'],
            start_value=data['start_value']
        )
        objective = Objective(
            id=objective_id,
            goal=goal
        )
        db.session.add(objective)
        db.session.commit()
        result = objective.dict()
        # Keep the local cache in sync so list() reflects created objectives.
        self.objectives.append(result)
        return result

    def list(self):
        """Return the objectives created through this repository instance."""
        return self.objectives
| xonev/personal-okrs | personal_okrs/core/objective_repo.py | objective_repo.py | py | 1,304 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "uuid.uuid4",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "personal_okrs.data_model.goal.Goal",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "personal_okr... |
39850416701 | import os
import sqlite3
import unittest
from fastapi.testclient import TestClient
from main import app, create_table, increment_count, DB_FILE
class FastAPITest(unittest.TestCase):
    """Integration tests for the FastAPI app's access-count endpoints."""

    def setUp(self):
        # Fresh test client and counter table before every test.
        self.client = TestClient(app)
        self.db_file = DB_FILE
        create_table()

    def tearDown(self):
        # Drop the counter table so counts never leak between tests.
        conn = sqlite3.connect(self.db_file)
        conn.execute("DROP TABLE IF EXISTS access_count")
        conn.commit()
        conn.close()

    def test_example_endpoint(self):
        """GET /example responds 200 and increments its access counter to 1."""
        response = self.client.get("/example")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json(), {"message": "¡Hola! Has accedido al endpoint /example."})
        # Verify the counter was actually persisted in SQLite.
        conn = sqlite3.connect(self.db_file)
        c = conn.cursor()
        c.execute("SELECT count FROM access_count WHERE endpoint = 'example'")
        result = c.fetchone()
        conn.close()
        self.assertEqual(result[0], 1)

    def test_count_endpoint(self):
        """GET /count/<endpoint> reports the stored count for a known endpoint."""
        increment_count("test_endpoint")
        response = self.client.get("/count/test_endpoint")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json(), {"endpoint": "test_endpoint", "count": 1})

    def test_count_endpoint_not_found(self):
        """Unknown endpoints report a count of 0 (still HTTP 200, not 404)."""
        response = self.client.get("/count/nonexistent_endpoint")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json(), {"endpoint": "nonexistent_endpoint", "count": 0})
if __name__ == '__main__':
unittest.main()
| jevillanueva/fastapi-jenkins | test.py | test.py | py | 1,527 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "fastapi.testclient.TestClient",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "main.app",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name": "main... |
2551686389 | #!/usr/bin/env python3
import sys
from ete3 import NCBITaxa
ncbi = NCBITaxa()
def get_desired_ranks(taxid):
    """Map *taxid* to the taxids of its phylum, genus and species ancestors.

    Returns a dict keyed '<rank>_id'; ranks missing from the NCBI lineage
    map to the string '<not present>'.
    """
    desired_ranks = ['phylum', 'genus', 'species']
    try:
        lineage = ncbi.get_lineage(taxid)
    except ValueError:
        # Unknown taxid: continue with an empty lineage so every desired
        # rank is reported as '<not present>'.
        lineage = []
    lineage2ranks = ncbi.get_rank(lineage)
    # Invert {taxid: rank} into {rank: taxid} for direct rank lookups.
    ranks2lineage = dict(
        (rank, taxid) for (taxid, rank) in lineage2ranks.items()
    )
    return {
        '{}_id'.format(rank): ranks2lineage.get(rank, '<not present>') for
        rank in desired_ranks
    }
def main(taxid):
    """Return the phylum/genus/species names for *taxid*.

    Integer entries from get_desired_ranks() are real taxids and are
    translated to names; the '<not present>' placeholder passes through.
    """
    rank_ids = get_desired_ranks(taxid)
    return [
        ncbi.translate_to_names([entry])[0] if isinstance(entry, int) else entry
        for entry in rank_ids.values()
    ]
if __name__ == '__main__':
names = main(sys.argv[1])
print(','.join(names)) | waglecn/helD_search | scripts/tax_csv.py | tax_csv.py | py | 859 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ete3.NCBITaxa",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 35,
"usage_type": "attribute"
}
] |
74021404262 | """django_demo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url, handler404
import django_demoapp.views
# URL routes for the demo app: admin site, responsibles CRUD and courses.
urlpatterns = [
    path('admin/', admin.site.urls),
    # NOTE(review): django.conf.urls.url() is deprecated (removed in Django 4);
    # consider path('') or re_path() — confirm the project's Django version.
    url(r'^$', django_demoapp.views.home, name="home"),
    path('responsibles/', django_demoapp.views.responsibles, name="responsibles"),
    path('responsibles/<int:responsible_id>/delete/', django_demoapp.views.responsibledelete, name="responsibledelete"),
    path('responsibles/<int:responsible_id>/', django_demoapp.views.detail, name="detail"),
    path('courses/<int:course_id>/', django_demoapp.views.coursedelete, name="coursedelete"),
    path('courses/', django_demoapp.views.courses, name="courses"),
]
# Custom 404 handler; Django resolves this dotted path lazily.
handler404 = 'django_demoapp.views.error_404_view'
{
"api_name": "django.urls.path",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "... |
38227695787 | from typing import (
List,
Tuple,
Dict,
Optional,
Callable,
)
import os
import sys
import unittest
from allennlp.data import Vocabulary
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules import TextFieldEmbedder, TokenEmbedder
from allennlp.data.token_indexers.elmo_indexer import ELMoTokenCharactersIndexer
from allennlp.modules.token_embedders import ElmoTokenEmbedder
from allennlp.data.token_indexers import TokenIndexer
from dpd.dataset import BIODataset, BIODatasetReader
from dpd.models.embedder import NERElmoTokenEmbedder, CachedTextFieldEmbedder
from dpd.models import LinearType
from dpd.weak_supervision.contextual_functions import CWRLinear, CWRkNN
from dpd.constants import GLOVE_DIR
SHOULD_RUN = os.path.exists(GLOVE_DIR)
class CWRFuncTest(unittest.TestCase):
    """Tests for CWR-based weak-supervision labeling functions (linear / kNN).

    The ELMo-dependent tests only execute when the GloVe data directory
    exists (SHOULD_RUN); otherwise they are silent no-ops.
    """

    @classmethod
    def get_embedder_info(cls) -> Tuple[TokenEmbedder, TokenIndexer]:
        """Return the ELMo token embedder and its matching character indexer."""
        return NERElmoTokenEmbedder(), ELMoTokenCharactersIndexer()

    @classmethod
    def create_entry(cls, sentence: List[str], labels: List[str], entry_id: int, weight: float) -> Dict[str, object]:
        """Build one weighted BIO example; tokens and labels must align."""
        assert len(sentence) == len(labels)
        return {
            'id': entry_id,
            'input': sentence,
            'output': labels,
            'weight': weight,
        }

    @classmethod
    def create_fake_data(cls, binary_class: Optional[str] = None) -> BIODataset:
        """Construct a tiny in-memory BIODataset without reading any file."""
        data = [
            cls.create_entry(['single'], ['B-Tag'], 0, 1.0),
            cls.create_entry(['single', 'double'], ['B-Tag', 'I-Tag'], 1, 1.0),
            cls.create_entry(['single', 'double', 'triple'], ['B-Tag', 'I-Tag', 'O'], 2, 1.0),
            cls.create_entry(['no_label'], ['O'], 3, 1.0),
        ]
        dataset = BIODataset(0, 'fake_file.txt', binary_class)
        # hack around reading a file: inject the examples directly
        dataset.data = data
        return dataset

    @classmethod
    def setup_embedder(cls, cache: bool = True) -> CachedTextFieldEmbedder:
        """Embed the fake dataset with ELMo and cache the representations."""
        token_embedder, token_indexer = CWRFuncTest.get_embedder_info()
        train_bio = CWRFuncTest.create_fake_data('Tag')
        train_reader = BIODatasetReader(
            bio_dataset=train_bio,
            token_indexers={
                'tokens': token_indexer,
            },
        )
        # The reader ignores the path and serves the injected dataset.
        train_data = train_reader.read('temp.txt')
        vocab = Vocabulary.from_instances(train_data)
        text_field_embedder = BasicTextFieldEmbedder({"tokens": token_embedder})
        cached_embedder = CachedTextFieldEmbedder(
            text_field_embedder=text_field_embedder,
        )
        cached_embedder.cache(
            dataset_id=train_bio.dataset_id,
            dataset=train_data,
            vocab=vocab,
        )
        return cached_embedder

    @classmethod
    def _exec_test(cls, test: callable):
        """Run *test* only when the GloVe directory is available."""
        if SHOULD_RUN:
            test()

    def test_cwr_linear(self):
        """CWRLinear trains on the fake data and produces annotations."""
        def _test():
            dataset = CWRFuncTest.create_fake_data('Tag')
            embedder = CWRFuncTest.setup_embedder()
            cwr_linear = CWRLinear(
                positive_label='Tag',
                embedder=embedder,
                linear_type=LinearType.SVM_LINEAR,
            )
            cwr_linear.train(dataset, dataset_id=dataset.dataset_id)
            annotations = cwr_linear.evaluate(dataset)
        CWRFuncTest._exec_test(_test)

    def test_cwr_knn(self):
        """CWRkNN trains on the fake data and produces annotations."""
        def _test():
            dataset = CWRFuncTest.create_fake_data('Tag')
            embedder = CWRFuncTest.setup_embedder()
            cwr_linear = CWRkNN(
                positive_label='Tag',
                embedder=embedder,
                k=5,
                resolve_mode='weighted',
                threshold=0.7,
            )
            cwr_linear.train(dataset, dataset_id=dataset.dataset_id)
            annotations = cwr_linear.evaluate(dataset)
        CWRFuncTest._exec_test(_test)
| AkshatSh/DPD | tests/weak_supervision/cwr_func_test.py | cwr_func_test.py | py | 3,914 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.exists",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "dpd.constants.GLOVE_DIR",
"line_number": 27,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "unittest.Test... |
33706303531 | import torch
import matplotlib.pyplot as plt
from pathlib import Path
import sys
sys.path.append("../")
import diffoptics as do
# initialization
# device = do.init()
device = torch.device('cpu')
# load target lens
lens = do.Lensgroup(device=device)
lens.load_file(Path('./lenses/Thorlabs/ACL5040U.txt'))
print(lens.surfaces[0])
# generate array of rays
wavelength = torch.Tensor([532.8]).to(device) # [nm]
R = 15.0 # [mm]
def render():
    """Trace a dense ray bundle through the global lens to the sensor.

    Reads module-level `lens`, `wavelength` and `R`; returns the (x, y)
    sensor-plane hit positions.
    """
    ray_init = lens.sample_ray(wavelength, M=31, R=R)
    ps = lens.trace_to_sensor(ray_init)
    return ps[...,:2]

def trace_all():
    """Trace a 2D fan of rays; returns (x, y) hits plus per-ray paths for plotting."""
    ray_init = lens.sample_ray_2D(R, wavelength, M=15)
    ps, oss = lens.trace_to_sensor_r(ray_init)
    return ps[...,:2], oss
ps, oss = trace_all()
ax, fig = lens.plot_raytraces(oss)
ax, fig = lens.plot_setup2D_with_trace([0.0], wavelength, M=5, R=R)
ax.axis('off')
ax.set_title("")
fig.savefig("layout_trace_asphere.pdf", bbox_inches='tight')
# show initial RMS
ps_org = render()
L_org = torch.mean(torch.sum(torch.square(ps_org), axis=-1))
print('original loss: {:.3e}'.format(L_org))
lens.spot_diagram(ps_org, xlims=[-50.0e-3, 50.0e-3], ylims=[-50.0e-3, 50.0e-3])
diff_names = [
'surfaces[0].c',
'surfaces[0].k',
'surfaces[0].ai'
]
# optimize
out = do.LM(lens, diff_names, 1e-4, option='diag') \
.optimize(render, lambda y: 0.0 - y, maxit=300, record=True)
# show loss
plt.figure()
plt.semilogy(out['ls'], '-o')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.show()
# show spot diagram
ps = render()
L = torch.mean(torch.sum(torch.square(ps), axis=-1))
print('final loss: {:.3e}'.format(L))
lens.spot_diagram(ps, xlims=[-50.0e-3, 50.0e-3], ylims=[-50.0e-3, 50.0e-3])
print(lens.surfaces[0])
# lens.plot_setup2D()
| vccimaging/DiffOptics | examples/spherical_aberration.py | spherical_aberration.py | py | 1,719 | python | en | code | 96 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "diffoptics.Lensgroup",
"... |
25947081758 | # idea: problem is very similar to medium vertical traversal but the one thing
# we have to sort the nested array with row value
# the idea is to add row + val to nested array and sort them
# than loop throught and call sort for every nested array as well and
# add second value from this array to result
from collections import defaultdict
from typing import List, Optional
class TreeNode:
    """Plain binary-tree node."""

    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
    def verticalTraversal(self, root: Optional[TreeNode]) -> List[List[int]]:
        """Group node values by column (left to right); within a column,
        order by row, breaking ties by value — LeetCode 987 semantics."""
        if not root:
            return []
        columns = defaultdict(list)

        def visit(node, row, col):
            # Record (row, value); children shift one column left/right.
            columns[col].append((row, node.val))
            if node.left:
                visit(node.left, row + 1, col - 1)
            if node.right:
                visit(node.right, row + 1, col + 1)

        visit(root, 0, 0)
        # Sorting (row, value) pairs realizes the row-then-value ordering.
        return [[value for _, value in sorted(columns[col])]
                for col in sorted(columns)]
| dzaytsev91/leetcode-algorithms | hard/987_vertical_order_traversal_of_a_binary_tree.py | 987_vertical_order_traversal_of_a_binary_tree.py | py | 1,160 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "typing.Optional",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 20,
"usage_type": "name"
}
] |
40082582688 | # import packages / libraries
import numpy
import torch
# import functions
from metrics import FOSC
class TemplatesHandler():
    """Maintains rolling per-class representation buffers used as LOTS targets."""

    def __init__(self, template_size, num_classes, device):
        # One bounded FIFO buffer of recent representations per class.
        self.templates = {i: [] for i in range(num_classes)}
        self.template_size = template_size
        self.device = device

    def add_to_templates(self, labels, representations):
        """Append each representation to its class buffer (bounded FIFO)."""
        for label, representation in zip(labels, representations):
            label = label.item()
            self.templates[label].append(representation)
            if len(self.templates[label]) > self.template_size:
                # Drop the oldest entry to keep the buffer at template_size.
                self.templates[label] = self.templates[label][1:]

    def get_targets(self, labels, target_classes, target_shape):
        """Return one target per sample: the mean of the stored templates for
        its target class, or a random tensor when none exist yet."""
        targets = []
        for label, target_class in zip(labels, target_classes):
            label, target_class = label.item(), target_class.item()
            if len(self.templates[target_class]) == 0:
                # No template collected yet for this class: fall back to noise.
                target = torch.rand(target_shape)
            else:
                images_sum = sum(self.templates[target_class])
                target = images_sum / len(self.templates[target_class])
            targets.append(target.to(self.device))
        return torch.stack(targets).detach().to(self.device)
def get_lots_templates(model, images, labels, num_classes=10):
    """Form per-class LOTS target templates for evaluation.

    Runs *model* on *images*, keeps the logits of correctly classified
    samples, and returns {class: mean logit vector}. Classes with no
    correctly-predicted sample are omitted — the previous dict
    comprehension raised ZeroDivisionError for them.
    """
    with torch.no_grad():
        logits = model(images)
    correct_predictions_mask = torch.max(logits, dim=1)[1] == labels
    class_logits = {i: [] for i in range(num_classes)}
    for i in range(len(labels)):
        if correct_predictions_mask[i].item():
            class_logits[labels[i].item()].append(logits[i])
    # Average only classes that actually collected logits.
    return {i: sum(t) / len(t) for i, t in class_logits.items() if t}
def get_lots_targets(templates, target_classes):
    """Stack the stored template for each requested target class into one tensor."""
    selected = []
    for target_class in target_classes:
        selected.append(templates[target_class.item()])
    return torch.stack(selected)
def get_random_target_classes(num_classes, labels):
    """Draw, for every label, a uniformly random class different from it.

    Returns a tensor on the same device as *labels*.
    """
    all_classes = set(range(num_classes))
    chosen = [
        numpy.random.choice(list(all_classes - {label.item()}))
        for label in labels
    ]
    return torch.tensor(chosen, device=labels.device)
def perturb(images, adversarials, logits, targets, gradient_treatment,
        step_size, p, eps, control_vector, iterations):
    """Perform one attack step on a batch, decreasing loss w.r.t. *targets*.

    gradient_treatment selects both the loss and the gradient normalization:
    'max_scaling' (MSE toward target logits, LOTS) or 'sign' (cross-entropy
    toward target classes, PGD). Only L-inf (p = inf) is implemented.
    """
    p = float(p)
    device = images.device
    if gradient_treatment == 'max_scaling':
        adversarial_loss = torch.nn.MSELoss()(logits, targets)
    elif gradient_treatment == 'sign':
        adversarial_loss = torch.nn.CrossEntropyLoss()(logits, targets)
    else:
        raise NotImplementedError('only max scaling and sign.')
    adversarial_loss.backward()
    gradients = adversarials.grad.detach()
    if p == float('inf'):
        perturbations = torch.zeros(adversarials.shape).to(device)
        # Only samples still flagged in control_vector receive a perturbation.
        for i, indicator in enumerate(control_vector):
            if indicator.item():
                if gradient_treatment == 'max_scaling':
                    # Scale so the largest-magnitude component equals 1.
                    perturbations[i] = gradients[i] / gradients[i].abs().max()
                elif gradient_treatment == 'sign':
                    perturbations[i] = gradients[i].sign()
                else:
                    raise NotImplementedError('only max scaling and sign.')
        # Descend the loss toward the targets (hence the minus sign).
        adversarials = adversarials - step_size * perturbations
        # NOTE(review): eps may be the string 'none' (skip projection) or a
        # number; a numeric *string* would make `>` raise — confirm config typing.
        if eps != 'none' and step_size * iterations > eps:
            eps = float(eps)
            # Project back into the L-inf ball around the clean images.
            adversarials = adversarials.min(images + eps).max(images - eps)
        adversarials = torch.clamp(adversarials, 0, 1)
    else:
        raise NotImplementedError('currently only supporting max norm')
    return adversarials.detach()
def attack(model, images, labels, config, round_values, templates_handler=None,
        target_classes=None, epoch=None, data_name=None, lots_targets=None):
    """Create adversarial examples using the configured attack (PGD or LOTS).

    Returns (adversarials, target_classes). NOTE(review): pop() mutates the
    caller's *config* dict — callers appear to pass dict(config) copies
    where this matters; confirm.
    """
    attack_type = config.pop('attack_type')
    num_classes = model.num_classes
    if target_classes is None:
        target_classes = get_random_target_classes(num_classes, labels)
    adversarials = images.clone().detach()
    # handle attack type
    logits = model(adversarials)
    if attack_type == 'pgd':
        targets = target_classes
        gradient_treatment = 'sign'
    elif attack_type == 'lots':
        if templates_handler is not None:
            # Training-time LOTS: targets come from the rolling per-class
            # template buffers, which are then refreshed with clean logits.
            targets = templates_handler.get_targets(
                labels, target_classes, logits[0].shape
            )
            templates_handler.add_to_templates(
                labels=labels, representations=logits.clone().detach()
            )
        else:
            # Evaluation-time LOTS: fixed targets supplied by the caller.
            targets = lots_targets
        gradient_treatment = 'max_scaling'
    else:
        raise NotImplementedError('Only LOTS and PGD attacks are implemented.')
    # handle stopping criterion via control vector
    additional_stopping_criterion = 'none'
    control_vector = torch.ones(images.shape[0]).bool()
    if 'additional_stopping_criterion' in config:
        additional_stopping_criterion = config['additional_stopping_criterion']
    if additional_stopping_criterion == 'success':
        with torch.no_grad():
            predicted_labels = torch.max(model(adversarials), dim=1)[1]
        # Keep attacking only the samples not yet classified as their target.
        control_vector = predicted_labels != target_classes
    # perturb until stopping criterion is met
    iterations, nb_iter = 0, float(config['nb_iter'])
    while control_vector.any() and iterations < nb_iter:
        adversarials.requires_grad = True
        logits = model(adversarials)
        adversarials = perturb(
            images=images, adversarials=adversarials,
            logits=logits, targets=targets,
            gradient_treatment=gradient_treatment, p=config['p'],
            eps=config['eps'], step_size=config['step_size'],
            control_vector=control_vector, iterations=iterations
        )
        if additional_stopping_criterion == 'success':
            # Success is judged on the 8-bit quantized image, matching how
            # the adversarial would be stored/served.
            rounded_adversarials = adversarials.mul(255.0).round().div(255.0)
            with torch.no_grad():
                predicted_labels = torch.max(
                    model(rounded_adversarials), dim=1
                )[1]
            control_vector = predicted_labels != target_classes
        elif additional_stopping_criterion == 'fosc_threshold':
            # Stop samples whose first-order stationary condition value is
            # below the configured threshold; values are logged per epoch.
            fosc_values = FOSC(
                model=model, originals=images, adversarials=adversarials,
                targets=target_classes, eps=config['eps']
            )
            path = 'results/{}/fosc_values/{}'.format(data_name, attack_type)
            torch.save(fosc_values, '{}/{}.pt'.format(path, epoch))
            control_vector = fosc_values >= config['fosc']
        iterations += 1
    if round_values:
        adversarials = adversarials.mul(255.0).round().div(255.0)
    return adversarials.detach(), target_classes
def create_adversarials(model, data, config, name, path, save,
        target_classes, lots_targets):
    """Create adversarials for a whole TensorDataset and optionally save them.

    Returns a dict with keys 'images', 'labels' and 'targets'.
    """
    # num_classes is computed but unused here; attack() derives its own.
    num_classes, device = model.num_classes, data.tensors[0].device
    images = data.tensors[0].to(device)
    labels = data.tensors[1].to(device)
    # dict(config) shields the caller's config from attack()'s pop().
    adversarial_images, _ = attack(
        model=model, images=images, labels=labels, config=dict(config),
        round_values=True, target_classes=target_classes,
        lots_targets=lots_targets
    )
    adversarials = {
        'images': adversarial_images,
        'labels': labels,
        'targets': target_classes
    }
    if save:
        # Persist each tensor as <name>_adversarial_<tensor>.pt under *path*
        # (path is expected to end with a separator — string concatenation).
        for tensor_name in adversarials.keys():
            filename = '{}_adversarial_{}.pt'.format(name, tensor_name)
            file_path = '{}{}'.format(path, filename)
            torch.save(adversarials[tensor_name], file_path)
    return adversarials
| michaelhodel/adversarial-training-with-lots | attacking.py | attacking.py | py | 8,200 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.rand",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 4... |
70580470825 | import cv2
import numpy as np
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
captura = cv2.VideoCapture(0)
captura.set(cv2.CAP_PROP_FPS,90)
captura.set(cv2.CAP_PROP_FRAME_WIDTH,320)
captura.set(cv2.CAP_PROP_FRAME_HEIGHT,240)
#La resolucion de la camara es 640x480 ------> Tamaño de la ventana openCv
anchoVentana = 320
altoVentana = 240
mitadVentana = anchoVentana//2
tolerancia = 10
promAntX = 0
pwmA = 18
motorA1 = 15
motorA2 = 16
pwmB = 7
motorB1 = 11
motorB2 = 13
standBy = 12
GPIO.setup(pwmA,GPIO.OUT)
GPIO.setup(motorA1,GPIO.OUT)
GPIO.setup(motorA2,GPIO.OUT)
GPIO.setup(pwmB,GPIO.OUT)
GPIO.setup(motorB1,GPIO.OUT)
GPIO.setup(motorB2,GPIO.OUT)
GPIO.setup(standBy,GPIO.OUT)
intensidadA = GPIO.PWM(pwmA,100)
intensidadB = GPIO.PWM(pwmB,100)
intensidadA.start(0)
intensidadB.start(0)
def grabarVideo():
    """Grab and return one frame from the global camera capture."""
    ret,frame = captura.read()
    return frame

def filtrar(frame):
    """Return (edge image, binary image) for the frame.

    The edge image is the morphological gradient (dilation minus original)
    of the inverted Otsu binarization, i.e. the contour of the line.
    """
    img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    th, img_bw = cv2.threshold(img, 250, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
    rect = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    aux = cv2.morphologyEx(img_bw, cv2.MORPH_DILATE, rect)
    out = aux - img_bw
    return (out,img_bw)

def extraerLinea(canny):
    """Return the coordinates of all non-zero (edge) pixels."""
    mascara = cv2.findNonZero(canny)
    return mascara

def puntosImportantesVentana(frame):
    """Draw the central tolerance band and the horizontal zone separators."""
    cv2.line(frame,(mitadVentana-tolerancia,0),(mitadVentana-tolerancia,altoVentana),(255,255,0),3)  # screen mid-line, left tolerance bound
    cv2.line(frame,(mitadVentana+tolerancia,0),(mitadVentana+tolerancia,altoVentana),(255,255,0),3)  # screen mid-line, right tolerance bound
    for i in range(0,240,50):  # separator lines (study zones)
        cv2.line(frame,(0,i),(320,i),(0,0,255),2)

def zona1(frame,mascara):
    """Steer the motors based on the line position in the top image zone.

    Averages the first 96 detected edge points, smooths the estimate
    against the previous frame (promAntX, global), and adjusts the
    left/right motor duty cycles to keep the line centered.
    """
    global promAntX
    # assumes `mascara` holds at least 96 points — TODO confirm; fewer
    # points would bias the integer average toward zero.
    promX = (mascara[:96,0,0].sum())//96
    promY = (mascara[:96,0,1].sum())//96
    if(promX > promAntX+5 or promX < promAntX-5):
        # Estimate moved enough (> 5 px): accept and remember it.
        cv2.circle(frame,(promX,25),3,(0,10,254),-1)
        cv2.line(frame,(promX-tolerancia,0),(promX-tolerancia,50),(0,0,255),3)
        cv2.line(frame,(promX+tolerancia,0),(promX+tolerancia,50),(0,0,255),3)
        promAntX = promX
    else:
        # Small jitter: keep drawing at the previous smoothed position.
        cv2.circle(frame,(promAntX,25),3,(0,10,254),-1)
        cv2.line(frame,(promAntX-tolerancia,0),(promAntX-tolerancia,50),(0,0,255),3)
        cv2.line(frame,(promAntX+tolerancia,0),(promAntX+tolerancia,50),(0,0,255),3)
    if((promAntX-tolerancia)- (mitadVentana-tolerancia) < -23):
        # Line drifted left of center: speed up the right motor to turn left.
        movimientoMotores(20,30) # left motor, right motor
        print("Aumentar motor Derecho")
    elif((promAntX-tolerancia)- (mitadVentana-tolerancia) > 23):
        # Line drifted right of center: speed up the left motor.
        movimientoMotores(30,20) # left motor, right motor
        print("Aumentar motor Izquierdo")
    else:
        movimientoMotores(30,30) # left motor, right motor
        print("esta centrado")

def mostrarVentanas(frame,canny,umbralizada):
    """Display the annotated color frame and the edge image."""
    cv2.imshow("Video Color",frame)
    cv2.imshow("Video Canny",canny)
    #cv2.imshow("Video umbralizado",umbralizada)

def movimientoMotores(vD,vI):
    """Drive both motors forward with duty cycles vD and vI."""
    GPIO.output(motorA1, False)
    GPIO.output(motorA2, True)
    GPIO.output(motorB1, True)
    GPIO.output(motorB2, False)
    GPIO.output(standBy, True)
    intensidadA.ChangeDutyCycle(vD) # right motor
    intensidadB.ChangeDutyCycle(vI) # left motor
def programaPrincipal():
    """Main loop: grab frames, detect the line, steer the motors, and report
    the average per-frame processing time once 'q' is pressed."""
    contador = []
    while True:
        tiempoInicial = time.time()
        frame = grabarVideo()
        canny, umbralizada = filtrar(frame)
        mascara = extraerLinea(canny)
        puntosImportantesVentana(frame)
        zona1(frame, mascara)
        mostrarVentanas(frame, canny, 0)
        contador.append(time.time() - tiempoInicial)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            # Average over ALL measurements: the previous code divided by
            # the last enumerate index (len - 1), under-counting by one and
            # raising ZeroDivisionError for a single measurement.
            mediciones = len(contador)
            if mediciones > 0:
                promedio = sum(contador) / mediciones
                print("{0} medicion(es) y el promedio es: {1}".format(mediciones, promedio))
            break
if __name__ == '__main__':
programaPrincipal()
captura.release()
cv2.destroyAllWindows()
| jlukas1001/Seguidor-con-camara | seguidor2/seguidorVelocista/programaCamara.py | programaCamara.py | py | 3,765 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "RPi.GPIO.setmode",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.BOARD",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoCapture",
"li... |
70230288424 | import graphene
# GraphQL input for creating an auction (class comments used instead of
# docstrings, since graphene exposes docstrings as schema descriptions).
class CreateAuctionInput(graphene.InputObjectType):
    # Meme IDs being auctioned; each ID inside the list is non-null.
    meme = graphene.List(graphene.NonNull(graphene.ID))
    initial_price = graphene.Int(required = True)
    limit = graphene.Int()
    starts_at = graphene.DateTime(required = True)
    ends_at = graphene.DateTime(required = True)

# GraphQL input for updating an existing auction; only `id` is required,
# all other fields are optional partial updates.
class UpdateAuctionInput(graphene.InputObjectType):
    id = graphene.ID(required = True)
    initial_price = graphene.Int()
    limit = graphene.Int()
    starts_at = graphene.DateTime()
    ends_at = graphene.DateTime()

# GraphQL input for deleting an auction by id.
class DeleteAuctionInput(graphene.InputObjectType):
    id = graphene.ID(required = True)
{
"api_name": "graphene.InputObjectType",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "graphene.List",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "graphene.NonNull",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "graphene.ID... |
13989797888 | # this is editorial soln
from collections import deque
from sys import maxint
class Solution(object):
    # NOTE: Python 2 code (xrange, sys.maxint) — keep the interpreter in mind.

    def bfs_search(self, matrix, queue, min_dist):
        """Multi-source BFS: expand from all 0-cells simultaneously, relaxing
        neighbor distances until every cell holds its shortest distance to a 0."""
        n, m = len(matrix), len(matrix[0])
        while queue:
            (x, y), d = queue.popleft()
            for i, j in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
                # Only enqueue when this path improves the recorded distance.
                if 0 <= i < n and 0 <= j < m and (d + 1) < min_dist[i][j]:
                    min_dist[i][j] = d + 1
                    queue.append(((i, j), d + 1))

    def updateMatrix(self, matrix):
        """
        :type matrix: List[List[int]]
        :rtype: List[List[int]]
        """
        n, m = len(matrix), len(matrix[0])
        min_dist = [[0 for _ in xrange(m)] for _ in xrange(n)]
        queue = deque()
        for x in xrange(n):
            for y in xrange(m):
                if matrix[x][y] == 1:
                    # Distance unknown yet: seed with "infinity".
                    min_dist[x][y] = maxint
                else:
                    # Zero cells are the BFS sources at distance 0.
                    queue.append(((x, y), 0))
        self.bfs_search(matrix, queue, min_dist)
        return min_dist
| dariomx/topcoder-srm | leetcode/zero-pass/google/01-matrix/Solution15.py | Solution15.py | py | 1,046 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sys.maxint",
"line_number": 28,
"usage_type": "name"
}
] |
37974609826 | import numpy as np
import matplotlib.pyplot as plt
import random
def random_angle():
    """Draw a step direction uniformly at random from [0, 2*pi)."""
    return random.uniform(0, 2 * np.pi)


def next_step(x_pos, y_pos):
    """Advance one unit-length step from (x_pos, y_pos) in a random direction."""
    theta = random_angle()
    return (x_pos + np.cos(theta), y_pos + np.sin(theta))
# One distinct color per walk so the trajectories can be told apart.
colors = ['black', 'red', 'yellow', 'blue', 'orange', 'purple', 'cyan', 'brown', 'pink', 'green', 'grey', 'chocolate', 'navy', 'indigo', 'coral']
number_of_walks = 15
number_of_coin_flips = 10**5

# Simulate each 2D random walk from the origin, plotting the full path plus a
# larger marker on the walk's final position.
for i in range(number_of_walks):
    x = []
    y = []
    x_pos = 0
    y_pos = 0
    for j in range(number_of_coin_flips + 1):
        (x_pos, y_pos) = next_step(x_pos, y_pos)
        x.append(x_pos)
        y.append(y_pos)
    plt.plot(x, y, lw=0.1, color=colors[i])
    plt.plot(x[-1], y[-1], 'o', ms=10, color=colors[i])

# Fix: savefig's keyword is `bbox_inches`; the original passed the misspelled
# `box_inches`, so the tight-bounding-box option never took effect.
plt.savefig('plot1B.png', format="png", bbox_inches='tight', pad_inches=0.05)
plt.show()
| Fadikk367/WFiIS-VPython | LAB07/ZAD1B.py | ZAD1B.py | py | 877 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "random.uniform",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "numpy.cos",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 1... |
2874693524 | import os
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.applications import VGG19
from tensorflow.keras.layers import MaxPooling2D
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Settings
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
CONTENT_IMG_PATH = "./input/cat.jpg"
STYLE_IMG_PATH = "./input/starry_night.jpg"
GENERATED_IMG_PATH = "./output/generated_img.jpg"
IMG_SIZE = (400, 300)
NUM_COLOR_CHANNELS = 3
ALPHA = 10
BETA = 40
NOISE_RATIO = 0.6
CONTENT_LAYER_INDEX = 13
STYLE_LAYER_INDICES = [1, 4, 7, 12, 17]
STYLE_LAYER_COEFFICIENTS = [0.2, 0.2, 0.2, 0.2, 0.2]
NUM_ITERATIONS = 500
LEARNING_RATE = 2
VGG_IMAGENET_MEANS = np.array([103.939, 116.779, 123.68]).reshape((1, 1, 3)) # In blue-green-red order
LOG_GRAPH = False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Functions
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
def create_output_dir():
    """Create the ./output directory under the current working directory.

    Uses ``os.makedirs(..., exist_ok=True)``: idempotent, and avoids the
    check-then-create race of the original ``os.path.exists()`` guard.
    """
    cwd = os.getcwd()
    output_dir_path = os.path.join(cwd, "output")
    os.makedirs(output_dir_path, exist_ok=True)
def load_img(path, size, color_means):
    """Read the image at *path*, resize it to *size* (bicubic), cast to
    float32, subtract the per-channel *color_means*, and prepend a batch axis.
    """
    raw = cv2.imread(path)
    resized = cv2.resize(raw, dsize=size, interpolation=cv2.INTER_CUBIC)
    centered = resized.astype("float32")
    # In-place subtraction keeps the array float32 (a plain `-` would
    # promote to the dtype of color_means).
    centered -= color_means
    return np.expand_dims(centered, axis=0)
def save_img(img, path, color_means):
    """Undo the mean-centering, clamp to [0, 255], and write *img* to *path*.

    Note: the mean addition happens in place, mutating the caller's array
    (matching the original behaviour).
    """
    img += color_means  # deliberately in place -- see docstring
    pixels = np.clip(img, 0, 255).astype("uint8")
    cv2.imwrite(path, pixels)
def create_noisy_img(img, noise_ratio):
    """Blend *img* with uniform noise in [-20, 20); noise_ratio=1 is pure noise."""
    shape = (img.shape[0], img.shape[1], img.shape[2], img.shape[3])
    noise = np.random.uniform(-20, 20, shape).astype("float32")
    return noise_ratio * noise + (1 - noise_ratio) * img
def create_output_tensors(input_variable, content_layer_index, style_layer_indices):
    """Build activation tensors by pushing *input_variable* through a
    pretrained Keras VGG19, swapping each max-pooling layer for an average
    pooling of the same geometry.

    Returns ``(content_tensor, style_tensors)`` for the requested layer
    indices (index 0 is the model's input layer and is skipped).
    """
    vgg = VGG19(weights="imagenet", include_top=False)
    vgg_layers = list(vgg.layers)

    # Start by applying layer 1 directly to the input variable.
    activation = vgg_layers[1](input_variable)
    content_tensor = activation
    style_tensors = [activation] if 1 in style_layer_indices else []

    for idx in range(2, len(vgg_layers)):
        if type(vgg_layers[idx]) == MaxPooling2D:
            # Average pooling instead of the model's max pooling.
            activation = tf.nn.avg_pool(activation, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        else:
            activation = vgg_layers[idx](activation)
        # Record the activations the caller asked for.
        if idx == content_layer_index:
            content_tensor = activation
        if idx in style_layer_indices:
            style_tensors.append(activation)
    return content_tensor, style_tensors
def content_cost(a_c, a_g):
    """Return the content-cost tensor: normalized squared error between the
    content activations *a_c* and the generated activations *a_g*."""
    _, n_h, n_w, n_c = a_c.shape
    coeff = 1 / (4 * n_h * n_w * n_c)
    return coeff * tf.reduce_sum(tf.square(tf.subtract(a_c, a_g)))
def style_cost(a_s_layers, a_g_layers, style_layer_coefficients):
    """Return the style-cost tensor: a coefficient-weighted sum, over the
    chosen layers, of squared differences between the Gram matrices of the
    style activations and of the generated activations."""

    def gram(activation, n_h, n_w, n_c):
        # Unroll to (channels, positions), then correlate channels.
        unrolled = tf.reshape(tf.transpose(activation), [n_c, n_h * n_w])
        return tf.matmul(unrolled, tf.transpose(unrolled))

    total = 0
    for layer, (a_s, a_g) in enumerate(zip(a_s_layers, a_g_layers)):
        _, n_h, n_w, n_c = a_s.shape
        coeff = 1 / (4 * n_c ** 2 * (n_w * n_h) ** 2)
        layer_cost = coeff * tf.reduce_sum(tf.square(tf.subtract(gram(a_s, n_h, n_w, n_c), gram(a_g, n_h, n_w, n_c))))
        total += layer_cost * style_layer_coefficients[layer]
    return total
def total_cost(content_cost, style_cost, alpha, beta):
    """Combine the two losses: alpha * content_cost + beta * style_cost."""
    return content_cost * alpha + style_cost * beta
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Model
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
create_output_dir()

# Load, resize, and preprocess content and style images
content_img = load_img(CONTENT_IMG_PATH, IMG_SIZE, VGG_IMAGENET_MEANS)
style_img = load_img(STYLE_IMG_PATH, IMG_SIZE, VGG_IMAGENET_MEANS)

# Create initial generated image, this is the starting point for the optimization process
generated_img_init = create_noisy_img(content_img, NOISE_RATIO)

# Create tensorflow variable that will be used as an input to the network.
# This variable will later be assigned generated_img_init and trained.
input_var = tf.Variable(content_img, dtype=tf.float32, expected_shape=(None, None, None, NUM_COLOR_CHANNELS), name="input_var")

# Create output tensors for the activations of the content and style layers,
# using a Keras VGG19-model pretrained on the ImageNet dataset.
x_content, x_styles = create_output_tensors(input_var, CONTENT_LAYER_INDEX, STYLE_LAYER_INDICES)

optimizer = tf.train.AdamOptimizer(LEARNING_RATE)

# Use the Keras session instead of creating a new one
with K.get_session() as sess:
    sess.run(tf.variables_initializer([input_var]))

    # Extract the layer activations for content and style images
    a_content = sess.run(x_content, feed_dict={K.learning_phase(): 0})
    sess.run(input_var.assign(style_img))
    a_styles = sess.run(x_styles, feed_dict={K.learning_phase(): 0})

    # Define the cost function
    J_content = content_cost(a_content, x_content)
    J_style = style_cost(a_styles, x_styles, STYLE_LAYER_COEFFICIENTS)
    J_total = total_cost(J_content, J_style, ALPHA, BETA)

    # Log the graph. To display use "tensorboard --logdir=log".
    if LOG_GRAPH:
        writer = tf.summary.FileWriter("log", sess.graph)
        writer.close()

    # Assign the generated random initial image as input
    sess.run(input_var.assign(generated_img_init))

    # Create the training operation
    train_op = optimizer.minimize(J_total, var_list=[input_var])
    sess.run(tf.variables_initializer(optimizer.variables()))

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    # Train the generated image
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    for i in range(NUM_ITERATIONS):
        sess.run(train_op)
        if (i%20) == 0:
            print(
                "Iteration: " + str(i) +
                ", Content cost: " + "{:.2e}".format(sess.run(J_content)) +
                ", Style cost: " + "{:.2e}".format(sess.run(J_style)) +
                ", Total cost: " + "{:.2e}".format(sess.run(J_total))
            )

    # Save the generated image while the session is still open.
    # Fix: the original repeated these two lines again after the `with` block,
    # where the session is already closed and sess.run() would fail; the
    # redundant duplicate has been removed.
    generated_img = sess.run(input_var)[0]
    save_img(generated_img, GENERATED_IMG_PATH, VGG_IMAGENET_MEANS)
| CarlFredriksson/neural_style_transfer | neural_style_transfer.py | neural_style_transfer.py | py | 7,291 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 34,
... |
35566548820 | import torch
import json
import numpy as np
import torch.utils.data
class DataSet(torch.utils.data.Dataset):
    """Rollout dataset over a memory-mapped multi-variable array.

    Each sample is ``(x, y)``: ``x`` is the (optionally channel-selected)
    frame at index ``idx``; ``y`` holds the forecast channel at indices
    ``idx + delta, idx + 2*delta, ..., idx + rollout*delta``.
    """

    def __init__(self, data_path: str, metadata_path: str, args):
        # Metadata JSON describes the memmap: dtype, variable names, shape,
        # and per-variable min/max (used for normalization bookkeeping).
        with open(metadata_path, 'r') as j:
            obj = json.load(j)
        self.normalize = obj['normalized']
        data_type = obj['data_type']
        data_variables = obj['variables']
        if args.forecast_channel not in data_variables:
            raise ValueError(
                f'Provided channel variable was not found in the provided data, channel: {args.forecast_channel}'
            )
        # Min/max of the forecast channel, for later (de)normalization by callers.
        self.min = obj['var_max_min_values'][args.forecast_channel]['min']
        self.max = obj['var_max_min_values'][args.forecast_channel]['max']
        self.variable_len = len(data_variables)
        self.y_channel = data_variables.index(args.forecast_channel)
        self.x_channel = []
        # Index of forecast channel in x_channel array
        self.channel_y_idx_in_x = None
        # Collects all used channel idxes in one array
        for c in args.channel:
            self.x_channel.append(data_variables.index(c))
            # If this channel is the forecast channel, remember its position in x_channel.
            if c == args.forecast_channel:
                self.channel_y_idx_in_x = len(self.x_channel) - 1
        self.rollout = args.rollout
        self.delta = args.delta
        # Frames consumed by the target rollout; trims the usable length below.
        self.seq_cut = args.delta * args.rollout
        # Read-only memmap so the full array is never loaded into RAM.
        self.data = np.memmap(filename=data_path, dtype=data_type, mode='r', shape=tuple(obj['shape']))
        # Static lon/lat grids come from a separate JSON file.
        with open(args.extra_data, 'r') as j:
            obj = json.load(j)
        lon_idx = obj['variables'].index('lon')
        lat_idx = obj['variables'].index('lat')
        self.lon = obj['data'][lon_idx]
        self.lat = obj['data'][lat_idx]

    def __len__(self):
        # The last seq_cut indices cannot serve as x: their y rollout would
        # run past the end of the array.
        return len(self.data) - self.seq_cut

    def __getitem__(self, idx):
        # x: the whole frame when every variable is selected, otherwise only
        # the requested channels.
        if self.variable_len == len(self.x_channel):
            x = torch.FloatTensor(self.data[idx].copy())
        else:
            x = torch.FloatTensor(self.data[idx, self.x_channel].copy())
        # y: `rollout` future frames of the forecast channel, spaced `delta` apart.
        y_end_idx = idx + self.delta + self.rollout * self.delta
        y_idx_arange = np.arange(idx + self.delta, y_end_idx, self.delta)
        y = torch.FloatTensor(self.data[y_idx_arange, self.y_channel].copy())
        return x, y
| kristofers-volkovs/Primed-UNet-LSTM | src/data_sets/data_rollout.py | data_rollout.py | py | 2,473 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.utils",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.memmap",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number":... |
28088792579 | import requests
from bs4 import BeautifulSoup
from .crawler import Crawler
from ..models.page import PageScraperModel
class SeedingCrawler(Crawler):
    """Furniture-listing crawler that walks paginated search results
    depth-first, following each page's next-cursor and persisting every
    visited page."""

    def __init__(self, base_url):
        # Anchor the crawl at the furniture search listing.
        super().__init__(base_url + 'd/furniture/search/fua?')
        self.stack = []

    def scrape(self):
        """Fetch the first results page and start the depth-first walk."""
        first_page = self.get_soup(self.get_html(self.base_url))
        self.stack.append(
            PageScraperModel(
                first_page,
                0
            )
        )
        self._dfs()

    def _dfs(self):
        """Pop pages off the stack, queueing each page's successor (when it
        has a next cursor) before saving the page itself."""
        while self.stack:
            page = self.stack.pop()
            cursor = page.next_cursor
            if cursor:
                url = self.get_next_url(cursor)
                print(url, flush=True)
                successor = self.get_soup(self.get_html(url))
                self.stack.append(
                    PageScraperModel(
                        successor,
                        cursor,
                        page.max_cursor
                    )
                )
            page.save_to_db()

    def get_next_url(self, next_cursor):
        """Build the URL of the results page identified by *next_cursor*."""
        return self.base_url + f's={next_cursor}'

    def get_html(self, url):
        """GET *url*; return the body on HTTP 200, otherwise None."""
        response = requests.get(url)
        if response.status_code == 200:
            return response.text

    def get_soup(self, html):
        """Parse *html* with the stdlib html.parser backend."""
        return BeautifulSoup(html, 'html.parser')
| philipk19238/klarity | api/app/scraper/crawlers/seeding_crawler.py | seeding_crawler.py | py | 1,372 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "crawler.Crawler",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "models.page.PageScraperModel",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "models.page.PageScraperModel",
"line_number": 34,
"usage_type": "call"
},
{
"api_name... |
72808863784 | from django.shortcuts import render,redirect
from django.http import JsonResponse
from rest_framework.decorators import api_view,permission_classes
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticatedOrReadOnly,IsAuthenticated
from .models import Advocate,Company
from .serializers import AdvocateSerializer,CompanySerializer
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from rest_framework.views import APIView
from django.http import Http404
import requests
from dotenv import load_dotenv
load_dotenv()
import os
TWITTER_API_KEY=os.environ.get('TWITTER_API_KEY')
# Create your views here.
@api_view(['GET'])
@permission_classes([IsAuthenticatedOrReadOnly])
def enpoint(request):
    """API root: list the available advocate routes."""
    # Fix: removed the debug `print(TWITTER_API_KEY)` -- writing a secret
    # credential to stdout/logs is a security leak, and the print served no
    # functional purpose.
    data = ['advocates', 'advocates/:username']
    return Response(data)
@api_view(['GET', 'POST'])
@permission_classes([IsAuthenticatedOrReadOnly])
def advocate_list(request):
    """GET: search advocates by username/bio substring; POST: create one."""
    if request.method == 'GET':
        # A missing ?query= parameter means "match everything".
        search_term = request.GET.get('query')
        if search_term == None:
            search_term = ''
        matches = Advocate.objects.filter(
            Q(username__icontains=search_term) | Q(bio__icontains=search_term)
        )
        return Response(AdvocateSerializer(matches, many=True).data)
    if request.method == 'POST':
        created = Advocate.objects.create(
            username=request.data['username'],
            bio=request.data['bio']
        )
        return Response(AdvocateSerializer(created, many=False).data)
# NOTE(review): @permission_classes is a function-view decorator; applied to
# an APIView subclass it does not install permissions. To enforce them, set
# the `permission_classes` class attribute instead -- left unchanged here to
# avoid altering runtime behaviour.
@permission_classes([IsAuthenticatedOrReadOnly])
class AdvocateDetails(APIView):
    """Retrieve, update, or delete a single Advocate by username."""

    def get_object(self, username):
        """Return the Advocate for *username*, or raise Http404."""
        try:
            return Advocate.objects.get(username=username)
        except Advocate.DoesNotExist:
            raise Http404("Advocate doesn't exist.")

    def get(self, request, username):
        """Serialize and return one advocate."""
        advocate = self.get_object(username)
        serializer = AdvocateSerializer(advocate, many=False)
        return Response(serializer.data)

    def put(self, request, username):
        """Update an advocate's username and bio from the request body."""
        advocate = self.get_object(username)
        # Fix: the original assigned request.data['username'] to `bio` and
        # then immediately overwrote it, so the username was never updated.
        advocate.username = request.data['username']
        advocate.bio = request.data['bio']
        advocate.save()
        serializer = AdvocateSerializer(advocate, many=False)
        return Response(serializer.data)

    def delete(self, request, username):
        """Delete the advocate and confirm."""
        advocate = self.get_object(username)
        advocate.delete()
        return Response('user was deleted')
# @api_view(['GET','PUT','DELETE'])
# @permission_classes([IsAuthenticatedOrReadOnly])
# def advocate_details(request,username):
# try:
# advocate=Advocate.objects.get(username=username)
# if request.method=="GET":
# serializer=AdvocateSerializer(advocate,many=False)
# return Response(serializer.data)
# if request.method=='PUT':
# advocate.username=request.data['username']
# advocate.bio=request.data['bio']
# advocate.save()
# serializer=AdvocateSerializer(advocate,many=False)
# return Response(serializer.data)
# if request.method=='DELETE':
# advocate.delete()
# return Response('delete done')
# except ObjectDoesNotExist:
# return Response({'error': 'Advocate username not found.'}, status=404)
@api_view(['GET'])
@permission_classes([IsAuthenticatedOrReadOnly])
def Companis_List(request):
    """Return every Company, serialized."""
    all_companies = Company.objects.all()
    serializer = CompanySerializer(all_companies, many=True)
    return Response(serializer.data)
{
"api_name": "dotenv.load_dotenv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.resp... |
28564597089 | from functools import cache
import matplotlib.pyplot as plt
import numpy as np
from data import NICKEL_REGIMES, load_experiment_data
from experiment_setup import NI_FOIL
from intensity import output_intensity
from materials import Layer, Cell, KAPTON
def get_geometric_factor_ascan(plot=False):
    """Per-crystal geometric factors from the a-scan detector images.

    For the manually selected 'in_cell' and 'out_of_cell' scans, sum the
    counts inside each crystal's window over the manually chosen good scan
    range and normalize each crystal's total to crystal 0.
    """
    # Detector-image windows (determined manually): one x-range shared by all
    # crystals, one y-range per crystal.
    x_lo, x_hi = 75, 160
    crystal_windows = [(40, 90), (130, 180), (220, 270), (300, 350), (400, 450)]
    # Good scans determined manually; the selection is I02-independent.
    scans = {
        'in_cell': (2, [21, 30]),
        'out_of_cell': (48, [6, 75]),
    }
    coefficients = {}
    for name, (number, scan_range) in scans.items():
        data = np.load(f'data/geometry_data/scan_{number}.npy')
        # Total counts per scan (rows) and per crystal (columns).
        per_crystal = [np.sum(data[:, x_lo:x_hi, y_lo:y_hi], axis=(1, 2)) for y_lo, y_hi in crystal_windows]
        intensities = np.array(per_crystal).T
        if plot:
            plt.plot(intensities)
            plt.show()
        good = intensities[scan_range[0]:scan_range[1] + 1]
        totals = good.sum(axis=0)
        coefficients[name] = totals / totals[0]
    return coefficients
def get_norm(energy, intensity):
    """Integrated edge intensity: subtract the mean pre-edge background,
    then integrate intensity over energy (trapezoidal rule)."""
    pre_edge = energy < NICKEL_REGIMES['pre_edge']
    baseline = intensity.loc[pre_edge].mean()
    return np.trapz(intensity - baseline, energy)
def get_geometric_factors_energy_scans():
    """Per-detector geometric factors from the Ni-foil energy scans,
    normalized to detector 0, for both foil placements."""
    detectors = NI_FOIL[0].detectors
    factors = {}
    for name, experiment in (('in_cell', NI_FOIL[0]), ('out_of_cell', NI_FOIL[1])):
        cell = load_experiment_data(experiment)
        raw = np.array([get_norm(cell['energy'], cell[f'intensity_{d}']) for d in detectors])
        factors[name] = raw / raw[0]
    return factors
def get_theoretical_angle_distribution():
    """Modeled per-detector intensity ratios for a 5 um Ni layer on Kapton,
    normalized to the first detector."""
    foil = Cell(layers=[KAPTON, Layer(depth=5.0, densities={'Ni': 8.908})])
    # Depth profile: 65 um with no nickel, then 5 um of nickel only.
    profile = np.zeros(7000)
    profile[6500:] = 1.0
    incident = np.pi / 180 * 31
    exit_angles = np.pi / 180 * np.array([41, 34, 27, 20, 13])  # Detectors C1-5
    intensities = output_intensity(incident, exit_angles, foil, np.array([8400]), 7480, profile)[0]
    return intensities / intensities[0]
@cache
def get_geometric_factors():
    """All geometric factors: the measured energy-scan factors plus the
    modeled 'theoretical' distribution; computed once and cached."""
    measured = get_geometric_factors_energy_scans()
    return {**measured, 'theoretical': get_theoretical_angle_distribution()}
| ondraskacel/cellExperiments | geometry.py | geometry.py | py | 2,922 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.load",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_n... |
1434884470 | import json
import sys
from btcc_trade_collector import bttc_trade_collector
from btcc_client import Market
from btcc_log import create_timed_rotating_log
from btcc_log import set_log_name_prefix
with open("./collector_config.json") as config_file:
config = json.load(config_file)
if (len(sys.argv) != 2):
print ("Usage: python run_collector.py [job name]")
print ("Optional job names are:")
print (" ".join(config["jobs"].keys()))
sys.exit(1)
common = config["common"]
jobname = sys.argv[1]
job = config["jobs"][jobname]
set_log_name_prefix(jobname)
logger = create_timed_rotating_log()
submit_config = common.copy()
submit_config.update(job)
a = bttc_trade_collector(submit_config)
logger.info("btcc collector started with the following config:")
logger.info(json.dumps(submit_config))
a.run() | UnluckyNinja/hwindCode | python/btctrade/run_collector.py | run_collector.py | py | 823 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 20,
... |
3853633003 | from Logger import Logger
from ConsoleLogger import ConsoleLogger
from HTTPRequest import HTTPRequest
from HTTPProxyRequest import HTTPProxyRequest
from ProxyManager import ProxyManager
import multiprocessing
import multiprocessing.managers
import json
import re
regex_email = '([A-Z0-9%+\-\._]+@[A-Z0-9\.\-_]+\.[A-Z0-9]+)'
regex_category_id = 'Shop\.php/Listing\?category=([0-9]+)'
regex_category_page = 'Shop\.php/Listing\?category=[0-9]+&p=([0-9]+)'
regex_about_user_link = 'my_page\.php\?uid=([0-9]+)'
regex_auction_link = '"(/.*?\.html)"'
regex_user_name = '\<span class="uname"\>(.*?)\</span\>'
allegro_category_url = 'http://allegro.pl/Shop.php/Listing?category='
allegro_about_user_url = 'http://allegro.pl/my_page.php?uid='
allegro_user_auctions_url = 'http://allegro.pl/listing/user/listing.php?us_id='
processes_no = 5
ProxyManager.proxy_alert_count = 5
HTTPProxyRequest.headers = HTTPRequest.header = {'User-agent': 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.57 Safari/537.36'}
proxy_list_url = '' # URL for a proxy list
Logger.init()
ConsoleLogger.init()
class MyManager(multiprocessing.managers.BaseManager):
    """Custom multiprocessing manager; main() registers ProxyManager on it so
    all worker processes share one proxy pool through a manager proxy."""
    pass
def parse_user_page(worker_id, uid):
    """Scrape an Allegro user's display name and e-mail addresses.

    When the profile page contains no address, falls back to the user's
    first auction page. Returns (user_name, email_list).
    """
    Logger.message('Worker {}: processing user with id: {}'.format(worker_id, uid))
    profile_html = HTTPProxyRequest(allegro_about_user_url + str(uid)).read()

    emails = re.findall(regex_email, profile_html, re.IGNORECASE)
    names = re.findall(regex_user_name, profile_html, re.IGNORECASE)
    name = names[0] if names else ''

    if not emails:
        # No address on the profile -- try the user's auction listing instead.
        Logger.message('Worker {}: requesting user page, user id: {}'.format(worker_id, uid))
        listing_html = HTTPProxyRequest(allegro_user_auctions_url + str(uid)).read()
        auctions = re.findall(regex_auction_link, listing_html, re.IGNORECASE)
        if auctions:
            Logger.message('Worker {}: requesting user auction, user id: {}'.format(worker_id, uid))
            auction_html = HTTPProxyRequest('http://allegro.pl' + auctions[0]).read()
            emails = re.findall(regex_email, auction_html, re.IGNORECASE)
        else:
            emails = []
    return name, emails
def parse_categories(worker_id, input_queue, output_queue, proxy_manager, done_user_list, done_categories_list):
    """Worker loop: consume category ids from input_queue, scrape every
    listing page of each category, and emit user/category progress records
    on output_queue.

    A None category id is a poison pill that terminates the worker.
    done_user_list / done_categories_list are manager-shared collections
    used to skip work already recorded in output.txt on a previous run.
    """
    try:
        HTTPProxyRequest.set_proxy_manager(proxy_manager)
        Logger.message('Worker {}: started'.format(worker_id))
        while True:
            Logger.message('Worker {}: trying to acquire job'.format(worker_id))
            category_id = input_queue.get()
            if category_id is None:
                Logger.message('Worker {}: found poison pill, terminating'.format(worker_id))
                input_queue.task_done()
                break
            Logger.message('Worker {}: processing category id {}'.format(worker_id, category_id))
            if (category_id in done_categories_list) and ('all' in done_categories_list[category_id]):
                Logger.message('Worker {}: category {} was already processed'.format(worker_id, category_id))
                # NOTE(review): this `continue` skips input_queue.task_done();
                # harmless while nothing calls input_queue.join(), but confirm.
                continue
            response = HTTPProxyRequest(allegro_category_url + str(category_id)).read()
            # Last pagination link on the page gives the total page count.
            m = re.findall(regex_category_page, response, re.IGNORECASE)
            if len(m) == 0:
                Logger.message('Worker {}: category {} is empty'.format(worker_id, category_id))
                output_queue.put({'type': 'category_data', 'category_id': category_id, 'page': 'all'})
                input_queue.task_done()
                continue
            pages = int(m[-1])
            for page_no in range(1, pages + 1):
                Logger.message('Worker {}: started processing page {} of {} from category {}'.format(worker_id, page_no, pages, category_id))
                if (category_id in done_categories_list) and (page_no in done_categories_list[category_id]):
                    Logger.message('Worker {}: page {} in category {} was already processed'.format(worker_id, page_no, category_id))
                    continue
                # Page 1 was already fetched above when counting pages.
                if page_no > 1:
                    response = HTTPProxyRequest(allegro_category_url + str(category_id) + '&p=' + str(page_no)).read()
                user_ids = re.findall(regex_about_user_link, response, re.IGNORECASE)
                for user_id in user_ids:
                    if user_id in done_user_list:
                        Logger.message('Worker {}: user {} already processed'.format(worker_id, user_id))
                        continue
                    (user_login, email_list) = parse_user_page(worker_id, user_id)
                    # De-duplicate addresses scraped from the same page(s).
                    email_list = set(email_list)
                    Logger.message('Worker {}: found {} emails for uid {}'.format(worker_id, len(email_list), user_id))
                    if len(email_list) == 0:
                        output_queue.put({'type': 'user_data', 'user_id': user_id, 'login': user_login, 'email': ''})
                    else:
                        for email in email_list:
                            output_queue.put({'type': 'user_data', 'user_id': user_id, 'login': user_login, 'email': email})
                    done_user_list.append(user_id)
                output_queue.put({'type': 'category_data', 'category_id': category_id, 'page': page_no})
                Logger.message('Worker {}: finished processing page {} of {} from category {}'.format(worker_id, page_no, pages, category_id))
            output_queue.put({'type': 'category_data', 'category_id': category_id, 'page': 'all'})
            Logger.message('Worker {}: done processing category id {}'.format(worker_id, category_id))
            input_queue.task_done()
        Logger.message('Worker {}: finished'.format(worker_id))
    except KeyboardInterrupt:
        pass
def output_handler(output_queue):
    """Drain *output_queue*, appending each item as one JSON line to
    output.txt; a None item (poison pill) or KeyboardInterrupt stops it.

    Flushes after every record so progress survives abrupt termination.
    """
    with open('output.txt', 'a', encoding='utf-8') as out:
        try:
            for item in iter(output_queue.get, None):
                out.write(json.dumps(item) + '\n')
                out.flush()
        except KeyboardInterrupt:
            pass
def main():
    """Entry point: discover all Allegro category ids, then fan the scraping
    work out to worker processes and collect results via an output process."""
    Logger.message('Start')
    # Find all categories ids
    response = HTTPRequest(allegro_category_url + '0').read()
    matches = re.findall(regex_category_id, response, re.IGNORECASE)
    main_categories = set(matches)
    main_categories.add('0')
    categories_ids = []
    # Walk each top-level category and collect its sub-category ids,
    # excluding the top-level ids themselves.
    for category_id in matches:
        Logger.message('Collecting categories ids in category: {}'.format(category_id))
        response = HTTPRequest(allegro_category_url + category_id).read()
        id_list = re.findall(regex_category_id, response, re.IGNORECASE)
        for sub_cat_id in id_list:
            if sub_cat_id not in main_categories:
                categories_ids.append(sub_cat_id)
    # Create queues
    input_queue = multiprocessing.JoinableQueue()
    output_queue = multiprocessing.Queue()
    # Fill input queue
    for category_id in categories_ids:
        input_queue.put(category_id)
    Logger.message('Queue size: {}'.format(input_queue.qsize()))
    # Create proxy manager (shared across workers via the custom manager)
    MyManager.register('ProxyManagerClass', ProxyManager)
    manager = MyManager()
    manager.start()
    proxy_manager = manager.ProxyManagerClass()
    proxy_manager.set_url(proxy_list_url)
    proxy_manager.update_proxies()
    # Create processed user list
    user_list = multiprocessing.Manager().list()
    # Create categories dictionary
    categories_list = multiprocessing.Manager().dict()
    # Get list of already processed items (resume support from output.txt)
    try:
        # NOTE(review): the file handle from open() is never closed
        # explicitly here; it is left to garbage collection.
        database = open('output.txt', 'r', encoding='utf-8').read().splitlines(False)
        for row in database:
            data = json.loads(row)
            if data['type'] == 'user_data':
                if data['user_id'] not in user_list:
                    user_list.append(data['user_id'])
            elif data['type'] == 'category_data':
                if data['category_id'] not in categories_list:
                    categories_list[data['category_id']] = []
                # Manager dict values must be reassigned for mutations to stick.
                tmp = categories_list[data['category_id']]
                if data['page'] not in tmp:
                    tmp.append(data['page'])
                categories_list[data['category_id']] = tmp
    except IOError:
        pass
    Logger.message('Already processed {} users'.format(len(user_list)))
    # Create worker processes
    processes = []
    Logger.message('Starting worker processes.')
    for worker_id in range(processes_no):
        # Add poison pill for each process (queued after all real work items)
        input_queue.put(None)
        process = multiprocessing.Process(target=parse_categories,
                                          args=(worker_id, input_queue, output_queue, proxy_manager, user_list, categories_list), daemon=True)
        processes.append(process)
        process.start()
    # Setup finish flag for output handler
    # Create output handler process
    Logger.message('Starting output handler process.')
    output_process = multiprocessing.Process(target=output_handler, args=(output_queue,), daemon=True)
    output_process.start()
    # Wait for worker processes to finish
    Logger.message('Waiting for worker processes to finish.')
    try:
        for process in processes:
            process.join()
    except KeyboardInterrupt:
        pass
    finally:
        # Put poison pill for output handler
        output_queue.put(None)
        Logger.message('Done loading, waiting for output handler process to finish.')
        # Wait for output handler to finish
        output_process.join()
# Guard is required here: this module spawns multiprocessing workers, which
# re-import the module in child processes.
if __name__ == '__main__':
    main()
| dsypniewski/allegro-profile-crawler | main.py | main.py | py | 8,550 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ProxyManager.ProxyManager.proxy_alert_count",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "ProxyManager.ProxyManager",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "HTTPProxyRequest.HTTPProxyRequest.headers",
"line_number": 23,
... |
70669125543 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# WDQ7005 - Data Mining
# Master of Data Science | University of Malaya
# Assignment Part A: Web Crawling of Real-time Data
# Group Members:
# Azwa Kamaruddin (WQD170089)
# Kok Hon Loong (WQD170086)
# In[1]:
import requests
import pandas as pd
# NOTE(review): numpy (np) is imported but never used in this script.
import numpy as np
import matplotlib.pyplot as plt

# Use ggplot styling and a large default canvas for all plots below.
plt.style.use('ggplot')
plt.figure(figsize=(20,10))

from bs4 import BeautifulSoup
# In[ ]:
### =============================== ###
### CODE STRUCTURE
### =============================== ###
#
# We partitioned the code into 3 sections:
# 1. Number of CONFIRMED cases.
# 2. Number of DEATH cases.
# 3. Number of RECOVERED cases.
# For each section, we display the time series trend for ASEAN countries and China and compare between them.
# In[2]:

### =============================== ###
### NO. OF CONFIRMED CASES
### =============================== ###

url_confirmed_cases = "https://github.com/CSSEGISandData/COVID-19/blob/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv"

# Download the GitHub page and pull the rendered CSV table out of the HTML.
try:
    response = requests.get(url_confirmed_cases, timeout=5)
    if response.status_code == 200:
        parsed_page = BeautifulSoup(response.content, 'html.parser')
        csv_table = parsed_page.find("table", {"class": "js-csv-data csv-data js-file-line-container"})
        df = pd.read_html(str(csv_table))
    else:
        print(str(response.status_code) + " - Error, page not found.")
except requests.ConnectionError as error:
    print('Connection error')
    print(str(error))

# In[3]:

# The parsed page yields a list of tables; the first one holds the data.
# NOTE(review): if the download above failed, `df` is undefined here and this
# line raises NameError — confirm that aborting is the intended behavior.
confirmed_cases = df[0]
confirmed_cases.head()

# In[4]:

# Remove the columns we do not analyse (row index, coordinates, province),
# then rename the country column and use it as the index.
confirmed_cases = confirmed_cases.drop(columns=['Unnamed: 0', 'Lat', 'Long', 'Province/State'])
confirmed_cases = confirmed_cases.rename(columns={'Country/Region': 'Country'}).set_index('Country')
confirmed_cases.head()

# In[5]:

# COMMENT: From the above table we can see that the number of confirmed cases are increasing for all listed countries.

# In[6]:

# Summary statistics per country (one column per country after transposing):
confirmed_cases.transpose().describe()
# In[7]:

# COMMENT:
# count - shows how many days of data cases are being tracked. There are 51 days since the first tracking.
# mean - shows the average number of confirmed cases during the 51 days.
# max - the total number of confirmed cases to date.

# In[8]:

# Obtain only the confirmed cases for the 10 ASEAN countries + China
# Get data for ASEAN regions:
asean = ['Malaysia', 'Singapore', 'Thailand', 'Brunei', 'Cambodia', 'Indonesia', 'Laos', 'Myanmar', 'Philippines', 'Vietnam']
confirmed_cases_asean = confirmed_cases[confirmed_cases.index.isin(asean)]
confirmed_cases_asean

# In[9]:

# China: at this point there is still one row per province/region
# (the groupby below collapses them).
confirmed_cases_china = confirmed_cases[confirmed_cases.index == 'China']
confirmed_cases_china.head()

# In[10]:

# For China, we combined all the different regions into a single row representing the entire China:
confirmed_cases_china_combined = confirmed_cases_china.groupby('Country').sum()
confirmed_cases_china_combined
# In[11]:

# Plot the number of confirmed cases over time for all ASEAN countries
# (transpose so dates run along the x-axis and each country is one line):
confirmed_cases_asean.T.plot()
plt.ylabel('No. of confirmed cases')
plt.xlabel('Days')
plt.show()

# In[12]:

# COMMENT: Singapore has the most number of recorded confirmed cases and are increasing exponentially.
# Malaysia is second followed by Thailand as the 3rd most recorded confirmed cases in ASEAN.

# In[13]:

# Plot the number of confirmed cases over time for China:
confirmed_cases_china_combined.T.plot()
plt.ylabel('No. of confirmed cases')
plt.xlabel('Days')
plt.show()

# In[14]:

# COMMENT: Confirmed cases in China continues to rise. More than 80k cases confirmed to date.

# In[15]:

# Plot the number of confirmed cases over time for China vs ASEAN countries
# (draw China first, then reuse the same axes for the ASEAN lines):
ax = confirmed_cases_china_combined.T.plot()
confirmed_cases_asean.T.plot(ax=ax)

# In[16]:

# COMMENT: China numbers are too huge to compare with the number of cases in ASEAN.
# In[17]:

### =============================== ###
### NO. OF DEATH CASES
### =============================== ###

url_death_cases = "https://github.com/CSSEGISandData/COVID-19/blob/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Deaths.csv"

# Fetch the page and extract the rendered CSV table from the HTML.
try:
    response = requests.get(url_death_cases, timeout=5)
    if response.status_code == 200:
        parsed_page = BeautifulSoup(response.content, 'html.parser')
        csv_table = parsed_page.find("table", {"class": "js-csv-data csv-data js-file-line-container"})
        df = pd.read_html(str(csv_table))
    else:
        print(str(response.status_code) + " - Error, page not found.")
except requests.ConnectionError as error:
    print('Connection error')
    print(str(error))

# In[18]:

# The first parsed table holds the death counts.
death_cases = df[0]
death_cases.head()

# In[19]:

# Drop the unused columns (row index, coordinates, province), then rename the
# country column and use it as the index.
death_cases = death_cases.drop(columns=['Unnamed: 0', 'Lat', 'Long', 'Province/State'])
death_cases = death_cases.rename(columns={'Country/Region': 'Country'}).set_index('Country')
death_cases.head()

# In[20]:

# Summary statistics per country (one column per country after transposing):
death_cases.transpose().describe()

# In[21]:

# COMMENT:
# count - shows how many days of data cases are being tracked. There are 51 days since the first tracking.
# mean - shows the average number of death cases during the 51 days.
# max - the total number of death cases to date.
# In[22]:

# Obtain only the death cases for the 10 ASEAN countries + China
# Get data for ASEAN regions:
asean = ['Malaysia', 'Singapore', 'Thailand', 'Brunei', 'Cambodia', 'Indonesia', 'Laos', 'Myanmar', 'Philippines', 'Vietnam']
death_cases_asean = death_cases[death_cases.index.isin(asean)]
death_cases_asean

# In[23]:

# China: still one row per province/region here.
death_cases_china = death_cases[death_cases.index == 'China']
death_cases_china.head()

# In[24]:

# For China, we combined all the different regions into a single row representing the entire China:
death_cases_china_combined = death_cases_china.groupby('Country').sum()
death_cases_china_combined

# In[25]:

# Plot the number of death cases over time for all ASEAN countries:
death_cases_asean.T.plot()
plt.ylabel('No. of death cases')
plt.xlabel('Days')
plt.show()

# In[26]:

# COMMENT: The number of deaths from the covid-19 is rare/low in the ASEAN region.
# Only 4 patients have died: 2 from Philippines, 1 from Thailand, and 1 from Indonesia.
# In[27]:

# Plot the number of death cases over time for China:
death_cases_china_combined.T.plot()
# BUG FIX: this chart shows deaths, but the y-label previously read
# 'No. of confirmed cases' (copy-paste from the confirmed-cases section).
plt.ylabel('No. of death cases')
plt.xlabel('Days')
plt.show()

# In[28]:

# COMMENT: The number of death cases in China has risen to more than 3k cases.

# In[29]:

# Plot the number of death cases over time for China vs ASEAN countries:
ax = death_cases_china_combined.T.plot()
death_cases_asean.T.plot(ax=ax)

# In[30]:

# COMMENT: Deaths in ASEAN countries due to covid-19 is rare or low compared to China.
# In[31]:

### =============================== ###
### NO. OF RECOVERED CASES
### =============================== ###

url_recovered_cases = "https://github.com/CSSEGISandData/COVID-19/blob/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Recovered.csv"

# Fetch the page and extract the rendered CSV table from the HTML.
try:
    response = requests.get(url_recovered_cases, timeout=5)
    if response.status_code == 200:
        parsed_page = BeautifulSoup(response.content, 'html.parser')
        csv_table = parsed_page.find("table", {"class": "js-csv-data csv-data js-file-line-container"})
        df = pd.read_html(str(csv_table))
    else:
        print(str(response.status_code) + " - Error, page not found.")
except requests.ConnectionError as error:
    print('Connection error')
    print(str(error))

# In[32]:

# The first parsed table holds the recovery counts.
recovered_cases = df[0]
recovered_cases.head()

# In[33]:

# Drop the unused columns (row index, coordinates, province).
recovered_cases = recovered_cases.drop(columns=['Unnamed: 0', 'Lat', 'Long', 'Province/State'])

# In[34]:

# Rename the country column and use it as the index.
recovered_cases = recovered_cases.rename(columns={'Country/Region': 'Country'}).set_index('Country')

# In[35]:

# Display first 5 results:
recovered_cases.head()

# In[36]:

# Summary statistics per country (one column per country after transposing):
recovered_cases.transpose().describe()
# In[37]:

# COMMENT:
# count - shows how many days of data cases are being tracked. There are 51 days since the first tracking.
# mean - shows the average number of recovered cases during the 51 days.
# max - the total number of recovered cases to date.

# In[38]:

# Obtain only the recovered cases for the 10 ASEAN countries + China
# Get data for ASEAN regions:
asean = ['Malaysia', 'Singapore', 'Thailand', 'Brunei', 'Cambodia', 'Indonesia', 'Laos', 'Myanmar', 'Philippines', 'Vietnam']
recovered_cases_asean = recovered_cases[recovered_cases.index.isin(asean)]
recovered_cases_asean

# In[39]:

# China: still one row per province/region here.
recovered_cases_china = recovered_cases[recovered_cases.index == 'China']
recovered_cases_china.head()

# In[40]:

# For China, we combined all the different regions into a single row representing the entire China:
recovered_cases_china_combined = recovered_cases_china.groupby('Country').sum()
recovered_cases_china_combined

# In[41]:

# Plot the number of recovered cases over time for all ASEAN countries:
recovered_cases_asean.T.plot()
plt.ylabel('No. of recovered cases')
plt.xlabel('Days')
plt.show()

# In[42]:

# COMMENT: Singapore continues to lead in terms of the number of patients who recovered from covid-19.
# Thailand is second in recovery, while Malaysia is in 3rd place.
# In[43]:

# Plot the number of recovered cases over time for China:
recovered_cases_china_combined.T.plot()
# BUG FIX: this chart shows recoveries, but the y-label previously read
# 'No. of confirmed cases' (copy-paste from the confirmed-cases section).
plt.ylabel('No. of recovered cases')
plt.xlabel('Days')
plt.show()

# In[44]:

# COMMENT: Despite the huge number of confirmed cases at 80k, the number of patients who recovered is also increasing rapidly at 60k people.

# In[45]:

# Plot the number of recovered cases over time for China vs ASEAN countries:
ax = recovered_cases_china_combined.T.plot()
recovered_cases_asean.T.plot(ax=ax)

# In[46]:

# COMMENT: China recovery is increasing since they have the most case, while ASEAN countries are seen recovering as well but ASEAN cases are lower compared to China.
# In[47]:

### =============================== ###
### COMPARISON OF DEATH vs RECOVERED
### =============================== ###

# For ASEAN countries: overlay histograms of death and recovery counts.
# NOTE(review): these histogram the daily count values per country; confirm a
# histogram (rather than a time-series line) is the intended comparison here.
ax = death_cases_asean.plot(kind='hist')
recovered_cases_asean.plot(kind='hist', ax=ax)

# In[48]:

recovered_cases_asean

# In[49]:

# Histogram of the most recent day's death counts (last row after transposing).
death_cases_asean.transpose()[-1:].plot(kind='hist')

# In[50]:

# Histogram of the most recent day's recovered counts, unstacked to one value
# per country.
recovered_cases_asean.transpose()[-1:].unstack().plot(kind='hist')

# In[ ]:
| hlkok/WQD7005DataMining-Assignments | Assignment-a-Web-Crawler_Final.py | Assignment-a-Web-Crawler_Final.py | py | 11,487 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 23,
"usage_type": "name"
},
{
"api_na... |
35631463735 | from django.shortcuts import render
import requests
# NOTE(review): a live API key should not be committed to source control —
# move it to an environment variable or Django settings before deploying.
API_KEY = '09cdaa56db4d4a6cb93bf1bedde04bd7'


def home(request):
    """Render the home page with the current top UK headlines from NewsAPI.

    Args:
        request: the incoming Django HttpRequest.

    Returns:
        HttpResponse rendering ``home.html`` with an ``articles`` context list.
    """
    url = f'https://newsapi.org/v2/top-headlines?country=gb&apikey={API_KEY}'
    # Bound the upstream call so a slow NewsAPI response cannot hang the view.
    response = requests.get(url, timeout=10)
    data = response.json()
    # NewsAPI omits 'articles' on error responses; fall back to an empty list
    # instead of raising KeyError.
    articles = data.get('articles', [])
    context = {
        'articles': articles
    }
return render (request, 'home.html', context) | CassandraTalbot32/News-API | api/newsapp/views.py | views.py | py | 399 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 21,
"usage_type": "call"
}
] |
30800479288 | import time, datetime
from decoder import Decoder, InvalidGearException
import os, yaml, shutil, json
from gear import Gear, Substat
import unidecode
from PIL import Image
def show_img(path):
    """Open the image file at *path* and display it in the default viewer."""
    Image.open(path).show()
def log(msg):
    """Print *msg* to stdout, prefixed with the current timestamp."""
    timestamp = datetime.datetime.now()
    print(f"[{timestamp}] - {msg}")
# Load the folder configuration; use a context manager so the handle is closed
# (the previous open() was never closed).
with open("config.yaml", "r") as config_file:
    config = yaml.safe_load(config_file)
folder_left, folder_substats = config['folder_left'], config['folder_substats']
folder_result = config['folder_result']

# BUG FIX: each path must embed the actual file name being iterated; previously
# the loop variable was unused, so every entry pointed at the same bogus path
# and the json.load below could never succeed.
gear_paths = [f'{folder_result}/{filename}' for filename in os.listdir(folder_result) if '.json' in filename]


def _is_incomplete(gear_path):
    """Return True when the gear JSON has not had its substats completed."""
    with open(gear_path, 'r') as gear_file:
        return not json.load(gear_file)['meta']['completed_substats']


incompleted_paths = [gear_path for gear_path in gear_paths if _is_incomplete(gear_path)]
log(f"Incomplete gears: {len(incompleted_paths)}/{len(gear_paths)}")
# Interactive correction pass: walk every incomplete gear file and let the
# operator type the true substat values, view the source screenshot, skip, or
# abort the whole run.
for index_correction,gear_path in enumerate(incompleted_paths):
    with open(gear_path, 'r') as f:
        dict_gear = json.load(f)
        f.close()  # redundant: the `with` block already closes the file
    if not dict_gear['meta']['completed_substats']:
        print('...')
        log(f'Correcting {gear_path} - correction {index_correction}/{len(incompleted_paths)}')
        skip_this_correction = False
        stop_all_corrections = False
        while not skip_this_correction and not stop_all_corrections:
            decision = input('What do you want to do ?\nskip : skip this correction\nsub : Enter the substats value separated by a "-". Example "5-8%-365-7.5%" for 4 substats: [5, 8%, 365, 7.5%]\nshow : show picture of the substats values\nstop : stop the whole process\n>>')
            # skipping this correction
            if decision.lower() == "skip":
                break
            # entering the sub input (any answer containing "-" is treated as values)
            elif "-" in decision.lower():
                subs_list = decision.replace(" ","").replace("sub","").split("-")
                confirm_subs = input(f'Do you confirm adding subs {subs_list} to gear n°{dict_gear["meta"]["index_img"]} ? y/n\n>>')
                if confirm_subs.lower() == 'y':
                    print(f'Adding subs {decision} to gear n°{dict_gear["meta"]["index_img"]}')
                    # creating list of substats: keep the stat name and proc
                    # count from the stored dicts, replace only the amounts
                    # the operator typed (a trailing '%' marks a percent stat)
                    substats = []
                    sub_dicts = dict_gear['substats']
                    # assumes the operator typed exactly as many values as
                    # there are stored substats — TODO confirm, extra values
                    # would raise IndexError on sub_dicts[i]
                    for i,sub_value in enumerate(subs_list):
                        sub_dict = sub_dicts[i]
                        is_percent = '%' in sub_value
                        stat_amount = float(sub_value.replace('%',''))
                        substats.append(Substat(stat_name=sub_dict['stat_name'],
                                                stat_amount=stat_amount,
                                                nbs_procs=sub_dict['nbs_procs'],
                                                is_percent=is_percent))
                    # creating gear object, now flagged as completed
                    gear = Gear(
                        type = unidecode.unidecode(dict_gear['type']),
                        level = dict_gear['level'],
                        main_stat_name = dict_gear['main_stat_name'],
                        main_stat_amount = dict_gear['main_stat_amount'],
                        main_stat_is_percent = dict_gear['main_stat_is_percent'],
                        substats = substats,
                        set_name = dict_gear['set_name'],
                        meta = {"completed_substats":True, "index_img":dict_gear["meta"]["index_img"]})
                    # updating json dict of gear with new gear._asdict()
                    print(f"Updating gear dict: {gear._asdict()}")
                    with open(f"{folder_result}/{dict_gear['meta']['index_img']}.json","w+") as f:
                        json.dump(gear._asdict(), f)
                        f.close()  # redundant: the `with` block already closes the file
                    # breaking the loop to go to next gear to correct
                    break
                else:
                    print('Cancelling this subs input')
                    pass
            # showing image of the gear's substat screenshot
            elif decision.lower() == "show":
                show_img(f"{folder_substats}/{dict_gear['meta']['index_img']}.png")
            # stopping the whole correction process
            elif decision.lower() == "stop":
                stop_all_corrections = True
                break
            else:
                pass
    # stop_all_corrections is assigned inside the branch above; since
    # incompleted_paths only contains incomplete gears, the branch always ran.
    if stop_all_corrections:
        log(f'Stopping all corrections')
        break
| FrenchieTucker/RPGgearDetection | main_correcter.py | main_correcter.py | py | 4,475 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PIL.Image.open",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
... |
41761215227 | # coding:utf-8
from __future__ import unicode_literals
from django import forms
from .models import Student
class StudentForm(forms.ModelForm):
    """Model form for creating and editing Student records."""

    def clean_qq(self):
        """Validate that the QQ field is purely numeric and coerce it to int."""
        qq_value = self.cleaned_data['qq']
        if not qq_value.isdigit():
            raise forms.ValidationError('必须是数字!')
        return int(qq_value)

    class Meta:
        model = Student
        fields = (
            'name', 'sex', 'profession',
            'email', 'qq', 'phone'
        )
| the5fire/django-practice-book | code/student_house/student_sys/student/forms.py | forms.py | py | 500 | python | en | code | 293 | github-code | 36 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.forms.ValidationError",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "d... |
27757909127 | # -*- coding: utf-8 -*-
import scrapy
'''以JSON格式导出爬取内容'''
class QuetosExtractSpider(scrapy.Spider):
    """Spider that scrapes quotes and exports them (run with -o quotes.json)."""

    name = 'quetos_extract'

    def start_requests(self):
        """Schedule the first two listing pages for crawling."""
        for page_url in [
            'http://quotes.toscrape.com/page/1/',
            'http://quotes.toscrape.com/page/2/',
        ]:
            yield scrapy.Request(url=page_url, callback=self.parse)

    def parse(self, response):
        """Yield the text, author and tags contained in each
        <div class="quote"> element of the page."""
        for quote in response.css('div.quote'):
            item = {
                'text': quote.css('span.text::text').extract_first(),
                'author': quote.css('small.author::text').extract_first(),
                'tags': quote.css('div.tags a.tag::text').extract(),
            }
            yield item
"""
并且在外加命令 scrapy crawl quotes -o quotes.json 后
保存到名为quotes.json的文件中
"""
| IceDerce/python-spider-practice | spider/scrapy_practice/scrapy_practice/spiders/quotes_extract.py | quotes_extract.py | py | 997 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scrapy.Spider",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "scrapy.Request",
"line_number": 15,
"usage_type": "call"
}
] |
25077224694 | from collections import OrderedDict
import ctypes
import os
import random
import re
import sys
import pandas as pd
from psychopy import core, event, visual
import experiment as ex
from settings import get_settings
# Runtime configuration (window dimensions, participant id, ...) used below.
settings = get_settings(env="production", test=False)
# Module-level shared state: the active Paradigm instance (set by
# construct_par) and the clock started when playAll() begins.
par = None
experiment_timer = None
class Paradigm:
    """Represents a study paradigm: a PsychoPy window plus an ordered queue
    of stimuli played in sequence.

    Attributes:
        window: the PsychoPy window stimuli draw into
        escape_key: keyboard key that aborts the run
        stimuli: queue of (StimulusType, (arguments)) tuples
        log_data: ordered mapping of per-trial data, dumped to CSV at the end
    """

    def __init__(
        self,
        window_dimensions=(800, 600),
        color="Black",
        escape_key=None,
        *args,
        **kwargs
    ):
        """Initialize a paradigm.

        Args:
            window_dimensions: pixel size of the window, or the string
                "full_screen" for a fullscreen window
            color: background color of the window object
            escape_key: the keyboard button that exits the paradigm
        """
        if window_dimensions == "full_screen":
            self.window = visual.Window(
                fullscr=True, color=color, units="norm", *args, **kwargs
            )
        else:
            self.window = visual.Window(
                window_dimensions, color=color, units="norm", *args, **kwargs
            )
        self.stimuli = []
        self.escape_key = escape_key
        # Ordered so the CSV rows come out in presentation order.
        self.log_data = OrderedDict()

    def addStimulus(self, stimulus):
        """Append one stimulus to the queue.

        A stimulus should be a tuple of the form (StimulusType, (arguments)),
        e.g. (Text, ('Hello World!', 3.0)).
        """
        assert type(stimulus) in (
            tuple,
            list,
        ), "Stimulus should be in form (StimulusType, (arguments))"
        self.stimuli.append(stimulus)

    def addStimuli(self, stimuli):
        """Append each stimulus in *stimuli* to the queue."""
        for stimulus in stimuli:
            self.addStimulus(stimulus)

    def insertStimulus(self, stimulus):
        """Insert a stimulus at the *front* of the queue (played next)."""
        assert type(stimulus) in (
            tuple,
            list,
        ), "Stimulus should be in form (StimulusType, (arguments))"
        self.stimuli.insert(0, stimulus)

    def playAll(self, is_odd, verbose=False):
        """Play all the stimuli in the sequence until the escape key is seen.

        Args:
            is_odd: bool, whether the participant ID is odd (key mapping)
            verbose: bool, if True print progress per stimulus
        """
        stim_index = 0
        # Clock for total experiment runtime; module-level so playNext can
        # read it once the queue empties.
        global experiment_timer
        experiment_timer = core.MonotonicClock()
        while self.escape_key not in event.getKeys():
            stim_index += 1
            if verbose:
                # BUG FIX: the progress string was previously built with
                # str.format() but never printed (a no-op expression).
                print("Playing stimulus {stim_index}".format(stim_index=stim_index))
            self.playNext(is_odd)
        core.quit()

    def playNext(self, is_odd, verbose=False):
        """Play the next stimulus, or finalize the log and quit when done.

        Args:
            verbose: bool, if True print the stimulus object

        Returns:
            The stimulus that was shown (does not return after the final quit).
        """
        if len(self.stimuli) > 0:
            stim_data = self.stimuli.pop(0)
            stim = self._init_stimulus(stim_data)
            if verbose:
                print(stim)
            return stim.show(is_odd)
        else:
            # Queue exhausted: record total runtime, then dump all trial
            # rows to <uid>_posttest_log.csv and exit.
            elapsed = experiment_timer.getTime()
            # NOTE(review): writes through the module-level `par`, not
            # `self` — assumes the active Paradigm is always `par`.
            par.log_data["exp_runtime"] = ["Total Experiment Runtime",
                                           "", "", "", "", "", "", "", "", "", "", "", "", "", elapsed]
            # Zero-pad single-digit participant IDs for the filename.
            uid = settings["participant"]
            if int(uid) < 10:
                uid = "0" + str(uid)
            # Create logs directory if it doesn't exist.
            if not os.path.exists(ex.log_dir):
                os.makedirs(ex.log_dir)
            log_path = str(ex.log_dir) + "/" + str(uid) + "_posttest_log.csv"
            log_df = pd.DataFrame.from_dict(
                par.log_data, orient='index', columns=['NOUN', 'ASSOCIATE', 'MF_RC', 'YO_OM', 'WN_LD', 'RESP_KEY', 'RESP_CODED',
                                                       'RESP_CORRECT', 'KEY_RT', 'PROBE_TYPE', 'PROBE', 'PROBE_KEY', 'PROBE_CODED', 'PROBE_CORRECT', 'PROBE_RT'])
            log_df.to_csv(log_path, index=False)
            core.quit()

    def _init_stimulus(self, stim_data):
        """Instantiate a stimulus from (StimulusType, (args)[, kwargs]).

        The paradigm window is always passed as the first positional
        argument to the stimulus constructor.
        """
        stim_class = stim_data[0]
        stim_args = stim_data[1] if type(stim_data[1]) == tuple else tuple()
        try:
            # With positional args present the kwargs (if any) sit at index
            # 2; otherwise index 1 itself is treated as the kwargs mapping.
            stim_kwargs = stim_data[2] if stim_args else stim_data[1]
        except IndexError:
            stim_kwargs = {}
        return stim_class(self.window, *stim_args, **stim_kwargs)
class Stimulus(object):
    """Abstract base for all stimulus types.

    Subclasses must implement show(); close() ends the PsychoPy session.
    """

    def show(self):
        """Present the stimulus. Must be implemented by descendant classes."""
        raise NotImplementedError

    def close(self):
        """Shut down the experiment."""
        core.quit()
class Text(Stimulus):
    # Displays a text stimulus, then either waits a fixed duration or blocks
    # for a key press (delegating response handling to WaitForKey).
    def __init__(self, window, text, height, duration, keys, stim_type, probe_details=None):
        """Initializes a text stimulus

        Args:
            window: the window object
            text: single-entry dict mapping a lookup key (e.g. the noun) to
                the string to display
            height: text height in 'norm' units
            duration: the duration the text will appear
            keys: the list of keys to press to continue to the next stimulus (if None, will automatically go to next stimulus)
            stim_type: float 0 if probe, 1 if intro, -1 otherwise
            probe_details: dict probe question and answer key
        """
        self.window = window
        # The dict's single key identifies this trial when logging.
        self.word_key = list(text.keys())[0]
        self.height = height
        self.text = visual.TextStim(
            self.window, text=text[self.word_key], height=self.height, units="norm")
        self.duration = duration
        self.keys = keys
        self.stim_type = stim_type
        self.probe_details = probe_details

    def show(self, is_odd):
        """Draw the text and block for the duration or until a key press."""
        self.text.draw()
        self.window.flip()
        # Create timer for individual stimulus (used for reaction times)
        stim_timer = core.MonotonicClock()
        if self.duration:
            core.wait(self.duration)
        elif self.keys:
            # Key-gated stimulus: WaitForKey records the response and RT.
            wait = WaitForKey(self.window, self.keys,
                              self.word_key, self.stim_type, self.probe_details)
            return wait.show(self, is_odd, stim_timer)
        self.window.flip()
        return self
class Pause(Stimulus):
    """A fixed-length pause between stimuli.

    Attributes:
        duration: pause length in seconds
        window
    """

    def __init__(self, window, duration):
        self.window = window
        # Coerce so string/int durations work with the wait call below.
        self.duration = float(duration)

    def show(self):
        """Idle for the configured duration, then return self."""
        core.wait(self.duration)
        return self
class WaitForKey(Stimulus):
    """Wait for a key press and route the response into the experiment log.

    Attributes:
        event: action to run after the key press ("continue" or "exit")
        window
        keys: keyboard keys accepted as a response
        word_key: noun identifying the trial this response belongs to
        stim_type: 0 for a probe question, negative for a noun recall trial
        probe_details: dict with the probe question and its answer mapping
    """

    def __init__(self, window, keys, word_key, stim_type, probe_details, event="continue"):
        self.window = window
        self.keys = keys
        self.event = event
        self.word_key = word_key
        self.stim_type = stim_type
        self.probe_details = probe_details

    def show(self, stimulus, is_odd, stim_timer):
        """Block until a valid key is pressed, then score and log it."""
        # Get participant answer
        key_pressed = wait_for_key(self.keys)
        # Reaction time measured from when the stimulus appeared.
        key_rt = stim_timer.getTime()
        # Process answer
        self.run_event(stimulus, is_odd, self.stim_type,
                       self.probe_details, key_pressed, key_rt)
        return self

    def run_event(self, stimulus, is_odd, stim_type, probe_details, key_pressed, key_rt):
        """Score the key press and update the module-level log.

        stim_type == 0 scores a probe question against the noun logged
        immediately before it; stim_type < 0 scores a noun recall trial and
        schedules a follow-up probe.
        """
        global par
        if self.event == "exit":
            print("Exiting...")
            self.window.close()
            core.quit()
        if stim_type == 0:
            # Get noun that came before probe (last key in the ordered log)
            last_noun = next(reversed(par.log_data.keys()))
            # Get potential probe answers from the master word list
            mf_rc = ex.excel_df.loc[ex.excel_df.NOUN ==
                                    last_noun, 'MF_RC'].values[0]
            yo_om = ex.excel_df.loc[ex.excel_df.NOUN ==
                                    last_noun, 'YO_OM'].values[0]
            wn_ld = ex.excel_df.loc[ex.excel_df.NOUN ==
                                    last_noun, 'WN_LD'].values[0]
            last_noun_attributes = [
                mf_rc.upper(), yo_om.upper(), wn_ld.upper()]
            # Process user response: the probe answer is correct when it
            # matches any attribute of the preceding noun.
            probe_question = probe_details['q']
            # assumes key_pressed is one of the probe answer keys ('d'/'k'
            # per get_probe) — TODO confirm keys always restrict to these
            probe_answer = probe_details[key_pressed].upper()
            correct_flag = 1 if probe_answer in last_noun_attributes else 0
            # Update log with probe info (appended to the noun's existing row)
            log = par.log_data[last_noun]
            log.extend([probe_question, key_pressed,
                        probe_answer, correct_flag, key_rt])
            par.log_data[last_noun] = log
        elif stim_type < 0:
            # Create key for answer lookup: physical key -> coded answer
            # ('f'/'e'), counterbalanced by participant-ID parity.
            answer_code = dict()
            if is_odd:
                answer_code['d'] = 'f'
                answer_code['k'] = 'e'
            else:
                answer_code['d'] = 'e'
                answer_code['k'] = 'f'
            # Get correct answer for this noun from the master list
            correct_answer = ex.excel_df.loc[ex.excel_df.NOUN ==
                                             self.word_key, 'recallRespCorrect'].values[0]
            word_type = ex.excel_df.loc[ex.excel_df.NOUN ==
                                        self.word_key].ASSOCIATE.values[0]
            probe_type = word_type.upper()
            # Get misc info related to word from master list
            mf_rc = ex.excel_df.loc[ex.excel_df.NOUN ==
                                    self.word_key, 'MF_RC'].values[0]
            yo_om = ex.excel_df.loc[ex.excel_df.NOUN ==
                                    self.word_key, 'YO_OM'].values[0]
            wn_ld = ex.excel_df.loc[ex.excel_df.NOUN ==
                                    self.word_key, 'WN_LD'].values[0]
            correct_flag = 1 if correct_answer == answer_code[key_pressed] else 0
            # On an incorrect recall, probe a random category instead of the
            # word's own associate.
            if not correct_flag:
                probes = ["HOUSE", "FACE"]
                probe_type = probes[random.randint(0, 1)]
            # Start a fresh log row for this noun
            par.log_data[self.word_key] = [
                self.word_key, word_type.upper(), mf_rc, yo_om, wn_ld, key_pressed, answer_code[key_pressed], correct_flag, key_rt, probe_type]
            # Queue the follow-up probe as the next stimulus
            get_probe(probe_type, is_odd)
        stimulus.window.flip()
        return stimulus
def wait_for_key(keys):
    """Block until one of *keys* is pressed and return the pressed key.

    Args:
        keys: a list or tuple of acceptable keys
    """
    # Drop any stale key events queued before this stimulus appeared.
    event.clearEvents()
    pressed = event.waitKeys(keyList=keys)
    return pressed[0]
def get_probe(probe_type, is_odd):
    """Insert probe at beginning of master stim list

    Args:
        probe_type: str Face or House probe
        is_odd: bool participant ID is odd (selects the counterbalanced list)
    """
    probe_tup = None
    # Get probe based on type (house or face) and is_odd
    if probe_type == 'HOUSE' and is_odd:
        probe_tup = random.choice(ex.house_list_odd)
    elif probe_type == 'HOUSE' and not is_odd:
        probe_tup = random.choice(ex.house_list_even)
    elif probe_type == 'FACE' and is_odd:
        probe_tup = random.choice(ex.face_list_odd)
    elif probe_type == 'FACE' and not is_odd:
        probe_tup = random.choice(ex.face_list_even)
    if probe_tup:
        (probe, keys) = probe_tup
        # Extract answer key for check later.
        # assumes keys[0] looks like "d = <answer>,k = <answer>": the slicing
        # below keeps everything from two characters past '=' — TODO confirm
        # the exact legend format in the experiment data
        split = keys[0].split(",")
        d_key = split[0]
        k_key = split[1]
        d_answer = d_key[d_key.index('='):][2:]
        k_answer = k_key[k_key.index('='):][2:]
        probe_details = {
            'q': probe,
            'd': d_answer,
            'k': k_answer
        }
        # Construct stim (question plus its key legend) and put it at the
        # front of the stim list so it plays immediately after the noun.
        display_text = dict()
        display_text[probe] = probe + "\n" + keys[0]
        stim = (Text, (display_text, ex.default_text_height,
                       ex.default_duration, ex.default_keys, 0, probe_details))
        insert(stim)
    else:
        # Unknown probe type: exit rather than continue without a probe.
        print("Exiting...")
        core.quit()
def construct_par(is_odd):
    """Build the global Paradigm and queue the intro plus the noun trials.

    Args:
        is_odd: bool, whether the participant ID is odd (selects key legend)
    """
    global par
    par = Paradigm(
        window_dimensions=settings["window_dimensions"], escape_key="escape")
    # Pool the face- and house-associated nouns and randomize their order.
    shuffled_nouns = ex.face_rows.NOUN.tolist() + ex.house_rows.NOUN.tolist()
    random.shuffle(shuffled_nouns)
    # Intro screen goes first.
    # To use separate defaults for intro change below to ex.intro_duration
    # and ex.intro_keys.
    stimuli = [(Text, ({'intro': ex.intro}, ex.intro_text_height,
                       ex.default_duration, ex.default_keys, 1))]
    # The response-key legend depends only on participant parity, so pick it
    # once outside the loop.
    key_text = ex.noun_odd_key if is_odd else ex.noun_even_key
    for noun in shuffled_nouns:
        stimuli.append((Text, ({noun: noun + "\n" + key_text}, ex.default_text_height,
                               ex.default_duration, ex.default_keys, -1)))
    # Hand the whole queue to the paradigm.
    par.addStimuli(stimuli)
def insert(stimulus):
    """Push *stimulus* onto the front of the master stimulus list.

    Args:
        stimulus: the (StimulusType, (arguments)) tuple to be inserted
    """
    global par
    par.insertStimulus(stimulus)
| julianstephens/FH-WordOA-PostTest | stimulus.py | stimulus.py | py | 14,113 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "settings.get_settings",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "psychopy.visual.Window",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "psychopy.visual",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "psychopy... |
4778234359 | import os
import re
import glob
import datetime
from prettytable import PrettyTable
from matplotlib import pyplot as plt
# How many of the most recent CI runs to include in the history plots.
NUM_MOST_RECENT_RUNS = 100

# Where this pipeline's Megatron-LM logs live, and where the persistent CI
# history (raw per-run text files) and the generated plots are stored.
te_path = os.getenv("TE_PATH", "/opt/transformerengine")
mlm_log_dir = os.path.join(te_path, "ci_logs")
te_ci_log_dir = "/data/transformer_engine_ci_logs"
te_ci_plot_dir = os.path.join(te_ci_log_dir, "plots")

# Matches the convergence report lines of a Megatron-LM log; the capture
# groups hold the mantissa/exponent pairs for the loss value and the loss PPL.
convergence_pattern = (
    "validation loss at iteration \d* on validation set | lm loss"
    " value: ([\d.]*)E\+(\d*) | lm loss PPL: ([\d.]*)E\+(\d*)"
)
# Matches the per-iteration elapsed-time report (milliseconds).
perf_pattern = "elapsed time per iteration \(ms\): ([\d.]*)"
def get_output_file():
    """Return the path of the raw-log text file for the current CI pipeline.

    Falls back to a timestamp-based name when CI_PIPELINE_ID is not set.
    """
    timestamp = datetime.datetime.now()
    fallback = (f"unknown_pipeline_id_{timestamp.month}_{timestamp.day}_"
                f"{timestamp.year}_{timestamp.hour}_{timestamp.minute}")
    pipeline_id = os.getenv("CI_PIPELINE_ID", fallback)
    return os.path.join(te_ci_log_dir, f"{pipeline_id}.txt")
def get_run_metrics(filename):
    """Return the loss, perplexity, and step time for a given megatron-LM logfile."""
    with open(filename, "r") as f:
        contents = f.read()

    def scientific(mantissa, exponent):
        # The regex captures strings like ("4.5", "02") meaning 4.5e2.
        return float(mantissa) * (10 ** int(exponent))

    # Loss and PPL: the tuple layout follows the capture groups in
    # convergence_pattern.
    matches = re.findall(convergence_pattern, contents)
    loss = round(scientific(matches[1][0], matches[1][1]), 2)
    ppl = round(scientific(matches[2][2], matches[2][3]), 2)

    # Average step time across every reported iteration.
    step_times = [float(x) for x in re.findall(perf_pattern, contents)]
    avg_step_time = round(sum(step_times) / len(step_times), 2)
    return loss, ppl, avg_step_time
def print_run_logs():
    """Collect metrics for every logged run, persist them, and pretty-print them."""
    pretty_tables = []
    raw_lines = []
    for model_config in os.listdir(mlm_log_dir):
        config_dir = os.path.join(mlm_log_dir, model_config)
        summary = PrettyTable()
        summary.title = model_config
        summary.field_names = ["Config", "Loss", "Perplexity", "Avg time per step (ms)"]
        for exp in os.listdir(config_dir):
            loss, ppl, time_per_step = get_run_metrics(os.path.join(config_dir, exp))
            exp_name = exp[:-4]  # strip the 4-character extension (e.g. ".txt")
            summary.add_row([exp_name, loss, ppl, time_per_step])
            # One space-separated line per run; perf_and_loss_plots re-parses this.
            raw_lines.append(f"{model_config} {exp_name} {loss} {ppl} {time_per_step}\n")
        pretty_tables.append(summary)
    # Persist the raw lines for the history plots, then print the tables.
    with open(get_output_file(), "w") as f:
        f.writelines(raw_lines)
    for summary in pretty_tables:
        print(summary)
def save_plot(title, legend, data, filename, ylabel):
    """Draw one line per series in *data* and save the figure to the plot dir.

    Args:
        title: figure title
        legend: one label per series, aligned with *data*
        data: list of equally long numeric series (one per experiment)
        filename: output file name inside te_ci_plot_dir
        ylabel: y-axis label
    """
    # X axis is simply the 1-based run index.
    xs = list(range(1, len(data[0]) + 1))
    plt.figure()
    for series_label, series in zip(legend, data):
        plt.plot(xs, series, "-o", label=series_label)
    plt.title(title)
    plt.legend()
    plt.xlabel(f"Last {NUM_MOST_RECENT_RUNS} runs")
    plt.ylabel(ylabel)
    plt.savefig(os.path.join(te_ci_plot_dir, filename))
def perf_and_loss_plots():
    """Re-read the persisted per-run logs and regenerate loss/perf history plots."""
    # Oldest-to-newest by creation time, keeping only the most recent runs.
    files = glob.glob(os.path.join(te_ci_log_dir, "*.txt"))
    files.sort(key=os.path.getctime)
    files = files[-NUM_MOST_RECENT_RUNS:]
    # data[model_config][exp_name] -> {"loss": [...], "perf": [...]}, one
    # entry appended per run, in file (i.e. chronological) order.
    data = {}
    for filename in files:
        with open(filename) as file:
            for line in file:
                line = line.strip()
                # Line format written by print_run_logs:
                # "<model_config> <exp_name> <loss> <ppl> <time_per_step>"
                # (the PPL column is ignored here).
                model_config, exp_name, loss, _, time_per_step = line.split(" ")
                if model_config not in data:
                    data[model_config] = {}
                if exp_name not in data[model_config]:
                    data[model_config][exp_name] = {"loss": [], "perf": []}
                data[model_config][exp_name]["loss"].append(float(loss))
                data[model_config][exp_name]["perf"].append(float(time_per_step))
    # One loss plot and one perf plot per model config, with one line per
    # experiment.
    for model_config, experiments in data.items():
        lm_loss_data = []
        lm_perf_data = []
        legend = []
        for exp_name, lm_data in experiments.items():
            legend.append(exp_name)
            lm_loss_data.append(lm_data["loss"])
            lm_perf_data.append(lm_data["perf"])
        save_plot(
            model_config + " loss", legend,
            lm_loss_data, model_config + "_loss.png",
            "LM-Loss",
        )
        save_plot(
            model_config + " perf",
            legend, lm_perf_data, model_config + "_perf.png",
            "Time per step (ms)",
        )
# Entry point: refresh the raw per-run log first, then regenerate the
# history plots from the accumulated logs.
if __name__ == "__main__":
    print_run_logs()
    perf_and_loss_plots()
| NVIDIA/TransformerEngine | tests/pytorch/distributed/print_logs.py | print_logs.py | py | 4,204 | python | en | code | 1,056 | github-code | 36 | [
{
"api_name": "os.getenv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number":... |
25947079068 | from typing import List
class Solution:
    def trap(self, height: List[int]) -> int:
        """Compute trapped rain water (LeetCode 42) with the two-pointer sweep.

        Water above each bar is bounded by the smaller of the best wall seen
        from either side; advance the pointer on the side with the lower wall.
        Runs in O(n) time and O(1) extra space.
        """
        n = len(height)
        if n <= 2:
            # Fewer than three bars can never hold water.
            return 0
        total = 0
        lo, hi = 1, n - 1
        best_left, best_right = height[0], height[-1]
        while lo <= hi:
            best_left = max(best_left, height[lo])
            best_right = max(best_right, height[hi])
            if best_left <= best_right:
                # Left wall is the limiting one: settle the left bar.
                total += best_left - height[lo]
                lo += 1
            else:
                # Right wall is the limiting one: settle the right bar.
                total += best_right - height[hi]
                hi -= 1
        return total
| dzaytsev91/leetcode-algorithms | hard/42_trapping_rain_water.py | 42_trapping_rain_water.py | py | 699 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
}
] |
72809308264 | from typing import TYPE_CHECKING, Any, Dict, List, Type, TypeVar, Union
import attr
from ..models.extension_value_type import ExtensionValueType
from ..types import UNSET, Unset
if TYPE_CHECKING:
from ..models.reference import Reference
T = TypeVar("T", bound="Extension")
@attr.s(auto_attribs=True)
class Extension:
    """
    Attributes:
        name (Union[Unset, str]):
        value (Union[Unset, str]):
        value_type (Union[Unset, ExtensionValueType]):
        refers_to (Union[Unset, List['Reference']]):
        supplemental_semantic_ids (Union[Unset, List['Reference']]):
        semantic_id (Union[Unset, Reference]):
    """

    # NOTE(review): this follows the openapi-python-client generated-model
    # layout — hand edits may be lost on regeneration; confirm before changing.
    name: Union[Unset, str] = UNSET
    value: Union[Unset, str] = UNSET
    value_type: Union[Unset, ExtensionValueType] = UNSET
    refers_to: Union[Unset, List["Reference"]] = UNSET
    supplemental_semantic_ids: Union[Unset, List["Reference"]] = UNSET
    semantic_id: Union[Unset, "Reference"] = UNSET
    # Extra JSON keys not covered by the declared fields; excluded from __init__.
    additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-ready dict, omitting fields that are still UNSET."""
        name = self.name
        value = self.value
        # Enum -> its raw string value.
        value_type: Union[Unset, str] = UNSET
        if not isinstance(self.value_type, Unset):
            value_type = self.value_type.value

        # Nested Reference models -> list of plain dicts.
        refers_to: Union[Unset, List[Dict[str, Any]]] = UNSET
        if not isinstance(self.refers_to, Unset):
            refers_to = []
            for refers_to_item_data in self.refers_to:
                refers_to_item = refers_to_item_data.to_dict()
                refers_to.append(refers_to_item)

        supplemental_semantic_ids: Union[Unset, List[Dict[str, Any]]] = UNSET
        if not isinstance(self.supplemental_semantic_ids, Unset):
            supplemental_semantic_ids = []
            for supplemental_semantic_ids_item_data in self.supplemental_semantic_ids:
                supplemental_semantic_ids_item = supplemental_semantic_ids_item_data.to_dict()
                supplemental_semantic_ids.append(supplemental_semantic_ids_item)

        semantic_id: Union[Unset, Dict[str, Any]] = UNSET
        if not isinstance(self.semantic_id, Unset):
            semantic_id = self.semantic_id.to_dict()

        field_dict: Dict[str, Any] = {}
        # additional_properties go in first so declared fields win on collision.
        field_dict.update(self.additional_properties)
        field_dict.update({})
        if name is not UNSET:
            field_dict["name"] = name
        if value is not UNSET:
            field_dict["value"] = value
        if value_type is not UNSET:
            field_dict["valueType"] = value_type
        if refers_to is not UNSET:
            field_dict["refersTo"] = refers_to
        if supplemental_semantic_ids is not UNSET:
            field_dict["supplementalSemanticIds"] = supplemental_semantic_ids
        if semantic_id is not UNSET:
            field_dict["semanticId"] = semantic_id

        return field_dict

    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        """Build an Extension from a decoded JSON dict.

        Keys left over after all known fields are popped are kept as
        `additional_properties`.
        """
        # Local import avoids a circular import at module load time.
        from ..models.reference import Reference

        d = src_dict.copy()
        name = d.pop("name", UNSET)
        value = d.pop("value", UNSET)
        # Missing key -> UNSET; present -> parse into the enum.
        _value_type = d.pop("valueType", UNSET)
        value_type: Union[Unset, ExtensionValueType]
        if isinstance(_value_type, Unset):
            value_type = UNSET
        else:
            value_type = ExtensionValueType(_value_type)

        # `or []` also tolerates an explicit null / UNSET list.
        refers_to = []
        _refers_to = d.pop("refersTo", UNSET)
        for refers_to_item_data in _refers_to or []:
            refers_to_item = Reference.from_dict(refers_to_item_data)
            refers_to.append(refers_to_item)

        supplemental_semantic_ids = []
        _supplemental_semantic_ids = d.pop("supplementalSemanticIds", UNSET)
        for supplemental_semantic_ids_item_data in _supplemental_semantic_ids or []:
            supplemental_semantic_ids_item = Reference.from_dict(supplemental_semantic_ids_item_data)
            supplemental_semantic_ids.append(supplemental_semantic_ids_item)

        _semantic_id = d.pop("semanticId", UNSET)
        semantic_id: Union[Unset, Reference]
        if isinstance(_semantic_id, Unset):
            semantic_id = UNSET
        else:
            semantic_id = Reference.from_dict(_semantic_id)

        extension = cls(
            name=name,
            value=value,
            value_type=value_type,
            refers_to=refers_to,
            supplemental_semantic_ids=supplemental_semantic_ids,
            semantic_id=semantic_id,
        )

        # Whatever keys remain become free-form additional properties.
        extension.additional_properties = d
        return extension

    @property
    def additional_keys(self) -> List[str]:
        """Names of the free-form additional properties."""
        return list(self.additional_properties.keys())

    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
| sdm4fzi/aas2openapi | ba-syx-submodel-repository-client/ba_syx_submodel_repository_client/models/extension.py | extension.py | py | 5,047 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "typing.TypeVar",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "types.Unset",
"li... |
73980074024 | from random import choice, sample
from datetime import timedelta, date
# Global history of assignments across all generated weeks.
cooking = []
washing = []
# Org-mode document header for the emitted rota.
print("#+title: Rota")
print("#+options: h:2 num:nil toc:t")
print("\n")
def create_rota(d):
    """Generate and print one week's cooking/washing rota starting at date `d`.

    Two cooks per day (no one cooks two days in a row within the week) and
    three washers per day (no washer is also cooking that day). Output is
    printed as org-mode entries; assignments are also recorded in the global
    `cooking` / `washing` history lists.
    """
    # Draw the week's cooks into a LOCAL list. The original indexed the global
    # `cooking` list, which keeps growing across calls, so every week after the
    # first re-used week 1's entries; it also appended each day twice.
    week_cooks = []
    for i in range(7):
        avail_cook = ["Noam", "Laura", "Louis", "David", "Nat", "Störm"]
        if i > 0:
            # No cooks two days in a row: remove yesterday's cooks.
            for c in week_cooks[i - 1]:
                if c in avail_cook:
                    avail_cook.remove(c)
        week_cooks.append(sample(avail_cook, 2))
    for i in range(7):
        avail_wash = ["Noam", "Laura", "Louis", "David", "Nat", "Soko", "Sonny", "Störm"]
        cook_day = week_cooks[i]
        # Washers must not be cooking the same day.
        for c in cook_day:
            if c in avail_wash:
                avail_wash.remove(c)
        print("* ", d.strftime("%d %b %Y %A"))
        print("- Cooking: ", cook_day[0], "and ", cook_day[1])
        cooking.append(cook_day)  # record each day exactly once in the history
        wash_day = sample(avail_wash, 3)
        print("- Washing: ", wash_day[0], ", ", wash_day[1], " and ", wash_day[2])
        washing.append(wash_day)
        d += timedelta(days=1)
d1 = date.today()
# Emit 100 consecutive weekly rotas, advancing the start date by 7 days each.
for i in range(0, 100):
    # print("Week ", i, ". Starting on ", d1.strftime("%a %d %b %Y"))
    # print("----------\n")
    create_rota(d1)
    d1 += timedelta(days=7)
    # print("\n")
| locua/rota-generator | generate_rota.py | generate_rota.py | py | 1,500 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "random.sample",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"l... |
27036133703 | import json
import threading
import time
import webbrowser
from multiprocessing import Process, Pipe
from tkinter import *
from tkinter import ttk
from tkinter.messagebox import *
from tkinter.ttk import Treeview
from PIL import Image, ImageTk
import course_do
import encrypt
import getCourse
import login
import settings
import multiprocessing_win
# Module-level references to Tk PhotoImage objects. They must outlive the
# function that creates them, otherwise Tk garbage-collects the image and the
# label shows blank.
developer_png = None
donate_png = None
class CourseFrame(Frame):  # all 7 course categories reuse this single class
    """Tk frame listing one category of selectable courses in a Treeview.

    Course rows are parents; each teaching class is a child row carrying a
    pseudo-checkbox character. Checking a row adds it to the module-wide
    selection plan (`settings.courses`); unchecking removes it.
    """

    def __init__(self, master=None, _type=''):
        Frame.__init__(self, master)
        self.root = master  # reference to the parent widget
        # Category codes: 'TJKC' class courses, 'FANKC' in-plan, 'FAWKC'
        # out-of-plan, 'XGXK' public electives, 'MOOC' moocs, 'FXKC' minor,
        # 'TYKC' sports.
        self.type = _type
        self.char_ct = '☑'  # checkbox "checked" marker
        self.char_cf = '□'  # checkbox "unchecked" marker
        self.search = StringVar()
        self.search.set('')
        # Map category code -> (fetch function, display name); default is the
        # recommended/class-course category.
        self.code = getCourse.recommended_course
        self.name = '本班课程'
        if self.type == 'FANKC':
            self.code = getCourse.in_course
            self.name = '方案内课程'
        elif self.type == 'FAWKC':
            self.code = getCourse.out_course
            self.name = '方案外课程'
        elif self.type == 'XGXK':
            self.code = getCourse.public_course
            self.name = '校公选课'
        elif self.type == 'MOOC':
            self.code = getCourse.mooc
            self.name = '慕课'
        elif self.type == 'FXKC':
            self.code = getCourse.fuxiu_course
            self.name = '辅修课程'
        elif self.type == 'TYKC':
            self.code = getCourse.sport_course
            self.name = '体育课'
        self.createPage()

    def get_row_values_by_item(self, item):
        """
        Return the value tuple of one tree row, including the row header info.
        :return: tuple; index 1 holds the checkbox cell, later indices the data.
        """
        values = self.tree.item(item, 'values')
        return values

    def get_parent_by_item(self, item):
        # Parent of a teaching-class row is its course header row.
        return self.tree.parent(item)

    def exchange_check_by_item(self, item):
        """
        Toggle the checked state of one row and sync the selection plan.
        """
        vals = self.get_row_values_by_item(item)
        # Only child rows carry a checkbox marker; ignore course header rows.
        if vals[1][0:1] != self.char_cf and vals[1][0:1] != self.char_ct:
            return
        check_str = vals[1][0:1]
        if check_str == self.char_ct:
            value = self.char_cf
            settings.courses.remove(settings.findInDict(vals[2]))  # drop the course from the selection plan
        else:
            value = self.char_ct
            parent = self.get_row_values_by_item(self.get_parent_by_item(item))  # parent row holds the course name
            assign = {'id': '', 'type': '', 'name': "", 'teachingPlace': '', 'teacherName': ''}
            assign.update(id=vals[2], type=self.type, name=parent[1], teachingPlace=vals[4], teacherName=vals[3])
            settings.courses.append(assign)  # add the course to the selection plan
        col_str = '#%d' % 2
        self.tree.set(item, column=col_str, value=value)  # rewrite the checkbox cell

    def change_check_on_select(self):
        """
        Toggle the check state of the currently selected row, if any.
        """
        try:
            item = self.tree.selection()[0]  # selected row id
        except Exception:
            # Click landed outside any row (IndexError): no-op.
            pass
        else:
            self.exchange_check_by_item(item)

    def on_click(self, event):
        """
        Row single-click handler.
        """
        self.change_check_on_select()

    def createPage(self):
        """Build the search bar and the course Treeview, then load page data."""
        frame1 = Frame(self, relief=RAISED, borderwidth=2)
        frame1.pack(side=TOP, fill=X, ipadx=13, ipady=13, expand=0)
        Label(frame1, text='\n%s' % self.name, font=12).pack(side='top')
        Entry(frame1, textvariable=self.search, width=60).pack(side=LEFT, expand=True)
        Button(frame1, text='刷新/搜索', command=lambda: self.refresh(self.code)).pack(side=LEFT, expand=True)
        ybar = Scrollbar(self, orient='vertical')  # vertical scrollbar
        self.tree = Treeview(self, show="headings", columns=('no', 'name', 'id', 'teacher', 'place'), height=20,
                             yscrollcommand=ybar.set)
        ybar['command'] = self.tree.yview
        self.tree.column('no', width=35, anchor='center')
        self.tree.column('name', width=150, anchor='center')
        self.tree.column('id', width=180, anchor='center')
        self.tree.column('teacher', width=80, anchor='center')
        self.tree.column('place', width=300, anchor='center')
        self.tree.heading('no', text='序号')
        self.tree.heading('name', text='课程名')
        self.tree.heading('id', text='课程总号')
        self.tree.heading('teacher', text='教师')
        self.tree.heading('place', text='上课时间及地点')
        self.refresh(self.code)
        self.tree.pack(side='left', expand='yes', fill='y')
        ybar.pack(side='left', expand='yes', fill='y')
        self.tree.bind('<ButtonRelease-1>', self.on_click)  # bind row single-click

    def refresh(self, methods):
        """Clear the tree and re-query pages via `methods` until an empty page."""
        x = self.tree.get_children()
        for item in x:
            self.tree.delete(item)  # drop stale rows before inserting new data
        try:
            index = 1
            for i in range(1000):  # page loop; stops at the first empty page
                s = methods(page=i, query=self.search.get())
                data = json.loads(s)
                if data['dataList'] is None or len(data['dataList']) == 0:
                    break
                for course in data['dataList']:
                    # Course header row; children hold its teaching classes.
                    mid = self.tree.insert('', 'end', values=(index, course['courseName'], '', '', ''))
                    index = index + 1
                    for j in range(1000):
                        if len(course['tcList']) <= j:
                            break
                        # Checked marker iff the class is already in the plan.
                        if settings.findInDict(course['tcList'][j]['teachingClassID']) == -1:
                            state = self.char_cf
                        else:
                            state = self.char_ct
                        temp1 = [state, course['tcList'][j]['teachingClassID'],
                                 course['tcList'][j]['teacherName'],
                                 course['tcList'][j]['teachingPlace']]
                        self.tree.insert(mid, 'end', values=("(%d" % (j + 1), temp1[0], temp1[1], temp1[2], temp1[3]))
            self.flag = 1
        except Exception as e:
            showinfo(title='错误', message='获取%s列表失败\n %s' % (self.name, e))
class SelectedCourseFrame(Frame):
    """Frame listing the student's already-enrolled courses."""

    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.flag = 0  # whether the list has been loaded at least once
        self.root = master  # reference to the parent widget
        self.selectedCourse = []
        if self.flag == 0:
            try:
                self.selectedCourse = course_do.query_result()
                self.flag = 1
            except Exception as e:
                showinfo(title='错误', message='获取已选课程列表失败\n %s' % e)
        self.createPage()

    def _fill_tree(self):
        """Clear the Treeview and repopulate it from self.selectedCourse.

        Source rows look like (id, teacher, name, place); displayed columns
        are (index, id, name, teacher, place). Extracted because createPage()
        and refresh() previously duplicated this loop.
        """
        for item in self.tree.get_children():
            self.tree.delete(item)
        for i, row in enumerate(self.selectedCourse):
            self.tree.insert('', i, values=(i + 1, row[0], row[2], row[1], row[3]))

    def createPage(self):
        """Build the header, refresh button and result Treeview."""
        Label(self, text='\n已选课程\n', font=12).pack(side='top')
        Button(self, text='刷新', command=self.refresh).pack(side='top')
        ybar = Scrollbar(self, orient='vertical')  # vertical scrollbar
        self.tree = ttk.Treeview(self, show="headings", columns=('no', 'id', 'name', 'teacher', 'place'), height=20,
                                 yscrollcommand=ybar.set)
        ybar['command'] = self.tree.yview
        self.tree.column('no', width=35, anchor='center')
        self.tree.column('id', width=180, anchor='center')
        self.tree.column('name', width=150, anchor='center')
        self.tree.column('teacher', width=80, anchor='center')
        self.tree.column('place', width=300, anchor='center')
        self.tree.heading('no', text='序号')
        self.tree.heading('id', text='课程总号')
        self.tree.heading('name', text='课程名')
        self.tree.heading('teacher', text='教师')
        self.tree.heading('place', text='上课时间及地点')
        self._fill_tree()
        self.tree.pack(side='left', expand='yes', fill='both')
        ybar.pack(side='left', expand='yes', fill='y')

    def refresh(self):
        """Re-query the enrolled courses and rebuild the table."""
        try:
            self.selectedCourse = course_do.query_result()
            self._fill_tree()
            self.flag = 1
        except Exception as e:
            showinfo(title='错误', message='获取已选课程列表失败\n %s' % e)
class ChooseCourseThread(threading.Thread):
    """Background worker that repeatedly tries to enrol in every planned course.

    The thread starts itself on construction. Successful enrolments are
    removed from the plan (`settings.courses`), remembered in the shared
    `SelectedCourse` list, and appended to the module-level `Tree` widget.
    `settings.isRunning` is polled so the UI can stop the run at any time.
    """

    def __init__(self, SelectedCourse):
        super().__init__()
        self.SelectedCourse = SelectedCourse
        # Daemon thread: don't block interpreter exit. Assigning the attribute
        # replaces setDaemon(), which is deprecated since Python 3.10.
        self.daemon = True
        self.start()

    def run(self):
        for i in range(settings.loopVariable):
            if not settings.isRunning:
                return
            for j in settings.courses:
                if not settings.isRunning:
                    return
                # Skip courses whose id or name we already hold.
                if not (self.SelectedCourse.count(j['id']) or self.SelectedCourse.count(j['name'])):
                    response = course_do.choose_course(j['id'], j['type'])
                    if response['code'] == '1':
                        settings.deleteCoursesInDict(j['name'])
                        self.SelectedCourse.append(j['id'])
                        self.SelectedCourse.append(j['name'])
                        Tree.insert('', 'end',
                                    values=(
                                        j['id'], j['name'], j['teacherName'], response['msg'], j['teachingPlace']))
                # Throttle between enrolment attempts (delayVariable is ms).
                time.sleep(settings.delayVariable / 1000)
class StartChooseFrame(Frame):  # inherits Frame
    """Frame hosting the start/stop controls and the enrolment-result table."""

    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.root = master  # reference to the parent widget
        # Ids/names of courses already secured; shared with ChooseCourseThread.
        self.SelectedCourse = []
        self.createPage()

    def createPage(self):
        """Build the start/stop buttons and the module-global result Treeview."""
        Label(self, text='\n开始选课\n', font=12).pack(side='top')
        Button(self, text='开始选课', command=self.StartChoose).pack(side='top')
        Button(self, text='停止选课', command=self.StopChoose).pack(side='top')
        ybar = Scrollbar(self, orient='vertical')  # vertical scrollbar
        # `Tree` is module-global so the worker thread can insert result rows.
        global Tree
        Tree = ttk.Treeview(self, show="headings", columns=('id', 'name', 'teacher', 'msg', 'place'), height=20,
                            yscrollcommand=ybar.set)
        ybar['command'] = Tree.yview
        Tree.column('id', width=180, anchor='center')
        Tree.column('name', width=150, anchor='center')
        Tree.column('teacher', width=80, anchor='center')
        Tree.column('msg', width=150, anchor='center')
        Tree.column('place', width=300, anchor='center')
        Tree.heading('id', text='课程总号')
        Tree.heading('name', text='课程名')
        Tree.heading('teacher', text='教师')
        Tree.heading('msg', text='选课结果')
        Tree.heading('place', text='上课时间及地点')
        Tree.pack(side='left', expand='yes', fill='both')
        ybar.pack(side='left', expand='yes', fill='y')

    def StartChoose(self):
        """Validate the tuning parameters, then launch the enrolment thread."""
        if settings.delayVariable < 300:
            showinfo(title='错误', message='请求发送延迟需大于300ms!')
            return
        if settings.loopVariable < 1 or settings.loopVariable > 10000:
            showinfo(title='错误', message='请求发送循环次数需在1到10000之间!')
            return
        settings.isRunning = True
        try:
            # The thread starts itself in its constructor.
            ChooseCourseThread(self.SelectedCourse)
        except Exception as e:
            showinfo(title='错误', message='选课错误!\n %s' % e)
            return

    def StopChoose(self):
        """Ask the worker thread to stop at its next isRunning check."""
        settings.isRunning = False
class SettingsFrame(Frame):
    """Frame for tuning request delay / loop count and reviewing the selection plan."""

    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.flag = 0  # whether cached content exists; refresh when 0
        self.root = master  # reference to the parent widget
        self.selectedCourse = []
        self.delayVariable = StringVar()
        self.loopVariable = StringVar()
        if self.flag == 0:
            try:
                # NOTE(review): try body only sets the flag — presumably a
                # data load was removed at some point; confirm.
                self.flag = 1
            except Exception as e:
                showinfo(title='错误', message='获取已选课程列表失败\n %s' % e)
        self.createPage()

    def createPage(self):
        """Build the parameter spinboxes and the selection-plan table."""
        frame1 = Frame(self, relief=RAISED, borderwidth=2)
        frame1.pack(side=TOP, fill=X, ipadx=13, ipady=13, expand=0)
        Label(frame1, text='选课参数设置', font=12).pack(side='top')
        Label(frame1, text='请求发送延迟(ms)', font=12).pack(side='left', padx=20)
        Spinbox(frame1, from_=300, to=800, increment=100, textvariable=self.delayVariable).pack(side='left', padx=20)
        Label(frame1, text='请求发送循环次数', font=12).pack(side='left', padx=20)
        # NOTE(review): spinbox allows 1-5 but refresh() accepts up to 10000 —
        # confirm which limit is intended.
        Spinbox(frame1, from_=1, to=5, increment=1, textvariable=self.loopVariable).pack(side='left', padx=20)
        Label(self, text='\n选课计划(排序代表程序选课的先后次序)', font=8).pack(side='top')
        Button(self, text='保存并刷新', command=self.refresh).pack(side='top')
        ybar = Scrollbar(self, orient='vertical')  # vertical scrollbar
        self.tree = ttk.Treeview(self, show="headings", columns=('no', 'id', 'name', 'teacher', 'place'), height=20,
                                 yscrollcommand=ybar.set)
        ybar['command'] = self.tree.yview
        self.tree.column('no', width=35, anchor='center')
        self.tree.column('id', width=180, anchor='center')
        self.tree.column('name', width=150, anchor='center')
        self.tree.column('teacher', width=80, anchor='center')
        self.tree.column('place', width=300, anchor='center')
        self.tree.heading('no', text='序号')
        self.tree.heading('id', text='课程总号')
        self.tree.heading('name', text='课程名')
        self.tree.heading('teacher', text='教师')
        self.tree.heading('place', text='上课时间及地点')
        # Populate from the current selection plan.
        for i in range(len(settings.courses)):
            self.tree.insert('', i, values=(i + 1,
                                            settings.courses[i]['id'], settings.courses[i]['name'],
                                            settings.courses[i]['teacherName'], settings.courses[i]['teachingPlace']))
        self.tree.pack(side='left', expand='yes', fill='y')
        ybar.pack(side='left', expand='yes', fill='y')

    def refresh(self):
        """Validate and persist the spinbox values, then rebuild the plan table."""
        try:
            if int(self.delayVariable.get()) < 300:
                showinfo(title='错误', message='请求发送延迟需大于300ms!')
            else:
                settings.delayVariable = int(self.delayVariable.get())
            if int(self.loopVariable.get()) < 1 or int(self.loopVariable.get()) > 10000:
                showinfo(title='错误', message='请求发送循环次数需在1到10000之间!')
            else:
                settings.loopVariable = int(self.loopVariable.get())
        except Exception as e:
            showinfo(title='错误', message='获取选课参数失败\n %s' % e)
            return
        try:
            self.selectedCourse = settings.courses
            x = self.tree.get_children()
            for item in x:
                self.tree.delete(item)
            for i in range(len(settings.courses)):
                self.tree.insert('', i, values=(i + 1,
                                                settings.courses[i]['id'], settings.courses[i]['name'],
                                                settings.courses[i]['teacherName'],
                                                settings.courses[i]['teachingPlace']))
            self.flag = 1
        except Exception as e:
            showinfo(title='错误', message='获取选课列表失败\n %s' % e)
class InfoPage(object):
    """Disclaimer/landing page shown before the login screen."""

    def __init__(self, master=None):
        self.root = master  # reference to the Tk root
        self.root.geometry('%dx%d' % (600, 600))
        self.root.resizable(0, 0)
        self.username = StringVar()
        self.password = StringVar()
        self.createPage()

    def createPage(self):
        """Build the disclaimer text, developer image and agree/quit buttons."""
        self.page = Frame(self.root)
        self.page.pack()
        Label(self.page).grid(row=0, stick=W)
        img_open = Image.open("image/developer.png")
        # Keep a module-level reference so Tk doesn't garbage-collect the image.
        global developer_png
        # Image.LANCZOS replaces Image.ANTIALIAS (an alias of the same filter)
        # which was removed in Pillow 10.
        developer_png = ImageTk.PhotoImage(img_open.resize((400, 300), Image.LANCZOS))
        Label(self.page, image=developer_png, height=250).grid(row=1, column=0, columnspan=2)
        Label(self.page,
              text='本软件由:' + '\n' + 'Matt-Dong123(github.com/Matt-Dong123)' + '\n' + '和' + '\n' + 'ANDYWANGTIANTIAN(github.com/ANDYWANGTIANTIAN)'
                   + '\n' + '开发制作,项目地址https://github.com/ANDYWANGTIANTIAN/SZU_AutoCourseSelecter'
                     ',可以在深圳大学本科选课系统实现自动选课功能,详细使用方法请参考README.md'
                     '。本软件仅供学习交流使用,请勿用于真实选课环境中!!因使用本软件造成的一切后果均由软件使用者承担,软件开发者不承担任何责任!!如您同意以上免责声明,请点击“同意并进入”按钮,如不同意,请点击退出\n\n',
              font=12,
              wraplength=550).grid(row=2, column=0, columnspan=2)
        Button(self.page, text='同意并进入', font=12, command=self.GotoLoginPage, width=10).grid(row=5, column=0)
        Button(self.page, text='退出', width=10, font=12, command=self.page.quit).grid(row=5, column=1)

    def GotoLoginPage(self):
        """Tear down this page and open the login page."""
        self.page.destroy()
        LoginPage(self.root)
class AboutFrame(Frame):
    """About/donation frame with developer credits and a feedback link."""

    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.root = master  # reference to the parent widget
        self.createPage()

    def createPage(self):
        """Build the donation image, credits text and feedback button."""
        Label(self, text='如果觉得本软件好用,欢迎给我们打赏').grid(row=0, columnspan=2)
        img_open = Image.open("image/payment.jpg")
        # Keep a module-level reference so Tk doesn't garbage-collect the image.
        global donate_png
        # Image.LANCZOS replaces Image.ANTIALIAS (an alias of the same filter)
        # which was removed in Pillow 10.
        donate_png = ImageTk.PhotoImage(img_open.resize((300, 300), Image.LANCZOS))
        Label(self, image=donate_png, height=250).grid(row=1, column=0, columnspan=2)
        Label(self,
              text='本软件由:' + '\n' + 'Matt-Dong123(github.com/Matt-Dong123)' + '\n' + '和' + '\n' + 'ANDYWANGTIANTIAN(github.com/ANDYWANGTIANTIAN)'
                   + '\n' + '开发制作,可以在深圳大学本科选课系统实现自动选课功能,详细使用方法请参考README.md'
                     '。本软件仅供学习交流使用,请勿用于真实选课环境中!!因使用本软件造成的一切后果均由软件使用者承担,软件开发者不承担任何责任!如发现软件存在问题,欢迎点击下方按钮进行反馈,或联系以下邮箱:Matt(2274006799@qq.com)或Andy(2309724277@qq.com)\n',
              font=12,
              wraplength=550).grid(row=2, column=0, columnspan=2)
        Button(self, text='在github反馈问题', font=12, command=self.GoToGithub).grid(row=3, columnspan=2)

    def GoToGithub(self):
        """Open the project's GitHub page in the default browser."""
        webbrowser.open("https://github.com/ANDYWANGTIANTIAN/SZU_AutoCourseSelecter", new=0)
class LoginPage(object):
    """Login screen: collects credentials and authenticates against the system."""

    def __init__(self, master=None):
        self.labelText1 = StringVar()
        self.labelText2 = StringVar()
        self.labelText3 = StringVar()
        self.labelEntry1 = StringVar()
        self.labelEntry2 = StringVar()
        self.labelEntry3 = StringVar()
        self.root = master  # reference to the Tk root
        self.root.geometry('%dx%d' % (600, 400))
        self.root.resizable(0, 0)
        self.var_SelUrl = IntVar()  # 0 = on-campus URL, 1 = off-campus (webvpn) URL
        self.var_SelMode = IntVar()  # 0 = account/password login, 1 = cookie login
        self.createPage()

    def createPage(self):
        """Build the login form widgets."""
        self.labelText1.set("账号(学号)")
        self.labelText2.set("密码")
        self.labelText3.set("此项无需填写")
        self.var_SelUrl.set(0)
        self.var_SelMode.set(0)
        self.page = Frame(self.root)
        self.page.pack()
        Label(self.page, text='\n\n登录选课系统\n', font=12).grid(row=1, columnspan=2)
        Radiobutton(self.page, text="使用账户密码登录", variable=self.var_SelMode, value=0,
                    command=self.SelMode).grid(row=2)
        # Cookie login is not finished yet, hence disabled.
        Radiobutton(self.page, text="使用Cookie登录(尚未完成)", variable=self.var_SelMode, value=1,
                    command=self.SelMode, state="disabled").grid(row=2, column=1)
        Label(self.page, textvariable=self.labelText1).grid(row=3, pady=10)
        Entry(self.page, textvariable=self.labelEntry1).grid(row=3, column=1)
        Label(self.page, textvariable=self.labelText2).grid(row=4, pady=10)
        Entry(self.page, textvariable=self.labelEntry2, show='*').grid(row=4, column=1)
        Label(self.page, textvariable=self.labelText3).grid(row=5, pady=10)
        Entry(self.page, textvariable=self.labelEntry3).grid(row=5, column=1)
        Radiobutton(self.page, text="校内模式(bkxk.szu.edu.cn)", variable=self.var_SelUrl, value=0,
                    command=self.SelUrl).grid(row=6, column=0)
        Radiobutton(self.page, text="校外模式尚未完成(bkxk.webvpn.szu.edu.cn)", variable=self.var_SelUrl, value=1,
                    command=self.SelUrl, state="disabled").grid(row=6, column=1)
        Button(self.page, text='登录', command=self.loginCheck).grid(row=7, pady=10)
        Button(self.page, text='退出', command=self.page.quit).grid(row=7, column=1)
        Label(self.page, text='点击登录后,会有短暂的卡顿,请稍等', font=10).grid(row=8, columnspan=2)

    def loginCheck(self):
        """Store the credentials, encrypt the password out-of-process, then log in."""
        if self.var_SelMode.get() == 1:
            # Cookie login (currently disabled in the UI).
            settings.user_id = self.labelEntry1.get()
            settings.cookie = self.labelEntry2.get()
            settings.token = self.labelEntry3.get()
            settings.Mode = 1
        else:
            settings.Mode = 0
            settings.user_id = self.labelEntry1.get()
            settings.user_pass = self.labelEntry2.get()
            # Run the password encryption in a separate process and receive
            # the result over a pipe (keeps the Tk main loop responsive).
            parent_conn, child_conn = Pipe()
            p = Process(target=encrypt.encrypt, args=(settings.user_pass, child_conn))
            p.start()
            p.join()
            settings.user_encrypted_pass = parent_conn.recv()
        try:
            vtoken = login.get_vtoken()
            Code = login.get_vimage(vtoken)
            Status = login.LogIn(Code, vtoken)
        except ValueError:
            showinfo(title='错误', message='发生错误!请检查是否使用了代理,如使用,请关闭一切代理并重试!')
            return
        except Exception as e:
            showinfo(title='错误', message='未知错误 %s' % e)
            return
        if Status['code'] == '1':
            MainPage(self.root)
            self.page.destroy()
            showinfo(title='登录成功',
                     message='欢迎您! ' + settings.user_name + "\n学号: " + settings.user_id + "\n学院: " + settings.user_college + "\n专业: " + settings.user_department + "\n班级: " + settings.user_class)
        else:
            showinfo(title='错误', message=Status['msg'])

    def SelMode(self):
        """Relabel the form fields when the login-mode radio button changes."""
        if self.var_SelMode.get() == 1:
            self.labelText2.set("Cookie")
            self.labelText3.set("Token")
        else:
            self.labelText2.set("密码")
            self.labelText3.set("此项无需填写")

    def SelUrl(self):
        """Point settings.url at the campus or webvpn endpoint."""
        if self.var_SelUrl.get() == 0:
            settings.url = 'http://bkxk.szu.edu.cn/'
        else:
            settings.url = 'https://bkxk.webvpn.szu.edu.cn/'

    def MainPage(self):
        """Tear down the login page and open the main page."""
        self.page.destroy()
        MainPage(self.root)
class MainPage(object):
    """Main application window: a menu bar that switches between feature frames."""

    def __init__(self, master=None):
        self.root = master  # reference to the Tk root
        self.root.geometry('%dx%d' % (800, 600))
        self.root.resizable(0, 0)
        self.createPage()

    def createPage(self):
        """Create every feature frame and the menu bar that toggles them.

        Course-type codes: TJKC=class courses, FANKC=in-plan, FAWKC=out-of-plan,
        XGXK=public electives, MOOC=moocs, FXKC=minor courses, TYKC=sports.
        """
        self.SelectedCourse = SelectedCourseFrame(self.root)
        self.ClassCoursePage = CourseFrame(self.root, _type='TJKC')
        self.InCoursePage = CourseFrame(self.root, _type='FANKC')
        self.OutCoursePage = CourseFrame(self.root, _type='FAWKC')
        self.PublicCoursePage = CourseFrame(self.root, _type='XGXK')
        self.MOOCPage = CourseFrame(self.root, _type='MOOC')
        self.FuxiuPage = CourseFrame(self.root, _type='FXKC')
        self.SportCoursePage = CourseFrame(self.root, _type='TYKC')
        self.SettingsPage = SettingsFrame(self.root)
        self.StartChoosePage = StartChooseFrame(self.root)
        self.AboutPage = AboutFrame(self.root)
        # All switchable frames; _show() hides every one except its target.
        self._pages = [
            self.SelectedCourse, self.ClassCoursePage, self.InCoursePage,
            self.OutCoursePage, self.PublicCoursePage, self.MOOCPage,
            self.FuxiuPage, self.SportCoursePage, self.SettingsPage,
            self.StartChoosePage, self.AboutPage,
        ]
        self.SelectedCourse.pack()  # show the "selected courses" page by default
        menubar = Menu(self.root)
        menubar.add_command(label='已选课程', command=self.GotoSelectedCourse)
        menubar.add_command(label='本班课程', command=self.GotoClassCourse)
        menubar.add_command(label='方案内课程', command=self.GotoInCourse)
        menubar.add_command(label='方案外课程', command=self.GoToOutCourse)
        menubar.add_command(label='校公选课', command=self.GoToPublicCourse)
        menubar.add_command(label='慕课', command=self.GoToMoocCourse)
        menubar.add_command(label='辅修课程', command=self.GoToFuxiuCourse)
        menubar.add_command(label='体育课程', command=self.GoToSportCourse)
        menubar.add_command(label='参数设置', command=self.GoToSettings)
        menubar.add_command(label='开始选课', command=self.GoToStartChooseCourse)
        menubar.add_command(label='关于本软件', command=self.GoToAbout)
        self.root['menu'] = menubar  # install the menu bar

    def _show(self, page):
        """Hide every feature frame except `page`, then pack it.

        Replaces eleven hand-written pack_forget sequences; the net widget
        state after each menu click is identical to the original code.
        """
        for frame in self._pages:
            if frame is not page:
                frame.pack_forget()
        page.pack()

    def GotoSelectedCourse(self):
        self._show(self.SelectedCourse)

    def GotoClassCourse(self):
        self._show(self.ClassCoursePage)

    def GotoInCourse(self):
        self._show(self.InCoursePage)

    def GoToOutCourse(self):
        self._show(self.OutCoursePage)

    def GoToPublicCourse(self):
        self._show(self.PublicCoursePage)

    def GoToMoocCourse(self):
        self._show(self.MOOCPage)

    def GoToFuxiuCourse(self):
        self._show(self.FuxiuPage)

    def GoToSportCourse(self):
        self._show(self.SportCoursePage)

    def GoToSettings(self):
        self._show(self.SettingsPage)

    def GoToStartChooseCourse(self):
        self._show(self.StartChoosePage)

    def GoToAbout(self):
        self._show(self.AboutPage)
def StartRun():
    """Application entry point: build the Tk root window and show the disclaimer page."""
    root = Tk()
    root.title('SZU选课助手')
    root.iconbitmap('image/favicon.ico')
    InfoPage(root)
    root.mainloop()
| ANDYWANGTIANTIAN/SZU_AutoCourseSelecter | gui.py | gui.py | py | 32,494 | python | en | code | 13 | github-code | 36 | [
{
"api_name": "getCourse.recommended_course",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "getCourse.in_course",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "getCourse.out_course",
"line_number": 37,
"usage_type": "attribute"
},
{
... |
2808406541 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__version__ = "0.5.4"
__author__ = "Abien Fred Agarap"
from dataset.normalize_data import list_files
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import confusion_matrix
import tensorflow as tf
def load_data(dataset):
    """Returns a tuple containing the features and labels
    in a dataset.

    Parameter
    ---------
    dataset : numpy.ndarray
        A NumPy array file containing the dataset to be loaded;
        column 17 holds the class label.

    Returns
    -------
    features : ndarray
        A numpy.ndarray (float32) with every column except column 17.
    labels : ndarray
        A numpy.ndarray (float32) with the labels in the dataset.

    Examples
    --------
    >>> dataset = 'train_data.npy'
    >>> features, labels = data.load_data(dataset=dataset)
    >>> labels
    array([ 1.,  1.,  1., ...,  0.,  1.,  1.], dtype=float32)
    """
    # Read the whole array into memory.
    raw = np.load(dataset)
    # Column 17 is the label; cast once to float32.
    labels = raw[:, 17].astype(np.float32)
    # Everything except column 17 forms the feature matrix.
    features = np.delete(arr=raw, obj=[17], axis=1).astype(np.float32)
    return features, labels
def plot_confusion_matrix(phase, path, class_names):
    """Plots the confusion matrix using matplotlib.

    Loads every NPY result file under ``path`` (each row holding a
    one-hot predicted label followed by a one-hot actual label, 4 values
    total), decodes them, plots the confusion matrix, and returns it
    together with the predictive accuracy.

    Parameter
    ---------
    phase : str
        String value indicating for what phase is the confusion matrix,
        i.e. training/validation/testing
    path : str
        Directory where the predicted and actual label NPY files reside
    class_names : list
        List consisting of the class names for the labels

    Returns
    -------
    conf : array, shape = [num_classes, num_classes]
        Confusion matrix
    accuracy : float
        Predictive accuracy (assumes a 2-class confusion matrix)
    """
    # list all the results files
    files = list_files(path=path)
    labels = np.array([])
    num_files = len(files)
    # report progress roughly every 20% of the files.
    # NOTE: the original float-modulo check `(i / n) % 0.2 == 0` almost
    # never fired; an integer step is used instead.
    step = max(1, num_files // 5)
    for index, file in enumerate(files):
        labels_batch = np.load(file)
        labels = np.append(labels, labels_batch)
        if index % step == 0:
            print(
                "Done appending {}% of {}".format(
                    (index / num_files) * 100, num_files
                )
            )
    labels = np.reshape(labels, newshape=(labels.shape[0] // 4, 4))
    print("Done appending NPY files.")
    # get the predicted labels (first two one-hot columns)
    predictions = labels[:, :2]
    # get the actual labels (last two one-hot columns)
    actual = labels[:, 2:]
    # decode the one-hot encoded labels to single integers;
    # np.argmax matches tf.argmax without needing a TF1 session
    predictions = np.argmax(predictions, axis=1)
    actual = np.argmax(actual, axis=1)
    # get the confusion matrix based on the actual and predicted labels
    conf = confusion_matrix(y_true=actual, y_pred=predictions)
    # create a confusion matrix plot
    plt.imshow(conf, cmap=plt.cm.Purples, interpolation="nearest")
    # set the plot title
    plt.title("Confusion Matrix for {} Phase".format(phase))
    # legend of intensity for the plot
    plt.colorbar()
    tick_marks = np.arange(len(class_names))
    plt.xticks(tick_marks, class_names, rotation=45)
    plt.yticks(tick_marks, class_names)
    plt.tight_layout()
    plt.ylabel("Actual label")
    plt.xlabel("Predicted label")
    # show the plot (blocks until the window is closed)
    plt.show()
    # get the accuracy of the phase (binary: TN + TP over total samples)
    accuracy = (conf[0][0] + conf[1][1]) / labels.shape[0]
    # return the confusion matrix and the accuracy
    return conf, accuracy
| AFAgarap/gru-svm | utils/data.py | data.py | py | 3,939 | python | en | code | 136 | github-code | 36 | [
{
"api_name": "numpy.load",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "dataset.normalize_data",
"line_number": 49,
"usage_type": "argument"
},
{
"api_name": "numpy.float32",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "numpy.delete... |
74258842345 | import os
import json
import pandas as pd
import shutil
from PIL import Image
import matplotlib.pyplot as plt
import os
import cv2
import numpy as np
def rotate_bound(image, angle):
    """Rotate ``image`` by ``angle`` degrees, expanding the canvas.

    The output image is enlarged so the whole rotated content fits
    without cropping; the rotation is performed about the image center.
    """
    # image size and rotation center
    height, width = image.shape[:2]
    center_x, center_y = width / 2, height / 2
    # rotation matrix about the center (negative angle: clockwise)
    rotation = cv2.getRotationMatrix2D((center_x, center_y), -angle, 1.0)
    cos_a = np.abs(rotation[0, 0])
    sin_a = np.abs(rotation[0, 1])
    # new bounding size after rotation
    new_w = int(height * sin_a + width * cos_a)
    new_h = int(height * cos_a + width * sin_a)
    # shift the transform so the rotated image is centered in the new canvas
    rotation[0, 2] += new_w / 2 - center_x
    rotation[1, 2] += new_h / 2 - center_y
    return cv2.warpAffine(image, rotation, (new_w, new_h))
def find_head(cate, location):
    """Return a head bounding box from the first seven keypoints.

    ``location`` is a list of ``[x, y, v]`` keypoint triples; the first
    seven (nose, eyes, ears, shoulders) must all be fully visible
    (``v == 2``) for a box to be produced.

    Returns ``(True, [(x_min, y_min), (x_max, y_max)])`` with the y range
    shifted up (−30 top, −15 bottom) to cover the head, or
    ``(False, None)`` when any of the seven keypoints is not visible.
    """
    head_points = [location[i] for i in range(7)]
    if any(point[2] != 2 for point in head_points):
        return False, None
    xs = [point[0] for point in head_points]
    ys = [point[1] for point in head_points]
    return True, [(min(xs), min(ys) - 30), (max(xs), max(ys) - 15)]
def find_torso(cate, location):
    """Return a torso bounding box from the first thirteen keypoints.

    ``location`` is a list of ``[x, y, v]`` keypoint triples; keypoints
    0-12 (head through hips) must all be fully visible (``v == 2``) for
    a box to be produced.

    Returns ``(True, [(x_min, y_min), (x_max, y_max)])`` with the box
    padded ±15 horizontally and 30 upward, or ``(False, None)`` when any
    of the thirteen keypoints is not visible.
    """
    torso_points = [location[i] for i in range(13)]
    if any(point[2] != 2 for point in torso_points):
        return False, None
    xs = [point[0] for point in torso_points]
    ys = [point[1] for point in torso_points]
    return True, [(min(xs) - 15, min(ys) - 30), (max(xs) + 15, max(ys))]
# Script: for every COCO val2017 annotation whose torso keypoints are all
# visible, black out the torso region of the image, rotate the result by
# 110 degrees, and save it under the target directory.
dir = '/shared/group/coco/annotations/person_keypoints_val2017.json'
f = open(dir)
json_file = json.load(f)
s = 1
imgs = json_file['images']
anns = json_file['annotations']
# COCO keypoint names, in annotation order (3 values per keypoint: x, y, v)
categories = ['nose', 'left_eye', 'right_eye', 'left_ear', 'right_ear', 'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow', 'left_wrist', 'right_wrist', 'left_hip', 'right_hip', 'left_knee', 'right_knee', 'left_ankle', 'right_ankle']
num = 0
img_list = []
for ann in anns:
    img_id = int(ann['image_id'])
    # COCO image filenames are the zero-padded 12-digit image id
    img_id_ = "%012d" % img_id + '.jpg'
    keypoints = ann['keypoints']
    # unpack the flat keypoint array into [x, y, visibility] triples
    location = [
        [keypoints[3 * i], keypoints[3 * i + 1], keypoints[3 * i + 2]]
        for i in range(len(categories))
    ]
    Head_exist, location = find_torso(categories, location)
    if Head_exist:
        if img_id not in img_list:
            img_list.append(img_id)
        prefix = '/shared/group/coco/val2017'
        _path = os.path.join(prefix, img_id_)
        I = cv2.imread(_path)
        # zero out the torso bounding box: rows y_min:y_max, cols x_min:x_max
        I[location[0][1]:location[1][1], location[0][0]:location[1][0], :] = 0
        I = rotate_bound(I, angle=110)
        target_image_dir = '/shared/niudt/pose_estimation/test_images/coco_legs_rotate110'
        if not os.path.exists(target_image_dir):
            os.makedirs(target_image_dir)
        save_name = os.path.join(target_image_dir, img_id_)
        cv2.imwrite(save_name, I)
        s = 1
| Dantong88/Medical-Partial-Body-Pose-Estimation | ViTPose/demo/process_coco.py | process_coco.py | py | 3,724 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "cv2.getRotationMatrix2D",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cv2.warpAffine",
"lin... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.