seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
23228731915 | import sys
import time
import random
import pygame
from event_listener import event_listener
from functions import render_all, one_dimensional_list, update_frames
sys.path.insert(1, 'player')
from yoshi import Yoshi
from movement import move_all, set_direction, move
sys.path.insert(1, 'eggs')
from egg import Egg
sys.path.insert(1, 'end_game')
from end_game import end_game
from death_collided import collided_walls
pygame.init()
# Setting the window title shown by the OS.
pygame.display.set_caption("Yoshi Snake Game.")
# Board is 17x15 tiles of 40 px each: 680x600.
SCREEN_WIDTH = 680
SCREEN_HEIGHT = 600
GREEN = (31, 134, 31)  # background (grass) color
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
# Object registries; `objects` nests the per-kind lists so helpers can
# walk everything via one_dimensional_list().
yoshis = []
eggs = []
objects = []
objects.append(yoshis)
objects.append(eggs)
# Creating the player character (head of the snake).
player = Yoshi()
yoshis.append(player)
# Single test egg; it is re-placed at a random tile after each pickup.
eggy = Egg("green")
eggs.append(eggy)
# Empty string means "no direction yet": allows the player to choose the
# first direction.
direction = ""
score = 1
frame = 0
# Main game loop: render, check loss, read input, move, grow on egg pickup.
while True:
    # Rendering frame.
    # Resetting the screen for every frame.
    screen.fill(GREEN)
    # Rendering all objects.
    render_all( one_dimensional_list(objects), screen )
    # Flip the display.
    pygame.display.flip()
    # Verifying if the player lost:
    # NOTE(review): the loop keeps running after end_game(); presumably
    # end_game blocks or exits the process — confirm its behavior.
    if collided_walls(player, SCREEN_WIDTH, SCREEN_HEIGHT):
        end_game(screen)
    # Frame pacing: one game tick every half second.
    time.sleep(0.5)
    # Taking input.
    last_relevant_event = event_listener()
    # Closing the game if the user asked to quit:
    if last_relevant_event == "quit":
        sys.exit(0)
    # Preparing for the next rendering:
    move_all(yoshis)
    direction = set_direction(direction, last_relevant_event)
    move(player, direction, 40, 40)
    # Checking egg collision: grow the snake by one Yoshi segment.
    if eggy.check_collision(player):
        new_yoshi = Yoshi(x = player.x, y = player.y, current_frame = frame)
        yoshis.append(new_yoshi)
        # Changing egg location to a random tile on the 17x15 board.
        # NOTE(review): the new spot may overlap the snake — confirm intended.
        eggy.x = random.randint(0, 16) * 40
        eggy.y = random.randint(0, 14) * 40
        score += 1
        pygame.display.set_caption("Yoshi's Snake Game." + " " * 3 + "Score: " + str(score))
    # Updating gif frames.
    update_frames(one_dimensional_list(objects))
    frame += 1
| mignoe/Games | yoshi-snake-game/game.py | game.py | py | 2,214 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "sys.path.insert",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "sys.path.insert",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_nu... |
27239543439 | # from django.shortcuts import render
from django.views.generic import ListView, DetailView, UpdateView, CreateView, DeleteView # импортируем класс, который говорит нам о том, что в этом представлении мы будем выводить список объектов из БД
from .models import Post
from datetime import datetime
from .filters import PostFilter
from .forms import PostForm # импортируем нашу форму
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.paginator import Paginator
from django.contrib.auth.mixins import PermissionRequiredMixin
class PostList(ListView):
    """List every post, newest first, on the news page."""

    # The model whose objects this view lists.
    model = Post
    # Template holding the HTML instructions for rendering the list.
    template_name = 'news.html'
    # Name under which the object list is exposed to the template.
    context_object_name = 'news'
    queryset = Post.objects.order_by('-id')
    # Form class so posts can be created via POST from this view.
    form_class = PostForm
    #paginate_by = 1

    def get_context_data(self, **kwargs):
        """Extend the template context with the current time and a demo value."""
        context = super().get_context_data(**kwargs)
        # Current UTC time, used to demonstrate template filters.
        context['time_now'] = datetime.utcnow()
        # Deliberately empty variable used to demonstrate another filter.
        context['value1'] = None
        return context
# View that shows the details of one specific post.
class PostDetail(DetailView):
    """Detail page for one specific post."""

    # Same model, but this view shows a single object's details.
    model = Post
    # Template for the single-post page.
    template_name = 'post.html'
    # The post is exposed to the template under this name.
    context_object_name = 'post'
class Search(ListView):
    """Paginated, filterable search page over posts."""

    model = Post
    template_name = 'search.html'
    context_object_name = 'search'
    ordering = ['-time_in']
    # Paginate with one element per page.
    paginate_by = 1

    def get_context_data(self, **kwargs):
        """Add the PostFilter, bound to the current GET params, to the context."""
        context = super().get_context_data(**kwargs)
        # Filter over this view's queryset, driven by the request's GET data.
        context['filter'] = PostFilter(self.request.GET, queryset=self.get_queryset())
        return context
class PostCreateView(PermissionRequiredMixin, CreateView):
    """Creation form for posts; requires the add_post permission."""

    template_name = 'add.html'
    form_class = PostForm
    permission_required = ('news.add_post',)
# Generic view for editing an object.
class PostEditView(LoginRequiredMixin, PermissionRequiredMixin, UpdateView):
    """Edit form for an existing post; requires login and change_post."""

    template_name = 'edit.html'
    form_class = PostForm
    permission_required = ('news.change_post',)

    def get_object(self, **kwargs):
        """Fetch the post to edit from the `pk` URL keyword argument.

        Used instead of `queryset` to look up the object being edited.
        """
        return Post.objects.get(id=self.kwargs.get('pk'))
# Generic view for deleting a post.
class PostDeleteView(PermissionRequiredMixin, DeleteView):
template_name = 'delete.html'
queryset = Post.objects.all()
permission_required = ('news.delete_post',)
success_url = '/news/' | pvlrmv/newspaper | NewsPaper/news/views.py | views.py | py | 4,311 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "django.views.generic.ListView",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "models.Post",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "models.Post.objects.order_by",
"line_number": 18,
"usage_type": "call"
},
{
"api_name":... |
22985216912 | from collections import deque
from threading import Thread
class Sequencer:
    """Writes fixed-size, possibly out-of-order chunks to a file in index order.

    Chunks are dicts with an integer 'index' and a bytes 'data' payload.
    A background thread drains the queue and writes each chunk at offset
    index * len(data), so chunks may arrive in any order as long as every
    chunk (except possibly the last) has the same size.
    """

    def __init__(self, name):
        """Open *name* for binary writing and start the writer thread."""
        self.name = name
        self.file = open(name, "wb")
        self.queue = deque()
        # Set before starting the thread so close() can never race a late
        # `running = True` inside the worker.
        self.running = True
        self.byte_sequence = []
        self.thread = Thread(target=self._writer_thread)
        # BUG FIX: the thread was created but never started, so close()
        # raised RuntimeError and no data was ever written to disk.
        self.thread.start()

    def add(self, byte):
        """Queue one chunk dict ({'index': int, 'data': bytes}) for writing."""
        self.queue.append(byte)

    def close(self):
        """Wait for the writer thread to drain the queue and close the file."""
        self.running = False
        self.thread.join()
        self.file.close()

    def _writer_thread(self):
        """Drain the queue until a stop is requested and the queue is empty."""
        while self.running or len(self.queue):
            if len(self.queue):
                byte = self.queue.popleft()
                chunk = byte['index']
                data = byte['data']
                # Offset computation assumes all chunks share this size.
                chunk_size = len(data)
                self.file.seek(chunk * chunk_size)
                self.file.write(data)

    def arrange(self):
        """Merge adjacent entries in byte_sequence into contiguous runs.

        Kept for compatibility; not used by the threaded write path.
        """
        self.byte_sequence.sort(key=lambda entry: entry['index'])
        i = 0
        while i != len(self.byte_sequence):
            current = self.byte_sequence[i]
            previous = self.byte_sequence[i - 1]
            current_sequence = current.get('index')
            previous_sequence = previous.get('last', previous.get('index'))
            # Merge when `current` directly follows the previous run.
            if (current_sequence - 1) == previous_sequence and (current_sequence - 1) > -1:
                current = self.byte_sequence.pop(i)
                previous['data'] = previous['data'] + current.get('data', b'')
                previous['last'] = current.get('last', current.get('index'))
                continue
            i += 1
        print(self.byte_sequence)
if __name__ == '__main__':
    # Ad-hoc smoke test: feed sequential chunks into the sequencer.
    # NOTE(review): obj.close() is never called, so the writer thread is
    # never joined and 'test' may be left incomplete — confirm intended.
    obj = Sequencer('test')
    obj.add({'index': 0, 'data': b'0'})
    obj.add({'index': 1, 'data': b'1'})
    obj.add({'index': 2, 'data': b'2'})
    obj.add({'index': 3, 'data': b'3'})
    obj.add({'index': 4, 'data': b'4'})
    obj.add({'index': 5, 'data': b'5'})
    obj.add({'index': 6, 'data': b'6'})
    obj.add({'index': 7, 'data': b'7'})
    obj.add({'index': 8, 'data': b'8'})
    obj.add({'index': 9, 'data': b'9'})
    # NOTE(review): these two chunks are 2 bytes long while the others are
    # 1 byte; the index*len(data) offset scheme assumes uniform sizes.
    obj.add({'index': 10, 'data': b'10'})
    obj.add({'index': 11, 'data': b'11'})
| muthuprabhu-kp/FTOU | Server/ByteSequencer.py | ByteSequencer.py | py | 2,482 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 12,
"usage_type": "call"
}
] |
11303986062 | from enum import Enum
import logging
from pathlib import Path
from transitions import Machine
from ..config import ALBUM_FOLDER_NAME_TEMPLATE
from ..config import MUSIC_PATH_NAME
from ..config import TRACK_FILE_NAME_TEMPLATE
from ..config import VA_ALBUM_FOLDER_NAME_TEMPLATE
logger = logging.getLogger(__name__)
class States(Enum):
    """Ripper lifecycle states used by the transitions state machine."""
    NO_DISC = 1
    KNOWN_DISC = 2
    RIPPING = 3
    DONE = 4
class Triggers(object):
    """Names of the state-machine triggers registered in create_ripper()."""
    START = 'start'
    KNOWN_DISC = 'known_disc'
    RIP_TRACK = 'rip_track'
    FINISH = 'finish'
    EJECT = 'eject'
class Ripper(object):
    """CD ripping workflow, driven externally by a transitions state machine.

    All side effects (grabbing audio, creating folders, tagging, moving
    files, writing the disc id) are injected as callables, keeping this
    class free of direct I/O.
    """
    def __init__(
        self,
        grab_and_convert_track_func,
        create_folder_func,
        write_meta_func,
        move_track_func,
        write_disc_id_func,
        after_state_change_callback
    ):
        # Store the injected side-effect callables and start with a clean
        # per-disc state.
        self.grab_and_convert_track_func = grab_and_convert_track_func
        self.create_folder_func = create_folder_func
        self.write_meta_func = write_meta_func
        self.move_track_func = move_track_func
        self.write_disc_id_func = write_disc_id_func
        self.after_state_change_callback = after_state_change_callback
        self._clear_internal_state()
    def _clear_internal_state(self):
        """Forget everything about the current disc (used on eject/init)."""
        self.disc_meta = None
        self.track_list = None
        self.current_track = None
        self.folder_path = None
    def set_disc_meta(self, disc_meta):
        """Remember disc metadata and reset ripping progress."""
        self.disc_meta = disc_meta
        self.track_list = []
        self.current_track = 0
    def create_folder(self, disc_meta):
        """Compute and create the destination album folder for this disc."""
        self.folder_path = self._get_folder_path(disc_meta)
        self.create_folder_func(self.folder_path)
    def _get_folder_path(self, disc_meta):
        """Build the album folder path under MUSIC_PATH_NAME.

        Uses the various-artists template when no 'artist' key is present,
        and appends a CDn subfolder for multi-disc sets.
        """
        album_path = Path(MUSIC_PATH_NAME)
        if 'artist' in disc_meta:
            album_path = album_path.joinpath(
                self._remove_unsafe_chars(
                    ALBUM_FOLDER_NAME_TEMPLATE.format(
                        artist=disc_meta['artist'],
                        title=disc_meta['title']
                    )
                )
            )
        else:
            album_path = album_path.joinpath(
                self._remove_unsafe_chars(
                    VA_ALBUM_FOLDER_NAME_TEMPLATE.format(title=disc_meta['title'])
                )
            )
        if disc_meta['total_cds'] > 1:
            album_path = album_path.joinpath('CD%s' % disc_meta['cd'])
        return album_path
    def _remove_unsafe_chars(self, path_name):
        """Replace filesystem-hostile characters (\\ / :) with spaces."""
        return path_name.replace('\\', ' ')\
            .replace('/', ' ')\
            .replace(':', ' ')
    def has_next_track(self):
        """Return True while tracks remain to rip (state-machine condition)."""
        return self.current_track < len(self.disc_meta['tracks'])
    def rip_next_track(self):
        """Grab, tag and move the next track, then advance the counter."""
        track_number = self.current_track + 1
        logger.info('Ripping track %s', track_number)
        tmp_file_path = Path(self.grab_and_convert_track_func(track_number))
        self.tag_track(track_number, str(tmp_file_path))
        target_path = self.folder_path.joinpath(
            self._get_track_filename(track_number)
        )
        self.move_track_func(tmp_file_path, target_path)
        self.track_list.append(str(target_path))
        self.current_track = track_number
        self.after_state_change_callback()
    def tag_track(self, track_number, track_filename):
        """Write artist/title/album/track-number metadata into the file."""
        track_meta = self.disc_meta['tracks'][track_number - 1]
        self.write_meta_func(
            track_filename,
            track_meta['artist'],
            track_meta['title'],
            self.disc_meta['title'],
            track_number,
            len(self.disc_meta['tracks'])
        )
    def _get_track_filename(self, track_number):
        """Build a sanitized file name like 'NN artist - title'."""
        track_meta = self.disc_meta['tracks'][track_number - 1]
        track_filename = TRACK_FILE_NAME_TEMPLATE.format(
            track_number="{:02d}".format(track_number),
            artist=track_meta['artist'],
            title=track_meta['title']
        )
        return self._remove_unsafe_chars(track_filename)
    def store_disc_id(self):
        """Persist the disc id in a hidden .disc_id file inside the album."""
        disc_id = self.disc_meta['disc_id']
        path = self.folder_path.joinpath('.disc_id')
        self.write_disc_id_func(path, disc_id)
    def get_full_state(self):
        """Return a serializable snapshot of the ripper's current state."""
        return {
            'state': self.state.value,
            'track_list': self.track_list,
            'disc_meta': self.disc_meta,
            'current_track': self.current_track,
            'folder_path': str(self.folder_path)
        }
    def on_state_change(self, *args, **kwargs):
        # Forward every machine state change to the injected callback;
        # `state` is attached to this object by the transitions Machine.
        self.after_state_change_callback()
def create_ripper(
    grab_and_convert_track_func,
    create_folder_func,
    write_meta_func,
    move_track_func,
    write_disc_id_func,
    after_state_change_callback
):
    """Build a Ripper and wire its state machine (see States/Triggers).

    Returns the Ripper instance; the transitions Machine attaches the
    trigger methods and the `state` attribute onto it.
    """
    ripper = Ripper(
        grab_and_convert_track_func,
        create_folder_func,
        write_meta_func,
        move_track_func,
        write_disc_id_func,
        after_state_change_callback
    )
    machine = Machine(ripper, states=States, initial=States.NO_DISC, after_state_change='on_state_change')
    # terminal state: disc already ripped
    machine.add_transition(Triggers.KNOWN_DISC, States.NO_DISC, States.KNOWN_DISC)
    machine.add_transition(
        Triggers.START,
        States.NO_DISC,
        States.RIPPING,
        before=['set_disc_meta', 'create_folder']
    )
    # Self-loop: rip one track per trigger while tracks remain.
    machine.add_transition(
        Triggers.RIP_TRACK,
        States.RIPPING,
        States.RIPPING,
        conditions='has_next_track',
        before='rip_next_track'
    )
    # terminal state: disc ripped successfully
    machine.add_transition(
        Triggers.FINISH,
        States.RIPPING,
        States.DONE,
        unless='has_next_track',
        before='store_disc_id'
    )
    # Eject is allowed from any state and clears the per-disc data.
    machine.add_transition(Triggers.EJECT, '*', States.NO_DISC, before='_clear_internal_state')
    return ripper
| pisarenko-net/cdp-sa | hifi_appliance/state/ripper.py | ripper.py | py | 5,818 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "enum.Enum",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "config.MUSIC_PATH_NAME",
... |
35658675778 | """The emails tests module."""
import pytest
from tests.fixtures.auth import USER_EMAIL
from communication.notifications.email import mail_managers, mail_user
from users.models import User
pytestmark = pytest.mark.django_db
def test_mail_managers(mailoutbox):
    """Should send an email to the system managers."""
    mail_managers(subject="Text message", data={"text": "<p>Test text</p>"})

    assert len(mailoutbox) == 1
    message = mailoutbox[0]
    # Both configured managers must be addressed.
    assert message.recipients() == ["admin@example.com", "manager@example.com"]
    assert "Text message" in message.subject
    # The HTML alternative must carry the rendered body text.
    assert "<p>Test text" in message.alternatives[0][0]
def test_mail_user(
    user: User,
    mailoutbox,
):
    """Should send an email to the user."""
    mail_user(
        user=user,
        template="message_notification",
        subject="Text message",
        data={},
    )

    assert len(mailoutbox) == 1
    message = mailoutbox[0]
    # The message must go to the fixture user's address only.
    assert message.recipients() == [USER_EMAIL]
    assert "Text message" in message.subject
| webmalc/d8base-backend | communication/tests/email_tests.py | email_tests.py | py | 1,000 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pytest.mark",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "communication.notifications.email.mail_managers",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "users.models.User",
"line_number": 22,
"usage_type": "name"
},
{
... |
31482133421 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import random
from datetime import datetime
from pathlib2 import Path
from common import _utils
def main(argv=None):
    """Submit a SageMaker batch transform job, wait for completion, and
    record the S3 output location in the file named by --output_location_file.
    """
    parser = argparse.ArgumentParser(description='SageMaker Batch Transformation Job')
    parser.add_argument('--region', type=str, help='The region where the cluster launches.')
    parser.add_argument('--model_name', type=str, help='The name of the model that you want to use for the transform job.')
    parser.add_argument('--input_location', type=str, help='The S3 location of the data source that is associated with a channel.')
    parser.add_argument('--output_location', type=str, help='The Amazon S3 path where you want Amazon SageMaker to store the results of the transform job.')
    parser.add_argument('--output_location_file', type=str, help='File path where the program will write the Amazon S3 URI of the transform job results.')
    args = parser.parse_args()

    logging.getLogger().setLevel(logging.INFO)
    client = _utils.get_client(args.region)
    logging.info('Submitting Batch Transformation request to SageMaker...')
    batch_job_name = _utils.create_transform_job(
        client, args.model_name, args.input_location, args.output_location)
    logging.info('Batch Job request submitted. Waiting for completion...')
    # Blocks until SageMaker reports the job finished.
    _utils.wait_for_transform_job(client, batch_job_name)
    _utils.print_tranformation_job_result(args.output_location)

    Path(args.output_location_file).parent.mkdir(parents=True, exist_ok=True)
    # BUG FIX: `unicode(...)` only exists on Python 2 and raised NameError
    # under Python 3. A u'' literal is valid on both interpreters and still
    # hands pathlib2's write_text a unicode string on Python 2.
    Path(args.output_location_file).write_text(u'{0}'.format(args.output_location))
    logging.info('Batch Transformation creation completed.')


if __name__ == "__main__":
    main()
| pamarquez/pipelineHW | components/aws/sagemaker/batch_transform/src/batch_transform.py | batch_transform.py | py | 2,183 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "common._u... |
31474544931 | #!/home/apollo/anaconda3/bin/python3
#-*- coding: utf-8 -*-
#******************************************************************************
# Author : jtx
# Create : 2020-03-31 19:05
# Last modified: 2020-04-09 14:18
# Filename : patent_kbp.py
# Description : 专利-->企业 关系添加
#******************************************************************************
import configparser
import sys
from pymongo import MongoClient
from pymongo import errors
from pyArango.connection import Connection as ArangoConnection
from pyArango.theExceptions import AQLFetchError
import pymysql
from dateutil import parser
import datetime
import json
import logging
import re
import copy
import requests
import os
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
logger = logging.getLogger(__name__)
dir_path = os.path.dirname(__file__)
kbp_path = os.path.dirname(dir_path)
config_path = os.path.join(kbp_path,"config.ini")
class RelationPipeline(object):
    """Adds patent -> company / industry / division relations in ArangoDB.

    Schemas for administrative divisions and industries are preloaded from
    MySQL; industry tags for each patent come from an HTTP classifier
    service configured in config.ini.
    """
    def __init__(self):
        # Load connection settings and open the ArangoDB collections.
        self.config = configparser.ConfigParser()
        self.config.read(config_path)
        self.arango_con = ArangoConnection(arangoURL=self.config.get("arango","arango_url"),username= self.config.get("arango","user"),password=self.config.get("arango","passwd"))
        self.arango_db = self.arango_con[self.config.get("arango","db")]
        self.kb_patent = self.arango_db[self.config.get("arango","kb_patent")]
        self.kb_company = self.arango_db[self.config.get("arango","kb_company")]
        self.industry_url = self.config.get("url","patent_classifier")
        self._init_division_schema() # init division_schema from mysql
        self._init_industry_schema()
        self.count_graph_update = 0 # number of relation records updated in arango
        self.total = 0 # total patents needing relations for the processed date
    def _init_division_schema(self):
        '''
        Load administrative-division entities from MySQL for relation building.
        '''
        self.division_schema = {}
        sql_conn = pymysql.connect( host = self.config.get("mysql","host") ,
                                    user = self.config.get("mysql","user") ,
                                    passwd = self.config.get("mysql","passwd"),
                                    port = self.config.getint("mysql","port") ,
                                    db = self.config.get("mysql","db"),
                                    charset = "utf8" )
        sql_cur = sql_conn.cursor()
        # Initialize the administrative-division relation schema, keyed by name.
        sql_query_industry = "select name, id, level, parent_id from {}".format(self.config.get("mysql","res_division"))
        sql_cur.execute(sql_query_industry)
        divisions = sql_cur.fetchall()
        for division in divisions:
            division_name, division_id, division_level, division_parent_id = division
            self.division_schema[division_name] = {
                "relation_type":"concept_relation/100004",
                "object_name":division_name,
                "object_type": "division",
                "object_id": division_id
            }
        sql_cur.close()
        sql_conn.close()
        logger.info("MYSQL division schema 加载完成")
    def _init_industry_schema(self):
        '''
        Load the industry schema from the MySQL res_industry table.
        '''
        # NOTE(review): unlike _init_division_schema, this connection is
        # never closed — confirm whether that is intentional.
        self.industry_schema = {}
        sql_conn = pymysql.connect( host = self.config.get("mysql","host") ,
                                    user = self.config.get("mysql","user") ,
                                    passwd = self.config.get("mysql","passwd"),
                                    port = self.config.getint("mysql","port") ,
                                    db = self.config.get("mysql","db"),
                                    charset = "utf8" )
        sql_cur = sql_conn.cursor()
        # Initialize the industry / industry-field schema, keyed by id.
        sql_query_industry = "select name, id, parent_id from {}".format(self.config.get("mysql","res_industry"))
        sql_cur.execute(sql_query_industry)
        labels = sql_cur.fetchall()
        for industry in labels:
            industry_name, industry_id, parent_id = industry
            self.industry_schema[industry_id] = {
                "relation_type":"concept_relation/100011",
                "object_name":industry_name,
                "object_type": "industry",
                "object_id": industry_id,
                "object_parent_id": parent_id
            }
    def query_process_patent(self, process_date):
        # Accepts 'yesterday', 'today' or a literal 'YYYY-MM-DD' string and
        # returns all patents created on/after that date.
        if process_date == "yesterday":
            process_date = (datetime.date.today() - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
        elif process_date == "today":
            # NOTE(review): the `datetime` *module* has no today(); this
            # branch would raise AttributeError — likely meant
            # datetime.date.today(). Confirm.
            process_date = datetime.today().strftime("%Y-%m-%d")
        elif len(process_date.split("-")) == 3:
            process_date = process_date
        else:
            raise Exception("无效参数")
        iso_date_str = process_date + 'T00:00:00+08:00'
        iso_date = parser.parse(iso_date_str)
        aql = "FOR patent IN {} FILTER patent.create_time >= '{}' SORT patent.create_time return patent".format(
            self.config.get("arango","kb_patent"), iso_date)
        try:
            res = self.arango_db.fetch_list(aql)
        except AQLFetchError as e:
            '''没有查到相关数据时,fetch_list会抛出异常'''
            # fetch_list raises when no matching data is found.
            res = []
            logger.warn("Arango专利库没有查到数据",e)
        self.total = len(res)
        self.process_date = process_date
        logger.info("[{}],专利知识库查到待处理数据[{}]个".format(process_date, self.total))
        return res
    def get_related_industry_tags(self, industry_id):
        '''
        Walk up from a sub-field and return the field plus all parent tags.
        '''
        relations = []
        # Skip investment fields whose definition differs from the graph's.
        if not industry_id in self.industry_schema:
            return relations
        relations.append(self.industry_schema[industry_id])
        parent_id = self.industry_schema[industry_id]["object_parent_id"]
        while (parent_id):
            node = self.industry_schema[parent_id]
            relations.append(node)
            parent_id = node["object_parent_id"]
        return relations
    def process_company_rel(self, properties):
        '''Build patent -> owning-company relations from the applicant list.'''
        company_rels = []
        applicants = properties["applicant"]
        # NOTE(review): this dedup result is immediately overwritten by the
        # loop variable below — probably meant `applicants = list(set(...))`.
        applicant = list(set(applicants))
        for applicant in applicants:
            company = self.kb_company.fetchFirstExample({"name": applicant})
            if not company:
                continue
            company = company[0] # fetchFirstExample returns a cursor
            company_rel = {
                "relation_type":"concept_relation/100001",
                "object_name": company["name"],
                "object_type": "company",
                "object_id": company["_id"]
            }
            company_rels.append(company_rel)
        return company_rels
    def process_industry_rel(self, _key):
        '''
        Resolve industry-field tags (by id) via the classifier service.
        '''
        industry_tags = []
        industry_field_tags = []
        patent_id = _key
        post_data = {
            "patent_id": patent_id ,
        }
        pack_data = json.dumps(post_data)
        try:
            res = requests.post(self.industry_url, data=pack_data)
            if res.status_code == 200:
                tags = res.json().get("body")
                industry_field_tags.extend(tags)
        except Exception as e:
            logging.error("获取专家产业领域失败,专家id=[{}],接口=[{}]".format(patent_id,self.industry_url),e)
        # Expand each field with its ancestors, de-duplicating as we go.
        for field in industry_field_tags:
            for node in self.get_related_industry_tags(field["id"]):
                if node not in industry_tags:
                    industry_tags.append(node)
        return industry_tags
    def process_division_rel(self, properties):
        # Map province/city/area names onto administrative-division relations.
        div_rel = []
        province = properties["province"]
        city = properties["city"]
        area = properties["area"]
        if province and province in self.division_schema.keys():
            # Municipalities are stored without the trailing "市" at
            # province level.
            if province in ["北京市","上海市","重庆市","天津市"]:
                province = province.replace("市","")
            div_rel.append(self.division_schema[province])
        if city and city in self.division_schema.keys():
            if city in ["北京","上海","重庆","天津"]:
                div_rel.append(self.division_schema[city+'市'])
        if area and area in self.division_schema.keys():
            div_rel.append(self.division_schema[area])
        return div_rel
    def process_relations(self, properties, _key):
        '''
        Assemble all relations for one patent.
        '''
        relations = []
        company_rel = self.process_company_rel(properties)
        relations.extend(company_rel)
        # Link industry classifications.
        industry_rel = self.process_industry_rel(_key)
        relations.extend(industry_rel)
        # Administrative divisions by place of origin (currently disabled).
        #division_rel = self.process_division_rel(properties)
        #relations.extend(division_rel)
        return relations
    def process(self, scan_date):
        # Entry point: fetch patents for scan_date and write their relations.
        process_patents = self.query_process_patent(scan_date)
        count = 0
        # Update each patent document in ArangoDB.
        for patent in process_patents:
            count += 1
            #logger.info("处理专利关系,专利名=[{}]".format(patent["name"]))
            patent_key = patent["_key"]
            relations = self.process_relations(patent["properties"], patent_key)
            try:
                doc = self.kb_patent[patent_key]
                doc["relations"] = relations
                doc["update_time"] = datetime.datetime.today()
                doc.save()
                self.count_graph_update += 1
            except Exception as e:
                logger.error("专利关系添加失败,专利名=[{}],id=[{}]".format(patent["name"],patent_key))
            # Progress log every 100 patents and at the end.
            if count % 100 == 0 or count == self.total:
                logger.info("前[{}]家专利关系添加完成".format(count))
        logger.info("日期[{}]专利知识库共找到专利{}个,arango专利库添加专利关系{}个".format(
            self.process_date, self.total, self.count_graph_update))
if __name__=="__main__":
# 最早日期 2019-06-03
rel = RelationPipeline()
if len(sys.argv) > 1:
rel.process(sys.argv[1])
else:
rel.process("yesterday")
| RogerJTX/KbpPipeline_ExpertSystem | patent/patent_relation.py | patent_relation.py | py | 10,621 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path.dirna... |
16129130585 | # from pandas import *
# from pylab import *
# import numpy as np
# from matplotlib import pyplot as plt
# mpl.rcParams['font.sans-serif'] = ['SimHei'] # 加载中文字体的神奇呀
# idx = Index(np.arange(1,7))
# df = DataFrame(np.random.randn(6, 2), index=idx, columns=['', 'count'])
# valss = np.array([['总数', 100], ['嘿嘿', 10], ['流皮', '5']])
# vals = np.around(df.values,2)
# fig = plt.figure(figsize=(9,4))
# ax = fig.add_subplot(111, frameon=False, xticks=[], yticks=[]) # 去掉背景的意思嘛
# the_table=plt.table(cellText=valss, rowLabels=None, colLabels=['', 'count'],colWidths = [0.1]*vals.shape[1], loc='center',cellLoc='center')
# the_table.set_fontsize(20)
# the_table.scale(2.5,2.58)
# plt.show() # todo 画表格的
# import numpy as np
# import matplotlib.pyplot as plt
# men_means, men_std = (20, 35, 30, 35, 27), (0, 3, 4, 1, 2)
# women_means, women_std = (25, 32, 34, 20, 25), (3, 5, 2, 3, 3)
# ind = np.arange(len(men_means)) # the x locations for the groups
# width = 0.35 # the width of the bars
# fig, ax = plt.subplots()
# rects1 = ax.bar(ind - width/2, men_means, width,
# color='SkyBlue', label='Men')
# rects2 = ax.bar(ind + width/2, women_means, width,
# color='IndianRed', label='Women')
# ax.set_ylabel('Scores')
# ax.set_title('Scores by group and gender')
# ax.set_xticks(ind)
# ax.set_xticklabels(('G1', 'G2', 'G3', 'G4', 'G5'))
# ax.legend()
# def autolabel(rects, xpos='center'):
# xpos = xpos.lower() # normalize the case of the parameter
# ha = {'center': 'center', 'right': 'left', 'left': 'right'}
# offset = {'center': 0.5, 'right': 0.57, 'left': 0.43} # x_txt = x + w*off
#
# for rect in rects:
# height = rect.get_height()
# ax.text(rect.get_x() + rect.get_width()*offset[xpos], 1.01*height,
# '{}'.format(height), ha=ha[xpos], va='bottom')
# autolabel(rects1, "left")
# autolabel(rects2, "right")
# plt.show() # todo 画柱形图的
# import numpy as np
# import matplotlib
# import matplotlib.pyplot as plt
# sphinx_gallery_thumbnail_number = 2
# vegetables = ["cucumber", "tomato", "lettuce", "asparagus",
# "potato", "wheat", "barley"]
# vegetables1 = [" ", " ", " ", " ",
# " ", " ", " "]
# vegetables2 = ["a", "asd", "asd", "asd",
# "zxc", "asd", "qwe"]
# farmers = ["Farmer Joe", "Upland Bros.", "Smith Gardening",
# "Agrifun", "Organiculture", "BioGoods Ltd.", "Cornylee Corp."]
# harvest = np.array([[0.8, 2.4, 2.5, 3.9, 0.0, 4.0, 0.0],
# [2.4, 0.0, 4.0, 1.0, 2.7, 0.0, 0.0],
# [1.1, 2.4, 0.8, 4.3, 1.9, 4.4, 0.0],
# [0.6, 0.0, 0.3, 0.0, 3.1, 0.0, 0.0],
# [0.7, 1.7, 0.6, 2.6, 2.2, 6.2, 0.0],
# [1.3, 1.2, 0.0, 0.0, 0.0, 3.2, 5.1],
# [0.1, 2.0, 0.0, 1.4, 0.0, 1.9, 6.3]])
# fig, ax = plt.subplots()
# im = ax.imshow(harvest)
# ax.set_xticks(np.arange(len(farmers)))
# ax.set_xticks(np.arange(len(farmers)+1)-.5, minor=True)
# ax.set_xticklabels(farmers)
# ax.set_yticks(np.arange(len(vegetables)))
# ax.set_yticks(np.arange(len(vegetables)+1)-.5, minor=True)
# # ax.set_yticklabels(['' for _ in range(len(vegetables))])
# ax.set_yticklabels(vegetables)
# # ... and label them with the respective list entries
# # ax.set_yticklabels([1,2,3,4,5,6,7])
# # ax.set_yticklabels([str(i) for i in range(len(vegetables))])
# ax.grid(which="minor", color="w", linestyle='-', linewidth=0)
# # Rotate the tick labels and set their alignment.
# plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
# rotation_mode="anchor")
# # Loop over data dimensions and create text annotations.
# for i in range(len(vegetables)):
# for j in range(len(farmers)):
# text = ax.text(j, i, harvest[i, j],
# ha="center", va="center", color="w")
# ax.set_title("Harvest of local farmers (in tons/year)")
# fig.tight_layout()
# plt.show()
# import matplotlib.pyplot as plt
# def make_patch_spines_invisible(ax):
# ax.set_frame_on(True)
# ax.patch.set_visible(False)
# for sp in ax.spines.values():
# sp.set_visible(False)
# fig, host = plt.subplots()
# # fig.subplots_adjust(right=0.75)
# par1 = host.twinx()
# par2 = host.twinx()
# # Offset the right spine of par2. The ticks and label have already been
# # placed on the right by twinx above.
# par2.spines["right"].set_position(("axes", 1.2))
# # Having been created by twinx, par2 has its frame off, so the line of its
# # detached spine is invisible. First, activate the frame but make the patch
# # and spines invisible.
# make_patch_spines_invisible(par2)
# # Second, show the right spine.
# par2.spines["right"].set_visible(True)
# p1, = host.plot([0, 1, 2], [0, 1, 2], "b-", label="Density")
# p2, = par1.plot([0, 1, 2], [0, 3, 2], "r-", label="Temperature")
# p3, = par2.plot([0, 1, 2], [50, 30, 15], "g-", label="Velocity")
# host.set_xlim(0, 2)
# host.set_ylim(0, 2)
# par1.set_ylim(0, 4)
# par2.set_ylim(1, 65)
# host.set_xlabel("Distance")
# host.set_ylabel("Density")
# par1.set_ylabel("Temperature")
# par2.set_ylabel("Velocity")
# host.yaxis.label.set_color(p1.get_color())
# par1.yaxis.label.set_color(p2.get_color())
# par2.yaxis.label.set_color(p3.get_color())
# tkw = dict(size=4, width=1.5)
# host.tick_params(axis='y', colors=p1.get_color(), **tkw)
# par1.tick_params(axis='y', colors=p2.get_color(), **tkw)
# par2.tick_params(axis='y', colors=p3.get_color(), **tkw)
# host.tick_params(axis='x', **tkw)
# lines = [p1, p2, p3]
# host.legend(lines, [l.get_label() for l in lines])
# plt.show()
import numpy as np
import matplotlib.pyplot as plt
men_means, men_std = (20, 35, 30, 35, 27), (2, 3, 4, 1, 2)
women_means, women_std = (25, 32, 34, 20, 25), (3, 5, 2, 3, 3)
midde_means, midde_std = (25, 32, 34, 20, 25), (3, 5, 2, 3, 3)
ind = np.arange(len(men_means)) # the x locations for the groups
width = 0.2 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.barh(ind - width/2, men_means, width,
color='SkyBlue', label='Men')
rects2 = ax.barh(ind + width/2, women_means, width,
color='r', label='Women')
rects3 = ax.barh(ind + width/2 + width, midde_means, width,
color='IndianRed', label='midde')
ax.set_ylabel('Scores')
ax.set_title('Scores by group and gender')
ax.set_yticks(ind)
ax.set_yticklabels(('G1', 'G2', 'G3', 'G4', 'G5'))
ax.legend()
def autolabel(rects, xpos='center'):
xpos = xpos.lower() # normalize the case of the parameter
ha = {'center': 'center', 'right': 'left', 'left': 'right'}
offset = {'center': 0.5, 'right': 0.57, 'left': 0.43} # x_txt = x + w*off
for rect in rects:
width = rect.get_width()
text = '{}'.format(111)
ax.text(1.01 * rect.get_width(), rect.get_y(), text, va='bottom')
rects1_values = []
for rects in [rects1, rects2, rects2]:
_rects1_values = []
for rect in rects:
_rects1_values.append(rect.get_height())
rects1_values.append(np.array(_rects1_values))
autolabel(rects1, "center")
autolabel(rects2, "center")
autolabel(rects3, "center")
plt.show()
import matplotlib.pyplot as plt
import numpy as np

# Paired horizontal bar demo: one random "performance" series drawn twice
# (green and red) around each category position, with error bars.
np.random.seed(19680801)
bar_thickness = 0.2
plt.rcdefaults()
fig, ax = plt.subplots()
names = ('Tom', 'Dick', 'Harry', 'Slim', 'Jim')
positions = np.arange(len(names))
perf = 3 + 10 * np.random.rand(len(names))
perf_err = np.random.rand(len(names))
# Two mirrored bars per category: below-center in green, above in red.
for offset, bar_color in ((-bar_thickness / 2, 'green'), (bar_thickness / 2, 'red')):
    ax.barh(positions + offset, perf, bar_thickness, xerr=perf_err,
            align='center', color=bar_color, ecolor='black')
print(positions, names)
ax.set_yticks(positions)
ax.set_yticklabels(names)
ax.invert_yaxis()  # labels read top-to-bottom
ax.set_xlabel('Performance')
ax.set_title('How fast do you want to go today?')
plt.show()
| czasg/ScrapyLearning | czaSpider/dump2/数据分析个人版/诚信数据咯/画图抱佛脚.py | 画图抱佛脚.py | py | 7,890 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.arange",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "numpy.ar... |
27278263299 | import redis
class RedisClient:
    """Thin wrapper around a local Redis connection with a health check."""

    def __init__(self):
        # Connect to the default local Redis instance, database 0.
        self.client = redis.Redis(
            host='127.0.0.1',
            port=6379,
            db=0
        )

    def db_health(self):
        """Print "PONG" when the server answers, otherwise a failure notice.

        BUG FIX: redis-py raises an exception (e.g. ConnectionError) when
        the server is unreachable instead of returning False, so the
        original ``else`` branch was unreachable; the exception is now
        caught and reported.
        """
        try:
            alive = self.client.ping()
        except Exception:
            alive = False
        if alive:
            print("PONG")
        else:
            print("Connection failed to db")
| kliu2python/allsee | utils/redis_client.py | redis_client.py | py | 315 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "redis.Redis",
"line_number": 6,
"usage_type": "call"
}
] |
28786462902 | import pandas as pd
import geopandas as gpd
import osmnx as ox
from h3 import h3
from rich.progress import track
from urbanpy.utils import geo_boundary_to_polygon
from typing import Sequence, Union
# Public API of this module.
__all__ = [
    "merge_geom_downloads",
    "filter_population",
    "remove_features",
    "gen_hexagons",
    "merge_shape_hex",
    "overlay_polygons_hexs",
    "resolution_downsampling",
    "osmnx_coefficient_computation",
]
def merge_geom_downloads(
    gdfs: Sequence[gpd.GeoDataFrame], crs: str = "EPSG:4326"
) -> gpd.GeoDataFrame:
    """
    Merge several GeoDataFrames from OSM download_osm into one geometry.

    Parameters
    ----------
    gdfs: array_like
        GeoDataFrames to merge. Assumed to share the same CRS.
    crs: str
        Valid string to pass to the crs param of the
        geopandas.GeoDataFrame constructor.

    Returns
    -------
    concat: GeoDataFrame
        Single-row GeoDataFrame whose geometry is the unary union of all
        input geometries, i.e. one boundary for the whole city.
    """
    # Stack all inputs, then dissolve everything into a single geometry.
    unioned_geometry = pd.concat(gdfs).unary_union
    return gpd.GeoDataFrame(geometry=[unioned_geometry], crs=crs)
def filter_population(
    pop_df: pd.DataFrame, polygon_gdf: gpd.GeoDataFrame
) -> gpd.GeoDataFrame:
    """
    Filter an HDX population download to the polygon's bounding box.

    Parameters
    ----------
    pop_df: DataFrame
        Result from download_hdx, with 'longitude' and 'latitude' columns.
    polygon_gdf: GeoDataFrame
        Result from download_osm or merge_geom_downloads.

    Returns
    -------
    GeoDataFrame
        The rows of pop_df falling inside the polygon's total bounds,
        with point geometries built from longitude/latitude (EPSG:4326).
    """
    west, south, east, north = polygon_gdf.geometry.total_bounds
    # Keep only points whose coordinates fall within the bounding box.
    in_lon = pop_df["longitude"].between(west, east)
    in_lat = pop_df["latitude"].between(south, north)
    within_bbox = pop_df[in_lon & in_lat]
    point_geometries = gpd.points_from_xy(
        within_bbox["longitude"], within_bbox["latitude"]
    )
    return gpd.GeoDataFrame(within_bbox, geometry=point_geometries, crs="EPSG:4326")
def remove_features(gdf: gpd.GeoDataFrame, bounds: Sequence[float]) -> gpd.GeoDataFrame:
    """
    Remove the features falling inside a bounding box.

    Parameters
    ----------
    gdf: GeoDataFrame
        Point features, e.g. the output of filter_population.
    bounds: array_like
        [minx, miny, maxx, maxy] box to drop (GeoPandas total_bounds order).

    Returns
    -------
    GeoDataFrame
        The input frame without the features inside the box.
    """
    west, south, east, north = bounds
    # .cx selects by coordinate slice; drop everything it matches.
    inside_box = gdf.cx[west:east, south:north]
    return gdf.drop(inside_box.index)
def gen_hexagons(resolution: int, city: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
    """
    Convert an input multipolygon layer to H3 hexagons at a resolution.

    Parameters
    ----------
    resolution: int, 0:15
        H3 resolution; higher values create smaller hexagons.
    city: GeoDataFrame
        Input city polygons to cover with hexagons.

    Returns
    -------
    GeoDataFrame
        One row per hexagon with columns ('hex', 'geometry'), EPSG:4326.
    """
    hex_ids = []
    hex_geometries = []
    # Work polygon-by-polygon: explode any MultiPolygon into its parts.
    parts = city.explode(index_parts=True).reset_index(drop=True)
    for _, part in parts.iterrows():
        covering = h3.polyfill(
            part["geometry"].__geo_interface__, res=resolution, geo_json_conformant=True
        )
        for hex_id in covering:
            hex_ids.append(hex_id)
            hex_geometries.append(geo_boundary_to_polygon(hex_id))
    # Drop hexagons shared by adjacent polygon parts.
    hexagons = gpd.GeoDataFrame(hex_ids, geometry=hex_geometries).drop_duplicates()
    hexagons.crs = "EPSG:4326"
    # The id column is created unnamed (0); give it a readable name.
    return hexagons.rename(columns={0: "hex"})
def merge_shape_hex(
    hexs: gpd.GeoDataFrame,
    shape: gpd.GeoDataFrame,
    agg: dict,
    how="inner",
    predicate="intersects",
) -> gpd.GeoDataFrame:
    """
    Merges a H3 hexagon GeoDataFrame with a Point GeoDataFrame and aggregates the
    point gdf data.

    Parameters
    ----------
    hexs: GeoDataFrame
        Input GeoDataFrame containing hexagon geometries
    shape: GeoDataFrame
        Input GeoDataFrame containing points and features to be aggregated
    agg: dict
        A dictionary with column names as keys and values as aggregation
        operations. The aggregation must be one of {'sum', 'min', 'max'}.
    how: str. One of {'inner', 'left', 'right'}. Default 'inner'.
        Determines how to merge data:
        'left' uses keys from left and only retains geometry from left
        'right' uses keys from right and only retains geometry from right
        'inner': use intersection of keys from both dfs; retain only left geometry column
    predicate: str. One of {'intersects', 'contains', 'within'}. Default 'intersects'
        Determines how geometries are queried for merging.

    Returns
    -------
    hexs: GeoDataFrame
        Result of a spatial join within hex and points. All features are aggregated
        based on the input parameters

    Examples
    --------
    >>> lima = urbanpy.download.nominatim_osm('Lima, Peru', 2)
    >>> pop_lima = urbanpy.download.hdx_fb_population('peru', 'full')
    >>> pop_df = urbanpy.filter_population(pop_lima, lima)
    >>> hexs = urbanpy.geom.gen_hexagons(8, lima)
    >>> urbanpy.geom.merge_shape_hex(hexs, pop_df, {'population_2020':'sum'}, 'inner', 'within')
    0               | geometry                                          | population_2020
    888e628d8bfffff | POLYGON ((-76.66002 -12.20371, -76.66433 -12.2... | NaN
    888e62c5ddfffff | POLYGON ((-76.94564 -12.16138, -76.94996 -12.1... | 14528.039097
    888e62132bfffff | POLYGON ((-76.84736 -12.17523, -76.85167 -12.1... | 608.312696
    888e628debfffff | POLYGON ((-76.67982 -12.18998, -76.68413 -12.1... | NaN
    888e6299b3fffff | POLYGON ((-76.78876 -11.97286, -76.79307 -11.9... | 3225.658803
    """
    joined = gpd.sjoin(shape, hexs, how=how, predicate=predicate)
    # Uses index right based on the order of points and hex. Right takes hex index
    hex_merge = joined.groupby("index_right").agg(agg)
    # Avoid SpecificationError by copying the DataFrame
    ret_hex = hexs.copy()
    # Write each aggregated column back onto the matching hexagon rows;
    # hexagons with no joined points keep NaN.
    for key in agg:
        ret_hex.loc[hex_merge.index, key] = hex_merge[key].values
    return ret_hex
def overlay_polygons_hexs(
    polygons: gpd.GeoDataFrame,
    hexs: gpd.GeoDataFrame,
    hex_col: str,
    columns: Sequence[str],
) -> gpd.GeoDataFrame:
    """
    Overlay polygons with H3 hexagons, splitting each polygon's values
    among hexagons proportionally to the overlapped area.

    Parameters
    ----------
    polygons: GeoDataFrame
        Polygons and the columns to be redistributed.
    hexs: GeoDataFrame
        Hexagons at the desired output resolution.
    hex_col: str
        Name of the column holding the hex id.
    columns: list
        Column names whose values are scaled by the overlap fraction.

    Returns
    -------
    GeoDataFrame
        One row per hexagon with the area-weighted sums of `columns`.
    """
    source = polygons.copy()  # keep the caller's frame untouched
    source["poly_area"] = source.geometry.area
    # Intersect polygons with hexagons; each piece remembers its source area.
    pieces = gpd.overlay(source, hexs, how="intersection")
    # Fraction of the original polygon covered by each piece.
    share = pieces.geometry.area / pieces["poly_area"]
    pieces[columns] = pieces[columns].apply(lambda col: col * share)
    # Sum the redistributed values per hexagon.
    aggregated = pieces.groupby(hex_col)[columns].sum()
    merged = pd.merge(
        left=aggregated, right=hexs[[hex_col, "geometry"]], on=hex_col
    )
    return gpd.GeoDataFrame(
        merged[[hex_col] + columns], geometry=merged["geometry"], crs=hexs.crs
    )
def resolution_downsampling(
    gdf: gpd.GeoDataFrame, hex_col: str, coarse_resolution: int, agg: dict
) -> gpd.GeoDataFrame:
    """
    Downsample hexagon resolution aggregating indicated metrics (e.g. Transform hexagon resolution from 9 to 6).

    Parameters
    ----------
    gdf: GeoDataFrame
        GeoDataFrame with hexagon geometries (output from gen_hexagons).
    hex_col: str
        Determines the column with the hex id.
    coarse_resolution: int, 0:15
        Hexagon resolution lower than gdf actual resolution (higher values create smaller hexagons).
    agg: dict
        Mapping of column name to aggregation operation (as accepted by
        DataFrame.agg) applied when fine hexagons are grouped under their
        coarse parent.

    Returns
    -------
    gdfc: GeoDataFrame
        GeoDataFrame with lower resolution hexagons geometry and metrics aggregated as indicated.
    """
    gdf_coarse = gdf.copy()
    coarse_hex_col = "hex_{}".format(coarse_resolution)
    # Map every fine hexagon to its parent at the coarse resolution.
    gdf_coarse[coarse_hex_col] = gdf_coarse[hex_col].apply(
        lambda x: h3.h3_to_parent(x, coarse_resolution)
    )
    dfc = gdf_coarse.groupby([coarse_hex_col]).agg(agg).reset_index()
    # Rebuild polygon geometries from the coarse hex ids.
    gdfc_geometry = dfc[coarse_hex_col].apply(geo_boundary_to_polygon)
    return gpd.GeoDataFrame(dfc, geometry=gdfc_geometry, crs=gdf.crs)
def osmnx_coefficient_computation(
    gdf,
    net_type,
    basic_stats,
    extended_stats,
    connectivity=False,
    anc=False,
    ecc=False,
    bc=False,
    cc=False,
):
    """
    Apply osmnx's graph from polygon to query a city's street network within a geometry.
    This may be a long procedure given the hexagon layer resolution.

    Parameters
    ----------
    gdf: GeoDataFrame
        GeoDataFrame with geometries to download graphs contained within them.
    net_type: str
        Network type to download. One of {'drive', 'drive_service', 'walk', 'bike', 'all', 'all_private'}
    basic_stats: list
        List of basic stats to compute from downloaded graph
    extended_stats: list
        List of extended stats to compute from graph
    connectivity: bool. Default False.
        Compute node and edge connectivity
    anc: bool. Default False.
        Compute avg node connectivity
    ecc: bool. Default False.
        Compute shortest paths, eccentricity and topological metric
    bc: bool. Default False.
        Compute node betweeness centrality
    cc: bool. Default False.
        Compute node closeness centrality
    For more detail about these parameters, see https://osmnx.readthedocs.io/en/stable/osmnx.html#module-osmnx.stats

    Returns
    -------
    gdf: GeoDataFrame
        Input GeoDataFrame with updated columns containing the selected metrics

    Examples
    --------
    >>> hexagons = urbanpy.geom.gen_hexagons(8, lima)
    >>> urbanpy.geom.osmnx_coefficient_computation(hexagons.head(), 'walk', ['circuity_avg'], [])
    On record 1:  There are no nodes within the requested geometry
    On record 3:  There are no nodes within the requested geometry
    hex             | geometry                                          | circuity_avg
    888e62c64bfffff | POLYGON ((-76.89763 -12.03869, -76.90194 -12.0... | 1.021441
    888e6212e1fffff | POLYGON ((-76.75291 -12.19727, -76.75722 -12.2... | NaN
    888e62d333fffff | POLYGON ((-77.09253 -11.83762, -77.09685 -11.8... | 1.025313
    888e666c2dfffff | POLYGON ((-76.93109 -11.79031, -76.93540 -11.7... | NaN
    888e62d4b3fffff | POLYGON ((-76.87935 -12.03688, -76.88366 -12.0... | 1.044654
    """
    # May be a lengthy download depending on the amount of features
    # NOTE(review): ox.extended_stats was removed in osmnx >= 1.0; this
    # code presumably targets an older osmnx — confirm the pinned version.
    for index, row in track(
        gdf.iterrows(),
        total=gdf.shape[0],
        description="Computing road network coefficients...",
    ):
        try:
            graph = ox.graph_from_polygon(row["geometry"], net_type)
            b_stats = ox.basic_stats(graph)
            ext_stats = ox.extended_stats(graph, connectivity, anc, ecc, bc, cc)
            # Missing stats yield None via dict.get, leaving NaN in the frame.
            for stat in basic_stats:
                gdf.loc[index, stat] = b_stats.get(stat)
            for stat in extended_stats:
                gdf.loc[index, stat] = ext_stats.get(stat)
        except Exception as err:
            # Best-effort: a hexagon with no street network is skipped,
            # not fatal (e.g. "There are no nodes within the requested geometry").
            print(f"On record {index}: ", err)
    return gdf
| EL-BID/urbanpy | urbanpy/geom/geom.py | geom.py | py | 15,013 | python | en | code | 85 | github-code | 36 | [
{
"api_name": "typing.Sequence",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "geopandas.GeoDataFrame",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "geopandas.GeoDataFrame",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pa... |
17849746047 | from .data_metabolite_to_standard_name_dict import data_metabolite_to_standard_name_dict
from ..complete_dataset_class import CompleteDataset, natural_distribution_anti_correction, check_negative_data_array
from scripts.src.common.config import DataType, Direct, Keywords as CommonKeywords
from ..common_functions import average_mid_data_dict, glucose_infusion_input_metabolite_obj_dict_generator
from ..config import default_glucose_infusion_labeled_ratio
from .c13_glucose_enrichment_plasma import glucose_enrichment_plasma_dict
class Keyword(object):
    # String constants used as parameter-dict keys and tissue names so
    # call sites avoid bare string literals.
    tissue = 'tissue'
    patient = 'patient'
    index = 'index'
    kidney = 'kidney'
    carcinoma = 'carcinoma'
    brain = 'brain'
    # Repeat indices averaged when the caller requests the mean MID.
    index_average_list = [1, 2, 3]
def input_metabolite_data_obj_dict_generator(tissue_name, tissue_index):
    """Build the labeled-glucose input metabolite objects for one sample.

    Kidney and carcinoma samples use the patient-specific plasma
    enrichment; other tissues fall back to the default infusion ratio.
    """
    if tissue_name in (Keyword.kidney, Keyword.carcinoma):
        label_ratio = glucose_enrichment_plasma_dict[tissue_index]
    else:
        label_ratio = default_glucose_infusion_labeled_ratio
    return glucose_infusion_input_metabolite_obj_dict_generator(label_ratio)
class SpecificParameters(CompleteDataset):
    # Dataset adapter for the renal carcinoma MID data: loads per-tissue
    # sheets from data.xlsx and serves (target data, input metabolites)
    # pairs for each tissue/patient/repeat combination.
    def __init__(self):
        super().__init__()
        # Metabolites are measured over mixed cytosol/mitochondria pools.
        self.mixed_compartment_list = ('c', 'm')
        self.current_direct = '{}/renal_carcinoma'.format(Direct.data_direct)
        self.file_path = '{}/data.xlsx'.format(self.current_direct)
        # One xlsx sheet per tissue type.
        self.experiment_name_prefix_list = ['kidney', 'carcinoma', 'brain']
        self.test_experiment_name_prefix = 'brain'
        self.test_tissue_index = 1
        self.test_repeat_index = 1
        # Metabolites excluded per tissue.
        # NOTE(review): the reason for excluding 3-phosphoglycerate in
        # brain is not visible here — confirm with the data source.
        self.exclude_metabolites_dict = {
            'brain': {'3-phosphoglycerate'}
        }
        # Loader parameters for the full dataset, one entry per sheet.
        self._complete_data_parameter_dict_dict = {
            current_sheet_name: {
                'xlsx_file_path': self.file_path,
                'xlsx_sheet_name': current_sheet_name,
                'index_col_name': CommonKeywords.metabolite_name_col,
                'mixed_compartment_list': self.mixed_compartment_list,
                'to_standard_name_dict': data_metabolite_to_standard_name_dict}
            for current_sheet_name in self.experiment_name_prefix_list}
        # Loader parameters for the test dataset (brain sheet only).
        self._test_data_parameter_dict_dict = {
            DataType.test: {
                'xlsx_file_path': self.file_path,
                'xlsx_sheet_name': self.test_experiment_name_prefix,
                'index_col_name': CommonKeywords.metabolite_name_col,
                'mixed_compartment_list': self.mixed_compartment_list,
                'to_standard_name_dict': data_metabolite_to_standard_name_dict}}
        self.complete_input_metabolite_data_dict = {}

    @staticmethod
    def project_name_generator(tissue_name, tissue_index, repeat_index):
        # Human-readable id like "kidney__1_2".
        return '{}__{}_{}'.format(tissue_name, tissue_index, repeat_index)

    def add_data_sheet(self, sheet_name, current_data_dict):
        # Ingest one sheet of MID data into self.complete_dataset, keyed
        # as [sheet_name][tissue_index][repeat_index].
        if self.anti_correction:
            # Undo the natural-abundance correction, then verify no
            # negative values were produced.
            # NOTE(review): indentation reconstructed — check_negative_data_array
            # is assumed to run only in the anti_correction branch; confirm.
            for column_name, each_column_data_dict in current_data_dict.items():
                natural_distribution_anti_correction(each_column_data_dict)
            check_negative_data_array(current_data_dict, [])
        final_result_dict = self.complete_dataset
        if sheet_name not in final_result_dict:
            final_result_dict[sheet_name] = {}
        for data_label, specific_data_dict in current_data_dict.items():
            # Column labels look like "<prefix>_<tissue>_<repeat>".
            _, tissue_index_str, repeat_index_str = data_label.split('_')
            tissue_index = int(tissue_index_str)
            repeat_index = int(repeat_index_str)
            try:
                current_excluded_metabolites_set = self.exclude_metabolites_dict[sheet_name]
            except KeyError:
                # NOTE(review): {} is an empty dict, not a set; iteration
                # below still works but the type is inconsistent.
                current_excluded_metabolites_set = {}
            for excluded_metabolite_name in current_excluded_metabolites_set:
                pop_item = specific_data_dict.pop(excluded_metabolite_name, None)
            if tissue_index not in final_result_dict[sheet_name]:
                final_result_dict[sheet_name][tissue_index] = {}
            final_result_dict[sheet_name][tissue_index][repeat_index] = specific_data_dict

    def _complete_return_dataset(self, param_dict):
        # Resolve one (tissue, patient, repeat) request; the special
        # repeat value "average" returns the mean over index_average_list.
        tissue_name = param_dict[Keyword.tissue]
        tissue_index = param_dict[Keyword.patient]
        repeat_index = param_dict[Keyword.index]
        if repeat_index == CommonKeywords.average:
            final_target_metabolite_data_dict = average_mid_data_dict(
                self.complete_dataset[tissue_name][tissue_index], Keyword.index_average_list)
        else:
            final_target_metabolite_data_dict = self.complete_dataset[
                tissue_name][tissue_index][repeat_index]
        project_name = self.project_name_generator(tissue_name, tissue_index, repeat_index)
        final_input_metabolite_data_obj_dict = input_metabolite_data_obj_dict_generator(tissue_name, tissue_index)
        return project_name, final_target_metabolite_data_dict, final_input_metabolite_data_obj_dict

    def _test_return_dataset(self):
        # Fixed test sample; no input metabolite objects are provided.
        final_target_metabolite_data_dict = self.complete_dataset[
            DataType.test][self.test_tissue_index][self.test_repeat_index]
        project_name = DataType.test
        final_input_metabolite_data_dict = None
        return project_name, final_target_metabolite_data_dict, final_input_metabolite_data_dict
| LocasaleLab/Automated-MFA-2023 | scripts/data/renal_carcinoma/specific_data_parameters.py | specific_data_parameters.py | py | 5,412 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "c13_glucose_enrichment_plasma.glucose_enrichment_plasma_dict",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "config.default_glucose_infusion_labeled_ratio",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "common_functions.glucose_infusion_inpu... |
26469617014 | from ....key import Address
from ....hint import MBC_USER_STATISTICS, MBC_VOTING_CANDIDATE
from ....common import Int, MitumFactor, _hint, concatBytes
class Candidate(MitumFactor):
    """A single voting candidate entry for a blockcity document."""

    def __init__(self, address, nickname, manifest, count):
        # The model caps manifest text at 100 characters.
        assert len(manifest) <= 100, 'manifest length is over 100! (len(manifest) <= 100); Candidate.__init__'
        self.hint = _hint(MBC_VOTING_CANDIDATE)
        self.address = Address(address)
        self.nickname = nickname
        self.manifest = manifest
        self.count = Int(count)

    def bytes(self):
        """Concatenate the byte forms of all fields, in declaration order."""
        return concatBytes(
            self.address.bytes(),
            self.nickname.encode(),
            self.manifest.encode(),
            self.count.bytes(),
        )

    def dict(self):
        """Serialize the candidate to a plain dict."""
        return {
            '_hint': self.hint.hint,
            'address': self.address.address,
            'nickname': self.nickname,
            'manifest': self.manifest,
            'count': self.count.value,
        }
class UserStatistics(object):
    # Seven character statistics for a blockcity user document.
    def __init__(self, hp, str, agi, dex, cha, intel, vital):
        # NOTE(review): the parameter `str` shadows the builtin; kept
        # as-is for interface compatibility.
        self.hint = _hint(MBC_USER_STATISTICS)
        self.hp = Int(hp)
        self.str = Int(str)
        self.agi = Int(agi)
        self.dex = Int(dex)
        self.cha = Int(cha)
        self.intel = Int(intel)
        self.vital = Int(vital)

    def bytes(self):
        # Concatenate all statistics' byte forms in declaration order.
        return concatBytes(
            self.hp.bytes(),
            self.str.bytes(),
            self.agi.bytes(),
            self.dex.bytes(),
            self.cha.bytes(),
            self.intel.bytes(),
            self.vital.bytes()
        )

    def dict(self):
        # Serialize to a plain dict; keys follow the mitum field names
        # (note: abbreviated attributes map to full-word keys).
        statistics = {}
        statistics['_hint'] = self.hint.hint
        statistics['hp'] = self.hp.value
        statistics['strength'] = self.str.value
        statistics['agility'] = self.agi.value
        statistics['dexterity'] = self.dex.value
        statistics['charisma'] = self.cha.value
        statistics['intelligence'] = self.intel.value
        statistics['vital'] = self.vital.value
return statistics | ProtoconNet/mitum-py-util | src/mitumc/operation/document/blockcity/base.py | base.py | py | 2,222 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "common.MitumFactor",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "common._hint",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "hint.MBC_VOTING_CANDIDATE",
"line_number": 9,
"usage_type": "argument"
},
{
"api_name": "key.Addres... |
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 21 11:01:53 2017

@author: PiotrTutak
"""
import numpy as np
import scipy.linalg as lg
import matplotlib.pyplot as plt

# Presumably a 1-D steady-state heat conduction problem solved with four
# two-node finite elements: heat flux q at the left end, convection
# (alfa, tInf) at the right end — the matrix structure below matches this;
# confirm against the course material.
print("Podaj L1 L2 L3 L4")
L=[float(x) for x in input().strip().split()]
print('Podaj k S q alfa tInf')
k,S,q,alfa,tInf=(float(x) for x in input().strip().split())
# Element conductances C_i = k*S/L_i.
C=[k*S/l for l in L]
# Node coordinates: cumulative sums of element lengths, starting at 0.
L=[sum(L[:i]) for i in range(len(L)+1)]
# Global stiffness matrix for the five nodes; the last diagonal entry
# includes the convective term alfa*S.
A=np.array([
    [C[0],-C[0],0,0,0],
    [-C[0],C[0]+C[1],-C[1],0,0],
    [0,-C[1],C[1]+C[2],-C[2],0],
    [0,0,-C[2],C[2]+C[3],-C[3]],
    [0,0,0,-C[3],C[3]+alfa*S]
])
# Load vector: flux at node 0, convective sink at the last node.
P=np.array([
    q*S,
    0,
    0,
    0,
    -alfa*S*tInf
])
P=-P
#t=np.linalg.solve(A,P)
t=lg.solve(A,P)  # nodal temperatures
print(t)
plt.plot(L,t,'ro')
plt.show()
| ptutak/MES | zad01.py | zad01.py | py | 748 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "scipy.linalg.solve",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "scipy.linalg",
"line_n... |
1298646891 | from moviepy.editor import *
import os
from natsort import natsorted
# Collect all .mp4 clips under the course folder in natural (human) sort
# order, then concatenate them into a single video.
L =[]
for root, dirs, files in os.walk("D:\\Sujay\\German\\Best Way to Learn German Language-Full Beginner Course-A1.1\\New folder"):
    #files.sort()
    # Natural sort so "2.mp4" precedes "10.mp4".
    files = natsorted(files)
    for file in files:
        if os.path.splitext(file)[1] == '.mp4':
            filePath = os.path.join(root, file)
            # NOTE(review): VideoFileClip handles are never closed; for a
            # large number of clips this can exhaust file descriptors.
            video = VideoFileClip(filePath)
            L.append(video)
final_clip = concatenate_videoclips(L)
final_clip.to_videofile("output.mp4", fps=24, remove_temp=False) | Sujay-Mhaske/Join-video | vid_join.py | vid_join.py | py | 565 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.walk",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "natsort.natsorted",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number"... |
21052629609 | import requests
from bs4 import BeautifulSoup
import json
tokens = []
# Scrape the first 11 pages of Etherscan's token listing; each table row
# yields the token's contract address, logo URL and display name.
for x in range(1, 12):
    result = requests.get("https://etherscan.io/tokens?p=" + str(x))
    c = result.content
    soup = BeautifulSoup(c, "html.parser")
    samples = soup.find_all("tr")
    for sample in samples:
        try:
            # Only rows whose second cell links to a /token/<address> page
            # are token entries; header/other rows raise and are skipped.
            if ("token" in sample.find_all("td")[1].find("a")['href']):
                tokens.append({"address": sample.find_all("td")[1].find("a")['href'].replace("/token/", ""), "image": sample.find_all("td")[1].find("img")['src'], "name": sample.find_all("td")[2].find("a").text})
        except Exception as e:
            # NOTE(review): broad catch-and-continue hides parsing errors;
            # presumably intentional to skip malformed rows.
            continue
print(json.dumps(tokens)) | markchipman/inklin | get_tokens.py | get_tokens.py | py | 673 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 25,
"usage_type": "call"
}
] |
32829769716 | from collections import deque
# BFS 함수 정의
def bfs(sx, sy, ex, ey, size=None, board=None):
    """Breadth-first search of knight moves from (sx, sy).

    Fills *board* so that board[x][y] holds the number of knight moves
    needed to reach (x, y); the start square keeps 0.

    Generalization (backward compatible): *size* and *board* default to
    the module-level globals ``i`` and ``graph`` the original script
    relied on, but can now be passed explicitly.
    """
    if size is None:
        size = i
    if board is None:
        board = graph
    # Start equals target: zero moves, nothing to explore.
    if sx == ex and sy == ey:
        return
    queue = deque([(sx, sy)])
    # The eight knight move offsets.
    dx = [-2, -1, 1, 2, 2, 1, -1, -2]
    dy = [1, 2, 2, 1, -1, -2, -2, -1]
    while queue:
        x, y = queue.popleft()
        for d in range(8):
            nx = x + dx[d]
            ny = y + dy[d]
            # Skip squares outside the board.
            if nx < 0 or nx >= size or ny < 0 or ny >= size:
                continue
            # First visit: record the move count and enqueue.
            if board[nx][ny] == 0:
                queue.append((nx, ny))
                board[nx][ny] = board[x][y] + 1
t = int(input())
# Repeat for each test case.
for _ in range(t):
    i = int(input())  # board side length
    sx, sy = map(int, input().split())  # knight's current square
    ex, ey = map(int, input().split())  # knight's target square
    graph = [[0] * i for _ in range(i)]  # i x i board of move counts
    # Run BFS from the start square.
    bfs(sx, sy, ex, ey)
    # Print the minimum number of moves to the target.
    print(graph[ex][ey])
| veluminous/CodingTest | 백준 실전 문제/[백준 7562 DFS&BFS] 나이트의 이동.py | [백준 7562 DFS&BFS] 나이트의 이동.py | py | 1,306 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 9,
"usage_type": "call"
}
] |
70811192105 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
import logging
import datetime
from google.appengine.api import xmpp
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import taskqueue
import gaetalk
import config
import utils
class XMPPSub(webapp.RequestHandler):
    '''Someone added us as a contact; may be triggered multiple times.'''
    def post(self):
        jid = self.request.get('from')
        gaetalk.try_add_user(jid)
class XMPPUnsub(webapp.RequestHandler):
    '''A contact removed us; delete the user under a shared lock.'''
    def post(self):
        # Note: because of the gtalk client's poor error handling, a
        # command-based way to leave is provided as well.
        jid = self.request.get('from')
        L = utils.MemLock('delete_user')
        L.require()
        try:
            gaetalk.del_user(jid)
        finally:
            L.release()
class XMPPMsg(webapp.RequestHandler):
    '''Dispatch an incoming chat message to the gaetalk handler.'''
    def post(self):
        try:
            message = xmpp.Message(self.request.POST)
            gaetalk.handle_message(message)
        except xmpp.InvalidMessageError:
            logging.info('InvalidMessageError: %r' % self.request.POST)
class XMPPAvail(webapp.RequestHandler):
    '''Presence handler: a contact became available.'''
    def post(self):
        '''show can be away, dnd (busy) or empty (online)'''
        jid, resource = self.request.get('from').split('/', 1)
        status = self.request.get('status')
        show = self.request.get('show')
        logging.debug(u'%s 的状态: %s (%s)' % (jid, status, show))
        # Map the XMPP show string to the internal status code.
        try:
            show = gaetalk.STATUS_CODE[show]
        except KeyError:
            logging.error('%s has sent an incorrect show code %s' % (jid, show))
            return
        # Answer with our own presence; abort on failure.
        try:
            gaetalk.send_status(self.request.get('from'))
        except xmpp.Error:
            logging.error('Error while sending presence to %s' % jid)
            return
        u = gaetalk.get_user_by_jid(jid)
        if u is not None:
            modified = False
            # Track every resource (client instance) the user is online from.
            if resource not in u.resources:
                u.resources.append(resource)
                modified = True
            if u.avail != show:
                # Coming from offline: remember when the user came online.
                if u.avail == gaetalk.OFFLINE:
                    u.last_online_date = datetime.datetime.now()
                u.avail = show
                modified = True
            if modified:
                gaetalk.log_onoff(u, show, resource)
                u.put()
            # Warn clients known to transmit in cleartext (Talk.v105).
            if config.warnGtalk105 and resource.startswith('Talk.v105'):
                xmpp.send_message(jid, u'您的客户端使用明文传输数据,为了大家的安全,请使用Gtalk英文版或者其它使用SSL加密的客户端。')
        else:
            # Unknown jid: treat as a new user joining.
            gaetalk.try_add_user(jid, show, resource)
class XMPPUnavail(webapp.RequestHandler):
    '''A contact went offline; defer the bookkeeping to a task queue.'''
    def post(self):
        jid, resource = self.request.get('from').split('/', 1)
        logging.info(u'%s 下线了' % jid)
        taskqueue.add(url='/_admin/queue', queue_name='userunavailable', params={'jid': jid, 'resource': resource})
class XMPPProbe(webapp.RequestHandler):
    '''Answer a presence probe by sending our status back.'''
    def post(self):
        fulljid = self.request.get('from')
        try:
            gaetalk.send_status(fulljid)
        except xmpp.Error:
            logging.error('Error while sending presence to %s' % fulljid)
class XMPPDummy(webapp.RequestHandler):
    '''No-op handler for subscription events we deliberately ignore.'''
    def post(self):
        pass
class UserUnavailable(webapp.RequestHandler):
    '''Task-queue worker: drop a resource; mark the user offline when
    no resources remain.'''
    def post(self):
        jid = self.request.get('jid')
        resource = self.request.get('resource')
        u = gaetalk.get_user_by_jid(jid)
        if u is not None:
            if resource in u.resources:
                u.resources.remove(resource)
            # NOTE(review): indentation reconstructed — the persist and
            # offline log are assumed to apply only when the last resource
            # disappears; confirm against the repository.
            if not u.resources:
                u.avail = gaetalk.OFFLINE
                u.last_offline_date = datetime.datetime.now()
                u.put()
                gaetalk.log_onoff(u, gaetalk.OFFLINE, resource)
# URL routing: App Engine XMPP webhooks plus the internal task-queue endpoint.
application = webapp.WSGIApplication(
    [
        ('/_ah/xmpp/subscription/subscribed/', XMPPSub),
        ('/_ah/xmpp/subscription/unsubscribed/', XMPPUnsub),
        ('/_ah/xmpp/message/chat/', XMPPMsg),
        ('/_ah/xmpp/presence/available/', XMPPAvail),
        ('/_ah/xmpp/presence/unavailable/', XMPPUnavail),
        ('/_ah/xmpp/presence/probe/', XMPPProbe),
        ('/_ah/xmpp/subscription/subscribe/', XMPPDummy),
        ('/_ah/xmpp/subscription/unsubscribe/', XMPPDummy),
        ('/_admin/queue', UserUnavailable),
    ],
    debug=True)
def main():
    # Hand the WSGI application to the App Engine runtime.
    run_wsgi_app(application)

if __name__ == "__main__":
    main()
| lilydjwg/gaetalk | chatmain.py | chatmain.py | py | 4,035 | python | en | code | 22 | github-code | 36 | [
{
"api_name": "google.appengine.ext.webapp.RequestHandler",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "google.appengine.ext.webapp",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "gaetalk.try_add_user",
"line_number": 19,
"usage_type": "call... |
43823721553 | import json
import random
import re
import os
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from tqdm import tqdm
import requests
from template import *
# Local HTTP(S) proxy; currently commented out at the request call site.
proxies = {
    'http': '127.0.0.1:9898',
    'https': '127.0.0.1:9898',
}
# Shared key-pool state used by the worker threads:
#   ori_keys      {key: enabled} as loaded from disk
#   keys          the enabled keys
#   unused_keys   keys currently idle
#   used_keys     keys checked out for an in-flight request
#   overload_keys (key, timestamp) pairs cooling down after a rate limit
#   invalid_keys  keys whose quota is exhausted
ori_keys = json.load(open("../../data/120_key1.json"))
keys = [key for key, v in ori_keys.items() if v]
unused_keys = keys.copy()
used_keys = []
overload_keys = []
invalid_keys = []
def get_valid_key():
    """Check out one API key from the shared pool, blocking until one is free.

    BUG FIX: the original recycled cooled-down keys only once, *before*
    entering the wait loop; if every key happened to be rate-limited at
    that moment, ``while not unused_keys: time.sleep(5)`` spun forever
    because nothing refilled ``unused_keys``.  The cooldown scan now runs
    on every wait iteration.
    """
    global unused_keys, used_keys, overload_keys
    while True:
        current_time = time.time()
        # Move keys whose 60 s rate-limit cooldown has elapsed back into
        # the usable pool.
        still_cooling = []
        for key, timestamp in overload_keys:
            if current_time - timestamp >= 60:
                unused_keys.append(key)
            else:
                still_cooling.append((key, timestamp))
        overload_keys = still_cooling
        if unused_keys:
            break
        time.sleep(5)
    key = random.choice(unused_keys)
    unused_keys.remove(key)
    used_keys.append(key)
    return key
def make_chat_request(prompt, max_length=1024, timeout=10, logit_bias=None, max_retries=5):
    """POST one chat-completion request, retrying with different keys.

    Returns the parsed JSON response on success, or None when every
    retry fails (unchanged from the original, implicit None).

    Fixes over the original:
    - a key used for a request that returned an API error (or an
      unexpected status) was left stranded in ``used_keys``; it is now
      always removed there before being routed to ``invalid_keys`` /
      ``overload_keys`` / back to ``unused_keys``.
    - the network-error retry path halved ``logit_bias`` without a None
      check, which raised AttributeError on the first failure when the
      caller used the default.
    """
    global unused_keys, used_keys, overload_keys
    for _ in range(max_retries):
        key = get_valid_key()
        try:
            with requests.post(
                url=f"https://api.openai.com/v1/chat/completions",
                headers={"Authorization": f"Bearer {key}"},
                json={
                    "model": "gpt-3.5-turbo",
                    "temperature": 1.0,
                    "messages": [{'role': 'user', 'content': prompt}],
                    "max_tokens": max_length,
                    "top_p": 1.0,
                    "logit_bias": logit_bias,
                },
                # proxies=proxies,
                timeout=timeout
            ) as resp:
                used_keys.remove(key)
                if resp.status_code == 200:
                    unused_keys.append(key)
                    return json.loads(resp.content)
                error = json.loads(resp.content).get('error')
                if error:
                    print(error)
                    if error['message'] == "You exceeded your current quota, please check your plan and billing details.":
                        invalid_keys.append(key)
                    else:
                        # Rate-limited: cool the key down for a minute.
                        overload_keys.append((key, time.time()))
                else:
                    # Non-200 without an error payload: give the key
                    # straight back and retry.
                    unused_keys.append(key)
        except requests.exceptions.RequestException as e:
            # Network failure: return the key, back off with a longer
            # timeout, and progressively shrink logit_bias.
            if key in used_keys:
                used_keys.remove(key)
                unused_keys.append(key)
            timeout += 5
            if logit_bias is None or timeout >= 20:
                logit_bias = {"13": -100, "4083": -100}
            else:
                logit_bias = dict(list(logit_bias.items())[:int(len(logit_bias) / 2)])
            print(f"Error with key {key}: {e}")
def get_uncompleted_data(file_path, out_path):
    """Return the input records whose queries have not completed successfully.

    Reads all input records from ``file_path`` and prior results from
    ``out_path``. A result counts as completed only when its output is not
    the ``["network error"]`` failure marker. If anything is uncompleted,
    ``out_path`` is rewritten keeping only the genuinely completed rows.

    Improvements over the original: files are opened with context managers
    (they were previously leaked), each file is read exactly once, and each
    line is json-decoded once instead of twice.

    :param file_path: JSONL file of input records, each with a "uuid" key.
    :param out_path: JSONL file of results, each {"input": {...}, "output": ...}.
    :return: list of input records still needing processing (possibly empty).
    """
    with open(file_path) as f:
        inputs = [json.loads(line) for line in f]
    with open(out_path) as f:
        results = [json.loads(line) for line in f]
    all_uuids = {rec["uuid"] for rec in inputs}
    completed_uuids = {r['input']["uuid"] for r in results if r["output"] != ["network error"]}
    completed_data = [r for r in results if r['input']["uuid"] in completed_uuids]
    uncompleted_uuids = all_uuids - completed_uuids
    if uncompleted_uuids:
        # Prune failed rows: rewrite the output file with completed rows only.
        with open(out_path, "w") as f:
            for item in completed_data:
                f.write(json.dumps(item) + "\n")
    return [rec for rec in inputs if rec["uuid"] in uncompleted_uuids]
def pross_answer(input_string):
    """Normalize a model reply to 'yes'/'no'/'unknown' when it starts with one.

    Any other reply is passed through unchanged.
    """
    for label in ("yes", "no", "unknown"):
        if input_string.startswith(label):
            return label
    return input_string
def process_one_data(args):
    """Run one (data, relation, mode) query against the chat API and log it.

    Appends {"input": data, "output": answer} as a JSON line to the
    per-relation result file and returns "success".
    """
    data, relation, mode = args
    try:
        # SECURITY: eval on dataset-supplied strings executes arbitrary code —
        # consider ast.literal_eval if the payloads are plain literals.
        data = eval(data)
    except:
        data = data
    prompt, logit_bias = data['query']["prompt"], data['query']["logit_bias"]
    answer = make_chat_request(prompt, logit_bias=logit_bias)
    try:
        answer = answer['choices'][0]['message']['content']
        answer = pross_answer(answer.strip().lower())
    except:
        # make_chat_request returned None or a malformed payload.
        answer = ["network error"]
    item = {
        "input": data,
        "output": answer
    }
    with open(f"./data/{mode}/query_result/{relation}.json", "a") as f:
        f.write(json.dumps(item) + "\n")
    return "success"
def process_all_data(data_list, relation, mode):
    """Fan the relation's queries out over a thread pool with a progress bar.

    Worker count is capped by CPU count and by the number of still-valid keys.
    """
    results = []
    worker_count = min(os.cpu_count(), len(keys) - len(invalid_keys))
    with ThreadPoolExecutor(max_workers=worker_count) as pool:
        pending = {pool.submit(process_one_data, (item, relation, mode)): item
                   for item in data_list}
        with tqdm(total=len(data_list), desc=f"{relation, relation_list.index(relation)}") as bar:
            for finished in as_completed(pending):
                try:
                    results.append(finished.result())
                except Exception as err:
                    print(f"Error occurred while processing data: {err}")
                bar.update(1)
# Mark quota-exhausted keys as unusable for future runs.
for id in invalid_keys:
    ori_keys[id] = False
# Write the updated key statuses back to the JSON file.
with open("../../data/120_key1.json", 'w') as file:
    json.dump(ori_keys, file)
| bigdante/nell162 | backup/verification/chatgpt_gen_yes_no/utils.py | utils.py | py | 5,313 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 36,... |
74704594025 | import pandas as pd
import numpy as np
import regex as re
# the usual import horror in python
# https://stackoverflow.com/questions/35166821/valueerror-attempted-relative-import-beyond-top-level-package
from ...config.config import Config
class ExperimentalPlan:
    '''
    Class for creating an experimental plan based on DoE.

    planType [str]: type of DoE plan (e.g. plackett-burman, lhs, ...);
                    its configuration params (number of factors, levels, ...)
                    depend on pyDOE2 specs.
    rawPlan [np.array]: raw plan with abstract values (usually -1, 0,
                        1 but depends on type)
    factorPlan [pd.DataFrame]: plan with real factor values
    factorList [list]: factor names read from factors.csv
    nrTests [int]: number of test runs of the plan
    '''
    def __init__(self, config: Config):
        self.factorFile = 'factors.csv'
        self.planType = config.planType
        self.rawPlan = np.array(0)
        self.factorPlan = pd.DataFrame()
        self.factorList = []
        self.nrTests = 0
        print('\t\tExperimental Plan created: plan_%s.csv'%config.planType )

    def setNrTests(self):
        # One test run per row of the raw plan.
        self.nrTests = len(self.rawPlan)

    def setFactorList(self):
        # Factor names are the 'name' index column of factors.csv.
        self.factorList = list(pd.read_csv(self.factorFile, index_col='name').index)

    def convertPlanToRangeZeroOne(self):
        # Min-max normalize every column of the raw plan into [0, 1].
        rawPlanRangeZeroOne = np.zeros((len(self.rawPlan[:, 0]), len(self.rawPlan[0, :])))
        # loop through columns of rawPlan
        for j in range(len(self.rawPlan[0, :])):
            factorCol = self.rawPlan[:, j]
            mini = min(factorCol)
            maxi = max(factorCol)
            # loop through rows of rawPlan
            for i in range(len(factorCol)):
                currentCell = float(self.rawPlan[i, j])
                rawPlanRangeZeroOne[i, j] = 0 + (1 - 0) * (currentCell - mini) / (maxi - mini)
        self.rawPlan = rawPlanRangeZeroOne

    def printFactorPlanToFile(self, pathToPlanFile):
        # Persist the real-valued plan as CSV.
        self.factorPlan.to_csv(pathToPlanFile)

    def printRawPlanToFile(self, pathToRawPlanFile):
        # Persist the abstract plan as CSV with factor names as header.
        pd.DataFrame(self.rawPlan).to_csv(pathToRawPlanFile,
                                          header=self.factorList)

    def getFactorValuesOfTestRun(self, testNr):
        # Map factor name -> value for one test run (row of the factor plan).
        return dict(self.factorPlan.iloc[testNr])

    def checkFactorMatchingToRawPlan(self):
        # checking that numbers of factors in factors.csv matches the
        # configuration parameters from *.conf
        nrFactorsCSV = len(self.factorList)
        nrFactorsRawPlan = len(self.rawPlan[0, :])
        if nrFactorsCSV != nrFactorsRawPlan:
            raise ValueError(
                'The number of factors in factors.csv does not match to the plan created with config.conf.')

    def convertRawPlanToFactorPlan(self, pathToFactorFile):
        # Scale the [0, 1] raw plan into the real min/max ranges of factors.csv.
        # Factors whose min/max is a math expression over other factors are
        # deferred to a second pass, once the plain factors are computed.
        dfFactors = pd.read_csv(pathToFactorFile, index_col='name')
        self.factorPlan = pd.DataFrame(self.rawPlan.copy())
        self.factorPlan.columns = self.factorList
        # loop through all factors (columns of rawPlan)
        j = 0
        factorsWithExprList = []
        posOfFactorWithExpr = []
        for factor in self.factorList:
            factorCol = self.rawPlan[:, j].copy()
            factorMin = str(dfFactors.loc[factor].at['min'])
            factorMax = str(dfFactors.loc[factor].at['max'])
            # check if factor min is a number (float or int)
            if re.match('[\-|\+]?[0-9]+[\.]?[0-9]*', factorMin) is None \
                    or re.match('[\-|\+]?[0-9]+[\.]?[0-9]*', factorMax) is None:
                # if true factorMin/Max should be a math expression like 'a+b/2'
                # it is necessary to save these columns for later because they
                # depend on other factors values which need to be calculated first
                factorsWithExprList.append(factor)
                posOfFactorWithExpr.append(j)
                # these are dummy values that no error occurs
                factorMin = 0
                factorMax = 0
            factorMin = float(factorMin)
            factorMax = float(factorMax)
            factorCol *= factorMax - factorMin
            factorCol += factorMin
            # overwrite column of factorPlan
            self.factorPlan[factor] = factorCol
            j += 1
        # loop through the previous saved factor with expression in factorMin/Max
        factorRegex = '|'.join(self.factorList)
        j = 0
        for factorWithExpr in factorsWithExprList:
            factorCol = self.rawPlan[:, posOfFactorWithExpr[j]]
            factorMin = str(dfFactors.loc[factorWithExpr].at['min'])
            factorMax = str(dfFactors.loc[factorWithExpr].at['max'])
            if re.match('[\-|\+]?[0-9]+[\.]?[0-9]*', factorMin) is None:
                factorMin = self.__calcMinMaxForStrExpression(factorCol, factorRegex, factorMin)
            else:
                factorMin = float(factorMin)
            if re.match('[\-|\+]?[0-9]+[\.]?[0-9]*', factorMax) is None:
                factorMax = self.__calcMinMaxForStrExpression(factorCol, factorRegex, factorMax)
            else:
                factorMax = float(factorMax)
            factorCol *= factorMax - factorMin
            factorCol += factorMin
            # overwrite column of factorPlan
            self.factorPlan[factorWithExpr] = factorCol
            j += 1

    def __calcMinMaxForStrExpression(self, factorCol, factorRegex, minMax):
        # Evaluate a per-test min/max expression like 'a+b/2' by substituting
        # already-computed factor values, one row at a time.
        # SECURITY: eval runs arbitrary code from factors.csv.
        minMaxCol = np.zeros(len(factorCol))
        # loop through all tests
        i = 0
        for testNr in range(len(factorCol)):
            # get all factors, operators (+-*/) and number (float or int)
            expressionList = re.findall('%s|[+|\-|\*|/]|[[0-9]+[\.]?[0-9]*' % (factorRegex), minMax)
            # extract factors from expressionlist
            factorsInExprList = list(set(expressionList) & set(self.factorList))
            # calculate values for factor with min or max
            # NOTE(review): each iteration replaces into the ORIGINAL minMax
            # string, so only the last factor's substitution survives when the
            # expression references several factors — likely a bug; should
            # accumulate replacements (factorExpr = factorExpr.replace(...)).
            for factorInExpr in factorsInExprList:
                factorValue = self.factorPlan.loc[i].at[factorInExpr]
                factorExpr = minMax.replace(factorInExpr, str(factorValue))
            # calculate expression
            minMaxCol[i] = eval(factorExpr)
            i += 1
        return minMaxCol
| csRon/autodoe | src/preProcessor/experimentalPlans/experimentalPlan.py | experimentalPlan.py | py | 6,290 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "config.config.Config",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "config.config.planType",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "config.config",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "numpy.... |
3276810925 | from django.contrib.auth import get_user_model
from django.db.models import F, Sum
from django.http.response import HttpResponse
from django_filters.rest_framework import DjangoFilterBackend
from djoser.views import UserViewSet as DjoserUserViewSet
from recipes.models import (AmountIngredientRecipe, Favorite, Follow,
Ingredient, Recipe, ShoppingCart, Tag)
from rest_framework import permissions, status, viewsets
from rest_framework.decorators import action
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from .filters import IngredientFilter, RecipeFilter
from .mixins import FavoriteShoppingcartMixin
from .permissions import IsOwnerAdminOrReadOnly
from .serializers import (FollowSerializer, IngredientSerializer,
RecipesSerializer, TagSerializer)
# Active user model configured for the project.
User = get_user_model()


class UsersViewSet(DjoserUserViewSet):
    """Viewset for users."""
    @action(
        methods=['post', 'delete'],
        detail=True,
        permission_classes=[permissions.IsAuthenticated],
    )
    def subscribe(self, request, id):
        """
        Create or delete a subscription to an author.

        :param request: request data.
        :param id: id of the user to subscribe to (or unsubscribe from).
        :return: 201 with the serialized author, 204 on delete, 400 on error.
        """
        user = request.user
        author = get_object_or_404(User, id=id)
        is_subscribed = Follow.objects.filter(
            user=user,
            author=author
        ).exists()
        if request.method == 'DELETE':
            if not is_subscribed:
                return Response(
                    {'errors': 'Вы не были подписаны на этого автора'},
                    status=status.HTTP_400_BAD_REQUEST
                )
            Follow.objects.get(user=user, author=author).delete()
            return Response(status=status.HTTP_204_NO_CONTENT)
        # POST branch: self-subscription and duplicates are rejected.
        if user == author:
            return Response(
                {'errors': 'Подписка самого на себя невозможна'},
                status=status.HTTP_400_BAD_REQUEST
            )
        if is_subscribed:
            return Response(
                {'errors': 'Вы уже подписаны на этого пользователя'},
                status=status.HTTP_400_BAD_REQUEST
            )
        Follow.objects.create(user=user, author=author)
        serializer = FollowSerializer(
            author,
            context={'request': request}
        )
        return Response(serializer.data, status=status.HTTP_201_CREATED)

    @action(
        methods=['get'],
        detail=False,
        permission_classes=[permissions.IsAuthenticated],
    )
    def subscriptions(self, request):
        """
        Get all users the current user is subscribed to.

        :param request: request data.
        :return: paginated data serialized with FollowSerializer.
        """
        queryset = User.objects.filter(following__user_id=request.user.id)
        page = self.paginate_queryset(queryset)
        serializer = FollowSerializer(
            page,
            many=True,
            context={'request': request}
        )
        return self.get_paginated_response(serializer.data)
class TagViewSet(viewsets.ReadOnlyModelViewSet):
    """
    Viewset for tags.
    Tags are read-only.
    """
    queryset = Tag.objects.all()
    serializer_class = TagSerializer
    pagination_class = None  # tags are few; return them all at once
class IngredientViewSet(viewsets.ReadOnlyModelViewSet):
    """
    Viewset for ingredients.
    Ingredients are read-only.
    """
    queryset = Ingredient.objects.all()
    serializer_class = IngredientSerializer
    pagination_class = None  # clients filter by name instead of paging
    filter_backends = (DjangoFilterBackend,)
    filterset_class = IngredientFilter
class RecipesViewSet(viewsets.ModelViewSet, FavoriteShoppingcartMixin):
    """Viewset for recipes."""
    queryset = Recipe.objects.all()
    serializer_class = RecipesSerializer
    permission_classes = (IsOwnerAdminOrReadOnly,)
    filter_backends = (DjangoFilterBackend,)
    filterset_class = RecipeFilter

    @action(
        methods=['post', 'delete'],
        detail=True,
        permission_classes=[IsOwnerAdminOrReadOnly],
    )
    def favorite(self, request, pk):
        """
        Add a recipe to (or remove it from) the user's favorites.

        :param pk: id of the recipe.
        :param request: request data.
        :return: the serialized recipe that was added to or removed
            from favorites.
        """
        return self.add_del_to_db(
            request=request,
            pk=pk,
            related_model=Favorite
        )

    @action(
        methods=['post', 'delete'],
        detail=True,
        permission_classes=[IsOwnerAdminOrReadOnly],
    )
    def shopping_cart(self, request, pk):
        """
        Add a recipe to (or remove it from) the shopping cart.

        :param pk: id of the recipe.
        :param request: request data.
        :return: the serialized recipe that was added to or removed
            from the shopping cart.
        """
        return self.add_del_to_db(
            request=request,
            pk=pk,
            related_model=ShoppingCart
        )

    @action(
        methods=['get'],
        detail=False,
        permission_classes=[IsOwnerAdminOrReadOnly],
    )
    def download_shopping_cart(self, request):
        """
        Build a text file listing the ingredients of all recipes in the
        user's shopping cart, with per-ingredient amounts summed.

        :param request: request data.
        :return: a text/plain attachment response.
        """
        user = request.user
        if user.is_anonymous:
            raise permissions.exceptions.AuthenticationFailed
        if not user.shoppingcart.all().exists():
            return Response(status=status.HTTP_400_BAD_REQUEST)
        recipes_of_user = user.shoppingcart.values('recipe')
        ingredients_in_recipes = AmountIngredientRecipe.objects.filter(
            recipe__in=recipes_of_user
        )
        # Sum equal ingredients across recipes in one aggregated query.
        sum_ingredients = ingredients_in_recipes.values(
            ingredient_name=F('ingredient__name'),
            measurement_unit=F('ingredient__measurement_unit')
        ).annotate(amount=Sum('amount'))
        list_ingredients = (f'Список продуктов для пользователя с именем: '
                            f'{user.get_full_name()}\n\n')
        for ingredient in sum_ingredients:
            ingredient_str = (f'{ingredient["ingredient_name"]} '
                              f'({ingredient["measurement_unit"]}) - '
                              f'{ingredient["amount"]}\n')
            list_ingredients += ingredient_str
        file_name = f'shopping_cart_{user.username}.txt'
        response = HttpResponse(
            content=list_ingredients,
            content_type='text/plain; charset=utf-8',
            status=status.HTTP_200_OK,
        )
        response['Content-Disposition'] = f'attachment; filename={file_name}'
        return response
| MihVS/foodgram-project-react | backend/foodgram/api/views.py | views.py | py | 7,796 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "djoser.views.UserViewSet",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.get_object_or_404",
"line_number": 40,
"usage_type": ... |
72136189865 | import os
from flask import jsonify, current_app
from flask_mail import Message
from werkzeug.utils import secure_filename
from PIL import Image
from api import mail
QUESTIONS_PER_PAGE = 5


def paginator(request, data):
    """Return the page of formatted items selected by the ?page= query arg.

    Each item is rendered via its ``format()`` method; pages hold
    QUESTIONS_PER_PAGE entries and an out-of-range page yields [].
    """
    page_number = request.args.get("page", 1, type=int)
    first = (page_number - 1) * QUESTIONS_PER_PAGE
    last = first + QUESTIONS_PER_PAGE
    formatted = [entry.format() for entry in data]
    return formatted[first:last]
ALLOWED_EXTENSIONS = {"png", "jpg", "jpeg", "gif"}


def allowed_file(filename):
    """Return True when *filename* carries an allowed image extension."""
    if "." not in filename:
        return False
    extension = filename.rsplit(".", 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
def save_image(image_file):
    """Save an uploaded image as a 200x200 thumbnail under static/profile_pics.

    :param image_file: uploaded file object exposing a ``filename`` attribute.
    :return: the sanitized file name on success, or the exception text on
        failure.
    """
    try:
        new_image_name = secure_filename(image_file.filename)
        output_folder_path = os.path.join(
            current_app.root_path, "static/profile_pics", new_image_name
        )
        output_size = (200, 200)
        i = Image.open(image_file)
        i.thumbnail(output_size)  # resizes in place, preserving aspect ratio
        i.save(output_folder_path)
        return new_image_name
    except Exception as e:
        # NOTE(review): returning str(e) makes a failure indistinguishable
        # from a valid file name for callers — consider raising instead.
        return str(e)
def send_email(user, subject, sender, body):
    """Send a plain-text email to *user*'s address via the app's mail extension.

    :param user: object with an ``email`` attribute (the recipient).
    :param subject: message subject line.
    :param sender: from-address.
    :param body: plain-text message body.
    """
    msg = Message(subject, sender=sender, recipients=[user.email])
    msg.body = body
    mail.send(msg)
def json_failure(fields=None):
    """JSON body with ``success: False`` merged with any extra *fields*."""
    extra = {} if fields is None else fields
    payload = {"success": False, **extra}
    return jsonify(payload)
def json_success(fields=None):
    """200 response whose JSON body has ``success: True`` plus extra *fields*."""
    extra = {} if fields is None else fields
    payload = {"success": True, **extra}
    return jsonify(payload), 200
def json_404(fields=None):
    """404 response whose JSON body flags failure, plus any extra *fields*.

    Bug fix: this previously returned ``"success": True`` with the 404 status
    (copy-pasted from json_success); a 404 is a failure and now says so,
    consistent with json_failure.
    """
    if fields is None:
        fields = {}
    return jsonify({"success": False, **fields}), 404
| dennisappiah/pong-game-api | api/utils.py | utils.py | py | 1,591 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "werkzeug.utils.secure_filename",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "flask.curren... |
43471363833 | import pandas as pd
import numpy as np
# from sklearn.linear_model import LogisticRegression
# from omegaconf import DictConfig, OmegaConf
from loguru import logger
import joblib
import click
# from dataclasses import dataclass
# from hydra.core.config_store import ConfigStore
# from sklearn.pipeline import Pipeline
# from src.features.transformers import SqrTransformer
# from omegaconf import DictConfig, OmegaConf, MISSING
# from src.models.train_model import Config, RF, LogReg, ModelType
@click.command()
@click.option(
    "--model",
    help="Pretrained model path.",
    type=click.Path(exists=True),
    required=True,
)
@click.option(
    "--dataset",
    help="Input dataset in csv format.",
    type=click.Path(exists=True),
    required=True,
)
@click.option(
    "--output",
    help="Output file with predicted labels.",
    type=click.Path(),
    required=True,
)
def main(model: str, dataset: str, output: str) -> None:
    """Load a fitted model, predict labels for the dataset, write them as CSV."""
    logger.info('Reading data')
    df = pd.read_csv(dataset)
    # Drop the target column when present so only features reach the model.
    if 'condition' in df.columns:
        df = df.drop(['condition'], axis=1)
    # with open(model, 'rb') as file:
    #     model = pickle.load(file)
    # logger.info('Model loaded')
    model = joblib.load(model)
    y_pred = model.predict(df)
    logger.info('Saving results')
    np.savetxt(output, y_pred, delimiter=",")


if __name__ == "__main__":
    main()
| made-mlops-2022/mlops-andrey-talyzin | ml_project/src/models/predict_model.py | predict_model.py | py | 1,397 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "loguru.logger.info",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "loguru.logger",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "joblib.load",
"l... |
28451867185 | from rest_framework import serializers
from core.models import Tag,Ingredient
class TagSerializers(serializers.ModelSerializer):
    '''Serializer for Tag objects.'''
    class Meta:
        model = Tag
        fields = ('id', 'name')
        read_only_fields = ('id',)  # primary key is never client-writable
class IngredientSerializer(serializers.ModelSerializer):
'''serializers for ingredient objects'''
class Meta:
model = Ingredient
fields = ('id','name')
read_only_fields = ('id',) | Manu1John/recipe-app-api | app/recipe/serializers.py | serializers.py | py | 486 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "core.models.Tag",
"line_number": 8,
"usage_type": "name"
},
... |
15853176936 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import warnings
import unittest
from collections import OrderedDict
from w3lib.form import encode_multipart
class EncodeMultipartTest(unittest.TestCase):
    """Tests for w3lib.form.encode_multipart.

    The function emits deprecation warnings, hence the catch_warnings guards.
    """

    def test_encode_multipart(self):
        # Simplest case: a single text field.
        data = {'key': 'value'}
        with warnings.catch_warnings(record=True):
            body, boundary = encode_multipart(data)
        expected_body = (
            '\r\n--{boundary}'
            '\r\nContent-Disposition: form-data; name="key"\r\n'
            '\r\nvalue'
            '\r\n--{boundary}--'
            '\r\n'.format(boundary=boundary).encode('utf8')
        )
        self.assertEqual(body, expected_body)

    def test_encode_multipart_unicode(self):
        # Mixed bytes/unicode field names and values must encode to utf8;
        # OrderedDict pins the field order the expected body assumes.
        data = OrderedDict([
            (u'ключ1', u'значение1'.encode('utf8')),
            (u'ключ2', u'значение2'),
        ])
        with warnings.catch_warnings(record=True):
            body, boundary = encode_multipart(data)
        expected_body = (
            u'\r\n--{boundary}'
            u'\r\nContent-Disposition: form-data; name="ключ1"\r\n'
            u'\r\nзначение1'
            u'\r\n--{boundary}'
            u'\r\nContent-Disposition: form-data; name="ключ2"\r\n'
            u'\r\nзначение2'
            u'\r\n--{boundary}--'
            u'\r\n'.format(boundary=boundary).encode('utf8')
        )
        self.assertEqual(body, expected_body)

    def test_encode_multipart_file(self):
        # this data is not decodable using utf8 — exercises the raw-bytes
        # (filename, payload) tuple form of a field value.
        data = {'key': ('file/name', b'\xa1\xa2\xa3\xa4\r\n\r')}
        with warnings.catch_warnings(record=True):
            body, boundary = encode_multipart(data)
        body_lines = [
            b'\r\n--' + boundary.encode('ascii'),
            b'\r\nContent-Disposition: form-data; name="key"; filename="file/name"\r\n',
            b'\r\n\xa1\xa2\xa3\xa4\r\n\r',
            b'\r\n--' + boundary.encode('ascii') + b'--\r\n',
        ]
        expected_body = b''.join(body_lines)
        self.assertEqual(body, expected_body)
#def test_encode_multipart_int(self):
# data = {'key': 123}
# body, boundary = encode_multipart2(data)
# expected_body = (
# '\n--{boundary}'
# '\nContent-Disposition: form-data; name="key"\n'
# '\n123'
# '\n--{boundary}--'
# '\n'.format(boundary=boundary)
# )
# self.assertEqual(body, expected_body)
| bertucho/epic-movie-quotes-quiz | dialogos/build/w3lib/tests/test_form.py | test_form.py | py | 2,473 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "warnings.catch_warnings",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "w3lib.form.encode_multipart",
"line_number": 14,
"usage_type": "call"
},
{
"api_nam... |
38591539805 | from elasticsearch import Elasticsearch
from search import search_user_query
class ESClient:
    """Thin wrapper around the local Elasticsearch node holding the song corpus."""

    def __init__(self):
        self.es = Elasticsearch("http://localhost:9200")

    def _match(self, field, value):
        # Every text field is queried through its case/inflection-insensitive subfield.
        return {"match": {field + ".case_insensitive_and_inflections": value}}

    def extract_songs(self, resp):
        """Pull the _source documents out of a raw search response."""
        return [hit["_source"] for hit in resp["hits"]["hits"]]

    def get_all_songs(self):
        """Return every song in the index."""
        query = {"query": {"match_all": {}}}
        resp = self.es.search(index="sinhala-songs-corpus", body=query)
        return self.extract_songs(resp)

    def advanced_search(self, req_body):
        """AND-match every non-empty field supplied in *req_body*."""
        clauses = [self._match(field, value)
                   for field, value in req_body.items() if value]
        body = {"query": {"bool": {"must": clauses}}}
        resp = self.es.search(index="sinhala-songs-corpus", body=body)
        return self.extract_songs(resp)

    def get_logical_combinations(self, req_body):
        """Combine two field/value matches with an and/or/not operation."""
        first = self._match(req_body["key1"], req_body["value1"])
        second = self._match(req_body["key2"], req_body["value2"])
        operation = req_body["operation"]
        resp = None
        if operation == "and":
            resp = self.es.search(index="sinhala-songs-corpus", body={
                "query": {"bool": {"must": [first, second]}}
            })
        elif operation == "or":
            resp = self.es.search(index="sinhala-songs-corpus", body={
                "query": {"bool": {"should": [first, second]}}
            })
        elif operation == "not":
            resp = self.es.search(index="sinhala-songs-corpus", body={
                "query": {"bool": {"must": first, "must_not": second}}
            })
        return self.extract_songs(resp)

    def regular_search(self, req_body):
        """Free-text search delegated to the query parser in search.py."""
        resp = search_user_query(req_body["query"], self.es)
        return self.extract_songs(resp)
{
"api_name": "elasticsearch.Elasticsearch",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "search.search_user_query",
"line_number": 67,
"usage_type": "call"
}
] |
27792793140 | import random
import os
from model import *
from world import *
import numpy as np
import torch
import matplotlib.pyplot as plt
from datetime import datetime
# Task-variant switches: exactly one of these is set True below.
mem, nomem, mem_vd, nomem_vd = [False, False, False, False]
mem = True
env_title = 'Tunl Mem'
# ld = maximum delay length (used to size the recording arrays).
if mem or nomem:
    ld = 40
elif mem_vd or nomem_vd:
    len_delays = [20, 40, 60]
    len_delays_p = [1, 1, 1]
    ld = max(len_delays)
# Environment geometry and reward scheme.
len_edge = 7
rwd = 100       # reward for a correct choice
inc_rwd = -20   # penalty for an incorrect choice (memory variants only)
step_rwd = -0.1 # per-step movement cost
poke_rwd = 5    # reward for each required poke
rng_seed = 1234
if mem:
    env = Tunl(ld, len_edge, rwd, inc_rwd, step_rwd, poke_rwd, rng_seed)
elif nomem:
    env = Tunl_nomem(ld, len_edge, rwd, step_rwd, poke_rwd, rng_seed)
elif mem_vd:
    env = Tunl_vd(len_delays, len_delays_p, len_edge, rwd, inc_rwd, step_rwd, poke_rwd, rng_seed)
elif nomem_vd:
    env = Tunl_nomem_vd(len_delays, len_delays_p, len_edge, rwd, step_rwd, poke_rwd, rng_seed)
# Network hyperparameters.
n_neurons = 512
lr = 1e-5
batch_size = 1
rfsize = 2
padding = 0
stride = 1
dilation = 1
conv_1_features = 16
conv_2_features = 32
hidden_types = ['conv', 'pool', 'conv', 'pool', 'lstm', 'linear']
net_title = hidden_types[4]
l2_reg = False
n_total_episodes = 50000
window_size = 5000  # for plotting
# Define conv & pool layer sizes
layer_1_out_h, layer_1_out_w = conv_output(env.h, env.w, padding, dilation, rfsize, stride)
layer_2_out_h, layer_2_out_w = conv_output(layer_1_out_h, layer_1_out_w, padding, dilation, rfsize, stride)
layer_3_out_h, layer_3_out_w = conv_output(layer_2_out_h, layer_2_out_w, padding, dilation, rfsize, stride)
layer_4_out_h, layer_4_out_w = conv_output(layer_3_out_h, layer_3_out_w, padding, dilation, rfsize, stride)
# Initializes network
net = AC_Net(
    input_dimensions=(env.h, env.w, 3),  # input dim
    action_dimensions=6,  # action dim
    hidden_types=hidden_types,  # hidden types
    hidden_dimensions=[
        (layer_1_out_h, layer_1_out_w, conv_1_features),  # conv
        (layer_2_out_h, layer_2_out_w, conv_1_features),  # pool
        (layer_3_out_h, layer_3_out_w, conv_2_features),  # conv
        (layer_4_out_h, layer_4_out_w, conv_2_features),  # pool
        n_neurons,
        n_neurons],  # hidden_dims
    batch_size=batch_size,
    rfsize=rfsize,
    padding=padding,
    stride=stride)
# If load pre-trained network
'''
load_dir = '2021_03_07_21_58_43_7_10_1e-05/net.pt'
parent_dir = '/home/mila/l/lindongy/tunl2d/data'
net.load_state_dict(torch.load(os.path.join(parent_dir, load_dir)))
'''
# Initializes optimizer
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
# Define helper functions
def bin_rewards(epi_rewards, window_size):
    """
    Average episode rewards with a trailing moving window.

    For episode i (1-based) the value is the mean of the last ``window_size``
    rewards up to and including episode i (fewer while i < window_size).

    Bug fix: the original's two-branch condition (``1 < i < window_size`` /
    ``window_size <= i``) skipped episode 1 entirely, leaving index 0 at 0
    instead of epi_rewards[0]; the unified slice below covers it.

    :param epi_rewards: 1-D numpy array of per-episode rewards.
    :param window_size: size of the trailing averaging window.
    :return: float32 array of the same length with windowed means.
    """
    epi_rewards = epi_rewards.astype(np.float32)
    avg_rewards = np.zeros_like(epi_rewards)
    for i_episode in range(1, len(epi_rewards) + 1):
        start = max(0, i_episode - window_size)
        avg_rewards[i_episode - 1] = np.mean(epi_rewards[start:i_episode])
    return avg_rewards
def ideal_nav_rwd(env, len_edge, len_delay, step_rwd, poke_rwd):
    """
    Best-case navigation reward for a single episode (call after env.reset()).

    Counts the minimum number of steps — distance to the initiation point plus
    three edge traversals, minus delay time that can overlap with travel — at
    step_rwd each, plus the three required pokes at poke_rwd each.
    """
    overlap = min(len_delay, len_edge - 1)
    min_steps = env.dist_to_init + 3 * (len_edge - 1) - overlap
    return min_steps * step_rwd + 3 * poke_rwd
# Train and record
# Initialize arrays for recording
if mem_vd or nomem_vd:
    len_delay = np.zeros(n_total_episodes, dtype=np.int8)  # length of delay for each trial
if mem or mem_vd:
    ct = np.zeros(n_total_episodes, dtype=np.int8)  # whether it's a correction trial or not
stim = np.zeros((n_total_episodes, 2), dtype=np.int8)
epi_nav_reward = np.zeros(n_total_episodes, dtype=np.float16)
correct_perc = np.zeros(n_total_episodes, dtype=np.float16)
choice = np.zeros((n_total_episodes, 2), dtype=np.int8)  # record the location when done
delay_loc = np.zeros((n_total_episodes, ld, 2), dtype=np.int16)  # location during delay
delay_resp_hx = np.zeros((n_total_episodes, ld, n_neurons), dtype=np.float32)  # hidden states during delay
delay_resp_cx = np.zeros((n_total_episodes, ld, n_neurons), dtype=np.float32)  # cell states during delay
ideal_nav_rwds = np.zeros(n_total_episodes, dtype=np.float16)
for i_episode in range(n_total_episodes):
    done = False
    env.reset()
    ideal_nav_rwds[i_episode] = ideal_nav_rwd(env, len_edge, env.len_delay, step_rwd, poke_rwd)
    net.reinit_hid()
    stim[i_episode] = env.sample_loc
    if mem or mem_vd:
        ct[i_episode] = int(env.correction_trial)
    if mem_vd or nomem_vd:
        len_delay[i_episode] = env.len_delay  # For vd or it only
    while not done:
        # Observation is reshaped channel-first (3, h, w) and batched for the net.
        pol, val = net.forward(
            torch.unsqueeze(torch.Tensor(np.reshape(env.observation, (3, env.h, env.w))), dim=0).float()
        )  # forward
        if env.indelay:  # record location and neural responses
            delay_loc[i_episode, env.delay_t - 1, :] = np.asarray(env.current_loc)
            delay_resp_hx[i_episode, env.delay_t - 1, :] = net.hx[
                hidden_types.index("lstm")].clone().detach().cpu().numpy().squeeze()
            delay_resp_cx[i_episode, env.delay_t - 1, :] = net.cx[
                hidden_types.index("lstm")].clone().detach().cpu().numpy().squeeze()
        act, p, v = select_action(net, pol, val)
        new_obs, reward, done, info = env.step(act)
        net.rewards.append(reward)
    choice[i_episode] = env.current_loc
    if env.reward == rwd:
        correct_perc[i_episode] = 1
    epi_nav_reward[i_episode] = env.nav_reward
    # Actor-critic update at episode end (discount factor 0.99).
    p_loss, v_loss = finish_trial(net, 0.99, optimizer)
avg_nav_rewards = bin_rewards(epi_nav_reward, window_size)
correct_perc = bin_rewards(correct_perc, window_size)
# Make directory to save data and figures
directory = datetime.now().strftime("%Y_%m_%d_%H_%M_%S") + f"_{env_title}_{net_title}"
parent_dir = '/home/mila/l/lindongy/tunl2d/data'
path = os.path.join(parent_dir, directory)
os.mkdir(path)
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=(6, 6))
fig.suptitle(env_title)
ax1.plot(np.arange(n_total_episodes), avg_nav_rewards, label=net_title)
ax1.plot(np.arange(n_total_episodes), ideal_nav_rwds, label="Ideal navig reward")
ax1.set_xlabel('episode')
ax1.set_ylabel('navigation reward')
ax1.legend()
ax2.plot(np.arange(n_total_episodes), correct_perc, label=net_title)
ax2.set_xlabel('episode')
ax2.set_ylabel('correct %')
ax2.legend()
# plt.show()
fig.savefig(path+'/fig.png')
# save data (which arrays exist depends on the task variant)
if mem:
    np.savez_compressed(path + '/data.npz', stim=stim, choice=choice, ct=ct, delay_loc=delay_loc,
                        delay_resp_hx=delay_resp_hx,
                        delay_resp_cx=delay_resp_cx)
elif nomem:
    np.savez_compressed(path + '/data.npz', stim=stim, choice=choice, delay_loc=delay_loc, delay_resp_hx=delay_resp_hx,
                        delay_resp_cx=delay_resp_cx)
elif mem_vd:
    np.savez_compressed(path + '/data.npz', stim=stim, choice=choice, ct=ct, len_delay=len_delay, delay_loc=delay_loc,
                        delay_resp_hx=delay_resp_hx, delay_resp_cx=delay_resp_cx)
elif nomem_vd:
    np.savez_compressed(path + '/data.npz', stim=stim, choice=choice, len_delay=len_delay, delay_loc=delay_loc,
                        delay_resp_hx=delay_resp_hx, delay_resp_cx=delay_resp_cx)
# save net
torch.save(net.state_dict(), path+'/net.pt')
| dongyanl1n/sim-tunl | run.py | run.py | py | 7,488 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "torch.optim.Adam",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros_like... |
22163631798 | #!/usr/bin/env python
import csv
import gzip
import json
import os
import re
import sys
import pathlib
import sqlite3
from shapely.geometry import Polygon
from sqlite3 import Error
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(SCRIPT_DIR, os.path.join('..', '..', 'outputs', 'butte'))
GEOJSON_FILE = os.path.join(DATA_DIR, 'butte_parcels.geojson')
sql_create_parcel_table = """
CREATE TABLE IF NOT EXISTS parcels (
apn TEXT PRIMARY KEY, -- county specific id for parcel lookup
location TEXT NOT NULL, -- the address or geographical description of parcel
zipcode TEXT NOT NULL, -- the zipcode of the parcel
geo_lat NUMERIC, -- the latitude of the property centroid
geo_lon NUMERIC, -- the longitude of the property centroid
use_code TEXT, -- property use code
lot_size_sqft NUMERIC, -- the lot size in square feet
building_size_sqft NUMERIC, -- the building size in square feet
building_bed_count NUMERIC, -- the number of bedrooms in building
building_bath_count NUMERIC, -- the number of bathrooms in building
building_stories_count NUMERIC, -- the number of stories in building
building_units_count NUMERIC, -- the number of units in building
building_age NUMERIC, -- the year building is built
tax_value NUMERIC -- the appicable assessed tax
);
"""
sql_select_apn_from_parsed = """
SELECT EXISTS(SELECT 1 FROM parcels WHERE apn = ?);
"""
sql_insert_parcel_from_parsed = """
INSERT INTO parcels (apn, location, zipcode, geo_lat, geo_lon, use_code, lot_size_sqft, building_size_sqft, building_bed_count, building_bath_count, building_stories_count, building_units_count, building_age)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);
"""
flatten=lambda l: sum(map(flatten, l),[]) if isinstance(l,list) else [l]
def run():
    """Load Butte county parcel features from the GeoJSON export into a
    local SQLite database, computing a centroid for each parcel polygon.

    Lines that fail JSON parsing, lack an APN or zipcode, or carry no
    geometry are skipped with a short diagnostic. Already-inserted APNs
    are skipped too, so re-running the script is idempotent.
    """
    conn = None
    try:
        db_file = os.path.join(DATA_DIR, 'butte_parcel.db')
        print('Opening {}'.format(db_file))
        conn = sqlite3.connect(db_file)
        c = conn.cursor()
        c.execute(sql_create_parcel_table)
        with open(GEOJSON_FILE) as f_in:
            count = 0
            for line in f_in:
                count += 1
                if count < 6:
                    # Skip geojson cruft left by conversion (preamble lines).
                    continue
                try:
                    # Each feature sits on its own line; strip the trailing
                    # comma so it parses as standalone JSON.
                    json_to_parse = line.strip()
                    if json_to_parse.endswith(','):
                        json_to_parse = json_to_parse[:-1]
                    record = json.loads(json_to_parse)
                except:
                    print('-> could not parse JSON on line %d' % (count,))
                    continue
                props = record['properties']
                formatted_apn = props['SiteAPN']
                if not formatted_apn:
                    # No APN means no primary key for the row; skip it.
                    continue
                if not record['geometry'] or not record['geometry']['coordinates']:
                    print('-> skip')
                    continue
                # There is definitely a more correct way to do this:
                # flatten nested polygon rings into a [x, y, x, y, ...] stream,
                # then re-pair into (x, y) coordinate tuples.
                flat_coords = [[xyz[0], xyz[1]] for coords in record['geometry']['coordinates'] for xyz in coords]
                flat_coords = flatten(flat_coords)
                coords = zip(flat_coords[0::2], flat_coords[1::2])
                try:
                    # Shapely centroid; coordinates come back (lon, lat).
                    centroid = list(Polygon(coords).centroid.coords)[0]
                except:
                    print('-> could not find centroid')
                    continue
                # check if id already exists (makes re-runs idempotent)
                c.execute(sql_select_apn_from_parsed, (formatted_apn,))
                (existsCheck,) = c.fetchone()
                if existsCheck > 0:
                    continue
                if not props['SiteZip']:
                    continue
                insert_record = (
                    formatted_apn,
                    '{}\n{}'.format(props['SiteAddr'], props['SiteCity']),
                    props['SiteZip'],
                    centroid[1],  # latitude
                    centroid[0],  # longitude
                    props['UseCode'],
                    props['LotSizeSF'],
                    props['BuildingSF'],
                    props['Bedrooms'],
                    props['Bathrooms'],
                    props['Stories'],
                    props['Units'],
                    props['YrBuilt']
                )
                c.execute(sql_insert_parcel_from_parsed, insert_record)
        conn.commit()
        # NOTE(review): lastrowid is the rowid of the last insert, not a
        # count of inserted rows — verify this message matches intent.
        print("inserts: {}".format(c.lastrowid))
    except Error as e:
        print(e)
    finally:
        if conn:
            conn.close()
if __name__ == '__main__':
run()
| typpo/ca-property-tax | scrapers/butte/create_parcels_db.py | create_parcels_db.py | py | 4,941 | python | en | code | 89 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line... |
570436896 | import logging
from .geomsmesh import geompy
def sortFaces(facesToSort):
    """Sort faces by surface area, ascending.

    Returns a tuple ``(sorted_faces, min_area, max_area)`` where the areas
    are those of the smallest and largest face respectively.
    """
    logging.info('start')
    # Pair each face with its area (BasicProperties()[1]) plus its original
    # index, so ties in area fall back to a comparable integer rather than
    # attempting to compare face objects directly.
    measured = [(geompy.BasicProperties(face)[1], idx, face)
                for idx, face in enumerate(facesToSort)]
    measured.sort()
    ordered = [face for _, _, face in measured]
    return ordered, measured[0][0], measured[-1][0]
| luzpaz/occ-smesh | src/Tools/blocFissure/gmu/sortFaces.py | sortFaces.py | py | 362 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "logging.info",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "geomsmesh.geompy.BasicProperties",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "geomsmesh.geompy",
"line_number": 8,
"usage_type": "name"
}
] |
5668664706 | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import numpy as np
import sklearn.metrics as metrics
import seaborn as sns
from mosaic import features
from mosaic import contexts
from mosaic import image_io
from mosaic import plots
from mosaic import data_utils
from mosaic.grid.image_grid import images_to_grid
__all__ = ['scatter_grid']
def images_to_scatter_grid(images, x_var, y_var, padding=None, **kwargs):
    """Creates a grid plot from a scatter plot.

    Snaps each scatter point to its nearest free cell on an evenly spaced
    sqrt(n) x sqrt(n) grid via greedy nearest-neighbor assignment, then
    renders the images in that grid order.

    Parameters
    ----------
    images : list of length [n_samples,]
        A List of PIL Image objects. All images must be
        the same shape NxWx3.
    x_var : np.array of shape [n_samples,]
        The x-coordinate in euclidean space.
    y_var : np.array of shape [n_samples,]
        The y-coordinate in euclidean space.
    padding : int, optional
        The padding between images in the grid.

    Returns
    -------
    A properly shaped width x height x 3 PIL Image.
    """
    # scale the variables between 0-1 (subtract off min?)
    # NOTE(review): the return values of minmax_scale are discarded — this
    # only works if features.minmax_scale scales in place; verify.
    features.minmax_scale(x_var)
    features.minmax_scale(y_var)
    xy = np.c_[x_var, y_var]
    # make a grid of evenly spaced points on the grid.
    # The grid is of size sqrt(n_samples) x sqrt(n_samples)
    grid_size = int(np.ceil(np.sqrt(len(images))))
    grid_1d = np.linspace(0, 1, grid_size)
    grid_2d = np.dstack(np.meshgrid(grid_1d, grid_1d)).reshape(-1, 2)
    # distances between the evenly spaced grid and the points
    dist = metrics.euclidean_distances(grid_2d, xy)
    # determine order based on nearest neighbors: each grid cell claims its
    # closest remaining point, which is then removed from contention.
    image_order = []
    for i in range(grid_2d.shape[0]):
        index = np.argmin(dist[i, :])
        image_order.append(index)
        dist[:, index] = np.inf  # set to inf so we don't pick this point again
    images = [images[index] for index in image_order]
    grid = images_to_grid(images, padding=padding)
    return plots.pillow_to_matplotlib(grid, **kwargs)
def scatter_grid(x, y,
                 images=None,
                 data=None,
                 hue=None,
                 image_dir='',
                 image_size=None,
                 padding=None,
                 n_jobs=1,
                 **kwargs):
    """Draw a plot ordering images in a regularly spaced 2-d grid
    based on their distance in the x-y plane. The distance between
    points is assumed to be euclidean.

    Parameters
    ----------
    x, y : str or array-like
        Data or names of variables in `data`.
        These variables correspond to the x-y coordinates
        in the euclidean space.
    images : str or array-like
        Image arrays or names of the column pointing to the
        image paths within `data`.
    data : pd.DataFrame
        Pandas dataframe holding the dataset.
    hue : str or array-like
        Data or the name of the variable to use to color
        the individual images on the grid.
    image_dir : str (default='')
        The location of the image files on disk.
    image_size : int
        The size of each image in the scatter plot.
    padding : int, optional
        The padding between images in the grid.
    n_jobs : int (default=1)
        The number of parallel workers to use for loading
        the image files.

    Returns
    -------
    A properly shaped NxWx3 image with any necessary padding.

    Examples
    --------
    Create a grid plot with hue labels.

    .. plot:: ../examples/scatter_grid.py
    """
    x_var = data_utils.get_variable(data, x)
    y_var = data_utils.get_variable(data, y)
    # TODO (seaborn is only required for a color palette. Remove this)
    if hue is not None:
        # Load raw arrays (not PIL objects) so each image can be tinted
        # according to its hue class before display.
        images = data_utils.get_images(
            data, images,
            image_dir=image_dir,
            as_image=False,
            image_size=image_size,
            n_jobs=n_jobs)
        hue = data_utils.get_variable(data, hue)
        # Map each hue label to an integer index into a husl color palette.
        values, value_map = np.unique(hue, return_inverse=True)
        palette = sns.husl_palette(len(values))
        images = [features.color_image(img, hue=palette[val]) for
                  img, val in zip(images, value_map)]
        images = [image_io.to_pillow_image(img) for img in images]
    else:
        # load images directly as PIL objects; no tinting needed
        images = data_utils.get_images(
            data, images,
            image_dir=image_dir,
            as_image=True,
            image_size=image_size,
            n_jobs=n_jobs)
    return images_to_scatter_grid(images, x_var, y_var, padding=padding, **kwargs)
| joshloyal/Mosaic | mosaic/grid/scatter_grid.py | scatter_grid.py | py | 4,542 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mosaic.features.minmax_scale",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "mosaic.features",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "mosaic.features.minmax_scale",
"line_number": 44,
"usage_type": "call"
},
{
"api_nam... |
35127026935 | import os
import numpy as np
import pickle
from dataclasses import dataclass
import itertools
from multiprocessing import Pool
import PIL
from noise_reducers.grayscale_gibbs_noise_reducer import GrayscaleGibbsNoiseReducer
from noise_reducers.grayscale_gradient_noise_reducer import GrayscaleGradientNoiseReducer
from noise_reducers import image_utils
@dataclass
class Experiment(object):
    """One denoising experiment: a named image folder plus its noise level."""
    # Name of the experiment folder under IMAGES_PATH.
    experiment_name: str
    # Fraction of corrupted pixels; also used as the reducers' noise prior.
    noise_level: float
EXPERIMENTS = [
Experiment(experiment_name="size800_noise10_flipped", noise_level=0.1),
Experiment(experiment_name="size800_noise20_flipped", noise_level=0.2),
]
IMAGES_PATH = "../grayscale_images"
IMAGES_PER_EXPERIMENT = 20
ITERATIONS_PER_EXPERIMENT = 300_000
ITERATIONS_PER_EVALUATION = 6000
@dataclass
class ImagePair(object):
    """A clean image and its noisy counterpart loaded from disk."""
    # Index of the image within the experiment (0 .. IMAGES_PER_EXPERIMENT-1).
    image_id: int
    # Clean reference grayscale image as a numpy array.
    ground_truth: np.ndarray
    # Noisy observation of the same image.
    observation: np.ndarray
def get_image_pairs_to_evaluate(experiment_name):
    """Load all (ground truth, observation) image pairs for one experiment.

    Expects files named ``image_<id>_ground_truth.png`` and
    ``image_<id>_observation.png`` under ``IMAGES_PATH/<experiment_name>``.

    Returns:
        list[ImagePair]: IMAGES_PER_EXPERIMENT pairs in id order.
    """
    images_path = os.path.join(IMAGES_PATH, experiment_name)
    image_pairs = []
    for image_id in range(IMAGES_PER_EXPERIMENT):
        ground_truth_path = os.path.join(images_path, f"image_{image_id}_ground_truth.png")
        observation_path = os.path.join(images_path, f"image_{image_id}_observation.png")
        image_pairs.append(ImagePair(
            image_id=image_id,
            ground_truth=image_utils.load_grayscale_image_as_numpy_array(ground_truth_path),
            observation=image_utils.load_grayscale_image_as_numpy_array(observation_path),
        ))
    return image_pairs
def run_with_reducer(reducer, experiment_name, storage_folder):
    """Denoise every image pair of an experiment and persist the results.

    Writes one ``reduced_image_<id>.png`` per pair into *storage_folder*,
    then pickles the reducer's accumulated average statistics.
    """
    os.makedirs(storage_folder, exist_ok=True)
    for image_pair in get_image_pairs_to_evaluate(experiment_name):
        reduction_result = reducer.reduce_noise(
            original_image=image_pair.ground_truth, observation=image_pair.observation,
        )
        # Convert back to 8-bit grayscale before saving as PNG.
        reduced_image = PIL.Image.fromarray(reduction_result.reduced_image.astype(np.uint8))
        reduced_image.save(os.path.join(storage_folder, f"reduced_image_{image_pair.image_id}.png"), format="PNG")
    # Dump statistics averaged over all processed images for this experiment.
    with open(os.path.join(storage_folder, "average_statistics.pickle"), mode="wb") as file_stream:
        pickle.dump(reducer.average_statistics, file_stream)
def run_with_gibbs_reducer(experiment):
    """Run one experiment with the Gibbs-sampling noise reducer.

    Results go to ``<IMAGES_PATH>/<name>/grayscale_gibbs_reducer_<noise%>``.
    """
    print(f"Gibbs sampling for experiment {experiment.experiment_name} started!")
    reducer = GrayscaleGibbsNoiseReducer(
        noise_level_prior=experiment.noise_level, observation_strength=1.0, coupling_strength=4.0,
        iterations_count=ITERATIONS_PER_EXPERIMENT, iterations_per_evaluation=ITERATIONS_PER_EVALUATION,
    )
    storage_folder = os.path.join(
        IMAGES_PATH, experiment.experiment_name, f"grayscale_gibbs_reducer_{round(experiment.noise_level * 100)}"
    )
    run_with_reducer(reducer, experiment.experiment_name, storage_folder)
    print(f"Gibbs sampling for experiment {experiment.experiment_name} done!")
def run_with_gradient_reducer(experiment):
    """Run one experiment with the gradient-based noise reducer.

    Results go to ``<IMAGES_PATH>/<name>/grayscale_gradient_reducer_<noise%>``.
    """
    print(f"Gradient-based sampling for experiment {experiment.experiment_name} started!")
    reducer = GrayscaleGradientNoiseReducer(
        noise_level_prior=experiment.noise_level, observation_strength=1.0, coupling_strength=4.0, temperature=2.0,
        iterations_count=ITERATIONS_PER_EXPERIMENT, iterations_per_evaluation=ITERATIONS_PER_EVALUATION,
    )
    storage_folder = os.path.join(
        IMAGES_PATH, experiment.experiment_name, f"grayscale_gradient_reducer_{round(experiment.noise_level * 100)}"
    )
    run_with_reducer(reducer, experiment.experiment_name, storage_folder)
    print(f"Gradient-based sampling for experiment {experiment.experiment_name} done!")
def run_with_reducer_type(experiment, reducer_type):
    """Dispatch *experiment* to the runner named by *reducer_type*.

    Args:
        experiment: the Experiment to run.
        reducer_type: either "gibbs" or "gradient".

    Raises:
        ValueError: if *reducer_type* names no known reducer.
    """
    dispatch = {
        "gibbs": run_with_gibbs_reducer,
        "gradient": run_with_gradient_reducer,
    }
    if reducer_type not in dispatch:
        raise ValueError("Invalid type of reducer")
    dispatch[reducer_type](experiment)
def run_script():
    """Run every experiment with both reducer types across 6 worker processes."""
    # Build (experiment, reducer_type) argument pairs: each experiment is run
    # once with the Gibbs reducer and once with the gradient reducer.
    arguments_to_run = list(itertools.chain(
        zip(EXPERIMENTS, ["gibbs"] * len(EXPERIMENTS)),
        zip(EXPERIMENTS, ["gradient"] * len(EXPERIMENTS)),
    ))
    with Pool(processes=6) as pool:
        pool.starmap(run_with_reducer_type, arguments_to_run)
if __name__ == "__main__":
run_script()
| Dawidsoni/noise-reduction | noise-reduction/generate_grayscale_statistics.py | generate_grayscale_statistics.py | py | 4,224 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dataclasses.dataclass",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "dataclass... |
41047380685 | """By: Xiaochi (George) Li: github.com/XC-Li"""
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import ParseError
from bs4 import BeautifulSoup
def bs_parser(file_name, target_id):
    """
    XML Parser implemented by Beautiful Soup Package.
    Collects all stripped text fragments from every <speaker> element
    whose personId matches target_id and joins them with spaces.
    Args:
        file_name(str): path to the document
        target_id(str): the person_id of speaker of target document
    Returns:
        speech(str): the speech of the speaker
    """
    text_list = []
    with open(file_name, encoding='utf-8') as file:
        soup = BeautifulSoup(file, 'xml')
    target_speech = soup.find_all('speaker', personId=target_id)
    if len(target_speech) > 1:
        # A speaker can appear multiple times in one document; all of their
        # speeches are concatenated below.
        pass
        # print('multiple speech:', target_id, file_name)
    for item in target_speech:
        # s = item.get_text(strip=False) # this will cause the string in subtag concatenated together
        for s in item.stripped_strings:  # bug fix: fix the problem on previous line
            text_list.append(s)
    return ' '.join(text_list)
def xml_parser(file_name, target_id):
    """
    XML Parser implemented by xml package.

    Collects the text of every <p> under the <speaker> element(s) whose
    personId matches target_id. If the file is not well-formed XML (e.g.
    contains raw '&'), it is re-read with '&' replaced by 'and'.

    Args:
        file_name(str): path to the document
        target_id(str): the person_id of speaker of target document
    Returns:
        speech(str): the speech of the speaker
    """
    try:
        tree = ET.parse(file_name, ET.XMLParser(encoding='utf-8'))
        root = tree.getroot()
    except ParseError:
        # Fallback for malformed files: unescaped '&' is the common culprit.
        with open(file_name, encoding='utf-8') as temp:
            file_data = temp.read()
        file_data = file_data.replace('&', 'and')
        root = ET.fromstring(file_data)
    text_list = []
    # Speakers live directly under the first child of the document root.
    for child in root[0]:
        if child.tag == 'speaker':
            if 'personId' in child.attrib:  # contain person ID
                person_id = child.attrib['personId']
            else:
                continue
            if str(person_id) != str(target_id):  # multiple speaker in a document, not target speaker
                continue
            for item in child.findall('p'):
                if len(item) == 0:
                    # Bug fix: an empty <p/> has text None, which previously
                    # crashed ''.join below with a TypeError; skip None text.
                    if item.text is not None:
                        text_list.append(item.text)
                else:  # multiple sub tag inside 'p' tag
                    if item.text is not None:
                        text_list.append(item.text)
                        text_list.append(' ')
                    for i in item:
                        if i.text is not None:
                            text_list.append(i.text)
                            text_list.append(' ')
                        if i.tail is not None:
                            text_list.append(i.tail)
                            text_list.append(' ')
    return ''.join(text_list)
def xml_to_person_id(file_name):
    """
    NO LONGER USEFUL
    XML parser to get the person_ids from given XML file.
    Speakers without a personId attribute are silently skipped.
    Args:
        file_name(str): file name
    Returns:
        person_ids(set[int]): a set of person ids
    """
    person_ids = set()
    with open(file_name, encoding='utf-8') as file:
        soup = BeautifulSoup(file, 'xml')
    all_speech = soup.find_all('speaker')
    for single_speech in all_speech:
        try:
            person_ids.add(single_speech['personId'])
        except KeyError:
            # Speaker element without a personId attribute — ignore it.
            continue
    return person_ids
def get_person_speech_pair(file_name):
    """
    XML parser to get the person_ids from given XML file.
    Handles both the newer 'personId' and the older 'person' attribute;
    a speaker's multiple speeches in one file are joined with spaces.
    Args:
        file_name(str): file name
    Returns:
        person_id_speech_pair(dict): Dict[person_id(int) -> speech(str)]
    """
    person_id_speech_dict = dict()
    with open(file_name, encoding='utf-8') as file:
        soup = BeautifulSoup(file, 'xml')
    all_speech = soup.find_all('speaker')
    for single_speech in all_speech:
        try:  # newer format
            person_id = single_speech['personId']
        except KeyError:
            try:  # older format
                person_id = single_speech['person']
            except KeyError:
                # Neither attribute present — cannot attribute this speech.
                continue
        single_speech_list = []
        for s in single_speech.stripped_strings:
            single_speech_list.append(s)
        processed_speech = ' '.join(single_speech_list)
        # print(parsed_speech, '\n')
        if person_id not in person_id_speech_dict:
            person_id_speech_dict[person_id] = []
        person_id_speech_dict[person_id].append(processed_speech)
    # Collapse each speaker's list of speeches into one string.
    for person_id in person_id_speech_dict:
        person_id_speech_dict[person_id] = ' '.join(person_id_speech_dict[person_id])
    return person_id_speech_dict
if __name__ == '__main__':
# Sample: multiple sub-tag inside p tag
# print(xml_parser('../opinion_mining/cr_corpus/160/8/E63-E64/4407923.xml', '404'))
# print(bs_parser('../opinion_mining/cr_corpus/160/8/E63-E64/4407923.xml', '404'))
print(get_person_speech_pair("D:\\Github\\FN-Research-GW-Opinion-Mining\\opinion_mining\\cr_corpus\\146\\4\\E8-E10\\27718.xml"))
| XC-Li/FiscalNote_Project | deployment/util_code/xml_parser.py | xml_parser.py | py | 5,003 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "bs4.BeautifulSoup",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "x... |
41561590327 | from api.core.workflow import workflow
from flask import request
import api.DAL.data_context.admin.user_update as user_update
import api.DAL.data_context.admin.user_insert as user_insert
import api.DAL.data_context.admin.user_select as user_select
from api.core.admin.credentials import Credentials
from api.core.admin.token import Token
from api.core.admin.validate import InvalidCredential
import api.core.admin.validate as validate
import api.core.response as response
import api.core.sanitize as sanitize
import json
@workflow.route('/admin/register', methods = ['POST'])
def register_user():
    '''Called when adding a new user to the database. Makes sure that all information
    provided is valid(see individual validations for details) and hashes the password for storage.
    On success the new user is immediately logged in; on validation failure an
    error response with the failure message is returned instead.'''
    credentials_form = json.loads(request.form['payload'])
    credentials_form = sanitize.form_keys(credentials_form)
    credentials = Credentials.map_from_form(credentials_form)
    try:
        validate.email(credentials.email)
        validate.name(credentials.first_name)
        validate.name(credentials.last_name)
        validate.password(credentials.password)
    except InvalidCredential as invalid:
        # First validation failure short-circuits registration.
        return response.error(invalid.args[0])
    # Never store the plaintext password.
    credentials.hash_password()
    user_insert.new_user(credentials)
    return login()
@workflow.route('/admin/login', methods = ['POST'])
def login():
    '''Called when a user is loging in (shocker)
    Checks the provided email and password with the values stored in the database.
    On success a fresh session token is issued, persisted, and returned.'''
    credentials_form = json.loads(request.form['payload'])
    credentials_form = sanitize.form_keys(credentials_form)
    provided_credentials = Credentials.map_from_form(credentials_form)
    stored_credentials = user_select.login_credentials(provided_credentials)
    try:
        validate.login(stored_credentials, provided_credentials)
    except InvalidCredential as invalid:
        return response.error(invalid.args[0])
    # Credentials check out: issue and persist a new session token.
    token = Token()
    token.user_id = stored_credentials.id
    token.update()
    user_update.token(token)
    return response.add_token(token = token)
| RyanLadley/agility | api/core/workflow/admin_workflow.py | admin_workflow.py | py | 2,156 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "api.core.sanitize.fo... |
14582084682 | # -*- coding: utf-8 -*-
# @Author : Devin Yang(pistonyang@gmail.com), Gary Lai (glai9665@gmail.com)
__all__ = ['CosineWarmupLr', 'get_cosine_warmup_lr_scheduler', 'get_layerwise_decay_params_for_bert']
from math import pi, cos
from torch.optim.optimizer import Optimizer
from torch.optim.lr_scheduler import LambdaLR
class Scheduler(object):
    """Abstract base class for learning-rate schedulers.

    Subclasses must implement ``get_lr``; instances are expected to hold a
    reference to their optimizer in ``self.optimizer``, which is excluded
    from serialized state.
    """

    def __init__(self):
        raise NotImplementedError

    def get_lr(self):
        raise NotImplementedError

    def state_dict(self):
        """Returns the state of the scheduler as a :class:`dict`.

        It contains an entry for every variable in self.__dict__ which
        is not the optimizer.
        """
        state = dict(self.__dict__)
        state.pop('optimizer', None)
        return state

    def load_state_dict(self, state_dict):
        """Loads the schedulers state.

        Arguments:
            state_dict (dict): scheduler state. Should be an object returned
                from a call to :meth:`state_dict`.
        """
        self.__dict__.update(state_dict)
class CosineWarmupLr(Scheduler):
    """Cosine lr decay function with warmup.

    Lr warmup is proposed by `
    Accurate, Large Minibatch SGD:Training ImageNet in 1 Hour`
    `https://arxiv.org/pdf/1706.02677.pdf`

    Cosine decay is proposed by `
    Stochastic Gradient Descent with Warm Restarts`
    `https://arxiv.org/abs/1608.03983`

    Args:
        optimizer (Optimizer): optimizer of a model.
        batches (int): batches per epoch.
        epochs (int): epochs to train.
        base_lr (float): init lr.
        target_lr (float): minimum(final) lr.
        warmup_epochs (int): warmup epochs before cosine decay.
        warmup_lr (float): warmup starting lr.
        last_iter (int): init iteration; -1 starts a fresh run.

    Attributes:
        total_iters (int): number of iterations of all epochs.
        total_warmup_iters (int): number of iterations of all warmup epochs.
    """
    def __init__(self,
                 optimizer,
                 batches: int,
                 epochs: int,
                 base_lr: float,
                 target_lr: float = 0,
                 warmup_epochs: int = 0,
                 warmup_lr: float = 0,
                 last_iter: int = -1):
        if not isinstance(optimizer, Optimizer):
            raise TypeError('{} is not an Optimizer'.format(type(optimizer).__name__))
        self.optimizer = optimizer
        if last_iter == -1:
            # Fresh run: remember each group's starting lr for later resets.
            for group in optimizer.param_groups:
                group.setdefault('initial_lr', group['lr'])
            last_iter = 0
        else:
            # Resumed run: every group must already carry its initial lr.
            for i, group in enumerate(optimizer.param_groups):
                if 'initial_lr' not in group:
                    raise KeyError("param 'initial_lr' is not specified "
                                   "in param_groups[{}] when resuming an optimizer".format(i))
        self.baselr = base_lr
        self.learning_rate = base_lr
        self.total_iters = epochs * batches
        self.targetlr = target_lr
        self.total_warmup_iters = batches * warmup_epochs
        self.total_cosine_iters = self.total_iters - self.total_warmup_iters
        self.total_lr_decay = self.baselr - self.targetlr
        self.warmup_lr = warmup_lr
        self.last_iter = last_iter
        # Apply the initial lr immediately.
        self.step()

    def get_lr(self):
        # Linear warmup from warmup_lr to baselr, then cosine anneal from
        # baselr down to targetlr over the remaining iterations.
        if self.last_iter < self.total_warmup_iters:
            return self.warmup_lr + \
                (self.baselr - self.warmup_lr) * self.last_iter / self.total_warmup_iters
        else:
            cosine_iter = self.last_iter - self.total_warmup_iters
            cosine_progress = cosine_iter / self.total_cosine_iters
            return self.targetlr + self.total_lr_decay * \
                (1 + cos(pi * cosine_progress)) / 2

    def step(self, iteration=None):
        """Update status of lr.

        Args:
            iteration(int, optional): now training iteration of all epochs.
                Usually no need to set it manually.
        """
        if iteration is None:
            iteration = self.last_iter + 1
        self.last_iter = iteration
        self.learning_rate = self.get_lr()
        # Push the new lr into every parameter group of the optimizer.
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = self.learning_rate
def get_cosine_warmup_lr_scheduler(optimizer : Optimizer,
                                   batches_per_epoch: int,
                                   epochs: int,
                                   warmup_epochs: int = 0,
                                   last_epoch: int = -1):
    """Similar to CosineWarmupLr, with support for different learning rate for different parameter groups as well as better compatibility with current PyTorch API

    Args:
        optimizer (Optimizer): optimizer of a model.
        batches_per_epoch (int): batches per epoch.
        epochs (int): epochs to train.
        warmup_epochs (int): warmup epochs before cosine decay.
        last_epoch (int): the index of the last epoch when resuming training.

    Example:
    ```
    batches_per_epoch = 10
    epochs = 5
    warmup_epochs = 1
    params = get_layerwise_decay_params_for_bert(model)
    optimizer = optim.SGD(params, lr=3e-5)
    lr_scheduler = get_cosine_warmup_lr_scheduler(optimizer, batches_per_epoch, epochs, warmup_epochs=warmup_epochs)
    ```
    """
    total_steps = epochs * batches_per_epoch
    # warmup params
    total_warmup_steps = batches_per_epoch * warmup_epochs
    # cosine params
    # NOTE(review): assumes epochs > warmup_epochs; if they are equal,
    # total_cosine_steps is 0 and lr_lambda divides by zero — verify callers.
    total_cosine_steps = total_steps - total_warmup_steps
    def lr_lambda(current_step):
        # lr_lambda should return current lr / top learning rate
        if current_step < total_warmup_steps:
            # Linear ramp from 0 to 1 over the warmup phase.
            warmup_progress = current_step / total_warmup_steps
            return warmup_progress
        else:
            # Cosine anneal from 1 down to 0 over the remaining steps.
            cosine_step = current_step - total_warmup_steps
            cosine_progress = cosine_step / total_cosine_steps
            return (1 + cos(pi * cosine_progress)) / 2
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_differential_lr_param_group(param_groups, lrs):
    """Pair each parameter group with its own learning rate.

    Discriminative fine-tuning, where different layers of the network have
    different learning rates, is first proposed in `Jeremy Howard and
    Sebastian Ruder. 2018. Universal language model fine-tuning for text
    classification. https://arxiv.org/pdf/1801.06146.pdf.` It has been found
    to stabilize training and speed up convergence.

    Args:
        param_groups: a list of parameter groups (each a list of parameters),
            e.g. ``[[p1a, p1b, ...], [p2a, p2b, ...], ...]``
        lrs: one learning rate per parameter group, in the same order.

    Returns:
        A list of ``{'params': ..., 'lr': ...}`` dicts ready to pass to an
        optimizer.
    """
    assert len(param_groups) == len(lrs), f"expect the learning rates to have the same lengths as the param_group length, instead got {len(param_groups)} and {len(lrs)} as lengths respectively"
    return [{'params': group, 'lr': rate} for group, rate in zip(param_groups, lrs)]
def get_layerwise_decay_param_group(param_groups, top_lr=2e-5, decay=0.95):
    """Assign layer-wise decayed learning rates to parameter groups.

    Layer-wise decay learning rate is used in `Chi Sun, Xipeng Qiu, Yige Xu,
    and Xuanjing Huang. 2019. How to fine-tune BERT for text classification?
    https://arxiv.org/abs/1905.05583` to improve convergence and prevent
    catastrophic forgetting.

    The last (topmost) group receives ``top_lr``; each group below it is
    scaled down by another factor of ``decay``.

    Args:
        param_groups: a list of parameter groups, ordered bottom to top.
        top_lr: learning rate of the top layer.
        decay: decay factor; ``decay < 1`` gives lower layers lower rates,
            ``decay == 1`` gives every layer the same rate.

    Returns:
        Parameter groups with per-group learning rates for an optimizer, e.g.
        ``optimizer = AdamW(get_layerwise_decay_param_group(groups), lr=2e-5)``.
    """
    depth = len(param_groups)
    rates = [top_lr * pow(decay, depth - 1 - position) for position in range(depth)]
    return get_differential_lr_param_group(param_groups, rates)
def get_layerwise_decay_params_for_bert(model, number_of_layer=12, top_lr=2e-5, decay=0.95):
    """Assign layerwise decay learning rates to parameter groups of BERT.

    Layer-wise decay learning rate is used in `Chi Sun, Xipeng Qiu, Yige Xu, and Xuanjing Huang. 2019.
    How to fine-tune BERT for text classification? https://arxiv.org/abs/1905.05583` to improve convergence
    and prevent catastrophic forgetting.

    Args:
        model: your BERT model
        number_of_layer: number of layers your BERT has
        top_lr: learning rate of the top layer
        decay: decay factor. When decay < 1, lower layers have lower learning rates; when decay == 1, all layers have the same learning rate

    Returns:
        BERT parameter groups with different learning rates that you can then pass into an optimizer

    Example:
    ```
    param_groups = get_layerwise_decay_params_for_bert(model, number_of_layer=12, top_lr=2e-5, decay=0.95)
    optimizer = AdamW(param_groups, lr = 2e-5)
    ```
    """
    # First bucket parameters by layer, then attach decayed lrs per bucket.
    param_groups = get_param_group_for_bert(model, number_of_layer=number_of_layer, top_lr=top_lr, decay=decay)
    param_groups_for_optimizer = get_layerwise_decay_param_group(param_groups, top_lr=top_lr, decay=decay)
    return param_groups_for_optimizer
def get_param_group_for_bert(model, number_of_layer=12, top_lr=2e-5, decay=0.95):
    """separate each layer of a BERT models into a parameter group

    Args:
        model: your BERT model
        number_of_layer: number of layers your BERT has
        top_lr: learning rate of the top layer (unused here; see note below)
        decay: decay factor (unused here; see note below)

    Returns:
        a param group that should look like:
            [
                [param1a, param1b, ..] <-- parameter group 1, embeddings ("tail")
                [param2a, param2b, ..] <-- parameter group 2, layer 0 of BERT
                ...
                [.., ..]               <-- last group, pooler/norm head
            ]
    """
    # NOTE(review): top_lr and decay are accepted but never used in this
    # function — they only matter in get_layerwise_decay_param_group.
    # Bucket layout: [tail(embeddings), layer0, layer1, ..., layerN-1, head].
    param_groups_for_optimizer = [[] for _ in range(number_of_layer+2)]  # tail, layer0, layer1 ...., layer11, head
    head = {'pooler', 'norm', 'relative_attention_bias'}
    tail = {'embeddings',}
    layers = [f'layer.{i}.' for i in range(number_of_layer)]
    for name, param in model.named_parameters():
        if belongs(name, tail):
            param_groups_for_optimizer[0].append(param)
        elif belongs(name, head):
            param_groups_for_optimizer[-1].append(param)
        else:
            # Match the parameter to its transformer layer by name substring.
            for i, layer in enumerate(layers):
                if layer in name:
                    param_groups_for_optimizer[i+1].append(param)
    return param_groups_for_optimizer
def belongs(name, groups):
    """Return True if *name* contains any of the substrings in *groups*."""
    return any(marker in name for marker in groups)
| PistonY/torch-toolbox | torchtoolbox/optimizer/lr_scheduler.py | lr_scheduler.py | py | 11,278 | python | en | code | 409 | github-code | 36 | [
{
"api_name": "torch.optim.optimizer.Optimizer",
"line_number": 68,
"usage_type": "argument"
},
{
"api_name": "math.cos",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "torch.optim.opti... |
4200242133 | import gevent
def eat(name):
    """Demo task: yields to other greenlets for 2 s, then returns a message."""
    print('%s start task' % name)
    gevent.sleep(2)  # cooperative sleep — other greenlets run meanwhile
    print('%s end task' % name)
    return name + " finished callback"
def play(name):
    """Demo task: yields to other greenlets for 1 s, then returns a message."""
    print('%s start task' % name)
    gevent.sleep(1)  # cooperative sleep — other greenlets run meanwhile
    print('%s end task' % name)
    return name + " finished callback"
def callback(greenlet):
    """Link callback: runs when a greenlet finishes; its return value is in greenlet.value."""
    print("callback successfully: " + greenlet.value)
g1 = gevent.spawn(eat, 'marcia')
g1.link(callback)
g2 = gevent.spawn(play, name='joe')
g2.link(callback)
gevent.joinall([g1, g2])
print('主')
# import gevent
# from gevent import Greenlet
#
#
# def callback_func():
# print("callback successfully")
#
#
# class MyGreenlet(Greenlet):
# def __init__(self, timeout, msg):
# Greenlet.__init__(self)
# self.timeout = timeout
# self.msg = msg
#
# def _run(self):
# print("I'm from subclass of Greenlet and want to say: %s" % (self.msg,))
# gevent.sleep(self.timeout)
# print("done after sleep %s" % self.timeout)
#
#
# greenlet1 = MyGreenlet(2, 'hello')
# greenlet2 = MyGreenlet(1, 'world')
# greenlet1.start()
# greenlet2.start()
# greenlet1.rawlink(callback_func())
#
# gevent.joinall([greenlet1, greenlet2])
# print("main")
#
| Marcia0526/how_to_learn_python | coroutine/gevent_demo.py | gevent_demo.py | py | 1,215 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "gevent.sleep",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "gevent.sleep",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "gevent.spawn",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "gevent.spawn",
"line_number... |
40568525715 | import logging
from dcs.point import MovingPoint
from dcs.task import EngageTargets, EngageTargetsInZone, Targets
from game.ato.flightplans.cas import CasFlightPlan
from game.utils import nautical_miles
from .pydcswaypointbuilder import PydcsWaypointBuilder
class CasIngressBuilder(PydcsWaypointBuilder):
    """Builds the CAS ingress waypoint, attaching an engage task for ground targets."""

    def add_tasks(self, waypoint: MovingPoint) -> None:
        """Attach an engagement task scoped to the CAS patrol area.

        With a proper CAS flight plan, targets are engaged only inside a zone
        centered on the patrol track; otherwise fall back to a generic
        search-and-engage within 10 nm.
        """
        if isinstance(self.flight.flight_plan, CasFlightPlan):
            # Midpoint of the patrol track defines the engagement zone center.
            patrol_center = (
                self.flight.flight_plan.layout.patrol_start.position
                + self.flight.flight_plan.layout.patrol_end.position
            ) / 2
            waypoint.add_task(
                EngageTargetsInZone(
                    position=patrol_center,
                    radius=int(self.flight.flight_plan.engagement_distance.meters),
                    targets=[
                        Targets.All.GroundUnits.GroundVehicles,
                        Targets.All.GroundUnits.AirDefence.AAA,
                        Targets.All.GroundUnits.Infantry,
                    ],
                )
            )
        else:
            logging.error("No CAS waypoint found. Falling back to search and engage")
            waypoint.add_task(
                EngageTargets(
                    max_distance=int(nautical_miles(10).meters),
                    targets=[
                        Targets.All.GroundUnits.GroundVehicles,
                        Targets.All.GroundUnits.AirDefence.AAA,
                        Targets.All.GroundUnits.Infantry,
                    ],
                )
            )
| dcs-liberation/dcs_liberation | game/missiongenerator/aircraft/waypoints/casingress.py | casingress.py | py | 1,579 | python | en | code | 647 | github-code | 36 | [
{
"api_name": "pydcswaypointbuilder.PydcsWaypointBuilder",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "dcs.point.MovingPoint",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "game.ato.flightplans.cas.CasFlightPlan",
"line_number": 13,
"usage_type":... |
70719921064 | import numpy as np
from ..patch import Patch
from ..parcel import Parcel
class PropertyDeveloper:
def __init__(self, world, agent_type, view_radius=5, memory=100):
self.world = world
self.view_radius = view_radius
self.memory = memory
self.position = np.random.choice(world.patches.flatten()) # Starts in a random patch of the map
self.dev_sites = [] # TO DO: initialize dev_sites using starting position
self.dev_patches = []
self.considered_patches = []
self.agent_type = agent_type
# Weights for each type of developer
## eh ev epv dw dr di dpk dpr dm x
self.W = {
"r": [.1, .2, 0, .1, .4, 0, 0, .2, 0, 0],
"c": [ 0, .2, 0, .15, .15, 0, 0, 0, .4, 0],
"i": [ 0, .5, 0, .3, 0, .1, 0, .1, 0, 0],
"p": [ 0, 0, .2, .1, .1, 0, .4, 0, 0, .2]
}
def getRegion(self, i, j):
region = self.world.patches[max(1,i-self.view_radius):i+self.view_radius, # Adding padding of 1 patch
max(1, j-self.view_radius):j+self.view_radius].flatten()
region = [p for p in region if p.developable] # Selecting only developable patches (excludes water, etc)
region_parcels = []
region_patches = []
for patch in region:
if (patch.parcel == None):
region_patches.append(patch)
elif ( patch.parcel not in region_parcels and patch.parcel.development_type != self.agent_type): # Excludes parcels of same development type as the agent (TO DO: provisory)
region_parcels.append(patch.parcel)
return region_patches, region_parcels
# Searches for suitable locations in its surroundings
def prospect(self, dev_sites):
i, j = [self.position.i, self.position.j]
region_patches, region_parcels = self.getRegion(i, j)
patch_values, parcel_values = [self.world.patch_values, self.world.parcel_values]
values = patch_values | parcel_values
combined_region = region_patches + region_parcels
if (len(dev_sites)!=0 and (self.last_commit>0 or self.last_relocate>0)): # Move to a seen location
combined_region = combined_region + dev_sites # Searching in union of region and memory
region_values = [values[p][self.agent_type] for p in combined_region]
next_location = combined_region[np.argmax(region_values)]
else: # Relocate globlally
self.last_relocate = 5 # Resets relocation counter
# Selecting all empty patches from the world
world_patches = [p for p in self.world.patches.flatten() if p.undeveloped and p.developable]
world_parcels = [p for p in self.world.parcels if p.development_type != self.agent_type] # TO DO: check if conversion to agent_type is possible
world_sites = world_patches + world_parcels
sorted_idx = np.argsort([values[p][self.agent_type] for p in world_sites])[::-1]
world_sites = np.array(world_sites)[sorted_idx]
# Move to a random site in the top 5 best for the agent
try:
next_location = np.random.choice(world_sites[:5])
except: # No more areas to develop
return []
self.dev_patches = []
self.position = next_location
#region_patches, region_parcels = self.getRegion() # NOTE: maybe have to be updated after relocation
# TO DO / NOTE : Check this implementation better later
dev_parcels = region_parcels
dev_patches = []
for patch in self.dev_patches + region_patches:
if patch not in dev_patches and patch.undeveloped:
dev_patches.append(patch)
dev_patches_sorted_idx = np.argsort([values[p][self.agent_type] for p in dev_patches])[::-1]
dev_patches = np.array(dev_patches)[dev_patches_sorted_idx]
self.dev_patches = list(dev_patches[:int(0.9*len(dev_patches))]) # Selecting only the 90% best patches
dev_sites = list(dev_patches) + dev_parcels
return dev_sites
# Returns True if as build successfully and False otherwise
def build(self, site):
if (isinstance(site, Patch)): # Building in patch is direct
self.considered_patches.append(site)
new_parcel = self.world.createParcel(site, development_type=self.agent_type) # TO DO: expand to create parcels of multiple patches
if (new_parcel == None):
return False
for patch in new_parcel.patches:
# Preventing roads to be built on top of this patch
self.world.addBlockedPatch(patch)
return True
return False
def getScore(self, patch):
i, j = [patch.i, patch.j]
region_patches, region_parcels = self.getRegion(i, j)
eh = patch.get_eh(self.world.patches)
ev, epv = patch.get_ev(self.world.patches)
dpr = patch.get_dpr()
dw = patch.get_dw()
dr = patch.get_dr(region_patches, region_parcels)
dc = patch.get_dc(region_patches, region_parcels)
di = patch.get_di(region_patches, region_parcels)
dm = patch.get_dm(self.world.parcels)
dpk = patch.get_dm(self.world.parcels)
A = [eh, ev, epv, dw, dr, di, dpk, dpr, dm, 0]
W = [self.W['r'], self.W['i'], self.W['i'], self.W['p']]
Vr, Vc, Vi, _= np.dot(W, A)
Vp = (1/Vr + 1/Vc + 1/Vi) * self.W['p'][-1] # Anti-worth
return {'Vr': Vr, 'Vc': Vc, 'Vi': Vi, 'Vp': Vp}
def prospectNew(self):
i, j = [self.position.i, self.position.j]
region_patches, region_parcels = self.getRegion(i, j)
avaliable_patches = self.dev_patches + region_patches # Memory + new
avaliable_patches = [p for p in avaliable_patches if p not in self.considered_patches and self.world.isAccessible(p)]
# No more avaliable patches, relocate globaly
while len(avaliable_patches) == 0:
self.position = np.random.choice(self.world.patches.flatten())
i, j = [self.position.i, self.position.j]
region_patches, region_parcels = self.getRegion(i, j)
avaliable_patches = [p for p in region_patches if p not in self.considered_patches and self.world.isAccessible(p)]
scores = [self.getScore(patch)[self.agent_type] for patch in avaliable_patches]
idx_best = np.argmax(scores)
best_patch = avaliable_patches[idx_best]
#print(f"Selected patch: ({best_patch.i}, {best_patch.j}), undeveloped={best_patch.undeveloped}")
if (self.position == best_patch):
self.build(self.position)
print("Building")
else:
self.position = best_patch
print("Relocating")
return avaliable_patches
def buildNew(self):
avaliable_patches = [p for p in self.world.patches.flatten() if p.developable and p.undeveloped and p not in self.considered_patches]
# Scoring all patches and sorting them by score
scores = [self.getScore(p)[self.agent_type] for p in avaliable_patches]
sorted_idx = np.argsort(scores)[::-1]
sorted_patches = np.array(avaliable_patches)[sorted_idx]
# Triyng to build in the best score avaliable
built = False
for patch in sorted_patches:
# Checks if a patch is accessible
if(self.build(patch)):
break
# Interacts with the environment
def interact(self):
self.dev_patches = self.prospectNew()
self.dev_patches = self.dev_patches[:min(len(self.dev_patches), self.memory)]
# Removing already developed patched from the list
self.dev_patches = [p for p in self.dev_patches if p.developable]
'''
self.build(self.position) # My version
#for site in self.dev_sites: # Paper implementation
# self.build(site)
# Decreases counters
self.last_commit -= 1
self.last_relocate -= 1
'''
#print(f"Current position: {self.position.i}, {self.position.j}") | LFRusso/strabo | strabo/agents/property.py | property.py | py | 8,249 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.random.choice",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "patch.parcel",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "patch.parcel"... |
4472644976 | import pygame as pg
from gui.widgets.animated_widget import AnimatedWidget
from data.constants import *
class BackgroundImage(AnimatedWidget):
def __init__(self, x, y, w, h, image):
super().__init__()
self.pos = x, y
self.image = pg.transform.smoothscale(pg.image.load(image).convert_alpha(), (w, h))
def update(self, dt, animation_state=WAIT, time_elapsed=0.0):
if animation_state == WAIT and self.image.get_alpha() !=255:
self.image.set_alpha(255)
elif animation_state == OPEN:
self.image.set_alpha(round(255 * time_elapsed))
elif animation_state == CLOSE:
self.image.set_alpha(round(255 * (1 - time_elapsed)))
def draw(self, screen, animation_state=WAIT):
screen.blit(self.image, self.pos)
__all__ = ["BackgroundImage"]
| IldarRyabkov/BubbleTanks2 | src/gui/widgets/background_image.py | background_image.py | py | 834 | python | en | code | 37 | github-code | 36 | [
{
"api_name": "gui.widgets.animated_widget.AnimatedWidget",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "pygame.transform.smoothscale",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 11,
"usage_type": "attribute"
... |
23942544871 | """Assorted algorithms to verify end-to-end compiler functionality.
These tests include:
- Sum of array of integers
- Recursive Fibonacci sum
"""
import pytest
import tempfile
import functools
import os
from acctools import compilers
ACC_PATH=os.environ.get("ACC_PATH", os.path.join(os.path.dirname(__file__), "../build/acc"))
COMPILERS = [
compilers.GccCompiler,
functools.partial(compilers.AccIrCompiler, ACC_PATH, regalloc=False),
functools.partial(compilers.AccIrCompiler, ACC_PATH, regalloc=True),
functools.partial(compilers.AccAsmCompiler, ACC_PATH)
]
@pytest.fixture(params=COMPILERS)
def cc(request):
with tempfile.NamedTemporaryFile() as temp_file:
return request.param(temp_file.name)
CALCULATE_SUM = """
int calc_sum(unsigned char * arr, int n)
{
int i = 0, tot = 0;
while(i < n)
{
tot += arr[i++];
}
return tot;
}
int main()
{
unsigned char arr[7];
arr[0] = 1;
arr[1] = 2;
arr[2] = 4;
arr[3] = 8;
arr[4] = 16;
arr[5] = 32;
arr[6] = 64;
return calc_sum(arr, 7);
}
"""
def test_calculate_array_sum(cc):
cc.program(CALCULATE_SUM, returncode=127);
FIBONACCI = """
int fib(int n)
{
if(n == 0) return 1;
if(n == 1) return 1;
return fib(n-1) + fib(n-2);
}
int main()
{
int i = 0, tot = 0;
while(i < 10)
{
tot += fib(i);
i++;
}
return tot;
}
"""
def test_fibonacci(cc):
cc.program(FIBONACCI, returncode=143)
INSERTION_SORT = """
int sort(int * arr, int n)
{
int i = 1;
while(i < n)
{
int t = arr[i];
int j = i - 1;
while((t < arr[j]) & (j != -1))
{
arr[j+1] = arr[j];
j--;
}
arr[j+1] = t;
i++;
}
}
int main()
{
int l[10];
l[0] = 3;
l[1] = 8;
l[2] = 9;
l[3] = 121;
l[4] = 28;
l[5] = 1;
l[6] = 89;
l[7] = 90;
l[8] = 104;
l[9] = 101;
sort(l, 10);
return (l[0] != 1) | (l[9] != 121);
}
"""
def test_sort(cc):
cc.program(INSERTION_SORT) | alexking35h/acc | functional/test_algorithms.py | test_algorithms.py | py | 2,059 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.environ.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_numbe... |
74541806822 | #!/usr/bin/env python
# -*- encoding:utf-8 -*-
import logging
import time
import gzip
import random
from six.moves.urllib.error import URLError
from six.moves.urllib.request import Request, build_opener, HTTPCookieProcessor
from six.moves.urllib.parse import urlencode
from six.moves.http_cookiejar import CookieJar
from six.moves import StringIO
# pysocks
import socks
from sockshandler import SocksiPyHandler
logger = logging.getLogger(__name__)
def get_user_agent(idx=-1):
user_agent = [
'Mozilla/5.0 (X11; Linux x86_64; rv:17.0) Gecko/20130619 Firefox/17.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36',
]
if idx < 0:
idx = random.randint(0, len(user_agent) - 1)
return user_agent[idx]
def url_downloader(url, data=None, path=None, cookie=None,
timeout=5, retry=1, retry_ivl=5,
referer=None, agent=None, proxy=None):
"""Download URL link
url: url to download
data: post data
path: download to local file
timeout: socket timeout
retry: retry times to download url
retry_ivl: interval time when retry
agent: http user agent
proxy: socks5://127.0.0.1:1080
"""
while True:
try:
if isinstance(data, dict):
data = urlencode(data).encode('ascii')
request = Request(url, data=data)
request.add_header('User-Agent', agent or get_user_agent())
if referer:
request.add_header('Referer', referer)
if data:
request.add_header(
'Content-Type',
'application/x-www-form-urlencoded;charset=utf-8')
response = None
handlers = []
if proxy:
scheme, host, port = proxy.split(':')
host = host.strip('/')
proxy_handler = SocksiPyHandler(
socks.PROXY_TYPES[scheme.upper()], host, int(port)
)
handlers.append(proxy_handler)
if cookie is None:
cookie = CookieJar()
cookie_handler = HTTPCookieProcessor(cookie)
handlers.append(cookie_handler)
opener = build_opener(*handlers)
response = opener.open(request, timeout=timeout)
content_encoding = response.info().get('content-encoding')
if content_encoding:
with gzip.GzipFile(fileobj=StringIO(response.read())) as f:
r_data = f.read()
else:
r_data = response.read()
if path:
with open(path, 'wb') as f:
f.write(r_data)
r_data = None
response.close()
mime = response.info().get('content-type')
real_url = response.geturl()
err_msg = 'Ok'
break
except (URLError, IOError, OSError) as err:
response and response.close()
retry -= 1
err_msg = str(err)
if retry > 0:
logger.debug('Error: %s... Try again after %s seconds' % (
retry_ivl, err_msg))
time.sleep(retry_ivl)
retry_ivl += retry_ivl
timeout += timeout
else:
path = mime = r_data = real_url = None
break
return {
'mime': mime,
'path': path,
'data': r_data,
'url': real_url,
'cookie': cookie,
'error': err_msg,
}
| liuyug/utils | network.py | network.py | py | 3,623 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "six.moves.urllib.parse.urlencode",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "six... |
69904891626 | from setuptools import setup
# To use a consistent encoding
from codecs import open
from os import path
with open('README.rst') as f:
long_desc = f.read()
setup(name='glucid',
version='0.5.0',
description='Configure the Lucid 8824 AD/DA Audio Interface via \
a Serial Connection',
url='http://github.com/danmechanic/glucid',
author='Daniel R Mechanic',
author_email='dan.mechanic@gmail.com',
license='GPL V3',
zip_safe=False,
# scripts=['bin/glucid','bin/xglucid'],
entry_points={ # Optional
'console_scripts': [
'glucid=glucid.glucid_cli:main',
'xglucid=glucid.xglucid:main'
],
},
long_description=long_desc,
keywords='lucid 8824 audio converter',
packages=['glucid'],
# py_modules=['glucid.glucid'],
python_requires=">=3",
package_dir={ 'glucid8824' : 'glucid',
'xglucid' : 'glucid',
'Glucid8824_UI' : 'glucid',
'Glucid8824' : 'glucid',
'xglucidUIWidgets' : 'glucid',
},
long_description_content_type='text/x-rst',
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Topic :: Multimedia :: Sound/Audio :: Conversion',
'Topic :: Multimedia :: Sound/Audio :: Capture/Recording',
],
project_urls={
'Author': 'http://www.danmechanic.com/',
'Source': 'https://github.com/danmechanic/glucid/',
},
install_requires=[
'PyQt5>=5.9',
'PySerial',
]
)
| danmechanic/glucid | setup.py | setup.py | py | 1,879 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "codecs.open",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_number": 9,
"usage_type": "call"
}
] |
1710822043 | import logging
import warnings
import torch
import numpy as np
from data import data_utils
from data.ofa_dataset import OFADataset
logger = logging.getLogger(__name__)
warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data", UserWarning)
def collate(samples, pad_idx, eos_idx):
if len(samples) == 0:
return {}
def merge(key):
return data_utils.collate_tokens(
[s[key] for s in samples],
pad_idx,
eos_idx=eos_idx,
)
src_tokens = merge("source")
src_lengths = torch.LongTensor([s["source"].ne(pad_idx).long().sum() for s in samples])
prev_output_tokens = None
target = None
if samples[0].get("target", None) is not None:
target = merge("target")
tgt_lengths = torch.LongTensor(
[s["target"].ne(pad_idx).long().sum() for s in samples]
)
ntokens = tgt_lengths.sum().item()
if samples[0].get("prev_output_tokens", None) is not None:
prev_output_tokens = merge("prev_output_tokens")
else:
ntokens = src_lengths.sum().item()
target_strs = np.array([s["target_str"] for s in samples])
batch = {
"nsentences": len(samples),
"ntokens": ntokens,
"net_input": {
"src_tokens": src_tokens,
"src_lengths": src_lengths,
"prev_output_tokens": prev_output_tokens
},
"target": target,
"target_strs": target_strs
}
return batch
class SummaryDataset(OFADataset):
def __init__(
self,
split,
dataset,
bpe,
src_dict,
tgt_dict=None,
code_dict_size=8192,
num_bins=1000,
max_src_length=512,
max_tgt_length=128,
noise_ratio=0.0,
description='base'
):
super().__init__(split, dataset, bpe, src_dict, tgt_dict)
self.max_src_length = max_src_length
self.max_tgt_length = max_tgt_length
self.code_dict_size = code_dict_size
self.num_bins = num_bins
self.noise_ratio = noise_ratio
self.description = description
if type(bpe).__name__ == 'GPT2BPE':
if self.description == 'base':
self.prompt = ' what is the summary of article " {} "?'
elif self.description == 'tep':
self.prompt = 'Dataset description: Gigaword is a large-scale dataset for natural language processing tasks, such as language modeling and machine translation. It contains over 5 billion words of text, drawn from a variety of sources, including news articles, books, and websites.The annotation process for Gigaword involves collecting text from a variety of sources and ensuring that it is accurately' \
' transcribed and formatted. The text is then divided into smaller units, such as sentences or paragraphs, and annotated with additional information, such as part-of-speech tags or named entity tags. ' \
'Input format: Text' \
'Output format: Text' \
'Output description: summary of input text' \
'prompt: what is the summary of article " {} "? '
elif self.description == 'wiki-tep':
self.prompt = 'Given a document, selecting a subset of the words or sentences which best represents a summary of the document.' \
'Dataset description: Gigaword is a large-scale dataset for natural language processing tasks, such as language modeling and machine translation. It contains over 5 billion words of text, drawn from a variety of sources, including news articles, books, and websites.The annotation process for Gigaword involves collecting text from a variety of sources and ensuring that it is accurately' \
' transcribed and formatted. The text is then divided into smaller units, such as sentences or paragraphs, and annotated with additional information, such as part-of-speech tags or named entity tags. ' \
'Input format: Text' \
'Output format: Text' \
'Output description: summary of input text' \
'prompt: what is the summary of article " {} "? '
elif self.description == 'annotation':
self.prompt = \
'Dataset description: Gigaword is a large-scale dataset for natural language processing tasks, such as language modeling and machine translation. It contains over 5 billion words of text, drawn from a variety of sources, including news articles, books, and websites.The annotation process for Gigaword involves collecting text from a variety of sources and ensuring that it is accurately' \
' transcribed and formatted. The text is then divided into smaller units, such as sentences or paragraphs, and annotated with additional information, such as part-of-speech tags or named entity tags. ' \
'Input format: Text' \
'Output format: Text' \
'Output description: summary of input text' \
'prompt: what is the summary of article " {} "? '
elif self.description == 'wiki':
self.prompt = \
'Given a document, selecting a subset of the words or sentences which best represents a summary of the document.' \
'prompt: what is the summary of article " {} "? '
elif description == 'onehot':
self.prompt = '1000000 {}'
elif type(bpe).__name__ == 'BertBPE':
self.prompt = "{} 请用一个句子简单总结上文:"
def __getitem__(self, index):
source, target = self.dataset[index]
target_str = target.lower()
source = self.pre_caption(source, max_words=self.max_src_length)
target = self.pre_caption(target, max_words=self.max_tgt_length)
source = source.replace('<unk>', 'unk')
target = target.replace('<unk>', 'unk')
src_item = self.encode_text(
self.prompt.format(source),
length=self.max_src_length
)
tgt_item = self.encode_text('{}'.format(target))
noise_tgt_item = self.add_noise_to_tgt(tgt_item.clone(), self.noise_ratio)
src_item = torch.cat([self.bos_item, src_item, self.eos_item])
target_item = torch.cat([tgt_item, self.eos_item])
prev_output_item = torch.cat([self.bos_item, noise_tgt_item])
example = {
"source": src_item,
"target": target_item,
"prev_output_tokens": prev_output_item,
"target_str": target_str
}
return example
def add_noise_to_tgt(self, target, p):
noise_indices = torch.FloatTensor(target.size(0)).uniform_() < p
target[noise_indices] = torch.randint(
4, len(self.src_dict) - self.code_dict_size - self.num_bins, size=(noise_indices.sum(),)
)
return target
def collater(self, samples, pad_to_length=None):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch containing the data of the task
"""
return collate(samples, pad_idx=self.pad, eos_idx=self.eos)
| evdcush/musketeer | data/nlg_data/summary_dataset.py | summary_dataset.py | py | 7,516 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "warnings.filterwarnings",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "data.data_utils.collate_tokens",
"line_number": 18,
"usage_type": "call"
},
{
"api_name"... |
37722788260 | '''
商品详情页面
'''
from common.base import Base
good_url ='http://ecshop.itsoso.cn/goods.php?id=304'
class Buy_Good(Base):
'''页面点击立即购买'''
# 商品名字
good_name_loc=('class name','goods_style_name')
# 商品牌子
good_brand_loc=('css selector','a[href="brand.php?id=20"]')
# 购买数量框
number_loc=('id','number')
# 立即购买框
libuy_loc=('css selector','img[src="themes/default/images/buybtn1.png"]')
# 收藏按钮
collect_loc=('css selector','img[src="themes/default/images/bnt_colles.gif"]')
# 分享按钮
share_loc =('css selector','img[src="themes/default/images/bnt_recommend.gif"]')
# 价格
price_loc=('id','ECS_RANKPRICE_6')
# 前台商品货号
front_good_no_loc=('css selector','li.clearfix:nth-child(1)>dd:nth-child(1)')
# 点击商品牌子
def click_brand(self):
self.click(self.good_brand_loc)
# 购买数量输入
def send_number(self,num):
self.double_click(self.number_loc)
self.send_keys(self.number_loc,num)
self.click(self.price_loc)
# 点击立即购买
def click_libuy(self):
self.click(self.libuy_loc)
# 点击收藏按钮
def click_collect(self):
self.click(self.collect_loc)
# 点击分享按钮
def click_share(self):
self.click(self.share_loc)
# 获取商品名称
def get_good_name(self,locator):
element =self.find_element(locator)
text = element.text
return text
# 前台商品详情页面获取商品货号
def get_front_good_no(self):
element=self.find_element(self.front_good_no_loc)
content =element.text.split(':')
text =content[1] # ECS000304
# print(content) 商品货号:ECS000304
return text
if __name__ == '__main__':
from common.base import open_browser
from time import sleep
driver = open_browser('chrome')
libuy = Buy_Good(driver) # 实例化Buy_Good
libuy.open_url(good_url)
good_name_loc = ('class name', 'goods_style_name')
print(libuy.get_good_name(good_name_loc))
# 前台商品货号
front_good_no_loc = ('css selector', 'li.clearfix:nth-child(1)>dd:nth-child(1)')
num =libuy.get_front_good_no()
print(num)
# sleep(2)
# libuy.send_number(3)
# sleep(3)
#
#
# libuy.click_libuy()
| 15008477526/- | web_aaaaaaaa/page/good_details3.py | good_details3.py | py | 2,403 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "common.base.Base",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "common.base.open_browser",
"line_number": 72,
"usage_type": "call"
}
] |
24481282090 | import logging
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class Logger(object):
__metaclass__ = Singleton
def __init__(self):
self._logger = logging.getLogger('[WikiTopK]')
self._logger.setLevel(logging.INFO)
formatter1 = logging.Formatter('%(name)s - %(message)s')
fh = logging.FileHandler('log/debug.log',mode='w')
fh.setLevel(logging.ERROR)
fh.setFormatter(formatter1)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter1)
self._logger.addHandler(fh)
self._logger.addHandler(ch)
| shalseban/wikiepedia-top-pageviews | src/main/python/logger.py | logger.py | py | 809 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "logging.Formatter",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.FileHan... |
43587647307 | import argparse as _argparse
import os as _os
import sys as _sys
from colorama import Fore as _Fore
from colorama import init as _colorama_init
from contexttimer import Timer as _Timer
from src import merge as _merge
from src import parse as _parse
if __name__ == '__main__':
"""
Example: python merge.py --in pss_api_ios_v0.989.9402_anonymized.json pss_api_steam_v0.991.4_anonymized.json --out examples
"""
# enable Windows support of colors
_colorama_init()
ERR_INPUT_NOT_FOUND = 1
parser = _argparse.ArgumentParser()
parser.add_argument('--in', dest='in_', type=str, nargs='+', required=True, help='Path(s) to the flows file(s) to be merged')
parser.add_argument('--overrides', type=str, required=False, help='Path to the overrides file')
parser.add_argument('--outfile', type=str, required=True, help='Target file path for the merged flows file')
parser.add_argument('--uncompressed', action='store_true', help='Preserve whitespace in the output file')
args = parser.parse_args()
error = False
for file_path in args.in_:
if not _os.path.isfile(file_path):
print(f'{_Fore.RED}ERROR: Specified flows JSON file does not exist: {file_path}{_Fore.RESET}')
if error:
_sys.exit(ERR_INPUT_NOT_FOUND)
with _Timer() as t:
print(f'{_Fore.YELLOW} >>>{_Fore.RESET} Input files:')
for in_ in args.in_:
print(f'{_Fore.YELLOW} >>> -{_Fore.RESET} {in_}')
print(f'{_Fore.YELLOW} >>>{_Fore.RESET} Output file: {args.outfile}')
print(f'{_Fore.YELLOW} >>>{_Fore.RESET} Compressed storage: {"No" if args.uncompressed else "Yes"}')
print(f'{_Fore.BLUE} >>>{_Fore.RESET} Merging parsed flows...')
result = _merge.read_structure_json(args.in_[0])
for merge_with in args.in_[1:]:
result = _merge.merge_api_structures(
result,
_merge.read_structure_json(merge_with)
)
if args.overrides:
overrides = _merge.read_structure_json(args.overrides)
result = _merge.apply_overrides(result, overrides)
_parse.store_structure_json(
args.outfile,
result,
(not args.uncompressed)
)
print(f'{_Fore.BLUE} >>>{_Fore.RESET} Done in {t.elapsed}s')
_sys.exit(0)
| PSS-Tools-Development/pss-api-parser | merge.py | merge.py | py | 2,353 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "colorama.init",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"l... |
8266325142 | import anki
from aqt import mw
import re
col = anki.collection.Collection('C:/Users/clept/AppData/Roaming/Anki2/Iván/collection.anki2')
deck_name = 'Seguridad social test'
search_query = '"deck:' + deck_name + '"'
cards = col.find_cards(search_query)
for card_id in cards:
# Get the card
card = col.get_card(card_id)
# Get the note associated with the card
note = card.note()
# bb = 'd (<a href="https://www.boe.es/buscar/act.php?id=BOE-A-2013-12632#a76">artículo 76.0</a>)'
text = note['Back']
try:
# matches 54.2, 2.b.0, índice
match = re.search(r'\">[a-záéíóú ]*(\d+\.\d+|índice|\d+\.[a-z]\.\d+|\d+)', text)
text_sort = match.group(1)
if text_sort == 'índice':
text_sort = '9999'
text_sort = re.sub('(\d+)(\.[a-z])(\.\d+)', '\\1\\3', text_sort)
if '.' not in text_sort:
text_sort += '.99'
if text_sort[-2] == '.':
text_sort = text_sort[:-1] + '0' + text_sort[-1:]
note['Sort'] = text_sort
except:
note['Sort'] = '9998.99'
note.flush()
# Synchronize the collection to save changes to the Anki database
col.autosave() | IvanDiazCostoya/anki-card-add-sort-field | main.py | main.py | py | 1,195 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "anki.collection.Collection",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "anki.collection",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "re.search",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "re.sub",
... |
34056654418 | import math
import torch
import torch.nn as nn
class PositionalEncoding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=5000):
super().__init__()
self.dropout = nn.Dropout(p=dropout)
position = torch.arange(max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
pe = torch.zeros(max_len, 1, d_model)
pe[:, 0, 0::2] = torch.sin(position * div_term)
pe[:, 0, 1::2] = torch.cos(position * div_term)
self.register_buffer("pe", pe)
def forward(self, x):
x = x + self.pe[: x.size(0)]
return self.dropout(x)
class TransformerNet(nn.Module):
def __init__(
self,
d_model=512,
nhead=8,
num_encoder_layers=6,
num_decoder_layers=6,
dim_feedforward=2048,
dropout=0.1,
batch_size=512,
):
super().__init__()
self.l1 = nn.Linear(1, d_model)
self.l2 = nn.Linear(1, d_model)
self.positional_encoder = PositionalEncoding(d_model, max_len=batch_size)
self.transformer = nn.Transformer(
d_model,
nhead,
num_encoder_layers,
num_decoder_layers,
dim_feedforward,
dropout,
batch_first=True,
)
self.l3 = nn.Linear(d_model, 1)
def forward(self, enc_x, dec_x):
mask = nn.Transformer.generate_square_subsequent_mask(dec_x.shape[-1]).cuda()
enc_x = enc_x.unsqueeze(-1) # [N, x_seq, 1]
enc_x = self.l1(enc_x) # [N, x_seq, d_model]
enc_x = self.positional_encoder(enc_x)
dec_x = dec_x.unsqueeze(-1) # [N, t_seq, 1]
dec_x = self.l2(dec_x) # [N, t_seq, d_model]
dec_x = self.positional_encoder(dec_x)
y = self.transformer(enc_x, dec_x, tgt_mask=mask) # [N, t_seq, d_model]
y = self.l3(y).squeeze(-1) # [N, t_seq]
return y
def test(self, enc_x, dec_x, t_seq):
dec_x = dec_x[:, [0]] # [N, 1]
with torch.no_grad():
encoder = self.transformer.encoder
decoder = self.transformer.decoder
enc_x = enc_x.unsqueeze(-1)
enc_x = self.l1(enc_x)
enc_x = self.positional_encoder(enc_x)
enc_y = encoder(enc_x)
for i in range(1, t_seq + 1):
dec_x2 = dec_x.unsqueeze(-1) # [N, i, 1]
dec_x2 = self.l2(dec_x2) # [N, i, d_model]
dec_x2 = self.positional_encoder(dec_x2) # [N, i, d_model]
y = decoder(dec_x2, enc_y) # [N, i, d_model]
y = self.l3(y).squeeze(-1) # [N, 1]
dec_x = torch.cat([dec_x, y[:, [-1]]], dim=-1) # [N, i + 1]
return dec_x[:, 1:]
class LSTMNet(nn.Module):
def __init__(self, d_model=512, num_layers=1, dropout=0.1, bidirectional=False):
super().__init__()
self.l1 = nn.Linear(1, d_model)
self.l2 = nn.Linear(1, d_model)
self.enc_lstm = nn.LSTM(
d_model, d_model, num_layers, dropout=dropout, bidirectional=bidirectional, batch_first=True
)
self.dec_lstm = nn.LSTM(
d_model, d_model, num_layers, dropout=dropout, bidirectional=bidirectional, batch_first=True
)
self.l3 = nn.Linear(2 * d_model, 1) if bidirectional else nn.Linear(d_model, 1)
def forward(self, enc_x, dec_x):
enc_x = enc_x.unsqueeze(-1) # [N, x_seq, 1]
enc_x = self.l1(enc_x) # [N, x_seq, d_model]
_, hc = self.enc_lstm(enc_x)
dec_x = dec_x.unsqueeze(-1) # [N, t_seq, 1]
dec_x = self.l2(dec_x) # [N, t_seq, d_model]
y, _ = self.dec_lstm(dec_x, hc) # [N, t_seq, d_model]
y = self.l3(y).squeeze(-1) # [N, t_seq]
return y
def test(self, enc_x, dec_x, t_seq):
dec_x = dec_x[:, [0]] # [N, 1]
with torch.no_grad():
enc_x = enc_x.unsqueeze(-1) # [N, x_seq, 1]
enc_x = self.l1(enc_x) # [N, x_seq, d_model]
_, hc = self.enc_lstm(enc_x)
for i in range(1, t_seq + 1):
dec_x2 = dec_x[:, [-1]].unsqueeze(-1) # [N, 1, 1]
dec_x2 = self.l2(dec_x2) # [N, 1, d_model]
y, hc = self.dec_lstm(dec_x2, hc) # [N, i, d_model], ...
y = self.l3(y).squeeze(-1) # [N, 1]
dec_x = torch.cat([dec_x, y[:, [-1]]], dim=-1) # [N, i + 1]
return dec_x[:, 1:]
| yutotom/COVID-19-Forecasts | deep_learning/nets.py | nets.py | py | 4,463 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_num... |
4898894473 | import os
import sys
from PyQt5 import QtGui, QtWidgets
"""
from datetime import datetime,timedelta
from threading import Timer
"""
print('poggo')
class SystemTrayIcon(QtWidgets.QSystemTrayIcon):
    """Tray icon with an Exit menu entry; double-click opens the main window."""

    def __init__(self, icon, parent=None):
        QtWidgets.QSystemTrayIcon.__init__(self, icon, parent)
        self.setToolTip('TimeUp')
        context_menu = QtWidgets.QMenu(parent)
        quit_action = context_menu.addAction('Exit')
        quit_action.triggered.connect(lambda: sys.exit())
        self.setContextMenu(context_menu)
        self.activated.connect(self.trayActivate)

    def trayActivate(self, reason):
        # Only react to a double click; startWindow keeps module-level
        # references alive so Qt does not garbage-collect the window.
        if reason == self.DoubleClick:
            startWindow()
def startWindow():
    """Build, populate and show the main window; return it.

    The window and its UI object are stored in module-level globals so they
    survive after this function returns (otherwise Qt would destroy them).
    """
    from mainmenu4 import Ui_MainWindow
    global daWindow, ui
    daWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(daWindow)
    ui.refreshList()
    daWindow.show()
    return daWindow
def trayMain():
    """Entry point: run the Qt event loop with only the tray icon showing."""
    # Imported for its side effects at startup; the name itself is unused here.
    from mainmenu4 import Ui_MainWindow
    tray_app = QtWidgets.QApplication(sys.argv)
    anchor = QtWidgets.QWidget()
    # Closing a window must not quit the app — the tray icon stays resident.
    tray_app.setQuitOnLastWindowClosed(False)
    icon_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'icon.png')
    tray_icon = SystemTrayIcon(QtGui.QIcon(icon_path), anchor)
    tray_icon.show()
    sys.exit(tray_app.exec_())
if __name__ == '__main__':
    trayMain()
"""
xdate=datetime.today()
ydate=timedelta(0,10)
print (xdate)
print (ydate)
delta_t=xdate + ydate
print (delta_t)
secs= delta_t - xdate
def hello_world():
print ('hello world')
#print (secs)
t = Timer(5,hello_world())
""" | verentino/PU_PDT_TimeUp | tray_old.py | tray_old.py | py | 1,936 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QSystemTrayIcon",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QSystemTrayIcon.__init__",
"line_number": 12,
"usage_type": "call"
... |
35941618047 | from flask import Flask, render_template, url_for, flash, redirect, request, session, make_response
from flask_wtf.file import FileField, FileAllowed
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
from flask_bcrypt import Bcrypt
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField, TextAreaField
from wtforms.validators import DataRequired, Length, ValidationError, EqualTo, Email
from flask_login import LoginManager, UserMixin, login_user, current_user, logout_user, login_required
from PIL import Image
import re
import secrets
import os
from flask import Flask, redirect, url_for
import time
import requests
import json
import pandas as pd
import folium
import urllib.parse
from requests_oauthlib import OAuth1
import tweepy
# --- Application and extension setup ---------------------------------------
app = Flask(__name__)
# SECURITY NOTE(review): the secret key and the database credentials below are
# hard-coded and committed to the repository; they should be loaded from
# environment variables and rotated.
app.config['SECRET_KEY'] = 'ea7b11f0714027a81e7f81404612d80d'
# Earlier database endpoints, kept for reference:
# DB_URL = 'postgresql+psycopg2://jasonjia:227006636@csce-315-db.engr.tamu.edu/SILT_DB'.format(user=POSTGRES_USER,pw=POSTGRES_PW,url=POSTGRES_URL,db=POSTGRES_DB)
# DB_URL1 = 'postgresql://jasonjia:227006636@csce-315-db.engr.tamu.edu:5432/SILT_DB_test'
DB_URL1 = 'postgresql://doadmin:jglyvd028l8ced6h@db-silt-db-do-user-8284135-0.b.db.ondigitalocean.com:25060/defaultdb'
app.config['SQLALCHEMY_DATABASE_URI']=DB_URL1
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # silence the deprecation warning
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
login_manager = LoginManager(app)
# Unauthenticated users hitting @login_required views are sent to 'login'.
login_manager.login_view = 'login'
# 'info' is a Bootstrap alert category used when flashing the login message.
login_manager.login_message_category = 'info'
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login hook: resolve the session's stored id to a User row."""
    return User.query.get(int(user_id))
class User(db.Model, UserMixin):
    """Application account; owns one relationship per forum's post table."""
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(180), unique=True, nullable=False)
    twitter_username = db.Column(db.String(50), unique=True, default=None)
    username = db.Column(db.String(30), unique=True, nullable=False)
    password = db.Column(db.String(), nullable=False)  # bcrypt hash, never plaintext
    user_pic = db.Column(db.String(20), nullable=False, default='default.jpg')
    # One relationship per forum table; all use backref 'author'.
    posts = db.relationship('Post', backref='author', lazy=True)
    posts_ac = db.relationship('Post_ac', backref='author', lazy=True)
    post_h = db.relationship('Post_h', backref='author', lazy=True)
    post_sp = db.relationship('Post_sp', backref='author', lazy=True)
    post_cr = db.relationship('Post_cr', backref='author', lazy=True)
    post_ev = db.relationship('Post_ev', backref='author', lazy=True)
    spotifyartist = db.relationship('SpotifyArtist', backref='author', lazy=True)

    def __init__(self, email, username, password):
        self.email = email
        self.username = username
        self.password = password

    def __repr__(self):
        return f"User('{self.username}', '{self.email}', '{self.user_pic}', '{self.id}')"
class Post(db.Model):
    """A post on the main forum."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(50), nullable=False)
    post_time = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    content = db.Column(db.String, nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)

    def __repr__(self):
        # FIX: original read self.tile, which raised AttributeError whenever
        # a Post was repr'd.
        return f"Post('{self.title}', '{self.post_time}', '{self.content}')"
class Post_ac(db.Model):
    """A post on the academics / study-lounge forum."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(50), nullable=False)
    post_time = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    content = db.Column(db.String, nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)

    def __repr__(self):
        # FIX: original read self.tile (AttributeError); field is 'title'.
        return f"Post('{self.title}', '{self.post_time}', '{self.content}')"
class Post_h(db.Model):
    """A post on the humor forum."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(50), nullable=False)
    post_time = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    content = db.Column(db.String, nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)

    def __repr__(self):
        # FIX: original read self.tile (AttributeError); field is 'title'.
        return f"Post('{self.title}', '{self.post_time}', '{self.content}')"
class Post_sp(db.Model):
    """A post on the sports forum."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(50), nullable=False)
    post_time = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    content = db.Column(db.String, nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)

    def __repr__(self):
        # FIX: original read self.tile (AttributeError); field is 'title'.
        return f"Post('{self.title}', '{self.post_time}', '{self.content}')"
class Post_cr(db.Model):
    """A post on the courses forum."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(50), nullable=False)
    post_time = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    content = db.Column(db.String, nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)

    def __repr__(self):
        # FIX: original read self.tile (AttributeError); field is 'title'.
        return f"Post('{self.title}', '{self.post_time}', '{self.content}')"
class Post_ev(db.Model):
    """A post on the events forum."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(50), nullable=False)
    post_time = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    content = db.Column(db.String, nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)

    def __repr__(self):
        # FIX: original read self.tile (AttributeError); field is 'title'.
        return f"Post('{self.title}', '{self.post_time}', '{self.content}')"
class SpotifyArtist(db.Model):
    """One row per user holding their top Spotify artists.

    artist_name / artist_id are delimiter-joined strings ("',! '" and "', '"
    respectively — see profile()), not normalized rows.
    """
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    artist_name = db.Column(db.String(2000), nullable=False)
    artist_id = db.Column(db.String(2000), nullable=False)

    def __repr__(self):
        return f"SpotifyArtist('{self.artist_name}', '{self.artist_id}')"
# do not change this
# from form import account, LoginForm, update_account, PostForm, spotify_profile
####################
## FORMS ##
####################
class account(FlaskForm):
    """Registration form (class name kept lowercase — templates and routes
    reference it as-is)."""
    username = StringField('Username', validators=[DataRequired(), Length(min=2, max=30)])
    email = StringField('Email', validators=[DataRequired(), Length(min=6), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    confirmed_password = PasswordField('Confirm Password', validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Sign Up')

    # WTForms calls validate_<fieldname> hooks automatically.
    def validate_username(self, username):
        """Reject usernames that already exist."""
        if User.query.filter_by(username=username.data).first():
            raise ValidationError('Username is taken, Please choose a new one')

    def validate_email(self, email):
        """Reject e-mail addresses that already exist."""
        if User.query.filter_by(email=email.data).first():
            raise ValidationError('Email is taken, Please choose a new one')
class LoginForm(FlaskForm):
    """Login form.

    NOTE(review): 'remeber' is a typo of 'remember' but is part of the form's
    public field set (templates and login() use form.remeber), so it is kept.
    """
    email = StringField('Email', validators=[DataRequired(), Length(min=6)])
    password = PasswordField('Password', validators=[DataRequired()])
    remeber = BooleanField('Remember Me')
    submit = SubmitField('Login')
class update_account(FlaskForm):
    """Profile-edit form: username, e-mail, optional new avatar image."""
    username = StringField('Username', validators=[DataRequired(), Length(min=2, max=30)])
    email = StringField('Email', validators=[DataRequired(), Length(min=6), Email()])
    picture = FileField('Update Your Picture', validators=[FileAllowed(['jpg', 'png'])])
    submit = SubmitField('Update')

    def validate_username(self, username):
        """Allow keeping one's own name; reject anyone else's."""
        if username.data != current_user.username:
            if User.query.filter_by(username=username.data).first():
                raise ValidationError('Username is taken, Please choose a new one')

    def validate_email(self, email):
        """Allow keeping one's own address; reject anyone else's."""
        if email.data != current_user.email:
            existing = User.query.filter_by(email=email.data).first()
            if existing:
                raise ValidationError('Email is taken, Please choose a new one')
class PostForm(FlaskForm):
    """Forum post form with an optional cross-post-to-Twitter checkbox."""
    title = StringField('Title', validators=[DataRequired()])
    content = TextAreaField('Content', validators=[DataRequired()])
    submit = SubmitField('Post')
    tweet = BooleanField('Post On Twitter')
class spotify_profile(FlaskForm):
    """Minimal form carrying one Spotify artist name/id pair."""
    artist_name = StringField('Artist', validators=[DataRequired()])
    artist_id = StringField('Artist_ID', validators=[DataRequired()])
########################
## END FORMS ##
########################
@app.route("/", methods=['GET', 'POST'])
@app.route("/home", methods=['GET', 'POST'])
def home():
    """Landing page: list every post on the main forum."""
    return render_template("home.html", posts=Post.query.all())
# @app.route("/funny")
# def funny():
# return render_template("funny.html")
#
#
@app.route("/Events", methods=['GET', 'POST'])
def eve():
    """Events forum listing."""
    return render_template("Events.html", posts=Post_ev.query.all())
@app.route("/funny", methods=['GET', 'POST'])
def fun():
    """Humor forum listing."""
    return render_template("funny.html", posts=Post_h.query.all())
@app.route("/studyLounge", methods=['GET', 'POST'])
def study_lounge():
    """Study-lounge (academics) forum listing."""
    return render_template("studylounge.html", posts=Post_ac.query.all())
@app.route("/sports", methods=['GET', 'POST'])
def sports():
    """Sports forum listing."""
    return render_template("sports.html", posts=Post_sp.query.all())
@app.route("/course", methods=['GET', 'POST'])
def course():
    """Courses forum listing."""
    return render_template("course.html", posts=Post_cr.query.all())
@app.route('/profile/<username>')
def user_profile(username):
    """Public profile page for *username*, with their saved Spotify artists."""
    data = User.query.filter_by(username=username).first()
    # FIX: the original dereferenced data.id unconditionally, so an unknown
    # username crashed with AttributeError instead of rendering anything.
    if data is None:
        return str(username)
    spotify_data = SpotifyArtist.query.filter_by(user_id=data.id).first()
    artistArr = []
    if spotify_data is not None:
        # 31 = 30 ',! '-joined artist names (3 time ranges x 10) + trailing
        # empty piece; [20:-1] selects the long-term 10 — TODO confirm intent.
        if len(spotify_data.artist_name.split(',! ')) == 31:
            artistArr = spotify_data.artist_name.split(',! ')[20:-1]
        return render_template("user_profile.html", posts=data, art=artistArr, len=len(artistArr))
    return str(username)
@app.route("/resources")
def resources():
    """Static resources page."""
    return render_template("resources.html")
def save_image(form_picture):
    """Store an uploaded picture under static/image as a 125x125 thumbnail.

    The file gets a random hex name (collision-unlikely) which is returned so
    the caller can record it on the User row.
    """
    _, ext = os.path.splitext(form_picture.filename)
    picture_name = secrets.token_hex(8) + ext
    target_path = os.path.join(app.root_path, 'static/image', picture_name)
    thumb = Image.open(form_picture)
    thumb.thumbnail((125, 125))
    thumb.save(target_path)
    return picture_name
@app.route("/profile", methods=['GET', 'POST'])
@login_required
def profile(artists=None, artist_ids=None):
    """Show and update the logged-in user's profile.

    When called directly from the Spotify callback, `artists`/`artist_ids`
    carry nested top-artist lists (one inner list per time range) that are
    serialized into a SpotifyArtist row for the current user.
    """
    # FIX: the original used mutable default arguments (artists=[], ...).
    artists = [] if artists is None else artists
    artist_ids = [] if artist_ids is None else artist_ids
    if len(artists) != 0:
        artists_string = ""
        artists_id_string = ""
        for i in range(len(artists)):
            for j in range(len(artists[0])):
                artists_string += artists[i][j] + ",! "
                artists_id_string += artist_ids[i][j] + ", "
        spo = SpotifyArtist(artist_name=artists_string, artist_id=artists_id_string, author=current_user)
        db.session.add(spo)
        db.session.commit()
    form = update_account()
    if form.validate_on_submit():
        if form.picture.data:
            current_user.user_pic = save_image(form.picture.data)
        current_user.username = form.username.data
        current_user.email = form.email.data
        db.session.commit()
        flash('You account is updated! ', 'success')
        # Redirect so a refresh does not re-submit the form.
        return redirect(url_for('profile'))
    elif request.method == 'GET':
        form.username.data = current_user.username
        form.email.data = current_user.email
    image_file = url_for('static', filename='image/' + current_user.user_pic, width=100)
    dbArtists = SpotifyArtist.query.filter_by(user_id=current_user.id).first()
    artistArr = []
    # 31 pieces = 3 time ranges x 10 names + trailing empty piece; [20:-1]
    # keeps the long-term 10 — TODO confirm intent.
    if dbArtists is not None and len(dbArtists.artist_name.split(',! ')) == 31:
        artistArr = dbArtists.artist_name.split(',! ')[20:-1]
    return render_template("profile.html", title='Profile', image_file=image_file,
                           form=form, leng=len(artistArr), posts=artistArr)
@app.route("/register", methods=['GET','POST'])
def register():
    """Create a new account; logged-in users are bounced to the home page."""
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = account()
    if form.validate_on_submit():
        # Only the bcrypt hash is stored, never the plaintext password.
        pw_hash = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
        user = User(username=form.username.data, email=form.email.data, password=pw_hash)
        db.session.add(user)
        db.session.commit()
        flash(f'Account created! You can now log in! ', 'success')
        return redirect(url_for('login'))
    return render_template('register.html', title='Register', form=form)
@app.route("/login", methods=['GET', 'POST'])
def login():
    """Authenticate by e-mail + password, honouring the ?next= redirect."""
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user and bcrypt.check_password_hash(user.password, form.password.data):
            login_user(user, remember=form.remeber.data)
            next_page = request.args.get('next')
            return redirect(next_page) if next_page else redirect(url_for('home'))
        flash('Login not successful. Please check your password and email.', 'danger')
    return render_template('login.html', title='Login', form=form)
@app.route("/logout")
def logout():
    """End the Flask-Login session and return to the home page."""
    logout_user()
    return redirect(url_for('home'))
# Kill switch for the Twitter cross-posting branch inside new_post(); it is
# never set to True anywhere in this file's visible portion.
# NOTE(review): the name is a typo of "global_true"; renaming it would require
# changing new_post() in the same commit, so it is kept as-is here.
gloabal_true = False
@app.route("/post/new", methods=['GET', 'POST'])
@login_required
def new_post():
    """Create a post on the main forum, optionally cross-posting to Twitter."""
    form = PostForm()
    # NOTE(review): gloabal_true is a module-level flag that is never set to
    # True in this file's visible portion, so this branch looks dead — confirm.
    if gloabal_true == True:
        # SECURITY NOTE(review): hard-coded OAuth consumer credentials —
        # should be moved to configuration / environment variables.
        twitter_consumer_key = "bw5c7K2tzsceOlgenVFDRnogU"
        twitter_consumer_secret = "CTXbMs9vFwFCdYrM2CGkVsSsLl53LpO43FNeAwTcX5zukDg36m"
        token_url = 'https://api.twitter.com/1.1/statuses/update.json'
        token_secret = session["twitter_secret"]
        access_token = session["twitter_token"]
        print("Auth: ")
        print(access_token, token_secret)
        if form.tweet.data == True:
            print("it is true")
            auth = tweepy.OAuthHandler(twitter_consumer_key, twitter_consumer_secret)
            auth.set_access_token(access_token, token_secret)
            api = tweepy.API(auth)
            api.update_status(form.content.data)
    if form.validate_on_submit():
        post = Post(title=form.title.data, content=form.content.data, author=current_user)
        db.session.add(post)
        db.session.commit()
        flash('Your post has been created', 'success')
        return redirect(url_for('home'))
    return render_template('create_post.html', title='Forum', form=form)
@app.route("/post/new/ac", methods=['GET', 'POST'])
@login_required
def new_post_ac():
    """Create a post on the academics / study-lounge forum."""
    form = PostForm()
    # FIX: original compared the BooleanField object itself
    # (`form.tweet == True`), which is always False; the field's value
    # lives in .data (see the sibling new_post()).
    if form.tweet.data == True:
        flash("make a tweet", 'success')
    if form.validate_on_submit():
        post = Post_ac(title=form.title.data, content=form.content.data, author=current_user)
        print(request.form.get('mycheckbox'))
        db.session.add(post)
        db.session.commit()
        flash('Your post has been created', 'success')
        return redirect(url_for('home'))
    return render_template('create_post.html', title='Forum', form=form)
@app.route("/post/new/h", methods=['GET', 'POST'])
@login_required
def new_post_h():
    """Create a post on the humor forum."""
    form = PostForm()
    # FIX: `form.tweet == True` compared the field object (always False);
    # the value is form.tweet.data.
    if form.tweet.data == True:
        flash("make a tweet", 'success')
    if form.validate_on_submit():
        post = Post_h(title=form.title.data, content=form.content.data, author=current_user)
        print(request.form.get('mycheckbox'))
        db.session.add(post)
        db.session.commit()
        flash('Your post has been created', 'success')
        return redirect(url_for('home'))
    return render_template('create_post.html', title='Forum', form=form)
@app.route("/post/new/sp", methods=['GET', 'POST'])
@login_required
def new_post_sp():
    """Create a post on the sports forum."""
    form = PostForm()
    # FIX: `form.tweet == True` compared the field object (always False);
    # the value is form.tweet.data.
    if form.tweet.data == True:
        flash("make a tweet", 'success')
    if form.validate_on_submit():
        post = Post_sp(title=form.title.data, content=form.content.data, author=current_user)
        print(request.form.get('mycheckbox'))
        db.session.add(post)
        db.session.commit()
        flash('Your post has been created', 'success')
        return redirect(url_for('home'))
    return render_template('create_post.html', title='Forum', form=form)
@app.route("/post/new/ev", methods=['GET', 'POST'])
@login_required
def new_post_ev():
    """Create a post on the events forum."""
    form = PostForm()
    # FIX: `form.tweet == True` compared the field object (always False);
    # the value is form.tweet.data.
    if form.tweet.data == True:
        flash("make a tweet", 'success')
    if form.validate_on_submit():
        post = Post_ev(title=form.title.data, content=form.content.data, author=current_user)
        print(request.form.get('mycheckbox'))
        db.session.add(post)
        db.session.commit()
        flash('Your post has been created', 'success')
        return redirect(url_for('home'))
    return render_template('create_post.html', title='Forum', form=form)
@app.route("/post/new/cr", methods=['GET', 'POST'])
@login_required
def new_post_cr():
    """Create a post on the courses forum."""
    form = PostForm()
    # FIX: `form.tweet == True` compared the field object (always False);
    # the value is form.tweet.data.
    if form.tweet.data == True:
        flash("make a tweet", 'success')
    if form.validate_on_submit():
        post = Post_cr(title=form.title.data, content=form.content.data, author=current_user)
        print(request.form.get('mycheckbox'))
        db.session.add(post)
        db.session.commit()
        flash('Your post has been created', 'success')
        return redirect(url_for('home'))
    return render_template('create_post.html', title='Forum', form=form)
# oauth = OAuth(app)
#
# twitter = oauth.remote_app('twitter',
# consumer_key = 'bw5c7K2tzsceOlgenVFDRnogU',
# consumer_secret='CTXbMs9vFwFCdYrM2CGkVsSsLl53LpO43FNeAwTcX5zukDg36m',
# base_url='https://api.twitter.com/1.1/',
# request_token_url='https://api.twitter.com/oauth/request_token',
# access_token_url='https://api.twitter.com/oauth/access_toke',
# authorize_url='https://api.twitter.com/oauth/authorize'
# )
# DELETE this
@app.route('/twitter_login')
def twitterPostForRequestToken():
    """Step 1 of Twitter's 3-legged OAuth 1.0a flow.

    Requests a temporary token, then redirects the browser to Twitter's
    authorize page with that token appended.
    """
    request_url = 'https://api.twitter.com/oauth/request_token'
    twitter_redirect_url = "http%3A%2F%2Fsilt-tamu.herokuapp.com%2Ftwitter_callback"
    # SECURITY NOTE(review): hard-coded consumer credentials — move to config.
    twitter_consumer_key = "bw5c7K2tzsceOlgenVFDRnogU"
    twitter_consumer_secret = "CTXbMs9vFwFCdYrM2CGkVsSsLl53LpO43FNeAwTcX5zukDg36m"
    oauth1 = OAuth1(twitter_consumer_key, twitter_consumer_secret)
    headers = {'oauth_callback': twitter_redirect_url, 'oauth_consumer_key': twitter_consumer_key}
    post_response = requests.post(request_url, auth=oauth1)
    # The response body looks like b'oauth_token=...&oauth_token_secret=...&...'
    raw_body = vars(post_response).get('_content')
    pieces = str(raw_body)[2:].split('&')
    oauth_token = pieces[0]          # kept as the full 'oauth_token=...' pair
    oauth_token_secret = pieces[1]
    return redirect("https://api.twitter.com/oauth/authorize?" + oauth_token)
# 200 code indicates access token was properly granted
# if post_response.status_code == 200:
# json = post_response.json()
# return json['access_token'], json['refresh_token'], json['expires_in']
# else:
# print("LOGGING: " + 'getToken:' + str(post_response.status_code))
# # logging.error('getToken:' + str(post_response.status_code))
# return None
# https://yourCallbackUrl.com?oauth_token=NPcudxy0yU5T3tBzho7iCotZ3cnetKwcTIRlX0iwRl0&oauth_verifier=uw7NjWHT6OJ1MpJOXsHfNxoAhPKpgI8BlYDhxEjIBY
@app.route('/twitter_callback')
def twitter_callback():
    """Step 2 of the Twitter OAuth flow: exchange the verifier for tokens.

    The long-lived access token/secret are stashed in the Flask session and
    later read by new_post() when cross-posting.
    """
    # Callback URL shape: ...?oauth_token=<token>&oauth_verifier=<verifier>
    query_pieces = request.url.split('=')[1:]
    token = query_pieces[0].split('&')[0]
    verifier = query_pieces[1]
    request_url = 'https://api.twitter.com/oauth/access_token'
    # SECURITY NOTE(review): hard-coded consumer credentials — move to config.
    twitter_consumer_key = "bw5c7K2tzsceOlgenVFDRnogU"
    twitter_consumer_secret = "CTXbMs9vFwFCdYrM2CGkVsSsLl53LpO43FNeAwTcX5zukDg36m"
    oauth1 = OAuth1(twitter_consumer_key, client_secret=twitter_consumer_secret,
                    resource_owner_key=token, verifier=verifier)
    post_response = requests.post(request_url, auth=oauth1)
    # Body looks like b'oauth_token=...&oauth_token_secret=...&...'
    raw_body = vars(post_response).get('_content')
    pieces = str(raw_body)[2:].split('&')
    oauth_token = pieces[0].split('=')[1]
    oauth_token_secret = pieces[1].split('=')[1]
    print(oauth_token, oauth_token_secret)
    print("tokens:")
    print(oauth_token, oauth_token_secret)
    # Replace any previously stored token pair.
    session.pop('twitter_token', None)
    session.pop('twitter_secret', None)
    session['twitter_token'] = oauth_token
    session['twitter_secret'] = oauth_token_secret
    session.modified = True
    return redirect('/')
##############################
# Spotify section
##############################
# --- Spotify API configuration ---------------------------------------------
# SECURITY NOTE(review): the client id/secret below are committed in plain
# text; they should live in environment variables and be rotated.
CLI_ID = "035c861c44084c46bf08f93efed2bb4c"
CLI_SEC = "18cba64539fc4c39894f8b17b4e78b6e"
API_BASE = 'https://accounts.spotify.com'
REDIRECT_URI = "http://silt-tamu.herokuapp.com/api_callback"
# OAuth scopes requested during /spotify_authorize.
SCOPE = 'playlist-modify-private,playlist-modify-public,user-top-read, user-library-read'
# Set this to True for testing but you probaly want it set to False in production.
SHOW_DIALOG = True
@app.route("/spotify_authorize")
def authorize():
    """Redirect the browser to Spotify's authorization page (OAuth step 1)."""
    query = ('response_type=code&client_id=' + CLI_ID +
             '&redirect_uri=' + REDIRECT_URI + '&scope=' + SCOPE)
    response = make_response(redirect('https://accounts.spotify.com/en/authorize?' + query))
    print("response")
    return response
"""
Called after a new user has authorized the application through the Spotify API page.
Stores user information in a session and redirects the user back to the page they
initially attempted to visit.
"""
@app.route('/api_callback')
def callback():
    """Spotify OAuth callback: exchange the code, cache tokens, load artists."""
    if request.args.get('error'):
        # NOTE(review): error path falls through with no return value,
        # which Flask turns into a 500 — same as the original behavior.
        print("Error: Spotify error")
    else:
        code = request.args.get('code')
        payload = getToken(code)
        if payload != None:
            session['token'] = payload[0]
            session['refresh_token'] = payload[1]
            session['token_expiration'] = time.time() + payload[2]
        else:
            return "Failed to access token"
        # Renamed from `current_user` to avoid shadowing flask_login's
        # imported name inside this function.
        spotify_user = getUserInformation(session)
        print("CURRENT USER:", spotify_user)
        session['user_id'] = spotify_user['id']
        print("LOGGING: " + 'new user:' + session['user_id'])
        artist_names, artist_ids = getAllTopArtists(session)
        time_range = ['short_term', 'medium_term', 'long_term']
        print("\nright before printing track_ids")
        # Render the profile page directly with the freshly fetched artists.
        return profile(artists=artist_names, artist_ids=artist_ids)
def getToken(code):
    """Exchange an authorization code for Spotify tokens.

    Returns (access_token, refresh_token, expires_in) on success, None on
    any non-200 response.
    """
    token_url = 'https://accounts.spotify.com/api/token'
    headers = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded'}
    body = {'code': code, 'redirect_uri': REDIRECT_URI, 'grant_type': 'authorization_code',
            'client_id': CLI_ID, 'client_secret': CLI_SEC}
    post_response = requests.post(token_url, headers=headers, data=body)
    # 200 code indicates access token was properly granted
    if post_response.status_code == 200:
        payload = post_response.json()   # renamed: original shadowed the json module
        return payload['access_token'], payload['refresh_token'], payload['expires_in']
    print("LOGGING: " + 'getToken:' + str(post_response.status_code))
    return None
"""
Makes a GET request with the proper headers. If the request succeeds, the json parsed
response is returned. If the request fails because the access token has expired, the
check token function is called to update the access token.
Returns: Parsed json response if request succeeds or None if request fails
"""
def makeGetRequest(session, url, params={}):
    """GET *url* with the session's bearer token.

    On 401, tries to refresh the token and retries once (recursively).
    Returns the parsed JSON, or None on other failures.
    NOTE(review): checkTokenStatus is not defined in this file's visible
    portion — confirm it exists elsewhere in the module.
    """
    headers = {"Authorization": "Bearer {}".format(session['token'])}
    response = requests.get(url, headers=headers, params=params)
    if response.status_code == 200:
        return response.json()
    if response.status_code == 401 and checkTokenStatus(session) != None:
        return makeGetRequest(session, url, params)
    return None
def getUserInformation(session):
    """Return the Spotify /v1/me profile payload, or None on failure."""
    # makeGetRequest already yields None on failure, so pass it through.
    return makeGetRequest(session, 'https://api.spotify.com/v1/me')
"""
Gets the top tracks of a user for all three time intervals. Used to display the top
tracks on the TopTracks feature page.
Returns: A list of tracks IDs for each of the three time intervals
"""
def getAllTopTracks(session, limit=10):
    """Fetch the user's top-track ids for all three Spotify time ranges.

    Returns a list of three lists of track ids, or None on any failed call.
    """
    url = 'https://api.spotify.com/v1/me/top/tracks'
    track_ids = []
    # Loop variable renamed from `time` — the original shadowed the imported
    # time module inside this function.
    for term in ('short_term', 'medium_term', 'long_term'):
        payload = makeGetRequest(session, url, {'limit': limit, 'time_range': term})
        if payload == None:
            return None
        track_ids.append([track['id'] for track in payload['items']])
    return track_ids
def getAllTopArtists(session, limit=10):
    """Fetch the user's top-artist names and ids for all three time ranges.

    Returns (names, ids): two parallel lists of three per-range lists,
    or None on any failed call.
    """
    url = 'https://api.spotify.com/v1/me/top/artists'
    artist_names = []
    artist_ids = []
    # Loop variable renamed from `time` (shadowed the time module); the
    # original's unused `track_range_ids` local has been dropped.
    for term in ('short_term', 'medium_term', 'long_term'):
        payload = makeGetRequest(session, url, {'limit': limit, 'time_range': term})
        if payload == None:
            return None
        artist_names.append([artist['name'] for artist in payload['items']])
        artist_ids.append([artist['id'] for artist in payload['items']])
    return artist_names, artist_ids
##############################
# Yelp API Section #
##############################
""" END POINTS """
# Business Search URL -- 'https://api.yelp.com/v3/businesses/search'
# Phone Search URL -- 'https://api.yelp.com/v3/businesses/search/phone'
# Transaction Search URL -- 'https://api.yelp.com/v3/transactions/{transaction_type}/search'
# Business Details URL -- 'https://api.yelp.com/v3/businesses/{id}'
# Business Match URL -- 'https://api.yelp.com/v3/businesses/matches'
# Reviews URL -- 'https://api.yelp.com/v3/businesses/{id}/reviews'
# Autocomplete URL -- 'https://api.yelp.com/v3/autocomplete'
# --- Yelp Fusion API configuration -----------------------------------------
# SECURITY NOTE(review): the API key is committed in plain text; move it to an
# environment variable and rotate it.
API_KEY = 'nTM36O5k4QpcgkccZVAMhP8U4BxpO68EYzIA7KPXpRmnT31qUK49B7sfYQ2uA2_uzGRr94oA9aIxdD4PyIa0hyaXIccmnOGCVQ2tMJg4s3-a24CLE3syjaMHsqWRX3Yx'
ENDPOINT_PREFIX = 'https://api.yelp.com/v3/'
HEADERS = {'Authorization': 'bearer %s' % API_KEY}
EMPTY_RESPONSE = json.dumps('')
# render popular locations webpage / make yelp API calls with user input for 'term' key
@app.route("/popular_locations", methods=['GET'])
def popular_locations():
# get user input from html form
term = request.args.get('searchInput', None)
# Check if user inputted a term
if term == None:
print("No term provided for business search, return nothing.")
# Define Business Search paramters
parameters = {
'location': 'College Station, TX',
'radius': 15000,
'term': term,
'sort_by': 'best_match',
'limit': 50
}
# Make request to Yelp API
url = ENDPOINT_PREFIX + 'businesses/search'
response = requests.get(url, params = parameters, headers = HEADERS)
# Check for good status code - if so, get JSON response and populate map
if response.status_code == 200:
print('Got 200 for business search')
# Try/catch for invalid user input for 'term': key-value
try:
# Convert JSON string to dictionary
businessSearchData = response.json()
# Create dataframe from API response (businesses, list of dictionaries)
dFrame = pd.DataFrame.from_dict(businessSearchData['businesses'])
# YELP MAP - RESTAURANTS MARKED
# Get latitude and longitude from Yelp API response
cStatLat = 30.627977
cStatLong = -96.334404
# Generate base map of college station
yelpMap = folium.Map(location = [cStatLat, cStatLong], zoom_start = 13)
# Generate map of restaurants - Iterate through dataframe and add business markers
for row in dFrame.index:
latLong = dFrame['coordinates'][row]
latitude = latLong['latitude']
longitude = latLong['longitude']
name = dFrame['name'][row]
rating = dFrame['rating'][row]
price = dFrame['price'][row]
location = dFrame['location'][row]
# Get address-1 from Location dictionary
for loc in location.keys():
if loc == 'address1':
address = location[loc]
# Create popup message for pin
details = ('{}' + '<br><br>' + 'Address: {}' + '<br>' + 'Price: {}' + '<br>' + 'Rating: {}/5').format(name, address, price, rating)
# Resize popup pin
test = folium.Html(details, script = True)
popup = folium.Popup(test, max_width = 300, min_width = 300)
# Create and business marker to map
marker = folium.Marker(location = [latitude, longitude], popup = popup, icon = folium.Icon(color = "darkred"))
marker.add_to(yelpMap)
# Display map on webpage
yelpMap.save('./templates/yelpMap.html')
except KeyError:
print('ERROR: User input provided an invalid key-value.')
flash(f'There was an error with your input.', 'danger')
return redirect(url_for('popular_locations'))
else:
print('Received non-200 response({}) for business search, returning empty response'.format(response.status_code))
return EMPTY_RESPONSE
return render_template('popularLocations.html', businessData = dFrame, isBusinessDataEmpty = dFrame.empty)
@app.route("/yelp_map")
def yelp_map():
return render_template('yelpMap.html')
@app.route("/empty_yelp_map")
def empty_yelp_map():
return render_template('./templates/blank_yelpMap.html')
if __name__ == '__main__':
    # Development entry point; never run with debug=True in production.
    app.run(debug=True)
| infknight/SILT | app.py | app.py | py | 37,771 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "flask_bcrypt.Bcrypt",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "flask_log... |
35735822351 | import re
import webbrowser
import markdown
import dominate
from dominate.util import raw
from dominate.tags import *
from argparse import ArgumentParser
import shutil
import tempfile
import json
import os
from logging import *
import time
import bs4
import base64
from urllib.parse import unquote_plus
basicConfig(level="DEBUG")
class TriliumPdfExporter:
    """Converts a zipped Trilium Markdown export into one HTML page.

    The generated page is opened in the default browser so the user can
    print or save it as a PDF.
    """

    # Note types that are skipped entirely during conversion.
    EXCLUDE = ["file"]

    def __init__(self, source: str, motd: str) -> None:
        # source: path to the exported .zip archive.
        # motd: optional message rendered under the main title (may be None).
        self.source: str = source
        self.motd: str = motd
        self.md = markdown.Markdown(extensions=["extra", "pymdownx.tilde"])
        # Maps path tuples (see _pathtuple) to note ids or data URIs;
        # used later to rewrite internal links.
        self.idmap = {}
        # Holds the TemporaryDirectory once export() runs.
        self.tempdir = None
        self.meta = {}

    def _extract(self):
        """Unpack the source zip into a fresh temporary directory."""
        tempdir = tempfile.TemporaryDirectory()
        shutil.unpack_archive(self.source, tempdir.name)
        return tempdir

    def _pathtuple(self, path):
        """Return every path suffix of *path* as a tuple, longest first.

        E.g. "a/b/c" -> ("a/b/c", "b/c", "c"); used for fuzzy matching
        of relative links against known note locations.
        """
        fullpath = unquote_plus(path).split(os.sep)
        pathparts = []
        while len(fullpath) > 0:
            pathparts.append(os.sep.join(fullpath))
            del fullpath[0]
        return tuple(pathparts)

    def _util_parse_meta_children(self, children: list, current: str) -> list:
        """Recursively normalize the children entries of !!!meta.json."""
        out = []
        for c in children:
            if c["type"] not in self.EXCLUDE:
                if "dataFileName" in c.keys():
                    # Remember where this note's data lives for link fixup.
                    parts = self._pathtuple(
                        os.path.join(current, c["dataFileName"]))
                    self.idmap[tuple(parts)] = c["noteId"]
                out.append(
                    {
                        "title": c["title"],
                        "id": c["noteId"],
                        "type": c["type"],
                        "mime": c["mime"] if "mime" in c.keys() else None,
                        "source": c["dataFileName"]
                        if "dataFileName" in c.keys()
                        else None,
                        "path": c["dirFileName"] if "dirFileName" in c.keys() else None,
                        "content": None,
                        "children": self._util_parse_meta_children(
                            c["children"],
                            os.path.join(
                                current,
                                c["dirFileName"] if "dirFileName" in c.keys() else "",
                            ),
                        )
                        if "children" in c.keys()
                        else [],
                    }
                )
        return out

    def _analyze_metadata(self):
        """Load !!!meta.json and build the normalized note tree.

        Raises:
            SystemExit: when the metadata file is missing or not valid
                JSON (non-zero exit code to signal failure; the
                original exited 0 here, masking the error).
        """
        if not os.path.exists(os.path.join(self.tempdir.name, "!!!meta.json")):
            critical("Failed to load: !!!meta.json file missing.")
            raise SystemExit(1)
        with open(os.path.join(self.tempdir.name, "!!!meta.json"), "r") as f:
            try:
                raw = json.load(f)
            except json.JSONDecodeError:
                critical("Failed to load: !!!meta.json is bad JSON")
                raise SystemExit(1)
        self.idmap[("",)] = "root"
        out = {
            "title": f"Exported Notes: {time.strftime('%m / %d / %Y')}",
            "id": "root",
            "type": "book",
            "mime": None,
            "source": None,
            "path": "",
            "content": None,
            "children": self._util_parse_meta_children(raw["files"], ""),
        }
        return out

    def _convert_to_html(self, item: dict, current: str, top: bool = False):
        """Render one note (and recursively its children) to dominate tags.

        Args:
            item: normalized note dict from _analyze_metadata.
            current: directory of the note relative to the extraction root.
            top: True only for the root node (renders the motd).
        """
        if top:
            # Root node: show the optional message of the day.
            content = div(self.motd if self.motd else "",
                          _class="note-content")
        else:
            content = ""
        if item["source"]:
            if item["source"].endswith(".md"):
                with open(
                    os.path.join(self.tempdir.name, current,
                                 item["source"]), "r"
                ) as f:
                    debug(f"Parsing {item['source']}")
                    # Normalize Trilium's escaped TeX delimiters to $...$
                    raw_md = f.read().replace("\\\\(", "$").replace("\\\\)", "$")
                    # Double up ~x~ spans so pymdownx.tilde keeps them intact.
                    for k in re.findall("~.*?~", raw_md):
                        raw_md = raw_md.replace(k, "~" + k + "~")
                    # Demote h1 headings to h5 so note titles dominate.
                    content = div(
                        raw(
                            self.md.convert(
                                raw_md,
                            ).replace("h1", "h5")
                        ),
                        _class="note-content",
                    )
                    item["content"] = content
            elif item["type"] == "canvas":
                with open(os.path.join(self.tempdir.name, current, item["source"]), "r") as f:
                    debug(f"Parsing canvase {item['source']}")
                    # Canvas notes store a ready-made SVG; embed inline.
                    svg = json.load(f)["svg"]
                    content = div(
                        img(
                            src=f"data:image/svg+xml;base64,{base64.b64encode(svg.encode('utf-8')).decode('utf-8')}",
                            _class="svg"
                        ),
                        _class="note-content note-svg"
                    )
                    item["content"] = content
            else:
                # Any other attachment: embed as a base64 data URI.
                with open(
                    os.path.join(self.tempdir.name, current,
                                 item["source"]), "rb"
                ) as f:
                    item["content"] = "data:{};base64,{}".format(
                        item["mime"] if item["mime"] else "text/plain",
                        base64.b64encode(f.read()).decode("utf-8"),
                    )
                    self.idmap[
                        self._pathtuple(os.path.join(
                            current, item["source"]))
                    ] = item["content"]
        head = div(
            h2(item["title"]) if item["type"] == "book" else h4(item["title"]),
            _class="note-header",
            id=item["id"],
        )
        children = div(_class="note-children")
        for c in item["children"]:
            try:
                children += self._convert_to_html(
                    c, os.path.join(
                        current, item["path"] if item["path"] else "")
                )
            except ValueError:
                warning("Experienced tag creation error, skipping")
        return div(head, content, children, _class="note")

    def _generate_html(self):
        """Assemble the full document: KaTeX assets, styling, note tree."""
        document = dominate.document(
            title=f"Exported Notes: {time.strftime('%m / %d / %Y')}"
        )
        with document.head:
            link(
                rel="stylesheet",
                href="https://cdn.jsdelivr.net/npm/katex@0.16.0/dist/katex.min.css",
                integrity="sha384-Xi8rHCmBmhbuyyhbI88391ZKP2dmfnOl4rT9ZfRI7mLTdk1wblIUnrIq35nqwEvC",
                crossorigin="anonymous",
            )
            script(
                defer=True,
                src="https://cdn.jsdelivr.net/npm/katex@0.16.0/dist/katex.min.js",
                integrity="sha384-X/XCfMm41VSsqRNQgDerQczD69XqmjOOOwYQvr/uuC+j4OPoNhVgjdGFwhvN02Ja",
                crossorigin="anonymous",
            )
            script(
                defer=True,
                src="https://cdn.jsdelivr.net/npm/katex@0.16.0/dist/contrib/auto-render.min.js",
                integrity="sha384-+XBljXPPiv+OzfbB3cVmLHf4hdUFHlWNZN5spNQ7rmHTXpd7WvJum6fIACpNNfIR",
                crossorigin="anonymous",
                onload="console.log(renderMathInElement(document.body, {delimiters: [{left: '$', right: '$', display: false}]}));",
            )
            style(
                """
                .note-children {
                    padding-left: 8px;
                    border-left: 2px solid #dddddd;
                }
                img {
                    display: block;
                }
                .note-content.note-svg {
                    display: block;
                    width: 90%;
                    height: auto;
                    box-sizing: border-box;
                    padding: 8px;
                    border: 2px solid #dddddd;
                    margin-left: 4px;
                    background-color: white;
                }
                .note-content.note-svg img {
                    display: inline-block;
                    height: auto;
                    width: 100%;
                }
                """
            )
        document += self._convert_to_html(self.meta, "", top=True)
        return document

    def _resolve_link(self, path):
        """Strip '..' segments from relative links; pass URLs through."""
        if not re.match("^[a-z]*?://.*", path):
            path = os.path.join(
                *[i for i in path.split(os.sep) if not i == ".."])
            return path
        else:
            return path

    def _resolve_links(self):
        """Rewrite internal <a> hrefs to #note-id anchors and internal
        <img> srcs to the embedded data URIs collected in idmap."""
        soup = bs4.BeautifulSoup(self.doc, "html.parser")
        for l in soup.find_all("a"):
            if re.match("^[a-z]*?://.*", l["href"]):
                continue
            lnk = self._resolve_link(unquote_plus(l["href"]))
            key = self._pathtuple(lnk)
            # Default to the document root when no note matches.
            l["href"] = "#root"
            for k in self.idmap.keys():
                if any([x in k for x in key]):
                    l["href"] = "#" + self.idmap[k]
        for i in soup.find_all("img"):
            if re.match("^[a-z]*?://.*", i["src"]) or i["src"].startswith("data:"):
                continue
            lnk = self._resolve_link(unquote_plus(i["src"]))
            key = self._pathtuple(lnk)
            i["src"] = ""
            for k in self.idmap.keys():
                if any([x in k for x in key]):
                    i["src"] = self.idmap[k]
        return str(soup)

    def export(self, preserve=False) -> str:
        """Run the full pipeline and open the result in a browser.

        Args:
            preserve: when False, the source zip is deleted afterwards.
        """
        info("Extracting zip file into temporary directory...")
        self.tempdir = self._extract()
        info("Analyzing export metadata")
        self.meta = self._analyze_metadata()
        self.doc = self._generate_html().render()
        self.doc = self._resolve_links()
        with tempfile.NamedTemporaryFile("r+", suffix=".html") as f:
            f.write(self.doc)
            f.flush()
            webbrowser.open(f"file://{f.name}")
            # Give the browser a moment to load before the file vanishes.
            time.sleep(1)
        info("Cleaning up...")
        self.tempdir.cleanup()
        if not preserve:
            os.remove(self.source)
if __name__ == "__main__":
parser = ArgumentParser(
description="Parse a compressed MD export of Trilium notes, then convert to a web page for easy download"
)
parser.add_argument(
"source", metavar="S", type=str, help="Path to source .zip file."
)
parser.add_argument(
"-p",
"--preserve",
help="Whether to preserve the source zip file. Defaults to false.",
action="store_true",
)
parser.add_argument(
"-m",
"--motd",
type=str,
help="Message to display under main title",
default=None,
)
args = parser.parse_args()
exporter = TriliumPdfExporter(args.source, args.motd)
exporter.export(preserve=args.preserve)
| iTecAI/trilium-tools | pdf-export/trilium_to_pdf.py | trilium_to_pdf.py | py | 11,045 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "markdown.Markdown",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "tempfile.TemporaryDirectory",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "shutil.unpack_archive",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "u... |
11916959214 | from django import forms
from .models import Comment ,Blog,Category
class BlogForm(forms.ModelForm):
    """Blog create/edit form with Bootstrap-styled widgets."""

    # Present the categories alphabetically in the dropdown.
    category = forms.ModelChoiceField(
        queryset=Category.objects.all().order_by('name'))

    class Meta:
        model = Blog
        fields = ['title', 'featured_image', 'content', 'category']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Apply the same Bootstrap class and element id to every widget.
        for _field_name, bound_field in self.fields.items():
            bound_field.widget.attrs.update({'class': 'form-control'})
            bound_field.widget.attrs.update({'id': 'form3Example1c'})
class NewCommentForm(forms.ModelForm):
    """Comment form exposing only the content field."""

    class Meta:
        model = Comment
        fields = ["content"]
        # Render the text input with the Bootstrap form-control class.
        widgets = {
            "content": forms.TextInput(attrs={"class": "form-control"}),
        }
| minarefaat1002/blog_website | blogs project/blog/forms.py | forms.py | py | 804 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelChoiceField",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "d... |
5967270915 | # Sun Oct 27 15:40:29 2019
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
# Configurations
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.size'] = 20
mpl.rcParams['font.weight'] = 'medium'
mpl.rcParams['font.style'] = 'normal'
mpl.rcParams['font.serif'] = 'DejaVu Serif'
mpl.rcParams['mathtext.fontset'] = 'stix'
mpl.rcParams['mathtext.fallback_to_cm'] = True
mpl.rcParams['lines.linewidth'] = 2
mpl.rcParams['savefig.dpi'] = 300
mpl.rcParams['savefig.bbox'] = 'tight'
###################### thermophysical prop ##################
# Liquid (L) and solid wall (S) properties: specific heats, densities,
# convective coefficient h, and thermal conductivities kappa.
cL = 4183.0
cS = 700.0
rhoL = 994.0 # kg/m3
rhoS = 7850.0 # kg/m3
h = 12000.0 # W/m2 K
kappaL = 0.682
kappaS = 1.3
###################### constants ############################
# Inlet temperatures of the hot and cold streams.
# NOTE(review): values look like degrees Celsius; plot labels say K — confirm.
TH0 = 45
TC0 = 10
# Concentric-tube geometry: inner, wall, and outer radii (m).
Ri = 0.05 # inner radius
Rs = 0.06
Ro = 0.08 # outer radius
pi = np.pi
# Cross-sectional areas: cold channel (Ac), wall annulus (As), hot annulus (Ah).
Ac = pi * Ri**2
As = pi * Rs**2 - Ac
Ah = pi * Ro**2 - pi * Rs**2
L = 6.0 # length
# Lateral exchange surfaces: hot/wall interface (S1) and wall/cold interface (S2).
S1 = L * pi * 2 * Rs
S2 = L * pi * 2 * Ri
# Stream velocities (m/s) and the resulting volumetric flow rates.
uh = 0.3
uc = 0.3
Qh = uh * Ah
Qc = uc * Ac
# Time discretization: 0 to 200 s with step dt.
dt = 0.01
t = np.arange(0, 200+dt, dt)
# Outlet-temperature histories for the hot (Th2) and cold (Tc2) streams.
Th2 = np.zeros(len(t))
Tc2 = np.zeros(len(t))
Th2[0] = TH0
Tc2[0] = TC0
# Axial discretization: nx cells of width dx, coordinates at cell centres.
nx = 50
dx = L / nx
x = np.arange(dx/2, L+dx/2, dx)
# Temperature fields: hot stream (TH), wall (TS), cold stream (TC).
TH = np.zeros(nx)
TS = np.zeros(nx)
TC = np.zeros(nx)
TH[:] = TH0
TS[:] = 0.5 * (TH0 + TC0)
TC[:] = TC0
# Previous-time-step copies used by the explicit Euler update below.
TH_prev = TH.copy()
TS_prev = TS.copy()
TC_prev = TC.copy()
# Inlet-temperature histories; the hot inlet steps up by 10 at t = 100 s.
Th1 = np.zeros(len(t))
Tc1 = np.zeros(len(t))
Th1[:] = TH0
Tc1[:] = TC0
idx = int(100/dt)
Th1[idx::] = TH0 + 10
Tc1[idx::] = TC0
# Per-cell exchange surfaces.
Si1 = S1 / nx
Si2 = S2 / nx
# March the coupled hot/wall/cold energy equations forward with explicit Euler.
# The cold stream flows opposite to the hot one, hence the mirrored wall index
# (nx-ix-1) in the wall/cold coupling terms.
for it in range(len(t)-1):
    # --- inlet boundary cells (index 0) ---
    # Hot channel: advection from the inlet, convection to the wall, conduction.
    rhs = Qh * cL * rhoL * (Th1[it] - TH_prev[0]) - h * Si1 * (TH_prev[0] - TS_prev[0])
    rhs = rhs - kappaL * Ah * (TH_prev[0] - TH_prev[1]) / dx
    temp = rhs * dt / (cL * rhoL * Ah * dx)
    TH[0] = TH_prev[0] + temp
    # Wall: convective exchange with both streams plus axial conduction.
    rhs = h * Si1 * (TH_prev[0] - TS_prev[0]) - h * Si2 * (TS_prev[0] - TC_prev[-1])
    rhs = rhs - kappaS * As * (TS_prev[0] - TS_prev[1]) / dx
    temp = rhs * dt / (cS * rhoS * As * dx)
    TS[0] = TS_prev[0] + temp
    # Cold channel inlet cell.
    rhs = Qc * cL * rhoL * (Tc1[it] - TC_prev[0]) + h * Si2 * (TS_prev[-1] - TC_prev[0])
    rhs = rhs - kappaL * Ac * (TC_prev[0] - TC_prev[1]) / dx
    temp = rhs * dt / (cL * rhoL * Ac * dx)
    TC[0] = TC_prev[0] + temp
    # --- interior cells ---
    for ix in range(1,nx-1):
        rhs = Qh * cL * rhoL * (TH_prev[ix-1] - TH_prev[ix]) - h * Si1 * (TH_prev[ix] - TS_prev[ix])
        rhs = rhs + kappaL * Ah * (TH_prev[ix-1] - TH_prev[ix]) / dx - kappaL * Ah * (TH_prev[ix] - TH_prev[ix+1]) / dx
        temp = rhs * dt / (cL * rhoL * Ah * dx)
        TH[ix] = TH_prev[ix] + temp
        rhs = h * Si1 * (TH_prev[ix] - TS_prev[ix]) - h * Si2 * (TS_prev[ix] - TC_prev[nx-ix-1])
        rhs = rhs + kappaS * As * (TS_prev[ix-1] - TS_prev[ix]) / dx - kappaS * As * (TS_prev[ix] - TS_prev[ix+1]) / dx
        temp = rhs * dt / (cS * rhoS * As * dx)
        TS[ix] = TS_prev[ix] + temp
        # Fix: the original mixed TC[ix] into this rhs; at this point
        # TC[ix] still equals TC_prev[ix] (not yet updated this step),
        # so using TC_prev is numerically identical and consistent with
        # the rest of the scheme.
        rhs = Qc * cL * rhoL * (TC_prev[ix-1] - TC_prev[ix]) + h * Si2 * (TS_prev[nx-ix-1] - TC_prev[ix])
        rhs = rhs + kappaL * Ac * (TC_prev[ix-1] - TC_prev[ix]) / dx - kappaL * Ac * (TC_prev[ix] - TC_prev[ix+1]) / dx
        temp = rhs * dt / (cL * rhoL * Ac * dx)
        TC[ix] = TC_prev[ix] + temp
    # --- outlet boundary cells (index nx-1) ---
    rhs = Qh * cL * rhoL * (TH_prev[-2] - TH_prev[-1]) - h * Si1 * (TH_prev[-1] - TS_prev[-1])
    rhs = rhs + kappaL * Ah * (TH_prev[-2] - TH_prev[-1]) / dx
    temp = rhs * dt / (cL * rhoL * Ah * dx)
    TH[-1] = TH_prev[-1] + temp
    rhs = h * Si1 * (TH_prev[-1] - TS_prev[-1]) - h * Si2 * (TS_prev[-1] - TC_prev[0])
    rhs = rhs + kappaS * As * (TS_prev[-2] - TS_prev[-1]) / dx
    temp = rhs * dt / (cS * rhoS * As * dx)
    TS[-1] = TS_prev[-1] + temp
    rhs = Qc * cL * rhoL * (TC_prev[-2] - TC_prev[-1]) + h * Si2 * (TS_prev[0] - TC_prev[-1])
    rhs = rhs + kappaL * Ac * (TC_prev[-2] - TC_prev[-1]) / dx
    temp = rhs * dt / (cL * rhoL * Ac * dx)
    TC[-1] = TC_prev[-1] + temp
    # Commit this time step: current fields become "previous" for the next one.
    for j in range(0,nx):
        TH_prev[j] = TH[j]
        TS_prev[j] = TS[j]
        TC_prev[j] = TC[j]
    # Record the outlet temperatures for the time-history plot.
    Th2[it+1] = TH[-1]
    Tc2[it+1] = TC[-1]
# Plot 1: inlet/outlet temperature histories for both streams.
fig = plt.figure(figsize = (10,7))
ax = plt.subplot(111)
ax.plot(t, Th1, label = 'Hot in')
ax.plot(t, Tc1, label = 'Cold in')
ax.plot(t, Th2, label = 'Hot out')
ax.plot(t, Tc2, label = 'Cold out')
ax.set_xlabel('Time (s)')
# NOTE(review): temperatures appear to be in °C (45/10 inlets) — confirm label.
ax.set_ylabel('Temperature (K)')
ax.legend(loc=0)
plt.show()
# Plot 2: final axial temperature profiles. The cold profile is reversed
# so both streams are shown against the same x coordinate (counter-flow).
fig = plt.figure(figsize = (10,7))
ax = plt.subplot(111)
ax.plot(x, TH, label = 'Hot')
ax.plot(x, TS, label = 'Surface')
ax.plot(x, TC[::-1], label='Cold')
#ax.set_xlim(0,6)
ax.set_xlabel('X (m)')
ax.set_ylabel('Temperature (K)')
ax.legend(loc=0)
plt.show()
| NingDaoguan/JI | PO6007-MSTPS/HW/HW3.py | HW3.py | py | 4,599 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.rcParams",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name"... |
43354784770 | import json
from typing import Any, List
import numpy as np
import torch
from mmhuman3d.core.conventions.cameras import (
convert_cameras,
convert_K_3x3_to_4x4,
convert_K_4x4_to_3x3,
)
class CameraParameter:
    """One camera's intrinsic/extrinsic parameters and distortion
    coefficients, with (de)serialization helpers and conversion between
    the opencv and pytorch3d camera conventions."""

    def __init__(self,
                 name: str = 'default',
                 H: int = 1080,
                 W: int = 1920) -> None:
        """
        Args:
            name (str, optional):
                Name of this camera. Defaults to "default".
            H (int, optional):
                Height of a frame, in pixel. Defaults to 1080.
            W (int, optional):
                Width of a frame, in pixel. Defaults to 1920.
        """
        self.name = name
        self.parameters_dict = {}
        # Intrinsic matrix, rotation and translation start as zeros.
        in_mat = __zero_mat_list__(3)
        self.parameters_dict['in_mat'] = in_mat
        for distort_name in __distort_coefficient_names__:
            self.parameters_dict[distort_name] = 0.0
        self.parameters_dict['H'] = H
        self.parameters_dict['W'] = W
        r_mat = __zero_mat_list__(3)
        self.parameters_dict['rotation_mat'] = r_mat
        t_list = [0.0, 0.0, 0.0]
        self.parameters_dict['translation'] = t_list

    def reset_distort(self):
        """Reset all distort coefficients to zero."""
        for distort_name in __distort_coefficient_names__:
            self.parameters_dict[distort_name] = 0.0

    def get_opencv_distort_mat(self):
        """Get a numpy array of 8 distort coefficients, which is the
        distCoeffs arg of cv2.undistort.

        Returns:
            ndarray:
                (k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6) of 8 elements.
        """
        dist_coeffs = [
            self.get_value('k1'),
            self.get_value('k2'),
            self.get_value('p1'),
            self.get_value('p2'),
            self.get_value('k3'),
            self.get_value('k4'),
            self.get_value('k5'),
            self.get_value('k6'),
        ]
        dist_coeffs = np.array(dist_coeffs)
        return dist_coeffs

    def set_K_R_T(self,
                  K_mat: np.ndarray,
                  R_mat: np.ndarray,
                  T_vec: np.ndarray,
                  inverse_extrinsic: bool = False) -> None:
        """Set intrinsic and extrinsic of a camera.

        Args:
            K_mat (np.ndarray):
                In shape [3, 3].
            R_mat (np.ndarray):
                Rotation from world to view in default. In shape [3, 3].
            T_vec (np.ndarray):
                Translation from world to view in default. In shape [3,].
            inverse_extrinsic (bool, optional):
                If true, R_mat and T_vec transform a point from view to
                world and are inverted before storing. Defaults to False.
        """
        k_shape = K_mat.shape
        assert k_shape[0] == k_shape[1] == 3
        r_shape = R_mat.shape
        assert r_shape[0] == r_shape[1] == 3
        assert T_vec.ndim == 1 and T_vec.shape[0] == 3
        self.set_mat_np('in_mat', K_mat)
        if inverse_extrinsic:
            R_mat = np.linalg.inv(R_mat)
            T_vec = -np.dot(R_mat, T_vec).reshape((3))
        self.set_mat_np('rotation_mat', R_mat)
        self.set_value('translation', T_vec.tolist())

    def set_mat_np(self, mat_key: str, mat_numpy: np.ndarray) -> None:
        """Set a matrix-type parameter to mat_numpy.

        Args:
            mat_key (str):
                Key of the target matrix: in_mat or rotation_mat.
            mat_numpy (ndarray):
                Matrix in numpy format.

        Raises:
            KeyError: mat_key not in self.parameters_dict
        """
        if mat_key not in self.parameters_dict:
            raise KeyError(mat_key)
        else:
            self.parameters_dict[mat_key] = mat_numpy.tolist()

    def set_mat_list(self, mat_key: str, mat_list: List[list]) -> None:
        """Set a matrix-type parameter to mat_list.

        Args:
            mat_key (str):
                Key of the target matrix: in_mat or rotation_mat.
            mat_list (List[list]):
                Matrix in nested-list format.

        Raises:
            KeyError: mat_key not in self.parameters_dict
        """
        if mat_key not in self.parameters_dict:
            raise KeyError(mat_key)
        else:
            self.parameters_dict[mat_key] = mat_list

    def set_value(self, key: str, value: Any) -> None:
        """Set a parameter to value.

        Args:
            key (str): Name of the parameter.
            value (object): New value of the parameter.

        Raises:
            KeyError: key not in self.parameters_dict
        """
        if key not in self.parameters_dict:
            raise KeyError(key)
        else:
            self.parameters_dict[key] = value

    def get_value(self, key: str) -> Any:
        """Get a parameter by key.

        Args:
            key (str): Name of the parameter.

        Raises:
            KeyError: key not in self.parameters_dict

        Returns:
            object: Value of the parameter.
        """
        if key not in self.parameters_dict:
            raise KeyError(key)
        else:
            return self.parameters_dict[key]

    def get_mat_np(self, key: str) -> Any:
        """Get a matrix-type parameter by key as a (3, 3) numpy array.

        Args:
            key (str): Name of the parameter.

        Raises:
            KeyError: key not in self.parameters_dict

        Returns:
            ndarray: the stored matrix reshaped to (3, 3).
        """
        if key not in self.parameters_dict:
            raise KeyError(key)
        else:
            mat_list = self.parameters_dict[key]
            mat_np = np.array(mat_list).reshape((3, 3))
            return mat_np

    def to_string(self) -> str:
        """Convert self.to_dict() to a string.

        Returns:
            str: A dict in json string format.
        """
        dump_dict = self.to_dict()
        ret_str = json.dumps(dump_dict)
        return ret_str

    def to_dict(self) -> dict:
        """Dump camera name and parameters to dict.

        Returns:
            dict: self.name and self.parameters_dict in one dict.
        """
        dump_dict = self.parameters_dict.copy()
        dump_dict['name'] = self.name
        return dump_dict

    def dump(self, json_path: str) -> None:
        """Dump camera name and parameters to a json file at json_path."""
        dump_dict = self.to_dict()
        with open(json_path, 'w') as f_write:
            json.dump(dump_dict, f_write)

    def load(self, json_path: str) -> None:
        """Load camera name and parameters from a file."""
        with open(json_path, 'r') as f_read:
            dumped_dict = json.load(f_read)
        self.load_from_dict(dumped_dict)

    def load_from_dict(self, json_dict: dict) -> None:
        """Load name and parameters from a dict.

        Args:
            json_dict (dict):
                A dict comes from self.to_dict() or from a chessboard
                parse ('rotation'/'translation' keys are normalized).
        """
        for key in json_dict.keys():
            if key == 'name':
                self.name = json_dict[key]
            elif key == 'rotation':
                self.parameters_dict['rotation_mat'] = np.array(
                    json_dict[key]).reshape(3, 3).tolist()
            elif key == 'translation':
                self.parameters_dict[key] = np.array(json_dict[key]).reshape(
                    (3)).tolist()
            else:
                self.parameters_dict[key] = json_dict[key]
                if '_mat' in key:
                    self.parameters_dict[key] = np.array(
                        self.parameters_dict[key]).reshape(3, 3).tolist()

    def load_from_chessboard(self,
                             chessboard_dict: dict,
                             name: str,
                             inverse: bool = True) -> None:
        """Load name and parameters from a chessboard-calibration dict.

        Args:
            chessboard_dict (dict):
                A dict loaded from json.load(chessboard_file).
            name (str):
                Name of this camera.
            inverse (bool, optional):
                Whether to inverse rotation and translation mat.
                Defaults to True.
        """
        camera_param_dict = \
            __parse_chessboard_param__(chessboard_dict, name, inverse=inverse)
        self.load_from_dict(camera_param_dict)

    def load_from_vibe(self,
                       vibe_camera,
                       name: str,
                       batch_index: int = 0) -> None:
        """Load name and parameters from a VIBE weak-perspective camera.

        Args:
            vibe_camera (mmhuman3d.core.cameras.
                    cameras.WeakPerspectiveCamerasVibe):
                An instance holding batched K/R/T.
            name (str):
                Name of this camera.
            batch_index (int, optional):
                Which batch entry to read. Defaults to 0.
        """
        height = self.parameters_dict['H']
        width = self.parameters_dict['W']
        k_4x4 = vibe_camera.K[batch_index:batch_index + 1]  # shape (1, 4, 4)
        r_3x3 = vibe_camera.R[batch_index:batch_index + 1]  # shape (1, 3, 3)
        t_3 = vibe_camera.T[batch_index:batch_index + 1]  # shape (1, 3)
        new_K, new_R, new_T = convert_cameras(
            K=k_4x4,
            R=r_3x3,
            T=t_3,
            is_perspective=False,
            convention_src='pytorch3d',
            convention_dst='opencv',
            resolution_src=(height, width),
            resolution_dst=(height, width))
        k_3x3 = \
            convert_K_4x4_to_3x3(new_K, is_perspective=False)
        # Fix: the original discarded the result of numpy().squeeze(0),
        # storing a batched (1, 3, 3) tensor; assign it so a plain (3, 3)
        # matrix is stored, matching r_3x3 and t_3 below.
        k_3x3 = k_3x3.numpy().squeeze(0)
        r_3x3 = new_R.numpy().squeeze(0)
        t_3 = new_T.numpy().squeeze(0)
        self.name = name
        self.set_mat_np('in_mat', k_3x3)
        self.set_mat_np('rotation_mat', r_3x3)
        self.set_value('translation', t_3.tolist())

    def get_vibe_dict(self) -> dict:
        """Get a dict of camera parameters, which contains all necessary
        args for mmhuman3d.core.cameras.cameras.WeakPerspectiveCamerasVibe().
        Use WeakPerspectiveCamerasVibe(**return_dict) to construct a camera.

        Returns:
            dict: K, R and T tensors in pytorch3d convention.
        """
        height = self.parameters_dict['H']
        width = self.parameters_dict['W']
        k_3x3 = self.get_mat_np('in_mat')  # shape (3, 3)
        k_3x3 = np.expand_dims(k_3x3, 0)  # shape (1, 3, 3)
        k_4x4 = convert_K_3x3_to_4x4(
            K=k_3x3, is_perspective=False)  # shape (1, 4, 4)
        rotation = self.get_mat_np('rotation_mat')  # shape (3, 3)
        rotation = np.expand_dims(rotation, 0)  # shape (1, 3, 3)
        translation = self.get_value('translation')  # list, len==3
        translation = np.asarray(translation)
        translation = np.expand_dims(translation, 0)  # shape (1, 3)
        new_K, new_R, new_T = convert_cameras(
            K=k_4x4,
            R=rotation,
            T=translation,
            is_perspective=False,
            convention_src='opencv',
            convention_dst='pytorch3d',
            resolution_src=(height, width),
            resolution_dst=(height, width))
        new_K = torch.from_numpy(new_K)
        new_R = torch.from_numpy(new_R)
        new_T = torch.from_numpy(new_T)
        ret_dict = {
            'K': new_K,
            'R': new_R,
            'T': new_T,
        }
        return ret_dict
def __parse_chessboard_param__(chessboard_camera_param, name, inverse=True):
    """Parse a dict loaded from a chessboard file into another dict needed
    by CameraParameter.

    Args:
        chessboard_camera_param (dict):
            A dict loaded from json.load(chessboard_file).
        name (str):
            Name of this camera.
        inverse (bool, optional):
            Whether to inverse rotation and translation mat.
            Defaults to True.

    Returns:
        dict:
            A dict of parameters in CameraParameter.to_dict() format.
    """
    param = {}
    param['H'] = chessboard_camera_param['imgSize'][1]
    param['W'] = chessboard_camera_param['imgSize'][0]
    param['in_mat'] = chessboard_camera_param['K']
    # Chessboard calibrations carry no distortion: zero every coefficient.
    for coeff_key in ('k1', 'k2', 'k3', 'k4', 'k5', 'p1', 'p2'):
        param[coeff_key] = 0
    param['name'] = name
    param['rotation'] = chessboard_camera_param['R']
    param['translation'] = chessboard_camera_param['T']
    if inverse:
        # Invert the extrinsics: R' = R^-1, T' = -R' @ T.
        rmatrix = np.linalg.inv(
            np.array(param['rotation']).reshape(3, 3))
        param['rotation'] = rmatrix.tolist()
        tmatrix = np.array(param['translation']).reshape((3, 1))
        tvec = -np.dot(rmatrix, tmatrix)
        param['translation'] = tvec.reshape((3)).tolist()
    return param
# Ordered distortion coefficient keys (OpenCV convention: k1..k6 radial,
# p1/p2 tangential) used to initialize and export CameraParameter dicts.
__distort_coefficient_names__ = [
'k1', 'k2', 'k3', 'k4', 'k5', 'k6', 'p1', 'p2'
]
def __zero_mat_list__(n=3):
    """Return an n-by-n zero matrix in nested-list format.

    Args:
        n (int, optional):
            Length of the edge. Defaults to 3.

    Returns:
        list:
            List[List[int]] filled with zeros; rows are independent lists.
    """
    return [[0 for _column in range(n)] for _row in range(n)]
| hanabi7/point_cloud_smplify | mmhuman3d/core/cameras/camera_parameter.py | camera_parameter.py | py | 13,594 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
... |
17978578215 | # 导入操作系统库
import os
# Change the working directory so relative output paths resolve.
os.chdir(r"D:\softwares\applied statistics\pythoncodelearning\chap3\sourcecode")
# Import the plotting library.
import matplotlib.pyplot as plt
# Import the support vector machine model.
from sklearn import svm
# Import the decision-boundary visualization tool.
from sklearn.inspection import DecisionBoundaryDisplay
# Import the dataset generation tool.
from sklearn.datasets import make_blobs
# Import the font-manager package from matplotlib.
from matplotlib import font_manager
# Enable correct rendering of Chinese characters.
font = font_manager.FontProperties(fname=r"C:\Windows\Fonts\SimKai.ttf")
# Plot with the seaborn style.
plt.style.use("seaborn-v0_8")
# Generate samples: a large cluster and a small one (imbalanced classes).
n_samples_1 = 1000
n_samples_2 = 100
centers = [[0.0, 0.0], [2.0, 2.0]]
clusters_std = [1.5, 0.5]
# Classification data.
X, y = make_blobs(
n_samples=[n_samples_1, n_samples_2], # per-cluster sample counts
centers=centers, # cluster centers
cluster_std=clusters_std, # cluster standard deviations
random_state=0,
shuffle=False,
)
# Linear SVM model.
clf = svm.SVC(kernel="linear", C=1.0)
# Fit the model.
clf.fit(X, y)
# Class-weighted SVM model (minority class weighted 10x).
wclf = svm.SVC(kernel="linear", class_weight={1: 10})
# Fit the model.
wclf.fit(X, y)
# Start plotting.
fig, ax = plt.subplots(figsize=(6,6), tight_layout=True)
# Draw the scatter points.
ax.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired, edgecolors="k")
# Draw the decision boundary of the plain SVM.
disp = DecisionBoundaryDisplay.from_estimator(
clf,
X,
plot_method="contour",
colors="k",
levels=[0],
alpha=0.5,
linestyles=["--"],
ax=ax
)
# Draw the decision boundary of the weighted SVM.
wdisp = DecisionBoundaryDisplay.from_estimator(
wclf,
X,
plot_method="contour",
colors="r",
levels=[0],
alpha=0.5,
linestyles=["-"],
ax=ax
)
# Add the legend.
ax.legend(
[disp.surface_.collections[0], wdisp.surface_.collections[0]],
["non weighted", "weighted"],
loc="upper right",
)
plt.show()
fig.savefig("../codeimage/code4.pdf")
| AndyLiu-art/MLPythonCode | chap3/sourcecode/Python4.py | Python4.py | py | 1,925 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "os.chdir",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "matplotlib.font_manager.FontProperties",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.font_manager",
"line_number": 16,
"usage_type": "name"
},
{
"api_name":... |
9190017437 | # imports
# scipy/anaconda imports
import pandas
from scipy import stats
import numpy
# python standard library imports
import math
import statistics
import copy
import collections
import time
nan = float("nan")
def fit_line(x_data, y_data):
    """Least-squares linear fit of Y = slope * X + y_intercept.

    Returns (slope, y_intercept, r_squared, p_value, std_err), where
    r_squared is the square of the correlation coefficient.

    Example:
        start, end = 25, 75
        slope, b, r2, p, se = fit_line(x[start:end], y[start:end])
        print(str.format("Fitted formula: Y = {a}X + {b}", a=slope, b=b))
        print(str.format("\tR-squared = {r2}", r2=r2))
    """
    result = stats.linregress(x_data, y_data)
    return result[0], result[1], result[2] * result[2], result[3], result[4]
def chi_squared_of_fit(y_data, fitted_y_data):
    """Chi-squared of model-generated Y values against observed Y values.

    Because a model is being tested against empirical data, the measured
    values (y_data) play the role of the "expected" frequencies and the
    fitted values (fitted_y_data) the "observed" ones — the opposite of
    the textbook definition — so the arguments to stats.chisquare are
    swapped accordingly.
    """
    return stats.chisquare(fitted_y_data, y_data)
def r_squared(y_data, test_y_data):
    """Coefficient of determination (R^2) of fitted values vs. observations.

    Args:
        y_data: observed values.
        test_y_data: model-generated values, indexed in parallel.

    Returns:
        1 - SS_residual / SS_total, where SS_total is taken about the
        mean of the observed data. Equals 1.0 for a perfect fit.

    Raises:
        ZeroDivisionError: when y_data is empty or has zero variance.
    """
    size = len(y_data)
    # sum()/len() replaces the original manual accumulation loop with its
    # redundant element counter; summation order (hence rounding) is identical.
    average = sum(y_data) / size
    sum_residual = 0
    sum_total = 0
    for n in range(size):
        d = y_data[n]
        sim = test_y_data[n]
        sum_residual += (d - sim) * (d - sim)
        sum_total += (d - average) * (d - average)
    return 1 - (sum_residual / sum_total)
def _fit_err(x_data, y_data, formula_function, coefficient_vector):
    """Mean squared error of the fitted formula over the data set.

    NOTE: this is a quick fitting-error metric, NOT standard error.
    """
    weight = 1.0 / len(y_data)
    total = 0
    for idx in range(len(x_data)):
        observed = y_data[idx]
        simulated = formula_function(x_data[idx], coefficient_vector)
        error = simulated - observed
        total += error * error * weight
    return total
def fit_function_bruteforce(x_data, y_data, formula_function, coefficient_vector, max_iterations):
    """Fit a formula to data by brute-force guess-and-check.

    Args:
        x_data: the x values of the data set.
        y_data: the y values of the data set.
        formula_function: a function (x, coefficient_vector) -> number that
            applies the formula to x with the given coefficients. For an
            exponential decay Y = A*e^(-x/tau) + C with coefficients
            [A, tau, C]:

                def exponential_decay_function(x, cv_list):
                    return cv_list[0] * math.exp((-1 / cv_list[1]) * x) + cv_list[2]

                coefficients, precisions, iterations = fit_function_bruteforce(
                    x, y, exponential_decay_function, [1, 10, 0], 1000000)

        coefficient_vector: starting coefficients; should be a best guess
            close to the true values, otherwise the search may get stuck
            in a local maxima or edge case.
        max_iterations: maximum number of fitting iterations.

    Returns:
        (coefficient_vector, precision, iteration_count): the adjusted
        coefficients, the +/- precision per coefficient, and the number
        of iterations actually used.
    """
    iterations = 0
    # Start the per-coefficient search deltas at a quarter of each guess.
    delta_vector = scalar_multiply(copy.deepcopy(coefficient_vector), 0.25)
    while iterations < max_iterations:
        # Halve the deltas on every refinement pass.
        delta_vector = scalar_multiply(delta_vector, 0.5)
        coefficient_vector, jiggles = _improveFit(
            x_data, y_data, formula_function, coefficient_vector,
            delta_vector, max_iterations - iterations)
        iterations += jiggles
    return coefficient_vector, delta_vector, iterations
def _improveFit(x, y, formula, cvec, delta, maxIterations):
    """
    Jiggle the coefficients to improve the formula fit a little bit.

    x: x data
    y: y data
    formula: the fitting formula (see fit_function_bruteforce(...))
    cvec: coefficient vector (see fit_function_bruteforce(...)); mutated in
          place and also returned
    delta: list of jiggle sizes corresponding to cvec
    maxIterations: maximum number of jiggle passes allowed before returning
    returns (cvec, iterations): the adjusted coefficients and the number of
        passes actually performed
    """
    # adjust each coefficient by the delta amount to decrease the error value
    iterations = 0
    while True:  # emulates a do-while loop
        lastErr = _fit_err(x, y, formula, cvec)
        for i in range(len(cvec)):
            oldC = cvec[i]
            upC = cvec[i] + delta[i]
            downC = cvec[i] - delta[i]
            # current fit error
            currentErr = _fit_err(x, y, formula, cvec)
            # increase the coefficient a little and check again
            cvec[i] = upC
            errPlus = _fit_err(x, y, formula, cvec)
            # decrease the coefficient a little and check again
            cvec[i] = downC
            errMinus = _fit_err(x, y, formula, cvec)
            if errPlus < currentErr and errPlus < errMinus:
                # increasing the coefficient helped the most
                cvec[i] = upC
            elif errMinus < currentErr:
                # decreasing the coefficient helped
                cvec[i] = downC
            else:
                # no change improved the fit
                cvec[i] = oldC
        iterations += 1
        # stop once a full pass no longer improves the fit, or the
        # iteration budget is exhausted
        if lastErr <= _fit_err(x, y, formula, cvec) or iterations >= maxIterations:
            break
    return cvec, iterations
def scalar_multiply(vector_list, scalar):
    """
    Multiply a vector (represented as a list of numbers) by a scalar value
    and return the new vector (the original vector values are not changed).
    """
    # A comprehension replaces the original deepcopy-then-mutate loop; for
    # flat lists of numbers the result is identical.
    return [component * scalar for component in vector_list]
def p_value(set1, set2):
    """
    Return the T-test P-value for two independent sets of data.
    """
    # ttest_ind returns (statistic, pvalue); only the p-value is of interest.
    return stats.ttest_ind(set1, set2)[1]
def mean(data_set):
    """
    Return the arithmetic mean of data_set, or nan when it cannot be
    computed (empty data set or non-numeric content).
    """
    try:
        return statistics.mean(data_set)
    except (statistics.StatisticsError, TypeError):
        # statistics.mean raises StatisticsError on an empty data set and
        # TypeError on non-numeric input; the original bare except would
        # also have swallowed unrelated bugs (even KeyboardInterrupt).
        return nan
def stdev(data_set):
    """Return the sample standard deviation of data_set, or nan when fewer
    than two samples are given or the computation fails."""
    if(len(data_set) < 2):
        return nan
    else:
        try:
            return statistics.stdev(data_set)
        except:
            # NOTE(review): bare except maps any failure to nan; consider
            # narrowing to statistics.StatisticsError.
            return nan | Kramer-Lab-Team-Algae/vO2-per-LEF-scripts | Data Analysis/MathHelper.py | MathHelper.py | py | 6,671 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scipy.stats.linregress",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "scipy.stats.chisquare",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "scipy.stats"... |
1158088849 | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from utils import INPUT_SHAPE, batch_generator
from keras.models import Sequential
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras.layers import Lambda, Conv2D, Dropout, Dense, Flatten
from keras.models import load_model
def load_data():
    """Read driving_log.csv and return an 80/20 train/validation split of
    (camera image paths, steering angles): X_train, X_valid, y_train, y_valid."""
    log = pd.read_csv('driving_log.csv',
                      names=['center', 'left', 'right', 'steering',
                             'throttle', 'reverse', 'speed'])
    images = log[['center', 'left', 'right']].values
    angles = log['steering'].values
    return train_test_split(images, angles, test_size=0.2)
def build_model():
    """Build the NVIDIA-style end-to-end steering CNN.

    Pixels are normalized to [-1, 1], followed by five convolutional layers,
    dropout, and four dense layers ending in a single steering output.
    """
    layers = [
        Lambda(lambda x: x / 127.5 - 1.0, input_shape=INPUT_SHAPE),
        Conv2D(24, (5, 5), activation='elu', strides=(2, 2)),
        Conv2D(36, (5, 5), activation='elu', strides=(2, 2)),
        Conv2D(48, (5, 5), activation='elu', strides=(2, 2)),
        Conv2D(64, (3, 3), activation='elu'),
        Conv2D(64, (3, 3), activation='elu'),
        Dropout(0.5),
        Flatten(),
        Dense(100, activation='elu'),
        Dense(50, activation='elu'),
        Dense(10, activation='elu'),
        Dense(1),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    model.summary()
    return model
def train_model(model, X_train, X_valid, y_train, y_valid,
                data_dir="IMG", batch_size=1000, nb_epoch=20,
                samples_per_epoch=100000, learning_rate=1.0e-4):
    """
    Compile and fit *model* on generator-fed batches of driving images.

    X_* hold camera image paths, y_* steering angles; batches are produced
    by utils.batch_generator from files under data_dir.  The best model
    (lowest validation loss) seen so far is checkpointed after each epoch.
    """
    # Saves the best model so far.
    checkpoint = ModelCheckpoint('model2-{epoch:03d}.h5',
                                 monitor='val_loss',
                                 verbose=0,
                                 save_best_only=True,
                                 mode='auto')
    model.compile(loss='mean_squared_error', optimizer=Adam(lr=learning_rate))
    # Keras 2 counts steps (batches) per epoch rather than samples; use
    # integer division so a whole number of steps is requested (the original
    # passed a float).
    steps_per_epoch = samples_per_epoch // batch_size
    v_steps = len(X_valid) // batch_size
    model.fit_generator(batch_generator(data_dir, X_train, y_train, batch_size, True),
                        steps_per_epoch, nb_epoch, max_queue_size=1,
                        validation_data=batch_generator(data_dir, X_valid, y_valid, batch_size, False),
                        validation_steps=v_steps, callbacks=[checkpoint])
# Script entry: load the data split, build a fresh network, and train it.
data = load_data()
model = build_model()
# To resume from a previously trained model instead:
# model = load_model("model-010.h5")
train_model(model, *data)
| thomashiemstra/self_driving_car_simulation | train.py | train.py | py | 2,585 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "keras.models.Sequential",
"line_number": 25,
"usage_type": "call"
},
{
"... |
25947442218 | import os
import sqlite3
from datetime import datetime, timedelta
import telebot
# Bot credentials and chat/thread targets come from the environment;
# thread ids default to 1 when unset.
bot = telebot.TeleBot(os.getenv("BOT_TOKEN"))
memes_chat_id = int(os.getenv("MEMES_CHAT_ID"))
flood_thread_id = int(os.getenv("FLOOD_THREAD_ID", 1))
memes_thread_id = int(os.getenv("MEMES_THREAD_ID", 1))
# check_same_thread=False permits using this connection from threads other
# than the one that created it.
conn = sqlite3.connect("memes.db", check_same_thread=False)
def main():
    """Post a Markdown list of active, non-admin chat members who have not
    written any message within the inactivity window to the flood thread."""
    # NOTE(review): the original variable was named seven_days_ago but used
    # timedelta(days=14); the rename keeps the actual 14-day behavior.
    cutoff = datetime.now() - timedelta(days=14)
    query = "SELECT u.user_id, u.username FROM users u LEFT JOIN user_messages um ON um.user_id=u.user_id AND um.created_at > ? WHERE um.message_id is NULL AND u.active=1"
    rows = conn.execute(query, (cutoff,)).fetchall()
    msg = ["Список вуаеристов\n"]
    for user_id, username in rows:
        # Administrators are exempt from the activity check.
        user_data = bot.get_chat_member(memes_chat_id, user_id)
        if user_data.status == "administrator":
            continue
        msg.append(
            "[{username}](tg://user?id={user_id}) {user_id}".format(
                username=username,
                user_id=user_id,
            )
        )
    bot.send_message(
        memes_chat_id,
        "\n".join(msg),
        message_thread_id=flood_thread_id,
        parse_mode="Markdown",
    )
# Script entry point (run once per invocation, e.g. from cron).
if __name__ == "__main__":
    main()
| dzaytsev91/tachanbot | cron_job_message_count.py | cron_job_message_count.py | py | 1,258 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "telebot.TeleBot",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 9,
... |
24267348983 | import pandas as pd
import geocoder
import math
from RR import *
class TreeOp:
    """Builds an R-Tree of shop bounding boxes from a CSV file and supports
    store-type / area-restricted searches over it."""

    def __init__(self, path=None):
        # path is the path of the CSV file.  Reading it creates the R-Tree
        # entries which can then be searched.
        # X is the longitude, Y is the latitude.
        self.tree = RTree()
        self.Area = dict()      # area name -> bounding box ([x1, x2], [y1, y2])
        self.Results = list()   # every entry inserted into the tree
        df = pd.read_csv(path, header=0)
        for i, row in df.iterrows():
            lat = float(row['Latitude'])
            lon = float(row['Longitude'])
            g = geocoder.google([lat, lon], method='reverse')
            # g = geocoder.google(row['Address']+',Karachi,Sindh,Pakistan')
            entry = TreeEntry(self._bbox_to_cartesian(g.bbox))
            g = geocoder.google(row['Area'] + ',Karachi,Sindh,Pakistan')
            if row['Area'] not in self.Area:
                self.Area[row['Area']] = self._bbox_to_cartesian(g.bbox)
            # CSV columns: ShopID,Name,Address,City,Province,Area,Cell,
            #              Landline,Longitude,Latitude,StoreType
            entry.setData(row['Name'], row['Address'], row['Province'],
                          row['Area'], row['Cell'], row['Landline'],
                          row['Latitude'], row['Longitude'],
                          row['StoreType'].split(";"))
            self.tree.insert(entry)
            self.Results.append(entry)
            if i == 5:
                # NOTE(review): development limit — only the first six rows
                # are indexed; remove this break to load the full file.
                break
        print(self.tree.Root)

    def getAreas(self):
        # Return the distinct area names found in the file; self.Area maps
        # each name to its bounding box.
        return list(self.Area.keys())

    def Search(self, Entity, AreaK, flag):
        # Entity: the store type to filter on.
        # AreaK: key into self.Area giving the bounds to search within.
        # flag: when True, restrict the search to the chosen area's bounding
        #       box; when False, filter over every loaded entry.
        if flag == False:
            candidates = self.Results
        else:
            candidates = self.tree.Search(self.tree.Root, self.Area[AreaK])
        return [entry for entry in candidates if Entity in entry.StoreType]

    def convertSphericalToCartesian(self, latitude, longitude):
        # Convert from degrees to radians, then project onto a sphere of
        # Earth's radius to obtain X/Y coordinates in km.
        latRad = latitude * (math.pi) / 180
        lonRad = longitude * (math.pi) / 180
        earthRadius = 6367  # radius in km
        posX = earthRadius * math.cos(latRad) * math.cos(lonRad)
        posY = earthRadius * math.cos(latRad) * math.sin(lonRad)
        return (round(posX, 3), round(posY, 3))

    def _bbox_to_cartesian(self, bbox):
        # Convert a geocoder bounding box (with "northeast"/"southwest"
        # lat/lon corners) into ([x1, x2], [y1, y2]) Cartesian bounds.
        # Extracted: the original repeated this conversion twice per row.
        x1, y1 = self.convertSphericalToCartesian(
            bbox["northeast"][0], bbox["northeast"][1])
        x2, y2 = self.convertSphericalToCartesian(
            bbox["southwest"][0], bbox["southwest"][1])
        return ([x1, x2], [y1, y2])
| munawwar22HU/Ehsas | Source/RTreeOperations.py | RTreeOperations.py | py | 3,426 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "geocoder.google",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "geocoder.google",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_n... |
42236411796 | from pick import pick
import re
import sys
result = ""
selected = []
all_feature = []
def loadDB(filename):
    """Load the production-rule database from *filename*.

    Each rule line has the form:  <id>:IF <cond1> [& <cond2>...] -> <conclusion>
    Lines starting with '#' are comments.  Returns a dict mapping each
    conclusion to its list of conditions and refreshes the module-level
    all_feature list with every distinct condition seen.
    """
    dictionary = {}
    conditions_seen = []
    global all_feature
    with open(filename, 'r') as f:
        for line in f.readlines():
            # Skip comments and lines that do not contain a complete rule
            # (the original raised IndexError on blank/malformed lines).
            if line.startswith("#") or "IF" not in line or "->" not in line:
                continue
            # Conditions are everything between IF and ->, '&'-separated,
            # with all whitespace stripped out.
            tmp = re.findall(r'IF(.*?)->', line.replace(" ", ""))[0]
            conditions = tmp.split("&")
            conditions_seen.extend(conditions)
            conclusion = line.split("->")[1].strip()
            # A later rule with the same conclusion overwrites an earlier one.
            dictionary[conclusion] = conditions
    all_feature = list(set(conditions_seen))
    return dictionary
def IS(text, current_dict):
    """Return only the entries of current_dict whose condition list contains
    *text*; rules not mentioning the given condition are excluded."""
    return {key: value for key, value in current_dict.items() if text in value}
def main(debug=False):
    """Interactively pick conditions, then run forward elimination over the
    rule base to narrow down to a conclusion."""
    dictionary = loadDB("db.txt")
    title = "请选择全部条件。"
    temp = pick(all_feature, title, multiselect=True, min_selection_count=1)
    selected.extend(i[0] for i in temp)
    if debug:
        print(f"选择的所有条件与规则:{selected}")
    for i in selected:
        # Apply each selected condition in turn, narrowing the rule set.
        if debug:
            print(f"处理规则 '{i}' 后的结论区:{list(dictionary.keys())}")
        dictionary = IS(i, dictionary)
        # print(dictionary)
    if len(dictionary) == 0:
        print("没有通过条件找到您的结论")
    elif len(dictionary) == 1:
        print(f"您输入的条件找到的结论是:{list(dictionary.keys())[0]}!")
    elif len(dictionary) > 1:
        print(f"您提供的条件对应数条结论,第一条是:{list(dictionary.keys())[0]}!")
if __name__ == "__main__":
    # Run in debug mode when the first CLI argument is exactly --debug.
    # (Originally any other argument made the script silently do nothing,
    # because the non---debug branch had no else clause.)
    main(debug=len(sys.argv) > 1 and sys.argv[1] == "--debug")
| littlebear0729/Production-system | identify_system.py | identify_system.py | py | 2,137 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.findall",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pick.pick",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 70,
... |
17585730612 | from __future__ import annotations
from collections import defaultdict
from starwhale import Job, handler, evaluation
from starwhale.utils.debug import console
# Starwhale cloud project whose evaluation jobs are aggregated.
PROJECT_URI = "https://cloud.starwhale.cn/project/349"
JOB_URI_TEMPLATE = "%s/job/{job_id}" % PROJECT_URI
# Ids of the evaluation jobs (one per model) to analyze.
JOB_IDS = [
    "845",
    "844",
    "843",
    "842",
    "830",
    "828",
    "827",
    "818",
    "817",
    "816",
    "814",
    "813",
    "810",
    "796",
    "759",
]
# Few-shot settings and benchmark category levels present in the results table.
SHOT_GROUPS = ("zero_shot", "one_shot", "five_shot")
CATEGORY_GROUPS = ("first-level", "second-level", "third-level")
@handler(replicas=1)
def analysis_leaderboard() -> None:
    """Aggregate per-question scores across all leaderboard jobs and log
    per-question score tallies plus a right-answer distribution."""
    # question id -> shot group -> score label -> count
    r_score = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
    # question id -> shot group -> score label -> model names with that score
    r_score_models = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
    # question id -> question/answer/choices/category metadata (filled once)
    r_benchmark = defaultdict(dict)
    r_jobs = {}
    job_cnt = len(JOB_IDS)
    for idx, job_id in enumerate(JOB_IDS):
        uri = JOB_URI_TEMPLATE.format(job_id=job_id)
        job = Job.get(uri)
        console.print(f"{idx+1}/{job_cnt} processing [{job_id}] {job.model} ...")
        if job.model:
            r_jobs[job_id] = job.model.name
        for row in job.get_table_rows("results"):
            benchmark_id = row["id"]
            if benchmark_id not in r_benchmark:
                # First time we see this question: record its metadata.
                r_benchmark[benchmark_id] = {
                    "question": row["input/question"],
                    "answer": row["input/answer"],
                    "choices": row["input/choices"],
                }
                for category in CATEGORY_GROUPS:
                    r_benchmark[benchmark_id][f"category/{category}"] = row[
                        f"input/category/{category}"
                    ]
            for shot in SHOT_GROUPS:
                score = row[f"output/{shot}/score"]
                score = f"score-{score}"
                # "score-1" marks a correct answer.
                if score == "score-1":
                    r_score[benchmark_id][shot]["right_count"] += 1
                r_score[benchmark_id][shot][score] += 1
                if job.model:
                    r_score_models[benchmark_id][shot][score].append(job.model.name)
    # shot group -> (number of models answering correctly) -> question count
    r_right_distribution = defaultdict(lambda: defaultdict(int))
    for benchmark_id, scores in r_score.items():
        evaluation.log(
            category="leaderboard-analysis",
            id=benchmark_id,
            metrics={
                "benchmark": r_benchmark[benchmark_id],
                "scores": scores,
                "models": r_score_models[benchmark_id],
            },
        )
        for shot, score_values in scores.items():
            score_one_cnt = score_values.get("right_count") or 0
            r_right_distribution[shot][score_one_cnt] += 1
    for shot, score_values in r_right_distribution.items():
        for count_name, count_value in score_values.items():
            metrics = {
                f"{shot}/count": count_value,
                f"{shot}/percentage": f"{count_value/len(r_benchmark):.2%}",
            }
            evaluation.log(
                category="right-answer-distribution",
                id=count_name,
                metrics=metrics,
            )
            console.log(f"{count_name} - {shot}: {metrics}")
    evaluation.log_summary(
        {
            "project": PROJECT_URI,
            "benchmark/name": "cmmlu",
            "benchmark/questions_count": len(r_benchmark),
            "analysis/job_models": list(r_jobs.values()),
            "analysis/job_ids": JOB_IDS,
        }
    )
    console.print(":clap: finished!")
| star-whale/starwhale | example/llm-leaderboard/src/analysis.py | analysis.py | py | 3,516 | python | en | code | 171 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 35,
"usage_type": "call"
},
{
"api_name"... |
38326596416 | import pandas as pd
from matplotlib import pyplot as plt
from oemof.tools import logger
import logging
import q100opt.plots as plots
from q100opt.buildings import BuildingInvestModel, SolarThermalCollector
from q100opt.scenario_tools import ParetoFront
from q100opt.setup_model import load_csv_data
logger.define_logging(screen_level=logging.INFO)

# read data
timeseries = pd.read_csv("data/test-building-timeseries.csv")
weather = pd.read_csv("data/weather.csv")
tech_data = pd.read_csv("data/techdata.csv", index_col=0, skiprows=1)
commodity_data = load_csv_data("data/commodities")

# define data that could/should be in the Kataster (building register)
kataster = {
    'heat_load_space_heating': 10,  # heat load space heating [kW]
    'heat_load_dhw': 4,  # heat load hot water [kW]
    'heat_load_total': 12,  # total heat load [kW]
    'pv_1_max': 5,  # maximum kWp of PV area 1
    'pv_2_max': 3,  # maximum kWp of PV area 2
    'pv_3_max': 0,  # maximum kWp of PV area 3
    # roof areas
    # roof 1, e.g. west orientation
    'roof_1_azimuth': 90,  # azimuth orientation [°]
    'roof_1_pitch': 40,  # roof pitch [°]
    'roof_1_area_usable': 20,  # [m²]
    # roof 2, e.g. south orientation
    'roof_2_azimuth': 180,  # azimuth orientation [°]
    'roof_2_pitch': 40,  # roof pitch [°]
    'roof_2_area_usable': 20,  # [m²]
    # roof 3, e.g. east orientation
    'roof_3_azimuth': 270,  # azimuth orientation [°]
    'roof_3_pitch': 40,  # roof pitch [°]
    'roof_3_area_usable': 20,  # [m²]
    # solar thermal options
    # maximum share of roof area considered for solar thermal
    'st_1_max': 0.8,
    'st_2_max': 0.8,
    'st_3_max': 0.8,
    # maximum values of units (for investment model)
    "gas-boiler.maximum": 100,
    "pellet-boiler.maximum": 0,
    "wood-boiler.maximum": 0,
    "heatpump-geo.maximum": 10,
    "heatpump-air.maximum": 10,
    "thermal-storage.maximum": 100,
    "battery-storage.maximum": 100,
    "substation.maximum": 100,
    # installed capacities for operation model
    "gas-boiler.installed": 10,
    "pellet-boiler.installed": 0,
    "wood-boiler.installed": 0,
    "heatpump-geo.installed": 0,
    "heatpump-air.installed": 10,
    "thermal-storage.installed": 0,
    "battery-storage.installed": 0,
}

# Collector efficiency curve parameters (eta_0, a_1, a_2).
my_collector = SolarThermalCollector(
    eta_0=0.825,
    a_1=3.41,
    a_2=0.0161,
)

house = BuildingInvestModel(
    space_heating_demand=timeseries["E_th_RH"],
    electricity_demand=timeseries["E_el"],
    hot_water_demand=timeseries["E_th_TWE"],
    pv_1_profile=timeseries["E_el_PV_1"],
    pv_2_profile=timeseries["E_el_PV_2"],
    commodity_data=commodity_data,
    tech_data=tech_data,
    weather=weather,
    timesteps=8760,
    start_date="2015-01-01 01:00",
    location=(52.516254, 13.377535),
    solar_thermal_collector=my_collector,
    exclusive_roof_constraint=True,  # for each roof a constraint with limited area is created
    pv_system={"space_demand": 5},  # [m²/kWp],
    **kataster,
)

table_collection = house.create_table_collection()

# From here on, this builds on the existing functions of q100opt.
house.pareto_front = ParetoFront(
    table_collection=house.table_collection,
    number_of_points=5,
    number_of_time_steps=700,
)
house.pareto_front.calc_pareto_front(solver='gurobi', tee=True)

# some plots
house.pareto_front.results["pareto_front"].plot(
    x='emissions', y='costs', kind='scatter'
)
plt.xlabel('emissions')
plt.ylabel('costs')
plt.show()

for emission_limit, scenario in house.pareto_front.district_scenarios.items():
    plots.plot_investments(
        scenario.results['main'], title="Emissionscenario: " + emission_limit
    )
| quarree100/q100opt | examples/single_building/example_house_with_solarthermal.py | example_house_with_solarthermal.py | py | 3,764 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "oemof.tools.logger.define_logging",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "oemof.tools.logger",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "logging.INFO",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name":... |
73974471145 | import logging
from datetime import datetime
from time import sleep
from typing import Union, Optional, Tuple, List, Sequence
import mariadb
from mariadb import Cursor, Connection
from accounting_bot import utils
from accounting_bot.exceptions import DatabaseException
logger = logging.getLogger("ext.accounting.db")
class AccountingDB:
    """MariaDB-backed persistence for transaction messages and shortcut
    messages.

    Every public method lazily reconnects when the connection is missing or
    closed, logs database failures, and re-raises them as mariadb.Error.
    """

    def __init__(self, username: str, password: str, host: str, port: str, database: str) -> None:
        """Connect to the database, retrying up to 5 times with increasing
        back-off; raises DatabaseException when all attempts fail."""
        self.cursor = None  # type: Cursor | None
        self.con = None  # type: Connection | None
        self.username = username
        self.password = password
        self.host = host
        self.port = port
        self.database = database
        connected = False
        counter = 0
        while not connected and counter < 5:
            # Retrying the connection in case the database is not yet ready
            try:
                self.try_connect()
                connected = True
            except mariadb.Error:
                counter += 1
                logger.warning(f"Retrying connection in {counter * 2} seconds")
                sleep(counter * 2)
        if not connected:
            raise DatabaseException(f"Couldn't connect to MariaDB database on {self.host}:{self.port}")

    def try_connect(self) -> None:
        """Open a fresh connection and create the required tables if they
        do not exist yet."""
        logger.info("Connecting to database...")
        try:
            self.con = mariadb.connect(
                user=self.username,
                password=self.password,
                host=self.host,
                port=self.port,
                database=self.database,
                connect_timeout=8
            )
            logger.info("Connected to database!")
            self.cursor = self.con.cursor()
            self.cursor.execute("CREATE TABLE IF NOT EXISTS messages ("
                                "msgID BIGINT NOT NULL, "
                                "userID BIGINT NOT NULL, "
                                "verified BIT NOT NULL DEFAULT b'0', "
                                "t_state TINYINT, "
                                "ocr_verified BIT NOT NULL DEFAULT b'0', "
                                "PRIMARY KEY (msgID)"
                                ") ENGINE = InnoDB; ")
            self.cursor.execute("CREATE TABLE IF NOT EXISTS shortcuts ("
                                "msgID BIGINT NOT NULL, "
                                "channelID BIGINT NOT NULL, "
                                "PRIMARY KEY (msgID)"
                                ") ENGINE = InnoDB; ")
        except mariadb.Error as e:
            logger.error(f"Error connecting to MariaDB Platform: {e}")
            raise e

    def _ensure_connection(self) -> None:
        # All public methods funnel through this instead of repeating the
        # "is the connection alive?" check.
        if self.con is None or not self.con.open:
            self.try_connect()

    def ping(self):
        """Return the database round-trip time in microseconds, or None
        when the database is unreachable.

        NOTE(review): .microseconds is only the sub-second component of the
        timedelta; round trips of one second or more wrap around.
        """
        try:
            self._ensure_connection()
            start = datetime.now()
            self.con.ping()
            return (datetime.now() - start).microseconds
        except mariadb.Error as e:
            utils.log_error(logger, e)
            return None

    def execute_statement(self, statement: str, data: Sequence = ()) -> Cursor:
        """Execute an arbitrary parameterized statement, commit, and return
        the cursor (for fetching results)."""
        self._ensure_connection()
        try:
            self.cursor.execute(statement, data)
            self.con.commit()
            return self.cursor
        except mariadb.Error as e:
            logger.error("Error while trying to execute statement %s: %s", statement, e)
            raise e

    def add_transaction(self, message: int, user: int) -> None:
        """Insert a new (unverified) transaction message owned by *user*."""
        logger.debug(f"Saving transaction to database with msg {str(message)} and user {str(user)}")
        self._ensure_connection()
        try:
            self.cursor.execute(
                "INSERT INTO messages (msgID, userID) VALUES (?, ?);",
                (message, user))
            self.con.commit()
        except mariadb.Error as e:
            logger.error(f"Error while trying to insert a new transaction: {e}")
            raise e

    def set_state(self, message: int, state: int) -> int:
        """Set the t_state column of a transaction and return the number of
        affected rows (the original annotation incorrectly said None)."""
        self._ensure_connection()
        try:
            self.cursor.execute(
                "UPDATE messages SET t_state = ? WHERE messages.msgID=?;",
                (state, message))
            self.con.commit()
            return self.cursor.rowcount
        except mariadb.Error as e:
            logger.error(f"Error while trying to update the transaction {message} to state {state}: {e}")
            raise e

    def get_state(self, message: int) -> Optional[int]:
        """Return the t_state of a transaction (a TINYINT), or None when
        the message id is unknown."""
        self._ensure_connection()
        try:
            self.cursor.execute(
                "SELECT msgID, t_state FROM messages WHERE messages.msgID=?;",
                (message,))
            self.con.commit()
            res = self.cursor.fetchone()
            if res is None:
                return None
            (msg_id, state) = res
            return state
        except mariadb.Error as e:
            logger.error(f"Error while trying to get state of a transaction: {e}")
            raise e

    def get_owner(self, message: int) -> Optional[Tuple[int, bool]]:
        """Return (owner user id, verified) for a transaction, or None.

        NOTE(review): verified is compared against the int 1 here, while
        is_unverified_transaction compares against the raw BIT bytes value;
        one of the two is likely wrong — verify against the connector's BIT
        column handling.
        """
        self._ensure_connection()
        try:
            self.cursor.execute(
                "SELECT userID, verified FROM messages WHERE msgID=?;",
                (message,))
            res = self.cursor.fetchone()
            if res is None:
                return None
            (user, verified) = res
            verified = verified == 1
            return user, verified
        except mariadb.Error as e:
            logger.error(f"Error while trying to get a transaction: {e}")
            raise e

    def set_verification(self, message: int, verified: Union[bool, int]) -> int:
        """Set the verified flag of a transaction; returns affected rows."""
        if isinstance(verified, bool):
            # Normalize to 0/1, consistent with set_ocr_verification.
            verified = 1 if verified else 0
        self._ensure_connection()
        try:
            self.cursor.execute(
                "UPDATE messages SET verified = ? WHERE messages.msgID=?;",
                (verified, message))
            self.con.commit()
            return self.cursor.rowcount
        except mariadb.Error as e:
            logger.error(f"Error while trying to update the transaction {message} to {verified}: {e}")
            raise e

    def is_unverified_transaction(self, message: int) -> Optional[bool]:
        """Return True when the transaction exists and is unverified, False
        when it is verified, None when the message id is unknown."""
        self._ensure_connection()
        try:
            self.cursor.execute(
                "SELECT msgID, verified FROM messages WHERE messages.msgID=?;",
                (message,))
            self.con.commit()
            res = self.cursor.fetchone()
            if res is None:
                return None
            (msg_id, verified) = res
            # The comparison expects the connector to return BIT values as
            # raw bytes.
            return verified == b'\x00'
        except mariadb.Error as e:
            logger.error(f"Error while trying to check a transaction: {e}")
            raise e

    def get_unverified(self, include_user: bool = False) -> Union[List[int], List[Tuple[int, int]]]:
        """Return all unverified transaction message ids; with
        include_user=True, (msgID, userID) pairs instead."""
        self._ensure_connection()
        try:
            res = []
            if include_user:
                self.cursor.execute(
                    "SELECT msgID, userID FROM messages WHERE verified=b'0';")
                for (msg, user) in self.cursor:
                    res.append((msg, user))
            else:
                self.cursor.execute(
                    "SELECT msgID FROM messages WHERE verified=b'0';")
                for (msg,) in self.cursor:
                    res.append(msg)
            return res
        except mariadb.Error as e:
            logger.error(f"Error while trying to get all unverified transactions: {e}")
            raise e

    def set_ocr_verification(self, message: int, verified: Union[bool, int]) -> int:
        """Set the ocr_verified flag of a transaction; returns affected rows."""
        if isinstance(verified, bool):
            verified = 1 if verified else 0
        self._ensure_connection()
        try:
            self.cursor.execute(
                "UPDATE messages SET ocr_verified = ? WHERE messages.msgID=?;",
                (verified, message))
            self.con.commit()
            return self.cursor.rowcount
        except mariadb.Error as e:
            logger.error(f"Error while trying to update the transaction {message} to ocr_verified {verified}: {e}")
            raise e

    def get_ocr_verification(self, message: int) -> Optional[bool]:
        """Return True when the transaction is OCR-verified, False when it
        is not, None when the message id is unknown."""
        self._ensure_connection()
        try:
            self.cursor.execute(
                "SELECT msgID, ocr_verified FROM messages WHERE messages.msgID=?;",
                (message,))
            self.con.commit()
            res = self.cursor.fetchone()
            if res is None:
                return None
            (msg_id, verified) = res
            return verified == b'\x01'
        except mariadb.Error as e:
            logger.error(f"Error while trying to check a transaction: {e}")
            raise e

    def delete(self, message: int) -> None:
        """Delete a transaction message; logs a warning when the affected
        row count differs from 1."""
        self._ensure_connection()
        try:
            self.cursor.execute(
                "DELETE FROM messages WHERE messages.msgID=?",
                (message,))
            self.con.commit()
            affected = self.cursor.rowcount
            if affected != 1:
                logger.warning(f"Deletion of message {message} affected {affected} rows, expected was 1 row")
        except mariadb.Error as e:
            logger.error(f"Error while trying to delete a transaction: {e}")
            raise e

    def add_shortcut(self, msg_id: int, channel_id: int) -> None:
        """Insert a shortcut message bound to a channel."""
        self._ensure_connection()
        try:
            self.cursor.execute(
                "INSERT INTO shortcuts (msgID, channelID) VALUES (?, ?);",
                (msg_id, channel_id))
            self.con.commit()
            affected = self.cursor.rowcount
            if affected != 1:
                logger.warning(f"Insertion of shortcut message {msg_id} affected {affected} rows, expected was 1 row")
            else:
                logger.info(f"Inserted shortcut message {msg_id}, affected {affected} rows")
        except mariadb.Error as e:
            logger.error(f"Error while trying to insert a shortcut message {msg_id}: {e}")
            raise e

    def get_shortcuts(self) -> List[Tuple[int, int]]:
        """Return all (msgID, channelID) shortcut pairs."""
        self._ensure_connection()
        try:
            res = []
            self.cursor.execute(
                "SELECT msgID, channelID FROM shortcuts;")
            for (msg, channel) in self.cursor:
                res.append((msg, channel))
            return res
        except mariadb.Error as e:
            logger.error(f"Error while trying to get all shortcut messages: {e}")
            raise e

    def delete_shortcut(self, message: int) -> None:
        """Delete a shortcut message; logs the outcome."""
        self._ensure_connection()
        try:
            self.cursor.execute(
                "DELETE FROM shortcuts WHERE shortcuts.msgID=?",
                (message,))
            self.con.commit()
            affected = self.cursor.rowcount
            if affected != 1:
                logger.warning(f"Deletion of shortcut message {message} affected {affected} rows, expected was 1 row")
            else:
                logger.info(f"Deleted shortcut message {message}, affected {affected} rows")
        except mariadb.Error as e:
            logger.error(f"Error while trying to delete a shortcut message: {e}")
            raise e
| Blaumeise03/AccountingBot | accounting_bot/ext/accounting_db.py | accounting_db.py | py | 11,837 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "mariadb.Error",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "accounting_bot.except... |
26944213809 | # -*- coding: utf-8 -*-
'''
@author: davandev
'''
import logging
import os
import traceback
import sys
import davan.config.config_creator as configuration
import davan.util.constants as constants
from davan.util import cmd_executor as cmd_executor
from davan.http.service.base_service import BaseService
class PictureService(BaseService):
    '''
    Motion detected on sensors: take a photo from the camera and send it to
    all Telegram receivers.
    '''

    def __init__(self, service_provider, config):
        '''
        Constructor
        '''
        BaseService.__init__(self, "TakePicture", service_provider, config)
        self.logger = logging.getLogger(os.path.basename(__file__))

    def handle_request(self, msg):
        '''
        Handle a received request:
        - take a picture from the requested camera,
        - send the picture to all configured receivers,
        - delete the temporary picture file.
        Returns an (http status, mime type, body) tuple.
        '''
        try:
            self.increment_invoked()
            camera = self.parse_request(msg)
            self.take_picture(camera)
            self.send_picture(camera)
            self.delete_picture()
        except Exception:
            # Exception (not a bare except) so SystemExit/KeyboardInterrupt
            # still propagate.
            self.logger.error(traceback.format_exc())
            self.increment_errors()
            self.logger.error("Failed to handle picture request")
            return constants.RESPONSE_NOT_OK, constants.MIME_TYPE_HTML, constants.RESPONSE_FAILED_TO_TAKE_PICTURE
        return constants.RESPONSE_OK, constants.MIME_TYPE_HTML, constants.RESPONSE_EMPTY_MSG

    def parse_request(self, msg):
        '''
        Return the camera name extracted from the received request path.
        '''
        self.logger.debug("Parsing: " + msg)
        msg = msg.replace("/TakePicture?text=", "")
        return msg

    def delete_picture(self):
        '''
        Delete the temporarily stored photo.
        '''
        self.logger.debug("Deleting picture")
        os.remove("/var/tmp/snapshot.jpg")

    def send_picture(self, camera):
        '''
        Send the picture to all configured Telegram receivers.
        @param camera: camera name (used in the photo caption)
        '''
        self.logger.info("Sending picture to telegram accounts")
        for chatid in self.config['CHATID']:
            self.logger.debug("Sending picture to chatid[" + chatid + "]")
            telegram_url = ('curl -X POST "https://api.telegram.org/bot' +
                            self.config['TOKEN'] +
                            '/sendPhoto" -F chat_id=' +
                            chatid +
                            ' -F photo="@/var/tmp/snapshot.jpg" -F caption="Rörelse upptäckt från ' +
                            camera + '"')
            cmd_executor.execute_block(telegram_url, "curl")

    def take_picture(self, camera):
        '''
        Take a picture from the camera and store it temporarily on the file
        system.  Raises an Exception when the camera has no configured URL.
        @param camera: camera name
        '''
        self.logger.info("Take picture from camera [" + camera + "]")
        # "in" replaces the Python-2-only dict.has_key() and works on both
        # Python 2 and 3.
        if camera in self.config["CAMERAS"]:
            cam_picture_url = self.config["CAMERAS"][camera]
            cmd_executor.execute("wget " + cam_picture_url + " --user=" + self.config["CAMERA_USER"] +
                                 " --password=" + self.config["CAMERA_PASSWORD"] + " --auth-no-challenge")
            pos = cam_picture_url.rfind('/')
            file_name = cam_picture_url[pos + 1:]
            cmd_executor.execute("sudo mv " + file_name + " /var/tmp/snapshot.jpg")
        else:
            raise Exception("No camera url for [" + camera + "] configured")

    def has_html_gui(self):
        """
        Override: this service provides an HTML GUI.
        """
        return True

    def get_html_gui(self, column_id):
        """
        Override: return the HTML column describing this service.
        """
        if not self.is_enabled():
            return BaseService.get_html_gui(self, column_id)
        column = constants.COLUMN_TAG.replace("<COLUMN_ID>", str(column_id))
        column = column.replace("<SERVICE_NAME>", self.service_name)
        column = column.replace("<SERVICE_VALUE>", "<li>Cameras: " + str(self.config["CAMERAS"].keys()) + " </li>\n")
        return column
if __name__ == '__main__':
    from davan.util import application_logger as log_config
    config = configuration.create()
    log_config.start_logging(config['LOGFILE_PATH'], loglevel=4)
    camerapath = "/TakePicture?text=Framsidan"
    # The constructor requires (service_provider, config); the original
    # PictureService() call raised TypeError before any request was handled.
    test = PictureService(None, config)
    # NOTE(review): start() is presumably inherited from BaseService — confirm.
    test.start(camerapath)
| davandev/davanserver | davan/http/service/picture/PictureService.py | PictureService.py | py | 4,701 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "davan.http.service.base_service.BaseService",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "davan.http.service.base_service.BaseService.__init__",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "davan.http.service.base_service.BaseService",
... |
28721865338 | # usage: python dropprofiles.py
# looks through mongo argo:argo and lists ids
from pymongo import MongoClient

client = MongoClient('mongodb://database/argo')
db = client.argo

# Fetch only the _id field of every profile document.
mongoids = [doc['_id'] for doc in db.argo.find({}, {'_id': 1})]

# Bug fix: the output file was opened but never closed; a context manager
# guarantees it is flushed and closed even if a write fails.
with open("mongoprofiles", "w") as mongoprofiles:
    for profile_id in mongoids:
        mongoprofiles.write(profile_id)
        mongoprofiles.write('\n')
| argovis/ifremer-sync | audit/mongoids.py | mongoids.py | py | 363 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 6,
"usage_type": "call"
}
] |
32137328654 | import datetime
import json
import os
from datetime import timezone
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold, ParameterGrid, train_test_split
from prism_kondo.experiment_utils import (
add_random_noise_arnaiz,
calc_model_errors,
run_selector,
save_hyperopt_result,
train_lr_model,
)
from prism_kondo.instance_selection._params_dict import (
NOISE_DEPENDENT_PARAMS,
PARAMS_DICTS_NOISE,
)
class NoiseExperimenter:
def generate_gaussian_linear_data(
    self,
    nr_samples: int,
    nr_features: int,
    mean: float,
    std: float,
    random_state=None,
):
    """Sample a synthetic linear-regression data set.

    Features are drawn i.i.d. from N(mean, std); the target is a linear
    combination of the features with coefficients drawn uniformly from
    [-10, 10] (rounded to 2 decimals) plus standard-normal noise.

    Returns a tuple (X, y) with X of shape (nr_samples, nr_features).
    """
    rng = np.random.RandomState(random_state)
    # Draw in a fixed order (features, coefficients, noise) so a given
    # seed always reproduces the same data set.
    X = rng.normal(mean, std, size=(nr_samples, nr_features))
    coefs = np.round(rng.uniform(-10, 10, nr_features), 2)
    y = np.zeros(nr_samples)
    for feature_idx in range(nr_features):
        y += coefs[feature_idx] * X[:, feature_idx]
    y += rng.normal(0, 1, size=nr_samples)
    return X, y
def run_experiments_arnaiz(
    self,
    selectors,
    nr_datasets,
    nr_samples,
    nr_features,
    mean,
    std,
    noise_frac: float,
    output_dir="arnaiz_synthetic",
):
    """Run the Arnaiz-style noise experiment on synthetic linear data.

    For every generated data set, each selector is applied to both the clean
    and a label-noised copy of the training data; the test errors of the
    resulting linear models plus noise-detection statistics are written as
    JSON files under ``output_dir/<selector_name>/``.

    :param selectors: names of the instance selectors to evaluate
    :param nr_datasets: number of independent synthetic data sets
    :param nr_samples: samples per data set
    :param nr_features: features per data set
    :param mean: mean of the Gaussian feature distribution
    :param std: std of the Gaussian feature distribution
    :param noise_frac: fraction of training labels to corrupt
    :param output_dir: root directory for the JSON result files
    """
    for i in range(nr_datasets):
        if i % 20 == 0:
            print(f"generated {i}/{nr_datasets} datasets")
        X, y = self.generate_gaussian_linear_data(
            nr_samples, nr_features, mean, std
        )
        # 60/40 split; the first 200 held-out samples serve as a validation
        # set (only used by selectors that need one, e.g. selcon).
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)
        X_val, X_test = X_test[:200], X_test[200:]
        y_val, y_test = y_test[:200], y_test[200:]
        y_train_noisy, noisy_idx = add_random_noise_arnaiz(
            y_train, noise_frac=noise_frac
        )
        for selector_name in selectors:
            # Choose this selector's hyper-parameters once, up front.
            if selector_name in ["selcon", "shapley", "fish1"]:
                params = NOISE_DEPENDENT_PARAMS[noise_frac][selector_name]
            elif selector_name == "ground_truth":
                params = {}
            else:
                params = PARAMS_DICTS_NOISE[selector_name]
            # Baselines: full training set with clean and with noisy labels.
            model_clean = train_lr_model(X_train, y_train)
            errors_clean = calc_model_errors(model_clean, X_test, y_test)
            model_noisy = train_lr_model(X_train, y_train_noisy)
            errors_noisy = calc_model_errors(model_noisy, X_test, y_test)
            if selector_name == "ground_truth":
                # Oracle: keep everything except the truly corrupted rows.
                labels_clean = np.ones(len(X_train), dtype="bool")
                labels_noisy = np.ones(len(X_train), dtype="bool")
                labels_noisy[noisy_idx] = False
            elif selector_name in ["selcon"]:
                # selcon expects train+validation stacked; only the labels of
                # the first 600 (training) rows are kept.
                X_train_and_val = np.vstack([X_train, X_val])
                y_train_and_val = np.concatenate([y_train, y_val])
                y_train_noisy_and_val = np.concatenate([y_train_noisy, y_val])
                labels_clean, _ = run_selector(
                    X_train_and_val, y_train_and_val, selector_name, params
                )
                labels_clean = labels_clean[:600]
                labels_noisy, _ = run_selector(
                    X_train_and_val, y_train_noisy_and_val, selector_name, params
                )
                labels_noisy = labels_noisy[:600]
            elif selector_name in ["reg_enn_time", "fixed_window", "fish1"]:
                # Time-aware selectors get a synthetic daily timestamp column.
                base_time = datetime.datetime(2000, 1, 1)
                time_train = np.array(
                    [
                        base_time + datetime.timedelta(days=i)
                        for i in range(X_train.shape[0])
                    ],
                    dtype="datetime64[ns]",
                ).astype(np.float32)
                X_time = np.hstack([X_train, time_train.reshape(-1, 1)])
                if selector_name in ["reg_enn_time", "fixed_window"]:
                    labels_clean, _ = run_selector(
                        X_time, y_train, selector_name, params
                    )
                    labels_noisy, _ = run_selector(
                        X_time, y_train_noisy, selector_name, params
                    )
                elif selector_name in ["fish1"]:
                    # fish1 additionally needs a target point appended.
                    x_target = np.hstack(
                        [X_test[0, :], time_train[-1].astype(np.float32)]
                    )
                    X_fish = np.vstack([X_time, x_target])
                    labels_clean, _ = run_selector(
                        X_fish, y_train, selector_name, params
                    )
                    labels_noisy, _ = run_selector(
                        X_fish, y_train_noisy, selector_name, params
                    )
            else:
                labels_clean, _ = run_selector(
                    X_train, y_train, selector_name, params
                )
                labels_noisy, _ = run_selector(
                    X_train, y_train_noisy, selector_name, params
                )
            # Retrain on the selected subsets and measure test errors.
            model_labels_clean = train_lr_model(
                X_train[labels_clean, :], y_train[labels_clean]
            )
            model_labels_noisy = train_lr_model(
                X_train[labels_noisy, :], y_train_noisy[labels_noisy]
            )
            errors_clean_selector = calc_model_errors(
                model_labels_clean, X_test, y_test
            )
            errors_noisy_selector = calc_model_errors(
                model_labels_noisy, X_test, y_test
            )
            # Merge everything into one record keyed by scenario prefix.
            errors_clean.update(
                {f"clean_selector_{k}": v for k, v in errors_clean_selector.items()}
            )
            errors_clean.update({f"noisy_{k}": v for k, v in errors_noisy.items()})
            errors_clean.update(
                {f"noisy_selector_{k}": v for k, v in errors_noisy_selector.items()}
            )
            errors_clean["selector"] = selector_name
            # Bug fix: this previously read PARAMS_DICT[selector_name], but
            # PARAMS_DICT is never defined/imported (NameError for every
            # non-ground-truth selector). Record the params chosen above.
            errors_clean["params"] = params
            errors_clean["noise_frac"] = noise_frac
            errors_clean["mean"] = mean
            errors_clean["std"] = std
            errors_clean["nr_samples"] = nr_samples
            errors_clean["nr_features"] = nr_features
            errors_clean["std_y"] = float(np.std(y_test))
            (
                correctly_kicked_out,
                falsely_kicked_out,
            ) = self.calc_correctly_identified_noise_samples(
                noisy_idx, labels_noisy
            )
            errors_clean["clean_frac_kicked_out"] = len(
                np.argwhere(labels_clean == False).flatten()
            ) / len(labels_clean)
            errors_clean["noisy_frac_kicked_out"] = len(
                np.argwhere(labels_noisy == False).flatten()
            ) / len(labels_noisy)
            errors_clean["frac_correctly_kicked_out"] = correctly_kicked_out / len(
                noisy_idx
            )
            errors_clean["frac_falsely_kicked_out"] = falsely_kicked_out / len(
                y_train
            )
            self.save_json_file(
                errors_clean,
                output_dir,
                selector_name,
            )
def save_json_file(self, info_dict, output_dir, selector_name):
    """Write *info_dict* as JSON to ``output_dir/selector_name/<timestamp>``.

    The file name is a UTC timestamp with microsecond resolution, so
    successive (or concurrent) results practically never collide.
    """
    directory_path = os.path.join(output_dir, selector_name)
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists() + os.makedirs() pair.
    os.makedirs(directory_path, exist_ok=True)
    timestamp = datetime.datetime.now(timezone.utc).strftime("%Y-%m-%d_%H-%M-%S.%f")
    filepath = os.path.join(directory_path, timestamp)
    with open(filepath, "w") as outfile:
        json.dump(info_dict, outfile)
def run_hyperopt(
    self,
    selector_name,
    param_dict_ranges,
    nr_samples,
    nr_features,
    mean,
    std,
    noise_frac,
    n_splits=10,
):
    """Grid-search a selector's hyper-parameters with k-fold CV on noisy data.

    Every combination in ``param_dict_ranges`` is scored by its validation R^2
    over ``n_splits`` folds, and each result is persisted via
    save_hyperopt_result. NOTE(review): fold scores are computed against the
    *noisy* labels -- presumably intentional (clean labels would not exist in
    practice); confirm.
    """
    X, y = self.generate_gaussian_linear_data(nr_samples, nr_features, mean, std)
    # Hold 25% back entirely; CV runs on the remaining 75%.
    X, _, y, _ = train_test_split(X, y, test_size=0.25)
    y_train_noisy, noisy_idx = add_random_noise_arnaiz(y, noise_frac=noise_frac)
    # Synthetic daily timestamps for the time-aware selectors.
    base_time = datetime.datetime(2000, 1, 1)
    time_train = np.array(
        [base_time + datetime.timedelta(days=i) for i in range(X.shape[0])],
        dtype="datetime64[ns]",
    ).astype(np.float32)
    X_time = np.hstack([X, time_train.reshape(-1, 1)])
    all_param_combinations = list(ParameterGrid(param_dict_ranges))
    iteration = 1
    for param_dict in all_param_combinations:
        print("trying combination", iteration, "/", len(all_param_combinations))
        iteration += 1
        kf = KFold(n_splits=n_splits, shuffle=False)
        cv_val_scores = []
        cv_dict = {}
        for i, (train_index, val_index) in enumerate(kf.split(X)):
            X_train = X[train_index, :]
            y_train = y_train_noisy[train_index]
            X_val = X[val_index, :]
            y_val = y_train_noisy[val_index]
            if selector_name == "reg_enn_time":
                # Time-aware selector: pass the features with the time column.
                boolean_labels, scores = run_selector(
                    X_time[train_index, :], y_train, selector_name, param_dict
                )
            elif selector_name == "fish1":
                # fish1 needs a target point (first validation row plus the
                # latest timestamp) appended to the training matrix.
                x_target = np.hstack(
                    [X_val[0, :], time_train[-1].astype(np.float32)]
                )
                X_fish = np.vstack([X_time[train_index, :], x_target])
                boolean_labels, scores = run_selector(
                    X_fish, y_train, selector_name, param_dict
                )
            else:
                boolean_labels, scores = run_selector(
                    X_train, y_train, selector_name, param_dict
                )
            # Fit on the selected subset, score on the held-out fold.
            model = train_lr_model(
                X_train[boolean_labels, :], y_train[boolean_labels]
            )
            error_dict = calc_model_errors(model, X_val, y_val)
            cv_val_scores.append(error_dict["r2"])
        cv_dict["raw_scores"] = cv_val_scores
        cv_dict["mean_score"] = np.mean(cv_val_scores)
        cv_dict["std_scores"] = np.std(cv_val_scores)
        cv_dict["n_splits"] = n_splits
        cv_dict["noise_frac"] = noise_frac
        cv_dict["mean"] = mean
        cv_dict["std"] = std
        cv_dict["nr_samples"] = nr_samples
        cv_dict["nr_features"] = nr_features
        save_hyperopt_result(
            selector_name,
            param_dict,
            cv_dict,
            f"noise{noise_frac}",
            "",
            "hyperopt_synthetic",
        )
def calc_correctly_identified_noise_samples(self, noisy_idx, labels):
    """Count how many removed samples were truly noisy vs. removed in error.

    :param noisy_idx: indices of the samples that actually carry noise
    :param labels: boolean keep-mask from a selector (False = removed)
    :return: tuple (correctly_kicked_out, falsely_kicked_out)
    """
    # labels is a numpy array, so == False is an element-wise mask here.
    idx_by_selector = np.argwhere(labels == False).flatten()
    # Fix: reuse the index array instead of recomputing np.argwhere.
    correctly_kicked_out = len(set(noisy_idx).intersection(set(idx_by_selector)))
    falsely_kicked_out = len(idx_by_selector) - correctly_kicked_out
    return correctly_kicked_out, falsely_kicked_out
def create_pca_plot(self, noise_frac=0.3, filename=None, random_state=None):
    """Plot a clean vs. noisy synthetic data set projected to 2D via PCA.

    Two scatter panels share one colorbar; noisy samples are drawn larger.
    If *filename* is given the figure is saved there.
    """
    X, y = self.generate_gaussian_linear_data(
        1000, 5, 0, 1, random_state=random_state
    )
    y_noisy, noisy_idx = add_random_noise_arnaiz(y, noise_frac=noise_frac)
    pca = PCA(2)
    X = pca.fit_transform(X)
    # fig, axes = plt.subplots(1, 2, figsize=(15, 10), sharey=True)
    # Third (narrow) axis hosts the shared colorbar.
    gridspec = {"width_ratios": [1, 1, 0.05]}
    fig, axes = plt.subplots(1, 3, figsize=(15, 10), gridspec_kw=gridspec)
    axes[0].scatter(X[:, 0], X[:, 1], c=y, cmap="seismic")
    axes[0].set_title("a) Clean Data Set")
    # Noisy samples get a bigger marker so they stand out.
    size = np.ones(len(y)) * 20
    size[noisy_idx] = 60
    axes[1].scatter(X[:, 0], X[:, 1], c=y_noisy, s=size, cmap="seismic")
    axes[1].set_title(f"b) Noisy Data Set ( {int(noise_frac*100)}% )")
    fig.supxlabel("Principal Component 1", fontsize=16, y=0.0)
    fig.supylabel("Principal Component 2", fontsize=16, x=0.08)
    # NOTE(review): mpl.cm.get_cmap is deprecated in newer matplotlib;
    # kept as-is to avoid changing behavior here.
    cmap = mpl.cm.get_cmap("seismic")
    norm = mpl.colors.Normalize(vmin=y.min(), vmax=y.max())
    cbar = plt.colorbar(
        mpl.cm.ScalarMappable(norm=norm, cmap=cmap),
        cax=axes[2],
    )
    cbar.set_label(label="y Value", size="large", weight="bold")
    if filename:
        fig.savefig(filename)
| lurue101/Ruecker_MA | prism_kondo/noise_experiments.py | noise_experiments.py | py | 13,013 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.random.RandomState",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.round",
... |
12006624886 | #! /usr/bin/env python3
import sys
sys.path.insert(0, '/home/pi/soco') # Add soco location to system path
import time
from soco import SoCo
from soco.snapshot import Snapshot

print("Starting Doorbell Player...")

### Setup
# Doorbell MP3 stream and the volume it is played back at.
bellsound = "http://www.orangefreesounds.com/wp-content/uploads/2016/06/Westminster-chimes.mp3"
bellvolume = 50

# Assign all zone player IPs to their names
study = SoCo('192.168.1.225')
masterbedroom = SoCo('192.168.1.83') # Left Play:1 (master)
invmasterbedroom = SoCo('192.168.1.245') # Right Play:1 (invisible slave)
library = SoCo('192.168.1.166')
guestroom = SoCo('192.168.1.184')
kitchen = SoCo('192.168.1.212')
diningroom = SoCo('192.168.1.162')
livingroom = SoCo('192.168.1.238') # Playbar (master)
invlivingroom = SoCo('192.168.1.208') # SUB IP (invisible slave)

# Identify doorbell (rings the chime) and non-doorbell players
doorbell1 = study
doorbell2 = masterbedroom
doorbell3 = diningroom
doorbell4 = kitchen
nondoorbell1 = library
nondoorbell2 = guestroom
nondoorbell3 = livingroom

# Create group lists of doorbell, non-doorbell and invisible players
doorbellgroup = [doorbell1, doorbell2, doorbell3, doorbell4]
nondoorbellgroup = [nondoorbell1, nondoorbell2, nondoorbell3]
invisiblezones = [invmasterbedroom, invlivingroom]

### Store pre-doorbell state
# Take a snapshot of the states of doorbellgroup players
for zp in doorbellgroup:
    print("\nSnapshotting current state of " + zp.player_name + "\n")
    zp.snap = Snapshot(zp)
    zp.snap.snapshot()

# Build descriptor list for each doorbell group player for later processing & restoration
for zp in doorbellgroup:
    print("\nGetting current group state of " + zp.player_name + "\n")
    zp.groupstatus = [zp,  # 0 player object
                      # 1 in a group? (can't rely on boolean return from Snapshot,
                      #   because invisible players are included in group status)
                      bool(len(set(zp.group.members) - set(invisiblezones)) != 1),
                      zp.is_coordinator,  # 2 is coordinator?
                      zp.group.coordinator,  # 3 current coordinator object
                      # 4 heterogeneous group? (both doorbell and non-doorbell players)
                      bool(set(zp.group.members) & set(nondoorbellgroup)),
                      # 5 first non-doorbell group member; False if none
                      (list(set(nondoorbellgroup) & set(zp.group.members)) + [False])[0]
                      ]

### Doorbell player routine
# Pause and unjoin doorbell zone players from any current groups
print("Unjoining doorbell group players from current groups...\n")
for zp in doorbellgroup:
    zp.unjoin()

# Join doorbell zone players into a group with doorbell1 as master
print("Joining doorbell group players with " + doorbell1.player_name + " as master...\n")
for i in range(1, len(doorbellgroup)):
    zp = doorbellgroup[i]
    zp.join(doorbell1)

# Wait for zone players to be ready (doorbell1 must lead the new group)
while not doorbell1.is_coordinator:
    print("Waiting for " + doorbell1.player_name + " to be coordinator...\n")
    time.sleep(0.1)

# Set volume for doorbell sound
for zp in doorbellgroup:
    zp.volume = bellvolume
    print("Setting " + zp.player_name + " volume.\n")

# Play doorbell sound
doorbell1.play_uri(uri=bellsound)
track = doorbell1.get_current_track_info()
print(track['title'])

# Block until playback starts, then until the chime finishes
while str(doorbell1.get_current_transport_info()[u'current_transport_state']) != "PLAYING":
    print("Waiting to start playing...")
    time.sleep(0.1)
while str(doorbell1.get_current_transport_info()[u'current_transport_state']) == "PLAYING":
    print("Ringing...")
    time.sleep(0.1)

# Unjoin doorbell zone players from the doorbell group
print("\nUnjoining doorbell group players from doorbell group...")
for zp in doorbellgroup:
    zp.unjoin()

# Wait for zone players to be ungrouped
for zp in doorbellgroup:
    while not zp.is_coordinator:
        print("\nWaiting for " + zp.player_name + " to be ungrouped...")
        time.sleep(0.1)

### Restore and regroup doorbell players
# Restore original state of doorbell players
print("\nRestoring doorbell group players to former states...")
for zp in doorbellgroup:
    zp.snap.restore(fade=0)
# Brief settle delay before regrouping.
time.sleep(1)

# Restore groups based on zp.groupstatus descriptor list of original group state
print("\nRestoring groups...")
for zp in doorbellgroup:
    if zp.groupstatus[1] == False:  # loner: was not in a group
        pass  # nothing to restore
    elif zp.groupstatus[2] == False and zp.groupstatus[4] == False:  # homogeneous-group slave
        zp.join(zp.groupstatus[3])  # rejoin original coordinator
    elif zp.groupstatus[2] == True and zp.groupstatus[4] == False:  # homogeneous-group coordinator
        pass  # its slaves rejoin it above
    elif zp.groupstatus[2] == True and zp.groupstatus[4] == True:  # former coordinator of heterogeneous group
        zp.join(zp.groupstatus[5].group.coordinator)  # query new coordinator via a non-doorbell member & rejoin
    elif zp.groupstatus[2] == False and zp.groupstatus[3] not in doorbellgroup:  # slave with non-doorbell coordinator
        zp.join(zp.groupstatus[3])  # rejoin original coordinator
    else:  # slave in heterogeneous group with doorbell coordinator
        zp.join(zp.groupstatus[5].group.coordinator)  # query new coordinator via a non-doorbell member & rejoin

# Finish
print("\nDoorbell Player finished.\n")
| ronschaeffer/sonosdoorbell | SonosDoorbellPlayer.py | SonosDoorbellPlayer.py | py | 5,389 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "sys.path.insert",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "soco.SoCo",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "soco.SoCo",
"line_number": ... |
10179832367 | #!/usr/bin/python3
"""
Prints the titles of the first 10 hot posts listed for a given subreddit
"""
import requests
def top_ten(subreddit):
    """
    Print the titles of the first 10 hot posts of a subreddit,
    or "None" when the subreddit is missing or not a string.
    """
    if subreddit is None or not isinstance(subreddit, str):
        print("None")
        # Bug fix: previously fell through and queried reddit anyway.
        return
    user_agent = {'User-agent': 'Google Chrome Version 81.0.4044.129'}
    params = {'limit': 10}
    url = 'https://www.reddit.com/r/{}/hot/.json'.format(subreddit)
    response = requests.get(url, headers=user_agent, params=params)
    results = response.json()
    try:
        my_data = results['data']['children']
        for post in my_data:
            print(post['data']['title'])
    except Exception:
        # Invalid subreddit / unexpected payload shape.
        print("None")
| jamesAlhassan/alx-system_engineering-devops | 0x16-api_advanced/1-top_ten.py | 1-top_ten.py | py | 756 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 23,
"usage_type": "call"
}
] |
18198988741 | """Providers filters file."""
from django.db import models
import django_filters
from tersun.common.filters import SearchComboboxBaseFilter
from tersun.providers import models as provider_models
class ProviderFilter(SearchComboboxBaseFilter):
    """Combobox search filter over all fields of the Provider model."""

    class Meta:
        """Declarative django-filter configuration."""

        # Filter on every field of the Provider model.
        model = provider_models.Provider
        fields = "__all__"
        # FileField has no default filter in django-filter; fall back to a
        # case-insensitive substring match on the stored file name.
        filter_overrides = {
            models.FileField: {
                'filter_class': django_filters.CharFilter,
                'extra': lambda f: {
                    'lookup_expr': 'icontains',
                },
            },
        }
{
"api_name": "tersun.common.filters.SearchComboboxBaseFilter",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "tersun.providers.models.Provider",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "tersun.providers.models",
"line_number": 16,
"usage_... |
32578660748 | import cv2 as cv
import numpy as np

# Detect object contours in img.jpg, draw their outlines and bounding boxes,
# and report each object's area.
image = cv.imread('img.jpg')
# Fix: the previous version also built a grayscale copy (cvtColor) that was
# never used; Canny is applied to the loaded image directly.
imageGrayCanny = cv.Canny(image, 100, 150)
cv.imshow('canny', imageGrayCanny)

contours, hierarchy = cv.findContours(imageGrayCanny, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)

# Approximate each contour with a polygon and take its bounding rectangle.
contours_poly = [None] * len(contours)
boundRect = [None] * len(contours)
for local, contour in enumerate(contours):
    contours_poly[local] = cv.approxPolyDP(contour, 3, True)
    boundRect[local] = cv.boundingRect(contours_poly[local])

draw = np.copy(image)
color = (0, 0, 255)  # red in BGR; loop-invariant, hoisted out of the loop
for i in range(len(contours)):
    cv.drawContours(draw, contours_poly, i, color)
    x, y, w, h = boundRect[i]
    cv.rectangle(draw, (int(x), int(y)), (int(x + w), int(y + h)), color, 2)

for index, contour in enumerate(contours):
    print(f"object {index + 1}: {cv.contourArea(contour)} a.u.")

cv.imshow('draw', draw)
cv.waitKey(0)
cv.imwrite("objects.jpg", draw)
{
"api_name": "cv2.imread",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "cv2.Canny",
"line_nu... |
71249270184 | """
Core client functionality, common across requests.
"""
import collections
import random
import requests
import time
from datetime import datetime
from datetime import timedelta
RETRIABLE_STATUSES = {500, 503, 504}  # transient server errors worth retrying


class AbstractRestClient:
    """Performs requests to API services with rate limiting and retries."""

    def __init__(self, base_url,
                 timeout=None, connect_timeout=None, read_timeout=None, retry_timeout=60,
                 queries_per_second=10,
                 requests_kwargs=None):
        """
        :param base_url: base url to perform requests
        :type base_url: string
        :param timeout: Combined connect and read timeout for HTTP requests, in
            seconds. Specify "None" for no timeout. Mutually exclusive with
            connect_timeout/read_timeout.
        :type timeout: int
        :param connect_timeout: Connection timeout for HTTP requests, in
            seconds. Specify read_timeout too (requires requests >= 2.4.0).
        :type connect_timeout: int
        :param read_timeout: Read timeout for HTTP requests, in seconds.
            Specify connect_timeout too (requires requests >= 2.4.0).
        :type read_timeout: int
        :param retry_timeout: Timeout across multiple retriable requests, in
            seconds.
        :type retry_timeout: int
        :param queries_per_second: Number of queries per second permitted.
            If the rate limit is reached, the client sleeps before sending.
        :type queries_per_second: int
        :param requests_kwargs: Extra keyword arguments for the requests
            library (proxies, auth, ...). See the requests docs.
        :type requests_kwargs: dict
        """
        self.session = requests.Session()

        if timeout and (connect_timeout or read_timeout):
            raise ValueError("Specify either timeout, or connect_timeout "
                             "and read_timeout")

        if connect_timeout and read_timeout:
            self.timeout = (connect_timeout, read_timeout)
        else:
            self.timeout = timeout

        self.retry_timeout = timedelta(seconds=retry_timeout)
        self.requests_kwargs = requests_kwargs or {}
        self.requests_kwargs.update({
            "timeout": self.timeout,
        })

        self.queries_per_second = queries_per_second
        # Send times of the last N requests ("" seeds an empty deque with
        # maxlen=queries_per_second).
        self.sent_times = collections.deque("", queries_per_second)
        self.base_url = base_url

    def _request(self, url, method="get", first_request_time=None, retry_counter=0, requests_kwargs=None):
        """Performs an HTTP GET/POST with backoff, retry and rate limiting.

        :param url: full URL for the request.
        :param method: HTTP method name; "get" or "post".
        :param first_request_time: time of the first attempt (None on the
            first call; carried through retries to enforce retry_timeout).
        :param retry_counter: number of this retry, zero for the first attempt.
        :param requests_kwargs: per-call overrides merged over the client's
            default requests kwargs.
        :raises ValueError: with message "timeout" when the request timed out
            or the overall retry budget was exhausted.
        """
        if not first_request_time:
            first_request_time = datetime.now()

        elapsed = datetime.now() - first_request_time
        if elapsed > self.retry_timeout:
            raise ValueError("timeout")

        if retry_counter > 0:
            # 0.5 * (1.5 ^ i) is an increased sleep time of 1.5x per iteration,
            # starting at 0.5s when retry_counter=0. The first retry will occur
            # at 1, so subtract that first.
            delay_seconds = 0.5 * 1.5 ** (retry_counter - 1)
            # Jitter this value by 50% and pause.
            time.sleep(delay_seconds * (random.random() + 0.5))

        requests_kwargs = requests_kwargs or {}
        final_requests_kwargs = dict(self.requests_kwargs, **requests_kwargs)

        try:
            response = self.session.request(method, url, **final_requests_kwargs)
        except requests.exceptions.Timeout:
            raise ValueError("timeout")

        if response.status_code in RETRIABLE_STATUSES:
            # Bug fix: the retry previously called
            # _request(url, first_request_time, retry_counter + 1, requests_kwargs),
            # shifting every argument one position left -- the datetime landed
            # in ``method``, so retries used a bogus HTTP method and dropped
            # the per-call kwargs. Pass everything by keyword instead.
            return self._request(url, method=method,
                                 first_request_time=first_request_time,
                                 retry_counter=retry_counter + 1,
                                 requests_kwargs=requests_kwargs)

        # Rate limiting: if the nth previous query (n = queries_per_second)
        # was under a second ago, sleep for the difference.
        if self.sent_times and len(self.sent_times) == self.queries_per_second:
            elapsed_since_earliest = time.time() - self.sent_times[0]
            if elapsed_since_earliest < 1:
                time.sleep(1 - elapsed_since_earliest)
        self.sent_times.append(time.time())

        return response

    def _get_request_uri(self, partial_uri=""):
        # Absolute URLs pass through untouched; relative paths are appended
        # to base_url.
        if partial_uri.startswith("http"):
            return partial_uri
        return "{}{}".format(self.base_url, partial_uri)

    def get(self, uri, params=None, headers=None):
        """Perform an HTTP GET against base_url + uri."""
        requests_kwargs = {"params": params or {}, "headers": headers or {}}
        url = self._get_request_uri(uri)
        return self._request(url, "get", requests_kwargs=requests_kwargs)

    def post(self, uri, data=None, headers=None):
        """Perform an HTTP POST with a JSON body against base_url + uri."""
        requests_kwargs = {"json": data or {}, "headers": headers or {}}
        url = self._get_request_uri(uri)
        return self._request(url, "post", requests_kwargs=requests_kwargs)
| ifreddyrondon/address-resolver | addressresolver/core/client.py | client.py | py | 5,786 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.Session",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "datetime.dateti... |
14537963306 | # 주식 비교 및 분석
# 1. Stock comparison
# Uses Yahoo Finance
# Required libraries: yfinance and pandas-datareader
# get_data_yahoo() fetches stock quotes:
# get_data_yahoo(ticker [, start=period start date] [, end=period end date])
from pandas_datareader import data as pdr
import yfinance as yf

yf.pdr_override()
sec = pdr.get_data_yahoo('063160.KS', start='2020-08-17')
msft = pdr.get_data_yahoo('MSFT', start='2018-05-04')
print(sec)
tmp_msft = msft.drop(columns='Volume')  # drop the Volume column
print(tmp_msft.tail())  # tail() shows the 5 most recent rows
print(sec.index)
{
"api_name": "yfinance.pdr_override",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas_datareader.data.get_data_yahoo",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas_datareader.data",
"line_number": 12,
"usage_type": "name"
},
{
... |
18074097517 | from django.conf.urls import patterns, include, url
from django.conf import settings
from django.conf.urls.static import static

# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()

# URL routes for the Users app. Old-style ``patterns()`` syntax (removed in
# Django 1.10); views are referenced by dotted path strings.
urlpatterns = patterns('',
    url(r'^index$', 'Users.views.start'),  # landing page
    url(r'^setNewUser$', 'Users.views.setNewUser'),  # create an account
    url(r'^getRegisterForm$', 'Users.views.getRegisterForm'),
    url(r'^login$', 'Users.views.login'),
    url(r'^logout$', 'Users.views.logout'),
)
| ggarri/photoDiary | Users/urls.py | urls.py | py | 512 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.conf.urls.patterns",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "dj... |
2124363907 | import torch
import torch.nn as nn
class RNN_Classifier(torch.nn.Module):
    """Single-direction RNN followed by a linear classification head.

    The forward pass feeds a (batch, seq, input_size) tensor through an
    ``nn.RNN`` and classifies from the hidden output of the last time step,
    returning raw logits of shape (batch, output_size).
    """

    def __init__(self, input_size, hidden_size, output_size, num_layers=1,
                 batch_first=True, use_gpu=True):
        super(RNN_Classifier, self).__init__()
        self.hidden_size = hidden_size
        self.input_size = input_size
        self.output_size = output_size
        self.num_layers = num_layers
        self.rnn = nn.RNN(self.input_size, self.hidden_size, num_layers,
                          batch_first=batch_first)
        self.fc = nn.Linear(self.hidden_size, self.output_size)
        # Kept for API compatibility; forward() does not apply it, so the
        # output is suitable for nn.CrossEntropyLoss.
        self.softmax = torch.nn.Softmax(dim=1)
        # Bug fix: this was hard-coded to True, silently ignoring the
        # constructor argument and crashing forward() on CPU-only machines.
        self.use_gpu = use_gpu

    def forward(self, x):
        # Fresh zero initial hidden state for every batch.
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size)
        if self.use_gpu:
            h0 = h0.cuda()
        out, hidden = self.rnn(x, h0)
        # Classify from the last time step only.
        out = self.fc(out[:, -1, :])
        return out
| nhatleminh1997/ASL_detection | RNN_classifer.py | RNN_classifer.py | py | 1,041 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.RNN",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number... |
40895132002 | from flask import Flask, render_template, request, Response, url_for,jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_weasyprint import HTML, render_pdf
import ast
import json
import dicttoxml
from datetime import datetime
from model import Reports
app = Flask(__name__)
# NOTE(review): database credentials are hard-coded in the connection string;
# move them to configuration/environment variables before deploying.
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://interview:LetMeIn@candidate.suade.org/suade'
db = SQLAlchemy(app)
# Presumably redundant after SQLAlchemy(app), but harmless; kept as-is.
db.init_app(app)
@app.route("/")
def hello():
return "Welcome to Suade Reporting API!"
@app.route('/api/reports/', methods=['GET'])
def get_all_reports():
    """Return a JSON array with the payload of every stored report."""
    return jsonify([report.type for report in Reports.query.all()])
@app.route('/api/report/<int:id>/', methods=['GET'])
def get_report(id):
    """Return the raw stored payload of a single report."""
    return Reports.query.get(id).type
# XML endpoint
@app.route('/api/report/<int:id>.xml/')
def get_xml_report(id):
    """Serve a report with its JSON payload converted to XML."""
    payload = json.loads(Reports.query.get(id).type)
    return Response(dicttoxml.dicttoxml(payload), mimetype='text/xml')
# PDF endpoint
@app.route('/api/report/<int:id>.pdf/')
def get_pdf_report(id):
    """Render a report as a PDF via the report.html template."""
    # The stored payload is a Python-literal string, hence literal_eval.
    report = ast.literal_eval(Reports.query.get(id).type)
    report['created'] = datetime.now().strftime('%Y-%m-%d')  # timestamp
    rendered = render_template('report.html', report=report)
    return render_pdf(HTML(string=rendered))
@app.errorhandler(404)
def page_not_found(error):
    """Plain-text handler for unknown routes."""
    return 'This page does not exist', 404
if __name__ == "__main__":
app.run() | webbyfox/suade | app.py | app.py | py | 1,494 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "model.Reports.query.all",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "model... |
9376474634 | import numpy as np
import cv2
import sys
from math import sqrt
# Allow deep call stacks: the escape-time computation may use up to one
# recursion level per palette color.
sys.setrecursionlimit(10000)
class MandelbrotSet:
    """Render an escape-time image of the Mandelbrot set over [-2, 2]^2."""

    def __init__(self, size, numColors):
        self.size = size
        self.c = 0  # current point; updated per pixel by generateImage
        self.img = np.zeros((size, size, 3), dtype='uint8')
        self.pallet = []
        self.generatePallet(numColors)
        # Escape-time is capped so each count indexes one palette entry;
        # bounded orbits map to the last entry.
        self.MAX_STACK = len(self.pallet) - 1
        self.generateImage()

    def generatePallet(self, numColors):
        """Build an evenly spaced palette of roughly numColors entries."""
        # Each channel gets n values, where numColors = n*n*n.
        step = int(255 / (numColors**0.33333333 - 1))
        for b in range(0, 256, step):
            for g in range(0, 256, step):
                for r in range(0, 256, step):
                    self.pallet.append([g, b, r])
        self.pallet = np.array(self.pallet, dtype='uint8')

    def f(self, z):
        """One Mandelbrot iteration step for the current point self.c."""
        return z**2 + self.c

    def mag(self, x):
        """Magnitude of a complex number."""
        return sqrt((x.real**2) + (x.imag**2))

    def num(self, z, iterations):
        """Escape-time count for z, starting at the given depth.

        Iterative rewrite of the original recursion: counts how many
        applications of f are needed for |z| to exceed 2, capped at
        MAX_STACK total depth (0 if already at/above the cap).
        """
        steps = 0
        value = z
        while iterations + steps < self.MAX_STACK:
            value = self.f(value)
            steps += 1
            if self.mag(value) > 2:
                break
        return steps

    def generateImage(self):
        """Fill self.img by coloring every pixel by its escape time."""
        reals = np.linspace(-2, 2, self.size)    # real axis (columns)
        imags = np.linspace(2j, -2j, self.size)  # imaginary axis (rows)
        for row in range(self.size):
            for col in range(self.size):
                self.c = reals[col] + imags[row]
                self.img[row, col] = self.pallet[self.num(0, 0)]

    def getImage(self):
        """Return the rendered image as an HxWx3 uint8 array."""
        return self.img
# Render a 600x600 image with a palette of up to 500 colors and save it.
mbs = MandelbrotSet(600, 500)
cv2.imwrite('mandel.png', mbs.getImage())
| cleiston/Fractals | MandelbrotSet.py | MandelbrotSet.py | py | 1,684 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.setrecursionlimit",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_nu... |
38197374041 | import matplotlib.pylab as plt
import matplotlib.patches as mpatch
import numpy as np
import pandas as pd
# Get alcohol consumption level and GSP
YEAR = 2009
df = pd.read_csv("cache/niaaa-report.csv")
df = df[df.Year == YEAR]
# USGS table has 5 header rows and comma thousands-separators; force GSP to float.
df2 = pd.read_csv("cache/usgs_state_2009.csv", dtype={"Gross State Product": np.float64},
                  skiprows=5, nrows=52, thousands=",")
# Left-join keeps every NIAAA state even if it has no GSP row.
df3 = df.merge(df2, on='State', how='left')
# Compute measures per capita
df3["GSP per capita"] = df3["Gross State Product"].div(df3["Population (million)"])
df3["Alcohol"] = df3["Beer"] + df3["Wine"] + df3["Spirits"]
# Construct cross-table of above-mean GSP vs above-mean alcohol consumption.
gsp = df3["GSP per capita"] > df3["GSP per capita"].mean()
alcohol = df3["Alcohol"] > df3["Alcohol"].mean()
table = pd.crosstab(gsp, alcohol)
print(table)
# Compute correlation between ALCOHOL CONSUMPTION/capita and GSP/capita
# NOTE(review): .GSP[0] uses positional indexing on a label-indexed Series
# (deprecated in modern pandas); .GSP["Alcohol"] or .iloc[0] is safer.
df4 = pd.DataFrame({"GSP": df3["GSP per capita"], "Alcohol": df3["Alcohol"]})
print("\ncorr: ", df4.corr().GSP[0])
# Generate scatter plot, each alcohol is plotted separately
plt.scatter(df3["Beer"], df3["GSP per capita"], color="Blue")
plt.scatter(df3["Spirits"], df3["GSP per capita"], color="Green")
plt.scatter(df3["Wine"], df3["GSP per capita"], color="Red")
# Manual legend patches since plt.scatter calls were not labeled.
red = mpatch.Patch(color='red', label='Wine')
blue = mpatch.Patch(color='blue', label='Beer')
green = mpatch.Patch(color='green', label='Spirits')
plt.legend(handles=[red, green, blue], loc="upper left")
plt.title("GSP/Capita vs Alcohol Consumption/Capita")
plt.xlabel("Alcohol Consumption/Capita")
plt.ylabel("GSP/Capita")
plt.grid()
plt.savefig("results/gsp-alcohol.png")
| hmly/data-science | demo-analysis/demo-analysis.py | demo-analysis.py | py | 1,586 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pandas.crosstab",
... |
21107263411 | """Utility functions for building models."""
from __future__ import print_function
import collections
import time
import os
import numpy as np
import tensorflow as tf
from .utils import iterator_utils
from .utils import misc_utils as utils
from .utils import data_utils
__all__ = [
"get_initializer", "get_device_str",
"create_train_model", "create_eval_model", "create_infer_model",
"create_rnn_cell", "gradient_clip", "create_or_load_model", "load_model"
]
def get_initializer(init_op, seed=None, init_weight=None):
    """Create an initializer; *init_weight* is only used for "uniform"."""
    # Early-return per supported scheme; anything else is an error.
    if init_op == "uniform":
        assert init_weight
        return tf.random_uniform_initializer(
            -init_weight, init_weight, seed=seed)
    if init_op == "glorot_normal":
        return tf.keras.initializers.glorot_normal(seed=seed)
    if init_op == "glorot_uniform":
        return tf.keras.initializers.glorot_uniform(seed=seed)
    raise ValueError("Unknown init_op %s" % init_op)
def get_device_str(device_id, num_gpus):
    """Return a device string from multi-GPU setup."""
    # No GPUs at all: everything lands on the single CPU device.
    if not num_gpus:
        return "/cpu:0"
    # Round-robin the requested id across the available GPUs.
    return "/gpu:%d" % (device_id % num_gpus)
# Train Model
class TrainModel(
        collections.namedtuple("TrainModel", ("graph", "model", "iterator",
                                              "data_placeholder"))):
    """Immutable bundle tying a training graph to its model, iterator and
    the placeholder the training data is fed through."""
    pass
def create_train_model(model_creator, hparams, scope=None):
    """Create train graph, model, and iterator.

    Args:
        model_creator: callable (hparams, iterator, mode, scope) -> model.
        hparams: hyperparameters; reads out_dir, data_file, batch_size,
            random_seed, src_max_len and tgt_max_len.
        scope: optional container name; defaults to "train".

    Returns:
        A TrainModel namedtuple (graph, model, iterator, data_placeholder).
    """
    out_dir = hparams.out_dir
    data_file = hparams.data_file
    # check_data presumably probes the data file's dtype/shape so the
    # placeholder below matches it -- confirm in data_utils.
    data_dtype, data_shape = data_utils.check_data(data_file, out_dir)
    graph = tf.Graph()
    with graph.as_default(), tf.container(scope or "train"):
        # Define dataset from placeholder, will be fed in during training
        data_placeholder = tf.placeholder(data_dtype, data_shape)
        dataset = tf.data.Dataset.from_tensor_slices(data_placeholder)
        iterator = iterator_utils.get_iterator(
            dataset,
            batch_size=hparams.batch_size,
            random_seed=hparams.random_seed,
            src_max_len=hparams.src_max_len,
            tgt_max_len=hparams.tgt_max_len,
            output_buffer_size=None)
        model_device_fn = None  # if we have a special device name or function
        with tf.device(model_device_fn):
            model = model_creator(
                hparams,
                iterator=iterator,
                mode=tf.contrib.learn.ModeKeys.TRAIN,
                scope=None)
    return TrainModel(
        graph=graph,
        model=model,
        iterator=iterator,
        data_placeholder=data_placeholder)
# Eval Model
class EvalModel(
        collections.namedtuple("EvalModel", ("graph", "model", "iterator",
                                             "data_placeholder"))):
    """Immutable bundle tying an evaluation graph to its model, iterator and
    the placeholder the dev data is fed through."""
    pass
def create_eval_model(model_creator, hparams, scope=None):
    """Create eval graph, model, and iterator.

    Mirrors create_train_model but reads hparams.dev_data_file and builds the
    model in EVAL mode inside an "eval" container.

    Returns:
        An EvalModel namedtuple (graph, model, iterator, data_placeholder).
    """
    out_dir = hparams.out_dir
    dev_data_file = hparams.dev_data_file
    # check_data presumably probes dtype/shape of the dev file -- confirm in data_utils.
    data_dtype, data_shape = data_utils.check_data(dev_data_file, out_dir)
    graph = tf.Graph()
    with graph.as_default(), tf.container(scope or "eval"):
        # Define dataset from placeholder, will be fed in during evaluation
        data_placeholder = tf.placeholder(data_dtype, data_shape)
        dataset = tf.data.Dataset.from_tensor_slices(data_placeholder)
        iterator = iterator_utils.get_iterator(
            dataset,
            batch_size=hparams.batch_size,
            random_seed=hparams.random_seed,
            src_max_len=hparams.src_max_len,
            tgt_max_len=hparams.tgt_max_len,
            output_buffer_size=None)
        model = model_creator(
            hparams,
            iterator=iterator,
            mode=tf.contrib.learn.ModeKeys.EVAL,
            scope=None)
    return EvalModel(
        graph=graph,
        model=model,
        iterator=iterator,
        data_placeholder=data_placeholder)
# Infer Model
class InferModel(
        collections.namedtuple("InferModel", ("graph", "model", "iterator",
                                              "data_placeholder"))):
    """Immutable bundle tying an inference graph to its model, iterator and
    the placeholder the inference data is fed through."""
    pass
def create_infer_model(model_creator, hparams, scope=None):
    """Create infer graph, model, and iterator.

    Falls back to hparams.sample_infer_data_file when no explicit
    infer_data_file is configured; builds the model in INFER mode.

    Returns:
        An InferModel namedtuple (graph, model, iterator, data_placeholder).
    """
    out_dir = hparams.out_dir
    if hparams.infer_data_file:
        infer_data_file = hparams.infer_data_file
    else:
        infer_data_file = hparams.sample_infer_data_file
    # check_data presumably probes dtype/shape of the input file -- confirm in data_utils.
    data_dtype, data_shape = data_utils.check_data(infer_data_file, out_dir)
    graph = tf.Graph()
    with graph.as_default(), tf.container(scope or "infer"):
        # Define dataset from placeholder, will be fed in during inference
        data_placeholder = tf.placeholder(data_dtype, data_shape)
        dataset = tf.data.Dataset.from_tensor_slices(data_placeholder)
        # Inference uses its own iterator: no shuffling, fixed step count.
        iterator = iterator_utils.get_infer_iterator(
            dataset,
            batch_size=hparams.infer_batch_size,
            num_infer_steps=hparams.num_infer_steps)
        model = model_creator(
            hparams,
            iterator=iterator,
            mode=tf.contrib.learn.ModeKeys.INFER,
            scope=None)
    return InferModel(
        graph=graph,
        model=model,
        iterator=iterator,
        data_placeholder=data_placeholder)
def create_rnn_cell(unit_type, num_units, forget_bias, dropout, mode,
                    num_proj=None, use_peepholes=True, device_str=None):
    """Creates an instance of a single RNN cell.

    Args:
        unit_type: "lstm" or "layer_norm_lstm".
        num_units: number of hidden units in the cell.
        forget_bias: bias added to the LSTM forget gate.
        dropout: currently unused -- the dropout wrapper below is disabled.
        mode: ModeKeys value; only relevant once dropout is re-enabled.
        num_proj: optional linear output projection size (plain LSTM only).
        use_peepholes: add diagonal peephole connections (plain LSTM only).
        device_str: currently unused; reserved for a DeviceWrapper (see TODO).

    Returns:
        A configured tf.contrib.rnn cell instance.

    Raises:
        ValueError: if unit_type is not recognized.
    """
    # Cell Type
    if unit_type == "lstm":
        utils.print_out(" LSTM, forget_bias=%g" % forget_bias)
        single_cell = tf.contrib.rnn.LSTMCell(
            num_units=num_units,
            use_peepholes=use_peepholes,  # diagonal peephole connections to learn timing
            num_proj=num_proj,  # linear output projection
            forget_bias=forget_bias)
    elif unit_type == "layer_norm_lstm":
        utils.print_out(" Layer Normalized LSTM, forget_bias=%g" % forget_bias,
                        new_line=False)
        # BUG FIX: LayerNormBasicLSTMCell lives in tf.contrib.rnn; the bare
        # tf.contrib.LayerNormBasicLSTMCell attribute does not exist, so this
        # branch previously raised AttributeError whenever it was taken.
        single_cell = tf.contrib.rnn.LayerNormBasicLSTMCell(
            num_units,
            forget_bias=forget_bias,
            layer_norm=True)
    else:
        raise ValueError("Unknown unit type %s!" % unit_type)
    # # Dropout (= 1-keep_prob) is set to 0 for eval and infer modes
    # dropout = dropout if mode == tf.contrib.learn.ModeKeys.TRAIN else 0.0
    # if dropout > 0.0:
    #     single_cell = tf.contrib.rnn.DropoutWrapper(
    #         cell=single_cell, input_keep_prob=(1.0-dropout))
    #     utils.print_out("  %s, dropout=%g " % (type(single_cell).__name__, dropout))
    # TODO: Residual
    # TODO: DeviceWrapper
    return single_cell
def gradient_clip(gradients, max_gradient_norm):
    """Clipping gradients of a model.

    Rescales the whole gradient list so its global L2 norm is at most
    *max_gradient_norm*; also returns the pre-clip global norm (useful for
    logging/summaries).
    """
    clipped_gradients, gradient_norm = tf.clip_by_global_norm(
        gradients, max_gradient_norm)
    return clipped_gradients, gradient_norm
def load_model(model, ckpt, session, name):
    """Load model from checkpoint.

    Restores the variables of *model* from checkpoint path *ckpt* into
    *session*; *name* is only used for the log line.
    """
    start_time = time.time()
    model.saver.restore(session, ckpt)
    utils.print_out(" loaded %s model parameters from %s, time %.2fs" %
                    (name, ckpt, time.time()-start_time))
    return model
def create_or_load_model(model, ckpt_dir, session, name):
    """Create nMOR model and initialize or load parameters in session.

    If *ckpt_dir* contains a checkpoint, the latest one is restored;
    otherwise all variables are freshly initialized. Returns the model
    together with its current global step so callers can resume counting.
    """
    latest_ckpt = tf.train.latest_checkpoint(ckpt_dir)
    if latest_ckpt:
        model = load_model(model, latest_ckpt, session, name)
    else:
        start_time = time.time()
        session.run(tf.global_variables_initializer())
        utils.print_out(" created %s model with fresh parameters, time %.2fs" %
                        (name, time.time()-start_time))
    global_step = model.global_step.eval(session=session)
    return model, global_step
| panchgonzalez/nmor | nmor/model_helper.py | model_helper.py | py | 7,400 | python | en | code | 22 | github-code | 36 | [
{
"api_name": "tensorflow.random_uniform_initializer",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.initializers.glorot_normal",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 29,
"usage_type": "at... |
17904371974 | import pandas as pd
import tensorflow as tf
import psycopg2
import configparser as cf
import numpy as np
import key_driver_analysis as kda
SQL_COLUMN_NAMES = ['nct_id',
'start_date',
'study_type',
'enrollment_type',
'phase',
'overall_status']
STATUS = ['Completed',
'Terminated']
def get_db_connection_str(db_props = 'aact.properties'):
    """Return a psycopg2 (libpq) "key=value" connection string.

    Reads the [aact.database] section of the INI-style file *db_props* and
    joins every option as "key=value " (note the trailing space, preserved
    from the original behavior).

    Args:
        db_props: path to the properties file.

    Returns:
        A space-separated libpq keyword/value connection string.
    """
    config = cf.ConfigParser()
    with open(db_props) as f:
        # Fix: ConfigParser.readfp() was deprecated since 3.2 and removed in
        # Python 3.12; read_file() is the drop-in replacement.
        config.read_file(f, source=db_props)
    dbargs=""
    for k, v in config['aact.database'].items():
        dbargs=dbargs + k + "=" + v + " "
    return dbargs
def train_validate_test_split(df, train_percent=.6, validate_percent=.2, seed=None):
    """Shuffle the rows of *df* and split them into train/validate/test.

    The split sizes are floor(train_percent * n) and floor(validate_percent * n);
    whatever remains becomes the test set. Seeding makes the shuffle repeatable.
    """
    np.random.seed(seed)
    shuffled = np.random.permutation(df.index)
    total = len(df.index)
    n_train = int(train_percent * total)
    n_validate = n_train + int(validate_percent * total)
    # Slice the shuffled index into three contiguous, disjoint pieces.
    pieces = (shuffled[:n_train],
              shuffled[n_train:n_validate],
              shuffled[n_validate:])
    train, validate, test = (df.loc[idx] for idx in pieces)
    return train, validate, test
def kda_metadata(df):
    """Run a key-driver (relative importance) analysis of every feature
    column against the binary 'status' target; returns the weights frame."""
    target = 'status'
    # Every column except the target is a candidate driver.
    # NOTE(review): going through set() makes the feature ORDER
    # nondeterministic across runs -- confirm downstream code is order-agnostic.
    features=list(set(df.columns.tolist()).difference(set([target])))
    print(f'target --> {target}')
    print(f'features --> {features}')
    rw_df = kda.relative_importance(df,
                                    target=target,
                                    features=features,
                                    verbose=True)
    return rw_df
def load_data(y_name='status', db_props='aact.properties'):
    """Returns the CT dataset as (train_x, train_y), (test_x, test_y), (validate_x, validate_y).

    Pulls oncology trials (started after 2019, Completed or Terminated) from
    the AACT Postgres database, derives integer-coded features from the raw
    categorical columns, and splits the frame 70/0.5/rest.
    """
    dbargs=get_db_connection_str(db_props)
    conn = psycopg2.connect(dbargs)
    # One large join across studies / calculated_values / eligibilities /
    # interventions / sponsors / designs / brief_summaries.
    sqlstr= \
        "SELECT s." + ",s.".join(SQL_COLUMN_NAMES) + ", sp.agency_class as sponsor_type, cv.number_of_facilities, e.gender, " + \
        "  cv.has_us_facility, cv.average_condition_completion_ratio, " + \
        "  CASE WHEN s.brief_title LIKE '%age III%' THEN '1' WHEN s.brief_title LIKE '%age IV%' THEN '2' ELSE 0 END as condition_stage, " + \
        "  CASE WHEN s.number_of_arms IS NULL THEN 0 ELSE s.number_of_arms END as number_of_arms_clean, " + \
        "  d.allocation, d.intervention_model, d.primary_purpose, 0 as drug_recency, bs.description, " + \
        "  count(dgi.id) as design_group_intervention_count, count(distinct(i.intervention_type)) as intervention_type_count, " + \
        "  count(distinct(sp2.name)) as sponsor_count " + \
        "FROM studies as s, calculated_values as cv, eligibilities as e, interventions as i, " + \
        "  sponsors as sp, sponsors as sp2, design_group_interventions as dgi, designs as d, brief_summaries as bs " + \
        "WHERE s.nct_id=cv.nct_id AND s.nct_id=sp.nct_id AND s.nct_id=i.nct_id AND s.nct_id=sp2.nct_id AND s.nct_id=e.nct_id " + \
        "AND s.nct_id=dgi.nct_id AND s.nct_id=d.nct_id AND s.nct_id=bs.nct_id " + \
        "AND s.start_date > '2019-01-01' " + \
        "AND cv.is_oncology = true " + \
        "AND s.overall_status in ('Completed', 'Terminated') " + \
        "AND s.enrollment IS NOT NULL AND cv.number_of_facilities > 0 " + \
        "AND sp.lead_or_collaborator = 'lead' " + \
        "GROUP BY s." + ",s.".join(SQL_COLUMN_NAMES) + ", sponsor_type, cv.number_of_facilities, cv.average_condition_completion_ratio, " + \
        "  e.gender, cv.has_us_facility, s.brief_title, s.number_of_arms, e.criteria, " + \
        "  d.allocation, d.intervention_model, d.primary_purpose, bs.description "
    print(sqlstr)
    df = pd.read_sql_query(sql=sqlstr,
                           con=conn,
                           index_col='nct_id',
                           parse_dates={'start_date': '%Y-%m-%d'})
    conn.close()
    # df_sponsors = df1['source'].value_counts()
    # df=df1.join(df_sponsors,
    #             on='source',
    #             rsuffix='_local')
    # print(df.groupby('phase').count())
    df['start_epoch'] = df.start_date.dt.year
    # Initialize integer codes, then map each categorical text column onto them.
    df['study_type_category'] = 0
    df['agency_type_category'] = 0
    df['gender_category'] = 0
    df['allocation_type'] = 0
    df['enrollment_type_category'] = 0
    # df['intervention_model_type'] = 0
    df['primary_purpose_type'] = 0
    df['status'] = 0
    df.loc[df.study_type == 'Expanded Access', 'study_type_category'] = 1
    df.loc[df.study_type == 'Interventional', 'study_type_category'] = 2
    df.loc[df.study_type == 'Observational', 'study_type_category'] = 3
    df.loc[df.study_type == 'Observational [Patient Registry]', 'study_type_category'] = 4
    # Binary target: 0 = Completed, 1 = Terminated.
    df.loc[df.overall_status == 'Completed', 'status'] = 0
    df.loc[df.overall_status == 'Terminated', 'status'] = 1
    df.loc[df.sponsor_type == 'U.S. Fed', 'agency_type_category'] = 0
    df.loc[df.sponsor_type == 'NIH', 'agency_type_category'] = 1
    df.loc[df.sponsor_type == 'Industry', 'agency_type_category'] = 2
    df.loc[df.sponsor_type == 'Other', 'agency_type_category'] = 3
    df.loc[df.gender == 'Male', 'gender_category'] = 1
    df.loc[df.gender == 'Female', 'gender_category'] = 2
    # Allocation: fall back to scanning the summary text, and treat
    # single-arm studies as non-randomized.
    df.loc[df.allocation == 'Randomized', 'allocation_type'] = 1
    df.loc[df.description.str.contains('randomized'), 'allocation_type'] = 1
    df.loc[df.allocation == 'Non-Randomized', 'allocation_type'] = 2
    df.loc[df.description.str.contains('non-randomized'), 'allocation_type'] = 2
    df.loc[df.number_of_arms_clean == 1, 'allocation_type'] = 2
    # df.loc[df.intervention_model == 'Crossover Assignment', 'intervention_model_type'] = 1
    # df.loc[df.intervention_model == 'Factorial Assignment', 'intervention_model_type'] = 2
    # df.loc[df.intervention_model == 'Parallel Assignment', 'intervention_model_type'] = 3
    # df.loc[df.intervention_model == 'Sequential Assignment', 'intervention_model_type'] = 4
    # df.loc[df.intervention_model == 'Single Group Assignment', 'intervention_model_type'] = 5
    df.loc[df.enrollment_type == 'Anticipated', 'enrollment_type_category'] = 1
    df.loc[df.primary_purpose == 'Basic Science', 'primary_purpose_type'] = 1
    df.loc[df.primary_purpose == 'Device Feasibility', 'primary_purpose_type'] = 2
    df.loc[df.primary_purpose == 'Diagnostic', 'primary_purpose_type'] = 3
    df.loc[df.primary_purpose == 'Educational/Counseling/Training', 'primary_purpose_type'] = 4
    df.loc[df.primary_purpose == 'Health Services Research', 'primary_purpose_type'] = 5
    df.loc[df.primary_purpose == 'Prevention', 'primary_purpose_type'] = 6
    df.loc[df.primary_purpose == 'Screening', 'primary_purpose_type'] = 7
    df.loc[df.primary_purpose == 'Supportive Care', 'primary_purpose_type'] = 8
    df.loc[df.primary_purpose == 'Treatment', 'primary_purpose_type'] = 9
    # Snapshot for offline inspection, then drop the raw text columns the
    # model cannot consume.
    df.to_csv('/tmp/ct.csv')
    df.drop(columns=['start_date','overall_status','average_condition_completion_ratio','sponsor_type', 'gender', 'phase', 'study_type',
                     'has_us_facility', 'allocation', 'intervention_model', 'primary_purpose', 'enrollment_type', 'description'], inplace=True)
    train, validate, test = train_validate_test_split(df, 0.7, 0.005)
    print("Rows", len(df))
    print("************")
    rw_df = kda_metadata(df)
    print(rw_df)
    print("************")
    # pop() removes the label column in place, leaving the feature frames.
    train_x, train_y = train, train.pop(y_name)
    test_x, test_y = test, test.pop(y_name)
    validate_x, validate_y = validate, validate.pop(y_name)
    return (train_x, train_y), (test_x, test_y), (validate_x, validate_y)
def train_input_fn(features, labels, batch_size):
    """An input function for training"""
    # Pair every feature column with its label, then shuffle/repeat/batch.
    pairs = (dict(features), labels)
    dataset = tf.data.Dataset.from_tensor_slices(pairs)
    return dataset.shuffle(1000).repeat().batch(batch_size)
def eval_input_fn(features, labels, batch_size):
    """An input function for evaluation or prediction"""
    feature_dict = dict(features)
    # Prediction calls pass labels=None; feed the features alone in that case.
    inputs = feature_dict if labels is None else (feature_dict, labels)
    dataset = tf.data.Dataset.from_tensor_slices(inputs)
    assert batch_size is not None, "batch_size must not be None"
    return dataset.batch(batch_size)
| nastacio/clinical-bi | src/main/py/ct_data.py | ct_data.py | py | 8,474 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "numpy.r... |
73974487785 | import random
import unittest
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from accounting_bot.ext.checklist import CheckList, Task, RepeatDelay
class ChecklistTest(unittest.TestCase):
    """Unit tests for CheckList.cleanup_tasks(): expiry, refresh and ordering."""

    def test_expire(self):
        """Non-repeating tasks more than ~2 days past due get dropped."""
        # noinspection PyTypeChecker
        checklist = CheckList(plugin=None)
        now = datetime.now()
        task_valid = Task(name="valid", time=now - timedelta(days=1, hours=12), repeat=RepeatDelay.never)
        task_expired = Task(name="expired", time=now - timedelta(days=2, hours=12), repeat=RepeatDelay.never)
        task_other1 = Task(name="other3", time=now, repeat=RepeatDelay.never)
        task_other2 = Task(name="other1", time=now + timedelta(days=2, hours=12), repeat=RepeatDelay.never)
        task_other3 = Task(name="other2", time=now + timedelta(days=1, hours=12), repeat=RepeatDelay.never)
        checklist.tasks = [task_valid, task_expired, task_other1, task_other2, task_other3]
        checklist.cleanup_tasks()
        self.assertCountEqual([task_valid, task_other1, task_other2, task_other3], checklist.tasks)

    def test_refresh(self):
        """Repeating tasks past due get pushed forward by their repeat delay."""
        # noinspection PyTypeChecker
        checklist = CheckList(plugin=None)
        now = datetime.now()
        task_never = Task(name="never", time=now - timedelta(days=1), repeat=RepeatDelay.never)
        task_daily = Task(name="daily", time=now - timedelta(days=3, hours=1), repeat=RepeatDelay.daily)
        task_weekly = Task(name="weekly", time=now - timedelta(days=3, hours=1), repeat=RepeatDelay.weekly)
        task_monthly = Task(name="monthly", time=now - timedelta(days=3, hours=1), repeat=RepeatDelay.monthly)
        # This task is only about one day expired and is not marked as finished, it should not get refreshed yet
        task_daily_pending = Task(name="daily2", time=now - timedelta(days=1, hours=1), repeat=RepeatDelay.daily)
        # This task is the same but marked as finished, it should get refreshed
        # NOTE(review): assigns the Task CLASS, not True -- truthy, so the test
        # passes, but `task_daily_completed.finished = True` is surely intended.
        task_daily_completed = Task(name="daily3", time=now - timedelta(days=1, hours=1), repeat=RepeatDelay.daily)
        task_daily_completed.finished = Task
        checklist.tasks = [task_never, task_daily, task_weekly, task_monthly, task_daily_pending, task_daily_completed]
        checklist.cleanup_tasks()
        self.assertEqual(now - timedelta(days=1), task_never.time)
        self.assertEqual(now + timedelta(days=1, hours=-1), task_daily.time)
        self.assertEqual(now + timedelta(days=4, hours=-1), task_weekly.time)
        self.assertEqual(now + relativedelta(months=1) - timedelta(days=3, hours=1), task_monthly.time)
        self.assertEqual(now - timedelta(days=1, hours=1), task_daily_pending.time)
        self.assertEqual(now + timedelta(days=1, hours=-1), task_daily_completed.time)

    def test_sorting(self):
        """cleanup_tasks must leave the list sorted by due time."""
        # noinspection PyTypeChecker
        checklist = CheckList(plugin=None)
        now = datetime.now()
        task_a = Task(name="a", time=now + timedelta(days=1, hours=0), repeat=RepeatDelay.never)
        task_b = Task(name="b", time=now + timedelta(days=3, hours=1), repeat=RepeatDelay.daily)
        task_c = Task(name="c", time=now + timedelta(days=9, hours=11), repeat=RepeatDelay.weekly)
        task_d = Task(name="d", time=now + timedelta(days=20, hours=17), repeat=RepeatDelay.monthly)
        # Feed the tasks in a random order; expect chronological order back.
        checklist.tasks = random.sample([task_a, task_b, task_c, task_d], 4)
        checklist.cleanup_tasks()
        self.assertListEqual([task_a, task_b, task_c, task_d], checklist.tasks)

if __name__ == '__main__':
    unittest.main()
| Blaumeise03/AccountingBot | tests/test_checklist.py | test_checklist.py | py | 3,556 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "accounting_bot.ext.checklist.CheckList",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 15,
"usage_type": "call"
},
{
... |
27705271301 | import yfinance as yf
import requests
from datetime import datetime
def calculate_dma(ticker, days):
    """Download recent daily prices and append a *days*-day moving average.

    Fix: the download window is now sized from *days*. The old fixed
    period='1mo' fetches only ~21 trading days, so a 23-day rolling mean was
    entirely NaN and the caller's data['DMA'].iloc[-1] printed as nan.
    yfinance only accepts a fixed set of period strings, so we pick the
    smallest standard one that comfortably covers the window.

    Args:
        ticker: yfinance symbol, e.g. 'BTC-USD'.
        days: rolling-average window in rows of the downloaded frame.

    Returns:
        The price DataFrame with an added 'DMA' column.
    """
    if days <= 15:
        period = '1mo'
    elif days <= 50:
        period = '3mo'
    elif days <= 110:
        period = '6mo'
    else:
        period = '1y'
    data = yf.download(ticker, period=period)
    data['DMA'] = data['Close'].rolling(window=days).mean()
    return data
def generate_signal(live_price, dma):
    """DMA crossover rule: price strictly above the average means buy."""
    return "buy" if live_price > dma else "sell"
def get_live_price():
    """Fetch the current BTC/USD spot price from the public CoinGecko API.

    Returns the price as a number; raises requests exceptions on network
    failure and KeyError if the response shape changes.
    """
    response = requests.get('https://api.coingecko.com/api/v3/simple/price?ids=bitcoin&vs_currencies=usd')
    return response.json()['bitcoin']['usd']
# Script entry: compare the live spot price against the 23-day moving
# average of daily closes and print a buy/sell signal.
data = calculate_dma('BTC-USD', 23)
dma = data['DMA'].iloc[-1]
dma_date = data.index[-1].strftime('%Y-%m-%d')  # Date of the last DMA calculation
live_price = get_live_price()
live_price_date = datetime.now().strftime('%Y-%m-%d')  # Current date
print(f"23-DMA at {dma_date:16} : {dma:.2f}")
print(f"Live Price at {live_price_date:12} : {live_price:.2f}")
signal = generate_signal(live_price, dma)
print(f"Signal : {signal}")
{
"api_name": "yfinance.download",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "datetime.datetime... |
34182723957 | # coding=utf-8
from __future__ import print_function
"""负责从主网址中爬取出需要的网址"""
import datetime
import logging
import bs4
import requests
import re
import tools.newspublish
from bs4 import BeautifulSoup
from models import *
from .tools.bloomfilter import BloomFilter
from Spider.autonews.tools.svmutil import *
from .object import URL
from ..autorecog.recognize import *
from ..autorecog.keywords import analyse_keywords
import sys
reload(sys)
sys.setdefaultencoding('utf8')
logger = logging.getLogger(__name__)
dirname = path.dirname(path.abspath(__file__))
# 适配不同平台加载模型内容
if sys.platform == 'win32':
content_model = svm_load_model(path.join(dirname, ".\content.model"))
else:
content_model = svm_load_model(path.join(dirname, './content.model'))
def crawl(task):
"""根据任务爬取内容的主函数"""
assert isinstance(task, Task)
header = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'zh-CN,zh;q=0.8',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.235'
}
cookie = ""
if task.cookie is not None and task.cookie != "":
cookie = dict(task.cookie)
# 对复杂url进行分析处理
site_url = task.site_url
site_urls = []
matchObj1 = re.match(r'.*\(date,(.*?),(.*?)\).*', site_url, re.M | re.I)
if matchObj1:
patten = matchObj1.group(1)
lead = matchObj1.group(2)
patten = patten.replace("yyyy", "%Y")
patten = patten.replace("MM", "%m")
patten = patten.replace("dd", "%d")
patten = patten.replace("HH", "%H")
patten = patten.replace("mm", "%M")
patten = patten.replace("ss", "%S")
delta = datetime.timedelta(days=int(lead))
now = datetime.datetime.now() - delta
patterned_time = now.strftime(patten) # 计算完偏移量的日期
site_url = re.sub(r"\(date,(.*?)\)", patterned_time, site_url)
matchObj = re.match(r'.*\(loop,(.*?),(.*?),(.*?)\).*', site_url, re.M | re.I)
if matchObj:
first_num = int(matchObj.group(1))
last_num = int(matchObj.group(2))
number_of_phases = int(matchObj.group(3))
for i in range(first_num, last_num, number_of_phases):
u = re.sub(r"\(loop,(.*?),(.*?),(.*?)\)", str(i), site_url)
site_urls.append(u)
if len(site_urls) is 0:
site_urls.append(site_url)
hrefs = []
url_model = UrlModel.objects.filter(task=task)
if len(url_model) is 0: # 判断url是否有模板
for u in site_urls:
href = __crawl_urls(u, cookie) # 获取所有需要采集的地址
hrefs.extend(href)
else:
for u in site_urls:
href = __crawl_urls_by_model(url_model[0], u, cookie)
hrefs.extend(href)
for url in hrefs:
try:
r = requests.get(url, headers=header, cookies=cookie)
logger.info('开始请求%s,返回状态码为%d,当前时间为%s' % (url, r.status_code, datetime.datetime.now()))
# 如果请求失败重试三次
if r.status_code != 200:
i = 0
while i < 3 and r.status_code != 200:
logger.info('正在重试第%d次' % (i + 1))
r = requests.get(url, headers=header, cookies=cookie)
i += 1
if r.status_code != 200:
raise requests.ConnectionError('网址连接失败'+url)
html = r.text
code = "utf8" # 用于下面对html进行操作
# 编码判断(待改进)
try:
html = html.encode(r.encoding).decode("utf8")
except UnicodeDecodeError:
html = html.encode(r.encoding).decode("GB18030")
code = "utf8"
except UnicodeEncodeError:
html = html.encode("GB18030").decode("GB18030")
code = "GB18030"
logger.debug("网址%s \n"
"编码%s \n"
"返回内容%s \n"
% (url, r.encoding, html))
# 分析每条网址并且根据模板识别内容,然后保存数据库并且发送
ret = __recognize_by_model(html, task, code)
title = ret.get("title")
content_html = ret.get("content_html")
content = ret.get("content")
if title is None or content_html is None or content_html is '' or title is '':
ret = traversal(html)
t = ret.get("title")
c = ret.get("content_html")
p = ret.get("content")
if title is None or title is '':
title = t
if content_html is None or content_html is '':
content_html = c
content = p
content_html = __convert_img(content_html, str(url)) # 将文中的图片的相对路径转换为绝对路径
print (title)
print (type(content))
news = News()
news.task = task
news.url = url
news.title = title
news.content = content_html
news.keywords = analyse_keywords(content, 5)
news.save()
publishers = task.publisher.all()
print (type(publishers))
if title is not None and content_html is not None and content_html is not '' and title is not '':
for publisher in publishers:
publisher_type = publisher.type
publisher_url = publisher.url
r = eval("tools.newspublish."+publisher_type+"(publisher_url, title, content_html, task.site_name, task.site_column, news.keywords)")
print (r)
bf = BloomFilter()
bf.insert(url)
except Exception as e:
print (e)
def __crawl_urls(url, cookie):
"""分析URL下所有可以采集的URL
:param url:需要分析的URL
:return set
"""
header = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'zh-CN,zh;q=0.8',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHT ML, like Gecko) Chrome/43.0.235'
}
r = requests.get(url, headers=header, cookies=cookie)
content = r.text
# print r.encoding
# TODO(qi): 解析编码方式还是不太好,虽然一般够用了,下次有更好的解决方案需要替换掉这段
try:
content = content.encode(r.encoding).decode("utf8")
except UnicodeDecodeError:
content = content.encode(r.encoding).decode("GB18030")
except UnicodeEncodeError:
content = content.encode("GB18030").decode("GB18030")
soup = BeautifulSoup(content, "html.parser")
t = soup.find_all("a")
hrefs = set('')
bf = BloomFilter()
for tag in t:
if tag.get("href") is not None:
newurl = str(tag.get("href")).strip()
# 处理不是http开头的各种情况,将相对路径拼接成绝对路径
if not newurl.startswith("http") and newurl.lower().find("javascript") is -1:
domain = re.match(r'http(s)?://(.*/)', url, re.M | re.I).group() # 拿到当前目录
if newurl.startswith("/"):
newurl = domain + newurl
elif newurl.startswith("./"):
newurl.replace("./","")
newurl = domain + newurl
elif newurl.startswith("../"):
count = newurl.count("../")
while count>0:
domain = domain[:len(domain) - 1]
domain = re.match(r'http(s)?://(.*/)', domain, re.M | re.I).group()
count -= 1
newurl = domain + newurl.replace("../", "")
else: # 剩下的”content_234.html"这种情况
newurl = domain + newurl
# 清理url中最后的#,以及当中的多个///的情况
newurl = newurl.partition("#")[0]
newurl = newurl.replace("://", "!!!")
while newurl.find("//") is not -1:
newurl = newurl.replace("//", "/")
newurl = newurl.replace("!!!", "://")
#TODO 错误识别“http://newspaper.jfdaily.com/xwcb/resfiles/2017-06/19/A0120170619S.pdf”临时处理,以后加(下次看到的话)
if newurl.find(".pdf") != -1:
continue
if "http" in newurl:
url_o = URL.URL(newurl, unicode(tag.string))
if url_o.is_contenturl():
if not bf.isContains(newurl):
# 转跳到下步处理分析内容
hrefs.add(newurl)
print ("已采集新网址"+url_o.url_name)
else:
print("该网址已采集过")
log_hrefs = "已分析网址"+str(url)
for h in hrefs:
log_hrefs += "\r\n"
log_hrefs += h
logger.info(log_hrefs)
return hrefs
def __crawl_urls_by_model(url_model, url, cookie):
"""通过模板来获取网址"""
assert isinstance(url_model, UrlModel)
start_location = url_model.start_location
end_location = url_model.end_location
include_word = url_model.include_words
include_words = None
if include_word is not u"":
include_words = include_word.split(";")
exclude_word = url_model.exclude_words
exclude_words = None
if exclude_word is not u"":
exclude_words = exclude_word.split(";")
hrefs = set('')
bf = BloomFilter()
header = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'zh-CN,zh;q=0.8',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.235'
}
r = requests.get(url, headers=header, cookies=cookie)
content = r.text
code = r.encoding
try:
content = content.encode(r.encoding).decode("utf8")
except UnicodeDecodeError:
content = content.encode(r.encoding).decode("GB18030")
except UnicodeEncodeError:
content = content.encode("GB18030").decode("GB18030")
code = "GB18030"
start_num = 0
end_num = len(content)
if start_location is not None and start_location != "":
start_num = content.find(start_location)
if start_num == -1:
start_num = 0
if end_location is not None and end_location != "":
end_num = content.find(end_location, start_num)
if end_num == -1:
end_num = len(content)
content = content[start_num:end_num]
soup = BeautifulSoup(content, "html.parser")
a_tags = soup.find_all("a")
for tag in a_tags:
if tag.get("href") is not None:
newurl = str(tag.get("href")).strip()
newurl = newurl.replace("\\","/")
# 处理不是http开头的各种情况,将相对路径拼接成绝对路径
if not newurl.startswith("http") and newurl.lower().find("javascript") is -1:
domain = re.match(r'http(s)?://(.*/)', url, re.M | re.I).group() # 拿到当前目录
if newurl.startswith("/"):
newurl = domain + newurl
elif newurl.startswith("./"):
newurl.replace("./","")
newurl = domain + newurl
elif newurl.startswith("../"):
count = newurl.count("../")
while count>0:
domain = domain[:len(domain) - 1]
domain = re.match(r'http(s)?://(.*/)', domain, re.M | re.I).group()
count -= 1
newurl = domain + newurl.replace("../", "")
else: # 剩下的”content_234.html"这种情况
newurl = domain + newurl
# 清理url中最后的#,以及当中的多个///的情况
newurl = newurl.partition("#")[0]
newurl = newurl.replace("://", "!!!")
while newurl.find("//") is not -1:
newurl = newurl.replace("//", "/")
newurl = newurl.replace("!!!", "://")
# url过滤条件
continue_flag = False
if include_words is not None and len(include_words) is not 0:
for word in include_words:
if newurl.find(word) is -1:
continue_flag = True
break
if exclude_words is not None and len(exclude_words) is not 0:
for word in exclude_words:
if newurl.find(word) is not -1:
continue_flag = True
break
if continue_flag:
continue
# TODO 错误识别“http://newspaper.jfdaily.com/xwcb/resfiles/2017-06/19/A0120170619S.pdf”临时处理,以后加(以后高兴加的话)
if newurl.find(".pdf") != -1:
continue
if "http" in newurl:
if not bf.isContains(newurl):
# 转跳到下步处理分析内容
hrefs.add(newurl)
print("已采集新网址" + newurl)
else:
print("该网址已采集过")
log_hrefs = "已分析网址"+str(url)
for h in hrefs:
log_hrefs += "\r\n"
log_hrefs += h
logger.info(log_hrefs)
return hrefs
# def __recognize_content(soup):
# """识别网页标题和内容"""
#
# assert isinstance(soup, BeautifulSoup)
# soup = __clean(soup)
# title = ""
# content = ""
# return title, content
# def __clean(soup):
# """清理网页噪声"""
# assert isinstance(soup, BeautifulSoup)
#
# try:
# for script in soup.find_all('script'):
# script.decompose()
# except TypeError:
# pass
# try:
# for style in soup.find_all('style'):
# style.decompose()
# except TypeError:
# pass
# try:
# for meta in soup.find_all('meta'):
# meta.decompose()
# except TypeError:
# pass
# try:
# for form in soup.find_all('soup'):
# form.decompose()
# except TypeError:
# pass
# try:
# for inputs in soup.find_all('input'):
# inputs.decompose()
# except TypeError:
# pass
# try:
# for select in soup.find_all('select'):
# select.decompse()
# except TypeError:
# pass
# try:
# for link in soup.find_all('link'):
# link.decompse()
# except TypeError:
# pass
#
# return soup
def __recognize_by_model(html, task, code):
    """根据模板筛选标题和内容 -- extract title/content from *html* using the
    Model templates attached to *task*.

    :param html: page source as a unicode string (Python 2 codebase)
    :param task: Task instance whose Model rows describe the tags to match
    :param code: charset used to decode each model's start/end markers
    :return: dict with keys "title", "content", "content_html"
    """
    assert isinstance(task, Task)
    title = ""
    content = ""
    content_html = ""
    models = Model.objects.filter(task_id=task.id)
    for model in models:
        tag_name = model.tag_name
        tag_id = model.tag_id
        tag_attrs = model.tag_attrs
        attrs_dict = {}
        if tag_attrs is not None and tag_attrs != "":
            # NOTE(review): eval() on stored attrs is unsafe if tag_attrs can
            # ever contain untrusted input -- consider ast.literal_eval.
            attrs_dict = eval(tag_attrs)
        is_title = model.is_title
        # Optional pre/post context trimming of the raw html.
        assert isinstance(html, unicode)
        start = model.start_location
        end = model.end_location
        start_num = 0
        end_num = len(html)
        if start is not None and start != "":
            start_num = html.find(start.decode(code))
            if start_num == -1:
                start_num = 0
        if end is not None and end != "":
            end_num = html.find(end.decode(code), start_num)
            if end_num == -1:
                end_num = len(html)
        html = html[start_num:end_num]
        try:
            html = html.encode("utf8").decode("utf8")
        except UnicodeDecodeError:
            html = html.encode("utf8").decode("GB18030")
        except UnicodeEncodeError:
            html = html.encode("GB18030").decode("GB18030")
        try:
            soup = BeautifulSoup(html, "html.parser")
        except Exception as e:
            # NOTE(review): if parsing ever raises here, ``soup`` stays
            # unbound and the code below fails with NameError -- confirm
            # whether this path can actually occur.
            print (e)
            print (html)
        if is_title:
            try:
                title = soup.find(name=tag_name, id=tag_id, attrs=attrs_dict).string
            except AttributeError:
                print ("找不到标题")
        else:
            # TODO(qi): 需要提供图片
            try:
                # BUGFIX: the original used identity checks
                # ("tag_name is not u''", "attrs_dict is not {}") which
                # compare object identity, not value; "is not {}" is always
                # True.  Use value comparison as clearly intended.
                if tag_name != u'' and attrs_dict != {}:
                    content_soups = soup.find_all(name=tag_name, attrs=attrs_dict)
                    content_html += str(content_soups[0])
                    content += content_soups[0].get_text()
                else:
                    content_html += str(soup)
                    content += soup.get_text()
            except AttributeError:
                print("找不到内容")
            except TypeError:
                print("找不到内容")
    result = {"title": title, "content": content, "content_html": content_html}
    return result
# def __recognize(lines, line_max):
# """该私有方法为处理数据并调用libsvm识别标题和内容"""
#
# title = '' # 存放标题
# content = '' # 存放内容
# content_html = '' # 存放原生html
#
# content_flag = False # 上一条是否为正文,是的话为True,否的话为False
# tags = [] # 存放所有Tag
# for line in lines:
# # print line.get('content')
# sequence = line.get('sequence')
# tag = line.get('tag')
# tag_name = line.get('tag_name')
# tag_id = line.get('tag_id')
# tag_class = line.get('tag_class')
# content_len = line.get('content_len')
#
# # 如果是紧跟正文的图片则判断为需要的图片
# if content_flag is True and tag_name == 'img':
# content_html += line.get('content_html')
#
# content_flag = False
# if not tag_name == 'img':
# f1 = sequence / line_max # 在队列中的顺序
#
# f2 = 0.5
# try:
# if tag_name.lower() == "h1":
# f2 = 1
# if tag_name.lower() == "h2" or tag_name.lower() == "h3":
# f2 = 0.90
# if tag_name.lower() == "title":
# f2 = 0.80
# if tag_name.lower() == "div":
# f2 = 0.70
# if tag_name.lower() == "span":
# f2 = 0.30
# if tag_name.lower() == "td" or tag_name.lower() == "th":
# f2 = 0.20
# if tag_name.lower() == "strong":
# f2 = 0.15
# if tag_name.lower() == "article":
# f2 = 0.10
# if tag_name.lower() == "p":
# f2 = 0
# except AttributeError:
# pass
#
# f3 = 0.5
# try:
# if tag_id.lower().find("title") is not -1 or tag_class.lower().find("title") is not -1:
# f3 = 1
# if tag_id.lower().find("headline") is not -1 or tag_class.lower().find("headline") is not -1:
# f3 = 0.90
# if tag_id.lower().find("pic") is not -1 or tag_class.lower().find("pic") is not -1:
# f3 = 0.40
# if tag_id.lower().find("content") is not -1 or tag_class.lower().find("content") is not -1:
# f3 = 0.30
# if tag_id.lower().find("text") is not -1 or tag_class.lower().find("text") is not -1:
# f3 = 0.20
# if tag_id.lower().find("author") is not -1 or tag_class.lower().find("author") is not -1:
# f3 = 0.10
# if tag_id.lower().find("editor") is not -1 or tag_class.lower().find("editor") is not -1:
# f3 = 0
# except AttributeError:
# pass
#
# f4 = content_len / 100
# if f4 > 1:
# f4 = 1
#
# data_list = []
# row = "0 1:%f 2:%f 3:%f 4:%f" % (f1, f2, f3, f4)
# # print row
# data_list.append(row)
# y, x = svm_read_problem(data_list)
# # print (os.path.abspath('..'))
# # m = svm_load_model('./Spider/autonews/content.model')
# p_labs, p_acc, p_vals = svm_predict(y, x, content_model)
# if p_labs[0] == 1.0:
# title += line.get('content')
# if p_labs[0] == 2.0:
# content_flag = True
# content += line.get('content')
# content_html += line.get('content_html')
# tags.append(tag)
#
# result = {"title": title, "content": content, "content_html": content_html, "tags": tags}
# return result
# def traversal(html):
# soup = BeautifulSoup(html, "lxml")
# lines = []
# # 遍历所有节点
# i = 0
# for tag in soup.descendants:
# line = {'sequence': i}
# i += 1
# line['tag'] = tag
# if type(tag) == bs4.element.Tag:
# try:
# # 标签有内容或者是p标签,并且标签的父节点没有p(因为只需要判断到p就可以了,里面的东西都要的)
# if (tag.string is not None or tag.name == 'p') and tag.find_parent('p') is None:
# line['content_html'] = str(tag)
# try:
# line['content_len'] = len(tag.string.strip())
# except TypeError and AttributeError:
# line['content_len'] = 0
# content = ''
# for string in tag.stripped_strings:
# content += string
# line['content'] = content
# # content = tag.string
# line['tag_name'] = tag.name
# line['tag_id'] = tag.get("id")
# line['tag_class'] = tag.get("class")
#
# # p提取其下所有标签的文字
# if tag.name == 'p':
# content = ''
# for string in tag.stripped_strings:
# content += string
# line['content_len'] = len(content.strip())
# line['content'] = content
# elif tag.name == 'img':
# line['tag_name'] = tag.name
# line['content_html'] = str(tag)
#
# except StopIteration:
# pass
#
# if type(tag) == bs4.element.NavigableString and tag.string.strip() != '':
# if tag.next_sibling is not None and tag.previous_sibling is not None:
# line['content_html'] = str(tag)+"</br>"
# line['tag_name'] = 'p'
# line['content_len'] = len(unicode(tag).strip())
# content = tag.string
# line['content'] = content
#
# # 判断该节点是否为需要的节点
# if line.get('tag_name') is not None:
# lines.append(line) # 在队列尾部插入新数据
#
# result = __recognize(lines, i)
# tags = result['tags']
# if len(tags) > 0:
# count = 0
# last_parent = tags[0].parent
# for t in tags:
# if t not in last_parent.descendants and t is not None:
# last_parent = last_parent.parent
# count += 1
# if count is 3:
# last_parent = None
# break
# if last_parent is not None:
# result['content_html'] = str(last_parent)
# print ("success: "+str(last_parent))
#
# return result
def __convert_img(content_html, url):
    """
    将文章中的相对图片路径转换为绝对路径(如果有的化)
    Convert relative <img src> paths in the article body to absolute URLs.

    :param content_html: HTML fragment of the article body
    :param url: absolute URL of the article page
    :return: the fragment with every resolvable src rewritten
    """
    assert isinstance(content_html, str)
    assert isinstance(url, str)
    try:
        soup = BeautifulSoup(content_html, "html.parser")
    except Exception:
        print ("处理图片地址转换失败")
        return content_html
    imgs = soup.find_all(name="img")
    for img in imgs:
        if img.get("src") is None:
            continue
        src = str(img.get("src")).strip()
        src = src.replace("\\", "/")
        # Only rewrite non-absolute, non-javascript sources.
        # BUGFIX: "is -1" compared identity; use "== -1" for value equality.
        if not src.startswith("http") and src.lower().find("javascript") == -1:
            # Current directory of the page, e.g. "http://host/a/b/".
            domain = re.match(r'http(s)?://(.*/)', url, re.M | re.I).group()
            if src.startswith("/"):
                src = domain + src
            elif src.startswith("./"):
                # BUGFIX: str.replace returns a new string; the original
                # discarded the result, leaving "./" in the final path.
                src = src.replace("./", "")
                src = domain + src
            elif src.startswith("../"):
                # Walk one directory level up per "../" prefix.
                count = src.count("../")
                while count > 0:
                    domain = domain[:len(domain) - 1]
                    domain = re.match(r'http(s)?://(.*/)', domain, re.M | re.I).group()
                    count -= 1
                src = domain + src.replace("../", "")
            else:  # 剩下的”content_234.html"这种情况 (bare relative filename)
                src = domain + src
        img['src'] = src
    return str(soup)
# Script entry point: crawl task id 1.
# NOTE(review): crawl() is presumably defined earlier in this file -- not
# visible in this chunk.
if __name__ == '__main__':
    crawl(1)
    # __crawl_urls("http://www.sina.com.cn")
    # crawl_urls("http://www.163.com")
    # crawl_urls("http://www.qq.com")
    # crawl_urls("http://www.sohu.com")
    # crawl_urls("http://www.kankanews.com")
    #
    # crawl_urls("http://www.people.com.cn")
    # crawl_urls("http://www.gmw.cn")
    # crawl_urls("http://chinese.yonhapnews.co.kr")
    # crawl_urls("https://www.washingtonpost.com")
    # crawl_urls("http://www.thepaper.cn")
| zqkarl/Spider | Spider/autonews/url_spider.py | url_spider.py | py | 26,835 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.setdefaultencoding",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sys.platform",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "re.match",... |
42982956826 | import redis
#import hazelcast
import logging
import random
import azlog
log = azlog.getLogger(__name__)
def SetupCacheConn(type, ip, port, key, ssl):
    """Create a cache connection.

    :param type: cache backend name; only "redis" is currently supported
    :param ip: host name or address of the cache server
    :param port: TCP port of the cache server
    :param key: access password
    :param ssl: "yes" to use a TLS connection, anything else for plaintext
    :return: a redis client handle, or None for unsupported backends
    """
    # BUGFIX: the original left ``r`` unbound on the non-redis path and
    # raised UnboundLocalError at ``return r``; return None explicitly.
    r = None
    if type == "redis":
        if ssl == "yes":
            r = SetupRedisSSLConn(ip, port, key)
        else:
            r = SetupRedisConn(ip, port, key)
    else:
        print("working on it. not yet supported...")
    return r
def SetupRedisConn(ip,port,key):
    """Open a plain (non-TLS) Redis connection; *key* is the access password."""
    r = redis.Redis(
        host=ip,
        port=port,
        password=key)
    return r
def SetupRedisSSLConn(ip,port,key):
    """Open a TLS Redis connection without certificate verification."""
    r = redis.StrictRedis(
        host=ip,
        port=port,
        password=key,
        ssl_cert_reqs=u'none', #-- or specify location of certs
        ssl=True)
    return r
def GetTrade(r, keyname):
    """Fetch the trade XML stored under *keyname* from the cache client *r*."""
    return r.get(keyname)
''' cachetype = redis, nfs etc.
io = "input" or "output"
r = redis handle
format = eyxml or varxml
tradenum = trade number
xmlstring = the trade xml data
'''
def PutTrade(cache_type,io,r,format,tradenum,xmlstring):
    """Write a trade XML blob into the cache under a conventional key name.

    format "eyxml" -> prefix "ey"; "varxml" -> prefix "var".
    io "input" -> "<prefix>NNNNNNN.xml"; "output" -> "<prefix>NNNNNNN_result.xml".
    Only cache_type "redis" actually writes; other types fall through.

    NOTE(review): inconsistent return convention -- returns the cache handle
    on success but the int 1 on a bad format/io argument.
    """
    if (format == "eyxml"):
        prefix = "ey"
    elif (format == "varxml"):
        prefix = "var"
    else:
        log.error("invalid format: %s" % format)
        return(1)
    if (io == "input"):
        # %007d zero-pads the trade number to 7 digits.
        keyname = "%s%007d.xml" % (prefix, tradenum)
    elif (io == "output"):
        keyname = "%s%007d_result.xml" % (prefix, tradenum)
    else:
        log.error("File format: %s; input/output only supported. " % format)
        return(1)
    if (cache_type=="redis"):
        r.set(keyname,xmlstring)
        log.debug("Trade %d: written as: %s:\n%s" % (tradenum,keyname,xmlstring))
    return r
def InjectRandomFail(failure):
    """With probability *failure*, log an error and return 1; otherwise None."""
    roll = random.uniform(0.0, 1.0)
    if roll >= failure:
        return None
    logging.error("RANDOM ERROR INJECTION: TASK EXIT WITH ERROR")
    return 1
def DoFakeCompute(xmlstring, delay_time, task_duration, mem_usage):
    """Simulate a pricing task: allocate memory, wait, then burn CPU.

    :param xmlstring: trade payload (not used by the fake computation)
    :param delay_time: startup delay in seconds
    :param task_duration: busy-compute duration in milliseconds
    :param mem_usage: working-set size in MiB (rows of 131072 float64 = 1 MiB each)
    """
    import numpy as np
    import time
    # Cleanup: removed the unused "import random" from the original.
    # Allocate the working set to simulate memory pressure.
    array_size = (mem_usage, 131072)
    data = np.ones(array_size, dtype=np.float64)
    # Simulated startup latency.
    time.sleep(delay_time)
    # Busy-loop on the array for task_duration milliseconds.
    task_duration_s = task_duration / 1000.0  # convert ms -> s
    end_time = time.time() + task_duration_s
    while time.time() < end_time:
        data *= 12345.67890
        data[:] = 1.0
| Azure/HPC-Accelerator | scenarios/batch/code/src/utils.py | utils.py | py | 2,295 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "azlog.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "redis.Redis",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "redis.StrictRedis",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "random.uniform",
"li... |
20672750408 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from graph_tool.all import *
import numpy as np
from pathos.multiprocessing import ProcessingPool as Pool
import tqdm
import pickle
def swir(n, z, rho0, kappa, mu, eta, num_ensamble):
    """Run one SWIR (Susceptible-Weakened-Infected-Removed) epidemic
    realization on an Erdos-Renyi random graph.

    States stored per node: 0=susceptible, 1=infected, 2=weakened, 3=removed.
    n: number of nodes; z: mean degree; rho0: initial infected fraction;
    kappa: S->I probability; mu: S->W probability; eta: W->I probability;
    num_ensamble: ensemble index, used to seed both NumPy and graph-tool RNGs.
    Returns the final fraction of removed nodes (the "magnetisation").
    """
    pER = z/n
    ss = 1 - mu - kappa
    ww = 1 - eta
    np.random.seed(num_ensamble)
    seed_rng(num_ensamble)
    # Initialise the Erdos-Renyi graph (Poisson degree sequence).
    g = random_graph(n, lambda: np.random.poisson((n-1) * pER), directed=False, model="erdos", random=True)
    # Declare a vertex property "estado" holding each node's state.
    estado = g.new_vertex_property("short")
    # Set every node to the susceptible state.
    estado.get_array()[:] = 0
    # Draw rho0*n random node indices.
    infected_index = np.random.choice(np.arange(0, n), size=int(n*rho0), replace=False)
    # Flip those rho0*n nodes to the infected state.
    estado.get_array()[infected_index] = 1
    # List of currently infected vertices.
    I = [g.vertex(i) for i in infected_index]
    def reaction(vertex):
        """
        Perform one time step for an infected node:
        vertex is a node in the infected state.
        """
        # Move the current infected node to the removed state.
        estado[vertex] = 3
        # Get the neighbours of the infected node.
        vecinos = vertex.out_neighbors()
        # Indices of susceptible neighbours.
        S = np.array([g.vertex_index[v] for v in vecinos if estado[v]==0]).astype(int)
        # Get the neighbours again (the iterator above is consumed).
        vecinos = vertex.out_neighbors()
        # Indices of weakened neighbours.
        W = np.array([g.vertex_index[v] for v in vecinos if estado[v]==2]).astype(int)
        # Draw new states for the susceptible neighbours.
        new_states = np.random.choice([0, 1, 2], size=S.size, p=[ss, kappa, mu])
        # Update states of susceptible neighbours.
        estado.get_array()[S] = new_states
        # Start the list of newly infected nodes with the infected susceptibles.
        new_infected = [g.vertex(i) for i in S[new_states==1]]
        # Draw new states for the weakened neighbours.
        new_states = np.random.choice([2, 1], size=W.size, p=[ww, eta])
        # Update states of weakened neighbours.
        estado.get_array()[W] = new_states
        # Append the weakened neighbours that became infected.
        new_infected += [g.vertex(i) for i in W[new_states==1]]
        # Return the neighbours that became infected.
        return new_infected
    while I:
        # Nodes that will be infected at the next time step.
        new_infected_n = []
        # Iterate over the currently infected nodes.
        for i in I:
            # Let the infected node react with its neighbours.
            ni = reaction(i)
            # Collect its newly infected neighbours.
            new_infected_n.append(ni)
        # Flatten into the infected list for the next time step.
        I = [s for sublist in new_infected_n for s in sublist]
        np.random.shuffle(I)
    magnetisation = np.count_nonzero(estado.get_array()==3)/estado.get_array().size
    return magnetisation
# N = 50000
# def worker_function(num_ensamble, N):
# kappa_range = (np.linspace(-1, 1, 20))**3
# max_new = 0.108021+0.003
# min_new = 0.108021-0.003
# kappa_range = (kappa_range-kappa_range.min()) * (max_new - min_new) / 2 + min_new
# results = []
# for kappa in kappa_range:
# results.append(swir(N, 8, 0.00747762, kappa, kappa, 0.5, num_ensamble))
# with open('./results/{0}_{1}_rho_critical.p'.format(num_ensamble, N), "wb") as f:
# pickle.dump(results, f)
# ensambles = 20000
# with Pool(64) as pool:
# results = list(tqdm.tqdm(pool.imap(worker_function, range(ensambles), (N for n in range(ensambles))),
# total=ensambles))
# def worker_function_2(num_ensamble):
# kappa_sub = 0.115023
# rho_0_sub = 2e-3
# kappa_c = 0.108021
# rho_0_c = 0.00747762
# #N_range = [int(n) for n in np.geomspace(1e5, 3e6, 10)]
# N_range = [ 100000, 145923, 212936, 310723]
# np.random.shuffle(N_range)
# results = []
# for N in N_range:
# #results.append(swir(N, 8, rho_0_sub, kappa_sub, kappa_sub, 0.5, num_ensamble))
# results.append(swir(N, 8, rho_0_c, kappa_c, kappa_c, 0.5, num_ensamble))
# with open('./results/{0}_fig7_rho_critical.p'.format(num_ensamble), "wb") as f:
# pickle.dump(results, f)
# ensambles = 32000
# with Pool(64) as pool:
# results = list(tqdm.tqdm(pool.imap(worker_function_2, range(ensambles)), total=ensambles))
# def worker_function_3(num_ensamble, N):
# kappa_sub = 0.115023
# rho_0_sub = 2e-3
# kappa_c = 0.108021
# rho_0_c = 0.00747762
# r = swir(N, 8, rho_0_c, kappa_c, kappa_c, 0.5, num_ensamble)
# with open('./results/{0}_{1}_fig10_rho_critical.p'.format(num_ensamble, N), "wb") as f:
# pickle.dump(r, f)
# N_range = [ 100000, 145923, 212936, 310723]
# ensambles = 10000
# for N in N_range:
# with Pool(64) as pool:
# results = list(tqdm.tqdm(pool.imap(worker_function_3, range(ensambles), [N]*ensambles), total=ensambles))
# def worker_function_4(num_ensamble, N):
# kappa_sub = 0.115023
# rho_0_sub = 2e-3
# kappa_c = 0.108021
# rho_0_c = 0.00747762
# r = swir(N, 8, rho_0_sub, kappa_sub, kappa_sub, 0.5, num_ensamble)
# with open('./results/{0}_{1}_fig7_rho_sub_critical.p'.format(num_ensamble, N), "wb") as f:
# pickle.dump(r, f)
# N_range = [ 100000, 145923, 212936, 310723]
# ensambles = 10000
# for N in N_range:
# with Pool(64) as pool:
# results = list(tqdm.tqdm(pool.imap(worker_function_4, range(ensambles), [N]*ensambles), total=ensambles))
# def worker_function_5(num_ensamble, kappa):
# rho_0_sub = 2e-3
# rho_0_c = 0.00747762
# r = swir(1000000, 8, rho_0_sub, kappa, kappa, 0.5, num_ensamble)
# with open('./results/{0}_{1}_fig6_rho_sub_critical.p'.format(num_ensamble, kappa), "wb") as f:
# pickle.dump(r, f)
# kappa_sub = 0.115023
# kappa_range = np.linspace(kappa_sub-8e-5, kappa_sub-1e-2, 10)
# ensambles = 3000
# for kappa in kappa_range:
# with Pool(64) as pool:
# results = list(tqdm.tqdm(pool.imap(worker_function_5, range(ensambles), [kappa]*ensambles), total=ensambles))
# Sweep kappa in 20 non-uniformly spaced steps (cubic spacing, denser near
# the centre) around the critical value 0.108021, within +/- 10/Nnu.
Nnu = 656
kappa_range = (np.linspace(-1, 1, 20))**3
max_new = 0.108021+10/Nnu
min_new = 0.108021-10/Nnu
kappa_range = (kappa_range-kappa_range.min()) * (max_new - min_new) / 2 + min_new
ensambles = 3000
def worker_function_6(num_ensamble, kappa):
    """Run one SWIR realization of size 1e6 at the critical initial density."""
    rho_0_sub = 2e-3
    rho_0_c = 0.00747762
    return swir(1000000, 8, rho_0_c, kappa, kappa, 0.5, num_ensamble)
# 64-way parallel ensemble per kappa value; results pickled per index.
for idx, kappa in enumerate(kappa_range):
    with Pool(64) as pool:
        results = list(tqdm.tqdm(pool.imap(worker_function_6, range(ensambles), [kappa]*ensambles), total=ensambles))
    with open('./results/{0}_{1}_fig14y15_rho_critical.p'.format(1000000, idx), "wb") as f:
pickle.dump(results, f) | VolodyaCO/erSWIR | implementation.py | implementation.py | py | 7,234 | python | es | code | 1 | github-code | 36 | [
{
"api_name": "numpy.random.seed",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.poisson",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.random... |
2848517125 | #############################################################################################################################################
__filename__ = "main.py"
__description__ = """Represents main program.
"""
__author__ = "Anand Iyer"
__copyright__ = "Copyright 2016-17, Anand Iyer"
__credits__ = ["Anand Iyer"]
__version__ = "2.6"
__maintainer__ = "Anand Iyer"
__email__ = "anand.iyer@moolya.com"
__status__ = "Testing" #Upgrade to Production once tested to function.
#############################################################################################################################################
import re
import random
import sys
import os
import argparse
import support
from importlib import import_module
from excelfunctions import *
default_module = __import__("functions") #functions.py
def store (line_counter, variable, fp, function_args):
    """Evaluate fp(function_args) for the given row and stash the result as
    attribute *variable* on the default functions module (the $variables)."""
    return_value = call_function (line_counter, fp, function_args)
    setattr (default_module, variable, return_value)
    return "Stored return value in $%s" %(variable)
def handle_reference (pattern, each, each_row, replace=False):
    """Resolve a column reference ("C<n>") in token *each* for row *each_row*.

    If *each* matches *pattern*, look up column n of the current row in the
    excel map; with replace=True, substitute the looked-up value back into
    the token (quoting non-numeric strings).  Tokens that do not match, or
    whose lookup fails, are returned unchanged.
    """
    # Default: pass the token through untouched.  BUGFIX: the original left
    # return_value unbound when the excel lookup raised (bare except: pass),
    # causing a NameError at the final return.
    return_value = each
    m = re.search (pattern, each)
    if m is not None:
        try:
            return_value = readfromexcelmap (excel_map, each_row, int (m.groups(1)[0]))
            if replace:
                if type (return_value) == str and not support.isdigit (return_value):
                    return_value = "'" + return_value + "'"
                return_value = re.sub (pattern, return_value, each)
        except:
            # Lookup/substitution failed -- fall back to the raw token.
            return_value = each
    return return_value
def call_function (each_row, fp, function_args):
    """Evaluate one template cell for *each_row*.

    fp=None            -> join the tokens as a plain string (C<n> refs resolved).
    fp is EVAL_IN_PYTHON -> rebuild "name(args)" with refs resolved/quoted and
                            hand the whole expression to fp for evaluation.
    fp callable        -> call fp with the list of resolved arguments.
    """
    global excel_map
    all_args = []
    if fp == None:
        function_args = list(support.lexer_setup(function_args)) #function_args is now a list of arguments, with commas ignored between two forward slashes
        for each in function_args:
            all_args.append (handle_reference ("C(\d+)$", each, each_row))
        return_value = ''.join (all_args) #populate with plain strings. No function calls.
    elif "EVAL_IN_PYTHON" in str (fp):
        #Need to first split the arguments, so we can use the references. Now, join appropriately to form the function call syntax.
        function, function_args = extract_function_with_args (pattern_eval, function_args) #function_args[0]
        function_args = list(support.lexer_setup(function_args)) #function_args is now a list of arguments, with commas ignored between two forward slashes
        for each in function_args:
            arg = handle_reference ("C(\d+)$", each, each_row)
            # Quote non-numeric strings so the rebuilt expression evaluates.
            if type(arg) == str and not support.isdigit(arg):
                arg = "'" + arg + "'"
            all_args.append (arg)
        all_args = function + "(" + ','.join(str(x) for x in all_args) + ")"
        all_args = support.remove_slashes (all_args)
        return_value = fp (all_args) #internal calls the function with appropriate arguments
    elif callable (fp):
        function_args = list(support.lexer_setup(function_args)) #function_args is now a list of arguments, with commas ignored between two forward slashes
        for each in function_args:
            all_args.append (handle_reference ("C(\d+)$", each, each_row))
        return_value = fp (all_args) #internal calls the function with appropriate arguments
    return return_value
#Call function once for each row to fill
#If there are column references, use it along with current row, and fill in actual value
#Same function is used with fp and function_args passed as None and empty string respectively, to populate plain strings.
def construct_result_dict (start_row, col, end_row, fp, function_args):
    """Call *fp* once per row in [start_row, end_row) and map each
    (row, col) cell to its computed value.

    fp=None fills plain strings (call_function handles that case).
    :return: dict {(row, col): value} ready to be merged into the excel map.
    """
    # Single pass: the original built two parallel lists (values, cells)
    # and zipped them; a dict comprehension does the same in one loop.
    return {(each_row, col): call_function (each_row, fp, function_args)
            for each_row in range (start_row, end_row)}
def extract_function_with_args(pattern, function_to_call):
    """Split "name(args)" into its name and stripped argument string using
    the pre-compiled regex *pattern*; returns ("", "") when nothing matches."""
    match = pattern.search(function_to_call)
    if match is None:
        return "", ""
    groups = match.groups(1)
    return groups[0], str(groups[1]).strip()
def get_function (line):
    """Parse one template line ("$var|RnCm|...|func(args)") into its parts.

    Returns (start_row, col, end_row, mod, variable, function, function_args);
    mod is always None here (caller substitutes the default module), and
    function/function_args are empty for plain-string lines.
    NOTE(review): the bare except silently hides malformed lines -- confirm
    that is intentional before tightening it.
    """
    function = ""
    function_args = ""
    mod = None
    #some defaults
    variable = ""
    start_row = 1
    col = 0
    try:
        #Identify start_row and col
        pattern_row_col = re.compile ("R(\d+)C(\d+)")
        row_col_object = pattern_row_col.search (line)
        if row_col_object:
            start_row = int (row_col_object.groups(1)[0])
            col = int (row_col_object.groups(0)[1])
        #Identify variable
        pattern_variable = re.compile ("(.*?)\|(.*?)\|(.*?)\|(.*)")
        variable_object = pattern_variable.search (line)
        if variable_object:
            if variable_object.groups(0)[0][0] == "$":
                variable = variable_object.groups(0)[0][1:] #remove the $
            function_to_call = variable_object.groups(0)[3]
            function, function_args = extract_function_with_args (pattern, function_to_call)
    except:
        pass
    end_row = start_row + rows_to_fill
    return start_row, col, end_row, mod, variable, function, function_args
#main program: read the template config, evaluate each line once per row,
#then write the populated map back out as a new Excel file.
support.log ("####ExcelWriter version %s. %s####" %(__version__, __copyright__))
support.log ()
pattern = re.compile ("\((.*?):(.*)\)")
pattern_eval = re.compile ("(.*)\((.*)\)")
#"parser" is an "argparse" object that defaults to certain values for each of the command line arguments
#Following command line arguments are supported = config, rowcount, colcount, startrow.
#Use with appropriate switches, when calling from command line.
parser = argparse.ArgumentParser(prog='python main.py', conflict_handler='resolve')
parser.add_argument("--config", help="Configuration file", default="..\\Template.txt")
parser.add_argument("--output", help="Output excel", default="..\\Template_latest.xls")
parser.add_argument("--rowcount", help="Number of rows to fill in Excel template", default=5)
parser.add_argument("--colcount", help="Number of columns in Excel template", default=10)
parser.add_argument("--startrow", help="Starting row number (to generate excel map)", default=1)
args = parser.parse_args()
config_file = args.config
f = open (config_file) #input config file
book, sheet = openexcel (os.path.splitext(config_file)[0] + ".xls", 0) #0 is the first sheet
rows_to_fill = int (args.rowcount)
start_row = int (args.startrow)
end_row = start_row + rows_to_fill
colcount = int (args.colcount)
excel_map = {}
excel_map = map_excel (sheet, start_row, end_row, colcount)
line_counter = 0
for line in f:
    line_counter += 1
    if len (line) == 1 or line[0] == '#': #only newline or line has been commented
        continue
    start_row, col, end_row, mod, variable, function, function_args = get_function (line) #in case of plain string, function and function_args are empty.
    if mod is None:
        mod = default_module
    if function == "":
        #Plain-string line: fill the same text into every row of the column.
        plain_string = line.split ('|')[3].strip('\n')
        return_dict = construct_result_dict (start_row, col, end_row, None, plain_string)
        excel_map.update (return_dict)
        support.log (str.format ("%d. ," %(line_counter))) # the comma at the end ensures newline is NOT printed.
        support.log (str.format ("========>%s printed %d times" %(plain_string, rows_to_fill)))
        support.log ()
        continue
    try:
        fp = getattr (mod, function) #getting function pointer
        support.log (str.format ("%d. ," %(line_counter))) # the comma at the end ensures newline is NOT printed.
        if variable != "":
            #"$var|..." line: store the value for later reference, no cells filled.
            support.log (store (line_counter, variable, fp, function_args))
            support.log ()
            continue
        return_dict = construct_result_dict (start_row, col, end_row, fp, function_args)
        excel_map.update (return_dict)
        #support.log (return_dict)
        support.log (str.format ("========>%s called %d times" %(function, rows_to_fill)))
        support.log ()
    except:
        support.log (str.format ("%s may not exist." %(function)), True)
closeexcel (book)
#write excel_map to excel
book, copybook, sheet = openexcel (os.path.splitext(config_file)[0] + ".xls", 0,"w") #0 is the first sheet
for each in excel_map:
    writetoexcel (sheet, each[0], each[1], excel_map[each])
copybook.save(args.output)
support.log (str.format ("Saved to " + args.output + ". Please rename the file to avoid overwriting during the next iteration."))
closeexcel (book) | ananddotiyer/DDE-Lite | ExcelWriter/main.py | main.py | py | 9,200 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "re.search",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "support.isdigit",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "support.lexer_setup",
"line_numb... |
29595058013 | from fontTools.ttLib import TTFont
import random, copy, os, time, base64
# Measure creation time
start = time.time()
# Read original TTF/OTF font file
f = TTFont('font.ttf')
# Find font's longest CMAP table
cmap = f['cmap']
longestCMAPtable = None
for t in cmap.tables:
if not longestCMAPtable or len(t.cmap) > len(longestCMAPtable.cmap):
longestCMAPtable = t
# Read it into a normal list for shuffling
# This is not excatly elegant, but it works. Improve it.
originalCMAP = []
for u in longestCMAPtable.cmap:
originalCMAP.append((u, longestCMAPtable.cmap[u]))
# Make copy and shuffle that copy
newCMAP = copy.copy(originalCMAP)
random.shuffle(newCMAP)
# These funtions are ugly, but work well. Improve them.
def newNameToUnicode(unicode):
    """Return the shuffled glyph name at the position of codepoint *unicode*
    in originalCMAP; None if absent.  Linear scan -- NOTE(review): a dict
    would be O(1); parameter name shadows the Py2 builtin ``unicode``."""
    for i in range(len(originalCMAP)):
        if originalCMAP[i][0] == unicode:
            return newCMAP[i][1]
def newUnicodeToUnicode(unicode):
    """Return the original codepoint at the position where *unicode* sits in
    the shuffled cmap; None if absent (linear scan)."""
    for i in range(len(originalCMAP)):
        if newCMAP[i][0] == unicode:
            return originalCMAP[i][0]
def newUnicodeToName(name):
    """Return the original codepoint at the position of glyph *name* in the
    shuffled cmap.  NOTE(review): misnomer -- it returns a codepoint, not a
    name (linear scan)."""
    for i in range(len(newCMAP)):
        if newCMAP[i][1] == name:
            return originalCMAP[i][0]
def translateText(text):
    """Map each character of *text* through the cmap shuffle (via
    newUnicodeToUnicode); characters with no mapping are silently dropped.
    Python 2 only (uses unichr).
    NOTE(review): newUnicodeToUnicode is called twice per character --
    caching the result would halve the scans."""
    new = u''
    for g in text:
        if newUnicodeToUnicode(ord(g)):
            new += unichr(newUnicodeToUnicode(ord(g)))
    return new
# Go through all entries in all cmap tables and assign the new randomized glyph names to the unicodes
# NOTE(review): newNameToUnicode is called twice per entry (test + assign).
for t in cmap.tables:
    for u in t.cmap.keys():
        if newNameToUnicode(u):
            t.cmap[u] = newNameToUnicode(u)
# Save new font file to disk
# Maybe it's a good idea to use unique file names here
f.save('new.ttf')
# Stop measuring time
end = time.time()
# Read font file into base64 string for delivery within CSS
# NOTE(review): text-mode open() of a binary font only works on Python 2 /
# non-Windows; open('new.ttf', 'rb') would be the safe form.
fontBase64 = base64.b64encode(open('new.ttf').read())
# Delete the temporary file
os.remove('new.ttf')
# Time it took to create the web font
duration = end - start
# Output this text alongside the new throw-away web font
securetext = translateText('Putin is a wussy.') | yanone/geheimsprache | geheimsprache.py | geheimsprache.py | py | 1,928 | python | en | code | 12 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "fontTools.ttLib.TTFont",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_n... |
6319889570 | import json
import socket as s
import selectors
import threading
import types
import logging
logger = logging.getLogger("Main." + __name__)
class SocketHandler:
    """Selector-driven TCP server that accepts length-prefixed JSON messages
    and fans them out to registered handler callbacks.

    Incoming wire format: one length byte, then that many bytes of UTF-8
    JSON (so messages are capped at 255 bytes).  Outgoing messages are sent
    verbatim with a trailing newline.

    NOTE(review): sel/doShutdown/connected_sockets/handler_list are *class*
    attributes, shared by every instance -- acceptable for a singleton
    server, surprising if more than one SocketHandler is created.
    """
    socket = None
    sel = selectors.DefaultSelector()
    selector_timeout = 4  # seconds between shutdown-flag checks
    doShutdown = threading.Event()
    connected_sockets = []
    handler_list = []

    def __init__(self, host_addr="localhost", socket_port=5000) -> None:
        """Bind, listen, and start the background listener thread."""
        self.host_addr = host_addr
        self.port = socket_port
        socket = s.socket(s.AF_INET, s.SOCK_STREAM)
        socket.bind((self.host_addr, self.port))
        socket.listen()
        logger.info(f"Socket listening on: {self.host_addr}:{self.port}")
        socket.setblocking(False)
        # data=None marks this as the accept socket; the selector keeps the
        # only reference to it (self.socket is never assigned).
        self.sel.register(socket, selectors.EVENT_READ, data=None)
        self.listener_thread = threading.Thread(target=self._listener_loop)
        self.listener_thread.start()

    def _accept_and_register(self, sock):
        """Accept a pending connection and register it for read events."""
        conn, addr = sock.accept()
        logger.debug(f"Accepted connection from {addr}")
        conn.setblocking(False)
        data = types.SimpleNamespace(addr=addr, inb=b'', outb=b'')
        events = selectors.EVENT_READ  # | selectors.EVENT_WRITE
        self.sel.register(conn, events, data=data)
        self.connected_sockets.append(conn)

    def _service_connection(self, key, mask):
        """Read one length-prefixed message from a ready client socket."""
        sock = key.fileobj
        data = key.data
        if mask & selectors.EVENT_READ:
            # First byte is the payload length.
            recv_data = sock.recv(1)
            recv_data = sock.recv(int.from_bytes(recv_data, "big"))
            logger.debug(f"Received from {data.addr}: {recv_data}")
            if recv_data:
                self._handle_incoming(sock, recv_data)
            else:
                # Zero-length read: peer closed the connection.
                logger.debug(f"closing connection to {data.addr}")
                self._close_socket(sock)

    def _listener_loop(self):
        """Background thread: poll the selector until shutdown is requested."""
        while not self.doShutdown.is_set():
            events = self.sel.select(timeout=self.selector_timeout)
            if events is None:
                continue
            for key, mask in events:
                if key.data is None:
                    self._accept_and_register(key.fileobj)
                else:
                    try:
                        self._service_connection(key, mask)
                    except Exception as e:
                        # WinError 10054: connection forcibly closed by peer.
                        if str(e).startswith("[WinError 10054]"):
                            self._close_socket(key.fileobj)
                            logger.debug("Socket Closed")
                        else:
                            logger.warning(f"Socket error! {key.data.addr}:\n{e}")
                            raise e

    def _handle_incoming(self, sock, data: bytes):
        """Decode a JSON payload and invoke every registered handler."""
        try:
            str_version = data.decode("utf-8")
        except UnicodeDecodeError:
            # Tolerate one leading garbage byte.
            data = data[1:]
            str_version = data.decode("utf-8")
        str_version = str_version.replace('"true"', 'true').replace('"false"', 'false')
        usable_json = json.loads(str_version)
        for i in self.handler_list:
            i(usable_json)

    def _prune_sockets(self):
        """Drop already-closed sockets (fileno() == -1) from the list.

        BUGFIX: the original never advanced its index past a live socket,
        looping forever as soon as one open socket remained in the list.
        Slice-assign to keep mutating the same (shared) list object.
        """
        self.connected_sockets[:] = [
            sock for sock in self.connected_sockets if sock.fileno() != -1
        ]

    def _close_socket(self, sock):
        """Unregister *sock* from the selector, close it, and forget it."""
        self._prune_sockets()
        try:
            # BUGFIX: the original removed the *index* returned by
            # _find_same_addr_index from a list of sockets (always a
            # ValueError); remove the socket object itself.
            self.connected_sockets.remove(sock)
        except ValueError:
            pass
        except Exception as e:
            logger.warning(f"Error removing socket from list: {repr(e)}")
        try:
            self.sel.unregister(sock)
            sock.close()
        except Exception as e:
            logger.warning(f"Error unregistering or closing socket: {repr(e)}")
        self._prune_sockets()

    def _find_same_addr_index(self, sock):
        """Return the index of a socket with the same remote address.

        NOTE(review): kept for interface compatibility only -- plain socket
        objects have no ``raddr`` attribute (use getpeername()), so this
        raises AttributeError if ever called.
        """
        for i in range(len(self.connected_sockets) - 1):
            if self.connected_sockets[i].raddr == sock.raddr:
                return i

    nothing = False  # suppresses repeated "send to nothing" warnings

    def send_all(self, message: str):
        """Send *message* (UTF-8, newline-terminated) to every client."""
        if len(self.connected_sockets) == 0:
            if not self.nothing:
                logger.warning("TRY TO SEND MESSAGE TO NOTHING! regards, SocketHandler.send_all()")
                self.nothing = True
            return
        logger.debug("Sending to all sockets: " + message)
        # Iterate over a copy: _close_socket mutates connected_sockets.
        for sock in list(self.connected_sockets):
            self.nothing = False
            try:
                sock.sendall(message.encode("utf-8") + b"\n")
            except BlockingIOError as e:
                logger.critical(f"Sending IO Error! {repr(e)}")
            except ConnectionResetError:
                logger.warning("Socket Forcibly close by host")
                self._close_socket(sock)

    def register_message_handler(self, function):
        """register function to be called when a message is received from the socket

        Args:
            function ([function(json.JSON)]): [description]
        """
        self.handler_list.append(function)

    def unregister_handler(self, function):
        """Remove a previously registered handler callback.

        BUGFIX: the original called
        ``remove(handler_list.index(function))`` -- removing the *value
        equal to the index* instead of the function itself.
        """
        self.handler_list.remove(function)

    def close(self):
        """Signal the listener thread to stop and close all client sockets."""
        self.doShutdown.set()
        # Iterate over a copy: _close_socket mutates connected_sockets, so
        # the original index-based loop could skip sockets or raise.
        for sock in list(self.connected_sockets):
            self._close_socket(sock)
| Nickiel12/Church-Programs | Android/android_server/Classes/SocketHandler.py | SocketHandler.py | py | 5,396 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "selectors.DefaultSelector",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "threading.Event",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "socket.soc... |
6911896789 | import json
from pydantic import BaseModel
from pdf_token_type_labels.TokenType import TokenType
from pdf_features.Rectangle import Rectangle
SCALE_RATIO = 0.75
class SegmentBox(BaseModel):
left: float
top: float
width: float
height: float
page_number: int
segment_type: TokenType = TokenType.TEXT
def to_dict(self):
return json.loads(self.model_dump_json())
def get_bounding_box(self) -> Rectangle:
return Rectangle.from_width_height(
left=int(self.left), top=int(self.top), width=int(self.width), height=int(self.height)
)
def scale_down(self):
self.left = round(self.left * SCALE_RATIO, 0)
self.top = round(self.top * SCALE_RATIO, 0)
self.width = round(self.width * SCALE_RATIO, 0)
self.height = round(self.height * SCALE_RATIO, 0)
def scale_up(self):
self.left = round(self.left / SCALE_RATIO, 0)
self.top = round(self.top / SCALE_RATIO, 0)
self.width = round(self.width / SCALE_RATIO, 0)
self.height = round(self.height / SCALE_RATIO, 0)
| huridocs/pdf_metadata_extraction | src/data/SegmentBox.py | SegmentBox.py | py | 1,091 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "pydantic.BaseModel",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pdf_token_type_labels.TokenType.TokenType",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "pdf_token_type_labels.TokenType.TokenType.TEXT",
"line_number": 16,
"usage_t... |
73137151144 | import pytest
from registry_schemas import validate
# Properties can be anything, using show* for testing.
REGISTRATIONS_TABLE = {
'showColumn1': True,
'showColumn2': False,
'showColumn3': True,
'showColumn4': False
}
# Properties can be anything, using misc* for testing.
MISC_PREFERENCES = {
'preference1': 'A',
'preference2': False,
'preference3': 3
}
TEST_REG_TABLE_JSON = {
'paymentConfirmationDialog': True,
'registrationsTable': REGISTRATIONS_TABLE
}
TEST_MISC_PREF_JSON = {
'paymentConfirmationDialog': True,
'miscellaneousPreferences': MISC_PREFERENCES
}
TEST_ALL_JSON = {
'paymentConfirmationDialog': True,
'selectConfirmationDialog': False,
'defaultDropDowns': True,
'defaultTableFilters': False,
'registrationsTable': REGISTRATIONS_TABLE,
'miscellaneousPreferences': MISC_PREFERENCES
}
TEST_COMBO_JSON = {
'paymentConfirmationDialog': True,
'selectConfirmationDialog': False
}
TEST_PAYMENT_JSON = {
'paymentConfirmationDialog': True
}
TEST_SELECT_JSON = {
'selectConfirmationDialog': False
}
TEST_DROPDOWN_JSON = {
'defaultDropDowns': True
}
TEST_FILTER_JSON = {
'defaultTableFilters': False
}
TEST_EMPTY_JSON = {
}
TEST_UNKNOWN_JSON = {
'unknown': 'xxxx'
}
TEST_INVALID_TYPE_JSON = {
'selectConfirmationDialog': 'wrong'
}
# testdata pattern is ({description}, {is valid}, {data})
TEST_DATA = [
('All settings', True, TEST_ALL_JSON),
('2 settings', True, TEST_COMBO_JSON),
('Just payment', True, TEST_PAYMENT_JSON),
('Just search select', True, TEST_SELECT_JSON),
('Just dropdown', True, TEST_DROPDOWN_JSON),
('Just table filter', True, TEST_FILTER_JSON),
('Just registrations table', True, TEST_REG_TABLE_JSON),
('Just miscellaneous preferences', True, TEST_MISC_PREF_JSON),
('No settings', False, TEST_EMPTY_JSON),
('Unknown setting', False, TEST_UNKNOWN_JSON),
('Invalid type setting', False, TEST_INVALID_TYPE_JSON)
]
@pytest.mark.parametrize('desc,valid,data', TEST_DATA)
def test_user_profile(desc, valid, data):
"""Assert that the schema is performing as expected for a user profile."""
is_valid, errors = validate(data, 'userProfile', 'common')
if errors:
# print(errors)
for err in errors:
print(err.message)
assert is_valid == valid
| bcgov/registry-schemas | tests/unit/common/test_user_profile.py | test_user_profile.py | py | 2,351 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "registry_schemas.validate",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 76,
"usage_type": "attribute"
}
] |
8649507511 | """
============================
Author:柠檬班-木森
Time:2020/5/12 20:40
E-mail:3247119728@qq.com
Company:湖南零檬信息技术有限公司
============================
"""
import time
import unittest
from selenium import webdriver
from ddt import ddt, data
from web_08day.page.page_login import LoginPage
from web_08day.page.page_index import IndexPage
"""
'18684720553,python'
"""
error_case_data = [
{'mobile': "", "pwd": "python1", "expected": "请输入手机号"},
{'mobile': "1868472055a", "pwd": "python1", "expected": "请输入正确的手机号"},
{'mobile': "18684720553", "pwd": "", "expected": "请输入密码"}
]
@ddt
class TestLogin(unittest.TestCase):
"""测试登录"""
def setUp(self):
self.driver = webdriver.Chrome()
self.login_page = LoginPage(self.driver)
self.index_page = IndexPage(self.driver)
def test_login_pass(self):
"""正常登录的用例"""
# 进行登录的操作
self.login_page.login('18684720553', 'python')
# 获取登录之后的用户信息
res = self.index_page.get_my_user_info()
# 断言用例执行是否通过
self.assertEqual('登录成功', res)
@data(*error_case_data)
def test_login_error_case(self, case):
# 执行登录操作
self.login_page.login(case['mobile'], case['pwd'])
# 获取实际提示结果
result = self.login_page.get_error_info()
# 断言
self.assertEqual(case['expected'], result)
def tearDown(self):
self.driver.quit()
# Allow running this module directly with unittest's CLI runner.
if __name__ == '__main__':
    unittest.main()
| huchaoyang1991/py27_web | web_08day(web自动化用例编写和PO模式)/testcase/test_login_02.py | test_login_02.py | py | 1,630 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "w... |
22078953220 | from enum import Enum
import random
import copy
random.seed(None)
def setSeed(s):
random.seed(s)
"""
Basic enumerated class to specify colors when needed.
"""
class Color(Enum):
WHITE = 0
BLACK = 1
GREEN = 2
RED = 3
BLUE = 4
GOLD = 5
@classmethod
def mapToColor(self, color):
if (color == "W"):
return Color.WHITE
elif (color == "K"):
return Color.BLACK
elif (color == "E"):
return Color.GREEN
elif (color == "R"):
return Color.RED
elif (color == "B"):
return Color.BLUE
elif (color == "G"):
return Color.GOLD
else:
return None
def __str__(self):
s = ""
if self.value == 1:
s = "W"
elif self.value == 2:
s = "K"
elif self.value == 3:
s = "E"
elif self.value == 4:
s = "R"
elif self.value == 5:
s = "B"
elif self.value == 6:
s = "G"
return s
"""
Basic enumerated class to specify whether a player is a human or AI.
"""
class PlayerType(Enum):
HUMAN = 1
AI = 2
"""
A GemDict is a dictionary with Color keys and int values. Dictionary can be accessed through the getter.
"""
class GemDict:
def __init__(self, gem_lst):
self.data = {
Color.WHITE : gem_lst[0],
Color.BLACK : gem_lst[1],
Color.GREEN : gem_lst[2],
Color.RED : gem_lst[3],
Color.BLUE : gem_lst[4]}
def add(self, color, num):
if color == Color.WHITE:
self.data[Color.WHITE] += num
elif color == Color.BLACK:
self.data[Color.BLACK] += num
elif color == Color.GREEN:
self.data[Color.GREEN] += num
elif color == Color.RED:
self.data[Color.RED] += num
elif color == Color.BLUE:
self.data[Color.BLUE] += num
else:
raise ValueError
def remove(self, color, num):
if color == Color.WHITE:
self.data[Color.WHITE] -= num
elif color == Color.BLACK:
self.data[Color.BLACK] -= num
elif color == Color.GREEN:
self.data[Color.GREEN] -= num
elif color == Color.RED:
self.data[Color.RED] -= num
elif color == Color.BLUE:
self.data[Color.BLUE] -= num
else:
raise ValueError
def total_gems(self):
return (
self.data[Color.WHITE] +
self.data[Color.BLACK] +
self.data[Color.GREEN] +
self.data[Color.RED] +
self.data[Color.BLUE])
def get_data(self):
return self.data
def data_gui(self):
return [
self.data[Color.WHITE],
self.data[Color.BLACK],
self.data[Color.GREEN],
self.data[Color.RED],
self.data[Color.BLUE]]
def addGD(self, gd):
w = self.data_gui()[0] + gd.data_gui()[0]
k = self.data_gui()[1] + gd.data_gui()[1]
e = self.data_gui()[2] + gd.data_gui()[2]
r = self.data_gui()[3] + gd.data_gui()[3]
b = self.data_gui()[4] + gd.data_gui()[4]
return self.gdString(w,k,e,r,b)
def gdString(self,w,k,e,r,b):
return (str(w)+"|"+str(k)+"|"+str(e)+"|"+str(r)+"|"+str(b))
def __str__(self):
w = self.data_gui()[0]
k = self.data_gui()[1]
e = self.data_gui()[2]
r = self.data_gui()[3]
b = self.data_gui()[4]
return self.gdString(w,k,e,r,b)
"""
A State will contain:
current_player: A Player object of the player who's turn it currently is
players: A list of Player objects, in turn order, corresponding to each player in the game
tier1_deck: a list of Card objects that make up the remaining cards in the tier 1 deck
tier2_deck: a list of Card objects that make up the remaining cards in the tier 2 deck
tier3_deck: a list of Card objects that make up the remaining cards in the tier 3 deck
tier1: a list of Card objects that represent the tier 1 cards available for purchase
tier2: a list of Card objects that represent the tier 2 cards available for purchase
tier3: a list of Card objects that represent the tier 3 cards available for purchase
available_gems: a GemDict representing the number of gems available to take
gold: an int, the number of gold gems available to take
nobles: a list of GemDicts representing the nobles available for purchase (each noble is worth 3 points)
"""
class State:
"""
Sets up the initial state of the game with a randomized board and gems for the correct number of players.
"""
def __init__(self, num_human_players, num_AI_players):
self._players = []
for i in range(num_human_players):
self._players.append(Player(PlayerType.HUMAN, "HUMAN " + str(i)))
for j in range(num_AI_players):
self._players.append(Player(PlayerType.AI, "AI " + str(j)))
random.shuffle(self._players) #put players in random order
self._current_player = self._players[0] #set current player to first player in list
if num_AI_players+num_human_players == 2:
self._available_gems = GemDict([4,4,4,4,4])
elif num_human_players + num_human_players == 3:
self._available_gems = GemDict([5,5,5,5,5])
else:
self._available_gems = GemDict([7,7,7,7,7])
self._gold = 5
self._tier1_deck = self.gen_tier1()
self._tier2_deck = self.gen_tier2()
self._tier3_deck = self.gen_tier3()
random.shuffle(self._tier1_deck)
random.shuffle(self._tier2_deck)
random.shuffle(self._tier3_deck)
self._tier1 = self._tier1_deck[:4]
self._tier2 = self._tier2_deck[:4]
self._tier3 = self._tier3_deck[:4]
self._tier1_deck = self._tier1_deck[4:]
self._tier2_deck = self._tier2_deck[4:]
self._tier3_deck = self._tier3_deck[4:]
self._nobles = self.gen_nobles()
random.shuffle(self._nobles)
self._nobles = self._nobles[:(num_human_players+num_AI_players+1)] #num nobles available = num players + 1
self._turnCount = 1
self._discarding = False
self._firstWinner = None
self._winners = []
self._running = True
self._interactions = 0
"""Generates a shuffled deck of tier 1 cards."""
def gen_tier1(self):
tier1_deck = []
tier1_deck.append(Card(Color.BLACK, 0, [1, 0, 1, 1, 1], 1))
tier1_deck.append(Card(Color.BLACK, 0, [0, 0, 2, 1, 0], 1))
tier1_deck.append(Card(Color.BLACK, 0, [2, 0, 2, 0, 0], 1))
tier1_deck.append(Card(Color.BLACK, 0, [0, 1, 1, 3, 0], 1))
tier1_deck.append(Card(Color.BLACK, 0, [0, 0, 3, 0, 0], 1))
tier1_deck.append(Card(Color.BLACK, 0, [1, 0, 1, 1, 2], 1))
tier1_deck.append(Card(Color.BLACK, 0, [2, 0, 0, 1, 2], 1))
tier1_deck.append(Card(Color.BLACK, 1, [0, 0, 0, 0, 4], 1))
tier1_deck.append(Card(Color.BLUE, 0, [1, 2, 0, 0, 0], 1))
tier1_deck.append(Card(Color.BLUE, 0, [1, 1, 1, 2, 0], 1))
tier1_deck.append(Card(Color.BLUE, 0, [1, 1, 1, 1, 0], 1))
tier1_deck.append(Card(Color.BLUE, 0, [0, 0, 3, 1, 1], 1))
tier1_deck.append(Card(Color.BLUE, 0, [0, 3, 0, 0, 0], 1))
tier1_deck.append(Card(Color.BLUE, 0, [1, 0, 2, 2, 0], 1))
tier1_deck.append(Card(Color.BLUE, 0, [0, 2, 2, 0, 0], 1))
tier1_deck.append(Card(Color.BLUE, 1, [0, 0, 0, 4, 0], 1))
tier1_deck.append(Card(Color.GREEN, 0, [2, 0, 0, 0, 1], 1))
tier1_deck.append(Card(Color.GREEN, 0, [0, 0, 0, 2, 2], 1))
tier1_deck.append(Card(Color.GREEN, 0, [1, 0, 1, 0, 3], 1))
tier1_deck.append(Card(Color.GREEN, 0, [1, 1, 0, 1, 1], 1))
tier1_deck.append(Card(Color.GREEN, 0, [1, 1, 0, 1, 2], 1))
tier1_deck.append(Card(Color.GREEN, 0, [0, 2, 0, 2, 1], 1))
tier1_deck.append(Card(Color.GREEN, 0, [0, 0, 0, 3, 0], 1))
tier1_deck.append(Card(Color.GREEN, 1, [0, 4, 0, 0, 0], 1))
tier1_deck.append(Card(Color.RED, 0, [3, 0, 0, 0, 0], 1))
tier1_deck.append(Card(Color.RED, 0, [1, 3, 0, 1, 0], 1))
tier1_deck.append(Card(Color.RED, 0, [0, 0, 1, 0, 2], 1))
tier1_deck.append(Card(Color.RED, 0, [2, 2, 1, 0, 0], 1))
tier1_deck.append(Card(Color.RED, 0, [2, 1, 1, 0, 1], 1))
tier1_deck.append(Card(Color.RED, 0, [1, 1, 1, 0, 1], 1))
tier1_deck.append(Card(Color.RED, 0, [2, 0, 0, 2, 0], 1))
tier1_deck.append(Card(Color.RED, 1, [4, 0, 0, 0, 0], 1))
tier1_deck.append(Card(Color.WHITE, 0, [0, 1, 2, 0, 2], 1))
tier1_deck.append(Card(Color.WHITE, 0, [0, 1, 0, 2, 0], 1))
tier1_deck.append(Card(Color.WHITE, 0, [0, 1, 1, 1, 1], 1))
tier1_deck.append(Card(Color.WHITE, 0, [0, 0, 0, 0, 3], 1))
tier1_deck.append(Card(Color.WHITE, 0, [0, 0, 2, 0, 2], 1))
tier1_deck.append(Card(Color.WHITE, 0, [0, 1, 2, 1, 1], 1))
tier1_deck.append(Card(Color.WHITE, 0, [3, 1, 0, 0, 1], 1))
tier1_deck.append(Card(Color.WHITE, 1, [0, 0, 4, 0, 0], 1))
return tier1_deck
"""Generates a shuffled deck of tier 2 cards."""
def gen_tier2(self):
tier2_deck = []
tier2_deck.append(Card(Color.BLACK, 1, [3, 0, 2, 0, 2], 2))
tier2_deck.append(Card(Color.BLACK, 1, [3, 2, 3, 0, 0], 2))
tier2_deck.append(Card(Color.BLACK, 2, [0, 0, 4, 2, 1], 2))
tier2_deck.append(Card(Color.BLACK, 2, [5, 0, 0, 0, 0], 2))
tier2_deck.append(Card(Color.BLACK, 2, [0, 0, 5, 3, 0], 2))
tier2_deck.append(Card(Color.BLACK, 3, [0, 6, 0, 0, 0], 2))
tier2_deck.append(Card(Color.BLUE, 1, [0, 0, 2, 3, 2], 2))
tier2_deck.append(Card(Color.BLUE, 1, [0, 3, 3, 0, 2], 2))
tier2_deck.append(Card(Color.BLUE, 2, [5, 0, 0, 0, 3], 2))
tier2_deck.append(Card(Color.BLUE, 2, [0, 0, 0, 0, 5], 2))
tier2_deck.append(Card(Color.BLUE, 2, [2, 4, 0, 1, 0], 2))
tier2_deck.append(Card(Color.BLUE, 3, [0, 0, 0, 0, 6], 2))
tier2_deck.append(Card(Color.GREEN, 1, [3, 0, 2, 3, 0], 2))
tier2_deck.append(Card(Color.GREEN, 1, [2, 2, 0, 0, 3], 2))
tier2_deck.append(Card(Color.GREEN, 2, [4, 1, 0, 0, 2], 2))
tier2_deck.append(Card(Color.GREEN, 2, [0, 0, 5, 0, 0], 2))
tier2_deck.append(Card(Color.GREEN, 2, [0, 0, 3, 0, 5], 2))
tier2_deck.append(Card(Color.GREEN, 3, [0, 0, 6, 0, 0], 2))
tier2_deck.append(Card(Color.RED, 1, [0, 3, 0, 2, 3], 2))
tier2_deck.append(Card(Color.RED, 1, [2, 3, 0, 2, 0], 2))
tier2_deck.append(Card(Color.RED, 2, [1, 0, 2, 0, 4], 2))
tier2_deck.append(Card(Color.RED, 2, [3, 5, 0, 0, 0], 2))
tier2_deck.append(Card(Color.RED, 2, [0, 5, 0, 0, 0], 2))
tier2_deck.append(Card(Color.RED, 3, [0, 0, 0, 6, 0], 2))
tier2_deck.append(Card(Color.WHITE, 1, [0, 2, 3, 2, 0], 2))
tier2_deck.append(Card(Color.WHITE, 1, [2, 0, 0, 3, 3], 2))
tier2_deck.append(Card(Color.WHITE, 2, [0, 2, 1, 4, 0], 2))
tier2_deck.append(Card(Color.WHITE, 2, [0, 0, 0, 5, 0], 2))
tier2_deck.append(Card(Color.WHITE, 2, [0, 3, 0, 5, 0], 2))
tier2_deck.append(Card(Color.WHITE, 3, [6, 0, 0, 0, 0], 2))
return tier2_deck
"""Generates a shuffled deck of tier 3 cards."""
def gen_tier3(self):
tier3_deck = []
tier3_deck.append(Card(Color.BLACK, 3, [3, 0, 5, 3, 3], 3))
tier3_deck.append(Card(Color.BLACK, 4, [0, 0, 0, 7, 0], 3))
tier3_deck.append(Card(Color.BLACK, 4, [0, 3, 3, 6, 0], 3))
tier3_deck.append(Card(Color.BLACK, 5, [0, 3, 0, 7, 0], 3))
tier3_deck.append(Card(Color.BLUE, 3, [3, 5, 3, 3, 0], 3))
tier3_deck.append(Card(Color.BLUE, 4, [7, 0, 0, 0, 0], 3))
tier3_deck.append(Card(Color.BLUE, 4, [6, 3, 0, 0, 3], 3))
tier3_deck.append(Card(Color.BLUE, 5, [7, 0, 0, 0, 3], 3))
tier3_deck.append(Card(Color.GREEN, 3, [5, 3, 0, 3, 3], 3))
tier3_deck.append(Card(Color.GREEN, 4, [3, 0, 3, 0, 6], 3))
tier3_deck.append(Card(Color.GREEN, 4, [0, 0, 0, 0, 7], 3))
tier3_deck.append(Card(Color.GREEN, 5, [0, 0, 3, 0, 7], 3))
tier3_deck.append(Card(Color.RED, 3, [3, 3, 3, 0, 5], 3))
tier3_deck.append(Card(Color.RED, 4, [0, 0, 7, 0, 0], 3))
tier3_deck.append(Card(Color.RED, 4, [0, 0, 6, 3, 3], 3))
tier3_deck.append(Card(Color.RED, 5, [0, 0, 7, 3, 0], 3))
tier3_deck.append(Card(Color.WHITE, 3, [0, 3, 3, 5, 3], 3))
tier3_deck.append(Card(Color.WHITE, 4, [0, 7, 0, 0, 0], 3))
tier3_deck.append(Card(Color.WHITE, 4, [3, 6, 0, 3, 0], 3))
tier3_deck.append(Card(Color.WHITE, 5, [3, 7, 0, 0, 0], 3))
return tier3_deck
def gen_nobles(self):
nobles = []
nobles.append(GemDict([0,0,4,4,0]))
nobles.append(GemDict([3,3,0,3,0]))
nobles.append(GemDict([4,0,0,0,4]))
nobles.append(GemDict([4,4,0,0,0]))
nobles.append(GemDict([0,0,4,0,4]))
nobles.append(GemDict([0,0,3,3,3]))
nobles.append(GemDict([3,0,3,0,3]))
nobles.append(GemDict([0,4,0,4,0]))
nobles.append(GemDict([3,3,0,0,3]))
nobles.append(GemDict([0,3,3,3,0]))
return nobles
"""
Returns whether the game has finished.
"""
def running(self):
return self._running
"""
Set _running to False.
"""
def endGame(self):
self._running = False
"""
Returns the turn count for the game.
"""
def get_turn_count(self):
return self._turnCount
"""
Returns a list of Player objects, in turn order, corresponding to each player in the game.
"""
def get_players(self):
return self._players
"""
Returns a Player object of the current player.
"""
def get_current_player(self):
return self._current_player
"""
Returns the remaining tier1 cards left in the deck as a list of Card objects.
"""
def get_tier1_deck(self):
return self._tier1_deck
"""
Returns the remaining tier2 cards left in the deck as a list of Card objects.
"""
def get_tier2_deck(self):
return self._tier2_deck
"""
Returns the remaining tier3 cards left in the deck as a list of Card objects.
"""
def get_tier3_deck(self):
return self._tier3_deck
"""
Returns the remaining tier1 cards currently on the board as a list of Card objects.
"""
def get_tier1(self):
return self._tier1
"""
Returns the remaining tier2 cards currently on the board as a list of Card objects.
"""
def get_tier2(self):
return self._tier2
"""
Returns the tier3 cards currently on the board as a list of Card objects.
"""
def get_tier3(self):
return self._tier3
"""
Returns the gems available to take as a GemDict object.
"""
def get_avail_gems(self):
return self._available_gems
"""
Returns int of the number of gold gems still available.
"""
def get_num_gold(self):
return self._gold
"""
Returns the nobles remaining on the board.
"""
def get_nobles(self):
return self._nobles
def getNoble(self, noble):
return self._nobles[noble]
def getPlayerReserved(self, player, card):
self._players[player].get_reserved()[card].reserve(card)
return self._players[player].get_reserved()[card]
"""
Returns the index of the current player in the order the players are in.
"""
def current_player_index(self):
return self._players.index(self._current_player)
"""
Changes the current player to the next player in the player list.
"""
def next_player(self):
current = self.current_player_index()
self._current_player = self._players[current+1-len(self._players)]
self._turnCount += 1
"""
Removes one card from the desired tier deck and adds it to the tier cards on the board.
tier: Int. The tier number whose deck a card should be removed from. Also the tier number
on the board that the card will be added to.
If tier = 1, remove from tier 1 deck. If tier = 2, remove from tier 2 deck.
If tier = 3, remove from tier 3 deck. Otherwise the method raises ValueError.
This method is called after a player purchases a card to replenish the cards on the board.
If there are no cards remaining in the desired tier deck, no card is added to the board.
"""
def draw_from_deck(self, tier):
if tier == 1:
if len(self._tier1_deck)!=0:
new_card = self._tier1_deck.pop()
self._tier1.append(new_card)
elif tier == 2:
if len(self._tier2_deck)!=0:
new_card = self._tier2_deck.pop()
self._tier2.append(new_card)
elif tier == 3:
if len(self._tier3_deck)!=0:
new_card = self._tier3_deck.pop()
self._tier3.append(new_card)
else:
raise ValueError
"""
Helper function that removes and returns the top card ot a specified tier deck.
"""
def reserve_from_deck(self, tier):
if tier == 1:
if len(self._tier1_deck)!= 0:
new_card = self._tier1_deck.pop()
return new_card
else:
return None
elif tier == 2:
if len(self._tier2_deck)!= 0:
new_card = self._tier2_deck.pop()
return new_card
else:
return None
elif tier == 3:
if len(self._tier3_deck)!= 0:
new_card = self._tier3_deck.pop()
return new_card
else:
return None
else:
raise ValueError
"""
Removes a card from the cards available to purchase from a specific tier.
tier: Int. The tier from which the card should be removed.
If tier = 1, remove from tier 1. If tier = 2, remove from tier 2. If tier = 3,
remove from tier 3. Otherwise the method raises ValueError.
card: Card object. The card that should be removed from the board.
"""
def remove_tier_card(self, tier, card):
if tier == 1:
del self._tier1[card]
elif tier == 2:
del self._tier2[card]
elif tier == 3:
del self._tier3[card]
else:
raise ValueError
"""
Removes gems from the game's available gems.
gem_lst: list of ints representing the number of gems of each color to remove. The order of the list is
[red, blue, green, white, black]. For example, [0, 1, 3, 0, 2] would representing removing
0 red gems, 1 blue gem, 3 green gems, 0 white gems, and 2 black gems.
"""
def remove_gems(self, gem_lst):
self._available_gems.remove(Color.WHITE, gem_lst[0])
self._available_gems.remove(Color.BLACK, gem_lst[1])
self._available_gems.remove(Color.GREEN, gem_lst[2])
self._available_gems.remove(Color.RED, gem_lst[3])
self._available_gems.remove(Color.BLUE, gem_lst[4])
"""
Adds gems to the game's available gems.
gem_lst: list of ints representing the number of gems of each color to add. The order of the list is
[red, blue, green, white, black]. For example, [0, 1, 3, 0, 2] would representing adding
0 red gems, 1 blue gem, 3 green gems, 0 white gems, and 2 black gems.
"""
def add_gems(self, gem_lst):
self._available_gems.add(Color.WHITE, gem_lst[0])
self._available_gems.add(Color.BLACK, gem_lst[1])
self._available_gems.add(Color.GREEN, gem_lst[2])
self._available_gems.add(Color.RED, gem_lst[3])
self._available_gems.add(Color.BLUE, gem_lst[4])
"""
Removes the given noble from the game's available nobles.
noble: GemDict object. The noble to be removed from the board.
"""
def remove_noble(self, noble):
self._nobles.remove(noble)
"""
Returns the card for the tier and card index specified, or None if it DNE.
"""
def getTierCard(self, tier, card):
c = None
if (tier == 1):
c = self._tier1[card]
elif (tier == 2):
c = self._tier2[card]
elif (tier == 3):
c = self._tier3[card]
return c
"""
Decrements the game's total number of available gold gems by 1.
"""
def decr_gold(self):
self._gold -= 1
"""
Increments the game's total number of available gold gems by 1.
"""
def incr_gold(self):
self._gold += 1
"""
Returns True if the current player is discarding cards.
"""
def get_discarding(self):
return self._discarding
"""
Sets _discarding to dis.
"""
def set_discarding(self, dis):
self._discarding = dis
"""
Returns None if nobody has won; else, a player's name.
"""
def get_firstWinner(self):
return self._firstWinner
"""
Sets _firstWinner to fw.
"""
def set_firstWinner(self, fw):
self._firstWinner = fw
"""
Sets _winners to empty list of winners.
"""
def reset_winners(self):
self._winners = []
"""
Returns list of winners.
"""
def get_winners(self):
return self._winners
"""
Returns winners message.
"""
def get_winners_text(self):
text = ""
if (len(self._winners) == 0):
return "Nobody won... [1000 Turn Limit]"
if (len(self._winners) == 1):
text = "" + self._winners[0] + " Wins!"
elif (len(self._winners) > 1):
for w in self._winners[:-2]:
text += w + ", "
text += self._winners[-2] + " and " + self._winners[-1] + " Win!"
else:
return ""
return text
"""
Add w to _winners.
"""
def add_winner(self, w):
self._winners.append(w)
"""
Adds a player to the game's player list.
player: Player object. The player to be added to the player list. The player is added to
the end of the list (will have the last turn).
"""
def add_player(self, player):
self._players.append(player)
def can_buy_card(self, t, c, r):
card = None
if (r):
card = self._players[t].get_reserved()[c]
else:
if (t == 1):
card = self._tier1[c]
elif (t == 2):
card = self._tier2[c]
elif (t == 3):
card = self._tier3[c]
difference = 0
player = self._current_player
keys = card.get_cost().get_data()
discounts = player.get_discounts().get_data()
adjusted_cost = [0] * 5
#Check if user has enough gems to buy card (adjusted with discounts)
for color in keys:
card_cost = max(keys[color] - discounts[color], 0)
adjusted_cost[color.value] = card_cost
if card_cost < 0:
card_cost = 0
player_cash = player.get_gems().get_data()[color]
if card_cost > player_cash:
difference += card_cost - player_cash
if difference > player.get_gold():
return False
return True
def toList(self):
p = []
i = 0
for pl in self._players:
rsv = []
k = 0
for r in pl.get_reserved():
res = [0] * 5
res[r.get_color().value] = 1
rsv += [r.get_points()] + res + r.get_cost().data_gui()
k += 1
for j in range(3-k):
rsv += [0] * 11
p += rsv + [22-pl.get_points()] + pl.get_colors() + pl.get_discounts().data_gui()
i += 1
for j in range(4-i):
p += [0] * 45
g = [self._gold] + self._available_gems.data_gui()
td = [len(self._tier1_deck)] + [len(self._tier2_deck)] + [len(self._tier3_deck)]
nb = []
i = 0
for n in self._nobles:
nb += n.data_gui()
i += 1
for j in range(5-i):
nb += [0] * 5
t1 = []
i = 0
for t in self._tier1:
res = [0] * 5
res[t.get_color().value] = 1
t1 += [t.get_points()] + res + t.get_cost().data_gui()
i += 1
for j in range(4-i):
t1 += [0] * 11
t2 = []
i = 0
for t in self._tier2:
res = [0] * 5
res[t.get_color().value] = 1
t2 += [t.get_points()] + res + t.get_cost().data_gui()
i += 1
for j in range(4-i):
t2 += [0] * 11
t3 = []
i = 0
for t in self._tier3:
res = [0] * 5
res[t.get_color().value] = 1
t3 += [t.get_points()] + res + t.get_cost().data_gui()
i += 1
for j in range(4-i):
t3 += [0] * 11
return (p + g + td + nb + t1 + t2 + t3)
def incr_interactions(self):
self._interactions += 1
def get_interactions(self):
return self._interactions
def input_info(self, stepsLeft):
pturn = [0] * 4
pind = self.current_player_index()
pturn[pind] = 1
return (self.toList() + self.possible_moves(pind) +
pturn + [stepsLeft])
"""
Returns all the possible moves in this state.
Indexes [0...4] for whether it's possible to take at least one of the gem.
Indexes [5...9] for whether it's possible to take at two of the gem.
Indexes [10...24] for whether a card can be reserved; R-to-L, Tier 1 -> 3.
Indexes [25...27] for whether a reserved card can be bought.
Indexes [28...39] for whether a tier card can be bought; Tier 1 -> 3.
Indexes [40...45] for whether the player can discard one of their colors.
"""
def possible_moves(self, cpind):
gm = self.get_avail_gems().data_gui()
cp = self.get_players()[cpind]
canTake1 = [0] * 5
canTake2 = [0] * 5
canReserve = [0] * 15
canBuy = [0] * 15
canDiscard = [0] * 6
if (not self.get_discarding()):
canTake1 = list(map(lambda n : int(n>0), gm))
canTake2 = list(map(lambda n : int(n>3), gm))
if (len(cp.get_reserved()) < 3):
canReserve[0] = int(len(self.get_tier1_deck()) > 0)
canReserve[5] = int(len(self.get_tier2_deck()) > 0)
canReserve[10] = int(len(self.get_tier3_deck()) > 0)
for i in range(len(self.get_tier1())):
canReserve[i+1] = 1
for i in range(len(self.get_tier2())):
canReserve[i+6] = 1
for i in range(len(self.get_tier3())):
canReserve[i+11] = 1
for r in range(len(cp.get_reserved())):
canBuy[r] = int(self.can_buy_card(cpind, r, True))
for t1 in range(len(self.get_tier1())):
canBuy[t1+3] = int(self.can_buy_card(1, t1, False))
for t2 in range(len(self.get_tier2())):
canBuy[t2+7] = int(self.can_buy_card(2, t2, False))
for t3 in range(len(self.get_tier3())):
canBuy[t3+11] = int(self.can_buy_card(3, t3, False))
else:
canDiscard = list(map(lambda n : int(n>0), cp.get_colors()))
return (canTake1 + canTake2 + canReserve + canBuy + canDiscard)
"""
A Card will contain:
color: a Color indicating the discount the card gives
points: an int indicating the number of points the card gives
cost: A GemDict indicating the cost of the card
"""
class Card:
"""
cost_lst: a list of the number of each color of gem that the card costs [red, blue, green, white, black].
For example, [0, 1, 3, 0, 2] would representing the card costing
0 red gems, 1 blue gem, 3 green gems, 0 white gems, and 2 black gems.
"""
def __init__(self, color, points, cost_lst, tier):
self._color = color
self._points = points
self._cost = GemDict(cost_lst)
self._tier = tier
self._reserved = [False, None]
"""Returns the Color indicated the discount the card gives."""
def get_color(self):
return self._color
"""Returns the point value of the card."""
def get_points(self):
return self._points
"""Returns the cost of the card as a Gem Dict."""
def get_cost(self):
return self._cost
"""Returns the tier of the card. One of [1,2,3]."""
def get_tier(self):
return self._tier
"""Returns boolean for whether the card is reserved."""
def reserved(self):
return self._reserved
"""Returns [True, index], where index is the index in its reserve pile."""
def reserve(self, index):
self._reserved = [True, index]
def __str__(self):
return ( str(self._points) + " " + str(self._color) +
"\n\n\n" + "W|K|E|R|B\n" +
str(self.get_cost()) )
"""
A Player will contain:
name: a string representing the name of the player. ex. HUMAN 0 for human player, AI 0 for AI player.
discounts: A GemDict with Color keys and int values. Represents the number of cards of each color a player has.
gems: A GemDict with with Color keys and int values. Represents the number of gems of each color a player has.
gold: an int representing the number of gold gems a player has
reserved: A list of Card objects that the player has reserved
points: The number of points a player has
player_type: A PlayerType indicating whether the player is AI or human
num_cards: An int indicating the number of cards a player has bought (used for end-game tiebreaker).
num_moves: Number of moves the player has taken
gems_taken: list of gem colors in the order that the player took them (only used for AI)
"""
class Player:
"""
Sets up an empty Player object with each of the attributes described above.
"""
def __init__(self, player_type, name):
self._name = name
self._discounts = GemDict([0,0,0,0,0])
self._gems = GemDict([0,0,0,0,0])
self._gold = 0
self._reserved = []
self._points = 0
self._player_type = player_type
self._num_cards = 0
self._num_moves = 0
self._gems_taken = []
self._move_dict = {'take_two' : 0, 'take_three' : 0, 'buy' : 0, 'buy_noble' : 0, 'reserve' : 0, 'reserve_top': 0, 'discard' : 0}
def get_move_dict(self):
return self._move_dict
def add_move_dict(self, move):
self._move_dict[move] += 1
"""
Returns the type of this player.
"""
def get_player_type(self):
return self._player_type
"""
Returns the name of the current player.
"""
def get_name(self):
return self._name
"""
Returns the player's discounts as a GemDict which represents the number of cards of each color a player has.
"""
def get_discounts(self):
return self._discounts
"""
Returns the number of gems of each color the player has.
"""
def get_gems(self):
return self._gems
"""
Returns the number of gold gems a player has.
"""
def get_gold(self):
return self._gold
"""
Returns a list of Card objects the player has reserved.
"""
def get_reserved(self):
return self._reserved
"""
Returns the number of points the player has.
"""
def get_points(self):
return self._points
"""
Returns number of cards the player has bought.
"""
def get_purchased(self):
return self._num_cards
"""
Returns number of cards the player has bought.
"""
def gemGoldAmt(self):
return self._gems.total_gems() + self._gold
"""
Returns the list of all the colors the player owns.
"""
def get_colors(self):
return [self._gold] + self._gems.data_gui()
"""
Increments the player's number of gold gems by 1.
"""
def incr_gold(self):
self._gold += 1
"""
Decrements the player's number of gold gems by 1.
"""
def decr_gold(self):
self._gold -= 1
"""
Adds card to the player's list of reserved cards.
"""
def add_reserved(self, card):
self._reserved.append(card)
card.reserve(self._reserved.index(card))
"""
Removes card from the player's list of reserved cards.
"""
def remove_reserved(self, card):
    """Remove the reserved card at the given position.

    NOTE(review): despite its name, ``card`` is used as a list *index*
    (``del self._reserved[card]``), not a Card object — verify callers.
    """
    del self._reserved[card]
"""
Adds the color to the player's discounts.
"""
def set_discount(self, color):
self._discounts.add(color, 1)
"""
Adds the given number of gems in color_lst to the player's total gems. color_lst is a list
of ints representing the number of gems of each color to be added. The order of color_lst is
[red, blue, green, white, black]. For example, [0, 1, 3, 0, 2] would representing adding
0 red gems, 1 blue gem, 3 green gems, 0 white gems, and 2 black gems.
"""
def add_gems(self, color_lst):
self._gems.add(Color.WHITE, color_lst[0])
self._gems.add(Color.BLACK, color_lst[1])
self._gems.add(Color.GREEN, color_lst[2])
self._gems.add(Color.RED, color_lst[3])
self._gems.add(Color.BLUE, color_lst[4])
"""
Removes the given number of gems in color_lst from the player's total gems. color_lst is a list
of ints representing the number of gems of each color to be added. The order of color_lst is
[red, blue, green, white, black]. For example, [0, 1, 3, 0, 2] would representing removing
0 red gems, 1 blue gem, 3 green gems, 0 white gems, and 2 black gems.
"""
def remove_gems(self, color_lst):
self._gems.remove(Color.WHITE, color_lst[0])
self._gems.remove(Color.BLACK, color_lst[1])
self._gems.remove(Color.GREEN, color_lst[2])
self._gems.remove(Color.RED, color_lst[3])
self._gems.remove(Color.BLUE, color_lst[4])
def ai_remove_gems(self, color_lst):
    """Remove taken-gem history entries for an AI player.

    color_lst holds per-color counts in [white, black, green, red, blue]
    order; each counted color is removed once from self._gems_taken.
    Does nothing for non-AI players (player_type != 2).
    """
    if self._player_type != 2:  # use only for AI
        return
    order = (Color.WHITE, Color.BLACK, Color.GREEN, Color.RED, Color.BLUE)
    for i, count in enumerate(color_lst[:len(order)]):
        for _ in range(count):
            self._gems_taken.remove(order[i])
"""
Returns list of gems that the player took in the order they took them
"""
def get_gems_ordered(self):
return self._gems_taken
"""
Takes a list of Colors and adds them to the front of the list of gems that have
already been taken
"""
def ai_add_gems(self, color_lst):
color_lst + self._gems_taken
"""
Adds the given number of points to the player's point total.
"""
def set_points(self, num_points):
self._points += num_points
"""
Increments the player's number of purchased cards by 1.
"""
def incr_card_total(self):
self._num_cards += 1
"""
Returns the player's number of moves so far, not including this current one.
"""
def get_num_moves(self):
return self._num_moves
"""
Increments the player's number of moves by 1.
"""
def incr_num_moves(self):
self._num_moves += 1
def __str__(self):
gd = self._gold
gm = self._gems
dc = self._discounts
return (
str(self._points) + " " + self._name + "\n\n\n" +
" G " + "W|K|E|R|B\n" +
"GEMS " + str(gd) + " " + str(gm) + "\n" +
"CARDS " + str(dc) + "\n" +
"_______________________\n" +
"TOTAL " + str(gd) + " " + gm.addGD(dc)
) | ckpalma/splendor-ai | gym-master/gym/envs/splendor/structure.py | structure.py | py | 36,935 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "random.seed",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "enum.Enum",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 57,
... |
15166686250 | from matplotlib import pyplot as plot
plot.rcParams [ "savefig.facecolor" ] = "w"
plot.rcParams [ "savefig.edgecolor" ] = "b"
lab_value , title_numb = 'Lab_None' , 0
def setLab(text=None):
    """Update the global figure-name label when text is given, then echo it."""
    global lab_value
    if text:
        lab_value = text
    print('lab value:', lab_value)
def save():
    """Save every open matplotlib figure to '<lab_value>_<title_numb>.png', then clear and close it."""
    global title_numb, lab_value
    for i in plot.get_fignums():
        fig = plot.figure(i)
        plot.savefig('{}_{}.png'.format(lab_value, str(title_numb)))
        print('{}_{}.png'.format(lab_value, str(title_numb)))
        fig.clear()
        plot.close(fig)
        # NOTE(review): indentation was ambiguous in the source layout;
        # incrementing inside the loop gives each figure a distinct file name —
        # confirm against the original file.
        title_numb += 1
def plot_series(time=[], series=[], format="-", start=0, end=None,
                lr_with_var_value=[], history={}, xmin=None, xmax=None, ymin=None, ymax=None,
                title=None, xlabel=None, ylabel=None, labels=[]):
    """Plot one or several series against time, optionally add a semilog
    learning-rate/loss curve, and save all open figures.

    series may be a single sequence or a tuple of sequences (one line each,
    labelled from ``labels``); the [start:end] slice selects the plotted range.
    NOTE(review): the mutable defaults ([], {}) are shared between calls; they
    are only read here, but None-defaults would be safer. Also note
    ``history.history`` is accessed, so the ``{}`` default only works because
    the empty ``lr_with_var_value`` short-circuits that condition.
    """
    plot.figure(figsize=(10, 6))
    label = (labels[0] if (len(labels) and labels[0]) else None)
    if len(time) and len(series) and (type(series) is tuple):
        # one line per series in the tuple, each with its own label
        for i, series_num in enumerate(series):
            label = (labels[i] if (len(labels) and labels[i]) else None)
            plot.plot(time[start:end], series_num[start:end], format, label=label)
    elif len(time) and len(series):
        plot.plot(time[start:end], series[start:end], format, label=label)
    plot.title(str(title))
    plot.legend()
    plot.xlabel("Time" if not xlabel else xlabel)
    plot.ylabel("Value" if not ylabel else ylabel)
    plot.grid(True)
    # optional learning-rate vs. loss plot on a log x-axis
    if len(lr_with_var_value) and ('loss' in history.history) and (
            xmin != None and xmax != None and ymin != None and ymax != None):
        plot.semilogx(lr_with_var_value, history.history["loss"])
        plot.tick_params('both', length=10, width=1, which='both')
        plot.axis([xmin, xmax, ymin, ymax])
    # NOTE(review): original indentation was ambiguous; save() is placed at
    # function level so every call saves the figures — confirm.
    save()
| AmalLight/deepL_RL | saveFigure.py | saveFigure.py | py | 2,204 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 4,
"usage_type": "attribute"
},
{
"ap... |
37558477516 | from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.hashers import make_password, check_password
from superadmin.models import User, UserGroup, Account, AccountChangeLog
from .forms import RawLoginForm
# Create your views here.
def user_login(request):
    """Render the login form; on valid credentials store the username in the session.

    Already-authenticated sessions are redirected straight to the home page.
    ``response`` is False when authentication fails (unknown user or bad
    password) so the template can show an error.
    """
    if request.session.get('user'):
        return redirect('user:user-home')
    login = RawLoginForm()
    response = True
    if request.method == 'POST':
        login = RawLoginForm(request.POST)
        if login.is_valid():
            username = login.cleaned_data['username']
            password = login.cleaned_data['password']
            try:
                user = User.objects.get(username=username)
                if check_password(password, user.password):
                    print("Success")
                    login = RawLoginForm()
                    request.session['user'] = username
                    return redirect('user:user-home')
                else:
                    # wrong password: flag failure and reset the form
                    response = False
                    login = RawLoginForm()
            except User.DoesNotExist:
                # unknown username: same failure path as a wrong password
                response = False
                login = RawLoginForm()
    context = {
        'login_form': login,
        'response': response
    }
    return render(request, "user_login.html", context)
def user_home(request):
    """Show the home page; on a search POST, list the change log of a matching account.

    Only accounts that share a group with the logged-in user are searchable.
    """
    changelog = ""
    username = request.session.get('user')
    if not username:
        return redirect('user:user-login')
    if request.method == 'POST' and 'logout' in request.POST:
        print("Logout")
        return user_logout(request)
    if request.method == 'POST' and 'search_form' in request.POST:
        print(request.POST.get("searchkey"))
        searchkey = request.POST.get("searchkey")
        try:
            userdata = User.objects.get(username=username)
            # ids of the groups the logged-in user belongs to
            # NOTE(review): flat='True' passes a string; Django only checks
            # truthiness so it works, but it should be the boolean True.
            grouplist = userdata.group.all().values_list('id', flat='True')
            try:
                accountid = Account.objects.filter(username=searchkey, group__id__in=grouplist).values_list('id', flat='True').distinct()
                for id in accountid.iterator():
                    try:
                        accountdata = Account.objects.get(id=id)
                        try:
                            # NOTE(review): each iteration overwrites changelog,
                            # so only the last matching account's log is shown —
                            # confirm that is intended.
                            changelog = AccountChangeLog.objects.filter(username=accountdata).order_by('-modified')
                        except AccountChangeLog.DoesNotExist:
                            changelog = ""
                    except Account.DoesNotExist:
                        changelog = ""
            except Account.DoesNotExist:
                changelog = ""
        except User.DoesNotExist:
            changelog = ""
    context = {
        'username': username,
        'changelog': changelog
    }
    return render(request, "user_home.html", context)
def user_logout(request):
    """Clear the session user on an explicit logout POST; otherwise bounce home."""
    username = request.session.get('user')
    is_logout_post = request.method == 'POST' and 'logout' in request.POST
    if username and is_logout_post:
        print("Logout Button Clicked")
        del request.session['user']
        return redirect('user:user-login')
    print("Logout Button Unclicked")
    return redirect('user:user-home')
{
"api_name": "django.shortcuts.redirect",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "forms.RawLoginForm",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "forms.RawLoginForm",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "super... |
8546055493 | import random
import pygame as pg
from creature import Creature
def draw_creatures(list):
    """Call draw() on every creature in the given sequence.

    Replaces the index-based loop (and the unused pre-initialised counter)
    with direct iteration. The parameter name shadows the builtin ``list``
    but is kept so existing callers keep working.
    """
    for creature in list:
        creature.draw()
def replication(creatures, display, border_rect, start_speed,
                start_sense, start_energy, speed_mutation, sense_mutation, nutrition, color):
    """Append a mutated offspring for each creature that ate twice and has energy left.

    Iterates over a snapshot of ``creatures`` so offspring appended during the
    pass are never examined themselves; the original iterated the growing list,
    which only behaved because new creatures start with no food collected.
    Returns the (mutated-in-place) creatures list.
    """
    for creature in list(creatures):
        if creature.food_count >= 2 and creature.energy_used < creature.energy:
            new_creature = Creature(display, border_rect, start_speed, start_sense,
                                    start_energy, speed_mutation, sense_mutation, nutrition, color)
            # offspring inherit the parent's traits, then mutate
            new_creature.speed = creature.speed
            new_creature.radius = creature.radius
            new_creature.sense = creature.sense
            new_creature.mutate()
            creatures.append(new_creature)
    return creatures
def check_survival(creatures, display, border_rect, start_speed, start_sense,
                   start_energy, speed_mutation, sense_mutation, nutrition, color):
    """Keep fed, non-exhausted creatures, breed them, and respawn the survivors.

    A creature survives the generation when it ate at least once and did not
    use up all its energy; survivors replicate and get their per-generation
    counters reset before being respawned.
    """
    survivors = [c for c in creatures if c.food_count > 0 and c.energy_used < c.energy]
    survivors = replication(survivors, display, border_rect, start_speed,
                            start_sense, start_energy, speed_mutation, sense_mutation, nutrition, color)
    for creature in survivors:
        creature.food_count = 0
        creature.energy_used = 0
        creature.spawn()
    return survivors
def spawn_food(display, num, WIDTH, HEIGHT):
    """Draw up to ``num`` green food rectangles at random spots; return the kept rects.

    NOTE(review): a rectangle is drawn onto the display even when it overlaps
    an existing one and is then discarded from the returned list — confirm
    that is intended.
    """
    color = (0, 255, 0)  # green
    rectangles = []
    for i in range(num):
        x = random.randint(100, 900)
        y = random.randint(100, 900)
        rect = pg.draw.rect(display, color, (x, y, WIDTH, HEIGHT))
        # keep only rectangles that do not collide with an earlier one
        collides = False
        for r in rectangles:
            if rect.colliderect(r):
                collides = True
        if not collides:
            rectangles.append(rect)
    return rectangles
def avg_of_list(list):
    """Return the arithmetic mean of the values, rounded to two decimal places."""
    mean = sum(list) / len(list)
    return float('%.2f' % mean)
{
"api_name": "creature.food_count",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "creature.energy_used",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "creature.energy",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name"... |
32736730593 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 3/9/19 7:49 PM
# @Author : zchai
import itertools
import os
import torch
from allennlp.data.dataset_readers.seq2seq import Seq2SeqDatasetReader
from allennlp.data.iterators import BucketIterator
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.tokenizers.word_tokenizer import WordTokenizer
from allennlp.data.vocabulary import Vocabulary
from allennlp.nn.activations import Activation
from allennlp.models.encoder_decoders.simple_seq2seq import SimpleSeq2Seq
from allennlp.modules.attention import LinearAttention
from allennlp.modules.seq2seq_encoders import PytorchSeq2SeqWrapper
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
from allennlp.predictors import SimpleSeq2SeqPredictor, Seq2SeqPredictor
from allennlp.training.trainer import Trainer
from torch import optim
from couplet_generator.my_logger import Logger
from couplet_generator.utils import conf
logger = Logger(__name__).get_logger()
class Seq2SeqAllen:
    """Wrapper around an AllenNLP SimpleSeq2Seq model for couplet generation.

    With training=True it reads the datasets, builds a vocabulary and a
    Trainer; otherwise it loads the saved vocabulary and best weights and
    prepares a predictor for inference.
    """
    def __init__(self, training=False):
        self.training = training
        # all hyperparameters/paths come from the 'seq2seq_allen' config section
        config = conf['seq2seq_allen']
        self.model_path = config['model_path']
        self.vocab_path = config['vocab_path']
        prefix = config['processed_data_prefix']
        train_file = config['train_data']
        valid_file = config['test_data']
        src_embedding_dim = config['src_embedding_dim']
        trg_embedding_dim = config['trg_embedding_dim']
        hidden_dim = config['hidden_dim']
        epoch = config['epoch']
        patience = config['patience']
        # GPU 0 when available, else CPU (-1 is the AllenNLP CPU convention)
        if torch.cuda.is_available():
            self.cuda_device = 0
        else:
            self.cuda_device = -1
        self.reader = Seq2SeqDatasetReader(
            source_tokenizer=WordTokenizer(),
            target_tokenizer=WordTokenizer(),
            source_token_indexers={'tokens': SingleIdTokenIndexer()},
            target_token_indexers={'tokens': SingleIdTokenIndexer()})
        if self.training:
            self.train_dataset = self.reader.read(os.path.join(prefix, train_file))
            self.valid_dataset = self.reader.read(os.path.join(prefix, valid_file))
            # drop tokens that occur fewer than 3 times
            self.vocab = Vocabulary.from_instances(self.train_dataset + self.valid_dataset,
                                                   min_count={'tokens': 3})
        else:
            self.vocab = Vocabulary.from_files(self.vocab_path)
        src_embedding = Embedding(num_embeddings=self.vocab.get_vocab_size('tokens'),
                                  embedding_dim=src_embedding_dim)
        encoder = PytorchSeq2SeqWrapper(
            torch.nn.LSTM(src_embedding_dim, hidden_dim, batch_first=True))
        source_embedder = BasicTextFieldEmbedder({"tokens": src_embedding})
        self.model = SimpleSeq2Seq(vocab=self.vocab, source_embedder=source_embedder, encoder=encoder,
                                   max_decoding_steps=20,
                                   target_embedding_dim=trg_embedding_dim,
                                   use_bleu=True)
        optimizer = optim.Adam(self.model.parameters())
        iterator = BucketIterator(batch_size=32, sorting_keys=[("source_tokens", "num_tokens")])
        # the iterator needs the vocab so it can index the data during training
        iterator.index_with(self.vocab)
        # NOTE(review): model.cuda(-1) on a CPU-only machine likely fails;
        # consider model.to(device) instead — confirm against AllenNLP version.
        self.model.cuda(self.cuda_device)
        if training:
            self.trainer = Trainer(model=self.model,
                                   optimizer=optimizer,
                                   iterator=iterator,
                                   patience=patience,
                                   train_dataset=self.train_dataset,
                                   validation_dataset=self.valid_dataset,
                                   serialization_dir=self.model_path,
                                   num_epochs=epoch,
                                   cuda_device=self.cuda_device)
        if not self.training:
            # load the best checkpoint and switch the model to eval-style mode
            with open(os.path.join(self.model_path, 'best.th'), 'rb') as f:
                self.model.load_state_dict(torch.load(f))
            self.model.cuda(self.cuda_device)
            self.model.training = self.training
            self.predictor = Seq2SeqPredictor(self.model, dataset_reader=self.reader)

    def train(self):
        """Persist the vocabulary, then run the AllenNLP training loop."""
        self.vocab.save_to_files(self.vocab_path)
        self.trainer.train()

    def predict(self, sentence):
        """Return the decoded output for ``sentence`` (inference mode only)."""
        if not self.training:
            return self.predictor.predict(sentence)
        else:
            logger.warning('Mode is in training mode!')
| adamsZQ/couplet_generator | couplet_generator/seq2seq_allen.py | seq2seq_allen.py | py | 4,703 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "couplet_generator.my_logger.Logger",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "couplet_generator.utils.conf",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 46,
"usage_type": "call"
},
... |
9661100905 | from __future__ import print_function
import sys
import time
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
from useFunc.detectAndTrack import *
from useFunc.utils import *
from useFunc.featMatch import *
if __name__ == '__main__':
    # Params
    intv_EM = 4  # frame interval between ego-motion estimations
    # - focal length (pixels), Camera height (metres)
    foc_len, H_cam = 1200, 0.8
    thresh = 3  # threshold angle to avoid outliers
    # settings for read & write video
    prePath = r'C:\ProgamData\global_dataset\img_vid'
    vidName = r'\vid1_4'
    fmt = '.mp4'
    cap = cv.VideoCapture(prePath + vidName + fmt)
    fourcc = cv.VideoWriter_fourcc(*'XVID')
    fps_vid = cap.get(cv.CAP_PROP_FPS)
    sizeW, sizeH = int(cap.get(cv.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
    # size = (sizeW*2, int(sizeH*crop))
    size = (sizeW, sizeH)
    c_pnt = (int(sizeW / 2), int(sizeH / 2))
    # pinhole intrinsics; NOTE(review): fy = foc_len / 1.6 looks like an
    # aspect-ratio correction — confirm the calibration source.
    camMat = np.array([[foc_len, 0, c_pnt[0]],
                       [0, foc_len / 1.6, c_pnt[1]],
                       [0, 0, 1]])
    # init video writer
    write_name = 'output\\' + vidName + '_EM.avi'
    vidWrite = cv.VideoWriter(write_name, fourcc, fps_vid, size)
    # Read first frame, quit if unable to read the video file
    success, _ = cap.read()
    if not success:
        print('Failed to read video')
        sys.exit(1)
    # MAIN
    numFr = 0
    while cap.isOpened():
        # see if it's the end
        t1 = time.time()
        success, frame = cap.read()
        if not success:
            print("Done")
            break
        frameCopy = np.copy(frame)
        # Ego-motion estimation, independent of detection
        if numFr == 0:  # init
            frame0 = np.copy(frameCopy)
            angs = np.array([0, 0, 0])
            pitch = 0  # orig pose
        elif numFr % intv_EM == 0:  # angle calc every intv_EM frames
            angs = feat_match(frame0, frameCopy, numFr, size, camMat=camMat, crop=1,
                              foc_len=foc_len, match_pnts=20, thresh=thresh)
            frame0 = np.copy(frameCopy)  # stored for next round
            pitch += angs[0]  # accumulate pitch angle
        # counter update
        numFr += 1
        t = time.time() - t1
        # print info (overlay timing/pose onto the frame)
        if t > 0:
            print_info(frameCopy, t, numFr, pitch, angs[0])
        cv.imshow("Ego-motion", frameCopy)
        vidWrite.write(frameCopy)
        if cv.waitKey(1) & 0xFF == 27:  # Esc quits
            break
| dexter2406/MonoVision_MotionEstimation | MoVis_EM.py | MoVis_EM.py | py | 2,416 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cv2.VideoWriter_fourcc",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PROP_FPS",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP... |
29719010517 | from functools import reduce
def flip_data(arr, curr, l):
    """Reverse, in place, the circular segment of arr of length l starting at curr."""
    if l == 1:
        return  # reversing a single element is a no-op
    positions = [(curr + offset) % len(arr) for offset in range(l)]
    values = [arr[p] for p in positions]
    for p, v in zip(positions, reversed(values)):
        arr[p] = v
def chunks(l, n):
    """Yield consecutive slices of l containing at most n items each."""
    total = len(l)
    for start in range(0, total, n):
        yield l[start:start + n]
def sparse_hash(arr):
    """Condense a sparse hash into its hexadecimal dense hash.

    XORs each 16-number block of ``arr`` into one byte and concatenates the
    two-digit hex encodings. ``arr``'s length must be a multiple of 16
    (asserted per block, as before).

    Fixes: the accumulator no longer shadows the builtin ``hash`` and the
    string is built with join instead of quadratic ``+=``.
    """
    parts = []
    for i in range(0, len(arr), 16):
        block = arr[i:i + 16]
        assert len(block) == 16
        # xor the block together, then format as a zero-padded hex byte
        parts.append(format(reduce(lambda x, y: x ^ y, block), '02x'))
    return ''.join(parts)
def hash_data(numbers, lengths):
    """Run 64 rounds of the knot-tying pass over ``numbers`` and return its dense hash.

    Mutates ``numbers`` in place; ``current`` and ``skip_size`` persist across
    all 64 rounds, as the Knot Hash algorithm (AoC 2017 day 10) requires.
    """
    current = 0
    skip_size = 0
    # run 64 times
    for _ in range(64):
        for l in lengths:
            # reverse the circular slice of length l starting at `current`
            flip_data(numbers, current, l)
            # move current spot
            current = (current + l + skip_size) % len(numbers)
            # increase skip size
            skip_size += 1
    return sparse_hash(numbers)
def convert_length(lengths_raw):
    """Return the ASCII codes of the input plus the standard knot-hash suffix."""
    suffix = [17, 31, 73, 47, 23]
    return [ord(ch) for ch in lengths_raw] + suffix
# test input
numbers = list(range(256))
lengths = convert_length('')
assert hash_data(numbers, lengths) == 'a2582a3a0e66e6e86e3812dcb672a272'
numbers = list(range(256))
lengths = convert_length('AoC 2017')
assert hash_data(numbers, lengths) == '33efeb34ea91902bb2f59c9920caa6cd'
numbers = list(range(256))
lengths = convert_length('1,2,3')
assert hash_data(numbers, lengths) == '3efbe78a8d82f29979031a4aa0b16a9d'
numbers = list(range(256))
lengths = convert_length('1,2,4')
assert hash_data(numbers, lengths) == '63960835bcdc130f0b66d7ff4f6a5a8e'
# real input
numbers = list(range(256))
lengths = convert_length(
"76,1,88,148,166,217,130,0,128,254,16,2,130,71,255,229")
print(hash_data(numbers, lengths))
| yknot/adventOfCode | 2017/10_02.py | 10_02.py | py | 2,192 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "functools.reduce",
"line_number": 39,
"usage_type": "call"
}
] |
30988421589 | import json
import os
from flask import Flask, request, jsonify
app = Flask(__name__)
here = os.path.dirname(__file__)
state_path = os.path.join(here, "state.json")
@app.route("/")
def home():
with open(state_path) as f:
return jsonify(json.load(f))
@app.route("/<page>", methods=["GET", "POST"])
def route(page):
with open(state_path) as f:
data = json.load(f)
if request.method == "POST":
for k, v in request.form.items():
if k not in data[page]:
return f"Invalid attribute {k} for {page}", 400
try:
v = int(v)
except ValueError:
pass
data[page][k] = v
with open(state_path, "w") as f:
json.dump(data, f, indent=2)
return jsonify(data[page])
@app.route("/tank_stats")
def tank_stats():
with open(state_path) as f:
data = json.load(f)
return jsonify({
k: data["tank"][k]
for k in [
"volume_in", "volume_out_tank", "volume_out_urban_network",
"pump_in_running_duration", "pump_out_running_duration", "urban_network_running_duration", "is_tank_full",
"is_tank_empty"
]
})
if __name__ == "__main__":
app.debug = True
app.run(host="0.0.0.0", port=5001)
| tartopum/atelier | fake_arduino/server.py | server.py | py | 1,307 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number... |
27338086141 | # inspired from: https://codehandbook.org/how-to-read-email-from-gmail-using-python/
# https://github.com/jay3dec/pythonReadEmail
# Python 3.8^ standard libraries
from traceback import print_exc
from imaplib import IMAP4_SSL
from email import message_from_bytes
from base64 import b64decode
from uuid import uuid4
from json import load, dump
from os import walk, linesep
from email.policy import default as default_policy
import configparser
import subprocess
# get environment variables
config = configparser.ConfigParser()
config.read('./env/env.ini')
# -------------------------------------------------
#
# Read email from gmail using python
#
# -------------------------------------------------
def read_email_from_gmail():
    """Poll the Gmail submissions folder for new spot entries and publish them.

    Fetches form-submission emails over IMAP, parses each into a GeoJSON
    feature (skipping duplicates by coordinates and by image name), saves any
    attached base64 image, appends new features to spots.geojson and pushes
    the data repository to GitHub.
    """
    # note: values from env.ini don't need quotes and are all strings
    # except for FROM_BOX as explained below
    FROM_EMAIL = config['DEFAULT']['FROM_USER'] + \
        config['DEFAULT']['ORG_EMAIL']
    FROM_PWD = config['DEFAULT']['FROM_PWD']
    IMAP_SERVER = config['DEFAULT']['IMAP_SERVER']
    # collect new spot data in geojson-friendly 'features' list
    new_spots = []
    # json data to read, update and write back to file
    spots_db = False
    with open(f"{config['DEFAULT']['DATA_PATH']}/spots.geojson", 'r') as db_file:
        spots_db = load(db_file)
    try:
        print('\nconnecting to gmail..')
        # SSL to IMAP server via imaplib using credentials
        mail = IMAP4_SSL(IMAP_SERVER)
        mail.login(FROM_EMAIL, FROM_PWD)
        # FROM_BOX in env.ini includes the double quotes mail.select() needs
        mail.select(config['DEFAULT']['FROM_BOX'])
        # NOTE(review): searching 'UNSEEN' would avoid re-reading old mail
        mail_data = mail.search(None, 'ALL')
        print('reading mail..')
        mail_ids = mail_data[1]
        id_list = mail_ids[0].split()
        if not len(id_list):
            print('<error> no email')
            return
        first_id = int(id_list[0])
        last_id = int(id_list[-1])
        # iterate newest first; the stop value first_id - 1 is excluded,
        # so first_id itself is processed
        for i in range(last_id, first_id - 1, -1):
            # RFC822 fetches the full raw message
            response = mail.fetch(str(i), '(RFC822)')
            # bytes + default policy avoids environment-specific decoding
            msgs = [message_from_bytes(res_part[0][1], policy=default_policy)
                    for res_part in response if isinstance(res_part[0], tuple)]
            # substrings identifying formspree submission emails
            subject_strings = ['New submission', 'on-the-spot']
            for msg in msgs:
                # filter for formspree new submissions on on-the-spot only
                if not (msg['from'] == 'Formspree <noreply@formspree.io>' and all(x in msg['subject'] for x in subject_strings)):
                    continue
                body = msg.get_body(('plain'))
                content = body.get_content()
                # line 21 must hold the 'base64:' marker; if not, the form
                # field layout has changed and the message can't be parsed
                lines = content.split(linesep)
                if lines[21] != 'base64:':
                    print('<continue> cannot find form fields in message')
                    continue
                # returns the msg spot data in geojson-friendly dict structure
                spot_data = get_spot_data(lines)
                # skip submissions whose form fields are all empty
                if not any(spot_data.values()):
                    print('<continue> all form fields are empty')
                    print(spot_data)
                    continue
                # duplicate check: identical [lng, lat] already in the database
                db_coords = [v['geometry']['coordinates']
                             for v in spots_db['features']]
                # lines[14] = lat value, lines[18] = lng value
                # (GeoJSON coordinate order is [longitude, latitude])
                msg_coords = [float(lines[18].strip()),
                              float(lines[14].strip())]
                if msg_coords in db_coords:
                    print('<continue> spot already exists')
                    continue
                spot_name = lines[6].strip().lower()
                # short random id for the new spot
                spot_id = str(uuid4())[:8].lower()
                spot_data['id'] = spot_id
                # duplicate check by existing image file name
                if match_file_name(f"{config['DEFAULT']['DATA_PATH']}/img", spot_name):
                    print('<continue> spot name already exists in images')
                    continue
                img_file = False
                # save the attached image (if any); img_file becomes its name
                if lines[22]:
                    img_file = save_base64_img(
                        lines[22].strip(), spot_name, spot_id)
                if img_file:
                    spot_data['picture'] = img_file
                # organise spot data into a GeoJSON Feature
                # (Leaflet uses [lat, lng] — the reverse of GeoJSON)
                feature = {
                    "type": "Feature",
                    "geometry": {
                        "type": "Point",
                        "coordinates": msg_coords
                    },
                    "properties": {
                        "id": spot_data['id'],
                        "name": spot_data['name'],
                        "city": spot_data['city'],
                    }
                }
                # attach picture filename when one was saved
                if 'picture' in spot_data:
                    feature['properties']['picture'] = spot_data['picture']
                new_spots.append(feature)
    except Exception as e:
        print_exc()
        print(str(e))
        return
    if not new_spots:
        print('..no new spots found\n')
        return
    print(f"found {len(new_spots)} new spot{'s' if len(new_spots) != 1 else ''}")
    # merge the new features into the existing database
    db_spots = spots_db.get('features', [])
    print(f"found {len(db_spots)} old spot{'s' if len(db_spots) != 1 else ''}")
    print('updating spots..')
    db_spots.extend(new_spots)
    spots_db['features'] = db_spots
    print(
        f"total spot{'s' if len(spots_db['features']) != 1 else ''}: {len(spots_db['features'])}")
    # write updated spots back to file
    with open(f"{config['DEFAULT']['DATA_PATH']}/spots.geojson", 'w') as json_file:
        dump(spots_db, json_file, indent=2)
    print('updated spots database')
    print('pushing changes to github..')
    # commit and push the data repo; inspect result_*.returncode if needed
    result_git_add = subprocess.run(
        ["git", "add", "-A"], cwd=config['DEFAULT']['DATA_PATH'])
    result_git_commit = subprocess.run(
        ["git", "commit", "-m", "updated spots from python"], cwd=config['DEFAULT']['DATA_PATH'])
    result_git_push = subprocess.run(
        ["git", "push", "origin", "main"], cwd=config['DEFAULT']['DATA_PATH'])
    print('..done\n')
# -------------------------------------------------
#
# Parses message content into spot data dict
#
# -------------------------------------------------
# expect a change: removing email form field
# to automate parameterise some values like:
# or use +4 system from starting_point = 5
# <assume> msg content is predictable
# because if lines[n] == <last_form_field> check has been done
# prior to calling this function
def get_spot_data(lines):
    """Parse the form-field lines of a submission email into a spot dict.

    Fields sit at lines 5, 9, 13, 17 and 21: each key line ends with ':'
    (stripped off) and is followed by its value line; both are lowercased.
    Assumes the caller has already verified the form layout.
    """
    spot_data = {}
    for n in range(5, 22, 4):
        key = lines[n][:-1].strip().lower()
        spot_data[key] = lines[n + 1].strip().lower()
    return spot_data
# -------------------------------------------------
#
# Checks if file names match a given substring
# Returns a Boolean
#
# -------------------------------------------------
def match_file_name(file_dir, match_name):
    """Return True if any file in file_dir starts with the hyphenated spot name.

    match_name's spaces are replaced with hyphens and a trailing hyphen is
    appended, matching the '<spot-name>-<id>.<ext>' naming scheme.
    """
    file_names = next(walk(file_dir), (None, None, []))[2]
    prefix = match_name.replace(' ', '-') + '-'
    for file_name in file_names:
        if file_name.startswith(prefix):
            print('found a match:', file_name.replace('-', ' '), match_name)
            return True
    return False
# -------------------------------------------------
#
# Creates metadata and file from base64 string
#
# -------------------------------------------------
def save_base64_img(data_url, spot_name, spot_id):
    """Decode a base64 data URL and write it into the data repo's img directory.

    Returns the file name written: '<spot-name>-<spot_id>.<jpg|png>'.
    """
    # extract 'jpg'/'png' from the data-URI prefix ('jpeg' -> 'jpg').
    # NOTE(review): the fixed slice [11:15] plus character stripping is fragile
    # and there is no real MIME-type validation — the form restricts file
    # types client-side only, so confirm inputs are trusted.
    ext = data_url[11:15].lower().replace('e', '').replace(';', '')
    # remove data URI scheme prefix up to and including ';base64,'
    b64 = data_url[data_url.find(';base64,') + len(';base64,'):]
    # img file name: <spot-name>-<uuid>.<jpg/png>
    # replace all whitespaces in spot_name with hyphens
    file_name = f"{spot_name.replace(' ', '-')}-{spot_id}.{ext}"
    # write out to image file
    with open(f"{config['DEFAULT']['DATA_PATH']}/img/{file_name}", "wb") as fh:
        fh.write(b64decode(b64))
    return file_name
# -------------------------------------------------
#
# Runs module if called directly
#
# -------------------------------------------------
if __name__ == '__main__':
read_email_from_gmail()
| PAR-iTY/on-the-spot | python/on-the-spot-mail.py | on-the-spot-mail.py | py | 13,406 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "imaplib.IMAP4_SSL",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "email.message_f... |
73139030824 | import numpy as np
from scipy.ndimage import maximum_filter
from operator import itemgetter
# implementation with pure functional procedure
# it could be refactored as object-oriented way....
def find_spot(mesh, N):
    """
    Find the N highest-valued view spots in a landscape mesh.

    @param mesh
      a well-defined mesh object (implicit grid structure)
    @param N
      maximum number of spots to return
    @return the top-N value records, sorted by 'value' descending
    @raise RuntimeError when the mesh is not a well-defined implicit grid
    """
    try:
        validate_mesh_grid(mesh)
        grid = scale_to_grid(mesh)
        all_values = mesh['values']
        # for every grid peak keep the better of its two triangle values
        best_per_peak = []
        for peak in find_peak(grid):
            element_ids = lookup_mesh_element(grid=grid, gcoord=peak, mesh=mesh)
            candidates = list(itemgetter(*element_ids)(all_values))
            best_per_peak.append(max(candidates, key=itemgetter('value')))
        ranked = sorted(best_per_peak, key=itemgetter('value'), reverse=True)
        return ranked[:N]
    except AssertionError:
        raise RuntimeError("Support only well-defined mesh format (i.e. implicit grid)")
def find_peak(grid):
    """
    Locate the local maxima of a 2d grid.

    @param grid
      a 2d grid structure
    @return an array of [x, y] index pairs, one row per peak:
      [[x_0, y_0], [x_1, y_1], ... [x_k, y_k]]
    """
    # A cell is a peak when it equals the maximum over its 4x4 neighbourhood;
    # edge cells are compared against the nearest-replicated border.
    neighbourhood_max = maximum_filter(grid, size=(4, 4), mode="nearest")
    return np.argwhere(grid == neighbourhood_max)
def validate_mesh_grid(mesh):
    """
    Validate that a mesh object is in a well-defined format.
    A well-defined mesh carries an implicit rigid-grid structure.
    An AssertionError is raised on the first violated invariant.

    @param mesh
      a mesh object with 'nodes', 'elements' and 'values' lists
    """
    elements = mesh['elements']
    nodes = mesh['nodes']
    values = mesh['values']
    # every record id must equal its list position
    assert all(node["id"] == pos for pos, node in enumerate(nodes))
    assert all(elem["id"] == pos for pos, elem in enumerate(elements))
    assert all(val["element_id"] == pos for pos, val in enumerate(values))
    # exactly one value per triangle element
    assert len(elements) == len(values)
    # every node must be referenced by at least one triangle
    referenced = set()
    for elem in elements:
        referenced.update(elem["nodes"])
    assert len(referenced) == len(nodes)
    # triangles come in pairs that share their long edge (two common nodes)
    assert len(elements) % 2 == 0
    for pos in range(0, len(elements), 2):
        shared = set(elements[pos]['nodes']) & set(elements[pos + 1]['nodes'])
        assert len(shared) == 2
    # derive the grid dimensions from the corner nodes
    first, second, last = nodes[0], nodes[1], nodes[-1]
    width, height = last['x'] - first['x'], last['y'] - first['y']
    step_y = second['y'] - first['y']
    rows = 1 + int(height / step_y)
    cols = int(len(nodes) / rows)
    step_x = width / (cols - 1)
    assert rows * cols == len(nodes)
    # nodes must sit exactly on the column-major lattice
    for pos, node in enumerate(nodes):
        assert node['y'] == first['y'] + step_y * (pos % rows)
        assert node['x'] == first['x'] + step_x * (pos // rows)
    # each grid cell maps to one upper and one lower triangle with fixed corners
    for col in range(0, cols - 1):
        for row in range(0, rows - 1):
            cell = col * (rows - 1) + row
            corner = col * rows + row
            upper = elements[cell * 2]
            lower = elements[cell * 2 + 1]
            assert sorted(upper['nodes']) == sorted([corner, corner + 1, corner + 1 + rows])
            assert sorted(lower['nodes']) == sorted([corner, corner + rows, corner + 1 + rows])
def lookup_mesh_element(grid, gcoord, mesh):
    """
    Retrieve the two mesh elements backing one cell of the equivalent grid.

    @param grid
      a 2d grid (only its shape is consulted)
    @param gcoord
      [x, y] coordinate of the grid cell
    @param mesh
      the well-defined mesh
    @return a list with the IDs of the cell's two triangle elements
    """
    col, row = gcoord
    n_rows = grid.shape[1]
    # cells are laid out column-major; each cell owns two consecutive triangles
    cell = col * n_rows + row
    elems = mesh['elements']
    return [elems[cell * 2]['id'], elems[cell * 2 + 1]['id']]
def scale_to_grid(mesh):
    """
    Scale the mesh down to a structural grid.
    Every two consecutive elements (i.e. triangles) of the mesh
    are represented with a single cell of the grid, whose value is
    the mean of the two triangle values.
    @param mesh
      a well-defined mesh object
    @return numpy 2d array representing the mesh as a grid structure
    """
    # NOTE(review): 'elements' is fetched for symmetry with the other helpers
    # but never used in this function.
    elements = mesh['elements']
    nodes = mesh['nodes']
    values = mesh['values']
    # Recover the grid dimensions from the corner nodes, exactly as
    # validate_mesh_grid does: rows from the y-extent / y-step, cols from
    # the node count.
    first, second, last = nodes[0], nodes[1], nodes[-1]
    w, h = last['x'] - first['x'], last['y'] - first['y']
    step_y = second['y'] - first['y']
    rows = 1 + int(h / step_y)
    cols = int(len(nodes) / rows)
    # One cell per quad; each quad is a pair of consecutive triangles.
    grid = np.zeros((cols-1, rows-1))
    for x in range(0, cols-1):
        grid_y = np.zeros((rows-1,))
        for y in range(0, rows-1):
            # cell index in column-major order; triangles 2i and 2i+1 back it
            i = x*(rows-1)+y
            # cell value = average of the two triangle values
            grid_y[y] = (values[i*2]['value'] + values[i*2+1]['value']) / 2.0
        grid[x] = grid_y
return grid | easz/view_spot_finder | view_spot_finder/finder.py | finder.py | py | 4,367 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "operator.itemgetter",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.maximum_filter",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.swapaxes",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy... |
74649265384 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import unittest
from BeautifulReport import BeautifulReport
from utils.my_logger import logger
from utils.get_path import *
from scp import SCPClient
import paramiko
import os
import time
def make_report(name):
    """
    Run all test cases matching *name* and publish an HTML report.

    Discovers tests under ``case_dir`` with unittest, renders the suite
    through BeautifulReport into ``<project>/test_report/<timestamp>.html``,
    then uploads the report to a fixed remote host over SCP.

    @param name: file-name pattern passed to TestLoader.discover
        (e.g. 'test_*.py')
    """
    # project root = two levels above this file; reports live in test_report/
    base_dir = os.path.split(os.path.split(os.path.abspath(__file__))[0])[0]
    report_dir = os.path.join(base_dir, 'test_report')
    logger.debug("报告输出模块:获取当前脚本路径")
    # case_dir presumably comes from the utils.get_path star-import above —
    # TODO confirm
    s = unittest.TestLoader().discover(start_dir=case_dir, pattern=name)
    logger.debug("testsuit填充用例,%s", s)
    print('*'*25,'测试开始','*'*25)
    br = BeautifulReport(s)
    # NOTE(review): colons in the timestamp make this filename invalid on
    # Windows — confirm the target platform is POSIX-only.
    filename = time.strftime("%Y-%m-%d_%H:%M:%S") + r".html"
    logger.debug("报告输出模块:设置报告格式")
    br.report(filename=filename, description='回归用例自动化测试报告', report_dir=report_dir)
    try:
        file = os.path.join('%s' % report_dir, filename)
        ssh = paramiko.SSHClient()
        ssh.load_system_host_keys()
        # auto-accept unknown host keys (convenient, but disables MITM protection)
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        # NOTE(review): credentials are hard-coded in source — move them to a
        # config file or secret store.
        ssh.connect(hostname='192.168.90.162', port=22, username='deploy', password='linkcld123456')
        # NOTE(review): 'scpclient' is created but never used; only 'scp' is.
        scpclient = SCPClient(ssh.get_transport(), socket_timeout=15.0)
        scp = SCPClient(ssh.get_transport())
        # NOTE(review): remote_path names a file while recursive=True — verify
        # the intended destination.
        scp.put(file, recursive=True, remote_path='/linkcld/uploadfile/report.html')
        scp.close()
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # narrow to Exception and rely on logger.exception for the traceback.
        logger.exception("上传报告失败")
    else:
logger.info("报告已上传192.168.90.162/linkcld/uploadfile/report.html") | iospeng/python | pycharm_demo/pythonProject2/utils/report.py | report.py | py | 1,550 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.split",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_n... |
23420574740 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Renames the TransferenciaStock deposit FKs from solicitante/proveedor
    # (requester/supplier) to destino/origen (destination/origin) naming,
    # adds an authorization timestamp, and re-declares the
    # MovimientoStock.producto_stock foreign key.

    dependencies = [
        ('stock', '0201_auto_20160926_1614'),
    ]

    operations = [
        # requester deposit -> destination deposit
        migrations.RenameField(
            model_name='transferenciastock',
            old_name='deposito_solicitante_transferencia',
            new_name='deposito_destino_transferencia',
        ),
        # supplier deposit -> origin deposit
        migrations.RenameField(
            model_name='transferenciastock',
            old_name='deposito_proveedor_transferencia',
            new_name='deposito_origen_transferencia',
        ),
        # NOTE(review): auto_now=True refreshes this field on *every* save,
        # while the help_text claims it is set at authorization time — verify
        # against the model's intent.
        migrations.AddField(
            model_name='transferenciastock',
            name='fecha_hora_autorizacion_transferencia',
            field=models.DateTimeField(auto_now=True, help_text=b'La fecha y hora se asignan al momento de autorizarse la Transferencia. No se requiere el ingreso de este dato.', null=True, verbose_name=b'Fecha/hora autorizacion Transferencia'),
        ),
        # re-declaration of the FK's help_text/verbose_name/related_name
        migrations.AlterField(
            model_name='movimientostock',
            name='producto_stock',
            field=models.ForeignKey(related_name='producto_stock', verbose_name=b'Producto', to='stock.Producto', help_text=b'Seleccione el Producto a registrar en el Stock.'),
        ),
    ]
| pmmrpy/SIGB | stock/migrations/0202_auto_20160926_1825.py | 0202_auto_20160926_1825.py | py | 1,333 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.RenameField",
"line_number": 14,
"usage_type": "call"
},
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.