seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
14372065002 | # -*- coding: utf-8 -*-
from telebot import types
import sqlite3 as sq
from datetime import date
import time
import datetime
import requests
from bs4 import BeautifulSoup as Bs
from telebot import TeleBot
from fake_useragent import UserAgent
import random
from threading import Thread
from config import *
def get_proxy(proxy_list):
proxy = random.choice(proxy_list)
proxies = {"http": f'http://{proxy}', "https": f'http://{proxy}'}
return proxies
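# proxy_list is pulled in by `from config import *`; it is assumed to be a list of
# 'host:port' strings, e.g. ['1.2.3.4:8080'], from which a random proxy is picked.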
def get_useragent():
my_browser = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
try:
ua = UserAgent(fallback=my_browser)
my_agent = ua.random
return my_agent
except Exception as e:
print(e)
return my_browser
headers = {'User-Agent': get_useragent()}
# Filter listings: tell real-estate agencies apart from private sellers
def filter_AN(link_user):
r = requests.get(link_user, headers=headers,proxies=get_proxy(proxy_list), timeout=5)
soup = Bs(r.text, features="html.parser")
ads = (soup.findAll('a', class_='adTile-mainInfo'))
n = 0
for i in ads:
i = i.get("href")
if 'komnat' in str(i):
n = n + 1
print(n)
if int(n) < 3:
return None
else:
return True
# Add new listings to the database
def base_add(rooms, series, ploshad, remont, etaj, etaj_iz, rayon, prodaves, price, add_link, poslednyi_et, AN,
photo_links,data_soz,data_prod):
with sq.connect('version_1/lalago.db') as con:
cur = con.cursor()
cur.execute(
f"INSERT INTO base (rooms,series,ploshad,remont,etaj,etaj_iz,rayon,prodaves, price, an, add_link, poslednyi_et, photo_links,data_soz, data_prod) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,?,?)",
( int(rooms), str(series), int(ploshad), str(remont), int(etaj), int(etaj_iz),str(rayon),str(prodaves), price, str(AN), str(add_link), str(poslednyi_et), str(photo_links) ,str(data_soz), str(data_prod)
))
con.commit()
# Compare the apartment with similar apartments
def osenka_kv(series, remont, rooms, rayon, etaj, etaj_iz, poslednyi_et):
with sq.connect('../base_3.db') as con:
cur = con.cursor()
try:
if int(etaj) == 1:
info = (cur.execute(
f"SELECT sum(ploshad), sum(price) FROM base WHERE rooms ='{rooms}' AND series = '{series}' AND remont = '{remont}' AND rayon = '{rayon}' AND price != '1' GROUP BY rooms,series")).fetchone()
price_srednyi = (int(info[1]) / int(info[0])) * 0.96
return price_srednyi
elif int(etaj) == int(etaj_iz):
info = (cur.execute(
f"SELECT sum(ploshad), sum(price) FROM base WHERE rooms ='{rooms}' AND series = '{series}' AND remont = '{remont}' AND rayon = '{rayon}' AND price != '1' GROUP BY rooms,series")).fetchone()
price_srednyi = (int(info[1]) / int(info[0])) * 0.96
return price_srednyi
else:
info = (cur.execute(
f"SELECT sum(ploshad), sum(price) FROM base WHERE rooms ='{rooms}' AND series = '{series}' AND remont = '{remont}' AND rayon = '{rayon}' AND price != '1' GROUP BY rooms,series")).fetchone()
price_srednyi = int(info[1]) / int(info[0])
return price_srednyi
except:
pass
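# The SELECT above returns (total area, total price) of comparable listings, so the
# ratio is an average price per m2; first and top floors get a 4% discount (factor 0.96).
# Worked example with hypothetical numbers: 450000 total price / 300 m2 total area
# = 1500 per m2, and 1500 * 0.96 = 1440 for a first- or top-floor flat.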
def Starter_check_lalafo_kg():
while True:
try:
check_lalafo_kg()
time.sleep(600)
except:
print('exept parser_kvartiry')
time.sleep(1800)
continue
def check_lalafo_kg():
rayon_list = {
'3 мкр': ['3 мкр', '3мкр', '3мик', '3-ми', '3-мкр', '3 мик'],
'4 мкр': ['4 мкр', '4мкр', '4мик', '4-ми', '4-мкр', '4 мик'],
'5 мкр': ['5 мкр', '5мкр', '5мик', '5-ми', '5-мкр', '5 мик'],
'6 мкр': ['6 мкр', '6мкр', '6мик', '6-ми', '6-мкр', '6 мик'],
'7 мкр': ['7 мкр', '7мкр', '7мик', '7-ми', '7-мкр', '7 мик'],
'8 мкр': ['8 мкр', '8мкр', '8мик', '8-ми', '8-мкр', '8 мик'],
'9 мкр': ['9 мкр', '9мкр', '9мик', '9-ми', '9-мкр', '9 мик'],
'10 мкр': ['10 мкр', '10мкр', '10мик', '10-ми', '10-мкр', '10 мик'],
'11 мкр': ['11 мкр', '11мкр', '11мик', '11-ми', '11-мкр', '11 мик'],
'12 мкр': ['12 мкр', '12мкр', '12мик', '12-ми', '12-мкр', '12 мик'],
'110 квартал': ['110 квартал', '110-квартал'],
'1000 мелочей (Карпинка)': ['карпин', '1000 мелоч', '1000-мелоч'],
'Азамат авторынок': ['Азамат авторынок', 'Азамат авторынок', 'авторынок'],
'Азия Молл': ['азия мол', 'азия-мол', 'Азия Молл'],
'Ак-Бата ж/м': ['ак-бата', 'ак бата'],
'Ак-Босого ж/м': ['ак-босого', 'ак босого'],
'Ак-Жар ж/м': ['ак-жар', 'ак жар'],
'Ак-Кеме (старый аэропорт)': ['ак-кеме', 'ак кеме', 'старый аэропорт',
'старого аэропор', 'старый аеропорт',
'старого аеропор'],
'Ак-Орго ж/м': ['ак-орго', 'ак орго'],
'Ак-Ордо 1 ж/м': ['ак-ордо 1', 'ак ордо 1', 'акордо 1'],
'Ак-Ордо 2 ж/м': ['ак-ордо 1', 'ак ордо 2', 'акордо 2'],
'Ак-Ордо 3 ж/м': ['ак-ордо 3', 'ак ордо 3', 'акордо 3'],
'Ак-Ордо ж/м': ['ак-ордо', 'ак ордо', 'ак ордо', 'ак-ордо', 'акордо'],
'Ак-Тилек ж/м': ['ак-тилек', 'актилек', 'ак тилек'],
'Ала-Арча ТРЦ': ['ала-арча', 'ала арча'],
'Ала-Арча ж/м': ['ала-арча ж/м', 'ж/м ала-арча', 'ж/м ала арча', 'ала арча ж/м'],
'Ала-Тоо ж/м': ['Ала-Тоо ж/м', 'ив Ала-Тоо ж/м', 'Ала-Тоо ж/м'],
'Аламедин-1 мкр': ['Аламедин 1', 'Аламедин-1', 'Аламедин1'],
'Аламединский рынок / базар': ['Аламединс', 'рынок Аламедин', 'аламедин баз'],
'Алтын-Казык ж/м': ['Алтын-Казык', 'Алтын Казык', 'Алтын-Казык ж/м',
'Алтын Казык ж/м', 'ж/м Алтын-Казык', 'ж/м Алтын Казык'],
'Алтын-ордо ж/м': ['Алтын-ордо', 'Алтын ордо'],
'Анар ж/м': ['Анар'],
'Арча-Бешик ж/м': ['Арча-Бешик ж/м'],
'Асанбай мкр': ['Асанбай', 'Асанбай мкр'],
'Аска-Таш ж/м': ['Аска-Таш', 'Аска Таш'],
'Ата-Журт ж/м': ['Ата-Журт ж/м', 'Ата-Журт ж/м'],
'Ата-Тюрк парк': ['Ата-Тюрк', 'Ата Тюрк', 'Ата-турк', 'Ата турк', 'Ататюрк', 'Ататурк'],
'Аэропорт Манас': ['Аэропорт Манас', 'Аэропорта Манас'],
'БГУ': ['бгу', 'БГУ'],
'Бакай-Ата ж/м': ['Бакай-Ата ж/м', 'Бакай-Ата'],
'Баткенский рынок / базар': ['Баткенский рынок', 'рынок Баткен', 'баткен базар'],
'Баят рынок / базар': ['Баят рынок / базар', 'Баят базар', 'рынок баят'],
'Бета Сторес': ['Бета Сторес', 'Бета-Сторес'],
'Бета Сторес 2': ['Бета Сторес 2', 'Бета-Сторес 2', 'Бета Сторес2', 'Бета-Сторес2'],
'Биримдик-Кут ж/м': ['Биримдик-Кут', 'Биримдик Кут'],
'Бишкек Парк ТРЦ': ['Бишкек Парк', 'Бишкек-Парк', 'БишкекПарк'],
'Ботанический сад': [' Ботанический сад', 'Ботсад', 'Бот.сад'],
'Бугу-Эне-Багыш ж/м': ['Бугу-Эне-Багыш', 'Бугу Эне Багыш'],
'Бугу-Эне-Сай ж/м': [' Бугу-Эне-Сай', 'Бугу Эне Сай'],
'Военный городок': [' Военный городок', 'Военный-городок'],
'Восток-5 мкр': ['Восток-5', 'Восток 5', 'Восток5'],
'Восточный (старый) автовокзал': [' Восточный автовокзал', ' Восточного автовокзал',
'старый автовокзал', 'старого автовокзал',
'Восточный(с', 'Восточный (с'],
'Газ Городок': [' Газ Городок', ' Газ-Городок'],
'Гоин': ['Гоин', 'гоин'],
'Городок Энергетиков': [' Городок Энергетиков', ' Городок-Энергетиков'],
'Городок строителей': [' Городок строителей', ' Городок-строителей'],
'Городская больница №4 (ул. Айни)': ['айни', 'больница №4'],
'Городское ГАИ': ['Городское ГАИ', 'гаи'],
'Дворец спорта': ['Дворец спорта', 'Дворец-спорта'],
'Джал мкр (в т.ч. Верхний, Нижний, Средний)': ['жал'],
'Джунхай рынок': ['Джунхай рынок', 'Джунхай'],
'Дордой-1 ж/м': ['Дордой-1 ж/м', 'Дордой-1'],
'Дордой-2 ж/м': ['Дордой-2 ж/м', 'Дордой-2'],
'Дордой Моторс рынок': ['Дордой Моторс', 'Дордой Моторс'],
'Дордой рынок / базар': ['Дордой рынок', 'рынок дордой', 'дордой базар'],
'Достук мкр': ['Достук', 'Достук мкр'],
'ЖД вокзал': ['ЖД вокзал', 'ЖД-вокзал'],
'Западный (новый) автовокзал': ['западного автовокзал', 'Западный автовокзал',
'новый автовокзал'],
'Интрегельпо': ['Интрегельпо', 'Интрегель'],
'КНУ': ['кну', 'КНУ'],
'Калыс-ордо ж/м': ['Калыс-ордо', 'Калыс-ордо ж/м'],
'Кара-Жыгач ж/м': ['Кара-Жыгач', 'Кара Жыгач'],
'Караван ТРЦ': ['Караван', 'Караван ТРЦ'],
'Карагачевая роща': ['Карагачевая роща', 'Карагачевая-роща'],
'Келечек ж/м': ['Келечек ж/м', 'ж/м Келечек'],
'Керемет ж/м': ['Керемет ж/м', 'ж/м Керемет'],
'Киргизия 1': ['Киргизия 1', 'Киргизия-1', 'Киргизия1', 'Киргизия'],
'Киркомстром': ['Киркомстром', 'Кирком'],
'Кирпичный Завод': ['Кирпичный Завод', 'Завод Кирпичный'],
'Кожомкул стадион': ['Кожомкул стадион', 'стадион Кожомкул'],
'Кок-Жар мкр': ['Кок Жар', 'Кок-Жар', 'кокжар'],
'Колмо ж/м': ['Колмо ж/м', 'Колмо'],
'Красный Строитель ж/м': ['Красный Строитель', 'Красный-Строитель'],
'Кудайберген авторынок': ['Кудайберген', 'Кудайберген авторынок'],
'Кызыл Аскер': ['Кызыл Аскер', 'Кызыл-Аскер'],
'Кырман ж/м': ['Кырман ж/м', 'Кырман'],
'Мадина рынок': ['Мадина', 'мадина'],
'Мега Комфорт ТЦ': ['Мега Комфорт', 'Мега-Комфорт'],
'Мед Академия': ['Мед Академия', 'МедАкадемия', 'Мед. Академия', 'Мед-Академия',
'Мед.Академия'],
'Моссовет': ['Моссовет', 'Мосовет'],
'Мурас-Ордо ж/м': ['Мурас-Ордо', 'Мурас Ордо'],
'Орок ж/м': ['Орок', 'Орок ж/м'],
'Ортосайский рынок / базар': ['Ортосай', 'Ортосайский рынок / базар'],
'Оскон-ордо ж/м': ['Оскон-ордо', 'Оскон-ордо ж/м'],
'Ошский рынок / базар': ['Ошский', 'ош базар'],
'Панорама': ['Панорама', 'Панорама'],
'Пишпек': ['Пишпек', 'Пишпек'],
'Политех': ['Политех'],
'Полицейский городок ж/м': ['Полицейский городок ж/м', 'Полицейский городок'],
'Рабочий Городок': [' Рабочий Городок', ' Рабочий-Городок'],
'Рухий Мурас ж/м': ['Рухий-Мурас', 'Рухий Мурас'],
'Рынок Баят': ['Рынок Баят', 'Баят'],
'Салам-Алик ж/м': ['Салам-Алик', 'Салам-Алик ж/м'],
'Сары-Озон Дыйкан рынок': ['Сары-Озон', 'дыйкан', 'сары озон'],
'Сары-Челек ж/м': ['Сары-Челек', 'Сары Челек'],
'Совмина мкр': ['Совмина', 'Совмина мкр'],
'Старый толчок рынок / базар': ['Старый толчок', 'Старый толчок рынок / базар'],
'ТЭЦ': ['район ТЭЦ', 'районе ТЭЦ'],
'Таатан ТЦ': ['Таатан', 'Таатан ТЦ'],
'Таш-Добо ж/м': ['Таш-Добо', 'Таш Добо'],
'Таш-Рабат ТРЦ': ['Таш-Рабат', 'Таш Рабат'],
'Тендик ж/м': ['Тендик ж/м', 'Тендик'],
'Токольдош': ['Токольдош', 'Токольдош'],
'Тунгуч мкр': [' Тунгуч', 'Тунгуч '],
'Тынчтык ж/м': ['Тынчтык ж/м', 'Тынчтык ж/м'],
'Улан мкр': [' Улан', ' Улан мкр', 'Улан'],
'Умут ж/м': [' Умут ж/м', ' Умут'],
'Учкун мкр': [' Учкун', 'Учкун мкр'],
'Физприборы': ['физприбор', 'физ.прибор', 'физ прибор'],
'Филармония': [' Филармони', ' Филармония', 'Филармония '],
'Центральная мечеть': ['Центральная мечеть', 'Центральный мечеть'],
'Церковь': ['Церковь', 'Церков'],
'Цум': ['Цум', 'Цум'],
'Чекиш-Ата рынок': ['Чекиш-Ата рынок', 'Чекиш-Ата'],
'Шлагбаум': ['район Шлагбаум', 'Шлагбаума'],
'Шоро завод': ['Шоро завод', 'Шоро'],
'Ынтымак ж/м': ['Ынтымак ж/м', ' Ынтымак ж'],
'Эне-Сай ж/м': ['Эне-Сай', 'Эне-Сай ж/м'],
'Энесай ж/м': ['Энесай', 'Энесай ж/м'],
'Юг-2 мкр': ['Юг-2', 'Юг 2', 'Юг2'],
'парк Фучика': ['Фучика', 'парк Фучика']}
with sq.connect('version_1/lalago.db') as con:
cur = con.cursor()
add_link_list = cur.execute(f"SELECT link FROM link_kv").fetchall()
for i in add_link_list:
add_link = (((str(i)).replace("',)","")).replace("('","")).replace(' ','')
print(add_link)
cur = con.cursor()
cur.execute("""CREATE TABLE IF NOT EXISTS base (
rooms INTEGER DEFAULT 0,
series TEXT,
ploshad INTEGER DEFAULT 0,
remont TEXT,
etaj INTEGER DEFAULT 0,
etaj_iz INTEGER DEFAULT 0,
rayon TEXT DEFAULT 'Бишкек',
data_soz TEXT,
data_prod TEXT,
prodaves TEXT,
price INTEGER DEFAULT 1,
an TEXT,
osenka TEXT,
add_link TEXT,
poslednyi_et TEXT,
photo_links TEXT
)""")
# Check that the listing is not already in the database
print('Проверка на уникальность...')
cur.execute(f"SELECT add_link FROM base where add_link =?", (add_link,))
try:
r = requests.get(add_link, headers=headers,proxies=get_proxy(proxy_list), timeout=5)
soup = Bs(r.text, features="html.parser")
data_info = soup.find_all('div', class_='about-ad-info__date')
print(data_info)
except:
continue
if str(data_info) == '[]':
print('не активно')
else:
try:
if cur.fetchone() is None:
print('NEW ADS!!!')
# Parse the listing details
price = soup.find('span', class_='heading international-price').text
print(price)
image = soup.find('div', class_='desktop css-10yjukn')
photo_links = []
for i in str(image).split('"'):
if 'https://img5.lalafo.com/i/posters/api' in i:
i = i.replace("/api/", "/original/")
if i in photo_links:
continue
else:
photo_links.append(i)
try:
opisanie = soup.find('div', class_='description__wrap').text
link_user = 'https://lalafo.kg' + (soup.find('a', class_='userName')).attrs['href']
except:
print('ошибка получение ссылки пользователя или Описания!!!')
print('точка 1')
for i in data_info:
i = i.text
if 'Создано: ' in i:
i = i.replace('Создано: ', '')
data_soz = i
elif 'Обновлено: ' in i:
i = i.replace('Обновлено: ', '')
data_prod = i
else:
data_soz = ' '
data_prod = ' '
continue
print('точка 2')
# Normalize the price string
price = price.replace('USD', '').replace(' ', '').replace('KGS', '')
parameters = soup.find('ul', class_='details-page__params css-tl517w')
remont = ' '
series = ' '
ploshad = '0'
etaj = '0'
etaj_iz = '0'
rayon = ' '
prodaves = ' '
rooms = '0'
poslednyi_et = ' '
print('точка 3')
try:
print('parametr', parameters)
for i in parameters:
if 'Площадь (м2):' in i.text:
i = i.text
i = i.replace('Площадь (м2):', '')
ploshad = i
elif 'Количество комнат:' in i.text:
i = i.text
i = i.replace('Количество комнат:', '').replace(' комнаты', '').replace(
' комната',
'').replace(' комнат', '')
rooms = i
if rooms == 'Студия':
rooms = 1
elif 'Этаж:' in i.text:
i = i.text
i = i.replace('Этаж:', '')
etaj = i
elif 'Количество этажей:' in i.text:
i = i.text
i = i.replace('Количество этажей:', '')
etaj_iz = i
elif 'Район:' in i.text:
i = i.text
i = i.replace('Район:', '')
rayon = i
elif 'Тип предложения:' in i.text:
i = i.text
i = i.replace('Тип предложения:', '')
prodaves = i
elif 'Серия:' in i.text:
i = i.text
i = i.replace('Серия:', '')
series = i
elif 'Ремонт:' in i.text:
i = i.text
i = i.replace('Ремонт:', '')
remont = i
else:
continue
# Analyze the description text to infer missing fields
if series == ' ':
elitka_list = ['элитка', 'элитный', 'премиум', 'элит']
seria_102 = ['102 серия', '102 сер', '102сер', '102']
seria_104 = ['104 серия', '104 сери', '104сер', '104']
seria_105 = ['105 серия', '105 сери', '105сер', '105']
seria_106 = ['106 серия', '106 сери', '106сер', '106']
hrishevka = ['хрущёвка', 'хрущевка', 'хруш', "хрущ"]
for i in seria_102:
if i in opisanie:
series = "102 серия"
else:
continue
for i in seria_104:
if i in opisanie:
series = '104 серия'
else:
continue
for i in seria_105:
if i in opisanie:
series = '105 серия'
else:
continue
for i in seria_106:
if i in opisanie:
series = '106 серия'
else:
continue
for i in hrishevka:
if i in opisanie:
series = 'Хрущёвка'
else:
continue
for i in elitka_list:
if i in opisanie:
series = 'Элитка'
else:
continue
print('Стандартизация ремонта')
if remont == ' ':
bez_remont = ['псо', 'без ремонта', 'требуеться рем', 'ПСО', 'самоотделк',
'само отде', 'под само']
st_remont = ['старый ремонт', 'ремонт старый', "требуеться косм", 'состояние сре']
sv_remont = ['свежий ремонт', 'новый ремонт', 'ремонт', 'обои', 'состояние']
ev_remont = ['Евроремонт', 'евро', 'Eвроремонт', 'техника', 'мебел']
for i in st_remont:
if i in opisanie:
remont = 'Старый ремонт'
else:
continue
for i in sv_remont:
if i in opisanie:
remont = 'Свежий ремонт'
else:
continue
for i in bez_remont:
if i in opisanie:
remont = 'Без ремонта'
else:
continue
for i in ev_remont:
if i in opisanie:
remont = 'Евроремонт'
else:
continue
print('Стандартизация Района')
if rayon == ' ':
for k, v in rayon_list.items():
for i in v:
if i in opisanie:
rayon = k
print('Стандартизация площади')
# Standardize the floor area from the series and room count
series = str(series)
rooms = int(rooms)
if series == '102 серия':
if rooms == 1:
ploshad = 30
elif rooms == 2:
ploshad = 42
elif rooms == 3:
ploshad = 56
elif series == "104 серия":
if rooms == 1:
ploshad = 32
elif rooms == 2:
ploshad = 43
elif rooms == 3:
ploshad = 58
elif series == "104 серия улучшенная":
if rooms == 1:
ploshad = 30
elif rooms == 2:
ploshad = 42
elif rooms == 3:
ploshad = 56
elif series == "105 серия":
if rooms == 1:
ploshad = 34
elif rooms == 2:
ploshad = 50
elif rooms == 3:
ploshad = 62
elif rooms == 4:
ploshad = 74
elif rooms == 5:
ploshad = 87
elif series == "105 серия улучшенная":
if rooms == 1:
ploshad = 34
elif rooms == 2:
ploshad = 50
elif rooms == 3:
ploshad = 62
elif rooms == 4:
ploshad = 74
elif rooms == 5:
ploshad = 87
elif series == "106 серия":
if rooms == 1:
ploshad = 35
elif rooms == 2:
ploshad = 52
elif rooms == 3:
ploshad = 66
elif series == "106 серия улучшенная":
if rooms == 1:
ploshad = 35
elif rooms == 2:
ploshad = 52
elif rooms == 3:
ploshad = 66
elif series == "Хрущёвка":
if rooms == 1:
ploshad = 30
elif rooms == 2:
ploshad = 42
elif rooms == 3:
ploshad = 56
else:
pass
except:
print('Точка 4')
print('Точка 5')
AN = str(filter_AN(link_user=link_user))
print('точка 7')
if price == 'Договорная':
continue
try:
# Check whether the apartment is a good deal
if 5 < int(ploshad) < 350:
if 280 < int(price) < 1800:
price = int(ploshad) * int(price)
if int(etaj) == int(etaj_iz):
poslednyi_et = 'последний этаж'
# Estimate the deal quality against the average price
sr_price = osenka_kv(series=series, remont=remont, rooms=rooms, rayon=rayon,
etaj=etaj,
etaj_iz=etaj_iz, poslednyi_et=poslednyi_et)
price_m2 = round(int(price) / int(ploshad))
sr_price = int(sr_price)
if int(price_m2) < int(sr_price):
print('Оценка: TRUE')
osenka = 'Выгодный'
if rayon == ' ':
print('Район пустой')
continue
else:
osenka = 'Выше среднего'
base_add(rooms=rooms, series=series, ploshad=ploshad, remont=remont, etaj=etaj,
etaj_iz=etaj_iz, rayon=rayon, prodaves=prodaves, price=price, AN=AN,
add_link=add_link, poslednyi_et=poslednyi_et, photo_links=photo_links,
data_soz=data_soz, data_prod=data_prod)
print('Добавлено в базу!!!')
else:
print('Не корректный площад')
base_add(rooms=rooms, series=series, ploshad=ploshad, remont=remont, etaj=etaj,
etaj_iz=etaj_iz, rayon=rayon, prodaves=prodaves, price=price, AN=AN,
add_link=add_link, poslednyi_et=poslednyi_et, photo_links=photo_links, data_soz=data_soz,data_prod=data_prod)
print('Добавлено в базу!!!')
continue
except:
base_add(rooms=rooms, series=series, ploshad=ploshad, remont=remont, etaj=etaj,
etaj_iz=etaj_iz, rayon=rayon, prodaves=prodaves, price=price, AN=AN,
add_link=add_link, poslednyi_et=poslednyi_et, photo_links=photo_links,
data_soz=data_soz, data_prod=data_prod)
print('exept')
print('Обновление базы!!!')
print(osenka)
with sq.connect('version_1/lalago.db') as con:
cur = con.cursor()
cur.execute("""CREATE TABLE IF NOT EXISTS base (
rooms INTEGER DEFAULT 0,
series TEXT,
ploshad INTEGER DEFAULT 0,
remont TEXT,
etaj INTEGER DEFAULT 0,
etaj_iz INTEGER DEFAULT 0,
rayon TEXT DEFAULT 'Бишкек',
data_soz TEXT,
data_prod TEXT,
prodaves TEXT,
price INTEGER DEFAULT 1,
an TEXT,
osenka TEXT,
add_link TEXT,
poslednyi_et TEXT,
photo_links TEXT
)""")
cur.execute(
f"UPDATE base SET osenka = '{str(osenka)}' WHERE add_link = '{str(add_link)}' ")
con.commit()
print('ОЦЕНКА ОБНОВЛЕНО!!!')
else:
pass
except:
continue
Thread(target=Starter_check_lalafo_kg, args=()).start()  # pass the function object so the loop runs in the background thread
| NurAbain/kvartira_bishkek_1.0.0 | parser_kvartiry.py | parser_kvartiry.py | py | 36,966 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "random.choice",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "fake_useragent.UserAgent",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup... |
30710495366 | import matplotlib
matplotlib.use('Agg')
import sys
import pynbody
import pynbody.plot as pp
import pynbody.plot.sph as sph
import matplotlib.pylab as plt
import numpy as np
import h5py as h5
pynbody.config['number_of_threads'] = 2
def make_plots():
output = int(sys.argv[1])
plot_type = sys.argv[2]
cr = int(sys.argv[3])
if cr:
s = pynbody.load('/nobackup/ibutsky/simulations/patient0_agncr/pioneer50h243.1536gst1bwK1BH.%06d'%(output))
else:
s = pynbody.load('/nobackup/ibutsky/simulations/patient0_nocr/pioneer50h243.1536gst1bwK1BH.%06d'%(output))
# s = pynbody.load('/nobackup/nnsanche/pioneer50h243.1536g1bwK1BH/pioneer50h243.1536gst1bwK1BH.%06d'%(output))
s.physical_units()
if plot_type == 'density':
make_density_plots(s, output, cr)
elif plot_type == 'rotation':
make_rotation_curves(s, output, cr)
elif plot_type == 'temperature':
make_temperature_plots(s, output, cr)
elif plot_type == 'all':
make_density_plots(s, output, cr)
make_rotation_curves(s, output, cr)
elif plot_type == 'sfh':
make_sfh(s, output, cr)
def make_density_plots(s, output, cr):
pynbody.analysis.angmom.faceon(s.g)
sph.image(s.g,qty="rho",units="g cm^-3",width=60,cmap="magma", vmin=5e-27, vmax=1e-23)
if cr:
plt.savefig('pioneer.%06d_density_faceon.png'%(output))
else:
plt.savefig('pioneer.%06d_density_faceon_nocr.png'%(output))
plt.clf()
pynbody.analysis.angmom.sideon(s.s)
sph.image(s.g,qty="rho",units="g cm^-3",width=60,cmap="magma", vmin=5e-27, vmax=1e-23)
if cr:
plt.savefig('pioneer.%06d_density_sideon.png'%(output))
else:
plt.savefig('pioneer.%06d_density_sideon_nocr.png'%(output))
plt.clf()
def make_temperature_plots(s, output, cr):
pynbody.analysis.angmom.faceon(s.g)
sph.image(s.g,qty="temp",units="K",width=100,cmap="afmhot", vmin=1e4, vmax=1e6)
if cr:
plt.savefig('pioneer.%06d_temperature_faceon.png'%(output))
else:
plt.savefig('pioneer.%06d_temperature_faceon_nocr.png'%(output))
plt.clf()
pynbody.analysis.angmom.sideon(s.s)
sph.image(s.g,qty="temp",units="K",width=100,cmap="afmhot", vmin=1e4, vmax=1e6)
if cr:
plt.savefig('pioneer.%06d_temperature_sideon.png'%(output))
else:
plt.savefig('pioneer.%06d_temperature_sideon_nocr.png'%(output))
plt.clf()
def make_sfh(s, output, cr):
#s_agn = pynbody.load('/nobackup/nnsanche/pioneer50h243.1536g1bwK1BH/pioneer50h243.1536gst1bwK1BH.003456')
s_agn = pynbody.load('/nobackup/ibutsky/simulations/patient0_nocr/pioneer50h243.1536gst1bwK1BH.%06d'%(output))
# s_sn = pynbody.load('/nobackup/nnsanche/NO_BHs/pioneer50h243.1536gst1bwK1/pioneer50h243.1536gst1bwK1.003456')
s_cr = pynbody.load('/nobackup/ibutsky/simulations/patient0_new/pioneer50h243.1536gst1bwK1BH.002432')
s_agncr = pynbody.load('/nobackup/ibutsky/simulations/patient0_agncr/pioneer50h243.1536gst1bwK1BH.%06d'%(output))
plt.xlim(0, 12)
# sfh_p0noBH = pp.sfh(s_sn, label = 'Thermal SNe only')
sfh_cr = pp.sfh(s_cr, label = 'CR SNe feedback')
sfh_p0 = pp.sfh(s_agn, label = 'P0')
sfh_agncr = pp.sfh(s_agncr, label = 'P0+CR')
plt.legend()
plt.savefig('sfh_compare_%06d.png'%(output))
print(sfh_cr[0])
# print(sfh_cr.d)
f = h5.File('sfh_data.h5', 'w')
f.create_dataset("sfh_p0", data=sfh_p0[0])
f.create_dataset("sfh_agncr", data=sfh_agncr[0])
f.create_dataset("sfh_cr", data = sfh_cr[0])
f.create_dataset("time_p0", data=sfh_p0[1])
f.create_dataset("time_agncr", data=sfh_agncr[1])
f.create_dataset("time_cr", data = sfh_cr[1])
f.close()
def make_rotation_curves(s, output, cr):
pynbody.analysis.angmom.faceon(s.g)
if cr:
s.g['eps'] = s.g['soft']
s.d['eps'] = s.d['soft']
s.s['eps'] = s.s['soft']
p = pynbody.analysis.profile.Profile(s,min=.01,max=500,type='log',ndim=3)
pg = pynbody.analysis.profile.Profile(s.g,min=.01,max=500,type='log',ndim=3)
ps = pynbody.analysis.profile.Profile(s.s,min=.01,max=500,type='log',ndim=3)
pd = pynbody.analysis.profile.Profile(s.d,min=.01,max=500,type='log',ndim=3)
# make the plot
plt.plot(p['rbins'],p['v_circ'],label='total')
plt.plot(pg['rbins'],pg['v_circ'],label='gas')
plt.plot(ps['rbins'],ps['v_circ'],label='stars')
plt.plot(pd['rbins'],pd['v_circ'],label='dm')
plt.xlabel('R [kpc]')
plt.ylabel(r'$v_c$ [km/s]')
plt.legend()
if cr:
plt.savefig('pioneer.%06d_rotation_curve.png'%(output))
else:
plt.savefig('pioneer.%06d_rotation_curve_nocr.png'%(output))
plt.clf()
make_plots()
| ibutsky/synthetic_spectra | scripts/plotting/pynbody_plots.py | pynbody_plots.py | py | 4,708 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.use",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "pynbody.config",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_... |
7442842816 | import requests
import card
import json
import sys
from bs4 import BeautifulSoup
# Get Request for Webpage
def card_page_init(card_webpage):
response = requests.get(card_webpage)
if response.status_code != 200:
print("Error fetching page")
exit()
else:
content = response.content
return BeautifulSoup(response.content, 'html.parser')
# Returns list card URLs to be searched from set table
def card_urls(tag):
url_list = []
table = tag.find('table', class_='wikitable')
for row in table.find_all('a'):
href = row['href']
url_list.append(href)
return url_list
# Gets card ability/effect information from wiki page
def get_card_ability(tag):
card_text = tag.findAll(name="tr")
card_text_list = []
for line in card_text:
card_lines = line.get_text(separator=" ", strip=True)
card_text_list.append(card_lines)
ability_index = card_text_list.index('Ability / Effect') + 1
return card_text_list[ability_index]
# Gets specific card information from wiki page
def get_card_info(tag):
card_stats = tag.findAll(name="table", class_="main")
stats_list = []
for stat in card_stats:
stats = stat.get_text(separator=";", strip=True)
stats_list = stats.split(";")
stats_list.append(get_card_ability(tag))
return stats_list
# Getting Card types from wiki page (Monster, Spell, Impact)
def get_card_type(tag):
card_type = None
if tag.find(name='a', string="Monster") is not None:
card_type = "Monster"
elif tag.find(name='a', string="Spell") is not None:
card_type = "Spell"
elif tag.find(name='a', string="Impact") is not None:
card_type = "Impact"
elif tag.find(name='a', string="Item") is not None:
card_type = "Item"
return card_type
#Gets a list of card attributes
def grab_card_attributes(info):
startIndex = info.index("Attribute") + 1
if "Illust" in info or "Design / Illust" in info:
if "Illust" in info:
endIndex = info.index("Illust")
elif "Design / Illust" in info:
endIndex = info.index("Design / Illust")
else:
endIndex = -1
attribute_list = info[startIndex : endIndex]
attributes = attribute_list[::2]
return attributes
# Creates a card object based on card type and adds to list
# Make this function modular
def card_create(tag, c_list):
card_type = get_card_type(tag)
info = get_card_info(tag)
attribute = grab_card_attributes(info)
if card_type == "Monster":
if info[2] == "Kanji" and info[4] == "Kana":
c_list.append(card.Monster(info[1], info[19], info[11], info[13], info[15], info[17], info[-1], attribute).__dict__)
else:
c_list.append(card.Monster(info[1], info[17], info[9], info[11], info[13], info[15], info[-1], attribute).__dict__)
elif card_type == "Spell":
if info[2] == "Kanji" and info[4] == "Kana":
if(info[8] == "Translated"):
c_list.append(card.Spell(info[1], info[13], info[-1], attribute).__dict__)
else:
c_list.append(card.Spell(info[1], info[11], info[-1], attribute).__dict__)
else:
c_list.append(card.Spell(info[1], info[9], info[-1], attribute).__dict__)
elif card_type == "Impact":
c_list.append(card.Impact(info[1], info[11], info[-1], attribute).__dict__)
elif card_type == "Item":
c_list.append(card.Item(info[1], info[15], info[11], info[13], info[-1], attribute).__dict__)
soup = card_page_init(str(sys.argv[1]))
card_url_list = card_urls(soup)
card_url_list = card_url_list[1::3]
card_list = []
#Iterates through Card URLs in Set and Creates Card Objects
for url in card_url_list:
soup = card_page_init("https://buddyfight.fandom.com/" + url)
card_create(soup, card_list)
#Opens/Creates json File and Dumps Card Objects
jsonFileName = str(sys.argv[1]).split('/')[-1].replace(":", "") + ".json"
with open("card_set_data/"+ jsonFileName, 'w') as f:
json.dump(card_list, f, indent=4)
print("\n--Set Completed--\n")
| dm554/BuddyfightCardWebScraper | main.py | main.py | py | 4,141 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "card.Monster",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "card.Monster",
"line_... |
20741454900 | import json
import re
import logging
from __init__ import app
def extract_dispatch_content(text):
pattern = r'R9\.redux\.dispatch\((\{.+?\})\)'
matches = re.findall(pattern, text, flags=re.DOTALL)
return matches
def add_quotes(json_str):
# Add double quotes around the (unquoted) object keys
regex_keys = r'(?<={|,)\s*([a-zA-Z0-9]+)\s*:'
result = re.sub(regex_keys, r' "\1":', json_str)
# Replace single quotes with double quotes
pattern = r"(?<!\w)'(?!\w)|(?<=\w)'(?!\w)|(?<!\w)'(?=\w)"
result = re.sub(pattern, '"', result)
return result
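# Illustrative, hypothetical example of what add_quotes produces:
#   add_quotes("{type: 'FlightResultsList', state: {results: []}}")
#   -> '{ "type": "FlightResultsList", "state": { "results": []}}'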
def get_json_from_kayak_response(response_text):
if response_text:
response_json = json.loads(response_text)
# Access the JSON fields and extract the script contents
scripts = response_json.get('bufferedScripts')
# Combine all the script strings into a single string
combined_scripts = ' '.join(scripts)
# Find every occurrence of 'R9.redux.dispatch(...)'
matches = extract_dispatch_content(combined_scripts)
# Find the payload whose 'type' is 'FlightResultsList'
flight_results_list = None
for match in matches:
formatted_match = add_quotes(match)
json_data = json.loads(formatted_match)
logging.info(json_data.get('type'))
if 'FlightResultsList' in json_data.get('type'):
flight_results_list = json_data.get('state').get('results')
return flight_results_list
app.logger.debug("No se encontró un objeto con 'type' igual a 'FlightResultsList'")
| danielurrutxua/easyflight | scrapi/app/search/service/sources/utils/kayak_parser.py | kayak_parser.py | py | 1,665 | python | es | code | 1 | github-code | 1 | [
{
"api_name": "re.findall",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "re.DOTALL",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "re.sub",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 18,
"u... |
74540232672 | import os
import re
from traitlets import List, default
from .base import BaseConverter
from ..preprocessors import (
InstantiateTests,
ClearOutput,
CheckCellMetadata
)
from traitlets.config.loader import Config
from typing import Any
from ..coursedir import CourseDirectory
class GenerateSourceWithTests(BaseConverter):
@default("permissions")
def _permissions_default(self) -> int:
return 664 if self.coursedir.groupshared else 644
@property
def _input_directory(self) -> str:
return self.coursedir.source_directory
@property
def _output_directory(self) -> str:
return self.coursedir.source_with_tests_directory
preprocessors = List([
InstantiateTests,
ClearOutput,
CheckCellMetadata
]).tag(config=True)
def _load_config(self, cfg: Config, **kwargs: Any) -> None:
super(GenerateSourceWithTests, self)._load_config(cfg, **kwargs)
def __init__(self, coursedir: CourseDirectory = None, **kwargs: Any) -> None:
super(GenerateSourceWithTests, self).__init__(coursedir=coursedir, **kwargs)
def start(self) -> None:
old_student_id = self.coursedir.student_id
self.coursedir.student_id = '.'
try:
super(GenerateSourceWithTests, self).start()
finally:
self.coursedir.student_id = old_student_id
| jupyter/nbgrader | nbgrader/converters/generate_source_with_tests.py | generate_source_with_tests.py | py | 1,375 | python | en | code | 1,232 | github-code | 1 | [
{
"api_name": "base.BaseConverter",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "traitlets.default",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "traitlets.List",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "preprocessors.Ins... |
32458720992 | # CS5487 demo script for Programming Assignment 2
#
# The script has been tested with python 2.7.6
#
# It requires the following modules:
# numpy 1.8.1
# matplotlib v1.3.1
# scipy 0.14.0
# Image (python image library)
import pa2
import numpy as np
import pylab as pl
import scipy.io as sio
from PIL import Image
from Assignment2.Problem1.K_means import *
from Assignment2.Problem1.EM import *
from Assignment2.Problem1.Mean_shift import *
from Assignment2.Problem1.P1_function import *
#import Image
def demo():
import scipy.cluster.vq as vq
## load and show image
img = Image.open('../images/12003.jpg')
pl.subplot(1, 3, 1)
pl.imshow(img)
## extract features from image (step size = 7)
X, L = pa2.getfeatures(img, 7)
## Call kmeans function in scipy. You need to write this yourself!
# C,Y = vq.kmeans2(vq.whiten(X.T), 2, iter=1000, minit='random')
d = 4
K = 4
N = X.shape[1]
Mju = RandomMju(150, 155, K, d)
##########Kmeans##########
#Y = Clustering_Kmeans(Mju, X, K, N, d, 1000, 0.000000000001)
##########EM#########
#d = 4
#K = 2
#SIGMA = RandomSIGMA(100, 110, K, d)
#Pi = [0.1, 0.1, 0.05, 0.1, 0.1, 0.1, 0.1, 0.1, 0.05, 0.2]
#Y = Clustering_EM(Mju, SIGMA, Pi, X, K, d, X.shape[1], 800, 0.00000000000001)
###################
#####Meanshift####
h = 45
maxRound = 30
limitation = 0.00001
cluster_gap = 8
X_peak = Peak_MeanShift(X, d, N, h, maxRound, limitation)
cluster = Clustering_MeanShift(X, X_peak, N, d, cluster_gap)
Y = cluster[1]
##########################
# Use matlab 1-index labeling
# make segmentation image from labels
segm = pa2.labels2seg(Y, L)
pl.subplot(1, 3, 2)
pl.imshow(segm)
# color the segmentation image
csegm = pa2.colorsegms(segm, img)
picname ="seastar"
title = "Menshift, h="+str(h)
pl.title(title)
pl.subplot(1, 3, 3)
pl.imshow(csegm)
pl.savefig('/Users/gaobrook/Desktop/Clustering/problem2/'+title+'_'+picname+'.png', dpi=200)
pl.show()
def main():
demo()
if __name__ == '__main__':
main()
| brookgao/Machine-Learning | Assignment2/Problem2/python/test_pa2.py | test_pa2.py | py | 2,147 | python | en | code | 12 | github-code | 1 | [
{
"api_name": "PIL.Image.open",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "pylab.subplot",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pylab.imshow",
"line_numbe... |
3275424876 | from typing import List
from PyQt5 import QtGui
import networkx as nx
from commandbar.api import cmdutils
from commandbar.commands.cmdexc import PrerequisitesError
from commandbar.utils import objreg
from mainwindow.graph_view import GraphView
from mainwindow.mainwindow import MainWindow
from tree_covers.pygraph.metric_spaces import tree_cover_bad_pairs, tree_cover_embedding_distortion
@cmdutils.register(name='calc-stretch', instance='main-window', scope='window', maxsplit=0)
def calculate_stretch(self: MainWindow):
"""
calculate the stretch of the first graph in the window with respect to the cover
that is induced by all other graphs in the window
"""
if len(self.canvases) <= 1:
raise PrerequisitesError("Number of graph view must be more then 1 to calculate stretch")
V = set(self.canvases[0].G.nodes)
if not all(V.issubset(gv.G.nodes) for gv in self.canvases[1:]):
raise PrerequisitesError("the set of verticies of the first graph must"
"be a subset of the vertices of tje other graphs")
sc = StretchCalculator(self.canvases)
objreg.register('stretch-calculator', sc, scope='window', window=self.win_id)
class StretchCalculator:
def __init__(self, graph_views: List[GraphView]) -> None:
self.graph_views = graph_views
self.bad_pairs = self.calc_and_display_strecth_cb()
# for gv in self.graph_views:
# gv.G.register_on_edge_add_callback(self.calc_and_display_strecth_cb)
# gv.G.register_on_edge_remove_callback(self.calc_and_display_strecth_cb)
self.last_bad_pair = None
def calc_and_display_strecth_cb(self, *args):
dist = tree_cover_embedding_distortion(self.graph_views[0].G, [gv.G for gv in self.graph_views[1:]])
print(dist)
self.bad_pairs = tree_cover_bad_pairs(self.graph_views[0].G, [gv.G for gv in self.graph_views[1:]], 1.1)
self.bad_pairs = list(self.bad_pairs)
print(self.bad_pairs)
self.bad_pairs = iter(self.bad_pairs)
return self.bad_pairs
@cmdutils.register(instance='stretch-calculator', name='next-stetch-pair', scope='window')
def next_pair(self):
"""
displays the next pair that has high stretch
and its respective paths in each graph view
"""
if self.last_bad_pair:
u, v = self.last_bad_pair
for gv in self.graph_views:
p = nx.shortest_path(gv.G, u, v)
gv.draw_edges(zip(p, p[1:]))
gv.draw_vertices(p)
try:
u, v = next(self.bad_pairs)
self.last_bad_pair = u, v
for gv in self.graph_views:
p = nx.shortest_path(gv.G, u, v)
red_pen = QtGui.QPen(QtGui.QColor('red'))
gv.draw_edges(zip(p, p[1:]), red_pen, None)
gv.draw_vertices(p, red_pen)
except StopIteration:
raise PrerequisitesError("Out of bad pairs") | yairmol/graphui | commands/stretch.py | stretch.py | py | 3,000 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "mainwindow.mainwindow.MainWindow",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "commandbar.commands.cmdexc.PrerequisitesError",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "commandbar.commands.cmdexc.PrerequisitesError",
"line_number":... |
21106216381 | # -*- coding: utf-8 -*-
# This script goes through USFM files, generating a list of verses that contain properly marked footnotes.
# Reports errors to stderr and issues.txt.
# Set source_dir and usfmVersion to run.
# Global variables
source_dir = r'C:\DCS\Portuguese\pt-br_ulb'
usfmVersion = 2 # if version 3.0 or greater, tolerates unknown tokens and verse fragments
import os
import sys
import parseUsfm
import io
import re
import json
if usfmVersion >= 3.0:
import usfm_utils
vv_re = re.compile(r'([0-9]+)-([0-9]+)')
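# vv_re matches verse ranges such as "3-5", capturing the start and end verse numbers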
class State:
IDs = []
ID = ""
chapter = 0
verse = 0
reference = ""
footnoteRefs = list()
# Resets state data for a new book
def addID(self, id):
State.IDs.append(id)
State.ID = id
State.chapter = 0
State.verse = 0
State.reference = id
def getIDs(self):
return State.IDs
def addChapter(self, c):
State.chapter = int(c)
State.verse = 0
State.reference = State.ID + " " + c
def addVerse(self, v):
State.verse = int(v)
State.reference = State.ID + " " + str(State.chapter) + ":" + v
# Adds the current reference to the list of footnote references
def addFootnote(self):
if self.reference not in State.footnoteRefs:
self.footnoteRefs.append(self.reference)
# Returns list of footnote references as a json string
def getFootnoteReferences(self):
return json.dumps(self.footnoteRefs)
def countFootnotes(self):
return len(self.footnoteRefs)
def shortname(longpath):
shortname = longpath
if source_dir in longpath:
shortname = longpath[len(source_dir)+1:]
return shortname
def takeID(id):
state = State()
if len(id) < 3:
reportError("Invalid ID: " + id)
id = id[0:3].upper()
if id in state.getIDs():
reportError("Duplicate ID: " + id)
state.addID(id)
def takeC(c):
state = State()
state.addChapter(c)
# Receives a string containing a verse number or range of verse numbers.
# Reports errors related to the verse number(s), such as missing or duplicated verses.
def takeV(vstr):
state = State()
vlist = []
if vstr.find('-') > 0:
vv_range = vv_re.search(vstr)
if vv_range:
vn = int(vv_range.group(1))
vnEnd = int(vv_range.group(2))
while vn <= vnEnd:
vlist.append(vn)
vn += 1
else:
reportError("Problem in verse range near " + State.reference)
else:
vlist.append(int(vstr))
for vn in vlist:
v = str(vn)
state.addVerse(str(vn))
# Writes error message to stderr and to issues.txt.
def reportError(msg):
try:
sys.stderr.write(msg + "\n")
except UnicodeEncodeError as e:
state = State()
sys.stderr.write(state.reference + ": (Unicode...)\n")
# Returns true if token is a countable part of a footnote
def isFootnote(token):
# return token.isF_S() or token.isF_E() or token.isFR() or token.isFR_E() or token.isFT() or token.isFP() or token.isFE_S() or token.isFE_E()
return token.isF_S() or token.isF_E()
def take(token):
global usfmVersion
state = State()
if isFootnote(token):
state.addFootnote()
if token.isID():
takeID(token.value)
elif token.isC():
if not state.ID:
reportError("Missing book ID: " + state.reference)
sys.exit(-1)
takeC(token.value)
elif token.isV():
takeV(token.value)
# Corresponding entry point in tx-manager code is verify_contents_quiet()
def processFile(path):
input = io.open(path, "tr", 1, encoding="utf-8-sig")
str = input.read(-1)
input.close()
if usfmVersion >= 3.0:
str = usfm_utils.usfm3_to_usfm2(str)
print("CHECKING " + shortname(path))
sys.stdout.flush()
for token in parseUsfm.parseString(str):
take(token)
state = State()
state.addID("")
sys.stderr.flush()
# Verifies all .usfm files under the specified folder.
def processDir(dirpath):
for f in os.listdir(dirpath):
if f[0] != '.': # ignore hidden files
path = os.path.join(dirpath, f)
if os.path.isdir(path):
# It's a directory, recurse into it
processDir(path)
elif os.path.isfile(path) and path[-3:].lower() == 'sfm':
processFile(path)
# Writes list of footnote references to a file.
def dumpFootnoteReferences():
state = State()
refs = state.getFootnoteReferences()
if refs:
path = os.path.join(source_dir, "footnotedVerses.json")
footnoteFile = io.open(path, "tw", encoding='utf-8', newline='\n')
footnoteFile.write(refs)
footnoteFile.close()
sys.stdout.write("Found " + str(state.countFootnotes()) + " footnotes.\n")
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] != 'hard-coded-path':
source_dir = sys.argv[1]
if os.path.isdir(source_dir):
processDir(source_dir)
elif os.path.isfile(source_dir):
path = source_dir
source_dir = os.path.dirname(path)
processFile(path)
else:
reportError("File not found: " + source_dir)
dumpFootnoteReferences()
| unfoldingWord-dev/tools | usfm/listFootnotes.py | listFootnotes.py | py | 5,352 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "re.compile",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number... |
34623651816 | # -*- coding: utf-8 -*-
# +
import argparse
import datetime
import random
import time
import pandas as pd
from pathlib import Path
import torch
import torchvision.transforms as standard_transforms
import numpy as np
from PIL import Image
import cv2
from crowd_datasets import build_dataset
from engine import *
from models import build_model
import os
import warnings
warnings.filterwarnings('ignore')
def get_args_parser():
parser = argparse.ArgumentParser('Set parameters for P2PNet evaluation', add_help=False)
# * Backbone
parser.add_argument('--backbone', default='vgg16_bn', type=str,
help="name of the convolutional backbone to use")
parser.add_argument('--row', default=2, type=int,
help="row number of anchor points")
parser.add_argument('--line', default=2, type=int,
help="line number of anchor points")
parser.add_argument('--output_dir', default='./',
help='path where to save')
parser.add_argument('--weight_path', default='./weights/SHTechA.pth',
help='path where the trained weights saved')
parser.add_argument('--gpu_id', default=0, type=int, help='the gpu used for evaluation')
return parser
def main(args, debug=False):
start=time.time()
os.environ["CUDA_VISIBLE_DEVICES"] = '{}'.format(args.gpu_id)
print(args)
device = torch.device('cuda')
# get the P2PNet
model = build_model(args)
# move to GPU
model.to(device)
# load trained model
if args.weight_path is not None:
checkpoint = torch.load(args.weight_path, map_location='cpu')
model.load_state_dict(checkpoint['model'])
# convert to eval mode
model.eval()
# create the pre-processing transform
transform = standard_transforms.Compose([
standard_transforms.ToTensor(),
standard_transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
url = "https://889962c23ee6.ap-northeast-2.playback.live-video.net/api/video/v1/ap-northeast-2.196202674046.channel.ppLGhcD0q6ZR.m3u8"
cap=cv2.VideoCapture(url)
fourcc = cv2.VideoWriter_fourcc(*'FMP4')
fps = cap.get(cv2.CAP_PROP_FPS)
w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)  # get the original video frame size
h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
out = cv2.VideoWriter('output.mp4', fourcc, 30, (int(w//128*128),int(h//128*128)))
while True:
hasFrame, img_frame = cap.read()  # read the next frame
if not hasFrame:  # stop when there are no more frames to read
break
img_raw = Image.fromarray(img_frame).convert('RGB')
# round the size
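# (width and height are truncated down to multiples of 128 below, presumably so the
# network's strided layers divide the frame evenly; this mirrors the original P2PNet demo)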
width, height = img_raw.size
new_width = width // 128 * 128
new_height = height // 128 * 128
img_raw = img_raw.resize((new_width, new_height), Image.ANTIALIAS)
# pre-proccessing
img = transform(img_raw)
samples = torch.Tensor(img).unsqueeze(0)
samples = samples.to(device)
# run inference
outputs = model(samples)
outputs_scores = torch.nn.functional.softmax(outputs['pred_logits'], -1)[:, :, 1][0]
outputs_points = outputs['pred_points'][0]
threshold = 0.5
# filter the predictions
points = outputs_points[outputs_scores > threshold].detach().cpu().numpy().tolist()
predict_cnt = int((outputs_scores > threshold).sum())
outputs_scores = torch.nn.functional.softmax(outputs['pred_logits'], -1)[:, :, 1][0]
outputs_points = outputs['pred_points'][0]
# draw the predictions
size = 5
img_to_draw = cv2.cvtColor(np.array(img_raw), cv2.COLOR_RGB2BGR)
for p in points:
img_to_draw = cv2.circle(img_to_draw, (int(p[0]), int(p[1])), size, (0, 0, 255), -1)
img_to_draw=cv2.cvtColor(np.array(img_to_draw), cv2.COLOR_BGR2RGB)
out.write(img_to_draw)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if predict_cnt>=5:
print(predict_cnt)
outputs_df = pd.DataFrame(points, columns=['width', 'height'])
outputs_df.to_csv('tmp_points.csv', encoding='utf-8')
print("original size: ", width, height)
print("resized size: ", new_width, new_height)
cap.release()
out.release()
print(time.time()-start)
if __name__ == '__main__':
parser = argparse.ArgumentParser('P2PNet evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
main(args)
# -
| kookmin-sw/capstone-2023-26 | headcount/CrowdCounting_P2PNet/run_test_stream.py | run_test_stream.py | py | 4,511 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.environ"... |
29341535711 | import cv2
import numpy as np
img = cv2.imread('/home/kanish/Documents/ICR advanced forms/Advanced handwritting samples/athul_scanned/525/525_2.png', 0)
kernel = np.ones((5, 1), np.uint8)
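# cv2.erode takes the local minimum over the kernel footprint; a 5x1 (tall, thin)
# kernel of ones therefore grows dark strokes vertically by two pixels in each direction.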
erosion = cv2.erode(img, kernel, iterations=1)
cv2.imwrite('morphex.png', erosion)
cv2.imshow('gray', erosion)
cv2.waitKey(0)
| kanishmathew777/image_processing | backend/image_processing_backend/scipy_width_path_finder/image_preprocessing/denosing.py | denosing.py | py | 318 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.imread",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "cv2.erode",
"line_number": 7,... |
8202759653 | import requests
from bs4 import BeautifulSoup as soup
def requesting_ip():
HEADERS = {
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#'Accept-Encoding':'gzip, deflate, br',
'Accept-Language':'en-US,en;q=0.5',
'Connection':'keep-alive',
'Upgrade-Insecure-Requests':'1',
'User-Agent':'Mozilla/5.0 (Windows NT 6.1; rv:60.0) Gecko/20100101 Firefox/60.0',
}
url = 'https://www.duckduckgo.com/html?q=my+ip'
if url:
r = requests.get(url, headers=HEADERS)
page = soup(r.text, 'html5lib')
page = str(page).splitlines()
return page
else:
print('< check url >')
def parsing_ip(page):
for line in page:
if 'Your IP address is ' in line and ' in <a href="http' in line:
line = line.replace('Your','\nYour').splitlines()[1]
line = line.split(' ')
print(f'{line[0]} {line[1]} {line[2]} {line[3]} {line[4]} ')
def main():
page = requesting_ip()
if page:
parsing_ip(page)
if __name__ == '__main__':
main()
| DemolitionLovers/PythonCode | Requests-DDG.py | Requests-DDG.py | py | 1,134 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 20,
"usage_type": "call"
}
] |
32601137836 | #!/usr/bin/python3
from typing import List
import json
from bplib.butil import TreeNode, arr2TreeNode, btreeconnect
class Solution:
def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:
st = {}
for n in nums1:
if n not in st:
st[n] = 0
st[n] += 1
ans = []
for n in nums2:
if n in st and st[n] > 0:
st[n] -= 1
ans.append(n)
return ans
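# Example: intersect([1, 2, 2, 1], [2, 2]) -> [2, 2]; each value is emitted as many
# times as it appears in both input lists.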
arr1 = json.loads(input())
arr2 = json.loads(input())
sol = Solution()
print(sol.intersect(arr1, arr2))
| negibokken/sandbox | leetcode/350_intersection_of_two_arrays_II/main.py | main.py | py | 588 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 24,
"usage_type": "call"
}
] |
7807302748 | # importing Numpy package
import numpy as np
# To declare symbol variables x and y
from sympy import symbols
# You can't use numpy arrays for symbolic calculations with sympy. Instead, use a sympy Matrix
# (https://stackoverflow.com/questions/68589864/matrix-determinant-symbolic-in-python)
import sympy as sp
# creating a 3X3 Numpy matrix
n_array = np.array([[55, 25, 15],
[30, 44, 2],
[11, 45, 77]])
# Displaying the Matrix
print("Numpy Matrix is:")
print(n_array)
# calculating the determinant of matrix
det = np.linalg.det(n_array)
print("\nDeterminant of given 3X3 square matrix:")
print(int(det))
# FOR DETERMINANTS WITH SYMBOLIC VARIABLES
# Class problem set 1, exercise 2
# Declaring symbolic variables x, y and z
x = sp.symbols('x')
y = sp.symbols('y')
z = sp.symbols('z')
# creating a 3X3 sp matrix
matrix = sp.Matrix(([1, x, x**2],
[1, y, y**2],
[1, z, z**2]))
# Displaying the Matrix
print("Matrix is:")
print(matrix)
# calculating the determinant of the sympy matrix
det3 = sp.det(matrix)
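# This is the 3x3 Vandermonde determinant; the symbolic result factors as (y - x)*(z - x)*(z - y)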
print("\nDeterminant of given 3X3 square matrix:")
print((det3)) | Sergio-Ibarra-1795/Python-self-1 | Primer_semestre/Mate/Determinant.py | Determinant.py | py | 1,173 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.array",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.det",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "sympy.symbols",
"l... |
38901553531 | import os
import traceback
from prefect import flow
from conf.config import DATA_DIR, DATASET_EXCEL_LINKS
from src.utils import write_local_to_parquet
from src.flows.pipeline_components import ingest_data_from_list_files, load_csv_dataset, save_dataset_to_csv, upload_to_gcs_bucket, write_data_google_bq
@flow(name="end_to_end_pipeline_from_http_to_bq",
description="Flow that download the dataset in excel format given a list of links (excel), "
"Combine the data, save it to csv, convert to parquet and then upload to google cloud bucket",
log_prints=True)
def end_to_end_pipeline_from_http_to_bq(filename_arr_path: list = DATASET_EXCEL_LINKS,
name_output_dataset="global_terrorism_db"):
"""
End to end pipeline
Download files given an array of links > convert to csv > save to parquet format > upload to google cloud bucket
> upload to google big query
:param filename_arr_path:
:param name_output_dataset:
:return:
"""
try:
# 0. Check beforehand if there is CSV dataset so that we don't download again
filename_csv = name_output_dataset + '.csv'
path_filename_csv = DATA_DIR.joinpath(filename_csv)
if not os.path.isfile(path_filename_csv):
# 1. Download excel data from links (github)
combined_data_df = ingest_data_from_list_files(filename_arr_path)
# 2. Convert data to csv
path_filename_csv = save_dataset_to_csv(combined_data_df, name_output_dataset)
else:
print("CSV file already exists")
# 3. Load the csv dataset freshly created
data_df = load_csv_dataset(path_filename_csv)
# 4. save to parquet format
dataset_parquet_path = os.path.splitext(path_filename_csv)[0] + ".parquet"
write_local_to_parquet(data_df, dataset_parquet_path)
# 5. Upload to GCS
upload_to_gcs_bucket(dataset_parquet_path)
# 6. Upload to Google Big query
write_data_google_bq(data_df)
except Exception as ex:
print("Exception raised: ", ex)
print(traceback.format_exc())
exit(-1)
if __name__ == '__main__':
name_output_data = "global_terrorism_db"
filename_arr_path = DATASET_EXCEL_LINKS
end_to_end_pipeline_from_http_to_bq(filename_arr_path=DATASET_EXCEL_LINKS, name_output_dataset=name_output_data)
| lironesamoun/data-engineering-capstone-project | src/flows/parameterized_flow_http_pipeline.py | parameterized_flow_http_pipeline.py | py | 2,406 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "conf.config.DATASET_EXCEL_LINKS",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "conf.config.DATA_DIR.joinpath",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "conf.config.DATA_DIR",
"line_number": 26,
"usage_type": "name"
},
{
... |
20962583832 | from pyspark import SparkConf
from pyspark.sql import SparkSession
# Configure and build the Spark session
sparkConf = SparkConf()
sparkConf.set("spark.app.name", "My Application 1")
sparkConf.set("spark.master", "local[2]")
spark = SparkSession.builder\
.config(conf=sparkConf)\
.getOrCreate()
spark.sparkContext.setLogLevel("ERROR")
# Read the CSV input with an explicit DDL schema
DDLString = "country String, weeknum Int, numinvoices Int, totalquantity Int, invoicevalue Double"
input_df = spark.read\
.format("csv")\
.schema(DDLString)\
.option("path", "C:/Users/chnis/Downloads/Big Data/week11/windowdata.csv")\
.load()
# Write the data back out as Avro, partitioned by country
input_df.write\
.format("avro")\
.partitionBy("country")\
.mode("Overwrite")\
.option("path", "C:/Users/chnis/Downloads/Big Data/week11/windowdata_output_avro")\
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession",
"line_number": 9,
"usage_type": "name"
}
] |
26021338569 | from django.conf.urls.defaults import *
from django.conf import settings
from django.contrib import admin
from seishinkan.website.feeds import NewsFeed, TerminFeed
admin.autodiscover()
feeds = {
'termine': TerminFeed,
'news': NewsFeed,
}
urlpatterns = patterns('',
(r'^i18n/', include('django.conf.urls.i18n')),
(r'^verwaltung/doc/', include('django.contrib.admindocs.urls')),
(r'^verwaltung/', include(admin.site.urls)),
(r'^feed/termine/$', TerminFeed()),
(r'^feed/news/$', NewsFeed()),
)
if 'rosetta' in settings.INSTALLED_APPS:
urlpatterns += patterns('',
url(r'^rosetta/', include('rosetta.urls')),
)
# Simple Generic Views
urlpatterns += patterns('django.views.generic.simple',
(r'^accounts/$', 'redirect_to', {'url': '/'}),
)
# Seishinkan Website Views
urlpatterns += patterns('seishinkan.website.views',
(r'^$', 'index'),
(r'^login/$', 'seishinkan_login'),
(r'^lang/(.*)/$', 'set_lang'),
(r'^logout/$', 'seishinkan_logout'),
(r'^seite/(\d+)/$', 'index'),
(r'^links/$', 'links'),
(r'^kontakt/$', 'kontakt'),
(r'^news/$', 'news'),
(r'^news/(\d+)/$', 'news'),
(r'^news/archiv/$', 'news'),
(r'^termin/$', 'termin'),
(r'^termin/(\d+)/$', 'termin'),
(r'^termin/archiv/$', 'termin'),
(r'^termine/$', 'termin'),
(r'^termine/archiv/$', 'termin'),
(r'^impressum/$', 'impressum'),
(r'^info/$', 'info'),
(r'^sysinfo/$', 'sysinfo'),
(r'^videos/$', 'video'),
(r'^bilder/$', 'picasa_albums'),
(r'^bilder/(.+)/$', 'picasa_photos'),
(r'^downloads/$', 'downloads'),
# (r'^videos/(.+)/$', 'video'),
(r'^email/$', 'mailinglist'),
(r'^log/$', 'admin_log'),
(r'^permissions/$', 'permissions'),
(r'^graduierungen/$', 'graduierungen'),
(r'^mitglieder/$', 'mitgliederlisten'),
(r'^mitglieder/csv/$', 'mitglieder_csv'),
(r'^mitglieder/csv/(\d+)/$', 'mitglieder_csv'),
(r'^mitglieder/xls/$', 'mitglieder_xls'),
(r'^mitglieder/xls/(\d+)/$', 'mitglieder_xls'),
(r'^trainerliste/(\d+)/(\d+)/$', 'trainerliste_xls'),
(r'^teilnehmerliste/(\d+)/(\d+)/$', 'teilnehmerliste_xls'),
# dynamic_url must come last
(r'^(.+)/$', 'dynamic_url'),
)
if settings.DEBUG:
urlpatterns += patterns('',
(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
)
handler404 = 'seishinkan.website.views.my_404'
| marcusti/seishinkan | urls.py | urls.py | py | 2,423 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.contrib.admin.autodiscover",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "seishinkan.website.feeds.TerminFeed",
"line_number": 9,
"usage_type": "name"
},
... |
24893300426 | # -*- coding: utf-8 -*-
import scrapy
from scrapy import Request
from lxml import etree
from WaiBaoSpider.utils.csvWriter import CSVDumper
from WaiBaoSpider.utils.base import unicode_body, deal_ntr
import os
class LuAnSpider(scrapy.Spider):
name = "luan"
# base_url = "http://www.luan.gov.cn/nocache/supervision/?product_id=3&page={}"
base_url = "http://www.luan.gov.cn/zxly/?type=product&page={}"
data_path = os.getcwd() + "/WaiBaoSpider/data/"
if os.path.exists(data_path):
pass
else:
os.mkdir(data_path)
dump_list = CSVDumper(data_path + "%s_list.csv" % name)
dump_detail = CSVDumper(data_path + "%s_detail.csv" % name)
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36",
}
# custom_settings = {
# }
def start_requests(self):
for i in range(1, 915):
# for i in range(2, 4):
url = self.base_url.format(i)
print(url)
yield Request(url, headers=self.headers, callback=self.parse_list)
# def parse_list(self, response):
# body = unicode_body(response)
# html = etree.HTML(body)
# lines = html.xpath("//table[@class='is-xjnr']/tr")
# print(len(lines))
# for info in lines:
# item = {}
# item[u"类别"] = info.xpath("./td[1]/text()")[0].strip() if info.xpath("./td[1]/text()") else ""
# title = info.xpath("./td[3]/a//text()") if info.xpath("./td[3]/a//text()") else []
# item[u"来信标题"] = deal_ntr("".join(title).strip())
# item[u"来信时间"] = info.xpath("./td[5]/text()")[0].strip() if info.xpath("./td[5]/text()") else ""
# item[u"处理状态"] = info.xpath("./td[7]/span/text()")[0].strip() if info.xpath("./td[7]/span/text()") else ""
# item[u"浏览次数"] = info.xpath("./td[9]/text()")[0].strip() if info.xpath("./td[9]/text()") else ""
# item[u"回复评价"] = info.xpath("./td[11]/a/text()")[0].strip() if info.xpath("./td[11]/a/text()") else ""
# item[u"链接"] = "http://www.luan.gov.cn{}".format(
# info.xpath("./td[3]/a/@href")[0].strip() if info.xpath("./td[3]/a/@href") else "")
# if info.xpath("./td[3]/a/span/text()"):
# item[u"是否公开"] = u"不公开"
# item_detail = {
# u"链接": item[u"链接"],
# u"标题": item[u"来信标题"],
# u"来信人": u"",
# u"来信时间": item[u"来信时间"],
# u"处理情况": item[u"处理状态"],
# u"督办部门": u"",
# u"问题类别": u"",
# u"浏览": item[u"浏览次数"],
# u"来信内容": u"",
# u"回复内容": u"",
# u"回复单位": u"",
# u"回复时间": u"",
# u"是否公开": u"否",
# }
# self.dump_detail.process_item(item_detail)
# else:
# item[u"是否公开"] = u"公开"
# yield Request(item[u"链接"], headers=self.headers, callback=self.parse_detail,
# meta={"url": item[u"链接"], "llcs": item[u"浏览次数"], "clzt": item[u"处理状态"]})
# self.dump_list.process_item(item)
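# Parse one page of the complaint list: extract the title, date and detail-page link of every entry, dump the row to the list CSV, then follow the link to parse_detail.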
def parse_list(self, response):
body = unicode_body(response)
html = etree.HTML(body)
lines = html.xpath("//ul[@class='is-listnews']/li")
print(len(lines))
for info in lines:
item = {}
item[u"来信标题"] = info.xpath("./a/text()")[0].strip() if info.xpath("./a/text()") else ""
item[u"来信时间"] = info.xpath("./span/text()")[0].strip() if info.xpath("./span/text()") else ""
item[u"链接"] = "http://www.luan.gov.cn{}".format(
info.xpath("./a/@href")[0].strip() if info.xpath("./a/@href") else "")
self.dump_list.process_item(item)
yield Request(item[u"链接"], headers=self.headers, callback=self.parse_detail,
meta={"url": item[u"链接"]})
def parse_detail(self, response):
body = unicode_body(response)
data = response.meta
html = etree.HTML(body)
item = {}
item[u"链接"] = data["url"]
item[u"标题"] = html.xpath("//div[@class='is-newstitle']/text()")[0].strip() if html.xpath(
"//div[@class='is-newstitle']/text()") else ""
item[u"来信人"] = html.xpath("//div[@class='is-mailinfo']/text()[1]")[0].strip().replace(u"来信人:",
u"") if html.xpath(
"//div[@class='is-mailinfo']/text()[1]") else ""
item[u"来信时间"] = html.xpath("//div[@class='is-mailinfo']/text()[2]")[0].strip().replace(u"来信时间:",
u"") if html.xpath(
"//div[@class='is-mailinfo']/text()[2]") else ""
item[u"处理情况"] = html.xpath("//span[@class='orange state']/text()")[0].strip() if html.xpath(
"//span[@class='orange state']/text()") else ""
dbbm = html.xpath("//div[@class='is-mailinfo']/span[4]/text()[1]")[0].strip().replace(u"|",
u"").replace(
u"督办部门:", u"") if html.xpath("//div[@class='is-mailinfo']/span[4]/text()[1]") else ""
item[u"督办部门"] = deal_ntr(dbbm)
item[u"问题类别"] = html.xpath("//div[@class='is-mailinfo']/span[4]/span[@class='red'][1]/text()")[
0].strip() if html.xpath("//div[@class='is-mailinfo']/span[4]/span[@class='red'][1]/text()") else ""
item[u"浏览"] = html.xpath("//div[@class='is-mailinfo']/span[4]/span[@class='red'][2]/text()")[
0].strip() if html.xpath("//div[@class='is-mailinfo']/span[4]/span[@class='red'][2]/text()") else ""
lxnr = html.xpath("//div[@class='is-mailwen']/p//text()") if html.xpath(
"//div[@class='is-mailwen']/p//text()") else []
item[u"来信内容"] = deal_ntr("".join(lxnr))
hf_content = html.xpath("//div[@class='is-hfcontent']//text()") if html.xpath(
"//div[@class='is-hfcontent']//text()") else []
item[u"回复内容"] = deal_ntr("".join(hf_content))
item[u"回复单位"] = html.xpath("//div[@class='is-mialhf']/h1/span[2]/text()")[0].strip().replace(u"回复单位:",
u"") if html.xpath(
"//div[@class='is-mialhf']/h1/span[2]/text()") else ""
item[u"回复时间"] = html.xpath("//div[@class='is-mialhf']/h1/span[1]/text()")[0].strip().replace(u"回复时间:",
u"") if html.xpath(
"//div[@class='is-mialhf']/h1/span[1]/text()") else ""
self.dump_detail.process_item(item)
| jamesfyp/WaiBaoSpider | WaiBaoSpider/spiders/liuan.py | liuan.py | py | 7,327 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "scrapy.Spider",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_numbe... |
11196328840 | import numpy as np
from scipy import interpolate
class SkeletonASCIIPhraser(object):
def __init__(self, export_path):
self.__export_path = export_path
def save(self, beams):
file = open(self.__export_path, 'w')
self.__plot_analytical_skeleton_beams(file, beams)
file.close()
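# The exported ASCII file consists of blocks delimited by POINT/ENDPOINT, VECTOR/ENDVECTOR, SPLINE/ENDSPLINE, CURVE/ENDCURVE, SECTION/ENDSECTION, BEAM/ENDBEAM and JUNCTION/ENDJUNCTION keywords, each line listing an id followed by the referenced data.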
def __plot_analytical_skeleton_beams(self, file, beams):
# Map each exported id to its data in the dictionaries below
point_ids = {}
vector_ids = {}
spline_ids = {}
curve_ids = {}
cross_sections_ids = {}
beam_ids = {}
junction_connections = {}
# ID counters
point_id = 0
spline_id = 0
curve_id = 0
vector_id = 0
cross_section_id = 0
beam_id = 0
junction_id_point_list = {}
start_end_cross_section_correspondence = {}
for beam in beams:
spline = beam.spline
# calculate points
point_id += 1
p_jun_start_id = point_id
point_ids[point_id] = [spline.x[0], spline.y[0], spline.z[0]]
point_id += 1
# Save point to point id
point_ids[point_id] = [spline.x[-1], spline.y[-1], spline.z[-1]]
p_jun_end_id = point_id
# Calculate vectors
vector_id += 1
# Build up vector + id
n_x1 = spline.degree * (spline.x[0] - spline.x[1])
n_y1 = spline.degree * (spline.y[0] - spline.y[1])
n_z1 = spline.degree * (spline.z[0] - spline.z[1])
norm = np.sqrt(n_x1 ** 2 + n_y1 ** 2 + n_z1 ** 2)
norm_vec = 1.0 / norm * np.array([n_x1, n_y1, n_z1])
vector_ids[vector_id] = [norm_vec[0], norm_vec[1], norm_vec[2]]
vector_id += 1
n_x1 = spline.degree * (spline.x[-1] - spline.x[-2])
n_y1 = spline.degree * (spline.y[-1] - spline.y[-2])
n_z1 = spline.degree * (spline.z[-1] - spline.z[-2])
norm = np.sqrt(n_x1 ** 2 + n_y1 ** 2 + n_z1 ** 2)
norm_vec = 1.0 / norm * np.array([n_x1, n_y1, n_z1])
vector_ids[vector_id] = [norm_vec[0], norm_vec[1], norm_vec[2]]
# Calculate spline and curve
spline_id += 1
spline_ids[spline_id] = [point_id -1, point_id, vector_id -1, vector_id]
start_end_cross_section_correspondence[p_jun_start_id] = spline_id
start_end_cross_section_correspondence[p_jun_end_id] = spline_id
curve_id += 1
curve_ids[curve_id] = [spline_id]
# ---------- Cross section location start
cut_times = beam.cut_times
tck = [np.array(spline.t),
[np.array(spline.x), np.array(spline.y), np.array(spline.z)],
spline.degree]
[x_coordinates, y_coordinates, z_coordinates] = interpolate.splev(cut_times, tck)
[x_normals, y_normals, z_normals] = interpolate.splev(cut_times, tck, der=1)
point_id += 1
point_ids[point_id] = [x_coordinates[0], y_coordinates[0], z_coordinates[0]]
# Cross section location end
point_id += 1
point_ids[point_id] = [x_coordinates[1], y_coordinates[1], z_coordinates[1]]
p_c_start = point_id -1
p_c_end = point_id
# ----------- Cross section normal vector
vector_id += 1
vector_ids[vector_id] = [x_normals[0], y_normals[0], z_normals[0]]
vector_id += 1
vector_ids[vector_id] = [x_normals[1], y_normals[1], z_normals[1]]
v_start = vector_id -1
v_end = vector_id
# Calculate beam
beam_id += 1
beam_id_list = []
# Add spline for beam
beam_id_list.append(curve_id)
# Add all valid cross sections that are found
# Calculate start cross sections
if beam.crossSection1[0] != None:
[xx, yy, zz] = beam.crossSection1[0]
splines = []
for id in range(len(xx)):
point_id += 1
point_ids[point_id] = [xx[id], yy[id], zz[id]]
if id == 0:
spline_start_point = point_id
if id != 0:
spline_id += 1
spline_ids[spline_id] = [point_id - 1, point_id]
splines.append(spline_id)
# Last point and close polygon
if id == len(xx) - 1:
spline_id += 1
spline_ids[spline_id] = [point_id, spline_start_point]
splines.append(spline_id)
curve_id += 1
curve_ids[curve_id] = splines
beam_id_list.append(curve_id)
cross_section_id += 1
cross_sections_ids[cross_section_id] = [p_c_start, v_start, curve_id]
# Calculate end cross sections
if beam.crossSection2[0] != None:
[xx, yy, zz] = beam.crossSection2[0]
splines = []
for id in range(len(xx)):
point_id += 1
point_ids[point_id] = [xx[id], yy[id], zz[id]]
if id == 0:
spline_start_point = point_id
if id != 0:
spline_id += 1
spline_ids[spline_id] = [point_id - 1, point_id]
splines.append(spline_id)
# Last point and close polygon
if id == len(xx) - 1:
spline_id += 1
spline_ids[spline_id] = [point_id, spline_start_point]
splines.append(spline_id)
curve_id += 1
curve_ids[curve_id] = splines
beam_id_list.append(curve_id)
cross_section_id += 1
cross_sections_ids[cross_section_id] = [p_c_end, v_end, curve_id]
beam_ids[beam_id] = beam_id_list
# Save the junction information
end_marker = spline.end_connection_id
start_marker = spline.start_connection_id
if end_marker != None:
if tuple(end_marker) in junction_connections:
junction_connections[tuple(end_marker)].append(cross_section_id)
else:
junction_connections[tuple(end_marker)] = [cross_section_id]
# Check if the junction point is already set; otherwise insert it
if tuple(end_marker) in junction_id_point_list:
point_id_jun_end = junction_id_point_list[tuple(end_marker)] = point_id
else:
point_id += 1
point_ids[point_id] = [end_marker[0], end_marker[1], end_marker[2]]
point_id_jun_end = point_id
spline_id += 1
spline_ids[spline_id] = [p_jun_end_id, point_id_jun_end]
curve_id += 1
curve_ids[curve_id] = spline_id
# Create spline between junction and beam
if start_marker != None:
if tuple(start_marker) in junction_connections:
junction_connections[tuple(start_marker)].append(cross_section_id - 1)
else:
junction_connections[tuple(start_marker)] = [cross_section_id - 1]
# Check if the junction point is already set; otherwise insert it
if tuple(start_marker) in junction_id_point_list:
point_id_jun_start = junction_id_point_list[tuple(start_marker)] = point_id
else:
point_id += 1
point_ids[point_id] = [start_marker[0], start_marker[1], start_marker[2]]
point_id_jun_start = point_id
spline_id += 1
spline_ids[spline_id] = [p_jun_start_id, point_id_jun_start]
curve_id += 1
curve_ids[curve_id] = spline_id
# Create spline between junction and beam
file.write('POINT \n')
for point_key in point_ids:
if point_key in start_end_cross_section_correspondence:
file.write(
f'{point_key}, {point_ids[point_key][0]}, {point_ids[point_key][1]}, {point_ids[point_key][2]},'
f' {start_end_cross_section_correspondence[point_key]} \n')
else:
file.write(f'{point_key}, {point_ids[point_key][0]}, {point_ids[point_key][1]}, {point_ids[point_key][2]} \n')
file.write('ENDPOINT \n')
file.write('VECTOR \n')
for vector_key in vector_ids:
file.write(f'{vector_key}, {vector_ids[vector_key][0]}, {vector_ids[vector_key][1]}, {vector_ids[vector_key][2]} \n')
file.write('ENDVECTOR \n')
file.write('SPLINE \n')
for spline_key in spline_ids:
if len(spline_ids[spline_key]) == 2:
file.write(f'{spline_key}, {spline_ids[spline_key][0]}, {spline_ids[spline_key][1]} \n')
else:
file.write(
f'{spline_key}, {spline_ids[spline_key][0]}, {spline_ids[spline_key][1]}, {spline_ids[spline_key][2]}, {spline_ids[spline_key][3]} \n')
file.write('ENDSPLINE \n')
file.write('CURVE \n')
for curve_key in curve_ids:
spline_ids = curve_ids[curve_key]
if type(spline_ids) == int:
file.write(f'{curve_key}, {spline_ids}\n')
continue
if len(spline_ids) == 0: continue
file.write(f'{curve_key}')
for sp_id in spline_ids:
file.write(f', {sp_id}')
file.write("\n")
file.write('ENDCURVE \n')
file.write('SECTION \n')
for cross_key in cross_sections_ids:
file.write(f'{cross_key}, {cross_sections_ids[cross_key][0]}, {cross_sections_ids[cross_key][1]}, {cross_sections_ids[cross_key][2]}')
file.write("\n")
file.write('ENDSECTION \n')
file.write('BEAM \n')
for beam_key in beam_ids:
file.write(f'{beam_key}')
for beam_id_value in beam_ids[beam_key]:
file.write(f', {beam_id_value}')
file.write("\n")
file.write('ENDBEAM \n')
file.write('JUNCTION \n')
junction_id = 0
for junction_key, beam_ids in junction_connections.items():
if len(beam_ids) < 2:
continue
junction_id += 1
file.write(f'{junction_id}')
for cross_key in cross_sections_ids:
for beam_id in beam_ids:
if beam_id == cross_key:
print(cross_key)
print(cross_sections_ids[cross_key][2])
file.write(f', {cross_sections_ids[cross_key][2]}')
file.write("\n")
file.write('ENDJUNCTION \n')
file.close()
| Foxelmanian/ParametrizationSkeleton | SkeletonASCIIPhraser.py | SkeletonASCIIPhraser.py | py | 11,419 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.sqrt",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 57... |
29640416232 | import datatable as dt
import pytest
import random
import re
from datatable import stype, f
from datatable.internal import frame_integrity_check
from tests import noop, random_string, assert_equals
def test_issue_1912():
# Check that the expression `A==None` works if A is a string column
DT = dt.Frame(A=["dfv", None, None, "adfknlkad", None])
RES = DT[:, f.A == None]
assert RES.to_list()[0] == [0, 1, 1, 0, 1]
#-------------------------------------------------------------------------------
# split_into_nhot
#-------------------------------------------------------------------------------
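# split_into_nhot() turns a single string column of separated tags into one boolean column per distinct tag (n-hot encoding); the tests below cover separators, quoting, views and invalid inputs.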
def test_split_into_nhot_noarg():
with pytest.raises(ValueError) as e:
noop(dt.str.split_into_nhot())
assert ("Required parameter frame is missing" == str(e.value))
def test_split_into_nhot_none():
f0 = dt.str.split_into_nhot(None)
assert f0 is None
def test_split_into_nhot_empty():
f0 = dt.str.split_into_nhot(dt.Frame(["", None]))
assert_equals(f0, dt.Frame())
@pytest.mark.parametrize('sort', [False, True])
def test_split_into_nhot0(sort):
f0 = dt.Frame(["cat, dog, mouse, peacock, frog",
"armadillo, fox, hedgehog",
None,
"dog, fox, mouse, cat, peacock",
"horse, raccoon, cat, frog, dog"])
f1 = dt.str.split_into_nhot(f0, sort = sort)
frame_integrity_check(f1)
fr = dt.Frame({"cat": [1, 0, None, 1, 1],
"dog": [1, 0, None, 1, 1],
"mouse": [1, 0, None, 1, 0],
"peacock": [1, 0, None, 1, 0],
"frog": [1, 0, None, 0, 1],
"armadillo": [0, 1, None, 0, 0],
"fox": [0, 1, None, 1, 0],
"hedgehog": [0, 1, None, 0, 0],
"horse": [0, 0, None, 0, 1],
"raccoon": [0, 0, None, 0, 1]})
assert set(f1.names) == set(fr.names)
if sort :
assert list(f1.names) == sorted(fr.names)
fr = fr[:, f1.names]
assert f1.names == fr.names
assert f1.stypes == (dt.stype.bool8, ) * f1.ncols
assert f1.shape == fr.shape
assert f1.to_list() == fr.to_list()
def test_split_into_nhot1():
f0 = dt.Frame([" meow \n",
None,
"[ meow]",
"['meow' ,purr]",
'(\t"meow", \'purr\')',
"{purr}"])
f1 = dt.str.split_into_nhot(f0)
frame_integrity_check(f1)
fr = dt.Frame(meow=[1, None, 1, 1, 1, 0], purr=[0, None, 0, 1, 1, 1])
assert set(f1.names) == set(fr.names)
fr = fr[..., f1.names]
assert f1.shape == fr.shape == (6, 2)
assert f1.stypes == (dt.stype.bool8, dt.stype.bool8)
assert f1.to_list() == fr.to_list()
def test_split_into_nhot_sep():
f0 = dt.Frame(["a|b|c", "b|a", None, "a|c"])
f1 = dt.str.split_into_nhot(f0, sep="|")
assert set(f1.names) == {"a", "b", "c"}
fr = dt.Frame(a=[1, 1, None, 1], b=[1, 1, None, 0], c=[1, 0, None, 1])
assert set(f1.names) == set(fr.names)
assert f1.to_list() == fr[:, f1.names].to_list()
def test_split_into_nhot_quotes():
f0 = dt.str.split_into_nhot(dt.Frame(['foo, "bar, baz"']))
f1 = dt.str.split_into_nhot(dt.Frame(['foo, "bar, baz']))
assert set(f0.names) == {"foo", "bar, baz"}
assert set(f1.names) == {"foo", '"bar', "baz"}
def test_split_into_nhot_bad():
f0 = dt.Frame([[1.25], ["foo"], ["bar"]])
with pytest.raises(ValueError) as e:
dt.str.split_into_nhot(f0)
assert ("Function split_into_nhot() may only be applied to a single-column "
"Frame of type string; got frame with 3 columns" == str(e.value))
with pytest.raises(TypeError) as e:
dt.str.split_into_nhot(f0[:, 0])
assert ("Function split_into_nhot() may only be applied to a single-column "
"Frame of type string; received a column of type float64" ==
str(e.value))
with pytest.raises(ValueError) as e:
dt.str.split_into_nhot(f0[:, 1], sep=",;-")
assert ("Parameter sep in split_into_nhot() must be a single character"
in str(e.value))
@pytest.mark.parametrize("seed, st", [(random.getrandbits(32), stype.str32),
(random.getrandbits(32), stype.str64)])
def test_split_into_nhot_long(seed, st):
random.seed(seed)
n = int(random.expovariate(0.0001) + 100)
col1 = [random.getrandbits(1) for _ in range(n)]
col2 = [random.getrandbits(1) for _ in range(n)]
col3 = [random.getrandbits(1) for _ in range(n)]
col4 = [0] * n
data = [",".join(["liberty"] * col1[i] +
["equality"] * col2[i] +
["justice"] * col3[i]) for i in range(n)]
# Introduce 1% of None's, making sure we preserve data at freedom_index
na_indices = random.sample(range(n), n // 100)
freedom_index = random.randint(0, n - 1)
for i in na_indices:
if i == freedom_index: continue
col1[i] = None
col2[i] = None
col3[i] = None
col4[i] = None
data[i] = None
col4[freedom_index] = 1
data[freedom_index] += ", freedom"
f0 = dt.Frame(data, stype=st)
assert f0.stypes == (st,)
assert f0.shape == (n, 1)
f1 = dt.str.split_into_nhot(f0)
assert f1.shape == (n, 4)
fr = dt.Frame(liberty=col1, equality=col2, justice=col3, freedom=col4)
assert set(f1.names) == set(fr.names)
f1 = f1[..., fr.names]
assert f1.to_list() == fr.to_list()
def test_split_into_nhot_view():
f0 = dt.Frame(A=["cat,dog,mouse", "mouse", None, "dog, cat"])
f1 = dt.str.split_into_nhot(f0[::-1, :])
f2 = dt.str.split_into_nhot(f0[3, :])
assert set(f1.names) == {"cat", "dog", "mouse"}
assert f1[:, ["cat", "dog", "mouse"]].to_list() == \
[[1, None, 0, 1], [1, None, 0, 1], [0, None, 1, 1]]
assert set(f2.names) == {"cat", "dog"}
assert f2[:, ["cat", "dog"]].to_list() == [[1], [1]]
def test_split_into_nhot_deprecated():
DT = dt.Frame(["a, b, c"])
with pytest.warns(FutureWarning):
RES = dt.split_into_nhot(DT)
EXP = dt.Frame([[1], [1], [1]], names=["a", "b", "c"], stype=dt.bool8)
assert_equals(RES, EXP)
| h2oai/datatable | tests/munging/test-str.py | test-str.py | py | 6,248 | python | en | code | 1,763 | github-code | 1 | [
{
"api_name": "datatable.Frame",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datatable.f.A",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "datatable.f",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "pytest.raises",
"l... |
8595088170 | import hashlib
import zlib
from PyQt5 import QtCore, QtGui, QtWidgets
from file_browse import *
data=[
{'Check':'','More':'','Details':'','@timestamp':'2023/03/12 12:00:00','Rule':'Enumeration of users or Groups','Severity':'low','Risk Score':'21','Reason':'process event with process dsmemberutil, parent process bash, by root'},
{'Check':'','More':'','Details':'','@timestamp':'2023/03/12 12:00:01','Rule':'Potential persitance via login hook','Severity':'medium','Risk Score':'50','Reason':'modify key-value pairs in plist files to influence system behaviors, such as hiding the execution of an application (i.e. Hidden Window) or running additional commands for persistence '},
{'Check':'','More':'','Details':'','@timestamp':'2023/03/12 12:00:02','Rule':'Malware prevention alert','Severity':'high','Risk Score':'73','Reason':'malware, intrusion_detection file event with process AYHelperService'},
{'Check':'','More':'','Details':'','@timestamp':'2023/03/12 12:00:00','Rule':'Enumeration of users or Groups','Severity':'low','Risk Score':'21','Reason':'process event with process dsmemberutil, parent process bash, by root'}
]
class Ui_FileScan(QtWidgets.QWidget):
def __init__(self):
super().__init__()
self.vLayout1 = QtWidgets.QVBoxLayout()
self.vLayout2=QtWidgets.QVBoxLayout()
self.layout = QtWidgets.QHBoxLayout()
self.bigLayout=QtWidgets.QVBoxLayout()
self.add_label("Sandbox",1.5,Qt.AlignCenter,20)
self.add_file_browser_layer1()
self.add_submit_url_layer2()
self.buttonClose = QtWidgets.QPushButton('Back')
self.buttonClose.clicked.connect(self.close)
self.vLayout1.addStretch()
self.vLayout2.addStretch()
self.bigLayout.addLayout(self.layout)
self.add_label("Drag your file into left field or click to select a file",0.5,Qt.AlignVCenter,10)
self.bigLayout.addStretch()
self.setLayout(self.bigLayout)
def add_label(self,title,width,alignment,fontsize):
label_princ = QtWidgets.QLabel(title, self)
label_princ.setStyleSheet("border: "+str(width)+"px solid black;")
label_princ.setAlignment(alignment)
label_princ.setFont(QtGui.QFont('Times',fontsize))
self.bigLayout.addWidget(label_princ)
def add_file_browser_layer1(self):
self.fileBrowser=FileBrowser('Open File')
self.vLayout1.addWidget(self.fileBrowser)
self.buttonAnayze = QtWidgets.QPushButton('Analyze')
self.buttonAnayze.clicked.connect(self.analize_file)
self.buttonAnayze.setMaximumWidth(200)
self.vLayout1.addWidget(self.buttonAnayze)
self.layout.addLayout(self.vLayout1)
def add_submit_url_layer2(self):
self.url_label= QtWidgets.QLineEdit( self)
self.url_label.setPlaceholderText("Submit url or hash")
self.url_label.setAlignment(Qt.AlignLeft | Qt.AlignHCenter)
self.url_label.setStyleSheet("border: 1px solid black; background-color: rgb(229, 228, 226);")
self.url_label.setFixedSize(400,400)
buttonClose = QtWidgets.QPushButton('Submit')
buttonClose.clicked.connect(self.action_submit)
self.vLayout2.addWidget(self.url_label)
self.vLayout2.addWidget(buttonClose)
self.layout.addLayout(self.vLayout2)
def action_submit(self):
#https://www.travelandleisure.com/thmb/n4LZNPWDaJnGGl4jz988ms4u-Pk=/1500x0/filters:no_upscale():max_bytes(150000):strip_icc()/header-PARISPANDA1221-87f0c4cc46bf423ebcaf669c50912c3c.jpg
print(self.url_label.text())
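# Compute a summary for the selected file (MD5, SHA-256, SHA-512 and CRC32 hashes plus basic metadata) and display it in a two-column table below the file browser.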
def analize_file(self):
try:
file=self.fileBrowser.getPaths()[0]
except:
file='./table.py'
# icon_1=QtWidgets.QLabel()
# pixmap=QtGui.QPixmap('./icons/done.svg')
# pixmap= pixmap.scaled(30, 30,Qt.KeepAspectRatio)
# icon_1.setPixmap(pixmap)
# icon_1.setAlignment(Qt.AlignLeft)
# self.vLayout1.addWidget(icon_1)
label_2 = QtWidgets.QLabel("Summary", self)
label_2.setStyleSheet("border: 1px solid black;")
label_2.setAlignment(Qt.AlignLeft | Qt.AlignHCenter)
self.vLayout1.addWidget(label_2)
tableWidget=QtWidgets.QTableWidget()
cols=2
rows=8
tableWidget.setColumnCount(cols)
tableWidget.setRowCount(rows)
tableWidget.setAlternatingRowColors(True)
tableWidget.setWindowTitle("File analysis")
tableWidget.setHorizontalHeaderLabels(data[0].keys())
[tableWidget.horizontalHeader().setSectionResizeMode(i,QtWidgets.QHeaderView.ResizeToContents) for i in range(cols-1)]
tableWidget.horizontalHeader().setSectionResizeMode(cols-1,QtWidgets.QHeaderView.Stretch)
self.vLayout1.addWidget(tableWidget)
with open(file,'rb') as f:
content=f.read()
file_data={"Filename":file,"File type":"binary data","MD5":hashlib.md5(content).hexdigest(),"Sha256":hashlib.sha256(content).hexdigest(),"Sha512":hashlib.sha512(content).hexdigest(),"CRC32":hex(zlib.crc32(content) & 0xffffffff),"Yara":"None Matched","Score":"32"}
for i, (k, v) in enumerate(file_data.items()):
tableWidget.setItem(i,0,QtWidgets.QTableWidgetItem(k))
tableWidget.setItem(i,1,QtWidgets.QTableWidgetItem(v))
self.buttonAnayze.hide()
self.fileBrowser.hide()
self.bigLayout.addWidget(self.buttonClose) | marioara-biblioteca/IOC | file_scan.py | file_scan.py | py | 5,566 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QVBoxLayout",
"line_number": 15,
"usage_type": "call"
},
{
"api_name... |
29455061606 | import pygame
import field_and_pointer as fp
import argparser as ap
# Stack, field, pointer and the 2-input operators
stackstack = [[]]
the_field = fp.Field(fp.load_code())
pointer = fp.Pointer((0, 0), (1, 0))
operators = {
"+": lambda x1, x2: stackstack[-1].append(x1 + x2),
"-": lambda x1, x2: stackstack[-1].append(x2 - x1),
"*": lambda x1, x2: stackstack[-1].append(x1 * x2),
"/": lambda x1, x2:
stackstack[-1].append(int(float(x2) // float(x1)))
if x1 else stackstack[-1].append(0),
"%": lambda x1, x2: stackstack[-1].append(x2 % x1),
"`": lambda x1, x2:
stackstack[-1].append(1) if x2 > x1 else stackstack[-1].append(0),
"\\": lambda x1, x2: stackstack[-1].extend([x1, x2]),
"g": lambda x1, x2: stackstack[-1].append(ord(the_field.get_char(x2, x1)))}
# Global constants related to pygame
CHAR_WIDTH = 12
CHAR_HEIGHT = 28
SCREEN_HEIGHT_MODIFIER = 300
SCREEN_HEIGHT = the_field.Y * CHAR_HEIGHT + SCREEN_HEIGHT_MODIFIER
SCREEN_WIDTH = the_field.X * CHAR_WIDTH + 500
BG_COLOR = (52, 52, 52)
STACK_BG_COLOR = (0, 0, 0, 100)
STACK_OUTPUT_COLOR = (230, 200, 70)
SOSS_OUTPUT_COLOR = (70, 230, 200)
POINTER_COLOR = (255, 255, 255, 130)
STACK_CHAR_HEIGHT = 16
STACK_CHAR_WIDTH = 10
ARGS = ap.parse_arguments()
_paused = False
_step_once = False
_reset = False
# Pygame surface inits
if not ARGS.OUTPUT_MODE:
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
background = pygame.Surface(screen.get_size()).convert()
pointer_rect = pygame.Surface((CHAR_WIDTH, CHAR_HEIGHT), pygame.SRCALPHA)
pointer_rect.fill(POINTER_COLOR)
stacksurf = pygame.Surface((SCREEN_WIDTH, SCREEN_HEIGHT_MODIFIER),
pygame.SRCALPHA)
outsurf = pygame.Surface((int(float(SCREEN_WIDTH) / 2.0),
SCREEN_HEIGHT_MODIFIER),
pygame.SRCALPHA)
pygame.display.set_caption("Befunge-98 Interpreter")
# Pygame font inits
pygame.font.init()
stackfont = pygame.font.Font("../font/Inconsolata.otf", 18)
codefont = pygame.font.Font("../font/Inconsolata.otf", 24)
# Global constants related to bf98.py
_outcount = 0
_outline = 0
_instring = ""
| johanasplund/befunge-98 | lib/initialize.py | initialize.py | py | 2,188 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "field_and_pointer.Field",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "field_and_pointer.load_code",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "field_and_pointer.Pointer",
"line_number": 8,
"usage_type": "call"
},
{
"api_na... |
20599371961 | from flask import Flask, request
app = Flask(__name__)
@app.route('/02-server')
def server01_views():
return "This is my first response by Ajax"
@app.route('/03-server')
def server03_views():
uname = request.args['uname']
return "欢迎" + uname
if __name__ == '__main__':
app.run(debug=True)
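# Illustrative request (assuming Flask's default development host/port): GET http://127.0.0.1:5000/03-server?uname=Tom returns "欢迎Tom".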
| demo112/1809 | PythonWeb/Ajax/1809/Day01/1809self/run01.py | run01.py | py | 316 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 13,
"usage_type": "name"
}
] |
32942262031 | import datetime
import json
import os
import random
import string
import sys
import time
import urllib.request
os.system("title WARP-PLUS-CLOUDFLARE By Tran Thai Tuan Anh")
os.system('cls' if os.name == 'nt' else 'clear')
print ("[+] About script: With this script, you can getting unlimited GB on Warp+")
print ("[+] This script coded by Tran Thai Tuan Anh")
print ("----------")
referrer = input("[#] Enter your WARP+ ID:")
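# genString/digitString generate the random key, install_id and FCM token used to build a fresh client registration payload below.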
def genString(stringLength):
try:
letters = string.ascii_letters + string.digits
return ''.join(random.choice(letters) for i in range(stringLength))
except Exception as error:
print(error)
def digitString(stringLength):
try:
digit = string.digits
return ''.join((random.choice(digit) for i in range(stringLength)))
except Exception as error:
print(error)
url = f'https://api.cloudflareclient.com/v0a{digitString(3)}/reg'
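# run() registers a new randomly generated client against the Cloudflare API with the entered WARP+ ID as referrer; every HTTP 200 response credits that referrer with 1 GB.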
def run():
try:
install_id = genString(22)
body = {"key": "{}=".format(genString(43)),
"install_id": install_id,
"fcm_token": "{}:APA91b{}".format(install_id, genString(134)),
"referrer": referrer,
"warp_enabled": False,
"tos": datetime.datetime.now().isoformat()[:-3] + "+02:00",
"type": "Android",
"locale": "es_ES"}
data = json.dumps(body).encode('utf8')
headers = {'Content-Type': 'application/json; charset=UTF-8',
'Host': 'api.cloudflareclient.com',
'Connection': 'Keep-Alive',
'Accept-Encoding': 'gzip',
'User-Agent': 'okhttp/3.12.1'
}
req = urllib.request.Request(url, data, headers)
response = urllib.request.urlopen(req)
status_code = response.getcode()
return status_code
except Exception as error:
print(error)
good = 0
bad = 0
while True:
result = run()
if result == 200:
good += 1
os.system('cls' if os.name == 'nt' else 'clear')
print("WARP-PLUS-CLOUDFLARE (script) by Tran Thai Tuan Anh")
print("")
animation = ["10%","20%", "30%", "40%", "50%", "60%", "70%", "80%", "90%", "100%"]
for i in range(len(animation)):
time.sleep(0.5)
sys.stdout.write("\rprocessing..." + animation[i % len(animation)])
sys.stdout.flush()
os.system('cls' if os.name == 'nt' else 'clear')
print("WARP-PLUS-CLOUDFLARE (script) by Tran Thai Tuan Anh")
print(f"\n[-] ID: {referrer}")
print(f"[-] {good} GB has been successfully added to your account.")
print(f"[-] Total: {good} Good {bad} Bad")
print("[-] After 18 seconds, a new request will be sent.")
time.sleep(18)
else:
bad += 1
os.system('cls' if os.name == 'nt' else 'clear')
print("WARP-PLUS-CLOUDFLARE (script) by Tran Thai Tuan Anh")
print("")
print("[-] Error when connecting to server.")
print(f"[-] Total: {good} Good {bad} Bad")
print("[-] After 18 seconds, a new request will be sent.")
time.sleep(18)
| tranthaituananh/mini_projects | buffKey_1111.py | buffKey_1111.py | py | 2,770 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "os.system",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.name",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "string.ascii_letters",
"line_num... |
40938120333 | # This defines the model
import torch.nn as nn
from torchvision import models
import torch.nn as nn
import torch
class MyEnsemble(nn.Module):
def __init__(self, modelA, modelB, input):
super(MyEnsemble, self).__init__()
self.modelA = modelA
self.modelB = modelB
self.fc1 = nn.Linear(input, 2)
def forward(self, x):
out1 = self.modelA(x)
out2 = self.modelB(x)
out = out1 + out2
x = self.fc1(out)
return torch.softmax(x, dim=1)
"""
def build_model( num_classes=2):
model1 = models.resnet101(pretrained=True)
num_ftrs1 = model1.fc.in_features
model1.fc = nn.Linear(num_ftrs1, num_classes)
model2 = models.resnet152(pretrained=True)
num_ftrs2 = model2.fc.in_features
model2.fc = nn.Linear(num_ftrs2, num_classes)
model = MyEnsemble(model1, model2,num_classes)
return model
"""
def build_model(pretrained=True, fine_tune=True, num_classes=10):
model=models.resnet152(pretrained=True)
num_ftrs1 = model.fc.in_features
model.fc = nn.Linear(num_ftrs1, num_classes)
return model
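# Minimal usage sketch (illustrative only; the 224x224 input size is an assumption, not part of the original file):
# model = build_model(num_classes=2)
# x = torch.randn(4, 3, 224, 224)
# print(model(x).shape)  # expected: torch.Size([4, 2])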
| agossouema2011/WCEBleedGenChallenge_Colorlab_Team | Classification/model.py | model.py | py | 1,124 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_nu... |
25449310072 | from django.shortcuts import render
from .models import AreaOfExpertise, Expert
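# browser() lists experts and filters them by name substring, institute and area of expertise taken from GET parameters; viewer() renders a single expert by id.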
def browser(request):
aoe = AreaOfExpertise.objects.all().order_by('field')
experts = Expert.objects.all()
institutes = set()
for expert in experts:
institutes.add(expert.institute) if expert.institute is not None else None
name = request.GET.get('name')
institute = request.GET.get('institute')
expertise = request.GET.get('expertise')
if name:
experts = experts.filter(name__contains=request.GET.get('name'))
if institute:
experts = experts.filter(institute=request.GET.get('institute'))
if expertise:
experts = experts.filter(expertise=AreaOfExpertise.objects.filter(field=expertise)).distinct()
return render(request, 'expertBrowser.html', {'experts': experts, 'institutes': institutes, 'aoe': aoe, 'name': name, 'institute': institute, 'expertise': expertise})
def viewer(request, id):
expert = Expert.objects.get(id=id)
return render(request, 'expertViewer.html', {'expert': expert})
| ztimson/OACPL | expert_witnesses/views.py | views.py | py | 1,066 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "models.AreaOfExpertise.objects.all",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "models.AreaOfExpertise.objects",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "models.AreaOfExpertise",
"line_number": 7,
"usage_type": "name"
},... |
14282958757 | import serial
import numpy as np
import matplotlib.pyplot as plt
# Serial port settings (change as needed)
ser = serial.Serial('/dev/ttyACM0', 115200)
# Initialize the plot
fig, ax = plt.subplots()
x_data, y_data = [], []
line, = ax.plot(x_data, y_data, 'o', markersize=6)
# Ring buffer to hold 3 seconds of data
max_data_points = 300 # 100 data points per second × 3 seconds
x_ring_buffer = [0] * max_data_points
y_ring_buffer = [0] * max_data_points
current_index = 0
def update_plot(x, y):
# Plot the data
x_data.append(x)
y_data.append(y)
line.set_data(x_data, y_data)
# Drop the oldest data once more than 3 seconds' worth has accumulated
if len(x_data) > max_data_points:
x_data.pop(0)
y_data.pop(0)
# Refresh the plot
plt.pause(0.01)
while True:
data = ser.readline().decode().strip()
if data.startswith("(") and data.endswith(")"):
data = data[1:-1]
x, y, _ = map(int, data.split(", "))
# Update the real-time plot
update_plot(x, y)
| Altairu/raspberrypi | python/PCaruduino2.py | PCaruduino2.py | py | 1,149 | python | ja | code | 0 | github-code | 1 | [
{
"api_name": "serial.Serial",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "matplotlib.py... |
22377846455 | from django import http
from django.http.response import Http404, HttpResponseForbidden
from rest_framework import viewsets, mixins, status
from rest_framework.response import Response
from rest_framework.decorators import action, api_view, permission_classes
from django.shortcuts import get_object_or_404
from .models import Answer, Enrollment, Offering, User, Exercise, UserAnswerSummary
from .serializers import AnswerSerializer, OfferingSerializer, UserAnswerSummarySerializer, UserSerializer, ExerciseSerializer
from .permissions import IsAdminOrSelf, IsAdminUser, IsEnrolledInOfferingOrIsStaff
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
permission_classes = [IsAdminOrSelf]
class OfferingViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Offering.objects.all()
serializer_class = OfferingSerializer
permission_classes = [IsAdminUser]
@api_view(['GET'])
@permission_classes([IsAdminUser])
def get_enrolled_students(request, off_pk):
students = User.objects.filter(enrollment__offering_id=off_pk)
return Response(UserSerializer(students, many=True).data)
class ExerciseViewSet(mixins.CreateModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet):
serializer_class = ExerciseSerializer
def get_permissions(self):
permissions = [IsAdminUser]
if self.action == 'list':
permissions = [IsEnrolledInOfferingOrIsStaff]
return [permission() for permission in permissions]
def create(self, request, off_pk=None):
offering = get_object_or_404(Offering, pk=off_pk)
exercise, new = offering.exercise_set.update_or_create(slug=request.data['slug'],
defaults={
'url': request.data['url'],
'type': request.data['type'],
'topic': request.data['topic'],
'group': request.data['group'],
})
s = ExerciseSerializer(exercise)
return Response(s.data, status=status.HTTP_201_CREATED)
def list(self, request, off_pk=None):
offering = get_object_or_404(Offering, pk=off_pk)
exercises_json = ExerciseSerializer(offering.exercise_set.all(), many=True)
return Response(exercises_json.data)
class AnswerViewSet(viewsets.ModelViewSet):
serializer_class = AnswerSerializer
permission_classes = [IsEnrolledInOfferingOrIsStaff]
def get_queryset(self):
offering_pk = self.kwargs.get('off_pk')
get_object_or_404(Offering, pk=offering_pk)
exercise = get_object_or_404(Exercise, offering=offering_pk, slug=self.kwargs.get('ex_slug'))
f = user_filter(self.request)
return Answer.objects.filter(exercise=exercise, **f)
def create(self, request, off_pk=None, ex_slug=None):
get_object_or_404(Offering, pk=off_pk)
exercise = get_object_or_404(Exercise, slug=ex_slug, offering=off_pk)
if not exercise.allow_submissions:
return HttpResponseForbidden()
answer_data = {
'user': request.user.pk,
'exercise': exercise.pk,
'test_results': request.data['test_results'],
'student_input': request.data['student_input'],
'points': request.data['points']
}
answer = AnswerSerializer(data=answer_data)
if answer.is_valid():
answer.save()
return Response(answer.data, status=status.HTTP_201_CREATED)
return Response(answer.errors, status=status.HTTP_400_BAD_REQUEST)
def list_answers_by_student(self, request, off_pk, ex_slug, student_pk):
get_object_or_404(Offering, pk=off_pk)
exercise = get_object_or_404(Exercise, offering=off_pk, slug=ex_slug)
filters = {
'exercise': exercise
}
if self.request.user.is_staff:
filters['user'] = self.kwargs.get('student_pk')
elif self.request.user.pk != int(student_pk):
return HttpResponseForbidden()
answers = Answer.objects.filter(**filters)
answers_json = AnswerSerializer(answers, many=True)
return Response(answers_json.data, status=status.HTTP_200_OK)
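# Build queryset filters that restrict results to the requesting user; staff users are unrestricted unless they pass ?user=<pk> to inspect a specific student.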
def user_filter(request):
user_pk = request.user.pk
request_user_pk = request.GET.get('user')
if request_user_pk and request_user_pk != user_pk and request.user.is_staff:
user_pk = request_user_pk
filters = {}
if not request.user.is_staff or request_user_pk:
# Can only see own summaries if not admin
filters['user__pk'] = user_pk
return filters
@api_view(['GET'])
@permission_classes([IsEnrolledInOfferingOrIsStaff])
def get_latest_answer_by_student(request, off_pk, ex_slug, student_pk):
offering = get_object_or_404(Offering, pk=off_pk)
try:
answer = Answer.objects.filter(
user__pk=student_pk,
exercise__slug=ex_slug,
exercise__offering=offering,
).latest('pk')
except Answer.DoesNotExist:
raise Http404
return Response(AnswerSerializer(answer).data)
@api_view(['GET'])
@permission_classes([IsEnrolledInOfferingOrIsStaff])
def get_answer(request, off_pk, ex_slug, ans_pk):
get_object_or_404(Offering, pk=off_pk)
answer = get_object_or_404(Answer, pk=ans_pk, **user_filter(request))
return Response(AnswerSerializer(answer).data)
@api_view(['GET'])
@permission_classes([IsEnrolledInOfferingOrIsStaff])
def get_previous_answer(request, off_pk, ex_slug, ans_pk):
get_object_or_404(Offering, pk=off_pk)
filters = user_filter(request)
try:
answer = Answer.objects.filter(
exercise__slug=ex_slug,
pk__lt=ans_pk,
**filters).latest('pk')
except Answer.DoesNotExist:
raise Http404()
return Response(AnswerSerializer(answer).data)
@api_view(['GET'])
@permission_classes([IsEnrolledInOfferingOrIsStaff])
def get_next_answer(request, off_pk, ex_slug, ans_pk):
get_object_or_404(Offering, pk=off_pk)
filters = user_filter(request)
try:
answer = Answer.objects.filter(
exercise__slug=ex_slug,
pk__gt=ans_pk,
**filters).earliest('pk')
except Answer.DoesNotExist:
raise Http404()
return Response(AnswerSerializer(answer).data)
@api_view(['GET'])
@permission_classes([IsEnrolledInOfferingOrIsStaff])
def list_summaries(request, off_pk):
offering = get_object_or_404(Offering, pk=off_pk)
filters = user_filter(request)
all_summaries = UserAnswerSummary.objects.filter(**filters).filter(exercise__offering=offering).prefetch_related('exercise')
all_summaries_json = UserAnswerSummarySerializer(all_summaries, many=True)
return Response(all_summaries_json.data, status=status.HTTP_200_OK)
@api_view(['GET'])
@permission_classes([IsEnrolledInOfferingOrIsStaff])
def list_summaries_for_exercise(request, off_pk, ex_slug):
get_object_or_404(Offering, pk=off_pk)
exercise = get_object_or_404(Exercise, offering=off_pk, slug=ex_slug)
filters = user_filter(request)
filters['exercise'] = exercise
all_summaries = UserAnswerSummary.objects.filter(**filters).prefetch_related('exercise')
all_summaries_json = UserAnswerSummarySerializer(all_summaries, many=True)
return Response(all_summaries_json.data, status=status.HTTP_200_OK)
@api_view(['GET'])
@permission_classes([IsAdminUser])
def list_students_that_tried_exercise(request, off_pk, ex_slug):
get_object_or_404(Offering, pk=off_pk)
exercise = get_object_or_404(Exercise, offering=off_pk, slug=ex_slug)
all_students = User.objects.filter(answer__exercise__slug=ex_slug).distinct()
all_students_json = UserSerializer(all_students, many=True)
return Response(all_students_json.data, status=status.HTTP_200_OK)
@api_view(['GET'])
@permission_classes([IsAdminUser])
def activate_exercise(request, off_pk, ex_slug):
get_object_or_404(Offering, pk=off_pk)
exercise = get_object_or_404(Exercise, offering=off_pk, slug=ex_slug)
exercise.allow_submissions = True
exercise.save()
return Response('OK', status=status.HTTP_200_OK)
@api_view(['GET'])
@permission_classes([IsAdminUser])
def deactivate_exercise(request, off_pk, ex_slug):
get_object_or_404(Offering, pk=off_pk)
exercise = get_object_or_404(Exercise, offering=off_pk, slug=ex_slug)
exercise.allow_submissions = False
exercise.save()
return Response('OK', status=status.HTTP_200_OK)
| insper-education/devlife-support-api | core/views.py | views.py | py | 8,481 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "rest_framework.viewsets.ModelViewSet",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.viewsets",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "models.User.objects.all",
"line_number": 15,
"usage_type": "call"
},
... |
2997714658 | # -*- coding: utf-8 -*-
import requests
import os
# 6000 is a large number to make sure we get all the components of a collection. Please do note that RISE also has a pagination feature,
# which can be implemented by clients if they wish.
per_page = 6000
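# Hypothetical pagination sketch (the 'page' query parameter name is an assumption; this script only uses 'per_page'):
# def fetch_all(url, page_size=100):
#     items, page = [], 1
#     while True:
#         batch = requests.get(url, params={'page': page, 'per_page': page_size}).json()
#         if not batch:
#             break
#         items.extend(batch)
#         page += 1
#     return items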
# getting the list of collections that the user has access to:
collections_response = requests.get(f'https://rise.mpiwg-berlin.mpg.de/api/collections?per_page={per_page}')
collections = collections_response.json()
# each accessible collection has a name, a uuid, and a number of resources.
# print(collections)
idx = 1
for collection in collections:
print(f'collection at index: {idx}')
idx += 1
print(collection)
# picking a collection by its index
# collection_index = 1
# collection = collections[collection_index]
results = list(filter(lambda collection: collection['name'] == 'MPIWG - 哈佛燕京圖書館藏珍稀方志', collections))
collection = results[0]
print(collection['uuid'])
collection_uuid = collection['uuid']
# we grab all resources for this collection
resources_response = requests.get(f'https://rise.mpiwg-berlin.mpg.de/api/collections/{collection_uuid}/resources?per_page={per_page}')
corpus_path = './corpus'
if not os.path.exists(corpus_path):
os.makedirs(corpus_path)
for resource in resources_response.json():
uuid = resource['uuid']
resource_name = resource['name']
print(resource_name)
if not os.path.exists(corpus_path + "/" + resource_name):
os.makedirs(corpus_path + "/" + resource_name)
sections = requests.get("https://rise.mpiwg-berlin.mpg.de/api/resources/"+ resource['uuid'] +"/sections")
for section in sections.json():
print(section)
print(section['uuid'])
section_name = section['name']
section_path = corpus_path + "/" + resource_name + "/" + section_name
file = open(section_path +".txt", "w")
content_units = requests.get("https://rise.mpiwg-berlin.mpg.de/api/sections/"+ section['uuid'] +"/content_units?per_page=6000")
for content_unit in content_units.json():
print(content_unit)
file.write(content_unit['content'])
file.close() | RISE-MPIWG/hylg | FetchTextFromRISE.py | FetchTextFromRISE.py | py | 2,253 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number":... |
25500941930 | from fastapi import HTTPException, status
from odmantic.bson import ObjectId
from typing import List
# Import helpers
from helpers.database import db
# Import Models
from models.currency import Currency
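# Each validator raises HTTP 422 with a FastAPI-style error-location list whenever a referenced currency id cannot be found in the database.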
async def currency_exists(cid: ObjectId, loc=None):
if loc is None:
loc = []
loc = ['body'] + loc + ['currency']
currency = await db.find_one(Currency, Currency.id == cid)
if currency is None:
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
detail=[
dict(
loc=loc,
msg='not exists',
type='value_error.not.exists'
)
]
)
return currency
async def currencies_exists(cids: List[ObjectId], loc=None):
if loc is None:
loc = []
loc = ['body'] + loc
errors = list()
if isinstance(cids, List):
for i, cid in enumerate(cids):
country = await db.find_one(Currency, Currency.id == cid)
if country is None:
errors.append(
dict(
loc=loc + [i, 'currency'],
msg='not exists',
type='value_error.not.exists'
)
)
if len(errors) > 0:
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
detail=errors
)
| arif-sajal/mondol-int-accounts-backend | validators/form/currencyExists.py | currencyExists.py | py | 1,453 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "odmantic.bson.ObjectId",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "helpers.database.db.find_one",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "models.currency.Currency",
"line_number": 17,
"usage_type": "argument"
},
{
"... |
43652346391 | from os import name
from django.urls import path
#from .views import HomePageView
from . import views
urlpatterns = [
path('',views.teacher_index,name='teacher'), # Homepage
path('<slug:teacher_id>/<int:choice>/Classes/', views.teacher_home, name="teacher_home"), #Student Batches
path('<slug:classid>/Students', views.view_teacher_students, name='teacher_stud'),
path('info/<slug:student>/', views.each_student_info, name='each_student_info'),
path('<slug:stud>/<slug:course>/<slug:teach>/', views.view_student_attendence, name='student_attendence'),
# Assignment Routes
path('<slug:class_id>/<slug:course>/', views.view_assignments_page, name='view_assignments_page'),
path('/add/<slug:class_id>/<slug:course>/added/',views.add_assigment,name='add_assigment'),
# get_submitted_assignments lets a teacher view their submitted assignments and delete_assignments removes them
path('get_assignments_data/', views.get_submitted_assignments, name='view_assignments'),
path('delete_assignmet/<slug:ta>/deleted', views.delete_assignments, name='delete_assignments'),
path('view_own_attendance',views.view_attendance,name='view_attendance'),
path('promote/', views.promote_students, name='promote_students'),
path('msgfromhod/',views.messages_from_hod, name='messages_from_hod'),
# Marks Routes
path('marks/<slug:stud>/<slug:course>/<slug:teach>/', views.teacher_view_marks, name='teacher_view_marks'),
path('<slug:stud>/<slug:course>/<slug:teach>/take', views.take_marks, name='take_marks'),
path('profile', views.teacher_profile, name='teacher_profiles'),
path('profile/<slug:teacher>/update', views.update_profile, name='update_profile'),
# path('teacher/<int:id>/teacher_view_students/',views.teacher_view_students, name='teacher_view_students'),
path('logout/',views.logOut, name='logOut'),
]
| abidgulshahid/Department-Managment-System | teacher/urls.py | urls.py | py | 1,885 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
35601265161 | from extract_words import extract_words
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('WebKit', '3.0')
from gi.repository import Gtk, Gdk, WebKit
class MainWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title='My Window Title')
self.AUTHEN_CODE = None # authentication code for API of Shanbay.com
self.set_title('English Reading Companion')
self.set_default_size(800, 600)
self.set_border_width(10)
self.connect('delete-event', self.quit)
self.connect('key-press-event', self.on_key_click)
# Left List
self.wl_store = Gtk.ListStore(str)
self.wl_treeview = Gtk.TreeView(model=self.wl_store)
self.wl_column = Gtk.TreeViewColumn('New [%d]' % len(self.wl_store), Gtk.CellRendererText(), text=0)
self.wl_treeview.append_column(self.wl_column)
self.wl_treeview.get_selection().set_mode(Gtk.SelectionMode.MULTIPLE)
self.wl_treeview.override_background_color(Gtk.StateType.NORMAL,
Gdk.RGBA.from_color(Gdk.color_parse('CadetBlue3')))
wl_scrolled_window = Gtk.ScrolledWindow()
wl_scrolled_window.set_policy(
Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
wl_scrolled_window.add(self.wl_treeview)
# Middle List
self.wm_store = Gtk.ListStore(str)
try:
with open('undetermined_words.dat', 'r') as f:
for line in f:
self.wm_store.append([line.strip()])
except FileNotFoundError:
pass
self.wm_treeview = Gtk.TreeView(model=self.wm_store)
self.wm_column = Gtk.TreeViewColumn('Undertermined [%d]' % len(self.wm_store), Gtk.CellRendererText(), text=0)
self.wm_treeview.append_column(self.wm_column)
self.wm_treeview.get_selection().set_mode(Gtk.SelectionMode.MULTIPLE)
self.wm_treeview.override_background_color(Gtk.StateType.NORMAL,
Gdk.RGBA.from_color(Gdk.color_parse('CadetBlue3')))
wm_scrolled_window = Gtk.ScrolledWindow()
wm_scrolled_window.set_policy(
Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
wm_scrolled_window.add(self.wm_treeview)
# Right List
self.wr_store = Gtk.ListStore(str)
self.wr_treeview = Gtk.TreeView(model=self.wr_store)
self.wr_column = Gtk.TreeViewColumn('Mastered [%d]' % len(self.wr_store), Gtk.CellRendererText(), text=0)
self.wr_treeview.append_column(self.wr_column)
self.wr_treeview.get_selection().set_mode(Gtk.SelectionMode.MULTIPLE)
self.wr_treeview.override_background_color(Gtk.StateType.NORMAL,
Gdk.RGBA.from_color(Gdk.color_parse('CadetBlue3')))
wr_scrolled_window = Gtk.ScrolledWindow()
wr_scrolled_window.set_policy(
Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
wr_scrolled_window.add(self.wr_treeview)
box_outer = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)
# Left Part
l_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=0)
push_btn = Gtk.Button(label='Push To Shanbay.com')
push_btn.connect('clicked', self.push_to_shanbay)
l_box.pack_start(wl_scrolled_window, True, True, 0)
l_box.pack_start(push_btn, False, False, 0)
box_outer.pack_start(l_box, True, True, 0)
# Middle Part
m_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=0)
m_btn_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)
left_btn = Gtk.Button()
left_btn.add(Gtk.Arrow(Gtk.ArrowType.LEFT, Gtk.ShadowType.NONE))
left_btn.connect('clicked', self.word_move_left)
right_btn = Gtk.Button()
right_btn.add(Gtk.Arrow(Gtk.ArrowType.RIGHT, Gtk.ShadowType.NONE))
right_btn.connect('clicked', self.word_move_right)
m_btn_box.pack_start(left_btn, True, True, 0)
m_btn_box.pack_start(right_btn, True, True, 0)
import_btn = Gtk.Button(label='Import Words')
import_btn.connect('clicked', self.import_words)
m_box.pack_start(wm_scrolled_window, True, True, 0)
m_box.pack_start(m_btn_box, False, False, 0)
m_box.pack_start(import_btn, False, False, 0)
box_outer.pack_start(m_box, True, True, 0)
# Right Part
r_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=0)
save_btn = Gtk.Button(label='Save To File & Clear')
save_btn.connect('clicked', self.save_to_file)
r_box.pack_start(wr_scrolled_window, True, True, 0)
r_box.pack_start(save_btn, False, False, 0)
box_outer.pack_start(r_box, True, True, 0)
self.add(box_outer)
self.show_all()
def word_move_left(self, *args):
selection = self.wm_treeview.get_selection()
model, paths = selection.get_selected_rows()
for path in paths:
self.wl_store.insert(0, model[path][:])
for path in paths[::-1]:
model.remove(model.get_iter(path))
self.wl_treeview.set_cursor(Gtk.TreePath(0))
self.wl_column.set_title('New [%d]' % len(self.wl_store))
self.wm_column.set_title('Undetermined [%d]' % len(self.wm_store))
def word_move_right(self, *args):
selection = self.wm_treeview.get_selection()
model, paths = selection.get_selected_rows()
for path in paths:
self.wr_store.insert(0, model[path][:])
for path in paths[::-1]:
model.remove(model.get_iter(path))
self.wr_treeview.set_cursor(Gtk.TreePath(0))
self.wr_column.set_title('Mastered [%d]' % len(self.wr_store))
self.wm_column.set_title('Undetermined [%d]' % len(self.wm_store))
def move_right_back(self, widget):
model, paths = self.wr_treeview.get_selection().get_selected_rows()
for path in paths:
self.wm_store.insert(0, model[path][:])
for path in paths[::-1]:
model.remove(model.get_iter(path))
self.wm_treeview.set_cursor(Gtk.TreePath(0))
self.wr_column.set_title('Mastered [%d]' % len(self.wr_store))
self.wm_column.set_title('Undetermined [%d]' % len(self.wm_store))
def move_left_back(self, widget):
model, paths = self.wl_treeview.get_selection().get_selected_rows()
for path in paths:
self.wm_store.insert(0, model[path][:])
for path in paths[::-1]:
model.remove(model.get_iter(path))
self.wm_treeview.set_cursor(Gtk.TreePath(0))
self.wl_column.set_title('New [%d]' % len(self.wl_store))
self.wm_column.set_title('Undetermined [%d]' % len(self.wm_store))
def save_to_file(self, widget):
words_to_save = [row[:][0] for row in self.wr_store]
if len(words_to_save) > 0:
with open('known_words.dat', 'a') as f:
f.write('\n'.join(words_to_save) + '\n')
self.wr_store.clear()
self.wr_column.set_title('Mastered [%d]' % len(self.wr_store))
self.wm_column.set_title('Undetermined [%d]' % len(self.wm_store))
def push_to_shanbay(self, widget):
if self.AUTHEN_CODE is None:
self.AUTHEN_CODE = self.authenticate(self)
if self.AUTHEN_CODE is None:
return
words_to_push = [row[:][0] for row in self.wl_store]
if len(words_to_push) > 0:
with open('learning_words.dat', 'a') as f:
f.write('\n'.join(words_to_push) + '\n')
self.wl_store.clear()
self.wl_column.set_title('New [%d]' % len(self.wl_store))
self.wm_column.set_title('Undetermined [%d]' % len(self.wm_store))
def import_words(self, widget):
words_in_window = set([row[:][0] for row in self.wl_store] +
[row[:][0] for row in self.wm_store] +
[row[:][0] for row in self.wr_store])
dialog = Gtk.FileChooserDialog("Select file to be opened", self,
Gtk.FileChooserAction.OPEN,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OK, Gtk.ResponseType.OK))
dialog.show()
response = dialog.run()
if response == Gtk.ResponseType.OK:
words = extract_words(dialog.get_filename(), words_in_window)
elif response == Gtk.ResponseType.CANCEL:
words = []
dialog.destroy()
for word in words:
self.wm_store.append([word])
self.wm_column.set_title('Undetermined [%d]' % len(self.wm_store))
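# Keyboard shortcuts: Left/Right move the words selected in the middle list to the New/Mastered lists; Ctrl+Left/Ctrl+Right pull selections back from the Mastered/New lists into the middle.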
def on_key_click(self, widget, ev):
ctrl = ev.state & Gdk.ModifierType.CONTROL_MASK
if ctrl:
if ev.keyval == Gdk.KEY_Left:
self.move_right_back(widget)
elif ev.keyval == Gdk.KEY_Right:
self.move_left_back(widget)
else:
pass
else:
if ev.keyval == Gdk.KEY_Left:
self.word_move_left(widget)
elif ev.keyval == Gdk.KEY_Right:
self.word_move_right(widget)
else:
pass
def _javascript_console_message(self, view, message, line, sourceid):
return True # True prevents calling original handler
def authenticate(self, *args, **kwargs):
dialog = Gtk.Dialog("Authenticate", self, 0,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OK, Gtk.ResponseType.OK))
dialog.set_default_size(800, 600)
box = dialog.get_content_area()
box.set_homogeneous(False)
webview = WebKit.WebView()
webview.connect("console-message", self._javascript_console_message)
webview.load_uri("https://api.shanbay.com/oauth2/authorize/?cliend_id=05623ce286a180e4e8c1&response_type=code&state=123")
scrolled_window = Gtk.ScrolledWindow()
scrolled_window.add(webview)
scrolled_window.set_size_request(750, 650)
box.add(scrolled_window)
dialog.show_all()
response = dialog.run()
if response == Gtk.ResponseType.OK:
uri = webview.get_uri()
elif response == Gtk.ResponseType.CANCEL:
uri = None
dialog.destroy()
self.AUTHEN_CODE = uri
def quit(self, *args, **kwargs):
with open('undetermined_words.dat', 'w') as f:
f.write('\n'.join([row[:][0] for row in self.wm_store]))
Gtk.main_quit(self, *args, **kwargs)
win = MainWindow()
Gtk.main()
| qzhqzh/EnglishReadingCompanion | src-python/GUI.py | GUI.py | py | 10,726 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gi.require_version",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "gi.require_version",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk.Window",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "gi.r... |
44587979411 | """Control module for the Vapourtec R4 heater."""
from __future__ import annotations
from collections import namedtuple
from collections.abc import Iterable
import aioserial
import pint
from loguru import logger
from flowchem import ureg
from flowchem.components.device_info import DeviceInfo
from flowchem.components.technical.temperature import TempRange
from flowchem.devices.flowchem_device import FlowchemDevice
from flowchem.devices.vapourtec.r4_heater_channel_control import R4HeaterChannelControl
from flowchem.utils.exceptions import InvalidConfigurationError
from flowchem.utils.people import dario, jakob, wei_hsin
try:
# noinspection PyUnresolvedReferences
from flowchem_vapourtec import VapourtecR4Commands
HAS_VAPOURTEC_COMMANDS = True
except ImportError:
HAS_VAPOURTEC_COMMANDS = False
class R4Heater(FlowchemDevice):
"""R4 reactor heater control class."""
DEFAULT_CONFIG = {
"timeout": 0.1,
"baudrate": 19200,
"parity": aioserial.PARITY_NONE,
"stopbits": aioserial.STOPBITS_ONE,
"bytesize": aioserial.EIGHTBITS,
}
ChannelStatus = namedtuple("ChannelStatus", "state, temperature")
def __init__(
self,
name: str = "",
min_temp: float | list[float] = -100,
max_temp: float | list[float] = 250,
**config,
) -> None:
super().__init__(name)
# Set min and max temp for all 4 channels
if not isinstance(min_temp, Iterable):
min_temp = [min_temp] * 4
if not isinstance(max_temp, Iterable):
max_temp = [max_temp] * 4
assert len(min_temp) == len(max_temp) == 4
self._min_t = min_temp * ureg.degreeC
self._max_t = max_temp * ureg.degreeC
if not HAS_VAPOURTEC_COMMANDS:
msg = (
"You tried to use a Vapourtec device but the relevant commands are missing!"
"Unfortunately, we cannot publish those as they were provided under NDA."
"Contact Vapourtec for further assistance."
)
raise InvalidConfigurationError(
msg,
)
self.cmd = VapourtecR4Commands()
# Merge default settings, including serial, with provided ones.
configuration = R4Heater.DEFAULT_CONFIG | config
try:
self._serial = aioserial.AioSerial(**configuration)
except aioserial.SerialException as ex:
msg = f"Cannot connect to the R4Heater on the port <{config.get('port')}>"
raise InvalidConfigurationError(
msg,
) from ex
self.device_info = DeviceInfo(
authors=[dario, jakob, wei_hsin],
manufacturer="Vapourtec",
model="R4 reactor module",
)
async def initialize(self):
"""Ensure connection."""
self.device_info.version = await self.version()
logger.info(f"Connected with R4Heater version {self.device_info.version}")
temp_limits = {
ch_num: TempRange(
min=ureg.Quantity(f"{t[0]} °C"),
max=ureg.Quantity(f"{t[1]} °C"),
)
for ch_num, t in enumerate(zip(self._min_t, self._max_t, strict=True))
}
reactor_positions = [
R4HeaterChannelControl(f"reactor{n+1}", self, n, temp_limits[n])
for n in range(4)
]
self.components.extend(reactor_positions)
async def _write(self, command: str):
"""Write a command to the pump."""
cmd = command + "\r\n"
await self._serial.write_async(cmd.encode("ascii"))
logger.debug(f"Sent command: {command!r}")
async def _read_reply(self) -> str:
"""Read the pump reply from serial communication."""
reply_string = await self._serial.readline_async()
logger.debug(f"Reply received: {reply_string.decode('ascii').rstrip()}")
return reply_string.decode("ascii")
async def write_and_read_reply(self, command: str) -> str:
"""Send a command to the pump, read the replies and return it, optionally parsed."""
self._serial.reset_input_buffer()
await self._write(command)
logger.debug(f"Command {command} sent to R4!")
response = await self._read_reply()
if not response:
msg = "No response received from heating module!"
raise InvalidConfigurationError(msg)
logger.debug(f"Reply received: {response}")
return response.rstrip()
async def version(self):
"""Get firmware version."""
return await self.write_and_read_reply(self.cmd.VERSION)
async def set_temperature(self, channel, temperature: pint.Quantity):
"""Set temperature to channel."""
cmd = self.cmd.SET_TEMPERATURE.format(
channel=channel,
temperature_in_C=round(temperature.m_as("°C")),
)
await self.write_and_read_reply(cmd)
# Set temperature implies channel on
await self.power_on(channel)
# Verify it is not unplugged
status = await self.get_status(channel)
if status.state == "U":
logger.error(
f"TARGET CHANNEL {channel} UNPLUGGED! (Note: numbering starts at 0)",
)
async def get_status(self, channel) -> ChannelStatus:
"""Get status from channel."""
# This command is a bit fragile for unknown reasons.
failure = 0
while True:
try:
raw_status = await self.write_and_read_reply(
self.cmd.GET_STATUS.format(channel=channel),
)
return R4Heater.ChannelStatus(raw_status[:1], raw_status[1:])
except InvalidConfigurationError as ex:
failure += 1
# Allows 3 failures cause the R4 is choosy at times...
if failure > 3:
raise ex
else:
continue
async def get_temperature(self, channel):
"""Get temperature (in Celsius) from channel."""
state = await self.get_status(channel)
return None if state.temperature == "281.2" else state.temperature
async def power_on(self, channel):
"""Turn on channel."""
await self.write_and_read_reply(self.cmd.POWER_ON.format(channel=channel))
async def power_off(self, channel):
"""Turn off channel."""
await self.write_and_read_reply(self.cmd.POWER_OFF.format(channel=channel))
if __name__ == "__main__":
import asyncio
r4_device = R4Heater(port="COM1")
async def main(heat):
"""Test function."""
await heat.initialize()
# Get reactors
r1, r2, r3, r4 = heat.components  # components is filled as a list in initialize(), so no call
await r1.set_temperature("30 °C")
print(f"Temperature is {await r1.get_temperature()}")
asyncio.run(main(r4_device))
| cambiegroup/flowchem | src/flowchem/devices/vapourtec/r4_heater.py | r4_heater.py | py | 6,914 | python | en | code | 11 | github-code | 1 | [
{
"api_name": "flowchem.devices.flowchem_device.FlowchemDevice",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "aioserial.PARITY_NONE",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "aioserial.STOPBITS_ONE",
"line_number": 35,
"usage_type": "att... |
40420378554 | from pathlib import Path
import numpy as np
from collections import Counter
from itertools import product
DIRECTIONS = ("up", "down", "left", "right")
ROTATIONS = (0, 90, 180, 270)
def get_tile_borders(tile, dir="all"):
borders = []
border = None
for d in DIRECTIONS:
if dir in [d, "all"]:
id = tile_to_id(tile, d, False)
id_flip = tile_to_id(tile, d, True)
border = min(id, id_flip)
borders.append(border)
if dir != "all":
return border
return borders
def tile_to_id(tile, dir, reverse=False):
step = 1
if reverse:
step = -1
if dir == "up":
return border_to_id(tile[0, ::step])
if dir == "down":
return border_to_id(tile[-1, ::step])
if dir == "left":
return border_to_id(tile[::step, 0])
if dir == "right":
return border_to_id(tile[::step, -1])
def border_to_id(border):
return int("".join([str(el) for el in border.astype(int)]), 2)
def rotate(tile, deg, flip_hor=False):
if deg not in [0, 90, 180, 270]:
raise RuntimeError("Invalid degrees")
arr = np.copy(tile)
if flip_hor:
arr = arr[:, ::-1]
if deg == 90:
return arr[::-1, :].T
elif deg == 180:
return arr[::-1, ::-1]
elif deg == 270:
return arr[:, ::-1].T
return arr
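# Added illustration (not part of the original solution): a small self-check of
# rotate() above. The slicing tricks should agree with numpy's rot90 -- note that
# rot90 rotates counter-clockwise, so 90 degrees here corresponds to k=-1.
def _rotate_self_check():
    tile = np.array([[1, 0, 0],
                     [0, 1, 0],
                     [1, 1, 0]], dtype=np.uint8)
    assert np.array_equal(rotate(tile, 90), np.rot90(tile, k=-1))
    assert np.array_equal(rotate(tile, 180), np.rot90(tile, k=2))
    assert np.array_equal(rotate(tile, 270), np.rot90(tile, k=1))
    assert np.array_equal(rotate(tile, 0, flip_hor=True), tile[:, ::-1])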
class Image:
def __init__(self, data, pattern):
self.tiles = dict()
self.borders = dict()
self.n_tiles = 0
self.tile_dim = None
self.corners = set()
self.edges = set()
self.interiors = set()
self.border_counter = None
self.corner_prod = None
i = 0
lines = data.split("\n")
while i < len(lines):
if lines[i].startswith("Tile"):
id = int(lines[i][5:-1])
i += 1
tile = []
while (i < len(lines)) and lines[i]:
tile.append([int(l == "#") for l in lines[i]])
i += 1
if self.tile_dim is None:
self.tile_dim = len(tile)
tile = np.array(tile, dtype=np.uint8)
self.tiles[id] = tile
self.borders[id] = get_tile_borders(tile)
self.n_tiles += 1
i += 1
for id in self.tiles:
assert self.tiles[id].shape[0] == self.tile_dim
assert self.tiles[id].shape[1] == self.tile_dim
self.puzzle_dim = int(np.round(np.sqrt(self.n_tiles)))
assert self.puzzle_dim ** 2 == self.n_tiles
self.solution = np.zeros(
(self.puzzle_dim, self.puzzle_dim), dtype=int
)
self._solve()
lines = pattern.split("\n")
self.pattern = []
for line in lines:
self.pattern.append([l == "#" for l in line])
self.pattern = np.array(self.pattern, dtype=bool)
im_size = self.puzzle_dim * (self.tile_dim - 2)
self.image = np.zeros((im_size, im_size), dtype=np.uint8)
self._assemble_image()
self.n_monsters = self._mark_sea_monsters()
def _check_direction_unique(self, tile, direction):
id = get_tile_borders(tile, direction)
return self.border_counter[id] == 1
def _solve(self):
borders = []
for id in self.borders:
borders.extend(self.borders[id])
self.border_counter = Counter(borders)
max_edge_count = 0
for id in self.borders:
n_unique = 0
for border in self.borders[id]:
c = self.border_counter[border]
n_unique += c == 1
if c > max_edge_count:
max_edge_count = c
if n_unique == 2:
self.corners.add(id)
elif n_unique == 1:
self.edges.add(id)
elif n_unique == 0:
self.interiors.add(id)
assert max_edge_count == 2
self.corner_prod = self.find_corner_product()
dim_range = range(self.puzzle_dim)
for col, row in product(dim_range, dim_range):
self._solve_coord(row, col)
def _solve_coord(self, row, col):
unique_edges = (
(row == 0)
+ (col == 0)
+ (row == self.puzzle_dim - 1)
+ (col == self.puzzle_dim - 1)
)
pieces = None
if unique_edges == 0:
pieces = self.interiors
elif unique_edges == 1:
pieces = self.edges
if unique_edges == 2:
pieces = self.corners
orientations = list(product(ROTATIONS, [True, False]))
cand = None
tile = None
for cand in pieces:
valid = False
for rot, flip in orientations:
tile = rotate(self.tiles[cand], rot, flip)
valid = True
if row == 0:
valid &= self._check_direction_unique(tile, "up")
else:
up = self.tiles[self.solution[row - 1, col]]
valid &= tile_to_id(tile, "up") == tile_to_id(
up, "down"
)
if col == 0:
valid &= self._check_direction_unique(tile, "left")
else:
left = self.tiles[self.solution[row, col - 1]]
valid &= tile_to_id(tile, "left") == tile_to_id(
left, "right"
)
if row == self.puzzle_dim - 1:
valid &= self._check_direction_unique(tile, "down")
if col == self.puzzle_dim - 1:
valid &= self._check_direction_unique(tile, "right")
if valid:
break
if valid:
break
self.solution[row, col] = cand
pieces.remove(cand)
self.tiles[cand] = tile
def _assemble_image(self):
im_col = 0
for col in range(self.puzzle_dim):
im_row = 0
for row in range(self.puzzle_dim):
tile = np.copy(self.tiles[self.solution[row, col]])[
1:-1, 1:-1
]
self.image[
im_row : im_row + tile.shape[0],
im_col : im_col + tile.shape[1],
] = tile
im_row += tile.shape[0]
im_col += tile.shape[1]
def _mark_sea_monsters(self):
orientations = list(product(ROTATIONS, [True, False]))
n_monsters = 0
image = np.copy(self.image)
for rot, flip in orientations:
found_pattern = False
image = rotate(self.image, rot, flip)
n_monsters = 0
for row in range(image.shape[0] - self.pattern.shape[0] + 1):
for col in range(
image.shape[1] - self.pattern.shape[1] + 1
):
part = image[
row : row + self.pattern.shape[0],
col : col + self.pattern.shape[1],
]
if part[self.pattern].all():
part[self.pattern] = 2
n_monsters += 1
found_pattern = True
if found_pattern:
break
self.image = image
return n_monsters
def print_image(self, image):
line = []
char = {1: "#", 0: ".", 2: "O"}
for row in range(image.shape[0]):
s = ""
for col in range(image.shape[1]):
s += char[image[row, col]]
line.append(s)
line.append("")
output = "\n".join(line)
print(output)
def find_corner_product(self):
corner_prod = 1
for corner in self.corners:
corner_prod *= corner
return corner_prod
def find_habitat_roughness(self):
return np.sum(self.image == 1)
def main():
data_folder = Path(__file__).parent.resolve()
data = data_folder.joinpath("input.txt").read_text()
pattern = data_folder.joinpath("seamonster.txt").read_text()
t = Image(data, pattern)
print("Part 1")
print("The product of the IDs of the four corner ")
print(f"tiles is {t.corner_prod}")
print()
print("Part 2")
print(f"The habitat's roughness is {t.find_habitat_roughness()}")
print()
print(f"Final image with {t.n_monsters} marked sea monsters:")
t.print_image(t.image)
if __name__ == "__main__":
main()
| eirikhoe/advent-of-code | 2020/20/sol.py | sol.py | py | 8,548 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.copy",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "numpy.round",
"line_numbe... |
43767212162 | import re
import datetime
from .dean import Dean
from bs4 import BeautifulSoup
from freeclass.models import Classroom
from course.models import Course
from main import get_now_week
class FreeClassroom:
session = Dean().dean_session
week = get_now_week()
building_dict = {
"1": "思源楼",
"2": "思源西楼",
"3": "思源东楼",
"4": "第九教学楼",
"5": "第八教学楼",
"6": "第五教学楼",
"7": "第二教学楼",
"8": "东区一教",
"9": "东区二教",
"10": "东教三楼",
"11": "逸夫教学楼",
"12": "机械楼",
"13": "第十七号教学楼",
"90": "科技大厦",
"91": "天佑会堂",
"92": "工程素质",
"93": "综合实验楼",
"94": "机械实验馆",
"100": "学生活动服务中心"
}
status_dict = {
'#fff': '空闲',
'#394ed6': '考试占用',
'#e46868': '排课占用',
'#77bf6d': '预约',
'#d8cc56': '个人占用'
}
def _get_building_room_info(self, building_no, page=1):
url = 'https://dean.bjtu.edu.cn/classroom/timeholdresult/room_stat/'
queries = {
'zxjxjhh': '2019-2020-1-2',
'zc': self.week,
'jxlh': building_no,
'jash': '',
'submit': '查询+',
'has_advance_query': '',
'page': page,
'perpage': 200
}
building = self.building_dict[str(building_no)]
content = self.session.get(url, params=queries).content.decode()
table = BeautifulSoup(content, 'html5lib').find('table', {'class': 'table table-bordered'})
weekday = datetime.datetime.now().isoweekday()
classroom_list = table.find_all('tr')[2:]
for classroom in classroom_list:
course_list = classroom.find_all('td', {'title': re.compile(r'星期{}'.format(weekday))})
name = classroom.td.text.split()[0]
room = Classroom.objects.get_or_create(name=name, building=building)[0]
for course in course_list:
no = int(course.get('title')[-2])
color = course.get('style').replace('background-color: ', '')
status = self.status_dict[color]
if status != '空闲':
print(self.week, building, name, no, weekday)
course = Course.objects.filter(week__regex=r',{week},|^{week},|,{week}$'.format(week=self.week),
building=building, classroom=name, day_no=no,
day=weekday)
if course:
try:
teacher = course[0].teacher.name
except:
teacher = ''
status = course[0].name + '-' + teacher
room.__setattr__('class{}'.format(no), status)
print(name, no, status)
room.save()
def update(self):
for building_no in self.building_dict.keys():
self._get_building_room_info(building_no=building_no)
def run():
FreeClassroom().update()
| jlytwhx/bjtubox_python | utils/freeclass.py | freeclass.py | py | 3,277 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "dean.Dean",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "main.get_now_week",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",... |
21642185259 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
# In[2]:
train=pd.read_csv('sales_train.csv')
items=pd.read_csv('items.csv')
shops=pd.read_csv('shops.csv')
cats=pd.read_csv('item_categories.csv')
# In[3]:
sns.boxplot(train.item_cnt_day)
# In[4]:
train = train[train.item_cnt_day < 1000]
# In[5]:
sns.boxplot(train.item_price)
# In[6]:
train = train[train.item_price < 300000]
# In[7]:
train = train[train.item_price > 0].reset_index(drop = True)
train.loc[train.item_cnt_day < 1, "item_cnt_day"] = 0
# In[8]:
train.loc[train.shop_id == 0, 'shop_id'] = 57
train.loc[train.shop_id == 1, 'shop_id'] = 58
train.loc[train.shop_id == 10, 'shop_id'] = 11
train.loc[train.shop_id == 40, 'shop_id'] = 39
# In[9]:
shops.loc[ shops.shop_name == 'Сергиев Посад ТЦ "7Я"',"shop_name" ] = 'СергиевПосад ТЦ "7Я"'
shops["city"] = shops.shop_name.str.split(" ").map( lambda x: x[0] )
shops["category"] = shops.shop_name.str.split(" ").map( lambda x: x[1] )
shops.loc[shops.city == "!Якутск", "city"] = "Якутск"
# In[10]:
category = []
for cat in shops.category.unique():
if len(shops[shops.category == cat]) >= 5:
category.append(cat)
shops.category = shops.category.apply( lambda x: x if (x in category) else "other" )
# In[11]:
from sklearn.preprocessing import LabelEncoder
shops["shop_category"] = LabelEncoder().fit_transform( shops.category )
shops["shop_city"] = LabelEncoder().fit_transform( shops.city )
shops = shops[["shop_id", "shop_category", "shop_city"]]
shops=shops.drop([0,1,10,40]).reset_index(drop=True)
# In[12]:
cats["type_code"] = cats.item_category_name.apply( lambda x: x.split(" ")[0] ).astype(str)
cats.loc[ (cats.type_code == "Игровые")| (cats.type_code == "Аксессуары"), "category" ] = "Игры"
# In[13]:
category = []
for cat in cats.type_code.unique():
if len(cats[cats.type_code == cat]) >= 5:
category.append( cat )
cats.type_code = cats.type_code.apply(lambda x: x if (x in category) else "etc")
# In[14]:
cats.type_code = LabelEncoder().fit_transform(cats.type_code)
cats["split"] = cats.item_category_name.apply(lambda x: x.split("-"))
cats["subtype"] = cats.split.apply(lambda x: x[1].strip() if len(x) > 1 else x[0].strip())
cats["subtype_code"] = LabelEncoder().fit_transform( cats["subtype"] )
cats = cats[["item_category_id", "subtype_code", "type_code"]]
# In[15]:
import re
def name_correction(x):
x = x.lower() # all letters lower case
x = x.partition('[')[0] # partition by square brackets
x = x.partition('(')[0] # partition by curly brackets
x = re.sub('[^A-Za-z0-9А-Яа-я]+', ' ', x) # remove special characters
x = x.replace(' ', ' ') # replace double spaces with single spaces
x = x.strip() # remove leading and trailing white space
return x
# In[16]:
items["name1"], items["name2"] = items.item_name.str.split("[", 1).str
items["name1"], items["name3"] = items.item_name.str.split("(", 1).str
items["name2"] = items.name2.str.replace('[^A-Za-z0-9А-Яа-я]+', " ").str.lower()
items["name3"] = items.name3.str.replace('[^A-Za-z0-9А-Яа-я]+', " ").str.lower()
items = items.fillna('0')
items["item_name"] = items["item_name"].apply(lambda x: name_correction(x))
items.name2 = items.name2.apply( lambda x: x[:-1] if x !="0" else "0")
# In[17]:
items["type"] = items.name2.apply(lambda x: x[0:8] if x.split(" ")[0] == "xbox" else x.split(" ")[0] )
items.loc[(items.type == "x360") | (items.type == "xbox360") | (items.type == "xbox 360") ,"type"] = "xbox 360"
items.loc[ items.type == "", "type"] = "mac"
items.type = items.type.apply( lambda x: x.replace(" ", "") )
items.loc[ (items.type == 'pc' )| (items.type == 'pс') | (items.type == "pc"), "type" ] = "pc"
items.loc[ items.type == 'рs3' , "type"] = "ps3"
# In[18]:
group_sum = items.groupby(["type"]).agg({"item_id": "count"})
group_sum = group_sum.reset_index()
drop_cols = []
for cat in group_sum.type.unique():
if group_sum.loc[(group_sum.type == cat), "item_id"].values[0] <40:
drop_cols.append(cat)
items.name2 = items.name2.apply( lambda x: "other" if (x in drop_cols) else x )
items = items.drop(["type"], axis = 1)
# In[19]:
items.name2 = LabelEncoder().fit_transform(items.name2)
items.name3 = LabelEncoder().fit_transform(items.name3)
items.drop(["item_name", "name1"],axis = 1, inplace= True)
# In[20]:
price=train[['item_id','item_price']]
price=price.groupby(['item_id']).mean()
price=price.reset_index()
items=pd.merge(items,price,on=['item_id'],how='inner')
items=pd.merge(items,cats,on=['item_category_id'],how='inner')
# In[21]:
train=train.drop('date',axis=1)
train=train.groupby(['date_block_num', 'shop_id', 'item_id']).sum()
train.drop('item_price',axis=1,inplace=True)
train=train.reset_index()
train=train.rename(columns = {'item_cnt_day' : 'item_cnt_month'})
# In[22]:
from itertools import product
matrix = []
cols = ["date_block_num", "shop_id", "item_id"]
for i in range(34):
sales = train[train.date_block_num == i]
matrix.append( np.array(list( product( [i], sales.shop_id.unique(), sales.item_id.unique() ) ), dtype = np.int16) )
matrix = pd.DataFrame( np.vstack(matrix), columns = cols )
matrix["date_block_num"] = matrix["date_block_num"].astype(np.int8)
matrix["shop_id"] = matrix["shop_id"].astype(np.int8)
matrix["item_id"] = matrix["item_id"].astype(np.int16)
matrix.sort_values( cols, inplace = True )
train=pd.merge(matrix,train,on=["date_block_num", "shop_id", "item_id"],how='left')
train=train.fillna(0)
train=pd.merge(train,items,on=['item_id'],how='inner')
train=pd.merge(train,shops,on=['shop_id'],how='inner')
train.sort_values(["date_block_num", "shop_id", "item_id"], inplace = True )
train=train.reset_index(drop=True)
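# Added illustration (toy values, not the real data): the grid built above is the
# cartesian product month x shops x items, so shop/item pairs with no sales in a
# month still get a row and end up with item_cnt_month = 0 after the left merge
# and fillna(0). For one month with shops {25, 31} and items {1001, 1002}:
_demo_grid = pd.DataFrame(
    np.array(list(product([0], [25, 31], [1001, 1002])), dtype=np.int16),
    columns=["date_block_num", "shop_id", "item_id"],
)
# _demo_grid has 4 rows: (0,25,1001), (0,25,1002), (0,31,1001), (0,31,1002)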
# In[23]:
train["month"] = train["date_block_num"] % 12
days = pd.Series([31,28,31,30,31,30,31,31,30,31,30,31])
train["days"] = train["month"].map(days).astype(np.int8)
train["year"] = 2013+(train["date_block_num"]/12).astype(np.int16)
train['month']=train['month']+1
# In[24]:
train=train[['date_block_num', 'shop_id', 'item_id', 'shop_category', 'shop_city',
'item_category_id', 'name2', 'name3', 'subtype_code', 'type_code', 'days', 'month', 'year',
'item_price', 'item_cnt_month']]
items.to_csv('items_data.csv')
shops.to_csv('shops_data.csv')
# In[25]:
X_train = train[train.date_block_num < 33].drop(['item_cnt_month'], axis=1)
Y_train = train[train.date_block_num < 33]['item_cnt_month']
X_valid = train[train.date_block_num == 33].drop(['item_cnt_month'], axis=1)
Y_valid = train[train.date_block_num == 33]['item_cnt_month']
# In[26]:
Y_train = Y_train.clip(0, 20)
Y_valid = Y_valid.clip(0, 20)
# In[27]:
from xgboost import XGBRegressor
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 12, 4
# In[28]:
model = XGBRegressor(
max_depth=10,
n_estimators=1000,
min_child_weight=0.5,
colsample_bytree=0.8,
subsample=0.8,
eta=0.1,
# tree_method='gpu_hist',
seed=42)
model.fit(
X_train,
Y_train,
eval_metric="rmse",
eval_set=[(X_train, Y_train), (X_valid, Y_valid)],
verbose=True,
early_stopping_rounds = 20)
pickle.dump(model,open('model.pkl','wb'))
model=pickle.load(open('model.pkl','rb'))
| rishabhagarwal8979/Predicting-Future-Sales-Web-API | model.py | model.py | py | 7,422 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
... |
5823107738 | from PythonPrograms.Oblig2_A_star.Vertex import Vertex
from PythonPrograms.Oblig2_A_star.Edge import Edge
from PythonPrograms.Oblig2_A_star.Graph import Graph
import pygame as pg
'''
The AStar class inherits the Graph class
'''
class AStar(Graph):
"""
#
# delay: seconds between each iteration when visualizing is turned on
# visual: Turns pygame visualization on/off
#
"""
def __init__(self, delay=0.001, visual=True):
super().__init__()
self.pygame = visual
self.background = None
self.LIGHTGREY = (180, 180, 180)
self.DARKGREEN = (0, 255, 0)
self.PINK = (255, 200, 200)
self.GREEN = (200, 255, 200)
self.WHITE = (245, 245, 245)
self.BLACK = (0, 0, 0)
self.RED = (255, 0, 0)
self.BLUE = (0, 0, 255)
self.delay = delay
# walls define obstacles in grid, e.g. walls, boxes etc, by defining each position in grid that is part of an
# obstacle
self.walls = []
'''
#
# Defines obstacles provided in removevertecies from the graph by removing all edges
# to vertecies that is defined as an obstacle
#
'''
def removeVertecies(self, removevertecies=[]):
err_v = []
err_e = []
# Removing the vertecies
for r in removevertecies:
self.walls.append(self.vertecies[r])
self.vertecies.pop(r)
# Checking the edges ...
for v in self.vertecies:
vtex = self.vertecies[v]
for edge in vtex.adjecent:
vertex = edge.vertex
if vertex.name not in self.vertecies:
err_v.append(vtex)
err_e.append(edge)
for i in range(0, len(err_v)):
err_v[i].adjecent.remove(err_e[i])
return removevertecies
'''
#
# Read in the list of obstacles (defined by tag "remove"), the startnode from which traversal is defined,
# and the targetnode
#
'''
def readLimitations(self, filename):
import pandas as pd
from collections import namedtuple
columnnames = ['item', 'valuelist']
df = pd.read_csv(filename, error_bad_lines=False,
encoding='latin-1', warn_bad_lines=False,
names=columnnames, header=None, sep=':')
for i in range(0, 3):
if df.iat[i, 0] == 'startvertex':
startVertex = df.iat[i, 1]
elif df.iat[i, 0] == 'targetvertex':
targetVertex = df.iat[i, 1]
elif df.iat[i, 0] == 'remove':
removed = self.removeVertecies(df.iat[i, 1].split(';'))
return startVertex, targetVertex, removed
'''
# Initialize pygame visualization, if visualization has been defined
# Creates a dynamic visual grid according to the number of columns and rows
# read/defined during initialization. The visual limitations are defined by 1000x1000 pixels
'''
def initPygame(self):
if self.pygame:
xmin = ymin = xmax = ymax = 0
for v in self.vertecies:
vertex = self.vertecies[v]
x, y = vertex.position()
if x < xmin:
xmin = x
elif x > xmax:
xmax = x
if y < ymin:
ymin = y
elif y > ymax:
ymax = y
pg.init()
w, h = 1000, 1000
self.xboxsize = int(w / ((xmax + 1) - xmin))
self.yboxsize = int(w / ((ymax + 1) - ymin))
w = self.xboxsize * ((int(w / self.xboxsize)))
h = self.yboxsize * ((int(h / self.yboxsize)))
self.background = pg.display.set_mode((w, h))
background = self.background
self.clock = pg.time.Clock()
self.clock.tick()
for c in range(0, (int(w / self.xboxsize)) + 1):
for l in range(0, (int(h / self.yboxsize)) + 1):
pg.draw.rect(background, self.WHITE,
(c * self.xboxsize, l * self.yboxsize, self.xboxsize, self.yboxsize))
pg.draw.line(background, self.BLACK, (c * self.xboxsize, l * self.yboxsize),
(c * self.xboxsize + self.xboxsize, l * self.yboxsize))
pg.draw.line(background, self.BLACK, (c * self.xboxsize, l * self.yboxsize),
(c * self.xboxsize, l * self.yboxsize + self.yboxsize))
pg.draw.line(background, self.BLACK, (c * self.xboxsize + self.xboxsize, l * self.yboxsize),
(c * self.xboxsize + self.xboxsize, l * self.yboxsize + self.yboxsize))
pg.draw.line(background, self.BLACK, (c * self.xboxsize, l * self.yboxsize + self.yboxsize),
(c * self.xboxsize + self.xboxsize, l * self.yboxsize + self.yboxsize))
for wall in self.walls:
self.pygameState(wall, self.BLACK)
pg.display.flip()
'''
# Draw a box, representing the current vertex in the position defined by the name of the vertex.
# The color-parameter defines the (R,G,B)-value of the box
# If no visualization is used (self.pygame == False), no box is visualized
'''
def pygameState(self, current, color):
import time
if self.pygame:
background = self.background
x, y = current.position()
pg.draw.rect(background, color, (x * self.xboxsize, y * self.yboxsize, self.xboxsize, self.yboxsize))
pg.draw.line(background, self.BLACK, (x * self.xboxsize, y * self.yboxsize),
(x * self.xboxsize + self.xboxsize, y * self.yboxsize))
pg.draw.line(background, self.BLACK, (x * self.xboxsize, y * self.yboxsize),
(x * self.xboxsize, y * self.yboxsize + self.yboxsize))
pg.draw.line(background, self.BLACK, (x * self.xboxsize + self.xboxsize, y * self.yboxsize),
(x * self.xboxsize + self.xboxsize, y * self.yboxsize + self.yboxsize))
pg.draw.line(background, self.BLACK, (x * self.xboxsize, y * self.yboxsize + self.yboxsize),
(x * self.xboxsize + self.xboxsize, y * self.yboxsize + self.yboxsize))
if color not in [self.BLUE, self.RED, self.BLACK]:
time.sleep(self.delay)
pass
pg.display.flip()
'''
#
# Defining the heuristics used to calculate the estimated distance between the node being handled (startVertexName),
# and the targetnode (targetVertexName)
# Please note that the name of the vertecies are passed, not the vertex itself. The name is used for lookup
# in the list of vertecies in the graph.
# Further, the name of the vertex has the syntax: xNNyMM, where NN and MM are numerical and indicates column and row.
# E.g. x4y15 means column 4 of row 15.
# By identifying the column and row of a vertex, the estimated shorted distance between two vertecies may be
# calculated using the Manhatten distance
#
'''
def heuristics(self, startVertexName=None, targetVertexName=None):
if not startVertexName or not targetVertexName:
raise KeyError("VertexLookup need the names of the Vertecies addressed.")
if startVertexName not in self.vertecies:
raise KeyError("Node/Vertex defined as FROM-vertex is not present in graph")
if targetVertexName not in self.vertecies:
raise KeyError("Node/Vertex defined as TO-vertex is not present in graph")
xstart, ystart = self.vertecies[startVertexName].position()
xend, yend = self.vertecies[targetVertexName].position()
#
# Manhatten heuristics
#
dx = abs(xstart - xend)
dy = abs(ystart - yend)
D = 1
return D * (dx + dy)
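'''
#
# Added worked example (illustration only): with D = 1, startVertexName = "x4y15"
# and targetVertexName = "x7y11", Vertex.position() yields (4, 15) and (7, 11), so
# the estimate is D * (|4 - 7| + |15 - 11|) = 1 * (3 + 4) = 7, i.e. the minimum
# number of grid steps if no obstacle were in the way. Never overestimating the
# true cost is what makes this heuristic admissible on a 4-connected grid.
#
'''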
'''
#
# The Dijkstra's algorithm has been adapted to support visualization as defined by the graph
# It has further been adopted to present a targetVetrx even though Dijkstra's has no use of
# it during execution. The tragetVertex is used only for visualization purposes.
'''
def Dijkstra(self, startVertexName=None, targetVertexName=None):
self.initPygame()
# Check to see that startvertex is in Graph
if startVertexName not in self.vertecies:
raise KeyError("Start node not present in graph")
# Reset visited and previous pointer before running algorithm
vertex = self.vertecies[startVertexName]
vertex.distance = distance = weight = 0
previous_node = None
startNode = self.vertecies[startVertexName]
toNode = self.vertecies[targetVertexName]
#
# Create priority queue, priority = current weight on edge ...
# No duplicate edges in queue allowed
#
edge = Edge(0, vertex)
from queue import PriorityQueue
priqueue = PriorityQueue()
# Defines enqueue/dequeue methods on priqueue
def enqueue(data):
priqueue.put(data)
def dequeue():
return priqueue.get()
enqueue(edge)
while not priqueue.empty():
# Get the element with lowest priority (i.e. weight on edge)
edge = dequeue()
eyeball = edge.vertex
self.pygameState(eyeball, self.GREEN)
self.pygameState(startNode, self.BLUE)
self.pygameState(toNode, self.RED)
# If not visited previously, we need to define the distance
if not eyeball.known:
eyeball.distance = distance
eyeball.previous = previous_node
eyeball.known = True
# If the vertex pointed to by the edge has an adjecency list, we need to iterate on it
for adjecentedge in eyeball.adjecent:
if not adjecentedge.vertex.known:
adjecentedge.vertex.distance = eyeball.distance + adjecentedge.weight
adjecentedge.vertex.previous = eyeball
adjecentedge.vertex.known = True
enqueue(adjecentedge)
self.pygameState(adjecentedge.vertex, self.PINK)
else:
if adjecentedge.vertex.distance > eyeball.distance + adjecentedge.weight:
adjecentedge.vertex.distance = eyeball.distance + adjecentedge.weight
adjecentedge.vertex.previous = eyeball
enqueue(adjecentedge)
self.pygameState(eyeball, self.LIGHTGREY)
for n in self.getPath(startVertexName, targetVertexName):
self.pygameState(n, self.DARKGREEN)
return self.getPath(startVertexName, targetVertexName)
'''
###############################################################################
#
# def AStarSearch(self, startVertexName = None, targetVertexName = None)
#
# Implement your code below.
# Please note that no other parts of this code or provided code should be altered
#
###############################################################################
'''
"""
#################################################
#
# Graded Mandatory Assignment #2, A* Pathfinding
#
# Halil Ibrahim Keser
#
#
# DOCUMENTATION:
# The code documentation is written as comments in the code itself.
#
# This code expands fewer nodes than the reference solution and finds a shorter path.
#
# The implementation idea and pseudocode were taken from two websites plus the lecture:
#
# http://theory.stanford.edu/~amitp/GameProgramming/AStarComparison.html
# https://medium.com/@nicholas.w.swift/easy-a-star-pathfinding-7e6689c7f7b2
#
##################################################
"""
def AStarSearch(self, startVertexName=None, targetVertexName=None):
"""Returns vertex objects as a path from the given start to the given end in the given graph"""
"""
The Initialization part
"""
self.initPygame() # initialize the game
# Check to see that startvertex is in Grap.....................................................
if startVertexName not in self.vertecies:
raise KeyError("Start node not present in graph")
# Reset visited and previous pointer before running algorithm..................................
# Create start and end node as on Dijkstras funskjon
vertex = self.vertecies[startVertexName]
vertex.distance = 0
start_node = self.vertecies[startVertexName]
toNode = self.vertecies[targetVertexName]
# Initialize open list, no need for closed list..................................................
# Open_list as PriorityQueue for faster iteration of Big data
from queue import PriorityQueue
edge = Edge(0, vertex) # Initialize the Edge with 0 weight
open_list = PriorityQueue()
closed_list = []
def enqueue_open_list(data):
open_list.put(data)
def dequeue_open_list():
return open_list.get()
# Add the start node
open_list.put((edge.vertex.g, edge.vertex))
"""
The looping part, main part
"""
# Loop until you find the end....................................................................
while not open_list.empty():
# Get the current node, current node here is the eyeball..................
# Removing current from open list
edge = dequeue_open_list()
# Get the current index from edge, it is the first index of edge
# current node that getting treated now
# it contains edges of the current node.
eyeball = edge[1]
# Update the colours of eyeball, startnode and endnode...................
self.pygameState(eyeball, self.GREEN)
self.pygameState(start_node, self.BLUE)
self.pygameState(toNode, self.RED)
# Found the goal!.........................................................
# if currentNode (eyeball) is the goal,
# continue and make the eyeball.known as True
# The path will be created later at the button. We have a function for it.
if eyeball == toNode:
break
eyeball.known = True
closed_list.append(eyeball)
# Generate children. The children is the adjacents of the node ...........
# If the vertex pointed to by the edge has an adjacent list,
# we need to iterate on it
# Loop through children
for adjecentedge in eyeball.adjecent:
# Loop through those we have not been through, and not known
if not adjecentedge.vertex.known: # not known, not in open list
if adjecentedge.vertex not in closed_list: # not in closed_list
# Initialize the previous children
adjecentedge.vertex.previous = eyeball
# Create the f, g, and h values
# A - algorithm: F(n) = G(n) + H(n)
# We have a function for heuristics. We will use it to update vertex.h
adjecentedge.vertex.g = eyeball.g + adjecentedge.weight
adjecentedge.vertex.h = self.heuristics(adjecentedge.vertex.name, toNode.name)
adjecentedge.vertex.f = adjecentedge.vertex.g + adjecentedge.vertex.h
# Add the edge.node to open list as tuple
open_list.put((adjecentedge.vertex.f, adjecentedge.vertex))
adjecentedge.vertex.known = True
# Update the colours of known children.............................
self.pygameState(adjecentedge.vertex, self.PINK)
else:
# Update
# If edge.node weight greater than current.node weight + edge.node weight
if adjecentedge.vertex.g > (eyeball.g + adjecentedge.weight):
adjecentedge.vertex.previous = eyeball
adjecentedge.vertex.g = eyeball.g + adjecentedge.weight
adjecentedge.vertex.f = adjecentedge.vertex.g + adjecentedge.vertex.h
# Back-propagation
if not adjecentedge.vertex.known:
if adjecentedge.vertex in closed_list:
closed_list.remove(adjecentedge.vertex)
open_list.put((adjecentedge.vertex.f, adjecentedge.vertex))
# Update the colours of known vertexes.....................................
self.pygameState(eyeball, self.LIGHTGREY)
# Get the path and, update the colours for path.....................................................
# Return reversed path.
# We will use the getPath function from the Graph class
for n in self.getPath(startVertexName, targetVertexName):
self.pygameState(n, self.DARKGREEN)
return self.getPath(startVertexName, targetVertexName)
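'''
#
# Added module-level sketch (illustration only, not used by the assignment code):
# the same f = g + h bookkeeping on a plain coordinate grid, written with heapq
# instead of the Vertex/Edge classes. The 4x4 grid, walls, start and goal below
# are made-up toy values.
#
'''
def _astar_grid_sketch(start=(0, 0), goal=(3, 3), walls=frozenset({(1, 1), (2, 1), (2, 2)})):
    import heapq
    def h(p):
        # Manhattan heuristic, same idea as AStar.heuristics above
        return abs(p[0] - goal[0]) + abs(p[1] - goal[1])
    open_heap = [(h(start), 0, start)]      # entries are (f, g, node)
    g_cost = {start: 0}
    previous = {start: None}
    while open_heap:
        f, g, node = heapq.heappop(open_heap)
        if node == goal:
            path = []
            while node is not None:         # walk the previous-pointers back
                path.append(node)
                node = previous[node]
            return path[::-1]
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nxt = (node[0] + dx, node[1] + dy)
            if nxt in walls or not (0 <= nxt[0] <= 3 and 0 <= nxt[1] <= 3):
                continue
            if g + 1 < g_cost.get(nxt, float("inf")):
                g_cost[nxt] = g + 1
                previous[nxt] = node
                heapq.heappush(open_heap, (g + 1 + h(nxt), g + 1, nxt))
    return None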
astar = AStar(delay=0, visual=True)
# astar.readFile('minigraf.txt')
# startVertexName, targetVertexName, removed = astar.readLimitations('minigraf_xtras.txt')
# astar.readFile('astjernegraf.txt')
# startVertexName, targetVertexName, removed = astar.readLimitations('xtras.txt')
astar.readFile('biggraph.txt')
startVertexName, targetVertexName, removed = astar.readLimitations('biggraph_xtras.txt')
# astar.readFile('AStarObligGraf.txt')
# startVertexName, targetVertexName, removed = astar.readLimitations('AStarObligGraf_xtras.txt')
# astar.Dijkstra(startVertexName, targetVertexName)
astar.AStarSearch(startVertexName, targetVertexName)
if astar.pygame:
from pygame.locals import *
while astar.pygame:
for events in pg.event.get():
if events.type == QUIT:
exit(0)
pg.quit()
else:
print(astar.getPathAsString(startVertexName, targetVertexName))
| HalilIbrahimKeser/AStarAndKalmanPython | PythonPrograms/Oblig2_A_star/AStar.py | AStar.py | py | 18,462 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PythonPrograms.Oblig2_A_star.Graph.Graph",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "pygame.init",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": ... |
9726738798 | from flask import Response, Flask
from time import sleep
import random
import prometheus_client
from prometheus_client import Counter, Histogram
app = Flask('prometheus-app')
REQUESTS = Counter(
'requests', 'Application Request Count',
['endpoint']
)
TIMER = Histogram(
'slow', 'Slow Requests',
['endpoint']
)
@app.route('/metrics/')
def metrics():
return Response(
prometheus_client.generate_latest(),
mimetype='text/plain; version=0.0.4; charset=utf-8'
)
@app.route('/')
def index():
REQUESTS.labels('/').inc()
return '<h1> Przykładowa aplikacja Flask z obsługą systemu Prometheus </h1>'
@app.route('/database/')
def database():
with TIMER.labels('/database').time():
sleep(random.uniform(1, 3))
return '<h1>Zakończono obsługę kosztownej operacji w bazie danych</h1>'
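# Added usage sketch (illustration only): how the /metrics/ endpoint above can be
# checked by hand. The host/port are assumptions (Flask defaults), and depending
# on the prometheus_client version the counter is exposed as "requests" or
# "requests_total" in the plain-text output.
def check_metrics_by_hand(base_url='http://127.0.0.1:5000'):
    import requests
    text = requests.get(base_url + '/metrics/').text
    return [line for line in text.splitlines()
            if line.startswith('requests') or line.startswith('slow')]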
| leonekwolfik/python_devops | python-dla-devops-naucz-sie-bezlitosnie-skutecznej-automatyzacji-noah-gift-kennedy-behrman-alfredo-deza-grig-ghe/src/roz07-Monitoring/web.py | web.py | py | 847 | python | pl | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "prometheus_client.Counter",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "prometheus_client.Histogram",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask... |
28013600994 | import citations as cit
import json
from flask import Flask, request
from flask_cors import CORS
app = Flask(__name__)
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
years = [ 2015, 2016, 2017, 2018, 2019 ]
class_names = [
"-",
"A",
"B",
"C",
"D",
"E",
]
sectors = [
"MAT01",
"MAT02",
"MAT03",
"MAT05",
"MAT06",
"MAT07",
"MAT08"
]
classifications = {
"mcq": {
"db": "scopus",
"folder": "MCQ-SCOPUS",
"sheet": "article_MCQ"
},
"sjr": {
"db": "scopus",
"folder": "SJR-SNIP",
"sheet": "article_SJR"
},
"snip": {
"db": "scopus",
"folder": "SJR-SNIP",
"sheet": "article_SNIP"
},
"mcq-wos": {
"db": "wos",
"folder": "MCQ-WOS",
"sheet": "article_MCQ"
}
}
sectors_db = {}
for sector in sectors:
sectors_db[sector] = {}
for (cl, classification) in classifications.items():
print("Loading %s / %s ..." % (sector, cl), end = '')
sectors_db[sector][cl] = { year: db for (year, db) in zip(
years,
map(lambda x : cit.get_journals_classification(x, classification["db"], sector, classification["folder"], classification["sheet"]), years)
) }
print(" done.")
# We also build a set with all Journal names
print("Building index of Journal names ... ", end = '')
journal_names = set()
for sector in sectors:
for classification in classifications:
for year in years:
for name in sectors_db[sector][classification][year].keys():
journal_names.add(name)
print(" done.")
@app.route("/")
def index():
return "Hello"
@app.route("/api/v1/search-journal", methods = [ "POST" ])
def search_journal():
d = json.loads(request.data)
name = d["journal"].lower()
print("Searching %s" % name)
print(journal_names)
matching_j = list(filter(lambda x : d["journal"].lower() in x, journal_names))
return json.dumps(matching_j)
@app.route("/api/v1/journals", methods = [ "GET" ])
def journals():
return json.dumps(list(journal_names))
@app.route("/api/v1/classify", methods = [ "POST" ])
def classify():
d = json.loads(request.data)
journal = d["journal"].lower()
scopus_citations = d["scopus-citations"]
wos_citations = d["wos-citations"]
year = int(d["year"])
sector = d["sector"].replace("/", "")
cl_mcq = cit.get_classification(journal, scopus_citations, sectors_db[sector]["mcq"][year])
cl_sjr = cit.get_classification(journal, scopus_citations, sectors_db[sector]["sjr"][year])
cl_snip = cit.get_classification(journal, scopus_citations, sectors_db[sector]["snip"][year])
cl_wos = cit.get_classification(journal, wos_citations, sectors_db[sector]["mcq-wos"][year])
return json.dumps({
"mcq": class_names[cl_mcq+1],
"sjr": class_names[cl_sjr+1],
"snip": class_names[cl_snip+1],
"mcq-wos": class_names[cl_wos+1]
})
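# Added client sketch (illustration only; the journal, citation counts and port
# below are made up): one way a caller could exercise the /api/v1/classify
# endpoint defined above.
def example_classify_request(base_url="http://localhost:5000"):
    import requests
    payload = {
        "journal": "some journal name",
        "scopus-citations": 12,
        "wos-citations": 9,
        "year": "2018",
        "sector": "MAT/08",  # the endpoint strips the "/" before lookup
    }
    r = requests.post(base_url + "/api/v1/classify", data=json.dumps(payload))
    return r.json()  # e.g. {"mcq": "A", "sjr": "B", "snip": "A", "mcq-wos": "B"}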
| robol/citation-count | citationserver.py | citationserver.py | py | 2,855 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "citations.get_journals_classification",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "json.... |
16196933934 | import time
import random
import zmq
context = zmq.Context()
zmq_socket = context.socket(zmq.PUSH)
zmq_socket.connect("tcp://127.0.0.1:5557")
# Start your result manager and workers before you start your producers
consumer_id = random.randrange(1,10005)
print("I am consumer #%s" % (consumer_id))
for num in range(20000):
work_message = { 'num' : num }
zmq_socket.send_json({'id': consumer_id})
time.sleep(1) | telminov/my_notes_for_various-_programlanguages | python/сокеты/ZMQ с очередями/Zmq/messaging_pattern/3.5/push_pull_(many_pushed)/push_.py | push_.py | py | 423 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "zmq.Context",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "zmq.PUSH",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "random.randrange",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_numbe... |
2759222029 | # -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Author: chenhao
# Date: 2020-09-13
# Description:
#-------------------------------------------------------------------------------
import torch
import torch.nn as nn
from torch.nn import functional as F
class textcnn(nn.Module):
def __init__(self, param):
# super(textcnn, self).__init__()
super().__init__()
vocab_size = param.vocab_size
kernel_num = param.kernel_num
kernel_size = param.kernel_size
embedding_dim = param.embedding_dim
class_num = param.class_num
dropout = param.dropout
self.embed = nn.Embedding(vocab_size, embedding_dim, padding_idx=1)
self.conv0 = nn.Conv2d(1, kernel_num, (kernel_size[0], embedding_dim))
self.conv1 = nn.Conv2d(1, kernel_num, (kernel_size[1], embedding_dim))
self.conv2 = nn.Conv2d(1, kernel_num, (kernel_size[2], embedding_dim))
self.dropout = nn.Dropout(dropout)
self.line = nn.Linear(len(kernel_size)*kernel_num, class_num)
@classmethod
def from_dict(cls, obj_dict, **kwargs):
return cls(**obj_dict, **kwargs)
@staticmethod
def conv_and_pool(x, conv):
# x: (batch, 1, sentence_length, )
x = conv(x)
# x: (batch, kernel_num, H_out, 1)
x = F.relu(x.squeeze(3))
# x: (batch, kernel_num, H_out)
x = F.max_pool1d(x, x.size(2)).squeeze(2)
# (batch, kernel_num)
return x
def forward(self, x):
x = self.embed(x)
x = x.unsqueeze(1)
out1 = self.conv_and_pool(x, self.conv0)
out2 = self.conv_and_pool(x, self.conv1)
out3 = self.conv_and_pool(x, self.conv2)
x = torch.cat([out1, out2, out3], 1)
x = self.dropout(x)
x= self.line(x)
# logit = F.softmax(x, dim=1)
logit = F.log_softmax(x, dim=1)  # take the log here directly because NLLLoss is applied at the end
return logit
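# Added shape walk-through (illustration only): a tiny forward pass with made-up
# hyper-parameters. The fields on `param` mirror exactly what __init__ reads above.
def _textcnn_shape_demo():
    from types import SimpleNamespace
    param = SimpleNamespace(vocab_size=100, kernel_num=2, kernel_size=[3, 4, 5],
                            embedding_dim=8, class_num=2, dropout=0.5)
    model = textcnn(param)
    x = torch.randint(0, 100, (4, 20))   # (batch=4, seq_len=20) token ids
    logit = model(x)                     # log-probabilities over the classes
    assert logit.shape == (4, 2)         # (batch, class_num)
    return logit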
| chenhaoenen/FCTest | nlp/ChnSentiCorp_htl_all/src/model/textcnn.py | textcnn.py | py | 2,010 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torch.nn.Embedding",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line... |
17885667606 | import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import Session, sessionmaker
import pdb
from src import config
from src.domain import model
from src.services_layer import unit_of_work
from src.adapters import repository
@pytest.fixture
def tear_down():
session_factory = postgres_db_session()
yield session_factory
clean_table(session_factory)
def clean_table(session_factory):
session = session_factory()
session.query(model.Title).delete()
session.commit()
def postgres_create_engine():
db_uri = config.get_postgres_uri()
return create_engine(db_uri)
def postgres_db_session():
session_factory = sessionmaker(postgres_create_engine())
return session_factory
def add_rows_to_db():
titles = [
model.Title(title="Marian"),
model.Title(title="W pustyni i w puszczy"),
model.Title(title="Armagedon"),
model.Title(title="Kanibal"),
]
engine = postgres_create_engine()
with Session(engine) as session:
for title in titles:
session.add(title)
session.commit()
session.close()
def test_uow_can_retrive_title_from_source(tear_down):
add_rows_to_db()
session_factory = tear_down
uow = unit_of_work.SqlAlchemyUnitOfWork(session_factory)
title_to_find = "Kanibal"
with uow:
finded_title = uow.repo.get(title_to_find)
assert finded_title == model.Title(title=title_to_find)
def test_uow_add_row_to_source(tear_down):
session_factory = tear_down
add_rows_to_db()
title_to_add = "Kosmos"
uow = unit_of_work.SqlAlchemyUnitOfWork(session_factory)
with uow:
uow.repo.add(title_to_add)
uow.commit()
session = session_factory()
repo = repository.SQLReopsitory(session)
rows = repo.get_all_rows()
assert rows == [
model.Title(title="Marian"),
model.Title(title="W pustyni i w puszczy"),
model.Title(title="Armagedon"),
model.Title(title="Kanibal"),
model.Title(title=title_to_add),
]
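# Added reference sketch (illustration only -- the real implementation in
# src.services_layer.unit_of_work may differ): the shape of unit of work these
# tests exercise is a context manager that opens a session, exposes a repository,
# commits explicitly and otherwise rolls back on exit.
class _UnitOfWorkSketch:
    def __init__(self, session_factory):
        self.session_factory = session_factory

    def __enter__(self):
        self.session = self.session_factory()
        self.repo = repository.SQLReopsitory(self.session)
        return self

    def __exit__(self, *exc_info):
        self.session.rollback()   # harmless after a successful commit
        self.session.close()

    def commit(self):
        self.session.commit()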
| yellowBunnyy/audio_teka | tests/test_uow.py | test_uow.py | py | 2,066 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pytest.fixture",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "src.domain.model.Title",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "src.domain.model",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "src.... |
22418570468 | #!/usr/bin/env python3
import argparse
import requests
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--url", type=str, default="http://0.0.0.0:8000/model")
parser.add_argument("--host", type=str)
parser.add_argument("--port", type=str)
parser.add_argument("-n", "--name", type=str, required=True)
parser.add_argument("-p", "--pre_trained_model_type", type=str, required=True)
args = parser.parse_args()
return args
def main():
args = parse_args()
if args.host is not None and args.port is not None:
url = "http://" + args.host + ":" + args.port + "/model"
else:
url = args.url
headers = {"accept": "application/json", "Content-Type": "application/json"}
params = {"name": args.name, "pre_trained_model_type": args.pre_trained_model_type}
response = requests.post(url=url, headers=headers, params=params)
print(response.status_code, response.reason)
print(response)
print(response.json())
if __name__ == "__main__":
main()
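# Added note (illustration only; <name> and <type> are placeholders for the CLI
# arguments): the request issued by main() is equivalent to
#   curl -X POST "http://0.0.0.0:8000/model?name=<name>&pre_trained_model_type=<type>" \
#        -H "accept: application/json" -H "Content-Type: application/json"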
| ktro2828/DenseMatching-API | example/load_model.py | load_model.py | py | 1,046 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 26,
"usage_type": "call"
}
] |
27537495283 | import torch
import torch.nn as nn
import torch.nn.functional as F
class CNN(nn.Module):
def __init__(self, args, data, vectors):
super(CNN, self).__init__()
self.args = args
self.word_emb = nn.Embedding(args.embed_num, args.embed_dim, padding_idx=1)
# initialize word embedding with pretrained word2vec
if args.mode != 'rand':
self.word_emb.weight.data.copy_(torch.from_numpy(vectors))
if args.mode in ('static', 'multichannel'):
self.word_emb.weight.requires_grad = False
if args.mode == 'multichannel':
self.word_emb_multi = nn.Embedding(args.embed_num, args.embed_dim, padding_idx=1)
self.word_emb_multi.weight.data.copy_(torch.from_numpy(vectors))
self.in_channels = 2
else:
self.in_channels = 1
# <unk> vectors is randomly initialized
nn.init.uniform_(self.word_emb.weight.data[0], -0.05, 0.05)
for filter_size in args.kernel_sizes:
conv = nn.Conv1d(self.in_channels, args.kernel_num, args.embed_dim * filter_size, stride=args.embed_dim)
setattr(self, 'conv_' + str(filter_size), conv)
self.fc = nn.Linear(len(args.kernel_sizes) * args.kernel_num, args.class_num)
def forward(self, batch):
x = torch.t(batch.text)
#x = torch.t(batch)
batch_size, seq_len = x.size()
conv_in = self.word_emb(x).view(batch_size, 1, -1)
if self.args.mode == 'multichannel':
#xembed = self.word_emb_multi(x)
conv_in_multi = self.word_emb_multi(x).view(batch_size, 1, -1)
conv_in = torch.cat((conv_in, conv_in_multi), 1)
conv_result = [
F.max_pool1d(F.relu(getattr(self, 'conv_' + str(filter_size))(conv_in)), seq_len - filter_size + 1).view(-1,
self.args.kernel_num)
for filter_size in self.args.kernel_sizes]
out = torch.cat(conv_result, 1)
out = F.dropout(out, p=self.args.dropout, training=self.training)
out = self.fc(out)
return out | UVa-NLP/HEDGE | cnn/cnn_model.py | cnn_model.py | py | 1,848 | python | en | code | 30 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torch.nn.Embedding",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_n... |
75168964833 | """This module includes dependencies."""
from fastapi import Header, status, HTTPException
from settings import INTERNAL_CONFIGS
def verify_api_key(
x_auth: str = Header(description="API key"),
x_from_name: str = Header(description="Name of services from where request is came"),
):
"""Check if user is logged in."""
api_config = INTERNAL_CONFIGS.get(x_from_name)
if not api_config:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail="X-From-Name header is invalid"
)
if api_config["api_key"] != x_auth:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail="X-Auth header is invalid"
)
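# Added usage sketch (illustration only; this router and endpoint are made up and
# are not part of the original service): the dependency above is normally attached
# with fastapi.Depends so every request on the router must carry valid
# X-Auth / X-From-Name headers.
from fastapi import APIRouter, Depends

example_router = APIRouter(dependencies=[Depends(verify_api_key)])


@example_router.get("/internal/ping")
async def example_ping():
    """Reachable only when both headers pass verify_api_key."""
    return {"status": "ok"}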
| Monoboard/monoboard.api.auth | src/dependencies.py | dependencies.py | py | 707 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "fastapi.Header",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "fastapi.Header",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "settings.INTERNAL_CONFIGS.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "settings.I... |
30262906364 | import time
from logging import getLogger
from typing import Any, Dict, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics import confusion_matrix, f1_score
from torch.utils.data import DataLoader
from .meter import AverageMeter, ProgressMeter
from .metric import calc_accuracy
__all__ = ["train", "evaluate"]
logger = getLogger(__name__)
def do_one_iteration(
sample: Dict[str, Any],
model: nn.Module,
criterion: Any,
device: str,
iter_type: str,
optimizer: Optional[optim.Optimizer] = None,
) -> Tuple[int, float, float, np.ndarray, np.ndarray]:
if iter_type not in ["train", "evaluate"]:
message = "iter_type must be either 'train' or 'evaluate'."
logger.error(message)
raise ValueError(message)
if iter_type == "train" and optimizer is None:
message = "optimizer must be set during training."
logger.error(message)
raise ValueError(message)
x = sample["img"].to(device)
t = sample["class_id"].to(device)
batch_size = x.shape[0]
# compute output and loss
output = model(x)
loss = criterion(output, t)
# measure accuracy and record loss
accs = calc_accuracy(output, t, topk=(1,))
acc1 = accs[0]
# keep predicted results and gts for calculate F1 Score
_, pred = output.max(dim=1)
gt = t.to("cpu").numpy()
pred = pred.to("cpu").numpy()
if iter_type == "train" and optimizer is not None:
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
return batch_size, loss.item(), acc1, gt, pred
def train(
loader: DataLoader,
model: nn.Module,
criterion: Any,
optimizer: optim.Optimizer,
epoch: int,
device: str,
interval_of_progress: int = 50,
) -> Tuple[float, float, float]:
batch_time = AverageMeter("Time", ":6.3f")
data_time = AverageMeter("Data", ":6.3f")
losses = AverageMeter("Loss", ":.4e")
top1 = AverageMeter("Acc@1", ":6.2f")
progress = ProgressMeter(
len(loader),
[batch_time, data_time, losses, top1],
prefix="Epoch: [{}]".format(epoch),
)
# keep predicted results and gts for calculate F1 Score
gts = []
preds = []
# switch to train mode
model.train()
end = time.time()
for i, sample in enumerate(loader):
# measure data loading time
data_time.update(time.time() - end)
batch_size, loss, acc1, gt, pred = do_one_iteration(
sample, model, criterion, device, "train", optimizer
)
losses.update(loss, batch_size)
top1.update(acc1, batch_size)
# save the ground truths and predictions in lists
gts += list(gt)
preds += list(pred)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# show progress bar per 50 iteration
if i != 0 and i % interval_of_progress == 0:
progress.display(i)
# calculate F1 Score
f1s = f1_score(gts, preds, average="macro")
return losses.get_average(), top1.get_average(), f1s
def evaluate(
loader: DataLoader, model: nn.Module, criterion: Any, device: str
) -> Tuple[float, float, float, np.ndarray]:
losses = AverageMeter("Loss", ":.4e")
top1 = AverageMeter("Acc@1", ":6.2f")
# keep predicted results and gts for calculate F1 Score
gts = []
preds = []
# calculate confusion matrix
n_classes = loader.dataset.get_n_classes()
c_matrix = np.zeros((n_classes, n_classes), dtype=np.int32)
# switch to evaluate mode
model.eval()
with torch.no_grad():
for sample in loader:
batch_size, loss, acc1, gt, pred = do_one_iteration(
sample, model, criterion, device, "evaluate"
)
losses.update(loss, batch_size)
top1.update(acc1, batch_size)
# keep predicted results and gts for calculate F1 Score
gts += list(gt)
preds += list(pred)
c_matrix += confusion_matrix(
gt,
pred,
labels=[i for i in range(n_classes)],
)
f1s = f1_score(gts, preds, average="macro")
return losses.get_average(), top1.get_average(), f1s, c_matrix
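# Added end-to-end sketch (illustration only; the toy dataset and model below are
# made up): the helpers above expect batches shaped as {"img": ..., "class_id": ...}
# and a dataset that exposes get_n_classes(), which is all this sketch provides.
def _demo_run(device: str = "cpu") -> None:
    from torch.utils.data import Dataset

    class _ToyDataset(Dataset):
        def __len__(self):
            return 32

        def __getitem__(self, idx):
            return {"img": torch.randn(8), "class_id": torch.tensor(idx % 2)}

        def get_n_classes(self):
            return 2

    loader = DataLoader(_ToyDataset(), batch_size=8)
    model = nn.Linear(8, 2)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.1)
    train(loader, model, criterion, optimizer, epoch=0, device=device)
    print(evaluate(loader, model, criterion, device))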
| yiskw713/pytorch_template | src/libs/helper.py | helper.py | py | 4,370 | python | en | code | 22 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_... |
16662772162 | #!/usr/bin/env python
# coding: utf-8
# In[2]:
import pandas as pd
# In[3]:
df=pd.read_csv(r'C:\Users\msid4\OneDrive\Desktop\cc\music.tsv',sep='\t',encoding='utf-8',error_bad_lines=False)
# In[3]:
df.head()
# In[90]:
df.shape
# In[4]:
import findspark
# In[5]:
findspark.init('C:\spark')
# In[6]:
from pyspark.sql import SparkSession
# In[7]:
from pyspark import SparkConf,SparkContext
# In[8]:
from pyspark.sql import SQLContext
# In[9]:
conf=SparkConf().setMaster("local").setAppName("Assignment2")
sc=SparkContext(conf=conf)
# In[10]:
sqlContext = SQLContext(sc)
# In[11]:
data1 = sqlContext.read.format("com.databricks.spark.csv").options(header='true').option("delimiter", "\t").load(r"C:\Users\msid4\OneDrive\Desktop\cc\music.tsv")
# In[12]:
v2=data1.select('review_body','customer_id')
# In[13]:
v2.show()
# In[176]:
v3 = v2.groupBy("customer_id").count()
# In[177]:
v3.printSchema
# In[ ]:
v3.registerTempTable("v3")
# In[ ]:
sqlContext.sql("SELECT percentile_approx(count, 0.5) as median FROM v3").show()
# In[15]:
v4=data1.select('review_body','product_id')
v4.registerTempTable("v4")
# In[16]:
v6=v4.select('review_body','product_id').groupBy("product_id").count()
v6.show()
# In[17]:
v6.registerTempTable("v6")
# In[124]:
median_product=sqlContext.sql("SELECT percentile_approx(count, 0.5) as median FROM v6")
# In[107]:
from pyspark.sql import functions as F
# In[22]:
v6=v6.withColumnRenamed("product_id", "productid")
# In[23]:
data1 = data1.join(v6, data1.product_id == v6.productid)
data1.show()
# In[29]:
data1_output=data1.where('count>=2')
# In[30]:
v10=data1_output.select('product_id','review_body','count')
v10.registerTempTable("v10")
# In[33]:
top_10_products=sqlContext.sql("SELECT review_body,product_id FROM v10 order by count desc limit 10 ").show()
# In[ ]:
# In[78]:
from pyspark.ml.feature import RegexTokenizer
from pyspark.sql.functions import udf, col
from pyspark.sql.types import IntegerType
regex_Tokenizer=RegexTokenizer(inputCol='review_body', outputCol='sentences',pattern='\\.')
# In[79]:
count_tokens=udf(lambda sentences:len(sentences),IntegerType())
# In[81]:
reg_tokenized=regex_Tokenizer.transform(data1_output)
# In[ ]:
reg_tokenized.show()
# In[ ]:
data2=reg_tokenized.withColumn('tokens',count_tokens(col('sentences')))
data2.show()
# In[ ]:
filtered_data=data2.where('tokens>=2')
| msid4459/Cloud-Computing-Spark_ML_MapReduce-project | stage2.py | stage2.py | py | 2,337 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "findspark.init",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pyspark.SparkConf",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "pyspark.SparkContext... |
33082316879 | import re
from googletrans import Translator
# String containing the student information
student_info = "Full name: BUI HUY HOANG\nDoB\n11/12/2003\nGender: Male\nIntake\n2021-2024\nCourse: Bachelor\nMajor\nInformation and Communication Technology\nBI12-170\nValidity: 30/10/2024"
# Find the individual fields and stitch the record back together
full_name = re.search(r"Full name: (.+)", student_info).group(1)
dob = re.search(r"DoB\n(.+)", student_info).group(1)
gender = re.search(r"Gender: (.+)", student_info).group(1)
intake = re.search(r"Intake\n(.+)", student_info).group(1)
course = re.search(r"Course: (.+)", student_info).group(1)
major = re.search(r"Major\n(.+)\n", student_info).group(1)
# Search for the student ID in the student info string
match = re.search(r"B[I][0-9]{2}-[0-9]{3}", student_info)
if match:
student_id = match.group(0)
else:
student_id = ""
validity = re.search(r"Validity: (.+)", student_info).group(1)
# Store the information in the student_info_oneline variable
student_info_oneline = f"Full name: {full_name}\nGender: {gender}\nDoB: {dob}\nCourse: {course}\nIntake: {intake}\nMajor: {major}\nStudent ID: {student_id}\nValidity: {validity}"
translator = Translator()
# Translate to Vietnamese
result_vi = translator.translate(
student_info_oneline, src='en', dest='vi').text
# Translate to French
result_fr = translator.translate(
student_info_oneline, src='en', dest='fr').text
# Display the translation results
# Display the student_info_oneline variable
print(student_info_oneline, end='\n\n\n')
print('Thông tin sinh viên (Tiếng Việt):')
print(result_vi, end='\n\n\n')
print('Thông tin sinh viên (Tiếng Pháp):')
print(result_fr)
| Huyen165/ML2 | Src/Identify_info.py | Identify_info.py | py | 1,705 | python | vi | code | 0 | github-code | 1 | [
{
"api_name": "re.search",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 12,
"... |
2715638565 | import os,glob
from Bio import SeqIO
import statistics
import numpy as np
from Bio.Seq import Seq
import re
vcf_folder = '/scratch/users/anniz44/genomes/donor_species/vcf_round2/merge/details/'
output_folder = '/scratch/users/anniz44/genomes/donor_species/vcf_round2/BS/'
target_TF = '%s/target.TF.faa'%(output_folder)
ref_BS = '%s/ccpA_BS.fa'%(output_folder)
input_bs_file = '%s/binding_results_ccpA.txt'%(output_folder)
BS_folder = ''
distance = 5000 # extract 5kb neighbourhood
distance_TF = 5000 # TF with BS nearby
def find_strains(vcf_file,genomewithSNP):
mut_strains = []
for linesvcf in open(vcf_file, 'r'):
if linesvcf.startswith('CHR'):
linesvcf_set = linesvcf.split('\n')[0].split('\t')
allgenome = linesvcf_set[9:]
i = 1
# find mutated strains
for genome in allgenome:
if str(i) in genomewithSNP:
mut_strains.append(genome)
i += 1
break
return [mut_strains,allgenome]
def load_genes(input_faa):
Mapping_loci_all = dict()
for record in SeqIO.parse(input_faa, 'fasta'):
record_id = str(record.id)
contig = '_'.join(record_id.split('_')[0:-1])
description = str(record.description).replace(' ', '').split('#')
Mapping_loci_all.setdefault(contig, [])
Mapping_loci_all[contig].append([int(description[1]) - 1,
int(description[2]) - 1, record_id])
return Mapping_loci_all
def contig_length(CHR):
try:
total_length = CHR.split('size')[1]
except IndexError:
try:
total_length = CHR.split('length_')[1].split('_cov')[0]
except IndexError:
total_length = 10000
return int(total_length)
def load_BS_old(BS_file,Mapping_loci_all):
allBS = []
allBS.append('BS\tpvalue\tlocus\tcontig\tstrand\ttargetgane\tlocusgene\n')
target_gene_list = dict()
BS_loci = dict()
i = 0
for lines in open(BS_file, 'r'):
if not lines.startswith('#') and not lines.startswith('motif_id') and lines != '\n':
lines_set = lines.split('\n')[0].split('\t')
pvalue = lines_set[7]
contig, locus1, locus2, strand = lines_set[2:6]
if contig_length(contig) >= 5000:
i += 1
BS_loci.setdefault(contig, [])
locus1 = int(locus1)
locus2 = int(locus2)
targetgene = ''
locus_target = 0
if contig in Mapping_loci_all:
for locus in Mapping_loci_all[contig]:
locusre1, locusref2, genename = locus
if locus2 <= locusref2 and targetgene == '':
targetgene = genename
locus_target = locusre1
seq = lines_set[9]
if targetgene != '':
if strand == '-':
# the gene before
gene_locus = int(targetgene.split('_')[-1])
if gene_locus > 1:
targetgene = '_'.join(targetgene.split('_')[0:-1]) + '_%s' % (
int(targetgene.split('_')[-1]) - 1)
else:
targetgene = '%s_1' % (contig)
allBS.append('%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (
seq, pvalue, locus1, contig, strand, targetgene, locus_target))
target_gene_list.setdefault(targetgene, set())
target_gene_list[targetgene].add(seq)
BS_loci[contig].append([locus1, locus2, seq, targetgene,pvalue])
f1 = open('%s.BS.txt' % (output_file), 'w')
f1.write(''.join(allBS))
f1.close()
aa_output = []
for record in SeqIO.parse(input_faa, 'fasta'):
record_id = str(record.id)
if record_id in target_gene_list:
for seq in target_gene_list[record_id]:
aa_output.append('>%s_%s_C_%s_G_%s\n%s\n' % (
seq, lineage, record_id.split('_')[1], record_id.split('_')[-1], str(record.seq)))
select_seq_faa.setdefault(seq,'>%s_%s_C_%s_G_%s\n%s\n' % (
seq, lineage, record_id.split('_')[1], record_id.split('_')[-1], str(record.seq)))
f1 = open('%s.BS.faa' % (output_file), 'w')
f1.write(''.join(aa_output))
f1.close()
return BS_loci
def load_BS(BS_file,Mapping_loci_all,BS_loci,TF_locus):
allBS = []
allBS.append('BS\tpvalue\tlocus\tcontig\tstrand\ttargetgane\tlocusgene\n')
target_gene_list = dict()
i = 0
genomename = os.path.split(BS_file)[-1].split('.fimo.tsv')[0]
check_TF_BS = False
TF_locus_genome = TF_locus.get(genomename,[])
TF_contig = [i[0] for i in TF_locus_genome]
if TF_locus_genome != []:
# check TF
for lines in open(BS_file, 'r'):
if not check_TF_BS and not lines.startswith('#') and not lines.startswith('motif_id') and lines != '\n':
lines_set = lines.split('\n')[0].split('\t')
contig, locus1, locus2, strand = lines_set[2:6]
if contig in TF_contig:
locus1 = int(locus1)
locus2 = int(locus2)
for contig_TF,locus1_TF,locus2_TF,gene in TF_locus_genome:
if contig_TF == contig and locus1_TF - distance_TF <= locus2 and locus2_TF + distance_TF >= locus1:
#print(locus1_TF - distance_TF,locus2_TF + distance_TF,locus1,locus2,gene)
check_TF_BS = True
break
if not check_TF_BS:
print('no BS near TF for genome %s'%(genomename))
else:
# BS near TF
print('found BS near TF for genome %s' % (genomename))
pass_genome.append(genomename)
# load BS
for lines in open(BS_file, 'r'):
if not lines.startswith('#') and not lines.startswith('motif_id') and lines != '\n':
lines_set = lines.split('\n')[0].split('\t')
pvalue = lines_set[7]
contig, locus1, locus2, strand = lines_set[2:6]
i += 1
locus1 = int(locus1)
locus2 = int(locus2)
targetgene = ''
locus_target = 0
if contig in Mapping_loci_all:
for locus in Mapping_loci_all[contig]:
locusre1, locusref2, genename = locus
if locus2 <= locusref2 and targetgene == '':
targetgene = genename
locus_target = locusre1
seq = lines_set[9]
if targetgene != '':
if strand == '-':
# the gene before
gene_locus = int(targetgene.split('_')[-1])
if gene_locus > 1:
targetgene = '_'.join(targetgene.split('_')[0:-1]) + '_%s' % (
int(targetgene.split('_')[-1]) - 1)
else:
targetgene = '%s_1' % (contig)
allBS.append('%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (
seq, pvalue, locus1, contig, strand, targetgene, locus_target))
target_gene_list.setdefault(targetgene, set())
target_gene_list[targetgene].add(seq)
BS_loci.setdefault(seq, [])
BS_loci[seq].append([genomename, locus1, locus2, '%s_C_%s_G_%s' % (genomename,
targetgene.split('_')[1],
targetgene.split('_')[-1])])
# output BS and gene target
f1 = open('%s.BS.txt' % (output_file), 'w')
f1.write(''.join(allBS))
f1.close()
aa_output = []
for record in SeqIO.parse(input_faa, 'fasta'):
record_id = str(record.id)
if record_id in target_gene_list:
for seq in target_gene_list[record_id]:
record_seq = str(record.seq)
aa_output.append('>%s_%s_C_%s_G_%s\n%s\n' % (
seq, genomename, record_id.split('_')[1], record_id.split('_')[-1], record_seq))
select_seq_faa.setdefault(seq,['',set()])
if record_seq not in select_seq_faa[seq][-1]:
select_seq_faa[seq][-1].add(record_seq)
select_seq_faa[seq][0]+=('>%s_%s_C_%s_G_%s\n%s\n' % (
seq, genomename, record_id.split('_')[1], record_id.split('_')[-1], record_seq))
f1 = open('%s.BS.faa' % (output_file), 'w')
f1.write(''.join(aa_output))
f1.close()
return BS_loci
def annotate(fasta_output):
cutoff = 0.7
cmd_cluster = ('%s -sort length -cluster_fast %s -id %s -centroids %s.cluster.aa -uc %s.uc -threads %s\n'
% ('usearch', fasta_output, cutoff, fasta_output,
fasta_output, 40))
os.system(cmd_cluster)
fasta_output = fasta_output + '.cluster.aa'
cutoff = 0.01
database = '/scratch/users/mit_alm/database/eggnog/xaa.hmm'
cmds = ('hmmsearch --tblout %s.eggnog.1.txt --cpu 40 -E %s %s %s\n') % (
fasta_output, cutoff, database, fasta_output)
database = '/scratch/users/mit_alm/database/eggnog/xab.hmm'
cmds += ('hmmsearch --tblout %s.eggnog.2.txt --cpu 40 -E %s %s %s\n') % (
fasta_output, cutoff, database, fasta_output)
database = '/scratch/users/mit_alm/database/eggnog/xac.hmm'
cmds += ('hmmsearch --tblout %s.eggnog.3.txt --cpu 40 -E %s %s %s\n') % (
fasta_output, cutoff, database, fasta_output)
f1 = open(output_file_BS + '.eggnog.sh', 'w')
f1.write(
'#!/bin/bash\nsource ~/.bashrc\nexport LD_LIBRARY_PATH=/scratch/users/anniz44/bin/pro/lib/gsl-2.6:/scratch/users/anniz44/bin/pro/lib/glibc-2.14-build:/scratch/users/anniz44/bin/pro/lib/:/scratch/users/anniz44/bin/miniconda3/lib:$LD_LIBRARY_PATH\n%s' % (
cmds))
f1.close()
def allele_freq_to_allele(genotype, ALT_set):
genotype = [int(i) for i in genotype.split(':')[-1].split(',')]
Major_ALT = '-'
if sum(genotype) > 0:
ALT_set_sample = dict()
ALT_frq_set = set()
for i in range(0,len(ALT_set)):
ALT_frq = int(genotype[i])
ALT_set_sample.setdefault(ALT_frq, set())
ALT_set_sample[ALT_frq].add(ALT_set[i])
ALT_frq_set.add(ALT_frq)
ALT_frq_set = sorted(ALT_frq_set, reverse=True)
for ALT_frq in ALT_frq_set:
for alleles in ALT_set_sample[ALT_frq]:
if Major_ALT == '-':
Major_ALT = alleles
return Major_ALT
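# Worked example (illustrative values only): for a VCF genotype field such as
# "0/1:3,10" with ALT_set = ['A', 'T'], the per-allele depths are [3, 10], so
# the allele with the highest read count ('T') is returned as Major_ALT;
# if every depth is zero the function returns '-'.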
def find_mutations_on_BS_old(vcf_file2,mut_strains,BS_loci):
BS_SNP_output = []
mut_strains_set = []
wild_type_set = []
allseqout = set()
genomewithSNPvcf = []
genomewithnoSNPvcf = []
samplefile = vcf_file2.replace('.all.raw.vcf.all','.all.raw.vcf.filtered.samplename.txt')
for lines in open(samplefile, 'r'):
# set sample name
sample_set = lines.split('\n')[0].split('\t')
i = 9
for samples in sample_set:
genome = samples
if genome in mut_strains:
genomewithSNPvcf.append(i)
mut_strains_set.append('M.%s' % (genome))
elif genome in allgenome:
genomewithnoSNPvcf.append(i)
wild_type_set.append('W.%s' % (genome))
i += 1
BS_SNP_output.append('BS\tBS_in_ref\tCHR\tPOS\tPOS_on_BS\tTarget\tpvalue\tMutated_alleles\tWildtype_alleles\t%s\t%s\n' % (
'\t'.join(mut_strains_set),
'\t'.join(
wild_type_set)
))
#print(mut_strains,mut_strains_set,wild_type_set)
for linesvcf in open(vcf_file2, 'r'):
linesvcf_set = linesvcf.split('\n')[0].split('\t')
if not linesvcf.startswith('#'):
contig, POS = linesvcf_set[0:2]
if contig in BS_loci:
POS = int(POS)
for BSpos in BS_loci[contig]:
BSpos1, BSpos2, seq, targetgene,pvalue = BSpos
if POS >= BSpos1 - distance and POS <= BSpos2 + distance :
REF = linesvcf_set[3]
ALT_set = [REF]
ALT = linesvcf_set[4]
for a_ALT in ALT.split(','):
if a_ALT != '.':
ALT_set.append(a_ALT)
# BS site
Mut_allele = []
Wild_allele = []
# Major_ALT in mutated strains
for i in genomewithSNPvcf:
genotype = linesvcf_set[i]
Major_ALT = allele_freq_to_allele(genotype, ALT_set)
Mut_allele.append(Major_ALT)
# Major_ALT in wild type strains
for i in genomewithnoSNPvcf:
genotype = linesvcf_set[i]
Major_ALT = allele_freq_to_allele(genotype, ALT_set)
Wild_allele.append(Major_ALT)
Mut_allele_set = sorted(set(Mut_allele))
Wild_allele_set = sorted(set(Wild_allele))
if any(i not in Wild_allele_set for i in Mut_allele_set):
# different alleles
inref = False
if seq in Ref:
inref = True
BS_SNP_output.append('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (
seq, inref, contig, POS, POS - BSpos1 + 1, targetgene,pvalue,
';'.join(list(Mut_allele_set)),
';'.join(list(Wild_allele_set)),
'\t'.join(Mut_allele),
'\t'.join(Wild_allele)
))
allseqout.add(select_seq_faa.get(seq, ''))
f1 = open('%s.BSsum.txt' % (output_file_BS),'w')
f1.write(''.join(BS_SNP_output))
f1.close()
if len(allseqout)>0:
f1 = open('%s.BS.faa' % (output_file_BS), 'w')
f1.write(''.join(list(allseqout)))
f1.close()
annotate('%s.BS.faa' % (output_file_BS))
def find_mutations_on_BS(vcf_file2,mut_strains,BS_loci_co_assembly):
BS_SNP_output = []
mut_strains_set = []
wild_type_set = []
allseqout = set()
genomewithSNPvcf = []
genomewithnoSNPvcf = []
samplefile = vcf_file2.replace('.all.raw.vcf.all','.all.raw.vcf.filtered.samplename.txt')
for lines in open(samplefile, 'r'):
# set sample name
sample_set = lines.split('\n')[0].split('\t')
i = 9
for samples in sample_set:
genome = samples
if genome in mut_strains:
genomewithSNPvcf.append(i)
mut_strains_set.append('M.%s' % (genome))
elif genome in allgenome:
genomewithnoSNPvcf.append(i)
wild_type_set.append('W.%s' % (genome))
i += 1
BS_SNP_output.append('BS\tBS_in_ref\tCHR\tPOS\tPOS_on_BS\tMutated_alleles\tWildtype_alleles\t%s\t%s\n' % (
'\t'.join(mut_strains_set),
'\t'.join(
wild_type_set)
))
#print(mut_strains,mut_strains_set,wild_type_set)
finished_set = []
for linesvcf in open(vcf_file2, 'r'):
linesvcf_set = linesvcf.split('\n')[0].split('\t')
if not linesvcf.startswith('#'):
contig, POS = linesvcf_set[0:2]
if contig in BS_loci_co_assembly:
POS = int(POS)
for seq_loci_sub,seq in BS_loci_co_assembly[contig]:
if [seq_loci_sub,seq] not in finished_set:
finished_set.append([seq_loci_sub,seq])
BSpos1, BSpos2 = seq_loci_sub
if POS >= BSpos1 - distance and POS <= BSpos2 + distance:
REF = linesvcf_set[3]
ALT_set = [REF]
ALT = linesvcf_set[4]
for a_ALT in ALT.split(','):
if a_ALT != '.':
ALT_set.append(a_ALT)
# BS site
Mut_allele = []
Wild_allele = []
# Major_ALT in mutated strains
for i in genomewithSNPvcf:
genotype = linesvcf_set[i]
Major_ALT = allele_freq_to_allele(genotype, ALT_set)
Mut_allele.append(Major_ALT)
# Major_ALT in wild type strains
for i in genomewithnoSNPvcf:
genotype = linesvcf_set[i]
Major_ALT = allele_freq_to_allele(genotype, ALT_set)
Wild_allele.append(Major_ALT)
Mut_allele_set = sorted(set(Mut_allele))
Wild_allele_set = sorted(set(Wild_allele))
if any(i not in Wild_allele_set for i in Mut_allele_set):
# different alleles
inref = False
if seq in Ref:
inref = True
#targetgene = [i[-1] for i in BS_loci[seq]]
BS_SNP_output.append('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (
seq, inref, contig, POS, POS - BSpos1 + 1,
';'.join(list(Mut_allele_set)),
';'.join(list(Wild_allele_set)),
'\t'.join(Mut_allele),
'\t'.join(Wild_allele)
))
allseqout.add(select_seq_faa.get(seq, '')[0])
f1 = open('%s.BSsum.txt' % (output_file_BS), 'w')
f1.write(''.join(BS_SNP_output))
f1.close()
if len(allseqout)>0:
f1 = open('%s.BS.faa' % (output_file_BS), 'w')
f1.write(''.join(list(allseqout)))
f1.close()
annotate('%s.BS.faa' % (output_file_BS))
def find_TF(input_faa,blast_out,alloutput):
withTF = False
genomename = os.path.split(input_faa)[-1].split('.faa')[0]
#donor = genomename.split('_')[0]
TF_set = dict()
for lines in open(blast_out):
lines_set = lines.split('\t')
query,reference = lines_set[0:2]
TF_set.setdefault(query,[])
TF_set[query].append(reference)
for record in SeqIO.parse(input_faa, 'fasta'):
record_id = str(record.id)
if record_id in TF_set:
for reference in TF_set[record_id]:
#if donor in reference:
description = str(record.description).replace(' ', '').split('#')
alloutput.append('%s\t%s\t%s\t%s\t%s\n'%(genomename,record_id,description[1],description[2],reference))
withTF = True
if not withTF:
print('TF not found for %s'%(input_faa))
return alloutput
def load_TF(TF_locus_file):
TF_locus = dict()
for lines in open(TF_locus_file,'r'):
lines_set = lines.split('\t')
genome, gene,locus1,locus2 = lines_set[0:4]
genome = genome.split('.all')[0]
contig = '_'.join(gene.split('_')[0:-1])
TF_locus.setdefault(genome,[])
TF_locus[genome].append([contig,int(locus1),int(locus2),gene])
return TF_locus
def find_target_BS(mut_strains, BS_loci,allgenome):
BS_select = set()
wild_strains = [i for i in allgenome if i not in mut_strains]
if any(genome in mut_strains for genome in pass_genome) and any(genome in wild_strains for genome in pass_genome):
# mutated genome and wild strains all passed
print('found enough genomes passing the criterion of BS near TF')
for seq in BS_loci:
allgenomewithBS = [i[0] for i in BS_loci[seq]]
# only in mutated
if (any(genome in mut_strains for genome in allgenomewithBS) and not any(
genome in wild_strains for genome in allgenomewithBS)):
BS_select.add(seq)
else:
print('not enough genomes passing the criterion of BS near TF')
BS_select = set()
return BS_select
def find_BS_coassembly(co_assembly,BS_select):
BS_loci_co_assembly = dict()
for record in SeqIO.parse(co_assembly, 'fasta'):
record_id = str(record.id)
record_seq = str(record.seq)
for seq in BS_select:
seq_loci = [[m.start()+1, m.end()+1] for m in re.finditer(seq, record_seq)]
if seq_loci!=[]:
BS_loci_co_assembly.setdefault(record_id,[])
for seq_loci_sub in seq_loci:
BS_loci_co_assembly[record_id].append([seq_loci_sub,seq])
seq_rev = str(Seq(seq).reverse_complement())
seq_loci = [[m.start()+1, m.end()+1] for m in re.finditer(seq_rev, record_seq)]
if seq_loci!=[]:
if seq_rev in select_seq_faa:
seq = seq_rev
BS_loci_co_assembly.setdefault(record_id, [])
for seq_loci_sub in seq_loci:
BS_loci_co_assembly[record_id].append([seq_loci_sub, seq])
return BS_loci_co_assembly
def TF_near_BS(TF_locus,BS_file_all):
BS_TF = []
for BS_file in BS_file_all:
check_TF_BS = False
genome = os.path.split(BS_file)[-1].split('.BS.txt')[0]
TF_locus_genome = TF_locus.get(genome, [])
TF_contig = [i[0] for i in TF_locus_genome]
for lines in open(BS_file,'r'):
if not lines.startswith('BS'):
lines_set = lines.split('\n')[0].split('\t')
seq,contig,locus = lines_set[0:3]
if contig in TF_contig:
locus = int(locus)
for contig_TF, locus1_TF, locus2_TF,gene in TF_locus_genome:
if contig_TF == contig and locus1_TF - distance_TF <= locus and locus2_TF + distance_TF >= locus:
check_TF_BS = True
BS_TF.append('%s\t%s\t%s\t%s\t%s\n'%(genome,contig,seq,locus,gene))
if not check_TF_BS:
print('BS not found near TF %s'%(genome))
f1 = open('%s/allBSnearTF.txt' % (output_folder), 'w')
f1.write(''.join(BS_TF))
f1.close()
# find all TF
try:
f1 = open('%s/allTFloci.txt' % (output_folder), 'r')
except FileNotFoundError:
cmds = 'export LD_LIBRARY_PATH=/scratch/users/anniz44/bin/pro/lib/gsl-2.6:/scratch/users/anniz44/bin/pro/lib/glibc-2.14-build:/scratch/users/anniz44/bin/pro/lib/:/scratch/users/anniz44/bin/miniconda3/lib:$LD_LIBRARY_PATH\n'
os.system(cmds)
alloutput = []
allinput_faa = glob.glob('%s/*/*.faa'%(output_folder))
for input_faa in allinput_faa:
cmds = (
"diamond blastp --query %s --db %s.dmnd --out %s.TF.txt --id 80 --outfmt 6 --max-target-seqs 10 --evalue 1e-1 --threads 40\n"%
(
input_faa,target_TF,input_faa,
))
os.system(cmds)
alloutput = find_TF(input_faa, input_faa + '.TF.txt', alloutput)
f1 = open('%s/allTFloci.txt' % (output_folder), 'w')
f1.write(''.join(alloutput))
f1.close()
# load TF loci
TF_locus = load_TF('%s/allTFloci.txt' % (output_folder))
# find BS predicted in genomes near TF
try:
f1 = open('%s/allBSnearTF.txt' % (output_folder), 'r')
except FileNotFoundError:
BS_file_all = glob.glob('%s/*/*.BS.txt'%(output_folder))
#BS_file_all = glob.glob('%s/*.BS.txt' % (output_folder))
TF_near_BS(TF_locus, BS_file_all)
print('finished processing TF near BS')
# load ref
Ref = []
if ref_BS != 'None':
for record in SeqIO.parse(ref_BS, 'fasta'):
Ref.append(str(record.seq))
# process each SNP
for lines in open(input_bs_file,'r'):
if not lines.startswith('AA_POS_ref'):
try:
lines_set = lines.split('\t')
lineage = lines_set[4].split('__')[0].replace('CL', 'clustercluster')
species = lines_set[4].split('_')[0]
donor = lines_set[5]
SNP = lines_set[3]
output_file_BS = '%s/%s/%s_%s_%s' % (output_folder, BS_folder,species, donor, SNP)
print(output_file_BS)
try:
f1 = open('%s.BSsum.txt' % (output_file_BS), 'r')
except FileNotFoundError:
select_seq_faa = dict()
# find genome names
vcf_file = '%s/%s%s' % (
vcf_folder, lineage, '.all.parsi.fasta.linktrunc.sum.txt')
genomewithSNP = lines_set[-9].split(';')
mut_strains, allgenome = find_strains(vcf_file, genomewithSNP)
print('process %s mutated strains %s'%(lineage,mut_strains))
lineage = lineage.split('.donor')[0]
# process fimo results
donor_species = '%s_%s'%(species,donor)
BS_file_all = glob.glob('%s/%s/%s/*.fimo.tsv'%(output_folder,BS_folder,donor_species))
BS_loci = dict()
pass_genome = []
for BS_file in BS_file_all:
output_file = BS_file.split('.fimo.tsv')[0]
input_faa = '%s/%s/%s.faa'%(output_folder,donor_species,os.path.split(output_file)[-1])
# load all gene position
Mapping_loci_all = load_genes(input_faa)
# load BS
BS_loci = load_BS(BS_file, Mapping_loci_all,BS_loci,TF_locus)
# find targeted BS only in mutated or only in WT
BS_select = find_target_BS(mut_strains, BS_loci,allgenome)
if BS_select!= set():
# find genome locus for BS_select
co_assembly = '%s/co-assembly/%s.all.spades2.fasta'%(output_folder,lineage)
BS_loci_co_assembly = find_BS_coassembly(co_assembly,BS_select)
print(BS_loci_co_assembly)
# find mutations on BS in mutated strains
vcf_file2 = '%s/moredetails/%s.all.raw.vcf.all' % (
vcf_folder, lineage)
print(vcf_file2)
find_mutations_on_BS(vcf_file2, mut_strains, BS_loci_co_assembly)
# filter BS: keep only binding sites with changes on the BS itself
BS_file = '%s.BSsum.txt' % (output_file_BS)
BS_set = set()
for lines in open(BS_file, 'r'):
if not lines.startswith('BS'):
lines_set = lines.split('\t')
BS = lines_set[0]
POS_on_BS = int(lines_set[4])
Mut = lines_set[5]
if POS_on_BS >= -20 and POS_on_BS <= 20:
# BS with changes on BS itself and not just loss
BS_set.add(BS)
BS_file_out = []
for lines in open(BS_file, 'r'):
if lines.startswith('BS'):
BS_file_out.append(lines)
elif lines != '':
seq = lines.split('\t')[0]
if seq in BS_set:
# BS with changes on BS itself and not just loss
BS_file_out.append(lines)
f1 = open('%s.BSsum.filtered.txt' % (output_file_BS), 'w')
f1.write(''.join(BS_file_out))
f1.close()
except FileNotFoundError:
pass
alleggnog = glob.glob(os.path.join(output_folder, '%s/*eggnog.sh'%(BS_folder)))
if alleggnog!= []:
f1 = open(os.path.join(output_folder, '%s/allanno.sh'%(BS_folder)), 'w')
f1.write('#!/bin/bash\nsource ~/.bashrc\n')
for sub_scripts in alleggnog:
f1.write('jobmit %s %s small1\n' % (sub_scripts, os.path.split(sub_scripts)[-1]))
f1.close()
print('please run %s/%s/allanno.sh'%(output_folder,BS_folder))
| caozhichongchong/snp_finder | snp_finder/scripts/compareBS_coassembly.py | compareBS_coassembly.py | py | 28,977 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "Bio.SeqIO.parse",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "Bio.SeqIO.parse",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO",
"line_numbe... |
35872126644 | """\
Reference Directive for L.E.A.R.N
=================================
Author: Akshay Mestry <xa@mes3.dev>
Created on: Friday, July 28 2023
Last updated on: Monday, July 31 2023
This module provides a custom directive for L.E.A.R.N's custom theme,
that allows authors and contributors to add a dedicated references
section to their documentation. The directive identifies and collects all
the links present within the documentation page and collates them in a
dedicated section for easy reference.
L.E.A.R.N's external references are created using the ``references``
directive, which is included as part of this project. The directive is
implemented below, and is available to use by authors and contributors
when building the documentation.
.. note:: This directive is compatible with Sphinx 3.0 and higher.
.. note:: It is encouraged to mention references for credibility.
.. note::
This module is designed specifically for the Sphinx's L.E.A.R.N
theme, hence the directive may not be available or may be
implemented differently for different themes. Please consult the
documentation for more information.
The references can be styled using CSS. The class encapsulating the
reference is ``learn-references`` and the references itself are
``references``.
.. code-block:: css
.learn-references ol {
...
text-align: left;
font-size: 1.375rem;
...
}
.learn-references>ol>li>p {
...
text-align: left;
...
}
.references-border {
...
font-size: 1.375rem;
...
}
This will style the list of references with required colors and format.
.. versionadded:: 1.0.2
Added support for creating a dedicated references section through
the ``references`` directive. The directive handles all the
external links used in the current document and puts them together
as a list.
"""
from __future__ import annotations
import typing as t
import jinja2
from docutils import nodes
from docutils.nodes import Element
from docutils.nodes import Node
from docutils.parsers.rst import Directive
from docutils.parsers.rst import roles
from sphinx.writers.html import HTMLTranslator
__all__ = [
"References",
"ReferencesNode",
"depart_references_html",
"visit_references_html",
]
REFERENCES_TEMPLATE: t.Final[jinja2.Template] = jinja2.Template(
"""\
<h2>References</h2>
<ol>
{% for (title, link) in content %}
<li>
<p>{{ title }}, <a href="{{ link }}" target="_blank"
class="references-border">{{ link }}</a></p>
</li>
{% endfor %}
</ol>
"""
)
class ReferencesNode(Element):
pass
class References(Directive):
"""A directive class for the ``references`` directive.
This class allows using a custom ``references`` directive and maps
the source reStructuredText to ``ReferencesNode`` element doctree
node.
By using the ``references`` directive, it allows contributors or
authors to render a dedicated references section for the provided
content. This class' rendering HTML behavior is further extended
using another Python function, ``visit_references_div``.
Example::
.. references::
arxiv >> Papers are published on arXiv every day.
python >> We will be using Python all the time.
:var has_content: A boolean flag to allow content in the directive,
defaults to ``True``.
:var final_argument_whitespace: A boolean flag, may the final
argument contain whitespace, set to
``True``.
"""
has_content: bool = True
final_argument_whitespace: bool = True
def run(self) -> list[Node]:
"""Create node from the reStructuredText source.
This method processes the directive's arguments, options and
content, and return a list of Docutils/Sphinx nodes that will be
inserted into the document tree at the point where the directive
was encountered.
:return: List of Docutils node for ``references`` directive.
"""
self.options["class"] = ["learn-references"]
roles.set_classes(self.options)
node = ReferencesNode("\n".join(self.content), **self.options)
self.add_name(node)
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
def visit_references_html(self: HTMLTranslator, node: ReferencesNode) -> None:
"""Node visitor function which maps ``ReferencesNode`` element to the
HTML output.
This function allows the rendering of the references of a topic on the
webpage. It relies on ``Jinja`` templating to perform the expansion
and rendering of the HTML source code.
"""
content: list[tuple[str, str]] = []
pairs: dict[str, str] = {}
corrections = node.children[0].astext().splitlines()
node.remove(node.children[0])
for correction in corrections:
doc, new = correction.split(" >> ")
pairs[doc] = new
for target in node._document.findall(nodes.target):
if target.hasattr("refuri") and target.indirect_reference_name:
title = target["names"][0]
content.append((pairs.get(title, title.title()), target["refuri"]))
html_src = REFERENCES_TEMPLATE.render(content=content)
self.body.append(f"{self.starttag(node, 'div')}{html_src}")
def depart_references_html(self: HTMLTranslator, node: ReferencesNode) -> None:
self.body.append("</div>")
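# A minimal sketch of how these exports might be wired into Sphinx. The actual
# registration for the L.E.A.R.N theme may live elsewhere (for example in
# ``conf.py`` or a parent package), so the hook below is an assumption added
# purely for illustration.
def setup(app):  # hypothetical registration hook, not part of the original module
    """Register the ``references`` directive and its HTML visitors."""
    app.add_node(
        ReferencesNode, html=(visit_references_html, depart_references_html)
    )
    app.add_directive("references", References)
    return {"parallel_read_safe": True, "parallel_write_safe": True}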
| xames3/learn | docs/source/_extensions/sphinx/ext/learn/references.py | references.py | py | 5,586 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "typing.Final",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "jinja2.Template",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "docutils.nodes.Element",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "docutil... |
11742668018 | import eval7
from tqdm import tqdm
import traceback
import sys
from pprint import pprint
sys.path.insert(0, "../")
sys.path.insert(0, "../.libs")
from pokereval import PokerEval
pokereval = PokerEval()
from pprint import pprint
import numpy as np
import operator
from tqdm import tqdm
import datetime
import re
rank = ['A','K','Q','J','T','9','8','7','6','5','4','3','2']
def hand_init(init):
init_hand = {}
for idx1,i in enumerate(rank):
for idx2,j in enumerate(rank):
if idx1==idx2:
hand0 = rank[idx1] + rank[idx2]
init_hand[hand0]=init
if idx1<idx2:
hand1 = rank[idx1] + rank[idx2] + 'o'
init_hand[hand1]=init
hand2 = rank[idx1] + rank[idx2] + 's'
init_hand[hand2]=init
# print(len(init_hand))
# pprint(init_hand)
return init_hand
def listToStringHand(cardList):
card1=eval7.Card(cardList[0])
card2=eval7.Card(cardList[1])
if card1.rank < card2.rank:
temp=card1
card1=card2
card2=temp
if card1.suit != card2.suit:
if card1.rank != card2.rank:
hand = str(card1)[0]+str(card2)[0]+'o'
else:
hand = str(card1)[0]+str(card2)[0]
else:
hand = str(card1)[0]+str(card2)[0]+'s'
return hand
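# Worked example (illustrative): listToStringHand(['7c', 'Qc']) reorders the
# cards by rank and, since the suits match, returns 'Q7s'; with mixed suits
# such as ['Qd', '7c'] it returns 'Q7o', and a pair like ['7c', '7d'] gives '77'.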
def good_hand(hand, benchmark):
new_hand = [i for i in hand if hand[i]>=benchmark]
return new_hand
def handlist(hand):
new_hand = [i for i in hand]
return new_hand
def hand_proceed(file):
with open(file, "r") as ins:
pos=[]
for line in ins:
pos.append(line.split(','))
# cardStringList1 = [i[0] for i in pos ]
# hand = [i[0] for i in pos if float(re.sub('\s','',i[1]))>0]
# hand = percentile(cardStringList1,30)
hand = {}
for i in pos:
hand[i[0]] = float(re.sub('\s','',i[1]))
return hand
def percentile(cardStringList, n):
hr=[]
for i in range(len(cardStringList)):
hr += eval7.HandRange(cardStringList[i]).hands
i=len(hr)
index=np.percentile(np.arange(i), n, interpolation='nearest')
# print hr[index]
# print( str(hr[index][0][0]) )
if str(hr[index][0][0])[1] != str(hr[index][0][1])[1]:
if str(hr[index][0][0])[0]!= str(hr[index][0][1])[0]:
hand = str(hr[index][0][0])[0]+str(hr[index][0][1])[0] +'o'
else:
hand = str(hr[index][0][0])[0]+str(hr[index][0][1])[0]
else:
hand = str(hr[index][0][0])[0]+str(hr[index][0][1])[0] +'s'
# print(hand)
new_list=cardStringList[:(cardStringList.index(hand)+1)]
# print(new_list)
return new_list
########################################################
hand_preceed = 'proceed'
rounds=150
R1C=1 # Bet structure: Assume raise followed by call.
n = 1 # number of players except hero and bb.
iteration=200 # for each hand range eg. 89O
noise=np.zeros((rounds, 169))
benchmark = 3 # good hand benchmark for investigation.
##########################################################
if hand_preceed == 'proceed' :
hand0=hand_proceed("./log3/guru_initial_palyers=3_betStruct=1_rounds=20_'2019-11-12T23:55:36'.txt")
list_hands = good_hand(hand0, benchmark) # update hands better than benchmark.
# list_hands = handlist(hand_init(benchmark)) # update all hands
else:
hand0=hand_init(benchmark)
list_hands = handlist(hand_init(benchmark)) # update all hands
# pprint(good_hand(hand0))
# pprint(list_hands)
for round in tqdm(range(rounds)):
report={}
for index in range(len(list_hands)):
# hand['32o']=-100
hr = eval7.HandRange(list_hands[index])
good_hand1 = good_hand(hand0, benchmark)
number = iteration*len(hr.hands)
sum=0
# pprint(hand0)
for itr in range(number):
# good_hand1 = good_hand(hand0)
# pprint(good_hand1)
deck = eval7.Deck()
deck.shuffle()
cards = deck.cards
# print(list_hands[index])
# hr = eval7.HandRange(list_hands[index])
# pprint(hr.hands) #[((Card("Qc"), Card("7c")), 1.0),
# pprint(hr.string) #'Q7s'
select_one = np.random.randint(len(hr.hands))
remove_card1 = hr.hands[select_one][0][0]
remove_card2 = hr.hands[select_one][0][1]
# print(select_one)
# print(remove_card1,remove_card2)
cards.remove(remove_card1)
cards.remove(remove_card2)
# print(len(cards))
dealt_cards = cards[:21]
# print(len(dealt_cards))
hands = [str(i) for i in dealt_cards]
hand1=[str(remove_card1),str(remove_card2)]
r1=[hands[2*n+5],hands[2*n+6]]
'''
For simplicity: assume the BB shares the good-hand range with the other players.
'''
# n = 8
# bb_play = False
player=['','','','','','','']
for i in range(n):
list1 = [hands[2*i],hands[1+2*i]]
hand = listToStringHand(list1)
if hand in good_hand1:
player[i] = [hands[2*i],hands[1+2*i]]
# if i==0:
# bb_play = True
participation = [i for i in player if i!='']
pockets = [hand1, r1] + participation
board = [hands[2*n],hands[2*n+1],hands[2*n+2],hands[2*n+3],hands[2*n+4]]
# pprint(hand1)
# pprint(hands)
# pprint(pockets)
# pprint(board)
# print(len(good_hand1))
result = pokereval.poker_eval(game='holdem', pockets=pockets, board=board)
# pprint(result['eval'])
m = len(participation)
pot = R1C*(m + 2)
# if bb_play:
# pot = R1C*(m + 1)
# else:
# pot = R1C*(m + 1) + 1 # 1BB from bb as dead money.
winners = [i for i in result['eval'] if int(i['ev'])>0]
# for index, value in enumerate(result['eval']):
# if value['ev']>0:
# # print(pockets[index], "wins", pot*1.0/len(winners))
# stringHand=listToStringHand(pockets[index])
# # print(stringHand)
# hand0[stringHand]+=pot*1.0/len(winners)
# else:
# # print(pockets[index], "lose 1")
# stringHand=listToStringHand(pockets[index])
# hand0[stringHand]-=1
# pprint(good_hand(hand0))
# pprint(len(good_hand(hand0)))
if result['eval'][0]['ev']>0:
sum = sum + pot*1.0/len(winners) - 1*R1C
else:
sum = sum - 1*R1C
# Add diminishing noise factor.
# print(np.random.uniform(-1,1)/(itr+1))
# hand0[list_hands[index]]+=sum + 0.1*np.random.uniform(-1,1)/(round*itr+1)
noise[round][index]= 0.5*np.random.uniform(-1,1)/(round+1)
hand0[list_hands[index]]+=sum*1.0/number #+ noise[round][index]
report[list_hands[index]]= hand0[list_hands[index]] #,noise[round][index]]
# print(list_hands[index],"=",hand0[list_hands[index]])
# print(list_hands[index])
# pprint(hand0)
# pprint(good_hand(hand0))
# pprint(len(good_hand(hand0)))
pprint(report)
hand1={}
for i in list_hands:
hand1[i] = hand0[i] #/rounds
rank_hand0 = sorted(hand1.items(), key=operator.itemgetter(1), reverse=True)
pprint(rank_hand0)
pprint(len(good_hand(hand0, benchmark)))
t1 = datetime.datetime.now()
f=open("./log3/guru_"+hand_preceed+"_palyers="+str(n+2)+"_"+"betStruct="+str(R1C)+"_"+"rounds="+str(rounds)+"_"+repr(t1.isoformat()[:19])+".txt","a+")
for i in range(len(rank_hand0)):
f.write("%.4s, %s\n" % (str(rank_hand0[i][0]), str(rank_hand0[i][1])))
f.close()
| jinyiabc/holdem_board_analyzer | hr/fictious_raise.py | fictious_raise.py | py | 7,896 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path.insert",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sys.path.insert",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_numbe... |
19118236265 | import pandas as pd
import matplotlib.pyplot as plt
# Load the CSV data into a DataFrame
df = pd.read_csv("suicide_reates.csv")
# Menu for user's choice
print("Select a graph to display:")
print("1. Bar Graph")
print("2. Histogram")
print("3. Scatter Plot")
print("4. Line Graph") # Add Line Graph option
choice = input("Enter your choice (1/2/3/4): ")
# Convert the choice to an integer
choice = int(choice)
# Check the user's choice and create the corresponding plot
if choice == 1:
# Bar Graph
plt.figure(figsize=(10, 6))
plt.bar(df["YEAR"], df["ESTIMATE"])
plt.title("Bar Graph: Suicide Death Rates Over Time")
plt.xlabel("Year")
plt.ylabel("Death Rate (per 100,000 population)")
plt.grid(axis="y")
plt.show()
elif choice == 2:
# Histogram
plt.figure(figsize=(10, 6))
plt.hist(df["ESTIMATE"], bins=5, edgecolor="k")
plt.title("Histogram: Suicide Death Rates")
plt.xlabel("Death Rate (per 100,000 population)")
plt.ylabel("Frequency")
plt.grid(axis="y")
plt.show()
elif choice == 3:
# Scatter Plot
plt.figure(figsize=(10, 6))
plt.scatter(df["YEAR"], df["ESTIMATE"], s=50, alpha=0.7)
plt.title("Scatter Plot: Suicide Death Rates Over Time")
plt.xlabel("Year")
plt.ylabel("Death Rate (per 100,000 population)")
plt.grid(True)
plt.show()
elif choice == 4:
# Line Graph
plt.figure(figsize=(10, 6))
plt.plot(df["YEAR"], df["ESTIMATE"], marker='o', linestyle='-')
plt.title("Line Graph: Suicide Death Rates Over Time")
plt.xlabel("Year")
plt.ylabel("Death Rate (per 100,000 population)")
plt.grid(True)
plt.show()
else:
print("Invalid choice. Please select 1, 2, 3, or 4.")
| praveenkumar-byte/ml | visualize.py | visualize.py | py | 1,709 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "matplotlib.... |
42895748829 | import torch
import pandas as pd
import numpy as np
from tqdm.notebook import tqdm
from joblib import Parallel, delayed #conda install -c anaconda joblib
class DatasetHandler:
def __init__(self, dataframes, config_dict):
"""
Expected config_dict keys (defaults shown where relevant): cols_input, cols_target, dataframes_descriptions,
keep_na (False), replace_na_in_target, include_all_timestamps_between (None), all_timestamps_intervals (None),
cols_channels_input (None), cols_channels_target (None), as_float32 (True), wanted_year
dataframes - list of dataframes, supposed to work per year. Each dataframe contains np.arrays with dtype = object
The shapes of the np.arrays are assumed to be [c,h,w] for meteorology data or [1] for dust, assuming
a target or an input can be only dust or only meteorology
dataframes_description - a must have description of the dataset (please describe the channels).
e.g.: {"input":{0:"Z500",1:"PV310"},"target":{0:"dust_0",1:"lags_0"}}
cols_channels_input/target - dict with {cols:[idxs]}, e.g. {"PV":[0], "Z":None, "U":[1,3]}.
If None - will use all channels (for dust dataframes keep it None)
include_all_timestamps_between - list of [first,last] to include all dates between them, both are pandas DateTime.
If include_all_timestamps_between==[], will init first and last from dataframes.
all_timestamps_intervals - the intervals of all timestamps dataframe, default to "3h"
as_float32 - if False, will set to float64 on numpy to torch conversion. Defaults to True
"""
self.dataframes = dataframes
self.cols_input = config_dict["cols_input"]
self.cols_target = config_dict["cols_target"]
self.cols_to_keep = self.cols_input+self.cols_target
self.cols_channels_input = config_dict["cols_channels_input"]
self.cols_channels_target = config_dict["cols_channels_target"]
self.dataframes_descriptions = config_dict["dataframes_descriptions"]
self.keep_na = config_dict["keep_na"]
self.replace_na_in_target = config_dict["replace_na_in_target"]
# self.mask_tensor = config_dict["mask_tensor"]
self.as_float32 = config_dict["as_float32"]
self.wanted_year = config_dict["wanted_year"] # can be set to None
self.include_all_timestamps_between = config_dict["include_all_timestamps_between"]
self.all_timestamps_intervals = config_dict["all_timestamps_intervals"] or "3h"
# self.order_of_cols = config_dict["order_of_cols"]
if self.include_all_timestamps_between: self.init_all_timestamps()
self.shapes = {"input": None, "target": None}
self.col_channels_idxs = {"input": {}, "target": {}}
self.init_cols_channels()
self.combine_dataframes()
self.init_shapes_and_idxs()
def init_all_timestamps(self):
first = self.dataframes[0].index[0]
last = self.dataframes[0].index[-1]
if len(self.dataframes)>1:
for df in self.dataframes[1:]:
if df.index[0] < first: first = df.index[0]
if df.index[-1] > last: last = df.index[-1]
self.include_all_timestamps_between = [first, last]
all_dates = pd.date_range(start=first, end=last, freq=self.all_timestamps_intervals, tz="UTC")
all_dates_df = pd.DataFrame({},index=all_dates)
if self.wanted_year is not None:
all_dates_df = all_dates_df[all_dates_df.index.year==self.wanted_year]
self.dataframes.append(all_dates_df)
def init_cols_channels(self):
cols_channels_input_new = {col: None for col in self.cols_input}
cols_channels_target_new = {col: None for col in self.cols_target}
cols_channels_new = [cols_channels_input_new, cols_channels_target_new]
for i,cols_channels_list in enumerate([self.cols_channels_input,self.cols_channels_target]):
if cols_channels_list is not None:
all_cols = self.cols_input if cols_channels_list==self.cols_channels_input else self.cols_target
for col in cols_channels_list:
if col not in all_cols:
print(f"Error! {col} from cols_channels is not in cols list: {all_cols}. Aborting...")
return None
cols_channels_new[i][col] = cols_channels_list[col]
self.cols_channels_input,self.cols_channels_target = cols_channels_new
def combine_dataframes(self, ):
# If include_all_timestamps - assume first and last are not None (else - ignore them)
print("Combining dataframes...")
self.combined_dataframe = self.dataframes[0]
if len(self.dataframes)>1:
for df in self.dataframes[1:]:
self.combined_dataframe = self.combined_dataframe.join(df, how="outer")
self.combined_dataframe = self.combined_dataframe[self.cols_to_keep]
if not self.keep_na:
print(f"Removing NaN values: from length of {len(self.combined_dataframe)} ...")
self.combined_dataframe = self.combined_dataframe.dropna(how="any")
print(f"... keeping {len(self.combined_dataframe)}...")
elif self.replace_na_in_target is not None:
print(f"Replacing NaN values with {self.replace_na_in_target}...")
for col in self.cols_target:
for i in range(len(self.combined_dataframe[col])):
if np.isnan(self.combined_dataframe[col][i]):
d = self.combined_dataframe.index[i]
self.combined_dataframe.loc[d,col]=self.replace_na_in_target
print("...Done! Fixing shapes of singular data cols...")
good_sample_idx = self.get_good_combined_idx()
for col in self.cols_to_keep:
if len(self.combined_dataframe[col][good_sample_idx].shape)<=1:
self.combined_dataframe[col] = self.combined_dataframe[col].astype("object")
self.combined_dataframe[col] = [p for p in np.expand_dims(np.array(self.combined_dataframe[col]),1)]
print("...Done! The resulting dataframe is kept in self.combined_dataframe:")
self.print_statistics(self.combined_dataframe)
def print_statistics(self, df):
print("Number of values per column:")
print(df.count())
print(f"Dates: {df.index[:5]}\n...\n{df.index[-5:]}")
def get_good_combined_idx(self):
for i,date in enumerate(self.combined_dataframe.index):
if(self.combined_dataframe[i:i+1].isna().values.any()):
continue
return i
print("Did not find any row without NaN")
return None
def get_shapes_of_data(self, x):
shapes = x.shape
if len(shapes)<=1:
return 1,0,0
else:
return shapes
def init_shapes_and_idxs(self):
"""
shapes are assumed to be [c,h,w] for meteorology data, or [1] for dust, and [h,w] are the same
for all inputs and targets (separately)
"""
good_idx = self.get_good_combined_idx() # An idx with full row in self.combined_dataframe
dataset_types = ["input","target"]
# if self.cols_target == []:
# dataset_types = ["input"]
# if self.cols_input == []:
# dataset_types = ["target"]
for dataset_type in ["input","target"]:
channels_counter = 0
cols_list = self.cols_input if dataset_type=="input" else self.cols_target
if cols_list==[]:
self.shapes[dataset_type] = None
continue
sample_data = self.combined_dataframe[cols_list[0]][good_idx]
_,h_all,w_all=self.get_shapes_of_data(sample_data)
channels_from_cols = self.cols_channels_input if dataset_type=="input" else self.cols_channels_target
for col in cols_list:
x = self.combined_dataframe[col][good_idx]
if channels_from_cols[col] is not None:
channels_to_keep = np.array(channels_from_cols[col])
x = x[channels_to_keep,:,:]
c,h,w=self.get_shapes_of_data(x)
if w!=w_all or h!=h_all:
print(f"Bad shapes of input parameters! {h,w} and in {col} not {h_all,w_all}. Aborting...")
return
self.col_channels_idxs[dataset_type][col] = np.arange(channels_counter,channels_counter+c)
channels_counter+=c
self.shapes[dataset_type] = [channels_counter,h_all,w_all]
def create_tensor_from_dataset_type(self, dataset_type):
shape = self.shapes[dataset_type]
if shape is None:
return None
N,C,H,W = len(self.combined_dataframe), shape[0], shape[1], shape[2]
if H==0 and W==0:
x = torch.zeros([N,C],dtype=torch.float32)
else:
x = torch.zeros([N,C,H,W],dtype=torch.float32)
cols_list = self.cols_input if dataset_type=="input" else self.cols_target
for col in cols_list:
channels = self.col_channels_idxs[dataset_type][col]
channels_from_df = np.arange(self.combined_dataframe[col][0].shape[0])
channels_from_cols = self.cols_channels_input if dataset_type=="input" else self.cols_channels_target
if channels_from_cols[col] is not None:
channels_from_df = np.array(channels_from_cols[col])
if H==0 and W==0:
x[:,channels] = torch.tensor([c.astype("float32") for c in self.combined_dataframe[col]])
else:
x_tf = torch.tensor([c.astype("float32") for c in self.combined_dataframe[col]])
x[:,channels,:,:] = x_tf[:,channels_from_df,:,:]
return x
def create_and_save_dataset(self, dir_path, base_filename):
"""
Creates a dataset from self.combined_dataframe and saves it
A directory named "metadata" has to be created inside dir_path
"""
x_input = self.create_tensor_from_dataset_type("input")
x_target = self.create_tensor_from_dataset_type("target")
timestamps = self.combined_dataframe.index
self.save_dataset(dir_path, base_filename, x_input, x_target, timestamps)
def save_dataset(self, dir_path, base_filename, x_input, x_target, timestamps):
filename_input = dir_path+"/"+base_filename+"_input.pkl"
filename_target = dir_path+"/"+base_filename+"_target.pkl"
filename_timestamps = dir_path+"/"+base_filename+"_timestamps.pkl"
filename_description = dir_path+"/metadata/"+base_filename+"_descriptions.pkl"
if x_input is not None: torch.save(x_input, filename_input)
if x_target is not None: torch.save(x_target, filename_target)
if timestamps is not None: torch.save(timestamps, filename_timestamps)
if self.dataframes_descriptions is not None: torch.save(self.dataframes_descriptions, filename_description)
@staticmethod
def create_and_save_one_dataset_from_path(dataframes_paths, dataset_arguments, save_as):
dataframes = [torch.load(path) for path in dataframes_paths]
try:
handler = DatasetHandler(dataframes, dataset_arguments)
handler.create_and_save_dataset(save_as["dir_path"], save_as["base_filename"])
except KeyError:
print("...An error occured (most likely all are NaN's), skipping dataset......")
@staticmethod
def create_and_save_datasets_from_paths(dataframes_paths, datasets_arguments, save_as_list, njobs=3):
"""
datasets_arguments is a list of dictionaries required to construct each dataset, while the dataframes are given
as a list of paths to be loaded when constructing and saving the dataset
dataframes_paths: the paths to dataframes to be loaded
datasets_arguments:
{
cols_input
cols_target
dataframes_descriptions
keep_na
include_all_timestamps_between
all_timestamps_intervals
cols_channels_input
cols_channels_target
as_float32
wanted_year
}
save_as_list: [{"dir_path":..., "base_filename":...},]
"""
num_datasets_to_save = len(dataframes_paths)
Parallel(n_jobs=njobs,verbose=100)(delayed(
DatasetHandler.create_and_save_one_dataset_from_path)(dataframes_paths[i], datasets_arguments[i], save_as_list[i])
for i in range(num_datasets_to_save)
)
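# --- usage sketch (illustrative only) ---------------------------------------
# A config_dict of the shape described in __init__ might look like the
# following; every path, column name and value here is a hypothetical example,
# not taken from the project itself:
#
#     config_dict = {
#         "cols_input": ["Z", "PV"],
#         "cols_target": ["dust_0"],
#         "cols_channels_input": {"Z": [0], "PV": None},
#         "cols_channels_target": None,
#         "dataframes_descriptions": {"input": {0: "Z500", 1: "PV310"},
#                                     "target": {0: "dust_0"}},
#         "keep_na": False,
#         "replace_na_in_target": None,
#         "as_float32": True,
#         "wanted_year": 2003,
#         "include_all_timestamps_between": [],
#         "all_timestamps_intervals": "3h",
#     }
#     handler = DatasetHandler([df_meteorology, df_dust], config_dict)
#     handler.create_and_save_dataset("datasets/2003", "dust_2003")
# -----------------------------------------------------------------------------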
| DoriNiss/dust_prediction_using_deep_learning | packages/data_handlers/DatasetHandler.py | DatasetHandler.py | py | 12,765 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.date_range",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
... |
7900094304 |
from django.urls import path
from . import views
urlpatterns = [
path("", views.index, name="index"),
path("login", views.login_view, name="login"),
path("logout", views.logout_view, name="logout"),
path("register", views.register, name="register"),
path("allPosts",views.allPosts,name="allPosts"),
path("profilePage\<str:userid>",views.profilePage,name="profilePage"),
path("follow\<str:userid>",views.follow,name="follow"),
path("unfollow\<str:userid>",views.unfollow,name="unfollow"),
path("following",views.following,name="following"),
# API Routes
path("posts/<int:number>",views.savePost,name="savePost"),
path("like",views.like,name="like")
]
| keshavanand/Network | network/urls.py | urls.py | py | 702 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
29640359742 | import math
import pytest
from datatable import f, dt, update
from tests import assert_equals
stypes_int = dt.ltype.int.stypes
stypes_float = dt.ltype.real.stypes
stypes_str = dt.ltype.str.stypes
stypes_all = [dt.bool8, dt.obj64] + stypes_int + stypes_float + stypes_str
#-------------------------------------------------------------------------------
# Assign None
#-------------------------------------------------------------------------------
def test_assign_none_single():
DT = dt.Frame(A=range(5))
DT[:, f.A] = None
assert_equals(DT, dt.Frame(A=[None]*5, stype=dt.int32))
def test_assign_none_new():
DT = dt.Frame(A=range(5))
DT[:, "B"] = None
assert_equals(DT, dt.Frame(A=range(5), B=[None]*5,
stypes={"A": dt.int32, "B": dt.bool8}))
def test_assign_none_all():
DT = dt.Frame([[True, False], [1, 5], [999, -12], [34.2, math.inf],
[0.001, 1e-100], ['foo', 'bar'], ['buzz', '?']],
names=list("ABCDEFG"),
stypes=['bool', 'int8', 'int64', 'float32', 'float64',
'str32', 'str64'])
DT[:, :] = None
assert_equals(DT, dt.Frame([[None, None]] * 7,
names=tuple("ABCDEFG"),
stypes=(dt.bool8, dt.int8, dt.int64, dt.float32,
dt.float64, dt.str32, dt.str64)))
#-------------------------------------------------------------------------------
# Assign boolean values
#-------------------------------------------------------------------------------
def test_assign_boolean():
DT = dt.Frame(A=[None])
DT[:, "A"] = True
assert_equals(DT, dt.Frame(A=[True]))
def test_assign_boolean2():
DT = dt.Frame(A=[True, False, True])
DT[:, "A"] = False
assert_equals(DT, dt.Frame(A=[False] * 3))
@pytest.mark.parametrize("stype", stypes_int + stypes_float + stypes_str)
def test_assign_boolean_to_different_type(stype):
DT = dt.Frame(A=[5, 7], stype=stype)
assert DT.stype == stype
DT[:, "A"] = False
assert_equals(DT, dt.Frame(A=[False, False], stype=bool))
def test_assign_boolean_partial():
DT = dt.Frame(A=range(5))
DT[2, "B"] = False
assert_equals(DT, dt.Frame(A=range(5), B=[None, None, False, None, None]))
#-------------------------------------------------------------------------------
# Assign integer values
#-------------------------------------------------------------------------------
def test_assign_integer_out_of_range():
DT = dt.Frame(A=[1, 2, 3], stype=dt.int8)
assert DT.stype == dt.int8
DT[:, "A"] = 5000000
assert_equals(DT, dt.Frame(A=[5000000] * 3, stype=dt.int32))
def test_assign_integer_out_of_range_to_subset():
DT = dt.Frame(A=range(10), stype=dt.int8)
assert DT.stype == dt.int8
DT[:3, "A"] = 999
assert_equals(DT, dt.Frame(A=[999, 999, 999, 3, 4, 5, 6, 7, 8, 9],
stype=dt.int16))
DT[-1, "A"] = 10**10
assert_equals(DT, dt.Frame(A=[999, 999, 999, 3, 4, 5, 6, 7, 8, 10**10],
stype=dt.int64))
def test_assign_int_overflow():
# When integer overflows, it becomes a float64 value
DT = dt.Frame(A=range(5), B=[0.0]*5)
DT[:, "A"] = 10**100
DT[:, "B"] = 10**100
assert_equals(DT, dt.Frame(A=[1.0e100]*5, B=[1.0e100]*5))
@pytest.mark.parametrize("stype", [dt.bool8] + stypes_str)
def test_assign_integer_to_different_type(stype):
DT = dt.Frame(A=[5], stype=stype)
assert DT.stype == stype
DT[:, "A"] = 777
assert_equals(DT, dt.Frame(A=[777], stype=dt.int32))
#-------------------------------------------------------------------------------
# Assign float values
#-------------------------------------------------------------------------------
def test_assign_to_newcolumn_subset():
DT = dt.Frame(A=range(5))
DT[[1, 4], "B"] = 3.7
assert_equals(DT, dt.Frame(A=range(5), B=[None, 3.7, None, None, 3.7]))
def test_assign_float_upcast():
# When float32 overflows, it is converted to float64
DT = dt.Frame(A=[1.3, 2.7], stype=dt.float32)
DT[:, "A"] = 1.5e+100
assert_equals(DT, dt.Frame(A=[1.5e100, 1.5e100]))
def test_assign_to_float32_column():
DT = dt.Frame(A=range(5), stype=dt.float32)
DT[:, "A"] = 3.14159
assert_equals(DT, dt.Frame(A=[3.14159] * 5, stype=dt.float32))
@pytest.mark.parametrize("stype", [dt.bool8] + stypes_int + stypes_str)
def test_assign_float_to_different_type(stype):
DT = dt.Frame(A=[5], stype=stype)
assert DT.stype == stype
DT[:, "A"] = 3.14159265
assert_equals(DT, dt.Frame(A=[3.14159265]))
#-------------------------------------------------------------------------------
# Assign string values
#-------------------------------------------------------------------------------
def test_assign_string_to_str64():
DT = dt.Frame(A=["wonder", None, "ful", None], stype=dt.str64)
DT[:, "A"] = "beep"
assert_equals(DT, dt.Frame(A=["beep"] * 4, stype=dt.str64))
@pytest.mark.parametrize("stype", [dt.bool8] + stypes_int + stypes_float)
def test_assign_string_to_different_type(stype):
DT = dt.Frame(A=[5], stype=stype)
assert DT.stype == stype
DT[:, "A"] = 'what?'
assert_equals(DT, dt.Frame(A=['what?']))
def test_assign_str_to_empty_frame():
# See issue #2043, the assignment used to deadlock
DT = dt.Frame(W=[], stype=dt.str32)
DT[f.W == '', f.W] = 'yay!'
assert_equals(DT, dt.Frame(W=[], stype=dt.str32))
#-------------------------------------------------------------------------------
# Assign types
#-------------------------------------------------------------------------------
def test_assign_type_simple():
DT = dt.Frame(A=range(5))
DT["A"] = dt.float64
assert_equals(DT, dt.Frame(A=range(5), stype=dt.float64))
def test_assign_type_to_many_columns():
DT = dt.Frame(A=[True, False], B=[3, 7], C=[2.4, 9.9])
DT[:, :] = dt.int64
assert_equals(DT, dt.Frame(A=[1, 0], B=[3, 7], C=[2, 9], stype=dt.int64))
def test_assign_type_to_some_columns():
DT = dt.Frame(A=['bii', 'doo'], B=[3, 5], C=[4, 4])
DT[:, int] = float
assert_equals(DT, dt.Frame(A=['bii', 'doo'], B=[3.0, 5.0], C=[4.0, 4.0]))
@pytest.mark.parametrize("st", stypes_all)
def test_assign_stypes(st):
DT = dt.Frame(A=[1])
DT["A"] = st
assert DT.stype == st
@pytest.mark.parametrize("pt", [bool, int, float, str, object])
def test_assign_python_type(pt):
DT = dt.Frame(A=[7])
DT["A"] = pt
if pt is object:
assert DT.stype == dt.obj64
else:
assert type(DT[0, 0]) is pt
@pytest.mark.parametrize("lt", [dt.ltype.bool, dt.ltype.int, dt.ltype.real,
dt.ltype.str, dt.ltype.obj])
def test_assign_ltype(lt):
DT = dt.Frame(A=[7])
DT["A"] = lt
assert DT.ltypes[0] == lt
def test_assign_type_without_stype_change():
# Verify that if a column already has same ltype, then that ltype will
# not change upon the assignment
DT = dt.Frame([[0], [1], [2], [3], [4], [5]],
stypes=[dt.bool8, dt.int8, dt.int16, dt.int32, dt.int64,
dt.float32])
DT[:, "C0":"C5"] = int
assert_equals(DT, dt.Frame([[0], [1], [2], [3], [4], [5]],
stypes=[dt.int32, dt.int8, dt.int16, dt.int32,
dt.int64, dt.int32]))
def test_assign_stype_to_new_column():
DT = dt.Frame(S=range(5))
DT["N"] = dt.float64
DT["M"] = dt.float32
assert_equals(DT, dt.Frame(S=range(5), N=[None]*5, M=[None]*5,
stypes=dict(N=dt.float64, M=dt.float32)))
def test_assign_bad_type():
DT = dt.Frame(A=range(5))
with pytest.raises(ValueError, match="Unknown type <class 'type'> used in "
"the replacement expression"):
DT["A"] = type
def test_assign_different_types():
DT = dt.Frame(A=range(5), B=list("ABCDE"))
assert DT.stypes == (dt.int32, dt.str32)
DT[:, update(A=dt.float32, B=dt.str64)]
assert_equals(DT, dt.Frame(A=range(5), B=list("ABCDE"),
stypes=dict(A=dt.float32, B=dt.str64)))
def test_assign_types_partial():
DT = dt.Frame(A=range(5))
with pytest.raises(ValueError, match="Partial reassignment of Column's "
"type is not possible"):
DT[:2, "A"] = str
#-------------------------------------------------------------------------------
# Assign range
#-------------------------------------------------------------------------------
def test_assign_range():
DT = dt.Frame(A=[3, 4, 0])
DT["B"] = range(3)
assert_equals(DT, dt.Frame(A=[3, 4, 0], B=[0, 1, 2]))
def test_assign_range2():
DT = dt.Frame(A=[7]*7, stype=dt.float32)
DT["A"] = range(3, 10)
assert_equals(DT, dt.Frame(A=range(3, 10)))
def test_assign_range_subframe():
DT = dt.Frame(A=range(20))
DT[10:, "A"] = range(10)
assert_equals(DT, dt.Frame(A=list(range(10))*2))
def test_assign_range_compute():
DT = dt.Frame(A=[5, 10, 100])
DT["B"] = f.A * range(3)
assert_equals(DT, dt.Frame(A=[5, 10, 100], B=[0, 10, 200]))
| h2oai/datatable | tests/ijby/test-assign-scalar.py | test-assign-scalar.py | py | 9,236 | python | en | code | 1,763 | github-code | 1 | [
{
"api_name": "datatable.dt.ltype",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "datatable.dt",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "datatable.dt.ltype",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "datatable.d... |
27666235820 | # -*- coding: utf-8 -*-
import logging
from .utils import parse_date
from .config import Config
import xmltodict
logger = logging.getLogger()
class XMLParser(object):
def __init__(self, xml, config=None):
self.xml = xml
self.errors = None
if config is None:
config = Config()
self.config = config
self.parse_xml(xml)
logger.debug(self.__dict__)
def parse_xml(self, xml):
try:
parsed = xmltodict.parse(xml, encoding="iso-8859-1")
except Exception as e:
logger.debug('Cannot parse the returned xml "%s" -> "%s"', xml, e)
parsed = {}
if 'errors' in parsed:
self.errors = parsed['errors']['error']
return parsed
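# Pattern shared by the subclasses below: each overrides parse_xml(), delegates to
# this base parser, returns early when self.errors is set, and then copies its own
# payload section ('transaction', 'session', 'checkout', 'preApproval', ...) onto
# instance attributes via setattr or explicit assignments.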
class PagSeguroNotificationResponse(XMLParser):
def __getitem__(self, key):
return getattr(self, key, None)
def parse_xml(self, xml):
parsed = super(PagSeguroNotificationResponse, self).parse_xml(xml)
if self.errors:
return
transaction = parsed.get('transaction', {})
for k, v in transaction.items():
setattr(self, k, v)
class PagSeguroPreApprovalNotificationResponse(XMLParser):
def __getitem__(self, key):
return getattr(self, key, None)
def parse_xml(self, xml):
parsed = super(PagSeguroPreApprovalNotificationResponse,
self).parse_xml(xml)
if self.errors:
return
transaction = parsed.get('transaction', {})
for k, v in transaction.items():
setattr(self, k, v)
class PagSeguroPreApprovalCancel(XMLParser):
def __getitem__(self, key):
return getattr(self, key, None)
def parse_xml(self, xml):
parsed = super(PagSeguroPreApprovalCancel, self).parse_xml(xml)
if self.errors:
return
transaction = parsed.get('transaction', {})
for k, v in transaction.items():
setattr(self, k, v)
class PagSeguroCheckoutSession(XMLParser):
def __init__(self, xml, config=None):
self.session_id = None
super(PagSeguroCheckoutSession, self).__init__(xml, config)
def parse_xml(self, xml):
parsed = super(PagSeguroCheckoutSession, self).parse_xml(xml)
if self.errors:
return
session = parsed.get('session', {})
self.session_id = session.get('id')
class PagSeguroPreApprovalPayment(XMLParser):
def __init__(self, xml, config=None):
self.code = None
super(PagSeguroPreApprovalPayment, self).__init__(xml, config)
def parse_xml(self, xml):
parsed = super(PagSeguroPreApprovalPayment, self).parse_xml(xml)
if self.errors:
return
result = parsed.get('result', {})
self.code = result.get('transactionCode')
self.date = parse_date(result.get('date'))
class PagSeguroCheckoutResponse(XMLParser):
def __init__(self, xml, config=None):
self.code = None
self.date = None
self.payment_url = None
self.payment_link = None
self.transaction = None
super(PagSeguroCheckoutResponse, self).__init__(xml, config)
def parse_xml(self, xml):
parsed = super(PagSeguroCheckoutResponse, self).parse_xml(xml)
if self.errors:
return
checkout = parsed.get('checkout', {})
self.code = checkout.get('code')
self.date = parse_date(checkout.get('date'))
self.payment_url = self.config.PAYMENT_URL % self.code
# this is used only for transparent checkout process
self.transaction = parsed.get('transaction', {})
self.payment_link = self.transaction.get('paymentLink')
class PagSeguroTransactionSearchResult(XMLParser):
current_page = None
total_pages = None
results_in_page = None
transactions = []
def __getitem__(self, key):
return getattr(self, key, None)
def parse_xml(self, xml):
parsed = super(PagSeguroTransactionSearchResult, self).parse_xml(xml)
if self.errors:
return
search_result = parsed.get('transactionSearchResult', {})
self.transactions = search_result.get('transactions', {})
self.transactions = self.transactions.get('transaction', [])
if not isinstance(self.transactions, list):
self.transactions = [self.transactions]
self.current_page = search_result.get('currentPage', None)
if self.current_page is not None:
self.current_page = int(self.current_page)
self.results_in_page = search_result.get('resultsInThisPage', None)
if self.results_in_page is not None:
self.results_in_page = int(self.results_in_page)
self.total_pages = search_result.get('totalPages', None)
if self.total_pages is not None:
self.total_pages = int(self.total_pages)
class PagSeguroPreApproval(XMLParser):
def __getitem__(self, key):
return getattr(self, key, None)
def parse_xml(self, xml):
parsed = super(PagSeguroPreApproval, self).parse_xml(xml)
if self.errors:
return
result = parsed.get('preApproval', {})
self.name = result.get('name', None)
self.code = result.get('code', None)
self.date = parse_date(result.get('date'))
self.tracker = result.get('tracker', None)
self.status = result.get('status', None)
self.reference = result.get('reference', None)
self.last_event_date = result.get('lastEventDate', None)
self.charge = result.get('charge', None)
self.sender = result.get('sender', {})
class PagSeguroPreApprovalSearch(XMLParser):
current_page = None
total_pages = None
results_in_page = None
pre_approvals = []
def __getitem__(self, key):
return getattr(self, key, None)
def parse_xml(self, xml):
parsed = super(PagSeguroPreApprovalSearch, self).parse_xml(xml)
if self.errors:
return
search_result = parsed.get('preApprovalSearchResult', {})
self.pre_approvals = search_result.get('preApprovals', {})
self.pre_approvals = self.pre_approvals.get('preApproval', [])
if not isinstance(self.pre_approvals, list):
self.pre_approvals = [self.pre_approvals]
self.current_page = search_result.get('currentPage', None)
if self.current_page is not None:
self.current_page = int(self.current_page)
self.results_in_page = search_result.get('resultsInThisPage', None)
if self.results_in_page is not None:
self.results_in_page = int(self.results_in_page)
self.total_pages = search_result.get('totalPages', None)
if self.total_pages is not None:
self.total_pages = int(self.total_pages)
| Japle/python-pagseguro | pagseguro/parsers.py | parsers.py | py | 6,801 | python | en | code | 173 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "config.Config",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "xmltodict.parse",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "utils.parse_date",
... |
74356016992 | # importing the necessary libraries
# import tensorflow.keras as keras
from __future__ import print_function
import keras
from keras.layers import Convolution2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import *
from keras.layers.normalization import BatchNormalization
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
from keras import initializers
from keras.preprocessing import image
from keras.preprocessing.image import img_to_array
from keras import layers
from keras.models import load_model
import matplotlib.image as mpimg
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from keras import backend as K
from keras.layers import Layer
from keras import activations
from keras import utils
from keras import models
from keras.models import Model
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam, SGD
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from os import listdir
import os
from imutils import paths
import numpy as np
import pickle
import matplotlib.pyplot as plt
import cv2
import random
import os
class Length(layers.Layer):
"""
Compute the length of vectors. This is used to compute a Tensor that has the same shape as y_true in margin_loss.
Using this layer as model's output can directly predict labels by using `y_pred = np.argmax(model.predict(x), 1)`
inputs: shape=[None, num_vectors, dim_vector]
output: shape=[None, num_vectors]
"""
def call(self, inputs, **kwargs):
return K.sqrt(K.sum(K.square(inputs), -1) + K.epsilon())
def compute_output_shape(self, input_shape):
return input_shape[:-1]
def get_config(self):
config = super(Length, self).get_config()
return config
class Mask(layers.Layer):
"""
Mask a Tensor with shape=[None, num_capsule, dim_vector] either by the capsule with max length or by an additional
input mask. Except the max-length capsule (or specified capsule), all vectors are masked to zeros. Then flatten the
masked Tensor.
For example:
```
x = keras.layers.Input(shape=[8, 3, 2]) # batch_size=8, each sample contains 3 capsules with dim_vector=2
y = keras.layers.Input(shape=[8, 3]) # True labels. 8 samples, 3 classes, one-hot coding.
out = Mask()(x) # out.shape=[8, 6]
# or
out2 = Mask()([x, y]) # out2.shape=[8,6]. Masked with true labels y. Of course y can also be manipulated.
```
"""
def call(self, inputs, **kwargs):
if type(inputs) is list: # true label is provided with shape = [None, n_classes], i.e. one-hot code.
assert len(inputs) == 2
inputs, mask = inputs
else: # if no true label, mask by the max length of capsules. Mainly used for prediction
# compute lengths of capsules
x = K.sqrt(K.sum(K.square(inputs), -1))
# generate the mask which is a one-hot code.
# mask.shape=[None, n_classes]=[None, num_capsule]
mask = K.one_hot(indices=K.argmax(x, 1), num_classes=x.get_shape().as_list()[1])
# inputs.shape=[None, num_capsule, dim_capsule]
# mask.shape=[None, num_capsule]
# masked.shape=[None, num_capsule * dim_capsule]
masked = K.batch_flatten(inputs * K.expand_dims(mask, -1))
return masked
def compute_output_shape(self, input_shape):
if type(input_shape[0]) is tuple: # true label provided
return tuple([None, input_shape[0][1] * input_shape[0][2]])
else: # no true label provided
return tuple([None, input_shape[1] * input_shape[2]])
def get_config(self):
config = super(Mask, self).get_config()
return config
def squash(vectors, axis=-1):
"""
The non-linear activation used in Capsule. It drives the length of a large vector to near 1 and small vector to 0
:param vectors: some vectors to be squashed, N-dim tensor
:param axis: the axis to squash
:return: a Tensor with same shape as input vectors
"""
s_squared_norm = K.sum(K.square(vectors), axis, keepdims=True)
scale = s_squared_norm / (1 + s_squared_norm) / K.sqrt(s_squared_norm + K.epsilon())
return scale * vectors
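# Behaviour of squash (follows directly from the formula above): with s = ||v||^2,
# the output length is s / (1 + s), so long vectors are squashed toward length 1
# and short vectors are driven toward 0 while the direction is preserved.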
class CapsuleLayer(layers.Layer):
"""
The capsule layer. It is similar to Dense layer. Dense layer has `in_num` inputs, each is a scalar, the output of the
neuron from the former layer, and it has `out_num` output neurons. CapsuleLayer just expand the output of the neuron
from scalar to vector. So its input shape = [None, input_num_capsule, input_dim_capsule] and output shape = \
[None, num_capsule, dim_capsule]. For Dense Layer, input_dim_capsule = dim_capsule = 1.
:param num_capsule: number of capsules in this layer
:param dim_capsule: dimension of the output vectors of the capsules in this layer
:param routings: number of iterations for the routing algorithm
"""
def __init__(self, num_capsule, dim_capsule, routings=3,
kernel_initializer='glorot_uniform',
**kwargs):
super(CapsuleLayer, self).__init__(**kwargs)
self.num_capsule = num_capsule
self.dim_capsule = dim_capsule
self.routings = routings
self.kernel_initializer = initializers.get(kernel_initializer)
def build(self, input_shape):
assert len(input_shape) >= 3, "The input Tensor should have shape=[None, input_num_capsule, input_dim_capsule]"
self.input_num_capsule = input_shape[1]
self.input_dim_capsule = input_shape[2]
# Transform matrix
self.W = self.add_weight(shape=[self.num_capsule, self.input_num_capsule,
self.dim_capsule, self.input_dim_capsule],
initializer=self.kernel_initializer,
name='W')
self.built = True
def call(self, inputs, training=None):
# inputs.shape=[None, input_num_capsule, input_dim_capsule]
# inputs_expand.shape=[None, 1, input_num_capsule, input_dim_capsule]
inputs_expand = K.expand_dims(inputs, 1)
# Replicate num_capsule dimension to prepare being multiplied by W
# inputs_tiled.shape=[None, num_capsule, input_num_capsule, input_dim_capsule]
inputs_tiled = K.tile(inputs_expand, [1, self.num_capsule, 1, 1])
# Compute `inputs * W` by scanning inputs_tiled on dimension 0.
# x.shape=[num_capsule, input_num_capsule, input_dim_capsule]
# W.shape=[num_capsule, input_num_capsule, dim_capsule, input_dim_capsule]
# Regard the first two dimensions as `batch` dimension,
# then matmul: [input_dim_capsule] x [dim_capsule, input_dim_capsule]^T -> [dim_capsule].
# inputs_hat.shape = [None, num_capsule, input_num_capsule, dim_capsule]
inputs_hat = K.map_fn(lambda x: K.batch_dot(x, self.W, [2, 3]), elems=inputs_tiled)
# Begin: Routing algorithm ---------------------------------------------------------------------#
# The prior for coupling coefficient, initialized as zeros.
# b.shape = [None, self.num_capsule, self.input_num_capsule].
b = tf.zeros(shape=[K.shape(inputs_hat)[0], self.num_capsule, self.input_num_capsule])
assert self.routings > 0, 'The routings should be > 0.'
for i in range(self.routings):
# c.shape=[batch_size, num_capsule, input_num_capsule]
c = tf.nn.softmax(b, dim=1)
# c.shape = [batch_size, num_capsule, input_num_capsule]
# inputs_hat.shape=[None, num_capsule, input_num_capsule, dim_capsule]
# The first two dimensions as `batch` dimension,
# then matmul: [input_num_capsule] x [input_num_capsule, dim_capsule] -> [dim_capsule].
# outputs.shape=[None, num_capsule, dim_capsule]
outputs = squash(K.batch_dot(c, inputs_hat, [2, 2])) # [None, 10, 16]
if i < self.routings - 1:
# outputs.shape = [None, num_capsule, dim_capsule]
# inputs_hat.shape=[None, num_capsule, input_num_capsule, dim_capsule]
# The first two dimensions as `batch` dimension,
# then matmul: [dim_capsule] x [input_num_capsule, dim_capsule]^T -> [input_num_capsule].
# b.shape=[batch_size, num_capsule, input_num_capsule]
b += K.batch_dot(outputs, inputs_hat, [2, 3])
# End: Routing algorithm -----------------------------------------------------------------------#
return outputs
def compute_output_shape(self, input_shape):
return tuple([None, self.num_capsule, self.dim_capsule])
def get_config(self):
config = {
'num_capsule': self.num_capsule,
'dim_capsule': self.dim_capsule,
'routings': self.routings
}
base_config = super(CapsuleLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def PrimaryCap(inputs, dim_capsule, n_channels, kernel_size, strides, padding):
"""
Apply Conv2D `n_channels` times and concatenate all capsules
:param inputs: 4D tensor, shape=[None, width, height, channels]
:param dim_capsule: the dim of the output vector of capsule
:param n_channels: the number of types of capsules
:return: output tensor, shape=[None, num_capsule, dim_capsule]
"""
output = layers.Conv2D(filters=dim_capsule * n_channels, kernel_size=kernel_size, strides=strides, padding=padding,
name='primarycap_conv2d')(inputs)
outputs = layers.Reshape(target_shape=[-1, dim_capsule], name='primarycap_reshape')(output)
return layers.Lambda(squash, name='primarycap_squash')(outputs)
"""
# The following is another way to implement primary capsule layer. This is much slower.
# Apply Conv2D `n_channels` times and concatenate all capsules
def PrimaryCap(inputs, dim_capsule, n_channels, kernel_size, strides, padding):
outputs = []
for _ in range(n_channels):
output = layers.Conv2D(filters=dim_capsule, kernel_size=kernel_size, strides=strides, padding=padding)(inputs)
outputs.append(layers.Reshape([output.get_shape().as_list()[1] ** 2, dim_capsule])(output))
outputs = layers.Concatenate(axis=1)(outputs)
return layers.Lambda(squash)(outputs)
"""
def CapsNet(input_shape, n_class, routings):
"""
A Capsule Network on MNIST.
:param input_shape: data shape, 3d, [width, height, channels]
:param n_class: number of classes
:param routings: number of routing iterations
:return: Two Keras Models, the first one used for training, and the second one for evaluation.
`eval_model` can also be used for training.
"""
x = layers.Input(shape=input_shape)
# Layer 1: Just a conventional Conv2D layer
conv1 = layers.Conv2D(filters=128, kernel_size=9, strides=1, padding='valid', activation='relu', name='conv1')(x)
# Layer 2: Conv2D layer with `squash` activation, then reshape to [None, num_capsule, dim_capsule]
primarycaps = PrimaryCap(conv1, dim_capsule=8, n_channels=32, kernel_size=9, strides=2, padding='valid')
# Layer 3: Capsule layer. Routing algorithm works here.
digitcaps = CapsuleLayer(num_capsule=n_class, dim_capsule=16, routings=routings,
name='digitcaps')(primarycaps)
# Layer 4: This is an auxiliary layer to replace each capsule with its length. Just to match the true label's shape.
# If using tensorflow, this will not be necessary. :)
out_caps = Length(name='capsnet')(digitcaps)
# Decoder network.
y = layers.Input(shape=(n_class,))
masked_by_y = Mask()([digitcaps, y]) # The true label is used to mask the output of capsule layer. For training
masked = Mask()(digitcaps) # Mask using the capsule with maximal length. For prediction
# Shared Decoder model in training and prediction
decoder = models.Sequential(name='decoder')
decoder.add(layers.Dense(512, activation='relu', input_dim=16 * n_class))
decoder.add(layers.Dense(1024, activation='relu'))
decoder.add(layers.Dense(np.prod(input_shape), activation='sigmoid'))
decoder.add(layers.Reshape(target_shape=input_shape, name='out_recon'))
# Models for training and evaluation (prediction)
train_model = models.Model([x, y], [out_caps, decoder(masked_by_y)])
eval_model = models.Model(x, [out_caps, decoder(masked)])
# manipulate model
noise = layers.Input(shape=(n_class, 16))
noised_digitcaps = layers.Add()([digitcaps, noise])
masked_noised_y = Mask()([noised_digitcaps, y])
manipulate_model = models.Model([x, y, noise], decoder(masked_noised_y))
return train_model, eval_model, manipulate_model
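# Illustrative construction (mirrors the call in capsule_prediction further below):
#   train_model, eval_model, manipulate_model = CapsNet(input_shape=(28, 28, 3),
#                                                        n_class=10, routings=3)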
def margin_loss(y_true, y_pred):
"""
Margin loss for Eq.(4). When y_true[i, :] contains more than one `1`, this loss should still work, but that case has not been tested.
:param y_true: [None, n_classes]
:param y_pred: [None, num_capsule]
:return: a scalar loss value.
"""
L = y_true * K.square(K.maximum(0., 0.9 - y_pred)) + \
0.5 * (1 - y_true) * K.square(K.maximum(0., y_pred - 0.1))
return K.mean(K.sum(L, 1))
def capsule_prediction(image_path):
# define the model
model, eval_model, manipulate_model = CapsNet(input_shape=(28, 28, 3),
n_class=10,
routings=3)
eval_model.summary()
dir_name = os.path.dirname(os.path.abspath(__file__))
eval_model.load_weights(dir_name + "/best_weights_capsule_train.h5")
# setting up predictions
new_img = None
try:
new_img = image.load_img(image_path, target_size=(28, 28))
except OSError:
return {'error': 'could not identify image'}
the_image = image.load_img(image_path)
img = image.img_to_array(new_img)
img = np.expand_dims(img, axis=0)
img = img / 255
prediction, y_recon = eval_model.predict(img)
li = ["Bacterial_spot", "Early_blight", "healthy", "Late_blight", "Leaf_Mold", "mosaic_virus", "Septoria_leaf_spot",
"spider_mite", "Target_Spot", "Yellow_Leaf_Curl"]
# decode the results into a list of tuples (class, description, probability)
# (one such list for each sample in the batch)
class_name = li[prediction.argmax()]
'''
d = prediction.flatten()
j = d.max()
for index,item in enumerate(d):
if item == j:
class_name = li[index]
'''
# plotting image with predicted class name
'''plt.figure(figsize=(4, 4))
plt.imshow(the_image)
plt.axis('off')
plt.title(class_name)'''
K.clear_session()
return {'disease': class_name}
| Tobenna-KA/mimeai-api | api/src/capsule_net.py | capsule_net.py | py | 15,008 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "keras.layers.Layer",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "keras.layers",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "keras.backend.sqrt",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "keras.backend... |
71632700195 | import logging
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def do_add_remove_maglev(test, table, add, remove, exp_table, exp_prev_ele_map):
logger.info("===Add/Remove Maglev test===")
if add:
logger.info("Adding {} to the table.".format(add))
table.add(add)
if remove:
logger.info("Removing {} from the table.".format(remove))
table.remove(remove)
act_table = table.get_table()
act_prev_ele_map = table.get_prev_elements_map()
test.assertEqual(exp_table, act_table)
test.assertEqual(act_prev_ele_map, exp_prev_ele_map)
logger.info("Number of elements replaced {}".format(
table.elements_replaced))
logger.info("Number of elements replacing {}".format(
table.elements_replacing))
| futurewei-cloud/zeta | test/helper.py | helper.py | py | 803 | python | en | code | 16 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 5,
"usage_type": "attribute"
}
] |
19986667975 | import numpy as np
import pandas as pd
import sqlite3
from FoodTables.FoundationTable import FoundationTable
from FoodTables.MarketTable import MarketTable
VOLUME_CONVERSIONS_TABLE = "volume_conversions"
MASS_CONVERSIONS_TABLE = "mass_conversions"
FOOD_TABLE = "food"
con = sqlite3.connect("data.sqlite")
def reset_db():
con.execute(f"DROP TABLE IF EXISTS {VOLUME_CONVERSIONS_TABLE}")
con.execute(f"CREATE TABLE IF NOT EXISTS `{VOLUME_CONVERSIONS_TABLE}` "
f"(`id` TEXT NOT NULL, `name` TEXT NOT NULL, `equiv` REAL NOT NULL, PRIMARY KEY(`id`))")
if __name__ == '__main__':
reset_db()
u_df = pd.read_csv("conversions.csv", dtype={"id": "string", "name": "string", "equiv": np.float64})
u_df.to_sql(
name=VOLUME_CONVERSIONS_TABLE,
con=con,
if_exists="append",
index=False
)
pd.concat(
[
FoundationTable().get_df(),
MarketTable().get_df()
],
ignore_index=True
).to_sql(
name=FOOD_TABLE,
con=sqlite3.connect("data.sqlite"),
if_exists="replace",
index=False
)
| danzou56/food-data | make_db.py | make_db.py | py | 1,128 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sqlite3.connect",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "pandas.concat",
... |
35010952644 | # Crawling sports articles
import requests
from bs4 import BeautifulSoup as bs
from apps.resources.models import Article
def crawling_entertain_news():
entertain_url = "https://entertain.naver.com"
entertain_url_home = entertain_url + "/home"
response = requests.get(entertain_url_home)
soup = bs(response.text, "html.parser")
ranking = soup.find("div", {"class": "rank_lst"})
lis = ranking.findAll("li")
entertain_news_list = []
for li in lis:
entertain_news_list.append(entertain_url + li.find("a")["href"])
# Too many entertainment articles, so trim the list to 5
entertain_news_list = entertain_news_list[:5]
parsed_news = []
for news in entertain_news_list:
response = requests.get(news)
soup = bs(response.text, "html.parser")
title = (
soup.select(".end_tit")[0]
.text.replace("\n", "")
.replace("\t", "")
.replace("\r", "")
)
content = (
soup.find("div", {"id": "articeBody"})
.text.replace("\n", "")
.replace("\t", "")
.replace("\r", "")
)
url = news
if 500 < len(content) < 4000:  # '&' binds tighter than '>', so the original bitwise check was incorrect
parsed_news.append(
{
"title": title,
"content": content,
"url": url,
}
)
if len(parsed_news) > 0:
for news in parsed_news:
if Article.objects.filter(title=news["title"]).exists():
continue
else:
Article.objects.create(
kind=Article.ArticleKindChoices.ENTERTAINMENT,
title=news["title"],
content=news["content"],
url=news["url"],
)
| Billionaire-Project/four_hours_service | scheduler/news_crawling_entertain.py | news_crawling_entertain.py | py | 1,835 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"... |
26218292301 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0019_auto_20150121_1722'),
]
operations = [
migrations.AlterField(
model_name='contestentry',
name='jam_points',
field=models.DecimalField(default=0, max_digits=9, decimal_places=1),
preserve_default=True,
),
]
| mikeparisstuff/nostrajamus | api/migrations/0020_auto_20150122_0435.py | 0020_auto_20150122_0435.py | py | 474 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterField",
"line_number": 14,
"usage_type": "call"
},
{... |
32746010298 | import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
class clusteredData():
def __init__(self,dataSet)-> None:
self.__dataSet = dataSet
self.__centriods,self.__belongsTo = elbowMethod(dataSet,8)
#self.__centriods,self.__belongsTo = kmeans(3,dataSet)
#self.plot()
def plot(self) -> None:
k = len(self.__centriods)
colors = ['r', 'g','y']
#color=iter(cm.rainbow(np.linspace(0,1,k)))
__, ax = plt.subplots()
for i in range(len(self.__dataSet)):
x = self.__dataSet[i][0]
y = self.__dataSet[i][1]
ax.plot(x, y, (colors[int(self.__belongsTo[i])] + 'o'))
for i in range(k):
ax.plot(self.__centriods[i][0], self.__centriods[i][1], 'bo')
plt.show()
def getBelongsTo(self):
return self.__belongsTo
def getCentriods(self):
return self.__centriods
def euclidian(a, b):
return np.linalg.norm(a-b)
def kmeans(k: int, dataSet: np.ndarray):
numInstances, numFeatures = dataSet.shape
# intializes the centriods at a random starting point
startingIndexes = np.random.randint(0, numInstances - 1, size=k)
unique = False
while(not unique):
indices = set()
unique = True
for index in startingIndexes:
if index in indices:
unique = False
startingIndexes = np.random.randint(0, numInstances - 1, size=k)
break
else:
indices.add(index)
centriods = dataSet[startingIndexes]
oldCentriods = np.zeros(centriods.shape)
belongsTo = np.zeros((numInstances, 1))
norm = euclidian(centriods,oldCentriods)
while norm > 0:
norm = euclidian(centriods,oldCentriods)
for dataIndex, data in enumerate(dataSet):
distanceToCentriod = np.zeros((k,1))
for centriodIndex, centroid in enumerate(centriods):
distanceToCentriod[centriodIndex] = euclidian(centroid,data)
belongsTo[dataIndex][0] = np.argmin(distanceToCentriod)
tmpCentriods = np.zeros((k, numFeatures))
for index in range(len(centriods)):
# data points assigned to cluster
assignedData = [i for i in range(len(belongsTo)) if belongsTo[i] == index]
centriod = np.mean(dataSet[assignedData], axis=0)
tmpCentriods [index, :] = centriod
oldCentriods = centriods
centriods = tmpCentriods
return centriods, belongsTo
def getLine(x1,y1,x2,y2):
m =(y2-y1)/(x2-x1)
b = y2 - (m*x2)
return m,b
def getClusters(centriods, belongsTo, dataSet):
clusters = []
for centriod in centriods:
cluster = {}
cluster["centriod"] = centriod
cluster["dataPoints"] = []
clusters.append(cluster)
for itemIndex, item in enumerate(belongsTo):
clusters[int(item)]["dataPoints"].append(dataSet[itemIndex])
return clusters
def getClusterMean(cluster):
distance = 0
for datapoint in cluster["dataPoints"]:
distance += euclidian(cluster["centriod"],datapoint)
try:
return distance/len(cluster["dataPoints"])
except ZeroDivisionError:
print("zero sized cluster")
return None
def plotElbowMethod(datas):
#color=iter(cm.rainbow(np.linspace(0,1,k)))
__, ax = plt.subplots()
for dataIndex, data in enumerate(datas):
x = dataIndex
y = data["sse"]
ax.plot(x, y, "bo")
plt.show()
def elbowMethod(dataSet, maxK = 8,plot =False):
datas = []
for i in range(1,maxK+1):
data = {}
centriods, belongsTo = kmeans(i,dataSet)
data['centriods'] = centriods
data['belongsTo'] = belongsTo
data['sse'] = 0
clusters = getClusters(centriods, belongsTo, dataSet)
for cluster in clusters:
mean = getClusterMean(cluster)
for dataPoint in cluster["dataPoints"]:
data['sse'] += (euclidian(dataPoint,cluster["centriod"]) - mean )**2
datas.append(data)
#plots elbow curve
if plot:
plotElbowMethod(datas)
#gets line of starting and end points
m,b = getLine(0,datas[0]['sse'],len(datas)-1,datas[len(datas)-1]['sse'])
best = 0
length = 0
for i in range(len(datas)):
y = m*i +b
#print(y-datas[i]["sse"])
if y - datas[i]["sse"] > length:
best = i
length = y - datas[i]["sse"]
print(best+1)
return datas[best]['centriods'], datas[best]['belongsTo']
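# Illustrative usage (editor's sketch, assumes a 2-D numpy dataset):
#   data = np.random.rand(100, 2)
#   centriods, belongsTo = elbowMethod(data, maxK=6)
# clusteredData at the top of this module wires up the same call with maxK=8.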
| danieljimenez1337/tlds-parser | kmeans_util.py | kmeans_util.py | py | 4,685 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "m... |
30289814434 | import logging
from google.cloud import secretmanager
from google.cloud import pubsub
# format logs
formatter = '%(asctime)s - %(levelname)s - %(message)s'
logging.basicConfig(format=formatter, level=logging.DEBUG)
logging.basicConfig()
logger = logging.getLogger(__name__)
# get secrets
secrets = secretmanager.SecretManagerServiceClient()
gcp_project_id = secrets.access_secret_version(request={"name": "projects/952416783871/secrets/gcp_project_id/versions/1"}).payload.data.decode()
# connect to resources
publisher = pubsub.PublisherClient()
# sends FEC file paths to be imported into BigQuery
def federal_fec_ingest_queue_import(message, context):
# list of files to import from Google Cloud Storage
files = [
"weball22/weball22.txt",
"cn22/cn.txt",
"ccl22/ccl.txt",
"webl22/webl22.txt",
"cm22/cm.txt",
"webk22/webk22.txt",
"indiv22/itcont.txt",
"pas222/itpas2.txt",
"oth22/itoth.txt",
"oppexp22/oppexp.txt",
"independent_expenditure_2022/independent_expenditure_2022.csv",
"ElectioneeringComm_2022/ElectioneeringComm_2022.csv",
"CommunicationCosts_2022/CommunicationCosts_2022.csv"
]
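# One Pub/Sub message is published per file below; the GCS path travels as the
# 'filepath' message attribute while the payload itself is only a marker byte string.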
for filepath in files:
# sends a message to Pub/Sub to import the files
topic = 'projects/' + gcp_project_id + '/topics/federal_fec_ingest_import_bigquery'
publisher.publish(topic, b'import FEC file', filepath=filepath)
logger.info(' - '.join(['COMPLETED', 'file sent to be imported', filepath]))
# return number of files queued
return len(files)
| codefordemocracy/data | federal/fec/functions/federal_fec_ingest_queue_import/main.py | main.py | py | 1,606 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.getLo... |
26385311712 |
import numpy as np
# from sklearn.metrics import accuracy_score
from sortedcontainers import SortedList
import math
import matplotlib.pyplot as plt
class ml_model(object):
"""
Parameters
------------
method : str
which ML lib
learning_rate : float (default: 0.01)
Learning rate (between 0.0 and 1.0)
num_epochs : int (default: 100)
How many run to train dataset.
shuffle : bool (default: True)
Shuffles training data every epoch
Attributes
------------
w_ : 1d-array, float
Weights
errors_ : list, float
"""
def __init__(self, method=None, learning_rate=0.01,
num_epochs=100, shuffle=None):
self.method = method
self.learning_rate = learning_rate
self.num_epochs = num_epochs
self.shuffle = shuffle
self.w_ = []
self.cost_ = []
# print("ml_model: __init__")
def fit(self, X_train, Y, standardize=False):
# Dimensions, Features of X_Train
self.N, self.D = X_train.shape
print("X_train has {0} samples with {1} \
features".format(self.N, self.D))
self.Y = Y
# print("X_train:", X_train)
if(standardize is True):
X_std = np.copy(X_train)
for i in range(self.D):
X_std[:, i] = \
(X_train[:, i] - X_train[:, i].mean()) / X_train[:, i].std()
self.X_train = np.copy(X_std)
else:
self.X_train = np.copy(X_train)
# print("X_train_std:", self.X_train)
if (self.shuffle is True):
# init w_ as random numbers
self.w_ = np.random.randn(1 + self.D)
# shuffle X_train, Y
# r = np.random.permutation(self.N)
# self.X_train = self.X_train[r]
# self.Y = self.Y[r]
else:
# init w_ as zeros with w0
self.w_ = np.zeros((1 + self.D))
print("shape of X_train:", self.X_train.shape)
print("shape of w_:", self.w_.shape)
print("shape of Y:", self.Y.shape)
def activation_fn(self, z, activation=None):
if(activation is None):
return z
elif(activation == "sign"):
return np.piecewise(z, [z < 0, z >= 0], [-1, 1])
elif(activation == "step"):
return np.piecewise(z, [z < 0, z >= 0], [0, 1])
elif(activation == "sigmoid"):
return 1.0 / (1.0 + np.exp(-z))
elif(activation == "tanh"):
return (1.0 - np.exp(-2*z))/(1.0 + np.exp(-2*z))
def net_input(self, X_data):
# net input: w0x0 + w1x1... + wixi
# print("net_input:")
# print("shape of X_data:", X_data.shape)
# print("shape of self.w_[1:]:", self.w_[1:].shape)
return np.dot(X_data, self.w_[1:]) + self.w_[0]
def predict(self):
pass
def r2_evl(self, Y, Y_hat):
d1 = Y - Y_hat
d2 = Y - Y.mean()
# print("Shape of d1:", d1.shape)
# print("Shape of d2:", d2.shape)
r2 = 1 - (d1.dot(d1) / d2.dot(d2))
print("R2:", r2)
def score(self, Y, Y_hat):
print('Misclassified samples: %d' % (Y != Y_hat).sum())
# print('Accuracy: %.2f' % accuracy_score(Y, Y_hat))
print('Score:', np.mean(Y_hat == Y))
class myPerceptron(ml_model):
def __init__(self, method=None, learning_rate=0.01,
num_epochs=100, shuffle=True, activation="sign"):
# print("myPerceptron: __init__")
super().__init__(method, learning_rate, num_epochs, shuffle)
self.activation = activation
"""
Parameters
------------
X_train : array, float
Dataset for training
Y : array, float
"True" Y
"""
def fit(self, X_train, Y, standardize=False):
super().fit(X_train, Y, standardize)
self.errors_ = []
for epoch in range(self.num_epochs):
errors = 0
for xi, y in zip(self.X_train, Y):
y_hat = self.predict(xi)
error = y - y_hat
update = self.learning_rate * error
self.w_[1:] += update * xi
self.w_[0] += update
errors += int(error != 0.0)
self.errors_.append(errors)
# print('errors:', errors)
print("final w:\n", self.w_, "\nepochs:",
(epoch+1), "/", self.num_epochs)
plt.plot(range(1, len(self.errors_) + 1),
self.errors_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Number of erros')
plt.show()
def predict(self, X_test, standardize=False):
# print("shape of X_test:", X_test.shape)
if(standardize is True):
for i in range(self.D):
X_test[:, i] = \
(X_test[:, i] - X_test[:, i].mean()) / X_test[:, i].std()
z = self.net_input(X_test)
# print("z:",z)
Y_hat = super().activation_fn(z, self.activation)
# print("Y_hat:",Y_hat)
return Y_hat
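# Illustrative usage of myPerceptron (editor's sketch; assumes a binary dataset
# with labels encoded as -1/+1 to match the 'sign' activation):
#   clf = myPerceptron(learning_rate=0.1, num_epochs=50)
#   clf.fit(X_train, y_train, standardize=True)
#   y_pred = clf.predict(X_test, standardize=True)
#   clf.score(y_test, y_pred)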
class myAdaline(ml_model):
def __init__(self, method=None, learning_rate=0.0001,
num_epochs=200, shuffle=True, mini_batch=False):
# print("myAdaline: __init__")
super().__init__(method, learning_rate, num_epochs, shuffle)
self.mini_batch = mini_batch
"""
Parameters
------------
X_train : array, float
Dataset for training
Y : array, float
"True" Y
"""
def _update_weights(self, xi, target):
"""Apply Adaline learning rule to update the weights"""
output = super().activation_fn(self.net_input(xi), activation=None)
error = (target - output)
# print("shape of xi:", xi.shape)
# print("shape of error:", error.shape)
self.w_[1:] += self.learning_rate * xi.dot(error)
self.w_[0] += self.learning_rate * error
cost = 0.5 * error**2
return cost
def fit(self, X_train, Y, standardize=False):
super().fit(X_train, Y, standardize)
cost = 0
avg_cost = 0
for epoch in range(self.num_epochs):
if(self.mini_batch is False):
z = self.net_input(self.X_train)
output = super().activation_fn(z, activation=None)
error = (self.Y - output)
self.w_[1:] += self.learning_rate * \
np.dot(self.X_train.T, error)
self.w_[0] += self.learning_rate * error.sum()
cost = (error**2).sum() / 2.0
self.cost_.append(cost)
else:
cost = []
for xi, target in zip(X_train, Y):
cost.append(self._update_weights(xi, target))
avg_cost = sum(cost) / len(Y)
self.cost_.append(avg_cost)
if(self.mini_batch is False):
print("final w:\n", self.w_, "\nFinal cost:\n", cost,
"\nepochs:\n", (epoch+1), "/", self.num_epochs)
else:
print("final w:\n", self.w_, "\nFinal cost:\n", avg_cost,
"\nepochs:\n", (epoch+1), "/", self.num_epochs)
plt.plot(range(1, len(self.cost_) + 1), self.cost_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Sum-squared-error')
plt.title('Adaline - Learning rate: {0}'.format(self.learning_rate))
plt.tight_layout()
plt.show()
def partial_fit(self, X_train, Y):
"""Fit training data without reinitializing the weights"""
if Y.ravel().shape[0] > 1:
for xi, target in zip(X_train, Y):
self._update_weights(xi, target)
else:
self._update_weights(X_train, Y)
return self
def predict(self, X_test, standardize=False):
print("shape of X_test:", X_test.shape)
if(standardize is True):
for i in range(self.D):
X_test[:, i] = (X_test[:, i] - X_test[:, i].mean()) / X_test[:, i].std()
z = self.net_input(X_test)
# output = super().activation_fn(z, activation=None)
Y_hat = super().activation_fn(z, activation="sign")
return Y_hat
class myLogistic(ml_model):
def __init__(self, method=None, learning_rate=0.0001,
num_epochs=200, shuffle=True):
# print("myLogistic: __init__")
super().__init__(method, learning_rate, num_epochs, shuffle)
"""
Parameters
------------
X_train : array, float
Dataset for training
Y : array, float
"True" Y
"""
def fit(self, X_train, Y, standardize=False):
super().fit(X_train, Y, standardize)
cost = 0
for epoch in range(self.num_epochs):
z = self.net_input(self.X_train)
# print("z:", z)
output = super().activation_fn(z, activation='sigmoid')
# print("Shape of output:", output.shape)
# print("output:", output)
error = (self.Y - output)
self.w_[1:] += self.learning_rate * np.dot(self.X_train.T, error)
self.w_[0] += self.learning_rate * error.sum()
cost = -Y.dot(np.log(output)) - ((1 - Y).dot(np.log(1 - output)))
self.cost_.append(cost)
print("final w:\n", self.w_, "\nFinal cost:\n", cost,
"\nepochs:\n", (epoch+1), "/", self.num_epochs)
plt.plot(range(1, len(self.cost_) + 1), self.cost_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Sum-squared-error')
plt.title('Adaline - Learning rate: {0}'.format(self.learning_rate))
plt.tight_layout()
plt.show()
def predict(self, X_test, standardize=False):
print("shape of X_test:", X_test.shape)
if(standardize is True):
for i in range(self.D):
X_test[:, i] = (X_test[:, i] - X_test[:, i].mean()) / X_test[:, i].std()
z = self.net_input(X_test)
# output = super().activation_fn(z, activation='sigmoid')
Y_hat = super().activation_fn(z, activation="step")
return Y_hat
class myKNN(ml_model):
def __init__(self, K):
self.K = K
"""
Parameters
------------
X_train : array, float
Dataset for training
Y : array, float
"True" Y
"""
def fit(self, X_train, Y):
self.X_train = X_train
self.Y = Y
# print("self.Y:", self.Y)
# print("self.X_train:", self.X_train)
def predict(self, X_test):
print("shape of X_test:", X_test.shape)
Y_pred = np.zeros((X_test.shape[0]))
# print("X_test:", X_test)
for i, xi_test in enumerate(X_test):
sl = SortedList()
for j, xi_train in enumerate(self.X_train):
diff = xi_test - xi_train
dist = math.sqrt(diff.dot(diff))
if(len(sl) < self.K):
sl.add((dist, self.Y[j]))
else:
if(dist < sl[-1][0]):
del sl[-1]
sl.add((dist, self.Y[j]))
# print("sl:", sl)
# vote
votes = {}
for _, v in sl:
# print("v:", v)
votes[v] = votes.get(v, 0) + 1
max_votes = 0
max_votes_class = -1
for v, count in votes.items():
if count > max_votes:
max_votes = count
max_votes_class = v
Y_pred[i] = max_votes_class
return Y_pred
class myBayes(ml_model):
def __init__(self, naive=True, pdf='gaussian'):
self.naive = naive
self.pdf = pdf
"""
Parameters
------------
X_train : array, float
Dataset for training
Y : array, float
"True" Y
"""
def fit(self, X_train, Y, smoothing=1e-2):
self.X_train = X_train
self.Y = Y
# print("self.Y:", self.Y)
# print("self.X_train:", self.X_train)
N, D = X_train.shape
print("X_train has {0} samples with {1} features".format(
N, D))
self.lable_Prob_in_Y = {}
labels = set(Y)
self.lable_numbers = len(labels)
if self.pdf == 'gaussian':
if self.naive is True:
self.X_train_mean_var_in_Y = {}
for c in labels:
such_X_train = X_train[Y == c]
self.X_train_mean_var_in_Y[c] = {
"mean": such_X_train.mean(axis=0),
'var': np.var(such_X_train) + smoothing
}
self.lable_Prob_in_Y[c] = float(len(Y[Y == c])) / len(Y)
print("len of X_train_mean_var_in_Y:", len(self.X_train_mean_var_in_Y))
# print("X_train_mean_var_in_Y:", self.X_train_mean_var_in_Y)
else:
self.X_train_mean_cov_in_Y = {}
for c in labels:
such_X_train = X_train[Y == c]
self.X_train_mean_cov_in_Y[c] = {
"mean": such_X_train.mean(axis=0),
'cov': np.cov(such_X_train.T) + np.eye(D)*smoothing
}
self.lable_Prob_in_Y[c] = float(len(Y[Y == c])) / len(Y)
print("len of X_train_mean_cov_in_Y:", len(self.X_train_mean_cov_in_Y))
# print("X_train_mean_cov_in_Y:", self.X_train_mean_cov_in_Y)
print("len of X_train_Prob_in_Y:", len(self.lable_Prob_in_Y))
# print("lable_Prob_in_Y:", self.lable_Prob_in_Y)
else:
pass
def predict(self, X_test):
from scipy.stats import multivariate_normal as mvn
N = X_test.shape[0]
print("shape of X_test:", X_test.shape)
Y_pred_P = np.zeros((N, self.lable_numbers))
if self.naive is True:
for c, g in self.X_train_mean_var_in_Y.items():
mean, var = g["mean"], g["var"]
Y_pred_P[:, c] = mvn.logpdf(X_test, mean=mean, cov=var) + np.log(self.lable_Prob_in_Y[c])
# print("c:{0}, g:{1}".format(c, g))
else:
for c, g in self.X_train_mean_cov_in_Y.items():
mean, cov = g["mean"], g["cov"]
Y_pred_P[:, c] = mvn.logpdf(X_test, mean=mean, cov=cov) + np.log(self.lable_Prob_in_Y[c])
# print("c:{0}, g:{1}".format(c, g))
# print("Y_pred_P:{0}".format(Y_pred_P))
Y_pred = np.argmax(Y_pred_P, axis=1)
return Y_pred
class myMLP(ml_model):
def activation_derivative(self, z, activation=None):
if(activation is None):
return 0
elif(activation == "sigmoid"):
return (super().activation_fn(z, "sigmoid")*(1-super().activation_fn(z, "sigmoid")))
elif(activation == "tanh"):
return (1 + super().activation_fn(z, "tanh"))*(1 - super().activation_fn(z, "tanh"))
def __init__(self, net_arch=[2, 3, 2], activation_h='tanh', l2=0.,
num_epochs=100, learning_rate=0.001,
shuffle=True, minibatch_size=1):
self.activation_h = activation_h
self.layers = len(net_arch)
self.net_arch = net_arch
self.l2 = l2
self.num_epochs = num_epochs
self.learning_rate = learning_rate
self.minibatch_size = minibatch_size
self.shuffle = shuffle
def _forward(self, X):
"""Compute forward propagation step"""
A_in = [X]
# step 1: net input of hidden layer
# [n_samples, n_features] dot [n_features, n_hidden]
# -> [n_samples, n_hidden]
# [n_samples, n_hidden] dot [i layer n_hidden, i+1 n_hidden]
# print("shape of X:", X.shape)
for i in range(len(self.weights)-1):
print("shape of A_in[{0}]:{1}".format(i, A_in[i].shape))
print("shape of weights({0}):{1}".format(i, self.weights[i].shape))
z_h = np.dot(A_in[i], self.weights[i])
a_h = self.activation_fn(z_h, self.activation_h)
print("shape of a_h({0}):{1}".format(i, a_h.shape))
# add the bias for the next layer
ones = np.ones((1, X.shape[0]))
a_h = np.concatenate((ones.T, a_h), axis=1)
print("Shape of a_h with bias:", a_h.shape)
A_in.append(a_h)
print("Len of A_in:", len(A_in))
# step 2: net input of output layer
# [n_samples, n_hidden] dot [n_hidden, n_classlabels]
# -> [n_samples, n_classlabels]
z_o = np.dot(A_in[-1], self.weights[-1])
a_o = self.activation_fn(z_o, self.activation_h)
return A_in, a_o
def _compute_cost(self, y, y_hat):
"""Compute cost function.
Parameters
----------
y : array, shape = (n_samples, n_labels)
true class labels.
y_hat : array, shape = [n_samples, n_output_units]
Activation of the output layer (forward propagation)
Returns
---------
cost : float
Regularized cost
"""
"""
L2_term = (self.l2 *
(np.sum(self.w_h ** 2.) +
np.sum(self.w_out ** 2.)))
"""
L2_term = 0
y = y.reshape(y_hat.shape[0], y_hat.shape[1])
print("shape of y:", y.shape)
print("shape of y_hat:", y_hat.shape)
term1 = -y * (np.log(y_hat))
term2 = (1. - y) * np.log(1. - y_hat)
print("shape of term1:", term1.shape)
print("shape of term2:", term2.shape)
cost = np.sum(term1 - term2) + L2_term
return cost
"""
Parameters
------------
X_train : array, float
Dataset for training
Y : array, float
"True" Y
"""
def fit(self, X_train, Y, standardize=False):
# Dimensions, Features of X_Train
self.N, self.D = X_train.shape
print("X_train has {0} samples with {1} features".format(self.N, self.D))
self.Y = Y
# print("X_train:", X_train)
if(standardize is True):
X_std = np.copy(X_train)
for i in range(self.D):
X_std[:, i] = \
(X_train[:, i] - X_train[:, i].mean()) / X_train[:, i].std()
self.X_train = np.copy(X_std)
else:
self.X_train = np.copy(X_train)
# print("X_train_std:", self.X_train)
ones = np.ones((1, self.X_train.shape[0]))
self.X_train = np.concatenate((ones.T, self.X_train), axis=1)
print("Shape of X_train with bias:", self.X_train.shape)
self.weights = []
if (self.shuffle is True):
# init weights as random numbers
print("init weights as random numbers:")
for layer in range(len(self.net_arch) - 1):
w_ = np.random.randn(self.net_arch[layer] + 1, self.net_arch[layer+1])
print("for layer {0} to {1}".format(layer, layer+1))
print("shape of w_:", w_.shape)
self.weights.append(w_)
# shuffle X_train, Y
# r = np.random.permutation(self.N)
# self.X_train = self.X_train[r]
# self.Y = self.Y[r]
else:
# init w_ as zeros with w0
print("init weights as 0:")
for layer in range(len(self.net_arch) - 1):
w_ = np.zeros((self.net_arch[layer] + 1, self.net_arch[layer+1]))  # np.zeros expects the shape as a tuple
print("for layer {0} to {1}".format(layer, layer+1))
print("shape of w_:", w_.shape)
self.weights.append(w_)
print("shape of X_train:", self.X_train.shape)
print("len of weights:", len(self.weights))
print("shape of Y:", self.Y.shape)
# iterate over training epochs
for i in range(self.num_epochs):
indices = np.arange(X_train.shape[0])
for start_idx in range(0, indices.shape[0] - self.minibatch_size +
1, self.minibatch_size):
batch_idx = indices[start_idx:start_idx + self.minibatch_size]
A_in, y_hat = self._forward(self.X_train[batch_idx])
##################
# Backpropagation
##################
# [n_samples, n_classlabels]
print("shape of y[batch_idx]:", y[batch_idx].shape)
print("shape of y_hat:", y_hat.shape)
delta_out = y_hat - y[batch_idx].reshape(y_hat.shape[0], y_hat.shape[1])
print("shape of delta_out:", delta_out.shape)
# error for the output layer
#self._compute_cost(Y, y_hat)
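# Note: the backward pass stops at delta_out above; no gradients are propagated
# back to self.weights, so fit() as written computes the forward pass and the
# output error but never updates the network's weights.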
mlp = myMLP(net_arch=[2, 4, 1], minibatch_size=4)
X = np.array([[0, 0],
[0, 1],
[1, 0],
[1, 1]])
y = np.array([0, 1, 1, 0])
mlp.fit(X, y) | u8913557/myDataScience | Model/ml_model.py | ml_model.py | py | 21,075 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.copy",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_numbe... |
41304347612 | import torch
print(torch.__version__)
torch.get_default_dtype()
torch.get_num_threads()
torch.set_default_dtype(torch.float64)
torch.get_default_dtype()
tensor_arr = torch.Tensor([[1, 2, 3], [4, 5, 6]])
torch.is_tensor(tensor_arr)
torch.numel(tensor_arr) #Gives the number of elements in the tensor
tensor_uninitialized = torch.Tensor(2, 2)
tensor_uninitialized
tensor_initialized = torch.rand(2, 2)
tensor_initialized
tensor_cpu_int = torch.Tensor([5, 3]).type(torch.IntTensor)
tensor_cuda_int = torch.Tensor([5, 3]).type(torch.cuda.IntTensor)
torch_cpu_short = torch.ShortTensor([1.0, 2.0, 3.0])
tensor_cuda_short = torch.cuda.ShortTensor([1.0, 2.0, 3.0]) #cuda commands create tensor in GPU
tensor_float = torch.cuda.HalfTensor([1.0, 2.0, 3.0]) #takes half the memory
torch_cpu_fill = torch.full((2, 6), fill_value = 10)
torch_cuda_fill = torch.full((2, 6), fill_value = 5).type(torch.cuda.FloatTensor)
tensor_of_ones_cpu = torch.ones([2, 4], dtype = torch.int32)
tensor_of_ones_cuda = torch.ones([2, 4], dtype = torch.int32).type(torch.cuda.IntTensor)
tensor_of_zeros_cpu = torch.zeros_like(tensor_of_ones_cpu)
tensor_of_zeros_gpu = torch.zeros_like(tensor_of_ones_cuda)
tensor_eye_cpu = torch.eye(5)
tensor_eye_gpu = torch.eye(5).type(torch.cuda.FloatTensor)
non_zero_indices = torch.nonzero(tensor_eye_cpu)
non_zero_indices = torch.nonzero(tensor_of_ones_cuda)
i = torch.tensor([[0, 1, 1],
[2, 2, 0]])
v = torch.tensor([3, 4, 5], dtype = torch.float32)
sparse_tensor = torch.sparse_coo_tensor(i, v, [2, 5])
sparse_tensor.data
#Simple operations on Tensors
initial_tensor = torch.rand(2, 3)
initial_tensor.fill_(10)
new_tensor = initial_tensor.add(5)
initial_tensor.add_(8)
new_tensor.sqrt_()
x = torch.linspace(start = 0.1, end = 10, steps = 15)
tensor_chunk = torch.chunk(x, 3, 0)
tensor_1 = tensor_chunk[0]
tensor_2 = tensor_chunk[1]
tensor_3 = torch.tensor([3.0, 4.0, 5.0])
torch.cat((tensor_1, tensor_2, tensor_3), 0)
random_tensor = torch.Tensor([[20, 8, 40], [5, 43, 6], [78, 54, 6]])
random_tensor[0, 1]
random_tensor[1:, 1:]
random_tensor.size()
resized_tensor = random_tensor.view(9)
resized_tensor.size()
random_tensor[2, 2] = 200.0
resized_tensor
#Elementwise and Matrix Operations
tensor_unsqueeze = torch.unsqueeze(random_tensor, 2)
tensor_unsqueeze.size()
tensor_transpose = torch.transpose(initial_tensor, 0, 1)
sorted_tensor, sorted_indices = torch.sort(random_tensor)
tensor_float = torch.FloatTensor([-1.1, -2.2, -3.3])
tensor_abs = torch.abs(tensor_float)
rand1 = torch.abs(torch.rand(2,3))
rand2 = torch.abs(torch.rand(2,3))
add1 = rand1 + rand2
tensor_just = torch.Tensor([[1, 2, 3], [-1, -2, -3]])
tensor_div = torch.div(tensor_just, tensor_just + 0.3)
tensor_mul = torch.mul(tensor_just, tensor_just)
tensor_clamp = torch.clamp(tensor_just, min = -0.2, max = 2)
t1 = torch.Tensor([2, 3])
t2 = torch.Tensor([4, 5])
dot_product = torch.dot(t1, t2)
matrix = torch.Tensor([[1, 2, 3], [4, 5, 6]])
vector = torch.Tensor([0, 1, 2])
matrix_vector = torch.mv(matrix, vector)
another_matrix = torch.Tensor([[10,20], [30, 0], [0,50]])
matrix_mul = torch.mm(matrix, another_matrix)
torch.argmax(matrix_mul, dim = 1)
torch.argmin(matrix_mul, dim = 1)
#Converting between PyTorch and numpy tensors
import numpy as np
tensor = torch.rand(4,3)
type(tensor)
numpy_from_tensor = tensor.numpy()
type(numpy_from_tensor)
torch.is_tensor(tensor)
torch.is_tensor(numpy_from_tensor)
numpy_from_tensor[0, 0] = 100.0
numpy_from_tensor
tensor
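# tensor.numpy() returns a view that shares memory with the CPU tensor, which is
# why the in-place edit above shows up in both objects; torch.from_numpy shares
# memory the same way, and torch.as_tensor below avoids a copy when the dtype and
# device already match.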
numpy_arr = np.array([[1.0, 2.0, 3.0], [10.0, 20.0, 30.0], [100.0, 200.0, 300.0]])
tensor_from_numpy = torch.from_numpy(numpy_arr)
type(tensor_from_numpy)
torch.is_tensor(tensor_from_numpy)
tensor_from_numpy_arr = torch.as_tensor(numpy_arr)
| subhankar453/ExploringPyTorch | models/ExploringPyTorch.py | ExploringPyTorch.py | py | 3,807 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.__version__",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "torch.get_default_dtype",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "torch.get_num_threads",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "tor... |
24925249732 | # -*- coding: utf-8 -*-
import json
import os
from locale import getdefaultlocale
from .internationalization import SUPPORTED_LANGUAGES
_lang = getdefaultlocale()[0]
for _supported_lang in SUPPORTED_LANGUAGES.keys():
if _lang in _supported_lang or _supported_lang in _lang:
_lang = _supported_lang
break
else:
_lang = 'en'
SETTINGS_FILE = os.path.expanduser('~/.wbwr_cfg')
DEFAULT_SETTINGS = {
'blink_warning_shown': False,
'theme': 1,
'speed': 125,
'cpf': 20,
'language': _lang,
'font': 'serif',
'pause_on_image': True,
'font_size_focus': 45
}
class _settings_type(dict):
def __getitem__(self, name):
try:
return super(_settings_type, self).__getitem__(name)
except KeyError:
val = DEFAULT_SETTINGS[name]
self[name] = val
return val
def save(self):
with open(SETTINGS_FILE, 'w') as file:
json.dump(self, file)
def load(self):
self.clear()
with open(SETTINGS_FILE, 'r') as file:
for k, v in json.load(file).items():
self[k] = v
Settings = _settings_type()
try:
Settings.load()
except OSError:
pass # Settings will be populated and saved on demand
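# Typical use (illustrative): reading Settings['speed'] falls back to DEFAULT_SETTINGS
# and caches the value on first access; Settings.save() persists the dict as JSON to
# SETTINGS_FILE (~/.wbwr_cfg).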
| Sa-RSt/WordByWord | wordbyword/settings.py | settings.py | py | 1,265 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "locale.getdefaultlocale",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "internationalization.SUPPORTED_LANGUAGES.keys",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "internationalization.SUPPORTED_LANGUAGES",
"line_number": 10,
"usage... |
24000459929 | from django import forms
from .models import Booking
from django.core.exceptions import ValidationError
class BookingForm(forms.ModelForm):
"""Form for the booking model."""
class Meta:
model = Booking
fields = (
'first_name',
'last_name',
'phone_number',
'email_address',
'date',
'service_type',
)
date = forms.DateField(
widget=forms.widgets.DateInput(
attrs={
'type': 'date'}))
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
| rocrill/velo_city | bookservice/forms.py | forms.py | py | 613 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "models.Booking",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.forms.Dat... |
18204569793 | from requests.exceptions import ReadTimeout
from django.http import JsonResponse
from ..keycloak_services import check_ldap_connection
from ..keycloak_services import check_ldap_authentication
from ..keycloak_services import request_access_token
def process_request(request, parameter_keys, kc_call):
token = request_access_token()
parameters = []
missing_parameters = []
for key in parameter_keys:
parameter = request.GET.get(key, None)
if parameter:
parameters.append(parameter)
else:
missing_parameters.append(key)
if missing_parameters:
error = "parameters missing: {keys}".format(keys=missing_parameters)
return JsonResponse(
{'errorMessage': error}, status=400
)
status = 400
json_data = {'errorMessage': "LDAP test error"}
try:
check_response = kc_call(
'master', token, *parameters, timeout=0.4)
if check_response.status_code == 204:
status = 200
json_data = {'successMessage': "LDAP test success"}
else:
status = check_response.status_code
json_data = check_response.json()
except ReadTimeout:
status = 408
json_data['errorMessage'] = "Keycloak: no response"
finally:
return JsonResponse(json_data, status=status) # noqa: B012,
# return inside finally blocks cause exceptions to be silenced
def verify_connection(request):
return process_request(request, ['url'], check_ldap_connection)
def verify_authentication(request):
return process_request(
request,
['url', 'bind_dn', 'bind_credential'],
check_ldap_authentication,
)
| os2datascanner/os2datascanner | src/os2datascanner/projects/admin/import_services/views/keycloak_api_views.py | keycloak_api_views.py | py | 1,718 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "keycloak_services.request_access_token",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "requests.exceptions.ReadTimeout",
"line_number": 36,
"usage_type": "name"... |
19229736770 | import ddt
import unittest,requests
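# Sketch of one test case that ddt.file_data could load from login.yml. The layout
# below is an assumption inferred from the kwargs read in test_run, not a file that
# ships with this repository; the URL and credentials are placeholders:
#
#   login_ok:
#     method: post
#     url: http://example.com/api/login
#     data: {username: demo, password: demo}
#     header: {Content-Type: application/x-www-form-urlencoded}
#     is_json: 0
#     check: ["success"]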
@ddt.ddt
class TestLogin(unittest.TestCase):
@ddt.file_data(r'E:\login.yml')
@ddt.unpack
def test_run(self,**kwargs):
method = kwargs.get('method')
url = kwargs.get('url')
data = kwargs.get('data',{})
header = kwargs.get('header',{})
is_json = kwargs.get('is_json',0)
cookie = kwargs.get('cookie',{})
check = kwargs.get('check')
if method=='post':
if is_json:
r = requests.post(url,json=data,headers=header,cookies=cookie)
else:
r = requests.post(url,data=data, headers=header, cookies=cookie)
else:
r = requests.get(url,params=data,headers=header, cookies=cookie)
for c in check:
self.assertIn(c,r.text) | hedyxy/APIauto | cases/TestLogin.py | TestLogin.py | py | 683 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "unittest.TestCase",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "requests.post",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "requests.get",
... |
10841797597 | import logging
from . import BinanceBrokers
from .requests import unauthorizrd_request
from core.config import (
BINANCE_SPOT_KLINES_URL,
BINANCE_UM_KLINES_URL,
BINANCE_CM_KLINES_URL,
BINANCE_SPOT_MARKET_INFO_URL,
BINANCE_UM_MARKET_INFO_URL,
BINANCE_CM_MARKET_INFO_URL,
)
async def get_klines(
broker: str,
symbol: str,
interval: str,
limit: int = 500,
) -> dict:
"""The function gets klines history from Binance
Args:
symbol (str): symbol like BTCUSDT
interval (str): interval (timeframe) like '15m'
stop (asyncio.Event): stop event from the bot
limit (int, optional): limit of klines. Defaults to 500.
Returns:
dict: parsed JSON response
"""
logger = logging.getLogger('get_klines')
params = {
'symbol': symbol.upper(),
'interval': interval,
'limit': limit,
}
if broker == 'Binance-spot':
url = BINANCE_SPOT_KLINES_URL
elif broker == 'Binance-UM-Futures':
url = BINANCE_UM_KLINES_URL
elif broker == 'Binance-CM-Futures':
url = BINANCE_CM_KLINES_URL
else:
raise ValueError(f'Wrong broker {broker}')
return await unauthorizrd_request(broker, url, 'get', params, logger)
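# A minimal usage sketch (the broker, symbol and interval values here are illustrative
# assumptions, not taken from this project's configuration):
#
#   import asyncio
#   klines = asyncio.run(get_klines('Binance-spot', 'BTCUSDT', '15m', limit=10))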
async def get_market_info(broker: str) -> dict:
"""The function gets exchange info (symbols, limits)
Args:
brokers (BinanceBrokers): Binance brokers enum
Returns:
dict: parsed JSON data
"""
logger = logging.getLogger('get_market_info')
if broker == 'Binance-spot':
url = BINANCE_SPOT_MARKET_INFO_URL
elif broker == 'Binance-UM-Futures':
url = BINANCE_UM_MARKET_INFO_URL
elif broker == 'Binance-CM-Futures':
url = BINANCE_CM_MARKET_INFO_URL
else:
raise ValueError(f'Wrong broker {broker}')
return await unauthorizrd_request(broker, url, 'get', {}, logger) | Hudrolax/invest_tools | app/brokers/binance/market_data.py | market_data.py | py | 1,915 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "core.config.BINANCE_SPOT_KLINES_URL",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "core.config.BINANCE_UM_KLINES_URL",
"line_number": 45,
"usage_type": "name"
},
... |
70205744354 | '''
텔레포트 3
시간 제한 메모리 제한 제출 정답 맞은 사람 정답 비율
2 초 512 MB 303 122 101 43.348%
문제
수빈이는 크기가 무한대인 격자판 위에 살고 있다. 격자판의 각 점은 두 정수의 쌍 (x, y)로 나타낼 수 있다.
제일 처음에 수빈이의 위치는 (xs, ys)이고, 집이 위치한 (xe, ye)로 이동하려고 한다.
수빈이는 두 가지 방법으로 이동할 수 있다. 첫 번째 방법은 점프를 하는 것이다. 예를 들어 (x, y)에 있는 경우에 (x+1, y), (x-1, y), (x, y+1), (x, y-1)로 이동할 수 있다. 점프는 1초가 걸린다.
두 번째 방법은 텔레포트를 사용하는 것이다. 텔레포트를 할 수 있는 방법은 총 세 가지가 있으며, 미리 정해져 있다. 텔레포트는 네 좌표 (x1, y1), (x2, y2)로 나타낼 수 있으며, (x1, y1)에서 (x2, y2)로 또는 (x2, y2)에서 (x1, y1)로 이동할 수 있다는 것이다. 텔레포트는 10초가 걸린다.
수빈이의 위치와 집의 위치가 주어졌을 때, 집에 가는 가장 빠른 시간을 구하는 프로그램을 작성하시오.
입력
첫째 줄에 xs와 ys가, 둘째 줄에 xe, ye가 주어진다. (0 ≤ xs, ys, xe, ye ≤ 1,000,000,000)
셋째 줄부터 세 개의 줄에는 텔레포트의 정보 x1, y1, x2, y2가 주어진다. (0 ≤ x1, y1, x2, y2 ≤ 1,000,000,000)
입력으로 주어지는 모든 좌표 6개는 서로 다르다.
출력
수빈이가 집에 가는 가장 빠른 시간을 출력한다.
'''
# check all teleport-use ways with brute-force
import sys
from collections import defaultdict
from itertools import permutations
xs, ys = map(int, sys.stdin.readline().split())
xe, ye = map(int, sys.stdin.readline().split())
m_route = abs(xe-xs) + abs(ye-ys)
tels = defaultdict(tuple)
for i in range(3):
x1, y1, x2, y2 = list(map(int, sys.stdin.readline().split()))
tels[(x1, y1)], tels[(x2, y2)] = (x2, y2), (x1, y1)
for n in range(1,4):
for q in list(permutations(tels.keys(),n)):
x, y, cnt = xs, ys, 0
for xt, yt in q:
new_x, new_y = tels[(xt, yt)]
if abs(xt-x) + abs(yt-y) + 10 < abs(new_x-x) + abs(new_y-y):
cnt += abs(xt-x) + abs(yt-y) + 10
x, y = new_x, new_y
else:
cnt = m_route
break
m_route = cnt + abs(xe-x) + abs(ye-y) if cnt + abs(xe-x) + abs(ye-y) < m_route else m_route
print(m_route)
# It exceeds time limit
import sys
from collections import deque, defaultdict
xs, ys = map(int, sys.stdin.readline().split())
xe, ye = map(int, sys.stdin.readline().split())
m_route = abs(xe-xs) + abs(ye-ys)
print(m_route)
tels = defaultdict(tuple)
for i in range(3):
x1, y1, x2, y2 = list(map(int, sys.stdin.readline().split()))
tels[(x1, y1)], tels[(x2, y2)] = (x2, y2), (x1, y1)
grid_x = max([k[0] for k in tels.keys()] + [xs, xe]) + 1
grid_y = max([k[1] for k in tels.keys()] + [ys, ye]) + 1
visited = [[ m_route for _ in range(grid_y)] for __ in range(grid_x)]
visited[xs][ys] = 0
queue = deque([(xs, ys, 0)])
while queue:
x, y, cnt = queue.popleft()
if cnt > visited[xe][ye]: continue
if tels[(x, y)]:
new_x, new_y = tels[(x, y)]
if visited[new_x][new_y] > cnt + 10:
queue.append((new_x, new_y, cnt + 10))
visited[new_x][new_y] = cnt + 10
for dx, dy in [(1, 0), (0, 1), (-1, 0), (0, -1)]:
if x + dx < 0 or x + dx >= grid_x or y + dy < 0 or y + dy >= grid_y: continue
if visited[x+dx][y+dy] > cnt+1:
queue.append((x+dx, y+dy, cnt+1))
visited[x+dx][y+dy] = cnt+1
print(visited[xe][ye])
| hanseul-jeong/Coding_test | Backjoon/단계별로풀어보기/12908.py | 12908.py | py | 3,661 | python | ko | code | 0 | github-code | 1 | [
{
"api_name": "sys.stdin.readline",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin.readline",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
... |
30058213410 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 04 10:04:46 2016
@author: nfette
A single effect LiBr absorption chiller model.
"""
import numpy as np
import tabulate
from scipy.optimize import fsolve
from scipy.interpolate import PchipInterpolator
from collections import namedtuple
import CoolProp.CoolProp as CP
from hw2_1 import CelsiusToKelvin as C2K
from hw2_1 import KelvinToCelsius as K2C
import libr_props, libr_props2
import HRHX_integral_model
water = 'HEOS::Water'
librname = lambda x: 'INCOMP::LiBr[{}]'.format(x)
pwater = CP.AbstractState("HEOS","water")
pointType = np.dtype(dict(names="name m x T p Q h D C".split(),formats=['S32']+['d']*8))
class ProcessPoint(object):
def __init__(self,row):
self.row = row
def makeprop(i):
def getx(self):
return self.row[i]
def setx(self,value):
self.row[i]=value
def delx(self):
del self.row[i]
return property(getx,setx,delx)
for i in pointType.names:
setattr(ProcessPoint,i,makeprop(i))
class ProcessTable(object):
pass
def makePointTable(names):
table=np.zeros(len(names),dtype=pointType)
points=ProcessTable()
for i,name in enumerate(names):
points.__setattr__(name,ProcessPoint(table[i]))
table[i][0]=name
return table,points
# Units in this file:
# temperature [C]
# enthalpy [J/kg]
# pressure [Pa]
# mass fraction [kg/kg total]
# effectiveness [K/K]
# The LiBr enthalpy is zero at 293.15 K, 1 atm, per
# http://www.coolprop.org/fluid_properties/Incompressibles.html#general-introduction
# We need to evaluate water enthalpy relative to that, but it is not built-in.
# http://www.coolprop.org/coolprop/HighLevelAPI.html#reference-states
T_ref = 20
P_ref = 101325
pwater.update(CP.PT_INPUTS,P_ref,C2K(T_ref))
h_w_ref = pwater.hmass()
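# For example (illustrative only), the enthalpy of saturated water vapor at the
# reference pressure, expressed on the same scale as the LiBr correlations, could be
# obtained with the same offset pattern used throughout this module:
#   pwater.update(CP.PQ_INPUTS, P_ref, 1)
#   h_vapor_ref = pwater.hmass() - h_w_ref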
class GeneratorLiBr(object):
"""Provide process heat canonical curve for generator in various forms.
For purpose of external heat exchange, generator process goes ...
P_cond, T_gen_pre, x1 (subcooled)
P_cond, T_gen_inlet, x1 (saturated liquid) + backwards flowing vapor
P_cond, T_gen_outlet, x2 (saturated liquid)
Args:
P : Pressure (Pa)
The process is assumed to occur at constant pressure level.
m_in : Solution inlet mass flow rate (kg/s)
The mass flow rate of the stream pumped up from low pressure.
T_in : Solution inlet temperature (C)
Needed to determine inlet state, which may be subcooled.
x_in : Solution inlet mass fraction (kg/kg)
The LiBr mass relative to total mass
x_out : Solution outlet mass fraction (kg/kg)
The outlet solution LiBr mass fraction
"""
def __init__(self, P, m_in, T_in, x_in, x_out):
self.update(P, m_in, T_in, x_in, x_out)
# Create the functions ...
self.q = np.vectorize(self._q)
self.T = np.vectorize(self._T)
# Create faster functions using splines ...
def update(self, P, m_in, T_in, x_in, x_out):
self.P = P
self.m_in = m_in
self.T_in = T_in
self.x_in = x_in
self.x_out = x_out
# Find inlet enthalpy.
# Find saturation point at inlet concentration.
# Find ...
self.T_sat = K2C(libr_props.temperature(self.P * 1e-5, self.x_in))
self.T_out = K2C(libr_props.temperature(self.P * 1e-5, self.x_out))
self.h_sat = libr_props.massSpecificEnthalpy(C2K(self.T_sat), self.x_in)
self.h_out = libr_props.massSpecificEnthalpy(C2K(self.T_out),self.x_out)
self.cp_in = libr_props.massSpecificHeat(C2K(self.T_in),self.x_in)
# If h_in is an input:
if False:
preheat = (self.h_sat - self.h_in)/self.cp_in
self.T_in = self.T_sat - preheat
else:
preheat = self.T_sat - self.T_in
self.h_in = self.h_sat - preheat * self.cp_in
pwater.update(CP.PT_INPUTS, self.P, C2K(self.T_sat))
self.h_vapor_out = pwater.hmass() - h_w_ref
# Mass balance on LiBr
self.m_out = self.m_in * self.x_in / self.x_out
# Mass balance on Water
self.m_vapor_out = self.m_in - self.m_out
self.m_total = self.m_out
self.Q_preheat = self.m_in * (self.h_sat - self.h_in)
self.Q_desorb = self.m_out * self.h_out \
+ self.m_vapor_out * self.h_vapor_out \
- self.m_in * self.h_sat
# Determine a reasonable limit for extending the domain.
self.Tmax = K2C(libr_props.temperature(self.P*1e-5,libr_props.xmax))
def __repr__(self):
import tabulate
names = """P
x_in
x_out
T_in
m_in
T_sat
T_out
m_out
m_vapor_out
h_in
h_sat
h_out
h_vapor_out
Q_preheat
Q_desorb
Q_total""".split()
vals = [self.P,
self.x_in,
self.x_out,
self.T_in,
self.m_in,
self.T_sat,
self.T_out,
self.m_out,
self.m_vapor_out,
self.h_in,
self.h_sat,
self.h_out,
self.h_vapor_out,
self.Q_preheat,
self.Q_desorb,
self.Q_preheat+self.Q_desorb]
units = """Pa
kg/kg kg/kg
C
kg/s
C C
kg/s kg/s
J/kg J/kg J/kg J/kg
W W W""".split()
return tabulate.tabulate(zip(names,vals,units))
def _q_helper(self,T):
x_local = libr_props.massFraction(C2K(T),self.P * 1e-5)
# Could parametrize this by x, but libr_props.temperature also has an
# implicit solve. Only P(T,x) is explicit.
pwater.update(CP.PT_INPUTS, self.P, C2K(T))
h_vapor_local = pwater.hmass() - h_w_ref
h_solution_local = libr_props.massSpecificEnthalpy(C2K(T), x_local)
# Mass balance on LiBr
m_solution_local = self.m_in * self.x_in / x_local
hlv1 = h_vapor_local - h_solution_local
q1 = self.m_total * h_vapor_local - m_solution_local * hlv1
return q1
def _T_helper(self,q,Tguess):
func = lambda T:self._q_helper(T)-q
sol = fsolve(func, Tguess)
return sol[0]
def _q(self,T):
"""Provide process heat canonical curve for generator in various forms.
Assumes that
* inlet solution state is obtained from self,
* generated vapor is flowing in reverse direction and
* it is everywhere in local equilibrium with solution.
Args
----
T : Temperature (deg C)
The local temperature
Returns
-------
q : Local progress index
Measured as cumulative heat flux (W).
Q : Total heat flux (W)
The total amount of heat transferred into the generator.
"""
hlv0 = self.h_vapor_out - self.h_sat
q0 = self.m_total * self.h_vapor_out - self.m_in * hlv0
if T < self.T_in:
raise "Generator outlet must be greater than pre-inlet temperature!"
elif T < self.T_sat:
# The state is either saturated or subcooled.
# Use linear interpolation.
q = self.m_in * self.cp_in * (T - self.T_in)
else:
x_local = libr_props.massFraction(C2K(T),self.P * 1e-5)
pwater.update(CP.PT_INPUTS, self.P, C2K(T))
h_vapor_local = pwater.hmass() - h_w_ref
h_solution_local = libr_props.massSpecificEnthalpy(C2K(T), x_local)
# Mass balance on LiBr
m_solution_local = self.m_in * self.x_in / x_local
hlv1 = h_vapor_local - h_solution_local
q1 = self.m_total * h_vapor_local - m_solution_local * hlv1
q = (q1 - q0) + self.Q_preheat
# TODO
# If q > Q (iff T > T_solution_outlet) then result is invalid.
return q
def _T(self,q):
if q < 0:
raise "Process index must be greater than 0!"
elif q < self.Q_preheat:
T = self.T_in + q / (self.m_in * self.cp_in)
#elif q < self.Q_desorb + self.Q_preheat:
else:
hlv0 = self.h_vapor_out - self.h_sat
q0 = self.m_total * self.h_vapor_out - self.m_in * hlv0
q1 = q0 + (q - self.Q_preheat)
alpha = (q - self.Q_preheat) / self.Q_desorb
Tguess = alpha*self.T_sat+(1-alpha)*self.T_out
T = self._T_helper(q1,Tguess)
#else:
# raise "Process index extends past process
return T
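# Illustrative usage sketch (all numbers are hypothetical and chosen only to show the
# call signature; units follow the conventions listed above: Pa, kg/s, deg C, kg/kg):
#   gen = GeneratorLiBr(P=7445.0, m_in=0.05, T_in=70.0, x_in=0.567, x_out=0.624)
#   print(gen)                 # tabulated summary of inlet/outlet states and heat duties
#   q_sat = gen.q(gen.T_sat)   # cumulative heat (W) once the solution reaches saturation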
class GeneratorLiBrInterpolated(GeneratorLiBr):
def __init__(self, P, m_in, T_in, x_in, x_out, debug=False):
print("I am still alive!")
self.update(P, m_in, T_in, x_in, x_out)
TT = np.linspace(self.T_in,self.Tmax,100)
qfunc = np.vectorize(self._q)
qq = qfunc(TT)
if np.isnan(qq).any():
print("There is a problem with some nans")
# Create extended domain such that interpolate saturates at endpoints.
qq1 = np.resize(qq,qq.size+1)
TT1 = np.resize(TT,TT.size+1)
qq1[-1] = qq1[-2]
TT1[-1] = TT1[-2] + 1
if (np.diff(qq1) < 0).any():
print("Captain, it's a non-monotonic function!")
self.q = PchipInterpolator(TT1,qq1,extrapolate=True)
# Need to use fresh arrays because they are referenced.
qq2 = np.resize(qq,qq.size+1)
TT2 = np.resize(TT,TT.size+1)
qq2[-1] = qq2[-2] * 1.02
TT2[-1] = TT2[-2]
self.T = PchipInterpolator(qq2,TT2,extrapolate=True)
# Show that it worked
if debug:
print(tabulate.tabulate(zip(TT1,qq1)))
print(tabulate.tabulate(zip(TT2,qq2)))
import matplotlib.pyplot as plt
plt.figure()
plt.plot(TT1,qq1,'.'); plt.title("qqmod vs TTmod for q()")
plt.figure()
plt.plot(qq2,TT2,'.'); plt.title("TTmod vs qqmod for T()")
class AbsorberLiBr1(object):
"""Provides a canonical heat (output) curve for a LiBr water vapor absorber.
Assumes:
Vapor comes into equilibrium with surface only where it is absorbed.
Inputs
------
P : number (Pa)
Process pressure
m_in : number (kg/s)
Solution inlet mass flow rate
h_in : number (J/kg)
Solution inlet enthalpy
x_in : number (kg/kg)
Solution inlet mass fraction LiBr
T_vapor_inlet : number (deg C)
Vapor inlet temperature
"""
def __init__(self,P,m_in,h_in,x_in,h_vapor_inlet,debug=False):
self.P=P
self.m_in=m_in
self.h_in=h_in
self.x_in=x_in
self.h_vapor_inlet = h_vapor_inlet
self.T_sat = K2C(libr_props.temperature(self.P * 1e-5,
self.x_in))
self.h_sat = libr_props.massSpecificEnthalpy(C2K(self.T_sat),self.x_in)
if self.h_in > self.h_sat:
q,t,xl = libr_props.twoPhaseProps(self.h_in,self.P*1e-5,self.x_in)
# Pre-cooling is required to reach saturation temperature
self.Q_pre_cool = self.m_in * (self.h_sat - self.h_in)
self.T_in = K2C(t)
prepoints_x = np.linspace(xl,self.x_in,20,endpoint=False)
prepoints_T = np.zeros_like(prepoints_x)
prepoints_q = np.zeros_like(prepoints_x)
for i,x in enumerate(prepoints_x):
#q,t,xl = libr_props.twoPhaseProps(h,self.P*1e-5,self.x_in)
t = libr_props.temperature(self.P * 1e-5, x)
h = libr_props.massSpecificEnthalpy(t,x)
prepoints_T[i] = K2C(t)
prepoints_q[i] = self.m_in * (h - self.h_in)
prepoints_x[i] = x
else:
self.Q_pre_cool = 0
self.T_in = K2C(libr_props.temperature(self.P * 1e-5, self.x_in))
prepoints_T = []
prepoints_q = []
prepoints_x = []
# Set up bounds and points for interpolation.
# Absorber limit is liquid water.
pwater.update(CP.PQ_INPUTS,P,0)
self.Tmin = pwater.T()
x_points = np.linspace(x_in,0.1,100)
T_points = np.zeros_like(x_points)
q_points = np.zeros_like(x_points)
#q_func = np.vectorize(self._q)
#q_points = q_func(T_points)
for i,x in enumerate(x_points):
T_points[i],q_points[i] = self._qx(x)
x_points = np.concatenate([prepoints_x,x_points])
T_points = np.concatenate([prepoints_T,T_points])
q_points = np.concatenate([prepoints_q,q_points])
# Forward function
T_points1 = np.resize(T_points,len(T_points)+1)
q_points1 = np.resize(q_points,len(T_points)+1)
T_points1[-1] = T_points[-1] - 5
q_points1[-1] = q_points[-1]
# Inverse function
T_points2 = np.resize(T_points,len(T_points)+1)
q_points2 = np.resize(q_points,len(T_points)+1)
T_points2[-1] = T_points[-1]
q_points2[-1] = q_points[-1] * 1.05
if debug:
import matplotlib.pyplot as plt
import tabulate
xmod = np.resize(x_points,len(x_points)+1)
print(tabulate.tabulate(zip(xmod,T_points1,q_points1,
T_points2,q_points2),
headers=['x','T1','q1','T2','q2']))
plt.figure(); plt.plot(T_points1,q_points1); plt.title("q(T)")
plt.figure(); plt.plot(q_points2,T_points2); plt.title("T(q)")
# Interpolate data must be in increasing order, so we reverse it
# compared to reaction direction.
self.q = PchipInterpolator(T_points1[::-1], q_points1[::-1])
self.T = PchipInterpolator(q_points2[::-1], T_points2[::-1])
def _q(self,T):
# First, determine the mass fraction here.
if T > self.T_sat:
raise ValueError("This function is for subcooled LiBr...")
else:
x_local = libr_props.massFraction(C2K(T),self.P*1e-5)
return self._qx(x_local)
def _qx(self,x_local):
T = K2C(libr_props.temperature(self.P*1e-5,x_local))
dx = self.x_in - x_local
# TODO
h_local = libr_props.massSpecificEnthalpy(C2K(T),x_local)
# And calculate
m_vapor = self.m_in * dx / x_local
m_out = self.m_in + m_vapor
# Heat is inbound.
result = -self.m_in * self.h_in - m_vapor * self.h_vapor_inlet \
+ m_out * h_local
return T,result
def __repr__(self):
names = """P
m_in
x_in
T_in
h_in
h_sat
h_vapor_inlet""".split()
vals = [self.P,
self.m_in,
self.x_in,
self.T_in,
self.h_in,
self.h_sat,
self.h_vapor_inlet]
units = """Pa
kg/kg kg/kg
C
kg/s
C
kg/s kg/s
J/kg J/kg J/kg J/kg
W W W""".split()
return tabulate.tabulate(zip(names,vals,units))
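# Illustrative usage sketch (hypothetical numbers; in the chiller model below this
# stream is normally built from SHX-outlet conditions via getAbsorberStream()):
#   ab = AbsorberLiBr1(P=873.0, m_in=0.045, h_in=1.5e5, x_in=0.624, h_vapor_inlet=2.4e6)
#   print(ab)   # tabulated summary of the absorber inlet state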
class ChillerLiBr1(object):
def __init__(self,
T_evap=1.5, T_cond=39.9,
x1=0.567, x2=0.624,
Eff_SHX=0.64, m_pump=0.05):
"""Args
----
T_evap : float
Evaporator saturation temperature (deg C)
T_cond : float
Condenser saturation temperature (deg C)
x1 : float
Pump side mass fraction of LiBr in stream (low) [kg/kg]
x2 : float
Return side mass fraction of LiBr in stream (high/concentrate)
Eff_SHX : float
Effectiveness of the solution heat exchanger (K/K), 0 to 1
m_pump : float
Mass flow rate through the solution pump (kg/s)
"""
self.T_evap = T_evap
self.T_cond = T_cond
self.x1 = x1
self.x2 = x2
self.m_pump = m_pump
self.Eff_SHX = Eff_SHX
self.dx = x1 - x2
pwater.update(CP.QT_INPUTS, 1, C2K(T_evap))
self.P_evap = pwater.p()
pwater.update(CP.QT_INPUTS, 1, C2K(T_cond))
self.P_cond = pwater.p()
self.stateLabels = """abs_outlet
pump_outlet
gen_inlet
gen_sat_liquid
gen_outlet
SHX_conc_outlet
abs_inlet
abs_sat_liquid
gen_vapor_outlet
cond_sat_vapor
cond_outlet
evap_inlet
evap_sat_liquid
evap_sat_vapor
evap_outlet""".split('\n')
#self.states=dict((k, nullPP('LiBrH2O')) for k in self.stateLabels)
self.stateTable,self.states=makePointTable(self.stateLabels)
self.T_gen_inlet = 0
self.T_gen_outlet = 0
self.T_abs_inlet_max = 0
self.T_abs_outlet_max = 0
self.h_gen_inlet = 0
self.h_gen_outlet = 0
self.h_abs_inlet = 0
self.h_abs_outlet = 0
self.m_concentrate = 0
self.m_refrig = 0
self.T_SHX_concentrate_outlet = 0
self.Q_SHX = 0
self.T_abs_pre = np.nan
self.h_abs_pre = np.nan
self.Q_abs_pre_cool = 0
self.P_abs_pre = np.nan
self.Q_abs_main = 0
self.Q_abs_total = 0
self.T_gen_pre = np.nan
self.Q_gen_pre_heat = 0
self.Q_gen_main = 0
self.Q_gen_total = 0
self.Q_condenser_reject = 0
self.Q_evap_heat = 0
self.COP = 0
self.W_pump = 0
self.f = np.inf
self.x_abs_pre = self.x2
# These routines allow updating solution
def setT_evap(self,T_evap):
self.T_evap = T_evap
pwater.update(CP.QT_INPUTS, 1, C2K(T_evap))
self.P_evap = pwater.p()
def setT_cond(self,T_cond):
self.T_cond = T_cond
pwater.update(CP.QT_INPUTS, 1, C2K(T_cond))
self.P_cond = pwater.p()
def ZeroCheck(self):
return self.W_pump + self.Q_evap_heat + self.Q_gen_total - self.Q_condenser_reject - self.Q_abs_total
def iterate1(self):
"""Update the internal parameters."""
self.T_gen_inlet = K2C(libr_props.temperature(self.P_cond*1e-5,
self.x1))
self.T_gen_outlet = K2C(libr_props.temperature(self.P_cond * 1e-5,
self.x2))
self.T_abs_inlet_max = K2C(libr_props.temperature(self.P_evap * 1e-5,
self.x2))
self.T_abs_outlet_max = K2C(libr_props.temperature(self.P_evap * 1e-5,
self.x1))
self.h_gen_inlet = libr_props.massSpecificEnthalpy(
C2K(self.T_gen_inlet), self.x1)
self.h_gen_outlet = libr_props.massSpecificEnthalpy(
C2K(self.T_gen_outlet), self.x2)
self.h_abs_inlet = libr_props.massSpecificEnthalpy(
C2K(self.T_abs_inlet_max), self.x2)
self.h_abs_outlet = libr_props.massSpecificEnthalpy(
C2K(self.T_abs_outlet_max), self.x1)
# Mass balance on LiBr
self.m_concentrate = self.m_pump * self.x1 / self.x2
# Mass balance on Water
self.m_refrig = self.m_pump - self.m_concentrate
self.f = self.m_pump / self.m_refrig
# Compute SHX outlets, assuming concentrate limits heat flow (C_min)
# Neglect pump work for the present.
DeltaT_max = self.T_gen_outlet - self.T_abs_outlet_max
DeltaT_SHX_concentrate = self.Eff_SHX * DeltaT_max
self.T_SHX_concentrate_outlet = self.T_gen_outlet \
- DeltaT_SHX_concentrate
self.h_SHX_concentrate_outlet = libr_props.massSpecificEnthalpy(
C2K(self.T_SHX_concentrate_outlet), self.x2)
self.Q_SHX = self.m_concentrate \
* (self.h_gen_outlet - self.h_SHX_concentrate_outlet)
# Expansion valve
self.h_abs_pre = self.h_SHX_concentrate_outlet
if self.h_abs_pre > self.h_abs_inlet:
# Pre-cooling is required to reach saturation temperature
self.Q_abs_pre_cool = self.m_concentrate \
* (self.h_abs_pre - self.h_abs_inlet)
q,t,xl = libr_props.twoPhaseProps(self.h_abs_pre,
self.P_evap*1e-5,
self.x2)
self.T_abs_pre = K2C(t)
self.x_abs_pre = xl
# ignore vapor quality, q
# Minimum vapor pressure for absorption to occur
self.P_abs_pre = np.inf
else:
self.Q_abs_pre_cool = 0
#self.T_abs_pre = K2C(CP.PropsSI('T',
# 'H', self.h_abs_pre,
# 'P', self.P_evap,
# librname(self.x2)))
self.T_abs_pre = np.nan
# Minimum vapor pressure for absorption to occur
# self.P_abs_pre = CP.PropsSI('P',
# 'T', C2K(self.T_abs_pre),
# 'Q', 0,
# librname(self.x2))
self.P_abs_pre = np.nan
# Heat rejection in absorber: energy balance
pwater.update(CP.PQ_INPUTS, self.P_evap, 1)
self.h_abs_vapor_inlet = pwater.hmass() - h_w_ref
self.Q_abs_main = self.m_refrig * self.h_abs_vapor_inlet \
+ self.m_concentrate * self.h_abs_inlet \
- self.m_pump * self.h_abs_outlet
self.Q_abs_total = self.Q_abs_main + self.Q_abs_pre_cool
# Energy balance in SHX, pump side
D_in = CP.PropsSI('D',
'T',C2K(self.T_abs_outlet_max),
'Q',0,
librname(self.x1))
DeltaH_pump = (self.P_cond - self.P_evap) / D_in
self.W_pump = self.m_pump * DeltaH_pump
self.h_pump_outlet = self.h_abs_outlet + DeltaH_pump
DeltaH_SHX_pumpside = self.Q_SHX / self.m_pump
self.h_gen_pre = self.h_pump_outlet + DeltaH_SHX_pumpside
if self.h_gen_pre > self.h_gen_inlet:
# Flash steam
self.T_gen_pre = np.nan
else:
# The state is either saturated or subcooled.
# We need to calculate the temperature from specific heat.
cp = libr_props.massSpecificHeat(C2K(self.T_gen_inlet),self.x1)
deltaHsub = self.h_gen_inlet - self.h_gen_pre
deltaT = deltaHsub / cp
self.T_gen_pre = self.T_gen_inlet - deltaT
self.Q_gen_pre_heat = self.m_pump * (self.h_gen_inlet - self.h_gen_pre)
# Heat input to generator: energy balance
pwater.update(CP.PT_INPUTS, self.P_cond, C2K(self.T_gen_inlet))
self.h_gen_vapor_outlet = pwater.hmass() - h_w_ref
self.vapor_superheat = self.T_gen_inlet - self.T_cond
self.Q_gen_main = self.m_refrig * self.h_gen_vapor_outlet \
+ self.m_concentrate * self.h_gen_outlet \
- self.m_pump * self.h_gen_inlet
self.Q_gen_total = self.Q_gen_main + self.Q_gen_pre_heat
# Condenser
pwater.update(CP.PQ_INPUTS, self.P_cond, 0)
self.h_condenser_outlet = pwater.hmass() - h_w_ref
self.Q_condenser_reject = self.m_refrig * (self.h_gen_vapor_outlet
- self.h_condenser_outlet)
# Expansion valve
self.h_evap_inlet = self.h_condenser_outlet
# Evaporator
self.h_evap_outlet = self.h_abs_vapor_inlet
self.Q_evap_heat = self.m_refrig * (self.h_evap_outlet
- self.h_evap_inlet)
self.COP = self.Q_evap_heat / self.Q_gen_total
def updateGenerator(self,Q_gen):
genStream = self.getGeneratorStream()
self.h_gen_inlet = genStream.h_sat
self.T_gen_inlet = genStream.T_sat
if Q_gen < genStream.Q_preheat:
self.T_gen_outlet = genStream.T(Q_gen)
self.x2 = self.x1
# error?
else:
self.T_gen_outlet = genStream.T(Q_gen)
self.x2 = libr_props.massFraction(C2K(self.T_gen_outlet),
self.P_cond)
genStream.update(self.P_cond, self.m_pump, self.T_gen_pre,
self.x1, self.x2)
self.h_gen_outlet = genStream.h_out
self.h_gen_vapor_outlet = genStream.h_vapor_out
self.dx = self.x1 - self.x2
# Mass balance on LiBr
self.m_concentrate = self.m_pump * self.x1 / self.x2
# Mass balance on Water
self.m_refrig = self.m_pump - self.m_concentrate
self.f = self.m_pump / self.m_refrig
def updateSHX_hot_side(self):
DeltaT_max = self.T_gen_outlet - self.T_abs_outlet_max
DeltaT_SHX_concentrate = self.Eff_SHX * DeltaT_max
self.T_SHX_concentrate_outlet = self.T_gen_outlet \
- DeltaT_SHX_concentrate
self.h_SHX_concentrate_outlet = libr_props.massSpecificEnthalpy(
C2K(self.T_SHX_concentrate_outlet), self.x2)
self.Q_SHX = self.m_concentrate \
* (self.h_gen_outlet - self.h_SHX_concentrate_outlet)
def getHeatCurve(self):
"""Returns (Heat,T), arrays showing progress of the process.
Note: absorber heat input is negative.
Learn this from (my revision to) example 3.3.
"""
Q, result = 0, []
# Starting coordinates
result.append((0,self.T_abs_pre))
# Heat input to reach a saturated state.
Q += self.m_concentrate * (self.h_abs_inlet - self.h_abs_pre)
result.append((Q,self.T_abs_inlet_max))
# Heat input to reach outlet.
Q += self.m_pump * self.h_abs_outlet \
- self.m_concentrate * self.h_abs_inlet \
- self.m_refrig * self.h_abs_vapor_inlet
result.append((Q,self.T_abs_outlet_max))
# Pump -- no heat, just work
# Pump outlet to generator through SHX
Q += self.m_pump * self.h_gen_pre - self.m_pump * self.h_abs_outlet
result.append((Q,self.T_gen_pre))
# Generator preheat
Q += self.m_concentrate * self.h_gen_inlet \
- self.m_concentrate * self.h_gen_pre
result.append((Q,self.T_gen_inlet))
# Generator proper
Q += self.m_concentrate * self.h_gen_outlet \
+ self.m_refrig * self.h_gen_vapor_outlet \
- self.m_pump * self.h_gen_inlet
result.append((Q,self.T_gen_outlet))
# SHX, concentrate side
Q += self.m_concentrate * self.h_SHX_concentrate_outlet \
- self.m_concentrate * self.h_gen_outlet
result.append((Q,self.T_SHX_concentrate_outlet))
# Solution expander
result.append((Q,self.T_abs_pre))
# Condenser cool to saturated
result.append((Q,self.T_gen_inlet))
pwater.update(CP.QT_INPUTS,0,C2K(self.T_cond))
h_condenser_sat = pwater.hmass() - h_w_ref
Q += self.m_refrig * h_condenser_sat \
- self.m_refrig * self.h_gen_vapor_outlet
result.append((Q,self.T_cond))
# Real condense
Q += self.m_refrig * (self.h_condenser_outlet - h_condenser_sat)
result.append((Q,self.T_cond))
# What if condenser subcools? Later.
# Expander
pwater.update(CP.HmassP_INPUTS,self.h_condenser_outlet + h_w_ref,
self.P_evap)
T_into_evap = pwater.T()
result.append((Q,T_into_evap))
# Evaporator
Q += self.m_refrig * (self.h_evap_outlet - self.h_evap_inlet)
result.append((Q,self.T_evap))
return zip(*result)
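    # Illustrative use (given a ChillerLiBr1 instance c on which iterate1() has already
    # been run, and matplotlib.pyplot imported as plt):
    #   Q, T = c.getHeatCurve()
    #   plt.plot(Q, T)   # cumulative heat duty (W) vs. local temperature (deg C)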
def iterate2(self,T_gen_outlet,T_abs_outlet):
"""Resolve the concentrations. Not yet implemented."""
pass
def buildGeneratorHeatCurve(self):
"""Provide process heat canonical curve for generator in various forms.
For purpose of external heat exchange, generator process goes ...
P_cond, T_gen_pre, x1 (subcooled)
P_cond, T_gen_inlet, x1 (saturated liquid) + backwards flowing vapor
P_cond, T_gen_outlet, x2 (saturated liquid)
Returns
-------
T : callable
Maps process progress in terms of heat flux (W) to local
temperature T (deg C).
q : callable
Maps process local temperature T (deg C) and total heat flux Q (W)
to the local progress index, cumulative heat flux q (W).
"""
self.genpoints, Q = [], 0
# 0, pre-inlet
self.genpoints.append((Q,self.T_gen_pre))
# 1, Generator preheat
Q += self.m_concentrate * self.h_gen_inlet \
- self.m_concentrate * self.h_gen_pre
self.genpoints.append((Q,self.T_gen_inlet))
# 2, Generator proper
Q += self.m_concentrate * self.h_gen_outlet \
+ self.m_refrig * self.h_gen_vapor_outlet \
- self.m_pump * self.h_gen_inlet
self.genpoints.append((Q,self.T_gen_outlet))
def generatorHeatCurveQ(self,T,x_out):
"""Provide process heat canonical curve for generator in various forms.
Assumes that
* inlet solution state is obtained from self,
* generated vapor is flowing in reverse direction and
* it is everywhere in local equilibrium with solution.
Args
----
T : Temperature (deg C)
The local temperature
x_out : Mass fraction (kg/kg)
The outlet solution LiBr mass fraction
Returns
-------
q : Local progress index
Measured as cumulative heat flux (W).
Q : Total heat flux (W)
The total amount of heat transferred into the generator.
"""
# We may need m_concentrate. It depends on Q -- solve from inlet.
h_solution_inlet = self.h_gen_inlet
h_vapor_outlet = self.h_gen_vapor_outlet
T_solution_outlet = K2C(libr_props.temperature(self.P_cond * 1e-5,
x_out))
h_solution_outlet = libr_props.massSpecificEnthalpy(
C2K(T_solution_outlet), x_out)
# Mass balance on LiBr
m_solution_outlet = self.m_pump * self.x1 / x_out
# Mass balance on Water
m_vapor_outlet = self.m_pump - m_solution_outlet
m_total = m_solution_outlet
hlv0 = h_vapor_outlet - h_solution_inlet
q0 = m_total * h_vapor_outlet - self.m_pump * hlv0
Q = m_solution_outlet * h_solution_outlet \
+ m_vapor_outlet * h_vapor_outlet \
- self.m_pump * h_solution_inlet
q = 0
T0,T1,T2=self.genpoints[0][1],self.genpoints[1][1],self.genpoints[2][1]
Q0,Q1,Q2=self.genpoints[0][0],self.genpoints[1][0],self.genpoints[2][0]
if T < T0:
raise "Generator outlet must be greater than pre-inlet temperature!"
elif T < T1:
# The state is either saturated or subcooled.
# Use linear interpolation.
q = (T - T0) / (T1 - T0) * (Q1 - Q0)
else:
x_local = libr_props.massFraction(C2K(T),self.P_cond * 1e-5)
pwater.update(CP.PT_INPUTS, self.P_cond, C2K(T))
h_vapor_local = pwater.hmass() - h_w_ref
h_solution_local = libr_props.massSpecificEnthalpy(C2K(T), x_local)
# Mass balance on LiBr
m_solution_local = self.m_pump * self.x1 / x_local
hlv1 = h_vapor_local - h_solution_local
q1 = m_total * h_vapor_local - m_solution_local * hlv1
q = (q1 - q0) + (Q1 - Q0)
# TODO
# If q > Q (iff T > T_solution_outlet) then result is invalid.
return q, Q
def generatorHeatCurveT(self,Q):
"""Provide process heat canonical curve for generator in various forms."""
T = 0
return T
def getGeneratorStream(self):
gen = GeneratorLiBrInterpolated(self.P_cond,self.m_pump,self.T_gen_pre,
self.x1,self.x2)
return gen
def getAbsorberStream(self):
absorber = AbsorberLiBr1(self.P_evap,
self.m_concentrate,
self.h_abs_pre,
self.x_abs_pre,
self.h_evap_outlet)
return absorber
def getCondenserStream(self):
h_rel = self.h_gen_vapor_outlet + h_w_ref
condenser = HRHX_integral_model.waterStreamInterpolated(
self.P_cond, h_rel, self.m_refrig, -self.Q_condenser_reject)
return condenser
def getEvaporatorStream(self):
h_rel = self.h_evap_inlet + h_w_ref
evaporator = HRHX_integral_model.waterStreamInterpolated(
self.P_evap, h_rel, self.m_refrig, self.Q_evap_heat)
return evaporator
def __repr__(self):
names = """T_evap T_cond P_evap P_cond
x1 x2
T_gen_inlet T_gen_outlet T_abs_inlet_max T_abs_outlet_max
h_gen_inlet h_gen_outlet h_abs_inlet h_abs_outlet
m_pump m_concentrate m_refrig
Eff_SHX
T_SHX_concentrate_outlet Q_SHX
T_abs_pre h_abs_pre x_abs_pre Q_abs_pre_cool P_abs_pre
Q_abs_main Q_abs_total
T_gen_pre
Q_gen_pre_heat Q_gen_main Q_gen_total
Q_condenser_reject Q_evap_heat COP
W_pump
self.f
ZeroCheck
string""".split()
vals = [self.T_evap, self.T_cond, self.P_evap, self.P_cond,
self.x1, self.x2,
self.T_gen_inlet, self.T_gen_outlet, self.T_abs_inlet_max, self.T_abs_outlet_max,
self.h_gen_inlet, self.h_gen_outlet, self.h_abs_inlet, self.h_abs_outlet,
self.m_pump, self.m_concentrate, self.m_refrig,
self.Eff_SHX,
self.T_SHX_concentrate_outlet, self.Q_SHX,
self.T_abs_pre, self.h_abs_pre, self.x_abs_pre,
self.Q_abs_pre_cool, self.P_abs_pre,
self.Q_abs_main, self.Q_abs_total,
self.T_gen_pre,
self.Q_gen_pre_heat, self.Q_gen_main, self.Q_gen_total,
self.Q_condenser_reject, self.Q_evap_heat, self.COP,
self.W_pump,
self.f,
self.ZeroCheck(),
False]
units = """C C Pa Pa
kg/kg kg/kg
C C C C
J/kg J/kg J/kg J/kg
kg/s kg/s kg/s
K/K
C W
C J/kg kg/kg W Pa
W W
C
W W W
W W W/W
W
kg/kg
W
none""".split()
vartable = tabulate.tabulate(zip(names,vals,units))
statetable = tabulate.tabulate(self.stateTable,pointType.names)
return vartable + "\nComing soon ...\n" + statetable
def main():
if True:
# Example 6.1 in the book
P1,P2 = 673, 7445
T1 = K2C(CP.PropsSI('T','P',P1,'Q',1,water))
T2 = K2C(CP.PropsSI('T','P',P2,'Q',1,water))
# Trying different inputs
T1, T2 = 1, 35
c = ChillerLiBr1(T1,T2,0.5,0.7)
c.x2=libr_props2.Xsat(89.9,c.P_cond)
c.x1=libr_props2.Xsat(32.7,c.P_evap)
# Custom example
c = ChillerLiBr1(T_evap=5,T_cond=45,x1=0.6026,x2=0.66)
print("Initializing...")
print(c)
print("Iterating...")
try:
c.iterate1()
finally:
print(c)
if True:
# Figure 6.3 in the book
Eff_SHX = np.linspace(0,1)
COP = np.zeros_like(Eff_SHX)
for i in range(len(Eff_SHX)):
c = ChillerLiBr1(Eff_SHX=Eff_SHX[i])
try:
c.iterate1()
COP[i] = c.COP
except:
pass
if False:
import matplotlib.pyplot as plt
plt.plot(Eff_SHX, COP)
plt.show()
return c
if __name__ == "__main__":
c=main() | nfette/openACHP | src/libr3.py | libr3.py | py | 36,513 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "CoolProp.CoolProp.AbstractState",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "CoolProp.CoolProp",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "numpy.dtype",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.z... |
16671735068 | #!/usr/local/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2018/7/10 PM4:59
# @Author : L
# @Email : L862608263@163.com
# @File : google.py
# @Software: PyCharm
import json
import urllib
import urllib.request
import urllib.parse
import re
def url_open(url, data=None):
request = urllib.request.Request(url)
request.add_header('User-Agent',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) '
'Version/11.1 Safari/605.1.15')
response = urllib.request.urlopen(request, data)
return response.read()
class TranslateGoogle:
_tkk_url = "http://translate.google.cn/"
_translate_url = "https://translate.google.cn/translate_a/single"
def translate_word(self, translate_string):
tk = self.get_tk(translate_string)
        print('tk parameter required for the request: ', tk)
        # The original request carries many duplicate 'dt' keys; in a Python dict literal
        # the later values would overwrite the earlier ones anyway, so their purpose is unclear:
        # 'dt': 'bd', 'dt': 'ex', 'dt': 'ld', 'dt': 'md', 'dt': 'qca', 'dt': 'rw', 'dt': 'rm', 'dt': 'ss', 'dt': 't'
data = {'client': 't', 'sl': 'auto', 'tl': 'en', 'hl': 'zh - CN', 'dt': 'at',
'ie': 'UTF - 8',
'oe': 'UTF - 8', 'source': 'bh', 'ssel': '0', 'tsel': '0', 'kc': '1', 'tk': str(tk),
'q': translate_string}
data = urllib.parse.urlencode(data).encode('utf-8')
result = url_open(self._translate_url, data).decode('utf-8')
        print('Translation result: ', json.loads(result))
def get_tk(self, input_str):
a = 4211913557
b = -4140706334
number = 422392
e = "{0}.{1}".format(number, a + b).split('.')
h = int(e[0]) or 0
g = []
for f in range(0, len(input_str)):
c = ord(input_str[f])
if 128 > c:
g.append(c)
else:
if 2048 > c:
g.append(c >> 6 | 192)
else:
if (0xd800 == (c & 0xfc00)) and \
(f + 1 < len(input_str)) and \
(0xfc00 == (ord(input_str[f + 1]) & 0xdc00)):
f += 1
c = 0x10000 + ((c & 0x3ff) << 10) + (ord(input_str[f]) & 0x3ff)
g.append(c >> 18 | 240)
g.append(c >> 12 & 63 | 128)
else:
g.append(c >> 12 | 224)
g.append(c >> 6 & 63 | 128)
g.append(c & 63 | 128)
input_str = h
for d in range(len(g)):
input_str += g[d]
input_str = self.operation(input_str, '+-a^+6')
input_str = self.operation(input_str, '+-3^+b+-f')
input_str ^= int(e[1]) or 0
if 0 > input_str:
input_str = (input_str & (pow(2, 31) - 1)) + pow(2, 31)
input_str %= pow(10, 6)
return "%d.%d" % (input_str, input_str ^ h)
# noinspection PyMethodMayBeStatic
def operation(self, a, b):
for d in range(0, len(b) - 2, 3):
c = b[d + 2]
c = ord(c[0]) - 87 if 'a' <= c else int(c)
c = a >> c if '+' == b[d + 1] else a << c
a = a + c & (pow(2, 32) - 1) if '+' == b[d] else a ^ c
return a
# Google Translate
# The key point is that the translation request must carry a valid 'tk' parameter;
# if that value is wrong, the translation request is rejected.
# The algorithm that generates the tk value can be found by inspecting the elements
# at https://translate.google.cn, which yields the JS code below.
# go jfk-button-action -- id of the translate button
# Original JS source of the Google Translate TKK conversion:
# sr = function(a, b)
# {
# for (var c = 0; c < b.length - 2; c += 3) {
# var d = b.charAt(c + 2);
# d = "a" <= d ? d.charCodeAt(0) - 87: Number(d);
# d = "+" == b.charAt(c + 1) ? a >> > d: a << d;
# a = "+" == b.charAt(c) ? a + d & 4294967295: a ^ d
# }
# return a
# }, tr = null, ur = function(a)
# {
# if (null !== tr) var b = tr; else {
# b = rr(String.fromCharCode(84));
# var c = rr(String.fromCharCode(75));
# b =[b(), b()];
# b[1] = c();
# b = (tr = window[b.join(c())] | | "") | | ""
# }
# var
# d = rr(String.fromCharCode(116));
# c = rr(String.fromCharCode(107));
# d = [d(), d()];
# d[1] = c();
# c = "&" + d.join("") + "=";
# d = b.split(".");
# b = Number(d[0]) | | 0;
# for (var e =[], f = 0, g = 0; g < a.length; g++) {
# var l = a.charCodeAt(g);
# 128 > l ? e[f++] = l: (2048 > l ? e[f++] = l >> 6 | 192: (55296 == (l & 64512) & & g + 1 < a.length & & 56320 == (
# a.charCodeAt(g + 1) & 64512) ? (l = 65536 + ((l & 1023) << 10) + (a.charCodeAt(++g) & 1023), e[
# f + +] = l >> 18 | 240, e[f + +] = l >> 12 & 63 | 128):
# e[f + +] = l >> 12 | 224, e[f + +] = l >> 6 & 63 | 128),
# e[f + +] = l & 63 | 128)
# }
# a = b;
# for (f = 0; f < e.length; f++) a += e[f], a = sr(a, "+-a^+6");
# a = sr(a, "+-3^+b+-f");
# a ^= Number(d[1]) | | 0;
# 0 > a & & (a = (a & 2147483647) + 2147483648);
# a %= 1E6;
# return c + (a.toString() + "." +
# (a ^ b))
#
# };
if __name__ == "__main__":
translate = TranslateGoogle()
translate.translate_word("我爱你")
| SnowStorm-L/WebCrawler | Code/google/google_translate.py | google_translate.py | py | 5,312 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "urllib.request.Request",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "url... |
33970522684 | import logging
from typing import Any, Callable, Dict, Iterable, Iterator, Optional, Tuple, Union
from swh.core.db import BaseDb
from swh.model.model import (
BaseModel,
Directory,
DirectoryEntry,
ExtID,
RawExtrinsicMetadata,
Release,
Revision,
Snapshot,
SnapshotBranch,
TargetType,
)
from swh.model.swhids import ExtendedObjectType
from swh.storage.postgresql.converters import (
db_to_extid,
db_to_raw_extrinsic_metadata,
db_to_release,
db_to_revision,
)
from swh.storage.replay import OBJECT_CONVERTERS
from swh.storage.writer import JournalWriter
logger = logging.getLogger(__name__)
PARTITION_KEY = {
"content": "sha1",
"skipped_content": "sha1",
"directory": "id",
"extid": "target",
"metadata_authority": "type, url",
"metadata_fetcher": "name, version",
"raw_extrinsic_metadata": "target",
"revision": "revision.id",
"release": "release.id",
"snapshot": "id",
"origin": "id",
"origin_visit": "origin_visit.origin",
"origin_visit_status": "origin_visit_status.origin",
}
COLUMNS = {
"content": [
"sha1",
"sha1_git",
"sha256",
"blake2s256",
"length",
"status",
"ctime",
],
"skipped_content": [
"sha1",
"sha1_git",
"sha256",
"blake2s256",
"length",
"ctime",
"status",
"reason",
],
"directory": ["id", "dir_entries", "file_entries", "rev_entries", "raw_manifest"],
"extid": ["extid_type", "extid", "extid_version", "target_type", "target"],
"metadata_authority": ["type", "url"],
"metadata_fetcher": ["name", "version"],
"origin": ["url"],
"origin_visit": [
"visit",
"type",
("origin.url", "origin"),
"date",
],
"origin_visit_status": [
("origin_visit_status.visit", "visit"),
("origin.url", "origin"),
("origin_visit_status.date", "date"),
"type",
"snapshot",
"status",
"metadata",
],
"raw_extrinsic_metadata": [
"raw_extrinsic_metadata.type",
"raw_extrinsic_metadata.target",
"metadata_authority.type",
"metadata_authority.url",
"metadata_fetcher.name",
"metadata_fetcher.version",
"discovery_date",
"format",
"raw_extrinsic_metadata.metadata",
"origin",
"visit",
"snapshot",
"release",
"revision",
"path",
"directory",
],
"revision": [
("revision.id", "id"),
"date",
"date_offset_bytes",
"committer_date",
"committer_date_offset_bytes",
"type",
"directory",
"message",
"synthetic",
"metadata",
"extra_headers",
(
"array(select parent_id::bytea from revision_history rh "
"where rh.id = revision.id order by rh.parent_rank asc)",
"parents",
),
"raw_manifest",
("a.id", "author_id"),
("a.name", "author_name"),
("a.email", "author_email"),
("a.fullname", "author_fullname"),
("c.id", "committer_id"),
("c.name", "committer_name"),
("c.email", "committer_email"),
("c.fullname", "committer_fullname"),
],
"release": [
("release.id", "id"),
"date",
"date_offset_bytes",
"comment",
("release.name", "name"),
"synthetic",
"target",
"target_type",
("a.id", "author_id"),
("a.name", "author_name"),
("a.email", "author_email"),
("a.fullname", "author_fullname"),
"raw_manifest",
],
"snapshot": ["id", "object_id"],
}
JOINS = {
"release": ["person a on release.author=a.id"],
"revision": [
"person a on revision.author=a.id",
"person c on revision.committer=c.id",
],
"origin_visit": ["origin on origin_visit.origin=origin.id"],
"origin_visit_status": [
"origin on origin_visit_status.origin=origin.id",
],
"raw_extrinsic_metadata": [
"metadata_authority on "
"raw_extrinsic_metadata.authority_id=metadata_authority.id",
"metadata_fetcher on raw_extrinsic_metadata.fetcher_id=metadata_fetcher.id",
],
}
EXTRA_WHERE = {
# hack to force the right index usage on table extid
"extid": "target_type in ('revision', 'release', 'content', 'directory')"
}
def directory_converter(db: BaseDb, directory_d: Dict[str, Any]) -> Directory:
"""Convert directory from the flat representation to swh model
compatible objects.
"""
columns = ["target", "name", "perms"]
query_template = """
select %(columns)s
from directory_entry_%(type)s
where id in %%s
"""
types = ["file", "dir", "rev"]
entries = []
with db.cursor() as cur:
for type in types:
ids = directory_d.pop("%s_entries" % type)
if not ids:
continue
query = query_template % {
"columns": ",".join(columns),
"type": type,
}
cur.execute(query, (tuple(ids),))
for row in cur:
entry_d = dict(zip(columns, row))
entry = DirectoryEntry(
name=entry_d["name"],
type=type,
target=entry_d["target"],
perms=entry_d["perms"],
)
entries.append(entry)
(is_corrupt, dir_) = Directory.from_possibly_duplicated_entries(
id=directory_d["id"],
entries=tuple(entries),
raw_manifest=directory_d["raw_manifest"],
)
if is_corrupt:
logger.info("%s has duplicated entries", dir_.swhid())
return dir_
def raw_extrinsic_metadata_converter(
db: BaseDb, metadata: Dict[str, Any]
) -> RawExtrinsicMetadata:
"""Convert a raw extrinsic metadata from the flat representation to swh model
compatible objects.
"""
return db_to_raw_extrinsic_metadata(metadata)
def extid_converter(db: BaseDb, extid: Dict[str, Any]) -> ExtID:
"""Convert an extid from the flat representation to swh model
compatible objects.
"""
return db_to_extid(extid)
def revision_converter(db: BaseDb, revision_d: Dict[str, Any]) -> Revision:
"""Convert revision from the flat representation to swh model
compatible objects.
"""
revision = db_to_revision(revision_d)
assert revision is not None, revision_d["id"]
return revision
def release_converter(db: BaseDb, release_d: Dict[str, Any]) -> Release:
"""Convert release from the flat representation to swh model
compatible objects.
"""
release = db_to_release(release_d)
assert release is not None, release_d["id"]
return release
def snapshot_converter(db: BaseDb, snapshot_d: Dict[str, Any]) -> Snapshot:
"""Convert snapshot from the flat representation to swh model
compatible objects.
"""
columns = ["name", "target", "target_type"]
query = """
select %s
from snapshot_branches sbs
inner join snapshot_branch sb on sb.object_id=sbs.branch_id
where sbs.snapshot_id=%%s
""" % ", ".join(
columns
)
with db.cursor() as cur:
cur.execute(query, (snapshot_d["object_id"],))
branches = {}
for name, *row in cur:
branch_d = dict(zip(columns[1:], row))
if branch_d["target"] is not None and branch_d["target_type"] is not None:
branch: Optional[SnapshotBranch] = SnapshotBranch(
target=branch_d["target"],
target_type=TargetType(branch_d["target_type"]),
)
else:
branch = None
branches[name] = branch
return Snapshot(
id=snapshot_d["id"],
branches=branches,
)
CONVERTERS: Dict[str, Callable[[BaseDb, Dict[str, Any]], BaseModel]] = {
"directory": directory_converter,
"extid": extid_converter,
"raw_extrinsic_metadata": raw_extrinsic_metadata_converter,
"revision": revision_converter,
"release": release_converter,
"snapshot": snapshot_converter,
}
def object_to_offset(object_id, numbits):
"""Compute the index of the range containing object id, when dividing
space into 2^numbits.
Args:
object_id (str): The hex representation of object_id
numbits (int): Number of bits in which we divide input space
Returns:
The index of the range containing object id
"""
q, r = divmod(numbits, 8)
length = q + (r != 0)
shift_bits = 8 - r if r else 0
truncated_id = object_id[: length * 2]
if len(truncated_id) < length * 2:
truncated_id += "0" * (length * 2 - len(truncated_id))
truncated_id_bytes = bytes.fromhex(truncated_id)
return int.from_bytes(truncated_id_bytes, byteorder="big") >> shift_bits
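# For instance, with the helper above (hex ids padded to 40 characters for clarity):
#   object_to_offset("ff" + "0" * 38, 8) == 255   # index given by the top 8 bits of the id
#   object_to_offset("80" + "0" * 38, 1) == 1     # top bit only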
def byte_ranges(
numbits: int, start_object: Optional[str] = None, end_object: Optional[str] = None
) -> Iterator[Tuple[Optional[bytes], Optional[bytes]]]:
"""Generate start/end pairs of bytes spanning numbits bits and
constrained by optional start_object and end_object.
Args:
numbits: Number of bits in which we divide input space
start_object: Hex object id contained in the first range
returned
end_object: Hex object id contained in the last range
returned
Yields:
2^numbits pairs of bytes
"""
q, r = divmod(numbits, 8)
length = q + (r != 0)
shift_bits = 8 - r if r else 0
def to_bytes(i):
return int.to_bytes(i << shift_bits, length=length, byteorder="big")
start_offset = 0
end_offset = 1 << numbits
if start_object is not None:
start_offset = object_to_offset(start_object, numbits)
if end_object is not None:
end_offset = object_to_offset(end_object, numbits) + 1
for start in range(start_offset, end_offset):
end = start + 1
if start == 0:
yield None, to_bytes(end)
elif end == 1 << numbits:
yield to_bytes(start), None
else:
yield to_bytes(start), to_bytes(end)
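# For instance, dividing the space into 2**2 ranges with no bounds:
#   list(byte_ranges(2)) == [(None, b'\x40'), (b'\x40', b'\x80'),
#                            (b'\x80', b'\xc0'), (b'\xc0', None)]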
def raw_extrinsic_metadata_target_ranges(
start_object: Optional[str] = None, end_object: Optional[str] = None
) -> Iterator[Tuple[Optional[str], Optional[str]]]:
"""Generate ranges of values for the `target` attribute of `raw_extrinsic_metadata`
objects.
This generates one range for all values before the first SWHID (which would
correspond to raw origin URLs), then a number of hex-based ranges for each
known type of SWHID (2**12 ranges for directories, 2**8 ranges for all other
types). Finally, it generates one extra range for values above all possible
SWHIDs.
"""
if start_object is None:
start_object = ""
swhid_target_types = sorted(type.value for type in ExtendedObjectType)
first_swhid = f"swh:1:{swhid_target_types[0]}:"
# Generate a range for url targets, if the starting object is before SWHIDs
if start_object < first_swhid:
yield start_object, (
first_swhid
if end_object is None or end_object >= first_swhid
else end_object
)
if end_object is not None and end_object <= first_swhid:
return
# Prime the following loop, which uses the upper bound of the previous range
# as lower bound, to account for potential targets between two valid types
# of SWHIDs (even though they would eventually be rejected by the
# RawExtrinsicMetadata parser, they /might/ exist...)
end_swhid = first_swhid
# Generate ranges for swhid targets
for target_type in swhid_target_types:
finished = False
base_swhid = f"swh:1:{target_type}:"
last_swhid = base_swhid + ("f" * 40)
if start_object > last_swhid:
continue
# Generate 2**8 or 2**12 ranges
for _, end in byte_ranges(12 if target_type == "dir" else 8):
# Reuse previous upper bound
start_swhid = end_swhid
# Use last_swhid for this object type if on the last byte range
end_swhid = (base_swhid + end.hex()) if end is not None else last_swhid
# Ignore out of bounds ranges
if start_object >= end_swhid:
continue
# Potentially clamp start of range to the first object requested
start_swhid = max(start_swhid, start_object)
# Handle ending the loop early if the last requested object id is in
# the current range
if end_object is not None and end_swhid >= end_object:
end_swhid = end_object
finished = True
yield start_swhid, end_swhid
if finished:
return
# Generate one final range for potential raw origin URLs after the last
# valid SWHID
start_swhid = max(start_object, end_swhid)
yield start_swhid, end_object
def integer_ranges(
start: str, end: str, block_size: int = 1000
) -> Iterator[Tuple[Optional[int], Optional[int]]]:
for range_start in range(int(start), int(end), block_size):
if range_start == 0:
yield None, block_size
elif range_start + block_size > int(end):
yield range_start, int(end)
else:
yield range_start, range_start + block_size
RANGE_GENERATORS: Dict[
str,
Union[
Callable[[str, str], Iterable[Tuple[Optional[str], Optional[str]]]],
Callable[[str, str], Iterable[Tuple[Optional[bytes], Optional[bytes]]]],
Callable[[str, str], Iterable[Tuple[Optional[int], Optional[int]]]],
],
] = {
"content": lambda start, end: byte_ranges(24, start, end),
"skipped_content": lambda start, end: [(None, None)],
"directory": lambda start, end: byte_ranges(24, start, end),
"extid": lambda start, end: byte_ranges(24, start, end),
"revision": lambda start, end: byte_ranges(24, start, end),
"release": lambda start, end: byte_ranges(16, start, end),
"raw_extrinsic_metadata": raw_extrinsic_metadata_target_ranges,
"snapshot": lambda start, end: byte_ranges(16, start, end),
"origin": integer_ranges,
"origin_visit": integer_ranges,
"origin_visit_status": integer_ranges,
}
def compute_query(obj_type, start, end):
columns = COLUMNS.get(obj_type)
join_specs = JOINS.get(obj_type, [])
join_clause = "\n".join("left join %s" % clause for clause in join_specs)
additional_where = EXTRA_WHERE.get(obj_type)
where = []
where_args = []
if start:
where.append("%(keys)s >= %%s")
where_args.append(start)
if end:
where.append("%(keys)s < %%s")
where_args.append(end)
if additional_where:
where.append(additional_where)
where_clause = ""
if where:
where_clause = ("where " + " and ".join(where)) % {
"keys": "(%s)" % PARTITION_KEY[obj_type]
}
column_specs = []
column_aliases = []
for column in columns:
if isinstance(column, str):
column_specs.append(column)
column_aliases.append(column)
else:
column_specs.append("%s as %s" % column)
column_aliases.append(column[1])
query = """
select %(columns)s
from %(table)s
%(join)s
%(where)s
""" % {
"columns": ",".join(column_specs),
"table": obj_type,
"join": join_clause,
"where": where_clause,
}
return query, where_args, column_aliases
def fetch(db, obj_type, start, end):
"""Fetch all obj_type's identifiers from db.
This opens one connection, stream objects and when done, close
the connection.
Args:
db (BaseDb): Db connection object
obj_type (str): Object type
start (Union[bytes|Tuple]): Range start identifier
end (Union[bytes|Tuple]): Range end identifier
Raises:
ValueError if obj_type is not supported
Yields:
Objects in the given range
"""
query, where_args, column_aliases = compute_query(obj_type, start, end)
converter = CONVERTERS.get(obj_type)
with db.cursor() as cursor:
logger.debug("Fetching data for table %s", obj_type)
logger.debug("query: %s %s", query, where_args)
cursor.execute(query, where_args)
for row in cursor:
record = dict(zip(column_aliases, row))
if converter:
record = converter(db, record)
else:
record = OBJECT_CONVERTERS[obj_type](record)
logger.debug("record: %s", record)
yield record
def _format_range_bound(bound):
if isinstance(bound, bytes):
return bound.hex()
else:
return str(bound)
MANDATORY_KEYS = ["storage", "journal_writer"]
class JournalBackfiller:
"""Class in charge of reading the storage's objects and sends those
back to the journal's topics.
This is designed to be run periodically.
"""
def __init__(self, config=None):
self.config = config
self.check_config(config)
self._db = None
self.writer = JournalWriter({"cls": "kafka", **self.config["journal_writer"]})
assert self.writer.journal is not None
@property
def db(self):
if self._db is None:
self._db = BaseDb.connect(self.config["storage"]["db"])
return self._db
def check_config(self, config):
missing_keys = []
for key in MANDATORY_KEYS:
if not config.get(key):
missing_keys.append(key)
if missing_keys:
raise ValueError(
"Configuration error: The following keys must be"
" provided: %s" % (",".join(missing_keys),)
)
if "cls" not in config["storage"] or config["storage"]["cls"] not in (
"local",
"postgresql",
):
raise ValueError(
"swh storage backfiller must be configured to use a local"
" (PostgreSQL) storage"
)
def parse_arguments(self, object_type, start_object, end_object):
"""Parse arguments
Raises:
ValueError for unsupported object type
ValueError if object ids are not parseable
Returns:
Parsed start and end object ids
"""
if object_type not in COLUMNS:
raise ValueError(
"Object type %s is not supported. "
"The only possible values are %s"
% (object_type, ", ".join(sorted(COLUMNS.keys())))
)
if object_type in ["origin", "origin_visit", "origin_visit_status"]:
start_object = start_object or "0"
end_object = end_object or str(100_000_000) # hard-coded limit
return start_object, end_object
def run(self, object_type, start_object, end_object, dry_run=False):
"""Reads storage's subscribed object types and send them to the
journal's reading topic.
"""
start_object, end_object = self.parse_arguments(
object_type, start_object, end_object
)
for range_start, range_end in RANGE_GENERATORS[object_type](
start_object, end_object
):
logger.info(
"Processing %s range %s to %s",
object_type,
_format_range_bound(range_start),
_format_range_bound(range_end),
)
objects = fetch(self.db, object_type, start=range_start, end=range_end)
if not dry_run:
self.writer.write_additions(object_type, objects)
else:
# only consume the objects iterator to check for any potential
# decoding/encoding errors
for obj in objects:
pass
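# A minimal configuration sketch. The key names under "journal_writer" and the
# connection string are illustrative assumptions about the kafka writer and the
# PostgreSQL setup, not values shipped with this module:
#
#   config = {
#       "storage": {"cls": "postgresql", "db": "service=swh-storage"},
#       "journal_writer": {"brokers": ["kafka:9092"], "prefix": "swh.journal.objects"},
#   }
#   JournalBackfiller(config).run("revision", start_object=None, end_object=None, dry_run=True)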
if __name__ == "__main__":
print('Please use the "swh-journal backfiller run" command')
| SoftwareHeritage/swh-storage | swh/storage/backfill.py | backfill.py | py | 20,163 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "swh.core.db.BaseDb",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"... |
36690171785 | from collections import deque
import heapq
class TreeNode(object):
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution(object):
def verticalTraversal(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
treequeue = deque()
rowidx = 0
colidx = 0
treequeue.append((root, rowidx, colidx))
vertical_tree = []
# iterate through the tree, calculating coordinates
while treequeue:
node, rowidx, colidx = treequeue.pop()
if node:
heapq.heappush(vertical_tree, (colidx, rowidx, node.val))
treequeue.append((node.left, rowidx+1, colidx-1))
treequeue.append((node.right, rowidx+1, colidx+1))
vertical_list = []
mincol = vertical_tree[0][0]
onelist = []
while vertical_tree:
if vertical_tree[0][0] == mincol:
_, _, val = heapq.heappop(vertical_tree)
onelist.append(val)
else:
mincol = vertical_tree[0][0]
vertical_list.append(onelist)
onelist = []
if onelist:
vertical_list.append(onelist) # append the last one
return vertical_list
sol = Solution()
root = TreeNode(1, TreeNode(2, TreeNode(4, None, None), TreeNode(6, None, None)), TreeNode(3, TreeNode(5, None, None), TreeNode(7, None, None)))
result = sol.verticalTraversal(root)
if isinstance(result, TreeNode):
result.prettyprint()
else:
print(result) | lingerxu/leetcode-solutions | heapq.py | heapq.py | py | 1,648 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.deque",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "heapq.heappush",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "heapq.heappop",
"line_number": 35,
"usage_type": "call"
}
] |
72537368994 | from rest_framework import serializers
from .models import Departman,Personel
from django.utils.timezone import now
class DepartmanSerializer(serializers.ModelSerializer):
count = serializers.SerializerMethodField()
class Meta:
model = Departman
fields = (
'id',
'name',
'count',
)
def get_count(self,giris):
return giris.personel.count()
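# DRF resolves a SerializerMethodField by calling get_<field_name>, so the `count`
# field above is populated by get_count for each serialized Departman.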
class PersonelSerializer(serializers.ModelSerializer):
departman =serializers.StringRelatedField()
departman_id=serializers.IntegerField()
total = serializers.SerializerMethodField()
class Meta :
model = Personel
fields = (
'id',
'first_name',
'last_name',
'title',
'gender',
'salary',
'departman',
'departman_id',
'start_date',
'total'
)
def get_total(self,toplam):
return (now()- toplam.start_date).days | dedeogluie/Personel_APP | personelApp/serializers.py | serializers.py | py | 1,076 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.SerializerMethodField",
"line_number": ... |
36367686593 | # -*- coding: utf-8 -*-
import numpy as np
import os
import h5py
import logging
import shutil  # used by xtomo_writer when overwriting an existing output folder
from scipy import misc  # misc.toimage is used below; assumes the old scipy.misc API
from data_exchange import DataExchangeFile, DataExchangeEntry
class Export():
def __init__(xtomo, data=None, data_white=None,
data_dark=None, theta=None,
hdf5_file_name=None, data_exchange_type=None,
sample_name=None, logger=None, log='INFO'):
#xtomo.data = data
xtomo.data_white = data_white
xtomo.data_dark = data_dark
xtomo.theta = theta
# Set the log level.
xtomo.logger = None
xtomo._log_level = str(log).upper()
xtomo._init_logging()
def xtomo_writer(data, output_file=None, x_start=0,
digits=5, axis=0, overwrite=False,
precision=True):
"""
Write 3-D data to a stack of tif files.
Parameters
-----------
output_file : str, optional
Name of the output file.
x_start : scalar, optional
First index of the data on first dimension
of the array.
digits : scalar, optional
Number of digits used for file indexing.
For example if 4: test_XXXX.tiff
axis : scalar, optional
Images are read along that axis.
overwrite: bool, optional
if overwrite=True the existing data in the
reconstruction folder will be overwritten
precision : bool, optional
Export data type precision. if True it
saves 32-bit precision. Otherwise it
uses 8-bit precision.
Notes
-----
If file exists, saves it with a modified name.
If output location is not specified, the data is
saved inside ``recon`` folder where the input data
resides. The name of the reconstructed files will
be initialized with ``recon``
Examples
--------
- Save sinogram data:
>>> import tomopy
>>>
>>> # Load data
>>> myfile = 'demo/data.h5'
>>> data, white, dark, theta = tomopy.xtomo_reader(myfile)
>>>
>>> # Save data
>>> output_file='tmp/slice_'
>>> tomopy.xtomo_writer(data, output_file, axis=1)
>>> print "Images are succesfully saved at " + output_file + '...'
- Save first 16 projections:
>>> import tomopy
>>>
>>> # Load data
>>> myfile = 'demo/data.h5'
>>> data, white, dark, theta = tomopy.xtomo_reader(myfile, projections_start=0, projections_end=16)
>>>
>>> # Save data
>>> output_file='tmp/projection_'
>>> tomopy.xtomo_writer(data, output_file, axis=0)
>>> print "Images are succesfully saved at " + output_file + '...'
- Save reconstructed slices:
>>> import tomopy
>>>
>>> # Load data
>>> myfile = 'demo/data.h5'
>>> data, white, dark, theta = tomopy.xtomo_reader(myfile)
>>>
>>> # Perform reconstruction
>>> d = tomopy.xtomo_dataset(log='error')
>>> d.dataset(data, white, dark, theta)
>>> d.center = 661.5
>>> d.gridrec()
>>>
>>> # Save data
>>> output_file='tmp/reconstruction_'
>>> tomopy.xtomo_writer(d.data_recon, output_file, axis=0)
>>> print "Images are succesfully saved at " + output_file + '...'
"""
if output_file == None:
output_file = "tmp/img_"
output_file = os.path.abspath(output_file)
dir_path = os.path.dirname(output_file)
# Remove TIFF extension if there is.
if (output_file.endswith('tif') or
output_file.endswith('tiff')) :
output_file = output_file.split(".")[-2]
if overwrite:
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
# Create new folders.
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# Select desired x from whole data.
num_x, num_y, num_z = data.shape
if axis == 0:
x_end = x_start+num_x
elif axis == 1:
x_end = x_start+num_y
elif axis == 2:
x_end = x_start+num_z
# Write data.
file_index = ["" for x in range(digits)]
for m in range(digits):
file_index[m] = '0' * (digits - m - 1)
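# file_index[n] holds the zero padding used for an (n+1)-digit index; for example,
# with digits=5 a slice index of 42 is written as "<output_file>00042.tif".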
ind = range(x_start, x_end)
for m in range(len(ind)):
for n in range(digits):
if ind[m] < np.power(10, n + 1):
file_body = output_file + file_index[n] + str(ind[m])
file_name = file_body + '.tif'
break
if precision:
if axis == 0:
img = misc.toimage(data[m, :, :], mode='F')
elif axis == 1:
img = misc.toimage(data[:, m, :], mode='F')
elif axis == 2:
img = misc.toimage(data[:, :, m], mode='F')
else:
if axis == 0:
img = misc.toimage(data[m, :, :])
elif axis == 1:
img = misc.toimage(data[:, m, :])
elif axis == 2:
img = misc.toimage(data[:, :, m])
# check if file exists.
if os.path.isfile(file_name):
# generate a new file name.
indq = 1
FLAG_SAVE = False
while not FLAG_SAVE:
new_file_body = file_body + '-' + str(indq)
new_file_name = new_file_body + '.tif'
if not os.path.isfile(new_file_name):
img.save(new_file_name)
FLAG_SAVE = True
file_name = new_file_name
else:
indq += 1
else:
img.save(file_name)
def xtomo_exchange(xtomo, data, data_white=None, data_dark=None, theta=None, sample_name=None,
data_exchange_type=None,
hdf5_file_name=None,
log='INFO'
):
"""
Write 3-D data to a data-exchange file.
Parameters
----------
data : ndarray
3-D X-ray absorption tomography raw data.
Size of the dimensions should be:
[projections, slices, pixels].
data_white, data_dark : ndarray, optional
3-D white-field/dark_field data. Multiple
projections are stacked together to obtain
a 3-D matrix. 2nd and 3rd dimensions should
be the same as data: [shots, slices, pixels].
theta : ndarray, optional
Data acquisition angles corresponding
to each projection.
data_exchange_type : str
label defining the type of data contained in the data exchange file;
for raw tomography projection data use 'tomography_raw_projections'
hdf5_file_name : str
Output file.
Notes
-----
If file exists, does nothing
Examples
--------
- Convert tomographic projection series (raw, dark, white) of tiff in data exchange:
>>> from dataexchange import xtomo_importer as dx
>>> from dataexchange import xtomo_exporter as ex
>>> file_name = '/local/dataraid/databank/Anka/radios/image_.tif'
>>> dark_file_name = '/local/dataraid/databank/Anka/darks/image_.tif'
>>> white_file_name = '/local/dataraid/databank/Anka/flats/image_.tif'
>>>
>>> hdf5_file_name = '/local/dataraid/databank/dataExchange/microCT/xx_yy_Anka.h5'
>>>
>>> projections_start = 0
>>> projections_end = 3167
>>> white_start = 0
>>> white_end = 100
>>> dark_start = 0
>>> dark_end = 100
>>>
>>> sample_name = 'Anka'
>>>
>>> mydata = dx.Import()
>>> # Read series of images
>>> data, white, dark, theta = mydata.series_of_images(file_name,
>>> projections_start = projections_start,
>>> projections_end = projections_end,
>>> white_file_name = white_file_name,
>>> white_start = white_start,
>>> white_end = white_end,
>>> dark_file_name = dark_file_name,
>>> dark_start = dark_start,
>>> dark_end = dark_end,
>>> sample_name = sample_name,
>>> projections_digits = 5,
>>> log='INFO'
>>> )
>>>
>>> mydata = ex.Export()
>>> # Create minimal data exchange hdf5 file
>>> mydata.xtomo_exchange(data = data,
>>> data_white = white,
>>> data_dark = dark,
>>> theta = theta,
>>> hdf5_file_name = hdf5_file_name,
>>> data_exchange_type = 'tomography_raw_projections'
>>> )
"""
if (hdf5_file_name != None):
if os.path.isfile(hdf5_file_name):
xtomo.logger.info("Data Exchange file already exists: [%s]. Next time use the Data Exchange reader instead", hdf5_file_name)
else:
# Create new folder.
dirPath = os.path.dirname(hdf5_file_name)
if not os.path.exists(dirPath):
os.makedirs(dirPath)
# Get the file_name in lower case.
lFn = hdf5_file_name.lower()
# Split the string on the '.' delimiter
end = lFn.split('.')
# Write the Data Exchange HDF5 file.
# Open DataExchange file
f = DataExchangeFile(hdf5_file_name, mode='w')
xtomo.logger.info("Creating Data Exchange File [%s]", hdf5_file_name)
# Create core HDF5 dataset in exchange group for projections_theta_range
# deep stack of x,y images /exchange/data
xtomo.logger.info("Adding projections to Data Exchange File [%s]", hdf5_file_name)
f.add_entry( DataExchangeEntry.data(data={'value': data, 'units':'counts', 'description': 'transmission', 'axes':'theta:y:x', 'dataset_opts': {'compression': 'gzip', 'compression_opts': 4} }))
if (theta is not None):
f.add_entry( DataExchangeEntry.data(theta={'value': theta, 'units':'degrees'}))
xtomo.logger.info("Adding theta to Data Exchange File [%s]", hdf5_file_name)
if (data_dark is not None):
xtomo.logger.info("Adding dark fields to Data Exchange File [%s]", hdf5_file_name)
f.add_entry( DataExchangeEntry.data(data_dark={'value': data_dark, 'units':'counts', 'axes':'theta_dark:y:x', 'dataset_opts': {'compression': 'gzip', 'compression_opts': 4} }))
if (data_white is not None):
xtomo.logger.info("Adding white fields to Data Exchange File [%s]", hdf5_file_name)
f.add_entry( DataExchangeEntry.data(data_white={'value': data_white, 'units':'counts', 'axes':'theta_white:y:x', 'dataset_opts': {'compression': 'gzip', 'compression_opts': 4} }))
if (data_exchange_type != None):
xtomo.logger.info("Adding data type to Data Exchange File [%s]", hdf5_file_name)
f.add_entry( DataExchangeEntry.data(title={'value': data_exchange_type}))
if (sample_name == None):
sample_name = end[0]
f.add_entry( DataExchangeEntry.sample( name={'value':sample_name}, description={'value':'Sample name was assigned by the HDF5 converter and based on the HDF5 file name'}))
else:
f.add_entry( DataExchangeEntry.sample( name={'value':sample_name}, description={'value':'Sample name was read from the user log file'}))
f.close()
xtomo.logger.info("DONE!!!!. Created Data Exchange File [%s]", hdf5_file_name)
else:
xtomo.logger.info("Nothing to do ...")
def _init_logging(xtomo):
"""
Setup and start command line logging.
"""
# Top-level log setup.
xtomo.logger = logging.getLogger("data exchange")
if xtomo._log_level == 'DEBUG':
xtomo.logger.setLevel(logging.DEBUG)
elif xtomo._log_level == 'INFO':
xtomo.logger.setLevel(logging.INFO)
elif xtomo._log_level == 'WARN':
xtomo.logger.setLevel(logging.WARN)
elif xtomo._log_level == 'WARNING':
xtomo.logger.setLevel(logging.WARNING)
elif xtomo._log_level == 'ERROR':
xtomo.logger.setLevel(logging.ERROR)
# Terminal stream log.
ch = logging.StreamHandler()
if xtomo._log_level == 'DEBUG':
ch.setLevel(logging.DEBUG)
elif xtomo._log_level == 'INFO':
ch.setLevel(logging.INFO)
elif xtomo._log_level == 'WARN':
ch.setLevel(logging.WARN)
elif xtomo._log_level == 'WARNING':
ch.setLevel(logging.WARNING)
elif xtomo._log_level == 'ERROR':
ch.setLevel(logging.ERROR)
# Show date and time.
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
# Update logger.
if not len(xtomo.logger.handlers): # Create the handlers only the first time.
xtomo.logger.addHandler(ch)
| decarlof/syncpy | syncpy/dataexchange/xtomo/xtomo_exporter.py | xtomo_exporter.py | py | 14,858 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.abspath",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_n... |
5973276889 | import os
import logging
import gzip
from psycopg2 import sql
from django.core.management.base import BaseCommand
from django.db import connection
from oac_search import models, pubmed
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
class Command(BaseCommand):
help = 'Search and export full text OAC articles from the database (no on-the-fly XML extraction!)'
def add_arguments(self, parser):
parser.add_argument('queryfile', help="query file")
def handle(self, *args, **options):
queryfile = options['queryfile']
path = os.path.split(os.path.abspath(queryfile))[0]
query = open(queryfile).read().replace('\n', ' ').strip()
idfile = os.path.join(path, queryfile + '.pmcids')
fullfile = os.path.join(path, queryfile + '.fulltext.gz')
a = pubmed.NCBI_search()
pmcids = a.query(query, db='pmc', onlyOAC=True)
pmcids = ['PMC' + x for x in pmcids]
with open(idfile, 'w') as ofp:
ofp.write('\n'.join(pmcids))
logging.info('Total PMC IDs: {}; present in DB: {}'.format(len(pmcids), models.Article.objects.filter(pmcid__in=pmcids).count()))
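# The matching PMC ids are bulk-loaded into a temporary table and joined against
# oac_search_article server-side, rather than building one very large IN (...) query.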
with gzip.open(fullfile, 'wt', encoding='utf8') as ofp:
with connection.cursor() as cursor:
cursor.execute('''DROP TABLE IF EXISTS _result_pmcids_''')
cursor.execute('''CREATE TABLE _result_pmcids_ (
id serial primary key,
pmcid varchar(20) unique not null)''')
connection.commit()
with open(idfile) as fp:
cursor.copy_from(fp, '_result_pmcids_', columns=('pmcid',))
connection.commit()
cursor.execute('''SELECT oac_search_article.pmcid, oac_search_article.text
FROM oac_search_article
INNER JOIN _result_pmcids_ ON oac_search_article.pmcid = _result_pmcids_.pmcid''')
for pmcid, text in cursor:
ofp.write('{}\t{}\n'.format(pmcid, text))
cursor.execute('''DROP TABLE IF EXISTS _result_pmcids_''')
logging.info('Export complete.')
| vpodpecan/pmcutils | oac_search/management/commands/search_export_fulltext.py | search_export_fulltext.py | py | 2,276 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 15,
"usage_type": "name"
},
{
"api... |
25917719676 | import csv
import io
import json
import os.path
import pickle
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
FINANCE_SPREADSHEET_ID = os.getenv("FINANCE_SPREADSHEET_ID")
TEMPLATE_SHEET_ID = os.getenv("TEMPLATE_SHEET_ID")
def get_google_creds():
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_console()
# Save the credentials for the next run
# cloud functions have a read-only fs so just refresh the token every time
# with open('token.pickle', 'wb') as token:
# pickle.dump(creds, token)
return creds
def sheet_exists(service, name):
try:
service.spreadsheets().get(
spreadsheetId=FINANCE_SPREADSHEET_ID,
ranges=[name],
).execute()
return True
except HttpError as e:
parsed = json.loads(e.content)
if parsed['error']['status'] != 'INVALID_ARGUMENT':
raise
return False
def create_sheet(service, name):
new_sheet = service.spreadsheets().sheets().copyTo(
spreadsheetId=FINANCE_SPREADSHEET_ID,
sheetId=TEMPLATE_SHEET_ID,
body={
"destinationSpreadsheetId": FINANCE_SPREADSHEET_ID,
},
).execute()
new_sheet_id = new_sheet['sheetId']
service.spreadsheets().batchUpdate(
spreadsheetId=FINANCE_SPREADSHEET_ID,
body={
"requests": [{
"updateSheetProperties": {
"fields": "title",
"properties": {
"sheetId": new_sheet_id,
"title": name,
},
}
}],
},
).execute()
def add_transaction_to_sheet(service, account, date, name, amount, categories):
# convert categories to csv
categories_buffer = io.StringIO()
categories_writer = csv.writer(categories_buffer)
categories_writer.writerow(categories)
categories_str = categories_buffer.getvalue().rstrip()
values = [
date,
name,
amount, # real amount
categories_str, # real categories
amount, # effective amount
categories[-1] if len(categories) > 0 else '', # effective category
'No', # ack
]
body = {
'values': [values],
}
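# valueInputOption='USER_ENTERED' makes Sheets parse the values as if typed by a user,
# so the date and amount arrive as real date/number cells instead of plain strings.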
service.spreadsheets().values().append(
spreadsheetId=FINANCE_SPREADSHEET_ID, range=account,
valueInputOption='USER_ENTERED', body=body).execute()
def process_transaction(request):
body = request.get_json()
account = body['transaction']['accountName']
date = body['transaction']['date']
name = body['transaction']['name']
amount = body['transaction']['amount']
categories = body['transaction']['category']
creds = get_google_creds()
service = build('sheets', 'v4', credentials=creds, cache_discovery=False)
if not sheet_exists(service, account):
create_sheet(service, account)
add_transaction_to_sheet(service, account, date, name, amount, categories)
| jchorl/auditor-functions | sheets/main.py | main.py | py | 3,949 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.getenv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "os.path.getenv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16... |
38219482418 | from pynput import keyboard
from score_following_game.agents.optimal_agent import OptimalAgent
human_action = 1
def on_press(key):
global human_action
if key == keyboard.Key.left:
human_action = 0
if key == keyboard.Key.right:
human_action = 2
class HumanAgent(OptimalAgent):
def __init__(self, rl_pool):
super(HumanAgent, self).__init__(rl_pool)
keyboard.Listener(on_press=on_press).start()
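# The listener runs in a background thread and updates the module-level human_action
# flag; select_action reads it once per step and then resets it to the neutral action (1).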
def select_action(self, state, train=False):
global human_action
action = human_action
human_action = 1
return action
| CPJKU/score_following_game | score_following_game/agents/human_agent.py | human_agent.py | py | 603 | python | en | code | 47 | github-code | 1 | [
{
"api_name": "pynput.keyboard.Key",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pynput.keyboard",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "pynput.keyboard.Key",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pynpu... |
21094423421 | import random
from datetime import datetime
import pytest
from june import paths
from june.activity import activity_hierarchy
from june.epidemiology.epidemiology import Epidemiology
from june.epidemiology.infection import Immunity, InfectionSelector, InfectionSelectors
from june.groups.leisure import leisure
from june.groups.travel import Travel
from june.interaction import Interaction
from june.policy import Hospitalisation, MedicalCarePolicies, Policies
from june.simulator import Simulator
from camps.activity import CampActivityManager
from camps.paths import camp_data_path, camp_configs_path
from camps.world import World
from camps.groups.leisure import generate_leisure_for_world, generate_leisure_for_config
from camps.camp_creation import (
generate_empty_world,
populate_world,
distribute_people_to_households,
) # this is loaded from the ../camp_scripts folder
config_file_path = camp_configs_path / "config_demo.yaml"
interactions_file_path = camp_configs_path / "defaults/interaction/interaction_Survey.yaml"
def test__everyone_has_an_activity(camps_sim):
for person in camps_sim.world.people.members:
assert person.subgroups.iter().count(None) != len(person.subgroups.iter())
def test__apply_activity_hierarchy(camps_sim):
unordered_activities = random.sample(activity_hierarchy, len(activity_hierarchy))
ordered_activities = camps_sim.activity_manager.apply_activity_hierarchy(
unordered_activities
)
assert ordered_activities == activity_hierarchy
def test__clear_world(camps_sim: Simulator):
camps_sim.clear_world()
for group_name in camps_sim.activity_manager.activities_to_super_groups(
camps_sim.activity_manager.all_activities
):
if group_name in ["shelter_visits"]:
continue
grouptype = getattr(camps_sim.world, group_name)
for group in grouptype.members:
for subgroup in group.subgroups:
assert len(subgroup.people) == 0
for person in camps_sim.world.people.members:
assert person.busy is False
def test__move_to_active_subgroup(camps_sim: Simulator):
camps_sim.activity_manager.move_to_active_subgroup(
["residence"], camps_sim.world.people.members[0]
)
assert camps_sim.world.people.members[0].residence.group.spec in ("shelter")
def test__move_people_to_leisure(camps_sim: Simulator):
n_leisure = 0
n_pump_latrines = 0
n_e_vouchers = 0
n_distributions = 0
n_informal_works = 0
n_n_f_distribution_centers = 0
n_play_groups = 0
n_religiouss = 0
n_communals = 0
n_female_communals = 0
repetitions = 100
camps_sim.activity_manager.leisure.generate_leisure_probabilities_for_timestep(
delta_time=3,
working_hours=False,
date=datetime.strptime("2020-03-01-12", "%Y-%m-%d-%H"),
)
for _ in range(repetitions):
camps_sim.clear_world()
camps_sim.activity_manager.move_people_to_active_subgroups(["leisure", "residence"])
for person in camps_sim.world.people.members:
if person.leisure is not None:
n_leisure += 1
if person.leisure.group.spec == "pump_latrine":
n_pump_latrines += 1
elif person.leisure.group.spec == "e_voucher":
n_e_vouchers += 1
elif person.leisure.group.spec == "distribution_center":
n_distributions += 1
elif person.leisure.group.spec == "informal_work":
n_informal_works += 1
elif person.leisure.group.spec == "n_f_distribution_center":
n_n_f_distribution_centers += 1
elif person.leisure.group.spec == "play_group":
n_play_groups += 1
elif person.leisure.group.spec == "religious":
n_religiouss += 1
elif person.leisure.group.spec == "communal":
n_communals += 1
elif person.leisure.group.spec == "female_communal":
n_female_communals += 1
if person not in person.residence.people:
assert person in person.leisure.people
assert n_leisure > 0
assert n_pump_latrines > 0
assert n_e_vouchers > 0
assert n_distributions > 0
assert n_informal_works > 0
assert n_n_f_distribution_centers > 0
assert n_play_groups > 0
assert n_religiouss > 0
assert n_communals > 0
assert n_female_communals > 0
camps_sim.clear_world()
def test__venues_closed_outside_hours(camps_sim: Simulator):
n_leisure = 0
n_communals = 0
n_female_communals = 0
repetitions = 10
camps_sim.activity_manager.leisure.generate_leisure_probabilities_for_timestep(
delta_time=3,
working_hours=False,
date=datetime.strptime("2020-03-01-01", "%Y-%m-%d-%H"),
)
for _ in range(repetitions):
camps_sim.clear_world()
camps_sim.activity_manager.move_people_to_active_subgroups(["leisure", "residence"])
for person in camps_sim.world.people.members:
if person.leisure is not None:
n_leisure += 1
if person.leisure.group.spec == "communal":
n_communals += 1
elif person.leisure.group.spec == "female_communal":
n_female_communals += 1
if person not in person.residence.people:
assert person in person.leisure.people
assert n_communals == 0
assert n_female_communals == 0
camps_sim.clear_world()
def test__bury_the_dead(camps_sim: Simulator):
dummy_person = camps_sim.world.people.members[0]
camps_sim.epidemiology.infection_selectors.infect_person_at_time(dummy_person, 0.0)
camps_sim.epidemiology.bury_the_dead(camps_sim.world, dummy_person)
assert dummy_person in camps_sim.world.cemeteries.members[0].people
assert dummy_person.dead
assert dummy_person.infection is None
| UNGlobalPulse/UNGP-settlement-modelling | test_camps/test_simulator.py | test_simulator.py | py | 6,046 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "camps.paths.camp_configs_path",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "camps.paths.camp_configs_path",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "random.sample",
"line_number": 33,
"usage_type": "call"
},
{
"api_nam... |
8913177888 | from django.urls import path
from .views import cart_detail,cart_add,cart_remove
app_name='shopping'
urlpatterns = [
path('<int:cart_id>/',cart_detail,name='cart_detail'),
path('add/<int:product_id>/', cart_add, name='cart_add'),
path('remove/<int:product_id>/', cart_remove, name='cart_remove'),
]
| UhuruV/group2_greenskiosk | greenskiosk/shopping/urls.py | urls.py | py | 314 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "views.cart_detail",
"line_number": 7,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "views.cart_add",... |
13799406873 | import argparse
import trimesh
import numpy as np
import json
from math import *
import math
import requests
import geopandas as gpd
from shapely import Polygon
# TODO: handle the command-line arguments more robustly
ap = argparse.ArgumentParser()
ap.add_argument("--method", help="method of gps", type=str)
ap.add_argument("--GPSFile", help="GPSFile", type=str)
ap.add_argument("--latInputPoint", help="latitudeCustom", type=float)
ap.add_argument("--lonInputPoint", help="longitudeCustom", type=float)
ap.add_argument("--geoJson", help="output GeoJson", type=str)
ap.add_argument("--outputObj", help="output obj", type=str)
args = ap.parse_args()
#what method of localisation
if args.method == "auto":
# Opening JSON file
with open(args.GPSFile, 'r') as inputfile:
# Reading from json file
json_object = json.load(inputfile)
latitude = json_object["latitude"]
longitude = json_object["longitude"]
location=(latitude, longitude)
else:
location = (args.latInputPoint, args.lonInputPoint)
#conversion of the longitude and latitude into tiles
def deg2num(lat_deg, lon_deg, zoom):
lat_rad = math.radians(lat_deg)
n = 2.0 ** zoom
xtile = int((lon_deg + 180.0) / 360.0 * n)
ytile = int((1.0 - math.asinh(math.tan(lat_rad)) / math.pi) / 2.0 * n)
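# Standard Web-Mercator "slippy map" tile math: at zoom z there are 2**z tiles per axis;
# x grows linearly with longitude and y with the Mercator-projected latitude.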
return (xtile, ytile)
lat_deg, lon_deg = location[0], location[1]
zoom = 15
xtile, ytile = deg2num(lat_deg=lat_deg, lon_deg=lon_deg, zoom=zoom)
#request on OSM Buildings
url = "https://data.osmbuildings.org/0.2/anonymous/tile/{0}/{1}/{2}.json".format(zoom, xtile, ytile)
agentheader = {'User-Agent': 'PostmanRuntime/7.28.4'}
response = requests.get(url,headers = agentheader)
data = json.loads(response.text)
#export the GeoJSON
with open(args.geoJson, 'w') as f:
json.dump(data, f)
#get the polygons and heights in the GeoJSON
gdf = gpd.read_file(args.geoJson)
buildings_geojson = []
heights_geojson = []
for polygon in gdf[gdf.geometry.type == "Polygon"].geometry:
buildings_geojson.append(Polygon(polygon.exterior.coords))
for height in gdf.height:
heights_geojson.append(height)
#convert the coordinates for the mesh
factor = 1000
min_heights = min(heights_geojson)
max_heights = max(heights_geojson)
buildings_mesh = []
heights_2 = []
max_x, max_y = 0, 0
for i in range(len(buildings_geojson)):
if max(buildings_geojson[i].exterior.coords.xy[0]) > max_x:
max_x = max(buildings_geojson[i].exterior.coords.xy[0])
if max(buildings_geojson[i].exterior.coords.xy[1]) > max_y:
max_y = max(buildings_geojson[i].exterior.coords.xy[1])
for i in range(len(buildings_geojson)):
tab = []
for x, y in buildings_geojson[i].exterior.coords:
x = x - max_x
y = y - max_y
x = x * factor
y = y * factor
x = round(x, 3)
y = round(y, 3)
tab.append((x, y))
buildings_mesh.append(Polygon(tab))
for i in range(len(heights_geojson)):
h = heights_geojson[i] / max_heights
heights_2.append(h)
#convert camera position the same way
position_x = location[1]
position_x -= max_x
position_x *= factor
position_x = round(position_x, 3)
position_y = location[0]
position_y -= max_y
position_y *= factor
position_y = round(position_y, 3)
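# The mesh now lives in an ad-hoc local frame: lon/lat shifted by (max_x, max_y),
# scaled by `factor`, with building heights normalised to the tallest building.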
#add all mesh to a scene
scene = trimesh.Scene()
for i in range(len(buildings_mesh)):
mesh = trimesh.creation.extrude_polygon(buildings_mesh[i], heights_2[i])
#relocate the mesh to align to the camera position
mesh.apply_transform(trimesh.transformations.rotation_matrix(np.deg2rad(90), [1, 0, 0]))
mesh.apply_transform(trimesh.transformations.rotation_matrix(np.deg2rad(180), [0, 0, 1]))
mesh.apply_transform(trimesh.transformations.rotation_matrix(np.deg2rad(180), [0, 1, 0]))
mesh.apply_transform(trimesh.transformations.translation_matrix([-position_x, 0, position_y]))
mesh.apply_transform(trimesh.transformations.scale_matrix(2))
scene.add_geometry(mesh)
#export the Obj
with open(args.outputObj, 'w') as file:
scene.export(
file,
file_type='obj',
)
# TODO: if the point lies near a tile edge, also fetch the neighbouring tiles (make this a parameter)
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "math.radians",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "math.asinh",
"line... |
2548449685 | """Finetune 3D CNN."""
import os
import argparse
import itertools
import time
import math
import random
import builtins
import warnings
import string
import numpy as np
import pandas as pd
from PIL import ImageFilter
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.utils.data
import torch.multiprocessing as mp
import torch.utils.data.distributed
from torch.utils.data import DataLoader, random_split
from torchvision import transforms
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from datasets.ucf101 import UCF101Dataset_Classify
# from datasets.hmdb51 import HMDB51Dataset
from models.c3d import C3D
from models.r3d import R3DNet
from models.r21d import R2Plus1DNet
class GaussianBlur(object):
def __init__(self, sigma=[.1, 2.]):
self.sigma = sigma
def __call__(self, x):
sigma = random.uniform(self.sigma[0], self.sigma[1])
x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
return x
def adjust_learning_rate(optimizer, epoch, args):
"""Decay the learning rate based on schedule"""
lr = args.lr
ft_lr =args.ft_lr
if args.cos: # cosine lr schedule
lr *= 0.5 * (1. + math.cos(math.pi * epoch / args.epochs))
ft_lr *= 0.5 * (1. + math.cos(math.pi * epoch / args.epochs))
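# Cosine schedule: lr(e) = lr0 * 0.5 * (1 + cos(pi * e / args.epochs)), decaying
# smoothly from the initial value towards zero at the final epoch.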
else: # stepwise lr schedule
for milestone in args.schedule:
lr *= 0.1 if epoch >= milestone else 1.
ft_lr *= 0.1 if epoch >= milestone else 1.
for param_group in optimizer.param_groups:
optimizer.param_groups[0]['lr'] = lr
# optimizer.param_groups[1]['lr'] = ft_lr
def load_pretrained_weights(ckpt_path):
"""load pretrained weights and adjust params name."""
adjusted_weights = {}
pretrained_weights = torch.load(ckpt_path, map_location=torch.device('cpu'))
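# Keep only the query-encoder backbone weights (names containing 'module',
# 'encoder_q' and 'base_network') and strip the prefix up to 'base_network.'
# so the keys match the plain backbone's state_dict.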
for name, params in pretrained_weights.items():
if 'module' in name and 'base_network' in name and 'encoder_q' in name:
print(name)
name = name.split('base_network.')[1]
adjusted_weights[name] = params
print('Pretrained weight name: [{}]'.format(name))
# if 'module' and 'base_network' in name:
# name = name.split('base_network.')[1]
# # name = name.replace('base_network.','')
# adjusted_weights[name] = params
# print('Pretrained weight name: [{}]'.format(name))
# if 'base_network' in name:
# name = name[name.find('.')+1:]
# adjusted_weights[name] = params
# print('Pretrained weight name: [{}]'.format(name))
return adjusted_weights
def train(args, model, criterion, optimizer, device, train_dataloader, writer, epoch,lr,ft_lr):
torch.set_grad_enabled(True)
model.train()
running_loss = 0.0
correct = 0
for i, data in enumerate(train_dataloader, 1):
# get inputs
clips, idxs = data
inputs = clips.to(device)
targets = idxs.to(device)-1
# forward and backward
outputs = model(inputs) # return logits here
loss = criterion(outputs, targets)
# zero the parameter gradients
optimizer.zero_grad()
loss.backward()
optimizer.step()
# compute loss and acc
running_loss += loss
pts = torch.argmax(outputs, dim=1)
correct += torch.sum(targets == pts)
correct = correct.type(torch.float)
torch.distributed.barrier()
# print statistics and write summary every N batch
if i % args.pf == 0:
avg_loss = running_loss / args.pf
avg_acc = correct / (args.pf * args.bs)
reduced_avg_loss = reduce_mean(avg_loss, args.ngpus_per_node)
reduced_avg_acc = reduce_mean(avg_acc, args.ngpus_per_node)
print('[TRAIN] epoch-{}, batch-{}, loss: {:.3f}, acc: {:.3f},lr:{:.6f},ft_lr:{:.6f}'.format(epoch, i, reduced_avg_loss, reduced_avg_acc,lr,ft_lr))
step = (epoch-1)*len(train_dataloader) + i
if args.gpu == 0:
writer.add_scalar('train/CrossEntropyLoss', reduced_avg_loss.item(), step)
writer.add_scalar('train/Accuracy', reduced_avg_acc.item(), step)
running_loss = 0.0
correct = 0
# summary params and grads per eopch
for name, param in model.named_parameters():
if args.gpu == 0:
writer.add_histogram('params/{}'.format(name), param, epoch)
writer.add_histogram('grads/{}'.format(name), param.grad, epoch)
def validate(args, model, criterion, device, val_dataloader, writer, epoch):
torch.set_grad_enabled(False)
model.eval()
total_loss = 0.0
correct = 0
for i, data in enumerate(val_dataloader):
# get inputs
clips, idxs = data
inputs = clips.to(device)
targets = idxs.to(device)-1
# forward
# forward
outputs = model(inputs) # return logits here
loss = criterion(outputs, targets)
# compute loss and acc
total_loss += loss
pts = torch.argmax(outputs, dim=1)
correct += torch.sum(targets == pts)
correct = correct.type(torch.float)
# print('correct: {}, {}, {}'.format(correct, targets, pts))
avg_loss = total_loss / (i + 1)
avg_acc = correct / (i + 1) / args.bs
torch.distributed.barrier()
reduced_avg_loss = reduce_mean(avg_loss, args.ngpus_per_node)
reduced_avg_acc = reduce_mean(avg_acc, args.ngpus_per_node)
if args.gpu == 0:
writer.add_scalar('val/CrossEntropyLoss', reduced_avg_loss.item(), epoch)
writer.add_scalar('val/Accuracy', reduced_avg_acc.item(), epoch)
print('[VAL] loss: {:.3f}, acc: {:.3f}'.format(reduced_avg_loss, reduced_avg_acc))
return reduced_avg_loss
def test(args, model, criterion, device, test_dataloader):
torch.set_grad_enabled(False)
model.eval()
total_loss = 0.0
correct = 0
for i, data in enumerate(test_dataloader, 1):
sampled_clips, idxs = data
targets = idxs.to(device)-1
# forward
# forward
#outputs = model(inputs)
outputs = []
for clips in sampled_clips:
inputs = clips.to(device)
# forward
o = model(inputs)
# print(o.shape)
o = torch.mean(o, dim=0)
# print(o.shape)
# exit()
outputs.append(o)
outputs = torch.stack(outputs)
loss = criterion(outputs, targets)
# compute loss and acc
total_loss += loss
pts = torch.argmax(outputs, dim=1)
# print(pts, targets)
correct += torch.sum(targets == pts)
#correct = correct.type(torch.float)
# print('correct: {}, {}, {}'.format(correct, targets, pts))
torch.distributed.barrier()
reduced_sum_loss = reduce_sum(total_loss)
reduced_sum_acc = reduce_sum(correct)
print(len(test_dataloader), len(test_dataloader.dataset))
reduced_avg_loss = reduced_sum_loss / len(test_dataloader)
reduced_sum_acc = reduced_sum_acc.type(torch.float)
reduced_avg_acc = reduced_sum_acc / len(test_dataloader.dataset)
print('[TEST] loss: {:.3f}, acc: {:.3f}'.format(reduced_avg_loss, reduced_avg_acc))
return reduced_avg_loss, reduced_avg_acc
def reduce_mean(tensor, nprocs):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= nprocs
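# all_reduce sums the tensor across all ranks in place; dividing by the number of
# processes turns it into the global mean, so every rank logs identical statistics.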
return rt
def reduce_sum(tensor):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
return rt
def parse_args():
parser = argparse.ArgumentParser(description='Video Clip Order Prediction')
parser.add_argument('--mode', type=str, default='train', help='train/test')
parser.add_argument('--model', type=str, default='c3d', help='c3d/r3d/r21d')
parser.add_argument('--dataset', type=str, default='ucf101', help='ucf101/hmdb51')
parser.add_argument('--cl', type=int, default=16, help='clip length')
parser.add_argument('--split', type=str, default='1', help='dataset split')
parser.add_argument('--gpu', type=int, default=None, help='GPU id')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--ft_lr', type=float, default=1e-3, help='finetune learning rate')
parser.add_argument('--momentum', type=float, default=9e-1, help='momentum')
parser.add_argument('--wd', type=float, default=5e-4, help='weight decay')
parser.add_argument('--log', type=str, help='log directory')
parser.add_argument('--ckpt', type=str, help='checkpoint path')
parser.add_argument('--desp', type=str, help='additional description')
parser.add_argument('--schedule', default=[120,150], nargs='*', type=int,
help='learning rate schedule (when to drop lr by 10x)')
parser.add_argument('--epochs', type=int, default=180, help='number of total epochs to run')
parser.add_argument('--start-epoch', type=int, default=1, help='manual epoch number (useful on restarts)')
parser.add_argument('--bs', type=int, default=8, help='mini-batch size')
parser.add_argument('--workers', type=int, default=4, help='number of data loading workers')
parser.add_argument('--pf', type=int, default=100, help='print frequency every batch')
parser.add_argument('--seed', type=int, default=None, help='seed for initializing training.')
parser.add_argument('--world-size', default=-1, type=int, help='number of nodes for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--cos', action='store_true',
help='use cosine lr schedule')
args = parser.parse_args()
return args
def main():
args = parse_args()
if args.seed is not None:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
args.ngpus_per_node = ngpus_per_node
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
args.gpu = gpu
cudnn.benchmark = True
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
# suppress printing if not master
if args.multiprocessing_distributed and args.gpu != 0:
def print_pass(*args):
pass
builtins.print = print_pass
print(vars(args))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
if args.dataset == 'ucf101':
class_num = 101
elif args.dataset == 'hmdb51':
class_num = 51
if args.model == 'c3d':
model = C3D(with_classifier=True, num_classes=class_num)
elif args.model == 'r3d':
model = R3DNet(layer_sizes=(3,4,6,3), with_classifier=True, num_classes=class_num)
elif args.model == 'r21d':
model = R2Plus1DNet(layer_sizes=(1,1,1,1), with_classifier=True, num_classes=class_num)
if args.mode == 'train': # ########## Train #############
if args.ckpt: # resume training
pretrained_weights = load_pretrained_weights(args.ckpt)
print(model)
load_result = model.load_state_dict(pretrained_weights, strict=False)
print(load_result)
if args.distributed:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.bs = int(args.bs / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
# vcopn.cuda()
# # DistributedDataParallel will divide and allocate batch_size to all
# # available GPUs if device_ids are not set
# vcopn = torch.nn.parallel.DistributedDataParallel(vcopn)
# log_dir = os.path.dirname(args.log)
# print(log_dir)
# else:
writer = None
if args.mode == 'train':
if args.desp:
exp_name = '{}_cl{}_{}_{}'.format(args.model, args.cl, args.desp, time.strftime('%m%d%H%M'))
else:
exp_name = '{}_cl{}_{}'.format(args.model, args.cl, time.strftime('%m%d%H%M'))
log_dir = os.path.join(args.log, exp_name)
print(log_dir)
if args.gpu == 0:
writer = SummaryWriter(log_dir)
train_transforms = transforms.Compose([
transforms.Resize((128, 171)), # smaller edge to 128
transforms.RandomCrop(112),
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.4, 0.1) # not strengthened
], p=0.5),
transforms.RandomGrayscale(p=0.2),
# transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()
])
if args.dataset == 'ucf101':
train_dataset = UCF101Dataset_Classify('data/ucf101', args.cl, args.split, True, train_transforms)
val_size = 800
elif args.dataset == 'hmdb51':
train_dataset = UCF101Dataset_Classify('data/hmdb51', args.cl, args.split, True, train_transforms)
val_size = 400
# split val for 800 videos
train_dataset, val_dataset = random_split(train_dataset, (len(train_dataset) - val_size, val_size))
print('TRAIN video number: {}, VAL video number: {}.'.format(len(train_dataset), len(val_dataset)))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
# train_dataloader = DataLoader(train_dataset, batch_size=args.bs, shuffle=True,
# num_workers=args.workers, pin_memory=True)
train_dataloader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.bs, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
# val_dataloader = DataLoader(val_dataset, batch_size=args.bs, shuffle=False,
# num_workers=args.workers, pin_memory=True)
val_dataloader = torch.utils.data.DataLoader(
val_dataset, batch_size=args.bs, shuffle=(val_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=val_sampler)
# save graph and clips_order samples
for data in train_dataloader:
clips, idxs = data
if args.gpu == 0:
writer.add_video('train/clips', clips, fps=8)
writer.add_text('train/idxs', str(idxs.tolist()))
# clips = clips
clips = clips.to(args.gpu)
if args.gpu == 0:
writer.add_graph(model.module, clips)
break
# save init params at step 0
for name, param in model.named_parameters():
if args.gpu == 0:
writer.add_histogram('params/{}'.format(name), param, 0)
# ## loss funciton, optimizer and scheduler ###
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = optim.SGD([
{'params': [param for name, param in model.named_parameters() if
'linear' not in name and 'conv5' not in name and 'conv4' not in name]},
{'params': [param for name, param in model.named_parameters() if
'linear' in name or 'conv5' in name or 'conv4' in name], 'lr': args.ft_lr}],
lr=args.lr, momentum=args.momentum, weight_decay=args.wd)
# optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.wd)
# scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', min_lr=1e-5, patience=50, factor=0.1)
prev_best_val_loss = float('inf')
prev_best_model_path = None
for epoch in range(args.start_epoch, args.start_epoch+args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
time_start = time.time()
adjust_learning_rate(optimizer, epoch, args)
lr = optimizer.param_groups[0]['lr']
# ft_lr = optimizer.param_groups[1]['lr']
ft_lr = lr
train(args, model, criterion, optimizer, args.gpu, train_dataloader, writer, epoch, lr, ft_lr)
print('Epoch time: {:.2f} s.'.format(time.time() - time_start))
val_loss = validate(args, model, criterion, args.gpu, val_dataloader, writer, epoch)
# scheduler.step(val_loss)
if args.gpu == 0:
writer.add_scalar('train/lr', optimizer.param_groups[0]['lr'], epoch)
# writer.add_scalar('train/ft_lr', optimizer.param_groups[1]['lr'], epoch)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed and args.rank % ngpus_per_node == 0):
# save model every 20 epoches
if epoch % 10 == 0 or epoch == args.epochs:
torch.save(model.state_dict(), os.path.join(log_dir, 'model_{}.pt'.format(epoch)))
# save model for the best val
if val_loss < prev_best_val_loss:
model_path = os.path.join(log_dir, 'best_model_{}.pt'.format(epoch))
torch.save(model.state_dict(), model_path)
prev_best_val_loss = val_loss
if prev_best_model_path:
os.remove(prev_best_model_path)
prev_best_model_path = model_path
if epoch == args.start_epoch+args.epochs -1 or epoch % 10 == 0:
# model.load_state_dict(torch.load(args.ckpt))
test_transforms = transforms.Compose([
transforms.Resize((128, 171)),
transforms.CenterCrop(112),
transforms.ToTensor()
])
if args.dataset == 'ucf101':
test_dataset = UCF101Dataset_Classify('data/ucf101', args.cl, args.split, False, test_transforms)
elif args.dataset == 'hmdb51':
test_dataset = UCF101Dataset_Classify('data/hmdb51', args.cl, args.split, False, test_transforms)
test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)
# test_dataloader = DataLoader(test_dataset, batch_size=args.bs, shuffle=False,
# num_workers=args.workers, pin_memory=True)
test_dataloader = torch.utils.data.DataLoader(
test_dataset, batch_size=args.bs, shuffle=False,
num_workers=args.workers, pin_memory=True, sampler=test_sampler)
print('TEST video number: {}.'.format(len(test_dataset)))
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
_, reduced_avg_acc = test(args, model, criterion, args.gpu, test_dataloader)
if args.gpu == 0:
writer.add_scalar('test/Accuracy', reduced_avg_acc.item(), epoch)
# ########## Test #############
elif args.mode == 'test':
model.load_state_dict(torch.load(args.ckpt), strict=True)
test_transforms = transforms.Compose([
transforms.Resize((128, 171)),
transforms.CenterCrop(112),
transforms.ToTensor()
])
if args.dataset == 'ucf101':
test_dataset = UCF101Dataset_Classify('data/ucf101', args.cl, args.split, False, test_transforms)
elif args.dataset == 'hmdb51':
test_dataset = UCF101Dataset_Classify('data/hmdb51', args.cl, args.split, False, test_transforms)
test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)
# test_dataloader = DataLoader(test_dataset, batch_size=args.bs, shuffle=False,
# num_workers=args.workers, pin_memory=True)
test_dataloader = torch.utils.data.DataLoader(
test_dataset, batch_size=args.bs, shuffle=False,
num_workers=args.workers, pin_memory=True, sampler=test_sampler)
print('TEST video number: {}.'.format(len(test_dataset)))
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
test(args, model, criterion, args.gpu, test_dataloader)
if __name__ == '__main__':
main()
| guoshengcv/CACL | train_finetune.py | train_finetune.py | py | 23,057 | python | en | code | 22 | github-code | 1 | [
{
"api_name": "random.uniform",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFilter.GaussianBlur",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFilter",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "math.cos"... |
8118820886 | #
# Introduction to Pygame - Episode 25 - Changing the movement speed
#
# https://kreatuto.info
#
import pygame
# Window background colour
COULEUR_FOND = (255, 255, 255)
# Background colour of the command area
COULEUR_FOND_CMD = (211, 211, 211) # LIGHT_GREY
# Colour of the text displayed in the command area
COULEUR_TXT_CMD = (1, 49, 180) # SAPPHIRE_BLUE
# Height of the command area
HAUTEUR_CMD = 100
# FPS = frames per second
FPS = 25
pygame.init()
# Load the font used to display the speed
TAILLE_POLICE = 48
police = pygame.font.Font("SuperBubble-Rpaj3.ttf", TAILLE_POLICE)
# Clock used to wait before refreshing the display
horloge = pygame.time.Clock()
# Load the window background image
fond_mountain = pygame.image.load("mountains-757731_600.jpg")
# The window dimensions are those of the mountain image
LARGEUR, HAUTEUR = fond_mountain.get_size()
# Initialise the pygame window
fen = pygame.display.set_mode((LARGEUR, HAUTEUR))
pygame.display.set_caption("Épisode 25")
fen.fill(COULEUR_FOND)
# Prepare the background for use with Pygame
fond_mountain = fond_mountain.convert()
fond_mountain.set_alpha(200)
# Prepare the speed control buttons
# All the buttons have the same dimensions
btn_plus = pygame.image.load("./btn/plus_47x50_normal.png").convert_alpha()
btn_moins_on = pygame.image.load("./btn/minus_47x50_normal.png").convert_alpha()
btn_moins_off = pygame.image.load("./btn/minus_47x50_locked.png").convert_alpha()
LARGEUR_BTN, HAUTEUR_BTN = btn_plus.get_size()
DECALAGE_X_BTN = 200
POS_X_BTN_PLUS = LARGEUR - DECALAGE_X_BTN
POS_X_BTN_MOINS = DECALAGE_X_BTN - LARGEUR_BTN
POS_Y_BTN = HAUTEUR - HAUTEUR_CMD + (HAUTEUR_CMD - HAUTEUR_BTN) // 2
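# The buttons are vertically centred inside the command strip:
# y = top of the strip + (strip height - button height) // 2.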
# Rectangle containing each control button
zone_cmd_plus = pygame.Rect(POS_X_BTN_PLUS, POS_Y_BTN, LARGEUR_BTN, HAUTEUR_BTN)
zone_cmd_moins = pygame.Rect(POS_X_BTN_MOINS, POS_Y_BTN, LARGEUR_BTN, HAUTEUR_BTN)
# Divisor applied to the size of the dog image
DIV_CHIEN = 2.5
# Prepare the image of the first dog sprite
chien = pygame.image.load("./dog/Walk (1).png").convert_alpha()
LARGEUR_CHIEN, HAUTEUR_CHIEN = (int(chien.get_width() / DIV_CHIEN), int(chien.get_height() / DIV_CHIEN))
chien = pygame.transform.scale(chien, (LARGEUR_CHIEN, HAUTEUR_CHIEN))
# Number of dog sprites
NB_CHIENS = 10
# Initialise the list of dog sprites.
# The sprites are numbered from 1 to 10, so we keep that numbering
# and leave index 0 unused.
chiens = [None, chien]
for n in range(2, NB_CHIENS + 1):
# Prepare the image of dog sprite number n
chien = pygame.image.load(f"./dog/Walk ({n:d}).png").convert_alpha()
chien = pygame.transform.scale(chien, (LARGEUR_CHIEN, HAUTEUR_CHIEN))
chiens.append(chien)
continuer = True
# Sprite number
num_chien = 1
# Dog coordinates
pos_x = (LARGEUR-LARGEUR_CHIEN) // 2
pos_y = (HAUTEUR - HAUTEUR_CHIEN - HAUTEUR_CMD) // 2
# Horizontal movement speed of the dog
vitesse_x = 2
while continuer:
for event in pygame.event.get():
# Does the user want to close the window?
if event.type == pygame.QUIT:
continuer = False
# Has the user released a mouse button?
elif event.type == pygame.MOUSEBUTTONUP:
if zone_cmd_plus.collidepoint(event.pos):
vitesse_x += 1
elif zone_cmd_moins.collidepoint(event.pos) and vitesse_x > 0:
vitesse_x -= 1
# Update the horizontal position of the dog
pos_x += vitesse_x
if pos_x >= LARGEUR:
pos_x = -LARGEUR_CHIEN
# Switch to the next sprite used as the dog image
if vitesse_x != 0:
num_chien += 1
if num_chien > NB_CHIENS:
num_chien = 1
# Redraw the frame
fen.fill(COULEUR_FOND)
# Draw the background
fen.blit(fond_mountain, (0, 0))
# Draw the command area
pygame.draw.rect(fen, COULEUR_FOND_CMD, [0, HAUTEUR - HAUTEUR_CMD, LARGEUR, HAUTEUR_CMD])
# Render the current speed
msg_vitesse = police.render(str(vitesse_x), True, COULEUR_TXT_CMD)
largeur_msg, _ = msg_vitesse.get_size()
# Draw the speed text horizontally centred
fen.blit(msg_vitesse, ((LARGEUR - largeur_msg) // 2, HAUTEUR - HAUTEUR_CMD + (HAUTEUR_CMD - TAILLE_POLICE) // 2))
# Draw the speed control buttons
fen.blit(btn_plus, (POS_X_BTN_PLUS, POS_Y_BTN))
if vitesse_x > 0:
fen.blit(btn_moins_on, (POS_X_BTN_MOINS, POS_Y_BTN))
else:
fen.blit(btn_moins_off, (POS_X_BTN_MOINS, POS_Y_BTN))
# Draw the dog sprite at its new position
fen.blit(chiens[num_chien], (pos_x, pos_y))
# Update the display
pygame.display.flip()
# Wait to keep the frame rate at FPS
horloge.tick(FPS)
pygame.quit()
| fred-lefevre/pygame | pygame-episode-25/changer-vitesse.py | changer-vitesse.py | py | 4,932 | python | fr | code | 1 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pygame.font.Font",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.Clock",
... |
8703812591 | from __future__ import annotations
import re
import xml.etree.ElementTree as ET
from datetime import datetime
from operator import itemgetter
from typing import TYPE_CHECKING
from urllib.parse import urlparse
import ds_caselaw_utils as caselawutils
from caselawclient.Client import (
DEFAULT_USER_AGENT,
MarklogicApiClient,
MarklogicAPIError,
)
from caselawclient.models.documents import DocumentURIString
from caselawclient.models.press_summaries import PressSummary
from django.conf import settings
from django.contrib.auth.models import Group, User
from .aws import copy_assets
api_client = MarklogicApiClient(
host=settings.MARKLOGIC_HOST,
username=settings.MARKLOGIC_USER,
password=settings.MARKLOGIC_PASSWORD,
use_https=settings.MARKLOGIC_USE_HTTPS,
user_agent=f"ds-caselaw-editor/unknown {DEFAULT_USER_AGENT}",
)
if TYPE_CHECKING:
from caselawclient.models.documents import Document
VERSION_REGEX = r"xml_versions/(\d{1,10})-(\d{1,10}|TDR)"
# Here we limit the number of digits in the version and document reference to 10 on purpose, see
# https://owasp.org/www-community/attacks/Regular_expression_Denial_of_Service_-_ReDoS for an explanation of why.
akn_namespace = {"akn": "http://docs.oasis-open.org/legaldocml/ns/akn/3.0"}
uk_namespace = {"uk": "https://caselaw.nationalarchives.gov.uk/akn"}
class MoveJudgmentError(Exception):
pass
class NeutralCitationToUriError(Exception):
pass
def is_url_relative(url):
return not bool(urlparse(url).netloc)
def ensure_local_referer_url(request, default="/"):
"""
Make sure that we do not redirect the user to a website we do not control.
In future we should explicitly specify a return URL in the POST data when clicking a button,
rather than parsing the HTTP_REFERER header.
"""
referer = request.META.get("HTTP_REFERER")
if referer:
parsed = urlparse(referer)
is_url_local = parsed.netloc in ["", request.get_host()]
if is_url_local:
return referer
return default
def format_date(date):
if date == "" or date is None:
return None
time = datetime.strptime(date, "%Y-%m-%d")
return time.strftime("%d-%m-%Y")
def get_judgment_root(judgment_xml) -> str:
try:
parsed_xml = ET.XML(bytes(judgment_xml, encoding="utf-8"))
except ET.ParseError:
return "error"
return parsed_xml.tag
def update_document_uri(old_uri, new_citation):
"""
Move the document at old_uri to the correct location based on the neutral citation
The new neutral citation *must* not already exist (that is handled elsewhere)
"""
new_uri = caselawutils.neutral_url(new_citation.strip())
if new_uri is None:
msg = f"Unable to form new URI for {old_uri} from neutral citation: {new_citation}"
raise NeutralCitationToUriError(
msg,
)
if api_client.document_exists(new_uri):
msg = f"The URI {new_uri} generated from {new_citation} already exists, you cannot move this document to a pre-existing Neutral Citation Number."
raise MoveJudgmentError(
msg,
)
try:
api_client.copy_document(old_uri, new_uri)
set_metadata(old_uri, new_uri)
copy_assets(old_uri, new_uri)
api_client.set_judgment_this_uri(new_uri)
except MarklogicAPIError as e:
msg = (
f"Failure when attempting to copy document from {old_uri} to {new_uri}: {e}"
)
raise MoveJudgmentError(
msg,
) from e
try:
api_client.delete_judgment(old_uri)
except MarklogicAPIError as e:
msg = f"Failure when attempting to delete document from {old_uri}: {e}"
raise MoveJudgmentError(
msg,
) from e
return new_uri
def set_metadata(old_uri, new_uri):
source_organisation = api_client.get_property(old_uri, "source-organisation")
source_name = api_client.get_property(old_uri, "source-name")
source_email = api_client.get_property(old_uri, "source-email")
transfer_consignment_reference = api_client.get_property(
old_uri,
"transfer-consignment-reference",
)
transfer_received_at = api_client.get_property(old_uri, "transfer-received-at")
for key, value in [
("source-organisation", source_organisation),
("source-name", source_name),
("source-email", source_email),
("transfer-consignment-reference", transfer_consignment_reference),
("transfer-received-at", transfer_received_at),
]:
if value is not None:
api_client.set_property(new_uri, key, value)
"""
`published` is a boolean property and set differently, technically
these failures should be unpublished but copy the property just in case.
"""
published = api_client.get_published(old_uri)
api_client.set_boolean_property(new_uri, "published", bool(published))
def render_versions(decoded_versions):
versions = [
{
"uri": part.text.rstrip(".xml"),
"version": extract_version(part.text),
}
for part in decoded_versions
]
return sorted(versions, key=lambda d: -d["version"])
def extract_version(version_string: str) -> int:
result = re.search(VERSION_REGEX, version_string)
return int(result.group(1)) if result else 0
def editors_dict():
if settings.EDITORS_GROUP_ID:
editors_group = Group.objects.get(id=settings.EDITORS_GROUP_ID)
editors = editors_group.user_set.filter(is_active=True)
else:
editors = User.objects.filter(is_active=True)
return sorted(
[
{
"name": editor.get_username(),
"print_name": editor.get_full_name() or editor.get_username(),
}
for editor in editors
],
key=itemgetter("print_name"),
)
def get_linked_document_uri(document: Document) -> str | None:
related_uri = _build_related_document_uri(document)
return (
related_uri
if api_client.document_exists(DocumentURIString(related_uri))
else None
)
def _build_related_document_uri(document: Document) -> str:
press_summary_suffix = "/press-summary/1"
if isinstance(document, PressSummary):
return document.uri.removesuffix(press_summary_suffix)
return document.uri + press_summary_suffix
| nationalarchives/ds-caselaw-editor-ui | judgments/utils/__init__.py | __init__.py | py | 6,412 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "caselawclient.Client.MarklogicApiClient",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.MARKLOGIC_HOST",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 24,
"usage_type": ... |
72594416674 | import torch
import torch.nn as nn
class UNet(nn.Module):
"""
add zero padding
"""
def __init__(self, num_classes=12):
super(UNet, self).__init__()
self.enc1_1 = self.CBR2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True)
self.enc1_2 = self.CBR2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True)
self.pool1 = nn.MaxPool2d(kernel_size=2)
self.enc2_1 = self.CBR2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1, bias=True)
self.enc2_2 = self.CBR2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1, bias=True)
self.pool2 = nn.MaxPool2d(kernel_size=2)
self.enc3_1 = self.CBR2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1, bias=True)
self.enc3_2 = self.CBR2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1, bias=True)
self.pool3 = nn.MaxPool2d(kernel_size=2)
self.enc4_1 = self.CBR2d(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1, bias=True)
self.enc4_2 = self.CBR2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1, bias=True)
self.pool4 = nn.MaxPool2d(kernel_size=2)
self.enc5_1 = self.CBR2d(in_channels=512, out_channels=1024, kernel_size=3, stride=1, padding=1, bias=True)
self.enc5_2 = self.CBR2d(in_channels=1024, out_channels=1024, kernel_size=3, stride=1, padding=1, bias=True)
self.upconv4 = nn.ConvTranspose2d(in_channels=1024, out_channels=512, kernel_size=2, stride=2, padding=0, bias=True)
self.dec4_2 = self.CBR2d(in_channels=1024, out_channels=512, kernel_size=3, stride=1, padding=1, bias=True)
self.dec4_1 = self.CBR2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1, bias=True)
self.upconv3 = nn.ConvTranspose2d(in_channels=512, out_channels=256, kernel_size=2, stride=2, padding=0, bias=True)
self.dec3_2 = self.CBR2d(in_channels=512, out_channels=256, kernel_size=3, stride=1, padding=1, bias=True)
self.dec3_1 = self.CBR2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1, bias=True)
self.upconv2 = nn.ConvTranspose2d(in_channels=256, out_channels=128, kernel_size=2, stride=2, padding=0, bias=True)
self.dec2_2 = self.CBR2d(in_channels=256, out_channels=128, kernel_size=3, stride=1, padding=1, bias=True)
self.dec2_1 = self.CBR2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1, bias=True)
self.upconv1 = nn.ConvTranspose2d(in_channels=128, out_channels=64, kernel_size=2, stride=2, padding=0, bias=True)
self.dec1_2 = self.CBR2d(in_channels=128, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True)
self.dec1_1 = self.CBR2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True)
self.score_fr = nn.Conv2d(in_channels=64, out_channels=num_classes, kernel_size=1, stride=1, padding=0, bias=True)
def forward(self, x):
enc1_1 = self.enc1_1(x)
enc1_2 = self.enc1_2(enc1_1)
pool1 = self.pool1(enc1_2)
enc2_1 = self.enc2_1(pool1)
enc2_2 = self.enc2_2(enc2_1)
pool2 = self.pool2(enc2_2)
enc3_1 = self.enc3_1(pool2)
enc3_2 = self.enc3_2(enc3_1)
pool3 = self.pool3(enc3_2)
enc4_1 = self.enc4_1(pool3)
enc4_2 = self.enc4_2(enc4_1)
pool4 = self.pool4(enc4_2)
enc5_1 = self.enc5_1(pool4)
enc5_2 = self.enc5_2(enc5_1)
upconv4 = self.upconv4(enc5_2)
# crop_enc4_2 = self.crop_img(enc4_2, upconv4.size()[2])
# cat4 = torch.cat([upconv4, crop_enc4_2], dim=1)
cat4 = torch.cat([upconv4, enc4_2], dim=1)
dec4_2 = self.dec4_2(cat4)
dec4_1 = self.dec4_1(dec4_2)
upconv3 = self.upconv3(dec4_1)
# crop_enc3_2 = self.crop_img(enc3_2, upconv3.size()[2])
# cat3 = torch.cat([upconv3, crop_enc3_2], dim=1)
cat3 = torch.cat([upconv3, enc3_2], dim=1)
dec3_2 = self.dec3_2(cat3)
dec3_1 = self.dec3_1(dec3_2)
upconv2 = self.upconv2(dec3_1)
# crop_enc2_2 = self.crop_img(enc2_2, upconv2.size()[2])
# cat2 = torch.cat([upconv2, crop_enc2_2], dim=1)
cat2 = torch.cat([upconv2, enc2_2], dim=1)
dec2_2 = self.dec2_2(cat2)
dec2_1 = self.dec2_1(dec2_2)
upconv1 = self.upconv1(dec2_1)
# crop_enc1_2 = self.crop_img(enc1_2, upconv1.size()[2])
# cat1 = torch.cat([upconv1, crop_enc1_2], dim=1)
cat1 = torch.cat([upconv1, enc1_2], dim=1)
dec1_2 = self.dec1_2(cat1)
dec1_1 = self.dec1_1(dec1_2)
x = self.score_fr(dec1_1)
return x
def CBR2d(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=True):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=kernel_size, stride=stride, padding=padding, bias=bias),
nn.BatchNorm2d(num_features=out_channels),
nn.ReLU()
)
def crop_img(self, in_tensor, out_size):
dim1, dim2 = in_tensor.size()[2:]
out_tensor = in_tensor[:,
:,
int((dim1-out_size)/2):int((dim1+out_size)/2),
int((dim2-out_size)/2):int((dim2+out_size)/2),
]
return out_tensor
# 구현된 model에 임의의 input을 넣어 output이 잘 나오는지 test
model = UNet(num_classes=12)
x = torch.randn([1, 3, 512, 512])
print("input shape : ", x.shape)
out = model(x)
print("output shape : ", out.size()) | bcaitech1/p3-ims-obd-doggum | sooho_seg/models/UNet.py | UNet.py | py | 5,802 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_n... |
42577664395 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 5 17:57 2021
@author: Pedro Vieira
@description: Implements the test function for the DFFN network published in https://github.com/weiweisong415/Demo_DFFN_for_TGRS2018
"""
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import numpy as np
from sklearn import metrics
from tqdm import tqdm
from utils.config import DFFNConfig
from utils.dataset import DFFNDataset
from utils.tools import *
from net.dffn import DFFN
# Import tensorboard
from torch.utils.tensorboard import SummaryWriter
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
########################
# SET TEST CONFIG FILE #
########################
CONFIG_FILE = '' # Empty string to load default 'config.yaml'
# Test DFFN runs
def test():
# Load config data from training
config_file = 'config.yaml' if not CONFIG_FILE else CONFIG_FILE
cfg = DFFNConfig(config_file, test=True)
# Start tensorboard
writer = None
if cfg.use_tensorboard:
writer = SummaryWriter(cfg.tensorboard_folder)
# Set string modifier if testing best models
test_best = 'best_' if cfg.test_best_models else ''
if cfg.test_best_models:
print('Testing best models from each run!')
# Load processed dataset
data = torch.load(cfg.exec_folder + 'proc_data.pth')
for run in range(cfg.num_runs):
print(f'TESTING RUN {run + 1}/{cfg.num_runs}')
# Load test ground truth and initialize test loader
_, test_gt, _ = HSIData.load_samples(cfg.split_folder, cfg.train_split, cfg.val_split, run)
test_dataset = DFFNDataset(data, test_gt, cfg.sample_size, data_augmentation=False)
test_loader = DataLoader(test_dataset, batch_size=cfg.test_batch_size, shuffle=False)
num_classes = len(np.unique(test_gt)) - 1
# Load model
model_file = cfg.exec_folder + f'runs/dffn_{test_best}model_run_' + str(run) + '.pth'
model = nn.DataParallel(DFFN(cfg.sample_bands, num_classes))
model.load_state_dict(torch.load(model_file, map_location=device))
model.eval()
# Set model to device
model = model.to(device)
# Test model from the current run
report = test_model(model, test_loader, writer)
filename = cfg.results_folder + 'test.txt'
save_results(filename, report, run)
if cfg.use_tensorboard:
writer.close()
# Function for performing the tests for a given model and data loader
def test_model(model, loader, writer=None):
labels_pr = []
prediction_pr = []
with torch.no_grad():
total_predicted = np.array([], dtype=int)
total_labels = np.array([], dtype=int)
for i, (images, labels) in tqdm(enumerate(loader), total=len(loader)):
# for images, labels in loader:
# Get input and compute model output
images = images.to(device)
labels = labels.to(device)
outputs = model(images)
# Get predicted outputs
_, predicted = torch.max(outputs, 1)
# Save total values for analysis
total_predicted = np.append(total_predicted, predicted.cpu().numpy())
total_labels = np.append(total_labels, labels.cpu().numpy())
report = get_report(total_predicted, total_labels)
print(f'- Overall accuracy: {report["overall_accuracy"]:f}')
print(f'- Average accuracy: {report["average_accuracy"]:f}')
print(f'- Kappa coefficient: {report["kappa"]:f}')
if writer is not None:
# Accuracy per class
classes = range(9)
for i in classes:
labels_i = labels_pr == i
prediction_i = prediction_pr[:, i]
writer.add_pr_curve(str(i), labels_i, prediction_i, global_step=0)
return report
# Compute OA, AA and kappa from the results
def get_report(y_pr, y_gt):
classify_report = metrics.classification_report(y_gt, y_pr)
confusion_matrix = metrics.confusion_matrix(y_gt, y_pr)
class_accuracy = metrics.precision_score(y_gt, y_pr, average=None)
overall_accuracy = metrics.accuracy_score(y_gt, y_pr)
average_accuracy = np.mean(class_accuracy)
kappa_coefficient = kappa(confusion_matrix)
# Save report values
report = {
'classify_report': classify_report,
'confusion_matrix': confusion_matrix,
'class_accuracy': class_accuracy,
'overall_accuracy': overall_accuracy,
'average_accuracy': average_accuracy,
'kappa': kappa_coefficient
}
return report
# Compute kappa coefficient
def kappa(confusion_matrix):
data_mat = np.mat(confusion_matrix)
p_0 = 0.0
for i in range(confusion_matrix.shape[0]):
p_0 += data_mat[i, i] * 1.0
x_sum = np.sum(data_mat, axis=1)
y_sum = np.sum(data_mat, axis=0)
p_e = float(y_sum * x_sum) / np.sum(data_mat)**2
oa = float(p_0 / np.sum(data_mat) * 1.0)
cohens_coefficient = float((oa - p_e) / (1 - p_e))
return cohens_coefficient
# Main for running test independently
def main():
test()
if __name__ == '__main__':
main()
| abandonsea/DFFN | test.py | test.py | py | 5,216 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "torch.device",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "utils.config.DFF... |
31772141626 | # coding=utf-8
import json
import telegram
import logging
from telegram.error import NetworkError, Unauthorized
from time import sleep
TOKEN = '206483377:AAHnQ_ohMuvDhI5mfbDMrHKTnTGIi7YhT6A' # Ponemos nuestro Token generado con el @BotFather
#bot.setWebhook('https://api.tekegram.org/bot/'+TOKEN+'/')
def main():
bot = telegram.Bot(TOKEN) # mi id de usuario: 23709664
updates = bot.getUpdates()
text = [u.message.text for u in updates]
    chatId = [u.message.chat.id for u in updates]
try:
update_id = len(bot.getUpdates())-1
except IndexError:
update_id = None
lastId = chatId[update_id]
lastUp = text[update_id]
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
while True:
last = 0
if lastUp == '/t' and last!=len(bot.getUpdates())-1:
tiempo(bot, lastId)
last = len(bot.getUpdates())-1
updates = bot.getUpdates()
text = [u.message.text for u in updates]
        chatId = [u.message.chat.id for u in updates]
update_id = len(bot.getUpdates())-1
lastId = chatId[update_id]
lastUp = text[update_id]
def tiempo(bot, id):
bot.sendMessage(chat_id=id, text='El tiempo de hoy: ')
def start(bot, update):
bot.sendMessage(update.message.chat_id, text='Hola pucelano')
def help(bot, update):
bot.sendMessage(update.message.chat_id, text='Help!')
if __name__ == '__main__':
main()
| LucasHG94/VallaBot | vallaBotOld.py | vallaBotOld.py | py | 1,480 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "telegram.Bot",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 29,
"usage_type": "call"
}
] |
29644079936 | from django import forms
from django.contrib import admin
from django.db import models
from indexpage.models import Section, SubSection
class SectionAdmin(admin.ModelAdmin):
formfield_overrides = {
models.TextField: {"widget": forms.Textarea(attrs={"class": "ckeditor"})},
}
class Media:
css = {"all": ("admin_style.css",)}
js = ("//cdn.ckeditor.com/4.4.7/standard/ckeditor.js",)
class SubSectionAdmin(admin.ModelAdmin):
formfield_overrides = {
models.TextField: {"widget": forms.Textarea(attrs={"class": "ckeditor"})},
}
class Media:
css = {"all": ("admin_style.css",)}
js = ("//cdn.ckeditor.com/4.4.7/standard/ckeditor.js",)
admin.site.register(Section, SectionAdmin)
admin.site.register(SubSection, SubSectionAdmin)
| Ernir/old.ernir.net | indexpage/admin.py | admin.py | py | 801 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 10,
"usage_type": "attribute"
},
... |
5746620273 | import cv2
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH,640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT,480)
cap1 = cv2.VideoCapture(1)
cap1.set(cv2.CAP_PROP_FRAME_WIDTH,640)
cap1.set(cv2.CAP_PROP_FRAME_HEIGHT,480)
while(True):
ret,frame = cap.read()
ret2,frame2 = cap1.read()
if ret2:
cv2.imshow('1',frame2)
if cv2.waitKey(1) & 0XFF ==27:
break
if ret:
cv2.imshow('2',frame)
if cv2.waitKey(1) & 0XFF == 27:
break
cap.release()
cap1.release()
cv2.destroyAllWindows()
| chuchanhee/2022ESWContest_free_1009 | Monitoring_robot.py | Monitoring_robot.py | py | 581 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PROP_FRAME_WIDTH",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FRAME_HEIGHT",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_na... |
69850112355 | #! /usr/bin/env python3
########################################################################################
#
# Script for generating a vocabulary file (character-based) on the transcriptions found
# in one or more shard folders.
#
# Author(s): Nik Vaessen
########################################################################################
import json
import pathlib
from typing import Tuple
import click
from torch.utils.data import DataLoader
from tqdm import tqdm
from data_utility.eval.speech.transform import get_default_token_list
from data_utility.pipe.primitives.shard import load_audio_samples_from_shards
from data_utility.pipe.containers import WavAudioDataSample
########################################################################################
# functionality to collect all letters in all transcripts
class WavAudioDataSampleVocabularyAggregator:
def __init__(self):
self.vocab = set()
def __call__(self, x: WavAudioDataSample):
assert isinstance(x, WavAudioDataSample)
if x.transcription is not None:
for c in x.transcription:
self.vocab.add(c)
else:
raise ValueError("sample is missing transcription")
########################################################################################
# entrypoint of script
@click.command()
@click.argument(
"dirs",
nargs=-1,
type=pathlib.Path,
required=True,
)
@click.option(
"--out",
"json_path",
type=pathlib.Path,
required=True,
)
def main(dirs: Tuple[pathlib.Path], json_path: pathlib.Path):
dp = load_audio_samples_from_shards(list(dirs), allow_partial=True)
ag = WavAudioDataSampleVocabularyAggregator()
for x in tqdm(DataLoader(dp, batch_size=None, num_workers=0)):
ag(x)
# ensure blank is always index 0 for CTC loss, and space is already defined as |
list_chars = get_default_token_list() + sorted([c for c in ag.vocab if c != " "])
char_to_idx = {c: i for i, c in enumerate(list_chars)}
idx_to_char = {v: k for k, v in char_to_idx.items()}
vocab_dict = {
"characters": list_chars,
"char_to_idx": char_to_idx,
"idx_to_char": idx_to_char,
}
with json_path.open("w") as f:
json.dump(vocab_dict, f)
if __name__ == "__main__":
main()
| Loes5307/VocalAdversary2022 | data_utility-main/data_utility/scripts/generate_character_vocabulary.py | generate_character_vocabulary.py | py | 2,331 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "data_utility.pipe.containers.WavAudioDataSample",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "data_utility.pipe.containers.WavAudioDataSample",
"line_number": 34,
"usage_type": "argument"
},
{
"api_name": "typing.Tuple",
"line_number": 60,
"us... |
32017784263 | """Master Section for the Video Converter controller"""
import threading
import pexpect
from config import CONFIG
class VideoConverterBase:
"""Master Section for the Video Converter controller"""
def __init__(self, pool_sema: threading.BoundedSemaphore, db_id: int):
self.__thread = threading.Thread(target=self.run, args=())
self.__thread.setName(f"Converter Task: {str(db_id)}")
self._pool_sema = pool_sema
self._db_id = db_id
self._conf = CONFIG["ripper"]["converter"]
self._label = "EMPTY"
self._filename = ""
self._command: list = []
self.__frame_count: int = 0
self.__frame_process: int = 0
self.__percent: float = 0.0
self._wait = threading.Event()
self._thread_run: bool = True
self.__active: bool = False
self.__thread.start()
@property
def thread_run(self) -> bool:
"""return if thread is running"""
return self.__thread.is_alive()
@property
def active(self) -> bool:
"""return if thread is Active"""
return self.__active
@property
def db_id(self) -> int:
"""returns the DB ID"""
return self._db_id
def stop_thread(self):
"""stop the thread"""
if self.__thread.is_alive():
self._thread_run = False
self._wait.set()
self.__thread.join()
def release_wait(self):
"""releases the wait if the system needs to wait for information"""
self._wait.set()
def _get_frame_count(self, infile: str):
"""gets the frame count of the file"""
cmd = 'ffmpeg -hide_banner -v quiet -stats -i "'
cmd += infile
cmd += '" -map 0:v:0 -c copy -f null -'
frames = 0
thread = pexpect.spawn(cmd, encoding="utf-8")
cpl = thread.compile_pattern_list([pexpect.EOF, r"frame= *\d+"])
while True:
i = thread.expect_list(cpl, timeout=None)
if i == 0: # EOF
break
elif i == 1:
frames = thread.match.group(0)
self.__frame_count = int(frames.replace("frame=", "").strip())
def _do_conversion(self):
"""method to convert file"""
self.__active = True
thread = pexpect.spawn(" ".join(self._command), encoding="utf-8")
cpl = thread.compile_pattern_list([pexpect.EOF, r"frame= *\d+"])
while True:
i = thread.expect_list(cpl, timeout=None)
if i == 0: # EOF
self.__active = False
return True
if i == 1:
return_string = thread.match.group(0).replace("frame=", "").lstrip()
self.__frame_process = int(return_string)
self.__percent = round(float(self.__frame_process / self.__frame_count * 100), 2)
def api_data(self) -> dict:
"""returns the data as dict for html"""
file_name_split = self._filename.replace(".mkv", "").split("/")
return_dict = {
"id": self._db_id,
"label": self._label,
"discid": int(file_name_split[-2]),
"trackid": int(file_name_split[-1]),
"converting": self.__active,
"framecount": self.__frame_count,
"process": self.__frame_process,
"percent": self.__percent,
}
return return_dict
def html_data(self) -> dict:
"""returns the data for html"""
return_dict = self.api_data()
return return_dict
| GaryTheBrown/Tackem | ripper/video_converter/base.py | base.py | py | 3,543 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "threading.BoundedSemaphore",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "threading.Thread",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "config.CONFIG",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "thread... |
14719935805 | import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as sig
#Opening file containing experiment data
fin = open('/Users/ammaagyei/mu_code/Experiment1.txt')
mytxt = fin.read()
print(mytxt)
#Splitting data on file into acceleration list, angular position list and time list
t = []
acc = []
ang = []
i = 0
line = ""
data = []
for char in mytxt:
if char == "\n":
data.append(line)
line = ""
else:
line = line + char
for num in data:
if i % 2 == 0:
t.append(float(num))
else:
acc.append(float(num.split(",")[0]))
ang.append(float(num.split(",")[1]))
i += 1
fin.close()
#Plotting Acceleration vs Time
plt.figure(figsize=(8,8))
plt.subplot(3,1,2)
plt.plot(t, acc, 'k-')
plt.xlim(900,8500)
plt.ylim(-500,200)
plt.xlabel('Time(milliseconds)')
plt.ylabel('acceleration(m/s**2)')
plt.title("Acceleration vs Time")
plt.grid()
#Plotting Angle vs Time
plt.figure(figsize=(8,8))
plt.subplot(3,1,2)
plt.plot(t, ang, 'k-')
plt.xlabel('Time(milliseconds)')
plt.ylabel('angle(radians))')
plt.xlim(4500,7000)
plt.ylim(-8,2)
plt.title("Theta vs Time")
plt.grid()
#Graphing filtered data and finding peaks
ang = np.radians(ang)
acc = np.array(acc)
t = np.array(t)
acc_filt = sig.medfilt(acc,33)
acc_pks,_ = sig.find_peaks(acc,10,prominence=200)
acc_filt_pks, _ = sig.find_peaks(acc_filt,None,None,prominence=20)
#Plotting Noisy Data
plt.figure()
plt.plot(t,acc, 'r-', t[acc_pks],acc[acc_pks], 'b.')
plt.title('Noisy Data')
plt.show()
#Plotting Filtered Data
plt.figure(figsize=(10,10))
plt.plot(t, acc_filt, 'r-',t[acc_filt_pks],acc_filt[acc_filt_pks], 'b.')
plt.title('Filtered Data')
plt.show()
#Plotting Peaks vs Time and Calculating the Period
newt = t[acc_filt_pks]
newacc= acc[acc_filt_pks]
t.resize((9,),refcheck = False)
plt.figure()
plt.plot(newt, newacc, 'ro-')
plt.title('Peaks Vs Time')
plt.show()
#Calculating the period
mean_T = newt.mean()
T = mean_T
| ES2Spring2019-ComputinginEngineering/project-one-t2 | ParsingData_Graphing_and_CalcuatingPeriod.py | ParsingData_Graphing_and_CalcuatingPeriod.py | py | 1,956 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "... |
29443739493 | ############################################################################################
#
# Cálculo do desvio padrão para as idades dos personagens dos Simpsons
#
# Cálculo do desvio padrão populacional e amostral para as idades dos personagens dos Simpsons:
# As regras básicas para os cálculos de desvios padrão são:
# * Calculamos o desvio padrão populaconal quando o conjunto de dados é a população inteira.
# * Consideramos o desvio padrão amostral se nossos conjuntos de dados representarem uma amostra retirada de uma grande população (como é o caso para as idades dos personagems dos Simpsons).
#
# NOTA:
# O desvio padrão amostral sempre será maior que o desvio padrão populacional para
# o mesmo conjunto de dados porque há mais incerteza ao calcular o desvio padrão da amostra,
# assim nossa estimativa do desvio padrão será maior.
###########################################################################################
import statistics as stat
import numpy as np
grupo_1 = (1, 8, 10, 38, 39)
grupo_2 = (8, 10, 39, 45, 49)
# Cálculo do desvios padrões populacional e amostral usando a biblioteca Numpy
def Calcula_Desvio_Padrao_Populacional_Numpy(a):
Population_STD_Numpy = np.std(a)
return Population_STD_Numpy
def Calcula_Desvio_Padrao_da_Amostra_Numpy(a):
Sample_STD_Numpy = np.std(a, ddof = 1)
return Sample_STD_Numpy
# Cálculo do desvios padrões populacional e amostral usando a biblioteca Statistics
def Calcula_Desvio_Padrao_Populacional_Stat(a):
Population_STD_Stat = stat.pstdev(a)
return Population_STD_Stat
def Calcula_Desvio_Padrao_da_Amostra_Stat(a):
Sample_STD_Stat = stat.stdev(a)
return Sample_STD_Stat
print("\n Desvio Padrão populacional para o Grupo 1 (usando a biblioteca numpy): ", Calcula_Desvio_Padrao_Populacional_Numpy(grupo_1))
print("\n Desvio Padrão populacional para o Grupo 2 (usando a biblioteca numpy): ", Calcula_Desvio_Padrao_Populacional_Numpy(grupo_2))
print("\n Desvio Padrão amostral para o Grupo 1 (usando a biblioteca numpy): ", Calcula_Desvio_Padrao_da_Amostra_Numpy(grupo_1))
print("\n Desvio Padrão amostral para o Grupo 2 (usando a biblioteca numpy): ", Calcula_Desvio_Padrao_da_Amostra_Numpy(grupo_2))
print("\n Desvio Padrão populacional para o Grupo 1 (usando a biblioteca statistics): ", Calcula_Desvio_Padrao_Populacional_Stat(grupo_1))
print("\n Desvio Padrão populacional para o Grupo 2 (usando a biblioteca statistics): ", Calcula_Desvio_Padrao_Populacional_Stat(grupo_2))
print("\n Desvio Padrão amostral para o Grupo 1 (usando a biblioteca statistics): ", Calcula_Desvio_Padrao_da_Amostra_Stat(grupo_1))
print("\n Desvio Padrão amostral para o Grupo 2 (usando a biblioteca statistics): ", Calcula_Desvio_Padrao_da_Amostra_Stat(grupo_2))
| Grinduim/Bosch-2022.2 | Bosch/InnoHub/Treinamento de IA/materiais/Exemplos_1/SIMPSONS_EXAMPLE/SIMPSONS_STANDARD_DEVIATION.py | SIMPSONS_STANDARD_DEVIATION.py | py | 2,853 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "numpy.std",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "statistics.pstdev",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "statistics.stdev",
"line_nu... |
3978585491 | from google.appengine.dist import use_library
use_library('django', '1.1')
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
import helpers
import models
import settings
import appengine_utilities.sessions
import oauth
import logging
class MainHandler(webapp.RequestHandler):
def get(self):
user = None
session = appengine_utilities.sessions.Session()
username = session.get('user', None)
if username:
user = models.User.get_by_key_name(username)
helpers.render_template(self, 'mainpage.html', {'user':user, 'sessions':models.Session.all().order('title')})
class TwitterSigninHandler(webapp.RequestHandler):
def get(self):
client = oauth.TwitterClient(settings.CONSUMER_KEY, settings.CONSUMER_SECRET, 'http://sessionpicker.appspot.com/twitter/callback')
self.redirect(client.get_authenticate_url())
class TwitterCallbackHandler(webapp.RequestHandler):
def get(self):
client = oauth.TwitterClient(settings.CONSUMER_KEY, settings.CONSUMER_SECRET, 'http://sessionpicker.appspot.com/twitter/callback')
auth_token = self.request.get("oauth_token")
auth_verifier = self.request.get("oauth_verifier")
user_info = client.get_user_info(auth_token, auth_verifier=auth_verifier)
user = models.User.get_or_insert( user_info['username'],
twitter_name = user_info['username'],
display_name = user_info['name'],
image_url = user_info['picture'],
oauth_token = user_info['token'],
oauth_secret = user_info['secret']
)
session = appengine_utilities.sessions.Session()
session['user'] = user.twitter_name
self.redirect('/')
class CreateSessionHandler(webapp.RequestHandler):
def post(self):
session = appengine_utilities.sessions.Session()
user = helpers.get_session_user()
title = self.request.get('title')
description = self.request.get('description')
logging.info('title: "%s" description: "%s" - args "%s"' % (title, description, self.request.arguments()))
session = models.Session(title=title, description=description, submitter=user).save()
self.redirect('/')
class LikeSessionHandler(webapp.RequestHandler):
def get(self, id):
user = helpers.get_session_user()
session = models.Session.get(id)
if not session:
self.redirect('/')
#We only want to count a like once, if we find an object, don't increment the count
obj = models.Like.all().filter('session =', session).filter('user =', user).get()
if not obj:
obj = models.Like(session=session, user=user).save()
session.likes += 1
session.save()
self.redirect('/')
class FakeUserHandler(webapp.RequestHandler):
def get(self):
session = appengine_utilities.sessions.Session()
session['user'] = 'bruntonspall'
self.redirect('/')
class SignoutHandler(webapp.RequestHandler):
def get(self):
session = appengine_utilities.sessions.Session()
del session['user']
self.redirect('/')
def main():
application = webapp.WSGIApplication([
('/', MainHandler),
('/twitter/signin', TwitterSigninHandler),
('/twitter/callback', TwitterCallbackHandler),
('/session/new', CreateSessionHandler),
('/session/(?P<id>[a-zA-Z0-9-]+)/like', LikeSessionHandler),
('/signout', SignoutHandler),
# ('/debug/fakeuser', FakeUserHandler),
],
debug=True)
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
| bruntonspall/sessionpicker | main.py | main.py | py | 3,721 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "google.appengine.dist.use_library",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.webapp.RequestHandler",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "google.appengine.ext.webapp",
"line_number": 15,
"usage_... |
18468768051 | import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
from init_utils import sigmoid, relu, compute_loss, forward_propagation, backward_propagation
from init_utils import update_parameters, predict, load_dataset, plot_decision_boundary, predict_dec
import numpy as np
import matplotlib.pyplot as plt
import h5py
import sklearn
import sklearn.datasets
def sigmoid(x):
"""
Compute the sigmoid of x
Arguments:
x -- A scalar or numpy array of any size.
Return:
s -- sigmoid(x)
"""
s = 1/(1+np.exp(-x))
return s
def relu(x):
"""
Compute the relu of x
Arguments:
x -- A scalar or numpy array of any size.
Return:
s -- relu(x)
"""
s = np.maximum(0,x)
return s
def forward_propagation(X, parameters):
"""
Implements the forward propagation (and computes the loss) presented in Figure 2.
Arguments:
X -- input dataset, of shape (input size, number of examples)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat)
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
W1 -- weight matrix of shape ()
b1 -- bias vector of shape ()
W2 -- weight matrix of shape ()
b2 -- bias vector of shape ()
W3 -- weight matrix of shape ()
b3 -- bias vector of shape ()
Returns:
loss -- the loss function (vanilla logistic loss)
"""
# retrieve parameters
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
W3 = parameters["W3"]
b3 = parameters["b3"]
# LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
z1 = np.dot(W1, X) + b1
a1 = relu(z1)
z2 = np.dot(W2, a1) + b2
a2 = relu(z2)
z3 = np.dot(W3, a2) + b3
a3 = sigmoid(z3)
cache = (z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3)
return a3, cache
def backward_propagation(X, Y, cache):
"""
Implement the backward propagation presented in figure 2.
Arguments:
X -- input dataset, of shape (input size, number of examples)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat)
cache -- cache output from forward_propagation()
Returns:
gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables
"""
m = X.shape[1]
(z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3) = cache
dz3 = 1./m * (a3 - Y)
dW3 = np.dot(dz3, a2.T)
db3 = np.sum(dz3, axis=1, keepdims = True)
da2 = np.dot(W3.T, dz3)
dz2 = np.multiply(da2, np.int64(a2 > 0))
dW2 = np.dot(dz2, a1.T)
db2 = np.sum(dz2, axis=1, keepdims = True)
da1 = np.dot(W2.T, dz2)
dz1 = np.multiply(da1, np.int64(a1 > 0))
dW1 = np.dot(dz1, X.T)
db1 = np.sum(dz1, axis=1, keepdims = True)
gradients = {"dz3": dz3, "dW3": dW3, "db3": db3,
"da2": da2, "dz2": dz2, "dW2": dW2, "db2": db2,
"da1": da1, "dz1": dz1, "dW1": dW1, "db1": db1}
return gradients
def update_parameters(parameters, grads, learning_rate):
"""
Update parameters using gradient descent
Arguments:
parameters -- python dictionary containing your parameters
grads -- python dictionary containing your gradients, output of n_model_backward
Returns:
parameters -- python dictionary containing your updated parameters
parameters['W' + str(i)] = ...
parameters['b' + str(i)] = ...
"""
L = len(parameters) // 2 # number of layers in the neural networks
# Update rule for each parameter
for k in range(L):
parameters["W" + str(k+1)] = parameters["W" + str(k+1)] - learning_rate * grads["dW" + str(k+1)]
parameters["b" + str(k+1)] = parameters["b" + str(k+1)] - learning_rate * grads["db" + str(k+1)]
return parameters
def compute_loss(a3, Y):
"""
Implement the loss function
Arguments:
a3 -- post-activation, output of forward propagation
Y -- "true" labels vector, same shape as a3
Returns:
loss - value of the loss function
"""
m = Y.shape[1]
logprobs = np.multiply(-np.log(a3),Y) + np.multiply(-np.log(1 - a3), 1 - Y)
loss = 1./m * np.nansum(logprobs)
return loss
def load_cat_dataset():
train_dataset = h5py.File('datasets/train_catvnoncat.h5', "r")
train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels
test_dataset = h5py.File('datasets/test_catvnoncat.h5', "r")
test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels
classes = np.array(test_dataset["list_classes"][:]) # the list of classes
train_set_y = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
test_set_y = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
train_set_x_orig = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_orig = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
train_set_x = train_set_x_orig/255
test_set_x = test_set_x_orig/255
return train_set_x, train_set_y, test_set_x, test_set_y, classes
def predict(X, y, parameters):
"""
This function is used to predict the results of a n-layer neural network.
Arguments:
X -- data set of examples you would like to label
parameters -- parameters of the trained model
Returns:
p -- predictions for the given dataset X
"""
m = X.shape[1]
p = np.zeros((1,m), dtype = np.int)
# Forward propagation
a3, caches = forward_propagation(X, parameters)
# convert probas to 0/1 predictions
for i in range(0, a3.shape[1]):
if a3[0,i] > 0.5:
p[0,i] = 1
else:
p[0,i] = 0
# print results
print("Accuracy: " + str(np.mean((p[0,:] == y[0,:]))))
return p
def plot_decision_boundary(model, X, y):
# Set min and max values and give it some padding
x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1
y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict the function value for the whole grid
Z = model(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.ylabel('x2')
plt.xlabel('x1')
plt.scatter(X[0, :], X[1, :], c=y.ravel(), cmap=plt.cm.Spectral) #
plt.show()
def predict_dec(parameters, X):
"""
Used for plotting decision boundary.
Arguments:
parameters -- python dictionary containing your parameters
X -- input data of size (m, K)
Returns
predictions -- vector of predictions of our model (red: 0 / blue: 1)
"""
# Predict using forward propagation and a classification threshold of 0.5
a3, cache = forward_propagation(X, parameters)
predictions = (a3>0.5)
return predictions
def load_dataset():
np.random.seed(1)
train_X, train_Y = sklearn.datasets.make_circles(n_samples=300, noise=.05)
np.random.seed(2)
test_X, test_Y = sklearn.datasets.make_circles(n_samples=100, noise=.05)
# Visualize the data
plt.scatter(train_X[:, 0], train_X[:, 1], c=train_Y, s=40, cmap=plt.cm.Spectral);
train_X = train_X.T
train_Y = train_Y.reshape((1, train_Y.shape[0]))
test_X = test_X.T
test_Y = test_Y.reshape((1, test_Y.shape[0]))
return train_X, train_Y, test_X, test_Y
#%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# load image dataset: blue/red dots in circles
train_X, train_Y, test_X, test_Y = load_dataset()
def model(X, Y, learning_rate = 0.01, num_iterations = 15000, print_cost = True, initialization = "he"):
"""
Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.
Arguments:
X -- input data, of shape (2, number of examples)
Y -- true "label" vector (containing 0 for red dots; 1 for blue dots), of shape (1, number of examples)
learning_rate -- learning rate for gradient descent
num_iterations -- number of iterations to run gradient descent
print_cost -- if True, print the cost every 1000 iterations
initialization -- flag to choose which initialization to use ("zeros","random" or "he")
Returns:
parameters -- parameters learnt by the model
"""
grads = {}
costs = [] # to keep track of the loss
m = X.shape[1] # number of examples
layers_dims = [X.shape[0], 10, 5, 1]
# Initialize parameters dictionary.
if initialization == "zeros":
parameters = initialize_parameters_zeros(layers_dims)
elif initialization == "random":
parameters = initialize_parameters_random(layers_dims)
elif initialization == "he":
parameters = initialize_parameters_he(layers_dims)
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
a3, cache = forward_propagation(X, parameters)
# Loss
cost = compute_loss(a3, Y)
# Backward propagation.
grads = backward_propagation(X, Y, cache)
# Update parameters.
parameters = update_parameters(parameters, grads, learning_rate)
# Print the loss every 1000 iterations
if print_cost and i % 1000 == 0:
print("Cost after iteration {}: {}".format(i, cost))
costs.append(cost)
# plot the loss
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
def initialize_parameters_zeros(layers_dims):
"""
Arguments:
layer_dims -- python array (list) containing the size of each layer.
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
"""
parameters = {}
L = len(layers_dims) # number of layers in the network
for l in range(1, L):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.zeros((layers_dims[l], layers_dims[l-1]))
parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))
### END CODE HERE ###
return parameters
parameters = initialize_parameters_zeros([3,2,1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
parameters = model(train_X, train_Y, initialization = "zeros")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
plt.title("Model with Zeros initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
print(train_Y)
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
def initialize_parameters_random(layers_dims):
"""
Arguments:
layer_dims -- python array (list) containing the size of each layer.
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
"""
    np.random.seed(3) # This seed makes sure your "random" numbers will be the same as ours
parameters = {}
L = len(layers_dims) # integer representing the number of layers
for l in range(1, L):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.random.randn(layers_dims[l], layers_dims[l-1]) * 10
parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))
### END CODE HERE ###
return parameters
parameters = model(train_X, train_Y, initialization = "random")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
plt.title("Model with large random initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
#Model with Unit Initialization
def initialize_parameters_he(layers_dims):
"""
Arguments:
layer_dims -- python array (list) containing the size of each layer.
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
"""
np.random.seed(3)
parameters = {}
L = len(layers_dims) - 1 # integer representing the number of layers
for l in range(1, L + 1):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.random.randn(layers_dims[l], layers_dims[l-1]) * np.sqrt(2/layers_dims[l-1])
parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))
### END CODE HERE ###
return parameters
parameters = model(train_X, train_Y, initialization = "he")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
plt.title("Model with Unit initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y) | sum-coderepo/Optimization-Python | NeuralNetwork/Weight_Initialization.py | Weight_Initialization.py | py | 14,724 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "numpy.exp",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.maximum",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "init_utils.relu",
"line_number"... |
3018338966 | from setuptools import setup
install_requires=[
'cython >= 0.13',
'jinja2 >= 2.5',
'argparse',
]
setup(name='protocyt',
version='0.1.5',
description="Fast python port of protobuf",
long_description="Compiles protobuf files into python extension modules using cython",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)",
"Operating System :: OS Independent",
"Topic :: Internet",
"Programming Language :: Python"
],
keywords='serialization data-format cython protobuf',
author='Eugene Chernyshov',
author_email='Chernyshov.Eugene@gmail.com',
url='http://evgenus.github.com/protocyt/',
license='LGPL',
packages=['protocyt'],
install_requires=install_requires,
package_data=dict(
protocyt = [
'ProtobufGrammar.txt',
'common.pytempl',
'file.pytempl',
'message.pytempl',
'package.pytempl',
'structure.pytempl',
'includes/*.*',
]
)
)
| Evgenus/protocyt | setup.py | setup.py | py | 1,136 | python | en | code | 24 | github-code | 1 | [
{
"api_name": "setuptools.setup",
"line_number": 9,
"usage_type": "call"
}
] |
10086144499 | import pytest
import mongomock
import pymongo
from dotenv.main import find_dotenv, load_dotenv
from todo_app.app import create_app
@pytest.fixture
def client():
file_path = find_dotenv('.env.test')
load_dotenv(file_path, override=True, verbose=True)
with mongomock.patch(servers=(('fakemongo.com', 27017),)):
test_app = create_app()
with test_app.test_client() as client:
yield client
def test_index_page(client):
mockData = [
{
"title": "foo",
"status": "Not Started"
},
{
"title": "bar",
"status": "In Progress"
},
{
"title": "blah",
"status": "Complete"
}
]
mongoClient = pymongo.MongoClient('mongodb://fakemongo.com')['Todo']
mongoClient.items.insert_many(mockData)
response = client.get('/')
responseBody = response.data.decode("utf-8")
    assert response.status_code == 200
assert "foo" in responseBody
assert "bar" in responseBody
assert "blah" in responseBody
| lawli01/DevOps-Course-Starter | tests/integration/test_app.py | test_app.py | py | 1,071 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "dotenv.main.find_dotenv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "dotenv.main.load_dotenv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "mongomock.patch",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "todo_a... |
36634644803 | # -*- coding: utf-8 -*-
import scrapy
from quotetutorial.items import QuoteItem
class QuotesSpider(scrapy.Spider):
name = "quotes" #指定spider的名称
allowed_domains = ["quotes.toscrape.com"]
start_urls = ['http://quotes.toscrape.com/']
def parse(self, response):
quotes = response.css('.quote')
for quote in quotes:
item = QuoteItem()
text = quote.css('.text::text').extract_first()
#scrapy的spider特有语法,输出text属性里的text文档内容。extract提取前面函数里的全部内容,extract_first()提取前面函数里的第一条内容。
author = quote.css('.author::text').extract_first()
tags = quote.css('.tags .tag::text').extract()
item['text'] = text
item['author'] = author
item['tags'] = tags
yield item
next = response.css('.pager .next a::attr(href)').extract_first()
url = response.urljoin(next) #获取目标路径的绝对地址。
yield scrapy.Request(url=url,callback=self.parse) #callback表示回调函数的意思,即获得response后,再由callback表示由哪个函数处理。本案例表示递归调用,自己调用自己。实现翻页效果
| Sylor-huang/quotetutorial | quotetutorial/spiders/quotes.py | quotes.py | py | 1,268 | python | zh | code | 0 | github-code | 1 | [
{
"api_name": "scrapy.Spider",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "quotetutorial.items.QuoteItem",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "scrapy.Request",
"line_number": 27,
"usage_type": "call"
}
] |
8283102056 |
import GetOldTweets3 as got
import time
import datetime
def got_func(search_word, search_lang,time_start, time_end):
tweetcriteria = got.manager.TweetCriteria().setQuerySearch(search_word).setSince(time_start).setUntil(time_end).setLang(search_lang).setTopTweets(True).setMaxTweets(10)
tweet = got.manager.TweetManager.getTweets(tweetcriteria)
#filename = "tweet_" + time_start + "_" + time_end
#file = open("filename.txt", "a")
for t in tweet:
print(t.date)
print(t.id)
print(t.text + "\n")
# file.write(t.text + "\n")
#file.close()
def got_start(ts, te,word_search, tweet_lang):
try:
time_start = ts
time_increment = datetime.datetime.strptime(ts,'%Y-%m-%d') + datetime.timedelta(days = 10)
time_end = time_increment.strftime('%Y-%m-%d')
while time_start < te:
print(time_start)
print(time_end)
got_func(word_search,tweet_lang,time_start, time_end)
time_start = time_end
time_increment = datetime.datetime.strptime(time_start,'%Y-%m-%d') + datetime.timedelta(days = 10)
time_end = time_increment.strftime('%Y-%m-%d')
except Exception:
print("Error")
print("sleeping for 15 minutes")
time.sleep(15*60)
while time_start < te:
print(time_start)
print(time_end)
got_func(word_search,tweet_lang,time_start, time_end)
time_start = time_end
time_increment = datetime.datetime.strptime(time_start,'%Y-%m-%d') + datetime.timedelta(days = 10)
time_end = time_increment.strftime('%Y-%m-%d')
ws = "https://www.luxurydaily.com/inviting-opinion-pieces-on-luxury-issues-marketing-retail-and-media/"
sl = 'en'
got_start("2018-09-01", "2019-01-30",ws,sl)
| Swarnalathaa/Twitter | twitter_got.py | twitter_got.py | py | 1,883 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "GetOldTweets3.manager.TweetCriteria",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "GetOldTweets3.manager",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "GetOldTweets3.manager.TweetManager.getTweets",
"line_number": 8,
"usage_type... |