index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
1,976
|
Tarun-yadav777/Djanjo-Project-Schemer-
|
refs/heads/master
|
/schemegen/migrations/0002_delete_genre.py
|
# Generated by Django 3.0.8 on 2020-11-13 11:25
from django.db import migrations
class Migration(migrations.Migration):
    """Drops the ``Genre`` model created by the initial migration."""

    dependencies = [
        ('schemegen', '0001_initial'),
    ]

    operations = [
        migrations.DeleteModel(name='Genre'),
    ]
|
{"/schemegen/views.py": ["/schemegen/models.py"], "/schemegen/admin.py": ["/schemegen/models.py"]}
|
1,977
|
Tarun-yadav777/Djanjo-Project-Schemer-
|
refs/heads/master
|
/schemegen/urls.py
|
from django.urls import path
from schemegen import views
# Route table for the schemegen app; every entry resolves to a view in views.py.
# NOTE(review): the last route's name uses a hyphen ('requirements-hindi')
# while adhaar_hindi uses an underscore — inconsistent, but kept as-is since
# templates may reverse it by this exact name.
urlpatterns = [
    path('', views.index, name='index'),
    path('adhaar', views.adhaar, name='adhaar'),
    path('requirements', views.requirements, name='requirements'),
    path('detail', views.detail, name='detail'),
    path('adhaar-hindi', views.adhaar_hindi, name='adhaar_hindi'),
    path('requirements-hindi', views.requirements_hindi, name='requirements-hindi'),
]
|
{"/schemegen/views.py": ["/schemegen/models.py"], "/schemegen/admin.py": ["/schemegen/models.py"]}
|
1,978
|
Tarun-yadav777/Djanjo-Project-Schemer-
|
refs/heads/master
|
/schemegen/views.py
|
from django.shortcuts import render
from django.http import HttpResponse
from .models import Schemegen
def index(request):
    """Render the landing page."""
    return render(request, 'Index.html')


def adhaar(request):
    """Render the Adhaar information page."""
    return render(request, 'Adhaar.html')


def requirements(request):
    """Render the scheme-requirements page."""
    return render(request, 'Requirements.html')
def detail(request):
    """Render the schemes whose ``type`` matches the ``occupation`` query param.

    A missing ``occupation`` parameter raises ``MultiValueDictKeyError``
    (surfacing as a server error), which preserves the original behaviour.
    """
    # Renamed the local from ``type`` — it shadowed the ``type`` builtin.
    occupation = request.GET['occupation']
    schemes = Schemegen.objects.filter(type=occupation)
    return render(request, 'detail.html', {'schemes': schemes})
def adhaar_hindi(request):
    """Render the Hindi version of the Adhaar page."""
    return render(request, 'adhaar_hindi.html')


def requirements_hindi(request):
    """Render the Hindi version of the requirements page."""
    return render(request, 'requirements_hindi.html')
|
{"/schemegen/views.py": ["/schemegen/models.py"], "/schemegen/admin.py": ["/schemegen/models.py"]}
|
1,979
|
Tarun-yadav777/Djanjo-Project-Schemer-
|
refs/heads/master
|
/schemegen/migrations/0001_initial.py
|
# Generated by Django 3.0.8 on 2020-11-13 11:23
from django.db import migrations, models
# Auto-generated initial migration: creates the Genre, Schemegen and
# User_info tables.
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
# Genre is created here and dropped again by 0002_delete_genre.
migrations.CreateModel(
name='Genre',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
# A government scheme with a category code and an external info link.
migrations.CreateModel(
name='Schemegen',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
# NOTE(review): the default is the display label "Women's Development",
# not the stored choice value "W's D" — likely a model bug; confirm
# against the current models.py before altering.
('type', models.CharField(choices=[("W's D", "Women's Development"), ('S J', 'Social Justice'), ('S', 'Sports'), ('R D', 'Ruler Development'), ("C's D", 'Child Development')], default="Women's Development", max_length=20)),
('info_link', models.URLField()),
],
),
# Contact/interest details captured from a site visitor.
migrations.CreateModel(
name='User_info',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('gender', models.CharField(max_length=6)),
('dob', models.DateField()),
# NOTE(review): phone numbers stored as IntegerField lose leading
# zeros and can overflow on some backends — confirm intent.
('phone_no', models.IntegerField()),
('interested_scheme', models.CharField(max_length=200)),
],
),
]
|
{"/schemegen/views.py": ["/schemegen/models.py"], "/schemegen/admin.py": ["/schemegen/models.py"]}
|
1,980
|
Tarun-yadav777/Djanjo-Project-Schemer-
|
refs/heads/master
|
/schemegen/admin.py
|
from django.contrib import admin
from .models import Schemegen, User_info

# Expose both schemegen models in the Django admin site.
for model in (Schemegen, User_info):
    admin.site.register(model)
|
{"/schemegen/views.py": ["/schemegen/models.py"], "/schemegen/admin.py": ["/schemegen/models.py"]}
|
2,032
|
Zuoway/redfin_crawler
|
refs/heads/master
|
/Redfin/pipelines.py
|
# -*- coding: utf-8 -*-
'''Can use pymysql client to upload item directly to MySQL DB.
Or simply take stored json file into mysql within mysql shell
As a proof of concept project, not implemented yet.
For coding challenge purposes, currently exporting file into .csv format
'''
# from scrapy.exporters import JsonItemExporter
from scrapy.exporters import CsvItemExporter
class RedfinPipeline(object):
    """Item pipeline that streams scraped items into a CSV file.

    A JsonItemExporter variant used to live here as commented-out code;
    to emit JSON instead, swap ``CsvItemExporter`` for
    ``scrapy.exporters.JsonItemExporter``.
    """

    def __init__(self):
        # Write the CSV into the Result folder. 'wb' because scrapy
        # exporters expect a binary file object.
        self.file = open("Result/redfin_zhuowei.csv", 'wb')
        self.exporter = CsvItemExporter(self.file)
        self.exporter.start_exporting()

    def close_spider(self, spider):
        """Finish the export and close the output file when the spider stops."""
        self.exporter.finish_exporting()
        self.file.close()

    def process_item(self, item, spider):
        """Export each item and pass it along unchanged."""
        self.exporter.export_item(item)
        return item
|
{"/Redfin/spiders/redfin_crawler.py": ["/Redfin/items.py"]}
|
2,033
|
Zuoway/redfin_crawler
|
refs/heads/master
|
/Redfin/spiders/redfin_crawler.py
|
# -*- coding: utf-8 -*-
import scrapy
import csv
import re
import random
from Redfin.items import RedfinItem
class RedfinCrawlerSpider(scrapy.Spider):
    """Spider that downloads sold-home CSV data from redfin.com per zipcode."""

    name = 'redfin_crawler'
    allowed_domains = ['redfin.com']

    def start_requests(self):
        """Yield one request per zipcode listed in all_postal_codes.txt."""
        with open("all_postal_codes.txt") as f:
            for zipcode in f:
                zipcode = zipcode.rstrip('\n')
                url = 'https://www.redfin.com/zipcode/' + zipcode
                yield scrapy.Request(url=url, callback=self.get_csv_url)

    def get_csv_url(self, response):
        """Build the download-csv API link from the page's embedded regionId.

        Some zipcodes do not expose the download link in the page; the API
        URL is constructed manually from the regionId instead.
        """
        match = re.search(r"regionId=(.+?)&", response.text)
        if match is None:
            # Page layout changed or no regionId present: skip this zipcode
            # instead of crashing the crawl with AttributeError on None.
            return None
        regionId = match.group(1)
        csv_url = ('https://www.redfin.com/stingray/api/gis-csv?al=1'
                   '&region_id=' + regionId +
                   '&region_type=2&sold_within_days=180&status=9'
                   '&uipt=1,2,3,4,5,6&v=8')
        return scrapy.Request(url=csv_url, callback=self.parse_csv)

    def parse_csv(self, response):
        """Parse each data row of the downloaded CSV into a RedfinItem."""
        rows = response.body.decode().split('\n')
        for index, line in enumerate(rows):
            # Skip the header row and any blank lines.
            if index == 0 or not line:
                continue
            fields = next(csv.reader(line.splitlines(), skipinitialspace=True))
            item = RedfinItem()
            item['sold_date'] = fields[1]
            item['property_type'] = fields[2]
            item['address'] = fields[3]
            item['city'] = fields[4]
            item['state'] = fields[5]
            item['zipcode'] = fields[6]
            item['price'] = fields[7]
            item['beds'] = fields[8]
            item['baths'] = fields[9]
            # Column 10 (location) is intentionally skipped.
            item['square_feet'] = fields[11]
            item['lot_size'] = fields[12]
            item['year_built'] = fields[13]
            item['days_on_market'] = fields[14]
            yield item
|
{"/Redfin/spiders/redfin_crawler.py": ["/Redfin/items.py"]}
|
2,034
|
Zuoway/redfin_crawler
|
refs/heads/master
|
/Redfin/items.py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
# Container for one sold-home record scraped from a Redfin CSV download.
# Field names mirror the CSV columns consumed in RedfinCrawlerSpider.parse_csv.
class RedfinItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
sold_date = scrapy.Field()
property_type = scrapy.Field()
address = scrapy.Field()
city = scrapy.Field()
state = scrapy.Field()
zipcode = scrapy.Field()
price = scrapy.Field()
beds = scrapy.Field()
baths = scrapy.Field()
square_feet = scrapy.Field()
lot_size = scrapy.Field()
year_built = scrapy.Field()
days_on_market = scrapy.Field()
|
{"/Redfin/spiders/redfin_crawler.py": ["/Redfin/items.py"]}
|
2,035
|
Zuoway/redfin_crawler
|
refs/heads/master
|
/Redfin/main.py
|
import scrapy.cmdline
import requests
from lxml.html import fromstring
'''
A method to scrape free publicly available proxies used to crawl. Unutilized at the moment due
to unreliability of public proxies sources (retrying dead proxies and abandoning them slows down
crawling speed drastically)
'''
def get_proxies(timeout=10):
    """Scrape up to ten elite proxies and write them to proxies.txt.

    Args:
        timeout: seconds to wait for the listing page. New parameter with a
            default, so existing ``get_proxies()`` calls keep working; the
            old code could hang forever on an unbounded request.
    """
    url = 'https://free-proxy-list.net/'
    response = requests.get(url, timeout=timeout)
    parser = fromstring(response.text)
    proxies = []
    # The 5th column marks the anonymity level; keep only "elite proxy" rows.
    for row in parser.xpath('//tbody/tr')[:10]:
        if row.xpath('.//td[5][contains(text(),"elite proxy")]'):
            proxy = ":".join([row.xpath('.//td[1]/text()')[0], row.xpath('.//td[2]/text()')[0]])
            proxies.append(proxy)
    with open('proxies.txt', 'w') as f:
        for proxy in proxies:
            f.write(proxy + '\n')
if __name__ == '__main__':
# generate proxy list (currently disabled — public proxies proved unreliable)
# get_proxies()
# Hand control to scrapy's CLI, equivalent to running `scrapy crawl redfin_crawler`.
scrapy.cmdline.execute(argv=['scrapy','crawl','redfin_crawler'])
|
{"/Redfin/spiders/redfin_crawler.py": ["/Redfin/items.py"]}
|
2,042
|
wbolster/earnest
|
refs/heads/master
|
/earnest/__init__.py
|
from .earnest import ( # noqa: unused imports
lookup_path,
walk,
)
|
{"/earnest/__init__.py": ["/earnest/earnest.py"], "/test_earnest.py": ["/earnest/__init__.py"]}
|
2,043
|
wbolster/earnest
|
refs/heads/master
|
/setup.py
|
from setuptools import setup
setup(
name='earnest',
version='0.0.1a',
packages=['earnest'],
)
|
{"/earnest/__init__.py": ["/earnest/earnest.py"], "/test_earnest.py": ["/earnest/__init__.py"]}
|
2,044
|
wbolster/earnest
|
refs/heads/master
|
/earnest/earnest.py
|
try:
# Python 3
from functools import reduce
STRING_TYPE = str
except ImportError:
# Python 2
STRING_TYPE = basestring # noqa
_SENTINEL = object()
def walk(obj, parent_first=True):
    """Yield ``(path, value)`` pairs for every node in a nested structure.

    ``path`` is a tuple of dict keys / sequence indices leading to the
    value. With ``parent_first=True`` a container is yielded before its
    children (pre-order); otherwise after them (post-order).
    """
    def children_of(node):
        # Dicts contribute their keys, sequences their positions;
        # scalars have no children.
        if isinstance(node, dict):
            return node.items()
        if isinstance(node, (list, tuple)):
            return enumerate(node)
        return ()

    if parent_first:
        yield (), obj
    for key, child in children_of(obj):
        for tail, descendant in walk(child, parent_first):
            yield (key,) + tail, descendant
    if not parent_first:
        yield (), obj
def lookup_path(obj, path, default=_SENTINEL):
    """Resolve *path* inside the nested structure *obj*.

    *path* is either a dotted string (``'d.nested.1.foo'``) or a list of
    keys/indices. Numeric-looking components are treated as list indices.
    Raises the underlying IndexError/KeyError/TypeError when the path is
    missing, unless *default* is given.
    """
    if isinstance(path, STRING_TYPE):
        path = path.split('.')
    # Build a fresh component list rather than rewriting entries of the
    # caller's list in place (the original mutated a list argument as a
    # visible side effect).
    components = []
    for component in path:
        try:
            components.append(int(component))
        except ValueError:
            components.append(component)
    try:
        return reduce(lambda node, key: node[key], components, obj)
    except (IndexError, KeyError, TypeError):
        if default is _SENTINEL:
            raise
        return default
|
{"/earnest/__init__.py": ["/earnest/earnest.py"], "/test_earnest.py": ["/earnest/__init__.py"]}
|
2,045
|
wbolster/earnest
|
refs/heads/master
|
/test_earnest.py
|
import pytest
import earnest
@pytest.fixture()
def sample_object():
    """Nested fixture mixing dicts, lists and scalars for path lookups."""
    return {
        'a': 1,
        'b': 2,
        'c': ['c1', 'c2'],
        'd': {'nested': [1, {'foo': 'bar', 'baz': {}}]},
    }
def test_walk(sample_object):
# Smoke test only: prints every (path, value) pair produced by walk();
# there are no assertions, so it merely checks walk() runs without raising.
import pprint
pprint.pprint(sample_object)
print()
for path, obj in earnest.walk(sample_object, parent_first=True):
print('.'.join(map(str, path)))
print(obj)
print()
def test_lookup_path(sample_object):
# Covers list paths, dotted-string paths, numeric components treated as
# list indices, missing keys raising, and the default fallback.
lookup_path = earnest.lookup_path
assert lookup_path(sample_object, ['a']) == 1
assert lookup_path(sample_object, 'a') == 1
assert lookup_path(sample_object, ['d', 'nested', 1, 'foo']) == 'bar'
assert lookup_path(sample_object, 'd.nested.1.foo') == 'bar'
with pytest.raises(KeyError):
lookup_path(sample_object, 'd.nested.1.too-bad')
assert lookup_path(sample_object, 'd.nested.1.too-bad', 'x') == 'x'
|
{"/earnest/__init__.py": ["/earnest/earnest.py"], "/test_earnest.py": ["/earnest/__init__.py"]}
|
2,049
|
RainGod6/SDET11-LY
|
refs/heads/master
|
/test_appium/test_xueqiu.py
|
# This sample code uses the Appium python client
# pip install Appium-Python-Client
# Then you can paste this into a file and simply run with Python
from time import sleep
from appium import webdriver
from appium.webdriver.common.mobileby import MobileBy
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
# Appium smoke test for the Xueqiu Android app: opens the trade tab and
# inspects available driver contexts (native vs. webview).
class TestXueQiu:
def setup(self):
# Desired capabilities for a local Appium session against the Xueqiu app.
caps = {}
caps["platformName"] = "android"
caps["deviceName"] = "test1"
caps["appPackage"] = "com.xueqiu.android"
caps["appActivity"] = ".view.WelcomeActivityAlias"
# NOTE(review): machine-specific chromedriver path — adjust per host.
caps["chromedriverExecutable"] = "/Users/user/tool/chromedriver/2.20/chromedriver"
self.driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
# Wait for and dismiss the consent dialog before the implicit wait kicks in.
WebDriverWait(self.driver, 10).until(expected_conditions.element_to_be_clickable((MobileBy.ID, 'com.xueqiu.android:id/tv_agree')))
self.driver.find_element(MobileBy.ID, 'com.xueqiu.android:id/tv_agree').click()
self.driver.implicitly_wait(10)
def test_webview_context(self):
# Open the trade tab (text is Chinese in the app UI).
self.driver.find_element(MobileBy.XPATH, "//*[@text='交易' and contains(@resource-id,'tab_name')]").click()
# WebDriverWait(self.driver, 15).until(lambda x: len(self.driver.contexts) > 1)
# Poll the context list for a few seconds so the webview has time to register.
for i in range(5):
print(self.driver.contexts)
sleep(1)
print(self.driver.page_source)
# Switch into the most recently registered context (expected: the webview).
self.driver.switch_to.context(self.driver.contexts[-1])
print(self.driver.contexts)
print(self.driver.page_source)
def teardown(self):
# Hard sleep to allow manual observation before quitting the session.
sleep(20)
self.driver.quit()
|
{"/test_appium_page_object/page/main.py": ["/test_appium_page_object/page/apply_etc_card/apply_credit_card.py", "/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/testcase/test_apply_credit_card.py": ["/test_appium_page_object/page/app.py"], "/test_appium_page_object/page/apply_etc_card/apply_credit_card.py": ["/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/page/app.py": ["/test_appium_page_object/page/base_page.py", "/test_appium_page_object/page/main.py"]}
|
2,050
|
RainGod6/SDET11-LY
|
refs/heads/master
|
/test_appium_page_object/page/main.py
|
from appium.webdriver.common.mobileby import MobileBy
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from test_appium_page_object.page.apply_etc_card.apply_credit_card import ApplyCreditCard
from test_appium_page_object.page.base_page import BasePage
# Page object for the app's main screen; navigation methods return the
# next page object so test steps can be chained.
class Main(BasePage):
def goto_etc_home(self):
# Open the ETC entry, accept the permission dialog, and hand over to
# the ETC card-application page object.
self.find(MobileBy.XPATH, "//*[@text='ETC']").click()
WebDriverWait(self._driver, 20).until(expected_conditions.element_to_be_clickable((MobileBy.ID,
"android:id/button1")))
self.find(MobileBy.ID, "android:id/button1").click()
return ApplyCreditCard(self._driver)
# The remaining navigation targets are placeholders, not yet implemented.
def goto_etc_services_more(self):
pass
def goto_profile(self):
pass
def goto_message(self):
pass
|
{"/test_appium_page_object/page/main.py": ["/test_appium_page_object/page/apply_etc_card/apply_credit_card.py", "/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/testcase/test_apply_credit_card.py": ["/test_appium_page_object/page/app.py"], "/test_appium_page_object/page/apply_etc_card/apply_credit_card.py": ["/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/page/app.py": ["/test_appium_page_object/page/base_page.py", "/test_appium_page_object/page/main.py"]}
|
2,051
|
RainGod6/SDET11-LY
|
refs/heads/master
|
/test_appium_page_object/testcase/test_apply_credit_card.py
|
from test_appium_page_object.page.app import App
# End-to-end flow: start the app, land on Main, and walk through the
# ETC credit-card application via chained page objects.
class TestApplyCreditCard:
def setup(self):
self.main = App().start().main()
def test_apply_credit_card(self):
self.main.goto_etc_home().apply_credit_card()
|
{"/test_appium_page_object/page/main.py": ["/test_appium_page_object/page/apply_etc_card/apply_credit_card.py", "/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/testcase/test_apply_credit_card.py": ["/test_appium_page_object/page/app.py"], "/test_appium_page_object/page/apply_etc_card/apply_credit_card.py": ["/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/page/app.py": ["/test_appium_page_object/page/base_page.py", "/test_appium_page_object/page/main.py"]}
|
2,052
|
RainGod6/SDET11-LY
|
refs/heads/master
|
/test_appium_page_object/page/apply_etc_card/apply_credit_card.py
|
from appium.webdriver.common.mobileby import MobileBy
from test_appium_page_object.page.base_page import BasePage
# Page object for the ETC card-application screen.
class ApplyCreditCard(BasePage):
# Locator tuples (strategy, selector) for elements on this screen.
_name_apply_card_element = (MobileBy.ID, "com.wlqq.phantom.plugin.etc:id/tv_online_open_card")
_name_nfc_element = (MobileBy.ID, "com.wlqq:id/btn_back")
def apply_credit_card(self):
# NOTE(review): here find() receives one (by, locator) tuple, while
# Main.goto_etc_home calls find(by, locator) with two arguments —
# presumably BasePage.find handles both forms; verify against base_page.py.
self.find(self._name_apply_card_element).click()
self.find(self._name_nfc_element).click()
return self
# Placeholders for flows not yet implemented.
def goto_faq(self):
pass
def goto_bind_card(self):
pass
def goto_obu(self):
pass
|
{"/test_appium_page_object/page/main.py": ["/test_appium_page_object/page/apply_etc_card/apply_credit_card.py", "/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/testcase/test_apply_credit_card.py": ["/test_appium_page_object/page/app.py"], "/test_appium_page_object/page/apply_etc_card/apply_credit_card.py": ["/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/page/app.py": ["/test_appium_page_object/page/base_page.py", "/test_appium_page_object/page/main.py"]}
|
2,053
|
RainGod6/SDET11-LY
|
refs/heads/master
|
/test_appium_page_object/page/app.py
|
from appium import webdriver
from appium.webdriver.common.mobileby import MobileBy
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from test_appium_page_object.page.base_page import BasePage
from test_appium_page_object.page.main import Main
class App(BasePage):
    """Entry page object: owns driver startup and navigation to Main."""

    _appPackage = "com.wlqq"
    _appActivity = ".activity.HomeActivity"

    def start(self):
        """Launch the app, creating the Appium session on first use.

        Always returns ``self`` so calls chain: ``App().start().main()``.
        """
        if self._driver is None:
            caps = {}
            caps['platformName'] = 'android'
            caps['deviceName'] = '28d6f388'
            caps["appPackage"] = self._appPackage
            caps["appActivity"] = self._appActivity
            # Bug fix: Appium's capability key is "automationName";
            # the old all-lowercase "automationname" was silently ignored.
            caps["automationName"] = "uiautomator2"
            caps["chromedriverExecutable"] = "/Users/user/tool/chromedriver/2.35/chromedriver"
            self._driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
            self._driver.implicitly_wait(10)
            # Dismiss the consent dialog shown on first launch.
            self.find(MobileBy.ID, "com.wlqq:id/dialog_btn_right").click()
        else:
            # Warm session: just relaunch the home activity.
            self._driver.start_activity(self._appPackage, self._appActivity)
        # Bug fix: the warm-session branch previously fell through and
        # returned None, breaking App().start().main() chains.
        return self

    def restart(self):
        pass

    def stop(self):
        pass

    # "-> Main" is a type hint so IDEs can complete the chained call.
    def main(self) -> Main:
        """Dismiss the startup dialogs and return the Main page object."""
        WebDriverWait(self._driver, 30).until(expected_conditions.element_to_be_clickable((MobileBy.ID,
                                                                                           "android:id/button1")))
        self.find(MobileBy.ID, "android:id/button1").click()
        self.find(MobileBy.XPATH, "//*[@text='知道了']").click()
        WebDriverWait(self._driver, 30).until(expected_conditions.element_to_be_clickable((MobileBy.XPATH,
            "//*[contains(@resource-id,'content')]//*[@class ='android.widget.FrameLayout']//*[@class='android.widget.ImageView']")))
        self.find(MobileBy.XPATH,
            "//*[contains(@resource-id,'content')]//*[@class ='android.widget.FrameLayout']//*[@class='android.widget.ImageView']").click()
        # WebDriverWait(self._driver, 30).until(lambda x: "ETC" in self._driver.page_source)
        return Main(self._driver)
|
{"/test_appium_page_object/page/main.py": ["/test_appium_page_object/page/apply_etc_card/apply_credit_card.py", "/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/testcase/test_apply_credit_card.py": ["/test_appium_page_object/page/app.py"], "/test_appium_page_object/page/apply_etc_card/apply_credit_card.py": ["/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/page/app.py": ["/test_appium_page_object/page/base_page.py", "/test_appium_page_object/page/main.py"]}
|
2,054
|
RainGod6/SDET11-LY
|
refs/heads/master
|
/test_appium/hcb_app/test_hcb_home.py
|
from time import sleep
from appium import webdriver
from appium.webdriver.common.mobileby import MobileBy
# Appium tests for the HCB app's home screen and its ETC entry point.
class TestHcb:
def setup(self):
caps = {}
caps['platformName'] = 'android'
caps['deviceName'] = '28d6f388'
caps['appPackage'] = 'com.wlqq'
caps['appActivity'] = 'com.wlqq.activity.HomeActivity'
self.driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
self.driver.implicitly_wait(5)
# After launch, tap "agree" on the consent dialog
el1 = self.driver.find_element(MobileBy.ID, 'com.wlqq:id/dialog_btn_right')
el1.click()
self.driver.implicitly_wait(15)
# Dismiss the home-page friendly-reminder popup
el2 = self.driver.find_element(MobileBy.ID, 'com.wlqq:id/text_positive')
el2.click()
self.driver.implicitly_wait(20)  # implicit wait: keep polling for the element until the timeout; raise if still missing
# Accept the NFC authorization dialog
el3 = self.driver.find_element(MobileBy.ID, 'android:id/button1')
el3.click()
def test_etc_home(self):
# Tap the ETC service icon and verify its label.
e2 = self.driver.find_element(MobileBy.XPATH, '//android.widget.ImageView[@text="ETC"]')
e2.click()
print(e2.get_attribute('text'))
print("点击ETC服务完成,进入ETC插件首页")
# print(self.driver.page_source)
assert 'ETC' == e2.get_attribute('text')
def test_hcb_home(self):
# Open the "ETC服务" section from the home screen.
el1 = self.driver.find_element(MobileBy.XPATH, '//android.view.View[@text="ETC服务"]')
el1.click()
def teardown(self):
sleep(20)  # hard sleep for manual observation before quitting
self.driver.quit()
|
{"/test_appium_page_object/page/main.py": ["/test_appium_page_object/page/apply_etc_card/apply_credit_card.py", "/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/testcase/test_apply_credit_card.py": ["/test_appium_page_object/page/app.py"], "/test_appium_page_object/page/apply_etc_card/apply_credit_card.py": ["/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/page/app.py": ["/test_appium_page_object/page/base_page.py", "/test_appium_page_object/page/main.py"]}
|
2,055
|
RainGod6/SDET11-LY
|
refs/heads/master
|
/test_appium/YMM_APP/TestYmmPY.py
|
# This sample code uses the Appium python client
# pip install Appium-Python-Client
# Then you can paste this into a file and simply run with Python
from time import sleep
from appium import webdriver
from appium.webdriver.common.mobileby import MobileBy
from appium.webdriver.common.touch_action import TouchAction
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
# Appium test suite for the YMM (Manbang) logistics app: navigating to the
# ETC plugin, scrolling, UiAutomator selectors, and webview assertions.
class TestYmmAPP:
def setup(self):
caps = {}
caps["platformName"] = "android"
caps["deviceName"] = "xiaomi5"
caps["appPackage"] = "com.xiwei.logistics"
caps["appActivity"] = "com.xiwei.logistics.carrier.ui.CarrierMainActivity"
# caps["noReset"] = True
self.driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
self.driver.implicitly_wait(15)  # global implicit wait
# Agree to the license dialog
self.driver.find_element_by_id("com.xiwei.logistics:id/dialog_btn_right").click()
# Explicit wait: this element renders slowly, allow up to 20s
WebDriverWait(self.driver, 20).until(expected_conditions.element_to_be_clickable((MobileBy.ID, "android:id/button1")))
# Accept the NFC authorization dialog
self.driver.find_element(MobileBy.ID, "android:id/button1").click()
# Dismiss the "got it" popup
self.driver.find_element(MobileBy.ID, "com.xiwei.logistics:id/buttons_layout").click()
# Close the ad popup via its x button
WebDriverWait(self.driver, 10).until(
expected_conditions.element_to_be_clickable((MobileBy.ID, "com.xiwei.logistics:id/iv_close")))
self.driver.find_element(MobileBy.ID, "com.xiwei.logistics:id/iv_close").click()
def test_etchome(self):
# page_source returns the page's XML structure
# print(self.driver.page_source)
# Three alternative locator strategies kept for reference (unused below):
tab = "// *[@text='服务']/../../.."  # parent-node navigation
tab1 = "//*[contains(@resource-id,'ll_tab_container')]"  # fuzzy match via contains
tab2 = "//*[contains(@resource-id,'tv_tab') and @text='服务']"  # combined predicates with `and`
# Tap the services tab to enter the Manbang services home
self.driver.find_element(MobileBy.XPATH, "//*[contains(@resource-id,'tv_tab') and @text='服务']").click()
# Swipe the screen down and back up using absolute coordinates
action = TouchAction(self.driver)
window_rect = self.driver.get_window_rect()
print(window_rect)
width = window_rect['width']
height = window_rect['height']
for i in range(3):
action.press(x=width * 1 / 2, y=height * 5 / 6).wait(2000).move_to(x=width * 1 / 2,
y=height * 1 / 6).release().perform()
# Swipe back to the original position
for i in range(3):
action.press(x=width * 1 / 2, y=height * 1 / 6).wait(2000).move_to(x=width * 1 / 2,
y=height * 5 / 6).release().perform()
etc_tab = "//*[@text='ETC']"
self.driver.find_element(MobileBy.XPATH, etc_tab).click()
WebDriverWait(self.driver, 15).until(
expected_conditions.element_to_be_clickable((MobileBy.ID, "android:id/button1")))
# Accept the NFC authorization dialog
self.driver.find_element(MobileBy.ID, "android:id/button1").click()
quick_appaly_image = "//*[contains(@resource-id,'ll_online_open_card')]"
# NOTE(review): `assert not (...) != ...` is a double negative equivalent
# to a plain equality assert — consider simplifying for readability.
assert not (self.driver.find_element(MobileBy.XPATH, quick_appaly_image).get_attribute(
"resourceId")) != "com.wlqq.phantom.plugin.etc:id/ll_online_open_card"
def test_apply_card(self):
self.driver.find_element(MobileBy.XPATH, "//*[contains(@resource-id,'tv_tab') and @text='服务']").click()
etc_tab = "//*[@text='ETC']"
self.driver.find_element(MobileBy.XPATH, etc_tab).click()
WebDriverWait(self.driver, 15).until(
expected_conditions.element_to_be_clickable((MobileBy.ID, "android:id/button1")))
# Accept the NFC authorization dialog
self.driver.find_element(MobileBy.ID, "android:id/button1").click()
quick_appaly_image = "//*[contains(@resource-id,'ll_online_open_card')]"
assert (self.driver.find_element(MobileBy.XPATH, quick_appaly_image).get_attribute(
"resourceId")) == "com.wlqq.phantom.plugin.etc:id/ll_online_open_card"
# Tap quick card application
WebDriverWait(self.driver, 15).until(
expected_conditions.element_to_be_clickable((MobileBy.XPATH, "//*[@text='快速开卡']")))
self.driver.find_element(MobileBy.XPATH, "//*[@text='快速开卡']").click()
# Tap back
self.driver.find_element(MobileBy.ID, 'com.xiwei.logistics:id/btn_back').click()
WebDriverWait(self.driver, 30).until(lambda x: len(self.driver.contexts) > 1)
print(self.driver.contexts)
def test_ui_selector(self):
self.driver.find_element(MobileBy.XPATH, "//*[contains(@resource-id,'tv_tab') and @text='服务']").click()
# Locate by scrolling into view with a UiAutomator UiScrollable selector
self.driver.find_element_by_android_uiautomator('new UiScrollable(new UiSelector().'
'scrollable(true).instance(0)).'
'scrollIntoView(new UiSelector().textContains("去卖车").'
'instance(0));').click()
# Explicit wait; the destination page is a webview, assertion may need rework
# NOTE(review): visibility_of_element_located expects a single (by, locator)
# tuple — passing two arguments here will raise TypeError at runtime.
WebDriverWait(self.driver, 10).until(expected_conditions.visibility_of_element_located(MobileBy.ID,
"com.xiwei.logistics:id/tv_title"))
assert self.driver.find_element(MobileBy.XPATH, "//*[contains(@resource-id,'tv_title')]").\
get_attribute('text') == '我要卖车'
def test_etc_services(self):
# Locator drafts only; no actions performed yet.
etc_service_more = "//*[@text='ETC服务']//*[@text='查看更多']"
etc_service_apply_credit_card = "//*[@text='ETC服务']//*[contains(@text,'全国记账卡')]//*[@text='去办卡']"
etc_service_apply_stored_card = "//*[@text='ETC服务']//*[contains(@text,'全国储值卡')]//*[@text='去办卡']"
def test_etc_apply_card(self):
# Locator drafts only; apply_card_tab is still a placeholder.
quick_apply = "//*[contains(@resource-id,'pager_banner')][1]"
apply_card_tab = " "
def teardown(self):
# self.driver.quit()
pass
|
{"/test_appium_page_object/page/main.py": ["/test_appium_page_object/page/apply_etc_card/apply_credit_card.py", "/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/testcase/test_apply_credit_card.py": ["/test_appium_page_object/page/app.py"], "/test_appium_page_object/page/apply_etc_card/apply_credit_card.py": ["/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/page/app.py": ["/test_appium_page_object/page/base_page.py", "/test_appium_page_object/page/main.py"]}
|
2,056
|
RainGod6/SDET11-LY
|
refs/heads/master
|
/leetcode/Solution.py
|
"""
给定一个整数数组 nums 和一个目标值 target,请你在该数组中找出和为目标值的那 两个 整数,并返回他们的数组下标。
你可以假设每种输入只会对应一个答案。但是,数组中同一个元素不能使用两遍。
示例:
给定 nums = [2, 7, 11, 15], target = 9
因为 nums[0] + nums[1] = 2 + 7 = 9
所以返回 [0, 1]
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/two-sum
"""
from typing import List
class Solution(object):
    """LeetCode 1 (Two Sum) solver."""

    def twoSum(self, nums, target):
        """Return indices [i, j] (i < j) of the two distinct elements summing to target.

        :type nums: List[int]
        :type target: int
        :rtype: List[int] -- empty list when no pair exists.
        """
        # One-pass hash map: O(n) instead of the original O(n^2) double
        # loop. It also fixes a correctness bug: the old inner loop began
        # at index 1 regardless of i, so i == j was possible and an element
        # could be paired with itself (e.g. nums=[1, 3, 3], target=6
        # returned [1, 1]).
        seen = {}
        for j, value in enumerate(nums):
            complement = target - value
            if complement in seen:
                return [seen[complement], j]
            seen[value] = j
        return []
if __name__ == "__main__":
# Quick manual check: for nums=[3, 2, 4] and target 6 the matching
# indices are printed.
s = Solution()
nums = [3,2,4]
b = 6
result = s.twoSum(nums, b)
print(result)
|
{"/test_appium_page_object/page/main.py": ["/test_appium_page_object/page/apply_etc_card/apply_credit_card.py", "/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/testcase/test_apply_credit_card.py": ["/test_appium_page_object/page/app.py"], "/test_appium_page_object/page/apply_etc_card/apply_credit_card.py": ["/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/page/app.py": ["/test_appium_page_object/page/base_page.py", "/test_appium_page_object/page/main.py"]}
|
2,057
|
RainGod6/SDET11-LY
|
refs/heads/master
|
/test_appium/YMM_APP/TestYmm.py
|
# This sample code uses the Appium python client
# pip install Appium-Python-Client
# Then you can paste this into a file and simply run with Python
from appium import webdriver
from appium.webdriver.common.mobileby import MobileBy
# Flat demo script: start an Appium session against the YMM logistics app
# and click through its startup dialogs step by step.
caps = {}
caps["platformName"] = "android"
caps["deviceName"] = "xiaomi5"
caps["appPackage"] = "com.xiwei.logistics"
caps["appActivity"] = "com.xiwei.logistics.carrier.ui.CarrierMainActivity"
# caps["noReset"] = True
driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
# Agree to the license dialog
el1 = driver.find_element_by_id("com.xiwei.logistics:id/dialog_btn_right")
el1.click()
# Accept NFC authorization; the element takes up to ~20s to appear
driver.implicitly_wait(25)
el2 = driver.find_element(MobileBy.ID, "android:id/button1")
el2.click()
# Dismiss the "got it" popup
el3 = driver.find_element(MobileBy.ID, "com.xiwei.logistics:id/buttons_layout")
# el3 = driver.find_element(MobileBy.XPATH, "//*[@text='知道了' and contains(@resource-id,'com.xiwei.logistics:id/buttons_layout')]")
el3.click()
# Close the ad popup via its x button
driver.implicitly_wait(15)
el4 = driver.find_element(MobileBy.ID, "com.xiwei.logistics:id/iv_close")
el4.click()
|
{"/test_appium_page_object/page/main.py": ["/test_appium_page_object/page/apply_etc_card/apply_credit_card.py", "/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/testcase/test_apply_credit_card.py": ["/test_appium_page_object/page/app.py"], "/test_appium_page_object/page/apply_etc_card/apply_credit_card.py": ["/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/page/app.py": ["/test_appium_page_object/page/base_page.py", "/test_appium_page_object/page/main.py"]}
|
2,058
|
RainGod6/SDET11-LY
|
refs/heads/master
|
/test_appium/hcb_app/test_hcb_demo.py
|
from time import sleep
from appium import webdriver
from appium.webdriver.common.mobileby import MobileBy
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
class TestHcbDemo:
def setup(self):
caps = {}
caps['platformName'] = 'android'
caps['deviceName'] = '28d6f388'
caps["appPackage"] = "com.wlqq"
caps["appActivity"] = ".activity.HomeActivity"
caps["automationname"] = "uiautomator2"
caps["chromedriverExecutable"] = "/Users/user/tool/chromedriver/2.35/chromedriver"
self.driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
self.driver.implicitly_wait(10)
self.driver.find_element(MobileBy.ID, "com.wlqq:id/dialog_btn_right").click()
WebDriverWait(self.driver, 30).until(expected_conditions.element_to_be_clickable((MobileBy.ID,
"android:id/button1")))
self.driver.find_element(MobileBy.ID, "android:id/button1").click()
self.driver.find_element(MobileBy.XPATH, "//*[@text='知道了']").click()
WebDriverWait(self.driver, 30).until(expected_conditions.element_to_be_clickable((MobileBy.XPATH,
"//*[contains(@resource-id,'content')]\
//*[@class ='android.widget.FrameLayout']//*[@class='android.widget.ImageView']")))
self.driver.find_element(MobileBy.XPATH, "//*[contains(@resource-id,'content')]\
//*[@class ='android.widget.FrameLayout']//*[@class='android.widget.ImageView']").click()
def test_etc_home(self):
self.driver.find_element(MobileBy.XPATH, "//*[@text='ETC']").click()
self.driver.find_element(MobileBy.XPATH, "//*[@text='快速办卡']").click()
def test_webview(self):
self.driver.find_element(MobileBy.XPATH, "//*[@text='ETC']").click()
WebDriverWait(self.driver, 20).until(expected_conditions.element_to_be_clickable((MobileBy.ID,
"android:id/button1")))
self.driver.find_element(MobileBy.ID, "android:id/button1").click()
WebDriverWait(self.driver, 15).until(expected_conditions.element_to_be_clickable((MobileBy.ID, 'com.wlqq.phantom.plugin.etc:id/tv_online_open_card')))
self.driver.find_element(MobileBy.ID, "com.wlqq.phantom.plugin.etc:id/tv_online_open_card").click()
print(self.driver.contexts)
self.driver.find_element(MobileBy.ID, "com.wlqq:id/btn_back").click()
# 打印当前页面结构page_source,当前xml结构
# print(self.driver.page_source)
# 等待上下文出现,webview出现
WebDriverWait(self.driver, 20).until(lambda x: (len(self.driver.contexts) > 1))
# 切换至webview容器
self.driver.switch_to.context(self.driver.contexts[-1])
# 打印当前页面结构page_source,当前html结构
print(self.driver.page_source)
self.driver.find_element(By.CSS_SELECTOR, ".button-container.fixed-button").click()
# webview中toast定位获取到div中的id属性
toast = self.driver.find_element(By.CSS_SELECTOR, "#goblin-toast").text
print(toast)
assert "未选择车牌" in toast
print(self.driver.contexts)
# self.driver.switch_to.context(self.driver.contexts['NATIVE_APP'])
self.driver.find_element(MobileBy.ID, "com.wlqq:id/back_btn").click()
def teardown(self):
    # No per-test cleanup; the driver session is managed outside this test.
    pass
|
{"/test_appium_page_object/page/main.py": ["/test_appium_page_object/page/apply_etc_card/apply_credit_card.py", "/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/testcase/test_apply_credit_card.py": ["/test_appium_page_object/page/app.py"], "/test_appium_page_object/page/apply_etc_card/apply_credit_card.py": ["/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/page/app.py": ["/test_appium_page_object/page/base_page.py", "/test_appium_page_object/page/main.py"]}
|
2,059
|
RainGod6/SDET11-LY
|
refs/heads/master
|
/test_yaml/test_yaml.py
|
import pytest
import yaml
class TestYaml:
    """YAML parsing demos driven by pytest parametrization."""

    def test_yaml(self):
        """Parse an inline YAML list of butterfly families and print it."""
        # BUGFIX: yaml.load without an explicit Loader is unsafe and raises
        # TypeError on PyYAML >= 6; safe_load parses plain data safely.
        print(yaml.safe_load("""
        - Hesperiidae
        - Papilionidae
        - Apatelodidae
        - Epiplemidae
        """))

    # BUGFIX: load the parameter pairs through a context manager so the file
    # handle is closed instead of leaking at collection time.
    with open("testyaml.yaml", encoding='utf-8') as _fh:
        _CASES = yaml.safe_load(_fh)

    @pytest.mark.parametrize("a,b", _CASES)
    def test_yaml_read(self, a, b):
        # Each yaml row supplies a pair (a, b) that must sum to 10.
        assert a + b == 10
|
{"/test_appium_page_object/page/main.py": ["/test_appium_page_object/page/apply_etc_card/apply_credit_card.py", "/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/testcase/test_apply_credit_card.py": ["/test_appium_page_object/page/app.py"], "/test_appium_page_object/page/apply_etc_card/apply_credit_card.py": ["/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/page/app.py": ["/test_appium_page_object/page/base_page.py", "/test_appium_page_object/page/main.py"]}
|
2,060
|
RainGod6/SDET11-LY
|
refs/heads/master
|
/test_appium/testApiDemo.py
|
from appium import webdriver
from appium.webdriver.common.mobileby import MobileBy
class TestApiDemo:
    """Toast-locating demo against the Android ApiDemos sample app."""

    def setup(self):
        # Desired capabilities for a local Appium session running ApiDemos.
        caps = {}
        caps['platformName'] = "android"
        caps['deviceName'] = "小米5"
        caps['appPackage'] = "io.appium.android.apis"
        caps['appActivity'] = ".ApiDemos"
        self.driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
        self.driver.implicitly_wait(10)

    def test_toast(self):
        """Open the popup-menu demo and assert on the toast text it raises."""
        self.driver.find_element(MobileBy.XPATH, "//*[@text='Views' and contains(@resource-id,'text1')]").click()
        # Scroll the list until the "Popup Menu" entry is visible, then tap it.
        self.driver.find_element(MobileBy.ANDROID_UIAUTOMATOR, 'new UiScrollable(new UiSelector().'
                                 'scrollable(true).instance(0)).'
                                 'scrollIntoView(new UiSelector().textContains("Popup Menu").'
                                 'instance(0));').click()
        self.driver.find_element(MobileBy.ACCESSIBILITY_ID, 'Make a Popup!').click()
        self.driver.find_element(MobileBy.XPATH, "//*[@text='Search']").click()
        # Toasts are short-lived, so capture the text into a variable right away.
        toast = self.driver.find_element(MobileBy.XPATH, "//*[@class='android.widget.Toast']").text
        print(toast)
        assert 'Clicked' in toast
        assert 'popup menu' in toast
        assert 'API Demos:Clicked popup menu item Search' == toast

    def teardown(self):
        pass
|
{"/test_appium_page_object/page/main.py": ["/test_appium_page_object/page/apply_etc_card/apply_credit_card.py", "/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/testcase/test_apply_credit_card.py": ["/test_appium_page_object/page/app.py"], "/test_appium_page_object/page/apply_etc_card/apply_credit_card.py": ["/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/page/app.py": ["/test_appium_page_object/page/base_page.py", "/test_appium_page_object/page/main.py"]}
|
2,061
|
RainGod6/SDET11-LY
|
refs/heads/master
|
/test_appium_page_object/page/base_page.py
|
import yaml
from appium.webdriver import WebElement
from appium.webdriver.webdriver import WebDriver
import logging
class BasePage:
    """Base class for page objects: wraps the driver, recovers from
    black-listed popups while locating elements, and can run yaml-described
    test steps."""
    logging.basicConfig(level=logging.INFO)  # configure the root logger once
    _driver: WebDriver
    # Locators of known interfering popups (ads, dialogs) to dismiss and retry.
    _black_list = []
    _error_max = 5    # max consecutive lookup failures before giving up
    _error_count = 0  # consecutive lookup failures seen so far
    _params = {}      # BUGFIX: substitution variables for yaml step templates
                      # (was referenced in steps() but never defined)

    def __init__(self, driver: WebDriver = None):
        self._driver = driver

    def find(self, locator, value: str = None):
        """Locate an element; on failure, dismiss a black-listed popup and retry.

        ``locator`` is either a ready-made (by, value) tuple, or a by-strategy
        used together with ``value``.
        """
        # BUGFIX: pass lazy %-style args; logging.info(locator, value) treated
        # the locator as the format string and garbled the log record.
        logging.info("find %s %s", locator, value)
        try:
            # Look the element up with whichever calling convention was used.
            element = self._driver.find_element(*locator) if isinstance(locator, tuple) else self._driver.find_element(
                locator, value)
            # Lookup succeeded: reset the consecutive-failure counter.
            self._error_count = 0
            return element
        except Exception as e:
            # Too many consecutive failures: stop retrying, surface the error.
            if self._error_count > self._error_max:
                raise e
            # BUGFIX: increment the failure *count*, not the limit. The
            # original bumped _error_max, so retries could never terminate.
            self._error_count += 1
            # Dismiss the first visible black-listed popup, then retry.
            for element in self._black_list:
                elements = self._driver.find_elements(*element)
                if len(elements) > 0:
                    elements[0].click()
                    # Retry the original lookup recursively.
                    return self.find(locator, value)
            # No black-listed popup was present either: re-raise the error.
            logging.warning("black list no found")  # BUGFIX: warn() is deprecated
            raise e

    def steps(self, path):
        """Execute the yaml-described steps stored at ``path``."""
        with open(path) as f:
            # Read the step definition file.
            steps: list[dict] = yaml.safe_load(f)
        # The most recently located element; actions apply to it.
        element: WebElement = None
        for step in steps:
            logging.info(step)
            if "by" in step.keys():
                element = self.find(step["by"], step["locator"])
            if "action" in step.keys():
                action = step["action"]
                if action == "find":
                    pass
                elif action == "click":
                    element.click()
                elif action == "text":
                    # BUGFIX: WebElement.text is a property; calling it raised
                    # "'str' object is not callable".
                    element.text
                elif action == "attribute":
                    element.get_attribute(step["value"])
                elif action in ["send", "input"]:
                    content: str = step["value"]
                    # Substitute {name} placeholders from the params mapping.
                    for key in self._params.keys():
                        content = content.replace("{%s}" % key, self._params[key])
                    element.send_keys(content)
|
{"/test_appium_page_object/page/main.py": ["/test_appium_page_object/page/apply_etc_card/apply_credit_card.py", "/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/testcase/test_apply_credit_card.py": ["/test_appium_page_object/page/app.py"], "/test_appium_page_object/page/apply_etc_card/apply_credit_card.py": ["/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/page/app.py": ["/test_appium_page_object/page/base_page.py", "/test_appium_page_object/page/main.py"]}
|
2,062
|
RainGod6/SDET11-LY
|
refs/heads/master
|
/unit/test_unit.py
|
import unittest
class TestSum(unittest.TestCase):
    """Sanity check for integer addition, expressed in unittest style."""

    def test_sum(self):
        # Same check as a bare assert, via the TestCase helper.
        self.assertEqual(1 + 2, 3)
|
{"/test_appium_page_object/page/main.py": ["/test_appium_page_object/page/apply_etc_card/apply_credit_card.py", "/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/testcase/test_apply_credit_card.py": ["/test_appium_page_object/page/app.py"], "/test_appium_page_object/page/apply_etc_card/apply_credit_card.py": ["/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/page/app.py": ["/test_appium_page_object/page/base_page.py", "/test_appium_page_object/page/main.py"]}
|
2,075
|
mafrasiabi/Bayesian-GLM-for-Classification
|
refs/heads/master
|
/pp3.py
|
#!/usr/local/bin/python3
import sys
import numpy as np
import pandas as pd
import random
import math
import matplotlib.pyplot as plt
import pprint
import timeit
# Load a dataset by name and randomly split it into training and test sets.
def read_csv(dataset_name):
    """Read features/labels for *dataset_name* and split 2/3 train, 1/3 test.

    The special "irlstest" sanity dataset is returned whole, serving as both
    the training and the test set.
    """
    features = pd.read_csv("pp3data/" + dataset_name + ".csv", header=None).values
    labels = pd.read_csv("pp3data/labels-" + dataset_name + ".csv", header=None).values
    if dataset_name == "irlstest":
        return features, labels, features, labels
    combined = np.column_stack((features, labels))
    # np.random.shuffle(combined)
    # The first 2/3 of the rows train; the rest evaluate.
    split_at = int(2 / 3 * len(combined))
    train = combined[:split_at, :-1]
    trainL = combined[:split_at, -1:]
    test = combined[split_at:, :-1]
    testL = combined[split_at:, -1:]
    return train, trainL, test, testL
# First and second derivative pieces for Logistic Regression IRLS.
def compute_R_d_Logistic(a, t):
    """Return (R, d) for the logistic model.

    ``R`` is the diagonal Hessian-weight matrix y*(1-y); ``d`` is the
    gradient residual t - y.
    """
    y = sigmoid(-a)
    # Residual term of the gradient.
    d = t - y
    # Diagonal Hessian weights: y_i * (1 - y_i).
    weights = y * (1 - y)
    R = np.diag(weights.ravel())
    return R, d
# First and second derivative pieces for Poisson Regression IRLS.
def compute_R_d_Poisson(a, t):
    """Return (R, d) for the Poisson model.

    ``R`` is the diagonal matrix of predicted means; ``d`` is t - y.
    """
    # Predicted mean per example, kept as a column vector.
    y = np.array([[math.exp(ai)] for ai in a])
    d = t - y
    # For Poisson the Hessian weights are the means themselves.
    R = np.diag(y.ravel())
    return R, d
# First and second derivative pieces for Ordinal Regression IRLS.
def compute_R_d_Ordinal(a, t):
    """Return (R, d) for the ordinal (cumulative-link) model.

    ``a`` holds the linear activations, ``t`` the ordinal labels 1..5.
    ``R`` is the diagonal Hessian-weight matrix; ``d`` is the gradient
    residual, returned as an (N, 1) column vector like the other models.
    """
    # Fixed thresholds for the 5 ordinal levels; the ends are open intervals.
    phiJ = [-math.inf, -2, -1, 0, 1, math.inf]
    s = 1
    d = []
    r = []
    for i, ai in enumerate(a):
        ti = int(t[i][0])
        yiti = yij(ai, phiJ[ti], s)
        yiti_1 = yij(ai, phiJ[ti - 1], s)
        d.append(yiti + yiti_1 - 1)
        # BUGFIX: the second term must be y_{i,t-1} * (1 - y_{i,t-1});
        # the original multiplied y_{i,t-1} by itself, which does not match
        # the cumulative-link second derivative (see PRML-style derivation).
        r.append(s * s * (yiti * (1 - yiti) + yiti_1 * (1 - yiti_1)))
    R = np.diag(np.array(r).ravel())
    # BUGFIX: return d as an (N, 1) column vector. As a plain list,
    # g = phi^T d - alpha*w broadcast to an (M, M) matrix downstream and
    # crashed the Newton-Raphson loop on the following iteration.
    d = np.array(d).reshape(-1, 1)
    return R, d
# Prediction function for Logistic regression.
def prediction_Logistic(a):
    """Map activations to 0/1 class labels, thresholding at 0.5."""
    probs = sigmoid(-a)
    return [int(p >= 0.5) for p in probs]
# Prediction function for Poisson regression.
def prediction_Poisson(a):
    """Predict counts: the floor of each predicted mean exp(a_i)."""
    return [math.floor(math.exp(ai)) for ai in a]
# Prediction function for Ordinal regression.
def prediction_Ordinal(a):
    """Predict ordinal labels 1..5 by choosing the most probable level."""
    s = 1
    phiJ = [-math.inf, -2, -1, 0, 1, math.inf]
    labels = []
    for ai in a:
        # P(t = j) = y_j - y_{j-1} for each of the five levels.
        level_probs = [yij(ai, phiJ[j], s) - yij(ai, phiJ[j - 1], s)
                       for j in range(1, 6)]
        labels.append(level_probs.index(max(level_probs)) + 1)
    return labels
#Function to plot the error rate as a function of training set sizes
def plot_summary(data,sizes,model,dataset_name,alpha):
    """Plot mean error rate (with std error bars) versus training-set size.

    ``data`` is the list of per-size summary dicts built by GLM_variant.
    The figure is saved as "<dataset>_<model>.png" and then displayed.
    """
    errors = [d.get("Mean") for d in data]
    # NOTE(review): 'normal' is not a standard matplotlib font family —
    # matplotlib warns and falls back; presumably 'sans-serif' was intended.
    font = {'family' : 'normal',
    'weight' : 'bold',
    'size' : 22}
    plt.rc('font', **font)
    std = [d.get("STD") for d in data]
    # Start from a clean figure before drawing.
    plt.gcf().clear()
    plt.figure(figsize=(25,25),dpi=90)
    plt.errorbar(sizes,errors,yerr=std,ecolor='r', color = 'b', capsize=25, label = "GLM Model : "+model)
    plt.xlabel("Training Sizes")
    plt.ylabel("Error Rate")
    plt.grid("on")
    plt.title("Dataset: " +dataset_name+" | Alpha: "+str(alpha))
    plt.legend(loc="best")
    # Save first, then show interactively.
    plt.savefig(dataset_name + "_"+ model + '.png')
    plt.show()
#Function implement the common GLM function
def GLM_variant(model, dataset_name):
    """Train and evaluate one GLM (logistic/poisson/ordinal) on a dataset.

    For each training-set size (10%..100% of the training split) this runs
    30 trials of Newton-Raphson (IRLS) with a Gaussian prior of precision
    ``alpha``, evaluates on the held-out test split, then writes and plots
    a summary of error rates.
    """
    #Read dataset and split to Train and Test sets
    train, trainL, test, testL = read_csv(dataset_name)
    #Set training set sizes as 0.1, 0.2, 0.3... 1 (irlstest is used whole)
    training_set_sizes = [1] if dataset_name == "irlstest" else [int(i/10*len(train)) for i in range(1, 11, 1)]
    summary = []
    for size in training_set_sizes:
        trials = 30 if dataset_name != "irlstest" else 1
        error_predictions = []
        iterations = []
        time = []
        #Repeat for 30 trials
        for trial in range(0,trials):
            if dataset_name == "irlstest":
                train_sub, trainL_sub = train, trainL
            else:
                #train_sub, trainL_sub = zip(*random.sample(list(zip(train, trainL)),size))
                # BUGFIX: the label subset must come from trainL — the
                # original sliced the *features* twice, so t below held
                # feature rows instead of labels.
                train_sub, trainL_sub = train[:size], trainL[:size]
            t = trainL_sub
            N = len(train_sub)
            w0 = np.array([[1]] * N)
            #Append data matrix with ones (bias column)
            phi = np.concatenate((w0, train_sub), axis=1)
            M = len(phi[0])
            #Set parameter value: Gaussian-prior precision (regularization)
            alpha = 10
            I = np.eye(M)
            #Newton Raphson starts with w0 = vector of zeroes
            w = np.array([[0]] * M)
            convergence_test = 1
            itr = 1
            start = timeit.default_timer()
            #Repeat Newton Raphson update formula until convergence or 100 iterations
            while itr < 100 and convergence_test > 10 ** -3:
                w_old = w
                a = np.matmul(phi,w_old)
                #Compute first and second Derivatives based on Model
                if model == "logistic":
                    R, d = compute_R_d_Logistic(a, t)
                elif model == "poisson":
                    R, d = compute_R_d_Poisson(a, t)
                elif model == "ordinal":
                    R, d = compute_R_d_Ordinal(a, t)
                #First derivative (gradient with the prior term)
                g = np.matmul(np.transpose(phi),d) - (alpha * w)
                #Hessian matrix of second derivatives
                H = -(alpha * I) - np.matmul(np.transpose(phi),np.matmul(R,phi))
                #Newton Raphson update formula for GLM:
                #w_new = w_old - inverse(H)*g (skipped when H is singular)
                if np.linalg.det(H) != 0:
                    w_new = w_old - np.matmul(np.linalg.inv(H),g)
                #Relative-change convergence test
                if np.linalg.norm(w_old) != 0:
                    convergence_test = np.linalg.norm(w_new - w_old) / np.linalg.norm(w_old)
                w = w_new
                itr += 1
            stop = timeit.default_timer()
            iterations.append(itr)
            time.append(stop-start)
            #Prediction on the held-out test split
            Ntest = len(test)
            w0 = np.array([[1]] * Ntest)
            phi = np.concatenate((w0, test), axis=1)
            #We predict using WMap calculated earlier using Newton Raphson
            a = np.matmul(phi,w)
            if model == "logistic":
                y = prediction_Logistic(a)
                error_predictions.append(((y-testL.flatten()) != 0).sum())
            if model == "poisson":
                y = prediction_Poisson(a)
                error_predictions.append((abs(y-testL.flatten()) != 0).sum())
            if model == "ordinal":
                y = prediction_Ordinal(a)
                error_predictions.append((abs(y-testL.flatten()) != 0).sum())
        print(size,"Done")
        # Aggregate this size's trials into one summary row.
        summary_data = {}
        summary_data['Model'] = model
        summary_data['Run Time'] = np.mean(time)
        summary_data['Dataset Size'] = size
        summary_data['Mean'] = np.mean(np.array(error_predictions)/Ntest)
        summary_data['STD'] = np.std(np.array(error_predictions)/Ntest)
        summary_data['Iterations'] = np.mean(iterations)
        summary.append(summary_data)
    pprint.pprint(summary)
    filename = 'Output Summary '+model+ ' ' + dataset_name + '.txt'
    #Write Summary to file
    with open(filename, 'wt') as out:
        pprint.pprint(summary, stream=out)
    #Plot graph
    plot_summary(summary,training_set_sizes,model, dataset_name, alpha)
# Utility to calculate the logistic function.
def sigmoid(x):
    """Return 1 / (1 + e^x).

    Note: callers pass the *negated* activation, so sigmoid(-a) yields the
    conventional logistic probability of a.
    """
    z = np.exp(x)
    return 1 / (1 + z)
# Utility to calculate the sigmoid of s*(phij - a) for the Ordinal model.
def yij(a, phij, s):
    """Numerically stable logistic of s*(phij - a)."""
    x = np.array(s * (phij - a))
    if x >= 0:
        # Non-negative x: exp(-x) cannot overflow.
        return 1 / (1 + np.exp(-1 * x))
    # Negative x: equivalent z/(1+z) form with z = exp(x) stays stable.
    z = np.exp(x)
    return z / (1 + z)
#Code Execution starts here!
if __name__ == "__main__":
    #Sample dataset names = ["A","usps","AO","AP"]
    #Sample model names = ["logistic","poisson","ordinal"]
    # Require exactly two CLI arguments: <dataset_name> <model>.
    if(len(sys.argv) != 3):
        raise Exception('Error: expected 2 command line arguments!')
    #Code expects dataset name and model name as command line argument
    dataset_name = sys.argv[1]
    model = sys.argv[2]
    #Common function to generate GLM model, predict and evaluate
    GLM_variant(model, dataset_name)
    print("\n\n..Done!")
|
{"/model_selection.py": ["/pp3.py"]}
|
2,076
|
mafrasiabi/Bayesian-GLM-for-Classification
|
refs/heads/master
|
/model_selection.py
|
#!/usr/local/bin/python3
import sys
import numpy as np
import pandas as pd
import random
import math
import matplotlib.pyplot as plt
import pprint
import timeit
from numpy import linalg as LA
import pp3
#Function to perform cross validation for model selection. Based on my code in Assignment 2
def model_selection_using_cross_validation(train, trainR, test, testR, dataset_name, model):
    """Pick the regularization strength alpha via 10-fold cross validation.

    Tries alpha in {0.1..0.9} and {1..100} and prints the value with the
    lowest mean fold error.

    NOTE(review): the ``test``/``testR`` parameters are never used as a
    held-out set — ``test`` is rebound to the current fold inside the loop.
    """
    len_train = np.shape(train)[0]
    step = len_train//10
    error_for_params = []
    #Running cross validation with below values of parameters
    test_params = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9] + [i for i in range(1,101)]
    for l in test_params:
        error_predictions = []
        #Steps = 10 to perform 10 fold cross validation
        for i in range(0,len_train, step):
            #Training set will be all portions except i-th portion
            current_train = np.delete(train,slice(i,i+step),0)
            current_trainR = np.delete(trainR,slice(i,i+step),0)
            w = calculate_w(l,current_train,current_trainR, model)
            # The held-out i-th fold becomes this iteration's evaluation set.
            test = train[i:i+step]
            testL = trainR[i:i+step]
            Ntest = len(test)
            w0 = np.array([[1]] * Ntest)
            #Use remaining part of data set for prediction (bias column added)
            phi = np.concatenate((w0, test), axis=1)
            a = np.matmul(phi,w)
            #We predict using WMap calculated earlier using Newton Raphson
            if model == "logistic":
                y = pp3.prediction_Logistic(a)
                error_predictions.append(((y-testL.flatten()) != 0).sum())
            if model == "poisson":
                y = pp3.prediction_Poisson(a)
                error_predictions.append((abs(y-testL.flatten()) != 0).sum())
            if model == "ordinal":
                y = pp3.prediction_Ordinal(a)
                error_predictions.append((abs(y-testL.flatten()) != 0).sum())
        error_for_params.append(avg(error_predictions))
    print("Dataset: ", dataset_name)
    print("--MODEL SELECTION USING CROSS VALIDATION--")
    print("Parameter: ", str(test_params[error_for_params.index(min(error_for_params))]))
#Using same function as pp3 to calculate w.
#Didn't get enough time to generalize the function written in pp3 to return only w for cross validation,
#but the model-specific helpers are called from the pp3 module.
def calculate_w(l, train_sub, trainL_sub, model):
    """Run Newton-Raphson (IRLS) and return the MAP weight vector w.

    ``l`` is the Gaussian-prior precision (alpha); the model-specific
    derivative pieces come from pp3.compute_R_d_*.
    """
    t = trainL_sub
    N = len(train_sub)
    w0 = np.array([[1]] * N)
    #Append data matrix with ones (bias column)
    phi = np.concatenate((w0, train_sub), axis=1)
    M = len(phi[0])
    #Set parameter value
    alpha = l
    I = np.eye(M)
    #Newton Raphson starts with w0 = vector of zeroes
    w = np.array([[0]] * M)
    convergence_test = 1
    itr = 1
    #Repeat Newton Raphson update formula until convergence or 100 iterations
    while itr < 100 and convergence_test > 10 ** -3:
        w_old = w
        a = np.matmul(phi,w_old)
        #Compute first and second Derivatives based on Model
        if model == "logistic":
            R, d = pp3.compute_R_d_Logistic(a, t)
        elif model == "poisson":
            R, d = pp3.compute_R_d_Poisson(a, t)
        elif model == "ordinal":
            R, d = pp3.compute_R_d_Ordinal(a, t)
        #First derivative (gradient with the prior term)
        g = np.matmul(np.transpose(phi),d) - (alpha * w)
        #Hessian matrix of second derivatives
        H = -(alpha * I) - np.matmul(np.transpose(phi),np.matmul(R,phi))
        #Newton Raphson update formula for GLM:
        #w_new = w_old - inverse(H)*g (skipped when H is singular)
        if np.linalg.det(H) != 0:
            w_new = w_old - np.matmul(np.linalg.inv(H),g)
        #Relative-change convergence test
        if np.linalg.norm(w_old) != 0:
            convergence_test = np.linalg.norm(w_new - w_old) / np.linalg.norm(w_old)
        w = w_new
        itr += 1
    return w
# Arithmetic mean of a list.
def avg(lst):
    """Return the mean of *lst* (raises ZeroDivisionError when empty)."""
    total = sum(lst)
    return total / len(lst)
#Code Execution starts here!
if __name__ == "__main__":
    #Sample dataset names = ["A","usps","AO","AP"]
    #Sample model names = ["logistic","poisson","ordinal"]
    # Require exactly two CLI arguments: <dataset_name> <model>.
    if(len(sys.argv) != 3):
        raise Exception('Error: expected 2 command line arguments!')
    #Code expects dataset name and model name as command line argument
    dataset_name = sys.argv[1]
    model = sys.argv[2]
    #Common function to generate GLM model, predict and evaluate
    # Load the split once, then run 10-fold CV to pick alpha.
    train, trainL, test, testL = pp3.read_csv(dataset_name)
    model_selection_using_cross_validation(train, trainL, test, testL, dataset_name, model)
    print("..Done!")
|
{"/model_selection.py": ["/pp3.py"]}
|
2,081
|
cmontemuino/dbschools
|
refs/heads/master
|
/stusched/app/models.py
|
from django.db import models
class Course(models.Model):
    """A course offering; concrete timings live on Section rows."""
    name = models.CharField(max_length=100)
    description = models.TextField()
    def __str__(self):
        return self.name.__str__()
# Lifecycle states stored in Section.scheduled_status.
PROPOSED = 1
ACCEPTING = 2
SCHEDULED = 3
# Human-readable names for the status codes above.
statuses = {
    PROPOSED: 'proposed',
    ACCEPTING: 'accepting',
    SCHEDULED: 'scheduled'
}
class Section(models.Model):
    """A concrete scheduling of a Course: start time, length, price, capacity."""
    start_time = models.DateTimeField()
    duration_per_day = models.DurationField()
    num_days = models.DecimalField(max_digits=3, decimal_places=0, default=1)
    # NOTE(review): ForeignKey without on_delete only works on Django < 2.0.
    course = models.ForeignKey(Course)
    price = models.DecimalField(max_digits=6, decimal_places=2)
    min_students = models.IntegerField(default=3)
    max_students = models.IntegerField(default=6)
    # Holds one of the module-level PROPOSED / ACCEPTING / SCHEDULED codes.
    scheduled_status = models.IntegerField()
    def end_time(self):
        # End of one day's meeting, not of the whole multi-day section.
        return self.start_time + self.duration_per_day
    def __str__(self):
        return "At %s" % (self.start_time.__str__())
class Parent(models.Model):
    """A parent/guardian contact; students reference their parent."""
    name = models.CharField(max_length=100)
    email = models.EmailField()
    def __str__(self):
        return self.name.__str__()
class Student(models.Model):
    """A child enrolled in sections; belongs to one Parent."""
    name = models.CharField(max_length=100)
    # NOTE(review): ForeignKey without on_delete only works on Django < 2.0.
    parent = models.ForeignKey(Parent)
    sections = models.ManyToManyField(Section, blank=True)
    def proposed_sections(self):
        # This student's sections that are still only proposed.
        return self.sections.filter(scheduled_status=PROPOSED)
    def accepting_sections(self):
        # This student's sections currently accepting enrollment.
        return self.sections.filter(scheduled_status=ACCEPTING)
    def scheduled_sections(self):
        # This student's sections confirmed on the calendar.
        return self.sections.filter(scheduled_status=SCHEDULED)
    def __str__(self):
        return self.name.__str__()
|
{"/stusched/app/admin.py": ["/stusched/app/models.py"], "/stusched/app/views.py": ["/stusched/app/models.py"]}
|
2,082
|
cmontemuino/dbschools
|
refs/heads/master
|
/stusched/app/admin.py
|
from django.contrib import admin
from .models import *
class StudentInline(admin.TabularInline):
    """Edit a parent's students inline on the Parent admin page."""
    model = Student
    extra = 0  # no blank extra rows by default
class ParentAdmin(admin.ModelAdmin):
    """Parent admin page with its students shown inline."""
    inlines = [StudentInline]
# Register the app's models with the default admin site.
admin.site.register(Course)
admin.site.register(Section)
admin.site.register(Parent, ParentAdmin)
admin.site.register(Student)
|
{"/stusched/app/admin.py": ["/stusched/app/models.py"], "/stusched/app/views.py": ["/stusched/app/models.py"]}
|
2,083
|
cmontemuino/dbschools
|
refs/heads/master
|
/stusched/app/views.py
|
from django.shortcuts import render
from .models import Course, Section, Parent
class ScheduledCourse(object):
    """View-model pairing a course's name/description with its sections."""

    def __init__(self, name, description, sections):
        self.name = name
        self.description = description
        self.sections = sections

    def __str__(self, *args, **kwargs):
        # Same "name description" rendering, assembled via join.
        return ' '.join([self.name, self.description])
def index(request):
    """List every course that has at least one section, sections in time order."""
    sections = Section.objects.order_by('start_time')
    # Courses that actually appear in some section.
    scheduled_courses = set((s.course for s in sections))
    # Lazily build one view-model per course (name order), each carrying its
    # own sections already sorted by start time.
    courses = (ScheduledCourse(c.name, c.description,
               [s for s in sections if s.course == c]) for c in Course.objects.order_by('name') if c in scheduled_courses)
    return render(request, 'app/courses.html', {'courses': courses})
def status(request):
    """Show enrollment status for every parent, ordered by name."""
    parents = Parent.objects.order_by('name')
    return render(request, 'app/status.html', {'parents': parents})
|
{"/stusched/app/admin.py": ["/stusched/app/models.py"], "/stusched/app/views.py": ["/stusched/app/models.py"]}
|
2,084
|
LasseJacobs/std_unordered_map_py_wrapper
|
refs/heads/master
|
/map.py
|
from ctypes import *
# Load the compiled C++ shim around std::unordered_map<string, string>.
lib = cdll.LoadLibrary('./libmap.so')
class c_result(Structure):
    # Mirrors the C struct returned by m_find: the value (NULL when absent)
    # plus a flag saying whether the key was present.
    _fields_ = [('value', c_char_p), ('found', c_bool)]
# --- ctypes signatures for each exported function ---------------------------
# Functions with no explicit restype default to c_int.
###################
# __init__(self):
lib.new_map.restype = c_void_p;
###################
# empty(self):
lib.m_empty.argtypes = [c_void_p]
# size(self):
lib.m_size.argtypes = [c_void_p]
lib.m_size.restype = c_ulong
# max_size(self):
lib.m_max_size.argtypes = [c_void_p]
lib.m_max_size.restype = c_ulong
##################
# clear(self):
lib.m_clear.argtypes = [c_void_p]
# set(self, key, value):
lib.m_set.argtypes = [c_void_p, c_char_p, c_char_p]
# erase(self, key):
lib.m_erase.argtypes = [c_void_p, c_char_p]
# swap(self, other_map):
lib.m_swap.argtypes = [c_void_p, c_void_p]
##################
# get(self, key):
lib.m_get.argtypes = [c_void_p, c_char_p]
lib.m_get.restype = c_char_p
# count(self, key):
lib.m_count.argtypes = [c_void_p, c_char_p]
lib.m_count.restype = c_ulong
# find(self, key):
lib.m_find.argtypes = [c_void_p, c_char_p]
lib.m_find.restype = c_result
################
# bucket_count(self):
lib.m_bucket_count.argtypes = [c_void_p]
lib.m_bucket_count.restype = c_ulong
# max_bucket_count(self):
lib.m_max_bucket_count.argtypes = [c_void_p]
lib.m_max_bucket_count.restype = c_ulong
# bucket_size(self, n):
lib.m_bucket_size.argtypes = [c_void_p, c_ulong]
lib.m_bucket_size.restype = c_ulong
# bucket(self, key):
lib.m_bucket.argtypes = [c_void_p, c_char_p]
lib.m_bucket.restype = c_ulong
###############
# load_factor(self):
lib.m_load_factor.argtypes = [c_void_p]
lib.m_load_factor.restype = c_float
# max_load_factor(self):
lib.m_get_max_load_factor.argtypes = [c_void_p]
lib.m_get_max_load_factor.restype = c_float
# max_load_factor(self, ml):
lib.m_set_max_load_factor.argtypes = [c_void_p, c_float]
# rehash(self, count):
lib.m_rehash.argtypes = [c_void_p, c_ulong]
# reserve(self, count):
lib.m_reserve.argtypes = [c_void_p, c_ulong]
###############
# __del__(self):
lib.delete_map.argtypes = [c_void_p]
class std_result(object):
    """Python-side mirror of c_result: a looked-up value plus a found flag."""

    def __init__(self, value, found):
        self.found = found
        self.value = value
class std_map(object):
    """Thin ctypes wrapper around a C++ std::unordered_map<string, string>."""

    def __init__(self):
        # Opaque pointer to the native map instance.
        self.obj = lib.new_map()

    ## Capacity
    def empty(self):
        """Return whether the map holds no elements."""
        return lib.m_empty(self.obj)

    def size(self):
        """Return the number of stored key/value pairs."""
        return lib.m_size(self.obj)

    def max_size(self):
        """Return the maximum number of elements the native map can hold."""
        return lib.m_max_size(self.obj)

    ## Modifiers
    def clear(self):
        """Remove every element."""
        lib.m_clear(self.obj)

    def set(self, key, value):
        """Insert or overwrite *key* with *value* (both str)."""
        b_key = key.encode('utf-8')
        b_value = value.encode('utf-8')
        lib.m_set(self.obj, b_key, b_value)

    def erase(self, key):
        """Remove *key* if present."""
        b_key = key.encode('utf-8')
        lib.m_erase(self.obj, b_key)

    def swap(self, other_map):
        """Exchange contents with another std_map."""
        lib.m_swap(self.obj, other_map.obj)

    ## Lookup
    def get(self, key):
        """Return the value stored under *key*, or None when absent."""
        b_key = key.encode('utf-8')
        value = lib.m_get(self.obj, b_key)
        return value.decode() if value else None

    def count(self, key):
        """Return the number of entries stored under *key* (0 or 1)."""
        b_key = key.encode('utf-8')
        return lib.m_count(self.obj, b_key)

    def find(self, key):
        """Look up *key*; return a std_result(value, found)."""
        b_key = key.encode('utf-8')
        pw_result = lib.m_find(self.obj, b_key)
        value = pw_result.value.decode() if pw_result.found else None
        return std_result(value , pw_result.found)

    ## Bucket Interface
    def bucket_count(self):
        """Return the current number of hash buckets."""
        return lib.m_bucket_count(self.obj)

    def max_bucket_count(self):
        """Return the maximum possible number of buckets."""
        return lib.m_max_bucket_count(self.obj)

    def bucket_size(self, n):
        """Return how many elements live in bucket *n*."""
        return lib.m_bucket_size(self.obj, n)

    def bucket(self, key):
        """Return the index of the bucket *key* hashes to."""
        b_key = key.encode('utf-8')
        return lib.m_bucket(self.obj, b_key)

    ## Hash Policy
    def load_factor(self):
        """Return the current average number of elements per bucket."""
        return lib.m_load_factor(self.obj)

    def max_load_factor(self, ml=None):
        """Get the max load factor, or set it when *ml* is given.

        BUGFIX: the original defined max_load_factor twice (a getter and a
        setter); the second definition shadowed the first, so the getter was
        unreachable. Merged into one method — existing setter-style calls
        max_load_factor(ml) behave exactly as before.
        """
        if ml is None:
            return lib.m_get_max_load_factor(self.obj)
        lib.m_set_max_load_factor(self.obj, ml)

    def rehash(self, count):
        """Grow the bucket count to at least *count* and rehash."""
        lib.m_rehash(self.obj, count)

    def reserve(self, count):
        """Reserve space for at least *count* elements."""
        lib.m_reserve(self.obj, count)

    ## Destructor
    def __del__(self):
        # Free the native map when the wrapper is garbage collected.
        lib.delete_map(self.obj)
|
{"/main.py": ["/map.py"]}
|
2,085
|
LasseJacobs/std_unordered_map_py_wrapper
|
refs/heads/master
|
/main.py
|
from map import std_map
# Demo: create a native-backed map, store one pair, and read it back.
m = std_map()
m.set("demo", "Hello World")
print(m.get("demo"))
|
{"/main.py": ["/map.py"]}
|
2,109
|
PI2-Estufa/iluminationServer
|
refs/heads/master
|
/db.py
|
import datetime
import os
from sqlalchemy import create_engine, Column, Integer, Boolean, Unicode, Sequence, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# Engine/session wired to the POSTGRES_URL env var; echo=True logs all SQL.
engine = create_engine(os.environ.get("POSTGRES_URL"), echo=True)
Session = sessionmaker(bind=engine)
session = Session()  # module-level shared session used by the RPC service
Base = declarative_base()
class Ilumination(Base):
    """One illumination reading: an on/off value plus its creation time."""
    __tablename__ = 'ilumination'
    id = Column(Integer,
                Sequence('ilumination_id_seq'), primary_key=True)
    value = Column(Boolean)
    created_date = Column(DateTime, default=datetime.datetime.utcnow)
# Create the table on import when it does not already exist.
Base.metadata.create_all(engine)
|
{"/ilumination_server.py": ["/db.py"]}
|
2,110
|
PI2-Estufa/iluminationServer
|
refs/heads/master
|
/ilumination_server.py
|
from nameko.rpc import rpc
import db
from db import Ilumination
from psycopg2 import OperationalError
class IluminationServer():
    """Nameko RPC service that persists illumination readings."""
    name = "ilumination_server"

    @rpc
    def receive_ilumination(self, ilumination):
        """Best-effort store of one reading; always echoes it back."""
        i = Ilumination()
        i.value = ilumination
        try:
            db.session.add(i)
            db.session.commit()
        # BUGFIX: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; keep the best-effort rollback but only for
        # ordinary exceptions.
        except Exception:
            db.session.rollback()
        finally:
            # Release the session back to the pool regardless of outcome.
            db.session.close()
        return ilumination
|
{"/ilumination_server.py": ["/db.py"]}
|
2,111
|
3toe/DojosWithTemplates
|
refs/heads/main
|
/dojo_ninjas_app/models.py
|
from django.db import models
class Dojo(models.Model):
    """A training location; ninjas belong to exactly one dojo."""
    name = models.CharField(max_length=255)
    city = models.CharField(max_length=255)
    state = models.CharField(max_length=2)
    # Optional free-text description (nullable for legacy rows).
    desc = models.CharField(max_length=255, null=True)
    def __repr__(self):
        return f"<{self.name} ({self.id})>"
class Ninja(models.Model):
    """A member of a Dojo; deleting the dojo cascades to its ninjas."""
    dojo = models.ForeignKey(Dojo, related_name="ninjas", on_delete = models.CASCADE)
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    def __repr__(self):
        return f"<{self.last_name} ({self.dojo})>"
|
{"/dojo_ninjas_app/views.py": ["/dojo_ninjas_app/models.py"]}
|
2,112
|
3toe/DojosWithTemplates
|
refs/heads/main
|
/dojo_ninjas_app/views.py
|
from django.db import reset_queries
from django.shortcuts import render, redirect
from .models import Dojo, Ninja
def index(request):
    """Render the home page with all dojos and ninjas."""
    context = {
        "Dojos" : Dojo.objects.all(),
        "Ninjas" : Ninja.objects.all()
    }
    return render(request, "index.html", context)
def process(request):
    """Handle the combined dojo/ninja creation form, then redirect home.

    Validation is minimal: any blank required field just redirects without
    creating a row.
    """
    if "DojoButton" in request.POST:
        if request.POST['name'] == "" or request.POST['city'] == "" or request.POST['state'] == "":
            return redirect('/')
        Dojo.objects.create(name = request.POST['name'], city = request.POST['city'], state = request.POST['state'])
        return redirect('/')
    if "NinjaButton" in request.POST:
        if request.POST['sel_dojo'] == "" or request.POST['Fname'] == "" or request.POST['Lname'] == "":
            return redirect('/')
        Ninja.objects.create(dojo = Dojo.objects.get(id=request.POST['sel_dojo']), first_name = request.POST['Fname'], last_name = request.POST['Lname'])
        return redirect('/')
    # BUGFIX: fall through for any other request — returning None made
    # Django raise "The view didn't return an HttpResponse object".
    return redirect('/')
|
{"/dojo_ninjas_app/views.py": ["/dojo_ninjas_app/models.py"]}
|
2,116
|
grantsrb/simple_autoencoder
|
refs/heads/master
|
/encoder.py
|
import torch
from torch.autograd import Variable
import torch.nn as nn
import numpy as np
class Encoder(nn.Module):
    """Convolutional VAE-style autoencoder with an auxiliary classifier.

    forward(x) -> (z, remake): a sampled latent code and a reconstruction
    of the input; classify(z) maps latent codes to class logits.
    """
    def cuda_if(self, tobj):
        # Move the tensor to the GPU when one is available.
        if torch.cuda.is_available():
            tobj = tobj.cuda()
        return tobj
    def __init__(self, obs_size, n_classes, emb_size=256, bnorm=True):
        """
        obs_size - the size of the input data. Shape = (..., C, H, W)
        n_classes - number of output classes for the classifier head
        emb_size - dimensionality of the latent embedding
        bnorm - whether conv blocks include batch normalization
        """
        super(Encoder, self).__init__()
        self.obs_size = obs_size
        self.emb_size = emb_size
        self.bnorm = bnorm
        # Encoder: four conv blocks; `shape` tracks the feature-map size.
        self.convs = nn.ModuleList([])
        shape = [*self.obs_size[-3:]]
        ksize=3; padding=1; stride=1; out_depth = 16
        self.convs.append(self.conv_block(obs_size[-3], out_depth, ksize=ksize, padding=padding, stride=stride, bnorm=self.bnorm))
        shape = self.get_new_shape(shape, out_depth, ksize=ksize, stride=stride, padding=padding)
        ksize=3; padding=0; stride=2; in_depth=out_depth
        out_depth = 32
        self.convs.append(self.conv_block(in_depth, out_depth, ksize=ksize, padding=padding, stride=stride, bnorm=self.bnorm))
        shape = self.get_new_shape(shape, out_depth, ksize=ksize, stride=stride, padding=padding)
        ksize=3; padding=1; stride=2; in_depth = out_depth
        out_depth = 64
        self.convs.append(self.conv_block(in_depth, out_depth, ksize=ksize, padding=padding, stride=stride, bnorm=self.bnorm))
        shape = self.get_new_shape(shape, out_depth, ksize=ksize, stride=stride, padding=padding)
        ksize=3; padding=1; stride=2; in_depth = out_depth
        out_depth = 64
        self.convs.append(self.conv_block(in_depth, out_depth, ksize=ksize, padding=padding, stride=stride, bnorm=self.bnorm))
        shape = self.get_new_shape(shape, out_depth, ksize=ksize, stride=stride, padding=padding)
        self.features = nn.Sequential(*self.convs)
        self.feat_shape = shape
        self.flat_size = int(np.prod(shape))
        # Heads producing the latent distribution parameters.
        self.mu = nn.Linear(self.flat_size, emb_size)
        self.sigma = nn.Linear(self.flat_size, emb_size)
        # Reconstructor: deconv stack approximately mirroring the encoder.
        self.deconvs = nn.ModuleList([])
        ksize=5; padding=0; stride=1; in_depth = out_depth
        out_depth = 64
        self.deconvs.append(self.deconv_block(in_depth, out_depth, ksize=ksize, padding=padding, stride=stride, bnorm=self.bnorm))
        ksize=5; padding=0; stride=1; in_depth = out_depth
        out_depth = 64
        self.deconvs.append(self.deconv_block(in_depth, out_depth, ksize=ksize, padding=padding, stride=stride, bnorm=self.bnorm))
        ksize=4; padding=0; stride=2; in_depth = out_depth
        out_depth = 32
        self.deconvs.append(self.deconv_block(in_depth, out_depth, ksize=ksize, padding=padding, stride=stride, bnorm=self.bnorm))
        ksize=5; padding=0; stride=1; in_depth = out_depth
        out_depth = 16
        self.deconvs.append(self.deconv_block(in_depth, out_depth, ksize=ksize, padding=padding, stride=stride, bnorm=self.bnorm))
        ksize=3; padding=0; stride=1; in_depth = out_depth
        out_depth = obs_size[-3]
        # Final deconv maps back to the input channel count; no batch norm.
        self.deconvs.append(self.deconv_block(in_depth, out_depth, ksize=ksize, padding=padding, stride=stride, bnorm=False))
        # NOTE(review): BatchNorm1d(1024) hard-codes the flattened size; it
        # only matches when self.flat_size == 1024 — confirm for new obs_size.
        self.rwd_flat = nn.Sequential(nn.Linear(emb_size, self.flat_size),nn.ReLU(), nn.BatchNorm1d(1024))
        self.rwd_features = nn.Sequential(*self.deconvs)
        # Classifier head on top of the latent code.
        block = []
        block.append(nn.Linear(self.emb_size, 200))
        block.append(nn.ReLU())
        block.append(nn.BatchNorm1d(200))
        block.append(nn.Linear(200,int(n_classes)))
        self.classifier = nn.Sequential(*block)
    def get_new_shape(self, old_shape, depth, ksize=3, stride=1, padding=1):
        # Standard conv output-size arithmetic applied per spatial dimension.
        new_shape = [depth]
        for i in range(len(old_shape[1:])):
            new_shape.append((old_shape[i+1] - ksize + 2*padding)//stride + 1)
        return new_shape
    def forward(self, x):
        """Encode x, sample a latent z, and decode a reconstruction."""
        fx = self.features(x)
        fx = fx.view(-1, self.flat_size)
        mu = self.mu(fx)
        sigma = self.sigma(fx)
        # Reparameterization-style sampling: z = mu + sigma * unit noise.
        # NOTE(review): torch.normal(means=...) is pre-0.4 PyTorch API
        # (modern versions spell it mean= / use torch.randn); also sigma is
        # used directly rather than exponentiated — confirm against the
        # intended VAE formulation and the installed torch version.
        z = mu + sigma*Variable(self.cuda_if(torch.normal(means=torch.zeros(mu.shape), std=1)))
        fx = self.rwd_flat(z)
        fx = fx.view(-1, *self.feat_shape)
        remake = self.rwd_features(fx)
        return z, remake
    def classify(self, z):
        # Class logits from a latent code.
        return self.classifier(z)
    def conv_block(self,in_depth,out_depth,ksize=3,stride=1,padding=1,activation='relu',bnorm=False):
        """Conv2d + optional activation + optional BatchNorm as one module."""
        block = []
        block.append(nn.Conv2d(in_depth, out_depth, ksize, stride=stride, padding=padding))
        if activation is None:
            pass
        elif activation.lower() == 'relu':
            block.append(nn.ReLU())
        elif activation.lower() == 'tanh':
            block.append(nn.Tanh())
        if bnorm:
            block.append(nn.BatchNorm2d(out_depth))
        return nn.Sequential(*block)
    def deconv_block(self,in_depth,out_depth,ksize=3,stride=1,padding=1,activation='relu',bnorm=False):
        """ConvTranspose2d + optional activation + optional BatchNorm."""
        block = []
        block.append(nn.ConvTranspose2d(in_depth, out_depth, ksize, stride=stride, padding=padding))
        if activation is None:
            pass
        elif activation.lower() == 'relu':
            block.append(nn.ReLU())
        elif activation.lower() == 'tanh':
            block.append(nn.Tanh())
        if bnorm:
            block.append(nn.BatchNorm2d(out_depth))
        return nn.Sequential(*block)
|
{"/main.py": ["/trainer.py", "/encoder.py"]}
|
2,117
|
grantsrb/simple_autoencoder
|
refs/heads/master
|
/trainer.py
|
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
class Trainer():
    """Optimizes an autoencoder-style net, with an optional classification loss."""

    def cuda_if(self, tobj):
        """Move tobj to the GPU when CUDA is available; otherwise return unchanged."""
        if torch.cuda.is_available():
            tobj = tobj.cuda()
        return tobj

    def __init__(self, net, lr=1e-3, pred_coef=.1):
        """
        net       - model exposing forward(x) -> (z, remake) and classify(z) -> logits
        lr        - Adam learning rate
        pred_coef - weight of the classification loss term
        """
        self.net = net
        self.pred_coef = pred_coef
        self.optim = optim.Adam(self.net.parameters(), lr=lr)

    def batch_loss(self, x, y=None):
        """Return (reconstruction [+ weighted classification] loss, accuracy or None)."""
        loss = 0
        zs, remakes = self.net.forward(x)
        acc = None
        if y is not None:
            logits = self.net.classify(zs)
            loss = self.pred_coef * F.cross_entropy(logits, y)
            maxes, max_idxs = torch.max(logits.data, dim=-1)
            acc = torch.eq(max_idxs, y.data).float().mean()
        return loss + F.mse_loss(remakes, x), acc

    def train(self, X, y=None, batch_size=128):
        """One shuffled epoch over X (with labels y when given).

        Returns (losses, accuracies); accuracies holds None entries when
        y is None.
        """
        idxs = self.cuda_if(torch.randperm(len(X)).long())
        losses = []
        accuracies = []
        for i in range(0, len(X), batch_size):
            self.optim.zero_grad()
            batch_idxs = idxs[i:i+batch_size]
            batch_x = X[batch_idxs]
            if y is not None: batch_y = y[batch_idxs]
            else: batch_y = None
            # Bug fix: Variable(None) raises, so only wrap batch_y when labels exist.
            wrapped_y = Variable(batch_y) if batch_y is not None else None
            loss, acc = self.batch_loss(Variable(batch_x), wrapped_y)
            loss.backward()
            self.optim.step()
            # Bug fix: loss.data[0] fails on 0-dim tensors (PyTorch >= 0.4); use item().
            losses.append(loss.item())
            accuracies.append(acc)
            print(i,"/", len(X), "– Loss:", losses[-1], "– Acc:", acc, end='\r')
        return losses, accuracies
|
{"/main.py": ["/trainer.py", "/encoder.py"]}
|
2,118
|
grantsrb/simple_autoencoder
|
refs/heads/master
|
/main.py
|
import torch
import torchvision
from torch.autograd import Variable
from trainer import Trainer
from encoder import Encoder
import matplotlib.pyplot as plt
import numpy as np
import sys
def cuda_if(tobj):
    """Return tobj moved to the GPU when one is available, else unchanged."""
    return tobj.cuda() if torch.cuda.is_available() else tobj
def preprocess(imgs, mean, std):
    """Normalize images: subtract mean, divide by (std + eps) for stability."""
    eps = 1e-7
    return (imgs - mean) / (std + eps)
def postprocess(remakes, mean, std):
    """Invert preprocess(): map normalized values back to the pixel scale."""
    eps = 1e-7
    return remakes * (std + eps) + mean
# ---- Hyperparameters and run flags ----
n_epochs = 500
batch_size = 128
lr = .001
pred_coef = .25 # Portion of loss from classification
process = False  # normalize images before training
resume = False   # load weights from network.p before training
# ---- Crude CLI overrides, e.g. `python main.py lr=0.01 pred_coef=0.5` ----
if len(sys.argv) > 1:
    for i in range(len(sys.argv)):
        str_arg = str(sys.argv[i])
        if 'lr=' == str_arg[:3]: lr = float(str_arg[3:])
        if 'pred_coef=' in str_arg: pred_coef = float(str_arg[len('pred_coef='):])
print("lr:", lr)
print("pred_coef:", pred_coef)
# ---- Load CIFAR10 (NHWC -> NCHW) and move to the GPU when available ----
cifar = torchvision.datasets.CIFAR10("/Users/satchelgrant/Datasets/cifar10", train=True, download=True)
imgs = cifar.train_data
imgs = cuda_if(torch.FloatTensor(imgs.transpose((0,3,1,2))))
mean = imgs.mean()
std = imgs.std()
if process:
    imgs = preprocess(imgs, mean, std)
labels = cuda_if(torch.LongTensor(cifar.train_labels))
# 45k train / 5k validation split from one random permutation
perm = cuda_if(torch.randperm(len(imgs)))
train_imgs = imgs[perm[:45000]]
train_labels = labels[perm[:45000]]
val_imgs = imgs[perm[45000:]]
val_labels = labels[perm[45000:]]
net = Encoder(imgs.shape, torch.max(labels)+1)
net = cuda_if(net)
if resume:
    net.load_state_dict(torch.load('network.p'))
trainer = Trainer(net, lr=lr, pred_coef=pred_coef)
for epoch in range(n_epochs):
    print("Begin Epoch", epoch)
    losses, accuracies = trainer.train(train_imgs, train_labels, batch_size)
    print("Avg Loss:", np.mean(losses), "– Avg Acc:", np.mean(accuracies))
    # Every 10 epochs: validation accuracy + checkpoint
    if (epoch % 10) == 0:
        acc = 0
        val_batch_size = 300
        for i in range(0,len(val_imgs), val_batch_size):
            zs, remakes = net.forward(Variable(val_imgs[i:i+val_batch_size]))
            logits = net.classify(zs)
            _, max_idxs = torch.max(logits.data, dim=-1)
            acc += torch.eq(max_idxs, val_labels[i:i+val_batch_size]).float().mean()
        acc = acc/(len(val_imgs)/float(val_batch_size))
        torch.save(net.state_dict(), 'network.p')
        print("Val Acc:", acc)
# ---- Save sample reconstructions (and originals) for offline inspection ----
zs, remakes = net.forward(Variable(val_imgs[:20]))
torch.save(net.state_dict(), 'network.p')
if process:
    remakes = postprocess(remakes, mean, std)
remakes = remakes.data.cpu().numpy()
reals = val_imgs[:20]
if process:
    reals = postprocess(reals, mean, std)
reals = reals.cpu().numpy()
np.save('remakes.npy', remakes)
np.save('reals.npy', reals[:20])
|
{"/main.py": ["/trainer.py", "/encoder.py"]}
|
2,130
|
Formalin564/VM
|
refs/heads/master
|
/Vm/models.py
|
from django.db import models
class address_info(models.Model):
    """A map marker: a named point with coordinates and a short data payload."""
    name = models.TextField()
    # Coordinates and data are serialized to JSON by the map view (see view.hello).
    longitude = models.FloatField()
    latitude = models.FloatField()
    data = models.CharField(max_length=200)
|
{"/Vm/view.py": ["/Vm/models.py"]}
|
2,131
|
Formalin564/VM
|
refs/heads/master
|
/Vm/view.py
|
from django.shortcuts import render
import json
from .models import address_info
def hello(request):
    """Render map.html with every stored address point as JSON-encoded lists.

    The template receives three parallel lists (longitudes, latitudes, data
    strings), each serialized with json.dumps.
    """
    address_point = address_info.objects.all()
    # Idiom fix: iterate the queryset directly instead of `range(len(...))`
    # with repeated positional indexing.
    address_longitude = [point.longitude for point in address_point]
    address_latitude = [point.latitude for point in address_point]
    address_data = [point.data for point in address_point]
    return render(request, 'map.html',
                  {'address_longitude': json.dumps(address_longitude),
                   'address_latitude': json.dumps(address_latitude), 'address_data': json.dumps(address_data)})
|
{"/Vm/view.py": ["/Vm/models.py"]}
|
2,132
|
Formalin564/VM
|
refs/heads/master
|
/Vm/urls.py
|
from .import view
from django.urls import path
# Route the app root URL to the map view.
urlpatterns = [
    path('', view.hello),
]
|
{"/Vm/view.py": ["/Vm/models.py"]}
|
2,133
|
Formalin564/VM
|
refs/heads/master
|
/Vm/sn.py
|
# -*- coding: utf-8 -*-
import urllib.parse
import urllib.request
import json
# Baidu Maps API access key ("ak").
# NOTE(review): a credential committed to source control should be rotated
# and loaded from configuration instead.
key = 'iq9mhXfEXPYMlLyn070A3uvFFx968kpq'
# locationXY attributes: x stores the longitude (float), y the latitude (float).
# (Translated from the original Chinese comments.)
class locationXY:
    """Container for a geocoded point: x = longitude, y = latitude."""
    def __init__(self, x, y):
        # Store the coordinate pair as plain attributes.
        self.x, self.y = x, y
# Forward geocoding via the Baidu Maps HTTP API.
def getLocation(address):
    """Geocode `address` with the Baidu geocoding API.

    Returns locationXY(lng, lat) on success, or None (after printing a
    not-found message) when the API reports a non-zero status.
    """
    data = urllib.parse.urlencode({'address': address, 'output': 'json','ak':key})
    response = urllib.request.urlopen('http://api.map.baidu.com/geocoding/v3/?%s' % data)
    try:
        html = response.read()
    finally:
        # Bug fix: the original never closed the HTTP response.
        response.close()
    data = html.decode('utf-8')
    result = json.loads(data)
    if result['status'] == 0:
        lng = result['result']['location']['lng']  # longitude
        lat = result['result']['location']['lat']  # latitude
        l = locationXY(lng, lat)
        return l
    else:
        print(address+"没找到")
|
{"/Vm/view.py": ["/Vm/models.py"]}
|
2,134
|
Formalin564/VM
|
refs/heads/master
|
/vm_notice/apps.py
|
from django.apps import AppConfig
class VmNoticeConfig(AppConfig):
    """Django application configuration for the vm_notice app."""
    name = 'vm_notice'
|
{"/Vm/view.py": ["/Vm/models.py"]}
|
2,137
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/lib/io/writer.py
|
'''
Created on Sep 24, 2012
@author: shawn
'''
from lib.options.config import configuration as config
import __builtin__
class FileWrapper(object):
    """Proxy around a file object that logs when the file is closed.

    Every attribute other than close() is delegated to the wrapped object.
    """
    def __init__(self,obj):
        self._obj = obj
    def close(self,*args,**kwargs):
        # Log, then delegate the actual close to the wrapped file.
        print "Closing file..."
        self._obj.close(*args,**kwargs)
    def __getattr__(self, attr):
        # see if this object has attr
        # NOTE do not use hasattr, it goes into
        # infinite recursion
        if attr in self.__dict__:
            # this object has it
            return getattr(self, attr)
        # proxy to the wrapped object
        return getattr(self._obj, attr)
def marked_open(*params):
    """Drop-in replacement for builtin open() that logs write-mode opens.

    Write-mode files are wrapped in FileWrapper so their close() is logged
    too; read-mode opens pass straight through to the saved builtin.
    """
    global _open
    #print params
    if len(params) > 1 and (params[1] == 'w' or params[1] == 'wb' or params[1] == 'w+'):
        print "Opening file..."
        return FileWrapper(_open(*params))
    else:
        return _open(*params)
# Monkey-patch the builtin open so every write-mode open in the process is logged.
_open = __builtin__.open
__builtin__.open = marked_open
"""
def __defattr__(self,attr):
if hasattr(self.obj, attr):
attr_value = getattr(self.obj,attr)
if isinstance(attr_value,types.MethodType):
def callable(*args, **kwargs):
return attr_value(*args, **kwargs)
return callable
else:
return attr_value
else:
raise AttributeError
"""
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,138
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/lib/training/test.py
|
import lda
from utils.reader import windowed,filter_tokenise
import sys
import matplotlib.pyplot as plt
from collections import defaultdict
def plot_hist(bin_size, bin_list, upper=None):
    """Render one bar-chart figure per histogram dict in bin_list.

    The x range runs from 0 to `upper` (or to the largest key present).
    """
    for histogram in bin_list:
        top = upper or max(histogram)
        xs = list(range(top + 1))
        ys = [histogram[i] for i in xs]
        figure = plt.figure()
        axes = figure.add_subplot(1, 1, 1)
        axes.bar(xs, ys, width=1)
        plt.show()
# Build windowed documents: argv[1] is the window size, remaining args are
# thread files; w[2] is the list of post texts in the window.
docs = [' '.join(w[2]) for w,_ in windowed(sys.argv[2:],int(sys.argv[1]))]
tokenised_docs = [filter_tokenise(i) for i in docs]
num_topics = 3
# NOTE(review): rebinding `lda` shadows the imported module from here on.
lda = lda.LDASampler(
    docs=tokenised_docs,
    num_topics=num_topics,
    alpha=0.25,
    beta=0.25)
print 'Sampling...'
# 100 Gibbs sampling iterations.
for _ in range(100):
    zs = lda.assignments
    #print zs
    #print '[%i %i] [%i %i]' % (zs[0][3], zs[1][3], zs[2][3], zs[3][3])
    lda.next()
print
print 'words ordered by probability for each topic:'
tks = lda.topic_keys()
for i, tk in enumerate(tks):
    print '%3d'%i , tk[:10]
#    print '%3s'%'', tk[10:20]
#    print '%3s'%'', tk[20:30]
print
print 'document keys:'
dks = lda.doc_keys()
size = 20
time_differences = [dt for _,dt in windowed(sys.argv[2:],int(sys.argv[1]))]
bin_list = []
for i in range(num_topics):
    bins = defaultdict(float)
    bin_list.append(bins)
# Accumulate each document's per-topic probability into time bins of width `size`.
for dt, doc, dk in zip(time_differences, docs, dks):
    print '%5d'%dt + '\t'+\
        doc[:40] +"..." + '\t' +\
        str(dk)
    for p,i in dk:
        bin = int(float(dt)/size)
        bin_list[i][bin] += p
plot_hist(size,bin_list)
#print 'topic assigned to each word of first document in the final iteration:'
#lda.doc_detail(0)
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,139
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/lib/io/pickled_globals.py
|
import cPickle as pickle
class pickled_globals(object):
    """Lazy attribute store backed by pickled files in a directory.

    Accessing `store.name` unpickles `<pg_dir>/name` on first use and caches
    the object on the instance, so later accesses skip __getattr__ entirely.
    """
    def __init__(self, pg_dir):
        self.pg_dir = pg_dir

    def __getattr__(self, attr_name):
        """
        Loads the file from pg_dir into an object,
        then caches the object in memory.
        """
        # Bug fix: close the file handle after loading (the original leaked it).
        with open('%s/%s' % (self.pg_dir, attr_name), 'rb') as f:
            obj = pickle.load(f)
        self.__setattr__(attr_name, obj)
        return obj
pg = pickled_globals('global_objs')
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,140
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/lib/training/trainer.py
|
from utils.reader import windowed
from utils.reporting import *
from utils.util import *
from regression_performance import performance
import pickle,math,getopt
def train(model, extractor, filenames, window_size, iterations=1):
    """Train `model` on feature vectors extracted from every file, then persist.

    Runs `iterations` passes over the files, finalises the model, and
    returns whatever model.save() reports (typically the saved path).
    """
    for _ in range(iterations):
        for path in filenames:
            try:
                vecs = extracted_vecs(extractor, path, window_size)
                model.train(vecs)
            except ValueError as err:
                raise err
    model.finalise()
    return model.save()
"""
f = timestamp_model('model')
pickle.dump(model,f)
f.close()
"""
def train_extractor(extractor, filenames, window_size):
    """Fit the extractor on the windowed post stream, finalise, and save it."""
    stream = windowed(filenames, window_size)
    extractor.train(stream)
    extractor.finalise()
    return extractor.save()
if __name__ == "__main__":
o,args = read_options()
reporting_init(o,"pickled_models")
extractor = load_from_file(o.extractor_name, "Extractor")
model = load_from_file(o.model_name,"Model",o)
if hasattr(extractor,'train'):
train_extractor( extractor,args,o.window_size)
filename = train(model,extractor,args,o.window_size)
print performance(model,extractor,[o.test_file],o.window_size,o.verbose,filename)
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,141
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/lib/evaluation/evaluate_window.py
|
#!/usr/bin/python2
from lib.io.reader import windowed
from lib.io.reporting import reporting_init,timestamp_log
from lib.io.util import *
from lib.options import *
from lib.interfaces.model_utils import unpickle_model
def evaluate(threadfile, model, extractor, window_size = 1, bandwidth = 1000000, LAG_TIME = 10, offset=0):
    """Simulate visits against the post stream of `threadfile` and score them.

    Walks the thread in windows, asking `model` for the delay until the next
    visit after each observed post, then scores miss/false-alarm rates with a
    k-wide sliding window over the timeline.  Returns (Pr_error, visit_count).
    """
    posts_log, visit_log, result_log = timestamp_log(
        'posts',
        'visit',
        'sliding_window')
    try:
        time = 0
        d_visit = LAG_TIME
        time_visit = time
        time_visit += d_visit
        post_buffer = []
        visits = 0
        visit_times = []
        posts_times = []
        for window,d_t in windowed([threadfile],window_size,offset):
            #post being made
            print "%d\t-->"%time
            posts_log.write("%d\n"%time)
            posts_times.append(time)
            assert(time_visit - time > 0)
            time_post = time + d_t
            post_buffer.append(window)
            last_post_time = time
            while time_visit <= time_post:
                #visit being made
                time = time_visit
                print "%d\t<--"%time
                visits += 1
                visit_log.write("%d\n"%time)
                visit_times.append(time)
                if post_buffer:
                    # New posts since the last visit: predict from the latest one.
                    feature_vec = extractor.extract(post_buffer[-1])
                    d_visit = model.predict(feature_vec,d_t)
                    post_buffer = []
                else:
                    d_visit = model.repredict()
                p_from_last_post = last_post_time + d_visit
                if time < p_from_last_post:
                    time_visit = p_from_last_post
                else:
                    # Prediction already in the past: re-predict from now.
                    d_visit = model.repredict()
                    time_visit = time + d_visit
            time = time_post
        # Score: slide a k-wide window over the timeline, comparing post
        # counts (r) against visit counts (h) in each position.
        k = 120
        N = int(max(visit_times[-1],posts_times[-1]))
        sum_Phi = 0
        sum_Psi = 0
        sum_ref = 0
        for i in range(N-k):
            r = len([j for j in posts_times if j >= i and j < i + k ])
            h = len([j for j in visit_times if j >= i and j < i + k ])
            if r > 0: sum_ref += 1
            if r > h: sum_Phi += 1
            elif r < h: sum_Psi += 1
        Pr_miss = float(sum_Phi)/sum_ref
        Pr_fa = float(sum_Psi)/float(N-k)
        Pr_error = 0.5*Pr_miss + 0.5*Pr_fa
        result_log.write(str(Pr_miss) + ' , ' + str(Pr_fa) + '\n')
        model.add_experiment('prerror_test',threadfile,Pr_error)
        model.save()
        return Pr_error,visits
    except Exception:
        raise
    finally:
        posts_log.close()
        visit_log.close()
        result_log.close()
# Module-level defaults, only meaningful when run as a script.
eval_file = None
model_name = None
extr_name = None
class Extractor:
    """Trivial feature extractor: the feature vector is the window's first field."""
    def extract(self, window):
        head = window[0]
        return head
if __name__ == "__main__":
o,args = read_options()
reporting_init(o,"reports")
extractor = load_from_file(o['extractor_name'], "Extractor")
model = load_from_file(o['model_name'],"Model",o)
if o.has_key('pickled_model'):
pickle_file = o['pickled_model']
model = unpickle_model(open(pickle_file,'rb'))
result = evaluate(
o['test_file'],
model,
extractor,
pickle_file,
o['window_size']
)
print result
#for i,j in windowed(["thread"],1):print j
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,142
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/lib/io/__init__.py
|
import pickled_globals
import writer
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,143
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/lib/training/trainer_test.py
|
from utils.reader import windowed
from utils.reporting import *
from utils.util import *
import pickle,math,getopt
from evaluate_window import evaluate as evaluate_window
from utils.options import read_options, read_model_extractor_options
def train(model, extractor, iterator, window_size, iterations=1):
    """Run `iterations` training passes of `model` over `iterator`, then save.

    `extractor` and `window_size` are accepted for interface parity with the
    file-based trainer but are not used here.
    """
    passes = 0
    while passes < iterations:
        model.train(iterator)
        passes += 1
    model.finalise()
    return model.save()
"""
f = timestamp_model('model')
pickle.dump(model,f)
f.close()
"""
def performance(model,extractor,rest_instances,window_size,verbose,model_file):
    """Mean absolute percentage error (MAPE) of the model's time predictions.

    rest_instances yields (feature_vector, delta_t) pairs; instances with
    delta_t == 0 are skipped to avoid division by zero.
    """
    print "Calculating MAPE"
    print "====================="
    total_percent_error = 0
    count = 0
    for fv,d_t in rest_instances:
        p = model.predict(fv)
        if d_t > 0:
            percent_error = math.fabs(float(p - d_t)/d_t)
            if verbose: print "delta_t: %d\tpredicted: %d\tAPE: %0.2f"%(
                d_t,
                p,
                percent_error
            )
            total_percent_error += percent_error
            count += 1
    # NOTE(review): raises ZeroDivisionError when every delta_t was 0.
    ave_percentage_error = total_percent_error/count
    return ave_percentage_error
def train_extractor(extractor, filenames, window_size):
    """Train the extractor on windowed input, finalise it, and persist it."""
    extractor.train(windowed(filenames, window_size))
    extractor.finalise()
    saved = extractor.save()
    return saved
def file_len(fname):
    """Return the number of lines in `fname` (0 for an empty file).

    Bug fix: the original left the loop variable unbound for empty files,
    raising UnboundLocalError on `return i + 1`; it also never closed the file.
    """
    count = 0
    with open(fname) as f:
        for count, _ in enumerate(f, start=1):
            pass
    return count
from evaluate import evaluate
if __name__ == "__main__"
'Visit/Post':
o,args = read_options()
reporting_init(o,"pickled_models")
extractor = load_from_file(o.extractor_name, "Extractor")
model = load_from_file(o.model_name,"Model",o)
args = read_model_extractor_options(args,extractor,model)
print "Training extractor..."
if hasattr(extractor,'train'):
train_extractor( extractor,args,o.window_size)
instances = [i for i in extracted_vecs(extractor,args[0],o.window_size)]
instance_count = len(instances)
if instance_count < 2:
print "Insufficient instances"
sys.exit()
reporting_init(o,"pickled_models")
train_count = int(instance_count*0.75)
trainset,testset = instances[:train_count],instances[train_count:]
#trainset,testset = instances,instances
#print trainset
print "Instance split:",len(trainset),len(testset)
print "Training model..."
filename = train(
model,
extractor,
trainset,
o.window_size)
print "Evaluating..."
ave_percentage_error = performance(model,extractor,testset,o.window_size,o.verbose,filename)
print ave_percentage_error
model.add_experiment('regression_test(partial thread)',filename,ave_percentage_error)
result = evaluate(args[0], model, extractor, o.window_size, o.bandwidth,
offset = train_count,
sliding_window_size=sum(i for _,i in trainset)/len(trainset),
verbose = o.verbose)
result['filename'] = args[0]
result['offset'] = train_count
print model.experiments
model.add_experiment('visit_evaluation',filename,result)
model.save()
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,144
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/lib/graphs.py
|
import os
def plot_hist(bin_size, bin_list, directory=None, upper=None):
    """Plot one bar chart per bins dict; show interactively or save to `directory`.

    When `directory` is given, figures are saved there as 001, 002, ...;
    otherwise each figure is shown with plt.show().
    """
    # Bug fix: only touch the filesystem when a directory was actually given
    # (os.path.exists(None) raises TypeError with the default argument).
    if directory and not os.path.exists(directory):
        os.makedirs(directory)
    import matplotlib.pyplot as plt
    count = 1
    for bins in bin_list:
        fig = plt.figure()
        ax = fig.add_subplot(1,1,1)
        up_bound = upper or max(bins)
        x = [i for i in range(up_bound+1)]
        y = [bins[i] for i in range(up_bound+1)]
        ax.bar(x,y,width=1)
        if not directory:
            plt.show()
        else:
            plt.savefig('%s/%03d'%(directory, count))
            count += 1
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,145
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/lib/evaluation/evaluate.py
|
#!/usr/bin/python2
from lib.io.reader import windowed
from lib.io.reporting import reporting_init,timestamp_log
from lib.io.util import *
from lib.options import *
from lib.interfaces.model_utils import unpickle_model
from lib.evaluation.sliding_window import SlidingWindow
from lib.evaluation.pairwise import PairwiseScoring
def evaluate(threadfile, model, extractor,
        window_size = 1,
        bandwidth = 1000000,
        LAG_TIME = 10,
        offset = 0,
        sliding_window_size = 120,
        verbose = False
        ):
    """Simulate a reader revisiting the thread and score visit timing.

    Returns a dict with the T-score (mean post-to-visit wait), the
    (Pr_miss, Pr_fa, Pr_error) triple from the sliding-window scorer, the
    visit/post counts, and the pairwise score.  Results are also written to
    timestamped logs and recorded on the model.
    """
    posts_log, visit_log, result_log_tscore,result_log_window = timestamp_log(
        'posts',
        'visit',
        't_score',
        'sliding_window')
    try:
        time = 0
        d_visit = LAG_TIME
        time_visit = time
        time_visit += d_visit
        post_buffer = []
        t_score_cum = 0
        count = 0
        visits = 0
        correct_count,wrong_count = 0,0
        w = SlidingWindow(K = 20, alpha = 0.5)
        ps = PairwiseScoring()
        for window,d_t in windowed([threadfile],window_size, offset):
            #post being made
            if verbose: print "%d\t-->"%time
            posts_log.write("%d\n"%time)
            w.event('post',time)
            ps.event('post',time)
            assert(time_visit - time > 0)
            # Accumulate the wait between this post and the next visit.
            t_score_cum += time_visit-time
            count += 1
            time_post = time + d_t
            post_buffer.append((extractor.extract(window),d_t))
            last_post_time = time
            while time_visit <= time_post:
                #visit being made
                time = time_visit
                if verbose: print "%d\t<--"%time
                visits += 1
                visit_log.write("%d\n"%time)
                w.event('visit',time)
                ps.event('visit',time)
                #start correction
                d_visit = None
                if post_buffer: feature_vec,_ = post_buffer[-1]
                d_visit = model.predict(
                    feature_vec,d_t,
                    current_d_t = time - last_post_time,
                    unseen = post_buffer[:-1]
                )
                if post_buffer: post_buffer = []
                time_visit = last_post_time + d_visit
                assert(time < time_visit)
                #end correction
            time = time_post
        Pr_miss, Pr_fa, Pr_error = w.pr_error()
        result_log_window.write(str(Pr_miss) + ' , ' + str(Pr_fa) + '\n')
        model.add_experiment('prerror_test',threadfile,Pr_error)
        model.add_experiment('pairwise_scoring',threadfile,ps.score())
        t_score = t_score_cum/float(count)
        result_log_tscore.write(str(t_score)+'\n')
        model.add_experiment('t-score_test',threadfile,t_score)
        #save_model(pickle_file,model)
        model.save()
        return {
            'T-score': t_score,
            'Pr_error': (Pr_miss,Pr_fa,Pr_error),
            'Visits': visits,
            'Posts': count,
            'Pairwise': ps.score()
            #'Invalid Predictions': (correct_count+wrong_count,
            #    wrong_count/float(correct_count+wrong_count))
        }
    except Exception:
        raise
    finally:
        posts_log.close()
        visit_log.close()
        result_log_tscore.close()
        result_log_window.close()
if __name__ == "__main__":
o,args = read_options()
reporting_init(o,"reports")
extractor = load_from_file(o.extractor_name, "Extractor")
model = load_from_file(o.model_name,"Model",o)
if o.pickled_model:
pickle_file = o.pickled_model
model = unpickle_model(open(pickle_file,'rb'))
result = evaluate(
o.test_file,
model,
extractor,
o.window_size,
verbose = o.verbose
)
#print result
#for i,j in windowed(["thread"],1):print j
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,146
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/lib/options/options.py
|
from optparse import OptionParser
from random import random
# Parsed option state cached at module level by read_options() /
# read_model_extractor_options().
opts,args = None,None
p_opts = None
def read_options():
global opts,args
p = OptionParser()
p.add_option("-M","--model",metavar = "MODEL_PATH.py",
action = "store",
dest = "model_name",
help = "Model to be used for current experiment")
p.add_option("-E","--extractor",metavar = "EXTRACTOR_PATH.py",
action = "store",
dest = "extractor_name",
help = "Extractor to be used for current experiment")
p.add_option("-t","--test-file", metavar = "FILE",
action = "store",
dest = "test_file",
help = "file model will be evaluated on")
p.add_option("-n","--name",metavar = "NAME",
action = "store",
dest = "experiment_name",
help = "Name given to experiment")
p.add_option("-S","--pickled-extractor",metavar = "PICKLED_EXTRACTOR",
action = "store",
dest = "pickled_extractor",
help = "Pickled extractor to be used for current experiment\n\
--extractor must be specified")
p.add_option("-P","--pickled-model",metavar = "PICKLED_MODEL",
action = "store",
dest = "pickled_model",
help = "Pickled model to be used for current experiment\n\
--model must be specified")
p.add_option("-N","--window-size",metavar = "N",
type = "int",
default = 1,
action = "store",
dest = "window_size",
help = "Window size to segment thread stream into")
p.add_option("-B","--bandwidth",metavar = "BW",
action = "store",
dest = "bandwidth",type = "int",default = 1000,
help = "Bandwidth limit. Default is 1000")
p.add_option("-v","--verbose",
action = "store_true",
dest = "verbose",
help = "print extra debug information")
(opts,args) = p.parse_args()
print opts,args
if not opts.extractor_name:
opts.extractor_name = opts.model_name
if opts.experiment_name and opts.experiment_name.endswith('RANDOM'):
opts.experiment_name = opts.experiment_name.replace(
'RANDOM',
str(random.randint(100,999)))
return opts,args
import sys
def read_model_extractor_options(args,extractor=None,model=None):
    """Let the extractor and model register and parse their own CLI options.

    Returns the leftover positional args; parsed options are cached in the
    module-level `p_opts`.
    """
    global p_opts
    p = OptionParser()
    # Bare excepts are deliberate best-effort: either object may not define opt_cfg.
    try: extractor.opt_cfg(p)
    except: print "Extractor has no options"
    try: model.opt_cfg(p)
    except: print "Model has no options"
    p_opts,args = p.parse_args(args)
    print p_opts
    return args
if __name__=="__main__":
read_options()
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,147
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/predictor.py
|
import cPickle as pickle
import sys
import lib.io.pickled_globals
import lib.graphs as graphs
from hist_to_probdist import time_dist
from lib.io.reader import windowed,filter_tokenise
# Windowing parameters; must match the values the pickled models were trained with.
window_size = 15
time_bin = 20
def load_model(topics):
    """Load the pickled time-distribution, LDA model, and prior for `topics`.

    Returns (lda, timdist, prior).  Paths are relative to the working
    directory and keyed on the module-level window_size.
    """
    # Bug fix: close each pickle file after loading (the originals leaked
    # the open handles).
    with open('graphs/prob_dist/dist_t%03d' % topics, 'rb') as f:
        timdist = pickle.load(f)
    with open('global_objs/w%d_t%d_learnt_topics' % (window_size, topics), 'rb') as f:
        lda = pickle.load(f)
    with open('graphs/prob_dist/dist_t%03d_prior' % topics, 'rb') as f:
        prior = pickle.load(f)
    return lda, timdist, prior
def main():
    """Predict time-to-next-post for each windowed document listed in argv[1]."""
    print "loading documents..."
    documents = ['data/'+i.strip() for i in open(sys.argv[1],'r')]
    print documents
    lda, time_model,prior = load_model(9)
    docs = ((' '.join(w[2]),dt) for w,dt in windowed(documents,window_size))
    for doc,dt in docs:
        topic_dist = lda.doc_distribution(filter_tokenise(doc))
        dt_dist = time_dist(topic_dist,time_model,prior,limit=24*3*7)
        # Print the expected delay under the predicted distribution vs. the actual dt.
        print sum((i*(time_bin/2)) * p for i,p in enumerate(dt_dist)), dt
if __name__ == "__main__":
    main()
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,148
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/lib/io/util.py
|
from reader import windowed
import sys, imp, traceback, md5, pickle
def load_from_file(filepath,class_name,*params):
    """Import `filepath` as a module and instantiate `class_name`(*params).

    Returns the instance, or None when the module defines no such class.
    Import errors are printed to stderr and re-raised.
    """
    class_inst = None
    """
    mod_name,file_ext = os.path.splitext(os.path.split(filepath)[-1])
    if file_ext.lower() == '.py':
        py_mod = imp.load_source(mod_name, filepath)
    elif file_ext.lower() == '.pyc':
        py_mod = imp.load_compiled(mod_name, filepath)
    """
    try:
        try:
            #code_dir = os.path.dirname(filepath)
            #code_file = os.path.basename(filepath)
            fin = open(filepath, 'rb')
            # Hash the path so repeated loads get distinct module names.
            module_name = md5.new(filepath).hexdigest()
            py_mod = imp.load_source(module_name, filepath, fin)
            print "%s loaded as %s"%(filepath,module_name)
        finally:
            try: fin.close()
            except: pass
    except ImportError:
        traceback.print_exc(file = sys.stderr)
        raise
    except:
        traceback.print_exc(file = sys.stderr)
        raise
    if hasattr(py_mod, class_name):
        class_ = getattr(py_mod,class_name)
        class_inst = class_(*params)
    return class_inst
def extracted_vecs(extractor, filename, window_size, first=None):
    """Yield (feature_vector, delta_t) for every window of `filename`.

    `first` is accepted for interface compatibility but unused.
    """
    for window, delta_t in windowed([filename], window_size):
        yield extractor.extract(window), delta_t
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,149
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/lib/evaluation/play.py
|
from lib.io.reporting import set_directory
from lib.io.util import load_from_file
from lib.options import *
from lib.interfaces.model_utils import unpickle_model
import os
import glob
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot
import numpy as np
def plot(values,output,
x_axis = 'Values',
y_axis = 'Frequency',
title = 'Histogram',
range_min = None,
range_max = None):
if range_min != None: values = [v for v in values if v >= range_min]
if range_max != None: values = [v for v in values if v <= range_max]
fig = pyplot.figure()
n, bins, patches = pyplot.hist(
values,
60,
facecolor = 'green',
alpha=0.75
)
print n, bins, patches
pyplot.xlabel(x_axis)
pyplot.ylabel(y_axis)
pyplot.title(title)
pyplot.axis([min(values),max(values),0,max(n)])
pyplot.grid(True)
fig.savefig('collated/%s'%output)
def scatter_plot(x_vals,y_vals,c_vals,output,
        x_axis = 'Values',
        y_axis = 'Frequency',
        title = 'Scatterplot'):
    """Save a log-y scatter plot of the given points to collated/<output>."""
    figure = pyplot.figure()
    axes = figure.add_subplot(1, 1, 1)
    axes.set_yscale('log')
    #ax.set_xscale('log')
    pyplot.ylim((0.1, 1000))
    pyplot.xlim((0, 7500))
    pyplot.scatter(x_vals, y_vals, c=c_vals, cmap=mpl.cm.Greens)
    pyplot.xlabel(x_axis)
    pyplot.ylabel(y_axis)
    pyplot.title(title)
    figure.savefig('collated/%s' % output)
# Experiment pattern triples: (LaTeX row label, pickled-model dir glob,
# collated output filename).  Commented entries are previous experiment sets.
ws_ex = [
    #('Average $w = %d$', '*w%d_winavg*', 'w%d_dt_average.result'),
    #('$w=%d,\\dtvec$', '*w%d_dt-*', 'w%d_rbf_dt'),
    #('$w=%d,\\dtvec,\\ctxvec$', '*w%d_dt_ctx*', 'w%d_rbf_dt_ctx'),
    #('$w=%d,\\vocab$', '*w%d_lang-*', 'w%d_rbf_lang_fs'),
    #('$\\alpha=%0.1f,\\vocab$', '*w%0.1f_lang_decay-*', 'w%0.1f_rbf_lang_fs_decay'),
    #('$w=%d,\\vocab$,p', '*w%d_lang_punc-*', 'w%d_rbf_lang_p_fs')
    #('$w=%d,\\vocab,\\dtvec$', '*w%d_lang_dt-*', 'w%d_rbf_lang_dt_fs'),
    #('$w=%d,\\vocab,\\dtvec$', '*w%d_lang_dt_decay-*', 'w%d_rbf_lang_dt_fs')
    #('cluster', '*cluster_time-*','cluster_time')
]
vocab_size_ex = [
    ('$\\vocab,|\\vocab|=%d', '*w15_lang_top%d-*', 'vocab-size%d'),
]
# Expand the active pattern templates with each vocabulary size.
patterns = []
alpha_sizes = [5,10,15,20,25,30,35,40,45,50]
for i,j,k in vocab_size_ex:
    patterns += [(i%w,j%w,k%w) for w in alpha_sizes]
# Collate per-model experiment results into per-pattern log files, a LaTeX
# summary table, and a T-score vs. post/visit-ratio scatter plot.
if __name__ == '__main__':
    o,args = read_options()
    #extractor = load_from_file(o['extractor_name'], "Extractor")
    # Import every model module once so their classes can be unpickled below.
    for n in glob.glob('models/*.py'):
        load_from_file(n,"Model",o)
    summary = open('collated/summary','w')
    header_tuple = [
        'MAPE',
        '$Pr_{miss}$',
        '$Pr_{fa}$',
        '$Pr_{error}$',
        '$T$-score',
        #'Inv. pred',
        #'Posts',
        #'Visits',
        'Pairwise',
        'Visit/Post'
    ]
    summary.write('%20s &\t'%'')
    summary.write(' &\t'.join("%10s"%i for i in header_tuple) + ' \\\\\n\\hline\n')
    for l_col,p,outfile in patterns:
        print 'pickled_models/'+p+'/model'
        files = glob.glob('pickled_models/'+p+'/model')
        log_file = open('collated/'+outfile,'w')
        log_file_coeffs = open('collated/'+outfile+'_coeffs','w')
        print len(files)
        count = 0
        sum_tup = [0]*len(header_tuple)
        log_file.write('\t'.join("%10s"%i for i in header_tuple) + '\n')
        regression_perfs = []
        t_scores = []
        pv_ratios = []
        tscore_pv_plot = []
        posts_vals = []
        for pickle_file in files:
            set_directory(os.path.dirname(pickle_file))
            model = unpickle_model(open(pickle_file,'rb'))
            print model.experiments
            for k in model.experiments:
                exps = model.experiments[k]
                values = dict((e_name,result) for e_name,_,result in exps)
                if values.has_key('visit_evaluation'):
                    try:
                        #print values
                        regression_perf = values['regression_test(partial thread)']
                        pr_miss,pr_fa,pr_error = values['visit_evaluation']['Pr_error']
                        t_score = values['visit_evaluation']['T-score']
                        posts = values['visit_evaluation']['Posts']
                        visits = values['visit_evaluation']['Visits']
                        filename = values['visit_evaluation']['filename']
                        pairwise = values['visit_evaluation']['Pairwise']
                        pv_ratio = visits/float(posts)
                        #inv_preds = values['visit_evaluation']['Invalid Predictions'][1]
                        # NOTE(review): `tuple` shadows the builtin here.
                        tuple = [
                            regression_perf,
                            pr_miss,
                            pr_fa,
                            pr_error,
                            t_score,
                            pairwise,
                            #inv_preds,
                            pv_ratio
                        ]
                        regression_perfs.append(regression_perf)
                        t_scores.append(t_score)
                        pv_ratios.append(pv_ratio)
                        posts_vals.append(posts)
                        sum_tup = [s + i for s,i in zip(sum_tup,tuple)]
                        count += 1
                        log_file.write('\t'.join("%10.3f"%i for i in tuple) +\
                            '\t' + filename + '\n')
                    except KeyError as ke:
                        print ke
                if values.has_key('token_score'):
                    coeffs = values['token_score']
                    log_file_coeffs.write('\t'.join("%10s"%i for _,i in coeffs[:-1]) + '\n')
                    log_file_coeffs.write('\t'.join("%10.3f"%i for i,_ in coeffs[:-1]) + '\t' +\
                        "%10.3f"%coeffs[-1] + '\n')
        """
        plot( output = 'mape_dist_%s.png'%outfile,
            values = regression_perfs,
            x_axis = 'MAPE',
        )
        plot( output = 't_score_dist_%s.png'%outfile,
            values = t_scores,
            x_axis = '$T$-score',
        )
        plot( output = 'pv_ratio_dist_%s.png'%outfile,
            values = pv_ratios,
            x_axis = 'Post/Visit ratio'
        )
        """
        scatter_plot(
            x_vals = t_scores,
            y_vals = pv_ratios,
            c_vals = posts_vals,
            x_axis = '$T$-scores',
            y_axis = 'Post/Visit ratio',
            output = 'tscore_pv_plot%s.png'%outfile,
            title = '$T$-score vs. Post/Visit ratio'
        )
        avg_tup = [float(s)/count for s in sum_tup]
        log_file.write('\n')
        log_file.write('\t'.join("%10.3f"%i for i in avg_tup) + '\n')
        summary.write('%20s &\t'%l_col)
        summary.write(' &\t'.join("%10.3f"%i for i in avg_tup) + ' \\\\\n')
        log_file.close()
        log_file_coeffs.close()
    summary.close()
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,150
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/preamble.py
|
import cPickle as pickle
import lib.io.pickled_globals
import lib.graphs as graphs
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,151
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/lib/io/reader.py
|
#!/usr/bin/python2
import nltk,re
from nltk.stem.porter import PorterStemmer
import time
bin_size = 10
# Usernames seen across all thread files; populated by class_text() and
# consulted by preprocess() to mask user mentions.
users = set()
def text_tdelta(input_file):
    """Yield (minutes_since_prev_post, user, text, localtime) per post.

    `input_file` lines are tab-separated: epoch_seconds, user, text.  The
    first line only seeds the previous-timestamp state, so n lines yield
    n - 1 tuples.
    """
    # Bug fix: the original iterated an open() result without ever closing it.
    with open(input_file) as f:
        prev_tup = None
        for line in f:
            tup = line.split('\t')
            if prev_tup: yield (
                (float(tup[0])-float(prev_tup[0]))/60,
                tup[1].strip(),
                tup[2].strip(),
                time.localtime(float(tup[0]))
                )
            prev_tup = tup
def class_text(threadfiles):
    """Collect every username into the module-level `users` set, then yield
    (delta_t, text, localtime) for each post across all thread files."""
    for threadfile in threadfiles:
        for line in open(threadfile):
            tup = line.split('\t')
            users.add(tup[1])
    for threadfile in threadfiles:
        # Bug fix: text_tdelta yields 4-tuples (dt, user, text, time); the
        # original unpacked only 3 names and would raise ValueError.
        for td, user, text, t in text_tdelta(threadfile):
            yield (td, text, t)
def windowed(threadfiles,N, offset = -1):
    """Yield (window, delta_t) pairs over each thread file.

    `window` is the transposed view of up to the last N posts (one list per
    field of the text_tdelta tuples); `delta_t` comes from the post that
    FOLLOWS the window.  The first offset+1 pairs are skipped when
    offset >= 0.

    NOTE(review): the window list is seeded with a None sentinel; as written
    here the transposition below would index into that None whenever
    len(window) <= N.  The dump this file came from lost its indentation, so
    verify the real block structure against the original source.
    """
    count = 0
    for threadfile in threadfiles:
        window = [None]
        prev_window = None
        for tup in text_tdelta(threadfile):
            window.append(tup)
            if prev_window:
                if count <= offset:
                    count += 1
                else:yield prev_window,tup[0]
            if len(window) > N:
                window.pop(0)
            # Transpose: one list per field (dt, user, text, time).
            result = [None]*len(tup)
            for i in range(len(tup)): result[i] = [t[i] for t in window]
            prev_window = tuple(result)
def filter_tokenise(text):
    """Lower-case *text*, split it on non-token characters, and return the
    list of tokens that survive preprocess() (stopwords, model numbers and
    short stems are dropped)."""
    raw_tokens = re.split('[^0-9a-z\.\$]+', text.lower())
    cleaned = (preprocess(token) for token in raw_tokens)
    return [token for token in cleaned if token]
# Precompiled patterns and resources shared by the tokenising helpers.
non_alphanum = re.compile('\W')   # any non-word char (not referenced in this module)
number = re.compile('[0-9]')      # digits; masked to '#' in preprocess()
splitter = re.compile('[\s\.\-\/]+')  # generic separator (not referenced in this module)
model = re.compile('([.\#]+\w+|\w+[.\#]+)')  # model-number-like tokens, dropped by preprocess()
stemmer = PorterStemmer()
stop_words = set(nltk.corpus.stopwords.words('english'))
def preprocess(word):
    """Normalise one token for the language model.

    Lower-cases, drops stopwords, masks digits with '#', drops
    model-number-like tokens, collapses known forum user names to
    '#USER#', stems, and drops stems shorter than 3 characters.
    Returns the normalised token, or None when it should be discarded.
    """
    global users
    w = word
    w = w.lower()
    if w in stop_words: return
    w = number.sub("#",w)
    if model.match(w): return #w = "#MODEL#"
    if w in users: return "#USER#"
    # NOTE(review): stem_word() was removed from newer NLTK releases in
    # favour of PorterStemmer.stem -- confirm the pinned nltk version.
    w = stemmer.stem_word(w)
    if len(w) < 3 : return
    return w
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,152
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/lib/options/config.py
|
import ConfigParser
from collections import namedtuple
# Sections of the `config` file exposed on the `configuration` object.
sections = ['dirs','filename_formats']
def subconf(section):
    """Wrap one config-file section in a namedtuple keyed by its option
    names.  Relies on the module-level parser `c`, which is bound below
    before this is called."""
    Conf = namedtuple(section,(k for k,_ in c.items(section)))
    conf = Conf(**dict(c.items(section)))
    return conf
# Parse ./config at import time; access as configuration.<section>.<key>.
c = ConfigParser.RawConfigParser()#allow_no_value=True)
c.readfp(open('config','r'))
PConf = namedtuple('Configuration',sections)
d = dict((sect,subconf(sect)) for sect in sections)
configuration = PConf(**d)
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,153
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/lib/evaluation/sliding_window.py
|
class SlidingWindow():
    """Streaming miss / false-alarm counter over a sliding time window.

    Events ('post'/'visit', integer time) are fed in time order via
    event(); every window position of width K that has been fully passed
    is scored once by count().  pr_error() combines the per-window miss
    and false-alarm rates with weight `alpha`.
    """
    def __init__(self,K = 60, alpha = 0.5):
        self.window = []        # (time, event_type) pairs inside the current window
        self.low = 0            # left edge of the current window position
        self.window_size = K
        self.alpha = alpha
        self.phi_count = 0      # windows with more posts than visits (misses)
        self.psi_count = 0      # windows with more visits than posts (false alarms)
        self.ref_count = 0      # windows containing at least one post
        self.all_count = 0      # total windows scored
    def event(self,event_type,time):
        """Feed one event; first scores and slides past every window
        position that ends strictly before this event's time."""
        time = int(time)
        if time >= self.low + self.window_size :
            low = self.low
            for t in range(low, time - self.window_size + 1):
                #print t
                self.low = t
                #Add appropriate counts
                if self.window:
                    # evict events that fell off the left edge
                    while self.window[0][0] < self.low:
                        self.window.pop(0)
                        if not self.window: break
                    self.count()
            self.low = t + 1
            self.window.append((time,event_type))
            #print self.low, self.window[0]
            while self.window[0][0] < self.low:
                self.window.pop(0)
                if not self.window: break
        else:
            self.window.append((time,event_type))
        #print self.window
    def count(self):
        """Score the current window position once."""
        R = [j for j,et in self.window if et == 'post']
        H = [j for j,et in self.window if et == 'visit']
        #print H, self.low + self.window_size -1
        r = len(R)
        h = len(H)
        if r > 0: self.ref_count += 1
        if r > h: self.phi_count += 1
        elif r < h: self.psi_count += 1
        self.all_count += 1
    def pr_error(self):
        """Return (Pr_miss, Pr_false_alarm, weighted error)."""
        pr_miss = float(self.phi_count)/self.ref_count
        pr_fa = float(self.psi_count)/(self.all_count)
        pr_error = self.alpha*pr_miss + (1-self.alpha)*pr_fa
        return pr_miss, pr_fa, pr_error
if __name__ == "__main__":
    # Self-test (Python 2): two bursts of posts vs. sparse visits, window
    # sized at half the mean inter-post gap.
    k = 10
    posts = [(t*2 ,'post') for t in range(10)] +\
            [(t*2 ,'post') for t in range(30,40)]
    visit = [(t*8+1 ,'visit') for t in range(10)]
    sum = 0
    # accumulate total gap between consecutive posts (shadows builtin sum)
    for i in range(len(posts)-1):
        a,b = posts[i:i+2]
        sum += b[0]-a[0]
    w = SlidingWindow(K =int(float(sum)*0.5/(len(posts) -1)) )
    events = posts + visit
    events.sort()
    print events[-1]
    posts_times = [i for i,_ in posts]
    visit_times = [i for i,_ in visit]
    # Commented-out brute-force reference implementation kept for
    # cross-checking the streaming version above.
    """
    sum_Phi = 0
    sum_Psi = 0
    sum_ref = 0
    for i in range(events[-1][0]-k + 1):
        R = [j for j in posts_times if j >= i and j < i + k ]
        H = [j for j in visit_times if j >= i and j < i + k ]
        print H, i + k - 1
        r = len(R)
        h = len(H)
        if r > 0: sum_ref += 1
        if r > h: sum_Phi += 1
        elif r < h: sum_Psi += 1
    Pr_miss = float(sum_Phi)/sum_ref
    Pr_fa = float(sum_Psi)/float(events[-1][0]-k + 1)
    Pr_error = 0.5*Pr_miss + 0.5*Pr_fa
    print Pr_miss,Pr_fa,Pr_error
    """
    for t,e in events: w.event(e,t)
    print w.pr_error()
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,154
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/lib/io/dataset.py
|
import numpy as np
from sklearn import linear_model
from itertools import permutations
from lang_model import Extractor
from utils.reader import *
import csv,sys
# Build an on-disk regression dataset: one feature vector per sliding
# window of posts (features from Extractor), target = the following gap.
# Usage (Python 2): dataset.py <thread_file>
count = 0
clf = linear_model.LinearRegression()
filenames = [sys.argv[1]]
filename_x = "X"
filename_y = "Y"
window_size = 15
e = Extractor()
# first pass only counts windows so the memmaps can be sized up front
count = sum(1 for _ in windowed(filenames,window_size))
class RewinderWindow():
    """Re-iterable view over the windowed stream: reset() restarts the
    generator so training code can pass over the data more than once."""
    def __init__(self,filenames,window_size):
        self.filenames = filenames
        self.window_size = window_size
    def reset(self):
        return windowed(self.filenames,self.window_size)
e.train(RewinderWindow(filenames,window_size))
e.finalise()
def first(vec_size,vec_count):
    """Allocate the on-disk X (vec_count x vec_size) and Y (vec_count,)
    float64 memmaps; called once, when the first vector fixes vec_size."""
    X = np.memmap(
            filename_x,
            mode = 'w+',
            shape = (vec_count,vec_size),
            dtype="float64"
        )
    Y = np.memmap(
            filename_y,
            mode = "w+",
            shape = (vec_count,),
            dtype = "float64"
        )
    return X,Y
X,Y = None,None
for i,instance in enumerate(windowed(filenames,window_size)):
    window, d_t = instance
    x_vec = e.extract(window)
    if i == 0: X,Y = first(len(x_vec),count)
    X[i][:] = x_vec[:]
    Y[i] = d_t
print X, X.shape
print Y, Y.shape
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,155
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/lib/interfaces/model_utils.py
|
'''
Created on Jul 19, 2012
@author: shawn
'''
from lib.io.reporting import get_directory
import pickle
def save_model(filename, model):
    """Pickle *model* into the current report directory (see
    lib.io.reporting.get_directory) and return the full path written."""
    target = "%s/%s" % (get_directory(), filename)
    with open(target, 'wb') as handle:
        pickle.dump(model, handle)
    return target
def unpickle_model(filepath):
    """Load and return a model previously written by save_model.

    Bug fix: pickle.load expects an open binary file object, but the
    parameter (and save_model's return value) is a path string; the
    original passed the string straight through and failed.  Open the
    file first.
    """
    with open(filepath, 'rb') as handle:
        return pickle.load(handle)
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,156
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/lib/options/__init__.py
|
import options,config
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,157
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/lib/interfaces/generic_model.py
|
'''
Created on Jul 17, 2012
@author: shawn
'''
import md5
from model_utils import save_model
from collections import defaultdict
class GenericModel(object):
    """Baseline predictor: predicts the mean inter-post gap.

    Requires the instance attribute `avg` to be set by subclass/training
    code (it is not assigned here).  Also keeps a log of experiment
    results keyed by an md5 of the test-set file names.

    NOTE(review): uses the Python 2-only `md5` module (hashlib.md5 in
    Python 3) -- this file targets Python 2.
    """
    # minimum margin a prediction must clear past the elapsed time
    epsilon = 0.1
    def __init__(self,o):
        self.options = o
        # md5(test files) -> [(test_type, test_files, result), ...]
        self.experiments = defaultdict(list)
    def predict(self,feature_vec = None, d_t = None, current_d_t = None):
        """Predict the next gap.  When current_d_t (time already elapsed)
        is given, step the average forward in whole multiples of `avg`
        until the prediction exceeds current_d_t + epsilon."""
        pred = self.avg
        if current_d_t:
            k = 0
            while k*self.avg + pred <= current_d_t + self.epsilon: k += 1
            return k*self.avg + pred
        else:
            return pred
    def ensure_prediction_conditions(self,pred,feature_vec,d_t,current_d_t):
        """Return *pred* when it is consistent with the elapsed time;
        otherwise fall back to the baseline predict()."""
        if current_d_t:
            if pred > current_d_t + self.epsilon:
                return pred
            else:
                return GenericModel.predict(self,feature_vec,d_t,current_d_t)
        else:
            return pred
    def add_experiment(self,test_type,test_files,result):
        """Record one experiment result, keyed by an md5 of the sorted
        test-file names (or of the name string itself)."""
        if hasattr(test_files,'sort'):
            test_files.sort()
            names = '\n'.join(test_files)
        else:
            names = test_files
        key = md5.new(names).hexdigest()
        self.experiments[key].append((test_type,test_files,result))
    def save(self):
        """Pickle this model into the current report directory."""
        return save_model('model', self)
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,158
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/lib/evaluation/pairwise.py
|
import math
class PairwiseScoring():
    """Score consecutive (event_type, time) pairs with per-pair-type
    scoring functions, reporting the mean over the pairs that contributed
    a positive score."""

    def __init__(self,scoring = {
            ('visit','visit') : lambda e1,e2: math.exp(0.01*(e1-e2)),
            ('post', 'visit') : lambda e1,e2: 1-math.exp(0.01*(e1-e2)),
            ('post', 'post' ) : lambda e1,e2: 0 ,
            ('visit','post' ) : lambda e1,e2: 0}):
        self.total_score = 0
        self.count = 0
        self.prev_event = (None, 0)
        self.scoring = scoring

    def event(self, event_type, time):
        """Fold one event into the running score (events must arrive in
        time order; the very first event only seeds the pair)."""
        last_type, last_time = self.prev_event
        if last_type:
            rule = self.scoring[last_type, event_type]
            contribution = rule(float(last_time), float(time))
            #print "%10s\t%10s\t%10d\t%10d\t%10.10f"%(et1,et2,t1,t2,score)
            if contribution > 0:
                self.count += 1
            self.total_score += contribution
        self.prev_event = (event_type, time)

    def score(self):
        """Mean score over positively-contributing pairs."""
        return self.total_score/self.count
if __name__ == "__main__":
    # Self-test (Python 2): posts in two bursts, each visit 13 time units
    # after a post, scored pairwise.
    k = 10
    posts = [(t*10 ,'post') for t in range(10)] +\
            [(t*10 ,'post') for t in range(30,40)]
    visit = [(t+13 ,'visit') for t,_ in posts]
    sum = 0
    # total gap between consecutive posts (shadows builtin sum; unused below)
    for i in range(len(posts)-1):
        a,b = posts[i:i+2]
        sum += b[0]-a[0]
    events = posts + visit
    events.sort()
    posts_times = [i for i,_ in posts]
    visit_times = [i for i,_ in visit]
    w = PairwiseScoring()
    for t,e in events: w.event(e,t)
    print w.score()
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,159
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/hist_to_probdist.py
|
#!/usr/bin/python2
import os
import numpy as np
import cPickle as pickle
from collections import defaultdict
import lib.io.pickled_globals
import lib.graphs as graphs
def main():
    """Convert each saved histogram file (graphs/histograms/w*_histograms)
    into per-topic probability distributions plus a topic prior, pickled
    under graphs/prob_dist."""
    directory = 'graphs/prob_dist'
    if not os.path.exists(directory): os.makedirs(directory)
    for i in range(1,10):
        hist = pickle.load(open('graphs/histograms/w%d_histograms'%(i+1),'rb'))
        model = []
        topic_dist = []
        for topic_hist in hist:
            # normalise one topic's histogram into a probability distribution
            # (the generator expressions' `i` is scoped locally, so the
            # outer loop index used in the filenames below is unaffected)
            total = sum(topic_hist[i] for i in topic_hist)
            prob_dist = defaultdict(float,((i,topic_hist[i]/float(total)) for i in topic_hist))
            model.append(prob_dist)
            topic_dist.append(total)
            print prob_dist
        # prior over topics, proportional to each topic's total mass
        topic_dist = np.array(topic_dist)/float(sum(topic_dist))
        pickle.dump(topic_dist,open('graphs/prob_dist/dist_t%03d_prior'%i,'wb'))
        pickle.dump(model,open('graphs/prob_dist/dist_t%03d'%i,'wb'))
def time_dist(topic_dist, prior, model, limit=24*3*2):
    """Marginalise per-topic time distributions into one normalised
    distribution over `limit` time slots:

        t_dist[i] proportional to sum_t model[t][i] * topic_dist[t] * prior[t]
    """
    n_topics = len(topic_dist)
    t_dist = np.zeros(limit)
    for slot in range(limit):
        mass = 0
        for topic in range(n_topics):
            mass += model[topic][slot] * topic_dist[topic] * prior[topic]
        t_dist[slot] = mass
    return t_dist / sum(t_dist)
if __name__ == '__main__':
    # Python 2 script entry: rebuild all distributions, then demo one.
    main()
    model = pickle.load(open('graphs/prob_dist/dist_t%03d'%9,'rb'))
    #print model
    # NOTE(review): time_dist takes (topic_dist, prior, model); this call
    # passes only two arguments, so `model` lands in the `prior` slot and
    # `model` itself is missing -- this raises TypeError as written.
    print time_dist([0.1 for i in range(9)],model)
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,160
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/lib/interfaces/extractor_utils.py
|
'''
Created on Jul 19, 2012
@author: shawn
'''
from lib.io.reporting import get_directory
from lib.options import read_options
from lib.io.reader import windowed
from lib.io.util import load_from_file
import pickle
def save_model(filename, model):
    """Pickle *model* into the current report directory and return the
    full path written.

    Made consistent with lib.interfaces.model_utils.save_model: uses a
    `with` block so the handle is closed even on error, and returns the
    path (previously returned None, so this is backward-compatible).
    """
    fullpath = "%s/%s" % (get_directory(), filename)
    with open(fullpath, 'wb') as f:
        pickle.dump(model, f)
    return fullpath
def unpickle_model(filepath):
    """Load and return a pickled model from *filepath*.

    Bug fix: pickle.load expects an open binary file object; the original
    passed the path string straight through.  (Same defect existed in
    lib/interfaces/model_utils.py.)
    """
    with open(filepath, 'rb') as handle:
        return pickle.load(handle)
if __name__ == '__main__':
    # Smoke-test an extractor (Python 2): print the feature vector and
    # following time delta for every window of the configured test file,
    # then persist the extractor.
    o,args = read_options()
    extractor = load_from_file(o['extractor_name'], "Extractor")
    for window,d_t in windowed([o['test_file']],o['window_size']):
        print extractor.extract(window),d_t
    extractor.save()
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,161
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/lib/evaluation/analyse_bins.py
|
import sys,operator
import shelve
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import bsddb3
from collections import defaultdict
# Python 2 analysis script.
# Usage: analyse_bins.py <K> <output_file> <transit_file>
# 1) writes the top-K entries of each time bin (as proportions) to
#    <output_file>; 2) plots a density matrix of bin-to-bin transitions.
K = int(sys.argv[1])
output_file = sys.argv[2]
transit_file = sys.argv[3]
bins = shelve.BsdDbShelf(bsddb3.hashopen('bins.data', 'r'))
#bins = shelve.open('bins.data','r')
out = open(output_file,'w')
keys = [int(key) for key in bins]
keys.sort()
for key in keys:
    key = str(key)
    print "Evaluating ",key, " ..."
    # top-K most frequent entries of this bin, normalised to proportions
    sorted_top = sorted(
            bins[key].iteritems(),
            key=operator.itemgetter(1),
            reverse = True)[:K]
    total = sum(v for _,v in sorted_top)
    sorted_top = map(lambda tup: (tup[0],float(tup[1])/total), sorted_top)
    # bin label is in 20-minute units
    out.write('%10d\t'%(20*int(key)))
    out.write('\t'.join('%10s' %i for i,_ in sorted_top) + '\n')
    out.write('%10s\t'%"")
    out.write('\t'.join('%10.5f'%i for _,i in sorted_top) + '\n')
out.close()
bins.close()
states = set()
#time_trans = shelve.open('trans_bins.data','r')
# Transition counts keyed as "<prev>-<next>" bin pairs.
time_trans = shelve.BsdDbShelf(bsddb3.hashopen('trans_bins.data', 'r'))
state_total = defaultdict(int)
transited_to = set()
transited_from = set()
for key in time_trans:
    p,n = [int(i) for i in key.split('-')]
    transited_to.add(n)
    transited_from.add(p)
transited_to = sorted(list(transited_to))
transited_from = sorted(list(transited_from))
# total outgoing count per source state, for row normalisation
for i in transited_from: state_total[i] = sum(time_trans.get("%d-%d"%(i,j),0) for j in transited_to)
"""
out=open(transit_file,'w')
out.write('\t'.join("%5s"%j for j in transited_to)+ '\n')
for i in transited_from:
    out.write('\t'.join("%5.4f"%(time_trans.get("%d-%d"%(i,j),0)/float(state_total[i]))for j in transited_to)+ '\n')
out.close()
"""
def pdensity(dimI,dimJ):
    """Build the dimI x dimJ matrix of transition probabilities
    p(next=j | prev=i) from the shelved pair counts (1-based states)."""
    print "Creating sparse matrix %d,%d"%(dimI,dimJ)
    #pd = lil_matrix((dimI,dimJ),dtype=np.float32)
    pd = np.zeros((dimI,dimJ),dtype=np.float32)
    for key in time_trans:
        i,j = [int(i) for i in key.split('-')]
        if i > dimI or j > dimJ: continue
        pd[i-1,j-1] = time_trans[key]/float(state_total[i])
    return pd
# make these smaller to increase the resolution
#x = arange(0, transited_from[-1], 1)
#y = arange(0, transited_to[-1], 1)
print "Constructing density matrix..."
#Z = pdensity(transited_from[-1], transited_to[-1])
Z = pdensity(100, 100)
fig = plt.figure()
#plt.imshow(Z.toarray(),cmap=cm.Greys)
im = plt.imshow(Z,cmap=cm.Greys,interpolation='nearest')
#im.set_interpolation('bicubic')
#ax.set_image_extent(-3, 3, -3, 3)
#plt.axis([0,200*20, 0, 200*20])
#fig.savefig('collated/%s'%output)
plt.title("Density matrix plot of $p(q_{t+1}|q_t)$")
plt.xlabel("$q_{t+1}$ (20 minute blocks)")
plt.ylabel("$q_{t}$ (20 minute blocks)")
plt.show()
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,162
|
shawntan/predict-forum-pgm
|
refs/heads/master
|
/lib/io/reporting.py
|
REPORTS = None
SUBDIR = None
import sys,os
from datetime import datetime
def set_directory(directory):
    """Point the module-level report directory (SUBDIR) at *directory*."""
    global SUBDIR
    SUBDIR = directory
def get_directory():
    """Return the current report directory (set by set_directory or
    reporting_init)."""
    return SUBDIR
def reporting_init(options,directory):
    """Create a timestamped report subdirectory under *directory* and
    record the exact command line used there, for reproducibility.

    Sets the module globals REPORTS (base dir) and SUBDIR (this run's
    directory), which the other helpers in this module rely on.
    """
    global SUBDIR,REPORTS
    REPORTS = directory
    SUBDIR = '%s/%s'%(directory,datetime.now().strftime('%Y%m%d%H%M') +\
            (' - %s'%options.experiment_name\
            if options.experiment_name else ''))
    ensure_dir(SUBDIR)
    # write a re-runnable copy of the invoking command to SUBDIR/command
    with open("%s/%s"%(SUBDIR,'command'),'w') as f:
        f.write(sys.executable)
        f.write(' ')
        f.write(sys.argv[0])
        for i in sys.argv[1:]:
            if i[0] == '-':
                # start each option flag on its own continuation line
                f.write(' \\\n\t')
                f.write(i)
            else:
                f.write(' ')
                f.write('"%s"'%i)
        f.write('\n')
def ensure_dir(f):
    """Create directory *f* (including parents) if it does not exist.

    Bug fix: the original checked os.path.exists('./%s' % f), which turns
    an absolute path into a relative one ('.//abs' resolves as './abs'),
    so the check could miss an existing directory and os.makedirs would
    then raise OSError.  Check the path as given.
    """
    if not os.path.exists(f):
        os.makedirs(f)
def timestamp_log(*filenames):
    """Open each name under SUBDIR for text writing.  A single name
    returns the bare handle; several return a list (callers unpack)."""
    handles = []
    for name in filenames:
        handles.append(open("%s/%s" % (SUBDIR, name), 'w'))
    return handles[0] if len(handles) == 1 else handles
def timestamp_model(*filenames):
    """Binary-mode twin of timestamp_log: open each name under SUBDIR for
    binary writing; one name returns the handle, several return a list."""
    handles = []
    for name in filenames:
        handles.append(open("%s/%s" % (SUBDIR, name), 'wb'))
    return handles[0] if len(handles) == 1 else handles
def write_value(key, value):
    """Write *value* (followed by a newline) into the file SUBDIR/key."""
    target = "%s/%s" % (SUBDIR, key)
    with open(target, 'w') as out:
        out.write('%s\n' % value)
|
{"/preamble.py": ["/lib/io/pickled_globals.py", "/lib/graphs.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
|
2,168
|
doublepi123/demo
|
refs/heads/master
|
/mgr/views.py
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
from common.models import Student
def liststudent(request):
    """List every Student row as 'field : value | ... | <br>' lines.

    Idiom fix: the original built the response with repeated `+=` string
    concatenation in a nested loop (quadratic); use str.join instead.
    """
    qs = Student.objects.values()
    lines = []
    for student in qs:
        fields = ''.join(f'{name} : {value} | ' for name, value in student.items())
        lines.append(fields + '<br>')
    return HttpResponse(''.join(lines))
|
{"/mgr/views.py": ["/common/models.py"]}
|
2,169
|
doublepi123/demo
|
refs/heads/master
|
/model/Student.py
|
class Student:
    """Plain record of a student's financial-survey fields.

    NOTE(review): these are *class* attributes shared by every instance
    until shadowed by assignment on an instance; if instances are meant
    to be independent, an __init__ would be safer.
    """
    name = ''
    id = ''
    age = ''
    homeIncome = ''
    costForFood = ''
    costForOther = ''
|
{"/mgr/views.py": ["/common/models.py"]}
|
2,170
|
doublepi123/demo
|
refs/heads/master
|
/common/models.py
|
from django.db import models
from django.contrib import admin
# Create your models here.
class Student(models.Model):
    """Student registration record, one row per student keyed by stu_id."""
    # Name
    name = models.CharField(max_length=20)
    # Student number (primary key)
    stu_id = models.CharField(max_length=20, primary_key=True)
    # Phone number
    phone = models.CharField(max_length=20)
    # Date of birth
    birthdate = models.DateField()
    # Number of people in the household
    people = models.IntegerField()
    # Household income
    income = models.FloatField()
    # Special circumstances
    special = models.CharField(max_length=200)
# Expose the model in the Django admin site.
admin.site.register(Student)
|
{"/mgr/views.py": ["/common/models.py"]}
|
2,191
|
gouravsaini021/maruti
|
refs/heads/master
|
/maruti/imports/__init__.py
|
from . import general
from . import ml
|
{"/maruti/deepfake/dataset.py": ["/maruti/__init__.py", "/maruti/vision/video.py", "/maruti/utils.py", "/maruti/sizes.py", "/maruti/torch/utils.py"], "/maruti/vision/video.py": ["/maruti/__init__.py"], "/maruti/deepfake/__init__.py": ["/maruti/deepfake/dataset.py"], "/maruti/deepfake/models.py": ["/maruti/torch/utils.py", "/maruti/deepfake/dataset.py"], "/tests/test_utils.py": ["/maruti/__init__.py"], "/tests/torch/test_utils.py": ["/maruti/torch/__init__.py"], "/maruti/imports/ml.py": ["/maruti/imports/general.py", "/maruti/torch/__init__.py", "/maruti/deepfake/dataset.py", "/maruti/__init__.py", "/maruti/deepfake/__init__.py", "/maruti/vision/__init__.py"], "/maruti/torch/utils.py": ["/maruti/torch/__init__.py"], "/maruti/vision/__init__.py": ["/maruti/vision/image.py", "/maruti/vision/video.py"], "/tests/vision/test_image.py": ["/maruti/vision/__init__.py"], "/maruti/__init__.py": ["/maruti/utils.py", "/maruti/sizes.py", "/maruti/deepfake/__init__.py", "/maruti/torch/__init__.py"], "/maruti/torch/__init__.py": ["/maruti/torch/utils.py"], "/tests/test_sizes.py": ["/maruti/__init__.py"]}
|
2,192
|
gouravsaini021/maruti
|
refs/heads/master
|
/maruti/deepfake/dataset.py
|
import pathlib
from warnings import warn
import subprocess
import maruti
import os
from os.path import join
from PIL import Image
import torch
import shlex
import time
from collections import defaultdict
from ..vision.video import get_frames_from_path, get_frames
import random
from ..utils import unzip, read_json
from ..sizes import file_size
import numpy as np
import cv2
from tqdm.auto import tqdm
from torchvision import transforms as torch_transforms
from torch.utils.data import Dataset
from ..torch.utils import def_norm as normalize
DATA_PATH = join(os.path.dirname(__file__), 'data/')
__all__ = ['split_videos', 'VideoDataset', 'transform', 'group_transform']
# Torchvision pipelines applied to individual face crops: training adds
# colour jitter, a random horizontal flip and a random resized crop to
# 224x224; validation only tensorises.  Both end with def_norm
# (imported above as `normalize`).
transform = {
    'train': torch_transforms.Compose(
        [
            torch_transforms.ToPILImage(),
            torch_transforms.ColorJitter(0.3, 0.3, 0.3, 0.1),
            torch_transforms.RandomHorizontalFlip(),
            torch_transforms.RandomResizedCrop((224, 224), scale=(0.65, 1.0)),
            torch_transforms.ToTensor(),
            normalize,
        ]
    ),
    'val': torch_transforms.Compose([
        torch_transforms.ToTensor(),
        normalize, ]
    )
}
# Apply the matching per-image transform to every image in an iterable and
# stack the results into one tensor batch.
group_transform = {
    'train': lambda x: torch.stack(list(map(transform['train'], x))),
    'val': lambda x: torch.stack(list(map(transform['val'], x)))
}
class ImageReader:
    """Random access to pre-extracted face crops spread over several
    part-directories.

    Images are named '<video-stem>_<frame>_<person>.jpg'.  `metadata`
    supplies per-video labels plus frame counts ('fc') and person counts
    ('pc'); person counts for fakes are looked up on their original.
    """
    def __init__(self, path, metadata, is_path_cache=False, vb=True, ignore_frame_errors=False):
        self.vid2part = {}   # video file name -> directory holding its crops
        self.meta = metadata
        self.ignore_frame_errors = ignore_frame_errors
        if not is_path_cache:
            # walk every part directory once and index the images it holds
            parts = os.listdir(path)
            assert len(parts) > 0, 'no files found'
            start = time.perf_counter()
            for part in parts:
                path_to_part = os.path.join(path, part)
                imgs = os.listdir(path_to_part)
                for img in imgs:
                    self.vid2part[self.vid_name(img)] = path_to_part
            end = time.perf_counter()
            if vb:
                print('Total time taken:', (end - start) / 60, 'mins')
        else:
            # `path` is a JSON cache of the video -> directory index
            self.vid2part = maruti.read_json(path)
    def is_real(self, vid):
        """True when metadata labels *vid* REAL."""
        return self.meta[vid]['label'] == 'REAL'
    def is_fake(self, vid):
        """True when *vid* is not labelled REAL."""
        return not self.is_real(vid)
    def is_error(self, vid):
        """True when extraction recorded an error for *vid*."""
        return 'error' in self.meta[vid]
    def vid_name(self, img_name):
        """Recover 'xxxx.mp4' from an image name 'xxxx_<frame>_<person>.jpg'."""
        name = img_name.split('_')[0]
        return name + '.mp4'
    def create_name(self, vid, frame, person):
        """Build the image file name for (video, frame, person)."""
        return f'{vid[:-4]}_{frame}_{person}.jpg'
    def total_persons(self, vid):
        """Person count for *vid*; fakes inherit it from their original."""
        if self.is_real(vid):
            return self.meta[vid]['pc']
        orig_vid = self.meta[vid]['original']
        return self.meta[orig_vid]['pc']
    def random_person(self, vid, frame):
        """Crop of a random person at a fixed frame of *vid*."""
        person = random.choice(range(self.total_persons(vid)))
        return self.get_image(vid, frame, person)
    def random_img(self, vid):
        """Crop of a random (frame, person) pair from *vid*."""
        frame = random.choice(range(self.total_frames(vid)))
        person = random.choice(range(self.total_persons(vid)))
        return self.get_image(vid, frame, person)
    def sample(self):
        """Crop from a random non-error video in the index."""
        vid = random.choice(list(self.vid2part))
        while self.is_error(vid):
            vid = random.choice(list(self.vid2part))
        frame = random.choice(range(self.total_frames(vid)))
        person = random.choice(range(self.total_persons(vid)))
        return self.get_image(vid, frame, person)
    def total_frames(self, vid):
        """Usable frame count (metadata 'fc' minus one)."""
        return self.meta[vid]['fc'] - 1
    def create_absolute(self, name):
        """Full path of image *name* inside its video's part directory."""
        path = os.path.join(self.vid2part[self.vid_name(name)], name)
        return path
    def get_image(self, vid, frame, person):
        """PIL image of (vid, frame, person); out-of-range frames either
        clamp to the last frame (ignore_frame_errors) or raise."""
        if self.total_persons(vid) <= person:
            raise Exception('Not Enough Persons')
        if self.total_frames(vid) <= frame:
            if self.ignore_frame_errors:
                frame = self.total_frames(vid) - 1
            else:
                raise Exception('Not Enough Frames')
        img = self.create_name(vid, frame, person)
        path = self.create_absolute(img)
        return Image.open(path)
def split_videos(meta_file):
    """Map each real video to the set of fakes derived from it.

    Returns a defaultdict(set)-style mapping
    {original_video: {fake_video, ...}} built from every metadata entry
    labelled 'FAKE'.
    """
    grouped = defaultdict(set)
    for name, info in meta_file.items():
        if info['label'] == 'FAKE':
            grouped[info['original']].add(name)
    return grouped
class VideoDataset:
    '''
    Dataset over the .mp4 files of one DFDC part directory plus its
    metadata.json.  To download and build a part in one step use
    VideoDataset.from_part.
    '''
    def __init__(self, path, metadata_path=None):
        self.path = pathlib.Path(path)
        self.video_paths = list(self.path.glob('*.mp4'))
        metadata_path = metadata_path if metadata_path else self.path / 'metadata.json'
        try:
            self.metadata = read_json(metadata_path)
        except FileNotFoundError:
            del metadata_path
            print('metadata file not found.\n Some functionalities may not work.')
        if hasattr(self, 'metadata'):
            # {real_video: {fake_video, ...}} groups for n_groups()
            self.video_groups = split_videos(self.metadata)
    @staticmethod
    def download_part(part='00', download_path='.', cookies_path=join(DATA_PATH, 'kaggle', 'cookies.txt')):
        """Download one DFDC part zip via wget (Kaggle cookies required),
        showing a size-based progress bar; returns the zip path."""
        dataset_path = f'https://www.kaggle.com/c/16880/datadownload/dfdc_train_part_{part}.zip'
        # folder = f'dfdc_train_part_{int(part)}'
        command = f'wget -c --load-cookies {cookies_path} {dataset_path} -P {download_path}'
        command_args = shlex.split(command)
        fp = open(os.devnull, 'w')
        download = subprocess.Popen(command_args, stdout=fp, stderr=fp)
        bar = tqdm(total=10240, desc='Downloading ')
        zip_size = 0
        while download.poll() is None:
            time.sleep(0.1)
            try:
                new_size = int(
                    file_size(download_path + f'/dfdc_train_part_{part}.zip'))
                bar.update(new_size - zip_size)
                zip_size = new_size
            except FileNotFoundError:
                # zip not created yet; keep polling
                continue
        if download.poll() != 0:
            print('some error')
            print('download', download.poll())
        download.terminate()
        fp.close()
        bar.close()
        return download_path + f'/dfdc_train_part_{part}.zip'
    @classmethod
    def from_part(cls, part='00',
                  cookies_path=join(DATA_PATH, 'kaggle', 'cookies.txt'),
                  download_path='.'):
        """Download (if needed), unzip and wrap one DFDC part."""
        folder = f'dfdc_train_part_{int(part)}'
        if os.path.exists(pathlib.Path(download_path) / folder):
            return cls(pathlib.Path(download_path) / folder)
        downloaded_zip = cls.download_part(
            part=part, download_path=download_path, cookies_path=cookies_path)
        unzip(downloaded_zip, path=download_path)
        os.remove(download_path + f'/dfdc_train_part_{part}.zip')
        path = pathlib.Path(download_path) / folder
        return cls(path)
    def __len__(self):
        return len(self.video_paths)
    def n_groups(self, n, k=-1):
        '''
        Return n (real_video, fakes_set) pairs: the slice starting at k
        in insertion order when k >= 0, otherwise n random pairs.

        Bug fix: the original sliced self.video_groups (a dict, which does
        not support slicing) and called an undefined name `choices` for
        the random case; materialise the items as a list and use
        random.sample instead.
        '''
        groups = list(self.video_groups.items())
        if k != -1:
            if n + k >= len(groups):
                warn(RuntimeWarning(
                    'n+k is greater then video length. Returning available'))
                n = len(groups) - k - 1
            return groups[k:n + k]
        if n >= len(groups):
            warn(RuntimeWarning('n is greater then total groups. Returning available'))
            n = len(groups) - 1
        return random.sample(groups, n)
class VidFromPathLoader:
    """Loader to use with DeepfakeDataset: maps (metadata, video) to frames
    read from per-part directories."""
    def __init__(self, paths, img_reader=None):
        """paths as {'00': '/part/00', '01': ...}; img_reader defaults to
        the single-frame static reader below."""
        self.path = paths
        self.img_reader = self.img_reader if img_reader is None else img_reader
    @staticmethod
    def img_reader(path, split='val', max_limit=40):
        """Read one frame: frame 0 for validation, a random frame in
        [0, max_limit] for training."""
        frame_no = 0 if split == 'val' else random.randint(0, max_limit)
        frame = list(get_frames_from_path(
            path, [frame_no]))[0]
        return frame
    @staticmethod
    def img_group_reader(path, split='val', mode='distributed', num_frames=4, mode_info=[None]):
        """Read `num_frames` frames; use functools.partial to fix the mode.

        mode_info: distributed -> unused
                   forward  -> {jumps, index: 0,  readjust_jumps: True}
                   backward -> {jumps, index: -1, readjust_jumps: True}
                               (-1 meaning: anchor at the end of the clip)

        Bug fix: the 'backward' branch referenced an undefined `start`
        (NameError) and its non-adjusted linspace call had its arguments
        scrambled; compute the start index from the anchor `end`
        symmetrically to the 'forward' branch.
        """
        cap = cv2.VideoCapture(path)
        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        if mode == 'distributed':
            frames = np.linspace(0, frame_count - 1, num_frames, dtype=int)
        elif mode == 'forward':
            start = mode_info.get('index', 0)
            adjust = mode_info.get('readjust_jumps', True)
            jumps = mode_info['jumps']
            if adjust:
                frames = np.linspace(start, min(
                    frame_count - 1, start + (num_frames - 1) * jumps), num_frames, dtype=int)
            else:
                frames = np.linspace(
                    start, start + (num_frames - 1) * jumps, num_frames, dtype=int)
        elif mode == 'backward':
            end = mode_info.get('index', frame_count)
            adjust = mode_info.get('readjust_jumps', True)
            jumps = mode_info['jumps']
            start = end - (num_frames - 1) * jumps
            if adjust:
                frames = np.linspace(max(0, start), end, num_frames, dtype=int)
            else:
                frames = np.linspace(start, end, num_frames, dtype=int)
        return get_frames(cap, frames, 'rgb')
    def __call__(self, metadata, video, split='val'):
        """Resolve *video* to its part directory and read frame(s)."""
        vid_meta = metadata[video]
        video_path = join(self.path[vid_meta['part']], video)
        return self.img_reader(video_path, split)
class DeepfakeDataset(Dataset):
    """Torch Dataset pairing real videos with their fakes.

    Methods 'f12' r1-f1, r1-f2..,(default)  (fake rotates with `iteration`)
            'f..' r1-f1/f2/f3..             (one random fake per real)
            'f1'  r1-f1,                    (always the first fake)
            'ff'  r f1 f2 f3..              (every fake)
    Metadata 'split'(train-val),'label'(FAKE-REAL),'fakes'([video,video])
    loader func(metadata,video,split)->input
    error_handler func(self, index, error)->(input, label)
    """
    # class-level counter; bumped each time item 0 is fetched (i.e. once
    # per pass) so method 'f12' rotates through the fakes across epochs
    iteration = 0
    def __init__(self, metadata, loader, transform=None, split='train', method='f12', error_handler=None):
        self.transform = transform
        self.split = split
        self.loader = loader
        self.method = method
        self.error_handler = error_handler
        self.metadata = metadata
        self.dataset = []
        # real videos of the requested split, via the real->fakes grouping
        real_videos = filter(
            lambda x: metadata[x]['split'] == split, list(split_videos(metadata)))
        for real_video in real_videos:
            fake_videos = list(metadata[real_video]['fakes'])
            self.dataset.append(real_video)
            if method == 'f12':
                self.dataset.append(
                    fake_videos[self.iteration % len(fake_videos)])
            elif method == 'f..':
                self.dataset.append(random.choice(fake_videos))
            elif method == 'f1':
                self.dataset.append(fake_videos[0])
            elif method == 'ff':
                for fake_video in fake_videos:
                    self.dataset.append(fake_video)
            else:
                raise ValueError(
                    'Not a valid method. Choose from f12, f.., f1, ff')
    def __getitem__(self, i):
        # bump the epoch counter whenever the first item is requested
        if i == 0:
            self.iteration += 1
        try:
            img = self.loader(self.metadata, self.dataset[i], split=self.split)
            # label 1.0 for FAKE, 0.0 for REAL
            label = torch.tensor(
                [float(self.metadata[self.dataset[i]]['label'] == 'FAKE')])
            if self.transform is not None:
                img = self.transform(img)
            return img, label
        except Exception as e:
            # default recovery: report the failure and substitute a random
            # other item so training can continue
            if self.error_handler is None:
                def default_error_handler(obj, x, e):
                    print(f'on video {self.dataset[x]} error: {e}')
                    return self[random.randint(1, len(self) - 1)]
                self.error_handler = default_error_handler
            return self.error_handler(self, i, e)
    def __len__(self):
        return len(self.dataset)
|
{"/maruti/deepfake/dataset.py": ["/maruti/__init__.py", "/maruti/vision/video.py", "/maruti/utils.py", "/maruti/sizes.py", "/maruti/torch/utils.py"], "/maruti/vision/video.py": ["/maruti/__init__.py"], "/maruti/deepfake/__init__.py": ["/maruti/deepfake/dataset.py"], "/maruti/deepfake/models.py": ["/maruti/torch/utils.py", "/maruti/deepfake/dataset.py"], "/tests/test_utils.py": ["/maruti/__init__.py"], "/tests/torch/test_utils.py": ["/maruti/torch/__init__.py"], "/maruti/imports/ml.py": ["/maruti/imports/general.py", "/maruti/torch/__init__.py", "/maruti/deepfake/dataset.py", "/maruti/__init__.py", "/maruti/deepfake/__init__.py", "/maruti/vision/__init__.py"], "/maruti/torch/utils.py": ["/maruti/torch/__init__.py"], "/maruti/vision/__init__.py": ["/maruti/vision/image.py", "/maruti/vision/video.py"], "/tests/vision/test_image.py": ["/maruti/vision/__init__.py"], "/maruti/__init__.py": ["/maruti/utils.py", "/maruti/sizes.py", "/maruti/deepfake/__init__.py", "/maruti/torch/__init__.py"], "/maruti/torch/__init__.py": ["/maruti/torch/utils.py"], "/tests/test_sizes.py": ["/maruti/__init__.py"]}
|
2,193
|
gouravsaini021/maruti
|
refs/heads/master
|
/maruti/vision/video.py
|
import cv2
import numpy as np
from .. import vision as mvis
from facenet_pytorch import MTCNN
import torch
from PIL import Image
from collections import defaultdict
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
class Video(cv2.VideoCapture):
    """cv2.VideoCapture usable as a context manager: the capture is
    released automatically on `with` exit."""
    def __enter__(self):
        return self
    def __exit__(self, *args):
        self.release()
def vid_info(path):
    """Return (frame_count, (height, width)) for the video at *path*.

    Bug fix: the original never released the VideoCapture it opened,
    leaking the underlying handle (other helpers in this module release
    their probe captures).
    """
    cap = cv2.VideoCapture(path)
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    cap.release()
    return frame_count, (h, w)
def get_frames(cap: 'cv2.VideoCapture object', frames: 'iterable<int>', code='rgb', start_frame=0):
    """Yield the frames whose indices appear in *frames*, in video order.

    Frame numbers outside the video are ignored; frames are returned in
    RGB when code == 'rgb', otherwise as decoded (BGR).  The capture is
    released after the last requested frame (or at end of video).

    Bug fix: the original seek wrote the capture's own current position
    back (`cap.set(..., curr_index)`), a no-op, instead of seeking to
    *start_frame*.
    """
    curr_index = int(cap.get(cv2.CAP_PROP_POS_FRAMES))
    if curr_index != start_frame:
        cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
        curr_index = start_frame
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    frames = set(frames)
    last_frame = max(frames)
    if frame_count == 0:
        raise Exception('The video is corrupt. Closing')
    for i in range(curr_index, frame_count):
        # grab() skips decoding; retrieve() decodes only requested frames
        _ = cap.grab()
        if i in frames:
            _, frame = cap.retrieve()
            if code == 'rgb':
                yield cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            else:
                yield frame
        if i == last_frame:
            cap.release()
            break
    cap.release()
def get_frames_from_path(path: 'str or posix', frames: 'iterable<int>', code='rgb'):
    """Open the video at *path* and delegate frame extraction to
    get_frames (the capture is released there)."""
    capture = cv2.VideoCapture(str(path))
    return get_frames(capture, frames, code)
def crop_face(img, points, size: "(h,w)" = None):
    """Crop the box points = [x1, y1, x2, y2] out of *img*, optionally
    resizing the crop to (h, w)."""
    if size:
        # cv2.resize expects (w, h): flip the requested (h, w)
        size = size[1], size[0]
    top, bottom = points[1], points[3]
    left, right = points[0], points[2]
    face = img[top:bottom, left:right]
    if size is not None:
        face = cv2.resize(face, size)
    return face
def bbox_from_det(det_list):
    """Scale half-resolution detections up by 2x, carrying the last good
    detection forward over None entries (initial fallback: a 224x224 box
    at the origin)."""
    last_good = np.array([[0, 0, 224, 224]])
    boxes = []
    for detection in det_list:
        if detection is None:
            current = last_good
        else:
            current = detection
            last_good = detection.copy()
        boxes.append(current.astype(int) * 2)
    return boxes
def _face_from_det(frame_idx, detect_idx, frames, det_list, f_h, f_w, margin=30, size=(224, 224), mtcnn=None):
    """Crop one face per frame given sparse half-resolution detections.

    frame_idx / frames: every frame index and image in the range;
    detect_idx / det_list: the subset that was run through detection.
    Missing/None detections are filled by bbox_from_det's carry-forward;
    crops that fail fall back to the last box that worked (initially a
    centre crop).  Returns the list of cropped faces.
    """
    start = frame_idx[0]
    # NOTE(review): n_h/n_w are computed but never used here -- confirm
    # whether a half-resolution step was meant to happen in this function.
    n_h, n_w = f_h // 2, f_w // 2
    full_det_list = [None] * len(frame_idx)
    # first frame should be correct so it can compunsate upcomings
    if det_list[0] is None:
        # re-detect on the first full frame so later Nones have an anchor;
        # detections elsewhere are at half resolution, hence the / 2
        _detection = mtcnn.detect(frames[0])[0]
        if _detection is not None:
            det_list[0] = _detection / 2
    #
    # scatter the sparse detections onto the dense frame range
    for i, box in zip(detect_idx, det_list):
        full_det_list[i - start] = box
    bbox = bbox_from_det(full_det_list)
    # centre crop used as fallback until a crop succeeds
    working_pred = np.array([(f_h // 2) - 112, (f_w // 2) - 112,
                             (f_h // 2) + 112, (f_w // 2) + 112])
    faces = []
    for frame, box in zip(frames, bbox):
        best_pred = box[0]
        # expand the box by `margin` pixels total on each axis
        best_pred[[0, 1]] -= margin // 2
        best_pred[[2, 3]] += (margin + 1) // 2
        try:
            cropped_faces = crop_face(frame, best_pred, size=size)
            working_pred = best_pred
        except:
            # bad box (e.g. out of bounds): reuse the last working one
            cropped_faces = crop_face(frame, working_pred, size=size)
        faces.append(cropped_faces)
    return faces
def non_overlapping_ranges(rngs):
    """Merge half-open [start, end) ranges into maximal non-overlapping
    ones, returned in ascending order as [start, stop) pairs."""
    covered = set()
    for rng in rngs:
        covered.update(range(rng[0], rng[1]))
    ordered = sorted(covered)
    merged = []
    run_start = ordered[0]
    previous = ordered[0]
    for idx in ordered[1:]:
        if idx != previous + 1:
            # gap found: close the current run and start a new one
            merged.append([run_start, previous + 1])
            run_start = idx
        previous = idx
    merged.append([run_start, previous + 1])
    return merged
def get_face_frames2(path, frame_rngs, jumps=4, margin=30, mtcnn=None, size: "(h,w)" = (224, 224)):
    """Extract one face crop per frame for several frame ranges of a video.

    Detection runs on half-resolution frames and only on every *jumps*-th
    frame; frames in between reuse the nearest earlier box
    (see ``bbox_from_det``).

    path: video file path.
    frame_rngs: list of [start, end) ranges, possibly overlapping; the
        result is one list of crops per input range, in the same order.
    jumps: run the detector every *jumps* frames.
    margin: extra pixels added around each detection box.
    mtcnn: detector to reuse; a new MTCNN is created when None.
    size: (h, w) of each crop.
    """
    # for height and width
    cap = cv2.VideoCapture(path)
    f_h, f_w = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(
        cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    n_h, n_w = f_h // 2, f_w // 2
    cap.release()
    # Collapse overlapping ranges so each frame is decoded/detected once.
    non_overlapping_rngs = non_overlapping_ranges(frame_rngs)
    idx2face = defaultdict(lambda: None)
    idx2frame = defaultdict(lambda: None)
    if mtcnn is None:
        mtcnn = MTCNN(select_largest=False, device=device,)
    # getting video frames in one shot
    all_frames_idx = []
    for rng in non_overlapping_rngs:
        all_frames_idx.extend(range(rng[0], rng[1]))
    vid_frames = list(get_frames_from_path(path, all_frames_idx))
    for i, frame in zip(all_frames_idx, vid_frames):
        idx2frame[i] = frame
    # getting detection in one shot
    all_detect_idx = []
    for frame_rng in non_overlapping_rngs:
        all_detect_idx.extend(range(frame_rng[0], frame_rng[1], jumps))
    # Detect on half-resolution copies to save time and memory.
    all_detect_small_frames = [cv2.resize(frame, (n_w, n_h)) for i, frame in zip(
        all_frames_idx, vid_frames) if i in all_detect_idx]
    det, conf = mtcnn.detect(all_detect_small_frames)
    idx2det = defaultdict(lambda: None)
    # NOTE(review): the loop variable shadows the `det` array being
    # iterated; it works, but rename when convenient.
    for i, det in zip(all_detect_idx, det):
        idx2det[i] = det
    # face crop for each non-overlapping range
    for frame_rng in non_overlapping_rngs:
        start, end = frame_rng
        frame_idx = list(range(start, end))
        detect_idx = list(range(start, end, jumps))
        frames = [idx2frame[i] for i in frame_idx]
        det_list = [idx2det[i] for i in detect_idx]
        faces = _face_from_det(
            frame_idx, detect_idx, frames, det_list, f_h, f_w, margin=margin, size=size, mtcnn=mtcnn)
        for i, face in zip(frame_idx, faces):
            idx2face[i] = face
    # Distribute the shared per-frame crops back onto each requested range.
    rng_faces = []
    for rng in frame_rngs:
        curr_rng_faces = []
        for i in range(rng[0], rng[1]):
            curr_rng_faces.append(idx2face[i])
        rng_faces.append(curr_rng_faces)
    return rng_faces
def crop(frame, bb):
    """Return the region of *frame* inside the (x1, y1, x2, y2) box *bb*."""
    top, bottom = bb[1], bb[3]
    left, right = bb[0], bb[2]
    return frame[top:bottom, left:right]
def toint(bb):
    """Coerce every coordinate in *bb* to a plain int (truncating floats)."""
    return list(map(int, bb))
def apply_margin(bb, margin, size):
    """Grow the (x1, y1, x2, y2) box *bb* by *margin* pixels on every side,
    clamped to the image bounds.

    bb: box as (x1, y1, x2, y2), the convention used by ``crop``.
    margin: pixels added to each side.
    size: frame shape as (height, width), i.e. ``frame.shape[:2]``.

    Bug fix: the original clamped x2 with ``size[0]-1`` (the height) and y2
    with ``size[1]-1`` (the width); on non-square frames that either cut
    the box short or let it run past the edge.  x coordinates are now
    clamped by the width and y coordinates by the height.
    """
    height, width = size[0], size[1]
    return [max(0, bb[0] - margin),
            max(0, bb[1] - margin),
            min(width - 1, bb[2] + margin),
            min(height - 1, bb[3] + margin)]
def expand_detection(detections, idx, length):
    """Expand sparse per-frame detections into one entry per frame.

    detections: detection result for each index in *idx* (``None`` allowed).
    idx: frame indices the detections belong to.
    length: total number of frames to cover.

    Each frame reuses the most recent detection at or before it; frames
    before the first indexed detection use ``detections[0]``.  ``None``
    detections become ``[]`` so callers can iterate them safely.

    Improvements over the original: the O(n) ``idx.index(i)`` scan per
    frame is replaced with a dict lookup (first occurrence kept, matching
    ``list.index`` semantics), and empty *detections* no longer raises
    IndexError.
    """
    assert (len(detections) == len(
        idx)), f'length of detection ({len(detections)}) and indices ({len(idx)}) must be same'
    det_at = {}
    for i, det in zip(idx, detections):
        det_at.setdefault(i, det)
    # Seed with the first detection so leading frames get a value too.
    last = detections[0] if detections and detections[0] is not None else []
    final_detections = []
    for i in range(length):
        if i in det_at:
            last = det_at[i]
            if last is None:
                last = []
        final_detections.append(last)
    return final_detections
def get_all_faces(path: 'str', detections=32, mtcnn=None, margin=20):
    """Decode every frame of the video at *path* and crop all detected faces.

    The detector runs on only *detections* evenly spaced frames; the
    resulting boxes are carried forward to the frames in between
    (``expand_detection``).

    path: video file path.
    detections: number of evenly spaced frames to run detection on.
    mtcnn: detector to reuse; a new MTCNN is created when None.
    margin: extra pixels added around every box (``apply_margin``).

    Returns a list with one entry per frame, each a list of face crops
    (empty where no detection was available).

    Bug fix: the VideoCapture handle was never released; ``cap.release()``
    is now called once decoding finishes.

    NOTE(review): all frames are held in memory at full resolution, so
    large videos are RAM-hungry.
    """
    if mtcnn is None:
        mtcnn = MTCNN(select_largest=False, device=device,)
    cap = cv2.VideoCapture(path)
    frames = []
    next_frame = True
    # Decode the whole video up front (OpenCV yields BGR; convert to RGB).
    while next_frame:
        next_frame, fr = cap.read()
        if next_frame:
            frames.append(cv2.cvtColor(fr, cv2.COLOR_BGR2RGB))
    cap.release()
    # Evenly spaced frame indices to run the detector on.
    np_det_idx = np.linspace(0, len(frames), detections,
                             endpoint=False, dtype=int)
    detection_idx = list(map(int, np_det_idx))
    detection_frames = [frame for i, frame in enumerate(
        frames) if i in detection_idx]
    detection = mtcnn.detect(detection_frames)
    # mtcnn.detect returns (boxes, confidences); keep only the boxes.
    detection = detection[0]
    del detection_frames
    detection = expand_detection(detection, detection_idx, len(frames))
    faces = []
    for i, bboxes in enumerate(detection):
        faces.append([])
        for bbox in bboxes:
            bbox = apply_margin(bbox, margin, frames[0].shape[:2])
            faces[-1].append(crop(frames[i], toint(bbox)))
    return faces
def get_face_frames(path, frame_idx, margin=30, mtcnn=None, size: "(h,w)" = (224, 224),):
    """
    Return one face crop per requested frame of the video at *path*.

    Consumes more RAM as it stores all the frames in full resolution.
    Try to detect in small batches if needed.

    Detection runs on half-resolution frames; ``bbox_from_det`` scales the
    boxes back up and carries the last good box over ``None`` gaps.  When a
    crop fails, the last successful crop window (initially a centred
    224x224 box) is reused.
    """
    # for height and width
    cap = cv2.VideoCapture(path)
    f_h, f_w = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(
        cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    cap.release()
    #
    n_h, n_w = f_h // 2, f_w // 2
    if mtcnn is None:
        mtcnn = MTCNN(select_largest=False, device=device,)
    frames = list(get_frames_from_path(path, frame_idx))
    # Detect on half-resolution copies to save time and memory.
    small_faces = [cv2.resize(frame, (n_w, n_h)) for frame in frames]
    det, conf = mtcnn.detect(small_faces)
    # Materialise the detection array as a plain list (identity map).
    det_list = list(map(lambda x: x, det))
    bbox = bbox_from_det(det_list)
    # Fallback crop window: a 224x224 box centred in the frame.
    working_pred = np.array([(f_h // 2) - 112, (f_w // 2) - 112,
                             (f_h // 2) + 112, (f_w // 2) + 112])
    faces = []
    for frame, box in zip(frames, bbox):
        best_pred = box[0]
        # Grow the box by ~margin pixels total, split across both sides.
        best_pred[[0, 1]] -= margin // 2
        best_pred[[2, 3]] += (margin + 1) // 2
        try:
            cropped_faces = crop_face(frame, best_pred, size=size)
            working_pred = best_pred
        except:
            # Bad box (e.g. empty crop): reuse the last window that worked.
            cropped_faces = crop_face(frame, working_pred, size=size)
        faces.append(cropped_faces)
    return faces
def get_faces_frames(path, frame_idx, margin=30, mtcnn=None, size: "(h,w)" = (224, 224),):
    """
    Multi-face variant of ``get_face_frames``: for each requested frame
    returns a list with one crop per detected face.

    Consumes more RAM as it stores all the frames in full resolution.
    Try to detect in small batches if needed.
    """
    # for height and width
    _, (f_h, f_w) = vid_info(path)
    #
    n_h, n_w = f_h // 2, f_w // 2
    if mtcnn is None:
        mtcnn = MTCNN(select_largest=False, device=device,)
    frames = list(get_frames_from_path(path, frame_idx))
    # Detect on half-resolution copies to save time and memory.
    small_faces = [cv2.resize(frame, (n_w, n_h)) for frame in frames]
    det, conf = mtcnn.detect(small_faces)
    det_list = list(map(lambda x: x, det))
    # Retry the first frame at full resolution so bbox_from_det has a good
    # starting box to carry forward (other detections are half-res, so /2).
    if det_list[0] is None:
        _detection = mtcnn.detect(frames[0])[0]
        if _detection is not None:
            det_list[0] = _detection / 2
    bbox = bbox_from_det(det_list)
    # Fallback crop window: a 224x224 box centred in the frame.
    working_pred = np.array([(f_h // 2) - 112, (f_w // 2) - 112,
                             (f_h // 2) + 112, (f_w // 2) + 112])
    faces = []
    for frame, box in zip(frames, bbox):
        all_faces = []
        for face_det in box:
            best_pred = face_det
            best_pred[[0, 1]] -= margin // 2
            best_pred[[2, 3]] += (margin + 1) // 2
            try:
                cropped_faces = crop_face(frame, best_pred, size=size)
                working_pred = best_pred
            except:
                # Bad box: reuse the last successful crop window.
                cropped_faces = crop_face(frame, working_pred, size=size)
            all_faces.append(cropped_faces)
        faces.append(all_faces)
    return faces
def _faces_from_det(frame_idx, detect_idx, frames, det_list, f_h, f_w, margin=30, size=(224, 224), mtcnn=None):
    """Crop every detected face from each frame (multi-face variant of
    ``_face_from_det``).

    Takes the same arguments as ``_face_from_det`` but returns, per frame,
    a list with one crop per face in the detection box array.

    Bug fix: this function was previously also named ``_face_from_det``,
    which silently shadowed the single-face helper defined earlier
    (breaking ``get_face_frames2``) while ``get_faces_frames2`` called the
    then-undefined name ``_faces_from_det`` and raised NameError.  Renaming
    this definition fixes both call sites.
    """
    start = frame_idx[0]
    # Detections were made at half resolution; n_h/n_w kept for reference.
    n_h, n_w = f_h // 2, f_w // 2
    full_det_list = [None] * len(frame_idx)
    # Retry the first frame at full resolution so bbox_from_det has a good
    # starting box to carry forward.
    if det_list[0] is None:
        _detection = mtcnn.detect(frames[0])[0]
        if _detection is not None:
            det_list[0] = _detection / 2
    # Scatter the sparse detections into one slot per frame.
    for i, box in zip(detect_idx, det_list):
        full_det_list[i - start] = box
    bbox = bbox_from_det(full_det_list)
    # Fallback crop window: a 224x224 box centred in the frame.
    working_pred = np.array([(f_h // 2) - 112, (f_w // 2) - 112,
                             (f_h // 2) + 112, (f_w // 2) + 112])
    faces = []
    for frame, box in zip(frames, bbox):
        all_faces = []
        for face_det in box:
            best_pred = face_det
            best_pred[[0, 1]] -= margin // 2
            best_pred[[2, 3]] += (margin + 1) // 2
            try:
                cropped_faces = crop_face(frame, best_pred, size=size)
                working_pred = best_pred
            except:
                # Bad box: reuse the last successful crop window.
                cropped_faces = crop_face(frame, working_pred, size=size)
            all_faces.append(cropped_faces)
        faces.append(all_faces)
    return faces
def get_faces_frames2(path, frame_rngs, jumps=4, margin=30, mtcnn=None, size: "(h,w)" = (224, 224)):
    """Multi-face variant of ``get_face_frames2``: extract all face crops
    per frame for several (possibly overlapping) frame ranges of a video.

    Detection runs on half-resolution frames and only on every *jumps*-th
    frame; frames in between reuse the nearest earlier boxes.

    NOTE(review): ``_faces_from_det`` is not defined in this module as
    written — the later multi-face ``_face_from_det`` appears to be this
    helper misnamed; confirm, otherwise this call raises NameError.
    """
    # for height and width
    cap = cv2.VideoCapture(path)
    f_h, f_w = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(
        cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    n_h, n_w = f_h // 2, f_w // 2
    cap.release()
    # Collapse overlapping ranges so each frame is decoded/detected once.
    non_overlapping_rngs = non_overlapping_ranges(frame_rngs)
    idx2face = defaultdict(lambda: None)
    idx2frame = defaultdict(lambda: None)
    if mtcnn is None:
        mtcnn = MTCNN(select_largest=False, device=device,)
    # getting video frames in one shot
    all_frames_idx = []
    for rng in non_overlapping_rngs:
        all_frames_idx.extend(range(rng[0], rng[1]))
    vid_frames = list(get_frames_from_path(path, all_frames_idx))
    for i, frame in zip(all_frames_idx, vid_frames):
        idx2frame[i] = frame
    # getting detection in one shot
    all_detect_idx = []
    for frame_rng in non_overlapping_rngs:
        all_detect_idx.extend(range(frame_rng[0], frame_rng[1], jumps))
    # Detect on half-resolution copies to save time and memory.
    all_detect_small_frames = [cv2.resize(frame, (n_w, n_h)) for i, frame in zip(
        all_frames_idx, vid_frames) if i in all_detect_idx]
    det, conf = mtcnn.detect(all_detect_small_frames)
    idx2det = defaultdict(lambda: None)
    # NOTE(review): the loop variable shadows the `det` array being
    # iterated; it works, but rename when convenient.
    for i, det in zip(all_detect_idx, det):
        idx2det[i] = det
    # face crop for each non-overlapping range
    for frame_rng in non_overlapping_rngs:
        start, end = frame_rng
        frame_idx = list(range(start, end))
        detect_idx = list(range(start, end, jumps))
        frames = [idx2frame[i] for i in frame_idx]
        det_list = [idx2det[i] for i in detect_idx]
        faces = _faces_from_det(
            frame_idx, detect_idx, frames, det_list, f_h, f_w, margin=margin, size=size, mtcnn=mtcnn)
        for i, face in zip(frame_idx, faces):
            idx2face[i] = face
    # Distribute the shared per-frame crops back onto each requested range.
    rng_faces = []
    for rng in frame_rngs:
        curr_rng_faces = []
        for i in range(rng[0], rng[1]):
            curr_rng_faces.append(idx2face[i])
        rng_faces.append(curr_rng_faces)
    return rng_faces
|
{"/maruti/deepfake/dataset.py": ["/maruti/__init__.py", "/maruti/vision/video.py", "/maruti/utils.py", "/maruti/sizes.py", "/maruti/torch/utils.py"], "/maruti/vision/video.py": ["/maruti/__init__.py"], "/maruti/deepfake/__init__.py": ["/maruti/deepfake/dataset.py"], "/maruti/deepfake/models.py": ["/maruti/torch/utils.py", "/maruti/deepfake/dataset.py"], "/tests/test_utils.py": ["/maruti/__init__.py"], "/tests/torch/test_utils.py": ["/maruti/torch/__init__.py"], "/maruti/imports/ml.py": ["/maruti/imports/general.py", "/maruti/torch/__init__.py", "/maruti/deepfake/dataset.py", "/maruti/__init__.py", "/maruti/deepfake/__init__.py", "/maruti/vision/__init__.py"], "/maruti/torch/utils.py": ["/maruti/torch/__init__.py"], "/maruti/vision/__init__.py": ["/maruti/vision/image.py", "/maruti/vision/video.py"], "/tests/vision/test_image.py": ["/maruti/vision/__init__.py"], "/maruti/__init__.py": ["/maruti/utils.py", "/maruti/sizes.py", "/maruti/deepfake/__init__.py", "/maruti/torch/__init__.py"], "/maruti/torch/__init__.py": ["/maruti/torch/utils.py"], "/tests/test_sizes.py": ["/maruti/__init__.py"]}
|
2,194
|
gouravsaini021/maruti
|
refs/heads/master
|
/maruti/deepfake/__init__.py
|
from . import dataset
from . import models
from .dataset import VideoDataset, DeepfakeDataset, ImageReader
from .dataset import transform, group_transform
|
{"/maruti/deepfake/dataset.py": ["/maruti/__init__.py", "/maruti/vision/video.py", "/maruti/utils.py", "/maruti/sizes.py", "/maruti/torch/utils.py"], "/maruti/vision/video.py": ["/maruti/__init__.py"], "/maruti/deepfake/__init__.py": ["/maruti/deepfake/dataset.py"], "/maruti/deepfake/models.py": ["/maruti/torch/utils.py", "/maruti/deepfake/dataset.py"], "/tests/test_utils.py": ["/maruti/__init__.py"], "/tests/torch/test_utils.py": ["/maruti/torch/__init__.py"], "/maruti/imports/ml.py": ["/maruti/imports/general.py", "/maruti/torch/__init__.py", "/maruti/deepfake/dataset.py", "/maruti/__init__.py", "/maruti/deepfake/__init__.py", "/maruti/vision/__init__.py"], "/maruti/torch/utils.py": ["/maruti/torch/__init__.py"], "/maruti/vision/__init__.py": ["/maruti/vision/image.py", "/maruti/vision/video.py"], "/tests/vision/test_image.py": ["/maruti/vision/__init__.py"], "/maruti/__init__.py": ["/maruti/utils.py", "/maruti/sizes.py", "/maruti/deepfake/__init__.py", "/maruti/torch/__init__.py"], "/maruti/torch/__init__.py": ["/maruti/torch/utils.py"], "/tests/test_sizes.py": ["/maruti/__init__.py"]}
|
2,195
|
gouravsaini021/maruti
|
refs/heads/master
|
/maruti/torch/callback.py
|
import os
from datetime import datetime, timezone, timedelta
from torch.utils.tensorboard import SummaryWriter
from copy import deepcopy
class Callback:
    """No-op base class for training-loop event hooks.

    Subclasses override only the events they care about.  Hooks may return
    a truthy value; ``Compose`` propagates it to the caller — presumably as
    a stop-training signal (TODO confirm against the Learner loop).
    """

    def on_epoch_end(self, losses, metrics, extras, epoch):
        """
        extras-> dict ['time']['model']
        """
        pass

    def on_epoch_start(self, epoch):
        """Called before each epoch begins."""
        pass

    def on_batch_start(self, epoch, batch):
        """Called before each training batch."""
        pass

    def on_batch_end(self, loss, metrics, extras, epoch, batch):
        """Called after each training batch with its loss and metrics."""
        pass

    def on_validation_start(self, epoch):
        """Called before an epoch's validation pass."""
        pass

    def on_validation_end(self, loss, metrics, epoch):
        """Called after an epoch's validation pass with aggregate results."""
        pass

    def on_min_val_start(self, epoch, batch):
        """Called before a 'min_val' evaluation (presumably a mid-epoch
        mini validation — confirm against the Learner)."""
        pass

    def on_min_val_end(self, loss, metrics, extras, epoch, batch):
        """extras['model']"""
        pass

    def on_train_start(self, epoch):
        """Called once when training starts."""
        pass
def Compose(callbacks):
    """Fan a single ``Callback`` interface out to several callbacks.

    Each hook calls the same hook on every wrapped callback, in order, and
    returns ``True`` when any of them returned a truthy value.

    Bug fix: the original accumulated results with
    ``isEnd = isEnd or cb.hook(...)``; once one callback returned a truthy
    value, Python's short-circuiting ``or`` skipped every later callback
    for that event — e.g. a ``Recorder`` placed after an early-stopping
    callback silently missed its epoch-end bookkeeping.  Every callback is
    now invoked unconditionally; the aggregated return value keeps the
    same truthiness as before.
    """
    class NewCallback(Callback):
        def on_epoch_end(self, losses, metrics, extras, epoch):
            stop = False
            for cb in callbacks:
                if cb.on_epoch_end(losses, metrics, extras, epoch):
                    stop = True
            return stop

        def on_epoch_start(self, epoch):
            stop = False
            for cb in callbacks:
                if cb.on_epoch_start(epoch):
                    stop = True
            return stop

        def on_batch_start(self, epoch, batch):
            stop = False
            for cb in callbacks:
                if cb.on_batch_start(epoch, batch):
                    stop = True
            return stop

        def on_batch_end(self, loss, metrics, extras, epoch, batch):
            stop = False
            for cb in callbacks:
                if cb.on_batch_end(loss, metrics, extras, epoch, batch):
                    stop = True
            return stop

        def on_validation_start(self, epoch):
            stop = False
            for cb in callbacks:
                if cb.on_validation_start(epoch):
                    stop = True
            return stop

        def on_validation_end(self, loss, metrics, epoch):
            stop = False
            for cb in callbacks:
                if cb.on_validation_end(loss, metrics, epoch):
                    stop = True
            return stop

        def on_min_val_start(self, epoch, batch):
            stop = False
            for cb in callbacks:
                if cb.on_min_val_start(epoch, batch):
                    stop = True
            return stop

        def on_min_val_end(self, loss, metrics, extras, epoch, batch):
            stop = False
            for cb in callbacks:
                if cb.on_min_val_end(loss, metrics, extras, epoch, batch):
                    stop = True
            return stop

        def on_train_start(self, epochs):
            stop = False
            for cb in callbacks:
                if cb.on_train_start(epochs):
                    stop = True
            return stop

    return NewCallback()
class Recorder(Callback):
    """Callback that records per-epoch losses/metrics and tracks the best
    model seen so far.

    ``prevs`` archives the history of earlier training sessions (or of an
    epoch that was interrupted part-way) so the learner can be re-run
    without losing previous summaries.
    """

    def __init__(self):
        self.best_model = None           # snapshot of the best model so far
        self.best_score = float('inf')   # lowest representative loss so far
        self.summaries = []              # one summary dict per finished epoch
        self.others = []                 # raw per-batch losses/metrics per epoch
        self.prevs = []                  # archived states from earlier sessions
        # to monitor if the learner was stopped in between of an epoch
        self.epoch_started = False

    def on_train_start(self, epochs):
        self.new_state()

    def new_state(self):
        """Archive the current history into ``prevs`` and start fresh.

        Bug fix: the original appended ``self.state_dict()`` instead of the
        local ``sd`` from which ``'prevs'`` had just been deleted, so every
        archived state recursively embedded all earlier archives (unbounded
        growth across repeated fit calls).
        """
        sd = self.state_dict()
        del sd['prevs']
        self.prevs.append(sd)
        self.summaries = []
        self.others = []

    def on_epoch_start(self, epoch):
        if self.epoch_started:
            # Previous epoch never finished: archive the partial history.
            self.new_state()
        self.summaries.append({})
        self.others.append({'train_losses': [], 'train_metrics': []})
        self.epoch_started = True

    def on_batch_end(self, train_loss, train_metrics, extras, epoch, batch):
        self.others[epoch]['train_losses'].append(train_loss)
        self.others[epoch]['train_metrics'].append(train_metrics)

    @property
    def last_summary(self):
        if self.summaries:
            return self.summaries[-1]
        raise Exception('no summaries exists')

    def on_min_val_end(self, loss, metrics, extras, epoch, batch):
        # NOTE(review): stores a *state_dict* here while on_epoch_end stores
        # a deepcopy of the whole model — consumers of best_model must cope
        # with both forms; confirm which is intended.
        if loss < self.best_score:
            self.best_score = loss
            self.best_model = deepcopy(extras['model'].state_dict())

    def on_epoch_end(self, losses, metrics, extras, epoch):
        self.summaries[epoch]['train_loss'] = losses['train']
        self.summaries[epoch]['train_metrics'] = metrics['train']
        self.summaries[epoch]['time'] = extras['time']
        representative_loss = 'train'  # loss used for the best-model update
        if 'val' in losses:
            representative_loss = 'val'
            self.summaries[epoch]['val_loss'] = losses['val']
        if 'val' in metrics:
            self.summaries[epoch]['val_metrics'] = metrics['val']
        if losses[representative_loss] < self.best_score:
            self.best_score = losses[representative_loss]
            self.best_model = deepcopy(extras['model'])
        self.epoch_started = False

    def state_dict(self):
        """Return a deep copy of the recorder's full state."""
        state = {}
        state['best_score'] = self.best_score
        state['best_model'] = self.best_model
        state['summaries'] = self.summaries
        state['others'] = self.others
        state['prevs'] = self.prevs
        return deepcopy(state)

    def load_state_dict(self, state):
        """Restore state produced by :meth:`state_dict`."""
        self.best_score = state['best_score']
        self.best_model = state['best_model']
        self.summaries = state['summaries']
        self.others = state['others']
        self.prevs = state['prevs']
class BoardLog(Callback):
    """Callback that streams losses, metrics and learning rates to
    TensorBoard via ``SummaryWriter``."""

    def __init__(self, comment='learn', path='runs'):
        self.path = path
        self.run = 0
        self.comment = comment
        self.batch_count = 0

    def on_train_start(self, epochs):
        # One run directory per session: <path>/<comment>/<IST timestamp>.
        ist = timezone(timedelta(hours=5.5))
        stamp = datetime.now(tz=ist).strftime('%d_%b_%H:%M:%S')
        log_dir = os.path.join(self.path, self.comment, stamp)
        self.writer = SummaryWriter(log_dir=log_dir, flush_secs=30)
        self.run += 1

    def on_batch_end(self, loss, metrics, extras, epoch, batch):
        lr_vals = {'lr_' + str(i): group['lr']
                   for i, group in enumerate(extras['optimizer'].param_groups)}
        scalars = {'loss': loss, **metrics, **lr_vals}
        self.writer.add_scalars('batch', scalars, global_step=self.batch_count)
        self.batch_count += 1

    def on_min_val_end(self, loss, metrics, extras, epoch, batch):
        scalars = {'loss': loss}
        scalars.update(metrics)
        self.writer.add_scalars('min_val', scalars,
                                global_step=self.batch_count)

    def on_epoch_end(self, losses, metrics, extras, epoch):
        self.writer.add_scalars('losses', losses, global_step=epoch)
        # NOTE(review): assumes metrics['val'] exists for every metric in
        # metrics['train'] — confirm training loop always provides it.
        for name in metrics['train']:
            self.writer.add_scalars(name,
                                    {'val': metrics['val'][name],
                                     'train': metrics['train'][name]},
                                    global_step=epoch)
|
{"/maruti/deepfake/dataset.py": ["/maruti/__init__.py", "/maruti/vision/video.py", "/maruti/utils.py", "/maruti/sizes.py", "/maruti/torch/utils.py"], "/maruti/vision/video.py": ["/maruti/__init__.py"], "/maruti/deepfake/__init__.py": ["/maruti/deepfake/dataset.py"], "/maruti/deepfake/models.py": ["/maruti/torch/utils.py", "/maruti/deepfake/dataset.py"], "/tests/test_utils.py": ["/maruti/__init__.py"], "/tests/torch/test_utils.py": ["/maruti/torch/__init__.py"], "/maruti/imports/ml.py": ["/maruti/imports/general.py", "/maruti/torch/__init__.py", "/maruti/deepfake/dataset.py", "/maruti/__init__.py", "/maruti/deepfake/__init__.py", "/maruti/vision/__init__.py"], "/maruti/torch/utils.py": ["/maruti/torch/__init__.py"], "/maruti/vision/__init__.py": ["/maruti/vision/image.py", "/maruti/vision/video.py"], "/tests/vision/test_image.py": ["/maruti/vision/__init__.py"], "/maruti/__init__.py": ["/maruti/utils.py", "/maruti/sizes.py", "/maruti/deepfake/__init__.py", "/maruti/torch/__init__.py"], "/maruti/torch/__init__.py": ["/maruti/torch/utils.py"], "/tests/test_sizes.py": ["/maruti/__init__.py"]}
|
2,196
|
gouravsaini021/maruti
|
refs/heads/master
|
/maruti/deepfake/models.py
|
import itertools

import torch
import torch.nn as nn
import torchvision
from torch.nn.utils.rnn import PackedSequence, pack_sequence

from ..torch.utils import freeze
from .dataset import group_transform
def resnext50(feature=False, pretrained=False):
    """Build a ResNeXt-50 (32x4d); as a 2048-d feature extractor when
    *feature* is True, otherwise with a single-logit head."""
    net = torchvision.models.resnext50_32x4d(pretrained)
    net.fc = nn.Identity() if feature else nn.Linear(2048, 1)
    return net
def binaryClassifier(input_features):
    """Fully connected head mapping *input_features* to one logit per sample."""
    half = input_features // 2
    return nn.Sequential(
        nn.Linear(input_features, half),
        nn.ReLU(),
        nn.BatchNorm1d(half),
        nn.Linear(half, half),
        nn.ReLU(),
        nn.BatchNorm1d(half),
        nn.Linear(half, 128),
        nn.ReLU(),
        nn.Dropout(),
        nn.Linear(128, 1),
        nn.Flatten())
class ResLSTM(nn.Module):
    """Per-frame ResNeXt-50 feature extractor followed by an LSTM and a
    binary-classification head, operating on ``PackedSequence`` batches of
    video frames."""

    def __init__(self, pretrained=False, hidden_size=512, num_layers=1, bidirectional=True, dropout=0.5):
        super().__init__()
        # ResNeXt backbone emitting 2048-d features per frame.
        self.feature_model = resnext50(True, pretrained)
        # lstm
        self.hidden_size = hidden_size
        # NOTE(review): nn.LSTM ignores dropout (and warns) when
        # num_layers == 1, which is the default here — confirm intended.
        self.lstm = nn.LSTM(2048, hidden_size=hidden_size, num_layers=num_layers,
                            bidirectional=bidirectional, dropout=dropout)
        classifier_features = hidden_size * num_layers
        if bidirectional:
            classifier_features *= 2
        self.classifier = binaryClassifier(classifier_features)

    def forward(self, x):
        """x: PackedSequence of frame tensors; returns one logit per sequence."""
        # Save the reordering needed to restore original batch order.
        unsorted_indices = x.unsorted_indices
        # Run the CNN over every frame of every sequence in one pass.
        x_data = self.feature_model(x.data)
        # Rewrap as a PackedSequence (sorted/unsorted indices are dropped
        # here; the saved unsorted_indices reorders the output at the end).
        x = PackedSequence(x_data, x.batch_sizes)
        # lstm
        out, (h, c) = self.lstm(x)
        batch_size = h.shape[1]
        # Group hidden states per batch element instead of per LSTM pass.
        split_on_batch = h.permute(1, 0, 2)
        # Flatten each element's hidden states into one vector.
        combining_passes = split_on_batch.reshape(batch_size, -1)
        # classify
        val = self.classifier(combining_passes).squeeze(1)
        return val[unsorted_indices]

    def param(self, i=-1):
        """Return parameter group *i* (0-3) for discriminative learning
        rates, or all parameters when i == -1."""
        # all
        if i == -1:
            return self.parameters()
        # grouped
        if i == 0:
            return itertools.chain(self.feature_model.conv1.parameters(),
                                   self.feature_model.bn1.parameters(),
                                   self.feature_model.layer1.parameters(),)
        if i == 1:
            return itertools.chain(self.feature_model.layer2.parameters(),
                                   self.feature_model.layer3.parameters())
        if i == 2:
            return itertools.chain(self.feature_model.layer4.parameters(),
                                   self.feature_model.fc.parameters())
        if i == 3:
            return itertools.chain(self.lstm.parameters(),
                                   self.classifier.parameters())
        else:
            print('there are only 4 param groups -> 0,1,2,3')
class ReslstmNN(nn.Module):
    """Ensemble head over several face-crop sets per video: each set runs
    through a shared ResLSTM whose final classifier layers are replaced by
    Identity (so it emits 128-d embeddings), and the concatenated
    embeddings feed a binary classifier.

    Bug fixes relative to the original:
    * ``forward`` was missing the ``self`` parameter, so any call raised
      TypeError (and its ``self.feature`` references could never resolve).
    * ``collate`` returned ``ps`` (only the last PackedSequence) instead of
      the accumulated ``ps_list``.
    """

    def __init__(self, num_sets=6, pretrained=False, hidden_size=512, num_layers=1, bidirectional=True, dropout=0.5):
        super().__init__()
        self.feature = ResLSTM(pretrained=pretrained, hidden_size=hidden_size,
                               num_layers=num_layers, bidirectional=bidirectional, dropout=dropout)
        # Drop the final Linear(128, 1) + Flatten so the feature extractor
        # outputs 128-d embeddings instead of logits.
        self.feature.classifier[9] = nn.Identity()
        self.feature.classifier[10] = nn.Identity()
        self.classifier = binaryClassifier(128 * num_sets)

    def forward(self, x):
        """x: iterable of PackedSequence sets; returns one logit per video."""
        preds = [self.feature(vid_set) for vid_set in x]
        preds = torch.cat(preds, dim=1)
        preds = self.classifier(preds)
        return preds.squeeze(dim=1)

    @staticmethod
    def transform(vid_sets):
        """Apply ``group_transform`` to every video in *vid_sets*."""
        return [group_transform(vid) for vid in vid_sets]

    @staticmethod
    def collate(batches):
        """Collate ``[(sets, target), ...]`` into
        ``([PackedSequence per set], target tensor)``."""
        ps_list = []
        for set_idx in range(len(batches[0][0])):
            vids = [batch[set_idx] for batch, target in batches]
            ps_list.append(pack_sequence(vids, False))
        # Return the full list, not just the last PackedSequence.
        return ps_list, torch.tensor([target for _, target in batches])
|
{"/maruti/deepfake/dataset.py": ["/maruti/__init__.py", "/maruti/vision/video.py", "/maruti/utils.py", "/maruti/sizes.py", "/maruti/torch/utils.py"], "/maruti/vision/video.py": ["/maruti/__init__.py"], "/maruti/deepfake/__init__.py": ["/maruti/deepfake/dataset.py"], "/maruti/deepfake/models.py": ["/maruti/torch/utils.py", "/maruti/deepfake/dataset.py"], "/tests/test_utils.py": ["/maruti/__init__.py"], "/tests/torch/test_utils.py": ["/maruti/torch/__init__.py"], "/maruti/imports/ml.py": ["/maruti/imports/general.py", "/maruti/torch/__init__.py", "/maruti/deepfake/dataset.py", "/maruti/__init__.py", "/maruti/deepfake/__init__.py", "/maruti/vision/__init__.py"], "/maruti/torch/utils.py": ["/maruti/torch/__init__.py"], "/maruti/vision/__init__.py": ["/maruti/vision/image.py", "/maruti/vision/video.py"], "/tests/vision/test_image.py": ["/maruti/vision/__init__.py"], "/maruti/__init__.py": ["/maruti/utils.py", "/maruti/sizes.py", "/maruti/deepfake/__init__.py", "/maruti/torch/__init__.py"], "/maruti/torch/__init__.py": ["/maruti/torch/utils.py"], "/tests/test_sizes.py": ["/maruti/__init__.py"]}
|
2,197
|
gouravsaini021/maruti
|
refs/heads/master
|
/tests/test_utils.py
|
import maruti
import unittest
import tempfile
from maruti import utils
import os
class UtilsTests(unittest.TestCase):
    """Round-trip tests for the json helpers in maruti.utils."""

    def test_read_write_json(self):
        payload = {'h': 3, 'd': {'j': 4}}
        with tempfile.TemporaryDirectory() as tmp:
            target = os.path.join(tmp, 'test.json')
            # Write the dict, read it back, and expect an exact round trip.
            utils.write_json(payload, target)
            self.assertEqual(payload, utils.read_json(target))
|
{"/maruti/deepfake/dataset.py": ["/maruti/__init__.py", "/maruti/vision/video.py", "/maruti/utils.py", "/maruti/sizes.py", "/maruti/torch/utils.py"], "/maruti/vision/video.py": ["/maruti/__init__.py"], "/maruti/deepfake/__init__.py": ["/maruti/deepfake/dataset.py"], "/maruti/deepfake/models.py": ["/maruti/torch/utils.py", "/maruti/deepfake/dataset.py"], "/tests/test_utils.py": ["/maruti/__init__.py"], "/tests/torch/test_utils.py": ["/maruti/torch/__init__.py"], "/maruti/imports/ml.py": ["/maruti/imports/general.py", "/maruti/torch/__init__.py", "/maruti/deepfake/dataset.py", "/maruti/__init__.py", "/maruti/deepfake/__init__.py", "/maruti/vision/__init__.py"], "/maruti/torch/utils.py": ["/maruti/torch/__init__.py"], "/maruti/vision/__init__.py": ["/maruti/vision/image.py", "/maruti/vision/video.py"], "/tests/vision/test_image.py": ["/maruti/vision/__init__.py"], "/maruti/__init__.py": ["/maruti/utils.py", "/maruti/sizes.py", "/maruti/deepfake/__init__.py", "/maruti/torch/__init__.py"], "/maruti/torch/__init__.py": ["/maruti/torch/utils.py"], "/tests/test_sizes.py": ["/maruti/__init__.py"]}
|
2,198
|
gouravsaini021/maruti
|
refs/heads/master
|
/tests/torch/test_utils.py
|
import unittest
import tempfile
import os
import torchvision
import torch
from maruti.torch import utils
class TorchUtilsTest(unittest.TestCase):
    """Tests for the freeze/unfreeze helpers in maruti.torch.utils."""

    def setUp(self):
        # Fresh (untrained) resnet18 for every test.
        self.model = torchvision.models.resnet18(False)

    def tearDown(self):
        self.model = None

    def test_freeze_unfreeze(self):
        utils.freeze(self.model)
        for param in self.model.parameters():
            self.assertFalse(param.requires_grad)
        utils.unfreeze(self.model)
        for param in self.model.parameters():
            self.assertTrue(param.requires_grad)

    def test_layer_freeze_unfreeze(self):
        layers = ['fc.weight', 'layer1.0', 'layer2', 'layer1', 'layer3.0']
        utils.freeze_layers(self.model, layers)
        for name, layer in self.model.named_parameters():
            tested = False
            for to_freeze in layers:
                if name.startswith(to_freeze):
                    tested = True
                    self.assertFalse(layer.requires_grad)
            if not tested:
                self.assertTrue(layer.requires_grad)
        utils.unfreeze_layers(self.model, layers)
        for param in self.model.parameters():
            self.assertTrue(param.requires_grad)

    def test_children_names(self):
        names = utils.children_names(self.model)
        layers = {'fc', 'layer1', 'layer2', 'layer3',
                  'layer4', 'conv1', 'bn1', 'relu', 'maxpool', 'avgpool'}
        # Bug fix: assertEquals is a deprecated alias that was removed in
        # Python 3.12; use assertEqual.
        self.assertEqual(len(names), len(layers))
        for name in names:
            self.assertTrue(name in layers)
|
{"/maruti/deepfake/dataset.py": ["/maruti/__init__.py", "/maruti/vision/video.py", "/maruti/utils.py", "/maruti/sizes.py", "/maruti/torch/utils.py"], "/maruti/vision/video.py": ["/maruti/__init__.py"], "/maruti/deepfake/__init__.py": ["/maruti/deepfake/dataset.py"], "/maruti/deepfake/models.py": ["/maruti/torch/utils.py", "/maruti/deepfake/dataset.py"], "/tests/test_utils.py": ["/maruti/__init__.py"], "/tests/torch/test_utils.py": ["/maruti/torch/__init__.py"], "/maruti/imports/ml.py": ["/maruti/imports/general.py", "/maruti/torch/__init__.py", "/maruti/deepfake/dataset.py", "/maruti/__init__.py", "/maruti/deepfake/__init__.py", "/maruti/vision/__init__.py"], "/maruti/torch/utils.py": ["/maruti/torch/__init__.py"], "/maruti/vision/__init__.py": ["/maruti/vision/image.py", "/maruti/vision/video.py"], "/tests/vision/test_image.py": ["/maruti/vision/__init__.py"], "/maruti/__init__.py": ["/maruti/utils.py", "/maruti/sizes.py", "/maruti/deepfake/__init__.py", "/maruti/torch/__init__.py"], "/maruti/torch/__init__.py": ["/maruti/torch/utils.py"], "/tests/test_sizes.py": ["/maruti/__init__.py"]}
|
2,199
|
gouravsaini021/maruti
|
refs/heads/master
|
/setup.py
|
# Packaging script for the maruti library.
import setuptools
# The PyPI long description is taken straight from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="maruti",
    # NOTE(review): version is 1.3.4 but download_url points at the v1.3
    # tag — confirm they are meant to differ.
    version="1.3.4",
    author="Ankit Saini",
    author_email="ankitsaini100205@gmail.com",
    description="Maruti Library",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/ankitsainidev/maruti",
    download_url='https://github.com/ankitsainidev/maruti/archive/v1.3.tar.gz',
    packages=['maruti', 'maruti.vision', 'maruti.deepfake',
              'maruti.torch', 'maruti.imports'],
    package_dir={'maruti': 'maruti'},
    # Ship the bundled deepfake data files with the package.
    package_data={'maruti': ['deepfake/data/*/*']},
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.5',
    # NOTE(review): tqdm is pinned to an exact, old version — confirm the
    # pin is still required.
    install_requires=['tqdm==4.40.2', 'opencv-python', 'facenet_pytorch']
)
|
{"/maruti/deepfake/dataset.py": ["/maruti/__init__.py", "/maruti/vision/video.py", "/maruti/utils.py", "/maruti/sizes.py", "/maruti/torch/utils.py"], "/maruti/vision/video.py": ["/maruti/__init__.py"], "/maruti/deepfake/__init__.py": ["/maruti/deepfake/dataset.py"], "/maruti/deepfake/models.py": ["/maruti/torch/utils.py", "/maruti/deepfake/dataset.py"], "/tests/test_utils.py": ["/maruti/__init__.py"], "/tests/torch/test_utils.py": ["/maruti/torch/__init__.py"], "/maruti/imports/ml.py": ["/maruti/imports/general.py", "/maruti/torch/__init__.py", "/maruti/deepfake/dataset.py", "/maruti/__init__.py", "/maruti/deepfake/__init__.py", "/maruti/vision/__init__.py"], "/maruti/torch/utils.py": ["/maruti/torch/__init__.py"], "/maruti/vision/__init__.py": ["/maruti/vision/image.py", "/maruti/vision/video.py"], "/tests/vision/test_image.py": ["/maruti/vision/__init__.py"], "/maruti/__init__.py": ["/maruti/utils.py", "/maruti/sizes.py", "/maruti/deepfake/__init__.py", "/maruti/torch/__init__.py"], "/maruti/torch/__init__.py": ["/maruti/torch/utils.py"], "/tests/test_sizes.py": ["/maruti/__init__.py"]}
|
2,200
|
gouravsaini021/maruti
|
refs/heads/master
|
/maruti/imports/ml.py
|
from .general import *
from .general import __all__ as gen_all
import torch
import torch.nn as nn
import torchvision.transforms as torch_transforms
import torchvision
import maruti.torch as mtorch
import maruti.deepfake.dataset as mdata
import maruti
import maruti.deepfake as mfake
import numpy as np
import cv2
import maruti.vision as mvis
import pandas as pd
import torch.utils.data as tdata
import matplotlib.pyplot as plt
from torch.utils import data
import torch.optim as optim
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
__all__ = gen_all + ['mfake','mvis', 'cv2', 'mdata', 'tdata', 'pd', 'device', 'plt', 'np', 'torch', 'nn', 'torch_transforms',
'torchvision', 'mtorch', 'maruti', 'data', 'optim']
|
{"/maruti/deepfake/dataset.py": ["/maruti/__init__.py", "/maruti/vision/video.py", "/maruti/utils.py", "/maruti/sizes.py", "/maruti/torch/utils.py"], "/maruti/vision/video.py": ["/maruti/__init__.py"], "/maruti/deepfake/__init__.py": ["/maruti/deepfake/dataset.py"], "/maruti/deepfake/models.py": ["/maruti/torch/utils.py", "/maruti/deepfake/dataset.py"], "/tests/test_utils.py": ["/maruti/__init__.py"], "/tests/torch/test_utils.py": ["/maruti/torch/__init__.py"], "/maruti/imports/ml.py": ["/maruti/imports/general.py", "/maruti/torch/__init__.py", "/maruti/deepfake/dataset.py", "/maruti/__init__.py", "/maruti/deepfake/__init__.py", "/maruti/vision/__init__.py"], "/maruti/torch/utils.py": ["/maruti/torch/__init__.py"], "/maruti/vision/__init__.py": ["/maruti/vision/image.py", "/maruti/vision/video.py"], "/tests/vision/test_image.py": ["/maruti/vision/__init__.py"], "/maruti/__init__.py": ["/maruti/utils.py", "/maruti/sizes.py", "/maruti/deepfake/__init__.py", "/maruti/torch/__init__.py"], "/maruti/torch/__init__.py": ["/maruti/torch/utils.py"], "/tests/test_sizes.py": ["/maruti/__init__.py"]}
|
2,201
|
gouravsaini021/maruti
|
refs/heads/master
|
/maruti/imports/general.py
|
import os
import shutil
from glob import glob
from tqdm.auto import tqdm
import itertools
import random
import time
from functools import partial
__all__ = ['time','random','os', 'shutil', 'glob', 'tqdm', 'itertools', 'partial']
|
{"/maruti/deepfake/dataset.py": ["/maruti/__init__.py", "/maruti/vision/video.py", "/maruti/utils.py", "/maruti/sizes.py", "/maruti/torch/utils.py"], "/maruti/vision/video.py": ["/maruti/__init__.py"], "/maruti/deepfake/__init__.py": ["/maruti/deepfake/dataset.py"], "/maruti/deepfake/models.py": ["/maruti/torch/utils.py", "/maruti/deepfake/dataset.py"], "/tests/test_utils.py": ["/maruti/__init__.py"], "/tests/torch/test_utils.py": ["/maruti/torch/__init__.py"], "/maruti/imports/ml.py": ["/maruti/imports/general.py", "/maruti/torch/__init__.py", "/maruti/deepfake/dataset.py", "/maruti/__init__.py", "/maruti/deepfake/__init__.py", "/maruti/vision/__init__.py"], "/maruti/torch/utils.py": ["/maruti/torch/__init__.py"], "/maruti/vision/__init__.py": ["/maruti/vision/image.py", "/maruti/vision/video.py"], "/tests/vision/test_image.py": ["/maruti/vision/__init__.py"], "/maruti/__init__.py": ["/maruti/utils.py", "/maruti/sizes.py", "/maruti/deepfake/__init__.py", "/maruti/torch/__init__.py"], "/maruti/torch/__init__.py": ["/maruti/torch/utils.py"], "/tests/test_sizes.py": ["/maruti/__init__.py"]}
|
2,202
|
gouravsaini021/maruti
|
refs/heads/master
|
/maruti/utils.py
|
import json
import zipfile
import time
from tqdm.auto import tqdm
__all__ = ['rep_time', 'read_json', 'write_json', 'unzip']
def rep_time(seconds):
    """Format *seconds* as ``MM:SS``, or ``HH:MM:SS`` from one hour up."""
    fmt = '%H:%M:%S' if seconds >= 3600 else '%M:%S'
    return time.strftime(fmt, time.gmtime(seconds))
def read_json(path):
    """Load the JSON file at *path* and return the parsed object."""
    with open(path, 'rb') as handle:
        return json.load(handle)
def write_json(dictionary, path):
    """Serialize *dictionary* to *path* as JSON text."""
    with open(path, 'w') as sink:
        json.dump(dictionary, sink)
def unzip(zip_path, path='.'):
    """Extract every member of the archive *zip_path* into *path* with a progress bar.

    Members that fail to extract are skipped silently (best-effort extraction).
    """
    with zipfile.ZipFile(zip_path) as archive:
        for entry in tqdm(archive.infolist(), desc='Extracting '):
            try:
                archive.extract(entry, path)
            except zipfile.error:
                # deliberate: keep going past corrupt members
                pass
|
{"/maruti/deepfake/dataset.py": ["/maruti/__init__.py", "/maruti/vision/video.py", "/maruti/utils.py", "/maruti/sizes.py", "/maruti/torch/utils.py"], "/maruti/vision/video.py": ["/maruti/__init__.py"], "/maruti/deepfake/__init__.py": ["/maruti/deepfake/dataset.py"], "/maruti/deepfake/models.py": ["/maruti/torch/utils.py", "/maruti/deepfake/dataset.py"], "/tests/test_utils.py": ["/maruti/__init__.py"], "/tests/torch/test_utils.py": ["/maruti/torch/__init__.py"], "/maruti/imports/ml.py": ["/maruti/imports/general.py", "/maruti/torch/__init__.py", "/maruti/deepfake/dataset.py", "/maruti/__init__.py", "/maruti/deepfake/__init__.py", "/maruti/vision/__init__.py"], "/maruti/torch/utils.py": ["/maruti/torch/__init__.py"], "/maruti/vision/__init__.py": ["/maruti/vision/image.py", "/maruti/vision/video.py"], "/tests/vision/test_image.py": ["/maruti/vision/__init__.py"], "/maruti/__init__.py": ["/maruti/utils.py", "/maruti/sizes.py", "/maruti/deepfake/__init__.py", "/maruti/torch/__init__.py"], "/maruti/torch/__init__.py": ["/maruti/torch/utils.py"], "/tests/test_sizes.py": ["/maruti/__init__.py"]}
|
2,203
|
gouravsaini021/maruti
|
refs/heads/master
|
/maruti/torch/utils.py
|
# from torch_lr_finder import LRFinder
from tqdm.auto import tqdm
from functools import partial
import torch
import time
import numpy as np
from collections import Counter
from torchvision import transforms as torch_transforms
from . import callback as mcallback
# tqdm variant that removes its bar on completion — suitable for nested loops.
tqdm_nl = partial(tqdm, leave=False)
__all__ = ['unfreeze', 'freeze', 'unfreeze_layers', 'freeze_layers', 'Learner']
# ImageNet normalization constants (the values used by torchvision pretrained models).
def_norm = torch_transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225])
def children_names(model):
    """Return the set of names of *model*'s immediate child modules."""
    return {name for name, _ in model.named_children()}
def apply_method(model, method):
    """Set requires_grad on every parameter: True for 'unfreeze', False otherwise."""
    trainable = method == 'unfreeze'
    for parameter in model.parameters():
        parameter.requires_grad = trainable
def unfreeze(model):
    """Make every parameter of *model* trainable."""
    apply_method(model, 'unfreeze')
def freeze(model):
    """Disable gradients for every parameter of *model*."""
    apply_method(model, 'freeze')
def apply_recursively(model, layer_dict, method):
    """Freeze/unfreeze the parts of *model* selected by *layer_dict*.

    *layer_dict* is a nested dict of child/parameter names; a value of
    ``None`` means "apply to this whole submodule". Names that match a
    child module are recursed into; remaining matches are treated as
    direct parameter names.
    """
    if layer_dict is None:
        apply_method(model, method)
        return
    trainable = method == 'unfreeze'
    visited = set()
    for name, child in model.named_children():
        if name in layer_dict:
            visited.add(name)
            apply_recursively(child, layer_dict[name], method)
    for name, parameter in model.named_parameters():
        if name in layer_dict and name not in visited:
            parameter.requires_grad = trainable
def _dict_from_layers(layers):
if layers is None:
return {None}
splitted = [layer.split('.') for layer in layers]
childs = [split[0] for split in splitted]
child_count = Counter(childs)
layer_dict = {child: {} for child in child_count}
none_layers = set()
for split in splitted:
if len(split) == 1:
none_layers.add(split[0])
else:
layer_dict[split[0]] = {**layer_dict[split[0]],
**_dict_from_layers(split[1:]), }
for none_layer in none_layers:
layer_dict[none_layer] = None
return layer_dict
def freeze_layers(model: 'torch.nn Module', layers: 'generator of layer names'):
    """Disable gradients for just the layers named in *layers* (dotted paths)."""
    selection = _dict_from_layers(layers)
    apply_recursively(model, selection, 'freeze')
def unfreeze_layers(model: 'torch.nn Module', layers: 'generator of layer names'):
    """Enable gradients for just the layers named in *layers* (dotted paths)."""
    selection = _dict_from_layers(layers)
    apply_recursively(model, selection, 'unfreeze')
def _limit_string(string, length):
string = str(string)
if length > len(string):
return string
else:
return string[:length - 2] + '..'
def _time_rep(seconds):
if seconds >= 3600:
return time.strftime('%H:%M:%S', time.gmtime(seconds))
else:
return time.strftime('%M:%S', time.gmtime(seconds))
class Learner:
    """Training harness around a torch model.

    Usage: construct with a model, ``compile`` with optimizer/loss/etc.,
    then ``fit``/``validate``. Progress is printed as a fixed-width table
    and all events are forwarded to callback hooks; a truthy return from
    any ``self.cb.on_*`` hook aborts ``fit`` early.
    """

    def __init__(self, model):
        self.model = model
        self.call_count = 0  # number of fit()/validate() invocations
        self.record = mcallback.Recorder()  # always-on history callback

    # NOTE(review): `callback=mcallback.Callback()` is a mutable default
    # evaluated once at definition time, so the same Callback instance is
    # shared by every compile() that omits the argument — confirm intent.
    def compile(self, optimizer, loss, lr_scheduler=None,
                device='cpu', metrics=None, callback=mcallback.Callback(), max_metric_prints=3):
        """Attach training components.

        metrics: list of functions (pred, target) -> scalar tensor;
        max_metric_prints caps how many metric columns are printed.
        """
        self.optimizer = optimizer
        self.loss = loss
        self.metrics_plimit = max_metric_prints
        self.device = device
        # user callback runs first, recorder second
        self.cb = mcallback.Compose([callback, self.record])
        if lr_scheduler is not None:
            self.lr_scheduler = lr_scheduler
        if metrics is not None:
            self.metrics = metrics
        else:
            self.metrics = []

    def state_dict(self):
        """Snapshot recorder, model, optimizer (and scheduler if present) for checkpointing."""
        if not hasattr(self, 'optimizer'):
            print('You first need to compile the learner')
            return
        state = {
            'record': self.record.state_dict(),
            'model': self.model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
        }
        if hasattr(self, 'lr_scheduler'):
            state['lr_scheduler'] = self.lr_scheduler.state_dict()
        return state

    def load_state_dict(self, state):
        """Restore a checkpoint produced by state_dict().

        Returns True on success; returns False (after printing a hint)
        when the learner is not compiled or compiled without the
        lr_scheduler the checkpoint expects.
        """
        if not hasattr(self, 'optimizer'):
            print('Compile with earlier settings.')
            return False
        self.optimizer.load_state_dict(state['optimizer'])
        self.model.load_state_dict(state['model'])
        self.record.load_state_dict(state['record'])
        if hasattr(self, 'lr_scheduler'):
            self.lr_scheduler.load_state_dict(state['lr_scheduler'])
        else:
            if 'lr_scheduler' in state:
                print(
                    'lr_scheduler is missing. Recommended to compile with same settings.')
                return False
        return True

    @property
    def header_str(self):
        """Fixed-width table header: losses, up to metrics_plimit+1 metrics, time."""
        header_string = ''
        # loss
        headings = ['Train Loss', 'Val Loss']
        # metrics
        for i in range(len(self.metrics)):
            headings.append(self.metrics[i].__name__)
            if i == self.metrics_plimit:
                break
        # time
        headings.append('Time')
        # getting together
        for heading in headings:
            header_string += _limit_string(heading, 12).center(12) + '|'
        return header_string

    @property
    def epoch_str(self):
        """One table row built from the recorder's last epoch summary."""
        info = self.record.last_summary
        info_string = ''
        info_vals = [info['train_loss'], info['val_loss']
                     if 'val_loss' in info else None]
        for i in range(len(self.metrics)):
            info_vals.append(info['val_metrics'][self.metrics[i].__name__])
            if i == self.metrics_plimit:
                break
        info_vals.append(_time_rep(info['time']))
        for info_val in info_vals:
            # NOTE(review): this only rounds ints; float losses/metrics pass
            # through unrounded — `isinstance(info_val, float)` was likely meant.
            if isinstance(info_val, int):
                info_val = round(info_val, 5)
            info_string += _limit_string(info_val, 12).center(12) + '|'
        return info_string

    @property
    def summary_str(self):
        """Aggregate line printed after fit(): total time and recorder's best score."""
        total_time = sum(
            map(lambda x: x['time'], self.record.summaries))
        best_score = self.record.best_score
        return f'Total Time: {_time_rep(total_time)}, Best Score: {best_score}'

    def execute_metrics(self, ypred, y):
        """Evaluate every compiled metric and return {metric name: scalar value}."""
        metric_vals = {}
        for metric in self.metrics:
            # TODO: make better handling of non_scalar metrics
            metric_vals[metric.__name__] = metric(ypred, y).item()
        return metric_vals

    def fit(self, epochs, train_loader, val_loader=None, accumulation_steps=1, save_on_epoch='learn.pth', min_validations=0):
        """Train for *epochs* epochs with gradient accumulation and callbacks.

        min_validations: number of extra mid-epoch validation passes,
        evenly spaced over the batches via np.linspace.
        save_on_epoch: checkpoint path written after every epoch; pass
        None/False to disable.
        Any truthy callback-hook return aborts training immediately.
        """
        # TODO: test for model on same device
        # Save_on_epoch = None or False to stop save, else path to save
        min_validation_idx = set(np.linspace(
            0, len(train_loader), min_validations + 1, dtype=int)[1:])
        self.call_count += 1
        print(self.header_str)
        # train
        self.optimizer.zero_grad()
        if self.cb.on_train_start(epochs):
            return
        for epoch in tqdm_nl(range(epochs)):
            # predictions/targets are accumulated for a whole-epoch loss/metric pass
            epoch_predictions = []
            epoch_targets = []
            if self.cb.on_epoch_start(epoch):
                return
            self.model.train()
            start_time = time.perf_counter()
            train_length = len(train_loader)
            for i, (inputs, targets) in tqdm_nl(enumerate(train_loader), total=train_length, desc='Training: '):
                if self.cb.on_batch_start(epoch, i):
                    return
                inputs, targets = inputs.to(
                    self.device), targets.to(self.device)
                pred = self.model(inputs)
                loss = self.loss(pred, targets)
                # logging
                epoch_predictions.append(pred.clone().detach())
                epoch_targets.append(targets.clone().detach())
                batch_metrics = self.execute_metrics(pred, targets)
                #
                loss.backward()
                # step only every accumulation_steps batches (gradient accumulation)
                if (i + 1) % accumulation_steps == 0:
                    self.optimizer.step()
                    if hasattr(self, 'lr_scheduler'):
                        self.lr_scheduler.step()
                    self.optimizer.zero_grad()
                batch_extras = {'optimizer': self.optimizer, }
                if hasattr(self, 'lr_scheduler'):
                    batch_extras['lr_scheduler'] = self.lr_scheduler
                if self.cb.on_batch_end(loss.item(), batch_metrics, batch_extras, epoch, i):
                    return
                if val_loader is not None:
                    # scheduled mid-epoch ("min") validation
                    if i in min_validation_idx:
                        # free the batch tensors before running validation
                        del inputs
                        del targets
                        if self.cb.on_min_val_start(epoch, i):
                            return
                        min_val_loss, min_val_metrics = self._validate(
                            val_loader)
                        min_val_extras = {'model': self.model}
                        if self.cb.on_min_val_end(min_val_loss, min_val_metrics, min_val_extras, epoch, i):
                            return
                        # _validate switched the model to eval(); restore train mode
                        self.model.train()
            # epoch-level loss/metrics over every batch's stored outputs
            epoch_predictions = torch.cat(epoch_predictions)
            epoch_targets = torch.cat(epoch_targets)
            train_loss = self.loss(
                epoch_predictions, epoch_targets).clone().detach().item()
            train_metrics = self.execute_metrics(
                epoch_predictions, epoch_targets)
            losses = {'train': train_loss}
            metrics = {'train': train_metrics}
            if val_loader is not None:
                if self.cb.on_validation_start(epoch):
                    return
                val_loss, val_metrics = self._validate(val_loader)
                losses['val'] = val_loss
                metrics['val'] = val_metrics
                if self.cb.on_validation_end(val_loss, val_metrics, epoch):
                    return
            if save_on_epoch:
                torch.save(self.state_dict(), save_on_epoch)
            epoch_extra_dict = {'time': time.perf_counter() - start_time,
                                'model': self.model.state_dict(),
                                'optimizer': self.optimizer,
                                }
            if hasattr(self, 'lr_scheduler'):
                epoch_extra_dict['lr_scheduler'] = self.lr_scheduler
            if self.cb.on_epoch_end(losses, metrics, epoch_extra_dict, epoch):
                return
            # this should after the epoch_end callback to be ready
            tqdm.write(self.epoch_str)
        print(self.summary_str)

    def predict(self, data_loader, with_targets=True):
        """Run the model in eval mode over *data_loader* without gradients.

        Returns (predictions, targets) when with_targets is True, else
        just the concatenated predictions.
        """
        self.model.eval()
        prediction_ar = []
        target_ar = []
        with torch.no_grad():
            if with_targets:
                for inputs, targets in tqdm_nl(data_loader, desc='Predicting: '):
                    inputs, targets = inputs.to(
                        self.device), targets.to(self.device)
                    pred = self.model(inputs)
                    prediction_ar.append(pred)
                    target_ar.append(targets)
                return torch.cat(prediction_ar), torch.cat(target_ar)
            for inputs in tqdm_nl(data_loader, desc='Predicting: '):
                inputs = inputs.to(self.device)
                pred = self.model(inputs)
                prediction_ar.append(pred)
            return torch.cat(prediction_ar)

    def validate(self, val_loader):
        """Public validation entry point; counts as a learner invocation."""
        self.call_count += 1
        return self._validate(val_loader)

    def _validate(self, val_loader):
        """Return (loss, metrics dict) computed over all of *val_loader*."""
        pred, target = self.predict(val_loader)
        loss = self.loss(pred, target).clone().detach().item()
        metrics = self.execute_metrics(pred, target)
        return loss, metrics
|
{"/maruti/deepfake/dataset.py": ["/maruti/__init__.py", "/maruti/vision/video.py", "/maruti/utils.py", "/maruti/sizes.py", "/maruti/torch/utils.py"], "/maruti/vision/video.py": ["/maruti/__init__.py"], "/maruti/deepfake/__init__.py": ["/maruti/deepfake/dataset.py"], "/maruti/deepfake/models.py": ["/maruti/torch/utils.py", "/maruti/deepfake/dataset.py"], "/tests/test_utils.py": ["/maruti/__init__.py"], "/tests/torch/test_utils.py": ["/maruti/torch/__init__.py"], "/maruti/imports/ml.py": ["/maruti/imports/general.py", "/maruti/torch/__init__.py", "/maruti/deepfake/dataset.py", "/maruti/__init__.py", "/maruti/deepfake/__init__.py", "/maruti/vision/__init__.py"], "/maruti/torch/utils.py": ["/maruti/torch/__init__.py"], "/maruti/vision/__init__.py": ["/maruti/vision/image.py", "/maruti/vision/video.py"], "/tests/vision/test_image.py": ["/maruti/vision/__init__.py"], "/maruti/__init__.py": ["/maruti/utils.py", "/maruti/sizes.py", "/maruti/deepfake/__init__.py", "/maruti/torch/__init__.py"], "/maruti/torch/__init__.py": ["/maruti/torch/utils.py"], "/tests/test_sizes.py": ["/maruti/__init__.py"]}
|
2,204
|
gouravsaini021/maruti
|
refs/heads/master
|
/maruti/deepfake/utils.py
|
# from torch_lr_finder import LRFinder
from tqdm.auto import tqdm
from functools import partial
import torch
import time
# tqdm variant that removes its bar on completion — suitable for nested loops.
tqdm_nl = partial(tqdm, leave=False)
class Callback:
    """Placeholder callback base class; no hooks are defined here yet."""
    pass
def _limit_string(string, length):
string = str(string)
if length > len(string):
return string
else:
return string[:length - 2] + '..'
def _time_rep(seconds):
if seconds >= 3600:
return time.strftime('%H:%M:%S', time.gmtime(seconds))
else:
return time.strftime('%M:%S', time.gmtime(seconds))
class Learner:
    """Legacy training harness (superseded by maruti.torch.utils.Learner):
    compile with optimizer/loss, then fit/validate, printing a table of
    per-epoch losses, up to three metrics and timing."""

    def __init__(self, model):
        self.model = model

    def compile(self, optimizer, loss, lr_scheduler=None, device='cpu', metrics=None):
        """Attach optimizer, loss, optional lr scheduler, device and metric functions."""
        self.optimizer = optimizer
        self.loss = loss
        self.device = device
        if lr_scheduler is not None:
            self.lr_scheduler = lr_scheduler
        if metrics is not None:
            self.metrics = metrics
        else:
            self.metrics = []

    def fit(self, epochs, train_loader, val_loader=None, accumulation_steps=1):
        """Train for *epochs* epochs with gradient accumulation.

        Returns a dict with per-epoch 'train'/'val' info lists and, when a
        loss beats best_loss, a 'best_state_dict' snapshot of the model.
        """
        # TODO: test for model on same device
        # NOTE(review): best_loss is never reassigned after this, so every
        # epoch's mean loss is "< best_loss" and best_state_dict is simply
        # overwritten each epoch — confirm whether an update was intended.
        best_loss = float('inf')
        each_train_info = []
        each_val_info = []
        complete_info = {}
        # build the fixed-width table header (losses + up to 3 metrics + time)
        header_string = ''
        headings = ['Train Loss', 'Val Loss']
        for i in range(len(self.metrics)):
            headings.append(self.metrics[i].__name__)
            if i == 2:
                break
        for heading in headings:
            header_string += _limit_string(heading, 12).center(12) + '|'
        header_string += 'Time'.center(12) + '|'
        print(header_string)
        # train
        self.optimizer.zero_grad()
        for epoch in tqdm_nl(range(epochs)):
            self.model.train()
            train_info = {}
            val_info = {}
            train_info['losses'] = []
            start_time = time.perf_counter()
            train_length = len(train_loader)
            for i, (inputs, targets) in tqdm_nl(enumerate(train_loader), total=train_length, desc='Training: '):
                inputs, targets = inputs.to(
                    self.device), targets.to(self.device)
                pred = self.model(inputs)
                loss = self.loss(pred, targets)
                # loss tensors (not .item()) are kept for later stacking
                train_info['losses'].append(loss)
                loss.backward()
                # step only every accumulation_steps batches
                if (i + 1) % accumulation_steps == 0:
                    self.optimizer.step()
                    if hasattr(self, 'lr_scheduler'):
                        self.lr_scheduler.step()
                    self.optimizer.zero_grad()
            train_info['time'] = time.perf_counter() - start_time
            if val_loader is not None:
                val_info = self.validate(val_loader)
            info_string = ''

            # NOTE(review): the `length` parameter is accepted but ignored;
            # the widths are hard-coded to 12 below.
            def format_infos(x, length):
                return _limit_string(round(torch.stack(x).mean().item(), 2), 12).center(12)
            info_values = [format_infos(train_info['losses'], 12)]
            if 'losses' in val_info:
                info_values.append(format_infos(val_info['losses'], 12))
                if torch.stack(val_info['losses']).mean().item() < best_loss:
                    complete_info['best_state_dict'] = self.model.state_dict()
            else:
                # no validation loader: fall back to the train loss for "best"
                if torch.stack(train_info['losses']).mean().item() < best_loss:
                    complete_info['best_state_dict'] = self.model.state_dict()
                info_values.append(str(None).center(12))
            for i, metric in enumerate(self.metrics):
                info_values.append(format_infos(
                    val_info['metrics'][metric.__name__], 12))
                if i == 2:
                    break
            total_time = train_info['time']
            if 'time' in val_info:
                total_time += val_info['time']
            info_values.append(_time_rep(total_time).center(12))
            tqdm.write('|'.join(info_values) + '|')
            each_train_info.append(train_info)
            each_val_info.append(val_info)
        complete_info = {**complete_info,
                         'train': each_train_info, 'val': each_val_info}
        return complete_info

    def validate(self, val_loader):
        """Run one gradient-free pass over *val_loader*.

        Returns {'losses': [...], 'metrics': {name: [...]}, 'time': seconds}.
        """
        information = {}
        information['losses'] = []
        information['metrics'] = {}
        for metric in self.metrics:
            information['metrics'][metric.__name__] = []
        self.model.eval()
        # NOTE(review): val_loss is initialized but never used afterwards.
        val_loss = torch.zeros(1)
        start_time = time.perf_counter()
        with torch.set_grad_enabled(False):
            for inputs, targets in tqdm_nl(val_loader, desc='Validating: '):
                inputs, targets = inputs.to(
                    self.device), targets.to(self.device)
                pred = self.model(inputs)
                loss = self.loss(pred, targets)
                information['losses'].append(loss)
                for metric in self.metrics:
                    information['metrics'][metric.__name__].append(
                        metric(pred, targets))
        total_time = time.perf_counter() - start_time
        information['time'] = total_time
        return information
|
{"/maruti/deepfake/dataset.py": ["/maruti/__init__.py", "/maruti/vision/video.py", "/maruti/utils.py", "/maruti/sizes.py", "/maruti/torch/utils.py"], "/maruti/vision/video.py": ["/maruti/__init__.py"], "/maruti/deepfake/__init__.py": ["/maruti/deepfake/dataset.py"], "/maruti/deepfake/models.py": ["/maruti/torch/utils.py", "/maruti/deepfake/dataset.py"], "/tests/test_utils.py": ["/maruti/__init__.py"], "/tests/torch/test_utils.py": ["/maruti/torch/__init__.py"], "/maruti/imports/ml.py": ["/maruti/imports/general.py", "/maruti/torch/__init__.py", "/maruti/deepfake/dataset.py", "/maruti/__init__.py", "/maruti/deepfake/__init__.py", "/maruti/vision/__init__.py"], "/maruti/torch/utils.py": ["/maruti/torch/__init__.py"], "/maruti/vision/__init__.py": ["/maruti/vision/image.py", "/maruti/vision/video.py"], "/tests/vision/test_image.py": ["/maruti/vision/__init__.py"], "/maruti/__init__.py": ["/maruti/utils.py", "/maruti/sizes.py", "/maruti/deepfake/__init__.py", "/maruti/torch/__init__.py"], "/maruti/torch/__init__.py": ["/maruti/torch/utils.py"], "/tests/test_sizes.py": ["/maruti/__init__.py"]}
|
2,205
|
gouravsaini021/maruti
|
refs/heads/master
|
/maruti/vision/__init__.py
|
from . import image
from . import video
from .image import make_grid
from .video import *
|
{"/maruti/deepfake/dataset.py": ["/maruti/__init__.py", "/maruti/vision/video.py", "/maruti/utils.py", "/maruti/sizes.py", "/maruti/torch/utils.py"], "/maruti/vision/video.py": ["/maruti/__init__.py"], "/maruti/deepfake/__init__.py": ["/maruti/deepfake/dataset.py"], "/maruti/deepfake/models.py": ["/maruti/torch/utils.py", "/maruti/deepfake/dataset.py"], "/tests/test_utils.py": ["/maruti/__init__.py"], "/tests/torch/test_utils.py": ["/maruti/torch/__init__.py"], "/maruti/imports/ml.py": ["/maruti/imports/general.py", "/maruti/torch/__init__.py", "/maruti/deepfake/dataset.py", "/maruti/__init__.py", "/maruti/deepfake/__init__.py", "/maruti/vision/__init__.py"], "/maruti/torch/utils.py": ["/maruti/torch/__init__.py"], "/maruti/vision/__init__.py": ["/maruti/vision/image.py", "/maruti/vision/video.py"], "/tests/vision/test_image.py": ["/maruti/vision/__init__.py"], "/maruti/__init__.py": ["/maruti/utils.py", "/maruti/sizes.py", "/maruti/deepfake/__init__.py", "/maruti/torch/__init__.py"], "/maruti/torch/__init__.py": ["/maruti/torch/utils.py"], "/tests/test_sizes.py": ["/maruti/__init__.py"]}
|
2,206
|
gouravsaini021/maruti
|
refs/heads/master
|
/tests/vision/test_image.py
|
import unittest
import cv2
from cv2 import dnn_Net
from maruti.vision import image
import os
TEST_DATA_PATH = 'test_data'
class ImageTests(unittest.TestCase):
    """Tests for maruti.vision.image helpers, driven by a sample image on disk."""

    def setUp(self):
        # Load the shared fixture image once per test.
        self.img_path = os.path.join(TEST_DATA_PATH, 'img1.jpeg')
        self.img = cv2.imread(self.img_path)

    def test_create_net(self):
        net = image.create_net()
        self.assertIsInstance(net, dnn_Net)

    def test_brightness_score(self):
        score = image.brightness_score(self.img)
        self.assertAlmostEqual(score, 1.76, delta=1e-2)

    def test_adjust_brightness(self):
        before = image.brightness_score(self.img)
        brightened = image.adjust_brightness(self.img, 2 * before)
        self.assertGreaterEqual(image.brightness_score(brightened), before)

    def test_crop_around_point(self):
        height, width = self.img.shape[:2]
        centers = [(0, 0), (height - 1, width - 1), (height // 2, width // 2)]
        targets = [(224, 224), (160, 160), (3000, 4000)]
        for center in centers:
            for target in targets:
                cropped = image.crop_around_point(self.img, center, target)
                self.assertEqual(target, cropped.shape[:2])

    def test_get_face_center(self):
        before = image.brightness_score(self.img)
        (x, y), after = image.get_face_center(self.img)
        self.assertEqual(before, after)

    def test_detect_sized_rescaled_face(self):
        targets = [(224, 224), (160, 160), (3000, 4000)]
        for target in targets[::-1]:
            face = image.detect_sized_rescaled_face(
                self.img, target, rescale_factor=2)
            self.assertEqual(target, face.shape[:2])
|
{"/maruti/deepfake/dataset.py": ["/maruti/__init__.py", "/maruti/vision/video.py", "/maruti/utils.py", "/maruti/sizes.py", "/maruti/torch/utils.py"], "/maruti/vision/video.py": ["/maruti/__init__.py"], "/maruti/deepfake/__init__.py": ["/maruti/deepfake/dataset.py"], "/maruti/deepfake/models.py": ["/maruti/torch/utils.py", "/maruti/deepfake/dataset.py"], "/tests/test_utils.py": ["/maruti/__init__.py"], "/tests/torch/test_utils.py": ["/maruti/torch/__init__.py"], "/maruti/imports/ml.py": ["/maruti/imports/general.py", "/maruti/torch/__init__.py", "/maruti/deepfake/dataset.py", "/maruti/__init__.py", "/maruti/deepfake/__init__.py", "/maruti/vision/__init__.py"], "/maruti/torch/utils.py": ["/maruti/torch/__init__.py"], "/maruti/vision/__init__.py": ["/maruti/vision/image.py", "/maruti/vision/video.py"], "/tests/vision/test_image.py": ["/maruti/vision/__init__.py"], "/maruti/__init__.py": ["/maruti/utils.py", "/maruti/sizes.py", "/maruti/deepfake/__init__.py", "/maruti/torch/__init__.py"], "/maruti/torch/__init__.py": ["/maruti/torch/utils.py"], "/tests/test_sizes.py": ["/maruti/__init__.py"]}
|
2,207
|
gouravsaini021/maruti
|
refs/heads/master
|
/maruti/__init__.py
|
from . import utils
from . import sizes
from . import vision
from . import deepfake
from . import kaggle
from . import torch
from .utils import *
from .sizes import *
from .deepfake import ImageReader
from .torch import Learner
|
{"/maruti/deepfake/dataset.py": ["/maruti/__init__.py", "/maruti/vision/video.py", "/maruti/utils.py", "/maruti/sizes.py", "/maruti/torch/utils.py"], "/maruti/vision/video.py": ["/maruti/__init__.py"], "/maruti/deepfake/__init__.py": ["/maruti/deepfake/dataset.py"], "/maruti/deepfake/models.py": ["/maruti/torch/utils.py", "/maruti/deepfake/dataset.py"], "/tests/test_utils.py": ["/maruti/__init__.py"], "/tests/torch/test_utils.py": ["/maruti/torch/__init__.py"], "/maruti/imports/ml.py": ["/maruti/imports/general.py", "/maruti/torch/__init__.py", "/maruti/deepfake/dataset.py", "/maruti/__init__.py", "/maruti/deepfake/__init__.py", "/maruti/vision/__init__.py"], "/maruti/torch/utils.py": ["/maruti/torch/__init__.py"], "/maruti/vision/__init__.py": ["/maruti/vision/image.py", "/maruti/vision/video.py"], "/tests/vision/test_image.py": ["/maruti/vision/__init__.py"], "/maruti/__init__.py": ["/maruti/utils.py", "/maruti/sizes.py", "/maruti/deepfake/__init__.py", "/maruti/torch/__init__.py"], "/maruti/torch/__init__.py": ["/maruti/torch/utils.py"], "/tests/test_sizes.py": ["/maruti/__init__.py"]}
|
2,208
|
gouravsaini021/maruti
|
refs/heads/master
|
/maruti/torch/__init__.py
|
from . import utils
from . import metrics
from .utils import *
|
{"/maruti/deepfake/dataset.py": ["/maruti/__init__.py", "/maruti/vision/video.py", "/maruti/utils.py", "/maruti/sizes.py", "/maruti/torch/utils.py"], "/maruti/vision/video.py": ["/maruti/__init__.py"], "/maruti/deepfake/__init__.py": ["/maruti/deepfake/dataset.py"], "/maruti/deepfake/models.py": ["/maruti/torch/utils.py", "/maruti/deepfake/dataset.py"], "/tests/test_utils.py": ["/maruti/__init__.py"], "/tests/torch/test_utils.py": ["/maruti/torch/__init__.py"], "/maruti/imports/ml.py": ["/maruti/imports/general.py", "/maruti/torch/__init__.py", "/maruti/deepfake/dataset.py", "/maruti/__init__.py", "/maruti/deepfake/__init__.py", "/maruti/vision/__init__.py"], "/maruti/torch/utils.py": ["/maruti/torch/__init__.py"], "/maruti/vision/__init__.py": ["/maruti/vision/image.py", "/maruti/vision/video.py"], "/tests/vision/test_image.py": ["/maruti/vision/__init__.py"], "/maruti/__init__.py": ["/maruti/utils.py", "/maruti/sizes.py", "/maruti/deepfake/__init__.py", "/maruti/torch/__init__.py"], "/maruti/torch/__init__.py": ["/maruti/torch/utils.py"], "/tests/test_sizes.py": ["/maruti/__init__.py"]}
|
2,209
|
gouravsaini021/maruti
|
refs/heads/master
|
/maruti/kaggle.py
|
import os
import subprocess
from pathlib import Path
import zipfile
def set_variables(credentials: 'lists[str,str]=[username, token]'):
    """Export Kaggle API credentials ([username, token]) into the environment."""
    username = credentials[0]
    token = credentials[1]
    os.environ['KAGGLE_USERNAME'] = username
    os.environ['KAGGLE_KEY'] = token
def update_dataset(path, slug, message='new version', clean=False):
    """Publish a new version of an existing Kaggle dataset via the `kaggle` CLI.

    path: directory whose basename is used as the dataset folder; a nested
    copy of the current dataset and its metadata are downloaded into it
    before the new version is pushed with *message*.
    NOTE(review): `clean` is accepted but never used — confirm intent.
    NOTE(review): the owner slug 'ankitsainiankit' is hard-coded; this only
    works for that account.
    """
    folder = os.path.basename(path)
    path = os.path.dirname(path)
    path = Path(path)
    # stage the existing dataset contents inside <path>/<folder>/<folder>
    os.mkdir(path / folder / folder)
    subprocess.call(['kaggle', 'datasets', 'download', '-p',
                     str(path / folder / folder), 'ankitsainiankit/' + slug, '--unzip'])
    # fetch dataset-metadata.json next to the staged contents
    subprocess.call(['kaggle', 'datasets', 'metadata', '-p',
                     str(path / folder), 'ankitsainiankit/' + slug])
    subprocess.call(['kaggle', 'datasets', 'version',
                     '-m', message, '-p', path / folder])
|
{"/maruti/deepfake/dataset.py": ["/maruti/__init__.py", "/maruti/vision/video.py", "/maruti/utils.py", "/maruti/sizes.py", "/maruti/torch/utils.py"], "/maruti/vision/video.py": ["/maruti/__init__.py"], "/maruti/deepfake/__init__.py": ["/maruti/deepfake/dataset.py"], "/maruti/deepfake/models.py": ["/maruti/torch/utils.py", "/maruti/deepfake/dataset.py"], "/tests/test_utils.py": ["/maruti/__init__.py"], "/tests/torch/test_utils.py": ["/maruti/torch/__init__.py"], "/maruti/imports/ml.py": ["/maruti/imports/general.py", "/maruti/torch/__init__.py", "/maruti/deepfake/dataset.py", "/maruti/__init__.py", "/maruti/deepfake/__init__.py", "/maruti/vision/__init__.py"], "/maruti/torch/utils.py": ["/maruti/torch/__init__.py"], "/maruti/vision/__init__.py": ["/maruti/vision/image.py", "/maruti/vision/video.py"], "/tests/vision/test_image.py": ["/maruti/vision/__init__.py"], "/maruti/__init__.py": ["/maruti/utils.py", "/maruti/sizes.py", "/maruti/deepfake/__init__.py", "/maruti/torch/__init__.py"], "/maruti/torch/__init__.py": ["/maruti/torch/utils.py"], "/tests/test_sizes.py": ["/maruti/__init__.py"]}
|
2,210
|
gouravsaini021/maruti
|
refs/heads/master
|
/maruti/vision/image.py
|
import numpy as np
import cv2
from functools import lru_cache
from functools import partial
from os.path import join
import os
from PIL import Image
import torch
# Public API of this module; helpers such as _unNormalize stay private.
__all__ = ['brightness_score', 'adjust_brightness',
           'crop_around_point', 'make_grid']
# Directory of data files shipped alongside this module.
DATA_PATH = join(os.path.dirname(__file__), 'data')
def brightness_score(img):
    """Brightness of *img* as the pixel-value sum normalized by 255 per pixel.

    @params:
        img - an array with shape (w/h, w/h, 3)

    An all-white uint8 image therefore scores 3.0 (one per channel).
    """
    rows, cols = img.shape[:2]
    return np.sum(img) / (255 * rows * cols)
def adjust_brightness(img, min_brightness):
    """Increase or decrease brightness so the score moves toward *min_brightness*.

    @params:
        img - an array with shape (w,h,3)
    """
    current = brightness_score(img)
    ratio = current / min_brightness
    # scale every pixel by 1/ratio (saturating at 255)
    return cv2.convertScaleAbs(img, alpha=1 / ratio, beta=0)
def crop_around_point(img, point, size):
    '''
    Crop a rectangle of shape *size* centered (as closely as possible) at *point*,
    upscaling the image first when it is smaller than the requested crop.
    @params: size (h,w)
    @params: point (x,y)
    '''
    h, w = img.shape[:2]
    n_h, n_w = size
    # resize targets: grow to at least the requested crop size
    r_h, r_w = h, w
    if h < n_h:
        r_h = n_h
    if w < n_w:
        r_w = n_w
    # keep the original aspect ratio by enlarging the other side to match
    # whichever axis had to grow more
    h_ratio = r_h / h
    w_ratio = r_w / w
    if h_ratio > w_ratio:
        r_w = int(r_w * h_ratio / w_ratio)
    elif w_ratio > h_ratio:
        r_h = int(r_h * w_ratio / h_ratio)
    # split the crop extents around the center point (handles odd sizes)
    pre_w, post_w = n_w // 2, n_w - (n_w // 2)
    pre_h, post_h = n_h // 2, n_h - (n_h // 2)
    img = cv2.resize(img, (r_w, r_h))
    # NOTE(review): `point` is taken in the ORIGINAL image's coordinates but
    # applied to the resized image without rescaling — confirm callers pass
    # coordinates valid after the resize.
    midx, midy = point
    startX, startY, endX, endY = 0, 0, 0, 0
    # clamp the window to the image on each axis independently
    if midx - pre_w < 0:
        startX, endX = 0, n_w
    elif midx + post_w - 1 >= r_w:
        startX, endX = r_w - n_w, r_w
    else:
        startX, endX = midx - pre_w, midx + post_w
    if midy - pre_h < 0:
        startY, endY = 0, n_h
    elif midy + post_h - 1 >= r_h:
        startY, endY = r_h - n_h, r_h
    else:
        startY, endY = midy - pre_h, midy + post_h
    return img[startY:endY, startX:endX]
def _unNormalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
mt = torch.FloatTensor(mean).view(1, 1, 3)
st = torch.FloatTensor(std).view(1, 1, 3)
return (((img * st) + mt) * 255).int().numpy().astype(np.uint8)
def make_grid(imgs: '(n,h,w,c) tensor or list of (h,w,c) tensor', cols=8):
    """Tile *imgs* into one (H, W, 3) numpy array, *cols* images per row,
    easy for plotting.

    Images that look normalized (no value above 5 in the first image) are
    first converted back to 0-255 uint8 via _unNormalize.
    """
    count = len(imgs)
    rows = (count + cols - 1) // cols  # ceil division
    # heuristic: normalized tensors have no channel value above 5
    if not (imgs[0] > 5).any():
        imgs = [_unNormalize(img) for img in imgs]
    h, w = imgs[0].shape[:-1]
    # BUG FIX: the canvas width and the horizontal tile offset previously
    # used the image HEIGHT (and the canvas height used the width), which
    # produced overlapping/misplaced tiles for non-square images.
    # PIL's Image.new takes (width, height).
    new_img = Image.new('RGB', (w * cols, h * rows))
    for i in range(len(imgs)):
        tile = Image.fromarray(np.array(imgs[i]).astype(np.uint8))
        x = w * (i % cols)
        y = h * (i // cols)
        new_img.paste(tile, (x, y))
    return np.array(new_img)
|
{"/maruti/deepfake/dataset.py": ["/maruti/__init__.py", "/maruti/vision/video.py", "/maruti/utils.py", "/maruti/sizes.py", "/maruti/torch/utils.py"], "/maruti/vision/video.py": ["/maruti/__init__.py"], "/maruti/deepfake/__init__.py": ["/maruti/deepfake/dataset.py"], "/maruti/deepfake/models.py": ["/maruti/torch/utils.py", "/maruti/deepfake/dataset.py"], "/tests/test_utils.py": ["/maruti/__init__.py"], "/tests/torch/test_utils.py": ["/maruti/torch/__init__.py"], "/maruti/imports/ml.py": ["/maruti/imports/general.py", "/maruti/torch/__init__.py", "/maruti/deepfake/dataset.py", "/maruti/__init__.py", "/maruti/deepfake/__init__.py", "/maruti/vision/__init__.py"], "/maruti/torch/utils.py": ["/maruti/torch/__init__.py"], "/maruti/vision/__init__.py": ["/maruti/vision/image.py", "/maruti/vision/video.py"], "/tests/vision/test_image.py": ["/maruti/vision/__init__.py"], "/maruti/__init__.py": ["/maruti/utils.py", "/maruti/sizes.py", "/maruti/deepfake/__init__.py", "/maruti/torch/__init__.py"], "/maruti/torch/__init__.py": ["/maruti/torch/utils.py"], "/tests/test_sizes.py": ["/maruti/__init__.py"]}
|
2,211
|
gouravsaini021/maruti
|
refs/heads/master
|
/tests/test_sizes.py
|
import unittest
import tempfile
import os
from maruti import sizes
class DeepfakeTest(unittest.TestCase):
    """Smoke tests for the maruti.sizes helpers."""

    def test_byte_to_mb(self):
        self.assertEqual(sizes.byte_to_mb(1024 * 1024), 1)
        self.assertAlmostEqual(sizes.byte_to_mb(1024),
                               0.0009765624, delta=1e-8)

    def test_sizes(self):
        with tempfile.TemporaryDirectory() as workdir:
            # directory sizes: explicit path and the '.' default
            sizes.dir_size(workdir)
            sizes.dir_size()
            # file size on a freshly written file
            sample = os.path.join(workdir, 'test_file.txt')
            with open(sample, 'w') as handle:
                handle.write("It's a test")
            sizes.file_size(sample)
            # in-memory size of a variable
            sizes.var_size(workdir)
|
{"/maruti/deepfake/dataset.py": ["/maruti/__init__.py", "/maruti/vision/video.py", "/maruti/utils.py", "/maruti/sizes.py", "/maruti/torch/utils.py"], "/maruti/vision/video.py": ["/maruti/__init__.py"], "/maruti/deepfake/__init__.py": ["/maruti/deepfake/dataset.py"], "/maruti/deepfake/models.py": ["/maruti/torch/utils.py", "/maruti/deepfake/dataset.py"], "/tests/test_utils.py": ["/maruti/__init__.py"], "/tests/torch/test_utils.py": ["/maruti/torch/__init__.py"], "/maruti/imports/ml.py": ["/maruti/imports/general.py", "/maruti/torch/__init__.py", "/maruti/deepfake/dataset.py", "/maruti/__init__.py", "/maruti/deepfake/__init__.py", "/maruti/vision/__init__.py"], "/maruti/torch/utils.py": ["/maruti/torch/__init__.py"], "/maruti/vision/__init__.py": ["/maruti/vision/image.py", "/maruti/vision/video.py"], "/tests/vision/test_image.py": ["/maruti/vision/__init__.py"], "/maruti/__init__.py": ["/maruti/utils.py", "/maruti/sizes.py", "/maruti/deepfake/__init__.py", "/maruti/torch/__init__.py"], "/maruti/torch/__init__.py": ["/maruti/torch/utils.py"], "/tests/test_sizes.py": ["/maruti/__init__.py"]}
|
2,212
|
gouravsaini021/maruti
|
refs/heads/master
|
/maruti/sizes.py
|
from sys import getsizeof
import os
# Public API; byte_to_mb is an internal helper and not exported.
__all__ = ['dir_size','file_size','var_size']
def byte_to_mb(size):
    """Convert a byte count to mebibytes (MiB)."""
    return size / 1024 ** 2
def dir_size(start_path='.'):
    """Total size in MB of all regular files under *start_path*, recursively.

    Symbolic links are skipped so linked files are not double-counted.
    """
    total = 0
    for dirpath, _dirnames, filenames in os.walk(start_path):
        for filename in filenames:
            full_path = os.path.join(dirpath, filename)
            # skip if it is symbolic link
            if not os.path.islink(full_path):
                total += os.path.getsize(full_path)
    return byte_to_mb(total)
def file_size(path):
    """Size of the file at *path* in MB."""
    return byte_to_mb(os.stat(path).st_size)
def var_size(var):
    """Return the shallow in-memory size of *var*, in MB.

    Uses sys.getsizeof, so contained objects are NOT followed.
    """
    return byte_to_mb(getsizeof(var))

# NOTE: the duplicate trailing ``__all__`` assignment was removed; the module
# already declares ``__all__`` next to its imports with the same names.
|
{"/maruti/deepfake/dataset.py": ["/maruti/__init__.py", "/maruti/vision/video.py", "/maruti/utils.py", "/maruti/sizes.py", "/maruti/torch/utils.py"], "/maruti/vision/video.py": ["/maruti/__init__.py"], "/maruti/deepfake/__init__.py": ["/maruti/deepfake/dataset.py"], "/maruti/deepfake/models.py": ["/maruti/torch/utils.py", "/maruti/deepfake/dataset.py"], "/tests/test_utils.py": ["/maruti/__init__.py"], "/tests/torch/test_utils.py": ["/maruti/torch/__init__.py"], "/maruti/imports/ml.py": ["/maruti/imports/general.py", "/maruti/torch/__init__.py", "/maruti/deepfake/dataset.py", "/maruti/__init__.py", "/maruti/deepfake/__init__.py", "/maruti/vision/__init__.py"], "/maruti/torch/utils.py": ["/maruti/torch/__init__.py"], "/maruti/vision/__init__.py": ["/maruti/vision/image.py", "/maruti/vision/video.py"], "/tests/vision/test_image.py": ["/maruti/vision/__init__.py"], "/maruti/__init__.py": ["/maruti/utils.py", "/maruti/sizes.py", "/maruti/deepfake/__init__.py", "/maruti/torch/__init__.py"], "/maruti/torch/__init__.py": ["/maruti/torch/utils.py"], "/tests/test_sizes.py": ["/maruti/__init__.py"]}
|
2,217
|
lbarchive/b.py
|
refs/heads/master
|
/tests/test_bpy_handlers_text.py
|
# Copyright (C) 2013, 2014 Yu-Jie Lin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import test_bpy_handlers_base as test_base
from bpy.handlers.text import Handler
class HandlerTestCase(test_base.BaseHandlerTestCase):
    """Tests for the plain-text handler, reusing the base handler suite."""

    def setUp(self):
        self.handler = Handler(None)

    # ===== pre_wrap variants of the base title-generation tests =====
    # NOTE: renamed two methods to the consistent
    # ``test_generate_title_pre_wrap_*`` pattern (they previously dropped
    # the ``title`` segment).

    def test_generate_title_pre_wrap_oneline(self):
        handler = self.handler
        handler.options['pre_wrap'] = True
        super(HandlerTestCase, self).test_generate_title_oneline()

    def test_generate_title_pre_wrap_multiline(self):
        handler = self.handler
        handler.options['pre_wrap'] = True
        super(HandlerTestCase, self).test_generate_title_multiline()

    def test_generate_title_pre_wrap_common_markup(self):
        handler = self.handler
        handler.options['pre_wrap'] = True
        super(HandlerTestCase, self).test_generate_title_common_markup()

    # =====

    def test_embed_images(self):
        # The text handler cannot embed images; it must raise.
        handler = self.handler
        self.assertRaises(RuntimeError, handler.embed_images, ('', ))

    def test_embed_images_generate(self):
        # Even with embed_images enabled, generate() must pass the
        # <img> tag through unchanged for the text handler.
        handler = self.handler
        handler.options['embed_images'] = True
        handler.markup = '<img src="http://example.com/example.png"/>'
        html = handler.generate()
        EXPECT = '<img src="http://example.com/example.png"/>'
        self.assertEqual(html, EXPECT)
|
{"/tests/test_bpy_handlers_text.py": ["/bpy/handlers/text.py"], "/bpy/handlers/mkd.py": ["/bpy/handlers/__init__.py"], "/b.py": ["/bpy/handlers/__init__.py", "/bpy/services/__init__.py"], "/tests/test_bpy_handlers_mkd.py": ["/bpy/handlers/mkd.py"], "/bpy/handlers/rst.py": ["/bpy/handlers/__init__.py"], "/bpy/handlers/text.py": ["/bpy/handlers/__init__.py"], "/tests/test_bpy_handlers_base.py": ["/bpy/handlers/base.py"], "/bpy/services/blogger.py": ["/bpy/services/base.py"], "/bpy/services/base.py": ["/bpy/handlers/__init__.py"], "/tests/test_bpy_handlers_rst.py": ["/bpy/handlers/rst.py"], "/bpy/handlers/asciidoc.py": ["/bpy/handlers/__init__.py"], "/bpy/handlers/html.py": ["/bpy/handlers/__init__.py"], "/bpy/services/wordpress.py": ["/bpy/handlers/__init__.py", "/bpy/services/base.py"]}
|
2,218
|
lbarchive/b.py
|
refs/heads/master
|
/bpy/handlers/mkd.py
|
# Copyright (C) 2013, 2014 Yu-Jie Lin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
You can specify `configuration`__ for Python Markdown in :ref:`brc.py` or
embed_images_, for example:
__ http://packages.python.org/Markdown/reference.html#markdown
.. code:: python
handlers = {
'Markdown': {
'options': {
'config': {
'extensions': ['extension1', 'extension2'],
'tab_length': 8,
},
'embed_images': True,
},
},
}
"""
from __future__ import print_function, unicode_literals
import markdown
from bpy.handlers import base
class Handler(base.BaseHandler):
    """Handler for Markdown markup language

    >>> handler = Handler(None)
    >>> print(handler.generate_header({'title': 'foobar'}))
    <!-- !b
    title: foobar
    -->
    <BLANKLINE>
    """

    # Header delimiters: the !b metadata header is wrapped in an HTML
    # comment so the rendered Markdown output is not affected by it.
    PREFIX_HEAD = '<!-- '
    PREFIX_END = '-->'
    # printf-style format for each "key: value" header line.
    HEADER_FMT = '%s: %s'

    def _generate(self, markup=None):
        """Generate HTML from Markdown

        >>> handler = Handler(None)
        >>> print(handler._generate('a *b*'))
        <p>a <em>b</em></p>
        """
        # Fall back to the markup stored on the handler when none is given.
        if markup is None:
            markup = self.markup

        # markdown library only accepts unicode, utf8 encoded str results in error.
        # Any 'config' options from brc.py (extensions, tab_length, ...)
        # are forwarded verbatim to markdown.markdown().
        html = markdown.markdown(markup, **self.options.get('config', {}))
        return html
|
{"/tests/test_bpy_handlers_text.py": ["/bpy/handlers/text.py"], "/bpy/handlers/mkd.py": ["/bpy/handlers/__init__.py"], "/b.py": ["/bpy/handlers/__init__.py", "/bpy/services/__init__.py"], "/tests/test_bpy_handlers_mkd.py": ["/bpy/handlers/mkd.py"], "/bpy/handlers/rst.py": ["/bpy/handlers/__init__.py"], "/bpy/handlers/text.py": ["/bpy/handlers/__init__.py"], "/tests/test_bpy_handlers_base.py": ["/bpy/handlers/base.py"], "/bpy/services/blogger.py": ["/bpy/services/base.py"], "/bpy/services/base.py": ["/bpy/handlers/__init__.py"], "/tests/test_bpy_handlers_rst.py": ["/bpy/handlers/rst.py"], "/bpy/handlers/asciidoc.py": ["/bpy/handlers/__init__.py"], "/bpy/handlers/html.py": ["/bpy/handlers/__init__.py"], "/bpy/services/wordpress.py": ["/bpy/handlers/__init__.py", "/bpy/services/base.py"]}
|
2,219
|
lbarchive/b.py
|
refs/heads/master
|
/b.py
|
#!/usr/bin/env python
# Copyright (C) 2013-2016 by Yu-Jie Lin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
============
b.py command
============
Commands
========
============= =======================
command supported services
============= =======================
``blogs`` ``b``
``post`` ``b``, ``wp``
``generate`` ``base``, ``b``, ``wp``
``checklink`` ``base``, ``b``, ``wp``
``search`` ``b``
============= =======================
Descriptions:
``blogs``
list blogs. This can be used for blog IDs lookup.
``post``
post or update a blog post.
``generate``
generate HTML file at ``<TEMP>/draft.html``, where ``<TEMP>`` is the system's
temporary directory.
The generation can output a preview html at ``<TEMP>/preview.html`` if there
is ``tmpl.html``. It will replace ``%%Title%%`` with post title and
``%%Content%%`` with generated HTML.
``checklink``
check links in generated HTML using lnkckr_.
``search``
search blog
.. _lnkckr: https://pypi.python.org/pypi/lnkckr
"""
from __future__ import print_function
import argparse as ap
import codecs
import imp
import logging
import os
import sys
import traceback
from bpy.handlers import handlers
from bpy.services import find_service, services
# Package metadata: consumed by setup tooling and by the --version flag
# in parse_args().
__program__ = 'b.py'
__description__ = 'Post to Blogger or WordPress in markup language seamlessly'
__copyright__ = 'Copyright 2013-2016, Yu Jie Lin'
__license__ = 'MIT License'
__version__ = '0.11.0'
__website__ = 'http://bitbucket.org/livibetter/b.py'

__author__ = 'Yu-Jie Lin'
__author_email__ = 'livibetter@gmail.com'

# b.py stuff
############

# filename of local configuration without '.py' suffix.
BRC = 'brc'
def parse_args():
    """Build the command-line interface and parse ``sys.argv``.

    Returns:
        argparse.Namespace carrying at least ``command``, ``service`` and
        ``debug``, plus the per-command attributes (``filename``, ``blog``,
        ``q``) set by the chosen subparser.
    """
    p = ap.ArgumentParser()
    p.add_argument('--version', action='version',
                   version='%(prog)s ' + __version__)
    p.add_argument('-d', '--debug', action='store_true',
                   help='turn on debugging messages')
    p.add_argument('-s', '--service', default='base',
                   help='what service to use. (Default: %(default)s)')

    sp = p.add_subparsers(help='commands')

    pblogs = sp.add_parser('blogs', help='list blogs')
    pblogs.set_defaults(subparser=pblogs, command='blogs')

    psearch = sp.add_parser('search', help='search for posts')
    psearch.add_argument('-b', '--blog', help='Blog ID')
    psearch.add_argument('q', nargs='+', help='query text')
    psearch.set_defaults(subparser=psearch, command='search')

    pgen = sp.add_parser('generate', help='generate html')
    pgen.add_argument('filename')
    pgen.set_defaults(subparser=pgen, command='generate')

    # Fixed typo in the user-facing help text: 'chkerateed' -> 'generated'.
    pchk = sp.add_parser('checklink', help='check links in generated html')
    pchk.add_argument('filename')
    pchk.set_defaults(subparser=pchk, command='checklink')

    ppost = sp.add_parser('post', help='post or update a blog post')
    ppost.add_argument('filename')
    ppost.set_defaults(subparser=ppost, command='post')

    args = p.parse_args()
    return args
def load_config():
    """Load the local ``brc`` module from the current directory, if any.

    Returns:
        The imported module, or None when no brc.py exists.  Exits the
        process with status 1 when brc.py exists but fails to import.
    """
    rc = None
    # Pre-bind so the generic except-handler below never hits a NameError
    # when imp.find_module itself raised something other than ImportError.
    _mod_data = None
    try:
        search_path = [os.getcwd()]
        _mod_data = imp.find_module(BRC, search_path)
        print('Loading local configuration...')
        try:
            rc = imp.load_module(BRC, *_mod_data)
        finally:
            # imp.find_module returns an open file object; always close it.
            if _mod_data[0]:
                _mod_data[0].close()
    except ImportError:
        # No brc.py in the working directory: run with the defaults.
        pass
    except Exception:
        traceback.print_exc()
        name = _mod_data[1] if _mod_data else BRC
        print('Error in %s, aborted.' % name)
        sys.exit(1)

    return rc
def main():
    """Entry point: parse args, set up logging and IO, dispatch the command."""
    args = parse_args()

    # Compact log line: time, 4-char level, module:function:line, message.
    logging.basicConfig(
        format=(
            '%(asctime)s '
            '%(levelname).4s '
            '%(module)5.5s:%(funcName)-10.10s:%(lineno)04d '
            '%(message)s'
        ),
        datefmt='%H:%M:%S',
    )
    if args.debug:
        logging.getLogger().setLevel(logging.DEBUG)

    encoding = sys.stdout.encoding
    if not encoding.startswith('UTF'):
        msg = (
            'standard output encoding is %s, '
            'try to set with UTF-8 if there is output issues.'
        )
        logging.warning(msg % encoding)

    # Wrap stdout/stderr so unencodable characters are replaced instead of
    # raising UnicodeEncodeError (Python 3 writers need the .buffer layer).
    if sys.version_info.major == 2:
        sys.stdout = codecs.getwriter(encoding)(sys.stdout, 'replace')
        sys.stderr = codecs.getwriter(encoding)(sys.stderr, 'replace')
    elif sys.version_info.major == 3:
        sys.stdout = codecs.getwriter(encoding)(sys.stdout.buffer, 'replace')
        sys.stderr = codecs.getwriter(encoding)(sys.stderr.buffer, 'replace')

    rc = load_config()

    service_options = {'blog': None}
    if rc:
        # Merge per-handler overrides from brc.py into the registry,
        # updating known handlers and copying in unknown ones.
        if hasattr(rc, 'handlers'):
            for name, handler in rc.handlers.items():
                if name in handlers:
                    handlers[name].update(handler)
                else:
                    handlers[name] = handler.copy()
        # Same merge strategy for the service registry.
        if hasattr(rc, 'services'):
            for name, service in rc.services.items():
                if name in services:
                    services[name].update(service)
                else:
                    services[name] = service.copy()
        # brc.py may pin the service and supply its options.
        if hasattr(rc, 'service'):
            args.service = rc.service
        if hasattr(rc, 'service_options'):
            service_options.update(rc.service_options)

    # An explicit --blog on the command line wins over brc.py.
    if hasattr(args, 'blog') and args.blog is not None:
        service_options['blog'] = args.blog

    filename = args.filename if hasattr(args, 'filename') else None
    service = find_service(args.service, service_options, filename)

    # Dispatch on the subcommand chosen by parse_args().
    if args.command == 'blogs':
        service.list_blogs()
    elif args.command == 'search':
        service.search(' '.join(args.q))
    elif args.command == 'generate':
        service.generate()
    elif args.command == 'checklink':
        service.checklink()
    elif args.command == 'post':
        service.post()


if __name__ == '__main__':
    main()
|
{"/tests/test_bpy_handlers_text.py": ["/bpy/handlers/text.py"], "/bpy/handlers/mkd.py": ["/bpy/handlers/__init__.py"], "/b.py": ["/bpy/handlers/__init__.py", "/bpy/services/__init__.py"], "/tests/test_bpy_handlers_mkd.py": ["/bpy/handlers/mkd.py"], "/bpy/handlers/rst.py": ["/bpy/handlers/__init__.py"], "/bpy/handlers/text.py": ["/bpy/handlers/__init__.py"], "/tests/test_bpy_handlers_base.py": ["/bpy/handlers/base.py"], "/bpy/services/blogger.py": ["/bpy/services/base.py"], "/bpy/services/base.py": ["/bpy/handlers/__init__.py"], "/tests/test_bpy_handlers_rst.py": ["/bpy/handlers/rst.py"], "/bpy/handlers/asciidoc.py": ["/bpy/handlers/__init__.py"], "/bpy/handlers/html.py": ["/bpy/handlers/__init__.py"], "/bpy/services/wordpress.py": ["/bpy/handlers/__init__.py", "/bpy/services/base.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.