index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
7,057
|
CSTARS/ckanext-ecosis
|
refs/heads/master
|
/ckanext/ecosis/datastore/__init__.py
|
import json, re
# import ConfigParser
import time
from pymongo import MongoClient
from . import files, ckan, vocab, delete, query, workspace
from .query import workspace as workspaceQuery
from .utils import storage as ckanFileStorage
from . import mapreduce
from . import push
from .ckan import package
from ckanext.ecosis.datastore.mongo import db
# Shared dict of pymongo collections; injected via init() before any use.
collections = None
def init(schema, coll, pgConn, host, resourceUtil, workspacePath):
    """Wire up the datastore sub-modules with their shared dependencies.

    Args:
        schema: parsed metadata schema (dict loaded from schema.json).
        coll: dict of pymongo collections keyed by role ('resource',
            'package', ...) -- stored in the module global `collections`.
        pgConn: Postgres connection string for CKAN queries.
        host: search host URL passed to the query module.
        resourceUtil: CKAN resource upload helper.
        workspacePath: root directory of the EcoSIS workspace on disk.
    """
    global collections
    collections = coll  # stray trailing ';' removed
    ensureIndexes(collections)
    files.init(collections, workspacePath)
    ckan.init(pgConn, schema)
    query.init(collections, host)
    vocab.init(schema, collections)
    delete.init(collections, workspacePath)
    workspace.init(collections, resourceUtil, workspacePath)
    mapreduce.init(collections, schema)
    push.init(collections)
def getCollections():
    """Expose the module's injected collection dict (None until init())."""
    return collections
def ensureIndexes(collections):
    """Create the Mongo indexes the datastore relies on.

    Indexes every workspace spectra collection on 'index', plus the
    lookup fields on the resource and package collections.
    """
    for name in db.list_collection_names():
        if re.match(r'workspace_spectra_.*', name):
            db[name].create_index('index')
    resource_coll = collections.get('resource')
    resource_coll.create_index('sheetId')
    collections.get('package').create_index('packageId')
    resource_coll.create_index('packageId')
    resource_coll.create_index('resourceId')
def test():
    """Manual smoke test: wire up Mongo + CKAN config, prepare a workspace
    for a known package id, and run a workspace query.

    Intended to be run directly (see the __main__ guard), not from CKAN.
    """
    # Build the config reader locally.  In the original, the ConfigParser
    # import and construction were commented out, so `config` was undefined
    # and this function crashed with a NameError on the first read below.
    import configparser
    config = configparser.ConfigParser()
    config.read('/etc/ckan/default/development.ini')

    schema = "test/schema.json"
    pgConnStr = config.get("app:main", "sqlalchemy.url")
    with open(schema) as schema_file:
        schema = json.load(schema_file)

    client = MongoClient(config.get("app:main", "ecosis.mongo.url"))
    mongoDb = client[config.get("app:main", "ecosis.mongo.db")]
    # NOTE(review): the original dict listed the key "spectra" twice
    # (workspace_spectra_collection then spectra_collection); Python keeps
    # only the last one, so the workspace spectra collection was silently
    # dropped.  The surviving value is kept here -- TODO confirm the
    # intended key name for ecosis.mongo.workspace_spectra_collection.
    collections = {
        "resource" : mongoDb[config.get("app:main", "ecosis.mongo.workspace_resource_collection")],
        "package" : mongoDb[config.get("app:main", "ecosis.mongo.workspace_package_collection")],
        "usda" : mongoDb[config.get("app:main", "ecosis.mongo.usda_collection")],
        "search_package" : mongoDb[config.get("app:main", "ecosis.mongo.search_collection")],
        "spectra" : mongoDb[config.get("app:main", "ecosis.mongo.spectra_collection")]
    }

    ckanFileStorage.init(config)
    init(schema, collections, pgConnStr, config.get("app:main", "ecosis.search_url"), ckanFileStorage, config.get("app:main", "ecosis.workspace.root"))

    workspace.clean()
    workspace.prepare('05cd4761-49ff-4f0d-9a6c-0a0adb223f69')
    result = workspaceQuery.get('05cd4761-49ff-4f0d-9a6c-0a0adb223f69')
    return result
# Allow running this module directly as a manual smoke test.
if __name__ == "__main__":
    test()
|
{"/ckanext/ecosis/lib/data_package_importer.py": ["/ckanext/ecosis/controller/resource/__init__.py", "/ckanext/ecosis/lib/aws.py"], "/ckanext/ecosis/datastore/vocab/controlled.py": ["/ckanext/ecosis/datastore/vocab/__init__.py"], "/ckanext/ecosis/datastore/push/__init__.py": ["/ckanext/ecosis/datastore/__init__.py", "/ckanext/ecosis/datastore/mapreduce/lookup.py", "/ckanext/ecosis/lib/utils.py"], "/ckanext/ecosis/controller/admin/__init__.py": ["/ckanext/ecosis/datastore/__init__.py", "/ckanext/ecosis/datastore/mapreduce/__init__.py", "/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/datastore/mongo.py", "/ckanext/ecosis/controller/admin/upgrade.py"], "/ckanext/ecosis/controller/user/__init__.py": ["/ckanext/ecosis/user_data/model.py", "/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/lib/auth.py"], "/ckanext/ecosis/datastore/files/excel.py": ["/ckanext/ecosis/datastore/files/__init__.py"], "/ckanext/ecosis/datastore/workspace/__init__.py": ["/ckanext/ecosis/datastore/ckan/__init__.py", "/ckanext/ecosis/datastore/files/__init__.py", "/ckanext/ecosis/datastore/mongo.py"], "/ckanext/ecosis/datastore/query/__init__.py": ["/ckanext/ecosis/datastore/ckan/__init__.py", "/ckanext/ecosis/datastore/vocab/__init__.py", "/ckanext/ecosis/datastore/utils/__init__.py", "/ckanext/ecosis/datastore/mongo.py"], "/ckanext/ecosis/datastore/delete/__init__.py": ["/ckanext/ecosis/datastore/mongo.py"], "/ckanext/ecosis/datastore/mapreduce/__init__.py": ["/ckanext/ecosis/datastore/__init__.py", "/ckanext/ecosis/lib/utils.py"], "/ckanext/ecosis/controller/admin/upgrade.py": ["/ckanext/ecosis/datastore/ckan/package.py", "/ckanext/ecosis/datastore/__init__.py"], "/ckanext/ecosis/controller/resource/__init__.py": ["/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/lib/auth.py", "/ckanext/ecosis/datastore/delete/__init__.py", "/ckanext/ecosis/datastore/query/__init__.py", "/ckanext/ecosis/datastore/workspace/__init__.py", "/ckanext/ecosis/controller/package/doi.py", 
"/ckanext/ecosis/datastore/ckan/__init__.py"], "/ckanext/ecosis/controller/organization/__init__.py": ["/ckanext/ecosis/datastore/__init__.py"], "/ckanext/ecosis/controller/spectra/__init__.py": ["/ckanext/ecosis/datastore/query/__init__.py", "/ckanext/ecosis/datastore/vocab/__init__.py"], "/ckanext/ecosis/user_data/paster.py": ["/ckanext/ecosis/user_data/model.py"], "/ckanext/ecosis/controller/package/doi.py": ["/ckanext/ecosis/datastore/push/__init__.py", "/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/lib/auth.py", "/ckanext/ecosis/datastore/ckan/__init__.py"], "/ckanext/ecosis/plugin.py": ["/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/datastore/query/__init__.py", "/ckanext/ecosis/datastore/__init__.py", "/ckanext/ecosis/controller/organization/__init__.py", "/ckanext/ecosis/controller/package/__init__.py", "/ckanext/ecosis/controller/package/doi.py", "/ckanext/ecosis/user_data/model.py", "/ckanext/ecosis/controller/__init__.py", "/ckanext/ecosis/user_data/paster.py"], "/ckanext/ecosis/controller/workspace/__init__.py": ["/ckanext/ecosis/datastore/workspace/__init__.py", "/ckanext/ecosis/datastore/query/workspace.py", "/ckanext/ecosis/datastore/push/__init__.py", "/ckanext/ecosis/lib/utils.py"], "/ckanext/ecosis/datastore/files/utils.py": ["/ckanext/ecosis/datastore/vocab/__init__.py"], "/ckanext/ecosis/datastore/__init__.py": ["/ckanext/ecosis/datastore/query/__init__.py", "/ckanext/ecosis/datastore/utils/__init__.py", "/ckanext/ecosis/datastore/ckan/__init__.py", "/ckanext/ecosis/datastore/mongo.py"], "/ckanext/ecosis/controller/package/__init__.py": ["/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/datastore/__init__.py", "/ckanext/ecosis/lib/auth.py", "/ckanext/ecosis/datastore/ckan/__init__.py", "/ckanext/ecosis/controller/package/doi.py", "/ckanext/ecosis/lib/data_package_importer.py"], "/ckanext/ecosis/controller/__init__.py": ["/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/datastore/mongo.py"], "/ckanext/ecosis/datastore/query/workspace.py": 
["/ckanext/ecosis/datastore/ckan/__init__.py"]}
|
7,058
|
CSTARS/ckanext-ecosis
|
refs/heads/master
|
/ckanext/ecosis/controller/package/__init__.py
|
import json
from flask import make_response
import ckanext.ecosis.lib.utils as utils
from ckanext.ecosis.datastore import delete as deleteUtil
from ckanext.ecosis.lib.auth import hasAccess
from ckanext.ecosis.datastore import workspace
from ckanext.ecosis.datastore.ckan import package
from ckan.common import request
from ckan.lib.base import c, model
import ckan.logic as logic
from ckanext.ecosis.lib.utils import jsonStringify
from ckanext.ecosis.lib.utils import setPackageExtra
from ckan.lib.email_notifications import send_notification
from ckan.common import config
from .doi import handleDoiUpdate, hasAppliedDoi, getDoiStatus, DOI_STATUS, applyDoi
from .doi import init as initDoi
from ckanext.ecosis.lib.data_package_importer import DataPackageImporter
# Injected dict of pymongo collections (set by init()).
collections = None
# Package attributes that must NOT be copied into a reusable template.
ignoreTemplateVars = ["metadata_modified", "state", "creator_user_id", "revision_id", "type", "url","organization"]
'''
Most of these functions will be called from the main __init__.py controller file.
'''
# inject global dependencies
def init(co, pgConn):
    """Store the shared collections dict and hand the Postgres connection
    string to the DOI sub-module."""
    global collections
    collections = co
    initDoi(pgConn)
# remove a package
# This will remove from CKAN as well as EcoSIS extension
def delete():
    """Delete a dataset from CKAN and from the EcoSIS datastore.

    Refuses when the package has an applied DOI.  Returns a JSON string
    with either a 'success' or an 'error' payload.
    """
    # NOTE(review): `response` is not defined or imported anywhere in this
    # module (looks like a Pylons thread-local leftover) -- this line
    # raises NameError at runtime; confirm against the CKAN version in use.
    response.headers["Content-Type"] = "application/json"
    # grab package id from request
    params = {}
    try:
        params = utils.get_request_data(request)
    except:
        # fall back to the query-string parameter when body parsing fails
        params = {'id': request.params.get('id')}
    if params.get('id') is None:
        raise Exception('No package_id provided')
    # make sure user has access to package
    hasAccess(params['id'])
    # make sure no DOI is applied
    if hasAppliedDoi(params['id']):
        return json.dumps({'error': True, 'message':'Cannot delete package with applied DOI'})
    # remove from CKAN
    context = {'model': model, 'user': c.user}
    logic.get_action('package_delete')(context, params)
    # remove from EcoSIS
    deleteUtil.package(params['id'])
    return json.dumps({'success': True})
# update a package
# makes sure update is valid does checking against DOI status
def update():
    """Update a package after validating the change against its DOI status.

    Returns a JSON string: the updated package on success, or a DOI error
    payload (with 'doiApplied': True) when the DOI check rejects the edit.
    """
    # NOTE(review): `response` is not defined in this module (Pylons
    # leftover) -- this line raises NameError at runtime; confirm.
    response.headers["Content-Type"] = "application/json"
    params = json.loads(request.body)
    hasAccess(params['id'])
    context = {'model': model, 'user': c.user}
    cpkg = logic.get_action('package_show')(context, {'id': params['id']})
    # check EcoSIS DOI status
    resp = handleDoiUpdate(cpkg, params)
    if resp.get('error') == True:
        resp['doiApplied'] = True
        return json.dumps(resp)
    pkg = logic.get_action('package_update')(context, params)
    doiStatus = getDoiStatus(pkg)
    # once the DOI request has been accepted, apply it immediately
    if doiStatus.get('status').get('value') == DOI_STATUS["ACCEPTED"]:
        applyDoi(pkg)
    # echo the requester info back so the UI can show who asked for the DOI
    if resp.get('email') is not None:
        pkg['doi_user_email'] = resp.get('email')
        pkg['doi_user_name'] = resp.get('user')
    return json.dumps(pkg)
# create a package. notify admins
def after_create():
    """Notify the admin email group that a new dataset was created.

    Best-effort: any failure is swallowed and only printed to stdout.
    """
    # send email to the admin email group
    url = config.get('ckan.site_url')
    admin_email = config.get('ecosis.admin_email')
    if url != "" and url is not None:
        if admin_email != "" and admin_email is not None:
            try:
                # NOTE(review): `ckanPackage` is never defined in this
                # function or module, so this block always raises NameError,
                # which the bare except below silently swallows -- the
                # notification mail is effectively never sent.  The package
                # dict presumably needs to be passed in as a parameter;
                # confirm against the plugin hook that calls this.
                send_notification(
                    {
                        "email" : admin_email,
                        "display_name" : "EcoSIS Admins"
                    },
                    {
                        "subject" : "EcoSIS Dataset Created - %s" % ckanPackage.get('title'),
                        "body" : ("The dataset '%s' has been created by %s/user/%s. "
                        "You can view the dataset here: %s/dataset/%s"
                        "\n\n-EcoSIS Server") %
                        (ckanPackage.get('title'), config.get('ckan.site_url'), c.user, config.get('ckan.site_url'), ckanPackage.get("name"))
                    }
                )
            except:
                print("Failed to send admin email")
# Once a DOI is applied, the update package function is disabled
# this is a simple workaround service, for just upda
# TODO: remove this, not required anymore, app should use normal package update
def updateLinkedResources():
    """Update only the 'LinkedData' extra of a package (DOI-safe path)."""
    body = request.get_json()
    dataset_id = body.get('id')
    hasAccess(dataset_id)
    ctx = {'model': model, 'user': c.user}
    current = logic.get_action('package_show')(ctx, {'id': dataset_id})
    setPackageExtra('LinkedData', json.dumps(body.get('linkedResources')), current)
    logic.get_action('package_update')(ctx, current)
    return {'success': True}
def importPackage():
    """Import an external data package by URI, then redirect the browser
    to the importer app for the newly created dataset."""
    uri = request.args.get('uri')
    if uri is None:
        raise Exception('uri parameter not provided')
    ctx = {'model': model, 'user': c.user}
    created = DataPackageImporter(uri).run(ctx)
    return make_response(("Redirecting", 307, {"Location": "/import/?id=%s" % created.get('id')}))
# set a package to private
def setPrivate():
    """Handle the EcoSIS side of making a package private: remove it from
    the EcoSIS search index.  Refuses when a DOI is applied.

    Returns a JSON status string.
    """
    # NOTE(review): `response` is not defined in this module -- NameError
    # at runtime (Pylons leftover); confirm against the CKAN version.
    response.headers["Content-Type"] = "application/json"
    package_id = request.params.get('id')
    hasAccess(package_id)
    # can't set a package to private that has a DOI
    if hasAppliedDoi(package_id):
        return json.dumps({'error':True, 'message': 'Cannot modify package with applied DOI'})
    # Once a package is private, it should not be in EcoSIS search
    deleteUtil.cleanFromSearch(package_id)
    return json.dumps({'success': True})
# create the reusable template for a package
def getTemplate():
    """Build a reusable JSON 'template' from an existing package.

    Strips per-dataset attributes, promotes the alias map to a top-level
    key, and optionally reduces the output to just the alias map.
    Returns a dict with 'body' (formatted JSON string) and 'headers'.
    """
    headers = {
        "Content-Type" : "application/json"
    }
    package_id = request.params.get('id')
    format = request.params.get('format')
    # are we only creating a properties alias template?
    # NOTE(review): this is the raw query-string value, so ANY non-empty
    # string (including "false") enables the mapOnly branch below --
    # confirm that is the intended contract with the client app.
    mapOnly = request.params.get('mapOnly')
    # check access
    hasAccess(package_id)
    pkg = package.get(package_id)
    # clean out variables that should NOT be reused between templates
    for var in ignoreTemplateVars:
        if var in pkg:
            del pkg[var]
    # move the aliases to first order citizen
    extras = pkg.get("extras")
    if extras != None and extras.get("aliases") != None:
        pkg["aliases"] = json.loads(extras["aliases"])
        del extras["aliases"]
    # resourceUpdateCount is only for keeping track of when resource files are
    # modified, does not need to be passed along.
    if extras != None:
        if extras.get("resourceUpdateCount") is not None:
            del extras["resourceUpdateCount"]
    # backward compatability with the old 'map' attribute. Used to be used instead of 'aliases'
    if pkg.get("aliases") == None:
        wpkg = collections.get('package').find_one({"packageId": package_id},{"map": 1})
        if "map" in wpkg:
            pkg['aliases'] = wpkg['map']
        else:
            pkg['aliases'] = {}
    # are we downloading or are we sending as rest api call?
    if format != "json":
        headers["Content-Disposition"] = "attachment; filename=\"%s.json\"" % pkg.get('name')
    # we are only interested in the aliases template
    if mapOnly:
        schema = package.getSchema()
        # make sure every schema attribute has an (possibly empty) alias entry
        for key, s in schema.items():
            for item in s:
                if pkg['aliases'].get(item.get('name')) == None:
                    pkg['aliases'][item.get('name')] = ''
        pkg = {
            'aliases' : pkg['aliases']
        }
    return {"body": jsonStringify(pkg, formatted=True), "headers": headers}
# if someone is trying to access the main CKAN package create screen, redirect to
# EcoSIS spectra importer app.
def createPackageRedirect():
    """Redirect CKAN's dataset-create screen to the EcoSIS importer,
    preserving the optional 'group' query parameter."""
    group = request.params.get('group')
    location = "/import/" if group is None else "/import/?group=%s" % group
    return make_response(("Redirecting", 307, {"Location": location}))
# if someone is trying to access the main CKAN package edit screen, redirect to
# EcoSIS spectra importer app.
def editPackageRedirect(id):
    """Redirect a CKAN package-edit request to the EcoSIS importer app.

    Bug fix: under Python 3 str.encode() returns bytes, so the original
    "%s" % id.encode('ascii', 'ignore') produced "/import/?id=b'...'".
    Decode back to str after stripping non-ASCII characters.
    """
    safe_id = id.encode('ascii', 'ignore').decode('ascii')
    headers = {"Location" : "/import/?id=%s" % safe_id}
    return make_response(("Redirecting", 307, headers))
|
{"/ckanext/ecosis/lib/data_package_importer.py": ["/ckanext/ecosis/controller/resource/__init__.py", "/ckanext/ecosis/lib/aws.py"], "/ckanext/ecosis/datastore/vocab/controlled.py": ["/ckanext/ecosis/datastore/vocab/__init__.py"], "/ckanext/ecosis/datastore/push/__init__.py": ["/ckanext/ecosis/datastore/__init__.py", "/ckanext/ecosis/datastore/mapreduce/lookup.py", "/ckanext/ecosis/lib/utils.py"], "/ckanext/ecosis/controller/admin/__init__.py": ["/ckanext/ecosis/datastore/__init__.py", "/ckanext/ecosis/datastore/mapreduce/__init__.py", "/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/datastore/mongo.py", "/ckanext/ecosis/controller/admin/upgrade.py"], "/ckanext/ecosis/controller/user/__init__.py": ["/ckanext/ecosis/user_data/model.py", "/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/lib/auth.py"], "/ckanext/ecosis/datastore/files/excel.py": ["/ckanext/ecosis/datastore/files/__init__.py"], "/ckanext/ecosis/datastore/workspace/__init__.py": ["/ckanext/ecosis/datastore/ckan/__init__.py", "/ckanext/ecosis/datastore/files/__init__.py", "/ckanext/ecosis/datastore/mongo.py"], "/ckanext/ecosis/datastore/query/__init__.py": ["/ckanext/ecosis/datastore/ckan/__init__.py", "/ckanext/ecosis/datastore/vocab/__init__.py", "/ckanext/ecosis/datastore/utils/__init__.py", "/ckanext/ecosis/datastore/mongo.py"], "/ckanext/ecosis/datastore/delete/__init__.py": ["/ckanext/ecosis/datastore/mongo.py"], "/ckanext/ecosis/datastore/mapreduce/__init__.py": ["/ckanext/ecosis/datastore/__init__.py", "/ckanext/ecosis/lib/utils.py"], "/ckanext/ecosis/controller/admin/upgrade.py": ["/ckanext/ecosis/datastore/ckan/package.py", "/ckanext/ecosis/datastore/__init__.py"], "/ckanext/ecosis/controller/resource/__init__.py": ["/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/lib/auth.py", "/ckanext/ecosis/datastore/delete/__init__.py", "/ckanext/ecosis/datastore/query/__init__.py", "/ckanext/ecosis/datastore/workspace/__init__.py", "/ckanext/ecosis/controller/package/doi.py", 
"/ckanext/ecosis/datastore/ckan/__init__.py"], "/ckanext/ecosis/controller/organization/__init__.py": ["/ckanext/ecosis/datastore/__init__.py"], "/ckanext/ecosis/controller/spectra/__init__.py": ["/ckanext/ecosis/datastore/query/__init__.py", "/ckanext/ecosis/datastore/vocab/__init__.py"], "/ckanext/ecosis/user_data/paster.py": ["/ckanext/ecosis/user_data/model.py"], "/ckanext/ecosis/controller/package/doi.py": ["/ckanext/ecosis/datastore/push/__init__.py", "/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/lib/auth.py", "/ckanext/ecosis/datastore/ckan/__init__.py"], "/ckanext/ecosis/plugin.py": ["/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/datastore/query/__init__.py", "/ckanext/ecosis/datastore/__init__.py", "/ckanext/ecosis/controller/organization/__init__.py", "/ckanext/ecosis/controller/package/__init__.py", "/ckanext/ecosis/controller/package/doi.py", "/ckanext/ecosis/user_data/model.py", "/ckanext/ecosis/controller/__init__.py", "/ckanext/ecosis/user_data/paster.py"], "/ckanext/ecosis/controller/workspace/__init__.py": ["/ckanext/ecosis/datastore/workspace/__init__.py", "/ckanext/ecosis/datastore/query/workspace.py", "/ckanext/ecosis/datastore/push/__init__.py", "/ckanext/ecosis/lib/utils.py"], "/ckanext/ecosis/datastore/files/utils.py": ["/ckanext/ecosis/datastore/vocab/__init__.py"], "/ckanext/ecosis/datastore/__init__.py": ["/ckanext/ecosis/datastore/query/__init__.py", "/ckanext/ecosis/datastore/utils/__init__.py", "/ckanext/ecosis/datastore/ckan/__init__.py", "/ckanext/ecosis/datastore/mongo.py"], "/ckanext/ecosis/controller/package/__init__.py": ["/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/datastore/__init__.py", "/ckanext/ecosis/lib/auth.py", "/ckanext/ecosis/datastore/ckan/__init__.py", "/ckanext/ecosis/controller/package/doi.py", "/ckanext/ecosis/lib/data_package_importer.py"], "/ckanext/ecosis/controller/__init__.py": ["/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/datastore/mongo.py"], "/ckanext/ecosis/datastore/query/workspace.py": 
["/ckanext/ecosis/datastore/ckan/__init__.py"]}
|
7,059
|
CSTARS/ckanext-ecosis
|
refs/heads/master
|
/ckanext/ecosis/controller/__init__.py
|
import os, json
from ckan.common import config
import ckan.lib.uploader as uploader
# from ckan.controllers.package import PackageController
from ckanext.ecosis.controller import git, admin, organization, package, resource, spectra, user
from ckanext.ecosis.controller import workspace as workspaceController
from ckanext.ecosis.lib.utils import handleError
from ckanext.ecosis import datastore
from ckanext.ecosis.datastore.mongo import collections
from ckanext.ecosis.lib.utils import jsonStringify
from flask import make_response
# USDA plants service used to (re)build the USDA lookup collection.
usdaApiUrl = 'http://plants.usda.gov/java/AdvancedSearchServlet?symbol=&dsp_vernacular=on&dsp_category=on&dsp_genus=on&dsp_family=on&Synonyms=all&viewby=sciname&download=on'
path = os.path.dirname(__file__)
# Metadata schema shipped with the spectra-importer submodule.
schema = os.path.join(path, "../../../spectra-importer/utils/metadata/schema.json")
pgConnStr = config.get("sqlalchemy.url")
with open(schema) as schema_file:
    # NOTE: `schema` is rebound here from the file path to the parsed dict.
    schema = json.load(schema_file)
upload = uploader.ResourceUpload({})
# Wire up the datastore and controllers at import time (module side effects).
datastore.init(schema, collections, pgConnStr, config.get("ecosis.search_url"), upload, config.get("ecosis.workspace.root"))
package.init(collections, pgConnStr)
organization.init(collections)
class EcosisController():
    """Thin routing layer for the EcoSIS extension.

    Each method delegates to a controller module (admin, package, resource,
    spectra, user, workspace, git) and normalizes failures through
    handleError().  JSON endpoints are wrapped with the module-level resp()
    helper; redirect endpoints intentionally skip error wrapping.

    Change: commented-out dead endpoints from the original file were
    removed; no live behavior changed.
    """
    def cleanTests(self):
        try:
            return admin.cleanTests()
        except Exception as e:
            return handleError(e)
    def clean(self):
        try:
            return admin.clean(collections)
        except Exception as e:
            return handleError(e)
    def updateLinkedResources(self):
        try:
            return (package.updateLinkedResources())
        except Exception as e:
            return handleError(e)
    def importPackage(self):
        try:
            return package.importPackage()
        except Exception as e:
            return handleError(e)
    def getTemplate(self):
        try:
            content = package.getTemplate()
            return make_response((content['body'], 200, content['headers']))
        except Exception as e:
            return handleError(e)
    def createResource(self):
        try:
            return resource.create()
        except Exception as e:
            return handleError(e)
    def deleteResources(self):
        try:
            return resp(resource.deleteMany())
        except Exception as e:
            return handleError(e)
    def rebuildIndex(self):
        try:
            return resp(admin.rebuildIndex(collections))
        except Exception as e:
            return handleError(e)
    def upgrade(self):
        try:
            return admin.upgrade()
        except Exception as e:
            return handleError(e)
    def fixUnits(self):
        try:
            return admin.fixUnits()
        except Exception as e:
            return handleError(e)
    def fixCitations(self):
        try:
            return admin.fixCitationText()
        except Exception as e:
            return handleError(e)
    def doiQuery(self):
        try:
            return resp(package.doi.doiQuery())
        except Exception as e:
            return handleError(e)
    def clearDoi(self):
        try:
            return resp(package.doi.clearDoi())
        except Exception as e:
            return handleError(e)
    def verifyWorkspace(self):
        try:
            return resp(admin.verifyWorkspace(collections))
        except Exception as e:
            return handleError(e)
    def gitInfo(self):
        try:
            return resp(git.info())
        except Exception as e:
            return handleError(e)
    def userInfo(self):
        try:
            return resp(user.info())
        except Exception as e:
            return handleError(e)
    def remoteLogin(self):
        try:
            return resp(user.remote_login())
        except Exception as e:
            return handleError(e)
    def setGithubInfo(self):
        try:
            return resp(user.set_github_info())
        except Exception as e:
            return handleError(e)
    def getAllGithubInfo(self):
        try:
            return user.get_all_github_info()
        except Exception as e:
            return handleError(e)
    # importer-app redirects: no error wrapping, matching the originals
    def createPackageRedirect(self):
        return package.createPackageRedirect()
    def editPackageRedirect(self, package_id):
        return package.editPackageRedirect(package_id)
    def editPackageRedirectWResource(self, package_id, resource_id):
        # resource_id is accepted for routing purposes but unused, as before
        return package.editPackageRedirect(package_id)
    def rebuildUSDACollection(self):
        try:
            return admin.rebuildUSDACollection(collections, usdaApiUrl)
        except Exception as e:
            return handleError(e)
    def gcmdSuggest(self):
        try:
            return resp(spectra.suggestGCMD())
        except Exception as e:
            return handleError(e)
    def topSuggest(self):
        try:
            return resp(spectra.suggestAttributeName())
        except Exception as e:
            return handleError(e)
    def topOverview(self):
        try:
            return resp(spectra.suggestOverview())
        except Exception as e:
            return handleError(e)
    def prepareWorkspace(self):
        try:
            return resp(workspaceController.prepare())
        except Exception as e:
            return handleError(e)
    def cleanWorkspace(self):
        try:
            return workspaceController.clean()
        except Exception as e:
            return handleError(e)
    def getWorkspace(self):
        try:
            return resp(workspaceController.get())
        except Exception as e:
            return handleError(e)
    def processResource(self):
        try:
            return resp(resource.process())
        except Exception as e:
            return handleError(e)
    def getResource(self):
        try:
            return resp(resource.get())
        except Exception as e:
            return handleError(e)
    def getResourceByName(self, package_id, resource_name):
        try:
            return resource.getByName(package_id, resource_name)
        except Exception as e:
            return handleError(e)
    def getSpectra(self):
        try:
            return resp(spectra.get())
        except Exception as e:
            return handleError(e)
    def getSpectraCount(self):
        try:
            return resp(resource.getSpectraCount())
        except Exception as e:
            return handleError(e)
    def getMetadataChunk(self):
        try:
            return resp(resource.getMetadataChunk())
        except Exception as e:
            return handleError(e)
    def getMetadataInfo(self):
        try:
            return resp(resource.getMetadataInfo())
        except Exception as e:
            return handleError(e)
    def pushToSearch(self):
        try:
            return resp(workspaceController.pushToSearch())
        except Exception as e:
            return handleError(e)
def resp(msg, code=200, headers=None):
    """Serialize `msg` to JSON (when not already a str) and wrap it in a
    Flask response with a JSON Content-Type.

    Args:
        msg: response payload; non-str values are run through jsonStringify.
        code: HTTP status code.  Bug fix: the original hard-coded 200 in
            make_response and silently ignored this parameter.
        headers: optional extra headers.  Bug fix: was a mutable default
            dict shared across calls; now created per call.
    """
    if headers is None:
        headers = {}
    if not isinstance(msg, str):
        msg = jsonStringify(msg)
    headers['Content-Type'] = 'application/json'
    return make_response((msg, code, headers))
|
{"/ckanext/ecosis/lib/data_package_importer.py": ["/ckanext/ecosis/controller/resource/__init__.py", "/ckanext/ecosis/lib/aws.py"], "/ckanext/ecosis/datastore/vocab/controlled.py": ["/ckanext/ecosis/datastore/vocab/__init__.py"], "/ckanext/ecosis/datastore/push/__init__.py": ["/ckanext/ecosis/datastore/__init__.py", "/ckanext/ecosis/datastore/mapreduce/lookup.py", "/ckanext/ecosis/lib/utils.py"], "/ckanext/ecosis/controller/admin/__init__.py": ["/ckanext/ecosis/datastore/__init__.py", "/ckanext/ecosis/datastore/mapreduce/__init__.py", "/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/datastore/mongo.py", "/ckanext/ecosis/controller/admin/upgrade.py"], "/ckanext/ecosis/controller/user/__init__.py": ["/ckanext/ecosis/user_data/model.py", "/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/lib/auth.py"], "/ckanext/ecosis/datastore/files/excel.py": ["/ckanext/ecosis/datastore/files/__init__.py"], "/ckanext/ecosis/datastore/workspace/__init__.py": ["/ckanext/ecosis/datastore/ckan/__init__.py", "/ckanext/ecosis/datastore/files/__init__.py", "/ckanext/ecosis/datastore/mongo.py"], "/ckanext/ecosis/datastore/query/__init__.py": ["/ckanext/ecosis/datastore/ckan/__init__.py", "/ckanext/ecosis/datastore/vocab/__init__.py", "/ckanext/ecosis/datastore/utils/__init__.py", "/ckanext/ecosis/datastore/mongo.py"], "/ckanext/ecosis/datastore/delete/__init__.py": ["/ckanext/ecosis/datastore/mongo.py"], "/ckanext/ecosis/datastore/mapreduce/__init__.py": ["/ckanext/ecosis/datastore/__init__.py", "/ckanext/ecosis/lib/utils.py"], "/ckanext/ecosis/controller/admin/upgrade.py": ["/ckanext/ecosis/datastore/ckan/package.py", "/ckanext/ecosis/datastore/__init__.py"], "/ckanext/ecosis/controller/resource/__init__.py": ["/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/lib/auth.py", "/ckanext/ecosis/datastore/delete/__init__.py", "/ckanext/ecosis/datastore/query/__init__.py", "/ckanext/ecosis/datastore/workspace/__init__.py", "/ckanext/ecosis/controller/package/doi.py", 
"/ckanext/ecosis/datastore/ckan/__init__.py"], "/ckanext/ecosis/controller/organization/__init__.py": ["/ckanext/ecosis/datastore/__init__.py"], "/ckanext/ecosis/controller/spectra/__init__.py": ["/ckanext/ecosis/datastore/query/__init__.py", "/ckanext/ecosis/datastore/vocab/__init__.py"], "/ckanext/ecosis/user_data/paster.py": ["/ckanext/ecosis/user_data/model.py"], "/ckanext/ecosis/controller/package/doi.py": ["/ckanext/ecosis/datastore/push/__init__.py", "/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/lib/auth.py", "/ckanext/ecosis/datastore/ckan/__init__.py"], "/ckanext/ecosis/plugin.py": ["/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/datastore/query/__init__.py", "/ckanext/ecosis/datastore/__init__.py", "/ckanext/ecosis/controller/organization/__init__.py", "/ckanext/ecosis/controller/package/__init__.py", "/ckanext/ecosis/controller/package/doi.py", "/ckanext/ecosis/user_data/model.py", "/ckanext/ecosis/controller/__init__.py", "/ckanext/ecosis/user_data/paster.py"], "/ckanext/ecosis/controller/workspace/__init__.py": ["/ckanext/ecosis/datastore/workspace/__init__.py", "/ckanext/ecosis/datastore/query/workspace.py", "/ckanext/ecosis/datastore/push/__init__.py", "/ckanext/ecosis/lib/utils.py"], "/ckanext/ecosis/datastore/files/utils.py": ["/ckanext/ecosis/datastore/vocab/__init__.py"], "/ckanext/ecosis/datastore/__init__.py": ["/ckanext/ecosis/datastore/query/__init__.py", "/ckanext/ecosis/datastore/utils/__init__.py", "/ckanext/ecosis/datastore/ckan/__init__.py", "/ckanext/ecosis/datastore/mongo.py"], "/ckanext/ecosis/controller/package/__init__.py": ["/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/datastore/__init__.py", "/ckanext/ecosis/lib/auth.py", "/ckanext/ecosis/datastore/ckan/__init__.py", "/ckanext/ecosis/controller/package/doi.py", "/ckanext/ecosis/lib/data_package_importer.py"], "/ckanext/ecosis/controller/__init__.py": ["/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/datastore/mongo.py"], "/ckanext/ecosis/datastore/query/workspace.py": 
["/ckanext/ecosis/datastore/ckan/__init__.py"]}
|
7,060
|
CSTARS/ckanext-ecosis
|
refs/heads/master
|
/ckanext/ecosis/datastore/query/workspace.py
|
from ckanext.ecosis.datastore.ckan import resource as ckanResourceQuery
from ckanext.ecosis.datastore.ckan import package as ckanPackageQuery
import ckan.lib.uploader as uploader
import os
# Dependencies injected by init(); all must be set before get() is called.
collections = None  # dict of pymongo collections keyed by role
getResource = None  # callable resource_id -> sheet descriptors (presumably from query module -- confirm)
isPushed = None  # callable package_id -> pushed-to-search state (confirm semantics at injection site)
# inject global dependencies
def init(co, fn, q):
    """Store the injected collections dict and the two query helpers."""
    global collections, getResource, isPushed
    isPushed = q
    getResource = fn
    collections = co
# get a workspace for a package
def get(package_id):
    """Assemble the full workspace view for a package.

    Combines the Mongo workspace document, the CKAN package and its active
    resources, per-resource file sizes from disk, and the parsed sheet
    descriptors (excluding container excel/zip entries).
    """
    # get all package resources
    resources = ckanResourceQuery.active(package_id)
    response = {
        "package" : collections.get("package").find_one({
            "packageId": package_id,
        }, {"runInfo": 0, "_id": 0}),
        "resources" : [],
        "ckan" : {
            "package" : ckanPackageQuery.get(package_id),
            "resources" : resources
        },
        "pushed" : isPushed(package_id)
    }
    # no workspace document yet -- present an empty one instead of None
    if response['package'] is None:
        response['package'] = {}
    # append information about the dataset resources to response
    for resource in resources:
        sheets = getResource(resource.get('id'))
        upload = uploader.ResourceUpload(resource)
        path = upload.get_path(resource['id'])
        # report 0 for files missing on disk rather than raising
        if os.path.exists(path):
            resource['file_size'] = os.path.getsize(path)
        else:
            resource['file_size'] = 0
        for sheet in sheets:
            # we don't care about root excel files, only the sheets
            if sheet.get('excel') == True or sheet.get('isZip') == True:
                continue
            response.get('resources').append(sheet)
    return response
|
{"/ckanext/ecosis/lib/data_package_importer.py": ["/ckanext/ecosis/controller/resource/__init__.py", "/ckanext/ecosis/lib/aws.py"], "/ckanext/ecosis/datastore/vocab/controlled.py": ["/ckanext/ecosis/datastore/vocab/__init__.py"], "/ckanext/ecosis/datastore/push/__init__.py": ["/ckanext/ecosis/datastore/__init__.py", "/ckanext/ecosis/datastore/mapreduce/lookup.py", "/ckanext/ecosis/lib/utils.py"], "/ckanext/ecosis/controller/admin/__init__.py": ["/ckanext/ecosis/datastore/__init__.py", "/ckanext/ecosis/datastore/mapreduce/__init__.py", "/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/datastore/mongo.py", "/ckanext/ecosis/controller/admin/upgrade.py"], "/ckanext/ecosis/controller/user/__init__.py": ["/ckanext/ecosis/user_data/model.py", "/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/lib/auth.py"], "/ckanext/ecosis/datastore/files/excel.py": ["/ckanext/ecosis/datastore/files/__init__.py"], "/ckanext/ecosis/datastore/workspace/__init__.py": ["/ckanext/ecosis/datastore/ckan/__init__.py", "/ckanext/ecosis/datastore/files/__init__.py", "/ckanext/ecosis/datastore/mongo.py"], "/ckanext/ecosis/datastore/query/__init__.py": ["/ckanext/ecosis/datastore/ckan/__init__.py", "/ckanext/ecosis/datastore/vocab/__init__.py", "/ckanext/ecosis/datastore/utils/__init__.py", "/ckanext/ecosis/datastore/mongo.py"], "/ckanext/ecosis/datastore/delete/__init__.py": ["/ckanext/ecosis/datastore/mongo.py"], "/ckanext/ecosis/datastore/mapreduce/__init__.py": ["/ckanext/ecosis/datastore/__init__.py", "/ckanext/ecosis/lib/utils.py"], "/ckanext/ecosis/controller/admin/upgrade.py": ["/ckanext/ecosis/datastore/ckan/package.py", "/ckanext/ecosis/datastore/__init__.py"], "/ckanext/ecosis/controller/resource/__init__.py": ["/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/lib/auth.py", "/ckanext/ecosis/datastore/delete/__init__.py", "/ckanext/ecosis/datastore/query/__init__.py", "/ckanext/ecosis/datastore/workspace/__init__.py", "/ckanext/ecosis/controller/package/doi.py", 
"/ckanext/ecosis/datastore/ckan/__init__.py"], "/ckanext/ecosis/controller/organization/__init__.py": ["/ckanext/ecosis/datastore/__init__.py"], "/ckanext/ecosis/controller/spectra/__init__.py": ["/ckanext/ecosis/datastore/query/__init__.py", "/ckanext/ecosis/datastore/vocab/__init__.py"], "/ckanext/ecosis/user_data/paster.py": ["/ckanext/ecosis/user_data/model.py"], "/ckanext/ecosis/controller/package/doi.py": ["/ckanext/ecosis/datastore/push/__init__.py", "/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/lib/auth.py", "/ckanext/ecosis/datastore/ckan/__init__.py"], "/ckanext/ecosis/plugin.py": ["/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/datastore/query/__init__.py", "/ckanext/ecosis/datastore/__init__.py", "/ckanext/ecosis/controller/organization/__init__.py", "/ckanext/ecosis/controller/package/__init__.py", "/ckanext/ecosis/controller/package/doi.py", "/ckanext/ecosis/user_data/model.py", "/ckanext/ecosis/controller/__init__.py", "/ckanext/ecosis/user_data/paster.py"], "/ckanext/ecosis/controller/workspace/__init__.py": ["/ckanext/ecosis/datastore/workspace/__init__.py", "/ckanext/ecosis/datastore/query/workspace.py", "/ckanext/ecosis/datastore/push/__init__.py", "/ckanext/ecosis/lib/utils.py"], "/ckanext/ecosis/datastore/files/utils.py": ["/ckanext/ecosis/datastore/vocab/__init__.py"], "/ckanext/ecosis/datastore/__init__.py": ["/ckanext/ecosis/datastore/query/__init__.py", "/ckanext/ecosis/datastore/utils/__init__.py", "/ckanext/ecosis/datastore/ckan/__init__.py", "/ckanext/ecosis/datastore/mongo.py"], "/ckanext/ecosis/controller/package/__init__.py": ["/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/datastore/__init__.py", "/ckanext/ecosis/lib/auth.py", "/ckanext/ecosis/datastore/ckan/__init__.py", "/ckanext/ecosis/controller/package/doi.py", "/ckanext/ecosis/lib/data_package_importer.py"], "/ckanext/ecosis/controller/__init__.py": ["/ckanext/ecosis/lib/utils.py", "/ckanext/ecosis/datastore/mongo.py"], "/ckanext/ecosis/datastore/query/workspace.py": 
["/ckanext/ecosis/datastore/ckan/__init__.py"]}
|
7,074
|
jonomon/VSMood
|
refs/heads/master
|
/vsbrnn/data/data.py
|
import numpy as np
from vsbrnn.utils import makeGaussian
import matplotlib.pyplot as plt
class Fixation:
    """A single eye-tracking fixation: normalised screen coordinates plus
    timing and stimulus metadata for one sample."""

    def __init__(self, x, y, start, dur, img_type, img_pos):
        self.x = x
        self.y = y
        self.start = start
        self.dur = dur
        self.img_type = img_type
        self.img_pos = img_pos

    def _describe(self):
        # Single formatting helper shared by __repr__ and __str__.
        return "fix @({x}, {y}),t={start}, d={dur}".format(
            x=self.x, y=self.y, start=self.start, dur=self.dur)

    def __repr__(self):
        return self._describe()

    def __str__(self):
        return self._describe()
class FixationsList:
    """An ordered collection of Fixation objects for one viewing trial.

    Provides conversions to saliency maps, per-fixation Gaussian map
    sequences, region-label sequences, and visual-scanning-behaviour
    (VSB) glance features.
    """

    def __init__(self, fix_list):
        self.fixations = fix_list

    @classmethod
    def from_pos(cls, x_pos, y_pos, start, dur, img_type, img_pos):
        """Build a FixationsList from parallel x/y/start/duration arrays.

        `img_type` and `img_pos` are shared by every fixation of the trial.
        """
        fixations = []
        for x, y, s, d in zip(x_pos, y_pos, start, dur):
            fix = Fixation(x, y, s, d, img_type, img_pos)
            fixations.append(fix)
        return cls(fixations)

    def __getitem__(self, i):
        # Bug fix: this previously returned self.fixations[0] for every
        # index, so indexing (and index-based iteration) always yielded
        # the first fixation.
        return self.fixations[i]

    def __repr__(self):
        return str(self.fixations)

    def __add__(self, b):
        """Concatenate two fixation lists into a new FixationsList."""
        fixations = self.fixations + b.fixations
        return FixationsList(fixations)

    def sort(self):
        """Sort fixations in place by onset time."""
        self.fixations = sorted(self.fixations, key=lambda x: x.start)

    def convert_fixations_to_saliency_map(self, size=(40, 40)):
        """Sum one Gaussian blob per fixation into a single `size` map.

        Fixation coordinates are scaled by (size - 1), i.e. they are
        assumed to be normalised to [0, 1].
        """
        saliency_map = np.zeros(size)
        for fix in self.fixations:
            x = int(fix.x * (size[0] - 1))
            y = int(fix.y * (size[1] - 1))
            gaussian = makeGaussian(size=size, centre=(x, y), fwhm=5)
            saliency_map += gaussian
        return saliency_map

    def convert_fixations_to_fix_sequence(self, size=(40, 40)):
        """Return a list with one Gaussian map per fixation (kept in order)."""
        sequence = []
        for fix in self.fixations:
            x = int(fix.x * (size[0] - 1))
            y = int(fix.y * (size[1] - 1))
            gaussian = makeGaussian(size=size, centre=(x, y), fwhm=5)
            sequence.append(gaussian)
        return sequence

    def convert_fixations_to_sequence(self, test, region_model):
        """Map each fixation to a region label via `region_model`.

        Returns (sequence, img_labels) where img_labels records the image
        type/position of every kept fixation.  Fixations outside [0, 1]
        are skipped when the region model requests it.
        """
        sequence = []
        img_labels = {}
        for i in ["img_pos", "img_type"]:
            img_labels[i] = []
        for fix in self.fixations:
            if region_model.ignore_fixations_outside:
                if (fix.x < 0.0 or fix.x > 1.0) or (fix.y < 0.0 or fix.y > 1.0):
                    continue
            label = region_model.fix_in_region(test, fix)
            sequence.append(label)
            img_labels["img_type"].append(fix.img_type)
            img_labels["img_pos"].append(fix.img_pos)
        return sequence, img_labels

    def convert_fixations_to_sequence_with_vsbs(self, test, region_model, vsb_selected):
        """Group consecutive fixations into glances and compute VSB features.

        A new glance starts when BOTH the region label and the image
        position change relative to the previous fixation.
        NOTE(review): when the region model drops out-of-bounds fixations,
        `sequence` may be shorter than `self.fixations`, so the zip below
        can pair labels with the wrong fixations — confirm with callers.

        Returns (vsbs, glance_label, img_labels) where `vsbs` holds one
        value per glance for each feature named in `vsb_selected`
        ("glance_dur", "no_fix", "scan_path").
        """
        sequence, _ = self.convert_fixations_to_sequence(test, region_model)
        prev_label = sequence[0]
        prev_fix = self.fixations[0]
        fixes = [self.fixations[0]]
        fix_ordered = []
        glance_label = [sequence[0]]
        img_labels = {}
        for i in ["img_pos", "img_type"]:
            img_labels[i] = []
        for fix, label in zip(self.fixations[1:], sequence[1:]):
            if prev_label != label and prev_fix.img_pos != fix.img_pos:
                # Close the current glance and start a new one.
                fix_ordered.append(fixes)
                glance_label.append(label)
                img_labels["img_pos"].append(prev_fix.img_pos)
                img_labels["img_type"].append(prev_fix.img_type)
                fixes = []
            prev_label = label
            prev_fix = fix
            fixes.append(fix)
        fix_ordered.append(fixes)
        img_labels["img_pos"].append(prev_fix.img_pos)
        img_labels["img_type"].append(prev_fix.img_type)
        vsbs = {}
        for i in vsb_selected:
            vsbs[i] = []
        for fixes in fix_ordered:
            glance_duration = self._get_glance_duration(fixes)
            length_scanpath = self._get_length_scanpath(fixes)
            no_fix = self._get_no_fixations(fixes)
            if "glance_dur" in vsb_selected:
                vsbs["glance_dur"].append(glance_duration)
            if "no_fix" in vsb_selected:
                vsbs["no_fix"].append(no_fix)
            if "scan_path" in vsb_selected:
                vsbs["scan_path"].append(length_scanpath)
        return vsbs, glance_label, img_labels

    def _get_glance_duration(self, fixes):
        """Total glance duration in seconds (fixation durations are in ms)."""
        return sum([a.dur for a in fixes])/1000

    def _get_length_scanpath(self, fixes):
        """Euclidean path length traced by the fixations within a glance."""
        scan_path = 0
        prev_coord = (fixes[0].x, fixes[0].y)
        for fix in fixes[1:]:
            coord = (fix.x, fix.y)
            scan_path += np.sqrt((coord[0] - prev_coord[0])**2 + (coord[1] - prev_coord[1])**2)
            prev_coord = coord
        return scan_path

    def _get_no_fixations(self, fixes):
        """Number of fixations in a glance."""
        return len(fixes)
|
{"/vsbrnn/data/data.py": ["/vsbrnn/utils.py"], "/main_AAAI.py": ["/vsbrnn/run_vsb_sequence.py"], "/vsbrnn/data/data_creator.py": ["/vsbrnn/utils.py"], "/main_JAD.py": ["/vsbrnn/data/data_creator.py", "/vsbrnn/data/data_importer.py", "/vsbrnn/training.py", "/vsbrnn/utils.py", "/vsbrnn/data/region_model.py"], "/vsbrnn/run_vsb_sequence.py": ["/vsbrnn/data/data_importer.py", "/vsbrnn/data/data_creator.py", "/vsbrnn/training.py", "/vsbrnn/multi_instance.py", "/vsbrnn/utils.py"], "/vsbrnn/data/data_importer.py": ["/vsbrnn/utils.py"], "/vsbrnn/data/region_model.py": ["/vsbrnn/utils.py"], "/vsbrnn/multi_instance.py": ["/vsbrnn/utils.py"]}
|
7,075
|
jonomon/VSMood
|
refs/heads/master
|
/main_AAAI.py
|
import numpy as np
import pandas as pd
import random
import logging
import argparse
import os
import matplotlib.pyplot as plt
# Seed both RNGs before importing any model code so runs are reproducible.
random.seed(1114)
np.random.seed(129)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, accuracy_score, log_loss
from sklearn.metrics import balanced_accuracy_score
from vsbrnn.run_vsb_sequence import run_vsb_sequence
# Silence TensorFlow's C++ logging (3 = errors only).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
## For basic usage check python main.py -h
def main():
    """Parse CLI options, validate them, run the RNN pipeline, and report
    subject-level AUC/accuracy metrics (optionally plotting a ROC curve)."""
    # Allowed values for each CLI option; validated manually below
    # (argparse `choices=` is not used).
    data_type_options = ["fix", "glance", "fix-sequence"]
    multi_instance_options = ['mean', '2d-mean', 'max-likelihood', 'similar', 'log-prob']
    cnn_layers_options = ['1', '2', 'none']
    region_model_type_options = ["semantic5", "grid9", "semantic8", "grid16"]
    use_vsb_options = ['scan_path', 'glance_dur']
    use_img_options = ['img_type', 'img_pos']
    logging.basicConfig(filename='log.txt', level=logging.DEBUG, filemode="w")
    parser = argparse.ArgumentParser(description='Run RNN for bipolar.')
    parser.add_argument('data_type', type=str, help='options: {}'.format(data_type_options))
    parser.add_argument('states', type=int, help='states')
    parser.add_argument('multi_instance', type=str,
                        help='Multi instance options {}'.format(multi_instance_options))
    # NOTE(review): the help text mentions a default "region_clinical"
    # that is not in region_model_type_options -- confirm.
    parser.add_argument('--region_model_type', type=str,
                        help='region model types {} default region_clinical'.format(
                            region_model_type_options))
    parser.add_argument('--cnn_layers', type=str,
                        help='cnn options {}'.format(cnn_layers_options))
    parser.add_argument('--max_len', type=int, help='max length of sequence')
    parser.add_argument('--use_vsb', type=str, nargs="+",
                        help='VSB features only with glance data_type, options: {}'.format(
                            use_vsb_options))
    parser.add_argument('--use_img', type=str, nargs="+",
                        help='should use image properties, options: {}'.format(use_img_options))
    parser.add_argument('--verbose', dest='verbose', action='store_true')
    parser.add_argument('--print_sub', dest='print_sub', action='store_true')
    parser.add_argument('--plot', dest='plot', action='store_true')
    args = parser.parse_args()
    data_type = args.data_type
    states = args.states
    max_len = args.max_len
    use_vsb = args.use_vsb
    use_img = args.use_img
    region_model_type = args.region_model_type
    cnn_layers = args.cnn_layers
    multi_instance = args.multi_instance
    verbose = args.verbose
    print_sub = args.print_sub
    plot = args.plot
    logging.debug("Running %s with states=%s, mi=%s, max_length=%s, use_vsb=%s, use_img=%s",
                  data_type, states, multi_instance, max_len, use_vsb, use_img)
    print("Running {} with states={}, mi={}, max_length={}, use_vsb={}, use_img={} region_model={} cnn_layers={}".format(data_type, states, multi_instance, max_len, use_vsb, use_img,
                                                                                                                        region_model_type, cnn_layers))
    # Manual validation of the argument combinations; any invalid
    # combination prints a message and exits without running.
    if data_type not in data_type_options:
        print("{} not an available data_type option".format(data_type))
        return
    if data_type == "fix" and use_vsb:
        print("VSB parameters are not available when in fixation")
        return
    if multi_instance not in multi_instance_options:
        print("{} not available option for multi_instance".format(multi_instance))
        return
    if region_model_type != None and region_model_type not in region_model_type_options:
        print("{} not available option for region_model_type".format(region_model_type))
        return
    if cnn_layers != None and cnn_layers not in cnn_layers_options:
        print("{} not available option for cnn_layers".format(cnn_layers))
        return
    # Run the full pipeline; returns one category label and one
    # probability per subject.
    sub_cat, sub_prob = run_vsb_sequence(data_type, states, max_len,
                                         use_vsb, use_img, region_model_type,
                                         cnn_layers, multi_instance,
                                         verbose=verbose, print_sub=print_sub)
    sub_prob = np.array(sub_prob)
    df = pd.DataFrame({"cat": sub_cat, "prob": sub_prob})
    df.to_csv("output/{}-{}-{}-{}.csv".format(data_type, states, region_model_type, cnn_layers))
    # Threshold the subject probabilities with a balanced logistic
    # regression, then report AUC / accuracy / balanced accuracy / log loss.
    clf = LogisticRegression(class_weight="balanced")
    clf.fit(sub_prob.reshape(-1, 1), sub_cat)
    y_predicted = clf.predict(sub_prob.reshape(-1, 1))
    auc_val = roc_auc_score(sub_cat, sub_prob)
    acc_val = accuracy_score(sub_cat, y_predicted)
    b_acc_val = balanced_accuracy_score(sub_cat, y_predicted)
    log_loss_val = log_loss(sub_cat, sub_prob)
    print("Avg auc={} acc_val={} b_acc_val={} log_loss_val={}\n\n".format(
        auc_val, acc_val, b_acc_val, log_loss_val))
    # Optionally plot the subject-level ROC curve and dump the raw
    # (category, probability) pairs next to it.
    if plot:
        from sklearn.metrics import roc_curve, auc
        plt.clf()
        fpr, tpr, _ = roc_curve(sub_cat, sub_prob, pos_label=1)
        auc_val = auc(fpr, tpr)
        plt.plot(fpr, tpr, color='#FF69B4',
                 label='ROC curve (area = {})'.format(round(auc_val, 2)))
        plt.plot([0, 1], [0, 1], color='navy', lw=1, linestyle='--')
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.legend(loc="lower right")
        filename = "img/{} with states={}, mi={}".format(data_type, states, multi_instance)
        plt.savefig(filename + ".png")
        pd.DataFrame([sub_cat, sub_prob]).T.to_csv(filename + ".csv")
if __name__ == "__main__":
    main()
|
{"/vsbrnn/data/data.py": ["/vsbrnn/utils.py"], "/main_AAAI.py": ["/vsbrnn/run_vsb_sequence.py"], "/vsbrnn/data/data_creator.py": ["/vsbrnn/utils.py"], "/main_JAD.py": ["/vsbrnn/data/data_creator.py", "/vsbrnn/data/data_importer.py", "/vsbrnn/training.py", "/vsbrnn/utils.py", "/vsbrnn/data/region_model.py"], "/vsbrnn/run_vsb_sequence.py": ["/vsbrnn/data/data_importer.py", "/vsbrnn/data/data_creator.py", "/vsbrnn/training.py", "/vsbrnn/multi_instance.py", "/vsbrnn/utils.py"], "/vsbrnn/data/data_importer.py": ["/vsbrnn/utils.py"], "/vsbrnn/data/region_model.py": ["/vsbrnn/utils.py"], "/vsbrnn/multi_instance.py": ["/vsbrnn/utils.py"]}
|
7,076
|
jonomon/VSMood
|
refs/heads/master
|
/vsbrnn/data/data_creator.py
|
import numpy as np
from vsbrnn.utils import tokenise_cat, get_sub_to_cat_dict
class DataCreator():
    """Loads one variant of the visual-scanning dataset via a DataImporter
    and exposes array views over it (sequences, categories, VSB and image
    features), filterable by diagnostic category."""

    def __init__(self, di, data_type, model, **kwargs):
        self.data_type = data_type
        self.properties = kwargs
        self.model = model
        # Each data_type maps to a different importer method; an unknown
        # data_type leaves self.df unset.
        if data_type == "glance":
            self.df = di.get_vsb_sequence_data(
                max_len=self.properties["max_len"], model=model)
        elif data_type == "fix":
            self.df = di.get_sequence_data(
                max_len=self.properties["max_len"], model=model)
        elif data_type == "fix-sequence":
            self.df = di.get_fix_sequence_data(
                max_len=self.properties["max_len"])

    def filter_cats(self, cats):
        """Keep only the rows whose Cat column is one of `cats`."""
        keep = np.in1d(self.df["Cat"], cats)
        self.df = self.df.loc[keep]

    def get_unique_subject_list(self):
        """Sorted unique subject identifiers."""
        return np.unique(self.df["Subject"])

    def get_sub(self):
        """Subject identifier for every row."""
        return self.df["Subject"].values

    def get_cat(self):
        """One-hot encoded category for every row."""
        return tokenise_cat(self.df["Cat"], one_hot=True)

    def get_cat_letter(self):
        """Raw category letters for every row."""
        return self.df["Cat"].values

    def get_seq(self):
        """All sequences stacked into one array."""
        col = self.df["Sequence"]
        if self.data_type == "fix-sequence":
            # 4-D stack of per-fixation maps plus a trailing channel axis.
            return np.expand_dims(np.stack(col), axis=4)
        return np.vstack(col.values)

    def get_vsb(self, vsb_prop):
        """One VSB feature column stacked into a 2-D array."""
        return np.vstack(self.df[vsb_prop].values)

    def get_img(self, img_prop):
        """One image-property column stacked into a 2-D array."""
        return np.vstack(self.df[img_prop].values)

    def get_cat_of_subjects(self, subject_list):
        """Positive one-hot column of the class for each listed subject."""
        lookup = get_sub_to_cat_dict(self.get_sub(), self.get_cat())
        return [lookup[s][1] for s in subject_list]

    def get_cat_letter_of_subject(self, subject_list):
        """Category letter for each listed subject."""
        lookup = get_sub_to_cat_dict(self.get_sub(), self.get_cat_letter())
        return [lookup[s] for s in subject_list]

    def get_X(self, index):
        """Assemble the model-input dict for the given row indices.

        Always contains "seq"; "use_vsb"/"use_img_*" entries are added
        when the corresponding properties were requested at construction.
        """
        output = {"seq": self.get_seq()[index]}
        if self.properties.get("use_vsb"):
            chosen = self.properties["use_vsb"]
            # Feature order is fixed: scan_path, glance_dur, no_fix.
            stacked = [self.get_vsb(name)[index]
                       for name in ("scan_path", "glance_dur", "no_fix")
                       if name in chosen]
            output["use_vsb"] = np.array(stacked).transpose([1, 2, 0])
        if self.properties.get("use_img"):
            chosen = self.properties["use_img"]
            if "img_pos" in chosen:
                output["use_img_pos"] = self.get_img('img_pos')[index]
            if "img_type" in chosen:
                output["use_img_type"] = self.get_img('img_type')[index]
        return output

    def get_data_for_subjects(self, subject_list):
        """Return (X, y) restricted to rows belonging to `subject_list`."""
        rows = np.where(np.in1d(self.get_sub(), subject_list))[0]
        return self.get_X(rows), self.get_cat()[rows]
|
{"/vsbrnn/data/data.py": ["/vsbrnn/utils.py"], "/main_AAAI.py": ["/vsbrnn/run_vsb_sequence.py"], "/vsbrnn/data/data_creator.py": ["/vsbrnn/utils.py"], "/main_JAD.py": ["/vsbrnn/data/data_creator.py", "/vsbrnn/data/data_importer.py", "/vsbrnn/training.py", "/vsbrnn/utils.py", "/vsbrnn/data/region_model.py"], "/vsbrnn/run_vsb_sequence.py": ["/vsbrnn/data/data_importer.py", "/vsbrnn/data/data_creator.py", "/vsbrnn/training.py", "/vsbrnn/multi_instance.py", "/vsbrnn/utils.py"], "/vsbrnn/data/data_importer.py": ["/vsbrnn/utils.py"], "/vsbrnn/data/region_model.py": ["/vsbrnn/utils.py"], "/vsbrnn/multi_instance.py": ["/vsbrnn/utils.py"]}
|
7,077
|
jonomon/VSMood
|
refs/heads/master
|
/main_JAD.py
|
import os
import numpy as np
import pandas as pd
import argparse
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, confusion_matrix, roc_curve, auc
from vsbrnn.data.data_creator import DataCreator
from vsbrnn.data.data_importer import DataImporter
from vsbrnn.training import RnnTrain
from vsbrnn.utils import get_log_likelihood
from vsbrnn.data.region_model import FaceRegionModel4
np.random.seed(616)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def plotROC(cats, preds, xlabel=None, filename=None):
    """Draw a ROC curve (with AUC in the legend) onto the current axes.

    `xlabel`, if given, is appended below the x-axis caption; `filename`,
    if given, saves the figure to img/<filename>.png."""
    fpr, tpr, _ = roc_curve(cats, preds, pos_label=1)
    area = auc(fpr, tpr)
    plt.plot(fpr, tpr, color='#FF69B4', label='ROC curve (area = {})'.format(round(area, 2)))
    plt.plot([0, 1], [0, 1], color='navy', lw=1, linestyle='--')
    caption = 'False Positive Rate\n' + xlabel if xlabel else 'False Positive Rate'
    plt.xlabel(caption)
    plt.ylabel('True Positive Rate')
    plt.legend(loc="lower right")
    if filename:
        plt.savefig("img/" + filename + ".png")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Run seq2seq for apathy.')
    parser.add_argument('--states', type=int, default=32, help="LSTM states")
    parser.add_argument('--epochs', type=int, default=13, help="iterations")
    args = parser.parse_args()
    states = args.states
    epochs = args.epochs
    print("Reading, parsing, and processing visual scanning data")
    # Get data
    di = DataImporter()
    # Model classes: bipolar-depressed (BD) vs unipolar-depressed (D).
    bd_d_dc = DataCreator(di, "fix", max_len=None, model=FaceRegionModel4(),
                          use_vsb=None, use_img=["img_type"])
    bd_d_dc.filter_cats(["BD", "D"])
    # Transfer groups: bipolar-remitted (BR), unipolar-remitted (R), controls (C).
    other_dc = DataCreator(di, "fix", max_len=None, model=FaceRegionModel4(),
                           use_vsb=None, use_img=["img_type"])
    other_dc.filter_cats(["BR", "R", "C"])
    # Hold out data
    subject_list = bd_d_dc.get_unique_subject_list()
    cat_list = bd_d_dc.get_cat_of_subjects(subject_list)
    train_subs, hold_out_subs, train_cats, hold_out_cat = train_test_split(
        subject_list, cat_list,
        test_size=0.33, stratify=cat_list, random_state=23422)
    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
    print("1) Training leave one out classification model...")
    # Results 1) Leave one out classification
    mean_preds = []
    mean_cats = []
    classification = []
    for idx, test_sub in enumerate(train_subs):
        # Retrain from scratch on all training subjects except one.
        llo_train_subs = np.delete(train_subs, idx)
        X_train, y_train = bd_d_dc.get_data_for_subjects(llo_train_subs)
        X_test, y_test = bd_d_dc.get_data_for_subjects(test_sub)
        trainer = RnnTrain(states=states, verbose=False)
        trainer.do_simple_fix_training(X_train, y_train, epochs=epochs)
        X_predicts = trainer.predict(X_train).reshape(-1, 1)
        # Per-class histograms of training predictions; bin counts feed
        # the log-likelihood similarity score below.
        n_d, bins_d, _ = plt.hist(
            X_predicts[y_train[:, 1]==1], facecolor='green', alpha=0.5)
        n_bd, bins_bd, _ = plt.hist(
            X_predicts[y_train[:, 1]==0], facecolor='red', alpha=0.5)
        preds = trainer.predict(X_test)
        log_like = np.mean([get_log_likelihood(a, n_bd, bins_bd, n_d, bins_d) for a in preds])
        mean_preds.append(log_like)
        mean_cats.append(y_test[0, 1])
        classification.append(np.mean(log_like)>0)
    print("Training leave one out classification model complete")
    llo_auc = roc_auc_score(mean_cats, mean_preds)
    print("\t---------------------------")
    print("\tResults 1")
    print("\tAUC score of the leave one out set= {}".format(llo_auc))
    classification = np.array(classification)
    mean_cats = np.array(mean_cats)
    cm = confusion_matrix(mean_cats, classification)
    # NOTE(review): sklearn's confusion_matrix layout is [[tn, fp], [fn, tp]],
    # so the tp/fp/fn/tn labels printed here (and below) look transposed -- confirm.
    print("\tClassification of leave one out set \n\ttp={}, fp={}, fn={}, tn={}".format(
        cm[0, 0], cm[0, 1], cm[1, 0], cm[1, 1]))
    print("\t---------------------------")
    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n")
    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
    print("2) Training held out classification model...")
    # Retrain once on the full training split, then score held-out subjects.
    X_train, y_train = bd_d_dc.get_data_for_subjects(train_subs)
    trainer2 = RnnTrain(states=states, verbose=False)
    trainer2.do_simple_fix_training(X_train, y_train, epochs=epochs)
    X_predicts = trainer2.predict(X_train).reshape(-1, 1)
    n_d, bins_d, _ = plt.hist(
        X_predicts[y_train[:, 1]==1], facecolor='green', alpha=0.5)
    n_bd, bins_bd, _ = plt.hist(
        X_predicts[y_train[:, 1]==0], facecolor='red', alpha=0.5)
    hold_out_X_train, hold_out_y_train = bd_d_dc.get_data_for_subjects(hold_out_subs)
    held_out_preds = trainer2.predict(hold_out_X_train)
    hold_out_mean_preds = []
    hold_out_classification = []
    hold_out_index = np.where(np.in1d(bd_d_dc.get_sub(), hold_out_subs))[0]
    hold_out_sub = bd_d_dc.get_sub()[hold_out_index]
    for sub_id, cat in zip(hold_out_subs, hold_out_cat):
        # Aggregate per-trial log-likelihoods into one score per subject.
        index = np.where(np.in1d(hold_out_sub, sub_id))[0]
        log_like = [get_log_likelihood(a, n_bd, bins_bd,
                                       n_d, bins_d) for a in held_out_preds[index]]
        prob = np.mean(log_like)
        hold_out_mean_preds.append(prob)
        hold_out_classification.append(prob>0)
    hold_out_auc = roc_auc_score(np.array(hold_out_cat), hold_out_mean_preds)
    print("Training held out classification model complete")
    print("\t---------------------------")
    print("\t AUC score of the held out set= {}".format(hold_out_auc))
    cm = confusion_matrix(hold_out_cat, hold_out_classification)
    print("\tClassification of held out set \n\ttp={}, fp={}, fn={}, tn={}".format(
        cm[0, 0], cm[0, 1], cm[1, 0], cm[1, 1]))
    held_lloc_auc = roc_auc_score(hold_out_cat + mean_cats.tolist(),
                                  hold_out_mean_preds + mean_preds)
    print("\t---------------------------")
    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
    print("Aggregate results")
    print("\tAUC score of group 1 + 2 = {}".format(held_lloc_auc))
    # Score the transfer groups (BR, R, C) with the trained model.
    others_sub = other_dc.get_unique_subject_list()
    others_cat = other_dc.get_cat_letter_of_subject(others_sub)
    others_X_train, others_y_train = other_dc.get_data_for_subjects(others_sub)
    others_preds = trainer2.predict(others_X_train)
    test_cat_list = {}
    for sub_id, cat in zip(others_sub, others_cat):
        index = np.where(np.in1d(other_dc.get_sub(), sub_id))
        log_like = [get_log_likelihood(a, n_bd, bins_bd,
                                       n_d, bins_d) for a in others_preds[index]]
        prob = np.mean(log_like)
        if cat not in test_cat_list:
            test_cat_list[cat] = []
        test_cat_list[cat].append(prob)
    hold_out_test_cats = np.array(hold_out_cat)
    hold_out_bd_preds = np.array(hold_out_mean_preds)[np.where(hold_out_test_cats==0)[0]]
    hold_out_d_preds = np.array(hold_out_mean_preds)[np.where(hold_out_test_cats==1)[0]]
    remitted_auc = roc_auc_score([0]*len(test_cat_list["BR"]) + [1]*len(test_cat_list["R"]),
                                 test_cat_list["BR"] + test_cat_list["R"])
    print("\t---------------------------")
    cats = hold_out_test_cats.tolist() + mean_cats.tolist() + [0]*len(test_cat_list["BR"]) + [1]*len(test_cat_list["R"]) + [1]*len(test_cat_list["C"])
    preds = hold_out_mean_preds + mean_preds + test_cat_list["BR"] + test_cat_list["R"] + test_cat_list["C"]
    all_auc = roc_auc_score(cats, preds)
    cm = confusion_matrix(cats, np.array(preds)>0)
    print("\tAUC for Bipolar (depressed + remitted) vs unipolar (depressed + remitted) and controls ={}".format(all_auc))
    print("\tClassification of Bipolar (depressed + remitted) vs unipolar (depressed + remitted) and controls\n\ttp={}, fp={}, fn={}, tn={}".format(
        cm[0, 0], cm[0, 1], cm[1, 0], cm[1, 1]))
    print("\n\tSimilarity index")
    print("\t\tDepressed bipolar disorder {}+{}".format(np.mean(hold_out_bd_preds),
                                                        np.std(hold_out_bd_preds)))
    print("\t\tDepressed unipolar disorder {}+{}".format(np.mean(hold_out_d_preds),
                                                         np.std(hold_out_d_preds)))
    print("\t\tRemitted bipolar disorder {}+{}".format(np.mean(test_cat_list["BR"]),
                                                       np.std(test_cat_list["BR"])))
    print("\t\tRemitted unipolar {}+{}".format(np.mean(test_cat_list["R"]),
                                               np.std(test_cat_list["R"])))
    print("\t\tHealthy control {}+{}".format(np.mean(test_cat_list["C"]),
                                             np.std(test_cat_list["C"])))
    print("\tStatistic tests (t-tests)")
    # Pairwise independent t-tests between all group similarity scores.
    from scipy.stats import ttest_ind
    t, p = ttest_ind(hold_out_bd_preds, hold_out_d_preds)
    print("\t\tBipolar depressed vs unipolar depressed t={}, p={}".format(t, p))
    t, p = ttest_ind(hold_out_d_preds, test_cat_list["R"])
    print("\t\tUnipolar depressed vs remitted t={}, p={}".format(t, p))
    t, p = ttest_ind(hold_out_d_preds, test_cat_list["C"])
    print("\t\tUnipolar depressed vs healthy controls t={}, p={}".format(t, p))
    t, p = ttest_ind(hold_out_d_preds, test_cat_list["BR"])
    print("\t\tUnipolar depressed vs Bipolar remitted t={}, p={}".format(t, p))
    t, p = ttest_ind(hold_out_bd_preds, test_cat_list["R"])
    print("\t\tBipolar depressed vs unipolar remitted t={}, p={}".format(t, p))
    t, p = ttest_ind(hold_out_bd_preds, test_cat_list["C"])
    print("\t\tBipolar depressed vs healthy controls t={}, p={}".format(t, p))
    t, p = ttest_ind(hold_out_bd_preds, test_cat_list["BR"])
    print("\t\tBipolar depressed vs bipolar remitted t={}, p={}".format(t, p))
    t, p = ttest_ind(test_cat_list["C"], test_cat_list["BR"])
    print("\t\tHealthy controls vs bipolar remitted t={}, p={}".format(t, p))
    t, p = ttest_ind(test_cat_list["C"], test_cat_list["R"])
    print("\t\tHealthy controls vs unipolar remitted t={}, p={}".format(t, p))
    print("\t---------------------------")
    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n")
    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
    print("Generating plots")
    # ROC curves for several group pairings, laid out in a 2x2 grid.
    plt.clf()
    cats = hold_out_test_cats.tolist() + mean_cats.tolist()
    preds = hold_out_mean_preds + mean_preds
    plotROC(cats, preds, filename="mdd_bd_llo")
    plt.subplot(2, 2, 1)
    cats = hold_out_test_cats.tolist() + mean_cats.tolist() + [0]*len(test_cat_list["BR"]) + [1]*len(test_cat_list["R"])
    preds = hold_out_mean_preds + mean_preds + test_cat_list["BR"] + test_cat_list["R"]
    plotROC(cats, preds, xlabel="BD & BD-R vs MDD & MDD-R ")
    plt.subplot(2, 2, 2)
    cats = hold_out_test_cats.tolist() + mean_cats.tolist() + [0]*len(test_cat_list["BR"]) + [1]*len(test_cat_list["R"]) + [1]*len(test_cat_list["C"])
    preds = hold_out_mean_preds + mean_preds + test_cat_list["BR"] + test_cat_list["R"] + test_cat_list["C"]
    plotROC(cats, preds, xlabel="BD & BD-r vs MDD & MDD-R & C \n")
    plt.subplot(2, 2, 3)
    bd_hold_out_idx = hold_out_test_cats == 0
    bd_hold_out_cats = hold_out_test_cats[bd_hold_out_idx]
    bd_hold_mean_preds = np.array(hold_out_mean_preds)[bd_hold_out_idx]
    bd_mean_cat_idx = mean_cats == 0
    bd_mean_cats = mean_cats[bd_mean_cat_idx]
    bd_mean_preds = np.array(mean_preds)[bd_mean_cat_idx]
    cats = bd_hold_out_cats.tolist() + bd_mean_cats.tolist() + [1]*len(test_cat_list["R"]) + [0]*len(test_cat_list["C"])
    preds = bd_hold_mean_preds.tolist() + bd_mean_preds.tolist() + test_cat_list["R"] + test_cat_list["C"]
    plotROC(cats, preds, xlabel="BD & BD-r vs C ")
    print("ROCs saved in img/rocs.png...")
    plt.savefig("img/rocs.png")
    plt.clf()
    x = [1, 2, 3, 4, 5]  # (unused)
    y = [hold_out_bd_preds, hold_out_d_preds, test_cat_list["BR"], test_cat_list["R"], test_cat_list["C"]]
    plt.boxplot(y)
    plt.xticks([1, 2, 3, 4, 5], ("BD", "MDD", "BD-R", "MDD-R", "Con"))
    plt.ylabel("Similarity index")
    plt.grid(axis='y', color="0.9", linestyle='-', linewidth=1)
    plt.xlim([0.5, 5.5])
    #plt.ylim([0.4, 1])
    print("Similarity index box plots saved in img/mle_bar.png...")
    plt.savefig("img/mle_bar.png")
    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n")
|
{"/vsbrnn/data/data.py": ["/vsbrnn/utils.py"], "/main_AAAI.py": ["/vsbrnn/run_vsb_sequence.py"], "/vsbrnn/data/data_creator.py": ["/vsbrnn/utils.py"], "/main_JAD.py": ["/vsbrnn/data/data_creator.py", "/vsbrnn/data/data_importer.py", "/vsbrnn/training.py", "/vsbrnn/utils.py", "/vsbrnn/data/region_model.py"], "/vsbrnn/run_vsb_sequence.py": ["/vsbrnn/data/data_importer.py", "/vsbrnn/data/data_creator.py", "/vsbrnn/training.py", "/vsbrnn/multi_instance.py", "/vsbrnn/utils.py"], "/vsbrnn/data/data_importer.py": ["/vsbrnn/utils.py"], "/vsbrnn/data/region_model.py": ["/vsbrnn/utils.py"], "/vsbrnn/multi_instance.py": ["/vsbrnn/utils.py"]}
|
7,078
|
jonomon/VSMood
|
refs/heads/master
|
/vsbrnn/training.py
|
import numpy as np
np.random.seed(616)
from keras.models import Sequential
from keras.models import Model
from keras.layers.core import Dense, Activation
from keras.layers import LSTM, GRU
from keras.layers import Embedding
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, MaxoutDense
from keras.layers import Input, concatenate, TimeDistributed
from keras.callbacks import EarlyStopping, History, Callback, ModelCheckpoint
from keras.optimizers import Adam
#from vis.visualization import visualize_activation, visualize_saliency, visualize_cam
from scipy.misc import imsave
from sklearn.metrics import roc_auc_score
import logging
import matplotlib.pyplot as plt
class Auc_callback(Callback):
    """Keras callback that reports validation ROC-AUC every `interval` epochs.

    Parameters
    ----------
    validation_data : tuple of (X_val, y_val) used for scoring; y_val is
        one-hot and column 1 is treated as the positive class.
    interval : score on every `interval`-th epoch (epoch 0 included).
    verbose : print the score when True.
    """
    def __init__(self, validation_data=(), interval=10, verbose=False):
        # Bug fix: the original called super(Callback, self).__init__(),
        # which resolves PAST Callback in the MRO and skips Callback's
        # own __init__ entirely.
        super(Auc_callback, self).__init__()
        self.interval = interval
        self.verbose = verbose
        self.X_val, self.y_val = validation_data
    def on_epoch_end(self, epoch, logs=None):
        # `logs` is unused; default changed from the mutable {} to None.
        if epoch % self.interval == 0:
            y_pred = self.model.predict(self.X_val, verbose=0)[:, 1]
            score = roc_auc_score(self.y_val[:, 1], y_pred)
            if self.verbose:
                print("AUC - epoch: {:d} - score: {:.6f}".format(epoch, score))
class RnnTrain:
    """Trains an (optionally multi-modal) Embedding+LSTM classifier on
    integer region-label sequences.

    Inputs arrive as a dict X with key "seq" and optional keys
    "use_vsb", "use_img_type", "use_img_pos"; targets are one-hot 2-class
    arrays.  The network is built lazily from X's shapes in make_net().
    """
    def __init__(self, states, **kwargs):
        # `states` is the LSTM width; extra kwargs (e.g. verbose) are
        # read from self.properties at fit time.
        self.states = states
        self.batch_size = 40
        self.learning_rate = 0.001
        self.metrics = ['accuracy']
        self.properties = kwargs
        self.net = None
    def _init_single_modal_net(self, **kwargs):
        """Sequence-only model: Embedding -> LSTM -> softmax(2)."""
        sequence_size = kwargs["seq"]["shape"]
        max_features = kwargs["seq"]["max"]
        sequence_input = Input(shape=sequence_size, name="sequence_input")
        sequence_dense = Embedding(max_features, self.states)(sequence_input)
        encoder = LSTM(self.states, dropout=0.5, recurrent_dropout=0.0)(sequence_dense)
        output = Dense(2, activation="softmax", name="classification")(encoder)
        model = Model(inputs=sequence_input, outputs=output)
        # NOTE(review): this branch hard-codes lr=0.01 instead of using
        # self.learning_rate (0.001) -- confirm whether intentional.
        adam = Adam(lr=0.01)
        model.compile(loss='categorical_crossentropy', optimizer=adam,
                      metrics=self.metrics)
        return model
    def _init_modal_net(self, **kwargs):
        """Multi-modal model: embed the region sequence plus optional VSB
        and image-property streams, concatenate per timestep, then LSTM."""
        sequence_size = kwargs["seq"]["shape"]
        max_features = kwargs["seq"]["max"]
        sequence_input = Input(shape=sequence_size, name="sequence_input")
        sequence_dense = Embedding(max_features, self.states)(sequence_input)
        feature_inputs = []
        feature_outputs = []
        if "use_vsb" in kwargs:
            # Continuous VSB features get a Dense projection.
            shape = kwargs["use_vsb"]["shape"]
            feature_input = Input(shape=shape, name="use_vsb")
            feature_dense = Dense(self.states)(feature_input)
            feature_inputs.append(feature_input)
            feature_outputs.append(feature_dense)
        if "use_img_type" in kwargs:
            # Categorical image type gets its own embedding.
            shape = kwargs["use_img_type"]["shape"]
            max_features = kwargs["use_img_type"]["max"]
            feature_input = Input(shape=shape, name="use_img_type")
            feature_dense = Embedding(max_features, self.states)(feature_input)
            feature_inputs.append(feature_input)
            feature_outputs.append(feature_dense)
        if "use_img_pos" in kwargs:
            # Categorical image position gets its own embedding.
            shape = kwargs["use_img_pos"]["shape"]
            max_features = kwargs["use_img_pos"]["max"]
            feature_input = Input(shape=shape, name="use_img_pos")
            feature_dense = Embedding(max_features, self.states)(feature_input)
            feature_inputs.append(feature_input)
            feature_outputs.append(feature_dense)
        merge_layer = concatenate([sequence_dense] + feature_outputs)
        encoder = LSTM(self.states + len(feature_outputs) * self.states,
                       dropout=0.7, recurrent_dropout=0.7)(merge_layer)
        # recurrent_dropout to d=rd=0.7 for psyc paper, d=0.5 rd=0 for technical
        output = Dense(2, activation="softmax", name="classification")(encoder)
        model = Model(inputs=[sequence_input] + feature_inputs, outputs=[output])
        adam = Adam(lr=self.learning_rate)
        model.compile(loss='categorical_crossentropy', optimizer=adam,
                      metrics=self.metrics)
        return model
    def make_net(self, X):
        """Inspect X's keys/shapes and build the matching network.

        Embedding vocabulary sizes are derived as max(value) + 1."""
        max_features = np.max(X["seq"]) + 1
        sequence_shape = (X["seq"].shape[1],)
        net_arguments = {}
        net_arguments["seq"] = {"shape": sequence_shape, "max": max_features}
        if "use_vsb" in X:
            vsb_shape = (X["use_vsb"].shape[1], X["use_vsb"].shape[2])
            net_arguments["use_vsb"] = {"shape": vsb_shape}
        if "use_img_type" in X:
            img_type_shape = (X["use_img_type"].shape[1],)
            max_img_type_features = np.max(X["use_img_type"]) + 1
            net_arguments["use_img_type"] = {"shape": img_type_shape,
                                             "max": max_img_type_features}
        if "use_img_pos" in X:
            img_pos_shape = (X["use_img_pos"].shape[1],)
            max_img_pos_features = np.max(X["use_img_pos"]) + 1
            net_arguments["use_img_pos"] = {"shape": img_pos_shape,
                                            "max": max_img_pos_features}
        if "use_vsb" in X or "use_img_type" in X or "use_img_pos" in X:
            net = self._init_modal_net(**net_arguments)
        else:
            net = self._init_single_modal_net(**net_arguments)
        return net
    def make_X_list(self, *args):
        """Convert each X dict in *args into the list-of-arrays form Keras
        expects, preserving argument order; a single arg is returned bare."""
        X = args[0] # let X be the first argument,
        # assuming that the shape for the data are the same
        X_base_list = []
        for arg in args:
            # NOTE(review): on Python 3 dict.keys() never compares equal to
            # a list, so this branch is never taken and even a seq-only X
            # is wrapped in a list -- Keras accepts that, but confirm intent.
            if X.keys() == ["seq"]:
                X_list = arg["seq"]
            else:
                X_list = [arg["seq"]]
            if "use_vsb" in X:
                X_list.append(arg["use_vsb"])
            if "use_img_type" in X:
                X_list.append(arg["use_img_type"])
            if "use_img_pos" in X:
                X_list.append(arg["use_img_pos"])
            X_base_list.append(X_list)
        if len(X_base_list) == 1:
            return X_base_list[0]
        else:
            return tuple(X_base_list)
    def do_simple_fix_training(self, X_train, y_train, epochs=10):
        """Fit a fresh network for a fixed number of epochs (no validation
        set, no early stopping)."""
        self.net = self.make_net(X_train)
        X_train_list = self.make_X_list(X_train)
        his = History()
        # NOTE(review): class_weight="auto" was removed in later Keras
        # releases, which expect an explicit dict -- confirm Keras version.
        self.net.fit(X_train_list, y_train, verbose=self.properties['verbose'], shuffle=True,
                     batch_size=self.batch_size, epochs=epochs,
                     class_weight="auto",
                     callbacks=[his])
    def do_training(self, X_train, y_train, X_valid, y_valid):
        """Fit with early stopping (patience 30) and restore the weights
        of the best checkpoint before returning."""
        self.net = self.make_net(X_train)
        X_train_list, X_valid_list = self.make_X_list(X_train, X_valid)
        his = History()
        es = EarlyStopping(patience=30, verbose=False, mode='min')
        mc = ModelCheckpoint("ModelCheckpoint/tmp.pkg",
                             save_best_only=True, save_weights_only=True)
        self.net.fit(X_train_list, y_train, verbose=self.properties["verbose"],
                     shuffle=True,
                     batch_size=self.batch_size, epochs=10000,
                     validation_data=(X_valid_list, y_valid),
                     class_weight="auto",
                     callbacks=[his, es, mc])
        self.net.load_weights("ModelCheckpoint/tmp.pkg")
        # output_string = ""
        # for i in his.history.keys():
        #     output_string += "{}={} ".format(i, his.history[i][-1])
        #return net, output_string
    def predict(self, X):
        """Return the positive-class probability for each sample in X."""
        X_list = [X["seq"]]
        if "use_vsb" in X:
            X_list.append(X["use_vsb"])
        if "use_img_type" in X:
            X_list.append(X["use_img_type"])
        if "use_img_pos" in X:
            X_list.append(X["use_img_pos"])
        return self.net.predict(X_list, verbose=0)[:, 1]
class RNNFeatureTrain:
    """Trains a CNN + LSTM classifier on per-frame fixation images.

    A convolutional stack (wrapped in TimeDistributed) extracts features
    from each frame of X["seq"]; an LSTM summarises the sequence and a
    2-way softmax produces the classification.
    """
    def __init__(self, cnn_layer, states, **kwargs):
        # cnn_layer selects how many conv blocks to keep (see make_net);
        # states sizes the dense/LSTM layers.
        self.cnn_layer = cnn_layer
        self.states = states
        self.batch_size = 40
        self.learning_rate = 0.001
        self.metrics = ['accuracy']
        # Remaining keyword args (e.g. verbose) are looked up at fit time.
        self.properties = kwargs
        self.net = None
    def make_net(self, X):
        """Build and compile the model; X["seq"] supplies the input shape.

        NOTE(review): self.states/2 is integer division only under
        Python 2 (code elsewhere in this repo uses iteritems, so Py2 is
        assumed) -- confirm before running on Python 3.
        """
        # assumes X["seq"] is (samples, time, H, W, C) -- TODO confirm
        input_size = (None, X["seq"].shape[2], X["seq"].shape[3], X["seq"].shape[4])
        sequence_input = Input(shape=input_size, name="sequence_input")
        convs = Sequential()
        # Conv blocks are enabled cumulatively: None keeps all three,
        # "1"/"2" keep fewer, "none" skips convolutions entirely.
        if self.cnn_layer in [None, "1", "2"]:
            convs.add(Conv2D(10, kernel_size=(3, 3), activation="relu",
                             input_shape=(
                                 X["seq"].shape[2], X["seq"].shape[3], X["seq"].shape[4])))
            convs.add(MaxPooling2D((2, 2), strides=(2, 2)))
        if self.cnn_layer in [None, "2"]:
            convs.add(Conv2D(20, kernel_size=(3, 3), activation="relu"))
            convs.add(MaxPooling2D((2, 2), strides=(2, 2)))
        if self.cnn_layer in [None]:
            convs.add(Conv2D(40, kernel_size=(3, 3), activation="relu"))
            convs.add(MaxPooling2D((2, 2), strides=(2, 2)))
        if self.cnn_layer == "none":
            convs.add(Flatten(input_shape=(
                X["seq"].shape[2], X["seq"].shape[3], X["seq"].shape[4])))
        else:
            convs.add(Dropout(0.5))
            convs.add(Flatten())
        convs.add(MaxoutDense(output_dim=self.states/2,nb_feature=2, input_dim=self.states))
        convs.add(Dropout(0.5))
        convs.add(Dense(self.states/2, activation="relu", name="features"))
        convs.add(Dropout(0.5))
        # Apply the conv stack to every frame, then summarise with an LSTM.
        x = TimeDistributed(convs)(sequence_input)
        encoder = LSTM(self.states/2, dropout=0.5, recurrent_dropout=0.0)(x)
        output = Dense(2, activation="softmax", name="classification")(encoder)
        model = Model(inputs=sequence_input, outputs=output)
        adam = Adam(lr=self.learning_rate)
        model.compile(loss='categorical_crossentropy', optimizer=adam,
                      metrics=self.metrics)
        return model
    def make_X_list(self, *args):
        """Extract the "seq" array from each data dict; single argument
        returns the array itself, several return a tuple."""
        X_base_list = []
        for arg in args:
            X_base_list.append(arg['seq'])
        if len(X_base_list) == 1:
            return X_base_list[0]
        else:
            return tuple(X_base_list)
    def do_training(self, X_train, y_train, X_valid, y_valid):
        """Train with early stopping; restore the best checkpointed weights."""
        self.net = self.make_net(X_train)
        X_train_list, X_valid_list = self.make_X_list(X_train, X_valid)
        his = History()
        es = EarlyStopping(patience=30, verbose=False, mode='min')
        mc = ModelCheckpoint("ModelCheckpoint/tmp-feat.pkg",
                             save_best_only=True, save_weights_only=True)
        self.net.fit(X_train_list, y_train, verbose=self.properties["verbose"],
                     shuffle=True,
                     batch_size=self.batch_size, epochs=10000,
                     validation_data=(X_valid_list, y_valid),
                     class_weight="auto",
                     callbacks=[his, es, mc])
        # Restore the best weights seen during validation.
        self.net.load_weights("ModelCheckpoint/tmp-feat.pkg")
    # output_string = ""
    # for i in his.history.keys():
    #     output_string += "{}={} ".format(i, his.history[i][-1])
    #return net, output_string
    def predict(self, X):
        """Return the positive-class probability for each sample."""
        X_list = [X["seq"]]
        return self.net.predict(X_list, verbose=0)[:, 1]
|
{"/vsbrnn/data/data.py": ["/vsbrnn/utils.py"], "/main_AAAI.py": ["/vsbrnn/run_vsb_sequence.py"], "/vsbrnn/data/data_creator.py": ["/vsbrnn/utils.py"], "/main_JAD.py": ["/vsbrnn/data/data_creator.py", "/vsbrnn/data/data_importer.py", "/vsbrnn/training.py", "/vsbrnn/utils.py", "/vsbrnn/data/region_model.py"], "/vsbrnn/run_vsb_sequence.py": ["/vsbrnn/data/data_importer.py", "/vsbrnn/data/data_creator.py", "/vsbrnn/training.py", "/vsbrnn/multi_instance.py", "/vsbrnn/utils.py"], "/vsbrnn/data/data_importer.py": ["/vsbrnn/utils.py"], "/vsbrnn/data/region_model.py": ["/vsbrnn/utils.py"], "/vsbrnn/multi_instance.py": ["/vsbrnn/utils.py"]}
|
7,079
|
jonomon/VSMood
|
refs/heads/master
|
/vsbrnn/run_vsb_sequence.py
|
import numpy as np
import pandas as pd
from vsbrnn.data.data_importer import DataImporter
from vsbrnn.data.data_creator import DataCreator
from vsbrnn.training import RnnTrain, RNNFeatureTrain
from vsbrnn.multi_instance import MultiInstance
from sklearn.model_selection import train_test_split, StratifiedKFold
import matplotlib.pyplot as plt
from vsbrnn.utils import makeGaussian
from data.region_model import RegionModel, FaceRegionModel_grid9, FaceRegionModel_semantic5
from data.region_model import FaceRegionModel_grid16, FaceRegionModel_semantic8
from data.region_model import FaceRegionModel4
## GAUSSIAN SIGMA=6 from 5
def run_vsb_sequence(data_type, states, max_len, use_vsb,
                     use_img, region_model_type, cnn_layer, multi_instance,
                     verbose, print_sub):
    """Leave-one-subject-out evaluation of the sequence classifiers.

    For each held-out subject, trains on a 3-fold stratified split of the
    remaining subjects and averages the per-fold test predictions.

    Returns:
        (sub_cat, sub_prob): per-subject true label component (y[0, 1])
        and mean predicted probability.
    """
    di = DataImporter()
    # Select the face-region discretisation used to tokenise fixations.
    if region_model_type == "semantic5":
        region_model = FaceRegionModel_semantic5()
    elif region_model_type == "grid9":
        region_model = FaceRegionModel_grid9()
    elif region_model_type == "semantic8":
        region_model = FaceRegionModel_semantic8()
    elif region_model_type == "grid16":
        region_model = FaceRegionModel_grid16()
    else:
        region_model = FaceRegionModel4()
    dc = DataCreator(di, data_type, max_len=max_len, use_vsb=use_vsb,
                     model=region_model,
                     use_img=use_img)
    dc.filter_cats(["BD", "D"])  # keep only the two diagnostic categories
    subject_list = dc.get_unique_subject_list()[::-1]
    cat_list = dc.get_cat_of_subjects(subject_list)
    sub_cat = []
    sub_prob = []
    for idx, test_sub in enumerate(subject_list):
        # Hold out one subject; train/validate on all the others.
        non_test_cats = np.delete(cat_list, idx)
        non_test_subs = np.delete(subject_list, idx)
        X_test, y_test = dc.get_data_for_subjects(test_sub)
        predicts = []
        skf = StratifiedKFold(n_splits=3)
        for train_index, valid_index in skf.split(non_test_subs, non_test_cats):
            # train_subs, valid_subs, train_cats, valid_cats = train_test_split(
            #     non_test_subs, non_test_cats, test_size=0.33,
            #     stratify=non_test_cats, random_state=i)
            train_subs = non_test_subs[train_index]
            valid_subs = non_test_subs[valid_index]
            if data_type == "fix-sequence":
                # Raw fixation-image sequences -> CNN+LSTM feature model.
                X_train, y_train = dc.get_data_for_subjects(train_subs)
                X_valid, y_valid = dc.get_data_for_subjects(valid_subs)
                trainer = RNNFeatureTrain(cnn_layer, states=states, verbose=verbose)
                trainer.do_training(X_train, y_train,
                                    X_valid, y_valid)
                preds = trainer.predict(X_test)
            else:
                # Tokenised region sequences -> plain RNN model.
                X_train, y_train = dc.get_data_for_subjects(train_subs)
                X_valid, y_valid = dc.get_data_for_subjects(valid_subs)
                trainer = RnnTrain(states=states, verbose=verbose)
                trainer.do_training(X_train, y_train, X_valid, y_valid)
                preds = trainer.predict(X_test)
            # Collapse per-slide predictions into one subject-level score.
            mi = MultiInstance(multi_instance, X_train, y_train, X_test, trainer)
            mi_preds = mi.get_pred(preds)
            predicts.append(mi_preds)
        mean_test_predict = np.mean(predicts)
        sub_prob.append(mean_test_predict)
        sub_cat.append(y_test[0, 1])
        log = "Sub = {}, Mean test = {} cat = {}".format(
            test_sub, mean_test_predict, y_test[0])
        if print_sub or verbose:
            print(log)
    return sub_cat, sub_prob
|
{"/vsbrnn/data/data.py": ["/vsbrnn/utils.py"], "/main_AAAI.py": ["/vsbrnn/run_vsb_sequence.py"], "/vsbrnn/data/data_creator.py": ["/vsbrnn/utils.py"], "/main_JAD.py": ["/vsbrnn/data/data_creator.py", "/vsbrnn/data/data_importer.py", "/vsbrnn/training.py", "/vsbrnn/utils.py", "/vsbrnn/data/region_model.py"], "/vsbrnn/run_vsb_sequence.py": ["/vsbrnn/data/data_importer.py", "/vsbrnn/data/data_creator.py", "/vsbrnn/training.py", "/vsbrnn/multi_instance.py", "/vsbrnn/utils.py"], "/vsbrnn/data/data_importer.py": ["/vsbrnn/utils.py"], "/vsbrnn/data/region_model.py": ["/vsbrnn/utils.py"], "/vsbrnn/multi_instance.py": ["/vsbrnn/utils.py"]}
|
7,080
|
jonomon/VSMood
|
refs/heads/master
|
/vsbrnn/data/data_importer.py
|
import pandas as pd
import numpy as np
import csv
from data import FixationsList
from vsbrnn.utils import get_max_sequence_len, tokenise_sequence, tokenise_cat, tokenise_img_type
from keras.preprocessing.sequence import pad_sequences
imid_position_TGH = {1: (144, 41), 2: (834, 41), 3: (144, 583), 4: (834, 583)}
imid_position_TWH = {1: (460, 80), 2: (1150, 80), 3: (460, 590), 4: (1150, 590)}
imid_position = {"ED Week 2_TGH": imid_position_TGH, "ED Week 2_TWH": imid_position_TWH}
imid_size = {"ED Week 2_TGH": (302, 400), "ED Week 2_TWH": (310, 410)}
class DataImporter:
    """Loads the raw VSB fixation CSV and exposes it as padded sequence
    DataFrames (region-token sequences, raw fixation sequences, or
    sequences augmented with visual-scanning-behaviour statistics).

    NOTE(review): several loops below use dict.iteritems(), which exists
    only on Python 2.
    """
    def __init__(self):
        # Tab-separated export; quoting is disabled because fields contain
        # embedded quote characters (see _split_by_comma).
        self.file_data = pd.read_csv("vsbrnn/data/vsb_data.csv",
                                     delimiter="\t", quoting=csv.QUOTE_NONE)
        # Per-glance visual scanning behaviour statistics to extract.
        self.vsbs = ["glance_dur", "no_fix", "scan_path"]
    def _preprocess_data(self, cat_data, subject_data, sequence_data, img_label_data,
                         max_len, model):
        """Tokenise and pad sequences; returns one row per (subject, slide)
        with columns Subject, Cat, Sequence plus one per image label."""
        subject_data = np.array(subject_data)
        cat_data = np.array(cat_data)
        img_label_data["img_type"] = tokenise_img_type(img_label_data["img_type"])
        seq = tokenise_sequence(np.array(sequence_data), model)
        # max_len=None means "pad to the longest observed sequence".
        max_len = get_max_sequence_len(sequence_data) if max_len == None else max_len
        seq = pad_sequences(seq, maxlen=max_len)
        output = [subject_data, cat_data, seq]
        cols = ["Subject", "Cat", "Sequence"]
        for i in img_label_data:
            img_label = pad_sequences(img_label_data[i], maxlen=max_len)
            output.append(img_label)
            cols.append(i)
        data = pd.DataFrame(output).T
        data.columns = cols
        return data
    def get_vsb_sequence_data(self, max_len, model):
        """Region-token sequences plus per-glance VSB statistic channels."""
        cat_data = []
        sequence_data = []
        img_label_data = {}
        for i in ["img_type", "img_pos"]:
            img_label_data[i] = []
        vsb_data = {}
        for i in self.vsbs:
            vsb_data[i] = []
        subject_data = []
        for (subject, slide_no, test, cat, slides) in self._iterate_file_data(self.file_data):
            vsbs, sequence, img_labels = self._parse_slide_to_vsb_sequence(test, slides, model)
            for k, v in vsbs.iteritems():
                vsb_data[k].append(v)
            sequence_data.append(sequence)
            cat_data.append(cat)
            subject_data.append(subject)
            for k, v in img_labels.iteritems():
                img_label_data[k].append(v)
        vsb_output = []
        vsb_cols = []
        for i in vsb_data:
            vsb_data[i] = pad_sequences(vsb_data[i], maxlen=max_len, dtype="float")
            vsb_output.append(vsb_data[i])
            vsb_cols.append(i)
        vsb_df = pd.DataFrame([np.array(subject_data)] + vsb_output).T
        vsb_df.columns = ["Subject1"] + vsb_cols
        output_data = self._preprocess_data(
            cat_data, subject_data, sequence_data, img_label_data, max_len, model)
        data = pd.concat([output_data, vsb_df], axis=1)
        # Duplicate subject column only existed to align the concat.
        data["Subject1"] = None
        return data
    def get_fix_sequence_data(self, max_len):
        """Raw fixation sequences plus a per-slide saliency map."""
        cat_data = []
        sequence_data = []
        subject_data = []
        img_data = []
        for (subject, slide_no, test, cat, slides) in self._iterate_file_data(self.file_data):
            sequence = self._parse_slide_to_fix_sequence(slides)
            saliency_image = self._parse_slide_to_saliency_img(slides)
            sequence_data.append(np.stack(sequence))
            img_data.append(saliency_image)
            cat_data.append(cat)
            subject_data.append(subject)
        subject_data = np.array(subject_data)
        cat_data = np.array(cat_data)
        padded_sequence = pad_sequences(sequence_data, maxlen=max_len, dtype="float32")
        output = [subject_data, cat_data, padded_sequence, img_data]
        data = pd.DataFrame(output).T
        data.columns = ["Subject", "Cat", "Sequence", "Saliency"]
        return data
    def get_sequence_data(self, max_len, model):
        """Region-token sequences with image-type / image-position labels."""
        cat_data = []
        sequence_data = []
        subject_data = []
        img_label_data = {}
        for i in ["img_type", "img_pos"]:
            img_label_data[i] = []
        for (subject, slide_no, test, cat, slides) in self._iterate_file_data(self.file_data):
            sequence, img_labels = self._parse_slide_to_sequence(test, slides, model)
            sequence_data.append(sequence)
            cat_data.append(cat)
            subject_data.append(subject)
            for k, v in img_labels.iteritems():
                img_label_data[k].append(v)
        output_data = self._preprocess_data(
            cat_data, subject_data, sequence_data, img_label_data, max_len, model)
        return output_data
    def _parse_slide_to_fix_sequence(self, slides):
        """Time-sort the slide's fixations, return the raw fix sequence."""
        slides.sort()
        sequence = slides.convert_fixations_to_fix_sequence()
        return sequence
    def _parse_slide_to_saliency_img(self, slides):
        """Build a saliency map from the slide's fixations."""
        saliency_map = slides.convert_fixations_to_saliency_map()
        return saliency_map
    def _parse_slide_to_sequence(self, test, slides, model):
        """Time-sort and convert fixations to region tokens + image labels."""
        slides.sort()
        sequence, img_labels = slides.convert_fixations_to_sequence(test, model)
        return sequence, img_labels
    def _parse_slide_to_vsb_sequence(self, test, slides, model):
        """Like _parse_slide_to_sequence, additionally extracting the
        configured VSB statistics per glance."""
        slides.sort()
        vsbs, glance_label, img_labels = slides.convert_fixations_to_sequence_with_vsbs(
            test, model, self.vsbs)
        return vsbs, glance_label, img_labels
    def _iterate_file_data(self, file_data):
        """Yield (subject, slide_no, test, cat, fixations) per slide,
        merging rows that belong to the same subject/slide."""
        subject_data = {}
        subject_to_cat = {}
        subject_to_test = {}
        for row in file_data.iterrows():
            abs_fix_x_pos = self._split_by_comma(row[1]["VASTNormalisedFixationX"])
            abs_fix_y_pos = self._split_by_comma(row[1]["VASTNormalisedFixationY"])
            fix_dur = self._split_by_comma(row[1]["FixationDurations_ms"])
            fix_start = self._split_by_comma(row[1]["FixationStart"])
            img_type = row[1]["imgType(s)"]
            img_pos = row[1]["ImId"]
            cat = row[1]["cat"]
            subject = row[1]["Subject"]
            slide_num = row[1]["SlideNumCalculator"]
            test = row[1]["Test"]
            # Convert absolute screen coordinates to image-normalised
            # coordinates using the per-site image position/size tables.
            i_size = imid_size[test]
            i_pos = imid_position[test][img_pos]
            fix_x_pos = [(a - i_pos[0]) / i_size[0] for a in abs_fix_x_pos]
            fix_y_pos = [(a - i_pos[1]) / i_size[1] for a in abs_fix_y_pos]
            fix_list = FixationsList.from_pos(
                fix_x_pos, fix_y_pos, fix_start, fix_dur, img_type, img_pos)
            if subject not in subject_data:
                subject_data[subject] = {}
            if subject not in subject_to_cat:
                subject_to_cat[subject] = cat
            if subject not in subject_to_test:
                subject_to_test[subject] = test
            if slide_num not in subject_data[subject]:
                subject_data[subject][slide_num] = fix_list
            else:
                subject_data[subject][slide_num] = subject_data[subject][slide_num] + fix_list
        for subject_i in subject_data:
            for slide in subject_data[subject_i]:
                cat = subject_to_cat[subject_i]
                test = subject_to_test[subject_i]
                yield subject_i, slide, test, cat, subject_data[subject_i][slide]
    def _split_by_comma(self, comma_string):
        """Parse a comma-separated, possibly quoted, string of floats;
        non-numeric entries are silently dropped."""
        output = []
        array = comma_string.split(",")
        for i in array:
            i_o = i.replace("\"", "")
            if self.is_float(i_o):
                output.append(float(i_o))
        return output
    def is_float(self, s):
        """True when `s` parses as a float."""
        try:
            float(s)
            return True
        except ValueError:
            return False
|
{"/vsbrnn/data/data.py": ["/vsbrnn/utils.py"], "/main_AAAI.py": ["/vsbrnn/run_vsb_sequence.py"], "/vsbrnn/data/data_creator.py": ["/vsbrnn/utils.py"], "/main_JAD.py": ["/vsbrnn/data/data_creator.py", "/vsbrnn/data/data_importer.py", "/vsbrnn/training.py", "/vsbrnn/utils.py", "/vsbrnn/data/region_model.py"], "/vsbrnn/run_vsb_sequence.py": ["/vsbrnn/data/data_importer.py", "/vsbrnn/data/data_creator.py", "/vsbrnn/training.py", "/vsbrnn/multi_instance.py", "/vsbrnn/utils.py"], "/vsbrnn/data/data_importer.py": ["/vsbrnn/utils.py"], "/vsbrnn/data/region_model.py": ["/vsbrnn/utils.py"], "/vsbrnn/multi_instance.py": ["/vsbrnn/utils.py"]}
|
7,081
|
jonomon/VSMood
|
refs/heads/master
|
/vsbrnn/utils.py
|
import numpy as np
def makeGaussian(size, centre, fwhm=1):
    """Return a 2-D Gaussian array of shape `size` (rows, cols) centred at
    `centre` = (x0, y0), with full width at half maximum `fwhm`."""
    cols = np.arange(size[1], dtype=float)
    rows = np.arange(size[0], dtype=float)[:, np.newaxis]
    cx, cy = centre[0], centre[1]
    dist_sq = (cols - cx) ** 2 + (rows - cy) ** 2
    return np.exp(-4 * np.log(2) * dist_sq / fwhm ** 2)
class Rect:
    """Axis-aligned rectangle.

    Membership uses strict inequalities, so points exactly on an edge are
    considered outside.
    """
    def __init__(self, x, y, w, h):
        self.x = x
        self.y = y
        self.w = w
        self.h = h
    def isPointInRect(self, px, py):
        """Return True when (px, py) lies strictly inside the rectangle."""
        inside_x = self.x < px < self.x + self.w
        inside_y = self.y < py < self.y + self.h
        return inside_x and inside_y
def get_max_sequence_len(sequence):
    """Length of the longest element of `sequence` (0 when empty)."""
    lengths = [len(item) for item in sequence]
    return max(lengths) if lengths else 0
def tokenise_sequence(sequence, model):
    """Map region labels in `sequence` to integer tokens.

    The token vocabulary is chosen by matching the model's TGH region
    names against the known vocabularies.  Integers already present in a
    sequence are passed through unchanged.  An extra "N" (no region)
    token is appended as len(token) + 1.

    Raises:
        AssertionError: when the model's regions match no vocabulary.
        (Fixed: previously `token` was left unbound on no match, so the
        guard raised NameError instead of the intended AssertionError.)
    """
    token_4 = {"LE": 1, "RE": 2, "NO": 3, "M": 4, "NT": 5, "LC": 6, "RC": 7, "FH1": 8,
               "FH2": 9, "FH3": 10, "LM": 11, "RM": 12}
    token_grid9 = {"FH1": 1, "FH2": 2, "FH3": 3, "LE": 4, "NT": 5, "RE": 6, "LC": 7, "NO": 8,
                   "RC": 9}
    token_grid16 = {"FH1": 1, "FH2": 2, "FH3": 3, "FH4": 4, "LE1": 5, "LE2": 6, "RE1": 7,
                    "RE2": 8, "LC": 9, "NO1": 10, "NO2": 11, "RC": 12, "M1": 13, "M2": 14,
                    "M3": 15, "M4": 16}
    token_semantic5 = {"LE": 1, "RE": 2, "NO": 3, "M": 4}
    token_semantic8 = {"FH": 1, "LE": 2, "RE": 3, "NO": 4, "LC": 5, "RC": 6, "M": 7}
    token = None  # stays None when no vocabulary matches
    model_keys = sorted(model.model_TGH.keys())
    if model_keys == sorted(token_4.keys()):
        token = token_4
    elif model_keys == sorted(token_grid9.keys()):
        token = token_grid9
    elif model_keys == sorted(token_semantic5.keys()):
        token = token_semantic5
    elif model_keys == sorted(token_grid16.keys()):
        token = token_grid16
    elif model_keys == sorted(token_semantic8.keys()):
        token = token_semantic8
    assert token is not None, "Token incorrect"
    token["N"] = len(token) + 1
    out = []
    for labels in sequence:
        seq = []
        for v in labels:
            # Pre-tokenised ints pass through untouched.
            seq.append(v if isinstance(v, int) else token[v])
        out.append(seq)
    return out
def tokenise_cat(cat, one_hot=False):
    """Encode diagnostic category labels as 0/1 (BD/BR -> 0, C/D/R -> 1).

    With one_hot=True each label becomes a 2-element one-hot vector.
    Returns a numpy array.
    """
    token = {"BD": 0, "BR": 0, "C": 1, "D": 1, "R": 1}
    if one_hot:
        encoded = [[0, 1] if token[label] == 1 else [1, 0] for label in cat]
    else:
        encoded = [token[label] for label in cat]
    return np.array(encoded)
def tokenise_img_type(img_types):
    """Map each image-label sequence to ints: "Happy" -> 2, "Sad" -> 1."""
    token = {"Happy": 2, "Sad": 1}
    return [[token[label] for label in labels] for labels in img_types]
def get_sub_to_cat_dict(sub, cat):
    """Map each subject to its first associated category."""
    sub_to_cat = {}
    for subject, category in zip(sub, cat):
        sub_to_cat.setdefault(subject, category)
    return sub_to_cat
def _getProb(i, n_, bins):
n_ = n_ / np.sum(n_)
cum = filter(lambda a: i >= a[1], zip(n_, bins))
if len(cum) == 0:
n_curr = n_[0]
else:
n_curr = cum[-1][0]
n_curr = np.clip(n_curr, 0.001, 1)
return n_curr
def get_log_likelihood(i, n_bd, bins_bd, n_d, bins_d):
    """Log-likelihood ratio log P(i | D) - log P(i | BD) computed from the
    two class histograms via _getProb."""
    p_bd = _getProb(i, n_bd, bins_bd)
    p_d = _getProb(i, n_d, bins_d)
    return np.log(p_d) - np.log(p_bd)
|
{"/vsbrnn/data/data.py": ["/vsbrnn/utils.py"], "/main_AAAI.py": ["/vsbrnn/run_vsb_sequence.py"], "/vsbrnn/data/data_creator.py": ["/vsbrnn/utils.py"], "/main_JAD.py": ["/vsbrnn/data/data_creator.py", "/vsbrnn/data/data_importer.py", "/vsbrnn/training.py", "/vsbrnn/utils.py", "/vsbrnn/data/region_model.py"], "/vsbrnn/run_vsb_sequence.py": ["/vsbrnn/data/data_importer.py", "/vsbrnn/data/data_creator.py", "/vsbrnn/training.py", "/vsbrnn/multi_instance.py", "/vsbrnn/utils.py"], "/vsbrnn/data/data_importer.py": ["/vsbrnn/utils.py"], "/vsbrnn/data/region_model.py": ["/vsbrnn/utils.py"], "/vsbrnn/multi_instance.py": ["/vsbrnn/utils.py"]}
|
7,082
|
jonomon/VSMood
|
refs/heads/master
|
/vsbrnn/data/region_model.py
|
from vsbrnn.utils import Rect
import numpy as np
class RegionModel:
    """Maps fixation coordinates to named or numbered face regions.

    Two strategies are supported: named bounding boxes (one set per
    display variant, TGH vs TWH) and integer segmentation maps.
    """
    def __init__(self):
        self.model_TGH = {}
        self.model_TWH = {}
        self.ignore_fixations_outside = False
        self.segmentation = None
        self.region_type = None
    @classmethod
    def from_segmentation(cls, segmentation):
        """Alternate constructor for segmentation-map based models.

        Fixed: previously rebound ``cls`` to a hard-coded
        ``RegionModel()``, which shadowed the classmethod argument and
        always produced a base-class instance even when called on a
        subclass.
        """
        instance = cls()
        instance.region_type = "segmentation"
        instance.segmentation = segmentation
        return instance
    def add_region_TGH(self, name, x, y, w, h):
        # Register a named bounding box for the TGH display variant.
        self.model_TGH[name] = Rect(x, y, w, h)
    def add_region_TWH(self, name, x, y, w, h):
        # Register a named bounding box for the TWH display variant.
        self.model_TWH[name] = Rect(x, y, w, h)
    def fix_in_segmentation(self, fix):
        """Return the 1-based segment id under `fix`; fixations outside
        the map get a dedicated id of max(segmentation) + 2."""
        x = np.floor(fix.x * self.segmentation.shape[0])
        y = np.floor(fix.y * self.segmentation.shape[1])
        if x < 0 or x >= self.segmentation.shape[0] or y < 0 or y >= self.segmentation.shape[1]:
            return np.max(self.segmentation) + 2
        else:
            return self.segmentation[int(x), int(y)] + 1
    def fix_in_bounding_box(self, test, fix):
        """Return the name of the first region containing `fix`, or "N"
        when no registered box contains it."""
        if "TWH" in test:
            models = self.model_TWH
        else:
            models = self.model_TGH
        for key in models:
            model = models[key]
            if model.isPointInRect(fix.x, fix.y):
                return key
        return "N"
    def fix_in_region(self, test, fix):
        """Dispatch to the configured region strategy."""
        if self.region_type == "segmentation":
            return self.fix_in_segmentation(fix)
        elif self.region_type == "bounding_box":
            return self.fix_in_bounding_box(test, fix)
class FaceRegionModel_semantic5(RegionModel):
    """Four semantic face regions (eyes, nose, mouth); the TGH and TWH
    display variants share the same layout."""
    # name -> (x, y, w, h) in normalised image coordinates
    _REGIONS = {
        "LE": (0.0, 0.25, 0.45, 0.2),
        "RE": (0.55, 0.25, 0.45, 0.2),
        "NO": (0.275, 0.45, 0.45, 0.2),
        "M": (0.15, 0.65, 0.7, 0.2),
    }
    def __init__(self):
        RegionModel.__init__(self)
        self.region_type = "bounding_box"
        for name, (x, y, w, h) in self._REGIONS.items():
            self.add_region_TGH(name, x=x, y=y, w=w, h=h)
            self.add_region_TWH(name, x=x, y=y, w=w, h=h)
class FaceRegionModel_semantic8(RegionModel):
    """Seven semantic face regions (forehead, eyes, nose, cheeks, mouth);
    the TGH and TWH display variants share the same layout."""
    # name -> (x, y, w, h) in normalised image coordinates
    _REGIONS = {
        "FH": (0.0, 0.0, 1, 0.2),
        "LE": (0.0, 0.25, 0.45, 0.2),
        "RE": (0.55, 0.25, 0.45, 0.2),
        "NO": (0.275, 0.45, 0.45, 0.2),
        "LC": (0.0, 0.45, 0.25, 0.2),
        "RC": (0.75, 0.45, 0.25, 0.2),
        "M": (0.15, 0.65, 0.7, 0.2),
    }
    def __init__(self):
        RegionModel.__init__(self)
        self.region_type = "bounding_box"
        for name, (x, y, w, h) in self._REGIONS.items():
            self.add_region_TGH(name, x=x, y=y, w=w, h=h)
            self.add_region_TWH(name, x=x, y=y, w=w, h=h)
class FaceRegionModel4(RegionModel):
    """3x4 semantic grid over the face; fixations outside every region are
    ignored.  The TGH and TWH display variants share the same layout."""
    # name -> (x, y, w, h) in normalised image coordinates
    _REGIONS = {
        "FH1": (0.0, 0.0, 0.38, 0.25),
        "FH2": (0.38, 0.0, 0.24, 0.25),
        "FH3": (0.62, 0.0, 0.38, 0.25),
        "LE": (0.0, 0.25, 0.38, 0.21),
        "NT": (0.38, 0.25, 0.24, 0.21),
        "RE": (0.62, 0.25, 0.38, 0.21),
        "LC": (0.0, 0.46, 0.38, 0.21),
        "NO": (0.38, 0.46, 0.24, 0.21),
        "RC": (0.62, 0.46, 0.38, 0.21),
        "LM": (0.0, 0.67, 0.38, 0.33),
        "M": (0.38, 0.67, 0.24, 0.33),
        "RM": (0.62, 0.67, 0.38, 0.33),
    }
    def __init__(self):
        RegionModel.__init__(self)
        self.region_type = "bounding_box"
        self.ignore_fixations_outside = True
        for name, (x, y, w, h) in self._REGIONS.items():
            self.add_region_TGH(name, x=x, y=y, w=w, h=h)
            self.add_region_TWH(name, x=x, y=y, w=w, h=h)
class FaceRegionModel_grid9(RegionModel):
    """Uniform 3x3 grid over the face image; fixations outside every cell
    are ignored.  TGH and TWH share the same layout."""
    # (name, x, y) of each 0.33 x 0.33 cell, row by row.
    _GRID = [("FH1", 0.0, 0.0), ("FH2", 0.33, 0.0), ("FH3", 0.66, 0.0),
             ("LE", 0.0, 0.33), ("NT", 0.33, 0.33), ("RE", 0.66, 0.33),
             ("LC", 0.0, 0.66), ("NO", 0.33, 0.66), ("RC", 0.66, 0.66)]
    def __init__(self):
        RegionModel.__init__(self)
        self.region_type = "bounding_box"
        self.ignore_fixations_outside = True
        for name, x, y in self._GRID:
            self.add_region_TGH(name, x=x, y=y, w=0.33, h=0.33)
            self.add_region_TWH(name, x=x, y=y, w=0.33, h=0.33)
class FaceRegionModel_grid16(RegionModel):
    """Uniform 4x4 grid over the face image; fixations outside every cell
    are ignored.  TGH and TWH share the same layout."""
    # Cell names in row-major order; coordinates are derived from the
    # index (0.25 steps are exact in binary floating point).
    _NAMES = ["FH1", "FH2", "FH3", "FH4",
              "LE1", "LE2", "RE1", "RE2",
              "LC", "NO1", "NO2", "RC",
              "M1", "M2", "M3", "M4"]
    def __init__(self):
        RegionModel.__init__(self)
        self.region_type = "bounding_box"
        self.ignore_fixations_outside = True
        for idx, name in enumerate(self._NAMES):
            x = 0.25 * (idx % 4)
            y = 0.25 * (idx // 4)
            self.add_region_TGH(name, x=x, y=y, w=0.25, h=0.25)
            self.add_region_TWH(name, x=x, y=y, w=0.25, h=0.25)
|
{"/vsbrnn/data/data.py": ["/vsbrnn/utils.py"], "/main_AAAI.py": ["/vsbrnn/run_vsb_sequence.py"], "/vsbrnn/data/data_creator.py": ["/vsbrnn/utils.py"], "/main_JAD.py": ["/vsbrnn/data/data_creator.py", "/vsbrnn/data/data_importer.py", "/vsbrnn/training.py", "/vsbrnn/utils.py", "/vsbrnn/data/region_model.py"], "/vsbrnn/run_vsb_sequence.py": ["/vsbrnn/data/data_importer.py", "/vsbrnn/data/data_creator.py", "/vsbrnn/training.py", "/vsbrnn/multi_instance.py", "/vsbrnn/utils.py"], "/vsbrnn/data/data_importer.py": ["/vsbrnn/utils.py"], "/vsbrnn/data/region_model.py": ["/vsbrnn/utils.py"], "/vsbrnn/multi_instance.py": ["/vsbrnn/utils.py"]}
|
7,083
|
jonomon/VSMood
|
refs/heads/master
|
/vsbrnn/multi_instance.py
|
import matplotlib.pyplot as plt
import numpy as np
from vsbrnn.utils import get_log_likelihood
class MultiInstance():
    """Aggregates per-slide probabilities into one subject-level score
    using the configured multi-instance strategy."""
    def __init__(self, method, X_train, y_train, x_test, trainer):
        # method selects the aggregation strategy in get_pred().
        self.method = method
        self.X_train = X_train
        self.y_train = y_train
        self.x_test = x_test
        self.trainer = trainer
    def get_pred(self, preds):
        """Dispatch on self.method; returns None for an unknown method."""
        if self.method == "mean":
            return self.get_mean_pred(preds)
        elif self.method == "1std-mean":
            return self.get_n_std_mean_pred(1, preds)
        elif self.method == "max-likelihood":
            return self.get_max_likelihood(preds)
        elif self.method == "similar":
            return self.get_similar(preds)
        elif self.method == "log-prob":
            return self.get_log_prob(preds)
        else:
            return None
    def get_mean_pred(self, preds):
        """Plain mean of the instance predictions."""
        return np.mean(preds)
    def get_n_std_mean_pred(self, n, preds):
        """Mean of predictions within n standard deviations of the mean.

        NOTE(review): the boolean indexing assumes `preds` is a numpy
        array -- confirm against callers.
        """
        std = np.std(preds)
        mean = np.mean(preds)
        max_value = mean + n * std
        min_value = mean - n * std
        mean_preds = preds[np.logical_and(preds > min_value, preds < max_value)]
        return np.mean(mean_preds)
    def get_max_likelihood(self, preds):
        """Mean log-likelihood ratio of the predictions, using class
        histograms of the training-set predictions.

        Side effect: plt.hist draws onto the current matplotlib figure.
        """
        X_predicts = self.trainer.predict(self.X_train)
        n_d, bins_d, _ = plt.hist(
            X_predicts[self.y_train[:, 1]==1], facecolor='green', alpha=0.5)
        n_bd, bins_bd, _ = plt.hist(
            X_predicts[self.y_train[:, 1]==0], facecolor='red', alpha=0.5)
        log_like = [get_log_likelihood(a, n_bd, bins_bd,
                                       n_d, bins_d) for a in preds]
        return np.mean(log_like)
    def get_similar(self, preds):
        """Mean prediction after dropping the instance whose token
        sequence is most dissimilar (by Levenshtein distance) to the
        others."""
        sequences = self.x_test["seq"]
        n = sequences.shape[0]
        distances = np.zeros(shape=(n, n))
        for i in range(n):
            for j in range(n):
                if i != j:
                    sequence1 = sequences[i, :]
                    sequence2 = sequences[j, :]
                    leven_dist = self.levenshteinDistance(sequence1, sequence2)
                    distances[i, j] = leven_dist
        mean_distances = np.mean(distances, axis=1)
        max_distance_index = np.argmax(mean_distances)
        preds_max_removed = np.delete(preds, max_distance_index)
        return np.mean(preds_max_removed)
    def levenshteinDistance(self, s1, s2):
        """Edit distance between two sequences (classic two-row DP)."""
        if len(s1) > len(s2):
            s1, s2 = s2, s1
        distances = range(len(s1) + 1)
        for i2, c2 in enumerate(s2):
            distances_ = [i2+1]
            for i1, c1 in enumerate(s1):
                if c1 == c2:
                    distances_.append(distances[i1])
                else:
                    distances_.append(
                        1 + min((distances[i1], distances[i1 + 1], distances_[-1])))
            distances = distances_
        return distances[-1]
    def get_log_prob(self, preds):
        """Mean log prediction, clipped so -inf from log(0) cannot
        propagate."""
        log_preds = np.log(preds)
        log_preds = np.clip(log_preds, -1e15, 1e15)
        return np.mean(log_preds)
|
{"/vsbrnn/data/data.py": ["/vsbrnn/utils.py"], "/main_AAAI.py": ["/vsbrnn/run_vsb_sequence.py"], "/vsbrnn/data/data_creator.py": ["/vsbrnn/utils.py"], "/main_JAD.py": ["/vsbrnn/data/data_creator.py", "/vsbrnn/data/data_importer.py", "/vsbrnn/training.py", "/vsbrnn/utils.py", "/vsbrnn/data/region_model.py"], "/vsbrnn/run_vsb_sequence.py": ["/vsbrnn/data/data_importer.py", "/vsbrnn/data/data_creator.py", "/vsbrnn/training.py", "/vsbrnn/multi_instance.py", "/vsbrnn/utils.py"], "/vsbrnn/data/data_importer.py": ["/vsbrnn/utils.py"], "/vsbrnn/data/region_model.py": ["/vsbrnn/utils.py"], "/vsbrnn/multi_instance.py": ["/vsbrnn/utils.py"]}
|
7,110
|
menjarleev/dgl-ke
|
refs/heads/master
|
/python/dglke/models/pytorch/regularizer.py
|
import torch as th
class Regularizer(object):
    """Lp-norm weight regularizer.

    Calling the instance with a list of parameter tensors returns the
    scaled sum of ||p||_norm ** norm together with a log dict holding the
    detached scalar value.
    """
    def __init__(self, coef, norm):
        self.coef = coef
        self.norm = norm
    def __call__(self, params: list):
        log = {}
        if self.coef == 0:
            # Regularization disabled; keep the (value, log) return shape.
            log['regularization'] = 0
            return 0, log
        penalty = 0
        for p in params:
            penalty = penalty + p.norm(p=self.norm) ** self.norm
        penalty = penalty * self.coef
        log['regularization'] = penalty.detach().item()
        return penalty, log
|
{"/python/dglke/util/argparser/__init__.py": ["/python/dglke/util/argparser/common_argparser.py", "/python/dglke/util/argparser/train_argparser.py"], "/python/dglke/models/pytorch/ke_tensor.py": ["/python/dglke/models/pytorch/ke_optimizer.py"], "/python/dglke/models/transe.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py"], "/python/dglke/util/argparser/train_argparser.py": ["/python/dglke/util/argparser/common_argparser.py"], "/python/dglke/models/conve.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py"], "/python/dglke/util/__init__.py": ["/python/dglke/util/misc.py", "/python/dglke/util/argparser/__init__.py", "/python/dglke/util/math.py", "/python/dglke/util/logger.py"], "/python/dglke/models/hypere.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/ke_model.py"], "/python/dglke/models/ke_model.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/pytorch/regularizer.py"]}
|
7,111
|
menjarleev/dgl-ke
|
refs/heads/master
|
/python/dglke/util/logger.py
|
import os
import json
class Logger(object):
    """Minimal file-backed logger; output paths are configured by setting
    the class attributes before use."""
    log_path = None      # text log file appended to by print()
    result_path = None   # JSON file written by save_result()
    def __init__(self):
        pass
    @classmethod
    def print(cls, content):
        """Append `content` to the log file and echo it to stdout."""
        with open(cls.log_path, 'a+') as log_file:
            log_file.write(content + '\n')
        print(content)
    @classmethod
    def save_result(cls, result: dict):
        """Serialize `result` to the result file as indented JSON."""
        with open(cls.result_path, 'w') as result_file:
            json.dump(result, result_file, indent=4)
|
{"/python/dglke/util/argparser/__init__.py": ["/python/dglke/util/argparser/common_argparser.py", "/python/dglke/util/argparser/train_argparser.py"], "/python/dglke/models/pytorch/ke_tensor.py": ["/python/dglke/models/pytorch/ke_optimizer.py"], "/python/dglke/models/transe.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py"], "/python/dglke/util/argparser/train_argparser.py": ["/python/dglke/util/argparser/common_argparser.py"], "/python/dglke/models/conve.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py"], "/python/dglke/util/__init__.py": ["/python/dglke/util/misc.py", "/python/dglke/util/argparser/__init__.py", "/python/dglke/util/math.py", "/python/dglke/util/logger.py"], "/python/dglke/models/hypere.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/ke_model.py"], "/python/dglke/models/ke_model.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/pytorch/regularizer.py"]}
|
7,112
|
menjarleev/dgl-ke
|
refs/heads/master
|
/python/dglke/models/pytorch/ke_optimizer.py
|
import torch as th
import math
from collections import defaultdict
import copy
class Optimizer(object):
    """Base class for sparse embedding optimizers.

    Hyper-parameters live in `defaults`; per-parameter buffers (plus a
    'step' counter) live in `state`.  Subclasses implement step().
    """
    def __init__(self, defaults):
        self.defaults = defaults
        self.state = defaultdict(dict)
        self.state['step'] = 0
    def step(self, idx, param, grad, gpu_id=-1):
        """Apply one update for rows `idx` of `param` given `grad`."""
        raise NotImplementedError
    def share_memory(self):
        """Move tensor-valued state buffers into shared memory."""
        for buf in self.state.values():
            if type(buf) is th.Tensor:
                buf.share_memory_()
    def to(self, device: th.device):
        """Move tensor-valued state buffers to `device`; returns self."""
        for key, buf in self.state.items():
            if type(buf) is th.Tensor:
                self.state[key] = buf.to(device)
        return self
class Adagrad(Optimizer):
    """Row-sparse Adagrad for embedding tables.

    Accumulates squared gradients per row (optionally their mean over the
    embedding dimension) and scales each update by the accumulated sum.
    """
    def __init__(self, emb, device=th.device('cpu'), lr=1e-3, epsilon=1e-10, unique_indices=False, mean_sum=True):
        # emb: the embedding tensor this optimizer updates (rows x dim).
        defaults = dict(lr=lr, epsilon=epsilon)
        super(Adagrad, self).__init__(defaults)
        # If True, duplicate row indices in a batch are merged (averaged)
        # before the update.
        self.unique_indices = unique_indices
        self.mean_sum = mean_sum
        if mean_sum:
            # One accumulator scalar per row: mean of squared grads over dim.
            self.state['sum'] = emb.new().resize_(emb.shape[0], 1).zero_()
        else:
            # Full element-wise accumulator, same shape as the embedding.
            self.state['sum'] = th.zeros_like(emb, device=device)
    @th.no_grad()
    def step(self, idx, param, grad, gpu_id=-1):
        """Apply one Adagrad update to the rows of ``param`` indexed by ``idx``.

        ``grad`` holds one gradient row per entry of ``idx``; the update is
        added in place via ``index_add_``.
        """
        clr = self.defaults['lr']
        epsilon = self.defaults['epsilon']
        device = self.state['sum'].device
        self.state['step'] += 1
        grad_values = grad
        grad_indices = idx
        if grad_indices.device != device:
            grad_indices = grad_indices.to(device)
        if self.unique_indices:
            # Merge duplicate indices: average the gradients of repeated rows.
            grad_indices, inv_indicies, cnt = th.unique(grad_indices, return_inverse=True, return_counts=True)
            grad_values = th.zeros(grad_indices.shape[0], grad.shape[1], device=device)
            grad_values.index_add_(0, inv_indicies, grad)
            grad_values = grad_values / cnt.unsqueeze(1)
        if self.mean_sum:
            grad_sq = th.mean(grad_values ** 2, 1, keepdim=True)
        else:
            grad_sq = grad_values ** 2
        if grad_sq.device != device:
            grad_sq = grad_sq.to(device)
        self.state['sum'].index_add_(0, grad_indices, grad_sq)
        # Fancy indexing returns a copy, so the in-place sqrt_/add_ below do
        # not modify the accumulator itself.
        std = self.state['sum'][grad_indices]
        if gpu_id >= 0:
            std = std.cuda(gpu_id)
        std_values = std.sqrt_().add_(epsilon)
        update_val = (-clr * grad_values / std_values)
        if update_val.device != device:
            update_val = update_val.to(device)
        param.index_add_(0, grad_indices, update_val)
class Adam(Optimizer):
    """Row-sparse Adam for embedding tables.

    Keeps first/second moment estimates per embedding row and applies
    bias-corrected updates only to the rows touched in the current batch.
    """
    def __init__(self, emb, device=th.device('cpu'), lr=1e-3, eps=1e-8, betas=(0.9, 0.999)):
        defaults = dict(lr=lr, eps=eps, betas=betas)
        super(Adam, self).__init__(defaults)
        # Moment buffers live on `device` (may differ from where `param` lives).
        self.state['exp_avg'] = th.zeros_like(emb, device=device)
        self.state['exp_avg_sq'] = th.zeros_like(emb, device=device)
    @th.no_grad()
    def step(self, idx, param, grad, gpu_id=-1):
        """Apply one Adam update to the rows of ``param`` indexed by ``idx``.

        Duplicate indices are merged (gradients averaged) first, so the
        non-linear moment update sees each row exactly once.
        NOTE(review): bias correction uses the optimizer-global step counter,
        not a per-row step count — confirm this matches the intended schedule.
        """
        beta1, beta2 = self.defaults['betas']
        lr, eps = self.defaults['lr'], self.defaults['eps']
        self.state['step'] += 1
        device = self.state['exp_avg'].device
        if idx.device != device:
            idx = idx.to(device)
        if grad.device != device:
            grad = grad.to(device)
        # Average the gradients of repeated indices.
        grad_indices, inv_indicies, cnt = th.unique(idx, return_inverse=True, return_counts=True)
        grad_values = th.zeros(grad_indices.shape[0], grad.shape[1], device=device)
        grad_values.index_add_(0, inv_indicies, grad)
        grad_values = grad_values / cnt.unsqueeze(1)
        # Moment updates expressed as deltas so one index_add_ suffices:
        # new = old + (target - old) * (1 - beta).
        old_exp_avg_values = self.state['exp_avg'][grad_indices]
        exp_avg_update_values = grad_values.sub(old_exp_avg_values).mul_(1 - beta1)
        self.state['exp_avg'].index_add_(0, grad_indices, exp_avg_update_values)
        old_exp_avg_sq_values = self.state['exp_avg_sq'][grad_indices]
        exp_avg_sq_update_values = grad_values.pow(2).sub_(old_exp_avg_sq_values).mul_(1 - beta2)
        self.state['exp_avg_sq'].index_add_(0, grad_indices, exp_avg_sq_update_values)
        bias_correction1 = 1 - beta1 ** self.state['step']
        bias_correction2 = 1 - beta2 ** self.state['step']
        step_size = lr * math.sqrt(bias_correction2) / bias_correction1
        # Reconstruct the updated moments without re-reading the state tensors.
        numer = exp_avg_update_values.add_(old_exp_avg_values)
        demon = exp_avg_sq_update_values.add_(old_exp_avg_sq_values).sqrt_().add_(eps)
        del exp_avg_update_values, exp_avg_sq_update_values
        if gpu_id >= 0:
            numer = numer.cuda(gpu_id)
            demon = demon.cuda(gpu_id)
        update_val = - step_size * numer.div_(demon)
        if update_val.device != device:
            update_val = update_val.to(device)
        param.index_add_(0, grad_indices, update_val)
|
{"/python/dglke/util/argparser/__init__.py": ["/python/dglke/util/argparser/common_argparser.py", "/python/dglke/util/argparser/train_argparser.py"], "/python/dglke/models/pytorch/ke_tensor.py": ["/python/dglke/models/pytorch/ke_optimizer.py"], "/python/dglke/models/transe.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py"], "/python/dglke/util/argparser/train_argparser.py": ["/python/dglke/util/argparser/common_argparser.py"], "/python/dglke/models/conve.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py"], "/python/dglke/util/__init__.py": ["/python/dglke/util/misc.py", "/python/dglke/util/argparser/__init__.py", "/python/dglke/util/math.py", "/python/dglke/util/logger.py"], "/python/dglke/models/hypere.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/ke_model.py"], "/python/dglke/models/ke_model.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/pytorch/regularizer.py"]}
|
7,113
|
menjarleev/dgl-ke
|
refs/heads/master
|
/python/dglke/util/argparser/__init__.py
|
from .common_argparser import *
from .train_argparser import *
|
{"/python/dglke/util/argparser/__init__.py": ["/python/dglke/util/argparser/common_argparser.py", "/python/dglke/util/argparser/train_argparser.py"], "/python/dglke/models/pytorch/ke_tensor.py": ["/python/dglke/models/pytorch/ke_optimizer.py"], "/python/dglke/models/transe.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py"], "/python/dglke/util/argparser/train_argparser.py": ["/python/dglke/util/argparser/common_argparser.py"], "/python/dglke/models/conve.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py"], "/python/dglke/util/__init__.py": ["/python/dglke/util/misc.py", "/python/dglke/util/argparser/__init__.py", "/python/dglke/util/math.py", "/python/dglke/util/logger.py"], "/python/dglke/models/hypere.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/ke_model.py"], "/python/dglke/models/ke_model.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/pytorch/regularizer.py"]}
|
7,114
|
menjarleev/dgl-ke
|
refs/heads/master
|
/python/dglke/models/pytorch/ke_tensor.py
|
# -*- coding: utf-8 -*-
#
# tensor_models.py
#
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
KG Sparse embedding
"""
import os
import numpy as np
import copy
import torch as th
import torch.nn.init as INIT
from .ke_optimizer import *
from dglke.util import thread_wrapped_func
import torch.multiprocessing as mp
from torch.multiprocessing import Queue
class KGEmbedding:
    """Sparse Embedding for Knowledge Graph
    It is used to store both entity embeddings and relation embeddings.
    Parameters
    ----------
    num : int
        Number of embeddings.
    dim : int
        Embedding dimention size.
    device : th.device
        Device to store the embedding.
    """
    def __init__(self, device):
        # The embedding tensor is allocated lazily in init() / load().
        self.emb = None
        self.is_train = False
        # Queue feeding the optional async-update subprocess; None means
        # gradients are applied synchronously in update().
        self.async_q = None
        self.device = device
    def init(self, emb_init, lr, async_threads, num=-1, dim=-1, init_strat='uniform', optimizer='Adagrad', device=None):
        """Initializing the embeddings for training.
        Parameters
        ----------
        emb_init : float or tuple
            The intial embedding range should be [-emb_init, emb_init].
            Interpretation depends on init_strat (range for 'uniform',
            mean/std for 'normal', scale/offset for 'random',
            fill value for 'constant').
        lr : float
            Learning rate handed to the optimizer.
        async_threads : int
            Torch thread count used by the async update subprocess.
        num, dim : int
            Shape of the embedding; only used when self.emb is still None.
        """
        self.async_threads = async_threads
        if device is not None:
            self.device = device
        if self.emb is None:
            self.emb = th.empty(num, dim, dtype=th.float32, device=self.device)
        self.num = self.emb.shape[0]
        self.dim = self.emb.shape[1]
        if optimizer == 'Adagrad':
            self.optim = Adagrad(self.emb, device=self.device, lr=lr)
        elif optimizer == 'Adam':
            self.optim = Adam(self.emb, device=self.device, lr=lr)
        else:
            raise NotImplementedError(f'optimizer {optimizer} is not supported by dglke yet.')
        self.trace = []
        self.has_cross_rel = False
        if init_strat == 'uniform':
            INIT.uniform_(self.emb, -emb_init, emb_init)
        elif init_strat == 'normal':
            if type(emb_init) is tuple or type(emb_init) is list:
                if len(emb_init) == 0:
                    # NOTE(review): assigning the empty sequence as `mean`
                    # looks suspicious — possibly meant len(emb_init) == 1
                    # with mean = emb_init[0]. TODO confirm.
                    mean = emb_init
                    std = 1
                else:
                    mean, std = emb_init
                INIT.normal_(self.emb.data, mean, std)
            else:
                # Scalar emb_init: standard normal scaled by emb_init.
                init_size = emb_init
                INIT.normal_(self.emb.data)
                self.emb.data *= init_size
        elif init_strat == 'random':
            if type(emb_init) is tuple:
                x, y = emb_init
                self.emb.data = th.rand(num, dim, dtype=th.float32, device=self.device) * x + y
        elif init_strat == 'xavier':
            INIT.xavier_normal_(self.emb.data)
        elif init_strat == 'constant':
            INIT.constant_(self.emb.data, emb_init)
    def clone(self, device):
        """Deep-copy this embedding (and its optimizer state) onto `device`."""
        clone_emb = copy.deepcopy(self)
        clone_emb.device = device
        clone_emb.emb = clone_emb.emb.to(device)
        clone_emb.optim = clone_emb.optim.to(device)
        return clone_emb
    def load(self, path, name):
        """Load embeddings.
        Parameters
        ----------
        path : str
            Directory to load the embedding.
        name : str
            Embedding name.
        """
        file_name = os.path.join(path, name)
        self.emb = th.Tensor(np.load(file_name))
    def load_emb(self, emb_array):
        """Load embeddings from numpy array.
        Parameters
        ----------
        emb_array : numpy.array or torch.tensor
            Embedding array in numpy array or torch.tensor
        """
        if isinstance(emb_array, np.ndarray):
            self.emb = th.Tensor(emb_array)
        else:
            self.emb = emb_array
    def save(self, path, name):
        """Save embeddings.
        Parameters
        ----------
        path : str
            Directory to save the embedding.
        name : str
            Embedding name.
        """
        file_name = os.path.join(path, name)
        np.save(file_name, self.emb.cpu().detach().numpy())
    def train(self):
        """Switch to training mode: __call__ will trace slices for update()."""
        self.is_train = True
    def eval(self):
        """Switch to inference mode: __call__ returns untraced slices."""
        self.is_train = False
    def setup_cross_rels(self, cross_rels, global_emb):
        """Mark `cross_rels` rows as shared with a global (CPU) embedding.

        Rows flagged in cpu_bitmap are read from / written back to
        `global_emb` during __call__ and update().
        """
        cpu_bitmap = th.zeros((self.num,), dtype=th.bool)
        for i, rel in enumerate(cross_rels):
            cpu_bitmap[rel] = 1
        self.cpu_bitmap = cpu_bitmap
        self.has_cross_rel = True
        self.global_emb = global_emb
    def get_noncross_idx(self, idx):
        """Return the subset of `idx` not flagged as cross-partition rows."""
        cpu_mask = self.cpu_bitmap[idx]
        gpu_mask = ~cpu_mask
        return idx[gpu_mask]
    def share_memory(self):
        """Use torch.tensor.share_memory_() to allow cross process tensor access
        """
        self.emb.share_memory_()
        self.optim.share_memory()
    def __call__(self, idx, gpu_id=-1, trace=True):
        """ Return sliced tensor.
        Parameters
        ----------
        idx : th.tensor
            Slicing index
        gpu_id : int
            Which gpu to put sliced data in.
        trace : bool
            If True, trace the computation. This is required in training.
            If False, do not trace the computation.
            Default: True
        """
        # for inference or evaluation
        if self.is_train is False:
            return self.emb[idx].cuda(gpu_id, non_blocking=True)
        if self.has_cross_rel:
            # Refresh locally cached cross-partition rows from the global
            # embedding before slicing.
            cpu_idx = idx.cpu()
            cpu_mask = self.cpu_bitmap[cpu_idx]
            cpu_idx = cpu_idx[cpu_mask]
            cpu_idx = th.unique(cpu_idx)
            if cpu_idx.shape[0] != 0:
                cpu_emb = self.global_emb.emb[cpu_idx]
                self.emb[cpu_idx] = cpu_emb.cuda(gpu_id, non_blocking=True)
        s = self.emb[idx]
        if gpu_id >= 0:
            s = s.cuda(gpu_id, non_blocking=True)
        # During the training, we need to trace the computation.
        # In this case, we need to record the computation path and compute the gradients.
        if trace:
            data = s.clone().detach().requires_grad_(True)
            self.trace.append((idx, data))
        else:
            data = s
        return data
    def update(self, gpu_id=-1):
        """ Update embeddings in a sparse manner
        Sparse embeddings are updated in mini batches. we maintains gradient states for
        each embedding so they can be updated separately.
        Parameters
        ----------
        gpu_id : int
            Which gpu to accelerate the calculation. if -1 is provided, cpu is used.
        """
        with th.no_grad():
            for idx, data in self.trace:
                grad = data.grad.data
                # the update is non-linear so indices must be unique
                grad_indices = idx
                grad_values = grad
                if self.async_q is not None:
                    # Hand the gradients to the async subprocess instead of
                    # applying them inline.
                    grad_indices.share_memory_()
                    grad_values.share_memory_()
                    self.async_q.put((grad_indices, grad_values, gpu_id))
                else:
                    if self.has_cross_rel:
                        # Cross-partition rows are updated on the global
                        # embedding as well.
                        cpu_mask = self.cpu_bitmap[grad_indices]
                        cpu_idx = grad_indices[cpu_mask]
                        if cpu_idx.shape[0] > 0:
                            cpu_grad = grad_values[cpu_mask]
                            self.global_emb.optim.step(cpu_idx, self.global_emb.emb, cpu_grad, gpu_id)
                    self.optim.step(grad_indices, self.emb, grad_values, gpu_id)
            self.trace = []
    def create_async_update(self):
        """Set up the async update subprocess.
        """
        self.async_q = Queue(1)
        self.async_p = mp.Process(target=self.async_update)
        self.async_p.start()
    def finish_async_update(self):
        """Notify the async update subprocess to quit.
        """
        # (None, None, None) is the sentinel async_update() exits on.
        self.async_q.put((None, None, None))
        self.async_p.join()
    def async_update(self):
        """Subprocess loop: drain the queue and apply optimizer steps."""
        th.set_num_threads(self.async_threads)
        while True:
            (grad_indices, grad_values, gpu_id) = self.async_q.get()
            if grad_indices is None:
                return
            with th.no_grad():
                if self.has_cross_rel:
                    cpu_mask = self.cpu_bitmap[grad_indices]
                    cpu_idx = grad_indices[cpu_mask]
                    if cpu_idx.shape[0] > 0:
                        cpu_grad = grad_values[cpu_mask]
                        self.global_emb.optim.step(cpu_idx, self.global_emb.emb, cpu_grad, gpu_id)
                self.optim.step(grad_indices, self.emb, grad_values, gpu_id)
    def curr_emb(self):
        """Return embeddings in trace.
        """
        data = [data for _, data in self.trace]
        return th.cat(data, 0)
|
{"/python/dglke/util/argparser/__init__.py": ["/python/dglke/util/argparser/common_argparser.py", "/python/dglke/util/argparser/train_argparser.py"], "/python/dglke/models/pytorch/ke_tensor.py": ["/python/dglke/models/pytorch/ke_optimizer.py"], "/python/dglke/models/transe.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py"], "/python/dglke/util/argparser/train_argparser.py": ["/python/dglke/util/argparser/common_argparser.py"], "/python/dglke/models/conve.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py"], "/python/dglke/util/__init__.py": ["/python/dglke/util/misc.py", "/python/dglke/util/argparser/__init__.py", "/python/dglke/util/math.py", "/python/dglke/util/logger.py"], "/python/dglke/models/hypere.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/ke_model.py"], "/python/dglke/models/ke_model.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/pytorch/regularizer.py"]}
|
7,115
|
menjarleev/dgl-ke
|
refs/heads/master
|
/python/dglke/models/transe.py
|
from .ke_model import GEModel, EMB_INIT_EPS
from .pytorch.score_fun import TransEScore
from dglke.util import get_device
import torch as th
class TransEModel(GEModel):
    """TransE knowledge-graph embedding model scored by L1 distance."""

    def __init__(self, args, device, model_name):
        super(TransEModel, self).__init__(args, device, model_name,
                                          TransEScore(args.gamma, dist_func='l1'))

    def categorize_embedding(self):
        # Register the embeddings under the buckets the base class manages.
        self._entity_related_emb['entity_emb'] = self._entity_emb
        self._relation_related_emb['relation_emb'] = self._relation_emb

    def initialize(self, n_entities, n_relations, init_strat='uniform'):
        """Allocate and initialize entity/relation embeddings."""
        args = self.args
        emb_range = (args.gamma + EMB_INIT_EPS) / args.hidden_dim
        main_device = get_device(args)
        # Entity table stays on CPU in mixed mode; relations also stay on CPU
        # under relation partitioning.
        ent_device = th.device('cpu') if args.mix_cpu_gpu else main_device
        rel_device = th.device('cpu') if (args.mix_cpu_gpu or args.strict_rel_part or args.soft_rel_part) else main_device
        self._entity_emb.init(emb_init=emb_range, lr=self.lr, async_threads=args.num_thread,
                              num=n_entities, dim=self.hidden_dim, init_strat=init_strat,
                              optimizer=args.optimizer, device=ent_device)
        self._relation_emb.init(emb_init=emb_range, lr=self.lr, async_threads=args.num_thread,
                                num=n_relations, dim=self.hidden_dim, init_strat=init_strat,
                                optimizer=args.optimizer, device=rel_device)

    def acquire_embedding(self, data, gpu_id=-1, pos=True, train=True, neg_type='head'):
        """Slice the embeddings needed for a positive or negative batch.

        Slices are traced (for gradient updates) exactly when training.
        """
        ent = self._entity_related_emb['entity_emb']
        rel = self._relation_related_emb['relation_emb']
        if pos:
            return {'head': ent(data['head'], gpu_id=gpu_id, trace=train),
                    'tail': ent(data['tail'], gpu_id=gpu_id, trace=train),
                    'rel': rel(data['rel'], gpu_id=gpu_id, trace=train), }
        if train:
            slot = 'head' if neg_type == 'head' else 'tail'
            return {slot: ent(data['neg'], gpu_id=gpu_id, trace=True), }
        return {'head': ent(data['neg'], gpu_id=gpu_id, trace=False),
                'tail': ent(data['neg'], gpu_id=gpu_id, trace=False), }

    def pos_forward(self, pos_emb):
        """Score a batch of positive triples."""
        return self._score_func.predict(pos_emb)

    def neg_forward(self, pos_emb, neg_emb, neg_type, chunk_size, neg_sample_size, train=True):
        """Score positive triples against chunked negative samples."""
        pos_emb, neg_emb = self.prepare_data(pos_emb, neg_emb, neg_type, chunk_size, neg_sample_size, train)
        neg = neg_emb[neg_type]
        num_chunk = len(pos_emb['head']) // chunk_size
        corrupt_head = neg_type == 'head'
        score = self._score_func.create_neg(corrupt_head)
        if corrupt_head:
            return score(neg, pos_emb['rel'], pos_emb['tail'], num_chunk, chunk_size, neg_sample_size)
        return score(pos_emb['head'], pos_emb['rel'], neg, num_chunk, chunk_size, neg_sample_size)
|
{"/python/dglke/util/argparser/__init__.py": ["/python/dglke/util/argparser/common_argparser.py", "/python/dglke/util/argparser/train_argparser.py"], "/python/dglke/models/pytorch/ke_tensor.py": ["/python/dglke/models/pytorch/ke_optimizer.py"], "/python/dglke/models/transe.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py"], "/python/dglke/util/argparser/train_argparser.py": ["/python/dglke/util/argparser/common_argparser.py"], "/python/dglke/models/conve.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py"], "/python/dglke/util/__init__.py": ["/python/dglke/util/misc.py", "/python/dglke/util/argparser/__init__.py", "/python/dglke/util/math.py", "/python/dglke/util/logger.py"], "/python/dglke/models/hypere.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/ke_model.py"], "/python/dglke/models/ke_model.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/pytorch/regularizer.py"]}
|
7,116
|
menjarleev/dgl-ke
|
refs/heads/master
|
/python/dglke/util/math.py
|
import torch as th
# some constants
MIN_NORM = 1e-15  # floor used to avoid division by (near) zero in the hyperbolic ops below
BALL_EPS = {th.float32: 4e-3, th.float64: 1e-5}  # per-dtype margin keeping points strictly inside the Poincare ball
# math in hyperbolic space
class Artanh(th.autograd.Function):
    """Numerically safe inverse hyperbolic tangent with a custom gradient.

    Inputs are clamped away from +/-1 and the forward pass is evaluated in
    double precision before being cast back to the input dtype.
    """
    @staticmethod
    def forward(ctx, x):
        clamped = x.clamp(-1 + 1e-5, 1 - 1e-5)
        # The clamped value is what backward() differentiates against.
        ctx.save_for_backward(clamped)
        orig_dtype = clamped.dtype
        z = clamped.double()
        # artanh(z) = 0.5 * (log(1+z) - log(1-z))
        return (th.log_(1 + z).sub_(th.log_(1 - z))).mul_(0.5).to(orig_dtype)
    @staticmethod
    def backward(ctx, grad_output):
        saved, = ctx.saved_tensors
        # d/dz artanh(z) = 1 / (1 - z^2)
        return grad_output / (1 - saved ** 2)
def artanh(x):
    """Element-wise inverse hyperbolic tangent via the custom Artanh autograd op."""
    return Artanh.apply(x)
def tanh(x):
    """Element-wise tanh with the input clamped to [-15, 15] for numerical safety."""
    bounded = x.clamp(-15, 15)
    return bounded.tanh()
def hyp_distance_multi_c(x, y, c, mode='batch'):
    """Hyperbolic distance between x and y on Poincare balls of curvature c.

    mode='batch' compares x and y pairwise along the last dimension;
    mode='mm' scores every row of x against every row of y via batched matmul.
    Raises ValueError for any other mode.
    """
    sqrt_c = c ** 0.5
    if mode == 'mm':
        ynorm = th.norm(y, p=2, dim=-1, keepdim=True).transpose(1, 2)
        xy = th.bmm(x, y.transpose(1, 2)) / ynorm
        # NOTE(review): th.bmm(sqrt_c, ynorm) requires sqrt_c to be a 3-D
        # batched tensor here (not a python scalar) — confirm callers pass
        # c with that shape in 'mm' mode.
        gamma = tanh(th.bmm(sqrt_c, ynorm)) / sqrt_c
    elif mode == 'batch':
        ynorm = th.norm(y, p=2, dim=-1, keepdim=True)
        xy = th.sum(x * y / ynorm, dim=-1, keepdim=True)
        gamma = tanh(sqrt_c * ynorm) / sqrt_c
    else:
        raise ValueError(f'{mode} mode is not supported. Choose from [batch, mm]')
    x2 = th.sum(x * x, dim=-1, keepdim=True)
    c1 = 1 - 2 * c * gamma * xy + c * gamma ** 2
    c2 = 1 - c * x2
    num = th.sqrt((c1 ** 2) * x2 + (c2 ** 2) * (gamma ** 2) - (2 * c1 * c2) * gamma * xy)
    denom = 1 - 2 * c * gamma * xy + (c ** 2) * (gamma ** 2) * x2
    # Clamp the denominator to avoid division by ~0 near the ball boundary.
    pairwise_norm = num / denom.clamp_min(MIN_NORM)
    dist = artanh(sqrt_c * pairwise_norm)
    # Distance on the c-curvature ball is rescaled by 2 / sqrt(c).
    return 2 * dist / sqrt_c
def mobius_add(x, y, c):
    """Mobius addition of x and y on the Poincare ball with curvature c."""
    x_sq = th.sum(x * x, dim=-1, keepdim=True)
    y_sq = th.sum(y * y, dim=-1, keepdim=True)
    inner = th.sum(x * y, dim=-1, keepdim=True)
    numerator = (1 + 2 * c * inner + c * y_sq) * x + (1 - c * x_sq) * y
    denominator = 1 + 2 * c * inner + c ** 2 * x_sq * y_sq
    # Clamp to avoid dividing by ~0 near the ball boundary.
    return numerator / denominator.clamp_min(MIN_NORM)
def project(x, c):
    """Project x back inside the Poincare ball of curvature c.

    Points whose norm exceeds the dtype-dependent safe radius are rescaled
    onto that radius; points already inside are returned unchanged.
    """
    x_norm = x.norm(dim=-1, p=2, keepdim=True).clamp_min(MIN_NORM)
    eps = BALL_EPS[x.dtype]
    max_norm = (1 - eps) / (c ** 0.5)
    outside = x_norm > max_norm
    rescaled = x / x_norm * max_norm
    return th.where(outside, rescaled, x)
def expmap0(u, c):
    """Exponential map at the origin of the Poincare ball with curvature c."""
    sqrt_c = c ** 0.5
    u_norm = u.norm(dim=-1, p=2, keepdim=True).clamp_min(MIN_NORM)
    mapped = tanh(sqrt_c * u_norm) * u / (sqrt_c * u_norm)
    # Keep the result strictly inside the ball.
    return project(mapped, c)
# euclidean space
def givens_rotations(r, x, comp='batch'):
    """Apply 2x2 Givens rotations parameterized by r to x.

    Embeddings are viewed as sequences of 2-D pairs; each pair of r is
    normalized into a (cos, sin) unit vector that rotates the matching pair
    of x. comp='batch' rotates row i of x by row i of r; otherwise every
    relation rotates every entity via broadcasted einsum.
    """
    if comp == 'batch':
        givens = r.view((r.shape[0], -1, 2))
        # Normalize each 2-vector so it encodes a valid rotation.
        givens = givens / th.norm(givens, p=2, dim=-1, keepdim=True)
        x = x.view((r.shape[0], -1, 2))
        x_rot = givens[:, :, 0:1] * x + givens[:, :, 1:] * th.cat((-x[:, :, 1:], x[:, :, 0:1]), dim=-1)
        return x_rot.view((r.shape[0], -1))
    else:
        givens = r.view((r.shape[0], r.shape[1], -1, 2))
        givens = givens / th.norm(givens, p=2, dim=-1, keepdim=True)
        x = x.view((x.shape[0], x.shape[1], -1, 2))
        # Rotation split into the cos term and the sin term applied to the
        # pair-swapped/negated input.
        x_rot_a = th.einsum('bcde,bnde->bcnde', givens[:, :, :, 0:1].expand(-1, -1, -1, 2), x)
        x_rot_b = th.einsum('bcde,bnde->bcnde', givens[:, :, :, 1:].expand(-1, -1, -1, 2),
                            th.cat((-x[:, :, :, 1:], x[:, :, :, 0:1]), dim=-1))
        x_rot = x_rot_a + x_rot_b
        return x_rot.view((r.shape[0], r.shape[1], x.shape[1], -1))
def givens_reflection(r, x, comp='batch'):
    """Apply 2x2 Givens reflections parameterized by r to x.

    Same pairing scheme as givens_rotations, but each normalized (cos, sin)
    pair defines a reflection rather than a rotation.
    """
    if comp == 'batch':
        givens = r.view((r.shape[0], -1, 2))
        givens = givens / th.norm(givens, p=2, dim=-1, keepdim=True)
        x = x.view((r.shape[0], -1, 2))
        x_ref = givens[:, :, 0:1] * th.cat((x[:, :, 0:1], -x[:, :, 1:]), dim=-1) + givens[:, :, 1:] * th.cat(
            (x[:, :, 1:], x[:, :, 0:1]), dim=-1)
        return x_ref.view((r.shape[0], -1))
    else:
        givens = r.view((r.shape[0], r.shape[1], -1, 2))
        givens = givens / th.norm(givens, p=2, dim=-1, keepdim=True)
        x = x.view((x.shape[0], x.shape[1], -1, 2))
        # Reflection expressed as two broadcasted einsum terms over all
        # (relation, entity) combinations.
        x_ref_a = th.einsum('bcde,bnde->bcnde', givens[:, :, :, 0:1].expand(-1, -1, -1, 2),
                            th.cat((x[:, :, :, 0:1], -x[:, :, :, 1:]), dim=-1))
        x_ref_b = th.einsum('bcde,bnde->bcnde', givens[:, :, :, 1:].expand(-1, -1, -1, 2), th.cat((x[:, :, :, 1:], x[:, :, :, 0:1]), dim=-1))
        x_ref = x_ref_a + x_ref_b
        return x_ref.view((r.shape[0], r.shape[1], x.shape[1], -1))
def logmap0(y, c):
    """Logarithm map at the origin of the Poincare ball with curvature c (inverse of expmap0)."""
    sqrt_c = c ** 0.5
    y_norm = y.norm(dim=-1, p=2, keepdim=True).clamp_min(MIN_NORM)
    return y / y_norm / sqrt_c * artanh(sqrt_c * y_norm)
|
{"/python/dglke/util/argparser/__init__.py": ["/python/dglke/util/argparser/common_argparser.py", "/python/dglke/util/argparser/train_argparser.py"], "/python/dglke/models/pytorch/ke_tensor.py": ["/python/dglke/models/pytorch/ke_optimizer.py"], "/python/dglke/models/transe.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py"], "/python/dglke/util/argparser/train_argparser.py": ["/python/dglke/util/argparser/common_argparser.py"], "/python/dglke/models/conve.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py"], "/python/dglke/util/__init__.py": ["/python/dglke/util/misc.py", "/python/dglke/util/argparser/__init__.py", "/python/dglke/util/math.py", "/python/dglke/util/logger.py"], "/python/dglke/models/hypere.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/ke_model.py"], "/python/dglke/models/ke_model.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/pytorch/regularizer.py"]}
|
7,117
|
menjarleev/dgl-ke
|
refs/heads/master
|
/python/dglke/util/argparser/train_argparser.py
|
from .common_argparser import CommonArgParser
class TrainArgParser(CommonArgParser):
    """Argument parser for training runs.

    Extends CommonArgParser with device placement (GPU list, mixed CPU/GPU),
    validation, relation partitioning, async embedding updates, and
    edge-importance options.
    """
    def __init__(self):
        super(TrainArgParser, self).__init__()
        self.add_argument('--gpu', type=int, default=[-1], nargs='+',
                          help='A list of gpu ids, e.g. 0 1 2 4')
        self.add_argument('--mix_cpu_gpu', action='store_true',
                          help='Training a knowledge graph embedding model with both CPUs and GPUs.' \
                               'The embeddings are stored in CPU memory and the training is performed in GPUs.' \
                               'This is usually used for training a large knowledge graph embeddings.')
        self.add_argument('--valid', action='store_true',
                          help='Evaluate the model on the validation set in the training.')
        self.add_argument('--rel_part', action='store_true',
                          help='Enable relation partitioning for multi-GPU training.')
        self.add_argument('--async_update', action='store_true',
                          help='Allow asynchronous update on node embedding for multi-GPU training.' \
                               'This overlaps CPU and GPU computation to speed up.')
        self.add_argument('--has_edge_importance', action='store_true',
                          help='Allow providing edge importance score for each edge during training.' \
                               'The positive score will be adjusted ' \
                               'as pos_score = pos_score * edge_importance')
|
{"/python/dglke/util/argparser/__init__.py": ["/python/dglke/util/argparser/common_argparser.py", "/python/dglke/util/argparser/train_argparser.py"], "/python/dglke/models/pytorch/ke_tensor.py": ["/python/dglke/models/pytorch/ke_optimizer.py"], "/python/dglke/models/transe.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py"], "/python/dglke/util/argparser/train_argparser.py": ["/python/dglke/util/argparser/common_argparser.py"], "/python/dglke/models/conve.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py"], "/python/dglke/util/__init__.py": ["/python/dglke/util/misc.py", "/python/dglke/util/argparser/__init__.py", "/python/dglke/util/math.py", "/python/dglke/util/logger.py"], "/python/dglke/models/hypere.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/ke_model.py"], "/python/dglke/models/ke_model.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/pytorch/regularizer.py"]}
|
7,118
|
menjarleev/dgl-ke
|
refs/heads/master
|
/python/dglke/util/misc.py
|
# -*- coding: utf-8 -*-
#
# misc.py
#
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import os
import csv
import json
import numpy as np
import torch as th
import glob
# PEP 8 (E731): named lambdas replaced with defs — same names and signatures,
# but with docstrings and useful traceback names.
def to_device(x, gpu_id):
    """Move tensor ``x`` to CPU when gpu_id == -1, else to ``cuda:<gpu_id>``."""
    if gpu_id == -1:
        return x.to(th.device('cpu'))
    return x.to(th.device('cuda:%d' % gpu_id))

def none(x):
    """Identity function used as a no-op transform."""
    return x

def norm(x, p):
    """Return ||x||_p raised to the p-th power."""
    return x.norm(p=p) ** p

def get_scalar(x):
    """Extract a Python scalar from a 0-dim tensor (detached from autograd)."""
    return x.detach().item()

def reshape(arr, x, y):
    """View ``arr`` as an (x, y) tensor."""
    return arr.view(x, y)
def get_device(args):
    """Return the torch device selected by the first entry of ``args.gpu``."""
    first_gpu = args.gpu[0]
    if first_gpu < 0:
        return th.device('cpu')
    return th.device('cuda:' + str(first_gpu))
def get_compatible_batch_size(batch_size, neg_sample_size):
    """Round batch_size up to a multiple of neg_sample_size when they are incompatible.

    Compatible means either neg_sample_size >= batch_size or batch_size is
    already a multiple of neg_sample_size; otherwise the rounded-up size is
    returned (with a notice printed).
    """
    incompatible = neg_sample_size < batch_size and batch_size % neg_sample_size != 0
    if not incompatible:
        return batch_size
    rounded = int(math.ceil(batch_size / neg_sample_size) * neg_sample_size)
    print('batch size ({}) is incompatible to the negative sample size ({}). Change the batch size to {}'.format(
        batch_size, neg_sample_size, rounded))
    return rounded
def save_model(args, model, emap_file=None, rmap_file=None):
    """Save model embeddings and the training configuration.

    Writes embeddings via ``model.save_emb`` and dumps ``args`` (plus the
    entity/relation map file names) to ``<save_path>/config.json``.

    Parameters
    ----------
    args : argparse.Namespace
        Training arguments; ``args.save_path`` and ``args.dataset`` are used.
    model : object
        Model exposing ``save_emb(path, dataset)``.
    emap_file : str, optional
        Entity ID mapping file name recorded in the config.
    rmap_file : str, optional
        Relation ID mapping file name recorded in the config.
    """
    if not os.path.exists(args.save_path):
        os.mkdir(args.save_path)
    print('Save model to {}'.format(args.save_path))
    model.save_emb(args.save_path, args.dataset)
    # We need to save the model configurations as well.
    conf_file = os.path.join(args.save_path, 'config.json')
    # Fix: the original bound the builtin name `dict` to a local, shadowing it.
    config_dict = {}
    config_dict.update(vars(args))
    # NOTE(review): key is spelled 'emp_file' (not 'emap_file'); kept as-is for
    # backward compatibility with existing config readers — confirm before renaming.
    config_dict.update({'emp_file': emap_file,
                        'rmap_file': rmap_file})
    with open(conf_file, 'w') as outfile:
        json.dump(config_dict, outfile, indent=4)
def load_model_config(config_f):
    """Load a model configuration JSON file and return it as a dict.

    The path and the loaded config are echoed to stdout (kept from the
    original behavior).
    """
    print(config_f)
    with open(config_f, "r") as f:
        # json.load reads the stream directly (was json.loads(f.read()));
        # dead commented-out code removed.
        config = json.load(f)
    print(config)
    return config
def load_raw_triplet_data(head_f=None, rel_f=None, tail_f=None, emap_f=None, rmap_f=None):
    """Load raw (string-named) triplet components and map names to integer IDs.

    Parameters
    ----------
    head_f, rel_f, tail_f : str, optional
        Files with one raw entity/relation name per line.
    emap_f, rmap_f : str, optional
        Entity/relation mapping TSVs with rows "<id>\\t<name>".

    Returns
    -------
    (head, rel, tail, id2e_map, id2r_map)
        numpy ID arrays (or None for files not given) plus the id->name maps
        (None when the corresponding mapping file was not given).
    """
    def _load_map(map_f):
        # Build both directions of the "<id>\t<name>" mapping.
        name2id, id2name = {}, {}
        with open(map_f, 'r') as f:
            for row in csv.reader(f, delimiter='\t'):
                name2id[row[1]] = int(row[0])
                id2name[int(row[0])] = row[1]
        return name2id, id2name

    def _load_ids(path, name2id):
        # One raw name per line; the trailing newline is stripped before lookup.
        ids = []
        with open(path, 'r') as f:
            line = f.readline()
            while len(line) > 0:
                ids.append(name2id[line[:-1]])
                line = f.readline()
        return np.asarray(ids)

    # Fix: these were unbound (NameError at the return) when the mapping
    # files were not provided.
    eid_map = id2e_map = rid_map = id2r_map = None
    if emap_f is not None:
        eid_map, id2e_map = _load_map(emap_f)
    if rmap_f is not None:
        rid_map, id2r_map = _load_map(rmap_f)
    head = _load_ids(head_f, eid_map) if head_f is not None else None
    rel = _load_ids(rel_f, rid_map) if rel_f is not None else None
    tail = _load_ids(tail_f, eid_map) if tail_f is not None else None
    return head, rel, tail, id2e_map, id2r_map
def load_triplet_data(head_f=None, rel_f=None, tail_f=None):
    """Load integer triplet components; each file holds one integer ID per line.

    Returns (head, rel, tail) numpy arrays; any component whose file is not
    given is returned as None.
    """
    def _read_ids(path):
        if path is None:
            return None
        with open(path, 'r') as f:
            return np.asarray([int(line) for line in f])

    return _read_ids(head_f), _read_ids(rel_f), _read_ids(tail_f)
def load_raw_emb_mapping(map_f):
    """Return the id -> name mapping read from a tab-separated "<id>  <name>" file."""
    assert map_f is not None
    with open(map_f, 'r') as f:
        return {int(row[0]): row[1] for row in csv.reader(f, delimiter='\t')}
def load_raw_emb_data(file, map_f=None, e2id_map=None):
    """Map raw names in ``file`` (one per line) to integer IDs.

    Either ``map_f`` (a "<id>\\t<name>" TSV from which both directions of the
    mapping are built) or a prebuilt ``e2id_map`` must be provided.

    Returns
    -------
    (ids, id2e_map, e2id_map)
        ids is a numpy array; id2e_map is an empty-list placeholder when a
        prebuilt e2id_map was supplied instead of a mapping file.
    """
    if map_f is not None:
        e2id_map = {}
        id2e_map = {}
        with open(map_f, 'r') as f:
            reader = csv.reader(f, delimiter='\t')
            for row in reader:
                e2id_map[row[1]] = int(row[0])
                id2e_map[int(row[0])] = row[1]
    elif e2id_map is not None:
        id2e_map = [] # dummy placeholder returned when the caller supplied the mapping
    else:
        # NOTE(review): asserts are stripped under `python -O`; consider raising
        # ValueError here if callers rely on this validation.
        assert False, 'There should be an ID mapping file provided'
    ids = []
    with open(file, 'r') as f:
        line = f.readline()
        while len(line) > 0:
            # line[:-1] strips the trailing newline before the lookup.
            ids.append(e2id_map[line[:-1]])
            line = f.readline()
    ids = np.asarray(ids)
    return ids, id2e_map, e2id_map
def load_entity_data(file=None):
    """Load one integer entity ID per line; returns None when no file is given."""
    if file is None:
        return None
    with open(file, 'r') as f:
        return np.asarray([int(line) for line in f])
def prepare_save_path(args):
    """Create a fresh run directory '<model>_<dataset>_<n>' under args.save_path.

    ``n`` is the count of existing runs with the same prefix; ``args.save_path``
    is mutated to point at the new directory.
    """
    if not os.path.exists(args.save_path):
        os.mkdir(args.save_path)
    prefix = '{}_{}_'.format(args.model_name, args.dataset)
    run_id = sum(1 for entry in os.listdir(args.save_path) if entry.startswith(prefix))
    args.save_path = os.path.join(args.save_path, prefix + str(run_id))
    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)
def set_seed(args):
    """Seed the numpy and torch RNGs from ``args.seed`` for reproducibility."""
    seed = args.seed
    np.random.seed(seed)
    th.manual_seed(seed)
def evaluate_best_result(model_name, dataset, save_path, threshold=3):
    """Scan '<save_path>/<model>_<dataset>_*/result.txt' and print the best run.

    A candidate replaces the current best when at least ``threshold`` metrics
    improve; 'MR' counts as improved when lower-or-equal, every other metric
    when higher-or-equal.

    Parameters
    ----------
    model_name, dataset : str
        Used to build the run-directory glob pattern.
    save_path : str
        Root directory holding the run folders.
    threshold : int
        Minimum number of improved metrics required to replace the best run.
    """
    file_pattern = '{}/{}_{}_*/result.txt'.format(save_path, model_name, dataset)
    files = glob.glob(file_pattern)
    # Fix: the original crashed on best_result.items() when no runs existed.
    if not files:
        print(f'no result found for {model_name} on {dataset} under {save_path}')
        return
    best_result = None
    best_dir = None
    for result_file in files:
        run_dir = result_file.split('/')[-2]
        with open(result_file, 'r') as f:
            result = json.load(f)
        if best_result is None:
            best_result = result
            best_dir = run_dir
            continue
        improved = 0
        for metric, value in result.items():
            if metric == 'MR':
                # Mean rank: lower is better.
                if value <= best_result[metric]:
                    improved += 1
            elif value >= best_result[metric]:
                improved += 1
        if improved >= threshold:
            best_result = result
            best_dir = run_dir
    # Fix: the original f-string contained a stray quote and literal newline
    # artifacts in the printed message.
    print(f'{model_name} training on {dataset} best result is in folder {best_dir}\nbest result:\n')
    for metric, value in best_result.items():
        print(f'{metric}: {value}')
|
{"/python/dglke/util/argparser/__init__.py": ["/python/dglke/util/argparser/common_argparser.py", "/python/dglke/util/argparser/train_argparser.py"], "/python/dglke/models/pytorch/ke_tensor.py": ["/python/dglke/models/pytorch/ke_optimizer.py"], "/python/dglke/models/transe.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py"], "/python/dglke/util/argparser/train_argparser.py": ["/python/dglke/util/argparser/common_argparser.py"], "/python/dglke/models/conve.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py"], "/python/dglke/util/__init__.py": ["/python/dglke/util/misc.py", "/python/dglke/util/argparser/__init__.py", "/python/dglke/util/math.py", "/python/dglke/util/logger.py"], "/python/dglke/models/hypere.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/ke_model.py"], "/python/dglke/models/ke_model.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/pytorch/regularizer.py"]}
|
7,119
|
menjarleev/dgl-ke
|
refs/heads/master
|
/python/dglke/util/argparser/common_argparser.py
|
import argparse
class CommonArgParser(argparse.ArgumentParser):
    """Argument parser with the options shared by all DGL-KE commands.

    Task-specific parsers (e.g. the training parser) subclass this and add
    their own options on top of this common set.
    """
    def __init__(self):
        super(CommonArgParser, self).__init__()
        self.add_argument('--model_name', default='TransE',
                          choices=['TransE', 'TransE_l1', 'TransE_l2', 'TransR',
                                   'RESCAL', 'DistMult', 'ComplEx', 'RotatE',
                                   'SimplE', 'ConvE', 'AttH'],
                          help='The models provided by DGL-KE.')
        self.add_argument('--data_path', type=str, default='data',
                          help='The path of the directory where DGL-KE loads knowledge graph data.')
        self.add_argument('--dataset', type=str, default='FB15k',
                          help='The name of the builtin knowledge graph. Currently, the builtin knowledge ' \
                               'graphs include FB15k, FB15k-237, wn18, wn18rr and Freebase. ' \
                               'DGL-KE automatically downloads the knowledge graph and keep it under data_path.')
        self.add_argument('--format', type=str, default='built_in',
                          help='The format of the dataset. For builtin knowledge graphs,' \
                               'the foramt should be built_in. For users own knowledge graphs,' \
                               'it needs to be raw_udd_{htr} or udd_{htr}.')
        self.add_argument('--data_files', type=str, default=None, nargs='+',
                          help='A list of data file names. This is used if users want to train KGE' \
                               'on their own datasets. If the format is raw_udd_{htr},' \
                               'users need to provide train_file [valid_file] [test_file].' \
                               'If the format is udd_{htr}, users need to provide' \
                               'entity_file relation_file train_file [valid_file] [test_file].' \
                               'In both cases, valid_file and test_file are optional.')
        self.add_argument('--delimiter', type=str, default='\t',
                          help='Delimiter used in data files. Note all files should use the same delimiter.')
        self.add_argument('--save_path', type=str, default='ckpts',
                          help='the path of the directory where models and logs are saved.')
        self.add_argument('--no_save_emb', action='store_true',
                          help='Disable saving the embeddings under save_path.')
        self.add_argument('--max_step', type=int, default=80000,
                          help='The maximal number of steps to train the model.' \
                               'A step trains the model with a batch of data.')
        self.add_argument('--batch_size', type=int, default=1024,
                          help='The batch size for training.')
        self.add_argument('--batch_size_eval', type=int, default=8,
                          help='The batch size used for validation and test.')
        self.add_argument('--neg_sample_size', type=int, default=256,
                          help='The number of negative samples we use for each positive sample in the training.')
        self.add_argument('--neg_deg_sample', action='store_true',
                          help='Construct negative samples proportional to vertex degree in the training.' \
                               'When this option is turned on, the number of negative samples per positive edge' \
                               'will be doubled. Half of the negative samples are generated uniformly while' \
                               'the other half are generated proportional to vertex degree.')
        self.add_argument('--neg_deg_sample_eval', action='store_true',
                          help='Construct negative samples proportional to vertex degree in the evaluation.')
        self.add_argument('--neg_sample_size_eval', type=int, default=-1,
                          help='The number of negative samples we use to evaluate a positive sample.')
        self.add_argument('--eval_percent', type=float, default=1,
                          help='Randomly sample some percentage of edges for evaluation.')
        self.add_argument('--no_eval_filter', action='store_false', dest='eval_filter',
                          help='Disable filter positive edges from randomly constructed negative edges for evaluation')
        self.add_argument('--self_loop_filter', action='store_true', dest='self_loop_filter',
                          help='Disable filter triple like (head - relation - head) score for evaluation')
        self.add_argument('-log', '--log_interval', type=int, default=1000,
                          help='Print runtime of different components every x steps.')
        self.add_argument('--eval_interval', type=int, default=1,
                          help='Print evaluation results on the validation dataset every x steps' \
                               'if validation is turned on')
        self.add_argument('--test', action='store_true',
                          help='Evaluate the model on the test set after the model is trained.')
        self.add_argument('--num_proc', type=int, default=1,
                          help='The number of processes to train the model in parallel.' \
                               'In multi-GPU training, the number of processes by default is set to match the number of GPUs.' \
                               'If set explicitly, the number of processes needs to be divisible by the number of GPUs.')
        self.add_argument('--num_thread', type=int, default=1,
                          help='The number of CPU threads to train the model in each process.' \
                               'This argument is used for multiprocessing training.')
        self.add_argument('--force_sync_interval', type=int, default=-1,
                          help='We force a synchronization between processes every x steps for' \
                               'multiprocessing training. This potentially stablizes the training process'
                               'to get a better performance. For multiprocessing training, it is set to 1000 by default.')
        self.add_argument('--hidden_dim', type=int, default=400,
                          help='The embedding size of relation and entity')
        self.add_argument('--lr', type=float, default=0.01,
                          help='The learning rate. DGL-KE uses Adagrad to optimize the model parameters.')
        self.add_argument('-g', '--gamma', type=float, default=12.0,
                          help='The margin value in the score function. It is used by TransX and RotatE.')
        self.add_argument('-de', '--double_ent', action='store_true',
                          help='Double entitiy dim for complex number or canonical polyadic. It is used by RotatE and SimplE.')
        self.add_argument('-dr', '--double_rel', action='store_true',
                          help='Double relation dim for complex number or canonical polyadic. It is used by RotatE and SimplE')
        self.add_argument('-adv', '--neg_adversarial_sampling', action='store_true',
                          help='Indicate whether to use negative adversarial sampling.' \
                               'It will weight negative samples with higher scores more.')
        self.add_argument('-a', '--adversarial_temperature', default=1.0, type=float,
                          help='The temperature used for negative adversarial sampling.')
        self.add_argument('-rc', '--regularization_coef', type=float, default=0.000002,
                          help='The coefficient for regularization.')
        self.add_argument('-rn', '--regularization_norm', type=int, default=3,
                          help='norm used in regularization.')
        self.add_argument('-pw', '--pairwise', action='store_true',
                          help='Indicate whether to use pairwise loss function. '
                               'It compares the scores of a positive triple and a negative triple')
        self.add_argument('--loss_genre', default='Logsigmoid',
                          choices=['Hinge', 'Logistic', 'Logsigmoid', 'BCE'],
                          help='The loss function used to train KGEM.')
        self.add_argument('-m', '--margin', type=float, default=1.0,
                          help='hyper-parameter for hinge loss.')
        # args for ConvE
        self.add_argument('--tensor_height', type=int, default=10,
                          help='Tensor height for ConvE. Note hidden_dim must be divisible by it')
        # NOTE(review): default is a scalar 0 but nargs='+' yields a list when
        # the flag is passed — downstream must accept both forms; confirm.
        self.add_argument('--dropout_ratio', type=float, nargs='+', default=0,
                          help='Dropout ratio for input, conv, linear respectively. If 0 is specified, ConvE will not use dropout for that layer')
        # BUG FIX: type=bool treats ANY non-empty string (including 'False')
        # as True, so '--batch_norm False' silently enabled batch norm.
        # Parse the string explicitly instead; the default stays True.
        self.add_argument('--batch_norm', '-bn',
                          type=lambda x: str(x).lower() in ('true', '1', 'yes', 'y'),
                          default=True,
                          help='Whether use batch normalization in ConvE or not')
        self.add_argument('--label_smooth', type=float, default=.0,
                          help='use label smoothing for training.')
        # args for reproducibility
        self.add_argument('--seed', type=int, default=0,
                          help='Random seed for reproducibility')
        self.add_argument('--num_node', type=int, default=1,
                          help='Number of node used for distributed training')
        # this is used for distributed training. not implemented yet
        self.add_argument('--node_rank', type=int, default=0,
                          help='The rank of node, ranged from [0, num_node - 1]')
        # self.add_argument('--eval_chunk', type=int, default=8,
        #                   help='Number of chunk to corrupt for the whole graph to pervent OOM for evaluation. The smaller the more RAM it consumed.')
        self.add_argument('--mode', type=str, default='fit',
                          choices=['fit', 'eval'],
                          help='Whether to train the model or to evaluate.')
        # TODO: lingfei - use function to substitute brute force sampling
        self.add_argument('--init_strat', type=str, default='uniform',
                          choices=['uniform', 'xavier', 'constant'],
                          help='Initial strategy for embeddings.')
        self.add_argument('--num_workers', type=int, default=8,
                          help='Number of process to fetch data for training/validation dataset.')
        # hyper-parameter for hyperbolic embeddings
        self.add_argument('--init_scale', type=float, default=0.001,
                          help='Initialization scale for entity embedding, relation embedding, curvature, attention in hyperbolic embeddings')
        self.add_argument('--optimizer', type=str, default='Adagrad',
                          choices=['Adagrad', 'Adam'],
                          help='Optimizer for kg embeddings')
        self.add_argument('--no_save_log', action='store_false', dest='save_log',
                          help='If specified, dglke will not save log and result file to save path.')
        self.add_argument('--tqdm', action='store_true', dest='tqdm',
                          help='Use tqdm to visualize training and evaluation process. Note this might drag speed of process 0 for multi-GPU training.')
|
{"/python/dglke/util/argparser/__init__.py": ["/python/dglke/util/argparser/common_argparser.py", "/python/dglke/util/argparser/train_argparser.py"], "/python/dglke/models/pytorch/ke_tensor.py": ["/python/dglke/models/pytorch/ke_optimizer.py"], "/python/dglke/models/transe.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py"], "/python/dglke/util/argparser/train_argparser.py": ["/python/dglke/util/argparser/common_argparser.py"], "/python/dglke/models/conve.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py"], "/python/dglke/util/__init__.py": ["/python/dglke/util/misc.py", "/python/dglke/util/argparser/__init__.py", "/python/dglke/util/math.py", "/python/dglke/util/logger.py"], "/python/dglke/models/hypere.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/ke_model.py"], "/python/dglke/models/ke_model.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/pytorch/regularizer.py"]}
|
7,120
|
menjarleev/dgl-ke
|
refs/heads/master
|
/python/dglke/models/conve.py
|
import torch as th
from .ke_model import GEModel, EMB_INIT_EPS
from .pytorch.score_fun import ConvEScore
from .pytorch.ke_tensor import KGEmbedding
from dglke.util import get_device
class ConvEModel(GEModel):
    """ConvE knowledge-graph embedding model.

    Entity/relation embeddings of size ``hidden_dim`` are reshaped into
    2D grids of shape (h, w) with ``hidden_dim == h * w``; the head and
    relation grids are stacked and scored by the convolutional score
    function, plus a learned per-entity scalar bias.
    """

    def __init__(self, args, device, model_name):
        score_func = ConvEScore(hidden_dim=args.hidden_dim,
                                tensor_height=args.tensor_height,
                                dropout_ratio=args.dropout_ratio,
                                batch_norm=args.batch_norm)
        # learned scalar bias per entity, added into the score
        self._entity_bias = KGEmbedding(device)
        # 2D factorization of the hidden dimension: hidden_dim == h * w
        self.h = args.tensor_height
        self.w = args.hidden_dim // self.h
        self.gamma = args.gamma
        super(ConvEModel, self).__init__(args, device, model_name, score_func)

    def batch_concat(self, tensor_a, tensor_b, dim=-2):
        """Element-wise concatenation of two batches along ``dim``.

        2D inputs of shape (batch, hidden_dim) are first reshaped to
        (batch, h, w); 3D/4D inputs are validated/passed through.
        """
        def _reshape(tensor):
            if tensor.dim() == 2:
                batch, hidden_dim = tensor.shape
                # parentheses required: '%' binds tighter than '*'; the
                # original expression repeated the message string w times
                assert hidden_dim == self.h * self.w, \
                    'hidden dimension must match %d' % (self.h * self.w)
                return tensor.reshape(batch, self.h, self.w)
            elif tensor.dim() == 3:
                batch, h, w = tensor.shape
                # report the expected sizes (self.h/self.w), not the actual
                assert h == self.h, 'tensor height must match %d' % self.h
                assert w == self.w, 'tensor width must match %d' % self.w
                return tensor
            elif tensor.dim() == 4:
                return tensor
            else:
                raise ValueError('tensor must have dimension larger than 1')
        tensor_a = _reshape(tensor_a)
        tensor_b = _reshape(tensor_b)
        return th.cat([tensor_a, tensor_b], dim=dim)

    def mutual_concat(self, tensor_a, tensor_b, chunk_size_a=16, chunk_size_b=16, mode='AxB', dim=-2):
        """Broadcast concatenation of tensor_a and tensor_b.

        Used only when corrupting negative tensor_a: every chunk element of
        one side is paired with every chunk element of the other, then the
        pairs are stacked along ``dim`` and flattened to (N, 2*h, w).
        """
        def _chunk_reshape(tensor, _chunk_size):
            if tensor.dim() == 1:
                raise ValueError('tensor with dimension %d is not supported' % tensor.dim())
            elif tensor.dim() == 2:
                batch, hidden_dim = tensor.shape
                # parentheses required: '%' binds tighter than '*'
                assert hidden_dim == self.h * self.w, \
                    'hidden dimension must be %d' % (self.h * self.w)
                return tensor.reshape([-1, _chunk_size, self.h, self.w])
            elif tensor.dim() == 3:
                batch, h, w = tensor.shape
                assert h == self.h, 'tensor height must match %d.' % self.h
                assert w == self.w, 'tensor width must match %d.' % self.w
                return tensor.reshape([-1, _chunk_size, self.h, self.w])
            else:
                # the original silently returned None for dim > 3
                raise ValueError('tensor with dimension %d is not supported' % tensor.dim())
        tensor_a = _chunk_reshape(tensor_a, chunk_size_a)
        tensor_b = _chunk_reshape(tensor_b, chunk_size_b)
        num_chunk, _, h, w = tensor_a.shape
        # '==' instead of 'is': identity comparison of strings only worked
        # by accident of CPython interning
        if mode == 'AxB':
            tensor_a = tensor_a.unsqueeze(2)
            tensor_b = tensor_b.unsqueeze(1)
            tensor_a = tensor_a.repeat(1, 1, chunk_size_b, 1, 1)
            tensor_b = tensor_b.repeat(1, chunk_size_a, 1, 1, 1)
        elif mode == 'BxA':
            tensor_a = tensor_a.unsqueeze(1)
            tensor_b = tensor_b.unsqueeze(2)
            tensor_a = tensor_a.repeat(1, chunk_size_b, 1, 1, 1)
            tensor_b = tensor_b.repeat(1, 1, chunk_size_a, 1, 1)
        else:
            # the original silently concatenated unexpanded tensors
            raise ValueError('unknown mode %s' % mode)
        cat_res = th.cat([tensor_a, tensor_b], dim=dim)
        cat_res = cat_res.reshape(-1, 2 * h, w)
        return cat_res

    def categorize_embedding(self):
        """Register embeddings into entity-/relation-related groups."""
        self._entity_related_emb.update({'entity_emb': self._entity_emb,
                                         'entity_bias': self._entity_bias})
        self._relation_related_emb.update({'relation_emb': self._relation_emb})
        self._torch_model.update({'score_func': self._score_func})

    def initialize(self, n_entities, n_relations, init_strat='xavier'):
        """Allocate and initialize entity/relation embeddings and biases."""
        args = self.args
        eps = EMB_INIT_EPS
        emb_init = (args.gamma + eps) / args.hidden_dim
        device = get_device(args)
        # embeddings stay on CPU when mixing CPU/GPU or when relations are
        # partitioned across processes
        entity_related_device = th.device('cpu') if args.mix_cpu_gpu else device
        relation_related_device = th.device('cpu') if (args.mix_cpu_gpu or args.strict_rel_part or args.soft_rel_part) else device
        self._relation_emb.init(emb_init=emb_init, lr=self.lr, async_threads=None, num=n_relations, dim=self.hidden_dim,
                                init_strat=init_strat, device=relation_related_device)
        self._entity_emb.init(emb_init=emb_init, lr=self.lr, async_threads=None, num=n_entities, dim=self.hidden_dim,
                              init_strat=init_strat, device=entity_related_device)
        self._entity_bias.init(emb_init=0, lr=self.lr, async_threads=None, num=n_entities, dim=1, init_strat='uniform', device=entity_related_device)

    def pos_forward(self, pos_emb):
        """Score positive triples: gamma - f(concat(head, rel), tail, bias)."""
        concat_emb = self.batch_concat(pos_emb['head'], pos_emb['rel'], dim=-2)
        return self.gamma - self._score_func(embs=[concat_emb, pos_emb['tail'], pos_emb['tail_bias']],
                                             mode='all',
                                             bmm=False)

    def neg_forward(self, pos_emb, neg_emb, neg_type, chunk_size, neg_sample_size, train):
        """Score negative triples corrupting either head or tail entities."""
        args = self.args
        if neg_type == 'head':
            # pair every negative head with every positive relation in a chunk
            concat_emb = self.mutual_concat(neg_emb['neg'], pos_emb['rel'],
                                            chunk_size_a=neg_sample_size, chunk_size_b=chunk_size,
                                            mode='BxA')
            lhs = self._score_func(embs=[concat_emb], mode='lhs', bmm=False)
            tail_emb = pos_emb['tail'].reshape(-1, chunk_size, 1, args.hidden_dim)
            tail_bias = pos_emb['tail_bias'].reshape(-1, chunk_size, 1, 1)
            return self.gamma - self._score_func(embs=[lhs, tail_emb, tail_bias], mode='rhs', bmm=False)
        else:
            concat_emb = self.batch_concat(pos_emb['head'], pos_emb['rel'], dim=-2)
            b, h, w = concat_emb.shape
            lhs = self._score_func(embs=[concat_emb], mode='lhs', bmm=False).reshape(b // chunk_size, chunk_size, -1)
            tail_emb = neg_emb['neg']
            tail_bias = neg_emb['neg_bias']
            _, emb_dim = tail_emb.shape
            tail_emb = tail_emb.reshape(-1, neg_sample_size, emb_dim)
            tail_bias = tail_bias.reshape(-1, neg_sample_size, 1)
            # bmm: batched matmul against all negative tails at once
            score = self._score_func(embs=[lhs, tail_emb, tail_bias], mode='rhs', bmm=True)
            return self.gamma - score

    def acquire_embedding(self, data, gpu_id=-1, pos=True, train=True, neg_type='head'):
        """Fetch the embeddings a forward pass needs.

        ``trace=True`` only during training so gradients are recorded.
        Negative-head lookups skip the bias (it is not used on that path).
        """
        if pos and train:
            emb = {'head': self._entity_related_emb['entity_emb'](data['head'], gpu_id=gpu_id, trace=True),
                   'tail': self._entity_related_emb['entity_emb'](data['tail'], gpu_id=gpu_id, trace=True),
                   'rel': self._relation_related_emb['relation_emb'](data['rel'], gpu_id=gpu_id, trace=True),
                   'tail_bias': self._entity_related_emb['entity_bias'](data['tail'], gpu_id=gpu_id, trace=True), }
        elif not pos and train:
            emb = {'neg': self._entity_related_emb['entity_emb'](data['neg'], gpu_id=gpu_id, trace=True)}
            if neg_type == 'tail':
                emb['neg_bias'] = self._entity_related_emb['entity_bias'](data['neg'], gpu_id=gpu_id, trace=True)
        elif pos and not train:
            emb = {'head': self._entity_related_emb['entity_emb'](data['head'], gpu_id=gpu_id, trace=False),
                   'rel': self._relation_related_emb['relation_emb'](data['rel'], gpu_id=gpu_id, trace=False),
                   'tail': self._entity_related_emb['entity_emb'](data['tail'], gpu_id=gpu_id, trace=False),
                   'tail_bias': self._entity_related_emb['entity_bias'](data['tail'], gpu_id=gpu_id, trace=False)
                   }
        else:
            emb = {'neg': self._entity_related_emb['entity_emb'](data['neg'], gpu_id=gpu_id, trace=False),
                   'neg_bias': self._entity_related_emb['entity_bias'](data['neg'], gpu_id=gpu_id, trace=False)}
        return emb
|
{"/python/dglke/util/argparser/__init__.py": ["/python/dglke/util/argparser/common_argparser.py", "/python/dglke/util/argparser/train_argparser.py"], "/python/dglke/models/pytorch/ke_tensor.py": ["/python/dglke/models/pytorch/ke_optimizer.py"], "/python/dglke/models/transe.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py"], "/python/dglke/util/argparser/train_argparser.py": ["/python/dglke/util/argparser/common_argparser.py"], "/python/dglke/models/conve.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py"], "/python/dglke/util/__init__.py": ["/python/dglke/util/misc.py", "/python/dglke/util/argparser/__init__.py", "/python/dglke/util/math.py", "/python/dglke/util/logger.py"], "/python/dglke/models/hypere.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/ke_model.py"], "/python/dglke/models/ke_model.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/pytorch/regularizer.py"]}
|
7,121
|
menjarleev/dgl-ke
|
refs/heads/master
|
/python/dglke/util/__init__.py
|
from .misc import *
from .argparser import *
from .math import *
from .logger import *
from .multiprocess import *
|
{"/python/dglke/util/argparser/__init__.py": ["/python/dglke/util/argparser/common_argparser.py", "/python/dglke/util/argparser/train_argparser.py"], "/python/dglke/models/pytorch/ke_tensor.py": ["/python/dglke/models/pytorch/ke_optimizer.py"], "/python/dglke/models/transe.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py"], "/python/dglke/util/argparser/train_argparser.py": ["/python/dglke/util/argparser/common_argparser.py"], "/python/dglke/models/conve.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py"], "/python/dglke/util/__init__.py": ["/python/dglke/util/misc.py", "/python/dglke/util/argparser/__init__.py", "/python/dglke/util/math.py", "/python/dglke/util/logger.py"], "/python/dglke/models/hypere.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/ke_model.py"], "/python/dglke/models/ke_model.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/pytorch/regularizer.py"]}
|
7,122
|
menjarleev/dgl-ke
|
refs/heads/master
|
/python/dglke/models/hypere.py
|
from .pytorch.score_fun import ATTHScore
from .pytorch.ke_tensor import KGEmbedding
from .ke_model import GEModel
import torch as th
from dglke.util import *
class AttHModel(GEModel):
    """AttH: hyperbolic KG embedding combining rotation and reflection
    candidates via attention, scored by hyperbolic distance plus per-entity
    head/tail biases.
    """

    def __init__(self, args, device, model_name):
        score_func = ATTHScore()
        super(AttHModel, self).__init__(args, device, model_name, score_func)
        # per-relation 2x diagonal params: one half rotation, one half reflection
        self._rel_diag = KGEmbedding(device)
        # per-relation curvature of the hyperbolic space
        self._c = KGEmbedding(device)
        # per-relation context vector for attention over the two candidates
        self._context = KGEmbedding(device)
        # per-entity scalar biases added to the score
        self._head_bias = KGEmbedding(device)
        self._tail_bias = KGEmbedding(device)
        # self.multi_c = args.multi_c
        self.gamma = args.gamma
        # attention temperature: 1/sqrt(d)
        self._scale = (1. / np.sqrt(args.hidden_dim))

    def categorize_embedding(self):
        """Register embeddings into entity-/relation-related groups.

        BUG FIX: 'tail_bias' previously mapped to self._head_bias, so
        self._tail_bias (initialized in initialize()) was never looked up,
        trained or saved — tail biases silently reused the head-bias table.
        """
        self._entity_related_emb.update({'head_bias': self._head_bias,
                                         'tail_bias': self._tail_bias,
                                         'entity_emb': self._entity_emb,
                                         })
        self._relation_related_emb.update({'curvature': self._c,
                                           'context': self._context,
                                           'rel_diag': self._rel_diag,
                                           'relation_emb': self._relation_emb,
                                           })

    def initialize(self, n_entities, n_relations, init_strat='uniform'):
        """Allocate and initialize all AttH embedding tables."""
        args = self.args
        init_scale = args.init_scale
        device = get_device(args)
        # embeddings stay on CPU when mixing CPU/GPU or partitioning relations
        entity_related_device = th.device('cpu') if args.mix_cpu_gpu else device
        relation_related_device = th.device('cpu') if (args.mix_cpu_gpu or args.strict_rel_part or args.soft_rel_part) else device
        self._entity_emb.init(init_scale, lr=self.lr, async_threads=args.num_thread, num=n_entities, dim=self.hidden_dim,
                              init_strat='normal', optimizer=args.optimizer, device=entity_related_device)
        self._relation_emb.init(init_scale, lr=self.lr, async_threads=args.num_thread, num=n_relations, dim=self.hidden_dim,
                                init_strat='normal', optimizer=args.optimizer, device=relation_related_device)
        self._rel_diag.init(emb_init=(2, -1), lr=self.lr, async_threads=args.num_thread, num=n_relations, dim=self.hidden_dim * 2,
                            init_strat='random', optimizer=args.optimizer, device=relation_related_device)
        self._c.init(emb_init=1, lr=self.lr, async_threads=args.num_thread, num=n_relations, dim=1,
                     init_strat='constant', optimizer=args.optimizer, device=relation_related_device)
        self._context.init(emb_init=init_scale, lr=self.lr, async_threads=args.num_thread, num=n_relations, dim=self.hidden_dim,
                           init_strat='normal', optimizer=args.optimizer, device=relation_related_device)
        self._head_bias.init(emb_init=0, lr=self.lr, async_threads=args.num_thread, num=n_entities, dim=1,
                             init_strat='constant', optimizer=args.optimizer, device=entity_related_device)
        self._tail_bias.init(emb_init=0, lr=self.lr, async_threads=args.num_thread, num=n_entities, dim=1,
                             init_strat='constant', optimizer=args.optimizer, device=entity_related_device)

    def prepare_data(self, pos_emb, neg_emb, neg_type, chunk_size, neg_sample_size, train=True):
        """Reshape positive/negative embeddings into chunked 3D views for scoring."""
        pos_batch_size = pos_emb['head'].shape[0]
        neg_batch_size = neg_emb[neg_type].shape[0]
        pos_emb_reshape = {}
        neg_emb_reshape = {}
        if train and neg_type == 'head':
            for k in pos_emb.keys():
                pos_emb_reshape[k] = pos_emb[k].view(pos_batch_size // chunk_size, chunk_size, -1)
            for k in neg_emb.keys():
                neg_emb_reshape[k] = neg_emb[k].view(neg_batch_size // neg_sample_size, neg_sample_size, -1)
        elif train and neg_type == 'tail':
            pos_emb_reshape.update(pos_emb)
            pos_emb_reshape['head_bias'] = pos_emb_reshape['head_bias'].view(pos_batch_size // chunk_size,
                                                                             chunk_size, -1)
            for k in neg_emb.keys():
                neg_emb_reshape[k] = neg_emb[k].view(neg_batch_size // neg_sample_size, neg_sample_size, -1)
        elif not train and neg_type == 'head':
            # evaluation: a single chunk spanning the whole batch
            for k in pos_emb.keys():
                pos_emb_reshape[k] = pos_emb[k].view(1, pos_batch_size, -1)
            for k in neg_emb.keys():
                neg_emb_reshape[k] = neg_emb[k].view(1, neg_batch_size, -1)
        elif not train and neg_type == 'tail':
            pos_emb_reshape.update(pos_emb)
            pos_emb_reshape['head_bias'] = pos_emb_reshape['head_bias'].view(1, pos_batch_size, -1)
            for k in neg_emb.keys():
                neg_emb_reshape[k] = neg_emb[k].view(1, neg_batch_size, -1)
        return pos_emb_reshape, neg_emb_reshape

    def get_score(self, lhs_e, head_bias, rhs_e, tail_bias, c, comp='batch'):
        """Combine hyperbolic distance score with margin and entity biases."""
        score = self._score_func(lhs_e, rhs_e, c, comp)
        if comp == 'batch':
            return self.gamma + head_bias + tail_bias + score
        else:
            return self.gamma + head_bias.unsqueeze(2) + tail_bias.unsqueeze(1) + score.unsqueeze(-1)

    def pos_forward(self, pos_emb):
        """Score positive triples."""
        # get lhs: attention-weighted mix of rotated and reflected head,
        # mapped to the hyperbolic manifold and translated by the relation
        rel_c = pos_emb['curvature']
        head = pos_emb['head']
        rel_diag = pos_emb['rel_diag']
        context_vec = pos_emb['context_vec']
        rel = pos_emb['rel']
        head_bias = pos_emb['head_bias']
        c = th.nn.functional.softplus(rel_c)
        rot_mat, ref_mat = th.chunk(rel_diag, 2, dim=1)
        rot_q = givens_rotations(rot_mat, head).view(-1, 1, self.hidden_dim)
        ref_q = givens_reflection(ref_mat, head).view(-1, 1, self.hidden_dim)
        cands = th.cat([ref_q, rot_q], dim=1)
        context_vec = context_vec.view(-1, 1, self.hidden_dim)
        att_weights = th.sum(context_vec * cands * self._scale, dim=-1, keepdim=True)
        att_weights = th.nn.functional.softmax(att_weights, dim=1)
        att_q = th.sum(att_weights * cands, dim=1)
        lhs = expmap0(att_q, c)
        rel = expmap0(rel, c)
        lhs = project(mobius_add(lhs, rel, c), c)
        # get rhs
        rhs = pos_emb['tail']
        tail_bias = pos_emb['tail_bias']
        score = self.get_score(lhs, head_bias, rhs, tail_bias, c, comp='batch')
        return score

    def neg_forward(self, pos_emb, neg_emb, neg_type, chunk_size, neg_sample_size, train=True):
        """Score negative triples with corrupted heads or tails."""
        pos_emb, neg_emb = self.prepare_data(pos_emb, neg_emb, neg_type, chunk_size, neg_sample_size, train=train)
        if neg_type == 'head':
            # for head
            head = neg_emb['head']
            head_bias = neg_emb['head_bias'].unsqueeze(1)
            # for relation
            rel_diag = pos_emb['rel_diag']
            rel_c = pos_emb['curvature']
            rel = pos_emb['rel']
            context_vec = pos_emb['context_vec']
            # for tail
            rhs = pos_emb['tail'].unsqueeze(2)
            tail_bias = pos_emb['tail_bias'].unsqueeze(2)
            c = th.nn.functional.softplus(rel_c)
            rot_mat, ref_mat = th.chunk(rel_diag, 2, dim=-1)
            # batch, chunk, neg, hidden
            rot_q = givens_rotations(rot_mat, head, comp='mm').unsqueeze(-2)
            ref_q = givens_reflection(ref_mat, head, comp='mm').unsqueeze(-2)
            cands = th.cat([ref_q, rot_q], dim=-2)
            context_vec = context_vec.unsqueeze(2).unsqueeze(3)
            attn_weights = th.sum(context_vec * cands * self._scale, dim=-1, keepdim=True)
            attn_weights = th.nn.functional.softmax(attn_weights, dim=-2)
            att_q = th.sum(attn_weights * cands, dim=-2)
            expand_c = c.unsqueeze(2)
            lhs = expmap0(att_q, expand_c)
            rel = expmap0(rel, c).unsqueeze(2)
            lhs = project(mobius_add(lhs, rel, expand_c), expand_c)
            score = self.get_score(lhs, head_bias, rhs, tail_bias, expand_c, comp='batch')
            return score
        elif neg_type == 'tail':
            # for head
            head = pos_emb['head']
            head_bias = pos_emb['head_bias']
            # for relation
            rel_c = pos_emb['curvature']
            rel = pos_emb['rel']
            rel_diag = pos_emb['rel_diag']
            context_vec = pos_emb['context_vec']
            # for tail
            rhs = neg_emb['tail']
            tail_bias = neg_emb['tail_bias']
            c = th.nn.functional.softplus(rel_c)
            rot_mat, ref_mat = th.chunk(rel_diag, 2, dim=-1)
            rot_q = givens_rotations(rot_mat, head).view(-1, 1, self.hidden_dim)
            ref_q = givens_reflection(ref_mat, head).view(-1, 1, self.hidden_dim)
            cands = th.cat([ref_q, rot_q], dim=1)
            context_vec = context_vec.view(-1, 1, self.hidden_dim)
            att_weights = th.sum(context_vec * cands * self._scale, dim=-1, keepdim=True)
            att_weights = th.nn.functional.softmax(att_weights, dim=1)
            att_q = th.sum(att_weights * cands, dim=1)
            lhs = expmap0(att_q, c)
            rel = expmap0(rel, c)
            lhs = project(mobius_add(lhs, rel, c), c)
            c = c.view(c.shape[0] // chunk_size, chunk_size, -1)
            score = self.get_score(lhs.view(lhs.shape[0] // chunk_size, chunk_size, -1), head_bias, rhs, tail_bias, c,
                                   comp='mm')
            return score

    def acquire_embedding(self, data, gpu_id=-1, pos=True, train=True, neg_type='head'):
        """Fetch the embeddings a forward pass needs.

        ``trace=True`` only during training so gradients are recorded.
        """
        if pos and train:
            emb = {'curvature': self._relation_related_emb['curvature'](data['rel'], gpu_id=gpu_id, trace=True),
                   'rel': self._relation_related_emb['relation_emb'](data['rel'], gpu_id=gpu_id, trace=True),
                   'rel_diag': self._relation_related_emb['rel_diag'](data['rel'], gpu_id=gpu_id, trace=True),
                   'context_vec': self._relation_related_emb['context'](data['rel'], gpu_id=gpu_id, trace=True),
                   'head': self._entity_related_emb['entity_emb'](data['head'], gpu_id=gpu_id, trace=True),
                   'tail': self._entity_related_emb['entity_emb'](data['tail'], gpu_id=gpu_id, trace=True),
                   'head_bias': self._entity_related_emb['head_bias'](data['head'], gpu_id=gpu_id, trace=True),
                   'tail_bias': self._entity_related_emb['tail_bias'](data['tail'], gpu_id=gpu_id, trace=True), }
        elif not pos and train:
            if neg_type == 'head':
                emb = {'head': self._entity_related_emb['entity_emb'](data['neg'], gpu_id=gpu_id, trace=True),
                       'head_bias': self._entity_related_emb['head_bias'](data['neg'], gpu_id=gpu_id, trace=True)}
            else:
                emb = {'tail': self._entity_related_emb['entity_emb'](data['neg'], gpu_id=gpu_id, trace=True),
                       'tail_bias': self._entity_related_emb['tail_bias'](data['neg'], gpu_id=gpu_id, trace=True)}
        elif pos and not train:
            emb = {'curvature': self._relation_related_emb['curvature'](data['rel'], gpu_id=gpu_id, trace=False),
                   'head': self._entity_related_emb['entity_emb'](data['head'], gpu_id=gpu_id, trace=False),
                   'tail': self._entity_related_emb['entity_emb'](data['tail'], gpu_id=gpu_id, trace=False),
                   'rel': self._relation_related_emb['relation_emb'](data['rel'], gpu_id=gpu_id, trace=False),
                   'rel_diag': self._relation_related_emb['rel_diag'](data['rel'], gpu_id=gpu_id, trace=False),
                   'context_vec': self._relation_related_emb['context'](data['rel'], gpu_id=gpu_id, trace=False),
                   'head_bias': self._entity_related_emb['head_bias'](data['head'], gpu_id=gpu_id, trace=False),
                   'tail_bias': self._entity_related_emb['tail_bias'](data['tail'], gpu_id=gpu_id, trace=False), }
        else:
            emb = {'head': self._entity_related_emb['entity_emb'](data['neg'], gpu_id=gpu_id, trace=False),
                   'head_bias': self._entity_related_emb['head_bias'](data['neg'], gpu_id=gpu_id, trace=False),
                   'tail': self._entity_related_emb['entity_emb'](data['neg'], gpu_id=gpu_id, trace=False),
                   'tail_bias': self._entity_related_emb['tail_bias'](data['neg'], gpu_id=gpu_id, trace=False)}
        return emb
|
{"/python/dglke/util/argparser/__init__.py": ["/python/dglke/util/argparser/common_argparser.py", "/python/dglke/util/argparser/train_argparser.py"], "/python/dglke/models/pytorch/ke_tensor.py": ["/python/dglke/models/pytorch/ke_optimizer.py"], "/python/dglke/models/transe.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py"], "/python/dglke/util/argparser/train_argparser.py": ["/python/dglke/util/argparser/common_argparser.py"], "/python/dglke/models/conve.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py"], "/python/dglke/util/__init__.py": ["/python/dglke/util/misc.py", "/python/dglke/util/argparser/__init__.py", "/python/dglke/util/math.py", "/python/dglke/util/logger.py"], "/python/dglke/models/hypere.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/ke_model.py"], "/python/dglke/models/ke_model.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/pytorch/regularizer.py"]}
|
7,123
|
menjarleev/dgl-ke
|
refs/heads/master
|
/python/dglke/models/ke_model.py
|
# -*- coding: utf-8 -*-
#
# ke_model.py
#
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Knowledge Graph Embedding Model
1. TransE_1
2. TransE_2
3. TransR
4. RESCAL
5. DistMult
6. ComplEx
7. RotatE
8. SimplE
9. ConvE
"""
from abc import ABC, abstractmethod
from tqdm import trange, tqdm
import dgl
import torch as th
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel
from torch.optim import Adagrad, Adam
from torch.utils.data import DataLoader
from .pytorch.tensor_models import logsigmoid
from .pytorch.tensor_models import none
from .pytorch.score_fun import *
from .pytorch.ke_tensor import KGEmbedding
from .pytorch.tensor_models import cosine_dist
from .pytorch.tensor_models import l2_dist
from .pytorch.tensor_models import l1_dist
from .pytorch.tensor_models import dot_dist
from .pytorch.tensor_models import extended_jaccard_dist
from .pytorch.tensor_models import floor_divide
from .pytorch.loss import LossGenerator
from .pytorch.regularizer import Regularizer
from dglke.util import thread_wrapped_func, Logger, get_compatible_batch_size, prepare_save_path, get_scalar, to_device
from dglke.dataloader import EvalDataset, TrainDataset, SequentialTotalSampler, PartitionChunkDataset
from dglke.dataloader import get_dataset
import time
import logging
import json
# debug package
from pyinstrument import Profiler
EMB_INIT_EPS = 2.0
PRECISION_EPS = 1e-5
DEFAULT_INFER_BATCHSIZE = 1024
print = Logger.print
class BasicGEModel(object):
    """ Basic Graph Embedding Model

    Holds an entity embedding table, a relation embedding table and a score
    function, and provides inference utilities on top of them:
    link prediction (``link_predict``) and embedding similarity
    (``embed_sim``). Training is implemented by subclasses.
    """
    def __init__(self, device, model_name, score_func):
        # The graph is attached lazily via attach_graph(); it is only needed
        # when inference must filter/mask existing (positive) edges.
        self._g = None
        self._model_name = model_name
        self._device = device
        self._entity_emb = KGEmbedding(device)
        self._relation_emb = KGEmbedding(device)
        self._score_func = score_func

    def attach_graph(self, g, etid_field='tid', ntid_field='ntid'):
        """ Attach dataset into Graph Embedding Model

        Parameter
        ----------
        g: DGLGraph
            Input data for knowledge graph
        etid_field: str
            Edge feature name storing the edge type id
        ntid_field: str
            Node feature name storing the node type id

        Note
        ----
        If the input graph is DGLGraph, we assume that it uses a homogeneous graph
        to represent the heterogeneous graph. The edge type id is stored in etid_field
        and the node type id is stored in ntid_field.
        """
        self._etid_field = etid_field
        self._ntid_field = ntid_field
        assert isinstance(g, dgl.DGLGraph)
        self._g = g

    def load(self, model_path):
        """ Load Graph Embedding Model from model_path.

        The default entity embedding file is entity.npy.
        The default relation embedding file is relation.npy.

        Parameter
        ---------
        model_path : str
            Path to store the model information
        """
        # Subclasses (e.g. KGEModel) implement the actual loading.
        pass

    def save(self, model_path):
        """ Save Graph Embedding Model into model_path.

        All model related data are saved under model_path.
        The default entity embedding file is entity.npy.
        The default relation embedding file is relation.npy.

        Parameter
        ---------
        model_path : str
            Path to store the model information
        """
        # NOTE(review): the message mentions 'training' although this is
        # save(); the base model simply does not support persisting.
        assert False, 'Not support training now'

    def fit(self):
        """ Start training
        """
        assert False, 'Not support training now'

    def test(self):
        """ Start evaluation
        """
        assert False, 'Not support evaluation now'

    def _infer_score_func(self, head, rel, tail, triplet_wise=False, batch_size=DEFAULT_INFER_BATCHSIZE):
        """Compute raw scores for the given head/relation/tail id tensors.

        Returns a 1-D tensor of length N when ``triplet_wise`` is True,
        otherwise a (num_head, num_rel, num_tail) tensor covering every
        combination. Work is chunked by ``batch_size`` to avoid GPU OOM.
        """
        head_emb = self.entity_embed[head]
        rel_emb = self.relation_embed[rel]
        tail_emb = self.entity_embed[tail]

        num_head = head.shape[0]
        num_rel = rel.shape[0]
        num_tail = tail.shape[0]

        score = []
        if triplet_wise:
            # triplet wise score: head, relation and tail tensor have the same length N,
            # for i in range(N):
            #     result.append(score(head[i],rel[i],tail[i]))
            class FakeEdge(object):
                # Mimics a DGL edge batch (src/dst/data dicts) so the score
                # function's edge_func() can be reused unchanged.
                def __init__(self, head_emb, rel_emb, tail_emb, device=-1):
                    self._hobj = {}
                    self._robj = {}
                    self._tobj = {}
                    self._hobj['emb'] = head_emb.to(device)
                    self._robj['emb'] = rel_emb.to(device)
                    self._tobj['emb'] = tail_emb.to(device)

                @property
                def src(self):
                    return self._hobj

                @property
                def dst(self):
                    return self._tobj

                @property
                def data(self):
                    return self._robj

            # calculate scores in mini-batches
            # so we can use GPU to accelerate the speed with avoiding GPU OOM
            for i in range((num_head + batch_size - 1) // batch_size):
                sh_emb = head_emb[i * batch_size : (i + 1) * batch_size \
                    if (i + 1) * batch_size < num_head \
                    else num_head]
                sr_emb = rel_emb[i * batch_size : (i + 1) * batch_size \
                    if (i + 1) * batch_size < num_head \
                    else num_head]
                st_emb = tail_emb[i * batch_size : (i + 1) * batch_size \
                    if (i + 1) * batch_size < num_head \
                    else num_head]
                edata = FakeEdge(sh_emb, sr_emb, st_emb, self._device)
                score.append(self._score_func.edge_func(edata)['score'].to(th.device('cpu')))
            score = th.cat(score, dim=0)
            return score
        else:
            # head, relation and tail tensors has different size
            # for h_i in range(head):
            #     for r_j in range(relation):
            #         for t_k in range(tail):
            #             result.append(score(h_i, r_j, t_k))
            # The result will have shape (len(head), len(relation), len(tail))
            rel_emb = rel_emb.to(self._device)

            # calculating scores using mini-batch, the default batchsize if 1024
            # This can avoid OOM when using GPU
            for i in range((num_head + batch_size - 1) // batch_size):
                sh_emb = head_emb[i * batch_size : (i + 1) * batch_size \
                    if (i + 1) * batch_size < num_head \
                    else num_head]
                s_score = []
                sh_emb = sh_emb.to(self._device)
                for j in range((num_tail + batch_size - 1) // batch_size):
                    st_emb = tail_emb[j * batch_size : (j + 1) * batch_size \
                        if (j + 1) * batch_size < num_tail \
                        else num_tail]
                    st_emb = st_emb.to(self._device)
                    s_score.append(self._score_func.infer(sh_emb, rel_emb, st_emb).to(th.device('cpu')))
                score.append(th.cat(s_score, dim=2))
            score = th.cat(score, dim=0)
            return th.reshape(score, (num_head, num_rel, num_tail))

    def _exclude_pos(self, sidx, score, idx, head, rel, tail, topk, exec_mode, exclude_mode):
        """Decode sorted flat candidate indices back into (head, rel, tail)
        triplets and handle existing (positive) edges per ``exclude_mode``.

        Returns a list holding one tuple (head, rel, tail, score, mask);
        mask is None unless ``exclude_mode == 'mask'``.
        """
        g = self._g
        num_triples = idx.shape[0]
        num_head = 1 if exec_mode == 'batch_head' else head.shape[0]
        num_rel = 1 if exec_mode == 'batch_rel' else rel.shape[0]
        num_tail = 1 if exec_mode == 'batch_tail' else tail.shape[0]

        res_head = []
        res_rel = []
        res_tail = []
        res_score = []
        result = []
        if exclude_mode == 'exclude':
            # exclude existing edges
            cur_k = 0
            batch_size = topk
            while (cur_k < num_triples):
                cur_sidx = sidx[cur_k:cur_k + batch_size if cur_k + batch_size < num_triples else num_triples]
                cur_score = score[cur_k:cur_k + batch_size if cur_k + batch_size < num_triples else num_triples]
                cur_idx = idx[cur_sidx]

                # Decode the flat candidate index into concrete ids; the
                # decoding depends on which dimensions were enumerated.
                if exec_mode == 'triplet_wise':
                    cur_head = head[cur_idx]
                    cur_rel = rel[cur_idx]
                    cur_tail = tail[cur_idx]
                elif exec_mode == 'all':
                    tail_idx = cur_idx % num_tail
                    cur_idx = floor_divide(cur_idx, num_tail)
                    rel_idx = cur_idx % num_rel
                    cur_idx = floor_divide(cur_idx, num_rel)
                    head_idx = cur_idx % num_head
                    cur_head = head[head_idx]
                    cur_rel = rel[rel_idx]
                    cur_tail = tail[tail_idx]
                elif exec_mode == 'batch_head':
                    # head is a scalar here (one batch element); broadcast it.
                    tail_idx = cur_idx % num_tail
                    cur_idx = floor_divide(cur_idx, num_tail)
                    rel_idx = cur_idx % num_rel
                    cur_head = th.full((cur_sidx.shape[0],), head, dtype=head.dtype)
                    cur_rel = rel[rel_idx]
                    cur_tail = tail[tail_idx]
                elif exec_mode == 'batch_rel':
                    tail_idx = cur_idx % num_tail
                    cur_idx = floor_divide(cur_idx, num_tail)
                    head_idx = cur_idx % num_head
                    cur_head = head[head_idx]
                    cur_rel = th.full((cur_sidx.shape[0],), rel, dtype=rel.dtype)
                    cur_tail = tail[tail_idx]
                elif exec_mode == 'batch_tail':
                    rel_idx = cur_idx % num_rel
                    cur_idx = floor_divide(cur_idx, num_rel)
                    head_idx = cur_idx % num_head
                    cur_head = head[head_idx]
                    cur_rel = rel[rel_idx]
                    cur_tail = th.full((cur_sidx.shape[0],), tail, dtype=tail.dtype)

                # Find exising edges
                # It is expacted that the existing edges are much less than triples
                # The idea is: 1) we get existing edges using g.edge_ids
                #              2) sort edges according to source node id (O(nlog(n)), n is number of edges)
                #              3) sort candidate triples according to cur_head (O(mlog(m)), m is number of cur_head nodes)
                #              4) go over all candidate triples and compare with existing edges,
                #                 as both edges and candidate triples are sorted. filtering edges out
                #                 will take only O(n+m)
                #              5) sort the score again it taks O(klog(k))
                uid, vid, eid = g.edge_ids(cur_head, cur_tail, return_uv=True)
                rid = g.edata[self._etid_field][eid]

                for i in range(cur_head.shape[0]):
                    h = cur_head[i]
                    r = cur_rel[i]
                    t = cur_tail[i]

                    h_where = uid == h
                    t_where = vid[h_where] == t
                    r_where = rid[h_where][t_where]
                    edge_exist = False
                    if r_where.shape[0] > 0:
                        for c_r in r_where:
                            if c_r == r:
                                edge_exist = True
                                break

                    # Keep only candidates that are NOT existing edges.
                    if edge_exist is False:
                        res_head.append(h)
                        res_rel.append(r)
                        res_tail.append(t)
                        res_score.append(cur_score[i])

                if len(res_head) >= topk:
                    break

                # Not enough negatives collected yet: scan another slice of
                # candidates, at least 16 at a time to amortize edge lookups.
                cur_k += batch_size
                batch_size = topk - len(res_head) # check more edges
                batch_size = 16 if batch_size < 16 else batch_size # avoid tailing issue

            res_head = th.tensor(res_head)
            res_rel = th.tensor(res_rel)
            res_tail = th.tensor(res_tail)
            res_score = th.tensor(res_score)
            sidx = th.argsort(res_score, dim=0, descending=True)
            sidx = sidx[:topk] if topk < sidx.shape[0] else sidx
            result.append((res_head[sidx],
                           res_rel[sidx],
                           res_tail[sidx],
                           res_score[sidx],
                           None))
        else:
            # including the existing edges in the result
            topk = topk if topk < num_triples else num_triples
            sidx = sidx[:topk]
            idx = idx[sidx]

            if exec_mode == 'triplet_wise':
                head = head[idx]
                rel = rel[idx]
                tail = tail[idx]
            elif exec_mode == 'all':
                tail_idx = idx % num_tail
                idx = floor_divide(idx, num_tail)
                rel_idx = idx % num_rel
                idx = floor_divide(idx, num_rel)
                head_idx = idx % num_head
                head = head[head_idx]
                rel = rel[rel_idx]
                tail = tail[tail_idx]
            elif exec_mode == 'batch_head':
                tail_idx = idx % num_tail
                idx = floor_divide(idx, num_tail)
                rel_idx = idx % num_rel
                head = th.full((topk,), head, dtype=head.dtype)
                rel = rel[rel_idx]
                tail = tail[tail_idx]
            elif exec_mode == 'batch_rel':
                tail_idx = idx % num_tail
                idx = floor_divide(idx, num_tail)
                head_idx = idx % num_head
                head = head[head_idx]
                rel = th.full((topk,), rel, dtype=rel.dtype)
                tail = tail[tail_idx]
            elif exec_mode == 'batch_tail':
                rel_idx = idx % num_rel
                idx = floor_divide(idx, num_rel)
                head_idx = idx % num_head
                head = head[head_idx]
                rel = rel[rel_idx]
                tail = th.full((topk,), tail, dtype=tail.dtype)

            if exclude_mode == 'mask':
                # Find exising edges
                # It is expacted that the existing edges are much less than triples
                # The idea is: 1) we get existing edges using g.edge_ids
                #              2) sort edges according to source node id (O(nlog(n)), n is number of edges)
                #              3) sort candidate triples according to cur_head (O(mlog(m)), m is number of cur_head nodes)
                #              4) go over all candidate triples and compare with existing edges and mask them,
                #                 as both edges and candidate triples are sorted. filtering edges out
                #                 will take only O(n+m)
                uid, vid, eid = g.edge_ids(head, tail, return_uv=True)
                rid = g.edata[self._etid_field][eid]
                mask = th.full((head.shape[0],), False, dtype=th.bool)

                if len(uid) > 0:
                    for i in range(head.shape[0]):
                        h = head[i]
                        r = rel[i]
                        t = tail[i]

                        h_where = uid == h
                        t_where = vid[h_where] == t
                        r_where = rid[h_where][t_where]
                        if r_where.shape[0] > 0:
                            for c_r in r_where:
                                if c_r == r:
                                    mask[i] = True
                                    break

                result.append((head, rel, tail, score, mask))
            else:
                result.append((head, rel, tail, score, None))
        return result

    def _topk_exclude_pos(self, score, idx, head, rel, tail, topk, exec_mode, exclude_mode):
        """ Generate topk most relevent triplets and corresponding scores.

        It takes following steps:

          1) find topk elements
          2) sort topk elements in descending order
          3) call _exclude_pos if figure out existing edges
        """
        if exclude_mode == 'exclude':
            if idx.shape[0] < topk * 4: # TODO(xiangsx): Find a better value of topk * n
                topk_score, topk_sidx = th.topk(score, k=idx.shape[0], dim=0)
                sidx = th.argsort(topk_score, dim=0, descending=True)
                sidx = topk_sidx[sidx]
                result = self._exclude_pos(sidx=sidx,
                                           score=topk_score,
                                           idx=idx,
                                           head=head,
                                           rel=rel,
                                           tail=tail,
                                           topk=topk,
                                           exec_mode=exec_mode,
                                           exclude_mode=exclude_mode)
            else:
                # Over-fetch topk*4 candidates: some will be dropped as
                # existing edges, so we need spares.
                topk_score, topk_sidx = th.topk(score, k= topk * 4, dim=0)
                sidx = th.argsort(topk_score, dim=0, descending=True)
                sidx = topk_sidx[sidx]
                result = self._exclude_pos(sidx=sidx,
                                           score=topk_score,
                                           idx=idx,
                                           head=head,
                                           rel=rel,
                                           tail=tail,
                                           topk=topk,
                                           exec_mode=exec_mode,
                                           exclude_mode=exclude_mode)
                # NOTE(review): result is a list holding ONE tuple, so
                # len(result) is 1 and this fallback fires whenever topk > 1,
                # re-sorting the full score vector. Presumably the intent was
                # to count returned triplets — verify before changing.
                if len(result) < topk:
                    sidx = th.argsort(score, dim=0, descending=True)
                    result = self._exclude_pos(sidx=sidx,
                                               score=score[sidx],
                                               idx=idx,
                                               head=head,
                                               rel=rel,
                                               tail=tail,
                                               topk=topk,
                                               exec_mode=exec_mode,
                                               exclude_mode=exclude_mode)
        else:
            topk = idx.shape[0] if idx.shape[0] < topk else topk
            topk_score, topk_sidx = th.topk(score, k=topk, dim=0)
            sidx = th.argsort(topk_score, dim=0, descending=True)
            sidx = topk_sidx[sidx]
            result = self._exclude_pos(sidx=sidx,
                                       score=topk_score,
                                       idx=idx,
                                       head=head,
                                       rel=rel,
                                       tail=tail,
                                       topk=topk,
                                       exec_mode=exec_mode,
                                       exclude_mode=exclude_mode)
        return result

    def link_predict(self, head=None, rel=None, tail=None, exec_mode='all', sfunc='none', topk=10, exclude_mode=None, batch_size=DEFAULT_INFER_BATCHSIZE):
        """ Predicts missing entities or relations in a triplet.

        Given head_id, relation_id and tail_id, return topk most relevent triplet.

        Parameters
        ----------
        head: th.Tensor
            A tensor of head entity id.
        rel: th.Tensor
            A tensor of relation id.
        tail: th.Tensor
            A tensor of tail entity id.
        exec_mode: str
            How to calculate scores for triplets and calculate topK:

              * triplet_wise: head, relation and tail lists have the same length N,
                and we calculate the similarity triplet by triplet:
                ``result = topK([score(h_i, r_i, t_i) for i in N])``,
                the result shape will be (K,)
              * all: three lists of head, relation and tail ids are provided as H, R and T,
                and we calculate all possible combinations of all triplets (h_i, r_j, t_k):
                ``result = topK([[[score(h_i, r_j, t_k) for each h_i in H] for each r_j in R] for each t_k in T])``,
                the result shape will be (K,)
              * batch_head: three lists of head, relation and tail ids are provided as H, R and T
                and we calculate topK for each element in head:
                ``result = topK([[score(h_i, r_j, t_k) for each r_j in R] for each t_k in T]) for each h_i in H``
                the result shape will be (sizeof(H), K)
              * batch_rel: three lists of head, relation and tail ids are provided as H, R and T,
                and we calculate topK for each element in relation:
                ``result = topK([[score(h_i, r_j, t_k) for each h_i in H] for each t_k in T]) for each r_j in R``,
                the result shape will be (sizeof(R), K)
              * batch_tail: three lists of head, relation and tail ids are provided as H, R and T,
                and we calculate topK for each element in tail:
                ``result = topK([[score(h_i, r_j, t_k) for each h_i in H] for each r_j in R]) for each t_k in T``,
                the result shape will be (sizeof(T), K)
        sfunc: str
            What kind of score is used in ranking and will be output:

              * none: $score = x$
              * logsigmoid: $score = log(sigmoid(x))
        topk: int
            Return top k results
        exclude_mode: str
            Whether to exclude positive edges:

              * None: Do not exclude positive edges.
              * 'mask': Return topk edges and a mask indicating which one is positive edge.
              * 'exclude': Exclude positive edges, the returned k edges will be missing edges in the graph.

        Return
        ------
        A list of (head_idx, rel_idx, tail_idx, score)
        """
        # None means "enumerate every entity/relation".
        if head is None:
            head = th.arange(0, self.num_entity)
        else:
            head = th.tensor(head)
        if rel is None:
            rel = th.arange(0, self.num_rel)
        else:
            rel = th.tensor(rel)
        if tail is None:
            tail = th.arange(0, self.num_entity)
        else:
            tail = th.tensor(tail)

        num_head = head.shape[0]
        num_rel = rel.shape[0]
        num_tail = tail.shape[0]

        if sfunc == 'none':
            sfunc = none
        else:
            sfunc = logsigmoid

        # if exclude_mode is not None, we need a graph to do the edge filtering
        assert (self._g is not None) or (exclude_mode is None), \
            'If exclude_mode is not None, please use load_graph() to initialize ' \
            'a graph for edge filtering.'
        if exec_mode == 'triplet_wise':
            assert num_head == num_rel, \
                'For triplet wise exection mode, head, relation and tail lists should have same length'
            assert num_head == num_tail, \
                'For triplet wise exection mode, head, relation and tail lists should have same length'

            with th.no_grad():
                raw_score = self._infer_score_func(head, rel, tail, triplet_wise=True, batch_size=batch_size)
                score = sfunc(raw_score)
                idx = th.arange(0, num_head)

                result = self._topk_exclude_pos(score=score,
                                                idx=idx,
                                                head=head,
                                                rel=rel,
                                                tail=tail,
                                                topk=topk,
                                                exec_mode=exec_mode,
                                                exclude_mode=exclude_mode)
        elif exec_mode == 'all':
            result = []
            with th.no_grad():
                raw_score = self._infer_score_func(head, rel, tail)
                raw_score = th.reshape(raw_score, (head.shape[0]*rel.shape[0]*tail.shape[0],))
                score = sfunc(raw_score)
            idx = th.arange(0, num_head * num_rel * num_tail)

            result = self._topk_exclude_pos(score=score,
                                            idx=idx,
                                            head=head,
                                            rel=rel,
                                            tail=tail,
                                            topk=topk,
                                            exec_mode=exec_mode,
                                            exclude_mode=exclude_mode)
        elif exec_mode == 'batch_head':
            result = []
            with th.no_grad():
                raw_score = self._infer_score_func(head, rel, tail)

            # One independent topK per head element.
            for i in range(num_head):
                score = sfunc(th.reshape(raw_score[i,:,:], (rel.shape[0]*tail.shape[0],)))
                idx = th.arange(0, num_rel * num_tail)

                res = self._topk_exclude_pos(score=score,
                                             idx=idx,
                                             head=head[i],
                                             rel=rel,
                                             tail=tail,
                                             topk=topk,
                                             exec_mode=exec_mode,
                                             exclude_mode=exclude_mode)

                result.append(res[0])
        elif exec_mode == 'batch_rel':
            result = []
            with th.no_grad():
                raw_score = self._infer_score_func(head, rel, tail)

            # One independent topK per relation element.
            for i in range(num_rel):
                score = sfunc(th.reshape(raw_score[:,i,:], (head.shape[0]*tail.shape[0],)))
                idx = th.arange(0, num_head * num_tail)

                res = self._topk_exclude_pos(score=score,
                                             idx=idx,
                                             head=head,
                                             rel=rel[i],
                                             tail=tail,
                                             topk=topk,
                                             exec_mode=exec_mode,
                                             exclude_mode=exclude_mode)

                result.append(res[0])
        elif exec_mode == 'batch_tail':
            result = []
            with th.no_grad():
                raw_score = self._infer_score_func(head, rel, tail)

            # One independent topK per tail element.
            for i in range(num_tail):
                score = sfunc(th.reshape(raw_score[:,:,i], (head.shape[0]*rel.shape[0],)))
                idx = th.arange(0, num_head * num_rel)

                res = self._topk_exclude_pos(score=score,
                                             idx=idx,
                                             head=head,
                                             rel=rel,
                                             tail=tail[i],
                                             topk=topk,
                                             exec_mode=exec_mode,
                                             exclude_mode=exclude_mode)

                result.append(res[0])
        else:
            assert False, 'unknow execution mode type {}'.format(exec_mode)

        return result

    def _embed_sim(self, head, tail, emb, sfunc='cosine', bcast=False, pair_ws=False, topk=10):
        """Shared implementation behind embed_sim(): rank pairs drawn from
        ``emb`` by the chosen similarity function, in mini-batches."""
        batch_size=DEFAULT_INFER_BATCHSIZE
        if head is None:
            head = th.arange(0, emb.shape[0])
        else:
            head = th.tensor(head)
        if tail is None:
            tail = th.arange(0, emb.shape[0])
        else:
            tail = th.tensor(tail)
        head_emb = emb[head]
        tail_emb = emb[tail]

        # NOTE(review): an unrecognized sfunc leaves sim_func unbound and
        # raises NameError below — callers must pass one of the listed names.
        if sfunc == 'cosine':
            sim_func = cosine_dist
        elif sfunc == 'l2':
            sim_func = l2_dist
        elif sfunc == 'l1':
            sim_func = l1_dist
        elif sfunc == 'dot':
            sim_func = dot_dist
        elif sfunc == 'ext_jaccard':
            sim_func = extended_jaccard_dist

        if pair_ws is True:
            result = []
            # chunked cal score
            score = []
            num_head = head.shape[0]
            num_tail = tail.shape[0]

            # calculating scores using mini-batch, the default batchsize if 1024
            # This can avoid OOM when using GPU
            for i in range((num_head + batch_size - 1) // batch_size):
                sh_emb = head_emb[i * batch_size : (i + 1) * batch_size \
                    if (i + 1) * batch_size < num_head \
                    else num_head]
                sh_emb = sh_emb.to(self._device)
                st_emb = tail_emb[i * batch_size : (i + 1) * batch_size \
                    if (i + 1) * batch_size < num_head \
                    else num_head]
                st_emb = st_emb.to(self._device)
                score.append(sim_func(sh_emb, st_emb, pw=True).to(th.device('cpu')))
            score = th.cat(score, dim=0)

            topk_score, topk_sidx = th.topk(score,
                                            k=topk if score.shape[0] > topk else score.shape[0],
                                            dim=0)
            sidx = th.argsort(topk_score, dim=0, descending=True)
            sidx = topk_sidx[sidx]
            score = score[sidx]
            result.append((head[sidx],
                           tail[sidx],
                           score))
        else:
            num_head = head.shape[0]
            num_tail = tail.shape[0]

            # calculating scores using mini-batch, the default batchsize if 1024
            # This can avoid OOM when using GPU
            score = []
            for i in range((num_head + batch_size - 1) // batch_size):
                sh_emb = head_emb[i * batch_size : (i + 1) * batch_size \
                    if (i + 1) * batch_size < num_head \
                    else num_head]
                sh_emb = sh_emb.to(self._device)
                s_score = []
                for j in range((num_tail + batch_size - 1) // batch_size):
                    st_emb = tail_emb[j * batch_size : (j + 1) * batch_size \
                        if (j + 1) * batch_size < num_tail \
                        else num_tail]
                    st_emb = st_emb.to(self._device)
                    s_score.append(sim_func(sh_emb, st_emb).to(th.device('cpu')))
                score.append(th.cat(s_score, dim=1))
            score = th.cat(score, dim=0)

            if bcast is False:
                result = []
                idx = th.arange(0, num_head * num_tail)
                score = th.reshape(score, (num_head * num_tail, ))

                topk_score, topk_sidx = th.topk(score,
                                                k=topk if score.shape[0] > topk else score.shape[0],
                                                dim=0)
                sidx = th.argsort(topk_score, dim=0, descending=True)
                score = topk_score[sidx]
                sidx = topk_sidx[sidx]
                idx = idx[sidx]
                # Recover (head, tail) indices from the flat index.
                tail_idx = idx % num_tail
                idx = floor_divide(idx, num_tail)
                head_idx = idx % num_head

                result.append((head[head_idx],
                               tail[tail_idx],
                               score))
            else: # bcast at head
                result = []
                for i in range(num_head):
                    i_score = score[i]

                    topk_score, topk_sidx = th.topk(i_score,
                                                    k=topk if i_score.shape[0] > topk else i_score.shape[0],
                                                    dim=0)
                    sidx = th.argsort(topk_score, dim=0, descending=True)
                    i_score = topk_score[sidx]
                    idx = topk_sidx[sidx]

                    result.append((th.full((topk,), head[i], dtype=head[i].dtype),
                                   tail[idx],
                                   i_score))

        return result

    def embed_sim(self, left=None, right=None, embed_type='entity', sfunc='cosine', bcast=False, pair_ws=False, topk=10):
        r""" Finds the most similar entity/relation embeddings for
        some pre-defined similarity functions given a set of
        entities or relations.

        Parameters
        ----------
        left: th.Tensor
            A tensor of left object id.
        right: th.Tensor
            A tensor of right object id.
        embed_type: str
            Whether it is using entity embedding or relation embedding.
            If `entity`, it is entity embedding.
            If 'relation', it is relation embedding.
        sfunc: str
            What kind of similarity function is used in ranking and will be output:

              * cosine: use cosine similarity, score = $\frac{x \cdot y}{||x||_2||y||_2}$'
              * l2: use l2 similarity, score = -$||x - y||_2$
              * l1: use l1 similarity, score = -$||x - y||_1$
              * dot: use dot product similarity, score = $x \cdot y$
              * ext_jaccard: use extended jaccard similarity, score = $\frac{x \cdot y}{||x||_{2}^{2} + ||y||_{2}^{2} - x \cdot y}$
        bcast: bool
            If True, both left and right objects are provided as L and R,, and we calculate topK for each element in L:

              * 'result = topK([score(l_i, r_j) for r_j in R]) for l_j in L, the result shape will be (sizeof(L), K)

            Default: False
        pair_ws: bool
            If True, both left and right objects are provided with the same length N, and we will calculate the similarity pair by pair:

              * result = topK([score(l_i, r_i)]) for i in N, the result shape will be (K,)

            Default: False
        topk: int
            Return top k results

        Note
        ----
        If both bcast and pair_ws is False, both left and right objects are provided as L and R,
        and we calculate all possible combinations of (l_i, r_j):
        ``result = topK([[score(l_i, rj) for l_i in L] for r_j in R])``,
        the result shape will be (K,)

        Return
        ------
        A list of (left_idx, right_idx, sim_score)
        """
        if embed_type == 'entity':
            emb = self.entity_embed
        elif embed_type == 'relation':
            emb = self.relation_embed
        else:
            assert False, 'emb should entity or relation'

        return self._embed_sim(head=left,
                               tail=right,
                               emb=emb,
                               sfunc=sfunc,
                               bcast=bcast,
                               pair_ws=pair_ws,
                               topk=topk)

    @property
    def model_name(self):
        # Name of the scoring model, e.g. 'TransE', 'DistMult'.
        return self._model_name

    @property
    def entity_embed(self):
        return self._entity_emb.emb

    @property
    def relation_embed(self):
        return self._relation_emb.emb

    @property
    def num_entity(self):
        # -1 signals that no entity embedding has been loaded yet.
        return -1 if self.entity_embed is None else self.entity_embed.shape[0]

    @property
    def num_rel(self):
        # -1 signals that no relation embedding has been loaded yet.
        return -1 if self.relation_embed is None else self.relation_embed.shape[0]

    @property
    def graph(self):
        return self._g
class KGEModel(BasicGEModel):
    """Knowledge-graph embedding model whose entity/relation tables and
    score-function state are restored from files on disk."""
    def __init__(self, device, model_name, score_func):
        super(KGEModel, self).__init__(device, model_name, score_func)

    def load(self, model_path):
        """Restore entity.npy / relation.npy embeddings and the score
        function's extra state from *model_path*."""
        for emb, fname in ((self._entity_emb, 'entity.npy'),
                           (self._relation_emb, 'relation.npy')):
            emb.load(model_path, fname)
        self._score_func.load(model_path, self.model_name)
class TransEModel(KGEModel):
    """TransE (translation-based) model scored with an L2 distance."""
    def __init__(self, device, gamma):
        self._gamma = gamma  # margin, kept for score-function reconstruction
        super(TransEModel, self).__init__(device, 'TransE', TransEScore(gamma, 'l2'))
class TransE_l2Model(KGEModel):
    """TransE variant explicitly using the L2 distance score."""
    def __init__(self, device, gamma):
        self._gamma = gamma  # margin, kept for score-function reconstruction
        super(TransE_l2Model, self).__init__(device, 'TransE_l2', TransEScore(gamma, 'l2'))
class TransE_l1Model(KGEModel):
    """TransE variant explicitly using the L1 distance score."""
    def __init__(self, device, gamma):
        self._gamma = gamma  # margin, kept for score-function reconstruction
        super(TransE_l1Model, self).__init__(device, 'TransE_l1', TransEScore(gamma, 'l1'))
class TransRModel(KGEModel):
    """TransR model; relation-specific projections live in their own
    KGEmbedding owned by the score function."""
    def __init__(self, device, gamma):
        self._gamma = gamma
        # TransR score initialization is done at fit or load model, so the
        # entity/relation dimensions start as -1 placeholders.
        super(TransRModel, self).__init__(
            device, 'TransR', TransRScore(gamma, KGEmbedding(device), -1, -1))

    def load(self, model_path):
        """Load embeddings, then backfill the score function's dimensions
        from the loaded tables."""
        super(TransRModel, self).load(model_path)
        self._score_func.relation_dim = self._relation_emb.emb.shape[1]
        self._score_func.entity_dim = self._entity_emb.emb.shape[1]
class DistMultModel(KGEModel):
    """DistMult model (bilinear-diagonal score); no extra hyper-parameters."""
    def __init__(self, device):
        super(DistMultModel, self).__init__(device, 'DistMult', DistMultScore())
class ComplExModel(KGEModel):
    """ComplEx model (complex-valued bilinear score); no extra hyper-parameters."""
    def __init__(self, device):
        super(ComplExModel, self).__init__(device, 'ComplEx', ComplExScore())
class RESCALModel(KGEModel):
    """RESCAL model; each relation is a full matrix, so its flattened
    embedding width is entity_dim * relation_dim."""
    def __init__(self, device):
        # Dimensions are unknown until load(); -1 acts as a placeholder.
        super(RESCALModel, self).__init__(device, 'RESCAL', RESCALScore(-1, -1))

    def load(self, model_path):
        """Load embeddings, then derive the score function's dimensions.

        relation_dim must be computed after entity_dim since the relation
        table stores flattened entity_dim x relation_dim matrices.
        """
        super(RESCALModel, self).load(model_path)
        self._score_func.entity_dim = self._entity_emb.emb.shape[1]
        self._score_func.relation_dim = self._relation_emb.emb.shape[1] // self._score_func.entity_dim
class RotatEModel(KGEModel):
    """RotatE model (rotation in complex space)."""
    def __init__(self, device, gamma):
        self._gamma = gamma
        # emb_init is unknown until embeddings are loaded; start it at 0.
        super(RotatEModel, self).__init__(device, 'RotatE', RotatEScore(gamma, 0))

    def load(self, model_path):
        """Load embeddings, then recompute emb_init for the score function.

        Entities are stored as interleaved real/imaginary parts, so the
        effective hidden dimension is half the stored width.
        """
        super(RotatEModel, self).load(model_path)
        half_dim = self._entity_emb.emb.shape[1] // 2
        self._score_func.emb_init = (self._gamma + EMB_INIT_EPS) / half_dim
class GNNModel(BasicGEModel):
    """Wraps embeddings produced by an external GNN with one of the classic
    KGE scoring functions for inference."""
    def __init__(self, device, model_name, gamma=0):
        if model_name in ('TransE', 'TransE_l2'):
            score_func = TransEScore(gamma, 'l2')
        elif model_name == 'TransE_l1':
            score_func = TransEScore(gamma, 'l1')
        elif model_name == 'DistMult':
            score_func = DistMultScore()
        else:
            # Unsupported model name: fail with an explicit message.
            assert model_name in ['TransE', 'TransE_l2', 'TransE_l1', 'DistMult'], \
                "For general purpose Scoring function for GNN, we only support TransE_l1, TransE_l2" \
                "DistMult, but {} is given.".format(model_name)
        super(GNNModel, self).__init__(device, model_name, score_func)

    def load(self, model_path):
        """Load the GNN-produced entity/relation tables from *model_path*."""
        for emb, fname in ((self._entity_emb, 'entity.npy'),
                           (self._relation_emb, 'relation.npy')):
            emb.load(model_path, fname)
class GEModel(ABC, BasicGEModel):
""" Graph Embedding model general framework
User need to implement abstract method by their own
"""
def __init__(self, args, device, model_name, score_func):
self.args = args
self.has_edge_importance = args.has_edge_importance
self.lr = args.lr
self.dist_train = (args.num_node * args.num_proc) != 1
self.hidden_dim = args.hidden_dim
self._optim = None
self.regularizer = Regularizer(args.regularization_coef, args.regularization_norm)
self._loss_gen = LossGenerator(args,
args.loss_genre,
args.neg_adversarial_sampling,
args.adversarial_temperature,
args.pairwise,
args.label_smooth)
# group embedding with learnable parameters to facilitate save, load, share_memory, etc
self._entity_related_emb = dict()
self._relation_related_emb = dict()
self._torch_model = dict()
self._global_relation_related_emb = None
super(GEModel, self).__init__(device, model_name, score_func)
def load(self, model_path):
""" Load all related parameters for embeddings.
Parameters
----------
model_path: str
The path where all embeddings are stored
"""
modules = [self._entity_related_emb, self._relation_related_emb, self._torch_model]
for m in modules:
for name, emb in m.items():
emb_file = name + ('.pth' if m is self._torch_model else '.npy')
emb.load(model_path, emb_file)
def save(self, emap_file, rmap_file):
""" Save embeddings related to entity, relation and score function.
Parameters
----------
emap_file
rmap_file
"""
args = self.args
model_path = args.save_path
if not os.path.exists(model_path):
os.mkdir(model_path)
print('Save model to {}'.format(args.save_path))
modules = [self._entity_related_emb,
self._relation_related_emb if self._global_relation_related_emb is None else self._global_relation_related_emb,
self._torch_model]
for m in modules:
for name, emb in m.items():
emb_file = name + ('.pth' if m is self._torch_model else '.npy')
emb.save(model_path, emb_file)
# We need to save the model configurations as well.
conf_file = os.path.join(args.save_path, 'config.json')
dict = {}
config = args
dict.update(vars(config))
dict.update({'emp_file': emap_file,
'rmap_file': rmap_file})
with open(conf_file, 'w') as outfile:
json.dump(dict, outfile, indent=4)
def share_memory(self):
""" dglke shares entity-related and relation-related embeddings across GPUs to accelerate training.
"""
modules = [self._entity_related_emb, self._relation_related_emb]
for m in modules:
for emb in m.values():
emb.share_memory()
def train(self):
""" Enable gradient backpropagation through computation graph.
"""
modules = [self._entity_related_emb, self._relation_related_emb, self._torch_model]
if self._global_relation_related_emb is not None:
modules += [self._global_relation_related_emb]
for m in modules:
for emb in m.values():
emb.train()
def eval(self):
""" Disable gradient backpropagation. When doing test, call this method to save GPU memory.
"""
modules = [self._entity_related_emb, self._relation_related_emb, self._torch_model]
if self._global_relation_related_emb is not None:
modules += [self._global_relation_related_emb]
for m in modules:
for emb in m.values():
emb.eval()
def get_param_list(self):
""" Get trainable parameters for weight regularization
Returns
-------
a list of trainable parameters
"""
param_list = []
modules = [self._entity_related_emb, self._relation_related_emb]
for m in modules:
for emb in m.values():
param_list += [emb.curr_emb()]
for torch_module in self._torch_model.values():
for params in torch_module.parameters():
param_list += [params]
return param_list
def update(self, gpu_id):
""" update all the parameters where there is gradient in their tensor in a sparse manner.
Parameters
----------
gpu_id: int
Which gpu to accelerate the calculation. If -1 is provided, cpu is used.
"""
modules = [self._entity_related_emb, self._relation_related_emb]
for m in modules:
for emb in m.values():
emb.update(gpu_id)
if self._optim is not None:
self._optim.step()
def create_async_update(self):
for emb in self._entity_related_emb.values():
emb.create_async_update()
def finish_async_update(self):
for emb in self._entity_related_emb.values():
emb.finish_async_update()
    def test(self):
        """Evaluate a trained model on the test split.

        Loads the dataset and the saved embeddings, then runs ranking
        evaluation, fanning out across multiple processes when more than one
        test process is configured.
        """
        # put it in train
        args = self.args
        if args.save_log:
            log_file = 'log.txt'
            result_file = 'result.txt'
            Logger.log_path = os.path.join(args.save_path, log_file)
            Logger.result_path = os.path.join(args.save_path, result_file)
        init_time_start = time.time()
        # load dataset and samplers
        dataset = get_dataset(args.data_path,
                              args.dataset,
                              args.format,
                              args.delimiter,
                              args.data_files,
                              args.has_edge_importance)

        # A negative eval sample size < 0 means "use every entity".
        if args.neg_sample_size_eval < 0:
            args.neg_sample_size_eval = dataset.n_entities
        args.batch_size_eval = get_compatible_batch_size(args.batch_size_eval, args.neg_sample_size_eval)

        # We need to ensure that the number of processes should match the number of GPUs.
        if len(args.gpu) > 1 and args.num_proc > 1:
            assert args.num_proc % len(args.gpu) == 0, \
                'The number of processes needs to be divisible by the number of GPUs'

        if args.neg_deg_sample_eval:
            assert not args.eval_filter, "if negative sampling based on degree, we can't filter positive edges."

        # need to change to num_nodes if use 0.6.0 dgl version
        self.initialize(dataset.n_entities, dataset.n_relations, args.init_strat)
        self.categorize_embedding()

        if self.dist_train:
            # share memory for multiprocess to access
            self.share_memory()

        # One test process per GPU at most.
        if len(args.gpu) > 1:
            args.num_test_proc = args.num_proc if args.num_proc < len(args.gpu) else len(args.gpu)
        else:
            args.num_test_proc = args.num_proc

        assert dataset.test is not None, 'test set is not provided'
        eval_dataset = EvalDataset(dataset, args)
        # The graph is needed so evaluation can filter positive edges.
        self.attach_graph(eval_dataset.g)
        self.load(args.save_path)

        print('Total initialize time {:.3f} seconds'.format(time.time() - init_time_start))

        # test
        start = time.time()
        self.eval()
        if args.num_test_proc > 1:
            # Fan out: each worker evaluates a shard and pushes its logs
            # into the queue.
            queue = mp.Queue(args.num_test_proc)
            procs = []
            for i in range(args.num_test_proc):
                proc = mp.Process(target=self.eval_proc, args=(i, eval_dataset, 'test', queue))
                procs.append(proc)
                proc.start()

            metrics = {}
            logs = []
            for i in range(args.num_test_proc):
                log = queue.get()
                logs = logs + log

            # Average every metric over all evaluated triples.
            for metric in logs[0].keys():
                metrics[metric] = sum([log[metric] for log in logs]) / len(logs)
            print("-------------- Test result --------------")
            for k, v in metrics.items():
                print('Test average {} : {}'.format(k, v))
            print("-----------------------------------------")

            for proc in procs:
                proc.join()
        else:
            self.eval_proc(rank=0, eval_dataset=eval_dataset, mode='test')
        print('testing takes {:.3f} seconds'.format(time.time() - start))
def fit(self):
""" The whole process for model to be trained, validated, and tested
"""
# put it in train
args = self.args
prepare_save_path(args)
if args.save_log:
log_file = 'log.txt'
result_file = 'result.txt'
Logger.log_path = os.path.join(args.save_path, log_file)
Logger.result_path = os.path.join(args.save_path, result_file)
print = Logger.print
init_time_start = time.time()
# load dataset and samplers
dataset = get_dataset(args.data_path,
args.dataset,
args.format,
args.delimiter,
args.data_files,
args.has_edge_importance)
if args.neg_sample_size_eval < 0:
args.neg_sample_size_eval = dataset.n_entities
args.batch_size = get_compatible_batch_size(args.batch_size, args.neg_sample_size)
args.batch_size_eval = get_compatible_batch_size(args.batch_size_eval, args.neg_sample_size_eval)
# We should turn on mix CPU-GPU training for multi-GPU training.
if len(args.gpu) > 1:
args.mix_cpu_gpu = True
if args.num_proc < len(args.gpu):
args.num_proc = len(args.gpu)
# We need to ensure that the number of processes should match the number of GPUs.
if len(args.gpu) > 1 and args.num_proc > 1:
assert args.num_proc % len(args.gpu) == 0, \
'The number of processes needs to be divisible by the number of GPUs'
# TODO: lingfei - degree based sampling is not supported currently
if args.neg_deg_sample_eval:
assert not args.eval_filter, "if negative sampling based on degree, we can't filter positive edges."
# partition training dataset here
train_dataset = TrainDataset(dataset, args, ranks=args.num_proc, has_importance=args.has_edge_importance)
args.strict_rel_part = args.mix_cpu_gpu and (train_dataset.cross_part is False)
args.soft_rel_part = args.mix_cpu_gpu and args.rel_part and train_dataset.cross_part is True
self.initialize(dataset.n_entities, dataset.n_relations, args.init_strat)
self.categorize_embedding()
eval_dataset = None
if args.valid or args.test:
if len(args.gpu) > 1:
args.num_test_proc = args.num_proc if args.num_proc < len(args.gpu) else len(args.gpu)
else:
args.num_test_proc = args.num_proc
if args.valid:
assert dataset.valid is not None, 'validation set is not provided'
if args.test:
assert dataset.test is not None, 'test set is not provided'
eval_dataset = EvalDataset(dataset, args)
self.attach_graph(eval_dataset.g)
if self.dist_train:
# share memory for multiprocess to access
self.share_memory()
emap_file = dataset.emap_fname
rmap_file = dataset.rmap_fname
print('Total initialize time {:.3f} seconds'.format(time.time() - init_time_start))
# print configuration
print('-' * 50)
for k, v in vars(args).items():
print('{} : {}'.format(k, v))
print('-' * 50)
# train
start = time.time()
rel_parts = train_dataset.rel_parts if args.strict_rel_part or args.soft_rel_part else None
cross_rels = train_dataset.cross_rels if args.soft_rel_part else None
self.train()
if args.num_proc > 1:
processes = []
barrier = mp.Barrier(args.num_proc)
for rank in range(args.num_proc):
p = mp.Process(target=self.train_mp,
args=(rank, train_dataset, eval_dataset, rel_parts, cross_rels, barrier))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
self.train_proc(0, train_dataset, eval_dataset, rel_parts, cross_rels)
print('training takes {} seconds'.format(time.time() - start))
if not args.no_save_emb:
self.save(emap_file, rmap_file)
# test
self.eval()
if args.test:
start = time.time()
if args.num_test_proc > 1:
queue = mp.Queue(args.num_test_proc)
procs = []
for i in range(args.num_test_proc):
proc = mp.Process(target=self.eval_mp, args=(i, eval_dataset, 'test', queue))
procs.append(proc)
proc.start()
metrics = {}
logs = []
for i in range(args.num_test_proc):
log = queue.get()
logs = logs + log
for metric in logs[0].keys():
metrics[metric] = sum([log[metric] for log in logs]) / len(logs)
print("-------------- Test result --------------")
for k, v in metrics.items():
print('Test average {} : {}'.format(k, v))
print("-----------------------------------------")
for proc in procs:
proc.join()
else:
self.eval_proc(rank=0, eval_dataset=eval_dataset, mode='test')
print('testing takes {:.3f} seconds'.format(time.time() - start))
def compute_ranking(self, pos_score, neg_score, data, mode='tail', eval_filter=True, self_loop_filter=True):
head, rel, tail, neg = data['head'], data['rel'], data['tail'], data['neg']
b_size = data['head'].shape[0]
pos_score = pos_score.view(b_size, -1)
neg_score = neg_score.view(b_size, -1)
ranking = th.zeros(b_size, 1, device=th.device('cpu'))
log = []
for i in range(b_size):
cand_idx = (neg_score[i] >= pos_score[i]).nonzero(as_tuple=False).cpu()
# there might be precision error where pos_score[i] actually equals neg_score[i, pos_entity[i]]
# we explicitly add this index to cand_idx to overcome this issue
if mode == 'tail':
if tail[i] not in cand_idx:
cand_idx = th.cat([cand_idx, tail[i].detach().cpu().view(-1, 1)], dim=0)
# here we filter out self-loop(head-relation-head)
if self_loop_filter and head[i] in cand_idx:
cand_idx = cand_idx[cand_idx != head[i]].view(-1, 1)
else:
if head[i] not in cand_idx:
cand_idx = th.cat([cand_idx, head[i].detach().cpu().view(-1, 1)], dim=0)
if self_loop_filter and tail[i] in cand_idx:
cand_idx = cand_idx[cand_idx != tail[i]].view(-1, 1)
cand_num = len(cand_idx)
if not eval_filter:
ranking[i] = cand_num
continue
if mode is 'tail':
select = self.graph.has_edges_between(head[i], neg[cand_idx[:, 0]]).nonzero(as_tuple=False)[:, 0]
if len(select) > 0:
select_idx = cand_idx[select].view(-1)
uid, vid, eid = self.graph.edge_ids(head[i], select_idx, return_uv=True)
else:
raise ValueError('at least one element should have the same score with pos_score. That is itself!')
else:
select = self.graph.has_edges_between(neg[cand_idx[:, 0]], tail[i]).nonzero(as_tuple=False)[:, 0]
if len(select) > 0:
select_idx = cand_idx[select].view(-1)
uid, vid, eid = self.graph.edge_ids(select_idx, tail[i], return_uv=True)
else:
raise ValueError('at least one element should have the same score with pos_score. That is itself!')
rid = self.graph.edata[self._etid_field][eid]
# - 1 to exclude rank for positive score itself
cand_num -= th.sum(rid == rel[i]) - 1
ranking[i] = cand_num
for i in range(b_size):
ranking_i = get_scalar(ranking[i])
log.append({
'MRR': 1.0 / ranking_i,
'MR': float(ranking_i),
'HITS@1': 1.0 if ranking_i <= 1 else 0.0,
'HITS@3': 1.0 if ranking_i <= 3 else 0.0,
'HITS@10': 1.0 if ranking_i <= 10 else 0.0
})
return ranking, log
# misc for DataParallelTraining
    def setup_model(self, rank, world_size, gpu_id):
        """ Set up score function for DistributedDataParallel.

        As the score function is a dense model, to train/eval it in parallel each
        dense torch module is moved to its process's GPU and (for distributed
        training) wrapped in DistributedDataParallel. A dense optimizer
        (Adagrad/Adam, per args.optimizer) is created over all module parameters.

        Parameters
        ----------
        rank : int
            process id in regards of world size
        world_size : int
            total number of process
        gpu_id : int
            which device should the model be put to, -1 for cpu otherwise gpu
        """
        args = self.args
        # move every dense module to this worker's device
        for name, module in self._torch_model.items():
            self._torch_model[name] = to_device(module, gpu_id)
        if self.dist_train and len(self._torch_model.items()) != 0:
            # configure MASTER_ADDR and MASTER_PORT manually in command line
            # NOTE(review): address/port are hard-coded here, so only single-node
            # NCCL process groups work out of the box.
            os.environ['MASTER_ADDR'] = '127.0.0.1'
            os.environ['MASTER_PORT'] = '8888'
            dist.init_process_group('nccl', rank=rank, world_size=world_size)
        # make DDP model
        # broadcast_buffers=False to enable batch normalization
        params = []
        for name, module in self._torch_model.items():
            self._torch_model[name] = module if not self.dist_train else DistributedDataParallel(module,
                                                                                                device_ids=[gpu_id],
                                                                                                broadcast_buffers=False)
            for param in self._torch_model[name].parameters():
                params += [param]
        # only build a dense optimizer when there are dense parameters at all
        if len(params) != 0:
            if args.optimizer == 'Adagrad':
                self._optim = Adagrad(params, lr=self.lr)
            elif args.optimizer == 'Adam':
                self._optim = Adam(params, lr=self.lr)
def cleanup(self):
""" destroy parallel process if necessary
Parameters
----------
dist_train : bool
whether it's distributed training or not
"""
if self.dist_train and len(self._torch_model) != 0:
dist.destroy_process_group()
else:
pass
def prepare_relation(self, device=None):
local_emb = {}
for k, v in self._relation_related_emb.items():
local_emb[k] = v.clone(device=device)
self._global_relation_related_emb = self._relation_related_emb
self._relation_related_emb = local_emb
def prepare_cross_rels(self, cross_rels):
for k, v in self._relation_related_emb.items():
v.setup_cross_rels(cross_rels, self._global_relation_related_emb[k])
    def writeback_relation(self, rank=0, rel_parts=None):
        """ Copy this worker's locally-trained relation embeddings back into the
        shared global embeddings.

        Parameters
        ----------
        rank : int
            worker id; selects this worker's slice of rel_parts.
        rel_parts : list
            per-worker relation index partitions; rel_parts[rank] are the
            relation ids this worker owns.
        """
        args = self.args
        idx = rel_parts[rank]
        for name, embeddings in self._relation_related_emb.items():
            # under soft partitioning, cross-partition relations are excluded
            # from the write-back (they are handled via the global embedding)
            if args.soft_rel_part:
                local_idx = embeddings.get_noncross_idx(idx)
            else:
                local_idx = idx
            # MARK - TBD, whether detach here
            self._global_relation_related_emb[name].emb[local_idx] = embeddings.emb.detach().clone().cpu()[local_idx]
    def train_proc(self, rank, train_dataset, eval_dataset, rel_parts=None, cross_rels=None, barrier=None):
        """ training process for fit(). it will read data, forward embedding data, compute loss and update param using gradients

        Parameters
        ----------
        rank : int
            process id in regards of world size
        train_dataset : KGDataset
            dataset used for training
        eval_dataset : KGDataset
            dataset used for evaluation
        rel_parts : list, optional
            per-worker relation partitions; required when strict/soft relation
            partitioning is enabled (passed through to writeback_relation).
        cross_rels : optional
            cross-partition relations, used only with soft relation partitioning.
        barrier : mp.Barrier, optional
            synchronization barrier across workers; used for forced sync and
            around validation when training is distributed.
        """
        # setup
        if rank == 0:
            profiler = Profiler()
            profiler.start()
        args = self.args
        world_size = args.num_proc * args.num_node
        # round-robin GPU assignment across workers; -1 means CPU
        if len(args.gpu) > 0:
            gpu_id = args.gpu[rank % len(args.gpu)] if args.num_proc > 1 else args.gpu[0]
        else:
            gpu_id = -1
        # setup optimizer, load embeddings into gpu, enable async_update
        if args.async_update:
            self.create_async_update()
        if args.strict_rel_part or args.soft_rel_part:
            self.prepare_relation(th.device(f'cuda:{gpu_id}'))
        if args.soft_rel_part:
            self.prepare_cross_rels(cross_rels)
        self.setup_model(rank, world_size, gpu_id)
        logs = []
        for arg in vars(args):
            logging.info('{:20}:{}'.format(arg, getattr(args, arg)))
        train_start = start = time.time()
        sample_time = 0
        update_time = 0
        forward_time = 0
        backward_time = 0
        # batch_size here is positive triples per step; negatives are chunked
        batch_size = args.batch_size // args.neg_sample_size
        partition_dataset = PartitionChunkDataset(train_dataset, rank, world_size, 'train', args.neg_sample_size, args.max_step, batch_size)
        partition_dataset.pin_memory()
        sampler = SequentialTotalSampler(batch_size=batch_size, max_step=args.max_step)
        dataloader = DataLoader(dataset=partition_dataset,
                                batch_size=batch_size,
                                shuffle=False,
                                sampler=sampler,
                                num_workers=0,
                                drop_last=False,
                                pin_memory=True)
        data_iter = iter(dataloader)
        step_range = trange(0, args.max_step, desc='train') if (rank == 0 and args.tqdm) else range(0, args.max_step)
        for step in step_range:
            # alternate head/tail corruption every other step
            neg_type = 'head' if step % 2 == 0 else 'tail'
            start1 = time.time()
            # get pos training data
            data = next(data_iter)
            data = {k: v.view(-1) for k, v in data.items()}
            sample_time += time.time() - start1
            loss, log = self.train_forward(data, neg_type, gpu_id)
            if rank == 0 and args.tqdm:
                step_range.set_postfix(loss=f'{loss.item():.4f}')
            forward_time += time.time() - start1
            start1 = time.time()
            if self._optim is not None:
                self._optim.zero_grad()
            loss.backward()
            backward_time += time.time() - start1
            # update embedding & dense_model using different optimizer, for dense_model, use regular pytorch optimizer
            # for embedding, use built-in Adagrad sync/async optimizer
            start1 = time.time()
            self.update(gpu_id)
            update_time += time.time() - start1
            logs.append(log)
            if args.force_sync_interval > 0 and (step + 1) % args.force_sync_interval == 0:
                barrier.wait()
            # periodic logging: average accumulated per-step logs, then reset timers
            if (step + 1) % args.log_interval == 0:
                for k in logs[0].keys():
                    v = sum(l[k] for l in logs) / len(logs)
                    print('[proc {}][Train]({}/{}) average {}: {}'.format(rank, (step + 1), args.max_step, k, v))
                logs = []
                print('[proc {}][Train] {} steps take {:.3f} seconds'.format(rank, args.log_interval,
                                                                             time.time() - start))
                print('[proc {}]sample: {:.3f}, forward: {:.3f}, backward: {:.3f}, update: {:.3f}'.format(
                    rank, sample_time, forward_time, backward_time, update_time))
                sample_time = 0
                update_time = 0
                forward_time = 0
                backward_time = 0
                start = time.time()
            # periodic validation; relation embeddings are written back first so
            # the evaluation sees this worker's latest values
            if args.valid and (step + 1) % args.eval_interval == 0 and step > 1 and eval_dataset is not None:
                valid_start = time.time()
                # for async update
                if args.strict_rel_part or args.soft_rel_part:
                    self.writeback_relation(rank, rel_parts)
                # forced sync for validation
                if self.dist_train:
                    barrier.wait()
                self.eval_proc(rank, eval_dataset, mode='valid')
                self.train()
                print('[proc {}]validation take {:.3f} seconds.'.format(rank, time.time() - valid_start))
                if args.soft_rel_part:
                    self.prepare_cross_rels(cross_rels)
                if self.dist_train:
                    barrier.wait()
        print('proc {} takes {:.3f} seconds'.format(rank, time.time() - train_start))
        if args.async_update:
            self.finish_async_update()
        if args.strict_rel_part or args.soft_rel_part:
            self.writeback_relation(rank, rel_parts)
        self.cleanup()
        if rank == 0:
            profiler.stop()
            print(profiler.output_text(unicode=False, color=False))
def eval_proc(self, rank, eval_dataset, mode='valid', queue=None):
if rank == 0:
profiler = Profiler()
profiler.start()
args = self.args
if len(args.gpu) > 0:
gpu_id = args.gpu[rank % len(args.gpu)] if args.num_proc > 1 else args.gpu[0]
else:
gpu_id = -1
world_size = args.num_proc * args.num_node
if mode is not 'valid':
self.setup_model(rank, world_size, gpu_id)
self.eval()
partition_dataset = PartitionChunkDataset(eval_dataset, rank, world_size, mode, None, None, None)
partition_dataset.pin_memory()
pos_dataloader = DataLoader(dataset=partition_dataset,
batch_size=args.batch_size_eval,
shuffle=False,
num_workers=0,
drop_last=False,
pin_memory=True)
data = dict()
data['neg'] = eval_dataset.g.nodes().clone()
with th.no_grad():
logs = []
iterator = tqdm(iter(pos_dataloader), desc='evaluation') if (rank == 0 and args.tqdm) else iter(pos_dataloader)
for pos_data in iterator:
# update data[-1] to all the nodes in the graph to perform corruption for all the nodes
data.update(pos_data)
log = self.test_forward(data, gpu_id)
logs += log
metrics = {}
if len(logs) > 0:
for metric in logs[0].keys():
metrics[metric] = sum([log[metric] for log in logs]) / len(logs)
if queue is not None:
queue.put(logs)
else:
for k, v in metrics.items():
print('[{}]{} average {}: {}'.format(rank, mode, k, v))
Logger.save_result(metrics)
if mode is not 'valid':
self.cleanup()
if rank == 0:
profiler.stop()
print(profiler.output_text(unicode=False, color=False))
    def train_forward(self, data, neg_type, gpu_id):
        """ Forward pass for one training batch.

        Scores the positive triples and their negatives, computes the sampling
        loss (optionally weighted by edge importance) plus regularization.

        Parameters
        ----------
        data : dict
            batch tensors ('head', 'rel', 'tail', negatives, optional 'impts').
        neg_type : str
            'head' or 'tail' — which side of the triple is corrupted.
        gpu_id : int
            device for embedding lookup, -1 for CPU.

        Returns
        -------
        (loss, log): scalar training loss and a dict of loss/regularization stats.
        """
        args = self.args
        # chunked negative sampling: chunk size equals the negative sample size
        chunk_size = args.neg_sample_size
        neg_sample_size = args.neg_sample_size
        pos_emb = self.acquire_embedding(data=data, gpu_id=gpu_id, pos=True, train=True, neg_type=neg_type)
        edge_impts = to_device(data['impts'], gpu_id) if args.has_edge_importance else None
        pos_score = self.pos_forward(pos_emb)
        neg_emb = self.acquire_embedding(data=data, gpu_id=gpu_id, pos=False, train=True, neg_type=neg_type)
        neg_score = self.neg_forward(pos_emb, neg_emb, neg_type, chunk_size, neg_sample_size, train=True)
        neg_score = neg_score.reshape(-1, neg_sample_size)
        loss, log = self._loss_gen.get_total_loss(pos_score, neg_score, edge_impts)
        # regularization over all trainable parameters, folded into loss and log
        reg, reg_log = self.regularizer(self.get_param_list())
        loss += reg
        log.update(reg_log)
        return loss, log
    def test_forward(self, data, gpu_id):
        """ Forward pass for one evaluation batch.

        Each positive triple is ranked twice: once against all-node head
        corruption and once against all-node tail corruption; the per-triple
        metric logs of both directions are concatenated.

        Parameters
        ----------
        data : dict
            batch tensors ('head', 'rel', 'tail') plus 'neg' holding every
            candidate (all graph nodes, see eval_proc).
        gpu_id : int
            device for embedding lookup, -1 for CPU.

        Returns
        -------
        list: per-triple metric dicts (MRR/MR/HITS@k) for both directions.
        """
        args = self.args
        log = []
        pos_emb = self.acquire_embedding(data, gpu_id, pos=True, train=False)
        neg_emb = self.acquire_embedding(data, gpu_id, pos=False, train=False)
        batch_size = data['head'].shape[0]
        num_node = len(data['neg'])
        pos_score = self.pos_forward(pos_emb)
        # at eval time the whole batch forms one chunk scored against all nodes
        neg_score_corr_head = self.neg_forward(pos_emb, neg_emb, 'head', chunk_size=batch_size, neg_sample_size=num_node, train=False)
        neg_score_corr_tail = self.neg_forward(pos_emb, neg_emb, 'tail', chunk_size=batch_size, neg_sample_size=num_node, train=False)
        ranking_corr_tail, log_corr_tail = self.compute_ranking(pos_score, neg_score_corr_tail, data, mode='tail',
                                                                eval_filter=args.eval_filter,
                                                                self_loop_filter=args.self_loop_filter)
        ranking_corr_head, log_corr_head = self.compute_ranking(pos_score, neg_score_corr_head, data, mode='head',
                                                                eval_filter=args.eval_filter,
                                                                self_loop_filter=args.self_loop_filter)
        log += log_corr_tail
        log += log_corr_head
        return log
    def prepare_data(self, pos_emb, neg_emb, neg_type, chunk_size, neg_sample_size, train=True):
        """ Prepare positive/negative embedding data for training/evaluation. This is the place to reshape tensor
        to enable operation like bmm takes place.

        The base implementation is an identity passthrough; subclasses override
        it when their score function needs chunk-shaped inputs.

        Parameters
        ----------
        pos_emb: dict
            dictionary containing all positive embeddings involved.
        neg_emb: dict
            dictionary containing all negative embeddings involved.
        neg_type: str
            choice: ['head', 'tail'], for this batch, triples are corrupted by neg_type.
        chunk_size: int
            normally to reshape positive embeddings from [batch, embbeding_size]
            to [\\frac{batch}{chunk_size}, chunk_size, embedding_size]
        neg_sample_size: int
            normally to reshape negative embeddings from [batch, embedding_size]
            to [\\frac{batch}{neg_sample_size}, neg_sample_size, embedding_size]
        train: bool
            prepare data for training or evaluation. Model for evaluation might have different behavior.

        Returns
        -------
        th.Tensor: reshaped pos_emb
        th.Tensor: reshaped neg_emb
        """
        return pos_emb, neg_emb
@thread_wrapped_func
def train_mp(self, rank, train_dataset, eval_dataset, rel_parts=None, cross_rels=None, barrier=None):
args = self.args
if args.num_proc > 1:
th.set_num_threads(args.num_thread)
self.train_proc(rank, train_dataset, eval_dataset, rel_parts, cross_rels, barrier)
@thread_wrapped_func
def eval_mp(self, rank, eval_dataset, mode='valid', queue=None):
args = self.args
if args.num_proc > 1:
th.set_num_threads(args.num_thread)
self.eval_proc(rank, eval_dataset, mode, queue)
    @abstractmethod
    def categorize_embedding(self):
        """ Partition this model's embeddings into groups after initialize().

        NOTE(review): subclasses presumably populate the entity-/relation-related
        embedding dicts used by prepare_relation/writeback_relation — confirm in
        concrete models.
        """
        pass
    @abstractmethod
    def initialize(self, n_entities, n_relations, init_strat='uniform'):
        """ Create entity/relation embeddings for the given graph size using the
        initialization strategy named by init_strat. Implemented by subclasses. """
        pass
    @abstractmethod
    def pos_forward(self, pos_emb):
        """ Score the positive triples from their embeddings (see train_forward /
        test_forward for how pos_emb is produced). Implemented by subclasses. """
        pass
    @abstractmethod
    def neg_forward(self, pos_emb, neg_emb, neg_type, chunk_size, neg_sample_size, train=True):
        """ Score positives against chunked negatives; neg_type selects whether
        heads or tails were corrupted. Implemented by subclasses. """
        pass
    @abstractmethod
    def acquire_embedding(self, data, gpu_id=-1, pos=True, train=True, neg_type='head'):
        """ Look up the embeddings for a batch: positive triples when pos=True,
        otherwise the negative candidates. Implemented by subclasses. """
        pass
|
{"/python/dglke/util/argparser/__init__.py": ["/python/dglke/util/argparser/common_argparser.py", "/python/dglke/util/argparser/train_argparser.py"], "/python/dglke/models/pytorch/ke_tensor.py": ["/python/dglke/models/pytorch/ke_optimizer.py"], "/python/dglke/models/transe.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py"], "/python/dglke/util/argparser/train_argparser.py": ["/python/dglke/util/argparser/common_argparser.py"], "/python/dglke/models/conve.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py"], "/python/dglke/util/__init__.py": ["/python/dglke/util/misc.py", "/python/dglke/util/argparser/__init__.py", "/python/dglke/util/math.py", "/python/dglke/util/logger.py"], "/python/dglke/models/hypere.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/ke_model.py"], "/python/dglke/models/ke_model.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/pytorch/regularizer.py"]}
|
7,124
|
menjarleev/dgl-ke
|
refs/heads/master
|
/python/dglke/models/pytorch/train_sampler.py
|
import torch as th
import dgl.backend as F
from torch.utils.data import Dataset
class TrainSampler(object):
    """ Iterator producing (pos, neg, neg_type, impts) training batches.

    Positive triples come from this rank's edge partition; negatives are drawn
    from a shuffled pool of all graph nodes, alternating head/tail corruption
    every step. Supports weighted sampling with replacement and automatic
    reshuffle/reset at the end of each epoch.
    """
    # TODO lingfei
    # 1. [x] support nagative mode: head, tail, chunk-head, chunk-tail
    # 2. [x] add rel_weight, node_weight to non-uniformly sample rels
    # 3. [x] infinite sample or not?
    # 4. [ ] num_workers -> multiprocess
    # 5. [x] replacement
    # 6. [x] reset
    # 7. [x] negative mode -> head, tail
    # 8. [x] neg_sample_size ?
    # 9. [x] chunk_size
    # 10.[ ] exclude_positive -> mask
    def __init__(self,
                 train_data,
                 rank,
                 batch_size,
                 shuffle,
                 rel_weight,
                 neg_sample_size,
                 chunk_size,
                 exclude_positive=False,
                 replacement=False,
                 reset=True,
                 drop_last=True):
        # seed_edges are the index of triple
        g = train_data.g
        seed_edges = train_data.edge_parts[rank]
        # no partition for this rank -> use every edge
        if seed_edges is None:
            seed_edges = F.arange(0, g.number_of_edges())
        assert batch_size % chunk_size == 0, 'batch size {} must be divisible by chunk size {} to enable chunk negative sampling'.format(batch_size, chunk_size)
        self.rels = g.edata['tid'][seed_edges]
        heads, tails = g.all_edges(order='eid')
        self.heads = heads[seed_edges]
        self.tails = tails[seed_edges]
        # pool of all nodes, consumed in slices as negative candidates
        self.node_pool = g.nodes()
        self.reset = reset
        self.replacement = replacement
        # self.chunk_size = chunk_size
        # self.neg_sample_size = neg_sample_size
        # TODO mask all false negative rels
        self.exclude_positive = exclude_positive
        self.drop_last = drop_last
        # might be replaced by rel weight vector provided
        self.rel_weight = th.ones(len(self.rels), dtype=th.float32) if rel_weight is None else rel_weight[seed_edges]
        # shuffle data
        if shuffle:
            # MARK - whether to shuffle data or shuffle indices only?
            self.node_pool = self.node_pool[th.randperm(len(self.node_pool))]
            idx = th.randperm(len(self.rels))
            self.rels = self.rels[idx]
            self.heads = self.heads[idx]
            self.tails = self.tails[idx]
            # the rel weight need to shuffle together to ensure consistency
            self.rel_weight = self.rel_weight[idx]
        self.batch_size = batch_size
        # negatives consumed per batch: one slice of neg_sample_size per chunk
        self.pool_size = self.batch_size // chunk_size * neg_sample_size
        self.iter_idx = 0
        self.pool_idx = 0
        self.step = 0

    def __iter__(self):
        return self

    # without multiprocess
    def __next__(self):
        """ Produce the next batch: positive dict, negative dict keyed by the
        corruption side, the corruption side itself, and edge importances. """
        pos = {}
        neg = {}
        if self.replacement:
            # choose with replacement with weight given, default weight for each rel is 1
            selected_idx = th.multinomial(self.rel_weight, num_samples=self.batch_size, replacement=True)
            pos['head'] = self.heads[selected_idx]
            pos['rel'] = self.rels[selected_idx]
            pos['tail'] = self.tails[selected_idx]
        else:
            # sequential slice over the (shuffled) triples
            end_iter_idx = min(self.iter_idx + self.batch_size, self.__len__())
            pos['head'] = self.heads[self.iter_idx: end_iter_idx]
            pos['rel'] = self.rels[self.iter_idx: end_iter_idx]
            pos['tail'] = self.tails[self.iter_idx: end_iter_idx]
        # need to setup lock to avoid mess
        end_pool_idx = min(self.pool_idx + self.pool_size, len(self.node_pool))
        # alternate corruption side every step
        neg_type = 'head' if self.step % 2 == 0 else 'tail'
        neg[neg_type] = self.node_pool[self.pool_idx: end_pool_idx]
        # neg['head' if self.step % 2 == 0 else 'tail'] = self.corrupt()
        self.iter_idx += self.batch_size
        self.pool_idx += self.pool_size
        self.step += 1
        # epoch boundary: rewind and reshuffle the positive triples
        if self.reset and self.iter_idx + self.batch_size >= self.__len__():
            self.iter_idx = 0
            # shuffle data after each epoch
            idx = th.randperm(len(self.rels))
            self.rels = self.rels[idx]
            self.heads = self.heads[idx]
            self.tails = self.tails[idx]
            # the rel weight need to shuffle together to ensure consistency
            self.rel_weight = self.rel_weight[idx]
        # if we run out of neg sample data, we shuffle it again
        if self.pool_idx + self.pool_size >= len(self.node_pool):
            self.pool_idx = 0
            idx = th.randperm(len(self.node_pool))
            self.node_pool = self.node_pool[idx]
        # use None for impts for now
        return pos, neg, neg_type, None

    # def corrupt(self):
    #     # we currently only support chunk_head and chunk_tail
    #     # MARK - discuss replacement with mentor
    #     if self.step % 2 == 0:
    #         chunk_idx = th.multinomial(self.h_weight, num_samples=self.neg_sample_size * (self.batch_size // self.chunk_size),replacement=True)
    #         return self.u_hid[chunk_idx]
    #     else:
    #         chunk_idx = th.multinomial(self.t_weight, num_samples=self.neg_sample_size * (self.batch_size // self.chunk_size), replacement=True)
    #         return self.u_tid[chunk_idx]

    def __len__(self):
        # with drop_last, length is truncated to a whole number of batches
        return self.rels.shape[0] if not self.drop_last \
            else (self.rels.shape[0] // self.batch_size * self.batch_size)
|
{"/python/dglke/util/argparser/__init__.py": ["/python/dglke/util/argparser/common_argparser.py", "/python/dglke/util/argparser/train_argparser.py"], "/python/dglke/models/pytorch/ke_tensor.py": ["/python/dglke/models/pytorch/ke_optimizer.py"], "/python/dglke/models/transe.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py"], "/python/dglke/util/argparser/train_argparser.py": ["/python/dglke/util/argparser/common_argparser.py"], "/python/dglke/models/conve.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py"], "/python/dglke/util/__init__.py": ["/python/dglke/util/misc.py", "/python/dglke/util/argparser/__init__.py", "/python/dglke/util/math.py", "/python/dglke/util/logger.py"], "/python/dglke/models/hypere.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/ke_model.py"], "/python/dglke/models/ke_model.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/pytorch/regularizer.py"]}
|
7,125
|
menjarleev/dgl-ke
|
refs/heads/master
|
/python/dglke/models/pytorch/score_fun.py
|
# -*- coding: utf-8 -*-
#
# score_fun.py
#
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch as th
import torch.nn as nn
import torch.nn.functional as functional
import torch.nn.init as INIT
from dglke.util.math import hyp_distance_multi_c
import numpy as np
import os
def batched_l2_dist(a, b):
    """Pairwise Euclidean distance between two batched point sets.

    a: (batch, n, d), b: (batch, m, d) -> (batch, n, m) where out[k, i, j] is
    ||a[k, i] - b[k, j]||_2, computed via the expansion
    ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2 with a clamp guarding sqrt(0).
    """
    sq_a = a.norm(dim=-1).pow(2)
    sq_b = b.norm(dim=-1).pow(2)
    squared = th.baddbmm(
        sq_b.unsqueeze(-2), a, b.transpose(-2, -1), alpha=-2
    ).add_(sq_a.unsqueeze(-1))
    # clamp before sqrt: float error can make tiny distances slightly negative
    return squared.clamp_min_(1e-30).sqrt_()
def batched_l1_dist(a, b):
    """Pairwise Manhattan (L1) distance between two batched point sets.

    a: (batch, n, d), b: (batch, m, d) -> (batch, n, m).
    """
    return th.cdist(a, b, p=1)
class TransEScore(nn.Module):
    """TransE score function: score(h, r, t) = gamma - ||h + r - t||_p.

    Paper link: https://papers.nips.cc/paper/5071-translating-embeddings-for-modeling-multi-relational-data
    """
    def __init__(self, gamma, dist_func='l2'):
        super(TransEScore, self).__init__()
        self.gamma = gamma
        if dist_func == 'l1':
            self.neg_dist_func = batched_l1_dist
            self.dist_ord = 1
        else:  # default use l2
            self.neg_dist_func = batched_l2_dist
            self.dist_ord = 2

    def edge_func(self, edges):
        """Per-edge score used by DGL's apply_edges."""
        diff = edges.src['emb'] + edges.data['emb'] - edges.dst['emb']
        return {'score': self.gamma - th.norm(diff, p=self.dist_ord, dim=-1)}

    def infer(self, head_emb, rel_emb, tail_emb):
        """All-pairs scoring: result[h, r, t] = gamma - ||head[h] + rel[r] - tail[t]||."""
        hr = head_emb.unsqueeze(1) + rel_emb.unsqueeze(0)
        diff = hr.unsqueeze(2) - tail_emb.unsqueeze(0).unsqueeze(0)
        return self.gamma - th.norm(diff, p=self.dist_ord, dim=-1)

    def prepare(self, g, gpu_id, trace=False):
        """No per-graph preparation needed for TransE."""
        pass

    def create_neg_prepare(self, neg_head):
        """TransE needs no projection of head/tail before negative scoring."""
        def fn(rel_id, num_chunks, head, tail, gpu_id, trace=False):
            return head, tail
        return fn

    def predict(self, emb):
        """Score pre-gathered embeddings from an {'head','rel','tail'} dict."""
        diff = emb['head'] + emb['rel'] - emb['tail']
        return self.gamma - th.norm(diff, p=self.dist_ord, dim=-1)

    def forward(self, g):
        g.apply_edges(self.edge_func)

    def update(self, gpu_id=-1):
        """TransE keeps no extra trainable state to update."""
        pass

    def reset_parameters(self):
        pass

    def save(self, path, name):
        th.save(self.cpu().state_dict(), os.path.join(path, name))

    def load(self, path, name):
        pass

    def create_neg(self, neg_head):
        """Return a scoring closure over chunked negatives.

        The closure folds the relation into the positive side (t - r when heads
        are corrupted, h + r when tails are) and scores it against the negative
        chunk with the configured batched distance.
        """
        gamma = self.gamma
        if neg_head:
            def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
                dim = heads.shape[1]
                neg = heads.reshape(num_chunks, neg_sample_size, dim)
                pos = (tails - relations).reshape(num_chunks, chunk_size, dim)
                return gamma - self.neg_dist_func(pos, neg)
            return fn
        def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
            dim = heads.shape[1]
            pos = (heads + relations).reshape(num_chunks, chunk_size, dim)
            neg = tails.reshape(num_chunks, neg_sample_size, dim)
            return gamma - self.neg_dist_func(pos, neg)
        return fn
class TransRScore(nn.Module):
    """TransR score function
    Paper link: https://www.aaai.org/ocs/index.php/AAAI/AAAI15/paper/download/9571/9523

    Entities live in entity space and are projected into each relation's own
    space (via projection_emb, an entity_dim x relation_dim matrix per relation)
    before a TransE-style translation score is computed.
    """
    def __init__(self, gamma, projection_emb, relation_dim, entity_dim):
        super(TransRScore, self).__init__()
        self.gamma = gamma
        # per-relation projection matrices, stored flat per relation id
        self.projection_emb = projection_emb
        self.relation_dim = relation_dim
        self.entity_dim = entity_dim

    def edge_func(self, edges):
        # head/tail here are already projected into relation space by prepare()
        head = edges.data['head_emb']
        tail = edges.data['tail_emb']
        rel = edges.data['emb']
        score = head + rel - tail
        return {'score': self.gamma - th.norm(score, p=1, dim=-1)}

    def infer(self, head_emb, rel_emb, tail_emb):
        # all-pairs inference is not implemented for TransR
        pass

    def prepare(self, g, gpu_id, trace=False):
        """Project head/tail entity embeddings into relation space for each edge."""
        head_ids, tail_ids = g.all_edges(order='eid')
        projection = self.projection_emb(g.edata['id'], gpu_id, trace)
        projection = projection.reshape(-1, self.entity_dim, self.relation_dim)
        # row-vector x (entity_dim x relation_dim) matrix, per edge
        g.edata['head_emb'] = th.einsum('ab,abc->ac', g.ndata['emb'][head_ids], projection)
        g.edata['tail_emb'] = th.einsum('ab,abc->ac', g.ndata['emb'][tail_ids], projection)

    def create_neg_prepare(self, neg_head):
        """Return a closure projecting positive and negative nodes into relation space."""
        if neg_head:
            def fn(rel_id, num_chunks, head, tail, gpu_id, trace=False):
                # pos node, project to its relation
                projection = self.projection_emb(rel_id, gpu_id, trace)
                projection = projection.reshape(num_chunks, -1, self.entity_dim, self.relation_dim)
                tail = tail.reshape(num_chunks, -1, 1, self.entity_dim)
                tail = th.matmul(tail, projection)
                tail = tail.reshape(num_chunks, -1, self.relation_dim)
                # neg node, each project to all relations
                head = head.reshape(num_chunks, 1, -1, self.entity_dim)
                # (num_chunks, num_rel, num_neg_nodes, rel_dim)
                head = th.matmul(head, projection)
                return head, tail
            return fn
        else:
            def fn(rel_id, num_chunks, head, tail, gpu_id, trace=False):
                # pos node, project to its relation
                projection = self.projection_emb(rel_id, gpu_id, trace)
                projection = projection.reshape(num_chunks, -1, self.entity_dim, self.relation_dim)
                head = head.reshape(num_chunks, -1, 1, self.entity_dim)
                head = th.matmul(head, projection)
                head = head.reshape(num_chunks, -1, self.relation_dim)
                # neg node, each project to all relations
                tail = tail.reshape(num_chunks, 1, -1, self.entity_dim)
                # (num_chunks, num_rel, num_neg_nodes, rel_dim)
                tail = th.matmul(tail, projection)
                return head, tail
            return fn

    def forward(self, g):
        g.apply_edges(lambda edges: self.edge_func(edges))

    def reset_parameters(self):
        self.projection_emb.init(1.0)

    def update(self, gpu_id=-1):
        self.projection_emb.update(gpu_id)

    def save(self, path, name):
        self.projection_emb.save(path, name + 'projection')

    def load(self, path, name):
        self.projection_emb.load(path, name + 'projection')

    def prepare_local_emb(self, projection_emb):
        # keep a handle to the global copy, then work on the local one
        self.global_projection_emb = self.projection_emb
        self.projection_emb = projection_emb

    def prepare_cross_rels(self, cross_rels):
        self.projection_emb.setup_cross_rels(cross_rels, self.global_projection_emb)

    def writeback_local_emb(self, idx):
        # push locally-trained rows back into the global projection embedding
        self.global_projection_emb.emb[idx] = self.projection_emb.emb.cpu()[idx]

    def load_local_emb(self, projection_emb):
        device = projection_emb.emb.device
        projection_emb.emb = self.projection_emb.emb.to(device)
        self.projection_emb = projection_emb

    def share_memory(self):
        self.projection_emb.share_memory()

    def create_neg(self, neg_head):
        """Return a scoring closure; inputs are already in relation space
        (see create_neg_prepare)."""
        gamma = self.gamma
        if neg_head:
            def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
                relations = relations.reshape(num_chunks, -1, self.relation_dim)
                tails = tails - relations
                tails = tails.reshape(num_chunks, -1, 1, self.relation_dim)
                score = heads - tails
                return gamma - th.norm(score, p=1, dim=-1)
            return fn
        else:
            def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
                relations = relations.reshape(num_chunks, -1, self.relation_dim)
                heads = heads - relations
                heads = heads.reshape(num_chunks, -1, 1, self.relation_dim)
                score = heads - tails
                return gamma - th.norm(score, p=1, dim=-1)
            return fn
class DistMultScore(nn.Module):
    """DistMult score function: score(h, r, t) = sum(h * r * t).

    Paper link: https://arxiv.org/abs/1412.6575
    """
    def __init__(self):
        super(DistMultScore, self).__init__()

    def edge_func(self, edges):
        """Per-edge score used by DGL's apply_edges."""
        prod = edges.src['emb'] * edges.data['emb'] * edges.dst['emb']
        # TODO: check if there exists minus sign and if gamma should be used here(jin)
        return {'score': th.sum(prod, dim=-1)}

    def infer(self, head_emb, rel_emb, tail_emb):
        """All-pairs scoring: result[h, r, t] = sum(head[h] * rel[r] * tail[t])."""
        hr = head_emb.unsqueeze(1) * rel_emb.unsqueeze(0)
        prod = hr.unsqueeze(2) * tail_emb.unsqueeze(0).unsqueeze(0)
        return th.sum(prod, dim=-1)

    def prepare(self, g, gpu_id, trace=False):
        """No per-graph preparation needed for DistMult."""
        pass

    def create_neg_prepare(self, neg_head):
        """DistMult needs no projection of head/tail before negative scoring."""
        def fn(rel_id, num_chunks, head, tail, gpu_id, trace=False):
            return head, tail
        return fn

    def update(self, gpu_id=-1):
        pass

    def reset_parameters(self):
        pass

    def save(self, path, name):
        pass

    def load(self, path, name):
        pass

    def forward(self, g):
        g.apply_edges(self.edge_func)

    def create_neg(self, neg_head):
        """Return a scoring closure over chunked negatives.

        Folds the relation into the positive side (t * r or h * r) and scores it
        against every negative in the chunk with a single batched matmul.
        """
        if neg_head:
            def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
                dim = heads.shape[1]
                neg = heads.reshape(num_chunks, neg_sample_size, dim).transpose(1, 2)
                pos = (tails * relations).reshape(num_chunks, chunk_size, dim)
                return th.bmm(pos, neg)
            return fn
        def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
            dim = tails.shape[1]
            neg = tails.reshape(num_chunks, neg_sample_size, dim).transpose(1, 2)
            pos = (heads * relations).reshape(num_chunks, chunk_size, dim)
            return th.bmm(pos, neg)
        return fn
class ComplExScore(nn.Module):
    """ComplEx score function
    Paper link: https://arxiv.org/abs/1606.06357

    Embeddings pack the real and imaginary parts as the two halves of the
    last axis; the score is Re(<head, rel, conj(tail)>).
    """
    def __init__(self):
        super(ComplExScore, self).__init__()

    def edge_func(self, edges):
        """Per-edge score from the packed complex embeddings."""
        real_head, img_head = th.chunk(edges.src['emb'], 2, dim=-1)
        real_tail, img_tail = th.chunk(edges.dst['emb'], 2, dim=-1)
        real_rel, img_rel = th.chunk(edges.data['emb'], 2, dim=-1)
        # Re(h * r * conj(t)) expanded into four real-valued products.
        score = real_head * real_tail * real_rel \
                + img_head * img_tail * real_rel \
                + real_head * img_tail * img_rel \
                - img_head * real_tail * img_rel
        # TODO: check if there exists minus sign and if gamma should be used here(jin)
        return {'score': th.sum(score, -1)}

    def infer(self, head_emb, rel_emb, tail_emb):
        """All-pairs scores; result has shape (n_head, n_rel, n_tail)."""
        real_head, img_head = th.chunk(head_emb, 2, dim=-1)
        real_tail, img_tail = th.chunk(tail_emb, 2, dim=-1)
        real_rel, img_rel = th.chunk(rel_emb, 2, dim=-1)
        score = (real_head.unsqueeze(1) * real_rel.unsqueeze(0)).unsqueeze(2) * real_tail.unsqueeze(0).unsqueeze(0) \
                + (img_head.unsqueeze(1) * real_rel.unsqueeze(0)).unsqueeze(2) * img_tail.unsqueeze(0).unsqueeze(0) \
                + (real_head.unsqueeze(1) * img_rel.unsqueeze(0)).unsqueeze(2) * img_tail.unsqueeze(0).unsqueeze(0) \
                - (img_head.unsqueeze(1) * img_rel.unsqueeze(0)).unsqueeze(2) * real_tail.unsqueeze(0).unsqueeze(0)
        return th.sum(score, dim=-1)

    def prepare(self, g, gpu_id, trace=False):
        pass  # no per-minibatch preparation needed

    def create_neg_prepare(self, neg_head):
        # No projection step: embeddings pass through unchanged.
        def fn(rel_id, num_chunks, head, tail, gpu_id, trace=False):
            return head, tail
        return fn

    def update(self, gpu_id=-1):
        pass  # stateless module

    def reset_parameters(self):
        pass

    def save(self, path, name):
        pass  # nothing to persist

    def load(self, path, name):
        pass

    def forward(self, g):
        g.apply_edges(lambda edges: self.edge_func(edges))

    def create_neg(self, neg_head):
        """Fold the relation into the positive entity (conjugate form when
        negating heads), then bmm against every negative in the chunk."""
        if neg_head:
            def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
                hidden_dim = heads.shape[1]
                emb_real = tails[..., :hidden_dim // 2]
                emb_imag = tails[..., hidden_dim // 2:]
                rel_real = relations[..., :hidden_dim // 2]
                rel_imag = relations[..., hidden_dim // 2:]
                # t * conj(r): note the sign pattern differs from the tail branch.
                real = emb_real * rel_real + emb_imag * rel_imag
                imag = -emb_real * rel_imag + emb_imag * rel_real
                emb_complex = th.cat((real, imag), dim=-1)
                tmp = emb_complex.reshape(num_chunks, chunk_size, hidden_dim)
                heads = heads.reshape(num_chunks, neg_sample_size, hidden_dim)
                heads = th.transpose(heads, 1, 2)
                return th.bmm(tmp, heads)
            return fn
        else:
            def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
                hidden_dim = heads.shape[1]
                emb_real = heads[..., :hidden_dim // 2]
                emb_imag = heads[..., hidden_dim // 2:]
                rel_real = relations[..., :hidden_dim // 2]
                rel_imag = relations[..., hidden_dim // 2:]
                # h * r (plain complex multiplication).
                real = emb_real * rel_real - emb_imag * rel_imag
                imag = emb_real * rel_imag + emb_imag * rel_real
                emb_complex = th.cat((real, imag), dim=-1)
                tmp = emb_complex.reshape(num_chunks, chunk_size, hidden_dim)
                tails = tails.reshape(num_chunks, neg_sample_size, hidden_dim)
                tails = th.transpose(tails, 1, 2)
                return th.bmm(tmp, tails)
            return fn
class RESCALScore(nn.Module):
    """RESCAL score function.

    Paper link: http://www.icml-2011.org/papers/438_icmlpaper.pdf
    Each relation is a full (relation_dim x entity_dim) matrix M_r and the
    score of a triple is h^T M_r t.
    """

    def __init__(self, relation_dim, entity_dim):
        super(RESCALScore, self).__init__()
        self.relation_dim = relation_dim
        self.entity_dim = entity_dim

    def edge_func(self, edges):
        """Per-edge bilinear score h^T M_r t."""
        head = edges.src['emb']
        tail = edges.dst['emb'].unsqueeze(-1)
        rel_mat = edges.data['emb'].view(-1, self.relation_dim, self.entity_dim)
        projected = th.matmul(rel_mat, tail).squeeze(-1)
        # TODO: check if use self.gamma
        return {'score': th.sum(head * projected, dim=-1)}

    def infer(self, head_emb, rel_emb, tail_emb):
        """All-pairs scores; result has shape (n_head, n_rel, n_tail)."""
        h = head_emb.unsqueeze(1).unsqueeze(1)
        mats = rel_emb.view(-1, self.relation_dim, self.entity_dim)
        projected = th.einsum('abc,dc->adb', mats, tail_emb).unsqueeze(0)
        return th.sum(h * projected, dim=-1)

    def prepare(self, g, gpu_id, trace=False):
        pass  # no per-minibatch preparation needed

    def create_neg_prepare(self, neg_head):
        # No projection step: embeddings pass through unchanged.
        def fn(rel_id, num_chunks, head, tail, gpu_id, trace=False):
            return head, tail
        return fn

    def update(self, gpu_id=-1):
        pass  # stateless module

    def reset_parameters(self):
        pass

    def save(self, path, name):
        pass  # nothing to persist

    def load(self, path, name):
        pass

    def forward(self, g):
        g.apply_edges(lambda edges: self.edge_func(edges))

    def create_neg(self, neg_head):
        """Project the positive entity through M_r once, then bmm against
        every negative in the chunk."""
        if neg_head:
            def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
                dim = heads.shape[1]
                mats = relations.view(-1, self.relation_dim, self.entity_dim)
                projected = th.matmul(mats, tails.unsqueeze(-1)).squeeze(-1)
                projected = projected.reshape(num_chunks, chunk_size, dim)
                neg = heads.reshape(num_chunks, neg_sample_size, dim)
                return th.bmm(projected, neg.transpose(1, 2))
            return fn
        else:
            def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
                dim = heads.shape[1]
                mats = relations.view(-1, self.relation_dim, self.entity_dim)
                projected = th.matmul(mats, heads.unsqueeze(-1)).squeeze(-1)
                projected = projected.reshape(num_chunks, chunk_size, dim)
                neg = tails.reshape(num_chunks, neg_sample_size, dim)
                return th.bmm(projected, neg.transpose(1, 2))
            return fn
class RotatEScore(nn.Module):
    """RotatE score function
    Paper link: https://arxiv.org/abs/1902.10197

    Relations act as rotations in the complex plane; the score is
    gamma - sum ||h * r - t|| over the complex dimensions.
    """
    def __init__(self, gamma, emb_init):
        super(RotatEScore, self).__init__()
        self.gamma = gamma        # margin added to the distance-based score
        self.emb_init = emb_init  # scale used to map relation entries to phases

    def edge_func(self, edges):
        re_head, im_head = th.chunk(edges.src['emb'], 2, dim=-1)
        re_tail, im_tail = th.chunk(edges.dst['emb'], 2, dim=-1)
        # Map relation entries to phases (presumably in [-pi, pi] when the
        # embedding lies in [-emb_init, emb_init] — confirm).
        phase_rel = edges.data['emb'] / (self.emb_init / np.pi)
        re_rel, im_rel = th.cos(phase_rel), th.sin(phase_rel)
        # Complex multiplication h * r, then subtract t.
        re_score = re_head * re_rel - im_head * im_rel
        im_score = re_head * im_rel + im_head * re_rel
        re_score = re_score - re_tail
        im_score = im_score - im_tail
        # Modulus per complex dimension, summed over dimensions.
        score = th.stack([re_score, im_score], dim=0)
        score = score.norm(dim=0)
        return {'score': self.gamma - score.sum(-1)}

    def infer(self, head_emb, rel_emb, tail_emb):
        """All-pairs scores; result has shape (n_head, n_rel, n_tail)."""
        re_head, im_head = th.chunk(head_emb, 2, dim=-1)
        re_tail, im_tail = th.chunk(tail_emb, 2, dim=-1)
        phase_rel = rel_emb / (self.emb_init / np.pi)
        re_rel, im_rel = th.cos(phase_rel), th.sin(phase_rel)
        re_score = re_head.unsqueeze(1) * re_rel.unsqueeze(0) - im_head.unsqueeze(1) * im_rel.unsqueeze(0)
        im_score = re_head.unsqueeze(1) * im_rel.unsqueeze(0) + im_head.unsqueeze(1) * re_rel.unsqueeze(0)
        re_score = re_score.unsqueeze(2) - re_tail.unsqueeze(0).unsqueeze(0)
        im_score = im_score.unsqueeze(2) - im_tail.unsqueeze(0).unsqueeze(0)
        score = th.stack([re_score, im_score], dim=0)
        score = score.norm(dim=0)
        return self.gamma - score.sum(-1)

    def update(self, gpu_id=-1):
        pass  # stateless module

    def reset_parameters(self):
        pass

    def save(self, path, name):
        pass  # nothing to persist

    def load(self, path, name):
        pass

    def forward(self, g):
        g.apply_edges(lambda edges: self.edge_func(edges))

    def create_neg_prepare(self, neg_head):
        # No projection step: embeddings pass through unchanged.
        def fn(rel_id, num_chunks, head, tail, gpu_id, trace=False):
            return head, tail
        return fn

    def prepare(self, g, gpu_id, trace=False):
        pass  # no per-minibatch preparation needed

    def create_neg(self, neg_head):
        """Rotate the positive entity by the relation (inverse rotation when
        negating heads) and score against every negative in the chunk."""
        gamma = self.gamma
        emb_init = self.emb_init
        if neg_head:
            def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
                hidden_dim = heads.shape[1]
                emb_real = tails[..., :hidden_dim // 2]
                emb_imag = tails[..., hidden_dim // 2:]
                phase_rel = relations / (emb_init / np.pi)
                rel_real, rel_imag = th.cos(phase_rel), th.sin(phase_rel)
                # t * conj(r): rotate the tail back by the relation phase.
                real = emb_real * rel_real + emb_imag * rel_imag
                imag = -emb_real * rel_imag + emb_imag * rel_real
                emb_complex = th.cat((real, imag), dim=-1)
                tmp = emb_complex.reshape(num_chunks, chunk_size, 1, hidden_dim)
                heads = heads.reshape(num_chunks, 1, neg_sample_size, hidden_dim)
                score = tmp - heads
                score = th.stack([score[..., :hidden_dim // 2],
                                  score[..., hidden_dim // 2:]], dim=-1).norm(dim=-1)
                return gamma - score.sum(-1)
            return fn
        else:
            def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
                hidden_dim = heads.shape[1]
                emb_real = heads[..., :hidden_dim // 2]
                emb_imag = heads[..., hidden_dim // 2:]
                phase_rel = relations / (emb_init / np.pi)
                rel_real, rel_imag = th.cos(phase_rel), th.sin(phase_rel)
                # h * r: rotate the head forward by the relation phase.
                real = emb_real * rel_real - emb_imag * rel_imag
                imag = emb_real * rel_imag + emb_imag * rel_real
                emb_complex = th.cat((real, imag), dim=-1)
                tmp = emb_complex.reshape(num_chunks, chunk_size, 1, hidden_dim)
                tails = tails.reshape(num_chunks, 1, neg_sample_size, hidden_dim)
                score = tmp - tails
                score = th.stack([score[..., :hidden_dim // 2],
                                  score[..., hidden_dim // 2:]], dim=-1).norm(dim=-1)
                return gamma - score.sum(-1)
            return fn
class SimplEScore(nn.Module):
    """SimplE score function.

    Paper link: http://papers.nips.cc/paper/7682-simple-embedding-for-link-prediction-in-knowledge-graphs.pdf
    Entity/relation embeddings carry two halves (canonical and inverse);
    the score is the average of the forward and inverse DistMult terms.
    """

    def __init__(self):
        super(SimplEScore, self).__init__()

    def edge_func(self, edges):
        """Average of the forward and inverse canonical scores, clamped."""
        h_i, h_j = th.chunk(edges.src['emb'], 2, dim=-1)
        t_i, t_j = th.chunk(edges.dst['emb'], 2, dim=-1)
        r_fwd, r_bwd = th.chunk(edges.data['emb'], 2, dim=-1)
        fwd = h_i * r_fwd * t_j
        bwd = t_i * r_bwd * h_j
        # Clamp as the official implementation does to avoid NaN output
        # (likely caused by exploding gradients).
        score = th.clamp(1 / 2 * (fwd + bwd).sum(-1), -20, 20)
        return {'score': score}

    def infer(self, head_emb, rel_emb, tail_emb):
        """All-pairs scores; result has shape (n_head, n_rel, n_tail)."""
        h_i, h_j = th.chunk(head_emb.unsqueeze(1), 2, dim=-1)
        t_i, t_j = th.chunk(tail_emb.unsqueeze(0).unsqueeze(0), 2, dim=-1)
        r_fwd, r_bwd = th.chunk(rel_emb.unsqueeze(0), 2, dim=-1)
        fwd = (h_i * r_fwd).unsqueeze(2) * t_j
        bwd = (h_j * r_bwd).unsqueeze(2) * t_i
        # NOTE(review): unlike edge_func/create_neg this path does not clamp —
        # confirm whether that is intentional.
        score = (fwd + bwd) * 1 / 2
        return th.sum(score, dim=-1)

    def update(self, gpu_id=-1):
        pass  # stateless module

    def reset_parameters(self):
        pass

    def save(self, path, name):
        pass  # nothing to persist

    def load(self, path, name):
        pass

    def forward(self, g):
        g.apply_edges(lambda edges: self.edge_func(edges))

    def create_neg_prepare(self, neg_head):
        # No projection step: embeddings pass through unchanged.
        def fn(rel_id, num_chunks, head, tail, gpu_id, trace=False):
            return head, tail
        return fn

    def prepare(self, g, gpu_id, trace=False):
        pass  # no per-minibatch preparation needed

    def create_neg(self, neg_head):
        """Fold the relation into the positive entity, then bmm the two
        half-scores against every negative in the chunk."""
        if neg_head:
            def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
                dim = tails.shape[1]
                t_i, t_j = th.chunk(tails, 2, dim=-1)
                r_fwd, r_bwd = th.chunk(relations, 2, dim=-1)
                fwd = (r_fwd * t_j).reshape(num_chunks, chunk_size, dim // 2)
                bwd = (r_bwd * t_i).reshape(num_chunks, chunk_size, dim // 2)
                neg = heads.reshape(num_chunks, neg_sample_size, dim)
                h_i, h_j = th.chunk(neg, 2, dim=-1)
                raw = (th.bmm(fwd, h_i.transpose(1, 2)) + th.bmm(bwd, h_j.transpose(1, 2))) / 2
                return th.clamp(raw, -20, 20)
            return fn
        else:
            def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
                dim = heads.shape[1]
                h_i, h_j = th.chunk(heads, 2, dim=-1)
                r_fwd, r_bwd = th.chunk(relations, 2, dim=-1)
                fwd = (h_i * r_fwd).reshape(num_chunks, chunk_size, dim // 2)
                bwd = (r_bwd * h_j).reshape(num_chunks, chunk_size, dim // 2)
                neg = tails.reshape(num_chunks, neg_sample_size, dim)
                t_i, t_j = th.chunk(neg, 2, dim=-1)
                raw = (th.bmm(fwd, t_j.transpose(1, 2)) + th.bmm(bwd, t_i.transpose(1, 2))) / 2
                return th.clamp(raw, -20, 20)
            return fn
class ConvEScore(nn.Module):
"""ConvE score function
Paper link: https://arxiv.org/pdf/1707.01476.pdf
"""
def __init__(self, hidden_dim, tensor_height, dropout_ratio: tuple = (0, 0, 0), batch_norm=False):
super(ConvEScore, self).__init__()
self._build_model(hidden_dim, tensor_height, dropout_ratio, batch_norm)
def _build_model(self, hidden_dim, tensor_height, dropout_ratio, batch_norm):
# get height of reshape tensor
assert hidden_dim % tensor_height == 0, 'input dimension %d must be divisible to tensor height %d' % (hidden_dim, tensor_height)
h = tensor_height
w = hidden_dim // h
conv = []
if batch_norm:
conv += [nn.BatchNorm2d(1)]
if dropout_ratio[0] != 0:
conv += [nn.Dropout(p=dropout_ratio[0])]
conv += [nn.Conv2d(1, 32, 3, 1, 0, bias=True)]
if batch_norm:
conv += [nn.BatchNorm2d(32)]
conv += [nn.ReLU()]
if dropout_ratio[1] != 0:
conv += [nn.Dropout2d(p=dropout_ratio[1])]
self.conv = nn.Sequential(*conv)
fc = []
linear_dim = 32 * (h* 2 - 2) * (w - 2)
fc += [nn.Linear(linear_dim, hidden_dim)]
if dropout_ratio[2] != 0:
fc += [nn.Dropout(p=dropout_ratio[2])]
if batch_norm:
fc += [nn.BatchNorm1d(hidden_dim)]
fc += [nn.ReLU()]
self.fc = nn.Sequential(*fc)
def edge_func(self, edges):
head = edges.src['emb']
tail = edges.dst['emb']
rel = edges.data['emb']
score = self.model(head, rel, tail)
return {'score': score}
def infer(self, head_emb, rel_emb, tail_emb):
pass
def prepare(self, g, gpu_id, trace=False):
pass
def create_neg_prepare(self, neg_head):
if neg_head:
def fn(rel_id, num_chunks, head, tail, gpu_id, trace=False):
return head, tail
return fn
else:
def fn(rel_id, num_chunks, head, tail, gpu_id, trace=False):
return head, tail
return fn
def forward(self, embs, mode='all', bmm=False):
"""
Parameters
----------
concat_emb : Tensor
embedding concatenated from head and tail and reshaped
tail_emb : Tensor
tail embedding
mode : str
choice = ['lhs', 'rhs', 'full']. Which part of score function to perform. This is used to accelerate test process.
"""
if mode in ['all', 'lhs']:
concat_emb = embs[0]
# reshape tensor to fit in conv
if concat_emb.dim() == 3:
batch, height, width = concat_emb.shape
concat_emb = concat_emb.reshape(batch, 1, height, width)
x = self.conv(concat_emb)
x = x.view(x.shape[0], -1)
fc = self.fc(x)
if mode == 'lhs':
return fc
else:
fc = embs[0]
tail_emb, bias = embs[1:]
if not bmm:
assert fc.dim() == tail_emb.dim() == bias.dim(), 'batch operation only allow embedding with same dimension'
x = th.sum(fc * tail_emb, dim=-1, keepdim=True)
else:
if tail_emb.dim() == 3:
tail_emb = tail_emb.transpose(1, 2)
x = th.bmm(fc, tail_emb)
bias = bias.transpose(1, 2).expand_as(x)
else:
tail_emb = tail_emb.transpose(1, 0)
x = th.mm(fc, tail_emb)
bias = bias.transpose(1, 0).expand_as(x)
x = x + bias
return x
def reset_parameters(self):
# use default init tech of pytorch
pass
def update(self, gpu_id=-1):
pass
def save(self, path, name):
file_name = os.path.join(path, name)
# MARK - is .cpu() available if it's already in CPU ?
th.save(self.cpu().state_dict(), file_name)
def load(self, path, name):
file_name = os.path.join(path, name)
# TODO: lingfei - determine whether to map location here
self.load_state_dict(th.load(file_name))
def create_neg(self, neg_head):
if neg_head:
def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
# extreme overhead, fix later, could use multiprocess to reduce it
batch, dim = heads.shape()
tmps = []
for i in range(neg_sample_size + 1):
heads = heads.reshape(-1, neg_sample_size, dim)
head_i = th.cat([heads[:, i:, ...], heads[:, :i, ...]], dim=1)
head_i = head_i.reshape(-1, dim)
tmp_i = self.model(head_i, relations, tails)
tmps += [tmp_i]
score = th.stack(tmps, dim=-1)
return score
return fn
else:
def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
batch, dim = tails.shape()
tmps = []
for i in range(neg_sample_size + 1):
tails = tails.reshape(-1, neg_sample_size, dim)
tail_i = th.cat([tails[:, i:, ...], tails[:, :i, ...]], dim=1)
tail_i = tail_i.reshape(-1, dim)
tmp_i = self.model(heads, relations, tails)
tmps += [tmp_i]
score = th.stack(tmps, dim=-1)
return score
return fn
class ATTHScore(nn.Module):
    """AttH score function: negative squared hyperbolic distance.

    Relies on ``hyp_distance_multi_c`` defined elsewhere in this module.
    """
    def __init__(self):
        super(ATTHScore, self).__init__()

    def forward(self, lhs_e, rhs_e, c, comp='batch'):
        # c is the curvature parameter; comp selects the comparison mode
        # inside hyp_distance_multi_c — confirm its semantics there.
        return - hyp_distance_multi_c(lhs_e, rhs_e, c, comp) ** 2
|
{"/python/dglke/util/argparser/__init__.py": ["/python/dglke/util/argparser/common_argparser.py", "/python/dglke/util/argparser/train_argparser.py"], "/python/dglke/models/pytorch/ke_tensor.py": ["/python/dglke/models/pytorch/ke_optimizer.py"], "/python/dglke/models/transe.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py"], "/python/dglke/util/argparser/train_argparser.py": ["/python/dglke/util/argparser/common_argparser.py"], "/python/dglke/models/conve.py": ["/python/dglke/models/ke_model.py", "/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py"], "/python/dglke/util/__init__.py": ["/python/dglke/util/misc.py", "/python/dglke/util/argparser/__init__.py", "/python/dglke/util/math.py", "/python/dglke/util/logger.py"], "/python/dglke/models/hypere.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/ke_model.py"], "/python/dglke/models/ke_model.py": ["/python/dglke/models/pytorch/score_fun.py", "/python/dglke/models/pytorch/ke_tensor.py", "/python/dglke/models/pytorch/regularizer.py"]}
|
7,136
|
tincan39/Bot
|
refs/heads/master
|
/urllist.py
|
class UrlList:
    """Bounded, chronological collection of URLs (oldest first)."""

    # Maximum number of URLs retained.
    MAX_URLS = 500

    def __init__(self):
        self.uList = []

    def add_elem(self, emb_list: list):
        """Append URLs, dropping the oldest entries beyond the cap.

        Bug fix: the old code popped a single entry *before* extending, so
        adding several URLs could grow the list past 500 indefinitely.
        """
        self.uList.extend(emb_list)
        overflow = len(self.uList) - self.MAX_URLS
        if overflow > 0:
            del self.uList[:overflow]

    def last_5(self):
        """Return the five most recently added URLs.

        Bug fix: previously returned the *oldest* five (uList[0:5]) even
        though callers (the $last5links command) want the most recent.
        """
        return self.uList[-5:]

    # returns a specified number of urls (oldest first)
    def get_n_elem(self, num: int) -> list:
        if num > len(self.uList):
            raise IndexError("That number exceeds the amount of urls")
        return self.uList[0:num]

    # returns urls by its extension type (up to num matches, oldest first)
    def get_by_ext(self, num: int, ext: str):
        val_links = [x for x in self.uList if x.endswith(ext)]
        return val_links[0:num]
|
{"/bot.py": ["/urllist.py"]}
|
7,137
|
tincan39/Bot
|
refs/heads/master
|
/bot.py
|
import discord
import re
from discord.ext import commands
from urllist import UrlList
urls = UrlList()  # manages the urls

# Help text shown by the $help command.
# Bug fix: adjacent string literals were concatenated without separators,
# producing "linksbe", "returnedif" and "linkamount" in the displayed text.
help_msg = "This discord bot extracts urls/links from user messages and these links can " \
           "be accessed easily via the bot.\n\n" \
           "Commands:\n\n" \
           "$last5links : \n returns the last 5 urls/links sent on the server\n\n" \
           "$getlinks number_of_links :\n returns a specified amount of links if available. An error message is " \
           "returned " \
           "if there isn't\n\n" \
           "$getlinktype number_of_links ext :\n returns links based on the extension(e.g: .pdf). If the link " \
           "amount exceeds what is available, then whatever is available is returned"

client = commands.Bot(command_prefix='$')
client.remove_command('help')  # removes the default help command
# function that displays a list's contents in a single string
def list_to_str(url_list):
    """Join URLs into one display string, each followed by a blank line.

    Uses str.join instead of repeated ``+=`` concatenation (which is
    quadratic in the number of URLs).
    """
    return ''.join(f"{url}\n\n" for url in url_list)
@client.event  # variable is client, so decorator must be client.____
async def on_ready():  # causes the bot to go online
    """Log to the console once the bot has connected."""
    print('bot is ready.')

# reads messages and extracts links
@client.event
async def on_message(message):
    """Collect every URL in a non-bot message, then process commands."""
    if not message.author.bot:  # ignores messages from bots
        msg = message.content
        url_list = re.findall(r'(https?://\S+)', msg)  # extracts all links into a list
        urls.add_elem(url_list)
    # Required because overriding on_message suppresses default command handling.
    await client.process_commands(message)

@client.command(name='getlinks')
async def _get_links(ctx, arg: int):
    """
    Gets a certain amount of links based on the number provided
    -if number exceeds amount of urls, on_command_error() gets called
    arg:number of links requested
    """
    url = list_to_str(urls.get_n_elem(arg))
    await ctx.send(url)

@client.command(name='getlinktype')
async def _get_link_ext(ctx, num: int, ext: str):
    """
    Gets links based on the extension type(e.g .pdf, .html, .png)
    arg - number of links requested.
    ext- the extension used to filter
    If arg exceeds the number of links with a particular extension, the all the links with that extension are sent
    """
    url = list_to_str(urls.get_by_ext(num, ext))
    await ctx.send(url)

# returns the last 5 links sent in the discord chat
@client.command(name='last5links')
async def last_5_links(ctx):
    msg = list_to_str(urls.last_5())
    await ctx.send(msg)

@client.command(pass_context=True)
async def help(ctx):
    """DM the help text to the requesting user."""
    user = ctx.message.author
    await user.send(help_msg)

# message that gets sent when a command is called incorrectly
@client.event
async def on_command_error(ctx, error):
    # NOTE(review): every command error (including internal bugs) is reported
    # as "Invalid arguments" — consider logging `error` for diagnosis.
    await ctx.send("Invalid arguments")
if __name__=='__main__':
    # Token is kept out of source control in a local config module.
    import config
    client.run(config.token)
|
{"/bot.py": ["/urllist.py"]}
|
7,139
|
wecode-bootcamp-korea/21-Final-Assignment
|
refs/heads/main
|
/api/admins/migrations/0005_delete_admin.py
|
# Generated by Django 3.2.5 on 2021-07-21 12:46
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: deletes the Admin model."""

    dependencies = [
        ('publics', '0005_remove_public_admin'),
        ('admins', '0004_remove_admin_public'),
    ]

    operations = [
        migrations.DeleteModel(
            name='Admin',
        ),
    ]
|
{"/api/admins/urls.py": ["/api/admins/views.py"], "/api/publics/urls.py": ["/api/publics/views.py"], "/api/publics/serializer.py": ["/api/publics/models.py"], "/api/admins/views.py": ["/api/publics/models.py", "/api/admins/serializer.py"], "/api/admins/serializer.py": ["/api/publics/models.py"], "/api/publics/views.py": ["/api/publics/models.py", "/api/publics/serializer.py"], "/api/publics/tests.py": ["/api/publics/models.py"]}
|
7,140
|
wecode-bootcamp-korea/21-Final-Assignment
|
refs/heads/main
|
/api/admins/migrations/0003_auto_20210719_0604.py
|
# Generated by Django 3.2.5 on 2021-07-19 06:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: drops Admin.number and tightens the
    password/public fields on the Admin model."""

    dependencies = [
        ('publics', '0002_auto_20210719_0604'),
        ('admins', '0002_auto_20210719_0603'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='admin',
            name='number',
        ),
        migrations.AlterField(
            model_name='admin',
            name='password',
            field=models.CharField(max_length=8),
        ),
        migrations.AlterField(
            model_name='admin',
            name='public',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='publics', to='publics.public'),
        ),
    ]
|
{"/api/admins/urls.py": ["/api/admins/views.py"], "/api/publics/urls.py": ["/api/publics/views.py"], "/api/publics/serializer.py": ["/api/publics/models.py"], "/api/admins/views.py": ["/api/publics/models.py", "/api/admins/serializer.py"], "/api/admins/serializer.py": ["/api/publics/models.py"], "/api/publics/views.py": ["/api/publics/models.py", "/api/publics/serializer.py"], "/api/publics/tests.py": ["/api/publics/models.py"]}
|
7,141
|
wecode-bootcamp-korea/21-Final-Assignment
|
refs/heads/main
|
/api/admins/urls.py
|
from rest_framework import urlpatterns, routers
from .views import PublicListAPIView
from django.urls import path
# Routes the admins app root to the Public list view.
urlpatterns = [
    path('', PublicListAPIView.as_view())
]
|
{"/api/admins/urls.py": ["/api/admins/views.py"], "/api/publics/urls.py": ["/api/publics/views.py"], "/api/publics/serializer.py": ["/api/publics/models.py"], "/api/admins/views.py": ["/api/publics/models.py", "/api/admins/serializer.py"], "/api/admins/serializer.py": ["/api/publics/models.py"], "/api/publics/views.py": ["/api/publics/models.py", "/api/publics/serializer.py"], "/api/publics/tests.py": ["/api/publics/models.py"]}
|
7,142
|
wecode-bootcamp-korea/21-Final-Assignment
|
refs/heads/main
|
/api/publics/urls.py
|
from .views import LoginAPI
from django.urls import path
# Routes the publics app root to the login endpoint.
urlpatterns = [
    path('', LoginAPI.as_view())
]
|
{"/api/admins/urls.py": ["/api/admins/views.py"], "/api/publics/urls.py": ["/api/publics/views.py"], "/api/publics/serializer.py": ["/api/publics/models.py"], "/api/admins/views.py": ["/api/publics/models.py", "/api/admins/serializer.py"], "/api/admins/serializer.py": ["/api/publics/models.py"], "/api/publics/views.py": ["/api/publics/models.py", "/api/publics/serializer.py"], "/api/publics/tests.py": ["/api/publics/models.py"]}
|
7,143
|
wecode-bootcamp-korea/21-Final-Assignment
|
refs/heads/main
|
/api/publics/serializer.py
|
from rest_framework.serializers import ModelSerializer
from .models import Public
class PublicSerializer(ModelSerializer):
    """Serializes Public rows for API responses."""
    class Meta:
        model = Public
        # NOTE(review): 'password' is included in responses — confirm that
        # exposing it is intended.
        fields = ('number', 'password', 'cost')

class LoginSerializer(ModelSerializer):
    """Input serializer for the login endpoint (number + password)."""
    class Meta:
        model = Public
        fields = ('number', 'password')
|
{"/api/admins/urls.py": ["/api/admins/views.py"], "/api/publics/urls.py": ["/api/publics/views.py"], "/api/publics/serializer.py": ["/api/publics/models.py"], "/api/admins/views.py": ["/api/publics/models.py", "/api/admins/serializer.py"], "/api/admins/serializer.py": ["/api/publics/models.py"], "/api/publics/views.py": ["/api/publics/models.py", "/api/publics/serializer.py"], "/api/publics/tests.py": ["/api/publics/models.py"]}
|
7,144
|
wecode-bootcamp-korea/21-Final-Assignment
|
refs/heads/main
|
/api/admins/migrations/0001_initial.py
|
# Generated by Django 3.2.5 on 2021-07-19 05:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Admin model
    (table ``admins``) with a FK to publics.Public."""

    initial = True

    dependencies = [
        ('publics', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Admin',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=8)),
                ('public', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='public', to='publics.public')),
            ],
            options={
                'db_table': 'admins',
            },
        ),
    ]
|
{"/api/admins/urls.py": ["/api/admins/views.py"], "/api/publics/urls.py": ["/api/publics/views.py"], "/api/publics/serializer.py": ["/api/publics/models.py"], "/api/admins/views.py": ["/api/publics/models.py", "/api/admins/serializer.py"], "/api/admins/serializer.py": ["/api/publics/models.py"], "/api/publics/views.py": ["/api/publics/models.py", "/api/publics/serializer.py"], "/api/publics/tests.py": ["/api/publics/models.py"]}
|
7,145
|
wecode-bootcamp-korea/21-Final-Assignment
|
refs/heads/main
|
/api/publics/migrations/0005_remove_public_admin.py
|
# Generated by Django 3.2.5 on 2021-07-21 12:46
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: removes the Public.admin foreign key."""

    dependencies = [
        ('publics', '0004_rename_paymeny_public_cost'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='public',
            name='admin',
        ),
    ]
|
{"/api/admins/urls.py": ["/api/admins/views.py"], "/api/publics/urls.py": ["/api/publics/views.py"], "/api/publics/serializer.py": ["/api/publics/models.py"], "/api/admins/views.py": ["/api/publics/models.py", "/api/admins/serializer.py"], "/api/admins/serializer.py": ["/api/publics/models.py"], "/api/publics/views.py": ["/api/publics/models.py", "/api/publics/serializer.py"], "/api/publics/tests.py": ["/api/publics/models.py"]}
|
7,146
|
wecode-bootcamp-korea/21-Final-Assignment
|
refs/heads/main
|
/api/publics/migrations/0004_rename_paymeny_public_cost.py
|
# Generated by Django 3.2.5 on 2021-07-19 09:43
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: renames the misspelled 'paymeny' field
    back to 'cost' on Public."""

    dependencies = [
        ('publics', '0003_auto_20210719_0940'),
    ]

    operations = [
        migrations.RenameField(
            model_name='public',
            old_name='paymeny',
            new_name='cost',
        ),
    ]
|
{"/api/admins/urls.py": ["/api/admins/views.py"], "/api/publics/urls.py": ["/api/publics/views.py"], "/api/publics/serializer.py": ["/api/publics/models.py"], "/api/admins/views.py": ["/api/publics/models.py", "/api/admins/serializer.py"], "/api/admins/serializer.py": ["/api/publics/models.py"], "/api/publics/views.py": ["/api/publics/models.py", "/api/publics/serializer.py"], "/api/publics/tests.py": ["/api/publics/models.py"]}
|
7,147
|
wecode-bootcamp-korea/21-Final-Assignment
|
refs/heads/main
|
/api/admins/views.py
|
from django.http.response import JsonResponse
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import generics, serializers, viewsets
from rest_framework.permissions import IsAdminUser
from api.publics.models import Public
from .serializer import AdminSerializer
class PublicListAPIView(generics.ListAPIView):
    """Read-only list of all Public rows; restricted to admin users."""
    queryset = Public.objects.all()
    serializer_class = AdminSerializer
    permission_classes = [IsAdminUser]
|
{"/api/admins/urls.py": ["/api/admins/views.py"], "/api/publics/urls.py": ["/api/publics/views.py"], "/api/publics/serializer.py": ["/api/publics/models.py"], "/api/admins/views.py": ["/api/publics/models.py", "/api/admins/serializer.py"], "/api/admins/serializer.py": ["/api/publics/models.py"], "/api/publics/views.py": ["/api/publics/models.py", "/api/publics/serializer.py"], "/api/publics/tests.py": ["/api/publics/models.py"]}
|
7,148
|
wecode-bootcamp-korea/21-Final-Assignment
|
refs/heads/main
|
/api/publics/models.py
|
from django.db import models
from django.db.models.deletion import CASCADE
class Public(models.Model):
    """Account row backing the login endpoint (table ``publics``)."""
    number = models.CharField(max_length=4)    # login identifier
    # NOTE(review): password is stored and compared in plaintext — confirm
    # that is acceptable for this assignment.
    password = models.CharField(max_length=4)
    cost = models.CharField(max_length=10)

    class Meta:
        db_table = 'publics'
|
{"/api/admins/urls.py": ["/api/admins/views.py"], "/api/publics/urls.py": ["/api/publics/views.py"], "/api/publics/serializer.py": ["/api/publics/models.py"], "/api/admins/views.py": ["/api/publics/models.py", "/api/admins/serializer.py"], "/api/admins/serializer.py": ["/api/publics/models.py"], "/api/publics/views.py": ["/api/publics/models.py", "/api/publics/serializer.py"], "/api/publics/tests.py": ["/api/publics/models.py"]}
|
7,149
|
wecode-bootcamp-korea/21-Final-Assignment
|
refs/heads/main
|
/api/admins/serializer.py
|
from api.publics.models import Public
from rest_framework.serializers import ModelSerializer
from rest_framework import serializers
class AdminSerializer(ModelSerializer):
    """Serializes Public rows for the admin-only list endpoint.

    NOTE(review): includes the plaintext password field — confirm intended.
    """
    class Meta:
        model = Public
        fields = ('number', 'cost', 'password')
|
{"/api/admins/urls.py": ["/api/admins/views.py"], "/api/publics/urls.py": ["/api/publics/views.py"], "/api/publics/serializer.py": ["/api/publics/models.py"], "/api/admins/views.py": ["/api/publics/models.py", "/api/admins/serializer.py"], "/api/admins/serializer.py": ["/api/publics/models.py"], "/api/publics/views.py": ["/api/publics/models.py", "/api/publics/serializer.py"], "/api/publics/tests.py": ["/api/publics/models.py"]}
|
7,150
|
wecode-bootcamp-korea/21-Final-Assignment
|
refs/heads/main
|
/api/publics/migrations/0003_auto_20210719_0940.py
|
# Generated by Django 3.2.5 on 2021-07-19 09:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: renames cost -> paymeny and adds an
    admin foreign key (default row 1) to Public."""

    dependencies = [
        ('admins', '0004_remove_admin_public'),
        ('publics', '0002_auto_20210719_0604'),
    ]

    operations = [
        migrations.RenameField(
            model_name='public',
            old_name='cost',
            new_name='paymeny',
        ),
        migrations.AddField(
            model_name='public',
            name='admin',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='admins.admin'),
            preserve_default=False,
        ),
    ]
|
{"/api/admins/urls.py": ["/api/admins/views.py"], "/api/publics/urls.py": ["/api/publics/views.py"], "/api/publics/serializer.py": ["/api/publics/models.py"], "/api/admins/views.py": ["/api/publics/models.py", "/api/admins/serializer.py"], "/api/admins/serializer.py": ["/api/publics/models.py"], "/api/publics/views.py": ["/api/publics/models.py", "/api/publics/serializer.py"], "/api/publics/tests.py": ["/api/publics/models.py"]}
|
7,151
|
wecode-bootcamp-korea/21-Final-Assignment
|
refs/heads/main
|
/api/admins/migrations/0004_remove_admin_public.py
|
# Generated by Django 3.2.5 on 2021-07-19 09:40
from django.db import migrations
class Migration(migrations.Migration):
    """Drop Admin.public; the reverse FK (Public -> Admin) is added in
    publics migration 0003 generated at the same time."""
    dependencies = [
        ('admins', '0003_auto_20210719_0604'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='admin',
            name='public',
        ),
    ]
|
{"/api/admins/urls.py": ["/api/admins/views.py"], "/api/publics/urls.py": ["/api/publics/views.py"], "/api/publics/serializer.py": ["/api/publics/models.py"], "/api/admins/views.py": ["/api/publics/models.py", "/api/admins/serializer.py"], "/api/admins/serializer.py": ["/api/publics/models.py"], "/api/publics/views.py": ["/api/publics/models.py", "/api/publics/serializer.py"], "/api/publics/tests.py": ["/api/publics/models.py"]}
|
7,152
|
wecode-bootcamp-korea/21-Final-Assignment
|
refs/heads/main
|
/api/publics/views.py
|
from django.db.models import Q
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import generics, mixins, serializers, status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from .models import Public
from .serializer import LoginSerializer, PublicSerializer
class LoginAPI(generics.GenericAPIView):
    """Log a public user in by matching number + password.

    POST body: ``{"number": ..., "password": ...}``.
    Returns 200 with the serialized Public on success, 400 otherwise.
    """
    serializer_class = LoginSerializer
    permission_classes = [IsAuthenticated]

    def post(self, request, *args, **kwargs):
        try:
            number = request.data['number']
            password = request.data['password']
            # NOTE(review): this compares the raw password column, so
            # passwords appear to be stored in plaintext — consider hashing.
            public = Public.objects.get(number=number, password=password)
        except (KeyError, Public.DoesNotExist):
            # Missing fields previously bubbled up as a 500 (KeyError);
            # both cases now yield the same client-facing 400.
            return Response(
                {
                    'message': '입력정보가 틀렸습니다.'
                }, status=status.HTTP_400_BAD_REQUEST
            )
        # Renamed from ``serializers`` to stop shadowing the imported
        # ``rest_framework.serializers`` module.
        serializer = PublicSerializer(public)
        return Response(
            {
                'public': serializer.data
            }, status=status.HTTP_200_OK
        )
|
{"/api/admins/urls.py": ["/api/admins/views.py"], "/api/publics/urls.py": ["/api/publics/views.py"], "/api/publics/serializer.py": ["/api/publics/models.py"], "/api/admins/views.py": ["/api/publics/models.py", "/api/admins/serializer.py"], "/api/admins/serializer.py": ["/api/publics/models.py"], "/api/publics/views.py": ["/api/publics/models.py", "/api/publics/serializer.py"], "/api/publics/tests.py": ["/api/publics/models.py"]}
|
7,153
|
wecode-bootcamp-korea/21-Final-Assignment
|
refs/heads/main
|
/api/publics/tests.py
|
import json
from django.contrib.auth.models import User
from django.http import response
from django.urls import reverse
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase
from api.publics.models import Public
class PublicAPITest(APITestCase):
    """End-to-end tests for the public login endpoint (/api/public)."""
    def setUp(self):
        # A staff user + token so authenticated requests can be issued.
        self.user = User.objects.create_user(
            username = 'root',
            password = 'go9511455',
            is_staff = True
        )
        self.token = Token.objects.create(user=self.user)
        # Fixture row the success test logs in as.
        Public.objects.create(id=1, password=1234, number='2001', cost='1000')
        self.api_authentication()
    def api_authentication(self):
        # Attach the token to every request issued by the test client.
        self.client.credentials(HTTP_AUTHORIZATION=f'Token {self.token}')
    def test_Public_post_success(self):
        # Correct credentials return the serialized Public record.
        data = {'number':'2001', 'password':'1234'}
        response = self.client.post('/api/public', data =json.dumps(data), content_type='application/json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.json(), {'public' : {'number':'2001', 'password':'1234', 'cost':'1000'}})
    def test_Public_post_failed(self):
        # Unknown credentials yield a 400 with the Korean error message.
        data = {'number':'1111', 'password':'1111'}
        response = self.client.post('/api/public', data =json.dumps(data), content_type='application/json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.json(), {'message' : '입력정보가 틀렸습니다.'})
    def test_Public_list_user_none(self):
        # Without authentication the endpoint must reject the request.
        self.client.force_authenticate(user=None)
        response = self.client.post('/api/public')
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
|
{"/api/admins/urls.py": ["/api/admins/views.py"], "/api/publics/urls.py": ["/api/publics/views.py"], "/api/publics/serializer.py": ["/api/publics/models.py"], "/api/admins/views.py": ["/api/publics/models.py", "/api/admins/serializer.py"], "/api/admins/serializer.py": ["/api/publics/models.py"], "/api/publics/views.py": ["/api/publics/models.py", "/api/publics/serializer.py"], "/api/publics/tests.py": ["/api/publics/models.py"]}
|
7,162
|
zabini/flask-rest-api-example
|
refs/heads/master
|
/routes.py
|
import datetime
import json
from flask import Blueprint,request,jsonify,render_template
import models
# Routes for clients
clients_bp = Blueprint('clients', __name__)
@clients_bp.route('/clients', methods=['POST'] )
def clients_create( ):
    """Create a client from the JSON request body.

    Returns 201 with the created client, 403 on bad parameters,
    409 if the id already exists, 500 on persistence failure.
    """
    data = request.get_json( )
    try:
        client = models.Client(**data)
    except Exception:  # narrowed from bare except: don't trap SystemExit etc.
        return jsonify({'message' : 'Invalid parameter for client'}), 403
    if models.Client.get_one( id=client.id ) is not None:
        return jsonify({'message' : 'Resource already exists'}), 409
    if not client.add( ) :
        return jsonify({'message' : 'Something is going wrong'}), 500
    return jsonify( { 'client' : models.ClientSchema( ).dump(client).data } ), 201
## end of clients_create method
@clients_bp.route('/clients', methods=['GET'] )
def clients_get_all( ):
    """Return every client as JSON with a 200 status."""
    all_clients = models.Client().get_all()
    payload = models.ClientSchema(many=True).dump(all_clients).data
    return jsonify({'clients': payload}), 200
## end of clients_get_all method
@clients_bp.route('/clients/<int:id>', methods=['GET'] )
def clients_get(id):
    """Return one client by id, or 404 if it does not exist."""
    client = models.Client.get_one(id=id)
    if client is None:  # idiomatic identity check, was ``== None``
        return jsonify({'message': 'Resource not found'}), 404
    return jsonify({'client': models.ClientSchema().dump(client).data}), 200
## end of clients_get method
@clients_bp.route('/clients/<int:id>', methods=['PUT'] )
def clients_update(id):
    """Partially update a client from the JSON body; ``id`` is immutable.

    Returns 200 with the updated client, 404 if absent, 500 on failure.
    """
    data = request.get_json()
    client = models.Client.get_one(id=id)
    if client is None:  # idiomatic identity check, was ``== None``
        return jsonify({'message': 'Resource not found'}), 404
    for key in data:
        # Never allow the primary key to be overwritten.
        if hasattr(client, key) and key != 'id':
            setattr(client, key, data[key])
    if not client.update():
        return jsonify({'message': 'Something is going wrong'}), 500
    return jsonify({'client': models.ClientSchema().dump(client).data}), 200
## end of clients_update method
@clients_bp.route('/clients/<int:id>', methods=['DELETE'] )
def clients_delete(id):
    """Delete a client by id. 204 on success, 404 if absent, 500 on failure."""
    client = models.Client.get_one(id=id)
    if client is None:  # idiomatic identity check, was ``== None``
        return jsonify({'message': 'Resource not found'}), 404
    if not client.delete():
        return jsonify({'message': 'Something is going wrong'}), 500
    # 204 responses carry no body; the string is discarded by the client.
    return 'This content response will not be displayed', 204
## end of clients_delete method
# Routes for contacts of clients
contacts_bp = Blueprint('contacts', __name__)
@contacts_bp.route('/clients/<int:id>/contacts', methods=['POST'] )
def contacts_create(id):
    """Create a contact under client ``id`` from the JSON body.

    Returns 201 with the contact, 403 on bad parameters, 409 on duplicate
    id, 404 if the parent client is missing, 500 on persistence failure.
    """
    data = request.get_json()
    try:
        contact = models.Contact(**data)
    except Exception:  # narrowed from bare except
        return jsonify({'message': 'Invalid parameters for contact'}), 403
    if models.Contact.get_one(id=contact.id) is not None:
        return jsonify({'message': 'Resource already exists'}), 409
    contact.client = models.Client().get_one(id=id)
    if contact.client is None:
        # Capitalization fixed for consistency with the other 404 messages.
        return jsonify({'message': 'Resource not found'}), 404
    if not contact.add():
        return jsonify({'message': 'Something is going wrong'}), 500
    return jsonify({'contact': models.ContactSchema().dump(contact).data}), 201
## end of contacts_create method
@contacts_bp.route('/clients/<int:id>/contacts', methods=['GET'] )
def contacts_search(id):
    """List every contact belonging to client ``id``."""
    matches = models.Contact().get_all(client_id=id)
    serialized = models.ContactSchema(many=True).dump(matches).data
    return jsonify({'contacts': serialized}), 200
## end of contacts_search method
@contacts_bp.route('/clients/<int:id>/contacts/<int:contact_id>', methods=['GET'] )
def contacts_get(id,contact_id):
    """Return one contact scoped to its client, or 404 if absent."""
    contact = models.Contact.get_one(client_id=id, id=contact_id)
    if contact is None:  # idiomatic identity check, was ``== None``
        return jsonify({'message': 'Resource not found'}), 404
    return jsonify({'contact': models.ContactSchema().dump(contact).data}), 200
## end of contacts_get method
@contacts_bp.route('/clients/<int:id>/contacts/<int:contact_id>', methods=['PUT'] )
def contacts_update(id,contact_id):
    """Partially update a contact; ``id`` and ``client_id`` are immutable.

    Returns 200 with the updated contact, 404 if absent, 500 on failure.
    """
    data = request.get_json()
    contact = models.Contact.get_one(client_id=id, id=contact_id)
    if contact is None:  # idiomatic identity check, was ``== None``
        return jsonify({'message': 'Resource not found'}), 404
    for key in data:
        # Protect the primary key and the owning-client key.
        if hasattr(contact, key) and key != 'id' and key != 'client_id':
            setattr(contact, key, data[key])
    if not contact.update():
        return jsonify({'message': 'Something is going wrong'}), 500
    return jsonify({'contact': models.ContactSchema().dump(contact).data}), 200
## end of contacts_update method
@contacts_bp.route('/clients/<int:id>/contacts/<int:contact_id>', methods=['DELETE'] )
def contacts_delete(id,contact_id):
    """Delete a contact scoped to its client. 204/404/500 as elsewhere."""
    contact = models.Contact.get_one(client_id=id, id=contact_id)
    if contact is None:  # idiomatic identity check, was ``== None``
        return jsonify({'message': 'Resource not found'}), 404
    if not contact.delete():
        return jsonify({'message': 'Something is going wrong'}), 500
    # 204 responses carry no body; the string is discarded by the client.
    return 'This content response will not be displayed', 204
## end of contacts_delete method
# Routes for clients
static_bp = Blueprint('static', __name__)
@static_bp.route('/' )
def index():
    """Render the landing page, passing today's date as ``date_now``."""
    today = datetime.date.today()  # full date, not just the year
    return render_template("index.html", date_now=today)
## end of index method
|
{"/routes.py": ["/models.py"], "/models.py": ["/app.py"], "/app.py": ["/configs.py", "/models.py", "/routes.py"]}
|
7,163
|
zabini/flask-rest-api-example
|
refs/heads/master
|
/configs.py
|
def create_configs(app):
    """Apply the SQLAlchemy settings to *app*'s config and return it."""
    settings = {
        'SQLALCHEMY_DATABASE_URI': 'sqlite:///data.db',
        'SQLALCHEMY_TRACK_MODIFICATIONS': False,
        'SQLALCHEMY_COMMIT_ON_TEARDOWN': True,
    }
    for key, value in settings.items():
        app.config[key] = value
    return app
|
{"/routes.py": ["/models.py"], "/models.py": ["/app.py"], "/app.py": ["/configs.py", "/models.py", "/routes.py"]}
|
7,164
|
zabini/flask-rest-api-example
|
refs/heads/master
|
/models.py
|
from app import db,ma
from flask_sqlalchemy import SQLAlchemy
class Client(db.Model):
    """Client row with boolean-returning CRUD helpers over the shared session.

    NOTE(review): the bare ``except`` clauses below swallow every error,
    including programming mistakes, reporting only True/False or None.
    """
    id = db.Column(db.Integer, primary_key=True,index=True)
    name = db.Column(db.String(100))
    observation = db.Column(db.Text)
    # Contacts are deleted together with their client (delete-orphan cascade).
    contacts = db.relationship('Contact', backref='client', cascade='all, delete-orphan')
    def add(self):
        """Insert and commit this instance; True on success."""
        try:
            db.session.add(self)
            db.session.commit()
            return True
        except:
            return False
    def update(self):
        """Commit pending attribute changes; True on success."""
        try:
            db.session.commit()
            return True
        except:
            return False
    def delete(self):
        """Delete and commit this instance; True on success."""
        try:
            db.session.delete(self)
            db.session.commit()
            return True
        except:
            return False
    @classmethod
    def get_one(cls,**kwargs):
        """Return the first row matching ``kwargs``, or None (also on error)."""
        try:
            return db.session.query(cls).filter_by(**kwargs).first( )
        except:
            return None
    @classmethod
    def get_all(cls,**kwargs):
        """Return all rows matching ``kwargs``, or None on query error."""
        try:
            return db.session.query(cls).filter_by(**kwargs).all( )
        except:
            return None
class Contact(db.Model):
    """Contact row (phone/email/etc.) owned by a Client.

    Mirrors Client's CRUD helpers; the same bare-``except`` caveat applies.
    """
    id = db.Column(db.Integer, primary_key=True,index=True)
    # Free-form kind of contact, e.g. the channel name (max 10 chars).
    type = db.Column(db.String(10))
    value = db.Column(db.String(100))
    client_id = db.Column(db.Integer, db.ForeignKey('client.id'),nullable=False)
    def add(self):
        """Insert and commit this instance; True on success."""
        try:
            db.session.add(self)
            db.session.commit()
            return True
        except:
            return False
    def update(self):
        """Commit pending attribute changes; True on success."""
        try:
            db.session.commit()
            return True
        except:
            return False
    def delete(self):
        """Delete and commit this instance; True on success."""
        try:
            db.session.delete(self)
            db.session.commit()
            return True
        except:
            return False
    @classmethod
    def get_one(cls,**kwargs):
        """Return the first row matching ``kwargs``, or None (also on error)."""
        try:
            return db.session.query(cls).filter_by(**kwargs).first( )
        except:
            return None
    @classmethod
    def get_all(cls,**kwargs):
        """Return all rows matching ``kwargs``, or None on query error."""
        try:
            return db.session.query(cls).filter_by(**kwargs).all( )
        except:
            return None
class ClientSchema(ma.ModelSchema):
    """Marshmallow schema auto-generated from the Client model."""
    class Meta:
        model = Client
class ContactSchema(ma.ModelSchema):
    """Marshmallow schema auto-generated from the Contact model."""
    class Meta:
        model = Contact
|
{"/routes.py": ["/models.py"], "/models.py": ["/app.py"], "/app.py": ["/configs.py", "/models.py", "/routes.py"]}
|
7,165
|
zabini/flask-rest-api-example
|
refs/heads/master
|
/app.py
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from configs import create_configs
# Application bootstrap: create the app, wire extensions, register routes.
app = Flask(__name__,template_folder='static')  # templates live under ./static
app = create_configs(app)
db = SQLAlchemy(app,session_options={'autoflush': True})
ma = Marshmallow(app)
# Imported only after db/ma exist: models.py does ``from app import db,ma``.
from models import *
# import routes from file
from routes import clients_bp,contacts_bp,static_bp
app.register_blueprint( clients_bp )
app.register_blueprint( contacts_bp )
app.register_blueprint( static_bp )
if __name__ == "__main__":
    app.run(debug=True,port=80)
|
{"/routes.py": ["/models.py"], "/models.py": ["/app.py"], "/app.py": ["/configs.py", "/models.py", "/routes.py"]}
|
7,184
|
Suman2023/Lets-Django
|
refs/heads/main
|
/Car_Pooling/registration/views.py
|
from django.contrib.auth.models import User
from django.http.response import HttpResponseRedirect
from django.shortcuts import render, redirect
from django.contrib.auth import login, logout, authenticate
from .forms import SignUpForm, LogInForm
# # # Create your views here.
def register(request):
    """Sign a new user up; on success, send them to the login page."""
    if request.method != "POST":
        # Plain GET: show an empty signup form.
        return render(request, "registration/signup.html", {'form': SignUpForm()})
    form = SignUpForm(request.POST)
    if not form.is_valid():
        return render(request, "registration/signup.html", {'form': form})
    form.save()
    return redirect('registration:login')
def logout_view(request):
    """Log the current user out, then render the home page."""
    logout(request)
    return render(request, 'home/index.html')
def login_view(request):
    """Authenticate a user; on failure re-render the form with an error.

    GET shows an empty form; a valid POST logs in and redirects home.
    """
    if request.method == "POST":
        form = LogInForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']
            password = form.cleaned_data['password']
            user = authenticate(username=username, password=password)
            if user:
                login(request, user)
                return redirect('home:index')
            else:
                # Typo fixed in the user-facing message ('Inavlid' ->
                # 'Invalid'); debug print(form) calls removed.
                form.add_error(None, error='Invalid Credentials')
                return render(request, 'registration/login.html',
                              {'form': form})
        else:
            return render(request, 'registration/login.html', {'form': form})
    return render(request, 'registration/login.html', {'form': LogInForm()})
|
{"/Car_Pooling/home/admin.py": ["/Car_Pooling/home/models.py"], "/Hello_World/modelBasics/admin.py": ["/Hello_World/modelBasics/models.py"], "/Hello_World/modelBasics/forms.py": ["/Hello_World/modelBasics/models.py"], "/rest_api/notes/views.py": ["/rest_api/notes/serializers.py"], "/Hello_World/modelBasics/views.py": ["/Hello_World/modelBasics/models.py", "/Hello_World/modelBasics/forms.py"], "/Car_Pooling/home/views.py": ["/Car_Pooling/home/models.py"]}
|
7,185
|
Suman2023/Lets-Django
|
refs/heads/main
|
/rest_api/notes/urls.py
|
from django.urls import path
from . import views
app_name = "notes"
# Routes for the notes API; <str:pk> is forwarded to the view as a string.
urlpatterns = [
    path("", views.getRoutes, name="routes"),
    path("notes/", views.getNotes, name="getNotes"),
    path("notes/create/", views.createNote, name="createNote"),
    path("notes/<str:pk>/", views.getNote, name="getNote"),
    path("notes/<str:pk>/update/", views.updateNote, name="updatenote"),
    # NOTE(review): no trailing slash here, unlike the other routes — confirm.
    path("notes/<str:pk>/delete", views.deleteNote, name="deleteNote")
]
|
{"/Car_Pooling/home/admin.py": ["/Car_Pooling/home/models.py"], "/Hello_World/modelBasics/admin.py": ["/Hello_World/modelBasics/models.py"], "/Hello_World/modelBasics/forms.py": ["/Hello_World/modelBasics/models.py"], "/rest_api/notes/views.py": ["/rest_api/notes/serializers.py"], "/Hello_World/modelBasics/views.py": ["/Hello_World/modelBasics/models.py", "/Hello_World/modelBasics/forms.py"], "/Car_Pooling/home/views.py": ["/Car_Pooling/home/models.py"]}
|
7,186
|
Suman2023/Lets-Django
|
refs/heads/main
|
/Car_Pooling/home/admin.py
|
from django.contrib import admin
from .models import Cities, Ride, OfferRide
# Register your models here.
# Expose the car-pooling models in the Django admin interface.
admin.site.register(Cities)
admin.site.register(Ride)
admin.site.register(OfferRide)
|
{"/Car_Pooling/home/admin.py": ["/Car_Pooling/home/models.py"], "/Hello_World/modelBasics/admin.py": ["/Hello_World/modelBasics/models.py"], "/Hello_World/modelBasics/forms.py": ["/Hello_World/modelBasics/models.py"], "/rest_api/notes/views.py": ["/rest_api/notes/serializers.py"], "/Hello_World/modelBasics/views.py": ["/Hello_World/modelBasics/models.py", "/Hello_World/modelBasics/forms.py"], "/Car_Pooling/home/views.py": ["/Car_Pooling/home/models.py"]}
|
7,187
|
Suman2023/Lets-Django
|
refs/heads/main
|
/Hello_World/modelBasics/admin.py
|
from django.contrib import admin
from .models import Person, Employee
# Register your models here.
# Expose Person and Employee in the Django admin interface.
admin.site.register(Person)
admin.site.register(Employee)
|
{"/Car_Pooling/home/admin.py": ["/Car_Pooling/home/models.py"], "/Hello_World/modelBasics/admin.py": ["/Hello_World/modelBasics/models.py"], "/Hello_World/modelBasics/forms.py": ["/Hello_World/modelBasics/models.py"], "/rest_api/notes/views.py": ["/rest_api/notes/serializers.py"], "/Hello_World/modelBasics/views.py": ["/Hello_World/modelBasics/models.py", "/Hello_World/modelBasics/forms.py"], "/Car_Pooling/home/views.py": ["/Car_Pooling/home/models.py"]}
|
7,188
|
Suman2023/Lets-Django
|
refs/heads/main
|
/Car_Pooling/getride/views.py
|
from django.http.response import HttpResponse
from django.shortcuts import redirect, render
from django.db.models import Q
from home.models import Cities, Ride, OfferRide
from django.forms import ValidationError
# Create your views here.
def getRide(request):
    """Search for rides matching origin/destination/date/seat count.

    Exact matches land in ``rides``; looser matches (same origin OR
    destination, on or after the date) land in ``relatedRides``.
    """
    if request.method == "POST":
        try:
            origin = request.POST["origin"]
            destination = request.POST["destination"]
            date = request.POST["date"]
            seats_required = request.POST["seat_available"]
            rides = Ride.objects.filter(
                origin=origin,
                destination=destination,
                seat_available__gte=seats_required,
                journey_date__gte=date)
            relatedRides = Ride.objects.filter(
                (Q(origin=origin)
                 | Q(destination=destination))
                & Q(journey_date__gte=date))
            return render(request, "getride/availability.html", {
                "rides": rides,
                "relatedRides": relatedRides
            })
        except (ValidationError, KeyError):
            # KeyError covers missing form fields (MultiValueDictKeyError
            # subclasses it), which previously produced a 500;
            # ValidationError covers malformed dates/numbers.
            return render(
                request, "home/index.html", {
                    'cities': Cities.objects.all(),
                    "validationerror": "Invalid queries"
                })
    return render(request, "getride/availability.html")
def chat(request, queryparams=None):
    """Placeholder chat endpoint: greets authenticated users only."""
    if not request.user.is_authenticated:
        print("Nope")
        return HttpResponse("goodbye:(")
    print(request.user.username)
    return HttpResponse("hello world")
|
{"/Car_Pooling/home/admin.py": ["/Car_Pooling/home/models.py"], "/Hello_World/modelBasics/admin.py": ["/Hello_World/modelBasics/models.py"], "/Hello_World/modelBasics/forms.py": ["/Hello_World/modelBasics/models.py"], "/rest_api/notes/views.py": ["/rest_api/notes/serializers.py"], "/Hello_World/modelBasics/views.py": ["/Hello_World/modelBasics/models.py", "/Hello_World/modelBasics/forms.py"], "/Car_Pooling/home/views.py": ["/Car_Pooling/home/models.py"]}
|
7,189
|
Suman2023/Lets-Django
|
refs/heads/main
|
/Car_Pooling/createride/views.py
|
from django.http.response import HttpResponse
from django.shortcuts import redirect, render
from home.models import Cities, OfferRide
# Create your views here.
def createride(request):
    """Offer a new ride; requires a logged-in user.

    POST creates the OfferRide and redirects home; GET renders the form.
    """
    if not request.user.is_authenticated:
        # Anonymous visitors must log in first.
        return redirect('registration:login')
    if request.method == "POST":
        posted = request.POST
        # We will add validation later on
        OfferRide.objects.create(
            user_name=request.user.username,
            origin=posted["origin"],
            destination=posted["destination"],
            journey_date=posted["journey_date"],
            seat_available=posted["seat_available"],
            origin_location=posted["origin_location"],
            destination_location=posted["destination_location"],
            contact=posted["contact"],
            fare=posted["fare"])
        return redirect('home:index')
    return render(request, 'createride/createride.html',
                  {"cities": Cities.objects.all()})
|
{"/Car_Pooling/home/admin.py": ["/Car_Pooling/home/models.py"], "/Hello_World/modelBasics/admin.py": ["/Hello_World/modelBasics/models.py"], "/Hello_World/modelBasics/forms.py": ["/Hello_World/modelBasics/models.py"], "/rest_api/notes/views.py": ["/rest_api/notes/serializers.py"], "/Hello_World/modelBasics/views.py": ["/Hello_World/modelBasics/models.py", "/Hello_World/modelBasics/forms.py"], "/Car_Pooling/home/views.py": ["/Car_Pooling/home/models.py"]}
|
7,190
|
Suman2023/Lets-Django
|
refs/heads/main
|
/rest_api/notes/serializers.py
|
from django.db.models.base import Model
from rest_framework.serializers import ModelSerializer
from .models import Note
# we are using this to make our objects that returns from the table to json compatible
class NoteSerializer(ModelSerializer):
    """Serializes Note instances to/from JSON for the notes API."""
    class Meta:
        model = Note
        fields = '__all__' # expose every model field instead of listing them
|
{"/Car_Pooling/home/admin.py": ["/Car_Pooling/home/models.py"], "/Hello_World/modelBasics/admin.py": ["/Hello_World/modelBasics/models.py"], "/Hello_World/modelBasics/forms.py": ["/Hello_World/modelBasics/models.py"], "/rest_api/notes/views.py": ["/rest_api/notes/serializers.py"], "/Hello_World/modelBasics/views.py": ["/Hello_World/modelBasics/models.py", "/Hello_World/modelBasics/forms.py"], "/Car_Pooling/home/views.py": ["/Car_Pooling/home/models.py"]}
|
7,191
|
Suman2023/Lets-Django
|
refs/heads/main
|
/Hello_World/modelBasics/forms.py
|
from .models import Employee
from django import forms
from django.forms.fields import CharField
# (stored value, display label) pairs for the gender select widget.
gender_choices = [
    ('M', 'Male'),
    ('F', 'Female'),
    ('O', 'Other'),
]
# (stored value, display label) pairs for the department select widget.
dept_choices = [
    ('FS', "Full Stack"),
    ('FE', 'Front End'),
    ('BE', 'Backend'),
]
class EmployeeForm(forms.ModelForm):
    """Form collecting person details plus the employee's department.

    NOTE(review): first_name/last_name/gender are split off into a Person
    row by the view, not stored on Employee directly — see views.index.
    """
    first_name = forms.CharField(max_length=15, required=True)
    last_name = forms.CharField(max_length=15, required=True)
    gender = forms.ChoiceField(choices=gender_choices, required=True)
    department = forms.ChoiceField(choices=dept_choices, required=True)
    class Meta:
        model = Employee
        fields = ('first_name', 'last_name', 'department', 'gender')
|
{"/Car_Pooling/home/admin.py": ["/Car_Pooling/home/models.py"], "/Hello_World/modelBasics/admin.py": ["/Hello_World/modelBasics/models.py"], "/Hello_World/modelBasics/forms.py": ["/Hello_World/modelBasics/models.py"], "/rest_api/notes/views.py": ["/rest_api/notes/serializers.py"], "/Hello_World/modelBasics/views.py": ["/Hello_World/modelBasics/models.py", "/Hello_World/modelBasics/forms.py"], "/Car_Pooling/home/views.py": ["/Car_Pooling/home/models.py"]}
|
7,192
|
Suman2023/Lets-Django
|
refs/heads/main
|
/Hello_World/hello/urls.py
|
from django.urls import path
from . import views
#There is a main urls.py for the whole project but we add for every app we create just for sake of simplicity and not cluttering the main urls.py
urlpatterns = [
    path("",views.index, name = "index"), # "" -> default route (hello/)
    path("name", views.name, name = "name"), # served at hello/name
    path("<str:name>",views.greet, name = "greet"), # hello/<name> greets that name
]
|
{"/Car_Pooling/home/admin.py": ["/Car_Pooling/home/models.py"], "/Hello_World/modelBasics/admin.py": ["/Hello_World/modelBasics/models.py"], "/Hello_World/modelBasics/forms.py": ["/Hello_World/modelBasics/models.py"], "/rest_api/notes/views.py": ["/rest_api/notes/serializers.py"], "/Hello_World/modelBasics/views.py": ["/Hello_World/modelBasics/models.py", "/Hello_World/modelBasics/forms.py"], "/Car_Pooling/home/views.py": ["/Car_Pooling/home/models.py"]}
|
7,193
|
Suman2023/Lets-Django
|
refs/heads/main
|
/Hello_World/connect/models.py
|
from django.db import models
from django.contrib.auth import get_user_model
User = get_user_model()
class Room(models.Model):
    """A chat room, unique by name, owned by one admin user."""
    group_name = models.TextField(unique=True)
    group_admin = models.ForeignKey(User, on_delete=models.CASCADE)
# Create your models here.
class Message(models.Model):
    """A single chat message posted by ``author`` into ``group``."""
    group = models.ForeignKey(
        Room,
        related_name='messages_group',
        on_delete=models.CASCADE,
    )
    author = models.ForeignKey(
        User,
        related_name='author_messages',
        on_delete=models.CASCADE,
    )
    content = models.TextField(default="")
    # Set once, when the row is created.
    timeStamp = models.DateTimeField(auto_now_add=True, )
    def __str__(self):
        return self.author.username
def last_10_messages(groupName, group_admin):
    """Return the 10 most recent messages of a room, oldest first.

    The queryset is ordered newest-first and limited to 10; the negative
    step in the slice then reverses that window into chronological order.
    """
    return Message.objects.order_by('-timeStamp').filter(
        group__group_name=groupName,
        group__group_admin__username=group_admin)[:10:-1]
|
{"/Car_Pooling/home/admin.py": ["/Car_Pooling/home/models.py"], "/Hello_World/modelBasics/admin.py": ["/Hello_World/modelBasics/models.py"], "/Hello_World/modelBasics/forms.py": ["/Hello_World/modelBasics/models.py"], "/rest_api/notes/views.py": ["/rest_api/notes/serializers.py"], "/Hello_World/modelBasics/views.py": ["/Hello_World/modelBasics/models.py", "/Hello_World/modelBasics/forms.py"], "/Car_Pooling/home/views.py": ["/Car_Pooling/home/models.py"]}
|
7,194
|
Suman2023/Lets-Django
|
refs/heads/main
|
/rest_api/notes/views.py
|
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .serializers import NoteSerializer
from .models import Note
# Create your views here.
@api_view(['GET'])
def getRoutes(req):
    """Return a human-readable description of every notes endpoint."""
    endpoint_specs = [
        ('/notes/', 'GET', None, 'Returns all the notes'),
        ('/notes/id', 'GET', None, 'Returns notes with the given id'),
        ('/notes/create/', 'POST', {'body': ""}, 'Create a new note'),
        ('/notes/id/update/', 'PUT', {'body': ""}, 'Update note based on passed id'),
        ('/notes/id/delete/', 'DELETE', None, 'Delete note based on passed id'),
    ]
    routes = [
        {
            'Endpoint': endpoint,
            'method': method,
            'body': body,
            'description': description,
        }
        for endpoint, method, body, description in endpoint_specs
    ]
    return Response(routes)
@api_view(['GET'])
def getNotes(req):
    """List every note in the database."""
    all_notes = Note.objects.all()
    return Response(NoteSerializer(all_notes, many=True).data)
@api_view(['GET'])
def getNote(req, pk):
    """Return a single note by primary key; 404 if it does not exist."""
    try:
        note = Note.objects.get(id=pk)
    except Note.DoesNotExist:
        # Previously an unknown id bubbled up as a 500; return a clean 404.
        return Response({'detail': 'Note not found'}, status=404)
    serializer = NoteSerializer(note, many=False)
    return Response(serializer.data)
@api_view(['POST'])
def createNote(req):
    """Create a note from the request payload (expects title and body)."""
    payload = req.data
    new_note = Note.objects.create(
        title=payload['title'],
        body=payload['body'],
    )
    return Response(NoteSerializer(new_note, many=False).data)
@api_view(['PUT'])
def updateNote(req, pk):
    """Update a note's fields from the payload; 400 on invalid input."""
    data = req.data
    note = Note.objects.get(id=pk)
    serializer = NoteSerializer(note, data=data)
    if not serializer.is_valid():
        # Previously invalid payloads were silently ignored and the stale
        # data returned with a 200; surface the validation errors instead.
        return Response(serializer.errors, status=400)
    serializer.save()
    return Response(serializer.data)
@api_view(['DELETE'])
def deleteNote(req, pk):
    """Delete the note with the given primary key."""
    doomed = Note.objects.get(id=pk)
    doomed.delete()
    return Response("Note deleted!")
|
{"/Car_Pooling/home/admin.py": ["/Car_Pooling/home/models.py"], "/Hello_World/modelBasics/admin.py": ["/Hello_World/modelBasics/models.py"], "/Hello_World/modelBasics/forms.py": ["/Hello_World/modelBasics/models.py"], "/rest_api/notes/views.py": ["/rest_api/notes/serializers.py"], "/Hello_World/modelBasics/views.py": ["/Hello_World/modelBasics/models.py", "/Hello_World/modelBasics/forms.py"], "/Car_Pooling/home/views.py": ["/Car_Pooling/home/models.py"]}
|
7,195
|
Suman2023/Lets-Django
|
refs/heads/main
|
/Hello_World/modelBasics/views.py
|
from .models import Employee, Person
from .forms import EmployeeForm
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
def index(request):
    """Render the employee form; create Person + Employee rows on valid POST."""
    if request.method == "POST":
        form = EmployeeForm(request.POST)
        if not form.is_valid():
            return render(request, "modelBasics/index.html", {"form": form})
        cleaned = form.cleaned_data
        # The form mixes person and employee fields; split them here.
        person = Person.objects.create(
            first_name=cleaned["first_name"],
            last_name=cleaned["last_name"],
            gender=cleaned["gender"])
        Employee.objects.create(
            profile=person,
            department=cleaned["department"])
        return HttpResponse("Added Successfully")
    return render(request, "modelBasics/index.html", {
        "form": EmployeeForm(),
        "lst": [1, 2, 3, 4, 5, 6]
    })
|
{"/Car_Pooling/home/admin.py": ["/Car_Pooling/home/models.py"], "/Hello_World/modelBasics/admin.py": ["/Hello_World/modelBasics/models.py"], "/Hello_World/modelBasics/forms.py": ["/Hello_World/modelBasics/models.py"], "/rest_api/notes/views.py": ["/rest_api/notes/serializers.py"], "/Hello_World/modelBasics/views.py": ["/Hello_World/modelBasics/models.py", "/Hello_World/modelBasics/forms.py"], "/Car_Pooling/home/views.py": ["/Car_Pooling/home/models.py"]}
|
7,196
|
Suman2023/Lets-Django
|
refs/heads/main
|
/Hello_World/tasks/views.py
|
from django.shortcuts import render
from django import forms
from django.http import HttpResponseRedirect
from django.urls import reverse
# Create your views here.
class NewTaskform(forms.Form): # creates form without writing html form code
    """Single-field form for adding one task to the session list."""
    task = forms.CharField(label="New Task")
    # priority = forms.IntegerField(label = "priority", min_value=1, max_value = 99)
def index(req):
    """Show the session's task list, creating an empty one on first visit."""
    if "tasks" not in req.session:
        req.session["tasks"] = []
    return render(req, "tasks/index.html", {"tasks": req.session["tasks"]})
def add(req):
    """Handle the new-task form: append to the session list on valid POST."""
    if req.method != "POST":
        return render(req, "tasks/add.html", {"form": NewTaskform()})
    form = NewTaskform(req.POST)
    if not form.is_valid():
        return render(req, "tasks/add.html", {"form": form})
    new_task = form.cleaned_data["task"]
    # ``+=`` goes through __setitem__, which marks the session as modified.
    req.session["tasks"] += [new_task]
    return HttpResponseRedirect(reverse("tasks:index"))
|
{"/Car_Pooling/home/admin.py": ["/Car_Pooling/home/models.py"], "/Hello_World/modelBasics/admin.py": ["/Hello_World/modelBasics/models.py"], "/Hello_World/modelBasics/forms.py": ["/Hello_World/modelBasics/models.py"], "/rest_api/notes/views.py": ["/rest_api/notes/serializers.py"], "/Hello_World/modelBasics/views.py": ["/Hello_World/modelBasics/models.py", "/Hello_World/modelBasics/forms.py"], "/Car_Pooling/home/views.py": ["/Car_Pooling/home/models.py"]}
|
7,197
|
Suman2023/Lets-Django
|
refs/heads/main
|
/Hello_World/modelBasics/migrations/0004_employee.py
|
# Generated by Django 3.2.5 on 2021-07-17 19:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the Employee model: a Person FK plus a department choice."""
    dependencies = [
        ('modelBasics', '0003_alter_person_gender'),
    ]
    operations = [
        migrations.CreateModel(
            name='Employee',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('department', models.CharField(choices=[('FS', 'Full Stack'), ('FE', 'Front End'), ('BE', 'Backend')], default='department', max_length=2)),
                ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='modelBasics.person')),
            ],
        ),
    ]
|
{"/Car_Pooling/home/admin.py": ["/Car_Pooling/home/models.py"], "/Hello_World/modelBasics/admin.py": ["/Hello_World/modelBasics/models.py"], "/Hello_World/modelBasics/forms.py": ["/Hello_World/modelBasics/models.py"], "/rest_api/notes/views.py": ["/rest_api/notes/serializers.py"], "/Hello_World/modelBasics/views.py": ["/Hello_World/modelBasics/models.py", "/Hello_World/modelBasics/forms.py"], "/Car_Pooling/home/views.py": ["/Car_Pooling/home/models.py"]}
|
7,198
|
Suman2023/Lets-Django
|
refs/heads/main
|
/Car_Pooling/home/views.py
|
from django.shortcuts import render
from django.contrib.auth.models import User
from .models import Cities, Ride, OfferRide
# Create your views here.
def index(request):
    """Landing page, listing all known cities for the search dropdowns."""
    return render(request, "home/index.html",
                  {"cities": Cities.objects.all()})
|
{"/Car_Pooling/home/admin.py": ["/Car_Pooling/home/models.py"], "/Hello_World/modelBasics/admin.py": ["/Hello_World/modelBasics/models.py"], "/Hello_World/modelBasics/forms.py": ["/Hello_World/modelBasics/models.py"], "/rest_api/notes/views.py": ["/rest_api/notes/serializers.py"], "/Hello_World/modelBasics/views.py": ["/Hello_World/modelBasics/models.py", "/Hello_World/modelBasics/forms.py"], "/Car_Pooling/home/views.py": ["/Car_Pooling/home/models.py"]}
|
7,199
|
Suman2023/Lets-Django
|
refs/heads/main
|
/Car_Pooling/home/models.py
|
from django.contrib.auth.models import User
from django.db import models
# # Create your models here.
# class UserProfile(models.Models):
# user = models.OneToOneField(User, on_delete=models.CASCADE)
# # picture = models.TextField(null=True, blank=True)
class Cities(models.Model):
    """Lookup table of cities shown in the origin/destination selectors."""
    city = models.CharField(max_length=15)
    code = models.CharField(max_length=4)
class Ride(models.Model):
    """A ride record: who, where, when, and how many seats."""
    user_name = models.CharField(default="", max_length=50)
    origin = models.CharField(max_length=50)
    destination = models.CharField(max_length=50)
    journey_date = models.DateTimeField(auto_now=False, auto_now_add=False)
    seat_available = models.IntegerField()
class OfferRide(Ride):
origin_location = models.CharField(max_length=50)
destination_location = models.CharField(max_length=50)
contact = models.IntegerField()
fare = models.IntegerField()
|
{"/Car_Pooling/home/admin.py": ["/Car_Pooling/home/models.py"], "/Hello_World/modelBasics/admin.py": ["/Hello_World/modelBasics/models.py"], "/Hello_World/modelBasics/forms.py": ["/Hello_World/modelBasics/models.py"], "/rest_api/notes/views.py": ["/rest_api/notes/serializers.py"], "/Hello_World/modelBasics/views.py": ["/Hello_World/modelBasics/models.py", "/Hello_World/modelBasics/forms.py"], "/Car_Pooling/home/views.py": ["/Car_Pooling/home/models.py"]}
|
7,200
|
Suman2023/Lets-Django
|
refs/heads/main
|
/Hello_World/hello/views.py
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(req):  # req -> request
    """Render the hello app landing page.

    Fix: the template name used a backslash ("hello\\index.html"); Django
    template names always use forward slashes, and the backslash form fails
    to resolve on POSIX template loaders.
    """
    return render(req, "hello/index.html")
def name(req):
    """Return a fixed plain-text greeting."""
    greeting = "Hello, SUM4N!"
    return HttpResponse(greeting)
def greet(req, name):
    """Render a personalised greeting; *name* comes from the URL pattern.

    Fix: forward slash in the template name — "hello\\greet.html" contained a
    literal backslash, which is not a valid Django template path separator.
    """
    return render(req, "hello/greet.html",
                  {
                      "name": name.capitalize(),
                  })  # pass the display name to the template context
|
{"/Car_Pooling/home/admin.py": ["/Car_Pooling/home/models.py"], "/Hello_World/modelBasics/admin.py": ["/Hello_World/modelBasics/models.py"], "/Hello_World/modelBasics/forms.py": ["/Hello_World/modelBasics/models.py"], "/rest_api/notes/views.py": ["/rest_api/notes/serializers.py"], "/Hello_World/modelBasics/views.py": ["/Hello_World/modelBasics/models.py", "/Hello_World/modelBasics/forms.py"], "/Car_Pooling/home/views.py": ["/Car_Pooling/home/models.py"]}
|
7,201
|
Suman2023/Lets-Django
|
refs/heads/main
|
/Hello_World/users/views.py
|
from django.contrib.auth import authenticate, login, logout
from django.http.response import HttpResponseRedirect
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.urls import reverse
from .models import *
# Create your views here.
def index(req):
    """Show the user page, or bounce anonymous visitors to the login view."""
    if req.user.is_authenticated:
        return render(req, "users/user.html")
    return HttpResponseRedirect(reverse("users:login_view"))
def login_view(req):
    """Authenticate a posted username/password; render the login form otherwise."""
    if req.method == "POST":
        user = authenticate(req,
                            username=req.POST["username"],
                            password=req.POST["password"])
        if user is None:
            # Wrong credentials: redisplay the form with an error message.
            return render(req, "users/login.html",
                          {"message": "Invalid Credentials"})
        login(req, user)
        return HttpResponseRedirect(reverse("users:index"))
    # GET (or any non-POST) just shows the empty login form.
    return render(req, "users/login.html")
def logout_view(req):
    """Terminate the session and show the login page with a confirmation."""
    logout(req)
    context = {"message": "Logged Out Successfully"}
    return render(req, "users/login.html", context)
|
{"/Car_Pooling/home/admin.py": ["/Car_Pooling/home/models.py"], "/Hello_World/modelBasics/admin.py": ["/Hello_World/modelBasics/models.py"], "/Hello_World/modelBasics/forms.py": ["/Hello_World/modelBasics/models.py"], "/rest_api/notes/views.py": ["/rest_api/notes/serializers.py"], "/Hello_World/modelBasics/views.py": ["/Hello_World/modelBasics/models.py", "/Hello_World/modelBasics/forms.py"], "/Car_Pooling/home/views.py": ["/Car_Pooling/home/models.py"]}
|
7,202
|
Suman2023/Lets-Django
|
refs/heads/main
|
/Hello_World/flights/views.py
|
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.urls import reverse
from .models import *
# Create your views here.
def index(req):
    """List every flight."""
    context = {"flights": Flight.objects.all()}
    return render(req, "flights/index.html", context)
def flight(req, flight_id):
    """Show one flight with its passengers and everyone not yet booked on it."""
    record = Flight.objects.get(id=flight_id)
    context = {
        "flight": record,
        "passengers": record.passengers.all(),
        "non_passengers": Passenger.objects.exclude(flights=record).all(),
    }
    return render(req, "flights/flight.html", context)
def bookFlight(req, flight_id):
    """Book the posted passenger on a flight, then redirect to the flight page.

    Fix: the original returned None for non-POST requests, which makes Django
    raise "view didn't return an HttpResponse"; non-POST now redirects back
    to the flight detail page.
    """
    if req.method == "POST":
        flight = Flight.objects.get(id=flight_id)
        # pk is the primary key — preferred over id= for lookups.
        passenger = Passenger.objects.get(pk=int(req.POST["passenger"]))
        passenger.flights.add(flight)
        return HttpResponseRedirect(
            reverse("flights:flight", args=(flight.id, )))
    return HttpResponseRedirect(
        reverse("flights:flight", args=(flight_id, )))
|
{"/Car_Pooling/home/admin.py": ["/Car_Pooling/home/models.py"], "/Hello_World/modelBasics/admin.py": ["/Hello_World/modelBasics/models.py"], "/Hello_World/modelBasics/forms.py": ["/Hello_World/modelBasics/models.py"], "/rest_api/notes/views.py": ["/rest_api/notes/serializers.py"], "/Hello_World/modelBasics/views.py": ["/Hello_World/modelBasics/models.py", "/Hello_World/modelBasics/forms.py"], "/Car_Pooling/home/views.py": ["/Car_Pooling/home/models.py"]}
|
7,203
|
Suman2023/Lets-Django
|
refs/heads/main
|
/Hello_World/modelBasics/models.py
|
from django.db import models
from django.db.models.aggregates import Max
from django.db.models.deletion import CASCADE
# Create your models here.
# (stored_value, human_label) pairs shared by Person.gender.
gender_choices = [
    ('M', 'Male'),
    ('F', 'Female'),
    ('O', 'Other'),
]
class Person(models.Model):
    # Re-exposes the module-level choices on the class for convenience.
    gender_choices = gender_choices
    first_name = models.CharField(max_length=15)
    last_name = models.CharField(max_length=15)
    gender = models.CharField(max_length=1,
                              choices=gender_choices,
                              help_text="Select your gender")
    def __str__(self):
        # Shown in the admin list and shell representations.
        return self.first_name
class Employee(models.Model):
    # Two-letter department codes with their human-readable labels.
    dept_choices = [
        ('FS', "Full Stack"),
        ('FE', 'Front End'),
        ('BE', 'Backend'),
    ]
    profile = models.ForeignKey('Person', on_delete=models.CASCADE)
    # NOTE(review): default="department" exceeds max_length=2 and is not one
    # of dept_choices — saving with the default will fail validation; pick a
    # valid code (or drop the default) and migrate.
    department = models.CharField(max_length=2,
                                  choices=dept_choices,
                                  default="department")
    def __str__(self):
        # e.g. "Alice(FS)"
        return self.profile.first_name + '(' + self.department + ')'
|
{"/Car_Pooling/home/admin.py": ["/Car_Pooling/home/models.py"], "/Hello_World/modelBasics/admin.py": ["/Hello_World/modelBasics/models.py"], "/Hello_World/modelBasics/forms.py": ["/Hello_World/modelBasics/models.py"], "/rest_api/notes/views.py": ["/rest_api/notes/serializers.py"], "/Hello_World/modelBasics/views.py": ["/Hello_World/modelBasics/models.py", "/Hello_World/modelBasics/forms.py"], "/Car_Pooling/home/views.py": ["/Car_Pooling/home/models.py"]}
|
7,204
|
Suman2023/Lets-Django
|
refs/heads/main
|
/Hello_World/users/urls.py
|
from django.urls import path
from . import views
# URL namespace for reverse() lookups like "users:login_view".
app_name = "users"
urlpatterns = [
    path("", views.index, name="index"),
    path("login_view", views.login_view, name="login_view"),
    path("logout_view", views.logout_view, name="logout_view")
]
|
{"/Car_Pooling/home/admin.py": ["/Car_Pooling/home/models.py"], "/Hello_World/modelBasics/admin.py": ["/Hello_World/modelBasics/models.py"], "/Hello_World/modelBasics/forms.py": ["/Hello_World/modelBasics/models.py"], "/rest_api/notes/views.py": ["/rest_api/notes/serializers.py"], "/Hello_World/modelBasics/views.py": ["/Hello_World/modelBasics/models.py", "/Hello_World/modelBasics/forms.py"], "/Car_Pooling/home/views.py": ["/Car_Pooling/home/models.py"]}
|
7,205
|
Suman2023/Lets-Django
|
refs/heads/main
|
/Car_Pooling/home/migrations/0001_initial.py
|
# Generated by Django 3.2.5 on 2021-07-14 14:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the home app: Cities, Ride, and OfferRide
    # (OfferRide uses multi-table inheritance from Ride via ride_ptr).
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Cities',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('city', models.CharField(max_length=15)),
                ('code', models.CharField(max_length=4)),
            ],
        ),
        migrations.CreateModel(
            name='Ride',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('origin', models.CharField(max_length=50)),
                ('destination', models.CharField(max_length=50)),
                # NOTE(review): the current model declares DateTimeField —
                # confirm a later migration altered this column.
                ('journey_date', models.DateField()),
                ('seat_available', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='OfferRide',
            fields=[
                # Parent link implementing multi-table inheritance from Ride.
                ('ride_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='home.ride')),
                ('origin_location', models.CharField(max_length=50)),
                ('destination_location', models.CharField(max_length=50)),
                ('contact', models.IntegerField()),
                ('fare', models.IntegerField()),
            ],
            bases=('home.ride',),
        ),
    ]
|
{"/Car_Pooling/home/admin.py": ["/Car_Pooling/home/models.py"], "/Hello_World/modelBasics/admin.py": ["/Hello_World/modelBasics/models.py"], "/Hello_World/modelBasics/forms.py": ["/Hello_World/modelBasics/models.py"], "/rest_api/notes/views.py": ["/rest_api/notes/serializers.py"], "/Hello_World/modelBasics/views.py": ["/Hello_World/modelBasics/models.py", "/Hello_World/modelBasics/forms.py"], "/Car_Pooling/home/views.py": ["/Car_Pooling/home/models.py"]}
|
7,206
|
Suman2023/Lets-Django
|
refs/heads/main
|
/Car_Pooling/createride/urls.py
|
from django.urls import path
from . import views
# URL namespace for reverse() lookups like "createride:createride".
app_name = "createride"
urlpatterns = [
    path('', views.createride, name="createride"),
]
|
{"/Car_Pooling/home/admin.py": ["/Car_Pooling/home/models.py"], "/Hello_World/modelBasics/admin.py": ["/Hello_World/modelBasics/models.py"], "/Hello_World/modelBasics/forms.py": ["/Hello_World/modelBasics/models.py"], "/rest_api/notes/views.py": ["/rest_api/notes/serializers.py"], "/Hello_World/modelBasics/views.py": ["/Hello_World/modelBasics/models.py", "/Hello_World/modelBasics/forms.py"], "/Car_Pooling/home/views.py": ["/Car_Pooling/home/models.py"]}
|
7,207
|
Suman2023/Lets-Django
|
refs/heads/main
|
/Car_Pooling/getride/urls.py
|
from django.urls import path
from . import views
# URL namespace for reverse() lookups like "getride:getRide".
app_name = "getride"
urlpatterns = [
    path("", views.getRide, name="getRide"),
    # queryparams is one string path segment parsed inside the view.
    path("chat/<str:queryparams>", views.chat, name="chat")
]
|
{"/Car_Pooling/home/admin.py": ["/Car_Pooling/home/models.py"], "/Hello_World/modelBasics/admin.py": ["/Hello_World/modelBasics/models.py"], "/Hello_World/modelBasics/forms.py": ["/Hello_World/modelBasics/models.py"], "/rest_api/notes/views.py": ["/rest_api/notes/serializers.py"], "/Hello_World/modelBasics/views.py": ["/Hello_World/modelBasics/models.py", "/Hello_World/modelBasics/forms.py"], "/Car_Pooling/home/views.py": ["/Car_Pooling/home/models.py"]}
|
7,208
|
Suman2023/Lets-Django
|
refs/heads/main
|
/Hello_World/connect/views.py
|
from connect.models import Room
import json
from django.shortcuts import render
from django.utils.safestring import mark_safe
from django.contrib.auth.decorators import login_required
# Create your views here.
def index(request):
    """Render the connect app landing page."""
    template = 'connect/index.html'
    return render(request, template)
@login_required
def room(request, room_name):
    """Create the chat room (current user as admin) or fall back to the
    existing room with that name, then render the room page.

    Fix: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    narrowed to Exception.
    """
    try:
        room = Room.objects.create(
            group_admin=request.user,
            group_name=room_name,
        )
    except Exception:
        # Room already exists (create raised) — reuse the first match.
        room = Room.objects.filter(group_name=room_name)[0]
        print("in except mode", room)
    # json.dumps + mark_safe so the template can embed these in inline JS.
    return render(
        request, 'connect/room.html', {
            'room_name_json': mark_safe(json.dumps(room_name)),
            'username': mark_safe(json.dumps(request.user.username)),
            'group_admin': mark_safe(json.dumps(room.group_admin.username)),
        })
|
{"/Car_Pooling/home/admin.py": ["/Car_Pooling/home/models.py"], "/Hello_World/modelBasics/admin.py": ["/Hello_World/modelBasics/models.py"], "/Hello_World/modelBasics/forms.py": ["/Hello_World/modelBasics/models.py"], "/rest_api/notes/views.py": ["/rest_api/notes/serializers.py"], "/Hello_World/modelBasics/views.py": ["/Hello_World/modelBasics/models.py", "/Hello_World/modelBasics/forms.py"], "/Car_Pooling/home/views.py": ["/Car_Pooling/home/models.py"]}
|
7,209
|
Rafael260/TestesSurfmappers
|
refs/heads/master
|
/LoginPage.py
|
import sys
sys.path.append('..')
sys.path.append('...')
import utilidades
from BasePage import *
from HomePage import *
class LoginPage(BasePage):
    """Page object for the login screen."""

    def __init__(self, browser, baseUrl):
        super(LoginPage, self).__init__(browser, baseUrl)

    def fazer_login(self, usuario, senha):
        """Log in with the given credentials and land on the home page."""
        utilidades.logar(self.browser, usuario, senha)
        return HomePage(self.browser, self.baseUrl)
|
{"/LoginPage.py": ["/utilidades.py", "/BasePage.py", "/HomePage.py"], "/HomePage.py": ["/utilidades.py", "/BasePage.py", "/LoginPage.py", "/BuscaSessaoPage.py", "/SessionPage.py"], "/BuscaSessaoPage.py": ["/utilidades.py", "/BasePage.py"], "/SessionPage.py": ["/utilidades.py", "/BasePage.py", "/HomePage.py"], "/BasePage.py": ["/utilidades.py"], "/buscas.py": ["/utilidades.py", "/LoginPage.py"], "/ListaSessoesPage.py": ["/utilidades.py", "/BasePage.py"], "/criarSessao.py": ["/utilidades.py", "/LoginPage.py", "/ListaSessoesPage.py"]}
|
7,210
|
Rafael260/TestesSurfmappers
|
refs/heads/master
|
/HomePage.py
|
from selenium.webdriver.common.by import By
import sys
sys.path.append('..')
import utilidades
from BasePage import *
from LoginPage import *
from BuscaSessaoPage import *
from SessionPage import *
class HomePage(BasePage):
    """Page object for the authenticated home page."""

    def __init__(self, browser, baseUrl):
        super(HomePage, self).__init__(browser, baseUrl)

    def abrir_pagina_busca(self):
        """Go to the photo-search page and wait for the search bar."""
        utilidades.clicar(self.browser, By.LINK_TEXT, "Encontre sua Foto")
        utilidades.esperar_elemento(self.browser, By.ID, "searchbar_search_btn", 30)
        return BuscaSessaoPage(self.browser, self.baseUrl)

    def abrir_pagina_criar_session(self):
        """Open the session-scheduling page."""
        utilidades.acessar(self.browser, self.baseUrl + "/p/schedule")
        return SessionPage(self.browser, self.baseUrl)

    def sair_do_sistema(self):
        """Log out and return to the login page object."""
        utilidades.sair(self.browser)
        return LoginPage(self.browser, self.baseUrl)
|
{"/LoginPage.py": ["/utilidades.py", "/BasePage.py", "/HomePage.py"], "/HomePage.py": ["/utilidades.py", "/BasePage.py", "/LoginPage.py", "/BuscaSessaoPage.py", "/SessionPage.py"], "/BuscaSessaoPage.py": ["/utilidades.py", "/BasePage.py"], "/SessionPage.py": ["/utilidades.py", "/BasePage.py", "/HomePage.py"], "/BasePage.py": ["/utilidades.py"], "/buscas.py": ["/utilidades.py", "/LoginPage.py"], "/ListaSessoesPage.py": ["/utilidades.py", "/BasePage.py"], "/criarSessao.py": ["/utilidades.py", "/LoginPage.py", "/ListaSessoesPage.py"]}
|
7,211
|
Rafael260/TestesSurfmappers
|
refs/heads/master
|
/utilidades.py
|
# -*- coding: utf-8 -*-
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import os
import sys
def acessar(driver, url):
    """Navigate the browser to *url*."""
    driver.get(url)


def logar(driver, usuario, senha):
    """Log into the site through the login form and wait for the home page."""
    clicar(driver, By.LINK_TEXT, "Entrar")
    esperar_elemento(driver, By.ID, "btn-login", 30)
    digitar(driver, By.NAME, "email", usuario)
    digitar(driver, By.NAME, "password", senha)
    clicar(driver, By.ID, "btn-login")
    # The "Vender Fotos" link only renders once the login completed.
    esperar_elemento(driver, By.LINK_TEXT, "Vender Fotos", 60)


def sair(driver):
    """Log out by hitting the logout URL directly."""
    acessar(driver, "http://200.17.142.223:6500/logout")
def esperar_elemento(driver, maneiraProcura, elemento, tempoLimite):
    """Block until the element is present in the DOM, then settle briefly."""
    condition = EC.presence_of_element_located((maneiraProcura, elemento))
    WebDriverWait(driver, tempoLimite).until(condition)
    time.sleep(2)
def esperar_elemento_visivel(driver, maneiraProcura, elemento, tempoLimite):
    """Block until the requested element is visible, then settle briefly.

    Fix: the original ignored *maneiraProcura*/*elemento* and always waited on
    By.CLASS_NAME "searchbar_option"; it now waits on the requested locator.
    """
    WebDriverWait(driver, tempoLimite).until(
        EC.visibility_of_element_located((maneiraProcura, elemento)))
    time.sleep(1)
def encontrar_elemento(driver, maneiraProcura, elemento):
    """Return the first element matching the (maneiraProcura, elemento) locator."""
    return driver.find_element(maneiraProcura,elemento)
# NOTE(review): parameter order (elemento, maneiraProcura) is swapped relative
# to every sibling helper; the body compensates, but callers can easily pass
# arguments in the conventional order by mistake — consider aligning.
def encontrar_elementos(driver, elemento, maneiraProcura):
    """Return all elements matching the (maneiraProcura, elemento) locator."""
    return driver.find_elements(maneiraProcura,elemento)
def clicar(driver, maneiraProcura, elemento):
    """Click the matching element, if one is found."""
    alvo = encontrar_elemento(driver, maneiraProcura, elemento)
    if alvo != None:
        alvo.click()


def digitar(driver, maneiraProcura, elemento, texto):
    """Clear the matching input and type *texto* into it."""
    alvo = encontrar_elemento(driver, maneiraProcura, elemento)
    if alvo != None:
        alvo.clear()
        alvo.send_keys(texto)
def coletar_nome_para_url(nome, separador):
    """Lower-case *nome* and join its space-separated parts with *separador*,
    producing a URL-friendly slug."""
    return separador.join(nome.lower().split(" "))
def selecionar_opcao_combobox(driver, procuraSelect, elementoCb, selecionado):
    """Select the <option> whose text is *selecionado* inside the <select>
    matched by @procuraSelect=elementoCb.

    Fix: builds the XPath with an f-string (the old .replace() templating
    corrupted the expression whenever a value contained one of the placeholder
    words) and uses find_element(By.XPATH, ...) — find_element_by_xpath was
    removed in Selenium 4.
    """
    expressao = f"//select[@{procuraSelect}='{elementoCb}']/option[text()='{selecionado}']"
    driver.find_element(By.XPATH, expressao).click()


def clicar_no_botao(driver, tipoElementoHtml, textoBotao):
    """Click the first *tipoElementoHtml* element whose text contains *textoBotao*.

    Same fix as above: f-string XPath and the Selenium 4 find_element API.
    """
    expressao = f"//{tipoElementoHtml}[contains(text(), '{textoBotao}')]"
    driver.find_element(By.XPATH, expressao).click()
|
{"/LoginPage.py": ["/utilidades.py", "/BasePage.py", "/HomePage.py"], "/HomePage.py": ["/utilidades.py", "/BasePage.py", "/LoginPage.py", "/BuscaSessaoPage.py", "/SessionPage.py"], "/BuscaSessaoPage.py": ["/utilidades.py", "/BasePage.py"], "/SessionPage.py": ["/utilidades.py", "/BasePage.py", "/HomePage.py"], "/BasePage.py": ["/utilidades.py"], "/buscas.py": ["/utilidades.py", "/LoginPage.py"], "/ListaSessoesPage.py": ["/utilidades.py", "/BasePage.py"], "/criarSessao.py": ["/utilidades.py", "/LoginPage.py", "/ListaSessoesPage.py"]}
|
7,212
|
Rafael260/TestesSurfmappers
|
refs/heads/master
|
/BuscaSessaoPage.py
|
import sys
sys.path.append('..')
import utilidades
from BasePage import *
from selenium.webdriver.common.by import By
import time
class BuscaSessaoPage(BasePage):
    """Page object for the session/beach search page."""

    def __init__(self, browser, baseUrl):
        super(BuscaSessaoPage, self).__init__(browser, baseUrl)

    def buscar_praia(self, estado, nomePraia):
        """Open the album list filtered by state and beach name."""
        estadoUrl = utilidades.coletar_nome_para_url(estado, "-")
        nomePraiaUrl = utilidades.coletar_nome_para_url(nomePraia, "_")
        utilidades.acessar(self.browser, self.baseUrl + "/album/list?country=brazil&state=" + estadoUrl + "&spot=" + nomePraiaUrl)

    def possui_resultados(self, nomePraia):
        """Return True when a link containing *nomePraia* appears within 10s.

        Fix: the bare `except:` also trapped KeyboardInterrupt/SystemExit;
        narrowed to Exception (the wait raises TimeoutException on miss).
        """
        try:
            utilidades.esperar_elemento(self.browser, By.PARTIAL_LINK_TEXT, nomePraia, 10)
            return True
        except Exception:
            return False
|
{"/LoginPage.py": ["/utilidades.py", "/BasePage.py", "/HomePage.py"], "/HomePage.py": ["/utilidades.py", "/BasePage.py", "/LoginPage.py", "/BuscaSessaoPage.py", "/SessionPage.py"], "/BuscaSessaoPage.py": ["/utilidades.py", "/BasePage.py"], "/SessionPage.py": ["/utilidades.py", "/BasePage.py", "/HomePage.py"], "/BasePage.py": ["/utilidades.py"], "/buscas.py": ["/utilidades.py", "/LoginPage.py"], "/ListaSessoesPage.py": ["/utilidades.py", "/BasePage.py"], "/criarSessao.py": ["/utilidades.py", "/LoginPage.py", "/ListaSessoesPage.py"]}
|
7,213
|
Rafael260/TestesSurfmappers
|
refs/heads/master
|
/SessionPage.py
|
from selenium.webdriver.common.by import By
import sys
sys.path.append('..')
sys.path.append('...')
import utilidades
from BasePage import *
from HomePage import *
class SessionPage(BasePage):
    """Page object for creating a photo session."""

    def __init__(self, browser, baseUrl):
        super(SessionPage, self).__init__(browser, baseUrl)

    def preencher_informacoes_session(self, nomePraia, dia, hora):
        """Pick the beach in the combobox and fill the date/time field."""
        utilidades.selecionar_opcao_combobox(self.browser, "id", "spots", nomePraia)
        utilidades.digitar(self.browser, By.ID, "entryDate", dia + " " + hora)

    def salvar_session(self):
        """Submit the form and wait for the profile page to load."""
        utilidades.clicar_no_botao(self.browser, "button", "Salvar")
        utilidades.esperar_elemento(self.browser, By.CLASS_NAME, "profile-photo", 20)

    def ir_para_pagina_principal(self):
        """Navigate back to the site root."""
        utilidades.acessar(self.browser, self.baseUrl)
        return HomePage(self.browser, self.baseUrl)
|
{"/LoginPage.py": ["/utilidades.py", "/BasePage.py", "/HomePage.py"], "/HomePage.py": ["/utilidades.py", "/BasePage.py", "/LoginPage.py", "/BuscaSessaoPage.py", "/SessionPage.py"], "/BuscaSessaoPage.py": ["/utilidades.py", "/BasePage.py"], "/SessionPage.py": ["/utilidades.py", "/BasePage.py", "/HomePage.py"], "/BasePage.py": ["/utilidades.py"], "/buscas.py": ["/utilidades.py", "/LoginPage.py"], "/ListaSessoesPage.py": ["/utilidades.py", "/BasePage.py"], "/criarSessao.py": ["/utilidades.py", "/LoginPage.py", "/ListaSessoesPage.py"]}
|
7,214
|
Rafael260/TestesSurfmappers
|
refs/heads/master
|
/BasePage.py
|
import sys
sys.path.append('..')
sys.path.append('...')
import utilidades
class BasePage:
    """Common state and navigation shared by every page object."""

    def __init__(self, browser, baseUrl):
        self.browser = browser
        self.baseUrl = baseUrl

    def acessar(self, url):
        """Open *url* in the wrapped browser."""
        utilidades.acessar(self.browser, url)
|
{"/LoginPage.py": ["/utilidades.py", "/BasePage.py", "/HomePage.py"], "/HomePage.py": ["/utilidades.py", "/BasePage.py", "/LoginPage.py", "/BuscaSessaoPage.py", "/SessionPage.py"], "/BuscaSessaoPage.py": ["/utilidades.py", "/BasePage.py"], "/SessionPage.py": ["/utilidades.py", "/BasePage.py", "/HomePage.py"], "/BasePage.py": ["/utilidades.py"], "/buscas.py": ["/utilidades.py", "/LoginPage.py"], "/ListaSessoesPage.py": ["/utilidades.py", "/BasePage.py"], "/criarSessao.py": ["/utilidades.py", "/LoginPage.py", "/ListaSessoesPage.py"]}
|
7,215
|
Rafael260/TestesSurfmappers
|
refs/heads/master
|
/buscas.py
|
from selenium.webdriver.common.by import By
import time, os, sys
import utilidades
from selenium import webdriver
from LoginPage import *
# Smoke test: log in, search for a beach, and report whether results appear.
driver = webdriver.Chrome()
driver.implicitly_wait(30)
base_url = "http://200.17.142.223:6500"
utilidades.acessar(driver,base_url)
paginaLogin = LoginPage(driver,base_url)
# Credentials of the dedicated test account used by this end-to-end script.
paginaPrincipal = paginaLogin.fazer_login("oliveira.rafael203@gmail.com","rafaelteste")
paginaBusca = paginaPrincipal.abrir_pagina_busca()
paginaBusca.buscar_praia("Rio Grande do Norte","Ponta negra")
# The search is case-insensitive server-side; the result link is capitalized.
if(paginaBusca.possui_resultados("Ponta Negra")):
    print("Busca OK")
else:
    print("Busca não retornou resultado!")
driver.quit()
|
{"/LoginPage.py": ["/utilidades.py", "/BasePage.py", "/HomePage.py"], "/HomePage.py": ["/utilidades.py", "/BasePage.py", "/LoginPage.py", "/BuscaSessaoPage.py", "/SessionPage.py"], "/BuscaSessaoPage.py": ["/utilidades.py", "/BasePage.py"], "/SessionPage.py": ["/utilidades.py", "/BasePage.py", "/HomePage.py"], "/BasePage.py": ["/utilidades.py"], "/buscas.py": ["/utilidades.py", "/LoginPage.py"], "/ListaSessoesPage.py": ["/utilidades.py", "/BasePage.py"], "/criarSessao.py": ["/utilidades.py", "/LoginPage.py", "/ListaSessoesPage.py"]}
|
7,216
|
Rafael260/TestesSurfmappers
|
refs/heads/master
|
/ListaSessoesPage.py
|
from selenium.webdriver.common.by import By
import sys
sys.path.append('..')
sys.path.append('...')
import utilidades
from BasePage import *
class ListaSessoesPage(BasePage):
    """Page object for the scheduled-sessions listing."""

    def __init__(self, browser, baseUrl):
        super(ListaSessoesPage, self).__init__(browser, baseUrl)

    def possui_sessao(self, nomePraia, dia, hora):
        """Return True when a table cell "<td>dia hora</td>" is in the page.

        Fix: the old .replace("dia", dia).replace("hora", hora) templating
        corrupted the expected cell whenever an argument contained the literal
        words 'dia' or 'hora'; an f-string interpolates safely.
        (nomePraia is accepted for interface compatibility but, as before,
        is not part of the match.)
        """
        linha_esperada = f"<td>{dia} {hora}</td>"
        return linha_esperada in self.browser.page_source
|
{"/LoginPage.py": ["/utilidades.py", "/BasePage.py", "/HomePage.py"], "/HomePage.py": ["/utilidades.py", "/BasePage.py", "/LoginPage.py", "/BuscaSessaoPage.py", "/SessionPage.py"], "/BuscaSessaoPage.py": ["/utilidades.py", "/BasePage.py"], "/SessionPage.py": ["/utilidades.py", "/BasePage.py", "/HomePage.py"], "/BasePage.py": ["/utilidades.py"], "/buscas.py": ["/utilidades.py", "/LoginPage.py"], "/ListaSessoesPage.py": ["/utilidades.py", "/BasePage.py"], "/criarSessao.py": ["/utilidades.py", "/LoginPage.py", "/ListaSessoesPage.py"]}
|
7,217
|
Rafael260/TestesSurfmappers
|
refs/heads/master
|
/criarSessao.py
|
from selenium.webdriver.common.by import By
import time, os, sys
import utilidades
from selenium import webdriver
from LoginPage import *
from ListaSessoesPage import *
# End-to-end test: log in, create a photo session, then verify it shows up
# in the scheduled-sessions list.
driver = webdriver.Chrome()
driver.implicitly_wait(30)
base_url = "http://200.17.142.223:6500"
utilidades.acessar(driver,base_url)
paginaLogin = LoginPage(driver,base_url)
# Credentials of the dedicated test account used by this script.
paginaPrincipal = paginaLogin.fazer_login("oliveira.rafael203@gmail.com","rafaelteste")
paginaSessao = paginaPrincipal.abrir_pagina_criar_session()
paginaSessao.preencher_informacoes_session("Areia Preta - Natal/RN","15/12/2017","18:30")
paginaSessao.salvar_session()
time.sleep(2)
# NOTE(review): "sheduled_sessions" looks misspelled — confirm against the
# server's actual route before changing it.
utilidades.acessar(driver,base_url+"/p/sheduled_sessions/")
paginaListaSessoes = ListaSessoesPage(driver,base_url)
if(paginaListaSessoes.possui_sessao("Areia Preta - Natal/RN","15/12/2017","18:30")):
    print("Sessão criada e encontrada")
else:
    print("Sessão não encontrada")
time.sleep(5)
driver.quit()
|
{"/LoginPage.py": ["/utilidades.py", "/BasePage.py", "/HomePage.py"], "/HomePage.py": ["/utilidades.py", "/BasePage.py", "/LoginPage.py", "/BuscaSessaoPage.py", "/SessionPage.py"], "/BuscaSessaoPage.py": ["/utilidades.py", "/BasePage.py"], "/SessionPage.py": ["/utilidades.py", "/BasePage.py", "/HomePage.py"], "/BasePage.py": ["/utilidades.py"], "/buscas.py": ["/utilidades.py", "/LoginPage.py"], "/ListaSessoesPage.py": ["/utilidades.py", "/BasePage.py"], "/criarSessao.py": ["/utilidades.py", "/LoginPage.py", "/ListaSessoesPage.py"]}
|
7,247
|
RemiPapillier/Systeme_Expert
|
refs/heads/master
|
/pythonObjects/equation.py
|
# Premisse (before the equals sign), Conclusion (after it).
class Equation:
    """One inference rule: a premisse fact list and a conclusion fact list."""

    def __init__(self, premisse, conclusion):
        # Both attributes are plain lists of single-character facts.
        self.premisse = premisse
        self.conclusion = conclusion

    def remove_conclusion(self):
        """Empty the conclusion once the rule has fired (or can never fire)."""
        self.conclusion = []
|
{"/pythonObjects/main.py": ["/pythonObjects/hypothese.py", "/pythonObjects/systeme.py", "/pythonObjects/equation.py"], "/app.py": ["/pythonObjects/main.py"], "/pythonObjects/systeme.py": ["/pythonObjects/equation.py"]}
|
7,248
|
RemiPapillier/Systeme_Expert
|
refs/heads/master
|
/pythonObjects/main.py
|
from .hypothese import *
from .systeme import *
from .equation import *
#Fonction principale appelée lorsqu'on appuie sur Start et qui prend en paramètre un dictionnaire de dictionnaire contenant toutes les données
def get_solution(myDic):
    """Run forward-chaining inference and return the derived facts.

    myDic maps string indices to dicts: entry '0' holds the starting
    'hypothese' string; entries '1'..'n-1' hold each rule's 'premisse' and
    'conclusion' strings.  Only alphabetic characters count as facts.
    Returns str(list_of_facts) in derivation order.

    Fix: the original reused the loop variable `i` both as the outer rule
    index and as the inner premisse-character iterator, shadowing the index
    inside the loop body; each loop now has its own name.
    """
    systeme = Systeme()
    hypothese = Hypothese()
    # Collect the initial facts (alphabetic chars) from the hypothesis string.
    for fact in myDic['0']['hypothese']:
        if fact.isalpha():
            hypothese.append_fact(fact)
    # Build one Equation per rule entry and register it in the system.
    for rule_index in range(1, len(myDic)):
        rule = myDic[str(rule_index)]
        premisse_list = [c for c in rule['premisse'] if c.isalpha()]
        conclusion_list = [c for c in rule['conclusion'] if c.isalpha()]
        systeme.append_equation(Equation(premisse_list, conclusion_list))
    # Prune pass: a rule with conclusions but no premisse is cleared up front.
    for eq in systeme.systeme:
        if not eq.premisse and eq.conclusion:
            eq.remove_conclusion()
    # Main loop: process facts FIFO until none remain.
    solution = []
    while hypothese.facts:
        fait = hypothese.first_item()
        for eq in systeme.systeme:
            if fait in eq.premisse:
                eq.premisse.remove(fait)
                # Premisse fully satisfied: its conclusions become new facts
                # (guarded so an already-fired rule is not re-read).
                if not eq.premisse and eq.conclusion:
                    for new_fact in eq.conclusion:
                        hypothese.append_fact(new_fact)
                    eq.remove_conclusion()
        if fait not in solution:
            solution.append(fait)
        hypothese.delete_first_item()
    # The caller expects the stringified list.
    return str(solution)
|
{"/pythonObjects/main.py": ["/pythonObjects/hypothese.py", "/pythonObjects/systeme.py", "/pythonObjects/equation.py"], "/app.py": ["/pythonObjects/main.py"], "/pythonObjects/systeme.py": ["/pythonObjects/equation.py"]}
|
7,249
|
RemiPapillier/Systeme_Expert
|
refs/heads/master
|
/app.py
|
from flask import Flask, render_template, request, jsonify
from pythonObjects.main import *
app = Flask(__name__)
@app.route('/')
def index():
    """Serve the front page."""
    return render_template('home.html')
@app.route('/solution', methods=['POST','GET'])
def solution():
    """Run the inference on the posted JSON and format the facts as '{ a b }'."""
    data = request.get_json()
    facts = get_solution(data)
    # get_solution returns str(list); keep only the fact letters.
    letters = [str(ch) for ch in facts if ch.isalpha()]
    str_sol = "{ " + "".join(letter + " " for letter in letters) + "}"
    return str_sol
# Run the development server only when executed directly.
if __name__ == '__main__':
    app.run(debug=True)
|
{"/pythonObjects/main.py": ["/pythonObjects/hypothese.py", "/pythonObjects/systeme.py", "/pythonObjects/equation.py"], "/app.py": ["/pythonObjects/main.py"], "/pythonObjects/systeme.py": ["/pythonObjects/equation.py"]}
|
7,250
|
RemiPapillier/Systeme_Expert
|
refs/heads/master
|
/pythonObjects/systeme.py
|
#Un certain nombre d'équations
from .equation import *
class Systeme:
    """Ordered collection of Equation objects making up the rule base."""

    def __init__(self):
        # Starts empty; rules are appended one by one.
        self.systeme = []

    def append_equation(self, eq):
        """Register one Equation in the system."""
        self.systeme.append(eq)
|
{"/pythonObjects/main.py": ["/pythonObjects/hypothese.py", "/pythonObjects/systeme.py", "/pythonObjects/equation.py"], "/app.py": ["/pythonObjects/main.py"], "/pythonObjects/systeme.py": ["/pythonObjects/equation.py"]}
|
7,251
|
RemiPapillier/Systeme_Expert
|
refs/heads/master
|
/pythonObjects/hypothese.py
|
# Holds the working set of facts.
class Hypothese:
    """FIFO queue of facts awaiting processing."""

    def __init__(self):
        self.facts = []

    def append_fact(self, fact):
        """Queue a fact at the back."""
        self.facts.append(fact)

    def first_item(self):
        """Peek at the oldest queued fact."""
        return self.facts[0]

    def delete_first_item(self):
        """Drop the oldest queued fact."""
        del self.facts[0]
|
{"/pythonObjects/main.py": ["/pythonObjects/hypothese.py", "/pythonObjects/systeme.py", "/pythonObjects/equation.py"], "/app.py": ["/pythonObjects/main.py"], "/pythonObjects/systeme.py": ["/pythonObjects/equation.py"]}
|
7,252
|
vmfilatova/Influx
|
refs/heads/main
|
/writeinflux.py
|
# -*- coding: utf-8 -*-
import json
import os
from influxdb import InfluxDBClient
class WriteInflux:
    """Write a batch of points to InfluxDB using the connection settings
    stored under the 'influx' key of config.json next to this module."""

    def __init__(self, data):
        """Load config, connect, ensure the database exists and write *data*."""
        self.data = data
        root_dir = os.path.dirname(os.path.abspath(__file__))
        with open(root_dir + '/config.json') as f:
            data_config = json.load(f)
        config_influx = data_config.get('influx')
        self.config_influx = config_influx
        self.database = config_influx['database']
        # Fix: the original called connect_influx() twice (once for the truth
        # test and once for the value), creating and pinging two clients;
        # connect once and reuse the client.
        client = self.connect_influx()
        if client:
            self.check_db(client)
            self.export_to_influxdb(client)

    def connect_influx(self):
        """Return a pinged InfluxDBClient, or False when the server is unreachable."""
        host = self.config_influx['host']
        port = self.config_influx['port']
        username = self.config_influx['username']
        password = self.config_influx['password']
        client = InfluxDBClient(host=host, port=port, username=username, password=password)
        try:
            client.ping()
        except Exception as e:
            print(e)
            return False
        return client

    def check_db(self, client):
        """Create self.database if missing and switch the client to it."""
        database_name = self.database
        if not ({'name': database_name} in client.get_list_database()):
            client.create_database(database_name)
        client.switch_database(database_name)

    def export_to_influxdb(self, client):
        """Write the stored time-series points to the database."""
        json_body = self.data
        print(json_body)
        client.write_points(json_body)
|
{"/getinfluxtime.py": ["/writeinflux.py"]}
|
7,253
|
vmfilatova/Influx
|
refs/heads/main
|
/getinfluxtime.py
|
import time
from writeinflux import WriteInflux
def get_influx_time(t):
    """Convert an epoch time in seconds to integer nanoseconds for InfluxDB."""
    nanos_per_second = 1000000000
    return int(t * nanos_per_second)
# Example payload: one "cluster" measurement point stamped with the current
# time in nanoseconds.
measurement = "cluster"
data = [
    {
        "measurement": measurement,
        "time": get_influx_time(time.time()),
        "fields": {
            # NOTE(review): value is a string, not a float — confirm intended.
            "pressure": "0.64"
        }
    }
]
# Constructing WriteInflux connects and writes immediately (side effect
# lives in its __init__).
WriteInflux(data)
|
{"/getinfluxtime.py": ["/writeinflux.py"]}
|
7,285
|
logambigaik/flaskpostgres
|
refs/heads/main
|
/application.py
|
from flask import Flask, render_template,request
from flask import jsonify
from models import *
app=Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"]="postgresql+psycopg2://postgres:19821983@localhost:5432/postgres"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"]=False
db.init_app(app)
'''@app.route("/home")
def index():
flights=Airlines.query.all()
return render_template("index.html",flights=flights)
'''
@app.route("/book",methods=["POST"])
def book():
    """ Book a Flight: validate the posted flight_id and add the passenger. """
    #Get form information
    name=request.form.get("name")
    try:
        flight_id = int(request.form.get("flight_id"))
    # Fix: `except Error:` referenced an undefined name and would itself raise
    # NameError; a missing/non-numeric flight_id raises TypeError/ValueError.
    except (TypeError, ValueError):
        return render_template("error.html",message='Invalid Flight number')
    #Make sure the flight exists
    flight=Airlines.query.get(flight_id)
    if flight is None:
        return render_template("error.html",message='No such flight exists')
    #Add passenger.
    passenger=Customers(name=name,flight_id=flight_id)
    db.session.add(passenger)
    db.session.commit()
    return render_template("success.html")
@app.route("/")
def flights():
    """Render the list of every flight in the airline table."""
    all_flights = Airlines.query.all()
    return render_template("flights.html", flights=all_flights)
@app.route("/flights/<int:flight_id>")
def flight(flight_id):
    """Render details for a single flight, or an error page if missing."""
    record = Airlines.query.get(flight_id)
    if record is None:
        return render_template("error.html", message="No such flight exist")
    # Passengers come via the ORM relationship — no explicit SQL needed.
    return render_template("flight.html", flight=record, passengers=record.passenger)
@app.route("/api/flights/<int:flight_id>")
def flight_api(flight_id):
    """Return JSON details about a single flight (422 if the id is unknown)."""
    record = Airlines.query.get(flight_id)
    if record is None:
        return jsonify({"error": "Invalid flight_id"}), 422
    # Passenger names via the ORM relationship — no explicit SQL needed.
    names = [p.name for p in record.passenger]
    return jsonify({
        "origin": record.origin,
        "destination": record.destination,
        "duration": record.duration,
        "passengers": names
    })
|
{"/application.py": ["/models.py"]}
|
7,286
|
logambigaik/flaskpostgres
|
refs/heads/main
|
/models.py
|
import os
from flask import Flask
# FIX: the original used `from flask_sqlalchemy import *`; import the one
# name actually used so the module namespace stays predictable (this module
# is itself star-imported by application.py).
from flask_sqlalchemy import SQLAlchemy

# Shared SQLAlchemy handle; bound to the app via db.init_app(app).
db = SQLAlchemy()
class Airlines(db.Model):
# A flight: origin, destination, duration, and its booked passengers.
__tablename__="airline"
# Surrogate primary key.
id=db.Column(db.Integer,primary_key=True)
origin=db.Column(db.String,nullable=False)
destination=db.Column(db.String,nullable=False)
# Duration units are not stated anywhere in this file — presumably minutes,
# confirm against the data.
duration=db.Column(db.Integer,nullable=False)
# One-to-many link to Customers; gives each Customer an `airlines` backref.
passenger=db.relationship("Customers",backref="airlines",lazy=True) #Class relation
def add_customer(self,name):
# Convenience helper: book *name* on this flight and commit immediately.
p=Customers(name=name,flight_id=self.id)
db.session.add(p)
db.session.commit()
class Customers(db.Model):
# A passenger booked on exactly one flight (see Airlines.passenger).
__tablename__="customer"
id=db.Column(db.Integer,primary_key=True)
name=db.Column(db.String,nullable=False)
# Owning flight; backs the Airlines.passenger relationship.
flight_id =db.Column(db.Integer,db.ForeignKey("airline.id"),nullable=False)
|
{"/application.py": ["/models.py"]}
|
7,323
|
jeremyblalock/precede
|
refs/heads/master
|
/tests/unittest.py
|
import os, sys, unittest
# NOTE(review): this file is itself named unittest.py, so it shadows the
# stdlib `unittest` for anything importing from this directory — consider
# renaming it (e.g. test_core.py).
# Hack to allow the tests to import the package from the parent directory.
sys.path.insert(0, os.path.abspath('..'))
import datastore.core
|
{"/tests/unittest.py": ["/datastore/core.py"]}
|
7,324
|
jeremyblalock/precede
|
refs/heads/master
|
/datastore/core.py
|
from datastore import Datastore
def initdb(filename):
    """Open (or create) a Datastore rooted at *filename* and return it.

    BUG FIX: the original called the undefined lowercase name ``datastore``
    (NameError); the class imported above is ``Datastore``.
    """
    return Datastore(filename)
|
{"/tests/unittest.py": ["/datastore/core.py"]}
|
7,325
|
jeremyblalock/precede
|
refs/heads/master
|
/datastore/datastore.py
|
import os
class Datastore(object):
def __init__(self, filename):
if not os.isdir(filename) and not os.path.exists:
try:
os.mkdir(os.path.expanduser(filename))
except:
raise Exception("Could not create directory '%s'" % os.path.expanduser('filename'))
try:
datafile = open('%s/data' % filename, 'w')
indexfile = open('%s/index' % filename, 'w')
except:
raise Exception("Could not create database files")
elif os.path.exists:
raise Exception('Specified filename is not a directory')
try:
self.datafile = open('%s/data' % filename, 'r+')
self.index = open('%s/index' % filename, 'r+')
except:
raise Exception('Database files appear to be corrupted')
|
{"/tests/unittest.py": ["/datastore/core.py"]}
|
7,331
|
smantinc/pyaml
|
refs/heads/master
|
/libaml/aml.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import ctypes
import struct
import pkgutil
from .utils.decorator import Struct
class ByteArrayBuffer:
    """Ordered accumulator of byte-producing parts.

    A part is anything exposing ``size`` and ``tobytes()``; raw ``bytes``
    appended directly are wrapped so they satisfy the same protocol.
    """

    def __init__(self):
        self._parts = []

    @property
    def size(self):
        """Total serialized length of all parts, in bytes."""
        return sum(part.size for part in self._parts)

    def append(self, data):
        """Append a part; bare ``bytes`` are wrapped transparently."""
        class _RawBytes:
            def __init__(self, payload):
                self._payload = payload

            @property
            def size(self):
                return len(self._payload)

            def tobytes(self):
                return self._payload

        # Keep the exact-type check of the original (subclasses pass through).
        self._parts.append(_RawBytes(data) if type(data) is bytes else data)

    def tobytes(self):
        """Concatenate every part's serialization in insertion order."""
        return b''.join(part.tobytes() for part in self._parts)
class ResTypes:
# Chunk type identifiers for Android binary XML / resource tables.  The
# numeric values are part of the on-disk format (ResourceTypes.h) and must
# not be changed.
RES_NULL_TYPE = 0x0000
RES_STRING_POOL_TYPE = 0x0001
RES_TABLE_TYPE = 0x0002
RES_XML_TYPE = 0x0003
# XML chunk types (0x0100-0x017f).
RES_XML_FIRST_CHUNK_TYPE = 0x0100
RES_XML_START_NAMESPACE_TYPE = 0x0100
RES_XML_END_NAMESPACE_TYPE = 0x0101
RES_XML_START_ELEMENT_TYPE = 0x0102
RES_XML_END_ELEMENT_TYPE = 0x0103
RES_XML_CDATA_TYPE = 0x0104
RES_XML_LAST_CHUNK_TYPE = 0x017f
RES_XML_RESOURCE_MAP_TYPE = 0x0180
# Resource-table chunk types.
RES_TABLE_PACKAGE_TYPE = 0x0200
RES_TABLE_TYPE_TYPE = 0x0201
RES_TABLE_TYPE_SPEC_TYPE = 0x0202
def parsestruct(buf, structformat):
    """Unpack *structformat* from the head of *buf*; trailing bytes ignored."""
    consumed = struct.calcsize(structformat)
    return struct.unpack(structformat, buf[:consumed])
class ResXMLElement:
    """Lightweight view over an XML element record (used for end elements).

    Stores the node header plus resolved namespace/name strings; tobytes()
    re-encodes the two 32-bit string-pool references.
    """

    def __init__(self, node, stringpool, namespace, name):
        self._node = node
        self._stringpool = stringpool
        self._namespace = namespace
        self._name = name

    def tobytes(self):
        """Serialize as (namespace ref, name ref); 0xffffffff = no namespace."""
        if self._namespace:
            ns_ref = self._stringpool.getstringref(self._namespace)
        else:
            ns_ref = 0xffffffff
        name_ref = self._stringpool.getstringref(self._name)
        return struct.pack('II', ns_ref, name_ref)

    @property
    def node(self):
        return self._node

    @property
    def nodename(self):
        return self._name

    @property
    def size(self):
        # Two unsigned 32-bit references.
        return 8
@Struct('I', ['ref'])
class ResourceRef(object):
# A single 32-bit reference into the string pool.  The project @Struct
# decorator appears to inject parse()/create()/size and the raw 'ref'
# field (see utils/decorator.py — confirm); 'ref' is overridden below so
# the referenced *string* stays authoritative and the numeric index is
# recomputed on demand.
def __init__(self, stringpool, value=None):
self._stringpool = stringpool
self._value = value
self._ref = AML.NONE_NAMESPACE_REF
@property
def value(self):
# The referenced string, or None for the "no reference" sentinel.
return self._value
@property
def ref(self):
# Recompute the index so later string-pool edits cannot stale it.
return AML.NONE_NAMESPACE_REF if self._value is None else self._stringpool.getstringref(self._value)
@ref.setter
def ref(self, ref):
# Resolve against the *original* pool contents (pre-modification order).
if ref != AML.NONE_NAMESPACE_REF:
self._value = self._stringpool.originalstrings[ref]
self._ref = ref
def tobytes(self):
return struct.pack('I', self.ref)
class ResChunk:
# Helpers around the common 8-byte chunk header (type, headerSize, chunkSize).
@staticmethod
def parse(buf):
# Returns (parsed header, raw bytes of the whole chunk).
header, nul = ResChunk.Header.parse(buf, buffer=buf)
return header, buf[:header.chunkSize]
@Struct('HHI', ['type', 'headerSize', 'chunkSize'])
class Header:
# Fixed chunk header; @Struct appears to supply parse()/create()/size and
# the three named fields — confirm against utils/decorator.py.
def __init__(self, buffer=None):
self._buf = buffer
def tobytesbybuf(self):
# Original header bytes from the source buffer (headers may be longer
# than the fixed 8 bytes).
return self._buf[:self.headerSize]
def getbody(self):
# Chunk payload between the header and the end of the chunk.
return self._buf[self.headerSize:self.chunkSize]
def getnextchunkbuf(self):
return self._buf[self.chunkSize:]
def dump(self):
# Debug aid.
print('type = 0x%04x headerSize = %d size = %d' % (self.type, self.headerSize, self.chunkSize))
# Extended start-element data: element namespace/name references plus the
# attribute-table layout (start offset, per-entry size, count) and the
# indices of the special id/class/style attributes.
@Struct([ResourceRef, ResourceRef, 'HHHHHH'], ['ns', 'name', 'attributeStart', 'attributeSize',
'attributeCount', 'idIndex', 'classIndex', 'styleIndex'])
class ResXMLTree_attrExt:
pass
@Struct('HBBI', ['size', 'res0', 'dataType', 'data'])
class Res_value(object):
# Typed value record (Res_value in Android's ResourceTypes.h).  @Struct
# appears to inject parse()/create() and the four raw fields; 'data' is
# overridden below so string values track the string pool.
# Contains no data.
TYPE_NULL = 0x00
# The 'data' holds a ResTable_ref, a reference to another resource
# table entry.
TYPE_REFERENCE = 0x01
# The 'data' holds an attribute resource identifier.
TYPE_ATTRIBUTE = 0x02
# The 'data' holds an index into the containing resource table's
# global value string pool.
TYPE_STRING = 0x03
# The 'data' holds a single-precision floating point number.
TYPE_FLOAT = 0x04
# The 'data' holds a complex number encoding a dimension value,
# such as "100in".
TYPE_DIMENSION = 0x05
# The 'data' holds a complex number encoding a fraction of a
# container.
TYPE_FRACTION = 0x06
# Beginning of integer flavors...
TYPE_FIRST_INT = 0x10
# The 'data' is a raw integer value of the form n..n.
TYPE_INT_DEC = 0x10
# The 'data' is a raw integer value of the form 0xn..n.
TYPE_INT_HEX = 0x11
# The 'data' is either 0 or 1, for input "false" or "true" respectively.
TYPE_INT_BOOLEAN = 0x12
# Beginning of color integer flavors...
TYPE_FIRST_COLOR_INT = 0x1c
# The 'data' is a raw integer value of the form #aarrggbb.
TYPE_INT_COLOR_ARGB8 = 0x1c
# The 'data' is a raw integer value of the form #rrggbb.
TYPE_INT_COLOR_RGB8 = 0x1d
# The 'data' is a raw integer value of the form #argb.
TYPE_INT_COLOR_ARGB4 = 0x1e
# The 'data' is a raw integer value of the form #rgb.
TYPE_INT_COLOR_RGB4 = 0x1f
# ...end of integer flavors.
TYPE_LAST_COLOR_INT = 0x1f
# ...end of integer flavors.
TYPE_LAST_INT = 0x1f
def __init__(self, stringpool, value=None):
self._data = 0
self._stringpool = stringpool
self._value = value
@property
def data(self):
# For strings, recompute the pool index from the cached string so pool
# edits cannot invalidate it; otherwise return the raw payload.
if self.dataType == Res_value.TYPE_STRING and self._value:
return self._stringpool.getstringref(self._value)
return self._data
@data.setter
def data(self, val):
self._data = val
# Resolve string refs against the original (pre-modification) pool order.
if self.dataType == Res_value.TYPE_STRING and val != AML.NONE_NAMESPACE_REF:
self._value = self._stringpool.originalstrings[val]
@property
def value(self):
# Human-readable rendering; other dataTypes fall through to '@%08x'.
if self.dataType == Res_value.TYPE_INT_DEC:
# c_int32 reinterprets the unsigned payload as signed.
return str(ctypes.c_int32(self._data).value)
elif self.dataType == Res_value.TYPE_STRING:
return self._stringpool.getstringbyref(self._data)
elif self.dataType == Res_value.TYPE_INT_BOOLEAN:
return 'true' if self._data else 'false'
return '@%08x' % self._data
# Common XML-node header: source line number and comment string-pool ref.
@Struct('II', ['lineNumber', 'comment'])
class ResXMLTree_node:
pass
@Struct([ResourceRef, ResourceRef, 'I', Res_value], ['ns', 'name', 'rawValue', 'typedValue'])
class ResXMLTree_attribute:
# One attribute of a start element: namespace + name refs, an optional
# raw-string ref, and a typed value.
def __init__(self, aml=None):
self._aml = aml
@property
def namespace(self):
return self.ns.value
@property
def attributename(self):
return self.name.value
def __str__(self):
# Rendered as "prefix:name" when the attribute is namespaced.
if self.namespace:
return '%s:%s' % (self._aml.namespaces[self.namespace], self.attributename)
return self.attributename
@staticmethod
def make(ns, stringpool, name, value):
# Build an attribute from a Python str/bool/int value.  Exact type()
# checks are deliberate: bool must be tested before int.
if type(value) == str:
resval = Res_value.create(8, 0, Res_value.TYPE_STRING, AML.NONE_NAMESPACE_REF, stringpool=stringpool, value=value)
elif type(value) == bool:
# Android encodes true as 0xffffffff.
valueref = 0xffffffff if value else 0
resval = Res_value.create(8, 0, Res_value.TYPE_INT_BOOLEAN, valueref, stringpool=stringpool)
elif type(value) == int:
resval = Res_value.create(8, 0, Res_value.TYPE_INT_DEC, value, stringpool=stringpool)
else:
print('Other data types aren\'t supported, sorry')
raise NotImplementedError()
return ResXMLTree_attribute.create(ns, name, 0xffffffff, resval)
def tobytes(self):
# Keep rawValue in sync for string-typed values before serializing.
if self.typedValue.dataType == Res_value.TYPE_STRING:
self.rawValue = self.typedValue.data
return self._tobytes()
@Struct([ResChunk.Header, ResXMLTree_node, ResXMLTree_attrExt], ['header', 'node', 'attrExt'])
class ResXMLTree:
# A start-element chunk together with its parsed attribute list.
def __init__(self, aml):
self._attributes = []
self._aml = aml
@property
def attributes(self):
return self._attributes
def tobytes(self):
# Refresh chunk size and attribute count before serializing the fixed
# part followed by every attribute.
self.header.chunkSize = self.size
self.attrExt.attributeCount = len(self._attributes)
return self._tobytes() + b''.join([i.tobytes() for i in self._attributes])
@property
def nodename(self):
return self.attrExt.name.value
@property
def size(self):
# _size is presumably the fixed-part size generated by @Struct — confirm.
return ResXMLTree._size + sum([i.size for i in self._attributes])
class AML:
# Android binary XML (AXML) document.  Iterate chunks with
# hasnext()/next(), optionally insert() new elements after the current
# node, then tobytes() to re-serialize the (possibly modified) document.
ANDROID_NAMESPACE = 'http://schemas.android.com/apk/res/android'
# Sentinel meaning "no string-pool reference".
NONE_NAMESPACE_REF = 0xffffffff
class StringList:
# Immutable string table with O(1) string -> index reverse lookup.
def __init__(self, strings):
self._strings = strings
self._stringmapping = dict((j, i) for i, j in enumerate(strings))
def getstringref(self, s):
return self._stringmapping[s]
def __contains__(self, item):
return item in self._stringmapping
def __getitem__(self, item):
return self._strings[item]
def __len__(self):
return len(self._strings)
class Chunk:
# Generic chunk wrapper: a parsed header plus appended child parts.
def __init__(self, header):
self._bytebuffer = ByteArrayBuffer()
self._header = header
@property
def size(self):
return self._bytebuffer.size
@property
def body(self):
return self._header.getbody()
def append(self, data):
self._bytebuffer.append(data)
def tobytes(self):
# Recompute chunkSize first; header bytes beyond the fixed 8-byte
# ResChunk.Header are copied verbatim from the source buffer.
self._header.chunkSize = self.size + self._header.headerSize
return b''.join([self._header.tobytes(),
self._header.tobytesbybuf()[ResChunk.Header.size:],
self._bytebuffer.tobytes()])
class ResourceMapChunk:
# RES_XML_RESOURCE_MAP_TYPE chunk: pairs each leading string-pool entry
# with its android R.attr.* resource id, loaded from android-attrs.json.
# NOTE(review): eval() on the packaged file works for JSON-ish content,
# but json.loads would be safer — confirm the file is trusted.
ATTRS = eval(pkgutil.get_data('libaml', 'android-attrs.json'))
def __init__(self, header, strings):
self._header = header
# Each map entry is one 32-bit resource id.
idlen = int((header.chunkSize - header.headerSize) / 4)
ids = parsestruct(header.getbody(), str(idlen) + 'I')
self._attrs = [(strings[i], j) for i, j in enumerate(ids)]
self._attrset = set([i for i, j in self._attrs])
@property
def attrs(self):
return self._attrs
@property
def attrnames(self):
return [i for i, j in self._attrs]
def __contains__(self, attrname):
return attrname in self._attrset
def append(self, attrname):
if attrname not in AML.ResourceMapChunk.ATTRS:
print("Couldn't find R.attr.%s value" % attrname)
raise NotImplementedError()
# NOTE(review): _attrset is not updated here, so appending the same
# new attribute twice via setattribute would duplicate it — confirm.
self._attrs.append((attrname, AML.ResourceMapChunk.ATTRS[attrname]))
@property
def size(self):
return self._header.headerSize + len(self._attrs) * 4
def tobytes(self):
self._header.chunkSize = self.size
resources = [i[1] for i in self._attrs]
return self._header.tobytes() + struct.pack(str(len(resources)) + 'I', *resources)
class StringPoolChunk(object):
# RES_STRING_POOL_TYPE chunk.  Input may be UTF-8 or UTF-16; tobytes()
# always re-emits UTF-16 entries.
# If set, the string index is sorted by the string values (based
# on strcmp16()).
SORTED_FLAG = 1 << 0
# String pool is encoded in UTF-8
UTF8_FLAG = 1 << 8
class _UTF16StringList:
# Decoder for UTF-16 pools: u16 length prefix, then chars, then NUL.
def __init__(self, aml):
self._aml = aml
def loadstrings(self, buf):
strings = []
indices = {}
for i in range(self._aml.stringCount):
stringlen = parsestruct(buf, 'H')[0]
s = buf[2:2 + stringlen * 2].decode('utf-16')
buf = buf[(stringlen + 1) * 2 + 2:]
strings.append(s)
indices[s] = i
return strings, indices
@property
def size(self):
return sum([len(i) * 2 + 4 for i in self._aml.strings])
class _UTF8StringList:
# Decoder for UTF-8 pools: two length bytes, then bytes, then NUL.
# NOTE(review): multi-byte UTF-8 lengths (high bit set) are not
# handled here — confirm inputs only contain short ASCII-ish strings.
def __init__(self, aml):
self._aml = aml
def loadstrings(self, buf):
strings = []
indices = {}
for i in range(self._aml.stringCount):
stringlen = parsestruct(buf, 'H')[0] & 0xff
s = buf[2:2 + stringlen].decode('utf-8')
buf = buf[(stringlen + 1) + 2:]
strings.append(s)
indices[s] = i
return strings, indices
@property
def size(self):
return sum([len(i) + 3 for i in self._aml.strings])
def __init__(self, buf):
self._resourcemap = None
self._header, self._body = ResChunk.Header.parse(buf, buffer=buf)
self.stringCount, self.styleCount, self.flags, self.stringsStart, self.stylesStart = parsestruct(buf[8:], '5I')
self._stringlist = self._UTF8StringList(self) if self.flags & AML.StringPoolChunk.UTF8_FLAG else self._UTF16StringList(self)
self._strings, self._indices = self._stringlist.loadstrings(buf[self.stringsStart:])
# Snapshot of the pool as parsed; refs into the original file resolve
# against this list even after new strings are appended.
self._originalstrings = list(self._strings)
@property
def originalstrings(self):
return self._originalstrings
@property
def size(self):
# Padded to a 4-byte boundary.
size = self.stringslen()
return size + (4 - size % 4) % 4
@property
def attrs(self):
return [] if self._resourcemap is None else self._resourcemap.attrnames
@property
def strings(self):
# Resource-map attribute names always occupy the head of the pool.
return list(self._strings if self._resourcemap is None else self._resourcemap.attrnames + self._strings)
@property
def resourcemap(self):
return self._resourcemap
@resourcemap.setter
def resourcemap(self, resourcemap):
# The first len(attrs) pool entries belong to the resource map; keep
# only the remainder locally to avoid duplication.
attrlen = len(resourcemap.attrs)
self._strings = self.strings[attrlen:]
self._resourcemap = resourcemap
def getstringref(self, s):
return self._indices[s]
def getstringbyref(self, ref):
attrslen = len(self.attrs)
return self.attrs[ref] if ref < attrslen else self._strings[ref - attrslen]
def stringslen(self):
# UTF-16 layout: per-string data + one 32-bit offset per string + header.
return sum([len(i) * 2 + 4 for i in self.strings]) + self.stringCount * 4 + self._header.headerSize
def _append(self, s):
# Bump counters only; the caller stores the string itself.
self.stringCount += 1
self.stringsStart = self.stringCount * 4 + self._header.headerSize
def _rebuildindices(self):
self._indices = dict((j, i) for i, j in enumerate(self.strings))
def setattribute(self, name, value):
# Register a (possibly new) android attribute name plus its value string.
if name not in self._resourcemap:
self._resourcemap.append(name)
self._append(name)
if type(value) is str:
self.ensure(value)
self._rebuildindices()
def ensure(self, s):
# Add *s* to the pool if absent.
if s not in self._indices:
self._strings.append(s)
self._append(s)
self._indices[s] = len(self.attrs) + len(self._strings) - 1
def tobytes(self):
# Re-serialize the pool as UTF-16: header, counters, offset table,
# string data, then zero padding up to chunkSize.
bos = ByteArrayBuffer()
bos.append(self._header)
self.stringCount = (0 if self._resourcemap is None else len(self._resourcemap.attrs)) + len(self._strings)
self.stringsStart = self.stringCount * 4 + self._header.headerSize
bos.append(struct.pack('5I', self.stringCount, self.styleCount, self.flags,
self.stringsStart, self.stylesStart))
class OffsetCalculator:
# Running byte offset of each UTF-16 entry (len prefix + chars + NUL).
def __init__(self):
self._offset = 0
def offset(self, s):
l = len(s) * 2 + 4
o = self._offset
self._offset += l
return o
calculator = OffsetCalculator()
strings = self.strings
stringmaps = [calculator.offset(i) for i in strings]
bos.append(struct.pack(str(self.stringCount) + 'I', *stringmaps))
for i in strings:
bos.append(struct.pack(str(len(i) + 2) + 'H', *([len(i)] + [ord(j) for j in i] + [0])))
self._header.chunkSize = self.size
stringslen = self.stringslen()
bos.append(b'\x00' * (self._header.chunkSize - stringslen))
return bos.tobytes()
class InsertedPlaceHolder:
# A synthetic span appended after the current node; lets callers write
# brand-new start/end elements into the output stream.
def __init__(self, aml, node):
self._aml = aml
self._node = node
self._bytebuffer = ByteArrayBuffer()
def append(self, data):
self._bytebuffer.append(data)
@property
def size(self):
return self._bytebuffer.size
def tobytes(self):
return self._bytebuffer.tobytes()
def writexmlstartelement(self, name, attrs, linenumber=0):
# Emit a start element named *name* with android-namespaced *attrs*.
stringpool = self._aml.stringpool
self._aml.stringpool.ensure(name)
attrExt = ResXMLTree_attrExt.create(ResourceRef.create(AML.NONE_NAMESPACE_REF, stringpool=stringpool),
ResourceRef.create(stringpool=stringpool, value=name),
20, 20, len(attrs), 0, 0, 0)
element = ResXMLTree.create(ResChunk.Header.create(ResTypes.RES_XML_START_ELEMENT_TYPE, 16, 0),
ResXMLTree_node.create(linenumber or self._node.lineNumber, 0xffffffff),
attrExt, aml=self)
androidns = ResourceRef(stringpool=stringpool, value=AML.ANDROID_NAMESPACE)
for k, v in attrs.items():
stringpool.setattribute(k, v)
attr = ResXMLTree_attribute.make(androidns, stringpool,
ResourceRef.create(stringpool=stringpool, value=k), v)
element.attributes.append(attr)
self._bytebuffer.append(element)
return element
def writexmlendelement(self, name, linenumber=0):
# Emit the matching end element for *name*.
self._bytebuffer.append(ResChunk.Header.create(ResTypes.RES_XML_END_ELEMENT_TYPE, 16, 24))
self._bytebuffer.append(ResXMLTree_node.create(linenumber or self._node.lineNumber, 0xffffffff))
self._bytebuffer.append(struct.pack('I', 0xffffffff))
self._bytebuffer.append(ResourceRef(self._aml.stringpool, name))
@Struct([ResourceRef, ResourceRef], ['_name', '_namespace'])
class XMLNamespace:
# Body of a start/end-namespace chunk: prefix and URI string refs.
@property
def name(self):
return self._name.value
@property
def namespace(self):
return self._namespace.value
def __init__(self, buffer):
# Parse only the outermost header eagerly; chunks are consumed lazily
# by next().
self._namespaces = {}
self._stringpool = None
self._strings = None
self._header, self._body = ResChunk.Header.parse(buffer, buffer=buffer)
self._rootchunk = AML.Chunk(self._header)
self._bufptr = self._rootchunk.body
self._firstchunk = True
@property
def stringpool(self):
return self._stringpool
@property
def strings(self):
return self._strings
@property
def namespaces(self):
# Maps namespace URI -> prefix, filled while parsing.
return self._namespaces
def hasnext(self):
return self._firstchunk or len(self._bufptr) > 0
def next(self):
# Advance to the next chunk, mirror it into _rootchunk (so the whole
# document can be re-serialized later), and return (header, body).
if self._firstchunk:
self._firstchunk = False
return self._header, self._body
self._header, chunk = ResChunk.parse(self._bufptr)
self._body = self._header.getbody()
if self._header.type == ResTypes.RES_STRING_POOL_TYPE:
self._stringpool = AML.StringPoolChunk(self._bufptr)
self._strings = AML.StringList(self._stringpool.strings)
self._rootchunk.append(self._stringpool)
elif self._header.type == ResTypes.RES_XML_START_NAMESPACE_TYPE:
self._body, nul = AML.XMLNamespace.parse(self._header.getbody(), stringpool=self._stringpool)
self._namespaces[self._body.namespace] = self._body.name
self._rootchunk.append(self._header.tobytesbybuf())
self._rootchunk.append(self._body)
elif self._header.type == ResTypes.RES_XML_START_ELEMENT_TYPE:
self._body, nul = ResXMLTree.parse(self._bufptr, aml=self, stringpool=self._stringpool)
self._rootchunk.append(self._body)
# Attributes live after attributeStart within the chunk body.
buf = self._header.getbody()[self._body.attrExt.attributeStart:]
for i in range(self._body.attrExt.attributeCount):
attribute, p = ResXMLTree_attribute.parse(buf, stringpool=self._stringpool, aml=self)
self._body.attributes.append(attribute)
buf = buf[self._body.attrExt.attributeSize:]
elif self._header.type == ResTypes.RES_XML_END_ELEMENT_TYPE:
node, nul = ResXMLTree_node.parse(self._bufptr[8:])
ns, name = parsestruct(self._header.getbody(), 'II')
self._body = ResXMLElement(node, self._stringpool, None, self._strings[name])
self._rootchunk.append(self._header.tobytesbybuf())
self._rootchunk.append(self._body)
elif self._header.type == ResTypes.RES_XML_END_NAMESPACE_TYPE:
self._body, nul = AML.XMLNamespace.parse(self._header.getbody(), stringpool=self._stringpool)
self._rootchunk.append(self._header.tobytesbybuf())
self._rootchunk.append(self._body)
elif self._header.type == ResTypes.RES_XML_RESOURCE_MAP_TYPE:
self._stringpool.resourcemap = AML.ResourceMapChunk(self._header, self._strings)
self._rootchunk.append(self._stringpool.resourcemap)
else:
# Unknown chunk types are carried through verbatim.
self._rootchunk.append(chunk)
self._bufptr = self._header.getnextchunkbuf()
return self._header, self._body
def insert(self):
# Create a placeholder right after the chunk most recently returned by
# next(); only valid when that chunk carries an XML node header.
try:
inserted = AML.InsertedPlaceHolder(self, self._body.node)
except AttributeError:
raise AssertionError('Cannot insert after none xml node types!')
self._rootchunk.append(inserted)
return inserted
def tobytes(self):
# Serialize the whole (possibly modified) document.
return self._rootchunk.tobytes()
|
{"/libaml/aml.py": ["/libaml/utils/decorator.py"], "/examples/parse.py": ["/libaml/aml.py"], "/examples/increase_version_code.py": ["/libaml/aml.py"]}
|
7,332
|
smantinc/pyaml
|
refs/heads/master
|
/examples/parse.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import getopt
from libaml.aml import AML
from libaml.aml import ResTypes
# One indentation step per nesting level in the printed XML.
INDENT_BLOCK = ' '
"""
This example demonstrates the basic usage of libaml.
It parses Android binary xml and prints it to the stdout.
"""
if __name__ == '__main__':
# -i <file>: binary XML input (required).
opts, args = getopt.getopt(sys.argv[1:], 'i:')
params = dict([(i.lstrip('-'), j) for i, j in opts])
if 'i' not in params:
print('Usage:\n%s -i android-binary-xml.xml' % sys.argv[0])
sys.exit(0)
infile = params['i']
with open(infile, 'rb') as fp:
buf = fp.read()
aml = AML(buf)
# Pending xmlns declarations; attached to the next start element printed.
namespaces = []
indent = 0
while aml.hasnext():
header, body = aml.next()
if header.type == ResTypes.RES_XML_START_ELEMENT_TYPE:
print(INDENT_BLOCK * indent + '<%s%s>' % (body.nodename, ''.join(namespaces + [' %s="%s"' % (i, i.typedValue.value) for i in body.attributes])))
namespaces = []
indent += 1
elif header.type == ResTypes.RES_XML_END_ELEMENT_TYPE:
indent -= 1
print(INDENT_BLOCK * indent + '</%s>' % body.nodename)
elif header.type == ResTypes.RES_XML_START_NAMESPACE_TYPE:
namespaces.append(' xmlns:%s="%s"' % (body.name, body.namespace))
elif header.type == ResTypes.RES_XML_TYPE:
# Document root chunk: emit the XML prolog once.
print('<?xml version="1.0" encoding="utf-8"?>')
|
{"/libaml/aml.py": ["/libaml/utils/decorator.py"], "/examples/parse.py": ["/libaml/aml.py"], "/examples/increase_version_code.py": ["/libaml/aml.py"]}
|
7,333
|
smantinc/pyaml
|
refs/heads/master
|
/examples/increase_version_code.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import getopt
from libaml.aml import AML
from libaml.aml import ResTypes
"""
This example demonstrates how to modify binary XML using libaml.
It parses AndroidManifest.xml and increases version code by one.
"""
if __name__ == '__main__':
# -i input manifest (required); -o output file, defaults to in-place.
opts, args = getopt.getopt(sys.argv[1:], 'i:o:')
params = dict([(i.lstrip('-'), j) for i, j in opts])
if 'i' not in params:
print('Usage:\n%s -i AndroidManifest.xml [-o outfile.xml]' % sys.argv[0])
sys.exit(0)
infile = params['i']
outfile = params['o'] if 'o' in params else infile
with open(infile, 'rb') as fp:
buf = fp.read()
aml = AML(buf)
# Walk every chunk; on the <manifest> start element bump versionCode by one.
while aml.hasnext():
header, body = aml.next()
if header.type == ResTypes.RES_XML_START_ELEMENT_TYPE and body.nodename == 'manifest':
for i in body.attributes:
if str(i) == 'android:versionCode':
i.typedValue.data += 1
# Re-serialize the full (modified) document.
with open(outfile, 'wb') as fp:
fp.write(aml.tobytes())
print('Done.')
|
{"/libaml/aml.py": ["/libaml/utils/decorator.py"], "/examples/parse.py": ["/libaml/aml.py"], "/examples/increase_version_code.py": ["/libaml/aml.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.