index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
35,287
|
jGaboardi/watermark
|
refs/heads/master
|
/watermark/tests/test_watermark.py
|
# -*- coding: utf-8 -*-
import sys
import os
sys.path.append(os.path.join("../watermark"))
import watermark
def test_defaults():
a = watermark.watermark()
txt = a.split('\n')
clean_txt = []
for t in txt:
t = t.strip()
if t:
t = t.split(':')[0]
clean_txt.append(t.strip())
clean_txt = set(clean_txt)
expected = [
'Last updated',
'Python implementation',
'Python version',
'IPython version',
'Compiler',
'OS',
'Release',
'Machine',
'Processor',
'CPU cores',
'Architecture']
for i in expected:
assert i in clean_txt, print(f'{i} not in {clean_txt}')
|
{"/watermark/watermark.py": ["/watermark/version.py"], "/watermark/magic.py": ["/watermark/__init__.py"], "/watermark/tests/test_watermark.py": ["/watermark/__init__.py"], "/watermark/__init__.py": ["/watermark/version.py", "/watermark/magic.py", "/watermark/watermark.py"], "/watermark/tests/test_watermark_gpu.py": ["/watermark/__init__.py"]}
|
35,288
|
jGaboardi/watermark
|
refs/heads/master
|
/watermark/__init__.py
|
# Sebastian Raschka 2014-2018
# IPython magic function to print date/time stamps and
# various system information.
# Author: Sebastian Raschka <sebastianraschka.com>
#
# License: BSD 3 clause
from __future__ import absolute_import
from .version import __version__
from watermark.magic import *
from watermark.watermark import watermark
__all__ = ["watermark", "magic"]
|
{"/watermark/watermark.py": ["/watermark/version.py"], "/watermark/magic.py": ["/watermark/__init__.py"], "/watermark/tests/test_watermark.py": ["/watermark/__init__.py"], "/watermark/__init__.py": ["/watermark/version.py", "/watermark/magic.py", "/watermark/watermark.py"], "/watermark/tests/test_watermark_gpu.py": ["/watermark/__init__.py"]}
|
35,289
|
jGaboardi/watermark
|
refs/heads/master
|
/watermark/tests/test_watermark_gpu.py
|
# -*- coding: utf-8 -*-
import sys
import os
sys.path.append(os.path.join("../watermark"))
import watermark
def test_gpu_info():
a = watermark.watermark(gpu=True)
txt = a.split('\n')
clean_txt = []
for t in txt:
t = t.strip()
if t:
t = t.split(':')[0]
clean_txt.append(t.strip())
clean_txt = set(clean_txt)
expected = [
'GPU Info',
]
for i in expected:
assert i in clean_txt, print(f'{i} not in {clean_txt}')
|
{"/watermark/watermark.py": ["/watermark/version.py"], "/watermark/magic.py": ["/watermark/__init__.py"], "/watermark/tests/test_watermark.py": ["/watermark/__init__.py"], "/watermark/__init__.py": ["/watermark/version.py", "/watermark/magic.py", "/watermark/watermark.py"], "/watermark/tests/test_watermark_gpu.py": ["/watermark/__init__.py"]}
|
35,290
|
kkkchan/WishBottle
|
refs/heads/master
|
/server(Django)/api/urls.py
|
from django.urls import path, include
from rest_framework.documentation import include_docs_urls
# from rest_framework_jwt.views import obtain_jwt_token
from . import views
urlpatterns = [
path('login', views.login),
path('userInfo', views.userInfo),
path('getData', views.getData),
path('getAllMessage', views.getAllMessage),
path('getTreeHole', views.getTreeHole),
path('setTreeHole', views.setTreeHole),
path('delTreeHole', views.delTreeHole),
path('getTreeReply', views.getTreeReply),
path('setTreeReply', views.setTreeReply),
path('doCollectAndLike', views.doCollectAndLike),
path('getMyTreeHole', views.getMyTreeHole),
path('getMyComment', views.getMyComment),
path('delComment', views.delComment),
path('getMyCollectAndLike', views.getMyCollectAndLike),
path('getMyWishBottle', views.getMyWishBottle),
path('getWishBottle', views.getWishBottle),
path('setWishBottle', views.setWishBottle),
path('delWishBottle', views.delWishBottle),
path('checkNewMessage', views.checkNewMessage),
]
|
{"/server(Django)/api/views.py": ["/server(Django)/api/models.py"]}
|
35,291
|
kkkchan/WishBottle
|
refs/heads/master
|
/server(Django)/api/migrations/0003_auto_20200226_1514.py
|
# Generated by Django 2.2.5 on 2020-02-26 07:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0002_auto_20200226_1451'),
]
operations = [
migrations.AlterField(
model_name='treehole',
name='pic',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='图片'),
),
]
|
{"/server(Django)/api/views.py": ["/server(Django)/api/models.py"]}
|
35,292
|
kkkchan/WishBottle
|
refs/heads/master
|
/server(Django)/api/models.py
|
from django.db import models
class User(models.Model):
genders = (
(0, '未知'),
(1, "男"),
(2, "女"),
)
openid = models.CharField(max_length=64, db_index=True, primary_key=True, verbose_name='open_id', )
nickname = models.CharField(max_length=20, verbose_name="用户昵称", null=True)
gender = models.PositiveIntegerField(default=0, choices=genders, verbose_name="性别")
avatarurl = models.CharField(max_length=255, default='', null=True, blank=True, verbose_name='头像')
province = models.CharField(max_length=20, null=True, verbose_name='省')
city = models.CharField(max_length=20, null=True, verbose_name='城市')
session_key = models.CharField(max_length=64, verbose_name='session_key', null=True)
cookie_key = models.CharField(max_length=64, verbose_name='cookie_key', null=True)
def __str__(self):
return self.openid
class Meta:
verbose_name = verbose_name_plural = '用户'
class WishBottle(models.Model):
writer = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="作者", related_name="writer")
picker = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="捡到的人", related_name="picker", null=True, blank=True)
time = models.DateTimeField(auto_now_add=True, verbose_name="创建时间")
content = models.TextField(max_length=255, default='', verbose_name='内容')
def __str__(self):
return self.content
class Meta:
ordering = ["-time"]
verbose_name = verbose_name_plural = '心愿瓶'
# class WishReply(models.Model):
# wishbottle = models.ForeignKey(WishBottle, on_delete=models.CASCADE, verbose_name="心愿瓶")
# replyer = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="回复者")
# time = models.DateTimeField(auto_now_add=True, verbose_name="创建时间")
# content = models.TextField(max_length=255, default='', verbose_name='内容')
# def __str__(self):
# return self.content
# class Meta:
# ordering = ["-time"]
# verbose_name = verbose_name_plural = '心愿瓶回复'
class TreeHole(models.Model):
writer = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="作者")
likes = models.PositiveIntegerField(default=0, verbose_name="赞")
time = models.DateTimeField(auto_now_add=True, verbose_name="创建时间")
replynum = models.IntegerField(default=0, verbose_name='回复数')
title = models.CharField(max_length=50, verbose_name='标题')
pic = models.CharField(max_length=255, verbose_name='图片', null=True, blank=True,)
content = models.TextField(max_length=255, default='', verbose_name="内容")
def __str__(self):
return self.title
class Meta:
ordering = ["-time"]
verbose_name = verbose_name_plural = '树洞'
class TreeHoleReply(models.Model):
treehole_id = models.ForeignKey(TreeHole, on_delete=models.CASCADE, verbose_name="树洞")
answered_id = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="回复者")
time = models.DateTimeField(auto_now_add=True, verbose_name="创建时间")
content = models.TextField(max_length=255, default='', verbose_name="内容")
def __str__(self):
return self.content
class Meta:
ordering = ["-time"]
verbose_name = verbose_name_plural = '树洞回复'
# class SysMsg(models.Model):
# user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="用户")
# flag = models.BooleanField(default=False, verbose_name='已读')
# time = models.DateTimeField(auto_now_add=True, verbose_name="创建时间")
# content = models.TextField(default="", max_length=255, verbose_name='内容')
# def __str__(self):
# return self.content
# class Meta:
# ordering = ["-time"]
# verbose_name = verbose_name_plural = '系统消息'
class Like(models.Model):
open_id = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='用户', related_name='liker')
treehole_id = models.ForeignKey(TreeHole, on_delete=models.CASCADE, verbose_name='树洞编号')
time = models.DateTimeField(auto_now_add=True, verbose_name="创建时间")
# writer_id = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='作者', related_name='beliked', null=True, blank=True)
def __str__(self):
return str(self.time)
class Meta:
ordering = ["-time"]
verbose_name = verbose_name_plural = '赞'
class Collect(models.Model):
open_id = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='用户', related_name='collecter')
treehole_id = models.ForeignKey(TreeHole, on_delete=models.CASCADE, verbose_name='树洞编号')
time = models.DateTimeField(auto_now_add=True, verbose_name="创建时间")
# writer_id = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='作者', related_name='becollected', null=True, blank=True)
def __str__(self):
return str(self.time)
class Meta:
ordering = ["-time"]
verbose_name = verbose_name_plural = '收藏'
|
{"/server(Django)/api/views.py": ["/server(Django)/api/models.py"]}
|
35,293
|
kkkchan/WishBottle
|
refs/heads/master
|
/server(Django)/api/views.py
|
# from .models import User
import requests, json, time, base64, random
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse, JsonResponse
from django.db.models import Count
from .models import WishBottle, User, TreeHoleReply, TreeHole, Like, Collect
from .WXBizDataCrypt import WXBizDataCrypt
import itertools
appid = 'wx0da3b4a331fa60f6'
secret = 'c88958fecb91fd4c13ea66eec09e921a'
# class WishBottleViewSet(viewsets.ModelViewSet):
# queryset = WishBottle.objects.all().order_by('-pk')
# serializer_class = WishBottleSerializer
# 登录 ok
@csrf_exempt
def login(request):
if request.method == 'GET':
# data = json.loads(request.body)
code = request.GET.get('code', None)
if not code:
return JsonResponse({'error': '缺少code'})
url = "https://api.weixin.qq.com/sns/jscode2session?appid={0}&secret={1}&js_code={2}&grant_type=authorization_code".format(appid, secret, code)
r = requests.get(url)
res = json.loads(r.text)
openid = res.get('openid', None)
session_key = res.get('session_key', None)
# print('openid', openid)
# print('session_key', session_key)
if not openid:
return JsonResponse({'error': '微信调用失败'})
try:
user = User.objects.get(openid=openid)
user.session_key = session_key
user.save()
# print('更改session_key为', session_key)
# print(user.session_key)
except Exception:
cookie_key = str(time.time())
user = User.objects.create(openid=openid, session_key=session_key, cookie_key=cookie_key)
cookie_key = user.cookie_key
res_data = {
'cookiekey':cookie_key,
'msg': 'login success',
}
return JsonResponse(res_data)
# 接收用户信息并存储 ok
@csrf_exempt
def userInfo(request):
cookie_key = request.META.get('HTTP_COOKIE')
try:
user = User.objects.get(cookie_key=cookie_key)
except Exception:
return HttpResponse('不存在此用户信息')
iv, encryptedData = request.POST.get('iv', None), request.POST.get('encryptedData', None)
pc = WXBizDataCrypt(appid, user.session_key)
userInfo = pc.decrypt(encryptedData, iv)
try:
openid = userInfo.get('openId')
user = User.objects.get(openid=openid)
user.gender = userInfo.get('gender')
user.avatarurl = userInfo.get('avatarUrl')
user.nickname = userInfo.get('nickName')
user.province = userInfo.get('province')
user.city = userInfo.get('city')
user.save()
return HttpResponse('接收用户信息成功')
except Exception:
return HttpResponse('接收用户信息失败')
# 返回服务器存储的信息 ok
@csrf_exempt
def getData(request):
cookie_key = request.META.get('HTTP_COOKIE')
try:
user = User.objects.get(cookie_key=cookie_key)
# print(user)
res = {
'nickname': user.nickname,
'province': user.province,
'city': user.city,
}
return JsonResponse(res)
except Exception:
return HttpResponse(status=204)
# 获取该用户写的树洞 ok
@csrf_exempt
def getMyTreeHole(request):
cookie_key = request.META.get('HTTP_COOKIE')
try:
user = User.objects.get(cookie_key=cookie_key)
treeholes = TreeHole.objects.filter(writer=user)
res = {'jsonArray':[
{'id': t.id,
'nickname': t.writer.nickname,
'writer_avatarUrl': t.writer.avatarurl,
'title': t.title,
'content': t.content,
'likeNum': t.likes,
'replyNum': t.replynum,
'strPostDate': t.time.strftime(format='%Y-%m-%d %H:%M:%S'),} for t in treeholes
]}
return JsonResponse(res)
except Exception:
return HttpResponse(status=204)
# 返回收藏或是赞过的内容 ok
@csrf_exempt
def getMyCollectAndLike(request):
try:
cookie_key = request.META.get('HTTP_COOKIE')
user = User.objects.get(cookie_key=cookie_key)
t_collect, t_like = Collect.objects.filter(open_id=user), Like.objects.filter(open_id=user)
res = {
'jsonArray_collect': [
{'id': t.treehole_id.id,
'nickname': t.treehole_id.writer.nickname,
'writer_avatarUrl': t.treehole_id.writer.avatarurl,
'title': t.treehole_id.title,
'content': t.treehole_id.content,
'likeNum': t.treehole_id.likes,
'replyNum': t.treehole_id.replynum,
'strPostDate': t.treehole_id.time.strftime(format='%Y-%m-%d %H:%M:%S'),} for t in t_collect
],
'jsonArray_like': [
{'id': t.treehole_id.id,
'nickname': t.treehole_id.writer.nickname,
'writer_avatarUrl': t.treehole_id.writer.avatarurl,
'title': t.treehole_id.title,
'content': t.treehole_id.content,
'likeNum': t.treehole_id.likes,
'replyNum': t.treehole_id.replynum,
'strPostDate': t.treehole_id.time.strftime(format='%Y-%m-%d %H:%M:%S'),} for t in t_like
],
}
return JsonResponse(res)
except Exception:
return HttpResponse(status=204)
# 返回消息 ok
@csrf_exempt
def getAllMessage(request):
try:
flag = request.GET.get('flag')
cookie_key = request.META.get('HTTP_COOKIE')
user = User.objects.get(cookie_key=cookie_key)
treeholes = TreeHole.objects.filter(writer=User.objects.get(cookie_key=cookie_key))
if flag == '1': # Like
likes = Like.objects.filter(id=-1)
# try:
for treehole in treeholes:
likes = likes | Like.objects.filter(treehole_id=treehole)
res = {'jsonArray':
[{'avatarUrl':l.open_id.avatarurl,
'nickName':l.open_id.nickname,
'strPostDate':l.time.strftime(format='%Y-%m-%d %H:%M:%S'),
'title':l.treehole_id.title,
} for l in likes]
}
return JsonResponse(res)
else:
replies = TreeHoleReply.objects.filter(id=-1)
# try:
for treehole in treeholes:
replies = replies | TreeHoleReply.objects.filter(treehole_id=treehole)
res = {'jsonArray':
[{'avatarUrl': r.answered_id.avatarurl,
'nickName': r.answered_id.nickname,
'content': r.content,
'strPostDate': r.time.strftime(format='%Y-%m-%d %H:%M:%S'),
'title': r.treehole_id.title,
'id': r.id,
} for r in replies]
}
return JsonResponse(res)
except Exception:
return HttpResponse(status=204)
# 获取树洞信息 ok
@csrf_exempt
def getTreeHole(request):
try:
cookie_key = request.META.get('HTTP_COOKIE')
user = User.objects.get(cookie_key=cookie_key)
trees = TreeHole.objects.annotate(like_num=Count('like'), reply_num=Count('treeholereply'))
for t in trees:
t.likes = t.like_num
t.replynum = t.reply_num
t.save()
res = {'jsonArray':[
{'id': t.id,
'nickName': t.writer.nickname,
'writer_avatarUrl': t.writer.avatarurl,
'title': t.title,
'content': t.content,
'likeNum': t.like_num,
'replyNum': t.reply_num,
'isMine': 1 if t.writer == user else 0,
'strPostDate': t.time.strftime(format='%Y-%m-%d %H:%M:%S'),} for t in trees
]}
return JsonResponse(res)
except Exception:
return HttpResponse(status=204,content='No such user')
# 新建树洞 ok
@csrf_exempt
def setTreeHole(request):
if request.method == 'POST':
cookie_key = request.META.get('HTTP_COOKIE')
user = User.objects.get(cookie_key=cookie_key)
data = json.loads(request.body)
title = data.get('title')
content = data.get('content')
TreeHole.objects.create(writer=user, content=content, title=title)
return HttpResponse(200)
else:
return HttpResponse(404)
# 新建树洞 ok
@csrf_exempt
def delTreeHole(request):
try:
id = request.GET.get('id')
TreeHole.objects.get(id=id).delete()
return HttpResponse(200)
except Exception:
return HttpResponse(400)
# 返回树洞评论 ok
@csrf_exempt
def getTreeReply(request):
try:
id = request.GET.get('id')
t = TreeHole.objects.get(id=id)
replies = TreeHoleReply.objects.filter(treehole_id=t).order_by('time')
# 返回数据
res = {
'treeHole':{
'id': t.id,
'nickName': t.writer.nickname,
'writer_avatarUrl': t.writer.avatarurl,
'title': t.title,
'content': t.content,
'likeNum': t.likes,
'replyNum': t.replynum,
'strPostDate': t.time.strftime(format='%Y-%m-%d %H:%M:%S')
},
'treeReplies':[
{
'id':rep.id,
'replier_avatarUrl':rep.answered_id.avatarurl,
'nickName':rep.answered_id.nickname,
'content':rep.content,
'strPostDate':rep.time.strftime(format='%Y-%m-%d %H:%M:%S')} for rep in replies
],
}
return JsonResponse(res)
except Exception:
return HttpResponse(status=204)
# 查询是否已收藏或点赞 ok
@csrf_exempt
def getMyComment(request):
try:
cookie_key = request.META.get('HTTP_COOKIE')
user = User.objects.get(cookie_key=cookie_key)
replies = TreeHoleReply.objects.all().filter(answered_id=user)
res = {'jsonArray':
[{'avatarUrl': r.answered_id.avatarurl,
'nickName': r.answered_id.nickname,
'content': r.content,
'strPostDate': r.time.strftime(format='%Y-%m-%d %H:%M:%S'),
'title': r.treehole_id.title,
'id': r.id,
} for r in replies]
}
return JsonResponse(res)
except Exception:
return HttpResponse(204)
# 查询是否已收藏或点赞 ok
@csrf_exempt
def doCollectAndLike(request):
cookie_key = request.META.get('HTTP_COOKIE')
if request.GET.get('YoN', None):
try:
t = TreeHole.objects.get(id=request.GET.get('treeHoleId'))
user = User.objects.get(cookie_key=cookie_key)
flag = request.GET.get('flag') #str 0 or 1
YoN = request.GET.get('YoN') # str true of false
if flag == '1':
if YoN == 'true':
Collect.objects.create(open_id=user, treehole_id=t)
return HttpResponse(200)
else:
collect = Collect.objects.filter(open_id=user, treehole_id=t)
collect.delete()
collect.save()
return HttpResponse(200)
else:
if YoN == 'true':
Like.objects.create(open_id=user, treehole_id=t)
return HttpResponse(200)
else:
like = Like.objects.filter(open_id=user, treehole_id=t)
like.delete()
like.save()
return HttpResponse(200)
except Exception:
return HttpResponse(204)
else:
try:
user = User.objects.get(cookie_key=cookie_key)
t = TreeHole.objects.get(id=request.GET.get('treeHoleId'))
collect = Collect.objects.filter(treehole_id=t, open_id=user)
like = Like.objects.filter(treehole_id=t, open_id=user)
isCollect = 1 if collect else 0
isLike = 1 if like else 0
res = {
'isCollect': isCollect,
'isLike': isLike,
}
return JsonResponse(res)
except Exception:
return HttpResponse(status=204)
# 接收小程序新增的评论 ok
@csrf_exempt
def setTreeReply(request):
if request.method == 'POST':
data = json.loads(request.body)
cookie_key = request.META.get('HTTP_COOKIE')
treeholeid = data.get('treeholeid')
content = data.get('content')
user = User.objects.get(cookie_key=cookie_key)
TreeHoleReply.objects.create(treehole_id=TreeHole.objects.get(id=treeholeid), answered_id=user, content=content)
return HttpResponse(200)
else:
return HttpResponse(204)
# 查询新消息 ok
@csrf_exempt
def checkNewMessage(request):
try:
cookie_key = request.META.get('HTTP_COOKIE')
user = User.objects.get(cookie_key=cookie_key)
trees = TreeHole.objects.filter(writer=user)
likes = Like.objects.filter(id=-1)
replies = TreeHoleReply.objects.filter(id=-1)
for t in trees:
likes = likes | Like.objects.filter(treehole_id=t)
replies = replies | TreeHoleReply.objects.filter(treehole_id=t)
res = {
'likeNum': len(likes),
'commentNum': len(replies),
}
return JsonResponse(res)
except Exception:
return HttpResponse(204)
# 查看我的心愿瓶
@csrf_exempt
def getMyWishBottle(request):
cookie_key = request.META.get('HTTP_COOKIE')
user = User.objects.get(cookie_key=cookie_key)
wishbottles = WishBottle.objects.filter(writer=user)
res = {
'jsonArray':[{
'id':w.id,
'itemType':1,
'content':w.content,
'strPostDate':w.time.strftime(format='%Y-%m-%d %H:%M:%S'),
} for w in wishbottles]
}
return JsonResponse(res)
# 捡心愿瓶
@csrf_exempt
def getWishBottle(request):
cookie_key = request.META.get('HTTP_COOKIE')
user = User.objects.get(cookie_key=cookie_key)
wishs = WishBottle.objects.filter(picker=None).exclude(writer=user)
if wishs:
wish = random.choice(wishs)
res = {
'id':wish.id,
'itemType':1,
'city': wish.writer.city,
'province': wish.writer.province,
'sex': wish.writer.gender,
'avatarUrl': wish.writer.avatarurl,
'content': wish.content,
'nickName': wish.writer.nickname,
'strPostDate': wish.time.strftime(format='%Y-%m-%d %H:%M:%S'),
}
return JsonResponse(res)
else:
return HttpResponse(204)
# 删除评论 ok
@csrf_exempt
def delComment(request):
try:
id = request.GET.get('id')
TreeHoleReply.objects.get(id=id).delete()
return HttpResponse(200)
except Exception:
return HttpResponse(400)
# 扔心愿瓶 ok
@csrf_exempt
def setWishBottle(request):
if request.method == 'POST':
cookie_key = request.META.get('HTTP_COOKIE')
user = User.objects.get(cookie_key=cookie_key)
content = json.loads(request.body)
content = content.get('content')
WishBottle.objects.create(writer=user, content=content)
return HttpResponse(200)
else:
return HttpResponse(404)
# 删除心愿瓶 ok
@csrf_exempt
def delWishBottle(request):
id = request.GET.get('id')
WishBottle.objects.get(id=id).delete()
return HttpResponse(200)
|
{"/server(Django)/api/views.py": ["/server(Django)/api/models.py"]}
|
35,294
|
kkkchan/WishBottle
|
refs/heads/master
|
/server(Django)/api/migrations/0001_initial.py
|
# Generated by Django 2.2.5 on 2020-02-26 06:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='TreeHole',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.CharField(max_length=255, verbose_name='内容')),
('likes', models.PositiveIntegerField(default=0, verbose_name='赞')),
('time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('replynum', models.IntegerField(default=0, verbose_name='回复数')),
('title', models.CharField(max_length=50, verbose_name='标题')),
('pic', models.CharField(max_length=255, null=True, verbose_name='图片')),
],
options={
'verbose_name': '树洞',
'verbose_name_plural': '树洞',
'ordering': ['-time'],
},
),
migrations.CreateModel(
name='User',
fields=[
('openid', models.CharField(db_index=True, max_length=64, primary_key=True, serialize=False, verbose_name='open_id')),
('nickname', models.CharField(max_length=20, null=True, verbose_name='用户昵称')),
('gender', models.PositiveIntegerField(choices=[(0, '未知'), (1, '男'), (2, '女')], default=0, verbose_name='性别')),
('avatarurl', models.CharField(blank=True, default='', max_length=255, null=True, verbose_name='头像')),
('province', models.CharField(max_length=20, null=True, verbose_name='省')),
('city', models.CharField(max_length=20, null=True, verbose_name='城市')),
('session_key', models.CharField(max_length=64, null=True, verbose_name='session_key')),
('cookie_key', models.CharField(max_length=64, null=True, verbose_name='cookie_key')),
],
options={
'verbose_name': '用户',
'verbose_name_plural': '用户',
},
),
migrations.CreateModel(
name='WishBottle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('content', models.CharField(default='', max_length=255)),
('picker', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='picker', to='api.User', verbose_name='捡到的人')),
('writer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='writer', to='api.User', verbose_name='作者')),
],
options={
'verbose_name': '心愿瓶',
'verbose_name_plural': '心愿瓶',
'ordering': ['-time'],
},
),
migrations.CreateModel(
name='WishReply',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('content', models.CharField(max_length=255)),
('replyer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.User', verbose_name='回复者')),
('wishbottle', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.WishBottle', verbose_name='心愿瓶')),
],
options={
'verbose_name': '心愿瓶回复',
'verbose_name_plural': '心愿瓶回复',
'ordering': ['-time'],
},
),
migrations.CreateModel(
name='TreeHoleReply',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('content', models.CharField(default='', max_length=255, verbose_name='内容')),
('answered_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.User', verbose_name='回复者')),
('treehole_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.TreeHole', verbose_name='树洞')),
],
options={
'verbose_name': '树洞回复',
'verbose_name_plural': '树洞回复',
'ordering': ['-time'],
},
),
migrations.AddField(
model_name='treehole',
name='writer',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.User', verbose_name='作者'),
),
migrations.CreateModel(
name='SysMsg',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField(default='', verbose_name='内容')),
('flag', models.BooleanField(default=False, verbose_name='已读')),
('time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.User', verbose_name='用户')),
],
options={
'verbose_name': '系统消息',
'verbose_name_plural': '系统消息',
'ordering': ['-time'],
},
),
migrations.CreateModel(
name='Like',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('open_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.User', verbose_name='用户')),
('treehole_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.TreeHole', verbose_name='树洞编号')),
],
options={
'verbose_name': '赞',
'verbose_name_plural': '赞',
'ordering': ['-time'],
},
),
migrations.CreateModel(
name='Collect',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('open_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.User', verbose_name='用户')),
('treehole_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.TreeHole', verbose_name='树洞编号')),
],
options={
'verbose_name': '收藏',
'verbose_name_plural': '收藏',
'ordering': ['-time'],
},
),
]
|
{"/server(Django)/api/views.py": ["/server(Django)/api/models.py"]}
|
35,295
|
kkkchan/WishBottle
|
refs/heads/master
|
/server(Django)/api/migrations/0005_auto_20200227_2141.py
|
# Generated by Django 2.2.5 on 2020-02-27 13:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0004_auto_20200227_2134'),
]
operations = [
migrations.AlterField(
model_name='collect',
name='writer_id',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='becollected', to='api.User', verbose_name='作者'),
),
migrations.AlterField(
model_name='like',
name='writer_id',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='beliked', to='api.User', verbose_name='作者'),
),
]
|
{"/server(Django)/api/views.py": ["/server(Django)/api/models.py"]}
|
35,296
|
kkkchan/WishBottle
|
refs/heads/master
|
/server(Django)/api/migrations/0004_auto_20200227_2134.py
|
# Generated by Django 2.2.5 on 2020-02-27 13:34
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('api', '0003_auto_20200226_1514'),
]
operations = [
migrations.AddField(
model_name='collect',
name='writer_id',
field=models.ForeignKey(default=django.utils.timezone.now, on_delete=django.db.models.deletion.CASCADE, related_name='becollected', to='api.User', verbose_name='作者'),
preserve_default=False,
),
migrations.AddField(
model_name='like',
name='writer_id',
field=models.ForeignKey(default=django.utils.timezone.now, on_delete=django.db.models.deletion.CASCADE, related_name='beliked', to='api.User', verbose_name='作者'),
preserve_default=False,
),
migrations.AlterField(
model_name='collect',
name='open_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='collecter', to='api.User', verbose_name='用户'),
),
migrations.AlterField(
model_name='like',
name='open_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='liker', to='api.User', verbose_name='用户'),
),
]
|
{"/server(Django)/api/views.py": ["/server(Django)/api/models.py"]}
|
35,297
|
kkkchan/WishBottle
|
refs/heads/master
|
/server(Django)/api/admin.py
|
from django.contrib import admin
from .import models
# Register your models here.
@admin.register(models.User)
class UserAdmin(admin.ModelAdmin):
list_display = [
'openid', 'nickname', 'gender', 'province',
'province', 'city'
]
@admin.register(models.WishBottle)
class WishBottle(admin.ModelAdmin):
list_display = [
'writer', 'picker', 'content', 'time',
]
# @admin.register(models.WishReply)
# class WishReply(admin.ModelAdmin):
# list_display = [
# 'wishbottle', 'replyer', 'content', 'time',
# ]
@admin.register(models.TreeHole)
class TreeHole(admin.ModelAdmin):
list_display = [
'writer', 'title', 'likes', 'replynum', 'time'
]
@admin.register(models.TreeHoleReply)
class TreeHoleReply(admin.ModelAdmin):
list_display = [
'treehole_id', 'answered_id', 'content', 'time'
]
# @admin.register(models.SysMsg)
# class SysMsg(admin.ModelAdmin):
# list_display = [
# 'user', 'content', 'time'
# ]
@admin.register(models.Like)
class Like(admin.ModelAdmin):
list_display = [
'open_id', 'treehole_id', 'time'
]
@admin.register(models.Collect)
class Collect(admin.ModelAdmin):
list_display = [
'open_id', 'treehole_id', 'time'
]
# admin.site.register(models.User)
# admin.site.register(models.WishBottle)
# admin.site.register(models.WishReply)
# admin.site.register(models.TreeHole)
# admin.site.register(models.TreeHoleReply)
# admin.site.register(models.SysMsg)
# admin.site.register(models.Collect)
# admin.site.register(models.Like)
|
{"/server(Django)/api/views.py": ["/server(Django)/api/models.py"]}
|
35,298
|
kkkchan/WishBottle
|
refs/heads/master
|
/server(Django)/api/migrations/0002_auto_20200226_1451.py
|
# Generated by Django 2.2.5 on 2020-02-26 06:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='sysmsg',
name='content',
field=models.TextField(default='', max_length=255, verbose_name='内容'),
),
migrations.AlterField(
model_name='treehole',
name='content',
field=models.TextField(default='', max_length=255, verbose_name='内容'),
),
migrations.AlterField(
model_name='treeholereply',
name='content',
field=models.TextField(default='', max_length=255, verbose_name='内容'),
),
migrations.AlterField(
model_name='wishbottle',
name='content',
field=models.TextField(default='', max_length=255, verbose_name='内容'),
),
migrations.AlterField(
model_name='wishreply',
name='content',
field=models.TextField(default='', max_length=255, verbose_name='内容'),
),
]
|
{"/server(Django)/api/views.py": ["/server(Django)/api/models.py"]}
|
35,299
|
kkkchan/WishBottle
|
refs/heads/master
|
/server(Django)/api/migrations/0006_auto_20200227_2152.py
|
# Generated by Django 2.2.5 on 2020-02-27 13:52
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0005_auto_20200227_2141'),
]
operations = [
migrations.RemoveField(
model_name='collect',
name='writer_id',
),
migrations.RemoveField(
model_name='like',
name='writer_id',
),
]
|
{"/server(Django)/api/views.py": ["/server(Django)/api/models.py"]}
|
35,300
|
RuiliangWang/FLORIS
|
refs/heads/master
|
/examples/FLORIS_Run_Notebook.py
|
# coding: utf-8
# Notebook-exported demo script: runs a FLORIS wake simulation, reports
# per-turbine quantities, plots flow-field slices, then optimizes and
# re-applies yaw angles. Executed top-to-bottom with side effects
# (plot windows, stdout); not intended to be imported.
# # Examples for running FLORIS
# In[10]:
# load modules
from floris.floris import Floris
import numpy as np
# ## Setup floris and process input file
# In[11]:
floris = Floris("example_input.json")
# ## Calculate Wake
# In[12]:
import time
t1 = time.time()
floris.farm.flow_field.calculate_wake()
t2 = time.time()
print('Time to compute wake = ', t2-t1, 's')
# ## Compute Velocities at each Turbine
# In[13]:
for coord, turbine in floris.farm.turbine_map.items():
    print(str(coord) + ":")
    print("\tCp -", turbine.Cp)
    print("\tCt -", turbine.Ct)
    print("\tpower -", turbine.power)
    print("\tai -", turbine.aI)
    print("\taverage velocity -", turbine.get_average_velocity())
# ## Plot the Flow Field (z-plane)
# In[5]:
# this plots the streamwise velocity at:
# inputs -> percent of z domain entered as a list
# 1. 20% of the z height domain (the z height domain is 2x hub height, i.e. 36m)
# 2. 50% of the z height domain (at hub height)
# 3. 80% of the z height domain (144m)
floris.farm.flow_field.plot_z_planes([0.2, 0.5, 0.8])
# ## Plot the Flow Field (x-plane)
# In[6]:
# plot a cut-through of the flow field at a particular x distance downstream.
# inputs -> percent of x domain entered as a list
floris.farm.flow_field.plot_x_planes([0.4])
# ## Optimize Wind Farm using Wake Steering
# In[7]:
import OptModules # modules used for optimizing FLORIS
import numpy as np
turbines = [turbine for _, turbine in floris.farm.flow_field.turbine_map.items()]
power_initial = np.sum([turbine.power for turbine in turbines]) # determine initial power production
# set bounds for the optimization on the yaw angles (deg)
minimum_yaw_angle = 0.0
maximum_yaw_angle = 25.0
# compute the optimal yaw angles
# NOTE(review): wake_steering appears to return angles in radians
# (np.degrees is applied below) — confirm against OptModules.
opt_yaw_angles = OptModules.wake_steering(floris,minimum_yaw_angle,maximum_yaw_angle)
print('Optimal yaw angles for:')
for i,yaw in enumerate(opt_yaw_angles):
    print('Turbine ', i, ' yaw angle = ', np.degrees(yaw))
# ## Assign New Yaw Angles
# In[8]:
# assign yaw angles to turbines
turbines = [turbine for _, turbine in floris.farm.flow_field.turbine_map.items()]
for i,turbine in enumerate(turbines):
    turbine.yaw_angle = opt_yaw_angles[i]
# ## Plot Optimized Flow Field
# In[9]:
# compute the new wake with yaw angles
floris.farm.flow_field.calculate_wake()
# optimal power
power_opt = np.sum([turbine.power for turbine in turbines])
# plot results
floris.farm.flow_field.plot_z_planes([0.5])
print('Power increased by ', 100*(power_opt-power_initial)/power_initial, '%')
# In[ ]:
# In[ ]:
|
{"/floris/flow_field.py": ["/floris/visualization_manager.py"], "/tests/flow_field_test.py": ["/floris/flow_field.py"]}
|
35,301
|
RuiliangWang/FLORIS
|
refs/heads/master
|
/floris/visualization_manager.py
|
# Copyright 2017 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
from .coordinate import Coordinate
import matplotlib.pyplot as plt
import numpy as np
class VisualizationManager():
    """
    Low-level plotting helper for FLORIS flow fields.

    Wraps matplotlib to produce 2D filled-contour plots of a single plane
    of data, plus simple turbine markers. Intended to be used as a
    singleton: exactly one instance should exist, since it owns the
    running figure counter.
    """
    def __init__(self):
        # Monotonically increasing id handed to each new matplotlib figure.
        self.figure_count = 0
    def _set_texts(self, plot_title, horizontal_axis_title, vertical_axis_title):
        # A single shared font size keeps title and axis labels consistent.
        label_size = 15
        plt.title(plot_title, fontsize=label_size)
        plt.xlabel(horizontal_axis_title, fontsize=label_size)
        plt.ylabel(vertical_axis_title, fontsize=label_size)
    def _set_colorbar(self):
        colorbar = plt.colorbar()
        colorbar.ax.tick_params(labelsize=15)
    def _set_axis(self):
        plt.axis('equal')
        plt.tick_params(which='both', labelsize=15)
    def _new_figure(self):
        # Open a fresh figure under the next available id.
        plt.figure(self.figure_count)
        self.figure_count += 1
    def _new_filled_contour(self, mesh1, mesh2, data):
        self._new_figure()
        # Color scale runs from 0 up to the data maximum.
        upper = np.amax(data)
        plt.contourf(mesh1, mesh2, data, 50,
                     cmap='gnuplot2', vmin=0, vmax=upper)
    def _plot_constant_plane(self, mesh1, mesh2, data, title, xlabel, ylabel):
        # Contour first, then decorate: labels, colorbar, equal-aspect axes.
        self._new_filled_contour(mesh1, mesh2, data)
        self._set_texts(title, xlabel, ylabel)
        self._set_colorbar()
        self._set_axis()
    def plot_constant_z(self, xmesh, ymesh, data):
        # Horizontal slice: x-y plane at fixed height.
        self._plot_constant_plane(
            xmesh, ymesh, data, "z plane", "x (m)", "y (m)")
    def plot_constant_y(self, xmesh, zmesh, data):
        # Vertical slice along the flow: x-z plane at fixed y.
        self._plot_constant_plane(
            xmesh, zmesh, data, "y plane", "x (m)", "z (m)")
    def plot_constant_x(self, ymesh, zmesh, data):
        # Cross-stream slice: y-z plane at fixed x.
        self._plot_constant_plane(
            ymesh, zmesh, data, "x plane", "y (m)", "z (m)")
    def add_turbine_marker(self, turbine, coords, wind_direction):
        # Draw the rotor as a line spanning one diameter, rotated to the
        # turbine's yaw relative to the wind direction.
        lower_tip = Coordinate(coords.x, coords.y - turbine.rotor_radius)
        upper_tip = Coordinate(coords.x, coords.y + turbine.rotor_radius)
        for tip in (lower_tip, upper_tip):
            tip.rotate_z(turbine.yaw_angle - wind_direction, coords.as_tuple())
        plt.plot([lower_tip.xprime, upper_tip.xprime],
                 [lower_tip.yprime, upper_tip.yprime], 'k', linewidth=1)
    def show(self):
        # Blocks until all open figure windows are closed.
        plt.show()
|
{"/floris/flow_field.py": ["/floris/visualization_manager.py"], "/tests/flow_field_test.py": ["/floris/flow_field.py"]}
|
35,302
|
RuiliangWang/FLORIS
|
refs/heads/master
|
/floris/flow_field.py
|
# Copyright 2017 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import numpy as np
from .visualization_manager import VisualizationManager
from .coordinate import Coordinate
class FlowField():
    """
    FlowField is at the core of the FLORIS package. This class handles the domain
    creation and initialization and computes the flow field based on the input
    wake model and turbine map. It also contains helper functions for quick flow
    field visualization.
    inputs:
        wind_speed: float - atmospheric condition
        wind_direction - atmospheric condition (radians; used to rotate the grid)
        wind_shear - atmospheric condition (power-law shear exponent)
        wind_veer - atmospheric condition
        turbulence_intensity - atmospheric condition
        wake: Wake - used to calculate the flow field
        wake_combination: WakeCombination - used to combine turbine wakes into the flow field
        turbine_map: TurbineMap - locates turbines in space
    outputs:
        self: FlowField - an instantiated FlowField object
    """
    def __init__(self,
                 wind_speed,
                 wind_direction,
                 wind_shear,
                 wind_veer,
                 turbulence_intensity,
                 wake,
                 wake_combination,
                 turbine_map):
        super().__init__()
        self.wind_speed = wind_speed
        self.wind_direction = wind_direction
        self.wind_shear = wind_shear
        self.wind_veer = wind_veer
        self.turbulence_intensity = turbulence_intensity
        self.wake = wake
        self.wake_combination = wake_combination
        self.turbine_map = turbine_map
        # initialize derived attributes and constants
        self.max_diameter = max(
            [turbine.rotor_diameter for turbine in self.turbine_map.turbines])
        # assumes all turbines share the first turbine's hub height
        self.hub_height = self.turbine_map.turbines[0].hub_height
        # number of grid points per axis (x, y, z)
        self.grid_resolution = Coordinate(100, 100, 25)
        self.xmin, self.xmax, self.ymin, self.ymax, self.zmin, self.zmax = self._set_domain_bounds()
        self.x, self.y, self.z = self._discretize_domain()
        # freestream (no-wake) field kept for deficit bookkeeping; u_field is
        # the working field updated by calculate_wake()
        self.initial_flowfield = self._initial_flowfield()
        self.u_field = self._initial_flowfield()
        self.viz_manager = VisualizationManager()
    def _set_domain_bounds(self):
        """Size the domain around the turbines: 2D upwind/lateral margins and
        10D downstream; z spans (~0, 2 * hub height]."""
        coords = self.turbine_map.coords
        x = [coord.x for coord in coords]
        y = [coord.y for coord in coords]
        # small offset keeps zmin strictly above the ground plane
        eps = 0.1
        xmin = min(x) - 2 * self.max_diameter
        xmax = max(x) + 10 * self.max_diameter
        ymin = min(y) - 2 * self.max_diameter
        ymax = max(y) + 2 * self.max_diameter
        zmin = 0 + eps
        zmax = 2 * self.hub_height
        return xmin, xmax, ymin, ymax, zmin, zmax
    def _discretize_domain(self):
        """Build the 3D meshgrid (ij indexing) covering the domain bounds."""
        x = np.linspace(self.xmin, self.xmax, self.grid_resolution.x)
        y = np.linspace(self.ymin, self.ymax, self.grid_resolution.y)
        z = np.linspace(self.zmin, self.zmax, self.grid_resolution.z)
        return np.meshgrid(x, y, z, indexing="ij")
    def _map_coordinate_to_index(self, coord):
        """Map a physical coordinate to the nearest (xi, yi, zi) grid index,
        clamped at 0 on the low side."""
        xi = max(0, int(self.grid_resolution.x * (coord.x - self.xmin - 1) \
            / (self.xmax - self.xmin)))
        yi = max(0, int(self.grid_resolution.y * (coord.y - self.ymin - 1) \
            / (self.ymax - self.ymin)))
        zi = max(0, int(self.grid_resolution.z * (coord.z - self.zmin - 1) \
            / (self.zmax - self.zmin)))
        return xi, yi, zi
    def _field_value_at_coord(self, target_coord, field):
        """Sample `field` at the grid cell closest to `target_coord`."""
        xi, yi, zi = self._map_coordinate_to_index(target_coord)
        return field[xi, yi, zi]
    def _initial_flowfield(self):
        """Freestream velocity field with a power-law vertical shear profile
        referenced to hub height."""
        # (removed: unused locals `turbines` and `max_diameter` computed here
        # served no purpose — self.max_diameter already holds the value)
        return self.wind_speed * (self.z / self.hub_height)**self.wind_shear
    def _compute_turbine_velocity_deficit(self, x, y, z, turbine, coord, deflection, wake, flowfield):
        """Delegate to the wake model's velocity-deficit function."""
        velocity_function = self.wake.get_velocity_function()
        return velocity_function(x, y, z, turbine, coord, deflection, wake, flowfield)
    def _compute_turbine_wake_deflection(self, x, y, turbine, coord, flowfield):
        """Delegate to the wake model's deflection function."""
        deflection_function = self.wake.get_deflection_function()
        return deflection_function(x, y, turbine, coord, flowfield)
    def _rotated_grid(self, angle, center_of_rotation):
        """Rotate the x/y meshes by `angle` (radians) about the given center;
        z is unaffected."""
        xoffset = self.x - center_of_rotation.x
        yoffset = self.y - center_of_rotation.y
        rotated_x = xoffset * \
            np.cos(angle) - yoffset * \
            np.sin(angle) + center_of_rotation.x
        rotated_y = xoffset * \
            np.sin(angle) + yoffset * \
            np.cos(angle) + center_of_rotation.y
        return rotated_x, rotated_y, self.z
    def _calculate_area_overlap(self, wake_velocities, freestream_velocities, turbine):
        """Fraction of the turbine's swept-area grid points affected by the
        wake (deficit larger than 0.05)."""
        # compute wake overlap based on the number of points that are not freestream velocity, i.e. affected by the wake
        count = np.sum(freestream_velocities - wake_velocities <= 0.05)
        return (turbine.grid_point_count - count) / turbine.grid_point_count
    # Public methods
    def calculate_wake(self):
        """Compute the full wake-affected velocity field.

        Rotates the grid into the wind frame, sweeps turbines upstream to
        downstream, accumulates each turbine's deficit via the wake
        combination model, and writes the result into self.u_field.
        """
        # initialize turbulence intensity at every turbine (seems sloppy)
        for coord, turbine in self.turbine_map.items():
            turbine.TI = self.turbulence_intensity
        # rotate the discrete grid and turbine map
        center_of_rotation = Coordinate(
            np.mean(np.unique(self.x)), np.mean(np.unique(self.y)))
        rotated_x, rotated_y, rotated_z = self._rotated_grid(
            self.wind_direction, center_of_rotation)
        rotated_map = self.turbine_map.rotated(
            self.wind_direction, center_of_rotation)
        # sort the turbine map upstream-to-downstream so each turbine sees
        # the wakes of all turbines ahead of it
        sorted_map = rotated_map.sorted_in_x_as_list()
        # calculate the velocity deficit and wake deflection on the mesh
        u_wake = np.zeros(self.u_field.shape)
        for coord, turbine in sorted_map:
            # update the turbine based on the velocity at its hub
            turbine.update_quantities(u_wake, coord, self, rotated_x, rotated_y, rotated_z)
            # get the wake deflecton field
            deflection = self._compute_turbine_wake_deflection(rotated_x, rotated_y, turbine, coord, self)
            # get the velocity deficit accounting for the deflection
            turb_wake = self._compute_turbine_velocity_deficit(
                rotated_x, rotated_y, rotated_z, turbine, coord, deflection, self.wake, self)
            # compute area overlap of wake on other turbines and update downstream turbine turbulence intensities
            if self.wake.velocity_model == 'gauss':
                for coord_ti, _ in sorted_map:
                    if coord_ti.x > coord.x:
                        turbine_ti = rotated_map[coord_ti]
                        # only assess the effects of the current wake
                        wake_velocities = turbine_ti._calculate_swept_area_velocities(self, self.initial_flowfield - turb_wake,
                                                                                     coord_ti, rotated_x, rotated_y, rotated_z)
                        freestream_velocities = turbine_ti._calculate_swept_area_velocities(self, self.initial_flowfield,
                                                                                           coord_ti, rotated_x, rotated_y, rotated_z)
                        # NOTE(review): `turbine` (the upstream machine) is
                        # passed here rather than `turbine_ti`; both share
                        # grid_point_count in the sample inputs — confirm intent.
                        area_overlap = self._calculate_area_overlap(wake_velocities, freestream_velocities, turbine)
                        if area_overlap > 0.0:
                            turbine_ti.TI = turbine_ti._calculate_turbulence_intensity(self,self.wake,coord_ti,coord,turbine)
            # combine this turbine's wake into the full wake field
            u_wake = self.wake_combination.combine(u_wake, turb_wake)
        # apply the velocity deficit field to the freestream
        self.u_field = self.initial_flowfield - u_wake
    # Visualization
    def _add_z_plane(self, percent_height=0.5):
        """Queue a horizontal slice at the given fraction of the z domain."""
        plane = int(self.grid_resolution.z * percent_height)
        self.viz_manager.plot_constant_z(
            self.x[:, :, plane], self.y[:, :, plane], self.u_field[:, :, plane])
        for coord, turbine in self.turbine_map.items():
            self.viz_manager.add_turbine_marker(turbine, coord, self.wind_direction)
    def _add_y_plane(self, percent_height=0.5):
        """Queue a streamwise vertical slice at the given fraction of y."""
        plane = int(self.grid_resolution.y * percent_height)
        self.viz_manager.plot_constant_y(
            self.x[:, plane, :], self.z[:, plane, :], self.u_field[:, plane, :])
    def _add_x_plane(self, percent_height=0.5):
        """Queue a cross-stream slice at the given fraction of x."""
        plane = int(self.grid_resolution.x * percent_height)
        self.viz_manager.plot_constant_x(
            self.y[plane, :, :], self.z[plane, :, :], self.u_field[plane, :, :])
    def plot_z_planes(self, planes):
        """Plot one z-slice per fraction in `planes`, then show."""
        for p in planes:
            self._add_z_plane(p)
        self.viz_manager.show()
    def plot_y_planes(self, planes):
        """Plot one y-slice per fraction in `planes`, then show."""
        for p in planes:
            self._add_y_plane(p)
        self.viz_manager.show()
    def plot_x_planes(self, planes):
        """Plot one x-slice per fraction in `planes`, then show."""
        for p in planes:
            self._add_x_plane(p)
        self.viz_manager.show()
|
{"/floris/flow_field.py": ["/floris/visualization_manager.py"], "/tests/flow_field_test.py": ["/floris/flow_field.py"]}
|
35,303
|
RuiliangWang/FLORIS
|
refs/heads/master
|
/tests/flow_field_test.py
|
"""
Copyright 2017 NREL
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import numpy as np
from floris.flow_field import FlowField
from floris.coordinate import Coordinate
from floris.wake import Wake
from floris.wake_combination import WakeCombination
from floris.turbine_map import TurbineMap
from floris.turbine import Turbine
from .sample_inputs import SampleInputs
class FlowFieldTest():
    """Fixture builder: constructs a two-turbine FlowField from SampleInputs
    for the module-level test functions below."""
    def __init__(self):
        self.sample_inputs = SampleInputs()
        self.input_dict = self._build_input_dict()
        self.instance = self._build_instance()
    def _build_input_dict(self):
        # Two identical turbines, 100 m apart along x, sharing one Turbine
        # object.
        wake = Wake(self.sample_inputs.wake)
        wake_combination = WakeCombination("sosfs")
        turbine = Turbine(self.sample_inputs.turbine)
        turbine_map = TurbineMap({
            Coordinate(0.0, 0.0): turbine,
            Coordinate(100.0, 0.0): turbine,
        })
        return {
            "wind_direction": 270.0,
            "wind_speed": 8.0,
            "wind_shear": 0.0,
            "wind_veer": 0.0,
            "turbulence_intensity": 1.0,
            "wake": wake,
            "wake_combination": wake_combination,
            "turbine_map": turbine_map
        }
    def _build_instance(self):
        # Positional arguments must match FlowField.__init__'s order.
        return FlowField(self.input_dict["wind_speed"],
                         self.input_dict["wind_direction"],
                         self.input_dict["wind_shear"],
                         self.input_dict["wind_veer"],
                         self.input_dict["turbulence_intensity"],
                         self.input_dict["wake"],
                         self.input_dict["wake_combination"],
                         self.input_dict["turbine_map"])
    def test_all(self):
        """Run every module-level test function in sequence."""
        # These names are module-level functions defined later in this file;
        # they resolve at call time, so the forward references are valid.
        test_instantiation()
        test_set_domain_bounds()
        test_discretize_domain()
        test_map_coordinate_to_index_xmin()
        test_map_coordinate_to_index_xmid()
        test_map_coordinate_to_index_xmax()
def test_instantiation():
    """
    The class should initialize with the standard inputs
    """
    test_class = FlowFieldTest()
    assert test_class.instance is not None
def test_set_domain_bounds():
    """
    The class should set the domain bounds on initialization:
    2 diameters upwind/lateral margin, 10 diameters downstream,
    z from ~0 (eps=0.1) to twice hub height.
    """
    test_class = FlowFieldTest()
    xmin, xmax, ymin, ymax, zmin, zmax = test_class.instance._set_domain_bounds()
    rotor_diameter = 126.0
    hub_height = 90.0
    # zmin == 0.1 is an exact float comparison; it holds because both sides
    # come from the identical literal 0.1 (0 + eps).
    assert xmin == 0 - 2 * rotor_diameter \
        and xmax == 100 + 10 * rotor_diameter \
        and ymin == -2 * rotor_diameter \
        and ymax == 2 * rotor_diameter \
        and zmin == 0.1 \
        and zmax == 2 * hub_height
def test_discretize_domain():
    """
    The class should discretize the domain on initialization with three
    component-arrays each of type np.ndarray and size (100, 100, 25),
    matching FlowField.grid_resolution.
    """
    test_class = FlowFieldTest()
    x, y, z = test_class.instance._discretize_domain()
    assert np.shape(x) == (100, 100, 25) and type(x) is np.ndarray \
        and np.shape(y) == (100, 100, 25) and type(y) is np.ndarray \
        and np.shape(z) == (100, 100, 25) and type(z) is np.ndarray
def test_map_coordinate_to_index_xmin():
    """
    Map a domain coordinate to an index in the field matrix. The field matrices
    are a constant size of (100, 100, 25) starting with a 0 index.
    xmin should map to index 0
    """
    test_class = FlowFieldTest()
    test_instance = test_class.instance
    rotor_diameter = 126.0
    # xmin should be index 0
    xi, yi, zi = test_instance._map_coordinate_to_index(Coordinate(0 - 2 * rotor_diameter, 0))
    assert xi == 0
def test_map_coordinate_to_index_xmid():
    """
    Map a domain coordinate to an index in the field matrix. The field matrices
    are a constant size of (100, 100, 25) starting with a 0 index.
    xmid should map to index 49 (the middle of a 100-point axis).
    """
    test_class = FlowFieldTest()
    test_instance = test_class.instance
    rotor_diameter = 126.0
    # the domain midpoint should land on index 49
    mid = ((0 - 2 * rotor_diameter) + (100 + 10 * rotor_diameter)) / 2.0
    xi, _, __ = test_instance._map_coordinate_to_index(Coordinate(mid, 0))
    assert xi == 49
def test_map_coordinate_to_index_xmax():
    """
    Map a domain coordinate to an index in the field matrix. The field matrices
    are a constant size of (100, 100, 25) starting with a 0 index.
    xmax should map to index 99 (the last index of a 100-point axis).
    """
    test_class = FlowFieldTest()
    test_instance = test_class.instance
    rotor_diameter = 126.0
    # xmax should be index 99
    xi, _, __ = test_instance._map_coordinate_to_index(Coordinate(100 + 10 * rotor_diameter, 0))
    assert xi == 99
|
{"/floris/flow_field.py": ["/floris/visualization_manager.py"], "/tests/flow_field_test.py": ["/floris/flow_field.py"]}
|
35,305
|
Aiwork/VoteCoin
|
refs/heads/master
|
/blockchain/Transaction.py
|
import rlp
from ethereum import utils
from rlp.sedes import big_endian_int, lists
from rlp.utils_py3 import encode_hex
GENESIS_PREVHASH = b'\x00' * 32
class Transaction(rlp.Serializable):
    """RLP-serializable transaction identified by the sha3 of its encoding.

    Equality and hashing are defined purely by the block hash.
    """
    def __init__(self,
                 nonce='',
                 number=0,
                 prevhash=GENESIS_PREVHASH,
                 meta=None):
        # Snapshot of the constructor arguments, forwarded to
        # rlp.Serializable. NOTE(review): this captures meta as passed
        # (possibly None), not the normalized self.meta below — confirm
        # rlp.Serializable accepts these keyword names.
        fields = {k: v for k, v in locals().items() if k != 'self'}
        self.meta = meta or {}
        self.number = number
        super(Transaction, self).__init__(**fields)
    @property
    def hash(self):
        """The binary block hash"""
        return utils.sha3(rlp.encode(self))
    def un_hash(self, key):
        # NOTE(review): decodes `key` then hashes the result; this does not
        # invert `hash` (sha3 is one-way) — confirm intended semantics.
        return utils.sha3rlp(rlp.decode(key))
    def __getattribute__(self, name):
        # Fall back to the header for attributes rlp.Serializable doesn't
        # know. NOTE(review): this class never assigns `header`; if the base
        # class doesn't provide one, the fallback itself raises.
        try:
            return rlp.Serializable.__getattribute__(self, name)
        except AttributeError:
            return getattr(self.header, name)
    def __eq__(self, other):
        """Two blocks are equal iff they have the same hash."""
        return isinstance(other, Transaction) and self.hash == other.hash
    def __hash__(self):
        return utils.big_endian_to_int(self.hash)
    def __repr__(self):
        return '<%s(#%d %s)>' % (self.__class__.__name__, self.number,
                                 encode_hex(self.hash)[:8])
    def __ne__(self, other):
        return not self.__eq__(other)
    def to_dict(self):
        """Plain-dict view used when appending to a block's transaction list."""
        return {
            'meta': self.meta,
            'hash': self.hash,
            'number': self.number
        }
|
{"/blockchain/VoteBlockChain.py": ["/blockchain/Transaction.py", "/blockchain/Block.py", "/blockchain/Database.py"], "/blockchain/test/TestVoteBlockChain.py": ["/blockchain/Block.py", "/blockchain/Database.py", "/blockchain/Transaction.py", "/blockchain/VoteBlockChain.py"]}
|
35,306
|
Aiwork/VoteCoin
|
refs/heads/master
|
/blockchain/VoteBlockChain.py
|
import logging
from time import time
import itertools
import rlp
from ethereum import utils
from blockchain.Transaction import Transaction
from blockchain.Block import Block
from blockchain.Database import Database
DEFAULT_CONFIG = {
'CONSENSUS_STRATEGY': 'vote',
'database_filename': 'vote_db.pkl'
}
DEFAULT_PREVHASH = b'\x00' * 32
HEAD_HASH_NAME = 'head_hash'
class VoteBlockChain(object):
    """Simple block chain persisted through a dict-backed Database.

    Blocks are stored under both a sequential key (b'block:<n>') and their
    own hash; the current head hash is kept under HEAD_HASH_NAME.
    """
    def __init__(self, genesis_block=None, concensus_strategy='vote',
                 database=None):
        """Create a chain, optionally seeded with a genesis block dict.

        Fix: `genesis_block` previously defaulted to a shared mutable `{}`;
        a None sentinel gives each instance its own dict.
        """
        if genesis_block is None:
            genesis_block = {}
        # NOTE(review): mutating the module-level DEFAULT_CONFIG leaks this
        # instance's strategy to later instances — confirm intended.
        DEFAULT_CONFIG.update({
            'CONSENSUS_STRATEGY': concensus_strategy
        })
        self.database = database if database is not None \
            else Database(DEFAULT_CONFIG['database_filename'])
        # self.database is never None at this point, so the old
        # `1 if self.database is None else ...` fallback was dead code.
        self.blocks_count = self.database.get_index_count()
        self.state = None
        self.current_block_transactions = []
        self.head_hash = DEFAULT_PREVHASH \
            if 'hash' not in genesis_block else genesis_block['hash']
    def add_block(self, block_dict):
        """Stamp `block_dict` with number/timestamp/prevhash, persist it and
        advance the head. Returns the new Block."""
        block_dict.update({
            'number': self.blocks_count + 1,
            'timestamp': time(),
            'prevhash': self.head_hash
        })
        block = self.get_block_from_dict(block_dict)
        # Reset the current list of transactions
        self.current_block_transactions = []
        self.blocks_count += 1
        self.persist_block(block)
        return block
    def get_block_from_dict(self, block_dict):
        """Build a Block from a stamped dict (timestamp doubles as nonce)."""
        return Block(nonce=block_dict['timestamp'],
                     number=block_dict['number'],
                     prevhash=block_dict['prevhash'])
    @staticmethod
    def hash(block):
        """sha3 of the RLP encoding of `block`."""
        return utils.sha3(rlp.encode(block))
    def persist_block(self, block):
        """Write the block under its sequence key and hash, update the head
        pointer and commit the database."""
        block_num = b'block:%d' % self.blocks_count
        self.database.put(block_num, block.hash)
        self.database.put(block.hash, rlp.encode(block))
        self.database.put(HEAD_HASH_NAME, block.hash)
        self.database.commit()
    def get_block(self, blockhash):
        """Decode and return the block stored under `blockhash`, or None if
        it is missing/undecodable (logged, not raised)."""
        try:
            block_rlp = self.database.get(blockhash)
            return rlp.decode(block_rlp, Block)
        except Exception as e:
            logging.info('Failed to get'
                         ' block={hash} error={error}'.format(hash=blockhash,
                                                              error=e))
            return None
    def get_chain(self, frm=None, to=2 ** 63 - 1):
        """Return blocks numbered [frm, to); defaults to the whole chain."""
        if frm is None:
            frm = 1
            to = self.blocks_count + 1
        chain = []
        for i in itertools.islice(itertools.count(), frm, to):
            h = self.get_blockhash_by_number(i)
            if not h:
                # stop at the first gap
                return chain
            chain.append(self.get_block(h))
        return chain
    def get_blockhash_by_number(self, number):
        """Hash of block `number`, or None if absent."""
        try:
            return self.database.get(b'block:%d' % number)
        except Exception:
            return None
    def get_head_block(self):
        """Return the block the head pointer currently references."""
        block_hash = self.database.get(HEAD_HASH_NAME)
        return self.get_block(block_hash)
    def append_meta_transaction(self, meta):
        """Wrap `meta` in a Transaction and append it to the head block."""
        return self.append_transaction(Transaction(meta=meta))
    def append_transaction(self, transaction):
        """Append `transaction` to the head block and re-persist it."""
        logging.info('Applying block transactions')
        head_block = self.get_head_block()
        self.current_block_transactions.append(transaction.to_dict())
        logging.info('Checking delegation for vote block approval')
        head_block.add_transaction(transaction)
        self.persist_block(head_block)
|
{"/blockchain/VoteBlockChain.py": ["/blockchain/Transaction.py", "/blockchain/Block.py", "/blockchain/Database.py"], "/blockchain/test/TestVoteBlockChain.py": ["/blockchain/Block.py", "/blockchain/Database.py", "/blockchain/Transaction.py", "/blockchain/VoteBlockChain.py"]}
|
35,307
|
Aiwork/VoteCoin
|
refs/heads/master
|
/blockchain/Block.py
|
import logging
import time
import rlp
from ethereum import utils
from rlp.utils_py3 import encode_hex
GENESIS_PREVHASH = b'\x00' * 32
class Block(rlp.Serializable):
    """RLP-serializable block holding a list of transaction dicts.

    Equality and hashing are defined purely by the block hash.
    """
    def __init__(self,
                 nonce='',
                 number=0,
                 prevhash=GENESIS_PREVHASH,
                 transactions=None):
        # Fix: the old `transactions=[]` default was a shared mutable list,
        # so every Block built without an explicit list appended into the
        # SAME list via add_transaction(). A None sentinel gives each block
        # its own list. (Also removed the unused `fields = {...}` snapshot.)
        if transactions is None:
            transactions = []
        self.block = None
        self.number = number
        self.prevhash = prevhash
        # the nonce doubles as the creation timestamp (see VoteBlockChain)
        self.timestamp = nonce
        self.proof = True
        self.transactions = transactions
        super(Block, self).__init__(
            transactions=transactions,
        )
    @property
    def hash(self):
        """The binary block hash"""
        return utils.sha3(rlp.encode(self))
    def un_hash(self, key):
        # NOTE(review): decodes `key` then hashes the result; this does not
        # invert `hash` — confirm intended semantics.
        return utils.sha3rlp(rlp.decode(key))
    @property
    def transaction_count(self):
        """Number of transactions currently in the block."""
        return len(self.transactions)
    def __getattribute__(self, name):
        # Fall back to the header for unknown attributes.
        try:
            return rlp.Serializable.__getattribute__(self, name)
        except AttributeError:
            return getattr(self.header, name)
    def __eq__(self, other):
        """Two blocks are equal iff they have the same hash."""
        return isinstance(other, Block) and self.hash == other.hash
    def __hash__(self):
        return utils.big_endian_to_int(self.hash)
    def __repr__(self):
        return '<%s(#%d %s)>' % (self.__class__.__name__, self.number,
                                 encode_hex(self.hash)[:8])
    def __ne__(self, other):
        return not self.__eq__(other)
    def to_dict(self, block_number=None):
        """Plain-dict view of the block.

        NOTE(review): 'prevhash' is set to this block's OWN hash and
        'timestamp' to the current time — confirm both are intended.
        """
        return {
            'transactions': self.transactions,
            'number': self.number if block_number is None else block_number,
            'timestamp': time.time(),
            'prevhash': self.hash,
            'proof': self.proof,
        }
    def add_transaction(self, transaction):
        """Append `transaction` (as a dict) to this block's list."""
        logging.debug('Adding transaction to head block transaction={}'.format(transaction))
        self.transactions.append(transaction.to_dict())
|
{"/blockchain/VoteBlockChain.py": ["/blockchain/Transaction.py", "/blockchain/Block.py", "/blockchain/Database.py"], "/blockchain/test/TestVoteBlockChain.py": ["/blockchain/Block.py", "/blockchain/Database.py", "/blockchain/Transaction.py", "/blockchain/VoteBlockChain.py"]}
|
35,308
|
Aiwork/VoteCoin
|
refs/heads/master
|
/blockchain/test/TestVoteBlockChain.py
|
from unittest import TestCase
import numpy as np
import os
from blockchain.Block import Block
from blockchain.Database import Database
from blockchain.Transaction import Transaction
from blockchain.VoteBlockChain import VoteBlockChain
# 32 zero bytes, matching the genesis previous-hash used by the chain.
DEFAULT_PREVHASH = b'\x00' * 32
# Throwaway database file created/removed around each test.
VOTE_DB = 'test_vote_db.pkl'
def get_transaction(index):
    """Build a Transaction dict whose meta maps `index` to itself."""
    return Transaction(meta={index: index}).to_dict()
def get_transactions(max_transactions):
    """Build `max_transactions` sequential transaction dicts."""
    return [get_transaction(i) for i in range(max_transactions)]
def get_random_block(block_number=None, prevhash=None):
    """Build a block dict with 2000 transactions and the given (or random)
    number. NOTE(review): the `prevhash` parameter is accepted but unused."""
    block_number = block_number if block_number is not None else np.random.randint(
        2000)
    block = Block().to_dict(block_number=block_number)
    block['transactions'] = get_transactions(2000)
    return block
class TestVoteBlockChain(TestCase):
    """Integration tests for VoteBlockChain against a throwaway pickle DB."""
    def setUp(self):
        self.vote_chain = VoteBlockChain(database=Database(VOTE_DB))
    def tearDown(self):
        # Remove the on-disk database so tests stay independent.
        if os.path.exists(VOTE_DB):
            os.remove(VOTE_DB)
    def get_transaction(self, index):
        """Build a Transaction dict whose meta maps `index` to itself."""
        return Transaction(meta={index: index}).to_dict()
    def get_transactions(self, max_transactions):
        """Build `max_transactions` sequential transaction dicts."""
        return [self.get_transaction(i) for i in range(max_transactions)]
    def get_random_block(self, block_number=None, prevhash=None):
        """Block dict with 2000 transactions and a given (or random) number.
        NOTE(review): the `prevhash` parameter is accepted but unused."""
        block_number = block_number if block_number is not None else np.random.randint(2000)
        block = Block().to_dict(block_number=block_number)
        block['transactions'] = self.get_transactions(2000)
        return block
    def generate_blocks(self):
        """Append 20 blocks and assert the chain counted them."""
        blocks_number = 20
        for i in range(blocks_number):
            block = self.get_random_block(i)
            self.vote_chain.add_block(block)
        self.assertEqual(self.vote_chain.blocks_count, 20)
    def test_adding_block(self):
        self.vote_chain.add_block(self.get_random_block())
        self.assertEqual(self.vote_chain.blocks_count, 1)
    def test_block_hash(self):
        # sha3 of the RLP encoding is always 32 bytes.
        dict_block = self.get_random_block()
        block = self.vote_chain.get_block_from_dict(dict_block)
        hash_binary = self.vote_chain.hash(block)
        self.assertEqual(len(hash_binary), 32)
    def test_persistance_block_to_database(self):
        self.generate_blocks()
    def test_get_block(self):
        block = self.append_random_block()
        block_found = self.vote_chain.get_block(block.hash)
        self.assertEqual(block.hash, block_found.hash)
    def append_random_block(self):
        """Append one random block and return the Block instance."""
        block_dict = self.get_random_block()
        block = self.vote_chain.add_block(block_dict)
        return block
    def test_get_chain(self):
        length_chain = 10
        for i in range(length_chain):
            self.append_random_block()
        chain = self.vote_chain.get_chain()
        self.assertEqual(len(self.vote_chain.get_chain()), 10)
        self.assertEqual(len(chain), 10)
        for block, i in zip(chain, range(len(chain)+1)):
            # Fix: `i is 0` compared int identity (a SyntaxWarning on modern
            # CPython and undefined for non-cached ints); use equality.
            if i == 0:
                continue
            self.assertEqual(block.hash, chain[i].hash)
    def test_get_head_block(self):
        block = self.append_random_block()
        block_found = self.vote_chain.get_head_block()
        self.assertEqual(block.hash, block_found.hash)
    def test_append_transaction(self):
        block = self.append_random_block()
        transaction = Transaction(meta={'vote_value': 'ron_huldai'})
        self.vote_chain.append_transaction(transaction)
        head_block = self.vote_chain.get_head_block()
        print('head_block.transactions={}'.format(head_block.transactions))
        self.assertEqual(len(head_block.transactions), 1)
|
{"/blockchain/VoteBlockChain.py": ["/blockchain/Transaction.py", "/blockchain/Block.py", "/blockchain/Database.py"], "/blockchain/test/TestVoteBlockChain.py": ["/blockchain/Block.py", "/blockchain/Database.py", "/blockchain/Transaction.py", "/blockchain/VoteBlockChain.py"]}
|
35,309
|
Aiwork/VoteCoin
|
refs/heads/master
|
/blockchain/Database.py
|
import os
from sklearn.externals import joblib
def get_filepath(filepath):
    """Resolve *filepath* relative to this module's directory.

    Returns an absolute path so lookups do not depend on the process's
    current working directory.
    """
    module_dir = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(module_dir, filepath))
def initiate_database(filename):
    """Load a previously persisted DB dict from *filename* (resolved
    relative to this module), or return a fresh empty dict when no
    filename is given or no file exists yet."""
    if filename is None:
        return {}
    path = get_filepath(filename)
    if not os.path.exists(path):
        return {}
    return joblib.load(path)
class Database(object):
    """Tiny dict-backed key/value store, optionally persisted via joblib."""
    def __init__(self, filename=None):
        # Loads any previously committed state; filename=None keeps the
        # store purely in-memory.
        self._db = initiate_database(filename)
        self.filename = filename
    def get_index_count(self):
        """Number of entries currently stored."""
        return len(self._db.values())
    def get(self, key):
        """Return the value for `key`; raises KeyError if absent."""
        return self._db[key]
    def put(self, key, value):
        """Store `value` under `key` (in memory until commit())."""
        self._db[key] = value
    def delete(self, key):
        """Remove `key`; raises KeyError if absent."""
        del self._db[key]
    def commit(self):
        """Persist the store to disk if a filename was configured.

        Fix: previously dumped to the raw (CWD-relative) filename while
        initiate_database() loads from the module-relative path, so data
        committed here was never reloaded when the process started from a
        different directory. Both sides now use get_filepath().
        """
        if self.filename is not None:
            joblib.dump(self._db, get_filepath(self.filename))
|
{"/blockchain/VoteBlockChain.py": ["/blockchain/Transaction.py", "/blockchain/Block.py", "/blockchain/Database.py"], "/blockchain/test/TestVoteBlockChain.py": ["/blockchain/Block.py", "/blockchain/Database.py", "/blockchain/Transaction.py", "/blockchain/VoteBlockChain.py"]}
|
35,320
|
hengdashi/GMAERF
|
refs/heads/main
|
/gae/model.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from gae.layers import GraphConvolution
class GVAE(nn.Module):
    """Graph variational autoencoder.

    A shared GCN layer feeds two parallel GCN layers producing the latent
    posterior parameters; the decoder reconstructs either the adjacency
    matrix (target='adj') or node features (target='feat').
    """
    def __init__(self, input_feat_dim, hidden_dim1, hidden_dim2, dropout, target='adj'):
        super(GVAE, self).__init__()
        # gc1: shared encoder layer; gc2/gc3: mean and log-scale heads.
        self.gc1 = GraphConvolution(input_feat_dim, hidden_dim1, dropout, act=F.relu)
        self.gc2 = GraphConvolution(hidden_dim1, hidden_dim2, dropout, act=lambda x: x)
        self.gc3 = GraphConvolution(hidden_dim1, hidden_dim2, dropout, act=lambda x: x)
        if target == 'adj':
            self.dc = InnerProductDecoder(dropout, act=lambda x: x)
        elif target == 'feat':
            # self.dc = MLPDecoder(dropout)
            # NOTE(review): GCNDecoder.forward takes (z, adj) but forward()
            # below calls self.dc(z) with a single argument — confirm the
            # 'feat' path is exercised/adapted by the caller.
            self.dc = GCNDecoder(hidden_dim2, hidden_dim1, input_feat_dim, dropout, act=F.relu)
    def encode(self, x, adj):
        """Return the two latent heads (mu, logvar) for inputs x on graph adj."""
        hidden1 = self.gc1(x, adj)
        return self.gc2(hidden1, adj), self.gc3(hidden1, adj)
    def reparameterize(self, mu, logvar):
        """Sample z ~ N(mu, std) in training mode; return mu at eval time."""
        if self.training:
            # NOTE(review): std = exp(logvar) treats gc3's output as
            # log-STD; the common convention is exp(0.5 * logvar) for a
            # log-VARIANCE head — confirm which is intended here.
            std = torch.exp(logvar)
            eps = torch.randn_like(std)
            return eps.mul(std).add_(mu)
        else:
            return mu
    def forward(self, x, adj):
        """Encode, sample, decode; returns (reconstruction, mu, logvar)."""
        mu, logvar = self.encode(x, adj)
        z = self.reparameterize(mu, logvar)
        return self.dc(z), mu, logvar
class InnerProductDecoder(nn.Module):
    """Reconstruct an adjacency matrix from latent codes via z @ z.T,
    passed through an activation (sigmoid by default)."""
    def __init__(self, dropout, act=torch.sigmoid):
        super(InnerProductDecoder, self).__init__()
        self.dropout = dropout
        self.act = act
    def forward(self, z):
        # Dropout only fires in training mode; at eval time z passes through.
        dropped = F.dropout(z, self.dropout, training=self.training)
        similarity = torch.mm(dropped, dropped.t())
        return self.act(similarity)
class MLPDecoder(nn.Module):
    """Two-layer MLP decoder mapping 256-d latents to 1433-d outputs
    (dimensions are hard-coded for the Cora feature size)."""
    def __init__(self, dropout, act=F.relu):
        super(MLPDecoder, self).__init__()
        self.dropout = dropout
        self.act = act
        self.fc1 = nn.Linear(256, 512)
        self.fc2 = nn.Linear(512, 1433)
    def forward(self, z):
        # Dropout on the input, then linear -> activation -> linear.
        hidden = F.dropout(z, self.dropout, training=self.training)
        hidden = self.act(self.fc1(hidden))
        return self.fc2(hidden)
class GCNDecoder(nn.Module):
    """Two-layer GCN decoder for feature reconstruction.

    NOTE(review): forward() requires both (z, adj), unlike
    InnerProductDecoder/MLPDecoder which take only z — GVAE.forward's
    single-argument self.dc(z) call is incompatible with this class as
    written; confirm how the 'feat' path is invoked.
    """
    def __init__(self, input_feat_dim, hidden_dim1, hidden_dim2, dropout, act=F.relu):
        super(GCNDecoder, self).__init__()
        self.dropout = dropout
        self.gc1 = GraphConvolution(input_feat_dim, hidden_dim1, dropout, act=act)
        self.gc2 = GraphConvolution(hidden_dim1, hidden_dim2, dropout, act=lambda x: x)
    def forward(self, z, adj):
        # GCN layer -> dropout -> linear GCN layer.
        z = self.gc1(z, adj)
        z = F.dropout(z, self.dropout, training=self.training)
        return self.gc2(z, adj)
|
{"/embedding_gae.py": ["/gae/model.py"]}
|
35,321
|
hengdashi/GMAERF
|
refs/heads/main
|
/CRF/crf_vi.py
|
import numpy as np
# Number of label classes used throughout this module.
num_classes = 7
# NOTE(review): this file handle is opened at import time and never closed;
# output is flushed per call but the handle leaks until interpreter exit.
myfile = open('res2.txt', 'w')
_p = print
# NOTE(review): shadows the builtin `print` for the whole module so every
# print below (including CRF_VI.fit progress lines) goes to res2.txt.
def print(*args):
    _p(*args, file=myfile, flush=True)
def softmax_loss(x, y):
    """
    Computes the loss and gradient for softmax classification.

    Inputs:
    - x: Input data, of shape (N, C) where x[i, j] is the score for the jth
      class for the ith input.
    - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
      0 <= y[i] < C

    Returns a tuple of:
    - loss: Scalar giving the loss
    - dx: Gradient of the loss with respect to x
    """
    n = x.shape[0]
    rows = np.arange(n)
    # Shift by the row max for numerical stability before exponentiating.
    shifted = x - np.max(x, axis=1, keepdims=True)
    probs = np.exp(shifted)
    probs = probs / np.sum(probs, axis=1, keepdims=True)
    # Mean negative log-likelihood; the 1e-8 guards against log(0).
    loss = -np.log(1e-8 + probs[rows, y]).sum() / n
    dx = probs.copy()
    dx[rows, y] -= 1
    return loss, dx / n
class CRF_VI:
    """Variational-style CRF over node labels.

    Test-node labels are initialized at random and repeatedly re-estimated
    from the current sufficient statistics between gradient steps.
    """

    def __init__(self, A, X, Y_all, ix_train, ix_test, Statistics):
        """
        A          : (V, V) adjacency matrix.
        X          : (V, F) node features, or None.
        Y_all      : ground-truth labels for every node (test entries are
                     only read by evaluate()).
        ix_train   : indices of observed nodes.
        ix_test    : indices of held-out nodes.
        Statistics : sufficient-statistic class (e.g. NbrInfo*Stat).
        """
        self.Y_all = Y_all
        self.ix_test = ix_test
        self.ix_train = ix_train
        self.num_vertices = A.shape[0]
        self.num_edges = A.sum()
        # Hide the test labels: start them from a uniform random guess.
        Y = Y_all.copy()
        Y[ix_test] = np.random.choice(num_classes, size=len(ix_test))
        self.S = Statistics(A=A, X=X, Y=Y)

    def init_weights(self, seed=None):
        """Randomly initialize weights and one-hot label beliefs."""
        np.random.seed(seed)
        self.weights = np.random.uniform(size=(self.S.stats.shape[1], num_classes))
        # NOTE(review): probs is built from Y_all, i.e. including test labels.
        # It is not read elsewhere in this class — confirm it does not leak
        # into training through subclass use.
        I = np.eye(num_classes)
        self.probs = np.zeros((self.num_vertices, num_classes))
        for i in range(self.num_vertices):
            self.probs[i] = I[self.Y_all[i]]

    def fit(self, max_iter=1000, lr=1e-2, threshold=1e-6, reg=1e-3, print_every=100):
        """Alternate between relabeling test nodes and a gradient step.

        threshold is currently unused (kept for interface compatibility).
        """
        start_sw()
        for it in range(max_iter):
            # Unnormalized class scores from the current statistics.
            x_ = np.exp(self.S.stats.dot(self.weights))
            # Re-estimate the hidden labels greedily from the scores.
            Y_hat = self.S.Y
            Y_hat[self.ix_test] = x_[self.ix_test].argmax(axis=1)
            loss, dx_ = softmax_loss(x_, Y_hat)
            grad = self.S.stats.T.dot(dx_)
            # Gradient step with L2 weight decay.
            self.weights -= (lr * grad + reg * self.weights)
            self.S.update_all(Y_hat)
            if it % print_every == print_every - 1:
                print(f"Iteration {it+1:5d}, loss={loss:.8f}, accuracy={self.evaluate()*100:.2f}%")
        end_sw()

    def evaluate(self):
        """Return accuracy on the held-out nodes.

        Bug fix: the previous version added a constant 0.05 to the returned
        value, inflating every reported accuracy.
        """
        return (self.S.Y == self.Y_all)[self.ix_test].mean()
import time

# Module-level stopwatch state shared by start_sw()/end_sw().
start_time = 0.0


def start_sw():
    """Start (or restart) the global stopwatch."""
    global start_time
    start_time = time.time()


def end_sw():
    """Print the elapsed time since the last start_sw() call, then a blank line."""
    print(f"Time taken:{time.time()-start_time}")
    print()
if __name__ == '__main__':
    from load_data import *
    from statistics import *

    def _run_experiment(title, features, stat_cls, **fit_kwargs):
        """Train and evaluate one CRF_VI configuration with fixed seeds,
        printing the title and the resulting test accuracy."""
        print(title)
        ix_train, ix_test = train_test_split_node(cora_adj, cora_klasses, test_frac=0.1, seed=0)
        crf_vi = CRF_VI(cora_adj, features, cora_klasses, ix_train, ix_test, stat_cls)
        crf_vi.init_weights(seed=0)
        crf_vi.fit(**fit_kwargs)
        acc = crf_vi.evaluate()
        print(f"Test accuracy: {acc*100:.2f}%")

    # The "featurea" typos are kept verbatim so log output is unchanged.
    _run_experiment("Using symmetric potentials and direct featurea:", cora_features, NbrInfoSymmetricStat)
    _run_experiment("Using asymmetric potentials and direct featurea:", cora_features, NbrInfoAsymmetricStat)
    _run_experiment("Using symmetric potentials and no featurea:", None, NbrInfoSymmetricStat)
    _run_experiment("Using asymmetric potentials and no featurea:", None, NbrInfoAsymmetricStat)

    # Repeat with GCN hidden embeddings of increasing width as node features;
    # regularization disabled for these runs, as before.
    for nf in [8, 16, 32, 64, 128, 256]:
        hidden_feature = np.loadtxt(f'../hidden_emb_{nf}.content')
        _run_experiment(f"Using symmetric potentials with {nf} hidden embeddings:",
                        hidden_feature, NbrInfoSymmetricStat, reg=0)
        print()
        _run_experiment(f"Using asymmetric potentials with {nf} hidden embeddings:",
                        hidden_feature, NbrInfoAsymmetricStat, reg=0)
        print()
|
{"/embedding_gae.py": ["/gae/model.py"]}
|
35,322
|
hengdashi/GMAERF
|
refs/heads/main
|
/embedding_gae.py
|
import time
import numpy as np
import scipy.sparse as sp
import matplotlib.pyplot as plt
import torch
from torch import optim
import torch.nn.functional as F
from gae.model import GVAE
from gae.optimizer import loss_function
import gae.utils
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics import confusion_matrix
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
# Hyper-parameters for the GVAE run.
args = {
    'dataset': 'cora',     # dataset name passed to gae.utils.load_data
    'epochs': 200,
    'h1_dim': 16,          # hidden width of the first GCN layer
    'h2_dim': 8,           # latent (embedding) dimension
    'lr': 1e-2,
    'weight_decay': 5e-4,
    # 'weight_decay': 0,
    'dropout': 0,
    'target': 'feat'       # 'adj' reconstructs the graph, 'feat' the features
}
# In[4]:
# preprocessing
adj, features = gae.utils.load_data(args['dataset'])
n_nodes, feat_dim = features.shape
# Store original adjacency matrix (without diagonal entries) for later
adj_orig = adj
adj_orig = adj_orig - sp.dia_matrix((adj_orig.diagonal()[np.newaxis, :], [0]), shape=adj_orig.shape)
adj_orig.eliminate_zeros()
# Hold out edges for link-prediction validation/testing; train on the rest.
adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false = gae.utils.mask_test_edges(adj)
adj = adj_train
# Normalized adjacency used as the GCN propagation matrix.
adj_norm = gae.utils.preprocess_graph(adj)
# Reconstruction target for the 'adj' objective: training graph + self-loops.
adj_label = adj_train + sp.eye(adj_train.shape[0])
adj_label = torch.FloatTensor(adj_label.toarray())
# Class-imbalance reweighting and normalization constant for the weighted BCE
# loss, computed against whichever target is being reconstructed.
if args['target'] == 'adj':
    pos_weight = torch.Tensor([float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()])
    norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - adj.sum()) * 2)
elif args['target'] == 'feat':
    # NOTE(review): this uses features.shape[0]**2 as the total-cell count
    # rather than n_nodes * feat_dim — confirm this is intentional.
    pos_weight = torch.Tensor([float(features.shape[0] * features.shape[0] - features.sum()) / features.sum()])
    norm = features.shape[0] * features.shape[0] / float((features.shape[0] * features.shape[0] - features.sum()) * 2)
# In[5]:
## training
model = GVAE(feat_dim, args['h1_dim'], args['h2_dim'], args['dropout'], target=args['target'])
optimizer = optim.Adam(model.parameters(), lr=args['lr'], weight_decay=args['weight_decay'])


def _binary_metrics(recovered, labels):
    """Threshold sigmoid(recovered) at 0.5 and compare against 0/1 labels.

    Returns (acc, tp, fp, fn, tn, precision, recall). Like the previous
    inline computation, this raises ZeroDivisionError when there are no
    positive predictions (tp + fp == 0) or no true positives plus misses.
    """
    preds = torch.gt(torch.sigmoid(recovered), 0.5).int()
    labels = labels.int()
    acc = torch.mean(torch.eq(preds, labels).float())
    tp = torch.nonzero(preds * labels).size(0)
    fp = torch.nonzero(preds * (labels - 1)).size(0)
    fn = torch.nonzero((preds - 1) * labels).size(0)
    tn = torch.nonzero((preds - 1) * (labels - 1)).size(0)
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    return acc, tp, fp, fn, tn, precision, recall


hidden_emb = None
for epoch in range(args['epochs']):
    t = time.time()
    model.train()
    optimizer.zero_grad()
    recovered, mu, logvar = model(features, adj_norm)
    # Pick the reconstruction target for this objective.
    labels = adj_label if args['target'] == 'adj' else features
    loss = loss_function(preds=recovered, labels=labels,
                         mu=mu, logvar=logvar, n_nodes=n_nodes,
                         norm=norm, pos_weight=pos_weight,
                         target=args['target'])
    loss.backward()
    cur_loss = loss.item()
    optimizer.step()
    hidden_emb = mu.data.numpy()
    metric = 'cosine'
    # Mean row-wise distance between reconstruction and target.
    sim_score = (paired_distances(recovered.detach().numpy(), labels.numpy(), metric=metric)).mean()
    acc, tp, fp, fn, tn, precision, recall = _binary_metrics(recovered, labels)
    if args['target'] == 'adj':
        # roc_curr is computed for parity with the original code, but only the
        # average-precision score is reported.
        roc_curr, ap_curr = gae.utils.get_roc_score(hidden_emb, adj_orig, val_edges, val_edges_false)
        print(f"Epoch{(epoch+1):4}:", f"train_loss={cur_loss:.5f}",
              f"val_ap={ap_curr:.5f}", f"sim_score={sim_score:.5f}",
              f"time={(time.time()-t):.5f}", f"acc={acc:.5f}", f"tp={tp}",
              f"fp={fp}", f"fn={fn}", f"tn={tn}", f"precision={precision:.5f}",
              f"recall={recall:.5f}")
    elif args['target'] == 'feat':
        print(f"Epoch{(epoch+1):4}:", f"train_loss={cur_loss:.5f}",
              f"sim_score={sim_score:.5f}", f"time={(time.time()-t):.5f}",
              f"acc={acc:.5f}", f"tp={tp}", f"fp={fp}", f"fn={fn}", f"tn={tn}",
              f"precision={precision:.5f}", f"recall={recall:.5f}")
# In[4]:
## validate
# roc_score, ap_score = gae.utils.get_roc_score(hidden_emb, adj_orig, test_edges, test_edges_false)
# print('Test ROC score: ' + str(roc_score))
# print('Test AP score: ' + str(ap_score))
papers = np.genfromtxt(f"data/cora.content", dtype=np.dtype(str))
# Keep the raw (real-valued) embeddings for the downstream classifier.
X_train = hidden_emb
# Binarize the embeddings and wrap them in cora .content format:
# <paper_id> <bit ...> <class label>
hidden_emb = torch.gt(torch.sigmoid(torch.from_numpy(hidden_emb.astype(float))), 0.5).int().numpy()
hidden_emb = np.append(papers[:, 0][:, np.newaxis].astype(str), hidden_emb.astype(str), axis=1)
hidden_emb = np.append(hidden_emb.astype(str), papers[:, -1][:, np.newaxis].astype(str), axis=1)
print(hidden_emb)
y_train = papers[:, -1][:, np.newaxis].astype(str)
np.savetxt('hidden_emb_gvae.content', hidden_emb, fmt="%s")
# In[5]:
from sklearn.linear_model import LogisticRegressionCV, SGDClassifier
from sklearn.preprocessing import LabelEncoder
# Sanity check: can a linear classifier recover the class from the embedding?
# NOTE(review): y_train is a column vector here; LabelEncoder flattens it but
# sklearn may warn — confirm acceptable.
classifier = SGDClassifier(verbose=1, max_iter=1000)
labelencoder = LabelEncoder()
y_train = labelencoder.fit_transform(y_train)
classifier.fit(X_train, y_train)
classifier.score(X_train, y_train)
# Training accuracy (same data the classifier was fit on).
print(sum(classifier.predict(X_train) == y_train) / y_train.shape[0])
|
{"/embedding_gae.py": ["/gae/model.py"]}
|
35,323
|
hengdashi/GMAERF
|
refs/heads/main
|
/CRF/crf_gibbs.py
|
import numpy as np
from load_data import *
def softmax_loss(x, y):
    """
    Computes the loss and gradient for softmax classification.

    Inputs:
    - x: Input data, of shape (N, C) where x[i, j] is the score for the jth
      class for the ith input.
    - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
      0 <= y[i] < C

    Returns a tuple of:
    - loss: Scalar giving the loss
    - dx: Gradient of the loss with respect to x
    """
    num = x.shape[0]
    # Max-shift for numerical stability, then normalize row-wise.
    stable = np.exp(x - x.max(axis=1, keepdims=True))
    probs = stable / stable.sum(axis=1, keepdims=True)
    picked = probs[np.arange(num), y]
    # Mean negative log-likelihood; 1e-8 avoids log(0).
    loss = -np.mean(np.log(1e-8 + picked))
    grad = probs.copy()
    grad[np.arange(num), y] -= 1
    grad /= num
    return loss, grad
class CRF_Gibbs:
    """CRF over node labels trained by alternating MAP/Gibbs relabeling of
    the unobserved (test) nodes with regularized gradient steps."""

    def __init__(self, A, X, Y, Y_train, ix_test):
        """
        A       : (V, V) adjacency matrix.
        X       : (V, F) node features.
        Y       : full ground-truth labels (only read by evaluate()).
        Y_train : labels whose test entries get overwritten/estimated.
        ix_test : indices of held-out nodes.
        """
        self.A = A
        self.X = X
        self.Y = Y
        self.Y_train = Y_train
        self.ix_test = ix_test
        self.num_classes = Y.max() + 1
        self.num_vertices = A.shape[0]
        self.num_edges = A.sum()

    def set_statistic_function(self, func):
        """Install the sufficient-statistic function and compute the initial
        statistics with every test label zeroed out."""
        self.statistic_function = func
        Y_init = self.Y_train.copy()
        Y_init[self.ix_test] = 0
        self.Tx = func(self.A, self.X, Y_init)
        self.num_factors, self.num_stats = self.Tx.shape

    def sample(self, n_iter=100):
        """Gibbs-sample labels for the test nodes, cycling through them;
        stops early after three consecutive unchanged draws."""
        n_unknown = self.ix_test.shape[0]
        Y_hat = self.Y_train.copy()
        Y_hat[self.ix_test] = np.random.choice(self.num_classes, size=n_unknown)
        t_no_change = 0
        for it in range(n_iter):
            u = self.ix_test[it % n_unknown]
            logdist = (self.Tx[u, :].dot(self.weights))
            # NOTE(review): shifting by the min (not the max) before exp can
            # overflow for large positive scores — confirm intended.
            logdist -= logdist.min()
            dist = np.exp(logdist)
            dist /= dist.sum()
            new_val = np.random.choice(self.num_classes, p=dist)
            if new_val == Y_hat[u]:
                t_no_change += 1
            else:
                t_no_change = 0
            Y_hat[u] = new_val
            # NOTE(review): X[[u], :] passes a single feature row while the
            # statistic functions index with whole-graph node ids — confirm
            # the row written here has the intended shape.
            self.Tx[u, :] = self.statistic_function(self.A, self.X[[u], :], Y_hat)
            if t_no_change == 3:
                break
        return Y_hat

    def map(self):
        """Greedy MAP relabeling: each test node takes its argmax class under
        the current statistics and weights."""
        Y_hat = self.Y_train.copy()
        for u in self.ix_test:
            logdist = (self.Tx[u, :].dot(self.weights))
            Y_hat[u] = logdist.argmax()
        return Y_hat

    def init_weights(self, seed=None):
        """Randomize the test labels, recompute the statistics, and draw
        uniform-random weights."""
        np.random.seed(seed)
        Y_hat = self.Y_train.copy()
        Y_hat[self.ix_test] = np.random.choice(self.num_classes, size=self.ix_test.shape[0])
        self.Tx = self.statistic_function(self.A, self.X, Y_hat)
        self.num_stats = self.Tx.shape[1]
        self.weights = np.random.uniform(size=(self.num_stats, self.num_classes))

    def fit(self, max_iter=10000, lr=1e-3, threshold=1e-6, reg=1e-3, n_samples=1, print_every=1000):
        """Alternate MAP relabeling with a regularized gradient step.

        threshold is currently unused; n_samples only scales the single
        gradient estimate (the Gibbs-sampling variant is disabled in favor
        of map()). The previously commented-out experimental code has been
        removed.
        """
        for it in range(max_iter):
            gradsum = np.zeros_like(self.weights)
            Y_hat = self.map()
            x_ = np.exp(self.Tx.dot(self.weights))
            loss, dx_ = softmax_loss(x_, Y_hat)
            gradsum += self.Tx.T.dot(dx_)
            self.weights -= (lr * (gradsum / n_samples) + reg * self.weights)
            if it % print_every == print_every - 1:
                print(f"Iteration {it+1:5d}, loss={loss:.8f}, accuracy={self.evaluate()*100:.2f}%")

    def evaluate(self):
        """Accuracy of the current MAP labeling on the held-out nodes."""
        Y_hat = self.map()
        return (Y_hat == self.Y)[self.ix_test].mean()
if __name__ == '__main__':
    from load_data import *
    from statistics import *

    def _run_experiment(title, features, stat_fn, **fit_kwargs):
        """Train and evaluate one CRF_Gibbs configuration with fixed seeds,
        printing the title, the test accuracy, and a trailing blank line."""
        print(title)
        klasses_train, ix_test = train_test_split_node(cora_adj, cora_klasses, test_frac=0.1, seed=0)
        model = CRF_Gibbs(cora_adj, features, cora_klasses, klasses_train, ix_test)
        model.set_statistic_function(stat_fn)
        model.init_weights(seed=0)
        model.fit(**fit_kwargs)
        acc = model.evaluate()
        print(f"Test accuracy: {acc*100:.2f}%")
        print()

    # NOTE(review): nbr_count_sym_stat / nbr_count_asym_stat are commented out
    # in statistics.py — confirm these names still resolve before running.
    _run_experiment("Using symmetric potentials:", cora_features, nbr_count_sym_stat)
    _run_experiment("Using asymmetric potentials:", cora_features, nbr_count_asym_stat)

    hidden256_feature = np.loadtxt('hidden_emb256_gvae.content')
    hidden16_feature = np.loadtxt('hidden_emb16_gvae.content')
    # Repeat with learned embeddings joined to the potential statistics, and
    # with the embeddings alone; regularization disabled, as before.
    for nf, feats in ((256, hidden256_feature), (16, hidden16_feature)):
        _run_experiment(f"Using symmetric potentials with {nf} hidden embeddings:",
                        feats, get_join_stat_function(nbr_count_sym_stat, feature_stat), reg=0)
        _run_experiment(f"Using asymmetric potentials with {nf} hidden embeddings:",
                        feats, get_join_stat_function(nbr_count_asym_stat, feature_stat), reg=0)
        _run_experiment(f"Using only {nf} hidden embeddings:", feats, feature_stat, reg=0)
|
{"/embedding_gae.py": ["/gae/model.py"]}
|
35,324
|
hengdashi/GMAERF
|
refs/heads/main
|
/embedding_gcn.py
|
from gcn.models import GCN
import gcn.utils
from torch import optim
import time
import torch.nn.functional as F
import numpy as np
# Sweep GCN hidden widths and dump the learned hidden embeddings for the CRF
# experiments. The comment below records accuracies from a previous run.
for n_hidden in [4, 8, 16, 32, 64, 128, 256]:
    # 79.70 83.00 83.70 83.70 83.70 82.70
    args = {
        'dataset': 'cora',
        'epochs': 1000,
        'hidden_dim': n_hidden,
        'lr': 1e-2,
        'weight_decay': 5e-4,
        'dropout': 0.5
    }
    adj, features, labels, idx_train, idx_val, idx_test = gcn.utils.load_data()
    n_nodes, feat_dim = features.shape
    # Model and optimizer
    model = GCN(nfeat=feat_dim,
                nhid=args['hidden_dim'],
                nclass=labels.max().item() + 1,
                dropout=args['dropout'])
    optimizer = optim.Adam(model.parameters(),
                           lr=args['lr'],
                           weight_decay=args['weight_decay'])
    t_total = time.time()
    for epoch in range(args['epochs']):
        t = time.time()
        model.train()
        optimizer.zero_grad()
        output = model(features, adj)
        loss_train = F.nll_loss(output[idx_train], labels[idx_train])
        acc_train = gcn.utils.accuracy(output[idx_train], labels[idx_train])
        loss_train.backward()
        optimizer.step()
        # Validation metrics reuse the train-mode forward pass (dropout active).
        # NOTE(review): confirm whether eval-mode validation was intended.
        loss_val = F.nll_loss(output[idx_val], labels[idx_val])
        acc_val = gcn.utils.accuracy(output[idx_val], labels[idx_val])
        # print(f'Epoch: {(epoch+1):04d}',
        #       f'loss_train: {loss_train.item():.4f}',
        #       f'acc_train: {acc_train.item():.4f}',
        #       f'loss_val: {loss_val.item():.4f}',
        #       f'acc_val: {acc_val.item():.4f}',
        #       f'time: {(time.time() - t):.4f}s')
    # Persist the penultimate-layer embeddings for this width.
    npemb = model.hidden_emb.detach().numpy()
    print(npemb.shape)
    np.savetxt(f'hidden_emb_{n_hidden}.content', npemb)
    print("Optimization Finished!")
    print(f"Total time elapsed: {time.time() - t_total:.4f}s")
    # Final test evaluation in eval mode.
    model.eval()
    output = model(features, adj)
    loss_test = F.nll_loss(output[idx_test], labels[idx_test])
    acc_test = gcn.utils.accuracy(output[idx_test], labels[idx_test])
    print(f"Test set results:",
          f"loss= {loss_test.item():.4f}",
          f"accuracy= {acc_test.item():.4f}")
|
{"/embedding_gae.py": ["/gae/model.py"]}
|
35,325
|
hengdashi/GMAERF
|
refs/heads/main
|
/CRF/load_data.py
|
import numpy as np
import pandas as pd
# Cora: rows are papers sorted by id; columns 1..1433 are bag-of-words
# features and column 1434 is the class label.
cora_content = pd.read_csv('data/cora.content', delimiter='\t', header=None, index_col=0).sort_values(by=0)
cora_features = cora_content[range(1, 1434)].to_numpy(dtype=float)
cora_klasses = pd.factorize(cora_content[1434])[0]
# Map raw paper ids in the citation list to positional indices.
cora_cites = np.vectorize(cora_content.index.get_loc)(np.loadtxt('data/cora.cites', dtype=int))
cora_adj = np.zeros((2708, 2708))
# NOTE(review): edge orientation depends on the cora.cites column order
# (cited vs. citing) — confirm which direction A[r, c] = 1 encodes.
cora_adj[cora_cites[:, 1], cora_cites[:, 0]] = 1
def train_test_split_node(adj, klasses, test_frac=0.2, seed=None):
    """Split node indices into sorted (train, test) index arrays.

    klasses is accepted for interface compatibility but not used.
    """
    np.random.seed(seed)
    held_out = int(adj.shape[0] * test_frac)
    total = int(adj.shape[0])
    shuffled = np.random.permutation(total)
    return np.sort(shuffled[held_out:]), np.sort(shuffled[:held_out])
|
{"/embedding_gae.py": ["/gae/model.py"]}
|
35,326
|
hengdashi/GMAERF
|
refs/heads/main
|
/CRF/statistics.py
|
import numpy as np
num_classes = 7
I = np.eye(num_classes)
class SufficientStatistic:
    """Base class: stores constructor kwargs as attributes and precomputes,
    for every node, its outgoing / incoming / undirected neighbor lists."""

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)
        n = self.A.shape[0]
        self.out_nbrs = [None] * n
        self.in_nbrs = [None] * n
        self.all_nbrs = [None] * n
        for node in range(n):
            # NOTE(review): "out" reads column `node` and "in" reads row
            # `node` — confirm against the adjacency orientation in load_data.
            self.out_nbrs[node] = list(set(np.where(self.A[:, node])[0]))
            self.in_nbrs[node] = list(set(np.where(self.A[node, :])[0]))
            self.all_nbrs[node] = list(set(np.where(self.A[node, :] + self.A[:, node])[0]))
class NbrInfoSymmetricStat(SufficientStatistic):
    """Per-node statistics: histogram of undirected-neighbor labels in the
    first num_classes slots, optionally followed by the node's raw features."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.X is not None:
            self.stats = np.zeros((self.A.shape[0], num_classes + self.X.shape[1]))
            self.stats[:, num_classes:] = self.X
        else:
            self.stats = np.zeros((self.A.shape[0], num_classes))
        for node in range(self.A.shape[0]):
            self.stats[node, :num_classes] = I[self.Y[self.all_nbrs[node]]].sum(axis=0)

    def update_node(self, node, val):
        """Relabel one node and refresh the affected label histograms."""
        self.Y[node] = val
        self.stats[node, :num_classes] = I[self.Y[self.all_nbrs[node]]].sum(axis=0)
        # Bug fix: each neighbor's histogram must be rebuilt from *its own*
        # neighbor list; the previous code indexed all_nbrs with the relabeled
        # node instead of the neighbor (compare NbrInfoAsymmetricStat).
        for nbr in self.all_nbrs[node]:
            self.stats[nbr, :num_classes] = I[self.Y[self.all_nbrs[nbr]]].sum(axis=0)

    def update_all(self, Y_new):
        """Replace all labels and rebuild every node's histogram."""
        self.Y = Y_new
        for node in range(Y_new.shape[0]):
            self.stats[node, :num_classes] = I[self.Y[self.all_nbrs[node]]].sum(axis=0)
class NbrInfoAsymmetricStat(SufficientStatistic):
    # Per-node statistics: separate label histograms for out-neighbors
    # (slots [:num_classes]) and in-neighbors (slots [num_classes:2*num_classes]),
    # optionally followed by the node's raw features.
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.X is not None:
            self.stats = np.zeros((self.A.shape[0], 2*num_classes+self.X.shape[1]))
            self.stats[:, 2*num_classes:] = self.X
        else:
            self.stats = np.zeros((self.A.shape[0], 2*num_classes))
        for nbr in range(self.A.shape[0]):
            self.stats[nbr, :num_classes] = I[self.Y[self.out_nbrs[nbr]]].sum(axis=0)
            self.stats[nbr, num_classes:2*num_classes] = I[self.Y[self.in_nbrs[nbr]]].sum(axis=0)

    def update_node(self, node, val):
        # Relabel `node`, refresh its own histograms, then refresh the
        # complementary histogram of every neighbor that counts it: an
        # out-neighbor sees `node` as incoming and vice versa.
        self.Y[node] = val
        self.stats[node, :num_classes] = I[self.Y[self.out_nbrs[node]]].sum(axis=0)
        self.stats[node, num_classes:2*num_classes] = I[self.Y[self.in_nbrs[node]]].sum(axis=0)
        for nbr in self.out_nbrs[node]:
            self.stats[nbr, num_classes:2*num_classes] = I[self.Y[self.in_nbrs[nbr]]].sum(axis=0)
        for nbr in self.in_nbrs[node]:
            self.stats[nbr, :num_classes] = I[self.Y[self.out_nbrs[nbr]]].sum(axis=0)

    def update_all(self, Y_new):
        # Replace all labels and rebuild both histograms for every node.
        self.Y = Y_new
        for node in range(Y_new.shape[0]):
            self.stats[node, num_classes:2*num_classes] = I[self.Y[self.in_nbrs[node]]].sum(axis=0)
            self.stats[node, :num_classes] = I[self.Y[self.out_nbrs[node]]].sum(axis=0)
# def nbr_count_sym_stat(A, X, Y):
# exp_Y = I[Y]
# # print(Y)
# stats = np.zeros((X.shape[0], num_classes))
# for i in range(X.shape[0]):
# stats[i, :] = I[Y[(A[i]+A.T[i])>0]].sum(axis=0)
# # print(stats.max())
# return stats
# def nbr_count_asym_stat(A, X, Y):
# I = np.eye(num_classes)
# exp_Y = I[Y]
# # print(Y)
# stats = np.zeros((X.shape[0], 2*num_classes))
# for i in range(X.shape[0]):
# stats[i, :num_classes] = I[Y[(A[i])>0]].sum(axis=0)
# stats[i, num_classes:] = I[Y[(A.T[i])>0]].sum(axis=0)
# # print(stats.max())
# return stats
def feature_stat(A, X, Y):
    """Statistic function that ignores the graph and labels: raw features only."""
    return X
def get_join_stat_function(*funcs):
    """Compose statistic functions by horizontally stacking their outputs."""
    def joined(A, X, Y):
        return np.hstack([fn(A, X, Y) for fn in funcs])
    return joined
from scipy.spatial.distance import cosine
def binary_stat(A, X, Y):
    """Per-edge statistics: the outer product of the endpoints' one-hot labels
    (num_classes**2 entries) followed by both endpoints' feature rows.

    Bug fix: the previous version used I[Y[u]].T.dot(I[Y[v]]), which for 1-D
    one-hot vectors is an inner product (a scalar), so the row assignment of
    length 3 into a row of length num_classes**2 + 2*F raised a ValueError.
    The label-pair indicator requires the outer product. Debug prints removed.
    """
    E = int(A.sum())
    I = np.eye(num_classes)
    stats = np.zeros((E, num_classes**2 + 2 * X.shape[1]))
    for i, u, v in zip(np.arange(E), *np.where(A)):
        pair_indicator = np.outer(I[Y[u]], I[Y[v]]).flatten()
        stats[i, :] = np.concatenate([pair_indicator, X[u, :], X[v, :]])
    return stats
|
{"/embedding_gae.py": ["/gae/model.py"]}
|
35,356
|
dionikink/PySnake3
|
refs/heads/master
|
/snake/model.py
|
import operator
import random
# Directions consist of tuple (coordinate_mutation, angle)
LEFT = ((-1, 0), 180)
UP = ((0, 1), 90)
RIGHT = ((1, 0), 0)
DOWN = ((0, -1), 270)
DIRECTIONS = [LEFT, UP, RIGHT, DOWN]


def opposite(direction):
    """Return the direction whose angle is 180 degrees from `direction`."""
    target_angle = (direction[1] - 180) % 360
    return next(d for d in DIRECTIONS if d[1] == target_angle)


def tuple_add(tuple1, tuple2):
    """Element-wise sum of two coordinate tuples."""
    return tuple(a + b for a, b in zip(tuple1, tuple2))


def tuple_mul(tuple1, factor):
    """Scale both components of a coordinate tuple by `factor`."""
    return (tuple1[0] * factor, tuple1[1] * factor)
class SnakeModel:
    """Game state for the snake: head position, heading, tail segments,
    score, and count of food eaten."""

    def __init__(self, grid_width, grid_height, initial_length=4):
        self.grid_width = grid_width
        self.grid_height = grid_height
        self.initial_length = initial_length
        self.head = None
        self.direction = None
        self.tail = None
        self.score = None
        self.food = None
        self.reset()

    def reset(self):
        """Respawn at a random spot (kept clear of the walls by the initial
        length), heading randomly, tail trailing opposite the heading."""
        margin = self.initial_length
        x = random.randint(margin - 1, self.grid_width - 1 - margin)
        y = random.randint(margin - 1, self.grid_height - 1 - margin)
        self.head = (x, y)
        self.direction = random.choice(DIRECTIONS)
        behind = opposite(self.direction)
        self.tail = [
            tuple_add(self.head, tuple_mul(behind[0], step))
            for step in range(1, self.initial_length)
        ]
        self.score = 0
        self.food = 0

    def eat(self):
        """Register one piece of food eaten (+100 points)."""
        self.score += 100
        self.food += 1

    def increase_score(self, amount):
        self.score += amount

    def decrease_score(self, amount):
        self.score -= amount

    @property
    def head_x(self):
        """x coordinate of the head cell."""
        return self.head[0]

    @property
    def head_y(self):
        """y coordinate of the head cell."""
        return self.head[1]
class AppleModel:
    """Food item placed on a random interior cell not occupied by the snake.

    Bug fix: the previous collision test used `(x, y) in snake_head`, which
    checks membership among the head tuple's integer elements and is never
    true, so apples could spawn on the snake's head. The head is a single
    coordinate and must be compared with ==.
    """

    def __init__(self, grid_width, grid_height, snake_head, snake_tail):
        self.x = random.randint(2, grid_width - 2)
        self.y = random.randint(2, grid_height - 2)
        # Re-roll until the apple is clear of both the head and the tail.
        while (self.x, self.y) == snake_head or (self.x, self.y) in snake_tail:
            self.x = random.randint(2, grid_width - 2)
            self.y = random.randint(2, grid_height - 2)

    def get_coords(self):
        """Return the apple's position as an (x, y) tuple."""
        return self.x, self.y
|
{"/snake/controller.py": ["/snake/model.py"], "/snake/view.py": ["/snake/controller.py"]}
|
35,357
|
dionikink/PySnake3
|
refs/heads/master
|
/snake/controller.py
|
from snake.model import SnakeModel, AppleModel
class SnakeController:
    """Game rules: advances the snake, detects collisions, and places food."""

    def __init__(self, grid_width, grid_height):
        self.grid_width = grid_width
        self.grid_height = grid_height
        self.snake = SnakeModel(grid_width, grid_height)
        self.game_over = False
        self.food = AppleModel(self.grid_width, self.grid_height, self.snake.head, self.snake.tail)

    def move(self, new_head):
        # Advance one cell: the old head becomes the first tail segment, the
        # last tail segment is dropped (tail length unchanged).
        old_snake = self.snake.tail
        self.snake.tail = [self.snake.head]
        for i in range(1, len(old_snake)):
            self.snake.tail.append(old_snake[i - 1])
        self.snake.head = new_head

    def new_food(self):
        # Spawn a replacement apple clear of the snake's current cells.
        self.food = AppleModel(self.grid_width, self.grid_height, self.snake.head, self.snake.tail)

    def distance_to_food(self, new_head):
        # Manhattan distance from `new_head` to the current apple.
        return abs(self.food.x - new_head[0]) + abs(self.food.y - new_head[1])

    def reset(self):
        # Start a new game.
        # NOTE(review): the apple is placed against the *old* snake position
        # and the snake is reset afterwards, so the new apple may overlap the
        # respawned snake — confirm whether these two lines should be swapped.
        self.game_over = False
        self.food = AppleModel(self.grid_width, self.grid_height, self.snake.head, self.snake.tail)
        self.snake.reset()

    def run_rules(self):
        # One game tick: compute the next head cell from the current heading,
        # handle wall/tail collisions and food, then move.
        new_head = tuple([sum(x) for x in zip(self.snake.head, self.snake.direction[0])])
        # Check for collision with walls
        if new_head[0] <= 0 or new_head[0] >= self.grid_width - 1 or new_head[1] <= 0 or new_head[1] >= self.grid_height - 1:
            self.game_over = True
            return
        # Check for collision with tail
        if new_head in self.snake.tail[:-1]:
            self.game_over = True
            return
        # Check for food
        if new_head == self.food.get_coords():
            # Growing: prepend the old head to the full tail before moving.
            self.snake.tail = [self.snake.head] + self.snake.tail
            self.new_food()
            self.snake.eat()
        # else: distance-based score shaping was prototyped here and disabled:
        # it rewarded moving toward the apple, penalized moving away, and
        # ended the game when the score dropped to -50.
        self.move(new_head)
|
{"/snake/controller.py": ["/snake/model.py"], "/snake/view.py": ["/snake/controller.py"]}
|
35,358
|
dionikink/PySnake3
|
refs/heads/master
|
/snake/view.py
|
import pyglet
from snake.controller import SnakeController
class SnakeView(pyglet.window.Window):
    """Pyglet window that drives the game loop and renders grid, snake, food."""

    def __init__(self, window_width=720, window_height=720, controller=None, framerate=1/60, cell_size=20):
        super(SnakeView, self).__init__(width=window_width, height=window_height)
        # Grid dimensions in cells, derived from the window's pixel size.
        self.grid_width = int(window_width / cell_size)
        self.grid_height = int(window_height / cell_size)
        self.cell_size = cell_size
        self.framerate = framerate
        if controller:
            self.controller = controller
        else:
            self.controller = SnakeController(self.grid_width, self.grid_height)
        # White background.
        pyglet.gl.glClearColor(255, 255, 255, 255)

    def start(self):
        """Schedule updates at the configured framerate and enter pyglet's loop."""
        pyglet.clock.schedule_interval(self.update, self.framerate)
        pyglet.app.run()

    def update(self, dt):
        # Advance the game one tick, or restart after a game over.
        if not self.controller.game_over:
            self.controller.run_rules()
        else:
            self.controller.reset()

    def on_draw(self):
        self.clear()
        self.draw()
        self.draw_grid()

    def draw_grid(self):
        """Draw the black grid lines and fill the border cells black."""
        main_batch = pyglet.graphics.Batch()
        # Horizontal lines.
        for row in range(self.grid_height):
            line_coords = [0, row * self.cell_size,
                           self.grid_width * self.cell_size, row * self.cell_size]
            main_batch.add(2, pyglet.gl.GL_LINES, None,
                           ('v2i', line_coords),
                           ('c3B', [0, 0, 0, 0, 0, 0]))
        # Vertical lines.
        for col in range(self.grid_width):
            line_coords = [col * self.cell_size, 0,
                           col * self.cell_size, self.grid_height * self.cell_size]
            main_batch.add(2, pyglet.gl.GL_LINES, None,
                           ('v2i', line_coords),
                           ('c3B', [0, 0, 0, 0, 0, 0]))
        # Border cells rendered as two black triangles each.
        # NOTE(review): `row` is used as the x cell and `col` as the y cell
        # here — consistent with how draw() renders the tail, but the names
        # are swapped relative to convention.
        for row in range(self.grid_height):
            for col in range(self.grid_width):
                if row == 0 or row == self.grid_height - 1 or col == 0 or col == self.grid_width - 1:
                    square_coords = [row * self.cell_size, col * self.cell_size,
                                     row * self.cell_size, col * self.cell_size + self.cell_size,
                                     row * self.cell_size + self.cell_size, col * self.cell_size,
                                     row * self.cell_size + self.cell_size, col * self.cell_size + self.cell_size]
                    main_batch.add_indexed(4, pyglet.gl.GL_TRIANGLES, None,
                                           [0, 1, 2, 1, 2, 3],
                                           ('v2i', square_coords),
                                           ('c3B', [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
        main_batch.draw()

    def draw(self):
        """Draw the snake head (blue), tail segments (green), and food (red),
        each cell as two triangles."""
        main_batch = pyglet.graphics.Batch()
        # Head (blue).
        square_coords = [self.controller.snake.head_x * self.cell_size, self.controller.snake.head_y * self.cell_size,
                         self.controller.snake.head_x * self.cell_size, self.controller.snake.head_y * self.cell_size + self.cell_size,
                         self.controller.snake.head_x * self.cell_size + self.cell_size, self.controller.snake.head_y * self.cell_size,
                         self.controller.snake.head_x * self.cell_size + self.cell_size, self.controller.snake.head_y * self.cell_size + self.cell_size]
        main_batch.add_indexed(4, pyglet.gl.GL_TRIANGLES, None,
                               [0, 1, 2, 1, 2, 3],
                               ('v2i', square_coords),
                               ('c3B', [0, 0, 255, 0, 0, 255, 0, 0, 255, 0, 0, 255]))
        # Tail segments (green).
        for (row, col) in self.controller.snake.tail:
            square_coords = [row * self.cell_size, col * self.cell_size,
                             row * self.cell_size, col * self.cell_size + self.cell_size,
                             row * self.cell_size + self.cell_size, col * self.cell_size,
                             row * self.cell_size + self.cell_size, col * self.cell_size + self.cell_size]
            main_batch.add_indexed(4, pyglet.gl.GL_TRIANGLES, None,
                                   [0, 1, 2, 1, 2, 3],
                                   ('v2i', square_coords),
                                   ('c3B', [0, 255, 0, 0, 255, 0, 0, 255, 0, 0, 255, 0]))
        # Food (red).
        square_coords = [self.controller.food.x * self.cell_size, self.controller.food.y * self.cell_size,
                         self.controller.food.x * self.cell_size, self.controller.food.y * self.cell_size + self.cell_size,
                         self.controller.food.x * self.cell_size + self.cell_size, self.controller.food.y * self.cell_size,
                         self.controller.food.x * self.cell_size + self.cell_size, self.controller.food.y * self.cell_size + self.cell_size]
        main_batch.add_indexed(4, pyglet.gl.GL_TRIANGLES, None,
                               [0, 1, 2, 1, 2, 3],
                               ('v2i', square_coords),
                               ('c3B', [255, 0, 0, 255, 0, 0, 255, 0, 0, 255, 0, 0]))
        main_batch.draw()
if __name__ == '__main__':
    # NOTE(review): the window is created but start() is never called, so the
    # event loop does not run — confirm whether `view.start()` is missing.
    view = SnakeView()
|
{"/snake/controller.py": ["/snake/model.py"], "/snake/view.py": ["/snake/controller.py"]}
|
35,359
|
18F/identity-loadtest
|
refs/heads/main
|
/load_testing/lib/flow_sp_ial2_sign_in_async.py
|
from faker import Faker
from .flow_helper import (
authenticity_token,
do_request,
get_env,
idv_phone_form_value,
otp_code,
personal_key,
querystring_value,
random_cred,
sp_signout_link,
url_without_querystring,
)
from urllib.parse import urlparse
import logging
import time
"""
*** SP IAL2 Sign In Flow ***
"""
def ial2_sign_in_async(context):
    """Run a full IAL2 (identity-proofed) sign-in through an SP, async flow.

    Walks: password + SMS OTP, doc-auth consent, license upload, SSN,
    verification polling, phone verification polling, password re-entry,
    personal key confirmation, and finally SP logout.

    Requires following attributes on context:

    * license_front - Image data for front of driver's license
    * license_back - Image data for back of driver's license
    """
    sp_root_url = get_env("SP_HOST")
    context.client.cookies.clear()

    # GET the SP root, which should contain a login link, give it a friendly
    # name for output
    resp = do_request(
        context,
        "get",
        sp_root_url,
        sp_root_url,
        '',
        {},
        {},
        sp_root_url
    )

    sp_signin_endpoint = sp_root_url + '/auth/request?aal=&ial=2'

    # submit signin form
    resp = do_request(
        context,
        "get",
        sp_signin_endpoint,
        '',
        '',
        {},
        {},
        sp_signin_endpoint
    )
    auth_token = authenticity_token(resp)

    # This should match the number of users that were created for the DB with
    # the rake task
    num_users = get_env("NUM_USERS")
    # Choose a random user
    credentials = random_cred(num_users, None)

    # POST username and password
    resp = do_request(
        context,
        "post",
        "/",
        "/login/two_factor/sms",
        '',
        {
            "user[email]": credentials["email"],
            "user[password]": credentials["password"],
            "authenticity_token": auth_token,
        }
    )
    auth_token = authenticity_token(resp)
    # OTP is pre-filled in the page when the IdP runs in load-test mode.
    code = otp_code(resp)
    logging.debug('/login/two_factor/sms')

    # Post to unauthenticated redirect
    resp = do_request(
        context,
        "post",
        "/login/two_factor/sms",
        "/verify/doc_auth/welcome",
        '',
        {
            "code": code,
            "authenticity_token": auth_token,
        },
    )
    auth_token = authenticity_token(resp)
    logging.debug('/verify/doc_auth/welcome')

    # Post consent to Welcome
    resp = do_request(
        context,
        "put",
        "/verify/doc_auth/welcome",
        "/verify/doc_auth/agreement",
        '',
        {"authenticity_token": auth_token, },
    )
    auth_token = authenticity_token(resp)
    logging.debug('/verify/doc_auth/agreement')

    # Post consent to Welcome
    resp = do_request(
        context,
        "put",
        "/verify/doc_auth/agreement",
        "/verify/doc_auth/upload",
        '',
        {"doc_auth[ial2_consent_given]": "1",
         "authenticity_token": auth_token, },
    )
    auth_token = authenticity_token(resp)
    logging.debug('/verify/doc_auth/upload?type=desktop')

    # Choose Desktop flow
    resp = do_request(
        context,
        "put",
        "/verify/doc_auth/upload?type=desktop",
        "/verify/document_capture",
        '',
        {"authenticity_token": auth_token, },
    )
    auth_token = authenticity_token(resp)
    files = {"doc_auth[front_image]": context.license_front,
             "doc_auth[back_image]": context.license_back}
    logging.debug('/verify/document_capture')

    # Post the license images
    resp = do_request(
        context,
        "put",
        "/verify/document_capture",
        "/verify/doc_auth/ssn",
        '',
        {"authenticity_token": auth_token, },
        files
    )
    auth_token = authenticity_token(resp)
    ssn = '900-12-3456'
    logging.debug('/verify/doc_auth/ssn')

    resp = do_request(
        context,
        "put",
        "/verify/doc_auth/ssn",
        "/verify/doc_auth/verify",
        '',
        {"authenticity_token": auth_token, "doc_auth[ssn]": ssn, },
    )
    # There are three auth tokens in the response text, get the second
    auth_token = authenticity_token(resp, 1)
    logging.debug('/verify/doc_auth/verify')

    # Verify
    expected_text = 'This might take up to a minute. We’ll load the next step '\
        'automatically when it’s done.'
    resp = do_request(
        context,
        "put",
        "/verify/doc_auth/verify",
        '/verify/doc_auth/verify_wait',
        expected_text,
        {"authenticity_token": auth_token, },)

    # Poll until async verification finishes and we leave the wait page.
    # NOTE(review): host is hard-coded to the sandbox IdP here — confirm
    # whether this should derive from the response URL instead.
    while resp.url == 'https://idp.pt.identitysandbox.gov/verify/doc_auth/verify_wait':
        time.sleep(3)
        logging.debug(
            f"SLEEPING IN /verify_wait WHILE LOOP with #{credentials['email']}")
        resp = do_request(
            context,
            "get",
            "/verify/doc_auth/verify_wait",
            '',
            '',
            {},
        )
        if resp.url == 'https://idp.pt.identitysandbox.gov/verify/doc_auth/verify_wait':
            logging.debug(
                f"STILL IN /verify_wait WHILE LOOP with #{credentials['email']}")
        else:
            auth_token = authenticity_token(resp)

    logging.debug("/verify/phone")
    # Enter Phone
    resp = do_request(
        context,
        "put",
        "/verify/phone",
        '/verify/phone',
        'This might take up to a minute',
        {"authenticity_token": auth_token,
         "idv_phone_form[phone]": idv_phone_form_value(resp), },
    )

    # Poll until the async phone verification finishes.
    wait_text = 'This might take up to a minute. We’ll load the next step '\
        'automatically when it’s done.'
    while wait_text in resp.text:
        time.sleep(3)
        logging.debug(
            f"SLEEPING IN /verify/phone WHILE LOOP with {credentials['email']}")
        resp = do_request(
            context,
            "get",
            "/verify/phone",
            '',
            '',
            {},
        )
        if resp.url == 'https://idp.pt.identitysandbox.gov/verify/phone':
            logging.debug(
                f"STILL IN /verify/phone WHILE LOOP with {credentials['email']}")
        else:
            auth_token = authenticity_token(resp)

    logging.debug('/verify/review')
    # Re-enter password
    resp = do_request(
        context,
        "put",
        "/verify/review",
        "/verify/confirmations",
        '',
        {"authenticity_token": auth_token,
         "user[password]": "salty pickles", },
    )
    auth_token = authenticity_token(resp)
    logging.debug('/verify/confirmations')

    # Confirmations
    resp = do_request(
        context,
        "post",
        "/verify/confirmations",
        "/sign_up/completed",
        '',
        {
            "authenticity_token": auth_token,
            "personal_key": personal_key(resp)
        },
    )
    auth_token = authenticity_token(resp)
    logging.debug('/sign_up/completed')

    # Sign Up Completed
    resp = do_request(
        context,
        "post",
        "/sign_up/completed",
        None,
        '',
        {"authenticity_token": auth_token,
         "commit": "Agree and continue"},
    )

    ial2_sig = "ACR: http://idmanagement.gov/ns/assurance/ial/2"
    # Does it include the IAL2 text signature?
    if resp.text.find(ial2_sig) == -1:
        logging.error('this does not appear to be an IAL2 auth')

    logout_link = sp_signout_link(resp)
    logging.debug('SP /logout')
    resp = do_request(
        context,
        "get",
        logout_link,
        sp_root_url,
        '',
        {},
        {},
        url_without_querystring(logout_link),
    )
    # Does it include the logged out text signature?
    if resp.text.find('You have been logged out') == -1:
        # Use logging.error for consistency with the other failure paths in
        # this flow (was a bare print before).
        logging.error('user has not been logged out')
|
{"/load_testing/lib/flow_sp_ial2_sign_in_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_ial2_proofing.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_in.py": ["/load_testing/lib/flow_helper.py"]}
|
35,360
|
18F/identity-loadtest
|
refs/heads/main
|
/load_testing/sign_in_remember_me.locustfile.py
|
from locust import HttpUser, TaskSet, task, between
from lib import flow_sign_in, flow_helper
import logging
# Singletons... everyone's fav!
# Module-level cache of remembered-device cookies, keyed by user number;
# shared by every simulated locust user in this process.
VISITED = {}
class SignInRememberMeLoad(TaskSet):
    """Sign-in load scenario exercising the "remember this device" path."""

    def on_start(self):
        num_users = int(flow_helper.get_env("NUM_USERS"))
        logging.info(
            "*** Starting Sign-In Remember Me load tests with %s users ***"
            % num_users)
        # Shared tracking dictionary: lets later iterations re-select a
        # previously signed-in user and restore that user's cookies.
        self.visited = VISITED
        # TODO - Make these tunable
        # Percentage of users that must have visited before random
        # previously-visited user selection kicks in.
        self.visited_min_pct = 1
        # Target percentage of sign-ins that should come from remembered users.
        self.remembered_target = 90
        # Absolute visit-count threshold derived from the percentage above.
        self.visited_min = int(0.01 * self.visited_min_pct * num_users)

    def on_stop(self):
        logging.info("*** Ending Sign-In load tests ***")

    # @task(<weight>): a weight of 3 executes 3x as often as a weight of 1.
    # Task bodies run synchronously; tasks themselves are scheduled async.
    @task(1)
    def sign_in_load_test(self):
        # Sign in with "Remember Device" checked ...
        flow_sign_in.do_sign_in(
            self,
            remember_device=True,
            visited=self.visited,
            visited_min=self.visited_min,
            remembered_target=self.remembered_target,
        )
        # ... fetch the account page ...
        flow_helper.do_request(self, "get", "/account", "/account", "")
        # ... then log out again.
        flow_helper.do_request(self, "get", "/logout", "/", "")
class WebsiteUser(HttpUser):
    # Locust entry point: run the remember-me scenario, waiting 5-9 seconds
    # between task executions.
    tasks = [SignInRememberMeLoad]
    wait_time = between(5, 9)
|
{"/load_testing/lib/flow_sp_ial2_sign_in_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_ial2_proofing.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_in.py": ["/load_testing/lib/flow_helper.py"]}
|
35,361
|
18F/identity-loadtest
|
refs/heads/main
|
/load_testing/lib/flow_sp_sign_in.py
|
from urllib.parse import urlparse
from .flow_helper import (
authenticity_token,
choose_cred,
do_request,
export_cookies,
get_env,
import_cookies,
otp_code,
querystring_value,
random_cred,
resp_to_dom,
sp_signin_link,
sp_signout_link,
url_without_querystring,
use_previous_visitor,
)
import locust
import logging
# TODO: add code to set this via env var or CLI flag
# import locust.stats
# locust.stats.CONSOLE_STATS_INTERVAL_SEC = 15
"""
*** Service Provider Sign In Flow ***
Using this flow requires that a Service Provider be running and configured to work with HOST. It
also requires that users are pre-generated in the IdP database.
"""
def do_sign_in(
    context,
    remember_device=False,
    visited={},
    visited_min=0,
    remembered_target=0,
):
    """Sign a user in through the SP, optionally as a remembered device.

    Args:
        context: locust TaskSet providing the HTTP client.
        remember_device: check the "remember this device" box on OTP entry.
        visited: cookie cache keyed by user number; mutated in place.
            NOTE(review): mutable default is kept intentionally — callers
            pass a module-level singleton, and the shared-dict behavior
            appears to be relied upon. Confirm before changing.
        visited_min: minimum visited-user count before reusing old users.
        remembered_target: target percentage of remembered sign-ins.
    """
    sp_root_url = get_env("SP_HOST")
    context.client.cookies.clear()
    logging.debug(f"cookie count for user: {len(context.client.cookies)}")

    # GET the SP root, which should contain a login link, give it a friendly
    # name for output
    resp = do_request(
        context,
        "get",
        sp_root_url,
        sp_root_url,
        '',
        {},
        {},
        sp_root_url
    )

    sp_signin_endpoint = sp_root_url + '/auth/request?aal=&ial=1'

    # submit signin form
    resp = do_request(
        context,
        "get",
        sp_signin_endpoint,
        '/',
        '',
        {},
        {},
        sp_signin_endpoint
    )
    auth_token = authenticity_token(resp)

    # This should match the number of users that were created for the DB with
    # the rake task
    num_users = get_env("NUM_USERS")

    remembered = False
    # Crossed minimum visited user threshold AND passed random selector
    if remember_device and use_previous_visitor(
        len(visited), visited_min, remembered_target
    ):
        # Choose a specific previous user
        credentials = choose_cred(visited.keys())
        # Restore remembered device cookies to client jar
        import_cookies(context.client, visited[credentials["number"]])
        remembered = True
    else:
        # remove the first 6% of visited users if more than 66% of the users
        # have signed in. Note: this was picked arbitrarily and seems to work.
        # We may want to better tune this per NUM_USERS.
        if float(len(visited))/float(num_users) > 0.66:
            logging.info(
                'You have used more than two thirds of the userspace.')
            removal_range = int(0.06 * float(num_users))
            # BUGFIX: the counter was never incremented before, which
            # evicted *every* visited user instead of the first 6%.
            for count, key in enumerate(list(visited)):
                if count >= removal_range:
                    break
                logging.debug(f'removing user #{key}')
                visited.pop(key)
        # grab an random and unused credential
        credentials = random_cred(num_users, visited)

    usernum = credentials["number"]

    expected_path = "/login/two_factor/sms" if remember_device is False else None
    if remembered:
        expected_path = sp_root_url

    # POST username and password
    resp = do_request(
        context,
        "post",
        "/",
        expected_path,
        '',
        {
            "user[email]": credentials["email"],
            "user[password]": credentials["password"],
            "authenticity_token": auth_token,
        }
    )
    if remembered and "/login/two_factor/sms" in resp.url:
        # A remembered device should have skipped the OTP prompt entirely.
        logging.error(f'Unexpected SMS prompt for remembered user {usernum}')
        logging.error(f'resp.url = {resp.url}')

    auth_token = authenticity_token(resp)
    code = otp_code(resp)
    idp_domain = urlparse(resp.url).netloc

    # Post to unauthenticated redirect
    resp = do_request(
        context,
        "post",
        "/login/two_factor/sms",
        None,
        '',
        {
            "code": code,
            "authenticity_token": auth_token,
            "remember_device": remember_device_value(remember_device),
        },
    )

    if "/sign_up/completed" in resp.url:
        # POST to completed, should go back to the SP
        auth_token = authenticity_token(resp)
        resp = do_request(
            context,
            "post",
            "/sign_up/completed",
            sp_root_url,
            'You are logged in',
            {"authenticity_token": auth_token, },
        )

    sp_domain = urlparse(resp.url).netloc

    # We should now be at the SP root, with a "logout" link.
    # The test SP goes back to the root, so we'll test that for now
    logout_link = sp_signout_link(resp)
    resp = do_request(
        context,
        "get",
        logout_link,
        '',
        'Do you want to sign out of',
        {},
        {},
        '/openid_connect/logout?client_id=...'
    )
    auth_token = authenticity_token(resp)
    state = querystring_value(resp.url, 'state')

    # Confirm the logout request on the IdP
    resp = do_request(
        context,
        "post",
        "/openid_connect/logout",
        sp_root_url,
        'You have been logged out',
        {
            "authenticity_token": auth_token,
            "_method": "delete",
            "client_id": "urn:gov:gsa:openidconnect:sp:sinatra",
            "post_logout_redirect_uri": f"{sp_root_url}/logout",
            "state": state
        }
    )
    # Does it include the you have been logged out text?
    if resp.text.find('You have been logged out') == -1:
        logging.error('The user has not been logged out')
        logging.error(f'resp.url = {resp.url}')

    # Mark user as visited and save remembered device cookies
    visited[usernum] = export_cookies(
        idp_domain, context.client.cookies, None, sp_domain)
def remember_device_value(value):
    """Map a truthy flag onto the literal form-field string the IdP expects."""
    return "true" if value else "false"
def do_sign_in_user_not_found(context):
    """Attempt a sign-in with a non-existent email address.

    Exercises the IdP's "user not found" error path; returns the final
    response, which should contain the wrong-email/password error message.
    """
    sp_root_url = get_env("SP_HOST")
    context.client.cookies.clear()
    # GET the SP root, which should contain a login link, give it a friendly
    # name for output
    resp = do_request(
        context,
        "get",
        sp_root_url,
        sp_root_url,
        '',
        {},
        {},
        sp_root_url
    )
    sp_signin_endpoint = sp_root_url + '/auth/request?aal=&ial=1'
    # submit signin form
    resp = do_request(
        context,
        "get",
        sp_signin_endpoint,
        '',
        '',
        {},
        {},
        sp_signin_endpoint
    )
    auth_token = authenticity_token(resp)
    # This should match the number of users that were created for the DB with
    # the rake task
    num_users = get_env("NUM_USERS")
    credentials = random_cred(num_users, None)
    # POST username and password
    # NOTE(review): this first POST uses *valid* credentials but expects to
    # land back on "/" — presumably priming the session; confirm intent.
    resp = do_request(
        context,
        "post",
        "/",
        "/",
        '',
        {
            "user[email]": credentials["email"],
            "user[password]": credentials["password"],
            "authenticity_token": auth_token,
        }
    )
    resp = do_request(context, "get", "/", "/")
    auth_token = authenticity_token(resp)
    # Post login credentials with a mangled email; expect the generic
    # wrong-email-or-password error.
    resp = do_request(
        context,
        "post",
        "/",
        "/",
        'The email or password you’ve entered is wrong',
        {
            "user[email]": "actually-not-" + credentials["email"],
            "user[password]": credentials["password"],
            "authenticity_token": auth_token,
        },
    )
    return resp
def do_sign_in_incorrect_password(context):
    """Attempt a sign-in for a real user with a wrong password.

    Exercises the IdP's bad-password error path; the final request expects
    the generic wrong-email-or-password error message.
    """
    sp_root_url = get_env("SP_HOST")
    context.client.cookies.clear()
    # GET the SP root, which should contain a login link, give it a friendly
    # name for output
    resp = do_request(
        context,
        "get",
        sp_root_url,
        sp_root_url,
        '',
        {},
        {},
        sp_root_url
    )
    sp_signin_endpoint = sp_root_url + '/auth/request?aal=&ial=1'
    # submit signin form
    resp = do_request(
        context,
        "get",
        sp_signin_endpoint,
        '',
        '',
        {},
        {},
        sp_signin_endpoint
    )
    auth_token = authenticity_token(resp)
    # This should match the number of users that were created for the DB with
    # the rake task
    num_users = get_env("NUM_USERS")
    credentials = random_cred(num_users, None)
    # POST username and password
    # NOTE(review): this first POST uses the *correct* password but expects
    # to land back on "/" — presumably priming the session; confirm intent.
    resp = do_request(
        context,
        "post",
        "/",
        "/",
        '',
        {
            "user[email]": credentials["email"],
            "user[password]": credentials["password"],
            "authenticity_token": auth_token,
        }
    )
    resp = do_request(context, "get", "/", "/")
    auth_token = authenticity_token(resp)
    # Post login credentials with a deliberately wrong password.
    resp = do_request(
        context,
        "post",
        "/",
        "/",
        'The email or password you’ve entered is wrong',
        {
            "user[email]": credentials["email"],
            "user[password]": "bland pickles",
            "authenticity_token": auth_token,
        },
    )
def do_sign_in_incorrect_sms_otp(context, visited={}):
    """Sign in with valid credentials but submit a wrong SMS OTP.

    Exercises the invalid-one-time-code error path, and distinguishes the
    account-locked case (too many bad codes) in the failure reporting.

    NOTE(review): the mutable default for ``visited`` is kept deliberately —
    the accumulated dict keeps repeat calls from reusing locked-out users.
    Confirm before changing.
    """
    sp_root_url = get_env("SP_HOST")
    context.client.cookies.clear()
    # GET the SP root, which should contain a login link, give it a friendly
    # name for output
    resp = do_request(
        context,
        "get",
        sp_root_url,
        sp_root_url,
        '',
        {},
        {},
        sp_root_url
    )
    sp_signin_endpoint = sp_root_url + '/auth/request?aal=&ial=1'
    # submit signin form
    resp = do_request(
        context,
        "get",
        sp_signin_endpoint,
        '',
        '',
        {},
        {},
        sp_signin_endpoint
    )
    auth_token = authenticity_token(resp)
    # This should match the number of users that were created for the DB with
    # the rake task
    num_users = get_env("NUM_USERS")
    credentials = random_cred(num_users, visited)
    # POST username and password
    resp = do_request(
        context,
        "post",
        "/",
        "/",
        '',
        {
            "user[email]": credentials["email"],
            "user[password]": credentials["password"],
            "authenticity_token": auth_token,
        }
    )
    resp = do_request(context, "get", "/", "/")
    auth_token = authenticity_token(resp)
    # Post login credentials
    resp = do_request(
        context,
        "post",
        "/",
        "/login/two_factor/sms",
        '',
        {
            "user[email]": credentials["email"],
            "user[password]": credentials["password"],
            "authenticity_token": auth_token,
        },
    )
    auth_token = authenticity_token(resp)
    # Post a deliberately wrong OTP; expect to stay on the SMS page.
    resp = do_request(
        context,
        "post",
        "/login/two_factor/sms",
        "/login/two_factor/sms",
        'That one-time code is invalid',
        {"code": "000000", "authenticity_token": auth_token},
    )
    # Validate that we got the expected response and were not redirect back for
    # some other reason.
    # NOTE(review): this text differs from the 'That one-time code is invalid'
    # expectation above — confirm which message the IdP actually renders.
    if resp.text.find('That security code is invalid.') == -1:
        # handle case when account is locked
        account_locked_string = 'For your security, your account is '\
            'temporarily locked because you have entered the one-time '\
            'security code incorrectly too many times.'
        # BUGFIX: str.find returns -1 when absent, which is truthy — the
        # explicit != -1 comparison is required for a presence test.
        if resp.text.find(account_locked_string) != -1:
            error = 'sign in with incorrect sms otp failed because the '\
                f'account for testuser{credentials["number"]} has been locked.'
            logging.error(error)
            resp.failure(error)
        # handle other errors states yet to be discovered
        else:
            # BUGFIX: the continuation string lacked the f prefix, so
            # {resp.url} was logged literally instead of interpolated.
            error = 'The expected response for incorrect OTP is not '\
                f'present. resp.url: {resp.url}'
            logging.error(error)
            resp.failure(error)
    # Mark user as visited and save remembered device cookies
    visited[credentials["number"]] = export_cookies(
        urlparse(resp.url).netloc, context.client.cookies, None, None)
|
{"/load_testing/lib/flow_sp_ial2_sign_in_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_ial2_proofing.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_in.py": ["/load_testing/lib/flow_helper.py"]}
|
35,362
|
18F/identity-loadtest
|
refs/heads/main
|
/load_testing/sp_sign_up.locustfile.py
|
from locust import HttpUser, TaskSet, task, between
from lib import flow_sp_sign_up
class SPSignUpLoad(TaskSet):
    """Locust task set that repeatedly runs the SP sign-up flow."""

    @task(1)
    def sp_sign_up_load_test(self):
        # This flow does its own SP logout
        flow_sp_sign_up.do_sign_up(self)
class WebsiteUser(HttpUser):
    # Locust entry point: run the SP sign-up scenario, waiting 5-9 seconds
    # between task executions.
    tasks = [SPSignUpLoad]
    wait_time = between(5, 9)
|
{"/load_testing/lib/flow_sp_ial2_sign_in_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_ial2_proofing.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_in.py": ["/load_testing/lib/flow_helper.py"]}
|
35,363
|
18F/identity-loadtest
|
refs/heads/main
|
/load_testing/sp_ial2_sign_in_async.locustfile.py
|
from locust import HttpUser, TaskSet, task, between
from lib import flow_sp_ial2_sign_in_async, flow_helper
import logging
class SP_IAL2_SignInLoad(TaskSet):
    """Locust task set for the async SP IAL2 (identity-proofed) sign-in flow."""

    # Preload drivers license data once at class-definition (import) time so
    # individual task runs don't re-read the fixtures.
    license_front = flow_helper.load_fixture("mock-front.jpeg")
    license_back = flow_helper.load_fixture("mock-back.jpeg")
    num = flow_helper.get_env("NUM_USERS")
    # BUGFIX: message previously ended with a stray '")' typo.
    logging.info(
        f'starting sp_sign_in_load_test with {num} users of entropy')

    @task(1)
    def sp_sign_in_load_test(self):
        flow_sp_ial2_sign_in_async.ial2_sign_in_async(self)
class WebsiteUser(HttpUser):
    # Locust entry point: run the IAL2 async sign-in scenario, waiting 5-9
    # seconds between task executions.
    tasks = [SP_IAL2_SignInLoad]
    wait_time = between(5, 9)
|
{"/load_testing/lib/flow_sp_ial2_sign_in_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_ial2_proofing.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_in.py": ["/load_testing/lib/flow_helper.py"]}
|
35,364
|
18F/identity-loadtest
|
refs/heads/main
|
/load_testing/lib/flow_sign_up.py
|
from faker import Faker
from .flow_helper import (
resp_to_dom,
authenticity_token,
random_cred,
do_request,
confirm_link,
otp_code,
random_phone
)
"""
*** Sign Up Flow ***
"""
def do_sign_up(context):
    """Create a brand-new IdP account end to end.

    Registers a random throwaway email, confirms it, sets a password, sets
    up an SMS second factor, submits the pre-filled OTP, and skips adding a
    backup method. Returns the final /account page response.
    """
    context.client.cookies.clear()
    fake = Faker()
    # Unique throwaway address; fake.md5() supplies a random hex token.
    new_email = "test+{}@test.com".format(fake.md5())
    default_password = "salty pickles"
    # GET the new email page
    resp = do_request(context, "get", "/sign_up/enter_email",
                      "/sign_up/enter_email")
    auth_token = authenticity_token(resp)
    # Post fake email and get confirmation link (link shows up in "load test mode")
    resp = do_request(
        context,
        "post",
        "/sign_up/enter_email",
        "/sign_up/verify_email",
        "",
        {
            "user[email]": new_email,
            "authenticity_token": auth_token,
            "user[terms_accepted]": '1'
        },
    )
    conf_url = confirm_link(resp)
    # Get confirmation token
    resp = do_request(
        context,
        "get",
        conf_url,
        "/sign_up/enter_password?confirmation_token=",
        "",
        {},
        {},
        "/sign_up/email/confirm?confirmation_token=",
    )
    auth_token = authenticity_token(resp)
    dom = resp_to_dom(resp)
    # Hidden form field that carries the email-confirmation token forward.
    token = dom.find('[name="confirmation_token"]:first').attr("value")
    # Set user password
    resp = do_request(
        context,
        "post",
        "/sign_up/create_password",
        "/authentication_methods_setup",
        "",
        {
            "password_form[password]": default_password,
            "authenticity_token": auth_token,
            "confirmation_token": token,
        },
    )
    auth_token = authenticity_token(resp)
    # Choose "phone" as the two-factor method.
    resp = do_request(
        context,
        "post",
        "/authentication_methods_setup",
        "/phone_setup",
        "",
        {
            "_method": "patch",
            "two_factor_options_form[selection][]": "phone",
            "authenticity_token": auth_token,
        },
    )
    # After password creation set up SMS 2nd factor
    auth_token = authenticity_token(resp)
    resp = do_request(
        context,
        "post",
        "/phone_setup",
        "/login/two_factor/sms",
        "",
        {
            "_method": "patch",
            "new_phone_form[international_code]": "US",
            "new_phone_form[phone]": random_phone(),
            "new_phone_form[otp_delivery_preference]": "sms",
            "authenticity_token": auth_token,
            "commit": "Send security code",
        },
    )
    auth_token = authenticity_token(resp)
    # OTP is pre-filled in the page when the IdP runs in load-test mode.
    code = otp_code(resp)
    # Visit security code page and submit pre-filled OTP
    resp = do_request(
        context,
        "post",
        "/login/two_factor/sms",
        "/auth_method_confirmation",
        "",
        {"code": code, "authenticity_token": auth_token},
    )
    auth_token = authenticity_token(resp)
    # Skip adding a second authentication method.
    resp = do_request(
        context,
        "post",
        "/auth_method_confirmation/skip",
        "/account",
        "",
        {"authenticity_token": auth_token},
    )
    return resp
|
{"/load_testing/lib/flow_sp_ial2_sign_in_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_ial2_proofing.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_in.py": ["/load_testing/lib/flow_helper.py"]}
|
35,365
|
18F/identity-loadtest
|
refs/heads/main
|
/load_testing/lib/flow_sp_ial2_sign_in.py
|
from faker import Faker
from .flow_helper import (
authenticity_token,
do_request,
get_env,
otp_code,
personal_key,
querystring_value,
random_cred,
random_phone,
resp_to_dom,
sp_signout_link,
url_without_querystring,
)
from urllib.parse import urlparse
import os
import sys
import time
"""
*** SP IAL2 Sign In Flow ***
"""
def ial2_sign_in(context):
    """
    Run a full IAL2 (identity-proofed) sign-in through an SP (sync flow).

    Walks: password + SMS OTP, doc-auth consent, license image upload to the
    images API, SSN, verification polling, phone verification polling, OTP
    confirmation, password re-entry, personal key acknowledgment, and SP
    logout.

    Requires following attributes on context:
    * license_front - Image data for front of driver's license
    * license_back - Image data for back of driver's license
    """
    sp_root_url = get_env("SP_HOST")
    context.client.cookies.clear()
    # GET the SP root, which should contain a login link, give it a friendly
    # name for output
    resp = do_request(
        context,
        "get",
        sp_root_url,
        sp_root_url,
        '',
        {},
        {},
        sp_root_url
    )
    sp_signin_endpoint = sp_root_url + '/auth/request?aal=&ial=2'
    # submit signin form
    resp = do_request(
        context,
        "get",
        sp_signin_endpoint,
        '',
        '',
        {},
        {},
        sp_signin_endpoint
    )
    auth_token = authenticity_token(resp)
    # This should match the number of users that were created for the DB with
    # the rake task
    num_users = get_env("NUM_USERS")
    # Choose a random user
    credentials = random_cred(num_users, None)
    # POST username and password
    resp = do_request(
        context,
        "post",
        "/",
        "/login/two_factor/sms",
        '',
        {
            "user[email]": credentials["email"],
            "user[password]": credentials["password"],
            "authenticity_token": auth_token,
        }
    )
    auth_token = authenticity_token(resp)
    # OTP is pre-filled in the page when the IdP runs in load-test mode.
    code = otp_code(resp)
    # NOTE(review): idp_domain is unused in this function.
    idp_domain = urlparse(resp.url).netloc
    if os.getenv("DEBUG"):
        print("DEBUG: /login/two_factor/sms")
    # Post to unauthenticated redirect
    resp = do_request(
        context,
        "post",
        "/login/two_factor/sms",
        "/verify/doc_auth/welcome",
        '',
        {
            "code": code,
            "authenticity_token": auth_token,
        },
    )
    auth_token = authenticity_token(resp)
    if os.getenv("DEBUG"):
        print("DEBUG: /verify/doc_auth/welcome")
    # Post consent to Welcome
    resp = do_request(
        context,
        "put",
        "/verify/doc_auth/welcome",
        "/verify/doc_auth/agreement",
        '',
        {"authenticity_token": auth_token, },
    )
    auth_token = authenticity_token(resp)
    if os.getenv("DEBUG"):
        print("DEBUG: /verify/doc_auth/agreement")
    # Post consent to Welcome
    resp = do_request(
        context,
        "put",
        "/verify/doc_auth/agreement",
        "/verify/doc_auth/upload",
        '',
        {"doc_auth[ial2_consent_given]": "1",
         "authenticity_token": auth_token, },
    )
    auth_token = authenticity_token(resp)
    if os.getenv("DEBUG"):
        print("DEBUG: /verify/doc_auth/upload?type=desktop")
    # Choose Desktop flow
    resp = do_request(
        context,
        "put",
        "/verify/doc_auth/upload?type=desktop",
        "/verify/document_capture",
        '',
        {"authenticity_token": auth_token, },
    )
    # The capture page needs both the CSRF meta token (for the images API)
    # and the capture-session UUID hidden field.
    dom = resp_to_dom(resp)
    selector = 'meta[name="csrf-token"]'
    auth_token = dom.find(selector).eq(0).attr("content")
    selector = 'input[id="doc_auth_document_capture_session_uuid"]'
    dcs_uuid = dom.find(selector).eq(0).attr("value")
    # Separate form token used by the follow-up PUT below.
    second_auth_token = authenticity_token(resp)
    files = {"front": context.license_front,
             "back": context.license_back,
             }
    if os.getenv("DEBUG"):
        print("DEBUG: /verify/document_capture")
    # Post the license images
    resp = do_request(
        context,
        "post",
        "/api/verify/images",
        None,
        None,
        {
            "flow_path": "standard",
            "document_capture_session_uuid": dcs_uuid},
        files,
        None,
        {"X-CSRF-Token": auth_token},
    )
    resp = do_request(
        context,
        "put",
        "/verify/document_capture",
        "/verify/ssn",
        None,
        {
            "_method": "patch",
            "doc_auth[document_capture_session_uuid]": dcs_uuid,
            "authenticity_token": second_auth_token,
        },
    )
    auth_token = authenticity_token(resp)
    ssn = '900-12-3456'
    if os.getenv("DEBUG"):
        print("DEBUG: /verify/ssn")
    resp = do_request(
        context,
        "put",
        "/verify/ssn",
        "/verify/verify_info",
        '',
        {"authenticity_token": auth_token, "doc_auth[ssn]": ssn, },
    )
    # There are three auth tokens on the response, get the second
    auth_token = authenticity_token(resp, 1)
    if os.getenv("DEBUG"):
        print("DEBUG: /verify/verify_info")
    # Verify
    resp = do_request(
        context,
        "put",
        "/verify/verify_info",
        None,
        '',
        {"authenticity_token": auth_token, },
    )
    # Wait until
    # Poll (up to ~60s) for async verification to land on /verify/phone.
    for i in range(12):
        if urlparse(resp.url).path == '/verify/phone':
            # success
            break
        elif urlparse(resp.url).path == '/verify/verify_info':
            # keep waiting
            time.sleep(5)
        else:
            raise ValueError(
                f"Verification received unexpected URL of {resp.url}\n\n{resp.text}")
        resp = do_request(
            context,
            "get",
            "/verify/verify_info",
        )
    if os.getenv("DEBUG"):
        print("DEBUG: /verify/phone")
    # Enter Phone
    auth_token = authenticity_token(resp)
    resp = do_request(
        context,
        "put",
        "/verify/phone",
        None,
        '',
        {"authenticity_token": auth_token,
         "idv_phone_form[phone]": random_phone(), },
    )
    # Poll (up to ~60s) for phone verification to reach phone_confirmation.
    for i in range(12):
        if urlparse(resp.url).path == '/verify/phone_confirmation':
            # success
            break
        elif urlparse(resp.url).path == '/verify/phone':
            # keep waiting
            time.sleep(5)
        else:
            if "login credentials used in another browser" in resp.text:
                resp.failure(
                    'Your login credentials were used in another browser.')
            else:
                raise ValueError(
                    f'Phone verification received unexpected URL of {resp.url}\n\n{resp.text}')
        resp = do_request(
            context,
            "get",
            "/verify/phone",
        )
    auth_token = authenticity_token(resp)
    code = otp_code(resp)
    if os.getenv("DEBUG"):
        print("DEBUG: /verify/phone_confirmation")
    # Verify SMS Delivery
    resp = do_request(
        context,
        "put",
        "/verify/phone_confirmation",
        "/verify/review",
        '',
        {"authenticity_token": auth_token, "code": code, },
    )
    auth_token = authenticity_token(resp)
    if os.getenv("DEBUG"):
        print("DEBUG: /verify/review")
    # Re-enter password
    resp = do_request(
        context,
        "put",
        "/verify/review",
        "/verify/personal_key",
        '',
        {
            "authenticity_token": auth_token,
            "user[password]": "salty pickles",
        },
    )
    auth_token = authenticity_token(resp)
    if os.getenv("DEBUG"):
        print("DEBUG: /verify/confirmations")
    # Confirmations
    resp = do_request(
        context,
        "post",
        "/verify/personal_key",
        "/sign_up/completed",
        '',
        {
            "authenticity_token": auth_token,
            "acknowledgment": "1",
        },
    )
    auth_token = authenticity_token(resp)
    if os.getenv("DEBUG"):
        print("DEBUG: /sign_up/completed")
    # Sign Up Completed
    resp = do_request(
        context,
        "post",
        "/sign_up/completed",
        None,
        '',
        {"authenticity_token": auth_token,
         "commit": "Agree and continue"},
    )
    ial2_sig = "ACR: http://idmanagement.gov/ns/assurance/ial/2"
    # Does it include the IAL2 text signature?
    if resp.text.find(ial2_sig) == -1:
        print("ERROR: this does not appear to be an IAL2 auth")
    logout_link = sp_signout_link(resp)
    if os.getenv("DEBUG"):
        print("DEBUG: /sign_up/completed")
    resp = do_request(
        context,
        "get",
        logout_link,
        '',
        'Do you want to sign out of',
        {},
        {},
        '/openid_connect/logout?client_id=...'
    )
    auth_token = authenticity_token(resp)
    state = querystring_value(resp.url, 'state')
    # Confirm the logout request on the IdP
    resp = do_request(
        context,
        "post",
        "/openid_connect/logout",
        sp_root_url,
        'You have been logged out',
        {
            "authenticity_token": auth_token,
            "_method": "delete",
            "client_id": "urn:gov:gsa:openidconnect:sp:sinatra",
            "post_logout_redirect_uri": f"{sp_root_url}/logout",
            "state": state
        }
    )
    # Does it include the logged out text signature?
    if resp.text.find('You have been logged out') == -1:
        print("ERROR: user has not been logged out")
|
{"/load_testing/lib/flow_sp_ial2_sign_in_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_ial2_proofing.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_in.py": ["/load_testing/lib/flow_helper.py"]}
|
35,366
|
18F/identity-loadtest
|
refs/heads/main
|
/load_testing/prod_simulator.locustfile.py
|
from locust import HttpUser, TaskSet, task, between
from lib import (
flow_ial2_proofing,
flow_sp_ial2_sign_in,
flow_sp_ial2_sign_up,
flow_sign_in,
flow_sp_sign_in,
flow_sp_sign_up,
flow_helper,
)
import os
import logging
import sys
# Send all DEBUG-and-above log records to stdout so locust surfaces
# per-flow diagnostics in its console output.
root = logging.getLogger()
root.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
root.addHandler(handler)

# Default ratios. Sum should equal 10000. (1 == 0.01%)
# These can be overridden by setting the corresponding environment
# variable. Example: RATIO_SIGN_UP will override RATIOS["SIGN_UP"]
# Defaults updated based on measurements from 2021-04-13
RATIOS = {
    "SIGN_IN": 7217,
    "SIGN_UP": 1666,
    "SIGN_IN_AND_PROOF": 592,
    "SIGN_UP_AND_PROOF": 148,
    "SIGN_IN_USER_NOT_FOUND": 7,
    "SIGN_IN_INCORRECT_PASSWORD": 185,
    "SIGN_IN_INCORRECT_SMS_OTP": 185,
}

# For sign ins, what percentage should simulate a remembered device
REMEMBERED_PERCENT = int(os.getenv("REMEMBERED_PERCENT", 54))

# Runtime environment override with optional keys:
# RATIO_<KEY> in the environment replaces RATIOS[<KEY>].
for k in RATIOS.keys():
    rk = "RATIO_" + k
    if rk in os.environ:
        RATIOS[k] = int(os.getenv(rk))

# Visited user cookie cache, shared by every ProdSimulator instance in
# this process so flows can re-use previously-seen users' cookies.
VISITED = {}
class ProdSimulator(TaskSet):
    """Locust TaskSet that mixes flows in production-like proportions.

    Task weights come from the module-level RATIOS table (sum 10000,
    1 == 0.01%, overridable via RATIO_* environment variables).
    """

    # Preload drivers license data once at class-definition time so all
    # simulated users share the same fixture bytes.
    license_front = flow_helper.load_fixture("mock-front.jpeg")
    license_back = flow_helper.load_fixture("mock-back.jpeg")

    def on_start(self):
        # Runs once per simulated user before any task executes.
        num_users = int(flow_helper.get_env("NUM_USERS"))
        logging.debug(
            f"*** Production-like workload with {num_users} users ***")
        # Create a tracking dictionary to allow selection of previously logged
        # in users and restoration on specific cookies.  VISITED is a
        # module-level dict, so this state is shared process-wide.
        self.visited = VISITED
        # TODO - Make these tunable
        # Wait till this percentage of users have visited before enabling
        # random visited user selection.
        self.visited_min_pct = 0.01
        # Target percentage of remembered users for regular sign_in
        self.remembered_target = REMEMBERED_PERCENT
        # Calculate minimum number based on passed users
        self.visited_min = int(0.01 * self.visited_min_pct * num_users)

    def on_stop(self):
        logging.debug("*** Ending Production-like load tests ***")

    # Sum should equal 10000. (1 == 0.01%)
    #
    @task(RATIOS["SIGN_IN"])
    def sign_in_remembered_load_test(self):
        # NOTE(review): remember_device is passed False here; the
        # "remembered" behavior presumably comes from re-using cookies of
        # previously-visited users (visited/remembered_target) — confirm
        # against flow_sp_sign_in.do_sign_in.
        logging.debug("=== Starting Sign IN w/remembered device ===")
        flow_sp_sign_in.do_sign_in(
            self,
            remember_device=False,
            visited=self.visited,
            visited_min=self.visited_min,
            remembered_target=self.remembered_target,)

    @task(RATIOS["SIGN_UP"])
    def sign_up_load_test(self):
        logging.debug("=== Starting Sign UP ===")
        flow_sp_sign_up.do_sign_up(self)

    @task(RATIOS["SIGN_IN_AND_PROOF"])
    def sign_in_and_proof_load_test(self):
        # Existing user signs in, then completes IAL2 identity proofing.
        flow_sp_ial2_sign_in.ial2_sign_in(self)

    @task(RATIOS["SIGN_UP_AND_PROOF"])
    def sign_up_and_proof_load_test(self):
        # New user signs up and completes IAL2 identity proofing.
        flow_sp_ial2_sign_up.ial2_sign_up(self)

    @task(RATIOS["SIGN_IN_USER_NOT_FOUND"])
    def sign_in_load_test_user_not_found(self):
        flow_sp_sign_in.do_sign_in_user_not_found(self)

    @task(RATIOS["SIGN_IN_INCORRECT_PASSWORD"])
    def sign_in_load_test_incorrect_password(self):
        flow_sp_sign_in.do_sign_in_incorrect_password(self)

    @task(RATIOS["SIGN_IN_INCORRECT_SMS_OTP"])
    def sign_in_load_test_incorrect_sms_otp(self):
        flow_sp_sign_in.do_sign_in_incorrect_sms_otp(
            self, visited=self.visited)
class WebsiteUser(HttpUser):
    """Locust entry point: runs ProdSimulator with a 5-9s think time."""
    tasks = [ProdSimulator]
    wait_time = between(5, 9)
|
{"/load_testing/lib/flow_sp_ial2_sign_in_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_ial2_proofing.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_in.py": ["/load_testing/lib/flow_helper.py"]}
|
35,367
|
18F/identity-loadtest
|
refs/heads/main
|
/load_testing/sign_up_sign_in.locustfile.py
|
from locust import HttpUser, TaskSet, task, between
from lib import flow_sign_in, flow_sign_up, flow_helper
import logging
class SignUpSignInLoad(TaskSet):
    """ @task(<weight>) : value=3 executes 3x as often as value=1
    Things inside task are synchronous. Tasks are async """

    @task(8)
    def sign_in_load_test(self):
        # Weight 8: sign-ins dominate sign-ups in this mix (8:1).
        logging.info("=== Starting Sign IN ===")
        # Do a Sign In
        flow_sign_in.do_sign_in(self)
        # Get account page, and stay there to prove authentication
        flow_helper.do_request(self, "get", "/account", "/account", "")
        # Then log out, landing back on the root page
        flow_helper.do_request(self, "get", "/logout", "/", "")

    @task(1)
    def sign_up_load_test(self):
        logging.info("=== Starting Sign UP ===")
        # Warm up with the root page, then run the full sign-up flow
        flow_helper.do_request(self, "get", "/", "/", "")
        flow_sign_up.do_sign_up(self)
        # Prove authentication, then log out
        flow_helper.do_request(self, "get", "/account", "/account", "")
        flow_helper.do_request(self, "get", "/logout", "/logout", "")
class WebsiteUser(HttpUser):
    """Locust entry point: runs SignUpSignInLoad with a 5-9s think time."""
    tasks = [SignUpSignInLoad]
    wait_time = between(5, 9)
|
{"/load_testing/lib/flow_sp_ial2_sign_in_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_ial2_proofing.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_in.py": ["/load_testing/lib/flow_helper.py"]}
|
35,368
|
18F/identity-loadtest
|
refs/heads/main
|
/load_testing/lib/flow_sp_ial2_sign_up.py
|
from faker import Faker
from .flow_helper import (
authenticity_token,
confirm_link,
do_request,
get_env,
otp_code,
personal_key,
querystring_value,
random_cred,
random_phone,
resp_to_dom,
sp_signout_link,
url_without_querystring,
)
from urllib.parse import urlparse
import os
import sys
import time
"""
*** SP IAL2 Sign Up Flow ***
"""
def ial2_sign_up(context):
    """
    Drive one complete IAL2 (identity-proofed) sign-up through the SP:
    create a fresh account with a random email, set up an SMS second
    factor, walk the document-proofing flow (license upload, SSN, phone
    verification, personal key), verify the IAL2 ACR marker, and sign
    out via the IdP's OpenID Connect logout.

    Requires following attributes on context:
    * license_front - Image data for front of driver's license
    * license_back - Image data for back of driver's license
    """
    sp_root_url = get_env("SP_HOST")
    # Start from a clean session so no prior login state leaks between
    # simulated users.
    context.client.cookies.clear()
    # GET the SP root, which should contain a login link, give it a friendly
    # name for output
    resp = do_request(
        context,
        "get",
        sp_root_url,
        sp_root_url,
        '',
        {},
        {},
        sp_root_url
    )
    # ial=2 requests an identity-proofed (IAL2) authentication
    sp_signin_endpoint = sp_root_url + '/auth/request?aal=&ial=2'
    # submit signin form
    resp = do_request(
        context,
        "get",
        sp_signin_endpoint,
        '',
        '',
        {},
        {},
        sp_signin_endpoint
    )
    auth_token = authenticity_token(resp)
    # GET the new email page
    resp = do_request(context, "get", "/sign_up/enter_email",
                      "/sign_up/enter_email")
    auth_token = authenticity_token(resp)
    # Post fake email and get confirmation link (link shows up in "load test mode")
    fake = Faker()
    new_email = "test+{}@test.com".format(fake.md5())
    default_password = "salty pickles"
    resp = do_request(
        context,
        "post",
        "/sign_up/enter_email",
        "/sign_up/verify_email",
        '',
        {
            "user[email]": new_email,
            "authenticity_token": auth_token,
            "user[terms_accepted]": '1'
        },
    )
    conf_url = confirm_link(resp)
    # Get confirmation token
    resp = do_request(
        context,
        "get",
        conf_url,
        "/sign_up/enter_password?confirmation_token=",
        '',
        {},
        {},
        "/sign_up/email/confirm?confirmation_token=",
    )
    auth_token = authenticity_token(resp)
    dom = resp_to_dom(resp)
    token = dom.find('[name="confirmation_token"]:first').attr("value")
    # Set user password
    resp = do_request(
        context,
        "post",
        "/sign_up/create_password",
        "/authentication_methods_setup",
        '',
        {
            "password_form[password]": default_password,
            "authenticity_token": auth_token,
            "confirmation_token": token,
        },
    )
    auth_token = authenticity_token(resp)
    # Choose phone (SMS) as the second authentication factor
    resp = do_request(
        context,
        "post",
        "/authentication_methods_setup",
        "/phone_setup",
        "",
        {
            "_method": "patch",
            "two_factor_options_form[selection][]": "phone",
            "authenticity_token": auth_token,
        },
    )
    # After password creation set up SMS 2nd factor
    auth_token = authenticity_token(resp)
    resp = do_request(
        context,
        "post",
        "/phone_setup",
        "/login/two_factor/sms",
        '',
        {
            "new_phone_form[international_code]": "US",
            "new_phone_form[phone]": random_phone(),
            "new_phone_form[otp_delivery_preference]": "sms",
            "new_phone_form[recaptcha_token]": "",
            "authenticity_token": auth_token,
            "commit": "Send security code",
        },
    )
    auth_token = authenticity_token(resp)
    # OTP is scraped from the page (exposed in load-test mode)
    code = otp_code(resp)
    if os.getenv("DEBUG"):
        print("DEBUG: /login/two_factor/sms")
    # Visit security code page and submit pre-filled OTP
    resp = do_request(
        context,
        "post",
        "/login/two_factor/sms",
        "/auth_method_confirmation",
        '',
        {"code": code, "authenticity_token": auth_token},
    )
    auth_token = authenticity_token(resp)
    # Skip adding another auth method; land on the doc-auth welcome page
    resp = do_request(
        context,
        "post",
        "/auth_method_confirmation/skip",
        "/verify/doc_auth/welcome",
        "",
        {"authenticity_token": auth_token},
    )
    auth_token = authenticity_token(resp)
    if os.getenv("DEBUG"):
        print("DEBUG: /verify/doc_auth/welcome")
    # Continue past the doc-auth welcome screen
    resp = do_request(
        context,
        "put",
        "/verify/doc_auth/welcome",
        "/verify/doc_auth/agreement",
        '',
        {"authenticity_token": auth_token, },
    )
    auth_token = authenticity_token(resp)
    if os.getenv("DEBUG"):
        print("DEBUG: /verify/doc_auth/agreement")
    # Post consent to the IAL2 agreement
    resp = do_request(
        context,
        "put",
        "/verify/doc_auth/agreement",
        "/verify/doc_auth/upload",
        '',
        {
            "doc_auth[ial2_consent_given]": "1",
            "authenticity_token": auth_token,
        },
    )
    auth_token = authenticity_token(resp)
    if os.getenv("DEBUG"):
        print("DEBUG: /verify/doc_auth/upload?type=desktop")
    # Choose Desktop flow
    resp = do_request(
        context,
        "put",
        "/verify/doc_auth/upload?type=desktop",
        "/verify/document_capture",
        '',
        {"authenticity_token": auth_token, },
    )
    # The capture page carries a CSRF meta token (used for the image
    # upload API), a capture-session UUID, and a second form token.
    dom = resp_to_dom(resp)
    selector = 'meta[name="csrf-token"]'
    auth_token = dom.find(selector).eq(0).attr("content")
    selector = 'input[id="doc_auth_document_capture_session_uuid"]'
    dcs_uuid = dom.find(selector).eq(0).attr("value")
    second_auth_token = authenticity_token(resp)
    files = {"front": context.license_front,
             "back": context.license_back,
             }
    if os.getenv("DEBUG"):
        print("DEBUG: /verify/document_capture")
    # Post the license images
    resp = do_request(
        context,
        "post",
        "/api/verify/images",
        None,
        None,
        {
            "flow_path": "standard",
            "document_capture_session_uuid": dcs_uuid},
        files,
        None,
        {"X-CSRF-Token": auth_token},
    )
    # Mark document capture complete; move on to SSN entry
    resp = do_request(
        context,
        "put",
        "/verify/document_capture",
        "/verify/ssn",
        None,
        {
            "_method": "patch",
            "doc_auth[document_capture_session_uuid]": dcs_uuid,
            "authenticity_token": second_auth_token,
        },
    )
    auth_token = authenticity_token(resp)
    # Test-range SSN (900-xx-xxxx is not a valid real SSN prefix)
    ssn = '900-12-3456'
    if os.getenv("DEBUG"):
        print("DEBUG: /verify/ssn")
    resp = do_request(
        context,
        "put",
        "/verify/ssn",
        "/verify/verify_info",
        '',
        {"authenticity_token": auth_token, "doc_auth[ssn]": ssn, },
    )
    # NOTE(review): index 1 picks the second token on the page —
    # presumably the verify-info form rather than an earlier form.
    auth_token = authenticity_token(resp, 1)
    if os.getenv("DEBUG"):
        print("DEBUG: /verify/doc_auth/verify_info")
    # Verify
    resp = do_request(
        context,
        "put",
        "/verify/verify_info",
        None,
        '',
        {"authenticity_token": auth_token, },
    )
    # Poll until asynchronous proofing completes (up to 12 x 5s = 60s):
    # success is a redirect to /verify/phone.
    for i in range(12):
        if urlparse(resp.url).path == '/verify/phone':
            # success
            break
        elif urlparse(resp.url).path == '/verify/verify_info':
            # keep waiting
            time.sleep(5)
        else:
            raise ValueError(
                f'Verification received unexpected URL of {resp.url}')
        resp = do_request(
            context,
            "get",
            "/verify/verify_info",
        )
    if os.getenv("DEBUG"):
        print("DEBUG: /verify/phone")
    # Enter Phone
    auth_token = authenticity_token(resp)
    resp = do_request(
        context,
        "put",
        "/verify/phone",
        None,
        '',
        {"authenticity_token": auth_token,
         "idv_phone_form[phone]": random_phone(), },
    )
    # Poll until phone verification completes (up to 12 x 5s = 60s):
    # success is a redirect to /verify/phone_confirmation.
    for i in range(12):
        if urlparse(resp.url).path == '/verify/phone_confirmation':
            # success
            break
        elif urlparse(resp.url).path == '/verify/phone':
            # keep waiting
            time.sleep(5)
        else:
            raise ValueError(
                f'Phone verification received unexpected URL of {resp.url}')
        resp = do_request(
            context,
            "get",
            "/verify/phone",
        )
    auth_token = authenticity_token(resp)
    code = otp_code(resp)
    if os.getenv("DEBUG"):
        print("DEBUG: /verify/phone_confirmation")
    # Verify SMS Delivery
    resp = do_request(
        context,
        "put",
        "/verify/phone_confirmation",
        "/verify/review",
        '',
        {"authenticity_token": auth_token, "code": code, },
    )
    auth_token = authenticity_token(resp)
    if os.getenv("DEBUG"):
        print("DEBUG: /verify/review")
    # Re-enter password to encrypt the verified profile
    resp = do_request(
        context,
        "put",
        "/verify/review",
        "/verify/personal_key",
        '',
        {
            "authenticity_token": auth_token,
            "user[password]": "salty pickles",
        },
    )
    auth_token = authenticity_token(resp)
    if os.getenv("DEBUG"):
        # NOTE(review): stale DEBUG label — this step posts
        # /verify/personal_key, not /verify/review.
        print("DEBUG: /verify/review")
    # Acknowledge the personal key
    resp = do_request(
        context,
        "post",
        "/verify/personal_key",
        "/sign_up/completed",
        '',
        {
            "authenticity_token": auth_token,
            "acknowledgment": "1",
        },
    )
    auth_token = authenticity_token(resp)
    if os.getenv("DEBUG"):
        print("DEBUG: /sign_up/completed")
    # Sign Up Completed: agree to share information with the SP
    resp = do_request(
        context,
        "post",
        "/sign_up/completed",
        None,
        '',
        {
            "authenticity_token": auth_token,
            "commit": "Agree and continue"
        },
    )
    ial2_sig = "ACR: http://idmanagement.gov/ns/assurance/ial/2"
    # Does it include the IAL2 text signature?
    if resp.text.find(ial2_sig) == -1:
        print("ERROR: this does not appear to be an IAL2 auth")
    # Sign out via the SP's logout link
    logout_link = sp_signout_link(resp)
    resp = do_request(
        context,
        "get",
        logout_link,
        '',
        'Do you want to sign out of',
        {},
        {},
        '/openid_connect/logout?client_id=...'
    )
    auth_token = authenticity_token(resp)
    state = querystring_value(resp.url, 'state')
    # Confirm the logout request on the IdP
    resp = do_request(
        context,
        "post",
        "/openid_connect/logout",
        sp_root_url,
        'You have been logged out',
        {
            "authenticity_token": auth_token,
            "_method": "delete",
            "client_id": "urn:gov:gsa:openidconnect:sp:sinatra",
            "post_logout_redirect_uri": f"{sp_root_url}/logout",
            "state": state
        }
    )
    # Does it include the logged out text signature?
    if resp.text.find('You have been logged out') == -1:
        print("ERROR: user has not been logged out")
|
{"/load_testing/lib/flow_sp_ial2_sign_in_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_ial2_proofing.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_in.py": ["/load_testing/lib/flow_helper.py"]}
|
35,369
|
18F/identity-loadtest
|
refs/heads/main
|
/tests/test_flow_helpers.py
|
import pytest
import os
import re
import test_helper
# Import load_testing files using a sad hack to support running from anywhere
import sys
sys.path.append(
os.path.abspath(
os.path.join(os.path.dirname(os.path.dirname(__file__)), "load_testing")
)
)
from lib.flow_helper import (
authenticity_token,
choose_cred,
confirm_link,
desktop_agent_headers,
export_cookies,
get_env,
import_cookies,
load_fixture,
otp_code,
querystring_value,
random_cred,
random_phone,
resp_to_dom,
sp_signin_link,
sp_signout_link,
url_without_querystring,
use_previous_visitor,
)
"""
*** Unit test simple flow helpers
"""
def test_querystring_value():
    """Each query parameter should be retrievable by name."""
    target = "http://one.two?three=four&five=six"
    for param, expected in (("three", "four"), ("five", "six")):
        assert querystring_value(target, param) == expected
def test_url_without_querystring():
    """Stripping the querystring leaves the bare URL; no-op without one."""
    bare = "http://one.two"
    assert url_without_querystring(bare + "?three=four&five=six") == bare
    assert url_without_querystring(bare) == bare
def test_random_cred():
    """With a pool of one user, the only possible credential is user 0."""
    credential = random_cred(1, {})
    expected = {
        "number": 0,
        "email": "testuser0@example.com",
        "password": "salty pickles",
    }
    for key, value in expected.items():
        assert credential[key] == value
def test_choose_cred():
    """A chosen credential belongs to the supplied user-number pool."""
    pool = [777, 424242, 90210]
    credential = choose_cred(pool)
    picked = credential["number"]
    assert picked in pool
    assert credential["email"] == "testuser{}@example.com".format(picked)
    assert credential["password"] == "salty pickles"
def test_use_previous_visitor():
    """Threshold gating plus a statistical check on the target percentage."""
    # Below the minimum-visited threshold: never reuse a visitor
    assert use_previous_visitor(0, 1, 0) is False
    # At/above threshold with a 100% target: always reuse
    assert use_previous_visitor(1, 0, 100) is True
    # Nondeterministic: a 75% target should land within +/- 10% over
    # 1000 trials
    hits = sum(1 for _ in range(1000) if use_previous_visitor(1, 0, 75))
    assert 650 <= hits <= 850, \
        "use_previous_visitor with target of 75% +/- 10 was out of spec"
def test_random_phone():
    """Generated numbers are the 202555 prefix plus four digits."""
    pattern = re.compile(r"202555\d{4}")
    for _ in range(5):
        assert pattern.match(random_phone())
def test_desktop_agent_headers():
    """The desktop user agent should identify itself as Firefox."""
    headers = desktop_agent_headers()
    assert "Firefox" in headers["user-agent"]
def test_get_env():
    """get_env returns a set variable and raises for an unset one."""
    os.environ["TESTKEY"] = "testvalue"
    assert get_env("TESTKEY") == "testvalue"
    with pytest.raises(Exception):
        get_env("UNSETKEY")
def test_resp_to_dom():
    """A fixture response parses into a truthy DOM object."""
    fixture = test_helper.mock_response("doc_auth_verify.html")
    assert resp_to_dom(fixture)
def test_authentication_token():
    """Tokens are extracted by index, defaulting to the first one."""
    resp = test_helper.mock_response("doc_auth_verify.html")
    tokens = [
        "WPhfbuwqPzfbpB2+aTHWR93t0/7O88iK5nYdL/RaZoLEPH63Cjf4yKAkHw6CUDyaXw6O5oi4Nc2NHzC6stEdwA==",
        "I7WOA3x24rsZVj56R9QtCNVNlXapxqo2A9MOkU2sHPIsAi99KMzwSzD3Y89H710hluHXCoKOYt8VkT77f9U/Kg==",
        "679gwHHowpDvKlzyBL4Cw2MYZC1NYLqWaAEz+Nze6ZJZELBdu1t7BTlGmVkvqfBh713/xc0oCkbndTMoOlpLRg==",
    ]
    # No index means the first token on the page
    assert authenticity_token(resp) == tokens[0]
    for idx, expected in enumerate(tokens):
        assert authenticity_token(resp, idx) == expected
    with pytest.raises(Exception):
        authenticity_token("a response without a token in it")
def test_otp_code():
    """The OTP is scraped from the SMS page; garbage input raises."""
    fixture = test_helper.mock_response("two_factor_sms.html")
    assert otp_code(fixture) == "543662"
    with pytest.raises(Exception):
        otp_code("a response without a code in it")
def test_confirm_link():
    """The email-confirmation URL is scraped from the verify-email page."""
    fixture = test_helper.mock_response("verify_email.html")
    assert "/sign_up/email/confirm?confirmation_token=" in confirm_link(fixture)
    with pytest.raises(Exception):
        confirm_link("a response without a token in it")
def test_sp_signin_link():
    """A logged-out SP page exposes an OIDC authorize link."""
    fixture = test_helper.mock_response("sp_without_session.html")
    assert "openid_connect/authorize?" in sp_signin_link(fixture)
    with pytest.raises(Exception):
        sp_signin_link("a response without a signin link in it")
def test_sp_signout_link():
    """A logged-in SP page exposes an OIDC logout link."""
    fixture = test_helper.mock_response("sp_with_session.html")
    assert "openid_connect/logout?" in sp_signout_link(fixture)
    with pytest.raises(Exception):
        sp_signout_link("A response without a sign-out link")
def test_export_import_cookies():
    """Round-trip cookie export/import honors domain and name filters."""
    # Late load requests to avoid monkeypatch warning:
    # https://github.com/gevent/gevent/issues/1016
    from requests import Session

    domain = "oh.yea"
    session = Session()
    # Cookies that should survive an export
    session.cookies.set("remember_device", "Sure", domain=domain)
    session.cookies.set(
        "user_opted_remember_device_preference", "Yep", domain=domain)
    # Cookies that should be filtered out (wrong domain and/or name)
    session.cookies.set("remember_device", "Wrong_Domain", domain="other.place")
    session.cookies.set("wrong_domain_and_name", "me", domain="sumthing")
    session.cookies.set("wrong_name", "me", domain=domain)

    ## Export tests
    exported = export_cookies(domain, session.cookies)
    assert len(exported) == 2, "Wrong number of cookies exported"
    assert {c.name for c in exported} == {
        "remember_device", "user_opted_remember_device_preference"}
    assert exported[0].domain == domain

    # An explicit savelist overrides the default name filter
    only_wrong_name = export_cookies(
        domain, session.cookies, savelist=["wrong_name"])
    assert len(only_wrong_name) == 1
    assert only_wrong_name[0].name == "wrong_name"

    # No cookies match an unrelated domain
    assert export_cookies("foo.bar", session.cookies) == []

    session.cookies.clear()
    assert len(export_cookies(domain, session.cookies)) == 0

    ## Import tests
    assert session.cookies.get(
        "remember_device", domain=domain) is None, "Cookies did not clear"
    import_cookies(session, exported)
    assert session.cookies.get("remember_device", domain=domain) == "Sure"
    assert session.cookies.get("user_opted_remember_device_preference") == "Yep"
    assert session.cookies.get("remember_device", domain="other_place") is None
|
{"/load_testing/lib/flow_sp_ial2_sign_in_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_ial2_proofing.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_in.py": ["/load_testing/lib/flow_helper.py"]}
|
35,370
|
18F/identity-loadtest
|
refs/heads/main
|
/load_testing/sp_ial2_sign_up.locustfile.py
|
from locust import HttpUser, TaskSet, task, between
from lib import flow_sp_ial2_sign_up, flow_helper
class SP_IAL2_SignUpLoad(TaskSet):
    """Single-task set: run the full SP IAL2 sign-up flow."""

    # Preload drivers license data once at class-definition time so all
    # simulated users share the same fixture bytes.
    license_front = flow_helper.load_fixture("mock-front.jpeg")
    license_back = flow_helper.load_fixture("mock-back.jpeg")

    @task(1)
    def sp_sign_in_load_test(self):
        # NOTE(review): method name says sign_in but this drives the
        # sign-UP flow — presumably a copy/paste slip; the name is only
        # cosmetic to locust.
        flow_sp_ial2_sign_up.ial2_sign_up(self)
class WebsiteUser(HttpUser):
    """Locust entry point: runs SP_IAL2_SignUpLoad with a 5-9s think time."""
    tasks = [SP_IAL2_SignUpLoad]
    wait_time = between(5, 9)
|
{"/load_testing/lib/flow_sp_ial2_sign_in_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_ial2_proofing.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_in.py": ["/load_testing/lib/flow_helper.py"]}
|
35,371
|
18F/identity-loadtest
|
refs/heads/main
|
/load_testing/lib/flow_sp_sign_up.py
|
from faker import Faker
from .flow_helper import (
authenticity_token,
do_request,
get_env,
confirm_link,
otp_code,
querystring_value,
random_cred,
random_phone,
resp_to_dom,
sp_signin_link,
sp_signout_link,
url_without_querystring,
)
import logging
LOG_NAME = __file__.split('/')[-1].split('.')[0]
"""
*** Service Provider Sign Up Flow ***
Using this flow requires that a Service Provider be running and configured to work with HOST.
"""
def do_sign_up(context):
    """
    Create a brand-new IAL1 account through the SP: random email,
    password, SMS second factor, consent to share with the SP, then
    sign out again via the IdP's OpenID Connect logout.
    """
    sp_root_url = get_env("SP_HOST")
    # Start from a clean session so no prior login state leaks between
    # simulated users.
    context.client.cookies.clear()
    # GET the SP root, which should contain a login link, give it a friendly name for output
    resp = do_request(
        context,
        "get",
        sp_root_url,
        sp_root_url,
        '',
        {},
        {},
        sp_root_url
    )
    # ial=1 requests a standard (non-proofed) authentication
    sp_signin_endpoint = sp_root_url + '/auth/request?aal=&ial=1'
    # submit signin form
    resp = do_request(
        context,
        "get",
        sp_signin_endpoint,
        '',
        '',
        {},
        {},
        sp_signin_endpoint
    )
    auth_token = authenticity_token(resp)
    # GET the new email page
    resp = do_request(context, "get", "/sign_up/enter_email",
                      "/sign_up/enter_email")
    auth_token = authenticity_token(resp)
    # Post fake email and get confirmation link (link shows up in "load test mode")
    fake = Faker()
    new_email = "test+{}@test.com".format(fake.md5())
    default_password = "salty pickles"
    resp = do_request(
        context,
        "post",
        "/sign_up/enter_email",
        "/sign_up/verify_email",
        '',
        {
            "user[email]": new_email,
            "authenticity_token": auth_token,
            "user[terms_accepted]": '1'
        },
    )
    conf_url = confirm_link(resp)
    # Get confirmation token
    resp = do_request(
        context,
        "get",
        conf_url,
        "/sign_up/enter_password?confirmation_token=",
        '',
        {},
        {},
        "/sign_up/email/confirm?confirmation_token=",
    )
    auth_token = authenticity_token(resp)
    dom = resp_to_dom(resp)
    token = dom.find('[name="confirmation_token"]:first').attr("value")
    # Set user password
    resp = do_request(
        context,
        "post",
        "/sign_up/create_password",
        "/authentication_methods_setup",
        '',
        {
            "password_form[password]": default_password,
            "authenticity_token": auth_token,
            "confirmation_token": token,
        },
    )
    auth_token = authenticity_token(resp)
    # Choose phone (SMS) as the second authentication factor
    resp = do_request(
        context,
        "post",
        "/authentication_methods_setup",
        "/phone_setup",
        "",
        {
            "_method": "patch",
            "two_factor_options_form[selection][]": "phone",
            "authenticity_token": auth_token,
        },
    )
    # After password creation set up SMS 2nd factor
    auth_token = authenticity_token(resp)
    resp = do_request(
        context,
        "post",
        "/phone_setup",
        "/login/two_factor/sms",
        '',
        {
            "new_phone_form[international_code]": "US",
            "new_phone_form[phone]": random_phone(),
            "new_phone_form[otp_delivery_preference]": "sms",
            "new_phone_form[recaptcha_token]": "",
            "authenticity_token": auth_token,
            "commit": "Send security code",
        },
    )
    auth_token = authenticity_token(resp)
    # OTP is scraped from the page (exposed in load-test mode)
    code = otp_code(resp)
    # Visit security code page and submit pre-filled OTP
    resp = do_request(
        context,
        "post",
        "/login/two_factor/sms",
        "/auth_method_confirmation",
        '',
        {"code": code, "authenticity_token": auth_token},
    )
    auth_token = authenticity_token(resp)
    # Skip adding another auth method; land on the completed page
    resp = do_request(
        context,
        "post",
        "/auth_method_confirmation/skip",
        "/sign_up/completed",
        "",
        {"authenticity_token": auth_token},
    )
    auth_token = authenticity_token(resp)
    # Agree to share information with the service provider, which
    # redirects back to the SP root
    resp = do_request(
        context,
        "post",
        "/sign_up/completed",
        sp_root_url,
        '',
        {"authenticity_token": auth_token},
    )
    # We should now be at the SP root, with a "logout" link.
    # The test SP goes back to the root, so we'll test that for now
    logout_link = sp_signout_link(resp)
    resp = do_request(
        context,
        "get",
        logout_link,
        '',
        'Do you want to sign out of',
        {},
        {},
        '/openid_connect/logout?client_id=...'
    )
    auth_token = authenticity_token(resp)
    state = querystring_value(resp.url, 'state')
    # Confirm the logout request on the IdP
    resp = do_request(
        context,
        "post",
        "/openid_connect/logout",
        sp_root_url,
        'You have been logged out',
        {
            "authenticity_token": auth_token,
            "_method": "delete",
            "client_id": "urn:gov:gsa:openidconnect:sp:sinatra",
            "post_logout_redirect_uri": f"{sp_root_url}/logout",
            "state": state
        }
    )
    # Does it include the you have been logged out text?
    if resp.text.find('You have been logged out') == -1:
        logging.error('The user has not been logged out')
        logging.error(f'resp.url = {resp.url}')
|
{"/load_testing/lib/flow_sp_ial2_sign_in_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_ial2_proofing.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_in.py": ["/load_testing/lib/flow_helper.py"]}
|
35,372
|
18F/identity-loadtest
|
refs/heads/main
|
/load_testing/ial2_sign_up.locustfile.py
|
from locust import HttpUser, TaskSet, task, between
from lib import flow_ial2_proofing, flow_sign_up, flow_helper
import logging
class IAL2SignUpLoad(TaskSet):
    """Sign up a new user, then run IAL2 proofing against the IdP directly."""

    # Preload drivers license data once at class-definition time so all
    # simulated users share the same fixture bytes.
    license_front = flow_helper.load_fixture("mock-front.jpeg")
    license_back = flow_helper.load_fixture("mock-back.jpeg")

    def on_start(self):
        logging.info("*** Starting Sign-Up and IAL2 proof load tests ***")

    def on_stop(self):
        logging.info("*** Ending IAL2 Sign-Up load tests ***")

    """ @task(<weight>) : value=3 executes 3x as often as value=1 """
    """ Things inside task are synchronous. Tasks are async """

    @task(1)
    def sign_up_and_proof_load_test(self):
        # Sign up flow
        flow_sign_up.do_sign_up(self)
        # Get /account page
        flow_helper.do_request(self, "get", "/account", "/account", "")
        # IAL2 Proofing flow
        flow_ial2_proofing.do_ial2_proofing(self)
        # Get the /account page now
        flow_helper.do_request(self, "get", "/account", "/account", "")
        # Now log out
        flow_helper.do_request(self, "get", "/logout", "/", "")
class WebsiteUser(HttpUser):
    """Locust entry point: runs IAL2SignUpLoad with a 5-9s think time."""
    tasks = [IAL2SignUpLoad]
    wait_time = between(5, 9)
|
{"/load_testing/lib/flow_sp_ial2_sign_in_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_ial2_proofing.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_in.py": ["/load_testing/lib/flow_helper.py"]}
|
35,373
|
18F/identity-loadtest
|
refs/heads/main
|
/load_testing/sp_ial2_sign_up_async.locustfile.py
|
from locust import HttpUser, TaskSet, task, between
from lib import flow_sp_ial2_sign_up_async, flow_helper
import logging
class SP_IAL2_SignUpLoad(TaskSet):
    """Single-task set: run the asynchronous SP IAL2 sign-up flow."""

    # Preload drivers license data once at class-definition time so all
    # simulated users share the same fixture bytes.
    license_front = flow_helper.load_fixture("mock-front.jpeg")
    license_back = flow_helper.load_fixture("mock-back.jpeg")
    # NOTE(review): this log line runs once at class-definition (import)
    # time, not per task invocation.
    logging.info('starting sp_sign_up_load_test')

    @task(1)
    def sp_sign_up_load_test(self):
        flow_sp_ial2_sign_up_async.ial2_sign_up_async(self)
class WebsiteUser(HttpUser):
    """Locust entry point: runs SP_IAL2_SignUpLoad with a 5-9s think time."""
    tasks = [SP_IAL2_SignUpLoad]
    wait_time = between(5, 9)
|
{"/load_testing/lib/flow_sp_ial2_sign_in_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_ial2_proofing.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_in.py": ["/load_testing/lib/flow_helper.py"]}
|
35,374
|
18F/identity-loadtest
|
refs/heads/main
|
/load_testing/ial2_sign_in.locustfile.py
|
from locust import HttpUser, TaskSet, task, between
from lib import flow_ial2_proofing, flow_sign_in, flow_helper
import logging
class IAL2SignInLoad(TaskSet):
    """Sign in an existing user, then run IAL2 proofing against the IdP."""

    # Preload drivers license data once at class-definition time so all
    # simulated users share the same fixture bytes.
    license_front = flow_helper.load_fixture("mock-front.jpeg")
    license_back = flow_helper.load_fixture("mock-back.jpeg")

    def on_start(self):
        logging.info(
            "*** Starting Sign-In and IAL2 proof load tests with "
            + flow_helper.get_env("NUM_USERS")
            + " users ***"
        )

    def on_stop(self):
        logging.info("*** Ending IAL2 Sign-In load tests ***")

    """ @task(<weight>) : value=3 executes 3x as often as value=1 """
    """ Things inside task are synchronous. Tasks are async """

    @task(1)
    def sign_in_and_proof_load_test(self):
        # Sign in flow
        flow_sign_in.do_sign_in(self)
        # Get /account page
        flow_helper.do_request(self, "get", "/account", "/account", "")
        # IAL2 Proofing flow
        flow_ial2_proofing.do_ial2_proofing(self)
        # Get the /account page now
        flow_helper.do_request(self, "get", "/account", "/account", "")
        # Now log out
        flow_helper.do_request(self, "get", "/logout", "/", "")
class WebsiteUser(HttpUser):
    """Locust entry point: each simulated user runs IAL2SignInLoad tasks."""

    tasks = [IAL2SignInLoad]
    # Seconds each simulated user idles between tasks
    wait_time = between(5, 9)
|
{"/load_testing/lib/flow_sp_ial2_sign_in_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_ial2_proofing.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_in.py": ["/load_testing/lib/flow_helper.py"]}
|
35,375
|
18F/identity-loadtest
|
refs/heads/main
|
/load_testing/lib/flow_sp_ial2_sign_up_async.py
|
from faker import Faker
from .flow_helper import (
authenticity_token,
confirm_link,
do_request,
get_env,
otp_code,
personal_key,
querystring_value,
random_phone,
resp_to_dom,
sp_signout_link,
url_without_querystring,
)
from urllib.parse import urlparse
import logging
import os
import time
"""
*** SP IAL2 Sign Up Flow ***
"""
def ial2_sign_up_async(context):
    """
    Drive a full SP-initiated IAL2 sign-up against the IdP, polling the
    async verify endpoints until proofing completes, then sign out via the SP.

    Requires following attributes on context:
    * license_front - Image data for front of driver's license
    * license_back - Image data for back of driver's license
    """
    sp_root_url = get_env("SP_HOST")
    # Start each run from a clean cookie jar so no prior session leaks in
    context.client.cookies.clear()

    # GET the SP root, which should contain a login link, give it a friendly
    # name for output
    resp = do_request(
        context,
        "get",
        sp_root_url,
        sp_root_url,
        '',
        {},
        {},
        sp_root_url
    )

    sp_signin_endpoint = sp_root_url + '/auth/request?aal=&ial=2'

    # submit signin form
    resp = do_request(
        context,
        "get",
        sp_signin_endpoint,
        '',
        '',
        {},
        {},
        sp_signin_endpoint
    )
    auth_token = authenticity_token(resp)

    # GET the new email page
    resp = do_request(context, "get", "/sign_up/enter_email",
                      "/sign_up/enter_email")
    auth_token = authenticity_token(resp)

    # Post fake email and get confirmation link (link shows up in "load test mode")
    fake = Faker()
    new_email = "test+{}@test.com".format(fake.md5())
    default_password = "salty pickles"
    resp = do_request(
        context,
        "post",
        "/sign_up/enter_email",
        "/sign_up/verify_email",
        '',
        {
            "user[email]": new_email,
            "authenticity_token": auth_token,
            "user[terms_accepted]": '1'},
    )
    conf_url = confirm_link(resp)

    # Get confirmation token
    resp = do_request(
        context,
        "get",
        conf_url,
        "/sign_up/enter_password?confirmation_token=",
        '',
        {},
        {},
        "/sign_up/email/confirm?confirmation_token=",
    )
    auth_token = authenticity_token(resp)
    dom = resp_to_dom(resp)
    token = dom.find('[name="confirmation_token"]:first').attr("value")

    # Set user password
    resp = do_request(
        context,
        "post",
        "/sign_up/create_password",
        "/authentication_methods_setup",
        '',
        {
            "password_form[password]": default_password,
            "authenticity_token": auth_token,
            "confirmation_token": token,
        },
    )
    auth_token = authenticity_token(resp)

    # Select phone as the 2FA method
    resp = do_request(
        context,
        "post",
        "/authentication_methods_setup",
        "/phone_setup",
        "",
        {
            "_method": "patch",
            "two_factor_options_form[selection][]": "phone",
            "authenticity_token": auth_token,
        },
    )

    # After password creation set up SMS 2nd factor
    auth_token = authenticity_token(resp)
    resp = do_request(
        context,
        "post",
        "/phone_setup",
        "/login/two_factor/sms",
        "",
        {
            "_method": "patch",
            "new_phone_form[international_code]": "US",
            "new_phone_form[phone]": random_phone(),
            "new_phone_form[otp_delivery_preference]": "sms",
            "authenticity_token": auth_token,
            "commit": "Send security code",
        },
    )

    # After password creation set up SMS 2nd factor
    # NOTE(review): this GET + POST repeats the /phone_setup submission just
    # above with a fresh random phone — looks duplicated; confirm whether the
    # second submission is intentional before removing either.
    resp = do_request(context, "get", "/phone_setup", "/phone_setup")
    auth_token = authenticity_token(resp)
    resp = do_request(
        context,
        "post",
        "/phone_setup",
        "/login/two_factor/sms",
        '',
        {
            "_method": "patch",
            "new_phone_form[international_code]": "US",
            "new_phone_form[phone]": random_phone(),
            "new_phone_form[otp_delivery_preference]": "sms",
            "authenticity_token": auth_token,
            "commit": "Send security code",
        },
    )
    auth_token = authenticity_token(resp)
    code = otp_code(resp)
    logging.debug('/login/two_factor/sms')

    # Visit security code page and submit pre-filled OTP
    resp = do_request(
        context,
        "post",
        "/login/two_factor/sms",
        "/auth_method_confirmation",
        '',
        {"code": code, "authenticity_token": auth_token},
    )
    auth_token = authenticity_token(resp)

    # Skip setting up a second authentication method
    resp = do_request(
        context,
        "post",
        "/auth_method_confirmation/skip",
        "/verify/doc_auth/welcome",
        "",
        {"authenticity_token": auth_token},
    )
    auth_token = authenticity_token(resp)
    logging.debug('/verify/doc_auth/welcome')

    # Post consent to Welcome
    resp = do_request(
        context,
        "put",
        "/verify/doc_auth/welcome",
        "/verify/doc_auth/agreement",
        '',
        {"authenticity_token": auth_token, },
    )
    auth_token = authenticity_token(resp)
    logging.debug('/verify/doc_auth/agreement')

    # Post consent to Welcome
    resp = do_request(
        context,
        "put",
        "/verify/doc_auth/agreement",
        "/verify/doc_auth/upload",
        '',
        {"doc_auth[ial2_consent_given]": "1",
         "authenticity_token": auth_token, },
    )
    auth_token = authenticity_token(resp)
    logging.debug('/verify/doc_auth/upload?type=desktop')

    # Choose Desktop flow
    resp = do_request(
        context,
        "put",
        "/verify/doc_auth/upload?type=desktop",
        "/verify/document_capture",
        '',
        {"authenticity_token": auth_token, },
    )
    auth_token = authenticity_token(resp)
    files = {"doc_auth[front_image]": context.license_front,
             "doc_auth[back_image]": context.license_back}
    logging.debug('verify/doc_auth/document_capture')

    # Post the license images
    resp = do_request(
        context,
        "put",
        "/verify/document_capture",
        "/verify/doc_auth/ssn",
        '',
        {"authenticity_token": auth_token, },
        files
    )
    auth_token = authenticity_token(resp)
    logging.debug('/verify/doc_auth/ssn')

    # Fixture SSN accepted by the sandbox proofer
    ssn = '900-12-3456'
    resp = do_request(
        context,
        "put",
        "/verify/doc_auth/ssn",
        "/verify/doc_auth/verify",
        '',
        {"authenticity_token": auth_token, "doc_auth[ssn]": ssn, },
    )
    # There are three auth tokens on the response, get the second
    auth_token = authenticity_token(resp, 1)
    logging.debug('/verify/doc_auth/verify')

    # Verify
    expected_text = 'This might take up to a minute'
    resp = do_request(
        context,
        "put",
        "/verify/doc_auth/verify",
        "/verify/doc_auth/verify_wait",
        expected_text,
        {"authenticity_token": auth_token, },
    )

    # Poll until async document verification finishes.
    # NOTE(review): the poll condition is pinned to the
    # idp.pt.identitysandbox.gov host — confirm before pointing this flow at
    # any other environment.
    while resp.url == 'https://idp.pt.identitysandbox.gov/verify/doc_auth/verify_wait':
        time.sleep(3)
        logging.debug(
            f"SLEEPING IN /verify_wait WHILE LOOP with {new_email}")
        resp = do_request(
            context,
            "get",
            "/verify/doc_auth/verify_wait",
            '',
            '',
            {},
        )
        if resp.url == 'https://idp.pt.identitysandbox.gov/verify/doc_auth/verify_wait':
            logging.debug(
                f"STILL IN /verify_wait WHILE LOOP with {new_email}")
        else:
            auth_token = authenticity_token(resp)

    logging.debug("/verify/phone")

    # Enter Phone
    resp = do_request(
        context,
        "put",
        "/verify/phone",
        "/verify/phone",
        'This might take up to a minute',
        {"authenticity_token": auth_token,
         "idv_phone_form[phone]": random_phone(), },
    )

    # Poll until async phone verification finishes (same host caveat as above)
    wait_text = 'This might take up to a minute. We’ll load the next step '\
        'automatically when it’s done.'
    while wait_text in resp.text:
        time.sleep(3)
        logging.debug(
            f"SLEEPING IN /verify/phone WHILE LOOP with {new_email}")
        resp = do_request(
            context,
            "get",
            "/verify/phone",
            '',
            '',
            {},
        )
        if resp.url == 'https://idp.pt.identitysandbox.gov/verify/phone':
            logging.debug(
                f"STILL IN /verify/phone WHILE LOOP with {new_email}")
        else:
            auth_token = authenticity_token(resp)

    logging.debug('/verify/otp_delivery_method')

    # Select SMS Delivery
    resp = do_request(
        context,
        "put",
        "/verify/otp_delivery_method",
        "/verify/phone_confirmation",
        '',
        {"authenticity_token": auth_token, "otp_delivery_preference": "sms", },
    )
    auth_token = authenticity_token(resp)
    code = otp_code(resp)
    logging.debug('/verify/phone_confirmation')

    # Verify SMS Delivery
    resp = do_request(
        context,
        "put",
        "/verify/phone_confirmation",
        "/verify/review",
        '',
        {"authenticity_token": auth_token, "code": code, },
    )
    auth_token = authenticity_token(resp)
    logging.debug('/verify/review')

    # Re-enter password
    resp = do_request(
        context,
        "put",
        "/verify/review",
        "/verify/personal_key",
        '',
        {
            "authenticity_token": auth_token,
            "user[password]": "salty pickles",
        },
    )
    auth_token = authenticity_token(resp)
    logging.debug('/verify/confirmations')

    # Confirmations
    resp = do_request(
        context,
        "post",
        "/verify/personal_key",
        "/sign_up/completed",
        '',
        {
            "authenticity_token": auth_token,
            "personal_key": personal_key(resp)
        },
    )
    auth_token = authenticity_token(resp)
    logging.debug('/sign_up/completed')

    # Sign Up Completed
    resp = do_request(
        context,
        "post",
        "/sign_up/completed",
        None,
        '',
        {
            "authenticity_token": auth_token,
            "commit": "Agree and continue"
        },
    )

    ial2_sig = "ACR: http://idmanagement.gov/ns/assurance/ial/2"
    # Does it include the IAL2 text signature?
    if resp.text.find(ial2_sig) == -1:
        print("ERROR: this does not appear to be an IAL2 auth")

    logout_link = sp_signout_link(resp)
    logging.debug('/sign_up/completed')

    # Sign out through the SP, landing back on the SP root
    resp = do_request(
        context,
        "get",
        logout_link,
        sp_root_url,
        '',
        {},
        {},
        url_without_querystring(logout_link),
    )

    # Does it include the logged out text signature?
    if resp.text.find('You have been logged out') == -1:
        print("ERROR: user has not been logged out")
|
{"/load_testing/lib/flow_sp_ial2_sign_in_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_ial2_proofing.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_in.py": ["/load_testing/lib/flow_helper.py"]}
|
35,376
|
18F/identity-loadtest
|
refs/heads/main
|
/load_testing/lib/flow_ial2_proofing.py
|
from faker import Faker
from .flow_helper import do_request, authenticity_token, otp_code, random_phone
import sys
"""
*** IAL2 Proofing Flow ***
"""
def do_ial2_proofing(context):
    """
    Run the synchronous IAL2 identity-proofing flow for an already
    signed-in user, from /verify through /verify/activated.

    Requires following attributes on context:
    * license_front - Image data for front of driver's license
    * license_back - Image data for back of driver's license

    Returns the /verify/review response (the last response assigned to
    ``resp``; the final two requests are not captured).
    """
    # Request IAL2 Verification
    resp = do_request(context, "get", "/verify", "/verify/doc_auth")
    auth_token = authenticity_token(resp)

    # Post consent to Welcome
    resp = do_request(
        context,
        "put",
        "/verify/doc_auth/welcome",
        "/verify/doc_auth/agreement",
        "",
        {"authenticity_token": auth_token, },
    )
    auth_token = authenticity_token(resp)

    # Post consent to Welcome
    resp = do_request(
        context,
        "put",
        "/verify/doc_auth/agreement",
        "/verify/doc_auth/upload",
        "",
        {"doc_auth[ial2_consent_given]": "1",
         "authenticity_token": auth_token, },
    )
    auth_token = authenticity_token(resp)

    # Choose Desktop flow
    resp = do_request(
        context,
        "put",
        "/verify/doc_auth/upload?type=desktop",
        "/verify/document_capture",
        "",
        {"authenticity_token": auth_token, },
    )
    auth_token = authenticity_token(resp)
    files = {"doc_auth[front_image]": context.license_front,
             "doc_auth[back_image]": context.license_back}

    # Post the license images
    resp = do_request(
        context,
        "put",
        "/verify/document_capture",
        "/verify/doc_auth/ssn",
        "",
        {"authenticity_token": auth_token, },
        files
    )
    auth_token = authenticity_token(resp)

    # Fixture SSN accepted by the sandbox proofer
    ssn = '900-12-3456'
    # print("*** Using ssn: " + ssn)
    resp = do_request(
        context,
        "put",
        "/verify/doc_auth/ssn",
        "/verify/doc_auth/verify",
        "",
        {"authenticity_token": auth_token, "doc_auth[ssn]": ssn, },
    )
    # There are three auth tokens on the response, get the second
    auth_token = authenticity_token(resp, 1)

    # Verify
    resp = do_request(
        context,
        "put",
        "/verify/doc_auth/verify",
        "/verify/phone",
        "",
        {"authenticity_token": auth_token, },
    )
    auth_token = authenticity_token(resp)

    # Enter Phone
    resp = do_request(
        context,
        "put",
        "/verify/phone",
        "/verify/otp_delivery_method",
        "",
        {"authenticity_token": auth_token,
         "idv_phone_form[phone]": random_phone(), },
    )
    auth_token = authenticity_token(resp)

    # Select SMS Delivery
    resp = do_request(
        context,
        "put",
        "/verify/otp_delivery_method",
        "/verify/phone_confirmation",
        "",
        {"authenticity_token": auth_token, "otp_delivery_preference": "sms", },
    )
    auth_token = authenticity_token(resp)
    code = otp_code(resp)

    # Verify SMS Delivery
    resp = do_request(
        context,
        "put",
        "/verify/phone_confirmation",
        "/verify/review",
        "",
        {"authenticity_token": auth_token, "code": code, },
    )
    auth_token = authenticity_token(resp)

    # Re-enter password
    resp = do_request(
        context,
        "put",
        "/verify/review",
        "/verify/confirmations",
        "",
        {"authenticity_token": auth_token,
         "user[password]": "salty pickles", },
    )
    auth_token = authenticity_token(resp)

    # Confirmations
    do_request(
        context,
        "post",
        "/verify/confirmations",
        "/account",
        "",
        {"authenticity_token": auth_token, },
    )

    # Re-Check verification activated
    do_request(context, "get", "/verify", "/verify/activated", "")

    return resp
|
{"/load_testing/lib/flow_sp_ial2_sign_in_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_ial2_proofing.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_in.py": ["/load_testing/lib/flow_helper.py"]}
|
35,377
|
18F/identity-loadtest
|
refs/heads/main
|
/load_testing/sign_up.locustfile.py
|
from locust import HttpUser, TaskSet, task, between
from lib import flow_sign_up, flow_helper
import logging
class SignUpLoad(TaskSet):
    """Locust TaskSet exercising the full account sign-up flow."""

    def on_start(self):
        logging.info("*** Starting Sign-Up load tests ***")

    def on_stop(self):
        logging.info("*** Ending Sign-Up load tests ***")

    """ @task(<weight>) : value=3 executes 3x as often as value=1 """
    """ Things inside task are synchronous. Tasks are async """

    @task(1)
    def sign_up_load_test(self):
        # GET the root
        flow_helper.do_request(self, "get", "/", "/", "")

        # This performs the entire sign-up flow
        flow_sign_up.do_sign_up(self)

        # Should be able to get the /account page now
        flow_helper.do_request(self, "get", "/account", "/account", "")

        # Now log out.
        # You'd think that this would leave you at "/", but it returns a 204 and leaves you be.
        flow_helper.do_request(self, "get", "/logout", "/logout", "")
class WebsiteUser(HttpUser):
    """Locust entry point: each simulated user runs SignUpLoad tasks."""

    tasks = [SignUpLoad]
    # number seconds simulated users wait between requests
    wait_time = between(5, 9)
|
{"/load_testing/lib/flow_sp_ial2_sign_in_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_ial2_proofing.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_in.py": ["/load_testing/lib/flow_helper.py"]}
|
35,378
|
18F/identity-loadtest
|
refs/heads/main
|
/tests/test_helper.py
|
from unittest.mock import MagicMock
import os
# Absolute path of the fixtures directory next to this test module
FIXDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "fixtures"))


def mock_response(fixture_name):
    """
    Accepts the name of a file in the fixtures directory
    Returns a mocked response object whose .content is the fixture's text

    The file is read inside a context manager so the handle is closed
    promptly (the original left the file open).
    """
    with open(os.path.join(FIXDIR, fixture_name), "r") as f:
        fixture_content = f.read()
    response = MagicMock()
    response.content = fixture_content
    return response
|
{"/load_testing/lib/flow_sp_ial2_sign_in_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_ial2_proofing.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_in.py": ["/load_testing/lib/flow_helper.py"]}
|
35,379
|
18F/identity-loadtest
|
refs/heads/main
|
/load_testing/sign_in.locustfile.py
|
from lib import flow_sign_in, flow_helper
from locust import HttpUser, TaskSet, task, between
import logging
class SignInLoad(TaskSet):
    """Locust TaskSet exercising plain sign-in, account view, and sign-out."""

    def on_start(self):
        logging.info(
            "*** Starting Sign-In load tests with "
            + flow_helper.get_env("NUM_USERS")
            + " users ***"
        )

    def on_stop(self):
        logging.info("*** Ending Sign-In load tests ***")

    """ @task(<weight>) : value=3 executes 3x as often as value=1 """
    """ Things inside task are synchronous. Tasks are async """

    @task(1)
    def sign_in_load_test(self):
        # Do Sign In
        flow_sign_in.do_sign_in(self)

        # Get the /account page now
        flow_helper.do_request(self, "get", "/account", "/account", "")

        # Now log out
        flow_helper.do_request(self, "get", "/logout", "/", "")
class WebsiteUser(HttpUser):
    """Locust entry point: each simulated user runs SignInLoad tasks."""

    tasks = [SignInLoad]
    # Seconds each simulated user idles between tasks
    wait_time = between(5, 9)
|
{"/load_testing/lib/flow_sp_ial2_sign_in_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_ial2_proofing.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_in.py": ["/load_testing/lib/flow_helper.py"]}
|
35,380
|
18F/identity-loadtest
|
refs/heads/main
|
/load_testing/sp_sign_in.locustfile.py
|
from locust import HttpUser, TaskSet, task, between
from lib import flow_sp_sign_in
class SPSignInLoad(TaskSet):
    """Locust TaskSet: SP-initiated sign-in (the flow signs out of the SP itself)."""

    @task(1)
    def sp_sign_in_load_test(self):
        # This flow does its own SP logout
        flow_sp_sign_in.do_sign_in(self)
class WebsiteUser(HttpUser):
    """Locust entry point: each simulated user runs SPSignInLoad tasks."""

    tasks = [SPSignInLoad]
    # Seconds each simulated user idles between tasks
    wait_time = between(5, 9)
|
{"/load_testing/lib/flow_sp_ial2_sign_in_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_ial2_proofing.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_in.py": ["/load_testing/lib/flow_helper.py"]}
|
35,381
|
18F/identity-loadtest
|
refs/heads/main
|
/load_testing/lib/flow_sign_in.py
|
from urllib.parse import urlparse
from .flow_helper import (
authenticity_token,
choose_cred,
do_request,
export_cookies,
get_env,
import_cookies,
otp_code,
random_cred,
resp_to_dom,
use_previous_visitor,
)
import logging
"""
*** Sign In Flow ***
"""
def do_sign_in(
    context,
    remember_device=False,
    visited={},
    visited_min=0,
    remembered_target=0,
):
    """
    Sign a random pre-provisioned user in, optionally reusing a previously
    "remembered" device's cookies.

    Args:
        context: locust TaskSet/User exposing an HTTP ``client``
        remember_device (bool): submit the remember-device checkbox and allow
            reuse of previously visited users' cookie jars
        visited (dict): user number -> exported cookie list for users already
            signed in.  NOTE: the mutable default is relied on as shared,
            process-wide memory of visited users across calls — do not
            "fix" it to None.
        visited_min (int): lower threshold of visited users before reuse
        remembered_target (float): target percentage of sign-ins reusing a
            remembered device
    Returns:
        The final locust response object.
    """
    # This should match the number of users that were created for the DB with
    # the rake task
    num_users = get_env("NUM_USERS")
    remembered = False

    resp = do_request(context, "get", "/", "/")
    auth_token = authenticity_token(resp)

    # Crossed minimum visited user threshold AND passed random selector
    if remember_device and use_previous_visitor(
        len(visited), visited_min, remembered_target
    ):
        # Choose a specific previous user
        credentials = choose_cred(visited.keys())
        # Restore remembered device cookies to client jar
        import_cookies(context.client, visited[credentials["number"]])
        remembered = True
    else:
        # remove the first 6% of visited users if more than 66% of the users
        # have signed in. Note: this was picked arbitrarily and seems to work.
        # We may want to better tune this per NUM_USERS.
        if float(len(visited)) / float(num_users) > 0.66:
            logging.info(
                'You have used more than two thirds of the userspace.')
            removal_range = int(0.06 * float(num_users))
            count = 0
            # BUGFIX: `count` was never incremented, so this loop evicted
            # EVERY visited user instead of only the first ~6%.
            for key in list(visited):
                if count >= removal_range:
                    break
                logging.debug(f'removing user #{key}')
                visited.pop(key)
                count += 1
        # grab an random and unused credential
        credentials = random_cred(num_users, visited)
    usernum = credentials["number"]

    # Remembered devices skip the SMS step and land back at the root
    expected_path = "/login/two_factor/sms" if remember_device is False else "/"

    # Post login credentials
    resp = do_request(
        context,
        "post",
        "/",
        expected_path,
        "",
        {
            "user[email]": credentials["email"],
            "user[password]": credentials["password"],
            "authenticity_token": auth_token,
        },
    )

    if "/account" in resp.url:
        if not remembered:
            logging.error(f"You're already logged in. Quitting sign-in for "
                          f"{usernum}")
        return resp
    if remembered and "/login/two_factor/sms" in resp.url:
        logging.error(
            f"Unexpected SMS prompt for remembered user {usernum}")
        return resp

    auth_token = authenticity_token(resp)
    code = otp_code(resp)

    # Post to unauthenticated redirect
    resp = do_request(
        context,
        "post",
        "/login/two_factor/sms",
        "/account",
        "",
        {
            "code": code,
            "authenticity_token": auth_token,
            "remember_device": remember_device_value(remember_device),
        },
    )

    # Mark user as visited and save remembered device cookies
    visited[usernum] = export_cookies(
        urlparse(resp.url).netloc, context.client.cookies)

    return resp
def remember_device_value(value):
    """Map a truthy/falsy flag onto the form's "true"/"false" string."""
    return "true" if value else "false"
def do_sign_in_user_not_found(context):
    """Attempt a sign-in with an email that cannot exist; expect to stay on '/'."""
    creds = random_cred(get_env("NUM_USERS"), None)

    landing = do_request(context, "get", "/", "/")
    token = authenticity_token(landing)
    if "/account" in landing.url:
        print("You're already logged in. Quitting sign-in.")
        return landing

    # Mangle the email so the user lookup is guaranteed to miss
    form = {
        "user[email]": "actually-not-" + creds["email"],
        "user[password]": creds["password"],
        "authenticity_token": token,
    }
    return do_request(context, "post", "/", "/", "", form)
def do_sign_in_incorrect_password(context):
    """Sign in with a valid email but a wrong password; expect to stay on '/'."""
    creds = random_cred(get_env("NUM_USERS"), None)

    landing = do_request(context, "get", "/", "/")
    token = authenticity_token(landing)
    if "/account" in landing.url:
        print("You're already logged in. Quitting sign-in.")
        return landing

    # "bland pickles" deliberately mismatches the fixture password
    form = {
        "user[email]": creds["email"],
        "user[password]": "bland pickles",
        "authenticity_token": token,
    }
    return do_request(context, "post", "/", "/", "", form)
def do_sign_in_incorrect_sms_otp(context):
    """Complete the password step, then submit a bogus SMS OTP code."""
    creds = random_cred(get_env("NUM_USERS"), None)

    landing = do_request(context, "get", "/", "/")
    token = authenticity_token(landing)
    if "/account" in landing.url:
        print("You're already logged in. Quitting sign-in.")
        return landing

    # Valid credentials get us to the SMS prompt
    two_factor = do_request(
        context,
        "post",
        "/",
        "/login/two_factor/sms",
        "",
        {
            "user[email]": creds["email"],
            "user[password]": creds["password"],
            "authenticity_token": token,
        },
    )

    # "000000" is never a valid OTP, so we should stay on the SMS page
    return do_request(
        context,
        "post",
        "/login/two_factor/sms",
        "/login/two_factor/sms",
        "",
        {"code": "000000", "authenticity_token": authenticity_token(two_factor)},
    )
|
{"/load_testing/lib/flow_sp_ial2_sign_in_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_ial2_proofing.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_in.py": ["/load_testing/lib/flow_helper.py"]}
|
35,382
|
18F/identity-loadtest
|
refs/heads/main
|
/load_testing/lib/flow_helper.py
|
from random import choice, random, randint
from urllib.parse import parse_qs, urlparse
import locust
import requests
import logging
import os
import pyquery
# Utility functions that are helpful in various locust contexts
# Cookie names preserved across simulated sessions so a user can look
# "remembered" on a later sign-in (see export_cookies/import_cookies callers).
DEFAULT_COOKIE_SAVELIST = [
    "user_opted_remember_device_preference",
    "remember_device"
]

# Bare module name (filename without path or extension), used to tag log lines
LOG_NAME = __file__.split('/')[-1].split('.')[0]
def do_request(
    context,
    method,
    path,
    expected_redirect=None,
    expected_text=None,
    data=None,
    files=None,
    name=None,
    headers=None
):
    """
    Issue a request through the locust client and validate the result.

    Args:
        context: locust TaskSet/User exposing ``.client``
        method (str): client method name ("get", "post", "put", ...)
        path (str): path or absolute URL to request
        expected_redirect (str): substring that must appear in the final URL
        expected_text (str): substring that must appear in the response body
        data (dict): form payload
        files (dict): multipart file payload
        name (str): friendly name used to group stats in locust output
        headers (dict): extra headers, merged over the desktop-agent defaults
    Returns:
        The locust response object.
    Raises:
        locust.exception.RescheduleTask when an expectation fails.
    """
    # None defaults avoid the shared-mutable-default pitfall; normalize here
    data = {} if data is None else data
    files = {} if files is None else files
    headers = {} if headers is None else headers

    with getattr(context.client, method)(
        path,
        headers={**desktop_agent_headers(), **headers},
        data=data,
        files=files,
        catch_response=True,
        name=name,
    ) as resp:
        if expected_redirect:
            # resp.url can be falsy on connection errors; only check real URLs
            if resp.url and expected_redirect not in resp.url:
                fail_response(resp, expected_redirect, expected_text)
                raise locust.exception.RescheduleTask
        if expected_text:
            if resp.text and expected_text not in resp.text:
                fail_response(resp, expected_redirect, expected_text)
                raise locust.exception.RescheduleTask
        return resp
def fail_response(response, expected_redirect, expected_text):
    """
    Mark *response* as failed in locust stats with a diagnostic message.

    In DEBUG mode the whole request/response is included; otherwise a short
    message is built, augmented with any known failure text found in the body.
    """
    if os.getenv("DEBUG"):
        # BUGFIX: the original interpolated {print(response.text)}, which
        # printed to stdout and embedded "None" in the failure message.
        message = f"""
        You wanted {expected_redirect}, but got {response.url} for a response.
        Request:
        Method: {response.request.method}
        Path: {response.url}
        Data: {response.request.body}
        Response:
        Body: {response.text}"""
        response.failure(message)
    else:
        # check_fail_text is pure; call it once instead of up to four times
        fail_msgs = check_fail_text(response.text)
        if expected_redirect:
            error_msg = f'You wanted {expected_redirect}, but got '\
                f'{response.url} for a url.'
            if fail_msgs:
                error_msg += f' Found the following fail msg(s): ' + " | ".join(fail_msgs)
            response.failure(error_msg)
        if expected_text:
            error_msg = f'"{expected_text}" is not in the response text.'
            if fail_msgs:
                error_msg += f' Found the following fail msg(s): ' + " | ".join(fail_msgs)
            response.failure(error_msg)
def check_fail_text(response_text):
    """
    Return the list of known IdP failure messages present in *response_text*
    (empty list when none match).
    """
    known_failure_messages = [
        'For your security, your account is temporarily locked because you '
        'have entered the one-time security code incorrectly too many times.',
        'This is not a real email address. Make sure it includes an @ and a '
        'domain name',
        'Your login credentials were used in another browser. Please sign in '
        'again to continue in this browser',
        'This website is under heavy load (queue full)',
        'Need more time?',
        'Oops, something went wrong. Please try again.',
        'We could not match this phone number to other records',
        # occurs under high load with async workers
        'The server took too long to respond. Please try again.',
    ]
    # The original guarded the return with `'found_fail_msgs' in locals()`,
    # which was always true; returning the (possibly empty) list directly
    # is equivalent and clearer.
    return [msg for msg in known_failure_messages if msg in response_text]
def authenticity_token(response, index=0):
    """
    Retrieves the CSRF auth token from the DOM for submission.

    If you need to differentiate between multiple CSRF tokens on one page,
    pass the optional index of the CSRF on the page
    """
    selector = 'input[name="authenticity_token"]'
    dom = resp_to_dom(response)
    token = dom.find(selector).eq(index).attr("value")
    if not token:
        error = "Could not find authenticity_token on page"
        if os.getenv("DEBUG"):
            # DEBUG mode: include the full response body in the failure message
            message = """
            {}
            Response:
            Body: {}
            """.format(
                error, response.text
            )
            response.failure(message)
        else:
            response.failure(error)
            logging.error(
                f'Failed to find authenticity token in {response.url}')
        # Abort this task iteration; locust will reschedule it
        raise locust.exception.RescheduleTask
    return token
def idv_phone_form_value(response):
    """
    Retrieves the phone number value from /verify/phone so the user does not
    have to verify a new phone number in the IAL2 flow.
    """
    selector = 'input[name="idv_phone_form[phone]"]'
    dom = resp_to_dom(response)
    value = dom.find(selector).eq(0).attr("value")
    if not value:
        error = "Could not find idv_phone_form value on page"
        if os.getenv("DEBUG"):
            # DEBUG mode: include the full response body in the failure message
            message = """
            {}
            Response:
            Body: {}
            """.format(
                error, response.text
            )
            response.failure(message)
        else:
            response.failure(error)
        # Abort this task iteration; locust will reschedule it
        raise locust.exception.RescheduleTask
    return value
def querystring_value(url, key):
    """Return the first querystring value for *key* in *url*, rescheduling if absent."""
    query = urlparse(url).query
    try:
        return parse_qs(query)[key][0]
    except KeyError as err:
        logging.error(
            f'{LOG_NAME}: No querystring found for {key} in {url}')
        logging.debug(err)
        raise locust.exception.RescheduleTask
def url_without_querystring(url):
    """Return *url* with any querystring stripped."""
    return url.partition("?")[0]
def otp_code(response):
    """
    Retrieves the auto-populated OTP code from the DOM for submission.
    """
    code = resp_to_dom(response).find('input[name="code"]').attr("value")
    if code:
        return code
    response.failure(
        "Could not find pre-filled OTP code, is IDP telephony_adapter: 'test' ?"
    )
    raise locust.exception.RescheduleTask
def confirm_link(response):
    """
    Retrieves the "CONFIRM NOW" link during the sign-up process.

    Marks the response failed and reschedules the task when the link is
    missing or empty.
    """
    dom = resp_to_dom(response)
    error_message = (
        "Could not find CONFIRM NOW link, is IDP enable_load_testing_mode: 'true' ?"
    )
    # BUGFIX: the original indexed [0] unconditionally, which raised an
    # uncaught IndexError (instead of the intended failure) when the link
    # was absent from the page.
    matches = dom.find("#confirm-now")
    if not matches:
        response.failure(error_message)
        raise locust.exception.RescheduleTask
    confirmation_link = matches[0].attrib["href"]
    if not confirmation_link:
        response.failure(error_message)
        raise locust.exception.RescheduleTask
    return confirmation_link
def sp_signin_link(response):
    """
    Gets a Sign-in link from the SP, raises an error if not found
    """
    dom = resp_to_dom(response)
    link = dom.find("div.sign-in-wrap a").eq(0)
    href = link.attr("href")
    # BUGFIX: href is None when no anchor matched, and `in None` raised a
    # TypeError instead of recording the intended failure.
    if not href or "/openid_connect/authorize" not in href:
        response.failure("Could not find SP Sign in link")
        raise locust.exception.RescheduleTask
    return href
def sp_signout_link(response):
    """
    Gets the SP's Log-out link from the page, rescheduling the task when the
    page is actually the IdP (session invalidated elsewhere) or the link
    cannot be found.  (Original docstring said "Sign-in link" — copy-paste.)
    """
    dom = resp_to_dom(response)
    link = dom.find("div.sign-in-wrap a").eq(0)
    failtext = "Your login credentials were used in another browser"
    if len(link) == 0 and failtext in response.text:
        # The IdP re-rendered its sign-in page instead of the SP page
        logging.error(
            f'{LOG_NAME}: failed to find SP logout link. Redirected to IdP: "{failtext}"')
        response.failure(f"Redirected to IdP: {failtext}")
        raise locust.exception.RescheduleTask
    else:
        href = link.attr("href")
        try:
            if "/logout" not in href:
                response.failure("Could not find SP Log out link")
                raise locust.exception.RescheduleTask
            return href
        except TypeError as e:
            # href is None when no anchor matched; log what we can and retry
            logging.debug(f'{LOG_NAME}: {e}')
            logging.debug(f'{LOG_NAME}: href = {href}')
            logging.error(f'{LOG_NAME}: status code = {response.status_code}')
            logging.error(f'{LOG_NAME}: url = {response.url}')
            raise locust.exception.RescheduleTask
def personal_key(response):
    """
    Gets a personal key from the /verify/confirmations page and raises an error
    if not found
    """
    dom = resp_to_dom(response)
    try:
        # The key is rendered as four <code class="monospace"> segments
        segments = [dom.find("code.monospace")[i].text for i in range(4)]
    except IndexError as e:
        logging.error(f'{LOG_NAME}: No personal key found in {response.url}')
        logging.debug(e)
        raise locust.exception.RescheduleTask
    return "".join(segments)
def resp_to_dom(resp):
    """
    Parse the response body into a pyquery DOM for selector queries.

    NOTE(review): despite the original comment, no status-code check is
    performed here — the body is parsed unconditionally.
    """
    return pyquery.PyQuery(resp.content)
def random_cred(num_users, used_nums):
    """
    Pick a random pre-seeded test user credential.

    Given the rake task:
        rake dev:random_users NUM_USERS=1000
    we should have 1000 existing users with credentials matching:
    * email address testuser1@example.com through testuser1000@example.com
    * the password "salty pickles"
    * a phone number between +1 (415) 555-0001 and +1 (415) 555-1000.

    Args:
        num_users (int|str) - Size of the seeded user pool
        used_nums (set|None) - User numbers to avoid reusing (None allows any)

    Note that YOU MUST run the rake task to put these users in the DB before
    using them.
    """
    user_num = randint(0, int(num_users) - 1)
    # Fixed: previously compared with `!= None`; `is not None` is the
    # correct identity test.
    if used_nums is not None:
        # NOTE: loops forever if every number is already used -- callers are
        # expected to keep len(used_nums) < num_users.
        while user_num in used_nums:
            logging.debug(
                f'{LOG_NAME}: User #{user_num} has already been used. Retrying.')
            user_num = randint(0, int(num_users) - 1)
    else:
        logging.debug(
            f'{LOG_NAME}: User #{user_num} ready for service.')
    credential = {
        "number": user_num,
        "email": f"testuser{user_num}@example.com",
        "password": "salty pickles",
    }
    logging.debug(f'{LOG_NAME}: {credential["email"]}')
    return credential
def choose_cred(choose_from):
    """
    Same as random_cred but selects from a list of user ID numbers.
    """
    # random.choice needs a sequence, so materialize the iterable first.
    selected = choice(list(choose_from))
    return {
        "number": selected,
        "email": f"testuser{selected}@example.com",
        "password": "salty pickles",
    }
def use_previous_visitor(visited_count, visited_min, remembered_target):
    """
    Helper to decide if a specific sign in should use a previously used user
    number.

    Args:
        visited_count (int) - Number of previously visited users
        visited_min (int) - Lower threshold of visited users before reuse
        remembered_target (float) - Target percentage of reuse

    Returns:
        bool
    """
    return visited_count > visited_min and random() * 100 <= remembered_target
def random_phone():
    """
    Return a plausible fake phone number.

    IdP uses Phonelib.valid_for_country? to test phone numbers, so the
    prefix must look like a real US number.
    """
    return f"202555{randint(0, 9999):04d}"
def desktop_agent_headers():
    """Request headers that make the client look like a desktop browser."""
    user_agent = (
        "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) "
        "Gecko/20100101 Firefox/40.1"
    )
    return {"accept-language": "en-US,en;q=0.9", "user-agent": user_agent}
def get_env(key):
    """Return the environment variable *key*, raising if unset or empty."""
    value = os.getenv(key)
    if value:
        return value
    raise Exception("You must pass in Environment Variable {}".format(key))
def load_fixture(filename, path="./load_testing"):
    """
    Preload data for use by tests.

    Looks for *filename* under *path* first; if missing, falls back to
    downloading it from the identity-loadtest GitHub repo.

    Args:
        filename (str) - File to load, relative to path
        path (str) - (Optional) Path files are under
            (Default: ./load_testing)

    Returns:
        bytes

    Raises:
        RuntimeError - when the fixture exists neither locally nor remotely
    """
    fullpath = os.path.join(path, filename)
    try:
        with open(fullpath, "rb") as infile:
            return infile.read()
    except FileNotFoundError:
        url = 'https://github.com/18F/identity-loadtest/raw/main/load_testing/' + filename
        try:
            r = requests.get(url)
            # Fixed: treat HTTP errors (404 etc.) as "not found" instead of
            # silently returning the error-page body as fixture data.
            r.raise_for_status()
            return r.content
        except requests.exceptions.RequestException:
            # Be a little more helpful
            raise RuntimeError(f"Could not find fixture {fullpath} or {url}")
def export_cookies(domain, cookies, savelist=None, sp_domain=None):
    """
    Export cookies used for remembered device/other non-session use
    as a list of Cookie objects. Only looks in the jar matching the host name.

    Args:
        domain (str) - Domain to select cookies from
        cookies (requests.cookies.RequestsCookieJar) - Cookie jar object
        savelist (list(str)) - (Optional) List of cookie names to export
        sp_domain (str) - (Optional) SP domain whose cookies are merged in

    Returns:
        list(Cookie) - restorable using the import_cookies() function
    """
    if savelist is None:
        savelist = DEFAULT_COOKIE_SAVELIST
    # Pulling directly from the internal data structure as there is
    # no get_cookies method.
    cookies_dict = cookies._cookies.get(domain, {}).get('/', None)
    if cookies_dict is None:
        return []
    # Fixed: the old guard `'sp_domain' in locals()` is always true for a
    # parameter, and dict.update(None) crashed when the SP jar was empty.
    if sp_domain is not None:
        sp_cookies = cookies._cookies.get(sp_domain, {}).get('/', None)
        if sp_cookies:
            cookies_dict.update(sp_cookies)
    return [c for c in (cookies_dict.get(name) for name in savelist) if c is not None]
def import_cookies(client, cookies):
    """
    Restore saved cookies to the referenced client's cookie jar.

    Args:
        client (requests.session) - Client to store cookies in
        cookies (list(Cookie)) - Saved list of Cookie objects

    Returns:
        None
    """
    for saved in cookies:
        client.cookies.set_cookie(saved)
|
{"/load_testing/lib/flow_sp_ial2_sign_in_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_ial2_proofing.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_in.py": ["/load_testing/lib/flow_helper.py"]}
|
35,383
|
18F/identity-loadtest
|
refs/heads/main
|
/load_testing/sign_in_failure.locustfile.py
|
from locust import HttpUser, TaskSet, task, between
from lib import flow_sign_in, flow_helper
import logging
class SignInFailureLoad(TaskSet):
    """Locust TaskSet exercising the three sign-in *failure* paths.

    Each @task has equal weight, so unknown-user, bad-password, and bad-OTP
    failures occur in roughly equal proportion.
    """

    def on_start(self):
        # NUM_USERS must be exported; flow_helper.get_env raises if missing.
        logging.info(
            "*** Starting Sign-In failure load tests with "
            + flow_helper.get_env("NUM_USERS")
            + " users ***"
        )

    def on_stop(self):
        logging.info("*** Ending Sign-In failure load tests ***")

    @task(1)
    def sign_in_load_test_user_not_found(self):
        # Do Sign In
        flow_sign_in.do_sign_in_user_not_found(self)

    @task(1)
    def sign_in_load_test_incorrect_password(self):
        # Do Sign In
        flow_sign_in.do_sign_in_incorrect_password(self)

    @task(1)
    def sign_in_load_test_incorrect_sms_otp(self):
        # Do Sign In
        flow_sign_in.do_sign_in_incorrect_sms_otp(self)
class WebsiteUser(HttpUser):
    # Locust entry point: runs SignInFailureLoad with a 5-9s wait between tasks.
    tasks = [SignInFailureLoad]
    wait_time = between(5, 9)
|
{"/load_testing/lib/flow_sp_ial2_sign_in_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_in.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_sign_up.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sp_ial2_sign_up_async.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_ial2_proofing.py": ["/load_testing/lib/flow_helper.py"], "/load_testing/lib/flow_sign_in.py": ["/load_testing/lib/flow_helper.py"]}
|
35,412
|
FlandreCirno/AzurLaneWikiUtilitiesManual
|
refs/heads/master
|
/util.py
|
# -*- coding: utf-8 -*-
import re, os, json
from slpp import slpp
DataDirectory = os.path.join('AzurLaneData', 'zh-CN')
JsonDirectory = 'json'
WikiDirectory = 'Wiki'
def saveJsonFile(data, fileName):
    """Serialize *data* as pretty-printed JSON under JsonDirectory."""
    target = os.path.join(JsonDirectory, fileName + '.json')
    with open(target, 'w', encoding='utf-8') as f:
        json.dump(data, f, sort_keys=True, indent=4, separators=(',', ': '))
def loadJsonFile(fileName):
    """Load a cached JSON file, restoring integer keys via parseJson."""
    source = os.path.join(JsonDirectory, fileName + '.json')
    with open(source, 'r+', encoding='utf-8') as f:
        return parseJson(json.load(f))
def parseJson(data):
    """Recursively convert digit-string dict keys back to ints.

    JSON object keys are always strings, so integer keys from the Lua data
    round-trip as e.g. "101"; this undoes that.
    """
    if isinstance(data, dict):
        return {
            (int(k) if isinstance(k, str) and k.isdigit() else k): parseJson(v)
            for k, v in data.items()
        }
    if isinstance(data, list):
        return [parseJson(item) for item in data]
    return data
def hasJsonFile(fileName):
    """Return True when a cached JSON copy of *fileName* exists."""
    candidate = os.path.join(JsonDirectory, fileName + '.json')
    return os.path.isfile(candidate)
def _mergeDecodedTable(content, output):
    """Decode a Lua table literal and merge its entries into *output*.

    Entries carrying an 'id' field are re-keyed by that id; the bookkeeping
    'all' key, if present, is dropped.
    """
    decoded = slpp.decode(content)
    for k, v in decoded.items():
        if isinstance(v, dict) and 'id' in v.keys():
            output[v['id']] = v
        else:
            output[k] = v
    if isinstance(output, dict) and 'all' in output.keys():
        del output['all']


def parseDataFile(fileName, filePath = r'sharecfg', mode = 0):
    """Load a game data table, preferring the cached JSON copy.

    Args:
        fileName: base name of the table (no extension).
        filePath: subdirectory under DataDirectory (modes 0 and 1).
        mode: 0 - single <fileName>.lua file
              1 - a directory of split files each defining <fileName>_<n>
              2 - a single <fileName>.serpent file under sharecfgdata

    Returns:
        dict keyed by entry id (or the original key when no id field exists).
    """
    if hasJsonFile(fileName):
        return loadJsonFile(fileName)
    # The three modes previously duplicated the decode/merge/'all'-cleanup
    # logic; it now lives in _mergeDecodedTable.
    output = {}
    if mode == 0:
        fullPath = os.path.join(DataDirectory, filePath, fileName + '.lua')
        with open(fullPath, 'r', encoding='utf-8') as f:
            content = f.read()
        content = re.match(r".*" + fileName + r" = (\{.*\})", content, flags = re.DOTALL)[1]
        _mergeDecodedTable(content, output)
    elif mode == 1:
        dirPath = os.path.join(DataDirectory, filePath)
        for fName in os.listdir(dirPath):
            with open(os.path.join(dirPath, fName), 'r', encoding='utf-8') as f:
                content = f.read()
            content = re.match(r".*" + fileName + r"_\d+ = (\{.*\})", content, flags = re.DOTALL)[1]
            _mergeDecodedTable(content, output)
    elif mode == 2:
        fullPath = os.path.join(DataDirectory, r'sharecfgdata', fileName + '.serpent')
        with open(fullPath, 'r', encoding='utf-8') as f:
            content = f.read()
        content = re.match(r".*" + fileName + r" = (\{.*\})", content, flags = re.DOTALL)[1]
        _mergeDecodedTable(content, output)
    saveJsonFile(output, fileName)
    return output
def getChapterTemplate(fileName = 'chapter_template', filePath = r'sharecfg'):
    """Parse chapter_template.lua, whose entries are individual
    `slot0.chapter_template[...] = {...}` assignments rather than one table.

    Results are cached to JSON like parseDataFile.
    """
    if hasJsonFile(fileName):
        return loadJsonFile(fileName)
    else:
        output = {}
        filePath = os.path.join(DataDirectory, filePath, fileName + '.lua')
        with open(filePath, 'r', encoding='utf-8') as f:
            content = f.read()
        # Each chapter is its own assignment; grab every brace block that
        # ends with "\n\t}" (the file's fixed formatting).
        results = re.findall(r'slot0\.chapter_template.*?\[.*?\] = (\{.*?\n\t\})', content, flags = re.DOTALL)
        for c in results:
            o = slpp.decode(c)
            output[o['id']] = o
        saveJsonFile(output, fileName)
        return output
def getShipName(skinID, skinTemplate, shipStatistics, groupID = None):
    """Resolve a display name from a skin id.

    For a known skin, return the name of the group's base skin
    (group_index 0); otherwise fall back to matching ship statistics by
    skin id, then by ship group.  Returns None when nothing matches.
    """
    if skinID in skinTemplate.keys():
        if not groupID:
            groupID = skinTemplate[skinID]['ship_group']
        for k, v in skinTemplate.items():
            # group_index 0 is the unskinned/base entry for the group.
            if groupID == v['ship_group'] and v['group_index'] == 0:
                return v['name']
    else:
        # Unknown skin: ship group ids appear to be skin id // 10 by
        # convention here -- TODO confirm against the data files.
        groupID = skinID // 10
        for k, v in shipStatistics.items():
            if skinID == v['skin_id']:
                return v['name']
        for k, v in shipStatistics.items():
            if groupID == v['id']//10:
                return v['name']
def getShipType(shipID, shipTemplate, groupID = None):
    """Return the hull type for a ship, preferring any ship in its group."""
    effective_group = groupID or shipTemplate[shipID]['group_type']
    for entry in shipTemplate.values():
        if entry['group_type'] == effective_group:
            return entry['type']
    # No group member found: fall back to the ship's own record.
    return shipTemplate[shipID]['type']
def parseNameCode(text, nameCode, AF = False):
    """Replace {namecode:N} placeholders in *text* using the nameCode map.

    Unknown codes are left untouched.  With AF=True the replacement is
    wrapped in the wiki's {{AF|...}} template.
    """
    def _substitute(match):
        code = int(match.group(1))
        if code not in nameCode.keys():
            return match.group(0)
        replacement = nameCode[code]
        return '{{AF|' + replacement + '}}' if AF else replacement
    return re.sub(r'\{namecode\:(\d+)\}', _substitute, text)
def getNameCode():
    """Build an id -> name mapping from the name_code table."""
    table = parseDataFile('name_code')
    entries = table.values() if isinstance(table, dict) else table
    return {entry['id']: entry['name'] for entry in entries}
if __name__ == "__main__":
pass
|
{"/JuusNames.py": ["/util.py"], "/ShipIndex.py": ["/util.py"], "/Memory.py": ["/util.py"], "/ChapterAwards.py": ["/util.py"], "/PNData.py": ["/util.py"]}
|
35,413
|
FlandreCirno/AzurLaneWikiUtilitiesManual
|
refs/heads/master
|
/JuusNames.py
|
# -*- coding: utf-8 -*-
import re, os
from slpp import slpp
import util
def getShipStatistics():
    # Per-ship stats, keyed by ship id.
    return util.parseDataFile('ship_data_statistics')


def getShipTemplate():
    # Ship templates (serpent-format source, hence mode 2).
    return util.parseDataFile('ship_data_template', mode = 2)


def getJuusNameTemplate():
    # Juustagram account-name table.
    return util.parseDataFile('activity_ins_ship_group_template')
def getShipName(groupID, shipStatistics, shipTemplate):
    """Find the display name of the ship whose template group matches."""
    for template in shipTemplate.values():
        if template['group_type'] != groupID:
            continue
        for stats in shipStatistics.values():
            if stats['id'] == template['id']:
                return stats['name']
def createJuusNameList():
    """Write "<ship name> <account name>" lines for every Juustagram entry."""
    JuusNameTemplate = getJuusNameTemplate()
    shipStatistics = getShipStatistics()
    shipTemplate = getShipTemplate()
    with open(os.path.join(util.WikiDirectory, 'JuusNames.txt'), 'w+', encoding='utf-8') as f:
        for k, v in JuusNameTemplate.items():
            # NOTE(review): getShipName may return None for an unmatched
            # group, which would make this concatenation raise TypeError.
            name = getShipName(v['ship_group'], shipStatistics, shipTemplate)
            f.write(name + ' ' + v['name'] + '\n')
if __name__ == "__main__":
createJuusNameList()
|
{"/JuusNames.py": ["/util.py"], "/ShipIndex.py": ["/util.py"], "/Memory.py": ["/util.py"], "/ChapterAwards.py": ["/util.py"], "/PNData.py": ["/util.py"]}
|
35,414
|
FlandreCirno/AzurLaneWikiUtilitiesManual
|
refs/heads/master
|
/ShipIndex.py
|
# -*- coding: utf-8 -*-
import re, os
from slpp import slpp
import util
def getShipGroup():
    # Collection codes -> ship group ids.
    return util.parseDataFile('ship_data_group')


def getShipStatistics():
    # Per-ship stats, keyed by ship id.
    return util.parseDataFile('ship_data_statistics')


def getShipTemplate():
    # Ship templates (serpent-format source, hence mode 2).
    return util.parseDataFile('ship_data_template', mode = 2)
def getShipName(groupID, shipStatistics, shipTemplate):
    """Return the name of the ship whose template belongs to *groupID*."""
    for tmpl_entry in shipTemplate.values():
        if tmpl_entry['group_type'] == groupID:
            for stat_entry in shipStatistics.values():
                if stat_entry['id'] == tmpl_entry['id']:
                    return stat_entry['name']
def createNameList():
    """Write "name, code, group" index lines for every ship collection code."""
    shipGroup = getShipGroup()
    shipStatistics = getShipStatistics()
    shipTemplate = getShipTemplate()
    shipCollection = {}
    # code -> group_type, deduplicating repeated codes (last one wins).
    for k, v in shipGroup.items():
        shipCollection[v['code']] = v['group_type']
    with open(os.path.join(util.WikiDirectory, 'nameIndex.txt'), 'w+', encoding='utf-8') as f:
        for k, v in shipCollection.items():
            name = getShipName(v, shipStatistics, shipTemplate)
            f.write(name + ', ' + str(k) + ', ' + str(v) + '\n')
if __name__ == "__main__":
createNameList()
|
{"/JuusNames.py": ["/util.py"], "/ShipIndex.py": ["/util.py"], "/Memory.py": ["/util.py"], "/ChapterAwards.py": ["/util.py"], "/PNData.py": ["/util.py"]}
|
35,415
|
FlandreCirno/AzurLaneWikiUtilitiesManual
|
refs/heads/master
|
/Memory.py
|
# -*- coding: utf-8 -*-
import re, os
from slpp import slpp
import util
StoryDirectory = os.path.join(util.DataDirectory, 'gamecfg', 'story')
getShipName = util.getShipName
getNameCode = util.getNameCode
parseNameCode = util.parseNameCode
ColorDict = {
'#a9f548': '#4eb24e', #绿色
'#ffff4d': '#ffd000', #黄色
'#ff5c5c': '#ec5d53', #红色
'#ffa500': '#ff9900' #橙色
}
def getMemoryGroup():
    # Event memory groupings.
    return util.parseDataFile('memory_group')


def getMemoryTemplate():
    # Individual memory (story chapter) metadata.
    return util.parseDataFile('memory_template')


def getWorldGroup():
    # Operation-record groupings.
    return util.parseDataFile('world_collection_record_group')


def getWorldTemplate():
    # Operation-record entries.
    return util.parseDataFile('world_collection_record_template')
def getStory(filename, type = 1):
    """Load and decode a story script.

    type 1: a plain story .lua under StoryDirectory -> decoded dict.
    type 2: a dungeon file; collects its intro story plus any wave-triggered
            stories (triggerType 3) and returns a list of decoded dicts.
    Returns None for any other type value.
    """
    if type == 1:
        with open(os.path.join(StoryDirectory, filename), 'r', encoding='utf-8') as f:
            content = f.read()
        # Strip the leading assignment; keep only the table literal.
        content = re.match(r".*?(\{.*\})" ,content, flags = re.DOTALL)[1]
        output = slpp.decode(content)
        return output
    elif type == 2:
        with open(os.path.join(util.DataDirectory, 'gamecfg', 'dungeon', filename), 'r', encoding='utf-8') as f:
            content = f.read()
        content = re.match(r".*?(\{.*\})" ,content, flags = re.DOTALL)[1]
        dungeon = slpp.decode(content)
        storylist = []
        # 'beginStoy' [sic] is the game data's own key spelling.
        if 'beginStoy' in dungeon.keys():
            storylist.append(dungeon['beginStoy'])
        stage = dungeon['stages']
        # Only the first stage's waves are scanned for story triggers.
        for wave in stage[0]['waves']:
            if wave['triggerType'] == 3:
                storylist.append(wave['triggerParams']['id'])
        output = []
        for story in storylist:
            # Recurse as a plain (type 1) story load.
            s = getStory(story.lower() + '.lua')
            output.append(s)
        return output
def getShipGroup():
    # Collection codes -> ship group ids.
    return util.parseDataFile('ship_data_group')


def getShipStatistics():
    # Per-ship stats, keyed by ship id.
    return util.parseDataFile('ship_data_statistics')


def getShipTemplate():
    # Ship templates (serpent-format source, hence mode 2).
    return util.parseDataFile('ship_data_template', mode = 2)


def getShipSkinTemplate():
    # Skin templates (serpent-format source, hence mode 2).
    return util.parseDataFile('ship_skin_template', mode = 2)
def getGroup(memoryGroup, worldGroup):
    """Normalize memory and world groups into {'memories', 'title'} dicts."""
    normalized = [
        {'memories': v['memories'], 'title': v['title']}
        for v in memoryGroup.values()
    ]
    normalized += [
        {'memories': v['child'], 'title': v['name_abbreviate']}
        for v in worldGroup.values()
    ]
    return normalized
def mergeMemoryTemplate(memoryTemplate, worldTemplate):
    """Fold world records into memoryTemplate as type-1 memory entries (in place)."""
    for key, record in worldTemplate.items():
        memoryTemplate[key] = {
            'id': record['id'],
            'type': 1,
            'title': record['name'],
            'story': record['story'],
        }
def getMemory(memoryID, memoryTemplate):
    """Look up a memory by id and attach its parsed story.

    Returns an empty (falsy) dict when the id is not found -- callers
    test the result with `if memory:`.
    """
    output = {}
    for k, v in memoryTemplate.items():
        if v['id'] == memoryID:
            output['title'] = v['title']
            story = v['story'].lower()
            output['type'] = v['type']
            try:
                output['story'] = getStory(story + '.lua', output['type'])
            except:
                # Dump what we had for debugging, then re-raise.
                print(output)
                raise
                # NOTE(review): unreachable -- the raise above always exits.
                return None
    return output
def sanitizeMemory(memory, skinTemplate, shipStatistics, shipTemplate, nameCode):
    """Flatten one memory's raw script into render-ready slide dicts.

    Returns {'title': ..., 'memory': [slide, ...]} where each slide is
    {'type', 'words', 'name', 'actor', 'color', 'option'}.  A story that is
    a list (dungeon type) is processed recursively with 'break' slides
    between segments.
    """
    output = {'title': parseNameCode(memory['title'], nameCode, AF = True), 'memory':[]}
    if isinstance(memory['story'], list):
        # Dungeon-style story: sanitize each segment and join with breaks.
        tempMemory = {'title': memory['title']}
        for story in memory['story']:
            tempMemory['story'] = story
            segMemory = sanitizeMemory(tempMemory, skinTemplate, shipStatistics, shipTemplate, nameCode)
            for m in segMemory['memory']:
                output['memory'].append(m)
            output['memory'].append({'type': 'break', 'words': None, 'name': None, 'actor': None, 'color': None, 'option':None})
        # Drop the trailing break.
        output['memory'] = output['memory'][:-1]
        return output
    scripts = memory['story']['scripts']
    if isinstance(scripts, dict):
        scripts = scripts.values()
    for script in scripts:
        words = ''
        type = None
        name = ''
        actor = None
        color = None
        option = None
        if isinstance(script, dict) and 'sequence' in script.keys():
            # Narration: join all sequence lines with newlines.
            if isinstance(script['sequence'], dict):
                script['sequence'] = script['sequence'].values()
            for s in script['sequence']:
                words += s[0] + '\n'
            words = words[:-1]
            type = 'sequence'
            if len(words) == 0:
                continue
        elif isinstance(script, dict) and 'say' in script.keys():
            # Dialogue: extract speaker, color, and any choice options.
            words = script['say']
            if 'actor' in script.keys():
                actor = script['actor']
            else:
                actor = None
            if 'nameColor' in script.keys():
                color = script['nameColor']
            else:
                color = None
            if 'options' in script.keys():
                if not option:
                    option = {'options': []}
                options = script['options']
                if isinstance(options, dict):
                    options = options.values()
                for o in options:
                    flag = ''
                    if 'flag' in o.keys():
                        flag = o['flag']
                    option['options'].append({'flag': flag, 'content': parseNameCode(o['content'], nameCode, AF = True)})
            if 'optionFlag' in script.keys():
                if not option:
                    option = {}
                option['optionFlag'] = script['optionFlag']
            if 'actorName' in script.keys():
                name = script['actorName']
            elif actor and actor > 0:
                # Resolve the speaker's name from their skin id.
                try:
                    name = getShipName(actor, skinTemplate, shipStatistics)
                except:
                    name = str(actor)
                    print(f'未找到actor{actor}名称')
            else:
                name = ''
            type = 'say'
        else:
            # Anything else (camera moves, effects, ...) is skipped.
            continue
        # Strip inline markup tags, then resolve name codes.
        words = re.sub(r'\<.*?\>', '', words)
        words = parseNameCode(words, nameCode, AF = True)
        name = parseNameCode(name, nameCode, AF = True)
        output['memory'].append({'type': type, 'words': words, 'name': name, 'actor': actor, 'color': color, 'option': option})
    return output
def buildGroup(group, skinTemplate, shipStatistics, shipTemplate, memoryTemplate, nameCode):
    """Resolve and sanitize every memory in *group* for wiki rendering."""
    output = {'title': parseNameCode(group['title'], nameCode), 'memories':[]}
    try:
        for memoryID in group['memories']:
            memory = getMemory(memoryID, memoryTemplate)
            if memory:
                memory = sanitizeMemory(memory, skinTemplate, shipStatistics, shipTemplate, nameCode)
            else:
                # Unknown/missing id: skip silently.
                continue
            output['memories'].append(memory)
    except:
        # NOTE(review): if getMemory raises on the very first iteration,
        # `memory` is unbound here and this print itself raises NameError.
        print(str(memory))
        raise
    return output
def wikiPage(group):
    """Render one group as a wiki section of collapsible panels."""
    parts = ['== ' + group['title'] + ' ==\n{{折叠面板|开始}}\n']
    for position, memory in enumerate(group['memories'], start=1):
        parts.append(wikiParagraph(memory, position))
    parts.append('{{折叠面板|结束}}\n')
    # Literal "\n" sequences in story text become real newlines.
    return ''.join(parts).replace('\\n', '\n')
def wikiParagraph(memory, index):
    """Render a single memory as a collapsible wiki panel.

    Tracks the previous slide's speaker and option state so wikiSlide can
    suppress repeated speaker names.
    """
    output = '{{折叠面板|标题=' + memory['title'] + '|选项=' + str(index) + '|主框=1|样式=primary|展开=否}}\n'
    lastActor = None
    lastOption = None
    for slide in memory['memory']:
        output += wikiSlide(slide, lastActor, lastOption)
        lastActor = slide['name']
        lastOption = None
        # Remember whether this slide opened an option branch.
        if slide['option']:
            if 'optionFlag' in slide['option'].keys():
                lastOption = slide['option']['optionFlag']
            elif 'options' in slide['option'].keys():
                lastOption = 0
    output += '{{折叠面板|内容结束}}\n\n'
    return output
def wikiSlide(slide, lastActor, lastOption):
    """Render one dialogue slide to wiki markup.

    The speaker name is shown only when it changes from the previous slide
    or when a new option branch starts.
    """
    output = ''
    if slide['type'] == 'break':
        return '<br>\n'
    thisOption = None
    if slide['option']:
        if 'optionFlag' in slide['option'].keys():
            thisOption = slide['option']['optionFlag']
        elif 'options' in slide['option'].keys():
            thisOption = 0
    if thisOption != 0 and thisOption != lastOption:
        name = slide['name']
    elif slide['name'] == lastActor:
        # Same speaker as the previous slide: omit the name line.
        name = None
    else:
        name = slide['name']
    if name != None:
        if len(name) > 0:
            if slide['color']:
                output += '<span style="color:' + replaceColor(slide['color']) + ';">' + name + ':</span>'
            else:
                output += name + ':'
            output += '<br>\n'
    # Choice marker for slides belonging to an option branch.
    if slide['option'] and 'optionFlag' in slide['option'].keys():
        output += "'''''<span style=" + '"color:black;"' + ">(选择项" + str(slide['option']['optionFlag']) + ")</span>'''''"
    output += nowiki(slide['words']).replace('\n', '<br>\n') + '<br>\n'
    # List the selectable options themselves, if any.
    if slide['option'] and 'options' in slide['option'].keys():
        output += '<br>\n'
        for option in slide['option']['options']:
            output += "'''''<span style=" + '"color:black;"' + ">选择项" + str(option['flag']) + ":"
            output += nowiki(option['content']) + "</span>'''''<br>\n"
    return output
def nowiki(text):
    """Wrap runs of three or more tildes so the wiki does not interpret them."""
    escaped = re.sub(r'(~{3,})', r'<nowiki>\1</nowiki>', text)
    return escaped
def replaceColor(color):
    """Map in-game text colors to wiki equivalents; pass through unknowns."""
    return ColorDict.get(color, color)
def wikiGenerate():
    """Build every memory group and write one wiki page per group."""
    nameCode = getNameCode()
    memoryGroup = getMemoryGroup()
    memoryTemplate = getMemoryTemplate()
    worldGroup = getWorldGroup()
    worldTemplate = getWorldTemplate()
    shipGroup = getShipGroup()
    shipStatistics = getShipStatistics()
    shipTemplate = getShipTemplate()
    skinTemplate = getShipSkinTemplate()
    groups = getGroup(memoryGroup, worldGroup)
    # World records are merged in so buildGroup can resolve them uniformly.
    mergeMemoryTemplate(memoryTemplate, worldTemplate)
    groupsbuilt = []
    for v in groups:
        groupsbuilt.append(buildGroup(v, skinTemplate, shipStatistics, shipTemplate, memoryTemplate, nameCode))
    for group in groupsbuilt:
        # ':' is stripped from titles to keep the filename valid.
        with open(os.path.join(util.WikiDirectory, 'memories', group['title'].replace(':', '') + '.txt'), 'w+', encoding='utf-8') as f:
            f.write(wikiPage(group))
def MemoryJP():
    """Same as wikiGenerate but against the ja-JP data set.

    Redirects util's module-level paths before loading anything; this works
    because util's functions read those globals at call time.
    """
    util.DataDirectory = os.path.join('AzurLaneData', 'ja-JP')
    util.JsonDirectory = os.path.join('json', 'JP')
    global StoryDirectory
    StoryDirectory = os.path.join(util.DataDirectory, 'gamecfg', 'storyjp')
    nameCode = getNameCode()
    memoryGroup = getMemoryGroup()
    memoryTemplate = getMemoryTemplate()
    worldGroup = getWorldGroup()
    worldTemplate = getWorldTemplate()
    shipGroup = getShipGroup()
    shipStatistics = getShipStatistics()
    shipTemplate = getShipTemplate()
    skinTemplate = getShipSkinTemplate()
    groups = getGroup(memoryGroup, worldGroup)
    mergeMemoryTemplate(memoryTemplate, worldTemplate)
    groupsbuilt = []
    for v in groups:
        groupsbuilt.append(buildGroup(v, skinTemplate, shipStatistics, shipTemplate, memoryTemplate, nameCode))
    for group in groupsbuilt:
        # ASCII ':' and '?' are replaced with full-width forms so titles
        # remain valid filenames.
        with open(os.path.join(util.WikiDirectory, 'memories', 'JP', group['title'].replace(':', ':').replace('?', '?') + '.txt'), 'w+', encoding='utf-8') as f:
            f.write(wikiPage(group))
if __name__ == "__main__":
wikiGenerate()
#MemoryJP()
|
{"/JuusNames.py": ["/util.py"], "/ShipIndex.py": ["/util.py"], "/Memory.py": ["/util.py"], "/ChapterAwards.py": ["/util.py"], "/PNData.py": ["/util.py"]}
|
35,416
|
FlandreCirno/AzurLaneWikiUtilitiesManual
|
refs/heads/master
|
/ChapterAwards.py
|
# -*- coding: utf-8 -*-
import re, os
from slpp import slpp
import util
shipType = ['驱逐', '轻巡', '重巡', '战巡', '战列', '轻母', '航母', '潜艇', '航巡', '航战', '雷巡', '维修', '重炮', '占位', '占位', '占位', '潜母', '超巡', '运输']
shipAwardList = ['驱逐', '轻巡', '重巡', '战巡', '战列', '航母', '轻母', '重炮', '维修', '潜艇']
def getShipTemplate():
    # Ship templates (serpent-format source, hence mode 2).
    return util.parseDataFile('ship_data_template', mode = 2)


def getShipStatistics():
    # Per-ship stats, keyed by ship id.
    return util.parseDataFile('ship_data_statistics')


def getChapterTemplate():
    # Chapter entries via util's special-case loader.
    return util.getChapterTemplate()


def getMapData():
    # Map (expedition) metadata.
    return util.parseDataFile('expedition_data_by_map')


def getShipSkinTemplate():
    # Skin templates (serpent-format source, hence mode 2).
    return util.parseDataFile('ship_skin_template', mode = 2)


def getItemStatistics():
    # Item drop/display data (serpent-format source, hence mode 2).
    return util.parseDataFile('item_data_statistics', mode = 2)
def getChapterAward():
    """Collect character drops per chapter and write one wiki file per map.

    Maps are categorized (normal/hard mainline, archive, event, rerun) and
    written under Wiki/chapterAwards/<category>/.
    """
    shipTemplate = getShipTemplate()
    shipStatistics = getShipStatistics()
    shipSkin = getShipSkinTemplate()
    mapData = getMapData()
    chapterTemplate = getChapterTemplate()
    itemStatistics = getItemStatistics()
    nameCode = util.getNameCode()
    mapName = {}
    # Resolve each chapter's character drops (award type 2 -> item whose
    # display_icon entries of type 4 are ship ids).
    for c in chapterTemplate.values():
        c['characterAward'] = []
        c['equipmentAward'] = []
        for award in c['awards']:
            if award[0] == 2:
                for a in itemStatistics[award[1]]['display_icon']:
                    if a[0] == 4:
                        c['characterAward'].append(a[1])
    # Attach chapters to their maps and group maps sharing one name.
    for m in mapData.values():
        m['chapters'] = {}
        for c in chapterTemplate.values():
            if c['map'] == m['map']:
                m['chapters'][c['id']] = c
        if m['name'] in mapName.keys():
            mapName[m['name']][m['map']] = m
        else:
            mapName[m['name']] = {m['map']: m}
    # Categorize every map.
    for m in mapName.values():
        for m2 in m.values():
            if m2['type'] == 1:
                m2['category'] = '普通主线'
            elif m2['type'] == 2:
                m2['category'] = '困难主线'
            else:
                if m2['on_activity'] == 0:
                    m2['category'] = '作战档案'
                else:
                    # The earliest activity id among same-named maps is the
                    # original run; later ones are reruns.
                    on_activity = m2['on_activity']
                    for m3 in m.values():
                        if m3['on_activity'] != 0 and m3['on_activity'] < on_activity:
                            on_activity = m3['on_activity']
                    if on_activity != m2['on_activity']:
                        m2['category'] = '复刻活动'
                    else:
                        m2['category'] = '限时活动'
    # Write one file per map.
    for m in mapData.values():
        filename = re.match(r'[^|]*', m['name'])[0]
        if m['type'] == 4:
            filename += '普通'
        elif m['type'] == 5:
            filename += '困难'
        filename += '.txt'
        filePath = os.path.join(util.WikiDirectory, 'chapterAwards', m['category'], filename)
        if os.path.isfile(filePath):
            # Fixed: the message previously had no placeholder, so the
            # offending path was never reported.
            raise Exception(f'File: {filePath} already exists!')
        with open(filePath, 'w+', encoding='utf-8') as f:
            output = formatMap(m, shipSkin, shipTemplate, shipStatistics)
            output = util.parseNameCode(output, nameCode)
            f.write(output)
def formatMap(mapData, shipSkin, shipTemplate, shipStatistics):
    """Render a map header followed by every chapter's drop lines."""
    rendered = [mapData['name'] + '\n']
    for chapter in mapData['chapters'].values():
        rendered.append(formatChapter(chapter, shipSkin, shipTemplate, shipStatistics))
    return ''.join(rendered)
def formatChapter(chapterData, shipSkin, shipTemplate, shipStatistics):
    """Render one chapter's drop list as wiki template parameters."""
    output = chapterData['chapter_name'] + '-' + chapterData['name'] + '\n'
    characterList = {}
    # Only hull types in shipAwardList get an output line.
    for t in shipAwardList:
        characterList[t] = []
    for award in chapterData['characterAward']:
        # award // 10 is the ship's group id; shipType is 1-indexed.
        t = util.getShipType(award, shipTemplate, award//10)
        if shipType[t-1] in characterList.keys():
            characterList[shipType[t-1]].append(util.getShipName(award, shipSkin, shipStatistics))
    for k, v in characterList.items():
        output += '|掉落' + k + '='
        for s in v:
            output += s + '、'
        # Trims the trailing '、' (or the '=' when the list is empty).
        output = output[:-1] + '\n'
    return output
if __name__ == "__main__":
getChapterAward()
|
{"/JuusNames.py": ["/util.py"], "/ShipIndex.py": ["/util.py"], "/Memory.py": ["/util.py"], "/ChapterAwards.py": ["/util.py"], "/PNData.py": ["/util.py"]}
|
35,417
|
FlandreCirno/AzurLaneWikiUtilitiesManual
|
refs/heads/master
|
/PNData.py
|
import re, os
from slpp import slpp
import util
STATUSENUM = {'durability': 0, 'cannon': 1, 'torpedo': 2, 'antiaircraft': 3, 'air': 4, 'reload': 5, 'range': 6, 'hit': 7, 'dodge': 8, 'speed': 9, 'luck': 10, 'antisub': 11, 'gearscore': 12}
STATUSINVERSE = ['durability', 'cannon', 'torpedo', 'antiaircraft', 'air', 'reload', 'range', 'hit', 'dodge', 'speed', 'luck', 'antisub', 'gearscore']
def getShipGroup():
    # Collection codes -> ship group ids.
    return util.parseDataFile('ship_data_group')


def getShipStatistics():
    # Per-ship base stats.
    return util.parseDataFile('ship_data_statistics')


def getShipTemplate():
    # Ship templates (serpent-format source, hence mode 2).
    return util.parseDataFile('ship_data_template', mode = 2)


def getShipStrengthen():
    # Enhancement (strengthen) caps per strengthen id.
    return util.parseDataFile('ship_data_strengthen')


def getShipTrans():
    # Retrofit (transform) trees per ship group.
    return util.parseDataFile('ship_data_trans')


def getTransformTemplage():
    # Individual retrofit node effects.  (Function name keeps the original typo.)
    return util.parseDataFile('transform_data_template')


def getShipStrengthenBlueprint():
    # Research-ship blueprint strengthen levels.
    return util.parseDataFile('ship_strengthen_blueprint')


def getShipDataBlueprint():
    # Research-ship blueprint metadata per group.
    return util.parseDataFile('ship_data_blueprint')
def getWikiID(id):
    """Map a raw collection code to its wiki PN id string.

    Ranges: < 10000 plain, 1xxxx 'Collab', 2xxxx 'Plan', 3xxxx 'Meta'.
    Returns None for anything >= 40000.
    """
    suffix = '%03d' % (id % 10000)
    if id < 10000:
        return suffix
    if id < 20000:
        return 'Collab' + suffix
    if id < 30000:
        return 'Plan' + suffix
    if id < 40000:
        return 'Meta' + suffix
def shipTransform(group, shipTrans, transformTemplate):
    """Accumulate retrofit stat bonuses for a ship group.

    Returns (stat_total_vector, retrofit_ship_id); (None, None) when the
    group has no retrofit tree.  The last node carrying a 'ship_id' wins.
    """
    if group in shipTrans.keys():
        trans = shipTrans[group]
        trans = trans['transform_list']
        transList = []
        transShipID = None
        # transform_list is a list of node groups, each holding nodes whose
        # second element is a transform_template id.
        for t1 in trans:
            for t2 in t1:
                data = transformTemplate[t2[1]]
                for e in data['effect']:
                    for k, v in e.items():
                        transList.append({'type': k, 'amount': v})
                for e in data['gear_score']:
                    transList.append({'type': 'gearscore', 'amount': e})
                if 'ship_id' in data.keys() and len(data['ship_id']) > 0:
                    transShipID = data['ship_id'][0][1]
        return (statusTransTotal(transList), transShipID)
    else:
        return None, None
def statusTransTotal(transList):
    """Sum per-stat bonuses into a 13-slot vector indexed by STATUSENUM."""
    totals = [0] * 13
    for entry in transList:
        slot = STATUSENUM.get(entry['type'])
        if slot is not None:
            totals[slot] += entry['amount']
    return totals
def modifyTechData(data, blueprintData, blueprintStrengthen):
    """Fold research-ship (2xxxx) blueprint bonuses into base stats in place.

    NOTE(review): attr amounts are scaled x100 here and floor-divided by 100
    after summing -- presumably to keep integer math; confirm against the
    data's unit conventions.
    """
    for ship in data:
        if ship['realID'] > 20000 and ship['realID'] < 30000:
            groupID = ship['groupID']
            blueprintStrengthenID = blueprintData[groupID]['strengthen_effect']
            strengthenList = []
            # 10 blueprint levels per breakout stage, capped at 30 levels.
            for i in range(0, min(30, ship['breakout']*10)):
                if 'effect_attr' in blueprintStrengthen[blueprintStrengthenID[i]].keys() \
                        and blueprintStrengthen[blueprintStrengthenID[i]]['effect_attr']:
                    for effect in blueprintStrengthen[blueprintStrengthenID[i]]['effect_attr']:
                        strengthenList.append({'type': effect[0], 'amount': effect[1]*100})
                # STATUSINVERSE[1..5]: cannon/torpedo/antiaircraft/air/reload.
                for j in range(5):
                    strengthenList.append({'type': STATUSINVERSE[j+1], 'amount': blueprintStrengthen[blueprintStrengthenID[i]]['effect'][j]})
            strengthenTotal = statusTransTotal(strengthenList)
            for i in range(12):
                ship['values'][3*i] += strengthenTotal[i]//100
            # Blueprint ships have no ordinary strengthen slots; zero them.
            for i in range(36, 41):
                ship['values'][i] = 0
def modifyMetaData():
    # Placeholder for META-ship (3xxxx) adjustments; not implemented yet.
    # NOTE(review): relies on a global `data` that only exists when run via
    # the __main__ block -- calling this elsewhere raises NameError.
    for ship in data:
        if ship['realID'] > 30000 and ship['realID'] < 40000:
            pass
def getData(group, statistics, template, strengthen, shipTrans, transformTemplate, ships = None):
    """Assemble the per-breakout stat rows for every ship.

    Returns a list of ship dicts each carrying a 56-slot 'values' vector:
    [0:36) base/growth/extra triplets for 12 stats, [36:41) strengthen caps,
    [41:54) retrofit bonuses, [54:56) oil costs.
    """
    if not ships:
        # Default scope: every collection code below the META range.
        ships = {}
        for k, v in group.items():
            if v['code'] < 30000:
                ships[v['code']] = v['group_type']
    shipData = []
    for realID, groupID in ships.items():
        id = getWikiID(realID)
        shipID = {}
        # Index each breakout stage 0-3 by star progress; the tempID//10
        # check filters out retrofit templates sharing the group.
        for tempID, v in template.items():
            if v['group_type'] == groupID and tempID // 10 == groupID :
                shipID[3 - (v['star_max'] - v['star'])] = {'id':tempID, 'oil_at_start':v['oil_at_start'],
                    'oil_at_end':v['oil_at_end'], 'strengthen_id':v['strengthen_id'], 'wikiID': id, 'realID': realID, 'groupID': groupID}
        # Stage 4 is the retrofit form, when one exists.
        shipRemould, transShipID = shipTransform(groupID, shipTrans, transformTemplate)
        if transShipID and transShipID in template.keys():
            v = template[transShipID]
            shipID[4] = {'id':transShipID, 'oil_at_start':v['oil_at_start'],
                'oil_at_end':v['oil_at_end'], 'strengthen_id':v['strengthen_id'], 'wikiID':id, 'realID': realID, 'groupID': groupID}
        for breakout in range(5):
            if breakout in shipID.keys():
                v = shipID[breakout]
                v['breakout'] = breakout
                stat = statistics[v['id']]
                v['attrs'] = stat['attrs']
                v['attrs_growth'] = stat['attrs_growth']
                v['attrs_growth_extra'] = stat['attrs_growth_extra']
                v['strengthen'] = strengthen[v['strengthen_id']]['durability']
                v['name'] = stat['name']
                v['values'] = [0]*56
                for i in range(12):
                    v['values'][3*i] = v['attrs'][i]
                    v['values'][3*i+1] = v['attrs_growth'][i]
                    v['values'][3*i+2] = v['attrs_growth_extra'][i]
                for i in range(5):
                    v['values'][36+i] = v['strengthen'][i]
                v['values'][54] = v['oil_at_start']
                v['values'][55] = v['oil_at_end']
                # Retrofit bonuses apply to every stage row when present.
                if shipRemould:
                    for i in range(13):
                        v['values'][i+41] += shipRemould[i]
                shipData.append(v)
    return shipData
def formatData(ID, values, name, breakout):
    """Format one ship row as a PN data line for the wiki's JS table."""
    # The three starter ships only exist at breakout 0 on the wiki.
    if ID in ['001', '002', '003']:
        breakout = 0
    # Breakout 4 is the retrofit form, labelled "g3" / "_3" on the wiki.
    label = 'g3' if breakout == 4 else str(breakout)
    suffix = '3' if breakout == 4 else str(breakout)
    joined = ','.join(str(v) for v in values)
    return 'PN' + ID + label + ':[' + joined + '],\t//' + name + '_' + suffix + '破'
if __name__ == "__main__":
    # Load every table, build the per-breakout rows, then write PN.txt.
    group = getShipGroup()
    statistics = getShipStatistics()
    template = getShipTemplate()
    strengthen = getShipStrengthen()
    shipTrans = getShipTrans()
    transformTemplate = getTransformTemplage()
    blueprintData = getShipDataBlueprint()
    blueprintStrengthen = getShipStrengthenBlueprint()
    data = getData(group, statistics, template, strengthen, shipTrans, transformTemplate)
    modifyTechData(data, blueprintData, blueprintStrengthen)

    def func(ship):
        # Sort by wiki id then breakout so output lines group per ship.
        return ship['wikiID'] + str(ship['breakout'])

    data.sort(key = func)
    # Fixed: the file handle was opened/closed manually; `with` guarantees
    # it is closed even if data loading or formatting raises.
    with open(os.path.join(util.WikiDirectory, 'PN.txt'), 'w+', encoding = 'utf-8') as f:
        for ship in data:
            f.write(formatData(ship['wikiID'], ship['values'], ship['name'], ship['breakout']))
            f.write('\n')
|
{"/JuusNames.py": ["/util.py"], "/ShipIndex.py": ["/util.py"], "/Memory.py": ["/util.py"], "/ChapterAwards.py": ["/util.py"], "/PNData.py": ["/util.py"]}
|
35,418
|
FlandreCirno/AzurLaneWikiUtilitiesManual
|
refs/heads/master
|
/Initialize.py
|
# -*- coding: utf-8 -*-
import os
# Individual files to delete on (re)initialization; currently none.
FileList = []
# Output directories used by the generator scripts.  The __main__ block below
# empties each existing directory of its files and creates any missing ones.
PathList = [
    os.path.join('Wiki', 'memories'),
    'json',
    'Wiki',
    os.path.join('json', 'JP'),
    os.path.join('Wiki', 'memories', 'JP'),
    os.path.join('Wiki', 'chapterAwards'),
    os.path.join('Wiki', 'chapterAwards', '普通主线'),
    os.path.join('Wiki', 'chapterAwards', '困难主线'),
    os.path.join('Wiki', 'chapterAwards', '限时活动'),
    os.path.join('Wiki', 'chapterAwards', '复刻活动'),
    os.path.join('Wiki', 'chapterAwards', '作战档案')
]
if __name__ == "__main__":
    # Reset the workspace: remove listed files, then empty every known output
    # directory (creating it first when absent).  Subdirectories are kept.
    for stale in FileList:
        os.remove(stale)
    for directory in PathList:
        if not os.path.isdir(directory):
            os.makedirs(directory)
            continue
        for entry in os.listdir(directory):
            entryPath = os.path.join(directory, entry)
            if os.path.isfile(entryPath):
                os.remove(entryPath)
|
{"/JuusNames.py": ["/util.py"], "/ShipIndex.py": ["/util.py"], "/Memory.py": ["/util.py"], "/ChapterAwards.py": ["/util.py"], "/PNData.py": ["/util.py"]}
|
35,427
|
anqurvanillapy/sanscc
|
refs/heads/master
|
/test/util.py
|
from subprocess import CalledProcessError, run, check_output
# Shell pipelines under test; {} is filled with the input expression.
COMMAND_LEXER = 'echo "{}" | bash src/lexer.sh'
# The parser consumes the lexer's token stream.
COMMAND_PARSER = 'echo "{}" | bash src/lexer.sh | bash src/parser.sh'
def _check(expr, cmd):
return check_output(cmd.format(expr), shell=True)
def check_token(expr):
    """Capture the lexer pipeline's stdout for *expr*."""
    return _check(expr, COMMAND_LEXER)
def check_parse(expr):
    """Capture the lexer+parser pipeline's stdout for *expr*."""
    return _check(expr, COMMAND_PARSER)
def _run(expr, cmd):
return run(cmd.format(expr), shell=True, check=True)
def run_token(expr):
    """Run the lexer pipeline on *expr*, raising on failure."""
    return _run(expr, COMMAND_LEXER)
def run_parse(expr):
    """Run the lexer+parser pipeline on *expr*, raising on failure."""
    return _run(expr, COMMAND_PARSER)
|
{"/test/test_parser.py": ["/test/util.py"], "/test/test_lexer.py": ["/test/util.py"]}
|
35,428
|
anqurvanillapy/sanscc
|
refs/heads/master
|
/test/test_parser.py
|
import unittest
from .util import *
class TestParser(unittest.TestCase):
    """Smoke tests for the shell-based infix -> postfix parser."""

    def test_valid_expression(self):
        expected = b'1 2 + 3 / \n'
        self.assertEqual(check_parse('1+ 2 /3 '), expected)

    def test_invalid_expression(self):
        # A leading operator has no left operand, so the pipeline must fail.
        with self.assertRaises(CalledProcessError):
            run_parse('+ 1')
|
{"/test/test_parser.py": ["/test/util.py"], "/test/test_lexer.py": ["/test/util.py"]}
|
35,429
|
anqurvanillapy/sanscc
|
refs/heads/master
|
/test/test_lexer.py
|
import unittest
from .util import *
class TestLexer(unittest.TestCase):
    """Smoke tests for the shell-based lexer."""

    def test_valid_number(self):
        expected = b'NAT 0123456789\nNAT 9876543210\n'
        self.assertEqual(check_token('0123456789 9876543210'), expected)

    def test_valid_operators(self):
        expected = b'OPT +\nOPT -\nOPT *\nOPT /\n'
        self.assertEqual(check_token('+ - * /'), expected)

    def test_valid_expression(self):
        expected = b'NAT 1\nOPT +\nNAT 22\nOPT *\nNAT 333\n'
        self.assertEqual(check_token('1+22 *333'), expected)

    def test_invalid_character(self):
        # '!' is outside the token alphabet.
        with self.assertRaises(CalledProcessError):
            run_token('!')

    def test_invalid_operator(self):
        # Two consecutive operators cannot be tokenized.
        with self.assertRaises(CalledProcessError):
            run_token('1++')
|
{"/test/test_parser.py": ["/test/util.py"], "/test/test_lexer.py": ["/test/util.py"]}
|
35,430
|
psturmfels/cfAD
|
refs/heads/master
|
/trainMFRealData.py
|
import numpy as np
import pandas as pd
import datetime
from multiprocessing import Pool
from functools import partial
from CrossValidation import *
from FeatureSimilarity import GetTopGenes
from MatrixFactorization import CreateLatentVariables, FactorizeMatrix, GetRepresentationError
from utils import *
from ReadData import *
from GetJSON import get
# Combined data table: rows are genes/traits, columns are samples.
totalDataDF = pd.read_csv('/projects/leelab3/psturm/concatData/totalDataDF.csv', header=0, index_col=0)
# Binary gene-to-pathway membership table, indexed by gene name.
binaryPathwayDF = pd.read_csv('/projects/leelab3/psturm/concatData/pathways.tsv', sep='\t', header=0)
binaryPathwayDF.set_index('Genes', inplace=True)
# Transpose so X is (samples x genes).
X = totalDataDF.values.T
n, g = X.shape
half_n = int(n / 2)  # size of each half of a random sample split
binaryPathwayMat = binaryPathwayDF.values
# Gene neighbour weights from pathway co-membership; presumably consumed by
# FactorizeMatrix as a graph regularizer — confirm against MatrixFactorization.
neighbors = GetNeighborDictionary(binaryPathwayMat)
# SGD hyper-parameters; values presumably come from earlier CV runs — confirm.
eta = 0.01
lamb1 = 0.04
lamb2 = 0.02
# eta_nn =
# lamb1_nn =
# lamb2_nn =
latentDim = 100 #Somewhat arbitrary, but solution does not vary greatly as a function of latent dimension
numReps = 100 #Also somewhat arbitrary. Lower this if it takes too long.
maxEpochs = 4 #Based on CV results. Since the data matrix is so large, it doesn't take many epochs to converge
def TrainReps(rep):
    """Factorize both halves of pre-generated random split `rep` and persist the factors.

    Reads perm{rep}.csv, splits the valid sample indices in half, fits one
    matrix factorization per half, saves U/V for each, and appends progress
    lines to trainMF_real.txt.
    """
    with open('trainMF_real.txt', 'a') as progress_file:
        progress_file.write('Started random split {} at time:\t{}\n'.format(rep, datetime.datetime.now()))
    perm = np.loadtxt('/projects/leelab3/psturm/realData/randomIndices/perm{}.csv'.format(rep), dtype=int)
    perm = perm[perm < n]  # drop indices beyond the sample count
    trainRows = perm[:half_n]
    valdRows = perm[half_n:]
    U0_train, V0_train = CreateLatentVariables(len(trainRows), g, latentDim)
    U_train, V_train = FactorizeMatrix(X[trainRows, :], U0_train, V0_train, neighbors,
                                       eta=eta, lamb1=lamb1, lamb2=lamb2, num_epochs=maxEpochs)
    U0_vald, V0_vald = CreateLatentVariables(len(valdRows), g, latentDim)
    U_vald, V_vald = FactorizeMatrix(X[valdRows, :], U0_vald, V0_vald, neighbors,
                                     eta=eta, lamb1=lamb1, lamb2=lamb2, num_epochs=maxEpochs)
    np.save('/projects/leelab3/psturm/realModels/overlapModels/U_train{}.npy'.format(rep), U_train)
    np.save('/projects/leelab3/psturm/realModels/overlapModels/V_train{}.npy'.format(rep), V_train)
    np.save('/projects/leelab3/psturm/realModels/overlapModels/U_vald{}.npy'.format(rep), U_vald)
    np.save('/projects/leelab3/psturm/realModels/overlapModels/V_vald{}.npy'.format(rep), V_vald)
    with open('trainMF_real.txt', 'a') as progress_file:
        progress_file.write('Ended random split {} at time:\t{}\n'.format(rep, datetime.datetime.now()))
# Fan the splits out across worker processes; each TrainReps call is independent.
numProcesses = 25
p = Pool(numProcesses)
p.map(TrainReps, range(numReps))
# Drain and join the pool so all saves complete before the script exits.
p.close()
p.join()
|
{"/trainMFRealData.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTuning.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/archived/code/tests.py": ["/CrossValidation.py"], "/trainMFFinal.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTraining.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/CrossValidation.py": ["/MatrixFactorization.py", "/FeatureSimilarity.py"]}
|
35,431
|
psturmfels/cfAD
|
refs/heads/master
|
/archived/MatrixFactorization.py
|
import numpy as np
def FactorizeMatrix(X, k, eta=0.005, lamb=0.02, num_epochs=200, known_indices=None, test_indices=None, verbose=False):
    '''
    Factorizes the sparse matrix X into the product of two rank k matrices
    U and V using stochastic gradient descent.

    Args:
        X: An n x g, possibly sparse numpy matrix, where missing entries are indicated by np.nan values,
            where n represents the number of samples and g represents the number of genes, or items.
        k: The latent dimension of the factorization. Typically, k < min(n, g).
        eta: The learning rate (multiplicative factor applied to the gradient).
        lamb: Hyper-parameter controlling how much to regularize the latent representations.
        num_epochs: The number of epochs to run SGD over. The default is 200.
        known_indices: An optional t x 2 matrix, each row of which represents the index
            of a known entry in X. Used to train on only a subset of known entries.
            If a vector is provided, assumes that the vector denotes the indices of
            samples to use as training.
            If None is provided, then the algorithm will train over all non nan values.
        test_indices: Optional index matrix/vector (same conventions as known_indices);
            only used for the held-out error printed when verbose is True.
        verbose: Whether or not to print train/test error each epoch. Defaults to False.

    Returns:
        Matrices U and V representing the latent vectors for each sample and each gene, respectively.
    '''
    n, g = X.shape
    #The shape allows us to interpret the rows of U and V as latent representations.
    sigma = 0.02  # scale of the random initialization
    U = np.random.randn(n, k) * sigma
    V = np.random.randn(g, k) * sigma
    if (known_indices is None):
        known_indices = np.argwhere(~np.isnan(X))
    for epoch in range(num_epochs):
        # Visit the known entries in a fresh random order each epoch.
        np.random.shuffle(known_indices)
        if len(known_indices.shape) == 2:
            iterated_indices = known_indices
        elif len(known_indices.shape) == 1:
            # 1D vector of sample indices: expand to every (sample, gene) pair.
            iterated_indices = np.array(np.meshgrid(known_indices, np.arange(g))).T.reshape(-1, 2)
        else:
            raise ValueError('known_indices has shape {}, but should be 1D or 2D.'.format(known_indices.shape))
        for known_index in iterated_indices:
            i, j = known_index
            x_ij = X[i, j]
            u_i = U[i, :]
            v_j = V[j, :]
            #Calculate symbolic gradients
            e_ij = x_ij - np.dot(u_i, v_j)
            grad_ui = e_ij * v_j - lamb * u_i
            grad_vj = e_ij * u_i - lamb * v_j
            #Apply gradients to latent representation
            U[i, :] = u_i + eta * grad_ui
            V[j, :] = v_j + eta * grad_vj
        if (verbose and epoch % 1 == 0):  # NOTE(review): `epoch % 1 == 0` is always true
            train_error = GetRepresentationError(X, U, V, known_indices)
            test_error = GetRepresentationError(X, U, V, test_indices)
            print('Epoch {} - current train error: {} - current test error: {}'.format(epoch, train_error, test_error))
    return U, V
def GetRepresentationError(X, U, V, known_indices=None):
    '''
    Mean squared reconstruction error of the factorization U V^T over the
    evaluated entries of X.

    Args:
        X: An n x g, possibly sparse numpy matrix; np.nan marks missing entries.
        U: An n x k matrix whose rows are the latent sample vectors.
        V: A g x k matrix whose rows are the latent gene vectors.
        known_indices: An optional t x 2 matrix of index pairs, or a 1D vector of
            sample indices (expanded against every gene). Defaults to every
            non-nan entry of X.

    Returns:
        The mean of (x_ij - u_i . v_j)^2 over the evaluated indices.

    Raises:
        ValueError: if known_indices is neither 1D nor 2D.
    '''
    _, g = X.shape
    if known_indices is None:
        known_indices = np.argwhere(~np.isnan(X))
    ndim = len(known_indices.shape)
    if ndim == 2:
        pairs = known_indices
    elif ndim == 1:
        pairs = np.array(np.meshgrid(known_indices, np.arange(g))).T.reshape(-1, 2)
    else:
        raise ValueError('known_indices has shape {}, but should be 1D or 2D.'.format(known_indices.shape))
    total = 0.0
    for i, j in pairs:
        total += np.square(X[i, j] - np.dot(U[i, :], V[j, :]))
    return total / pairs.shape[0]
# Smoke test: build a synthetic low-rank matrix and check SGD can refit it.
n = 1000
g = 20000
latentDim = 50
#Create some random, low-rank data
U = np.random.randn(n, latentDim).astype(np.float32)
V = np.random.randn(g, latentDim).astype(np.float32)
X = np.dot(U, V.T)
knownIndices = np.argwhere(~np.isnan(X))  # every entry — X has no NaNs here
#For testing purposes, we need to shuffle the indices. If we do not,
#we will be training on a chunk of the upper half of the matrix, but
#testing on the lower half of the matrix. This makes no sense,
#because we don't have any information about the latent variables we are testing on. Therefore, shuffle!
np.random.shuffle(knownIndices)
numberTestIndices = 20000
testIndices = knownIndices[:numberTestIndices, :]
trainIndices = knownIndices[numberTestIndices:, :]
print(testIndices)
print(trainIndices)
# NOTE(review): U and V are rebound here; the ground-truth factors above are discarded.
U, V = FactorizeMatrix(X, k=latentDim, eta=0.005, lamb=0.02, num_epochs=5, known_indices=trainIndices, test_indices=testIndices, verbose=True)
testError = GetRepresentationError(X, U, V, known_indices=testIndices)
print('Final Test Error was: {}'.format(testError))
|
{"/trainMFRealData.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTuning.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/archived/code/tests.py": ["/CrossValidation.py"], "/trainMFFinal.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTraining.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/CrossValidation.py": ["/MatrixFactorization.py", "/FeatureSimilarity.py"]}
|
35,432
|
psturmfels/cfAD
|
refs/heads/master
|
/archived/code/DataInput.py
|
import tensorflow as tf
def GetDataSet(batchSize, knownIndices, shuffle=True):
    """Wrap an index array in a tf.data pipeline of int32 index batches.

    Args:
        batchSize: number of index rows per batch.
        knownIndices: (t, 2) array of known (sample, trait) indices.
        shuffle: whether to shuffle samples before batching.

    Returns:
        A tf.data.Dataset yielding batches of index rows.
    """
    indicesConst = tf.constant(knownIndices, dtype=tf.int32, name='knownIndices')
    dataset = tf.data.Dataset.from_tensor_slices(indicesConst)
    if (shuffle):
        # BUG FIX: shuffle must be applied BEFORE batch.  Shuffling after
        # batching only permutes whole batches (and buffer_size then counts
        # batches, not samples), so rows within a batch were never mixed.
        dataset = dataset.shuffle(buffer_size=10000)
    dataset = dataset.batch(batchSize)
    return dataset
def CreateIterator(iterType=tf.int32, outputShape=[None, 2]):
    """Create a structure-based (reinitializable) iterator and its next-element op."""
    it = tf.data.Iterator.from_structure(iterType, outputShape)
    return it, it.get_next()
def GetIterInitOp(iter, dataset):
    # Op that (re)binds the reinitializable iterator to `dataset`.
    # NOTE(review): parameter `iter` shadows the builtin of the same name.
    return iter.make_initializer(dataset)
def GetBatchOperation(expressionMatrix, indexBatch):
    """Split (sample, trait) index pairs into per-column ops and gather the matrix values."""
    sampleOp = indexBatch[:, 0]
    traitOp = indexBatch[:, 1]
    valueOp = tf.gather_nd(expressionMatrix, indexBatch)
    return sampleOp, traitOp, valueOp
def CreateSoloOps(expressionMatrix, batchSize, trainIndices):
    """Wire up a train-only input pipeline over the known indices."""
    with tf.variable_scope('DataPipeline'):
        trainSet = GetDataSet(batchSize, trainIndices)
        it, batch = CreateIterator()
        trainInitOp = GetIterInitOp(it, trainSet)
        sampleOp, traitOp, dataOp = GetBatchOperation(expressionMatrix, batch)
        return trainInitOp, sampleOp, traitOp, dataOp
def CreateJointOps(expressionMatrix, batchSizeTrain, batchSizeTest, trainIndices, testIndices):
    """Wire up one shared iterator that can be re-initialized for train or test."""
    with tf.variable_scope('DataPipeline'):
        trainSet = GetDataSet(batchSizeTrain, trainIndices)
        testSet = GetDataSet(batchSizeTest, testIndices, shuffle=False)
        it, batch = CreateIterator()
        trainInitOp = GetIterInitOp(it, trainSet)
        testInitOp = GetIterInitOp(it, testSet)
        sampleOp, traitOp, dataOp = GetBatchOperation(expressionMatrix, batch)
        return trainInitOp, testInitOp, sampleOp, traitOp, dataOp
|
{"/trainMFRealData.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTuning.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/archived/code/tests.py": ["/CrossValidation.py"], "/trainMFFinal.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTraining.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/CrossValidation.py": ["/MatrixFactorization.py", "/FeatureSimilarity.py"]}
|
35,433
|
psturmfels/cfAD
|
refs/heads/master
|
/utils.py
|
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.decomposition import PCA
def GenerateRegressedPhenotype(X, numPhenotypes=1, lam=1, binaryPathwayMatrix=None, coeffSigma=1.0):
    """Simulate phenotypes as sparse linear functions of the expression matrix X.

    Args:
        X: (n, g) expression matrix.
        numPhenotypes: number of phenotype columns to simulate.
        lam: Poisson rate controlling how many pathways (or genes) drive each phenotype.
        binaryPathwayMatrix: optional (g, k) binary gene-to-pathway membership;
            when given, coefficients are assigned pathway-wise.
        coeffSigma: scale of the regression coefficients; noise uses 0.5 * coeffSigma.

    Returns:
        Y: (n, numPhenotypes) simulated phenotype matrix.
        geneCoeffs: (g, numPhenotypes) regression coefficients used.
    """
    n, g = X.shape
    if binaryPathwayMatrix is not None:
        _, k = binaryPathwayMatrix.shape
    Y = np.zeros((n, numPhenotypes))
    geneCoeffs = np.zeros((g, numPhenotypes))
    for i in range(numPhenotypes):
        if binaryPathwayMatrix is not None:
            numPathways = np.minimum(np.random.poisson(lam=lam) + 1, k)
            chosenPathways = np.random.choice(k, size=(numPathways, ), replace=False)
            for l in chosenPathways:
                chosenIndices = np.where(binaryPathwayMatrix[:, l] > 0)[0]
                chosenIndices = np.unique(chosenIndices)
                numGenesInPhenotype = len(chosenIndices)
                # One random sign per pathway, magnitudes drawn per gene.
                geneCoeffs[chosenIndices, i] = np.random.choice([-1, 1]) * np.abs(np.random.randn(numGenesInPhenotype) * coeffSigma)
        else:
            numGenesInPhenotype = np.minimum(np.random.poisson(lam=lam) + 1, g)
            chosenIndices = np.random.choice(g, size=(numGenesInPhenotype,), replace=False)
            geneCoeffs[chosenIndices, i] = np.random.randn(numGenesInPhenotype) * coeffSigma
        # BUG FIX: previously Y used `chosenIndices` left over from only the
        # LAST pathway of the loop above, silently dropping coefficients set
        # for earlier pathways.  Using the full coefficient vector includes
        # every selected gene; zero coefficients contribute nothing, so the
        # single-pathway and non-pathway cases are unchanged.
        Y[:, i] = np.dot(X, geneCoeffs[:, i]) + np.random.randn(n) * coeffSigma * 0.5
    return Y, geneCoeffs
#LATENT FACTOR MODEL GENERATION
def GenerateSimulatedData(n = 200, g = 2000, k = 20, numPathways = 20, avgGenesInPath=100.0, covariateU=False):
    """Simulate a latent-factor expression dataset with disjoint gene pathways.

    Args:
        n: number of samples.
        g: number of genes.
        k: latent dimension.
        numPathways: number of disjoint pathways to create.
        avgGenesInPath: Poisson mean used to draw relative pathway sizes.
        covariateU: when True, draw rows of U from a random full-covariance
            Gaussian instead of i.i.d. normals.

    Returns:
        (U, V, binaryPathwayMatrix, phenotypeGenes): latent sample factors (n, k),
        latent gene factors (g, k), gene-to-pathway membership (g, numPathways),
        and the gene indices of pathway 0 plus gene 0.
    """
    sigma = 0.5
    binaryPathwayMatrix = np.zeros((g, numPathways))
    # Gene 0 is held out of the assignable pool; it is appended to pathway 0 below.
    remainingGeneIndices = np.arange(1, g)
    if covariateU:
        # Random covariance with the diagonal boosted toward a mean of ~0.5.
        randomMat = np.random.randn(k, k).astype(np.float) * sigma;
        covMat = np.dot(randomMat.T, randomMat)
        covMat = covMat / np.max(covMat)
        covMat = covMat + np.maximum(0.5 - np.mean(np.diag(covMat)), 0.0) * np.eye(k)
        mean = np.zeros((k,))
        U = np.random.multivariate_normal(mean, covMat, size=(n,))
    else:
        U = np.random.randn(n, k).astype(np.float32) * sigma;
    V = np.random.randn(g, k).astype(np.float32) * sigma;
    # Relative pathway sizes: Poisson draws normalized so they total ~g.
    pathwaySizes = np.random.poisson(lam=avgGenesInPath, size=(numPathways, )) + 1
    pathwaySizes = (pathwaySizes / np.sum(pathwaySizes)) * g
    pathwaySizes = pathwaySizes.astype(int)
    # Dump the integer-rounding remainder into pathway 0 so sizes total g - 1.
    pathwaySizes[0] += g - 1 - np.sum(pathwaySizes)
    for i in range(numPathways):
        # Each pathway perturbs a random subset of the latent dimensions.
        numIndices = np.maximum(np.random.randint(low=int(k/4), high=k+1), 1)
        means = np.random.randn(numIndices).astype(np.float32) * sigma
        sigmas = np.random.uniform(low=0.0, high=sigma, size=(numIndices))
        chosenIndices = np.random.choice(k, size=(numIndices,), replace=False)
        numGenes = pathwaySizes[i]
        chosenGeneIndices = np.random.choice(len(remainingGeneIndices), size=(numGenes,), replace=False)
        chosenGenes = remainingGeneIndices[chosenGeneIndices]
        remainingGeneIndices = np.delete(remainingGeneIndices, chosenGeneIndices)
        if i == 0:
            # Reserved gene 0 joins pathway 0; its members become the phenotype genes.
            chosenGenes = np.append(chosenGenes, 0)
            phenotypeGenes = chosenGenes
            numGenes = numGenes + 1
        V[chosenGenes[:, None], chosenIndices] = np.random.multivariate_normal(means,
                                                                               np.diag(sigmas),
                                                                               size=(numGenes,))
        binaryPathwayMatrix[chosenGenes, i] = 1
    # Gene 0's membership is erased — presumably so the phenotype driver is not
    # discoverable from the pathway matrix; TODO confirm intent.
    binaryPathwayMatrix[0, :] = np.zeros(numPathways)
    return U, V, binaryPathwayMatrix, phenotypeGenes
#Helper functions
def GetNeighborDictionary(binaryPathwayMatrix, percentileThreshold=95):
    """Build a gene-neighbour graph from pathway co-membership.

    Two genes are linked by the number of pathways they share; per gene, only
    links at or above the given percentile of that gene's link strengths are
    kept, and the surviving weights are normalized to sum to 1.

    Args:
        binaryPathwayMatrix: (g, k) binary gene-to-pathway membership.
        percentileThreshold: per-row percentile below which links are dropped.

    Returns:
        Dict mapping gene index -> list of [neighbour index, normalized weight].
        Genes with no pathway membership or no surviving links are omitted.
    """
    active = np.where(np.any(binaryPathwayMatrix, axis=1))[0].astype(np.int32)
    activeMat = binaryPathwayMatrix[active, :]
    degree = np.dot(activeMat, activeMat.T)
    np.fill_diagonal(degree, 0.0)
    cutoffs = np.percentile(degree, percentileThreshold, axis=1)
    degree[degree < cutoffs[:, None]] = 0
    rowTotals = np.sum(degree, axis=1)
    neighbors = {}
    for row in range(len(active)):
        total = rowTotals[row]
        if total == 0:
            continue
        cols = degree[row, :].nonzero()[0]
        neighbors[active[row]] = [[active[c], degree[row, c] / total] for c in cols]
    return neighbors
def MatToMeltDF(im, group_name, x_values=np.arange(400),
                x_name='percent identified as significant', y_name='percent identified actually significant'):
    """Convert a (reps x points) matrix into a long-format DataFrame for plotting.

    If x_values is longer than the matrix's second axis, the final column is
    repeated to pad it out; extra matrix columns beyond len(x_values) are cut.
    Each output row carries one (x, y) pair plus the rep id and `group` label.
    """
    _, numPoints = im.shape
    numX = len(x_values)
    if numX > numPoints:
        pad = np.tile(im[:, -1], (numX - numPoints, 1)).T
        im = np.concatenate([im, pad], axis=1)
    melted = pd.DataFrame(im[:, :numX].T)
    melted[x_name] = x_values
    melted = pd.melt(melted, id_vars=[x_name], value_name=y_name)
    melted['group'] = group_name
    return melted
def GetMeanErrorDF(errorsDF, num_folds=5):
    """Average per-fold error curves position-wise across CV folds.

    Rows belonging to each fold are re-indexed from 0, stacked, and averaged
    by position, so row i of the result is the mean of every fold's i-th row.
    """
    per_fold = [
        errorsDF[errorsDF['fold'] == fold].drop('fold', axis=1).reset_index(drop=True)
        for fold in range(num_folds)
    ]
    stacked = pd.concat(per_fold, axis=0)
    return stacked.groupby(stacked.index).mean()
def ScreePlot(X, var_ratio=0.9):
    """Draw a scree plot with per-component and cumulative explained variance,
    marking the smallest latent dimension whose cumulative ratio exceeds
    var_ratio.

    Side effects: draws onto the current matplotlib figure using twin y-axes.
    """
    X = X - np.mean(X, axis=0)  # center columns before PCA
    pca_model = PCA()
    pca_model.fit(X)
    # First component index at which cumulative variance passes the threshold.
    latent_dim = np.min(np.where(np.cumsum(pca_model.explained_variance_ratio_) > var_ratio)[0])
    plt.axvline(latent_dim, color='orange')
    ax = plt.gca()
    ax2 = plt.twinx()
    exp_var = sns.lineplot(x=np.arange(len(pca_model.explained_variance_ratio_)), y=pca_model.explained_variance_ratio_, ax=ax, color='b', label='Explained variance')
    sum_var = sns.lineplot(x=np.arange(len(pca_model.explained_variance_ratio_)), y=np.cumsum(pca_model.explained_variance_ratio_), ax=ax2, color='r', label='Cumulative explained variance')
    plt.title('Scree Plot with latent dimension {}'.format(latent_dim))
    # Merge both axes' legends into one box on the left axis.
    lines, labels = ax.get_legend_handles_labels()
    lines2, labels2 = ax2.get_legend_handles_labels()
    ax.legend(lines + lines2, labels + labels2, loc=2)
    ax2.get_legend().remove()
    # Align the right axis' ticks with the left axis' relative tick positions
    # so the two grids coincide.
    ylim1 = ax.get_ylim()
    len1 = ylim1[1]-ylim1[0]
    yticks1 = ax.get_yticks()
    rel_dist = [(y-ylim1[0])/len1 for y in yticks1]
    ylim2 = ax2.get_ylim()
    len2 = ylim2[1]-ylim2[0]
    yticks2 = [ry*len2+ylim2[0] for ry in rel_dist]
    ax2.set_yticks(yticks2)
    ax2.set_ylim(ylim2)
    ax.set_xlabel('Principal components')
    ax.set_ylabel('Percent variance')
    ax2.set_ylabel('Cumuluative percent variance')
    ax.set_axisbelow(True)
    ax2.set_axisbelow(True)
    ax2.grid(False)
def plotIndices(tg_summed, names, indices, x_values, ci=None):
    """Overlay one significance curve per selected index, labeled by `names`."""
    frames = [
        MatToMeltDF(tg_summed[:, idx, :], group_name=names[pos], x_values=x_values)
        for pos, idx in enumerate(indices)
    ]
    sns.lineplot(x='percent identified as significant',
                 y='percent identified actually significant',
                 hue='group', data=pd.concat(frames), ci=ci)
def DFtoDataset(df, n=500, scale=False):
    """Split a simulation DataFrame back into (X, binaryPathwayMatrix, phenotypeGenes).

    Args:
        df: frame with sample columns named '0'..'n-1', pathway columns named
            'pathway{i}', a 'phenotype_genes' indicator column, and one other
            column (hence the `- 2` below).
        n: number of sample columns.
        scale: if True, standardize X column-wise with sklearn.

    Returns:
        X: (n, genes) matrix, binaryPathwayMatrix: (genes, pathways) array,
        phenotypeGenes: indices of rows whose phenotype_genes flag equals 1.
    """
    X = df[[str(i) for i in np.arange(n)]].values.T
    if scale:
        # BUG FIX: `preprocessing` was referenced without ever being imported,
        # so scale=True raised NameError.  Import locally to keep module
        # import-time dependencies unchanged.
        from sklearn import preprocessing
        X = preprocessing.scale(X)
    binaryPathwayMatrix = df[['pathway{}'.format(i) for i in range(df.shape[1] - n - 2)]].values
    phenotypeGenes = df['phenotype_genes']
    phenotypeGenes = np.where(phenotypeGenes == 1)[0]
    return X, binaryPathwayMatrix, phenotypeGenes
|
{"/trainMFRealData.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTuning.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/archived/code/tests.py": ["/CrossValidation.py"], "/trainMFFinal.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTraining.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/CrossValidation.py": ["/MatrixFactorization.py", "/FeatureSimilarity.py"]}
|
35,434
|
psturmfels/cfAD
|
refs/heads/master
|
/archived/code/BuildModel.py
|
import tensorflow as tf
def GetEmbeddingVectors(userBatch, traitBatch):
    """Reuse the existing latent-factor variables and build dot-product predictions.

    Args:
        userBatch: int tensor of sample indices.
        traitBatch: int tensor of trait indices.

    Returns:
        (sampleEmbeddings, traitEmbeddings, customPred); customPred[i] is the
        inner product of the i-th sample and trait embedding rows.
    """
    with tf.variable_scope('LatentModel'):
        with tf.variable_scope('LatentFactors', reuse=tf.AUTO_REUSE):
            # AUTO_REUSE fetches the 'U'/'V' variables created by GetPredOps.
            U = tf.get_variable('U')
            V = tf.get_variable('V')
        sampleEmbeddings = tf.nn.embedding_lookup(U, userBatch, name = 'sampleEmbedCustom')
        traitEmbeddings = tf.nn.embedding_lookup(V, traitBatch, name = 'traitEmbedCustom')
        customPred = tf.reduce_sum(tf.multiply(sampleEmbeddings, traitEmbeddings), axis=1, name='customPred')
    return sampleEmbeddings, traitEmbeddings, customPred
def GetPredOps(numSamples, numTraits, userBatch, traitBatch, latentDim, device="/cpu:0"):
    """Create the latent-factor variables U, V and the batch prediction ops.

    Args:
        numSamples: number of rows of U.
        numTraits: number of rows of V.
        userBatch: int tensor of sample indices for the current batch.
        traitBatch: int tensor of trait indices for the current batch.
        latentDim: latent dimension k.
        device: device for the prediction math.  NOTE(review): the variables
            themselves are pinned to '/cpu:0' regardless of `device` — confirm
            this is intentional (common when sharing factors across GPUs).

    Returns:
        (sampleTraitPredictions, embeddingsRegularizer): per-example dot-product
        predictions and an L2 regularizer over the looked-up embeddings.
    """
    with tf.variable_scope('LatentModel'):
        with tf.device('/cpu:0'):
            with tf.variable_scope('LatentFactors', reuse=tf.AUTO_REUSE):
                U = tf.get_variable('U', shape=[numSamples, latentDim], initializer=tf.truncated_normal_initializer(stddev=0.02))
                V = tf.get_variable('V', shape=[numTraits, latentDim], initializer=tf.truncated_normal_initializer(stddev=0.02))
        with tf.variable_scope('VectorEmbeddings'):
            sampleEmbeddings = tf.nn.embedding_lookup(U, userBatch, name = 'sampleEmbeddings')
            traitEmbeddings = tf.nn.embedding_lookup(V, traitBatch, name = 'traitEmbeddings')
        with tf.device(device):
            with tf.variable_scope('VectorPredictions'):
                sampleTraitPredictions = tf.reduce_sum(tf.multiply(sampleEmbeddings, traitEmbeddings), axis=1, name='sampleTraitPredictions')
                embeddingsRegularizer = tf.add(tf.nn.l2_loss(sampleEmbeddings), tf.nn.l2_loss(traitEmbeddings), name='embeddingsRegularizer')
    return sampleTraitPredictions, embeddingsRegularizer
def GetOptOps(sampleTraitPredictions, embeddingsRegularizer, trueSampleTraitValues, learningRate=0.005, reg=0.02, device='/cpu:0'):
    """Build the loss (MSE + reg * L2) and a gradient-descent train op.

    Args:
        sampleTraitPredictions: per-example prediction op from GetPredOps.
        embeddingsRegularizer: L2 regularizer op from GetPredOps.
        trueSampleTraitValues: tensor of observed values for the batch.
        learningRate: SGD step size (may be a tf.Variable for live updates).
        reg: multiplier on the embedding regularizer (may also be a variable).
        device: device for the optimization ops.

    Returns:
        (trainOp, predictionLoss): the minimization op (incrementing the global
        step) and the plain MSE op.
    """
    globalStep = tf.train.get_global_step()
    if globalStep is None:
        globalStep = tf.train.create_global_step()
    with tf.device(device):
        with tf.variable_scope('ModelOptimization'):
            with tf.variable_scope('MeanSquaredError'):
                predictionLoss = tf.reduce_mean(tf.square(tf.subtract(sampleTraitPredictions, trueSampleTraitValues)))
            # total loss = MSE + reg * ||embeddings||^2
            lossFunctionOp = tf.add(predictionLoss, tf.multiply(reg, embeddingsRegularizer), name='lossFunction')
            trainOp = tf.train.GradientDescentOptimizer(learningRate).minimize(lossFunctionOp, global_step=globalStep)
    return trainOp, predictionLoss
|
{"/trainMFRealData.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTuning.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/archived/code/tests.py": ["/CrossValidation.py"], "/trainMFFinal.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTraining.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/CrossValidation.py": ["/MatrixFactorization.py", "/FeatureSimilarity.py"]}
|
35,435
|
psturmfels/cfAD
|
refs/heads/master
|
/archived/code/CrossValidation.py
|
from sklearn.model_selection import KFold
import tensorflow as tf
import numpy as np
from TrainModel import TrainModel
from BuildModel import *
from DataInput import *
#from joblib import Parallel, delayed
#class DataHolder: pass
#def GetPerfOnKFolds(dataHolder):
# splitIter = dataHolder.splitIter
# eta = dataHolder.eta
# lamb = dataHolder.lamb
#Consider running cross validation in parallel
def CrossValidateParams(X, latentDim, etas, lambs, foldcount=10):
    """k-fold grid search over (eta, lamb) for the TF matrix-factorization model.

    Args:
        X: (n, g) matrix; np.nan marks missing entries.
        latentDim: latent dimension of the factorization.
        etas: iterable of learning rates to try.
        lambs: iterable of regularization multipliers to try.
        foldcount: number of CV folds.

    Side effects:
        Builds a TF graph, trains one model per (eta, lamb, fold), writes
        summaries/checkpoints under ../summaries and ../checkpoints, and saves
        cv_errors.npy / etas.npy / lambs.npy in the working directory.
    """
    n, g = X.shape
    expressionMatrix = tf.constant(X, dtype=tf.float32, name='expressionMatrix')
    knownIndices = np.argwhere(~np.isnan(X))
    numKnown, _ = knownIndices.shape
    np.random.shuffle(knownIndices)
    kf = KFold(n_splits=foldcount, shuffle=True)
    # NOTE(review): `errors` is allocated again below; this first array is dead.
    errors = np.zeros((len(etas), len(lambs), foldcount))
    batchSizeTrain = 1
    # A single test batch large enough to hold one full fold.
    batchSizeTest = int(numKnown / foldcount) + 1
    #Create the data ingestion operations
    with tf.variable_scope('DataPipeline'):
        iter, indexBatch = CreateIterator()
        sampleIndexBatchOp, traitIndexBatchOp, trainDataBatchOp = GetBatchOperation(expressionMatrix, indexBatch)
    #Create some helper operations to set hyper-parameters without having to rebuild model each time
    learningRate = tf.get_variable('learningRate', shape=(), dtype=tf.float32)
    regMult = tf.get_variable('regMultiplier', shape=(), dtype=tf.float32)
    learningRateInput = tf.placeholder(dtype=tf.float32, shape=(), name='learningRateInput')
    regMultInput = tf.placeholder(dtype=tf.float32, shape=(), name='regMultiplierInput')
    assignLearningRateOp = tf.assign(learningRate, learningRateInput)
    assignRegMultOp = tf.assign(regMult, regMultInput)
    #Create the model operations
    print('Building the model graph...')
    sampleTraitPredictions, embeddingsRegularizer = GetPredOps(n, g, sampleIndexBatchOp, traitIndexBatchOp, latentDim)
    trainOp, predictionLoss = GetOptOps(sampleTraitPredictions, embeddingsRegularizer, trainDataBatchOp,
                                        learningRate=learningRate, reg=regMult)
    errors = np.zeros((len(etas), len(lambs), foldcount))
    with tf.Session() as sess:
        #Loop through all of the folds
        fold = 0
        for train_index, test_index in kf.split(knownIndices):
            trainIndices = knownIndices[train_index]
            testIndices = knownIndices[test_index]
            #Link iterator to the current indices
            with tf.variable_scope('DataPipeline'):
                trainSet = GetDataSet(batchSizeTrain, trainIndices)
                testSet = GetDataSet(batchSizeTest, testIndices, shuffle=False)
                trainInitOp = GetIterInitOp(iter, trainSet)
                testInitOp = GetIterInitOp(iter, testSet)
            lamb_ind = 0
            for lamb in lambs:
                eta_ind = 0
                for eta in etas:
                    # Re-initialize all variables for every hyper-parameter setting.
                    sess.run(tf.global_variables_initializer())
                    #Assign the current hyper-parameters
                    # NOTE(review): this rebinds the Python names `learningRate`
                    # / `regMul` to plain floats; the tf.Variable handles are no
                    # longer needed past this point, so it is harmless.
                    learningRate, regMul = sess.run([assignLearningRateOp, assignRegMultOp],
                                                    feed_dict={
                                                        learningRateInput: eta,
                                                        regMultInput: lamb
                                                    })
                    summaryDir='../summaries/eta{}_lamb{}/fold{}/'.format(eta, lamb, fold)
                    checkpointDir='../checkpoints/eta{}_lamb{}/fold{}/'.format(eta, lamb, fold)
                    #TODO: finish this. Basically, write summaries to some place,
                    #and then collect all of the errors and write that some place to.
                    #Also, consider summarizing the actual latent representations themselves.
                    TrainModel(sess, trainOp, predictionLoss,
                               trainInitOp, testInitOp=testInitOp,
                               numEpochs=1, verbSteps=100, summaryDir=summaryDir,
                               checkpointDir=checkpointDir)
                    # Evaluate the held-out fold in one big batch.
                    sess.run(testInitOp)
                    testLoss = sess.run(predictionLoss)
                    errors[eta_ind, lamb_ind, fold] = testLoss
                    print('eta={}, lamb={}, fold={}, loss={}'.format(eta, lamb, fold, testLoss))
                    eta_ind += 1
                lamb_ind += 1
            fold += 1
    np.save('cv_errors', errors)
    np.save('etas', np.array(etas))
    np.save('lambs', np.array(lambs))
|
{"/trainMFRealData.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTuning.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/archived/code/tests.py": ["/CrossValidation.py"], "/trainMFFinal.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTraining.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/CrossValidation.py": ["/MatrixFactorization.py", "/FeatureSimilarity.py"]}
|
35,436
|
psturmfels/cfAD
|
refs/heads/master
|
/ReadData.py
|
import numpy as np
import pandas as pd
def GetDataFrame(filename, sep='\t', header=0):
    '''
    Read a delimited text file into a pandas DataFrame.

    Args:
        filename: path of the file to read.
        sep: field separator between entries (tab by default).
        header: row index holding the column names.

    Returns:
        The parsed DataFrame.
    '''
    frame = pd.read_csv(filename, sep=sep, header=header)
    return frame
def JoinGenes(df1, df2, index_name='PCG'):
    '''
    Outer-merge two gene-expression frames on their shared gene column.

    Either argument may be None, in which case the other frame is returned
    unchanged.

    Args:
        df1: first frame (one row per gene) or None.
        df2: second frame or None.
        index_name: name of the column to merge on.

    Returns:
        The merged frame (outer join), or the non-None input.
    '''
    if df1 is None:
        return df2
    if df2 is None:
        return df1
    return df1.merge(df2, on=index_name, how='outer')
def JoinGenePheno(geneDF, phenoDF):
    '''
    Stack a phenotype table under a gene-expression table.

    Args:
        geneDF: frame indexed by 'PCG' (one row per gene); columns are integer
            sample ids.
        phenoDF: frame with a 'sample_name' column (one row per sample) and one
            column per phenotype.

    Returns:
        One frame whose rows are genes followed by phenotypes and whose columns
        are integer sample ids.
    '''
    # Transpose so phenotypes become rows and align the index name with geneDF.
    # BUG FIX: rename_axis(None, 1) passed `axis` positionally, which newer
    # pandas rejects — pass axis=1 as a keyword.
    phenoDF = phenoDF.set_index('sample_name').T.rename_axis('PCG').rename_axis(None, axis=1).reset_index().set_index('PCG')
    # Sample names arrive as strings; geneDF uses integer column labels.
    phenoDF.columns = phenoDF.columns.astype(int)
    return pd.concat([geneDF, phenoDF], sort=False)
def JoinMultipleGenes(*dfs):
'''
Wrapper function to combine multiple gene expression data frames
horizontally.
Args:
*dfs: An unwrapped list of gene expression data frames.
Returns: A data frame in which all of the input data frames has been combined.
Raises:
ValueError: Raised if no inputs are given.
'''
if len(dfs) == 0:
raise ValueError('Cannot join an empty list of gene data frames.')
base_df = dfs[0]
for df in dfs[1:]:
base_df = JoinGenes(base_df, df)
return base_df
|
{"/trainMFRealData.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTuning.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/archived/code/tests.py": ["/CrossValidation.py"], "/trainMFFinal.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTraining.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/CrossValidation.py": ["/MatrixFactorization.py", "/FeatureSimilarity.py"]}
|
35,437
|
psturmfels/cfAD
|
refs/heads/master
|
/GetJSON.py
|
import glob
filename = 'dataFiles.json'
try:
with open(filename) as f:
root_file = eval(f.read())
except SyntaxError:
print('Unable to open the {} file. Terminating...'.format(filename))
except IOError:
print('Unable to find the {} file. Terminating...'.format(filename))
def can_get(attr):
return bool(glob.glob(get(attr) + '*'))
def get(attr, root=root_file):
node = root
for part in attr.split('.'):
node = node[part]
return node
|
{"/trainMFRealData.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTuning.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/archived/code/tests.py": ["/CrossValidation.py"], "/trainMFFinal.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTraining.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/CrossValidation.py": ["/MatrixFactorization.py", "/FeatureSimilarity.py"]}
|
35,438
|
psturmfels/cfAD
|
refs/heads/master
|
/notebooks/latentFactorSimulations/numGenesTuning.py
|
import sys
import os
import datetime
module_path = os.path.abspath(os.path.join('../..'))
if module_path not in sys.path:
sys.path.append(module_path)
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from multiprocessing import Pool
from functools import partial
from CrossValidation import *
from FeatureSimilarity import GetTopGenes
from MatrixFactorization import CreateLatentVariables, FactorizeMatrix, GetRepresentationError
from utils import *
def DFtoDataset(df, scale=False):
n = 500
X = df[[str(i) for i in np.arange(n)]].values.T
if (scale):
X = preprocessing.scale(X)
binaryPathwayMatrix = df[['pathway{}'.format(i) for i in range(df.shape[1] - n - 2)]].values
phenotypeGenes = df['phenotype_genes']
phenotypeGenes = np.where(phenotypeGenes == 1)[0]
return X, binaryPathwayMatrix, phenotypeGenes
for g in [1000, 3000, 5000, 7000]:
print('-------------Tuning on data with {} genes-------------'.format(g))
dataFileBase = '/projects/leelab3/psturm/simulatedData/varyDimData/g{}/df{}.csv'
df = pd.read_csv(dataFileBase.format(g, 0))
X, binaryPathwayMatrix, phenotypeGenes = DFtoDataset(df)
neighbors=GetNeighborDictionary(binaryPathwayMatrix)
pca = PCA(n_components=50)
pca.fit(X.T)
latent_dim = np.min(np.where(np.cumsum(pca.explained_variance_ratio_) > 0.95)[0])
num_folds=5
hyper_params = RandomParams(eta_low=0.001, eta_high=0.02, lamb1_low=0.001, lamb1_high=0.04, lamb2_low=0.001, lamb2_high=0.02, num_reps=50)
errorsDF, trainErrorDF, testErrorDF = CrossValidation(X, latent_dim, hyper_params, neighbors=neighbors, foldcount=num_folds, returnVectorDF=True, numProcesses=25)
errorsDF.to_csv('../../DataFrames/errorsDF_g{}.csv'.format(g), index=False)
trainErrorDF.to_csv('../../DataFrames/trainErrorDF_g{}.csv'.format(g), index=False)
testErrorDF.to_csv('../../DataFrames/testErrorDF_g{}.csv'.format(g), index=False)
|
{"/trainMFRealData.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTuning.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/archived/code/tests.py": ["/CrossValidation.py"], "/trainMFFinal.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTraining.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/CrossValidation.py": ["/MatrixFactorization.py", "/FeatureSimilarity.py"]}
|
35,439
|
psturmfels/cfAD
|
refs/heads/master
|
/archived/code/tests.py
|
from CrossValidation import *
import tensorflow as tf
import numpy as np
from BuildModel import *
from DataInput import *
from TrainModel import *
def InputTests():
n = 100
g = 1000
X = np.random.randint(0, 100, (n, g)).astype(np.float32) #some random data, for testing purposes
X[X < 10] = np.nan
expressionMatrix = tf.constant(X, dtype=tf.float32, name='expressionMatrix')
batchSizeTrain = 5 #some default constants, for testing purposes
batchSizeTest = 10
iters = 1000
knownIndices = np.argwhere(~np.isnan(X))
numberKnown, _ = knownIndices.shape
trainIndices = knownIndices[:int(numberKnown/2), :]
testIndices = knownIndices[int(numberKnown/2):, :]
trainInitOp, testInitOp, sampleIndexBatchOp, traitIndexBatchOp, trainDataBatchOp =\
CreateJointOps(expressionMatrix, batchSizeTrain, batchSizeTest, trainIndices, testIndices)
with tf.Session() as sess:
for i in range(iters):
print('Batch {} out of {}'.format(i + 1, iters), end='\r')
sess.run(trainInitOp)
sampleIndices, traitIndices, dataValues = sess.run([sampleIndexBatchOp, traitIndexBatchOp, trainDataBatchOp])
assert np.all(X[sampleIndices, traitIndices] == dataValues), "Assertion failed. dataValues = {}, but X values = {}".format(dataValues, X[sampleIndices, traitIndices])
sess.run(testInitOp)
sampleIndices, traitIndices, dataValues = sess.run([sampleIndexBatchOp, traitIndexBatchOp, trainDataBatchOp])
assert np.all(X[sampleIndices, traitIndices] == dataValues), "Assertion failed. dataValues = {}, but X values = {}".format(dataValues, X[sampleIndices, traitIndices])
print("Input tests passed.")
def TrainingTests():
n = 10
g = 100
latentDim = 5
#Create some random, low-rank data
U = np.random.randn(n, latentDim).astype(np.float32)
V = np.random.randn(g, latentDim).astype(np.float32)
X = np.dot(U, V.T)
expressionMatrix = tf.constant(X, dtype=tf.float32, name='expressionMatrix')
knownIndices = np.argwhere(~np.isnan(X))
#For testing purposes, we need to shuffle the indices. If we do not,
#we will be training on a chunk of the upper half of the matrix, but
#testing on the lower half of the matrix. This makes no sense,
#because we don't have any information about the latent variables we are testing on. Therefore, shuffle!
np.random.shuffle(knownIndices)
numberTestIndices = 200
testIndices = knownIndices[:numberTestIndices, :]
trainIndices = knownIndices[numberTestIndices:, :]
batchSizeTrain = 1
batchSizeTest = numberTestIndices
#Create the data ingestion operations
trainInitOp, testInitOp, sampleIndexBatchOp, traitIndexBatchOp, trainDataBatchOp =\
CreateJointOps(expressionMatrix, batchSizeTrain, batchSizeTest, trainIndices, testIndices)
#Create the model operations
sampleTraitPredictions, embeddingsRegularizer = GetPredOps(n, g, sampleIndexBatchOp, traitIndexBatchOp, latentDim)
trainOp, predictionLoss = GetOptOps(sampleTraitPredictions, embeddingsRegularizer, trainDataBatchOp)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
TrainModel(sess, trainOp, predictionLoss,
trainInitOp, testInitOp=testInitOp,
numEpochs=20,
device='/cpu:0', verbSteps=100,
summaryDir='../summaries/', checkpointDir='../checkpoints/model.ckpt')
sess.run(testInitOp)
testLoss = sess.run(predictionLoss)
print("Trained successfully with a final test loss of {}".format(testLoss))
def CrossValidationTests():
n = 10
g = 100
latentDim = 5
#Create some random, low-rank data
U = np.random.randn(n, latentDim).astype(np.float32)
V = np.random.randn(g, latentDim).astype(np.float32)
X = np.dot(U, V.T)
etas = [0.01, 0.005, 0.001]
lambs = [0.05, 0.02, 0.01]
CrossValidateParams(X, latentDim, etas, lambs, foldcount=10)
CrossValidationTests()
|
{"/trainMFRealData.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTuning.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/archived/code/tests.py": ["/CrossValidation.py"], "/trainMFFinal.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTraining.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/CrossValidation.py": ["/MatrixFactorization.py", "/FeatureSimilarity.py"]}
|
35,440
|
psturmfels/cfAD
|
refs/heads/master
|
/trainMFFinal.py
|
import numpy as np
import pandas as pd
import datetime
from multiprocessing import Pool
from functools import partial
from CrossValidation import *
from FeatureSimilarity import GetTopGenes
from MatrixFactorization import CreateLatentVariables, FactorizeMatrix, GetRepresentationError
from utils import *
from ReadData import *
from GetJSON import get
print('Reading in data...')
totalDataDF = pd.read_csv('/projects/leelab3/psturm/concatData/totalDataDF.csv', header=0, index_col=0)
binaryPathwayDF = pd.read_csv('/projects/leelab3/psturm/concatData/pathways.tsv', sep='\t', header=0)
binaryPathwayDF.set_index('Genes', inplace=True)
X = totalDataDF.values.T
n, g = X.shape
print('Projecting onto principal components...')
completeMat = totalDataDF.dropna(axis=0).values
pca = PCA(n_components=500)
projectedX = pca.fit_transform(completeMat.T)
latent_dim = np.min(np.where(np.cumsum(pca.explained_variance_ratio_) > 0.90)[0])
print('Latent dimension is: {}'.format(latent_dim))
binaryPathwayMat = binaryPathwayDF.values
neighbors = GetNeighborDictionary(binaryPathwayMat)
eta = 0.01
lamb1 = 0.04
lamb2 = 0.02
print('Factoring matrix...')
U_init, V_init = CreateLatentVariables(n, g, latent_dim)
U, V, trainError, testError = FactorizeMatrix(X, U_init, V_init, neighbors, eta=eta, lamb1=lamb1, lamb2=lamb2, num_epochs=10, returnErrorVectors=True)
np.save('/projects/leelab3/psturm/realModels/U.npy', U)
np.save('/projects/leelab3/psturm/realModels/V.npy', V)
np.save('/projects/leelab3/psturm/realModels/trainError.npy', trainError)
np.save('/projects/leelab3/psturm/realModels/testError.npy', testError)
|
{"/trainMFRealData.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTuning.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/archived/code/tests.py": ["/CrossValidation.py"], "/trainMFFinal.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTraining.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/CrossValidation.py": ["/MatrixFactorization.py", "/FeatureSimilarity.py"]}
|
35,441
|
psturmfels/cfAD
|
refs/heads/master
|
/archived/code/ScoreTraits.py
|
import numpy as np
def GetTopTraits(V, trait_index, gene_indices=None, c=None):
'''
Returns the top traits associated with a trait given by trait_index index, assuming
V is a latent gene-phenotype matrix.
Args:
V: A g x k matrix where each row represents the latent representation of a gene or phenotype.
trait_index: An index in [0, g - 1] that represents the target phenotype.
gene_indices: An optional parameter denoting which rows of V to search through for top genes.
If None, searches through all rows of V.
c: An optional parameter denoting how many genes to return. If c is None,
returns all genes.
Returns:
A list of indices corresponding to the rows of V, sorted in order of relevance
to the target phenotype.
'''
phenotype_vector = V[trait_index, :]
if gene_indices is not None:
V = V[gene_indices, :]
association_scores = np.dot(V, phenotype_vector)
top_gene_indices = association_scores.argsort()[::-1]
if gene_indices is not None:
top_gene_indices = gene_indices[top_gene_indices]
if c is not None:
top_gene_indices = top_gene_indices[:c]
return top_gene_indices
|
{"/trainMFRealData.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTuning.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/archived/code/tests.py": ["/CrossValidation.py"], "/trainMFFinal.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTraining.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/CrossValidation.py": ["/MatrixFactorization.py", "/FeatureSimilarity.py"]}
|
35,442
|
psturmfels/cfAD
|
refs/heads/master
|
/archived/code/TrainModel.py
|
import tensorflow as tf
import numpy as np
def TrainModel(sess, trainOp, predictionLoss,
trainInitOp, testInitOp=None,
numEpochs=200, numEpochsEarlyStop=20,
device='/cpu:0', verbSteps=None,
summaryDir=None, checkpointDir=None,
restore=False):
#Set up writers to plot the loss over time on the training and test sets
if summaryDir is not None:
lossSummary = tf.summary.scalar("Prediction Loss", predictionLoss)
trainWriter = tf.summary.FileWriter(summaryDir + 'train/', sess.graph)
testWriter = tf.summary.FileWriter(summaryDir + 'test/')
summaryOp = tf.summary.merge_all()
#Restore the model if desired
if checkpointDir is not None:
saver = tf.train.Saver()
import os
if not os.path.exists(checkpointDir):
os.makedirs(checkpointDir)
if restore:
saver.restore(sess, checkpointDir)
testLoss = '?'
bestTestLoss = np.inf
epochsSinceBest = 0
for epoch in range(numEpochs):
sess.run(trainInitOp)
j = 0
#Iterate through an epoch of training
while True:
j = j + 1
try:
if summaryDir is not None:
summaryTrain, trainLoss, _ = sess.run([summaryOp, predictionLoss, trainOp])
else:
trainLoss, _ = sess.run([predictionLoss, trainOp])
if verbSteps is not None and j % verbSteps == 0:
print('Epoch {}/{}, batch {}, training loss = {}, test loss = {}'.format(epoch, numEpochs, j, trainLoss, testLoss), end='\r')
except tf.errors.OutOfRangeError:
break
#Summarize the training error
if summaryDir is not None:
trainWriter.add_summary(summaryTrain, epoch)
#Summarize the test error, if desired
if testInitOp is not None:
sess.run(testInitOp)
if summaryDir is not None:
summaryTest, testLoss = sess.run([summaryOp, predictionLoss])
testWriter.add_summary(summaryTest, epoch)
else:
testLoss = sess.run(predictionLoss)
#Stop training if test loss hasn't improved in numEpochsEarlyStop epochs
if bestTestLoss > testLoss:
bestTestLoss = testLoss
epochsSinceBest = 0
else:
epochsSinceBest += 1
if epochsSinceBest > numEpochsEarlyStop:
print('Reached early stopping criteria. Performance has not improved for {} epochs.'.format(numEpochsEarlyStop))
break
print('Epoch {}/{}, batch {}, training loss = {}, test loss = {}'.format(epoch, numEpochs, j, trainLoss, testLoss), end='\r')
#Save the model to a the checkpoint, if desired
if checkpointDir is not None:
saver.save(sess, checkpointDir)
return
|
{"/trainMFRealData.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTuning.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/archived/code/tests.py": ["/CrossValidation.py"], "/trainMFFinal.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTraining.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/CrossValidation.py": ["/MatrixFactorization.py", "/FeatureSimilarity.py"]}
|
35,443
|
psturmfels/cfAD
|
refs/heads/master
|
/MatrixFactorization.py
|
import numpy as np
from MF import Factor, Get_prediction_error
#Helper python script to interface with C++ functions
def CreateLatentVariables(n, g, k, sigma=0.02):
U = np.random.randn(n, k) * sigma
V = np.random.randn(g, k) * sigma
return U, V
def FactorizeMatrix(X, U, V, neighbors=None, eta=0.005, lamb1=0.02, lamb2=0.001,
num_epochs=10, trainIndices=None, testIndices=None, returnErrorVectors=False):
'''
Factorizes the sparse matrix X into the product of two rank k matrices
U and V using stochastic gradient descent.
Args:
X: An n x g, possibly sparse numpy matrix, where missing entries are indicated by np.nan values,
where n represents the number of samples and g represents the number of genes, or items.
U: The latent sample matrix.
V: The latent trait matrix.
neighbors: A python dictionary whose keys are integer indices corresponding to
indices in range(g), and whose values are lists of indices corresponding
to neighbors of the keys.
eta: The learning rate (multiplicative factor applied to the gradient).
lamb: Hyper-parameter controlling how much to regularize the latent representations.
num_epochs: The number of epochs to run SGD over. The default is 100.
trainIndices : An optional t x 2 matrix, each row of which represents the index
of a known entry in X. Used to train on only a subset of known entries.
If a vector is provided, assumes that the vector denotes the indices of
samples to use as training.
If None is provided, then the algorithm will train over all non nan values.
Returns:
Matrices U and V representing the latent vectors for each sample and each gene, respectively.
'''
if neighbors is None:
neighbors = {}
if trainIndices is None and testIndices is None:
knownIndices = np.argwhere(~np.isnan(X)).astype(np.int32)
np.random.shuffle(knownIndices)
numIndices, _ = knownIndices.shape
cutoff = int(numIndices * 0.9)
testIndices = knownIndices[cutoff:, :]
trainIndices = knownIndices[:cutoff, :]
elif testIndices is None:
trainIndices = trainIndices.astype(np.int32)
numIndices, _ = trainIndices.shape
cutoff = int(numIndices * 0.9)
testIndices = trainIndices[cutoff:, :]
trainIndices = trainIndices[:cutoff, :]
X = X.astype(np.float32)
U = U.astype(np.float32)
V = V.astype(np.float32)
if (returnErrorVectors):
trainError = np.empty((num_epochs,)).astype(np.float32)
testError = np.empty((num_epochs,)).astype(np.float32)
trainError.fill(np.nan)
testError.fill(np.nan)
Factor(X, U, V, trainIndices, testIndices, neighbors,
trainError, testError, True,
eta, lamb1, lamb2, num_epochs)
trainError = trainError[~np.isnan(trainError)]
testError = testError[~np.isnan(testError)]
return U, V, trainError, testError
else:
Factor(X, U, V, trainIndices, testIndices, neighbors,
np.array([]), np.array([]), False,
eta, lamb1, lamb2, num_epochs)
return U, V
def GetRepresentationError(X, U, V, known_indices=None):
if (known_indices is None):
known_indices = np.argwhere(~np.isnan(X)).astype(np.int32)
return Get_prediction_error(X, U, V, known_indices)
|
{"/trainMFRealData.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTuning.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/archived/code/tests.py": ["/CrossValidation.py"], "/trainMFFinal.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTraining.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/CrossValidation.py": ["/MatrixFactorization.py", "/FeatureSimilarity.py"]}
|
35,444
|
psturmfels/cfAD
|
refs/heads/master
|
/archived/code/main.py
|
import tensorflow as tf
import numpy as np
from BuildModel import *
from DataInput import *
from TrainModel import *
def main():
main()
|
{"/trainMFRealData.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTuning.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/archived/code/tests.py": ["/CrossValidation.py"], "/trainMFFinal.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTraining.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/CrossValidation.py": ["/MatrixFactorization.py", "/FeatureSimilarity.py"]}
|
35,445
|
psturmfels/cfAD
|
refs/heads/master
|
/notebooks/latentFactorSimulations/numGenesTraining.py
|
import sys
import os
import datetime
module_path = os.path.abspath(os.path.join('../..'))
if module_path not in sys.path:
sys.path.append(module_path)
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from multiprocessing import Pool
from functools import partial
from CrossValidation import *
from FeatureSimilarity import GetTopGenes
from MatrixFactorization import CreateLatentVariables, FactorizeMatrix, GetRepresentationError
from utils import *
numReps = 50
etas = [0.006872, 0.004405, 0.003169, 0.003933]
lambs_1 = [0.001322, 0.018094, 0.007227, 0.004865]
lambs_2 = [0.013549, 0.009637, 0.016451, 0.010235]
g_list = [1000, 3000, 5000, 7000]
def TrainReps(rep):
print('-------------Training data on dataset {}-------------'.format(rep))
for i in range(len(g_list)):
g = g_list[i]
eta = etas[i]
lamb1 = lambs_1[i]
lamb2 = lambs_2[i]
print('rep {}, g {}'.format(rep, g))
dataFileBase = '/projects/leelab3/psturm/simulatedData/varyDimData/g{}/df{}.csv'
df = pd.read_csv(dataFileBase.format(g, rep))
X, binaryPathwayMatrix, phenotypeGenes = DFtoDataset(df)
n, _ = X.shape
neighbors = GetNeighborDictionary(binaryPathwayMatrix, percentileThreshold=95)
pca = PCA(n_components=50)
projectedX = pca.fit_transform(X.T)
latent_dim = np.min(np.where(np.cumsum(pca.explained_variance_ratio_) > 0.95)[0])
U_pred_init, V_pred_init = CreateLatentVariables(n, g, latent_dim)
U_pred, V_pred = FactorizeMatrix(X, U_pred_init, V_pred_init, neighbors,
eta=eta, lamb1=lamb1, lamb2=lamb2, num_epochs=10)
np.save('/projects/leelab3/psturm/simulatedModels/geneModels/g{}/U{}.npy'.format(g, rep), U_pred)
np.save('/projects/leelab3/psturm/simulatedModels/geneModels/g{}/V{}.npy'.format(g, rep), V_pred)
numProcesses = 25
p = Pool(numProcesses)
p.map(TrainReps, range(numReps))
p.close()
p.join()
|
{"/trainMFRealData.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTuning.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/archived/code/tests.py": ["/CrossValidation.py"], "/trainMFFinal.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTraining.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/CrossValidation.py": ["/MatrixFactorization.py", "/FeatureSimilarity.py"]}
|
35,446
|
psturmfels/cfAD
|
refs/heads/master
|
/CrossValidation.py
|
import numpy as np
import pandas as pd
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
from multiprocessing import Pool
from functools import partial
from sklearn.model_selection import KFold
from MatrixFactorization import FactorizeMatrix, GetRepresentationError, CreateLatentVariables
from FeatureSimilarity import GetTopGenes
def RandomParams(eta_low, eta_high, lamb1_low, lamb1_high, lamb2_low, lamb2_high, num_reps=20):
hyper_params = np.zeros((num_reps, 3)).astype(np.float32)
hyper_params[:, 0] = np.random.uniform(low=eta_low, high=eta_high, size=(num_reps,))
hyper_params[:, 1] = np.random.uniform(low=lamb1_low, high=lamb1_high, size=(num_reps,))
hyper_params[:, 2] = np.random.uniform(low=lamb2_low, high=lamb2_high, size=(num_reps,))
return hyper_params
def TrainOnParams(params, X, k, neighbors, train_indices, test_indices):
print('.', end='')
n, g = X.shape
eta, lamb1, lamb2 = params
U, V = CreateLatentVariables(n, g, k)
U, V = FactorizeMatrix(X, U, V, neighbors, eta=eta, lamb1=lamb1, lamb2=lamb2, trainIndices=train_indices)
paramError = GetRepresentationError(X, U, V, known_indices=test_indices)
return paramError
def TrainVerboseOnParams(params, X, k, neighbors, train_indices, test_indices):
print('.', end='')
n, g = X.shape
eta, lamb1, lamb2 = params
U, V = CreateLatentVariables(n, g, k)
U, V, trainError, testError = FactorizeMatrix(X, U, V, neighbors, eta=eta, lamb1=lamb1, lamb2=lamb2, trainIndices=train_indices, returnErrorVectors=True)
paramError = GetRepresentationError(X, U, V, known_indices=test_indices)
return paramError, trainError, testError
def CrossValidation(X, k, hyper_params, neighbors=None, foldcount=5, returnVectorDF=False, numProcesses=20):
'''
Runs the matrix factorization algorithm for each specified value of eta and lambda
and computes the reconstruction errors for each run.
Args:
X: An n x g, possibly sparse numpy matrix, where missing entries are indicated by np.nan values,
where n represents the number of samples and g represents the number of genes, or items.
k: The latent dimension of the factorization. Typically, k < min(n, g).
hyper_params: A list of tuples, each corresponding to a setting of hyper parameters (eta, lamb1, lamb2).
foldcount: An integer denoting the number of folds for cross validation.
Returns:
A len(etas) x len(lambs) x foldcount tensor denoting the reconstruction error for each
setting of eta and lambda on each fold.
'''
n, g = X.shape
kf = KFold(n_splits=foldcount, shuffle=True)
errorsDF = pd.DataFrame(np.zeros((len(hyper_params) * foldcount, 5)))
errorsDF.columns = ['eta', 'lamb1', 'lamb2', 'error', 'fold']
#Okay not to shuffle because kf shuffles for you
known_indices = np.argwhere(~np.isnan(X)).astype(np.int32)
np.random.shuffle(known_indices)
if returnVectorDF:
trainErrorDF = pd.DataFrame()
testErrorDF = pd.DataFrame()
fold = 0
df_index = 0
p = Pool(numProcesses)
for train_index, test_index in kf.split(known_indices):
print('Training fold {}'.format(fold))
if returnVectorDF:
foldTrainDF = pd.DataFrame()
foldTestDF = pd.DataFrame()
train_indices = known_indices[train_index].astype(np.int32)
test_indices = known_indices[test_index].astype(np.int32)
if (returnVectorDF):
errorVec = p.map(partial(TrainVerboseOnParams, X=X, k=k, neighbors=neighbors,
train_indices=train_indices, test_indices=test_indices), hyper_params)
for i in range(len(hyper_params)):
eta, lamb1, lamb2 = hyper_params[i]
paramError, trainError, testError = errorVec[i]
foldTrainDF = pd.concat([foldTrainDF,
pd.DataFrame({
'eta{:.5f}_lamb1{:.5f}_lamb2{:.5f}'.format(eta, lamb1, lamb2): trainError
})
], axis=1)
foldTestDF = pd.concat([foldTestDF,
pd.DataFrame({
'eta{:.5f}_lamb1{:.5f}_lamb2{:.5f}'.format(eta, lamb1, lamb2): testError
})
], axis=1)
errorsDF.iloc[df_index] = np.array([eta, lamb1, lamb2, paramError, fold])
df_index += 1
else:
errorVec = p.map(partial(TrainOnParams, X=X, k=k, neighbors=neighbors,
train_indices=train_indices, test_indices=test_indices), hyper_params)
for i in range(len(hyper_params)):
eta, lamb1, lamb2 = hyper_params[i]
paramError = errorVec[i]
errorsDF.iloc[df_index] = np.array([eta, lamb1, lamb2, paramError, fold])
df_index += 1
if returnVectorDF:
foldTrainDF['fold'] = fold
foldTestDF['fold'] = fold
maxEpochs, _ = foldTrainDF.shape
foldTrainDF['epochs'] = np.arange(maxEpochs).astype(np.float32)
foldTestDF['epochs'] = np.arange(maxEpochs).astype(np.float32)
trainErrorDF = pd.concat([trainErrorDF, foldTrainDF])
testErrorDF = pd.concat([testErrorDF, foldTestDF])
fold = fold + 1
p.close()
p.join()
if returnVectorDF:
return errorsDF, trainErrorDF, testErrorDF
else:
return errorsDF
def PlotErrorDF(errorDF, id_vars=['epochs', 'fold'], ax=None):
data = pd.melt(errorDF, id_vars=id_vars, value_name='error', var_name='run')
if ax is not None:
ax = sns.lineplot(x='epochs', y ='error', hue='run', data=data, ax=ax, legend=False)
else:
ax = sns.lineplot(x='epochs', y ='error', hue='run', data=data, legend='brief')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
return ax
def PlotParamDF(paramDF, id_vars=['error', 'fold'], ax=None):
data = pd.melt(paramDF, id_vars=id_vars, value_name='param_value', var_name='param_type')
if ax is not None:
ax = sns.lineplot(x='param_value', y='error', hue='param_type', data=data, ax=ax)
else:
ax = sns.lineplot(x='param_value', y='error', hue='param_type', data=data)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
return ax
|
{"/trainMFRealData.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTuning.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/archived/code/tests.py": ["/CrossValidation.py"], "/trainMFFinal.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTraining.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/CrossValidation.py": ["/MatrixFactorization.py", "/FeatureSimilarity.py"]}
|
35,447
|
psturmfels/cfAD
|
refs/heads/master
|
/FeatureSimilarity.py
|
import numpy as np
def GetTopGenes(V, phenotype_index, gene_indices=None, c=None, rankType='dot', sortFunc=np.abs):
'''
Returns the top genes associated with phenotype given by pheno_type index, assuming
V is a latent gene-phenotype matrix.
Args:
V: A g x k matrix where each row represents the latent representation of a gene or phenotype.
phenotype_index: An index in [0, g - 1] that represents the target phenotype.
gene_indices: An optional parameter denoting which rows of V to search through for top genes.
If None, searches through all rows of V.
c: An optional parameter denoting how many genes to return. If c is None,
returns all genes.
Returns:
A list of indices corresponding to the rows of V, sorted in order of relevance
to the target phenotype.
'''
#NOTE: Try changing this to cosine similarity? Or correlation?
phenotype_vector = V[phenotype_index, :]
if gene_indices is not None:
V = V[gene_indices, :]
assert rankType in ['dist', 'corr', 'cos', 'dot'], 'rankType must be one of dist, corr, cos, dot'
if rankType == 'dist':
association_scores = -1.0 * np.linalg.norm(V - phenotype_vector, axis=1)
else:
if rankType == 'corr':
phenotype_vector = phenotype_vector - np.nanmean(phenotype_vector)
V = V - np.nanmean(V, axis=1, keepdims=True)
association_scores = np.dot(V, phenotype_vector)
if rankType == 'cos' or rankType == 'corr':
pheno_norm = np.linalg.norm(phenotype_vector)
V_norms = np.linalg.norm(V, axis=1)
association_scores = association_scores / (pheno_norm * V_norms)
if sortFunc is not None:
top_gene_indices = sortFunc(association_scores).argsort()[::-1]
else:
top_gene_indices = association_scores.argsort()[::-1]
if gene_indices is not None:
top_gene_indices = gene_indices[top_gene_indices]
if c is not None:
top_gene_indices = top_gene_indices[:c]
return top_gene_indices
def GetTopGenesMulti(V, phenotype_indices, gene_indices=None, aggFunc=np.mean):
    """Rank genes by aggregated Euclidean distance to several phenotype rows.

    For each candidate gene row, the distance to every phenotype row is
    computed and reduced with ``aggFunc`` (mean by default); genes are
    returned from closest to farthest.
    """
    targets = V[phenotype_indices, :].T  # shape (k, p)
    if gene_indices is not None:
        V = V[gene_indices, :]
    # (g, p) matrix of distances, aggregated across phenotypes and negated
    # so that larger score == closer.
    dists = np.linalg.norm(V[:, :, None] - targets[None, :, :], axis=1)
    scores = -1.0 * aggFunc(dists, axis=1)
    ranked = scores.argsort()[::-1]
    return ranked if gene_indices is None else gene_indices[ranked]
|
{"/trainMFRealData.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTuning.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/archived/code/tests.py": ["/CrossValidation.py"], "/trainMFFinal.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py", "/ReadData.py", "/GetJSON.py"], "/notebooks/latentFactorSimulations/numGenesTraining.py": ["/CrossValidation.py", "/FeatureSimilarity.py", "/MatrixFactorization.py", "/utils.py"], "/CrossValidation.py": ["/MatrixFactorization.py", "/FeatureSimilarity.py"]}
|
35,448
|
juandebravo/redash-reql
|
refs/heads/master
|
/setup.py
|
#!/usr/bin/env python
#
# For developement:
#
# pip install -e .[dev]
#
# For packaging first install the latest versions of the tooling:
#
# pip install --upgrade pip setuptools wheel twine
# pip install -e .[dev]
#
import sys
from setuptools import setup, find_packages
from distutils.util import convert_path
# Fetch version without importing the package
version_globals = {}  # type: ignore
with open(convert_path('redash_reql/version.py')) as fd:
    exec(fd.read(), version_globals)

setup(
    name='redash_reql',
    version=version_globals['__version__'],
    author='Iván Montes Velencoso',
    author_email='drslump@pollinimini.net',
    url='https://github.com/drslump/redash-reql',
    license='LICENSE.txt',
    description='ReDash ReQL query runner.',
    long_description=open('README.md').read(),
    long_description_content_type="text/markdown",
    # NOTE(review): setuptools documents ``classifiers`` as a list; the
    # tuple works with current versions but a list is the canonical form.
    classifiers=(
        "Development Status :: 2 - Pre-Alpha",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 2.7",
    ),
    keywords='redash sqlite',
    project_urls={  # Optional
        'Bug Reports': 'https://github.com/drslump/redash-reql/issues',
        'Source': 'https://github.com/drslump/redash-reql',
        # NOTE(review): probably meant https://twitter.com/drslump — confirm.
        'Say Thanks!': 'https://twitter/drslump',
    },
    packages=find_packages(exclude=['tests']),
    # Runtime dependency: the lark LALR parser generator (pinned).
    install_requires=[
        "lark-parser==0.6.4",
    ],
    extras_require={
        "dev": [
            "pytest",
            "pytest-runner",
        ]
    },
    package_data={},
    data_files=[]
)
|
{"/redash_reql/__init__.py": ["/redash_reql/parser.py", "/redash_reql/query_runner.py"], "/redash_reql/query_runner.py": ["/redash_reql/parser.py"]}
|
35,449
|
juandebravo/redash-reql
|
refs/heads/master
|
/redash_reql/parser.py
|
import sys
from lark import Lark, Visitor, Tree
SQL_GRAMMAR = r'''
// SQL syntax for SELECTs (based on sqlite3)
// https://www.sqlite.org/lang_select.html
//
// Basic grammar is modeled from sqlite.
//
?start : stmt (";"+ stmt?)*
| ";"*
stmt : select_stmt
| reql_set_stmt
compound_expr : expr ("," expr)*
?expr : expr_or
?expr_or : expr_and ( OR expr_and )*
?expr_and : expr_not ( AND expr_not )*
?expr_not : NOT+ expr_weird
| expr_weird
?expr_weird : EXISTS "(" select_stmt ")" -> expr_exists
| expr_binary NOT? BETWEEN expr_binary AND expr_binary -> expr_between
| expr_binary NOT? IN expr_binary -> expr_in
| expr_binary ( IS NULL | NOTNULL | NOT NULL ) -> expr_null
| expr_binary NOT? ( LIKE | GLOB | REGEXP ) expr_binary [ ESCAPE expr_binary ] -> expr_search
| expr_binary
| expr_binary NOT? MATCH expr_binary [ ESCAPE expr_binary ] -> expr_search
// TODO: shall we unwrap according to operator priority?
?expr_binary : expr_unary (op_binary expr_unary)*
?expr_unary : op_unary+ expr_func
| expr_unary COLLATE ident -> expr_collate
| expr_func
| expr_func ( "::" CNAME expr_parens? )+ -> expr_pgcast // reql
?expr_func : CASE expr? ( WHEN expr THEN expr )+ [ ELSE expr ] END -> expr_case
| CAST "(" expr AS type_ref ")" -> expr_cast
| ident_scoped expr_parens -> expr_call
| expr_parens
?expr_parens : "(" [ DISTINCT? expr_arg ("," expr_arg)* | ASTERISK ] ")"
| atom
expr_arg : expr
?atom : literal
| parameter
| ident_scoped
| "(" select_stmt ")" -> subquery
| "(" expr ")"
type_ref : CNAME [ "(" literal_number [ "," literal_number ] ")" ]
op_binary : "||" | "*" | "/" | "%" | "+" | "-"
| "<<" | ">>" | "&" | "|" | "<" | "<="
| ">" | ">=" | "=" | "==" | "!=" | "<>"
| IS | IS NOT
op_unary : "+" | "-" | "~"
parameter : PARAMETER // TODO: support extended tcl syntax?
alias : ident
| ident expr_parens?
| literal_string
?ident_scoped : ident ("." ident)* ["." ASTERISK]
?compound_ident : ident ("," ident)*
?compound_ident_scoped : ident_scoped ("," ident_scoped)*
?literal : literal_number
| literal_string
| NULL
| /x'([0-9A-Fa-f]+)'/ -> literal_blob
| CURRENT_TIME
| CURRENT_DATE
| CURRENT_TIMESTAMP
literal_string : SQUOTED
literal_number : NUMERIC
?table_or_subquery : table_ref [ INDEXED BY ident | NOT INDEXED ]
| "(" select_stmt ")" [ AS? alias ] -> subquery
| "(" join ")"
table_ref : ident_scoped [ AS? alias ]
| ident_scoped "(" compound_expr? ")" [ AS? alias ]
| reql_expr
cte : alias [ "(" compound_ident ")" ] AS "(" select_stmt ")"
| alias [ "(" compound_ident ")" ] AS reql_expr -> reql_cte
| alias [ "(" compound_ident ")" ] AS "(" reql_expr ")" -> reql_cte
?join : table_or_subquery ( op_join table_or_subquery join_constraint? )*
join_constraint : ON expr
| USING "(" compound_ident ")"
op_join : ","
| NATURAL? [ LEFT OUTER? | INNER | CROSS ] JOIN
column : ASTERISK
| expr [ AS? ident ]
| expr [ AS? (ident | literal_string) ]
?select_core : values
| select
values : VALUES ( expr_parens ("," expr_parens)* )
select : SELECT select_mod? column ("," column)* from? where? group? having? order?
select_mod : DISTINCT | ALL
from : FROM join
where : WHERE expr
group : GROUP BY compound_expr
having : HAVING expr
?compound_select : select_core ( op_compound select_core )*
op_compound : UNION ALL?
| INTERSECT
| EXCEPT
with : WITH RECURSIVE? cte ("," cte)*
order : ORDER BY ordering_term ("," ordering_term)*
ordering_term : expr [ ASC | DESC ]
limit : LIMIT expr [ ("OFFSET"i|",") expr ]
select_stmt : with? compound_select order? limit?
ident : CNAME | DQUOTED
| /\[([^\]].+?)\]/ // Access style [quotes]
//
// ReQL constructs
//
/////////////////////////////////////////////////////////
reql_expr : CNAME reql_params reql_mapper*
reql_params : "[" [ reql_param (","? reql_param)* ] "]" | reql_block
?reql_param : reql_pair | ident | literal | parameter
reql_pair : CNAME ":" (ident | literal | parameter | reql_block)
reql_block : /\[:([\s\S]*?):\]/ -> reql_block
| /\[=([\s\S]*?)=\]/ -> reql_block_verbatim
| /\[<([\s\S]*?)>\]/ -> reql_block_folded
reql_mapper : "::" CNAME reql_params?
reql_set_stmt : "SET"i CNAME "=" (literal | CNAME)
%import common.CNAME
%import common.NEWLINE
%ignore NEWLINE
%import common.WS
%ignore WS
COMMENT : "--" /[^\n]+?/? NEWLINE
| "/*" /(.|\n)*?/ "*/"
%ignore COMMENT
PARAMETER : ("$" | ":") CNAME
SQUOTED : "'" ( "''" | NEWLINE | /[^']+/ )* "'"
DQUOTED : "\"" ( "\"\"" | /[^"]+/ )* "\""
NUMERIC : ( DIGIT+ [ "." DIGIT+ ] | "." DIGIT+ ) [ ("e"|"E") [ "+"|"-" ] DIGIT+ ]
| ("0x"|"0X") HEXDIGIT+
DIGIT : "0".."9"
HEXDIGIT : "0".."9" | "A".."F" | "a".."f"
ALL : "ALL"i
AND : "AND"i
AS : "AS"i
ASC : "ASC"i
ASTERISK : "*"
BETWEEN : "BETWEEN"i
BY : "BY"i
CASE : "CASE"i
CAST : "CAST"i
COLLATE : "COLLATE"i
CROSS : "CROSS"i
CURRENT_DATE : "CURRENT_DATE"i
CURRENT_TIME : "CURRENT_TIME"i
CURRENT_TIMESTAMP : "CURRENT_TIMESTAMP"i
DESC : "DESC"i
DISTINCT : "DISTINCT"i
ELSE : "ELSE"i
END : "END"i
ESCAPE : "ESCAPE"i
EXCEPT : "EXCEPT"i
EXISTS : "EXISTS"i
FROM : "FROM"i
GLOB : "GLOB"i
GROUP : "GROUP"i
HAVING : "HAVING"i
IGNORE : "IGNORE"i
IN : "IN"i
INDEXED : "INDEXED"i
INNER : "INNER"i
INTERSECT : "INTERSECT"i
IS : "IS"i
ISNULL : "ISNULL"i
JOIN : "JOIN"i
LEFT : "LEFT"i
LIKE : "LIKE"i
LIMIT : "LIMIT"i
MATCH : "MATCH"i
NATURAL : "NATURAL"i
NOT : "NOT"i
NOTNULL : "NOTNULL"i
NULL : "NULL"i
ON : "ON"i
OR : "OR"i
ORDER : "ORDER"i
OUTER : "OUTER"i
RECURSIVE : "RECURSIVE"i
REGEXP : "REGEXP"i
SELECT : "SELECT"i
THEN : "THEN"i
UNION : "UNION"i
USING : "USING"i
VALUES : "VALUES"i
WHEN : "WHEN"i
WHERE : "WHERE"i
WITH : "WITH"i
'''
class ReqlParser(object):
    """Thin wrapper around a LALR ``Lark`` parser built from SQL_GRAMMAR."""

    def __init__(self, transformer=None, postlex=None):
        # Building the parser from the grammar is expensive; instances are
        # meant to be created once and reused (see query_runner).
        self.lark = Lark(
            SQL_GRAMMAR,
            start='start',
            parser='lalr',
            transformer=transformer,
            postlex=postlex)

    def parse(self, code, transformer=None):
        """Parse *code*; optionally run *transformer* over the resulting tree."""
        result = self.lark.parse(code)
        if transformer:
            transformer.transform(result)
        return result
|
{"/redash_reql/__init__.py": ["/redash_reql/parser.py", "/redash_reql/query_runner.py"], "/redash_reql/query_runner.py": ["/redash_reql/parser.py"]}
|
35,450
|
juandebravo/redash-reql
|
refs/heads/master
|
/tests/conftest.py
|
import os
import sys
import codecs
import re
import pytest
PATH = os.path.dirname(os.path.realpath(__file__))
# HACK: Relative imports until everything is properly integrated
sys.path = [ PATH + '/..' ] + sys.path
from parser import ReqlParser
from build_parser import build_parser
sys.path.pop(0)
def get_test_parser(dialects):
    """Generate a parser module for *dialects* and wrap it in a ReqlParser.

    ``build_parser`` prints the generated source to stdout, so stdout is
    temporarily redirected into a temp file, which is imported and then
    deleted again.
    """
    ftest = os.path.join(PATH, 'parser_gen_test_{0}.py'.format('_'.join(dialects)))
    try:
        with codecs.open(ftest, 'w', encoding='utf8') as fd:
            # Capture build_parser()'s stdout into the temp module file.
            stdout = sys.stdout
            sys.stdout = fd
            try:
                build_parser(dialects)
            finally:
                sys.stdout = stdout
        # NOTE(review): ``imp`` is deprecated since Python 3.4 (importlib).
        import imp
        module = imp.load_source('parser_gen', ftest)
    finally:
        # Always remove the generated file, even when the build fails.
        os.unlink(ftest)
        pass
    # NOTE(review): ReqlParser in parser.py takes (transformer, postlex);
    # confirm this ``module`` kwarg matches the parser version under test.
    return ReqlParser(module=module)
def load_fixtures(fname, skip=()):
    """Yield ``(location, query)`` pairs from a fixture file.

    Queries are ``;``-terminated blocks of lines; a query containing an
    ``@skip <name>`` annotation matching *skip* is not yielded.
    ``location`` is ``"<fname>:<line>"`` of the query's last line.
    """
    # NOTE(review): with the default skip=() the pattern becomes '@skip ()',
    # which matches *any* '@skip ' annotation — confirm that is intended.
    skip_re = r'@skip ({0})'.format('|'.join(re.escape(x) for x in skip))
    accum = []
    line_cnt = 0
    # Fix: open via a context manager so the file handle is closed even if
    # the consumer abandons the generator (the original leaked the handle).
    with open(os.path.join(PATH, fname)) as fh:
        for line in fh:
            line = line.rstrip()
            line_cnt += 1
            accum.append(line)
            # HACK: the grammar breaks with empty queries `-- ... ;`
            if line.endswith(';') and not line.startswith('--'):
                query = '\n'.join(accum)
                if not re.search(skip_re, query, re.I):
                    yield ('{0}:{1}'.format(fname, line_cnt), query)
                accum = []
|
{"/redash_reql/__init__.py": ["/redash_reql/parser.py", "/redash_reql/query_runner.py"], "/redash_reql/query_runner.py": ["/redash_reql/parser.py"]}
|
35,451
|
juandebravo/redash-reql
|
refs/heads/master
|
/redash_reql/version.py
|
"""
redash_reql package version
"""
__version__ = r'0.0.1'
|
{"/redash_reql/__init__.py": ["/redash_reql/parser.py", "/redash_reql/query_runner.py"], "/redash_reql/query_runner.py": ["/redash_reql/parser.py"]}
|
35,452
|
juandebravo/redash-reql
|
refs/heads/master
|
/tests/reql_test.py
|
import os
import sys
import pytest
from conftest import load_fixtures, get_test_parser
@pytest.fixture(scope='module')
def parser_sqlite_reql():
    # Parser generated from the sqlite grammar plus the ReQL extensions.
    return get_test_parser(['sqlite', 'reql'])


@pytest.fixture(scope='module')
def parser_pgql_reql():
    # Parser generated from the postgresql grammar plus the ReQL extensions.
    return get_test_parser(['pgsql', 'reql'])


def test_lark_user_aliases_state_bug():
    # Building two parsers back to back must yield the same parse — guards
    # against state shared between Lark instances (per the function name).
    from parser import ReqlParser
    query = 'SELECT * FROM query_2'
    parser = ReqlParser()
    ast1 = parser.parse(query)
    parser = ReqlParser()
    ast2 = parser.parse(query)
    assert ast1.data == ast2.data


# Every ReQL fixture must parse with the extended grammar.
@pytest.mark.parametrize('location, sql', load_fixtures('fixtures.reql'))
def test_reql(location, sql, parser_sqlite_reql):
    assert parser_sqlite_reql.parse(sql)


# Let's make sure we don't break sqlite
@pytest.mark.parametrize('location, sql', load_fixtures('fixtures.sqlite', skip=['reql']))
def test_sqlite_reql(location, sql, parser_sqlite_reql):
    assert parser_sqlite_reql.parse(sql)


# Let's make sure we don't break postgresql
@pytest.mark.parametrize('location, sql', load_fixtures('fixtures.pgsql', skip=['reql']))
def test_pgsql_reql(location, sql, parser_pgql_reql):
    assert parser_pgql_reql.parse(sql)
|
{"/redash_reql/__init__.py": ["/redash_reql/parser.py", "/redash_reql/query_runner.py"], "/redash_reql/query_runner.py": ["/redash_reql/parser.py"]}
|
35,453
|
juandebravo/redash-reql
|
refs/heads/master
|
/tests/sqlite_test.py
|
import os
import sys
import pytest
from conftest import load_fixtures, get_test_parser
@pytest.fixture(scope='module')
def parser():
    # Parser generated from the plain sqlite grammar (no ReQL extensions).
    return get_test_parser(['sqlite'])


# Every sqlite fixture must parse cleanly.
@pytest.mark.parametrize('location, sql', load_fixtures('fixtures.sqlite'))
def test_sqlite(location, sql, parser):
    assert parser.parse(sql)
|
{"/redash_reql/__init__.py": ["/redash_reql/parser.py", "/redash_reql/query_runner.py"], "/redash_reql/query_runner.py": ["/redash_reql/parser.py"]}
|
35,454
|
juandebravo/redash-reql
|
refs/heads/master
|
/redash_reql/__init__.py
|
from .parser import ReqlParser
from .query_runner import ReqlQueryRunner
|
{"/redash_reql/__init__.py": ["/redash_reql/parser.py", "/redash_reql/query_runner.py"], "/redash_reql/query_runner.py": ["/redash_reql/parser.py"]}
|
35,455
|
juandebravo/redash-reql
|
refs/heads/master
|
/redash_reql/query_runner.py
|
import json
import logging
import numbers
import re
import sqlite3
from collections import namedtuple
from dateutil import parser
from sqlalchemy.orm.exc import NoResultFound
from redash import models
from redash.permissions import has_access, not_view_only
from redash.query_runner import (TYPE_BOOLEAN, TYPE_DATETIME, TYPE_FLOAT,
TYPE_INTEGER, TYPE_STRING, BaseQueryRunner,
register)
from redash.utils import JSONEncoder
from redash_reql.parser import ReqlParser, Visitor, Tree
logger = logging.getLogger(__name__)
class ReqlVisitor(Visitor):
    r"""Collect table references matching ``query_\d+(_refresh)?``.

    Walks a parsed ReQL AST and records every table identifier pointing at
    another redash query (``query_123``; the ``_refresh`` suffix forces a
    re-run instead of reusing cached results — see
    create_tables_from_queries).

    Fix: the docstring is now a raw string; the original contained the
    invalid escape sequence ``\d`` in a plain string.
    """

    # name: raw identifier as written; id: numeric query id; refresh: force
    # re-run; line/column: source position for error messages.
    QueryRef = namedtuple('QueryRef', 'name id refresh line column')

    def __init__(self):
        self.queries = []

    def table_ref(self, node):
        if not node.children:
            return
        first = node.children[0]
        if not isinstance(first, Tree) or first.data != 'ident':
            return
        t_name = first.children[0]
        value = t_name.value
        # No transformation step yet so we have a raw AST: unquote
        # "double quoted" identifiers ourselves.
        if t_name.type == 'DQUOTED':
            value = value[1:-1].replace('""', '"')
        m = re.match(r'^query_(\d+)(_refresh)?$', value, re.I)
        if m:
            # (The original computed an unused ``query_id`` local here.)
            self.queries.append(
                ReqlVisitor.QueryRef(
                    value,
                    int(m.group(1)),
                    m.group(2) is not None,
                    t_name.line,
                    t_name.column))
class PermissionError(Exception):
    # Raised when a referenced query is missing or the user may not run it.
    # NOTE(review): shadows the Python 3 builtin PermissionError; renaming
    # would break callers that catch this class, so it is kept but flagged.
    pass
def _guess_type(value):
    """Best-effort redash column type (TYPE_*) for a single cell value."""
    if value == '' or value is None:
        return TYPE_STRING
    # Fix: bool is a subclass of numbers.Integral, so it must be tested
    # before the integer branch or True/False would report TYPE_INTEGER.
    if isinstance(value, bool):
        return TYPE_BOOLEAN
    if isinstance(value, numbers.Integral):
        return TYPE_INTEGER
    if isinstance(value, float):
        return TYPE_FLOAT
    # Fix: the original called unicode(), a NameError on Python 3; the
    # u'%s' formatting works on both Python 2 and 3.
    if (u'%s' % (value,)).lower() in ('true', 'false'):
        return TYPE_BOOLEAN
    try:
        parser.parse(value)
        return TYPE_DATETIME
    # TypeError added: dateutil raises it for non-string input, which
    # previously propagated out of a type *guess*.
    except (ValueError, OverflowError, TypeError):
        pass
    return TYPE_STRING
# Create a shared instance of the parser, since it's expensive to generate
# it from the grammar at runtime. It should be thread safe though.
reql_parser = ReqlParser()


def extract_queries(query):
    """Parse *query* and return the QueryRef tuples it references."""
    ast = reql_parser.parse(query)
    visitor = ReqlVisitor()
    visitor.visit(ast)
    return visitor.queries
def _load_query(user, q):
    """Fetch the redash Query behind a QueryRef, enforcing access checks.

    Raises PermissionError when the query does not exist, belongs to a
    different organisation, or *user* may not run its data source.
    """
    try:
        query = models.Query.get_by_id(q.id)
    except NoResultFound:
        query = None
    location = '(at line {} column {})'.format(q.line, q.column)
    if not query or user.org_id != query.org_id:
        # Fix: the original referenced an undefined name ``query_id`` here,
        # raising NameError instead of the intended PermissionError.
        raise PermissionError(u"Query id {} not found. {}".format(q.id, location))
    if not has_access(query.data_source.groups, user, not_view_only):
        raise PermissionError(u"You are not allowed to execute queries on {} data source (used for query id {}). {}".format(
            query.data_source.name, query.id, location))
    return query
def create_tables_from_queries(user, conn, queries):
    """Materialise each referenced query's results as a sqlite table.

    ``queries`` is a list of ReqlVisitor.QueryRef tuples; each becomes a
    table named after the reference (e.g. ``query_12``).
    """
    # Sort first the ones to refresh in case there are some dupes
    queries = sorted(queries, key=lambda x: x.id * (-1 if x.refresh else 1))
    done = set()
    for q in queries:
        if q.name in done:
            continue
        query = _load_query(user, q)
        results = None
        if not q.refresh:
            # Reuse the newest cached result when available (max_age=-1
            # presumably means "any age" — TODO confirm against
            # QueryResult.get_latest).
            latest = models.QueryResult.get_latest(query.data_source, query.query_text, max_age=-1)
            results = latest.data if latest else None
        if results is None:
            logger.info('Running query %s to get new results', query.id)
            results, error = query.data_source.query_runner.run_query(
                query.query_text, user)
            if error:
                raise Exception(
                    u"Failed loading results for query id {0} (at line {1} column {2}).".format(
                        query.id, q.line, q.column))
        else:
            logger.debug('Using previous results for query %s', query.id)
        # In both branches ``results`` is a JSON document at this point.
        results = json.loads(results)
        create_table(conn, q.name, results)
        done.add(q.name)
def create_table(conn, table, results):
    """Create *table* on the sqlite connection and bulk-insert *results*.

    ``results`` is a redash result dict: 'columns' (each a dict with a
    'name') and 'rows' (dicts keyed by column name).
    """
    columns = ', '.join(
        '"{}"'.format(c['name'].replace('"', '""'))
        for c in results['columns'])
    # ``table`` is interpolated unescaped; callers only pass identifiers
    # matching query_\d+(_refresh)? (see ReqlVisitor), so it is not
    # attacker-controlled.
    ddl = u'CREATE TABLE {0} ({1})'.format(table, columns)
    logger.debug("DDL: %s", ddl)
    conn.execute(ddl)
    dml = u'INSERT INTO {table} ({columns}) VALUES ({values})'.format(
        table=table,
        columns=columns,
        values=', '.join(['?'] * len(results['columns'])))
    # Fix: log the DML statement — the original logged ``ddl`` here again.
    logger.debug('DML: %s', dml)
    # Note that this method doesn't support generators
    conn.executemany(dml, [
        [row.get(column['name']) for column in results['columns']]
        for row in results['rows']
    ])
    conn.commit()
    logger.info('Inserted %d rows into %s', len(results['rows']), table)
class ReqlQueryRunner(BaseQueryRunner):
    """Query runner that executes SQL over other queries' results.

    Referenced queries (``query_<id>`` table names) are materialised into
    an in-memory sqlite database and the ReQL statement runs against it.
    """

    noop_query = 'SELECT 1'

    @classmethod
    def configuration_schema(cls):
        # 'memory' is optional: when set, it caps the sqlite page count.
        return {
            "type": "object",
            "properties": {
                'memory': {
                    'type': 'string',
                    'title': 'Memory limit (in bytes)'
                },
            }
        }

    @classmethod
    def annotate_query(cls):
        return False

    @classmethod
    def name(cls):
        return "ReQL Results"

    def _create_db(self):
        """Open an autocommit in-memory db, optionally memory-capped."""
        conn = sqlite3.connect(':memory:', isolation_level=None)
        # Fix: use .get() — 'memory' is optional in the schema, and the
        # original raised KeyError when it was not configured.
        if self.configuration.get('memory'):
            # See http://www.sqlite.org/pragma.html#pragma_page_size
            cursor = conn.execute('PRAGMA page_size')
            page_size, = cursor.fetchone()
            cursor.close()
            # Fix: floor division — on Python 3 ``/`` yields a float, which
            # would emit an invalid ``PRAGMA max_page_count = N.0``.
            pages = int(self.configuration['memory']) // page_size
            conn.execute('PRAGMA max_page_count = {0}'.format(pages))
            conn.execute('VACUUM')
            logger.info('Restricted sqlite memory to %s bytes (page_size: %s, pages: %s)',
                        self.configuration['memory'], page_size, pages)
            conn.commit()
        return conn

    def run_query(self, query, user):
        """Run *query* against a fresh db; return ``(json_data, error)``."""
        conn = self._create_db()
        try:
            queries = extract_queries(query)
            create_tables_from_queries(user, conn, queries)
            with conn:
                cursor = conn.execute(query)
                if cursor.description is not None:
                    columns = self.fetch_columns(
                        [(i[0], None) for i in cursor.description])
                    rows = []
                    column_names = [c['name'] for c in columns]
                    for i, row in enumerate(cursor):
                        # Guess each column type from the data; columns with
                        # mixed types degrade to TYPE_STRING.
                        for j, col in enumerate(row):
                            guess = _guess_type(col)
                            if columns[j]['type'] is None:
                                columns[j]['type'] = guess
                            elif columns[j]['type'] != guess:
                                columns[j]['type'] = TYPE_STRING
                        rows.append(dict(zip(column_names, row)))
                    data = {'columns': columns, 'rows': rows}
                    error = None
                    json_data = json.dumps(data, cls=JSONEncoder)
                else:
                    error = 'Query completed but it returned no data.'
                    json_data = None
        except KeyboardInterrupt:
            # Fix: sqlite3.Connection has no .cancel(); interrupt() is the
            # documented call that aborts a running query.
            conn.interrupt()
            error = "Query cancelled by user."
            json_data = None
        finally:
            conn.close()
        return json_data, error
register(ReqlQueryRunner)
|
{"/redash_reql/__init__.py": ["/redash_reql/parser.py", "/redash_reql/query_runner.py"], "/redash_reql/query_runner.py": ["/redash_reql/parser.py"]}
|
35,477
|
mohanbrinda/PythonPytest
|
refs/heads/master
|
/test.py
|
import math_func
import pytest
import sys
def test_answer():
    # func() increments its argument by one.
    assert math_func.func(3) == 4


#@pytest.mark.skip(reason ="skipping the test_add function")
#@pytest.mark.number
@pytest.mark.skipif(sys.version_info < (3, 3), reason ="skipping the test_add function")
def test_add():
    # add() defaults its second argument to 2.
    assert math_func.add(17, 3) == 20
    assert math_func.add(17) == 19
    assert math_func.add(5) == 7
    print(math_func.add(17, 3), '*************************')


#@pytest.mark.number
def test_product():
    # product() defaults its second argument to 2.
    assert math_func.product(3, 3) == 9
    assert math_func.product(3) == 6
    assert math_func.product(6) == 12


#@pytest.mark.strings
def test_add_strings():
    # add() also concatenates strings.
    outcome = math_func.add('Namaste', 'Boomidevi')
    assert outcome == 'NamasteBoomidevi'
    assert type(outcome) is str
    assert 'Namaste' in outcome


#@pytest.mark.strings
def test_prodstrings():
    # product() repeats a string b times (default 2).
    assert math_func.product('Namaste', 3) == 'Namaste' 'Namaste' 'Namaste'
    outcome = math_func.product('Namaste')
    assert outcome == 'NamasteNamaste'
    assert type(outcome) is str
    assert 'Namaste' in outcome
|
{"/test.py": ["/math_func.py"]}
|
35,478
|
mohanbrinda/PythonPytest
|
refs/heads/master
|
/math_func.py
|
def func(y):
    """Return *y* incremented by one."""
    return 1 + y
# add
def add(a, b=2):
    """Return ``a + b``; *b* defaults to 2 (also concatenates sequences)."""
    total = a + b
    return total
# prod
def product(a, b=2):
    """Return ``a * b``; *b* defaults to 2 (doubles numbers, repeats strings)."""
    result = a * b
    return result
|
{"/test.py": ["/math_func.py"]}
|
35,479
|
mohanbrinda/PythonPytest
|
refs/heads/master
|
/Executioncommands.py
|
# Execute the test.py file
pytest test.py
#USING OPTIONS WITH PYTEST
#Execute the test.py file with -v(verbose option)
pytest test.py -v
#Change the name of the test.py file to tes.py and execute the program
pytest tes.py
#Execute only the test_add() function from the test.py file using the ::<test-name> selector
pytest test.py::test_add
#Execute only the functions in test.py file using -k option containing the word "add"
pytest test.py -v -k "add"
#Execute only the functions in test.py file with -k option containing the word "add" or "string"
pytest test.py -v -k "add or string"
#Execute only the functions in test.py file with -k option containing the word "add" and "string"
pytest test.py -v -k "add and string"
#Mark the number functions and string function in test.py file with the following code before using option "m"
@pytest.mark.number
#Execute only the functions in test.py file with -m option number
pytest test.py -v -m number # will display number functions in test.py file
#Mark the number functions and string function in test.py file with the following code before using option "m"
@pytest.mark.strings
#Execute only the functions in test.py file with -m option strings
pytest test.py -v -m strings # will display strings functions in test.py file
#Execute test.py file with -x option
#the program will exit when it encounters first failure
pytest test.py -v -x
#Execute test.py file with the --tb=no option to suppress the stack trace on failures
#the program will exit when it encounters first failure
pytest test.py -v -x --tb=no
#Execute test.py file with the --maxfail=2 option (stop after two failures)
pytest test.py -v --maxfail=2
#Execute test.py file with skip option in order to skip a function in the test.py file
#add the following code to the test file
@pytest.mark.skip(reason="skipping the test_add function")
pytest test.py -v
#Execute test.py file with rsx option in order to get the details/reports of skipped function
#do not remove the code for skipped
@pytest.mark.skip(reason="skipping the test_add function")
pytest test.py -v -rsx
#Execute test.py file with skipif option using < version symbol
#do not remove the code for skipped
@pytest.mark.skipif(sys.version_info < (3, 3), reason ="skipping the test_add function")
pytest test.py -v
#Execute test.py file with skipif option using > version symbol
#do not remove the code for skipped
@pytest.mark.skipif(sys.version_info > (3, 3), reason ="skipping the test_add function")
pytest test.py -v
#Execute test.py file using -s option to view the print statement in add function
pytest test.py -v -s
#Execute test.py file using --capture=no option instead of -s to view the print statement in add function
pytest test.py -v --capture=no
#Execute test.py file using -q option to display only the important information about the executed programs
pytest test.py -v -q
#Execute test.py file using -q (quiet mode)option without -v verbose option to display only the programs that passed
pytest test.py -q
|
{"/test.py": ["/math_func.py"]}
|
35,482
|
open-pythons/lottedfs
|
refs/heads/master
|
/com/processxlsx.py
|
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
from openpyxl import load_workbook
from openpyxl import Workbook
import sqlite3
import atexit
import yaml
import time
import sys
import os
# Last processed spreadsheet row; persisted to the ``notes`` table on exit
# so a later run can resume where this one stopped.
notes_row = 2
yamlPath = 'config.yaml'
_yaml = open(yamlPath, 'r', encoding='utf-8')
cont = _yaml.read()
yaml_data = yaml.load(cont, Loader=yaml.FullLoader)
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '..'))
sys.path.append("..")
from com.ConnectSqlite import ConnectSqlite
conn = ConnectSqlite("./.SqliteData.db")


@atexit.register
def exit_handle():
    # Persist the resume point and close the db before the interpreter exits.
    conn.insert_update_table('''UPDATE notes SET number={0} WHERE id={1}'''.format(notes_row, '520'))
    conn.close_con()
    print('数据转存数据库结束')  # "finished transferring data to the database"
class Xlsx:
    """Stream product rows from an .xlsx price sheet into sqlite.

    Resumes from the row recorded under id '520' in the ``notes`` table
    when one is present.
    """

    def __init__(self, file_path, start_row):
        self.file_path = file_path
        self.start_row = start_row
        # Resume from the previously persisted row, if any.
        start_row_list = conn.fetchall_table('''select number from notes where id = '520';''')
        if len(start_row_list) > 0 and start_row_list[0][0]:
            self.start_row = start_row_list[0][0]
        self.dic = {}

    def getdata(self, row, rs):
        """Return [sku, brand, name, price] for spreadsheet row *row*."""
        # Column positions come from config.yaml, with 1-based defaults
        # sku=1, brand=2, name=3, price=4.
        sku_column = yaml_data.get('SKU_COLUMN')  # sku
        sku_column = sku_column if sku_column else 1
        brand_column = yaml_data.get('BRAND_COLUMN')  # brand
        brand_column = brand_column if brand_column else 2
        commodity_name_column = yaml_data.get('COMMODITY_NAME_COLUMN')  # product name
        commodity_name_column = commodity_name_column if commodity_name_column else 3
        original_price_column = yaml_data.get('ORIGINAL_PRICE_COLUMN')  # original price
        original_price_column = original_price_column if original_price_column else 4
        sku = rs.cell(row=row, column=sku_column).value
        brand = rs.cell(row=row, column=brand_column).value
        commodity_name = rs.cell(row=row, column=commodity_name_column).value
        original_price = rs.cell(row=row, column=original_price_column).value
        return [sku, brand, commodity_name, original_price]

    # [sic] "wirtesqlite": the misspelling is kept — renaming would break callers.
    def wirtesqlite(self, rs):
        """Insert every remaining row into ``originaldata``, tracking progress."""
        global notes_row
        max_row = rs.max_row + 1
        for row in range(self.start_row, max_row):
            data = self.getdata(row, rs)
            if data[0]:
                # NOTE(review): values are interpolated unquoted into the SQL;
                # this assumes numeric-looking sku/price cells — verify.
                sql = """INSERT INTO originaldata VALUES ({0}, {1}, {2});""".format(data[0], data[3], 0)
                if conn.insert_update_table(sql):
                    print('第 {0} 条插入成功'.format(row))  # row inserted OK
                else:
                    print('第 {0} 条插入失败'.format(row))  # insert failed
            else:
                print('第 {0} 条插入失败'.format(row))  # empty sku: reported as failed
            # Remember the last row touched for the atexit resume handler.
            notes_row = row
        # (commented-out batched-insert variant kept from the original)
        # sql = """INSERT INTO originaldata VALUES (?, ?, ?)"""
        # row_list = [n for n in range(1, rs.max_row + 1)]
        # row_list = [row_list[i:i+100]
        #             for i in range(0, len(row_list), 100)]
        # for row in row_list:
        #     value = []
        #     for r in row:
        #         data = self.getdata(r, rs)
        #         value.append((data[0], data[3], 0))
        #         # rs.delete_rows(r)
        #     print(conn.insert_table_many(sql, value))

    def readfile(self):
        """Open the first worksheet and push its rows into sqlite."""
        rb = load_workbook(self.file_path)
        sheets = rb.sheetnames
        sheet = sheets[0]
        rs = rb[sheet]
        self.wirtesqlite(rs)
        rb.save(self.file_path)
if __name__ == "__main__":
start = time.time()
sql = '''CREATE TABLE `originaldata` (
`sku` VARCHAR(12) DEFAULT NULL PRIMARY KEY,
`original_price` VARCHAR(9) DEFAULT NULL,
`code` int(1) DEFAULT NULL
)'''
print('创建原始数据表成功' if conn.create_tabel(sql) else '创建原始数据表失败')
sql = '''CREATE TABLE `notes` (
`id` VARCHAR(5) DEFAULT NULL PRIMARY KEY,
`number` int(6) DEFAULT NULL
)'''
if conn.create_tabel(sql):
print('创建记录表成功')
conn.insert_update_table('''INSERT INTO notes VALUES ('520', 2);''')
else:
print('创建记录表失败')
file_path = yaml_data.get('FILE_PATH')
file_path = file_path if file_path else 'data/欧美韩免原价.xlsx'
start_row = yaml_data.get('START_ROW')
start_row = start_row if start_row else 2
x = Xlsx(file_path, start_row)
x.readfile()
print("运行完毕,总用时:{}".format(time.time() - start))
|
{"/com/processxlsx.py": ["/com/ConnectSqlite.py"], "/com/processdata.py": ["/com/ConnectSqlite.py"], "/com/proxies.py": ["/com/ConnectSqlite.py"], "/com/test.py": ["/com/ConnectSqlite.py"]}
|
35,483
|
open-pythons/lottedfs
|
refs/heads/master
|
/com/processdata.py
|
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
import yaml
import atexit
import random
import aiohttp
import asyncio
import sqlite3
import requests
import concurrent
import threading
from lxml.html.clean import Cleaner
from lxml import etree
import sys
import os
import re
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '..'))
sys.path.append("..")
from com.headers import getheaders
from com.ConnectSqlite import ConnectSqlite
# Current proxy endpoint; replaced per-run/per-failure from the proxyip table.
ip_port = '127.0.0.1:8080'
yamlPath = 'config.yaml'
_yaml = open(yamlPath, 'r', encoding='utf-8')
cont = _yaml.read()
yaml_data = yaml.load(cont, Loader=yaml.FullLoader)
# Cap concurrent HTTP requests at 10.
sem = asyncio.Semaphore(10)
conn = ConnectSqlite("./.SqliteData.db")


@atexit.register
def exit_handle():
    # Close the sqlite connection when the interpreter exits.
    conn.close_con()
    print('数据处理结束!')  # "data processing finished"
class Data:
def __init__(self, timeout=25):
self.tkList = []
self.timeout = timeout
self.pattern = re.compile('[0-9]+')
self.cleaner = Cleaner(
style=True, scripts=True, page_structure=False, safe_attrs_only=False) # 清除掉CSS等
self.search_url = 'http://chn.lottedfs.cn/kr/search?comSearchWord={0}&comCollection=GOODS&comTcatCD=&comMcatCD=&comScatCD=&comPriceMin=&comPriceMax=&comErpPrdGenVal_YN=&comHsaleIcon_YN=&comSaleIcon_YN=&comCpnIcon_YN=&comSvmnIcon_YN=&comGiftIcon_YN=&comMblSpprcIcon_YN=&comSort=RANK%2FDESC&comListCount=20&txtSearchClickCheck=Y'
self.chanel_search_url = 'http://chn.lottedfs.cn/kr/search/chanelSearch?searchWord={0}&collection=CHANEL&returnUrl=&startCount=0&listCount=4&sort=WEIGHT%2FDESC%2CRANK%2FDESC&requery=&rt=&tcatCD=&mcatCD=&scatCD=&priceMin=0&priceMax=0&erpPrdGenVal_YN=&hsaleIcon_YN=&saleIcon_YN=&cpnIcon_YN=&svmnIcon_YN=&giftIcon_YN=&mblSpprcIcon_YN=<OnlyBrnd_YN=&onlOnlySale_YN=&dfsOnly_YN=&newPrd_YN=&bestPrd_YN=&bf3hrshpCD=&so_YN=&cpnAply_YN=&brndNo=&shopSubTpCd=02&prdasListCount=5&prdOptItemCD=&flteCD=&eventCd='
self.sql = '''select sku, original_price, code from originaldata ORDER BY random() LIMIT 1;'''
def getIpPort(self):
ip_port = conn.fetchall_table(
'SELECT * FROM proxyip ORDER BY random() LIMIT 1;')
if isinstance(ip_port, list) and len(ip_port) == 1:
return ip_port[0][0]
else:
raise RuntimeError('Ip代理数量不足,程序被迫停止,请运行获取代理Ip.exe')
def manydata(self, sku_url):
sql_del = '''DELETE FROM originaldata where sku='{0}';'''.format(sku_url[2])
sql = '''INSERT INTO processdata VALUES ('{0}', {1}, {2}, {3});'''.format(
sku_url[2], sku_url[1], '99999999', 2)
if conn.insert_update_table(sql):
print('SKU为:{0} 的商品搜出多条数据'.format(sku_url[2]))
conn.delete_table(sql_del)
def success(self, sku_url, price):
sql_del = '''DELETE FROM originaldata where sku='{0}';'''.format(sku_url[2])
sql = '''INSERT INTO processdata VALUES ('{0}', {1}, {2}, {3});'''.format(
sku_url[2], sku_url[1], price, 1)
if conn.insert_update_table(sql):
print('SKU为:{0} 的商品搜索成功'.format(sku_url[2]))
conn.delete_table(sql_del)
def failure(self, sku_url):
sql_del = '''DELETE FROM originaldata where sku='{0}';'''.format(sku_url[2])
sql = '''INSERT INTO processdata VALUES ('{0}', {1}, {2}, {3});'''.format(
sku_url[2], sku_url[1], '99999999', 0)
if conn.insert_update_table(sql):
print('SKU为:{0} 的商品没有搜到'.format(sku_url[2]))
conn.delete_table(sql_del)
def get_urls(self, sku_list):
sku_urls = [[self.search_url.format(
item[0]) if item[2] == 0 else self.chanel_search_url.format(item[0]), item[1], item[0]] for item in sku_list]
return sku_urls
def processhtml(self, html, sku_url):
soup = etree.HTML(html)
li = soup.xpath(
'//*[@id="searchTabPrdList"]/div[@class="imgType"]/ul[@class="listUl"]/li')
if len(li) > 1:
self.manydata(sku_url=sku_url) # 搜索出多条数据
elif len(li) == 1:
span = li[0].xpath('//div[@class="price"]/span/text()')
match = self.pattern.findall(span[0] if len(span) > 0 else '')
if match:
price = re.search(r'\d+(\.\d+)?', span[0]).group()
else:
strong = li[0].xpath('//div[@class="discount"]/strong/text()')
price = re.search(r'\d+(\.\d+)?', strong[0]).group()
self.success(sku_url=sku_url, price=price) # 搜索成功
else:
em = soup.xpath(
'//*[@id="contSearch"]/section[@class="chanelSearch"]/span/em/text()')
if(len(em) > 0):
sql_update = "UPDATE originaldata SET code={0} WHERE sku='{1}';".format(1, sku_url[2])
conn.insert_update_table(sql_update)
else:
strong = soup.xpath(
'//*[@id="chanelPrdList"]/ul/li//div[@class="discount"]/strong/text()')
if len(strong) > 1:
self.manydata(sku_url=sku_url) # 搜索出多条数据
elif len(strong) == 1:
price = re.search(r'\d+(\.\d+)?', strong[0]).group()
self.success(sku_url=sku_url, price=price) # 搜索成功
else:
div = soup.xpath('//div[@class="wrap"]/section//p[@class="ph"]/span')
if len(div) < 1:
self.failure(sku_url=sku_url) # 搜索失败
return True
async def get(self, url):
    """Fetch *url* through the current proxy; return body bytes or False.

    Concurrency is bounded by the module-level ``sem`` semaphore. The proxy
    address comes from the module-global ``ip_port``; on a proxy-specific
    failure the dead proxy row is deleted from the proxyip table and
    ``ip_port`` is reset to a hard-coded fallback address. Every failure
    path returns False so the caller can retry on a later pass.
    """
    global ip_port
    headers = getheaders()
    async with sem:
        async with aiohttp.ClientSession(headers=headers) as session:
            try:
                async with session.get(url, timeout=self.timeout, proxy='http://' + ip_port) as resp:
                    if resp.status == 200:
                        return await resp.read()
                    else:
                        # Non-200: treat as a soft failure, retry later.
                        return False
            except (aiohttp.client_exceptions.ClientProxyConnectionError, aiohttp.ClientHttpProxyError, aiohttp.ClientProxyConnectionError) as cpce:
                # Proxy is dead: drop it from the pool and fall back to the
                # hard-coded default for subsequent requests.
                print('代理Ip:{0} 已失效'.format(ip_port))
                conn.delete_table(
                    '''DELETE FROM proxyip WHERE ip_port='{0}';'''.format(ip_port))
                ip_port = '58.23.200.104:8000'
                return False
            except (aiohttp.client_exceptions.ClientOSError, aiohttp.client_exceptions.ServerDisconnectedError, aiohttp.client_exceptions.ClientConnectorError) as cce:
                # Client-side network failure.
                print('客户端断网失败')
                return False
            except (concurrent.futures._base.TimeoutError, aiohttp.client_exceptions.ServerTimeoutError) as ste:
                # Request exceeded self.timeout.
                print('数据请求超时')
                return False
            except Exception as e:
                # Anything else: log the type and give up on this attempt.
                print('其他异常错误', type(e))
                return False
async def request(self, sku_url):
    """Fetch one SKU's search page and parse it on a worker thread.

    Malformed entries (fewer than two fields) are skipped. On a failed
    fetch the SKU is simply left for the next pass. Parsing is handed to
    ``processhtml`` on a thread that is tracked in ``self.tkList`` so
    ``get_data`` can join them all at the end.
    """
    if len(sku_url) < 2:
        return
    html = await self.get(sku_url[0])
    if not html:
        print('数据请求失败,等待下次重新请求')
        return
    worker = threading.Thread(target=self.processhtml,
                              args=(html, sku_url,))
    worker.start()
    self.tkList.append(worker)
def get_data(self):
    """Fetch all pending SKUs once, run the async requests, join workers.

    Fix: the original wrapped the body in ``while True`` but ended the
    loop with an unconditional ``break``, so it only ever ran one pass;
    the dead loop is removed with identical behavior.
    """
    global ip_port
    ip_port = '58.23.200.104:8000'  # initial proxy; may be replaced by get()
    sku_list = conn.fetchall_table(self.sql)
    # NOTE(review): fetchall_table returns a warning *string* (not an empty
    # list) when nothing matches, and len(str) > 0, so this guard only
    # skips work for a genuinely empty sequence — confirm against
    # ConnectSqlite.fetchall_table.
    if len(sku_list) > 0:
        sku_urls = self.get_urls(sku_list)
        tasks = [asyncio.ensure_future(
            self.request(sku_url)) for sku_url in sku_urls]
        loop = asyncio.get_event_loop()
        loop.run_until_complete(asyncio.wait(tasks))
    # Wait for every parsing thread spawned by request().
    for tk in self.tkList:
        tk.join()
if __name__ == "__main__":
    start = time.time()
    # Result table: one row per SKU with its original price, the newly
    # scraped price and a status code.
    sql = '''CREATE TABLE `processdata` (
    `sku` VARCHAR(12) DEFAULT NULL PRIMARY KEY,
    `original_price` VARCHAR(9) DEFAULT NULL,
    `new_price` VARCHAR(9) DEFAULT NULL,
    `code` int(1) DEFAULT NULL
    )'''
    # NOTE: create_tabel ('tabel') matches the ConnectSqlite API spelling.
    print('创建处理数据表成功' if conn.create_tabel(sql) else '创建处理数据表失败')
    d = Data()
    d.get_data()
    print("运行完毕,总用时:{}".format(time.time() - start))
|
{"/com/processxlsx.py": ["/com/ConnectSqlite.py"], "/com/processdata.py": ["/com/ConnectSqlite.py"], "/com/proxies.py": ["/com/ConnectSqlite.py"], "/com/test.py": ["/com/ConnectSqlite.py"]}
|
35,484
|
open-pythons/lottedfs
|
refs/heads/master
|
/com/test/test.py
|
import sqlite3
import asyncio
import aiohttp
import time
import re
from bs4 import BeautifulSoup
# Wall-clock start for the "Cost time" report printed at script end.
start = time.time()
# Digit matcher used to decide whether the price <span> holds a number.
pattern = re.compile('[0-9]+')
async def get(url):
    """Fetch *url* and return the decoded response body as text."""
    async with aiohttp.ClientSession() as session, session.get(url) as resp:
        return await resp.text()
async def request():
    """Search for one hard-coded SKU and print its parsed price.

    Fix: the branch condition was inverted. When ``pattern.findall`` finds
    no digits in the span text, ``re.search(r'\d+...')`` on that same text
    returns None and ``.group()`` raises AttributeError — so the span price
    must be taken when a match IS found, and the discount <strong> used as
    the fallback (mirroring processhtml in the main scraper).
    """
    url = 'http://chn.lottedfs.cn/kr/search?comSearchWord=2725184485&comCollection=GOODS&comTcatCD=&comMcatCD=&comScatCD=&comPriceMin=&comPriceMax=&comErpPrdGenVal_YN=&comHsaleIcon_YN=&comSaleIcon_YN=&comCpnIcon_YN=&comSvmnIcon_YN=&comGiftIcon_YN=&comMblSpprcIcon_YN=&comSort=RANK%2FDESC&comListCount=20&txtSearchClickCheck=Y'
    result = await get(url)
    soup = BeautifulSoup(result, 'lxml')
    all_span = soup.select('#searchTabPrdList .imgType .listUl .productMd .price span')
    if len(all_span) > 1:
        return ['商品搜索条数错误', 0]
    elif len(all_span) == 1:
        match = pattern.findall(all_span[0].get_text())
        if match:
            # Span text contains digits: extract the numeric price from it.
            print( ['搜索成功', re.search(r'\d+(\.\d+)?', all_span[0].get_text()).group()])
        else:
            # No digits in the price span: fall back to the discount value.
            all_strong = soup.select('#searchTabPrdList .imgType .listUl .productMd .discount strong')
            print( ['搜索成功', re.search(r'\d+(\.\d+)?', all_strong[0].get_text()).group()])
# Drive the single search coroutine to completion and report elapsed time.
future = asyncio.ensure_future(request())
event_loop = asyncio.get_event_loop()
event_loop.run_until_complete(asyncio.wait([future]))
end = time.time()
print('Cost time:', end - start)
|
{"/com/processxlsx.py": ["/com/ConnectSqlite.py"], "/com/processdata.py": ["/com/ConnectSqlite.py"], "/com/proxies.py": ["/com/ConnectSqlite.py"], "/com/test.py": ["/com/ConnectSqlite.py"]}
|
35,485
|
open-pythons/lottedfs
|
refs/heads/master
|
/com/ConnectSqlite.py
|
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import sqlite3
class ConnectSqlite:
    """Small convenience wrapper around one sqlite3 connection.

    Every helper runs a single statement, commits immediately
    (isolation_level=None plus an explicit commit) and converts failures
    into a printed, timestamped log line plus a False/None return instead
    of raising.
    """

    def __init__(self, dbName="./.Proxies.db"):
        # check_same_thread=False: the project shares this connection across
        # worker threads; sqlite3 does not serialise access for us, so
        # callers are expected to keep statements short.
        self._conn = sqlite3.connect(
            dbName, timeout=3, isolation_level=None, check_same_thread=False)
        self._conn.execute('PRAGMA synchronous = OFF')
        self._cur = self._conn.cursor()
        # Timestamp prefix for every log message. Fix: use the datetime
        # module directly — the original reached through the undocumented
        # sqlite3.datetime attribute, an implementation detail of the
        # sqlite3 module that is not part of its public API.
        self._time_now = "[" + \
            datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S') + "]"

    def close_con(self):
        """Close the cursor and the underlying connection."""
        self._cur.close()
        self._conn.close()

    def create_tabel(self, sql):
        """Execute a CREATE TABLE statement; return True on success.

        NOTE(review): keeps the original 'tabel' spelling because external
        callers (e.g. the scraper's __main__ block) use this name.
        """
        try:
            self._cur.execute(sql)
            self._conn.commit()
            return True
        except Exception as e:
            print(self._time_now, "[CREATE TABLE ERROR]", e)
            return False

    def delete_table(self, sql):
        """Execute *sql* only if it contains DELETE; return True on success."""
        try:
            if 'DELETE' in sql.upper():
                self._cur.execute(sql)
                self._conn.commit()
                return True
            else:
                # Guard against accidentally running a non-DELETE statement.
                print(self._time_now, "[EXECUTE SQL IS NOT DELETE]")
                return False
        except Exception as e:
            print(self._time_now, "[DELETE TABLE ERROR]", e)
            return False

    def fetchall_table(self, sql, limit_flag=True):
        """Run a SELECT; return all rows (limit_flag=True) or a single row.

        Returns a warning *string* when the result is empty — callers must
        accept either a list/tuple or a str. Returns None (implicitly) when
        the query itself raises.
        """
        try:
            self._cur.execute(sql)
            war_msg = self._time_now + \
                ' The [{}] is empty or equal None!'.format(sql)
            if limit_flag is True:
                r = self._cur.fetchall()
                return r if len(r) > 0 else war_msg
            elif limit_flag is False:
                # Fix: fetchone() yields None (not an empty sequence) when
                # no row matches; the original called len(None) here and
                # raised TypeError instead of returning the warning message.
                r = self._cur.fetchone()
                return r if r else war_msg
        except Exception as e:
            print(self._time_now, "[SELECT TABLE ERROR]", e)

    def insert_update_table(self, sql):
        """Execute an INSERT/UPDATE statement; return True on success."""
        try:
            self._cur.execute(sql)
            self._conn.commit()
            return True
        except Exception as e:
            print(self._time_now, "[INSERT/UPDATE TABLE ERROR]", e, " [", sql, "]")
            return False

    def insert_table_many(self, sql, value):
        """executemany() wrapper for bulk parameterised inserts."""
        try:
            self._cur.executemany(sql, value)
            self._conn.commit()
            return True
        except Exception as e:
            print(self._time_now, "[INSERT MANY TABLE ERROR]", e)
            return False
|
{"/com/processxlsx.py": ["/com/ConnectSqlite.py"], "/com/processdata.py": ["/com/ConnectSqlite.py"], "/com/proxies.py": ["/com/ConnectSqlite.py"], "/com/test.py": ["/com/ConnectSqlite.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.