index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
68,125 | sizday/educatiON | refs/heads/master | /main.py | from fastapi import FastAPI
from preload.config import db_pass, db_user, host, db_name
from database.database import db, DBCommands
from database.models import MakeFollower, Registration, Lesson, Subject, Mark, Photo
from fastapi.middleware.cors import CORSMiddleware
app = FastAPI()
# CORS: wide-open for development use.
# NOTE(review): browsers reject allow_credentials=True combined with
# allow_origins=['*'] for credentialed requests, and the combination is
# unsafe for production — confirm the intended deployment.
app.add_middleware(
    CORSMiddleware,
    allow_origins=['*'],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
@app.get("/user/profile/{user_id}")
async def get_user(user_id: int):
dbc = DBCommands()
await db.set_bind(f'postgresql://{db_user}:{db_pass}@{host}/{db_name}')
user = await dbc.get_user(user_id)
return user
@app.get("/user/subject/{user_id}")
async def get_subject_by_user(user_id: int):
dbc = DBCommands()
await db.set_bind(f'postgresql://{db_user}:{db_pass}@{host}/{db_name}')
subject = await dbc.get_subject_by_user(user_id)
return subject
@app.get("/user/lesson/{user_id}")
async def get_lessons_by_user(user_id: int):
dbc = DBCommands()
await db.set_bind(f'postgresql://{db_user}:{db_pass}@{host}/{db_name}')
lessons = await dbc.get_lessons_by_user(user_id)
return lessons
@app.get("/teacher/lesson_id/{lesson_id}")
async def get_lesson_by_id(lesson_id: int):
dbc = DBCommands()
await db.set_bind(f'postgresql://{db_user}:{db_pass}@{host}/{db_name}')
lesson = await dbc.get_lesson_by_id(lesson_id)
return lesson
@app.get("/user/evaluation/{user_id}")
async def get_evaluations_by_user(user_id: int):
dbc = DBCommands()
await db.set_bind(f'postgresql://{db_user}:{db_pass}@{host}/{db_name}')
evaluations = await dbc.get_evaluations_by_user(user_id)
return evaluations
@app.get("/teacher/evaluation/{user_id}")
async def get_evaluations_by_teacher(user_id: int):
dbc = DBCommands()
await db.set_bind(f'postgresql://{db_user}:{db_pass}@{host}/{db_name}')
evaluations = await dbc.get_evaluations_by_teacher(user_id)
return evaluations
@app.get("/teacher/lesson/{user_id}")
async def get_lessons_by_teacher(user_id: int):
dbc = DBCommands()
await db.set_bind(f'postgresql://{db_user}:{db_pass}@{host}/{db_name}')
lessons = await dbc.get_lessons_by_teacher(user_id)
return lessons
@app.get("/teacher/subject/{user_id}")
async def get_subject_by_teacher(user_id: int):
dbc = DBCommands()
await db.set_bind(f'postgresql://{db_user}:{db_pass}@{host}/{db_name}')
subject = await dbc.get_subject_by_teacher(user_id)
return subject
@app.get("/teacher/is_teacher/{user_id}")
async def is_teacher(user_id: int):
dbc = DBCommands()
await db.set_bind(f'postgresql://{db_user}:{db_pass}@{host}/{db_name}')
result = await dbc.is_teacher(user_id)
return result
@app.get("/user/authorisation/{email}/{password}")
async def authorisation(email: str, password: str):
dbc = DBCommands()
await db.set_bind(f'postgresql://{db_user}:{db_pass}@{host}/{db_name}')
result = await dbc.authorisation(email, password)
return result
@app.post("/user/make_follower/")
async def make_follower(params: MakeFollower):
dbc = DBCommands()
await db.set_bind(f'postgresql://{db_user}:{db_pass}@{host}/{db_name}')
await dbc.make_follower(params.user_id, params.subject_id)
@app.post("/user/create/")
async def create_user(params: Registration):
dbc = DBCommands()
await db.set_bind(f'postgresql://{db_user}:{db_pass}@{host}/{db_name}')
await dbc.create_user(params.name, params.surname, params.email,
params.password, params.birthday, params.is_teacher)
@app.post("/teacher/create/lesson")
async def create_lesson(params: Lesson):
dbc = DBCommands()
await db.set_bind(f'postgresql://{db_user}:{db_pass}@{host}/{db_name}')
await dbc.create_lesson(params.user_id, params.title, params.description,
params.date, params.check_file)
@app.post("/teacher/create/subject")
async def create_subject(params: Subject):
dbc = DBCommands()
await db.set_bind(f'postgresql://{db_user}:{db_pass}@{host}/{db_name}')
await dbc.create_subject(params.user_id, params.title, params.type_checking)
@app.post("/user/update/mark")
async def update_mark(params: Mark):
dbc = DBCommands()
await db.set_bind(f'postgresql://{db_user}:{db_pass}@{host}/{db_name}')
await dbc.update_mark(params.user_id, params.lesson_id, params.file)
@app.post("/user/update/photo")
async def update_photo(params: Photo):
dbc = DBCommands()
await db.set_bind(f'postgresql://{db_user}:{db_pass}@{host}/{db_name}')
await dbc.update_photo(params.user_id, params.photo)
| {"/database/database.py": ["/preload/config.py", "/database/models.py", "/testing/test.py", "/testing/program.py"], "/main.py": ["/preload/config.py", "/database/database.py", "/database/models.py"]} |
68,128 | voron434/itc-orders-backend | refs/heads/master | /stock/serializers.py | from rest_framework import serializers
from . import models
class OrderSerializer(serializers.ModelSerializer):
    """Full Order representation for API responses."""

    class Meta:
        model = models.Order
        fields = [
            'id',
            'title',
            'description',
            'status',
        ]
class ProjectSerializer(serializers.ModelSerializer):
    """Full Project representation for API responses."""

    class Meta:
        model = models.Project
        fields = [
            'id',
            'title',
            'description',
            'status',
        ]
class ProjectUserSerializer(serializers.ModelSerializer):
    """Slim Project representation (title and id only).

    NOTE(review): despite the name, this serializes ``models.Project``, not
    a user — confirm whether a different model was intended.
    """

    class Meta:
        model = models.Project
        fields = [
            'title',
            'id',
        ]
| {"/registration/auth_backend.py": ["/registration/models.py"], "/registration/views.py": ["/registration/models.py"], "/stock/models.py": ["/registration/models.py"]} |
68,129 | voron434/itc-orders-backend | refs/heads/master | /registration/auth_backend.py | from django.contrib.auth.backends import ModelBackend
from .models import User
class PasswordlessAuthBackend(ModelBackend):
    """Log in to Django without providing a password.

    Authentication succeeds solely on a matching ``vk_id``; the caller is
    expected to have already validated the VK access token.
    """
    def authenticate(self, vk_id=None):
        # Returning None means "not this backend's user" and lets Django
        # try the next configured backend.
        try:
            return User.objects.get(vk_id=vk_id)
        except User.DoesNotExist:
            return None

    def get_user(self, user_id):
        # Backend hook used to resolve the user id stored in the session.
        try:
            return User.objects.get(id=user_id)
        except User.DoesNotExist:
            return None
class PasswordAuthBackend(ModelBackend):
    """Authenticate with a username and password against the custom User model.

    (The previous docstring, "Log in to Django without providing a
    password", was copied from PasswordlessAuthBackend and was wrong —
    this backend does verify the password.)
    """
    def authenticate(self, username=None, password=None):
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            # Run the default password hasher once to reduce the timing
            # difference between an existing and a nonexistent user (#20760).
            User().set_password(password)
            return None
        else:
            if user.check_password(password) and self.user_can_authenticate(user):
                return user
            return None

    def get_user(self, user_id):
        # Backend hook used to resolve the user id stored in the session.
        try:
            return User.objects.get(id=user_id)
        except User.DoesNotExist:
            return None
| {"/registration/auth_backend.py": ["/registration/models.py"], "/registration/views.py": ["/registration/models.py"], "/stock/models.py": ["/registration/models.py"]} |
68,130 | voron434/itc-orders-backend | refs/heads/master | /registration/migrations/0005_auto_20180125_0036.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-01-25 00:36
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the misspelled ``is_stuff`` field (added in 0004) to ``is_staff``."""

    dependencies = [
        ('registration', '0004_user_is_stuff'),
    ]

    operations = [
        migrations.RenameField(
            model_name='user',
            old_name='is_stuff',
            new_name='is_staff',
        ),
    ]
| {"/registration/auth_backend.py": ["/registration/models.py"], "/registration/views.py": ["/registration/models.py"], "/stock/models.py": ["/registration/models.py"]} |
68,131 | voron434/itc-orders-backend | refs/heads/master | /registration/models.py | from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import UserManager
# (stored value, human-readable label) pairs for User.role.
ROLE_CHOICES = (
    ('Admin', 'Admin'),
    ('Adept', 'Adept'),
    ('User', 'User'),
)
class User(AbstractBaseUser):
    """Custom user model, primarily authenticated through a VK account.

    NOTE(review): migrations 0004/0005 add and rename an ``is_staff`` field
    on this model, but no such field is declared here — confirm that the
    model and migration history are in sync.
    """
    # Django login name; VK-created accounts (see registration.views) are
    # saved without setting it.
    username = models.CharField(
        max_length=255,
        unique=True,
    )
    first_name = models.CharField(
        max_length=255,
    )
    last_name = models.CharField(
        max_length=255,
    )
    # VK account id stored as text. Not unique at the DB level; lookups in
    # registration.views treat it as effectively unique — TODO confirm.
    vk_id = models.CharField(
        max_length=20,
    )
    role = models.CharField(
        max_length=5,
        choices=ROLE_CHOICES,
        default='User',
    )
    about = models.TextField()
    picture = models.ImageField()
    objects = UserManager()
    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = []

    def __str__(self):
        return self.vk_id
| {"/registration/auth_backend.py": ["/registration/models.py"], "/registration/views.py": ["/registration/models.py"], "/stock/models.py": ["/registration/models.py"]} |
68,132 | voron434/itc-orders-backend | refs/heads/master | /registration/views.py | from django.contrib.auth import authenticate, login
from django.http import JsonResponse
import vk
from vk.exceptions import VkAPIError
import logging
from .models import User
from .serializers import UserSerializer
logger = logging.getLogger('admin')
def _login_success_response(request, user_id):
    """Shared tail of both vk_auth branches: authenticate the VK id,
    attach the Django session, and return the serialized user payload."""
    user = authenticate(vk_id=user_id)
    login(request, user)
    json_user = UserSerializer(user)
    logger.debug('Handling done with success')
    return JsonResponse({
        'success': True,
        'user': json_user.data
    })


def vk_auth(request):
    """Authenticate a visitor from a VK ``access_token`` query parameter.

    Resolves the token's owner via the VK API, creates a local User record
    on first sight, logs the user in, and returns the serialized user as
    JSON; error payloads carry an ``error`` key instead.
    """
    logger.debug('Starting handling auth request...')
    vk_token = request.GET.get('access_token')
    if vk_token:
        session = vk.Session()
        api = vk.API(session, v=5.0)
        try:
            user_info = api.users.get(access_token=vk_token, fields=['photo_200', ])[0]
        except VkAPIError as e:
            logger.error(e.message)
            logger.debug('Handling done with an error')
            return JsonResponse({'error': {'code': e.code, 'message': e.message}})
        logger.debug('Successfully got vk response')
        user_id = user_info['id']
        same_user = User.objects.filter(vk_id=user_id)
        if same_user.exists():
            logger.debug('Such user exists...')
            return _login_success_response(request, user_id)
        # First login from this VK account: create the local record, then
        # fall through to the same login/response path.
        logger.debug('Creating record for new user...')
        new_user = User(
            vk_id=user_id,
            first_name=user_info['first_name'],
            last_name=user_info['last_name'],
            role='User',
            picture=user_info['photo_200'],
        )
        new_user.save()
        return _login_success_response(request, user_id)
    logger.error('Some unexpected error')
    logger.debug('Handling done with an error')
    return JsonResponse({'error': True})
| {"/registration/auth_backend.py": ["/registration/models.py"], "/registration/views.py": ["/registration/models.py"], "/stock/models.py": ["/registration/models.py"]} |
68,133 | voron434/itc-orders-backend | refs/heads/master | /stock/views.py | from django.views.decorators.csrf import ensure_csrf_cookie
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from . import models
from . import serializers
@ensure_csrf_cookie
@login_required
def create_order(request):
    """Create an Order for the logged-in user from POSTed title/description.

    Returns ``{"success": true}`` on creation, ``{"error": true}`` otherwise.
    """
    # Check the HTTP method, not the POST dict: `if request.POST:` was falsy
    # for a POST request with an empty body and misrouted it to the error
    # branch for the wrong reason.
    if request.method == 'POST':
        title = request.POST.get('title')
        description = request.POST.get('description')
        order = models.Order(
            title=title,
            description=description,
            orderer=request.user)
        order.save()
        return JsonResponse({'success': True})
    return JsonResponse({'error': True})
@login_required
def list_order(request):
    """Return every order placed by the current user as a JSON payload."""
    serialized = serializers.OrderSerializer(
        models.Order.objects.filter(orderer=request.user),
        many=True,
    )
    return JsonResponse({
        'success': True,
        'orders': serialized.data,
    })
| {"/registration/auth_backend.py": ["/registration/models.py"], "/registration/views.py": ["/registration/models.py"], "/stock/models.py": ["/registration/models.py"]} |
68,134 | voron434/itc-orders-backend | refs/heads/master | /stock/urls.py | from django.conf.urls import url
from . import views
# Order endpoints: POST /order creates an order, GET /orders lists the
# current user's orders.
urlpatterns = [
    url(r'^order$', views.create_order, name='create_order'),
    url(r'^orders$', views.list_order, name='list_order'),
]
| {"/registration/auth_backend.py": ["/registration/models.py"], "/registration/views.py": ["/registration/models.py"], "/stock/models.py": ["/registration/models.py"]} |
68,135 | voron434/itc-orders-backend | refs/heads/master | /registration/migrations/0004_user_is_stuff.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-01-25 00:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a boolean ``is_stuff`` flag to User.

    NOTE: "is_stuff" is a typo for "is_staff"; migration 0005 renames it.
    """

    dependencies = [
        ('registration', '0003_auto_20180125_0025'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='is_stuff',
            field=models.BooleanField(default=False),
        ),
    ]
| {"/registration/auth_backend.py": ["/registration/models.py"], "/registration/views.py": ["/registration/models.py"], "/stock/models.py": ["/registration/models.py"]} |
68,136 | voron434/itc-orders-backend | refs/heads/master | /registration/urls.py | from django.conf.urls import url
from . import views
# Single endpoint: GET /auth?access_token=... performs VK-based login.
urlpatterns = [
    url(r'^auth$', views.vk_auth, name='vk_auth'),
]
| {"/registration/auth_backend.py": ["/registration/models.py"], "/registration/views.py": ["/registration/models.py"], "/stock/models.py": ["/registration/models.py"]} |
68,137 | voron434/itc-orders-backend | refs/heads/master | /stock/models.py | from django.db import models
from registration.models import User
# (stored value, label) pairs for Order.status.
# NOTE(review): 'Archieved' is a misspelling of 'Archived', but it is the
# value persisted in existing rows — fixing it requires a data migration.
ORDER_STATUSES_CHOICES = (
    ('New', 'New'),
    ('Rejected', 'Rejected'),
    ('Approved', 'Approved'),
    ('Research', 'Research'),
    ('Archieved', 'Archieved'),
)
# (stored value, label) pairs for Project.status.
PROJECT_STATUSES_CHOICES = (
    ('Draft', 'Draft'),
    ('Approved', 'Approved'),
    ('Operative', 'Operative'),
    ('Inoperative', 'Inoperative'),
)
class Order(models.Model):
    """A work request placed by a user; lifecycle tracked via ``status``."""
    title = models.CharField(
        max_length=40,
    )
    description = models.TextField()
    # The user who placed the order; reverse accessor: user.orders.
    orderer = models.ForeignKey(User, related_name='orders')
    status = models.CharField(
        max_length=10,
        choices=ORDER_STATUSES_CHOICES,
        default='New',
    )

    def __str__(self):
        return self.title
class Project(models.Model):
    """A project spawned from exactly one Order (one-to-one)."""
    title = models.CharField(max_length=40)
    description = models.TextField()
    # Originating order; reverse accessor: order.project.
    order = models.OneToOneField(Order, related_name='project')
    status = models.CharField(
        max_length=15,
        choices=PROJECT_STATUSES_CHOICES,
        default='Draft',
    )
    deadline = models.DateTimeField()

    def __str__(self):
        return self.title
class CrossProjectUser(models.Model):
    """Membership link between a User and a Project, with a free-form role."""
    user = models.ForeignKey(User)
    project = models.ForeignKey(Project)
    role = models.CharField(max_length=20)
    description = models.TextField()

    def __str__(self):
        return '{0} : {1}'.format(self.user, self.project)
| {"/registration/auth_backend.py": ["/registration/models.py"], "/registration/views.py": ["/registration/models.py"], "/stock/models.py": ["/registration/models.py"]} |
68,138 | xingyongtao/xiangqinzhushou | refs/heads/master | /wxgz_server/views.py | #!/usr/bin/env python
# -*- coding: UTF -*-
# Create your views here.
import logging
logger = logging.getLogger(__name__)
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from wxgz_server.utils import message
from xiangqinzhushou.settings import TOKEN
from wxgz_server.utils.parser import Parser
@csrf_exempt
def home(request):
    """Single WeChat webhook endpoint.

    GET:  server-verification handshake — echo ``echostr`` back when the
          request signature checks out.
    POST: incoming messages/events pushed by WeChat — persist them and
          return the appropriate XML reply.
    """
    if request.method == 'GET':
        # GET: return the verification string for the WeChat handshake.
        myresponse = HttpResponse()
        if message.check_signature(request.GET, TOKEN):
            myresponse.write(request.GET.get('echostr'))
            return myresponse
        else:
            myresponse.write('不提供直接访问!')
            return myresponse
    # Handle message pushes from the WeChat server.
    if request.method == 'POST':
        received_msg = Parser(request.body)
        # Only text messages carry a .content attribute; image/location/
        # link/event messages previously raised AttributeError here.
        logger.info('收到一条来自 %s 的 %s 消息: %s',
                    received_msg.from_user_name,
                    received_msg.msg_type,
                    getattr(received_msg, 'content', ''))
        msg = message.build_message(received_msg)
        # Use the module logger (was logging.info, which writes to the root
        # logger and bypasses this module's logging configuration).
        logger.info("msg = %s", msg)
        if received_msg.msg_type == 'event' and received_msg.event == 'subscribe':
            return message.subscribe_response(to_user_name=received_msg.from_user_name,
                                              from_user_name=received_msg.to_user_name)
        elif received_msg.msg_type == 'event' and received_msg.event == 'unsubscribe':
            return HttpResponse('成功取消关注!')
        else:
            return message.default_response(to_user_name=received_msg.from_user_name,
                                            from_user_name=received_msg.to_user_name)
| {"/wxgz_server/views.py": ["/wxgz_server/utils/parser.py"], "/wxgz_server/utils/message.py": ["/wxgz_server/models.py"]} |
68,139 | xingyongtao/xiangqinzhushou | refs/heads/master | /wxgz_server/models.py | #!/usr/bin/env python
# -*- coding: UTF -*-
from django.db import models
import time
# Create your models here.
class WeixinUser(models.Model):
    """A WeChat account known to this service, keyed by its OpenID."""
    name = models.CharField(max_length=100, primary_key=True, verbose_name='OpenID')
    # Presumably "may messages be pushed to this user" — TODO confirm semantics.
    publish_message = models.BooleanField(default=False)
    first_login_time = models.DateTimeField(auto_now_add=True)
    last_login_time = models.DateTimeField(auto_now=True)
class BaseMessage(models.Model):
    """Abstract envelope shared by every stored WeChat message type."""
    MESSAGE_TYPES = (
        ('text', '文本消息'),
        ('image', '图片消息'),
        ('location', '地理位置消息'),
        ('link', '链接消息'),
        ('event', '事件推送'),
        ('music', '音乐消息'),
        ('news', '图文消息'),
    )
    from_user_name = models.CharField(max_length=100, verbose_name='OpenID')
    to_user_name = models.CharField(max_length=100, verbose_name='OpenID')
    message_type = models.CharField(max_length=8, verbose_name='消息类型', choices=MESSAGE_TYPES)
    create_time = models.DateTimeField()

    class Meta:
        abstract = True

    def createtime(self):
        # Back-convert create_time to a Unix timestamp (float), matching the
        # numeric CreateTime format used in WeChat's XML.
        return time.mktime(self.create_time.timetuple())
class TextMessage(BaseMessage):
    """Plain text message."""
    message_id = models.IntegerField()
    content = models.TextField()


class ImageMessage(BaseMessage):
    """Image message (WeChat supplies a picture URL)."""
    message_id = models.IntegerField()
    picture_url = models.CharField(max_length=500)


class LocationMessage(BaseMessage):
    """Geolocation share."""
    message_id = models.IntegerField()
    location_x = models.CharField(max_length=20, verbose_name='地理位置纬度')
    location_y = models.CharField(max_length=20, verbose_name='地理位置经度')
    scale = models.CharField(max_length=10, verbose_name='地图缩放大小')
    label = models.CharField(max_length=50, verbose_name='地理位置信息')


class LinkMessage(BaseMessage):
    """Shared link."""
    message_id = models.IntegerField()
    title = models.CharField(max_length=50)
    description = models.CharField(max_length=100)
    url = models.CharField(max_length=500)


class EventMessage(BaseMessage):
    """Event push (subscribe / unsubscribe / custom-menu click)."""
    EVENT_TYPES = (
        ('subscribe', '订阅'),
        ('unsubscribe', '取消订阅'),
        ('CLICK', '自定义菜单点击事件')
    )
    event = models.CharField(max_length=15, verbose_name='事件类型', choices=EVENT_TYPES)
    event_key = models.CharField(max_length=20, verbose_name='事件KEY值,与自定义菜单接口中KEY值对应')


class MusicMessage(BaseMessage):
    """Music message (carries plain and high-quality stream URLs)."""
    music_url = models.CharField(max_length=500, verbose_name='音乐链接')
    hq_music_url = models.CharField(max_length=500, verbose_name='高质量音乐链接,WIFI环境下优先使用该链接播放音乐')


class NewsMessage(BaseMessage):
    """Rich-media (news) message; holds only the article count."""
    article_count = models.SmallIntegerField()


class SubNewsMessage(BaseMessage):
    """A single article belonging to a NewsMessage."""
    parent_news = models.ForeignKey(NewsMessage)
    article_count = models.SmallIntegerField()
    index = models.SmallIntegerField(verbose_name='消息的索引位置,从0开始')
    title = models.CharField(max_length=50)
    description = models.CharField(max_length=100)
    picture_url = models.CharField(max_length=500)
    url = models.CharField(max_length=500)
| {"/wxgz_server/views.py": ["/wxgz_server/utils/parser.py"], "/wxgz_server/utils/message.py": ["/wxgz_server/models.py"]} |
68,140 | xingyongtao/xiangqinzhushou | refs/heads/master | /wxgz_server/utils/parser.py | '''
Created on 2013-8-29
@author: yongtaoxing
'''
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
try:
import cStringIO as StringIO
except:
import StringIO
class Parser(object):
    """Parse a WeChat push-message XML document into plain attributes.

    The constructor dispatches on the ``MsgType`` element and populates the
    attributes for that message type: the common envelope (to_user_name,
    from_user_name, create_time, msg_type) plus the type-specific fields.
    All values are kept as the raw text strings from the XML.
    """

    def __init__(self, xml_strs):
        """Parse *xml_strs* (XML string/bytes as POSTed by WeChat)."""
        # ET.fromstring returns the root element directly; the previous
        # StringIO + ElementTree(file=...) detour was unnecessary.
        root = ET.fromstring(xml_strs)
        msgtype = root.find('MsgType').text
        if msgtype == 'text':
            self.get_text_msg(root)
        elif msgtype == 'image':
            self.get_img_msg(root)
        elif msgtype == 'location':
            self.get_location_msg(root)
        elif msgtype == 'link':
            self.get_link_msg(root)
        elif msgtype == 'event':
            self.get_event_msg(root)

    def _get_common(self, root):
        # Envelope fields present in every WeChat message. Fields are looked
        # up by tag name instead of child position (the old root[i] indexing
        # silently mis-assigned fields if elements were reordered).
        self.to_user_name = root.find('ToUserName').text
        self.from_user_name = root.find('FromUserName').text
        self.create_time = root.find('CreateTime').text
        self.msg_type = root.find('MsgType').text

    def get_text_msg(self, root):
        """Text message."""
        self._get_common(root)
        self.content = root.find('Content').text
        self.msg_id = root.find('MsgId').text

    def get_img_msg(self, root):
        """Image message."""
        self._get_common(root)
        self.pic_url = root.find('PicUrl').text
        self.msg_id = root.find('MsgId').text

    def get_location_msg(self, root):
        """Geolocation message."""
        self._get_common(root)
        self.location_x = root.find('Location_X').text
        self.location_y = root.find('Location_Y').text
        self.scale = root.find('Scale').text
        self.label = root.find('Label').text
        self.msg_id = root.find('MsgId').text

    def get_link_msg(self, root):
        """Shared-link message."""
        self._get_common(root)
        self.title = root.find('Title').text
        self.description = root.find('Description').text
        self.url = root.find('Url').text
        self.msg_id = root.find('MsgId').text

    def get_event_msg(self, root):
        """Event push (subscribe / unsubscribe / menu click)."""
        self._get_common(root)
        self.event = root.find('Event').text
        self.event_key = root.find('EventKey').text
| {"/wxgz_server/views.py": ["/wxgz_server/utils/parser.py"], "/wxgz_server/utils/message.py": ["/wxgz_server/models.py"]} |
68,141 | xingyongtao/xiangqinzhushou | refs/heads/master | /wxgz_server/utils/message.py | '''
Created on 2013-8-28
@author: yongtaoxing
'''
import hashlib
import logging
from datetime import datetime

from django.shortcuts import render_to_response

from wxgz_server.models import TextMessage, ImageMessage, LinkMessage, LocationMessage, EventMessage

logger = logging.getLogger(__name__)

# `from datetime.datetime import now` was an unconditional ImportError
# (datetime.datetime is a class, not a module) that crashed this module at
# import time; bind `now` explicitly for the existing call sites below.
now = datetime.now
def check_signature(request_dict, token):
    """Verify that a request really came from the WeChat server.

    WeChat signs requests with SHA-1 over the lexicographically sorted
    concatenation of (token, timestamp, nonce); a matching ``signature``
    query parameter proves the request's origin. Returns a bool.
    """
    if request_dict.get('signature') and request_dict.get('timestamp') and request_dict.get('nonce') and request_dict.get('echostr'):
        signature = request_dict.get('signature')
        timestamp = request_dict.get('timestamp')
        nonce = request_dict.get('nonce')
        tmplist = [token, timestamp, nonce]
        tmplist.sort()
        newstr = ''.join(tmplist)
        # Hash the UTF-8 bytes: hashlib refuses str input on Python 3, and
        # .encode('utf-8') is a no-op-safe call on Python 2 ASCII strings.
        sha1result = hashlib.sha1(newstr.encode('utf-8'))
        return sha1result.hexdigest() == str(signature)
    else:
        return False
def _copy_envelope(msg, parsed_message):
    """Copy the fields shared by every incoming WeChat message type."""
    msg.from_user_name = parsed_message.from_user_name
    msg.to_user_name = parsed_message.to_user_name
    msg.message_type = parsed_message.msg_type
    # The XML parser yields CreateTime as a decimal string (Unix seconds);
    # datetime.fromtimestamp() needs a number, so convert explicitly —
    # passing the raw string raised TypeError.
    msg.create_time = datetime.fromtimestamp(int(parsed_message.create_time))


def build_message(parsed_message):
    """Persist a parsed WeChat message as the matching model instance.

    Returns the saved model object, or None for an unknown message type.
    """
    if parsed_message.msg_type == 'text':
        msg = TextMessage()
        _copy_envelope(msg, parsed_message)
        msg.message_id = parsed_message.msg_id
        msg.content = parsed_message.content
        msg.save()
        return msg
    elif parsed_message.msg_type == 'image':
        msg = ImageMessage()
        _copy_envelope(msg, parsed_message)
        msg.message_id = parsed_message.msg_id
        msg.picture_url = parsed_message.pic_url
        msg.save()
        return msg
    elif parsed_message.msg_type == 'location':
        msg = LocationMessage()
        _copy_envelope(msg, parsed_message)
        msg.message_id = parsed_message.msg_id
        msg.location_x = parsed_message.location_x
        msg.location_y = parsed_message.location_y
        msg.scale = parsed_message.scale
        msg.label = parsed_message.label
        msg.save()
        return msg
    elif parsed_message.msg_type == 'link':
        msg = LinkMessage()
        _copy_envelope(msg, parsed_message)
        msg.message_id = parsed_message.msg_id
        msg.title = parsed_message.title
        msg.description = parsed_message.description
        msg.url = parsed_message.url
        msg.save()
        return msg
    elif parsed_message.msg_type == 'event':
        msg = EventMessage()
        _copy_envelope(msg, parsed_message)
        msg.event = parsed_message.event
        msg.event_key = parsed_message.event_key
        msg.save()
        return msg
    else:
        # Use the module logger (was logging.info on the root logger).
        logger.info("unknown message: %s", parsed_message)
        return None
def default_response(to_user_name, from_user_name):
    """Persist and render the generic auto-reply for ordinary messages."""
    msg = TextMessage(to_user_name=to_user_name, from_user_name=from_user_name)
    msg.content = '您好,您的消息系统已经收到,会马上处理,请耐心等待。'
    # Call datetime.now() directly: the module's `from datetime.datetime
    # import now` was never importable (datetime.datetime is a class).
    msg.create_time = datetime.now()
    msg.save()
    return render_to_response('response/text_to_user.xml', {'msg': msg})
def subscribe_response(to_user_name, from_user_name):
    """Persist and render the welcome reply sent on a subscribe event."""
    msg = TextMessage(to_user_name=to_user_name, from_user_name=from_user_name)
    msg.content = '您好,欢迎订阅相亲助手!'
    # Call datetime.now() directly: the module's `from datetime.datetime
    # import now` was never importable (datetime.datetime is a class).
    msg.create_time = datetime.now()
    msg.save()
    return render_to_response('response/text_to_user.xml', {'msg': msg})
if __name__ == '__main__':
pass
| {"/wxgz_server/views.py": ["/wxgz_server/utils/parser.py"], "/wxgz_server/utils/message.py": ["/wxgz_server/models.py"]} |
68,145 | hmlan/h5 | refs/heads/master | /case/t3.py | list=[1,23,6,9,112,45,36]
# Practice script: list methods, comprehensions, tuples, sets, dicts.
# NOTE(review): the variable `list` (assigned above) shadows the builtin
# list type for the rest of this file.
list.sort()          # in-place ascending sort
print(list)
list.reverse()       # now descending
print(list)
l=[10*x for x in list if x<20]   # filtered comprehension
print(l)
t=[[x,5*x] for x in list]        # nested-list comprehension
print(t)
a=1,2,3,4            # tuple packing without parentheses
print(a)
b=a,(2,4,8)          # tuple of tuples
print(b)
basket={'s','s','g','g','f','f'}  # duplicates collapse in a set
print(basket)
m=set("allladdllll")
print(m)
n=set("kkshlll")
print(m-n)           # set difference
tel={"username":"may","password":123456}
tel["yanzhengma"]=3333
print(tel)
print(tel.keys())
knights = {'gallahad': 'the pure', 'robin': 'the brave'}
for k, v in knights.items():
    print(k, v)
questions = ['name', 'quest', 'favorite color']
answers = ['lancelot', 'the holy grail', 'blue']
for q, a in zip(questions, answers):
print('What is your {0}? It is {1}.'.format(q, a)) | {"/page/login_page.py": ["/page/base_page.py"], "/page/index_page.py": ["/page/base_page.py"], "/case/test_tkorder.py": ["/page/index_page.py", "/page/mine_page.py", "/page/login_page.py"], "/page/mine_page.py": ["/page/base_page.py"]} |
68,146 | hmlan/h5 | refs/heads/master | /case/t2.py | # print("hello,world")
# a='zhangsan'
# print("我的名字叫%r"%a)
# n=input("请输入你的内容:")
# print("你输入的内容时:%r"%n)
# Practice script: accumulation loop, dict literal, and raising exceptions.
s = 0
for i in range(1,100):
    s=s+i
print(s)  # sum of 1..99
# NOTE(review): `dict` shadows the builtin dict type below.
dict={"usename":"Humaolan","password":"123455"}
# dict.values()
# dict.keys()
# print(dict.keys())
# print(dict.values())
# print(dict.items())
# for key,values in dict.items():
# # print(key,values)
# # import time
# # print(time.ctime()
# aa=0
# try:
# open("a.txt",'r')
# print(aa)
# except Exception as msg:
# print(msg)
from random import randint
n=randint(1,10)
# Demonstration: this block always raises NameError (even or odd branch).
if n%2==0:
    raise NameError("%d is even"%n)
else:
    raise NameError("%d is odd"%n)
| {"/page/login_page.py": ["/page/base_page.py"], "/page/index_page.py": ["/page/base_page.py"], "/case/test_tkorder.py": ["/page/index_page.py", "/page/mine_page.py", "/page/login_page.py"], "/page/mine_page.py": ["/page/base_page.py"]} |
68,147 | hmlan/h5 | refs/heads/master | /page/login_page.py | from page.base_page import PageBase
from selenium.webdriver.common.by import By
class LoginPage(PageBase):
    """Page object for the login screen."""

    # Locators
    tel_input_loc = By.XPATH, "//input[@type='tel']"  # phone-number field
    pwd_input_loc = By.XPATH, "//input[@type='password']"  # password field
    submit_button_loc= By.CSS_SELECTOR, ".shj-box-padding.flex-row.justify-content-center.shj-btn" # login button

    def tel_input(self,text):
        # Wait for the phone field to appear, then type the number.
        self.wait(self.tel_input_loc)
        self.find_element(self.tel_input_loc).send_keys(text)

    def pwd_input(self,text):
        # Wait for the password field to appear, then type the password.
        self.wait(self.pwd_input_loc)
        self.find_element(self.pwd_input_loc).send_keys(text)
def submit_button(self):
self.wait(self.submit_button_loc)
self.find_element(self.submit_button_loc).click() | {"/page/login_page.py": ["/page/base_page.py"], "/page/index_page.py": ["/page/base_page.py"], "/case/test_tkorder.py": ["/page/index_page.py", "/page/mine_page.py", "/page/login_page.py"], "/page/mine_page.py": ["/page/base_page.py"]} |
68,148 | hmlan/h5 | refs/heads/master | /page/base_page.py | from selenium.webdriver.support.wait import WebDriverWait
class PageBase():
    """Common base for page objects: a thin convenience wrapper around a
    Selenium WebDriver instance."""

    def __init__(self, driver):
        # Keep a private handle on the driving browser session.
        self._driver = driver

    def open(self, url):
        """Navigate the browser to *url*."""
        self._driver.get(url)

    def find_element(self, locator):
        """Locate one element from a (by, value) locator tuple."""
        by, value = locator
        return self._driver.find_element(by, value)

    def wait(self, locator, time_out=10):
        """Block until *locator* resolves, or raise after *time_out* seconds."""
        WebDriverWait(self._driver, time_out).until(
            lambda drv: drv.find_element(*locator)
        )

    def switch(self, text):
        """Switch the driver's context into the frame identified by *text*."""
        self._driver.switch_to.frame(text)
| {"/page/login_page.py": ["/page/base_page.py"], "/page/index_page.py": ["/page/base_page.py"], "/case/test_tkorder.py": ["/page/index_page.py", "/page/mine_page.py", "/page/login_page.py"], "/page/mine_page.py": ["/page/base_page.py"]} |
68,149 | hmlan/h5 | refs/heads/master | /page/index_page.py | from page.base_page import PageBase
from selenium.webdriver.common.by import By
class IndexPage(PageBase):
    """Page object for the home page."""

    # Locator for the "我的" (my account) tab in the navigation.
    mine_button_loc = By.XPATH, "//div[text()='我的']"

    def mine_button(self):
        # Wait for the tab to render, then open the "my account" page.
        self.wait(self.mine_button_loc)
        self.find_element(self.mine_button_loc).click()
| {"/page/login_page.py": ["/page/base_page.py"], "/page/index_page.py": ["/page/base_page.py"], "/case/test_tkorder.py": ["/page/index_page.py", "/page/mine_page.py", "/page/login_page.py"], "/page/mine_page.py": ["/page/base_page.py"]} |
68,150 | hmlan/h5 | refs/heads/master | /case/t1.py | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
import time
# Manual end-to-end walkthrough of the dine-in order flow, driven with raw
# selectors and fixed sleeps (no page objects). Kept as an exploratory script.

# Open the app and log in.
d=webdriver.Chrome()
d.get("http://172.18.0.50:9003")
#time.sleep(10)
#me = By.XPATH, "//div[text()='我的']"
#d.find_element(me).click()
d.find_element_by_xpath("//div[text()='我的']").click()
d.find_element_by_xpath("//div[text()='点击登录']").click()
d.find_element_by_xpath("//input[@type='tel']").send_keys(13981401923)
d.find_element_by_xpath("//input[@type='password']").send_keys(123456)
d.find_element_by_css_selector(".shj-box-padding.flex-row.justify-content-center.shj-btn").click()
time.sleep(4)
# Switch the order list to dine-in orders.
d.find_element_by_xpath("//div[text()='订单']").click()
time.sleep(3)
#d.find_element_by_xpath("//span[@role='button' and @tabindex='0'and @aria-controls='dropdown-menu-7529']").click()
d.find_element_by_css_selector(".el-icon-caret-bottom.shj-color-footer").click()
time.sleep(3)
d.find_element_by_xpath("//li[text()='堂食订单']").click()
# Pick a shop and add items to the cart.
d.find_element_by_xpath("//div[text()='快捷午餐']").click()
time.sleep(5)
d.find_element_by_xpath("//div[text()='本地通测试店铺(商城)']").click()
time.sleep(8)
d.find_element_by_id("54392120").click()
time.sleep(2)
d.find_element_by_id("51432303").click()
time.sleep(4)
d.find_element_by_css_selector(".shj-height-38.submitSku").click()
time.sleep(2)
# d.find_element_by_xpath("/html/body/div/div/div[3]/div[2]/div").click()
#d.find_element_by_css_selector("div.shj-padding-15>div.shj-height-38.submitSku").click()
# Check out from the cart.
d.find_element_by_id("icon-cart").click()
time.sleep(3)
d.find_element_by_css_selector(".flex-row.align-items-center.shj-check-out").click()
time.sleep(3)
# Try a discount code twice: first an invalid one, then a corrected one.
d.find_element_by_xpath("//span[text()='折扣码']").click()
time.sleep(3)
d.find_element_by_css_selector("div.el-input>input").send_keys(1234567890)# enter the discount code
time.sleep(4)
d.find_element_by_xpath("//div[text()='确定']").click()
time.sleep(3)
d.find_element_by_css_selector("div.el-input>input").send_keys(Keys.CONTROL,'a')
time.sleep(3)
d.find_element_by_css_selector("div.el-input>input").send_keys(Keys.BACK_SPACE,'123456')
time.sleep(4)
d.find_element_by_xpath("//div[text()='确定']").click()
time.sleep(3)
#d.find_element_by_xpath("/html/body/div/div/div[13]/div/div/i").click()
#d.find_element_by_xpath("//div[@class='flex-row'")
# Choose the recipient/time and submit the order.
d.find_element_by_xpath("//div[@class='flex-row user-info']").click()
time.sleep(4)
d.find_element_by_xpath("//div[text()='haha']").click()
time.sleep(4)
d.find_element_by_xpath("//div[@class='shj-box-padding']").click()
time.sleep(4)
d.find_element_by_id("weui-picker-confirm").click()
time.sleep(4)
d.find_element_by_xpath("//div[@class='flex-row shj-foot-container']/div[2]").click()
#d.find_element_by_css_selector("flex-row.align-items-center.shj-bg-color-pink.shj-color-white.shj-submit-btn") | {"/page/login_page.py": ["/page/base_page.py"], "/page/index_page.py": ["/page/base_page.py"], "/case/test_tkorder.py": ["/page/index_page.py", "/page/mine_page.py", "/page/login_page.py"], "/page/mine_page.py": ["/page/base_page.py"]} |
68,151 | hmlan/h5 | refs/heads/master | /case/test_baidu.py | from selenium import webdriver
import unittest
from HTMLTestRunner import HTMLTestRunner
class Baidu(unittest.TestCase):
    """Smoke test: search Baidu for "HTMLTestRunner"."""

    def setUp(self):
        # Fresh browser per test with a generous implicit wait.
        self.driver=webdriver.Chrome()
        self.driver.implicitly_wait(10)
        self.base_url="http://www.baidu.com/"

    def test_baidu_search(self):
        # Type the query and submit; the implicit wait covers page load.
        driver=self.driver
        driver.get(self.base_url)
        driver.find_element_by_id("kw").send_keys("HTMLTestRunner")
        driver.find_element_by_id("su").click()

    def tearDown(self):
        self.driver.quit()
if __name__ == "__main__":
    # Build a one-test suite so the report covers only the search case.
    testunit = unittest.TestSuite()
    testunit.addTest(Baidu("test_baidu_search"))
    # FIX: the report file was opened manually and only closed after
    # runner.run(); an exception mid-run leaked the handle.  A context
    # manager closes it on every path.  (Also dropped a duplicated comment.)
    with open('./result.html', 'wb') as fp:
        runner = HTMLTestRunner(stream=fp,
                                title='百度搜索测试报告',
                                description='用例执行情况')
        runner.run(testunit)
| {"/page/login_page.py": ["/page/base_page.py"], "/page/index_page.py": ["/page/base_page.py"], "/case/test_tkorder.py": ["/page/index_page.py", "/page/mine_page.py", "/page/login_page.py"], "/page/mine_page.py": ["/page/base_page.py"]} |
68,152 | hmlan/h5 | refs/heads/master | /run.py | import unittest
from HTMLTestRunner import HTMLTestRunner
# Collect every test in ./case whose module name matches the pattern.
discover=unittest.defaultTestLoader.discover(
start_dir='case',
pattern='test_tkorder.py'
)
# Run the suite and write an HTML report; the context manager closes the
# report file even if the run raises.
with open('report/report.html','wb') as file:
    runner=HTMLTestRunner(
    stream=file,
    title="易米自动化测试报告",
    description='运行环境:win10,Chrome',
    tester='pc')
    runner.run(discover)
| {"/page/login_page.py": ["/page/base_page.py"], "/page/index_page.py": ["/page/base_page.py"], "/case/test_tkorder.py": ["/page/index_page.py", "/page/mine_page.py", "/page/login_page.py"], "/page/mine_page.py": ["/page/base_page.py"]} |
68,153 | hmlan/h5 | refs/heads/master | /case/test_tkorder.py | import unittest
from selenium import webdriver
from page.index_page import IndexPage
from page.mine_page import MinePage
from page.login_page import LoginPage
from time import sleep
class TestOrder(unittest.TestCase):
    """End-to-end H5 flow: open the index page, go to "mine", and log in."""

    def setUp(self):
        """Start a fresh Chrome session for every test."""
        self.driver = webdriver.Chrome()

    def test_shopping_success(self):
        """Navigate index -> mine -> login and submit the credentials."""
        phone_number = '13981401921'
        login_password = '123456'
        index = IndexPage(self.driver)
        index.open("http://172.18.0.50:9003/")
        sleep(3)
        index.mine_button()  # tap the "mine" tab
        mine = MinePage(self.driver)
        mine.login_button()
        login = LoginPage(self.driver)
        login.tel_input(phone_number)
        login.pwd_input(login_password)
        login.submit_button()
        sleep(3)

    def tearDown(self):
        """Always close the browser, pass or fail."""
        self.driver.quit()
| {"/page/login_page.py": ["/page/base_page.py"], "/page/index_page.py": ["/page/base_page.py"], "/case/test_tkorder.py": ["/page/index_page.py", "/page/mine_page.py", "/page/login_page.py"], "/page/mine_page.py": ["/page/base_page.py"]} |
68,154 | hmlan/h5 | refs/heads/master | /page/mine_page.py | from page.base_page import PageBase
from selenium.webdriver.common.by import By
class MinePage(PageBase):
    """Page object for the "mine" (profile) tab."""

    # Locator for the "点击登录" (tap-to-log-in) entry.
    login_button_loc = By.XPATH, "//div[text()='点击登录']"

    def login_button(self):
        """Wait for the login entry to appear, then click it."""
        self.wait(self.login_button_loc)
        self.find_element(self.login_button_loc).click()
| {"/page/login_page.py": ["/page/base_page.py"], "/page/index_page.py": ["/page/base_page.py"], "/case/test_tkorder.py": ["/page/index_page.py", "/page/mine_page.py", "/page/login_page.py"], "/page/mine_page.py": ["/page/base_page.py"]} |
68,159 | ywzqhl/electrumx | refs/heads/master | /server/version.py | VERSION = "ElectrumX 0.5"
| {"/electrumx_server.py": ["/server/env.py", "/server/protocol.py"], "/server/protocol.py": ["/lib/jsonrpc.py", "/server/block_processor.py", "/server/version.py"], "/server/block_processor.py": ["/server/db.py"]} |
68,160 | ywzqhl/electrumx | refs/heads/master | /server/env.py | # Copyright (c) 2016, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Class for handling environment configuration and defaults.'''
from os import environ
from lib.coins import Coin
from lib.util import LoggedClass
class Env(LoggedClass):
    '''Wraps environment configuration.

    All settings are read from environment variables at construction
    time and exposed as attributes.
    '''

    class Error(Exception):
        '''Raised when a required variable is missing or malformed.'''

    def __init__(self):
        super().__init__()
        coin_name = self.default('COIN', 'Bitcoin')
        network = self.default('NETWORK', 'mainnet')
        self.coin = Coin.lookup_coin_class(coin_name, network)
        self.db_dir = self.required('DB_DIRECTORY')
        self.utxo_MB = self.integer('UTXO_MB', 1000)
        self.hist_MB = self.integer('HIST_MB', 250)
        self.host = self.default('HOST', 'localhost')
        self.reorg_limit = self.integer('REORG_LIMIT', 200)
        self.daemon_url = self.build_daemon_url()
        # Server stuff
        self.tcp_port = self.integer('TCP_PORT', None)
        self.ssl_port = self.integer('SSL_PORT', None)
        if self.ssl_port:
            self.ssl_certfile = self.required('SSL_CERTFILE')
            self.ssl_keyfile = self.required('SSL_KEYFILE')
        self.rpc_port = self.integer('RPC_PORT', 8000)
        self.max_subscriptions = self.integer('MAX_SUBSCRIPTIONS', 10000)
        self.banner_file = self.default('BANNER_FILE', None)
        # The electrum client takes the empty string as unspecified
        self.donation_address = self.default('DONATION_ADDRESS', '')
        self.db_engine = self.default('DB_ENGINE', 'leveldb')
        self.debug = self.default('DEBUG', '')
        self.debug = [item.lower() for item in self.debug.split()]
        # IRC
        self.report_tcp_port = self.integer('REPORT_TCP_PORT', self.tcp_port)
        self.report_ssl_port = self.integer('REPORT_SSL_PORT', self.ssl_port)
        self.report_host = self.default('REPORT_HOST', self.host)
        self.irc_nick = self.default('IRC_NICK', None)
        # NOTE(review): environment values are strings, so any non-empty
        # IRC value (even "0" or "false") enables IRC — confirm intended.
        self.irc = self.default('IRC', False)

    def default(self, envvar, default):
        '''Return the value of *envvar*, or *default* if it is unset.'''
        return environ.get(envvar, default)

    def required(self, envvar):
        '''Return the value of *envvar*; raise Error if it is unset.'''
        value = environ.get(envvar)
        if value is None:
            raise self.Error('required envvar {} not set'.format(envvar))
        return value

    def integer(self, envvar, default):
        '''Return *envvar* converted to int, or *default* if unset.

        Raises Error if the value is set but not a valid integer.
        '''
        value = environ.get(envvar)
        if value is None:
            return default
        try:
            return int(value)
        except (ValueError, TypeError):
            # FIX: was a bare "except:", which would also swallow
            # KeyboardInterrupt / SystemExit; narrow to conversion errors.
            raise self.Error('cannot convert envvar {} value {} to an integer'
                             .format(envvar, value))

    def build_daemon_url(self):
        '''Return DAEMON_URL, or construct one from its component parts.'''
        daemon_url = environ.get('DAEMON_URL')
        if not daemon_url:
            username = self.required('DAEMON_USERNAME')
            password = self.required('DAEMON_PASSWORD')
            host = self.required('DAEMON_HOST')
            port = self.default('DAEMON_PORT', self.coin.DEFAULT_RPC_PORT)
            daemon_url = ('http://{}:{}@{}:{}/'
                          .format(username, password, host, port))
        return daemon_url
| {"/electrumx_server.py": ["/server/env.py", "/server/protocol.py"], "/server/protocol.py": ["/lib/jsonrpc.py", "/server/block_processor.py", "/server/version.py"], "/server/block_processor.py": ["/server/db.py"]} |
68,161 | ywzqhl/electrumx | refs/heads/master | /electrumx_rpc.py | #!/usr/bin/env python3
#
# Copyright (c) 2016, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Script to send RPC commands to a running ElectrumX server.'''
import argparse
import asyncio
import json
import pprint
from functools import partial
from os import environ
class RPCClient(asyncio.Protocol):
    '''One-shot RPC client: sends a single JSON request, pretty-prints the
    response, and stops the event loop when the connection closes.'''

    def __init__(self, loop):
        self.loop = loop
        # Remembered so the response can be formatted per-method.
        self.method = None

    def connection_made(self, transport):
        self.transport = transport

    def connection_lost(self, exc):
        # The server closing the connection ends the program.
        self.loop.stop()

    def send(self, method, params):
        '''Encode and send one newline-terminated JSON request.'''
        self.method = method
        payload = {'method': method, 'params': params, 'id': 'RPC'}
        data = json.dumps(payload) + '\n'
        self.transport.write(data.encode())

    def data_received(self, data):
        # NOTE(review): assumes the whole JSON response arrives in one
        # data_received() call — a large reply split across TCP segments
        # would fail to parse.  Confirm acceptable for localhost RPC.
        payload = json.loads(data.decode())
        self.transport.close()
        result = payload['result']
        error = payload['error']
        if error:
            print("ERROR: {}".format(error))
        else:
            def data_fmt(count, size):
                # NOTE(review): data_fmt is unused — candidate for removal.
                return '{:,d}/{:,d}KB'.format(count, size // 1024)
            def time_fmt(t):
                # Seconds -> "HHH:MM:SS".
                t = int(t)
                return ('{:3d}:{:02d}:{:02d}'
                        .format(t // 3600, (t % 3600) // 60, t % 60))
            if self.method == 'sessions':
                # Tabular output for the "sessions" RPC.
                # NOTE(review): header labels print 'Snt' columns from the
                # recv_* values — verify column order against the server's
                # sessions_info() tuple.
                fmt = ('{:<4} {:>23} {:>15} {:>5} '
                       '{:>7} {:>7} {:>7} {:>7} {:>5} {:>9}')
                print(fmt.format('Type', 'Peer', 'Client', 'Subs',
                                 'Snt #', 'Snt MB', 'Rcv #', 'Rcv MB',
                                 'Errs', 'Time'))
                for (kind, peer, subs, client, recv_count, recv_size,
                     send_count, send_size, error_count, time) in result:
                    print(fmt.format(kind, peer, client, '{:,d}'.format(subs),
                                     '{:,d}'.format(recv_count),
                                     '{:,.1f}'.format(recv_size / 1048576),
                                     '{:,d}'.format(send_count),
                                     '{:,.1f}'.format(send_size / 1048576),
                                     '{:,d}'.format(error_count),
                                     time_fmt(time)))
            else:
                pprint.pprint(result, indent=4)
def main():
    '''Send the RPC command to the server and print the result.'''
    parser = argparse.ArgumentParser('Send electrumx an RPC command')
    parser.add_argument('-p', '--port', metavar='port_num', type=int,
                        help='RPC port number')
    parser.add_argument('command', nargs=1, default=[],
                        help='command to send')
    parser.add_argument('param', nargs='*', default=[],
                        help='params to send')
    args = parser.parse_args()

    # Fall back to the environment, then to the default RPC port.
    if args.port is None:
        args.port = int(environ.get('ELECTRUMX_RPC_PORT', 8000))

    loop = asyncio.get_event_loop()
    factory = partial(RPCClient, loop)
    connect = loop.create_connection(factory, 'localhost', args.port)
    try:
        _transport, client = loop.run_until_complete(connect)
        client.send(args.command[0], args.param)
        loop.run_forever()  # RPCClient stops the loop when the peer closes
    except OSError:
        print('error connecting - is ElectrumX catching up or not running?')
    finally:
        loop.close()
if __name__ == '__main__':
main()
| {"/electrumx_server.py": ["/server/env.py", "/server/protocol.py"], "/server/protocol.py": ["/lib/jsonrpc.py", "/server/block_processor.py", "/server/version.py"], "/server/block_processor.py": ["/server/db.py"]} |
68,162 | ywzqhl/electrumx | refs/heads/master | /electrumx_server.py | #!/usr/bin/env python3
#
# Copyright (c) 2016, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Script to kick off the server.'''
import asyncio
import logging
import os
import signal
import traceback
from functools import partial
from server.env import Env
from server.protocol import BlockServer
def main_loop():
    '''Start the server.'''
    if os.geteuid() == 0:
        raise Exception('DO NOT RUN AS ROOT! Create an unpriveleged user '
                        'account and use that')
    loop = asyncio.get_event_loop()
    #loop.set_debug(True)

    def on_signal(signame):
        '''Call on receipt of a signal to cleanly shutdown.'''
        logging.warning('received {} signal, shutting down'.format(signame))
        # Cancelling every task unwinds run_until_complete() below with
        # CancelledError, which is treated as a clean shutdown.
        for task in asyncio.Task.all_tasks():
            task.cancel()

    # Install signal handlers
    for signame in ('SIGINT', 'SIGTERM'):
        loop.add_signal_handler(getattr(signal, signame),
                                partial(on_signal, signame))
    server = BlockServer(Env())
    future = server.start()
    try:
        loop.run_until_complete(future)
    except asyncio.CancelledError:
        # Expected path for a signal-driven shutdown.
        pass
    finally:
        server.stop()
        loop.close()
def main():
    '''Set up logging, enter main loop.'''
    logging.basicConfig(level=logging.INFO)
    logging.info('ElectrumX server starting')
    try:
        main_loop()
    except Exception:
        # Log the traceback rather than dying silently under a supervisor.
        traceback.print_exc()
        logging.critical('ElectrumX server terminated abnormally')
    else:
        logging.info('ElectrumX server terminated normally')
if __name__ == '__main__':
main()
| {"/electrumx_server.py": ["/server/env.py", "/server/protocol.py"], "/server/protocol.py": ["/lib/jsonrpc.py", "/server/block_processor.py", "/server/version.py"], "/server/block_processor.py": ["/server/db.py"]} |
68,163 | ywzqhl/electrumx | refs/heads/master | /server/protocol.py | # Copyright (c) 2016, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Classes for local RPC server and remote client TCP/SSL servers.'''
import asyncio
import codecs
import json
import ssl
import time
import traceback
from collections import namedtuple
from functools import partial
from lib.hash import sha256, double_sha256, hash_to_str, hex_str_to_hash
from lib.jsonrpc import JSONRPC, json_notification_payload
from lib.util import LoggedClass
from server.block_processor import BlockProcessor
from server.daemon import DaemonError
from server.irc import IRC
from server.version import VERSION
class BlockServer(BlockProcessor):
    '''Like BlockProcessor but also has a server manager and starts
    servers when caught up.'''

    def __init__(self, env):
        super().__init__(env)
        self.server_mgr = ServerManager(self, env)
        # Set once the listening servers have been started; they are
        # brought up only on the first caught_up() call.
        self.bs_caught_up = False

    async def caught_up(self, mempool_hashes):
        # Let the block processor finish first, then (once) start serving
        # clients, and notify sessions of the new height/touched set.
        await super().caught_up(mempool_hashes)
        if not self.bs_caught_up:
            await self.server_mgr.start_servers()
            self.bs_caught_up = True
        self.server_mgr.notify(self.height, self.touched)

    def stop(self):
        '''Close the listening servers.'''
        self.server_mgr.stop()
class ServerManager(LoggedClass):
    '''Manages the servers.'''

    # A queued job paired with the session that requested it, so jobs can
    # be cancelled when their session disconnects.
    AsyncTask = namedtuple('AsyncTask', 'session job')

    def __init__(self, bp, env):
        super().__init__()
        self.bp = bp
        self.env = env
        self.servers = []
        self.irc = IRC(env)
        self.sessions = set()
        # FIFO of AsyncTask; drained one at a time by run_tasks().
        self.tasks = asyncio.Queue()
        self.current_task = None

    async def start_server(self, kind, *args, **kw_args):
        '''Start one listening server of the given kind.'''
        loop = asyncio.get_event_loop()
        protocol_class = LocalRPC if kind == 'RPC' else ElectrumX
        protocol = partial(protocol_class, self, self.bp, self.env, kind)
        server = loop.create_server(protocol, *args, **kw_args)
        host, port = args[:2]
        try:
            self.servers.append(await server)
        except asyncio.CancelledError:
            raise
        except Exception as e:
            # Failing to bind one port is logged but not fatal.
            self.logger.error('{} server failed to listen on {}:{:d} :{}'
                              .format(kind, host, port, e))
        else:
            self.logger.info('{} server listening on {}:{:d}'
                             .format(kind, host, port))

    async def start_servers(self):
        '''Connect to IRC and start listening for incoming connections.

        Only connect to IRC if enabled.  Start listening on RPC, TCP
        and SSL ports only if the port was specified.
        '''
        env = self.env
        if env.rpc_port is not None:
            await self.start_server('RPC', 'localhost', env.rpc_port)
        if env.tcp_port is not None:
            await self.start_server('TCP', env.host, env.tcp_port)
        if env.ssl_port is not None:
            # FIXME: update if we want to require Python >= 3.5.3
            sslc = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
            sslc.load_cert_chain(env.ssl_certfile, keyfile=env.ssl_keyfile)
            await self.start_server('SSL', env.host, env.ssl_port, ssl=sslc)
        # Start the task consumer once the servers are up.
        asyncio.ensure_future(self.run_tasks())
        if env.irc:
            self.logger.info('starting IRC coroutine')
            asyncio.ensure_future(self.irc.start())
        else:
            self.logger.info('IRC disabled')

    def notify(self, height, touched):
        '''Notify sessions about height changes and touched addresses.'''
        sessions = [session for session in self.sessions
                    if isinstance(session, ElectrumX)]
        ElectrumX.notify(sessions, height, touched)

    def stop(self):
        '''Close the listening servers.'''
        for server in self.servers:
            server.close()

    def add_session(self, session):
        assert session not in self.sessions
        self.sessions.add(session)

    def remove_session(self, session):
        self.sessions.remove(session)
        # If this session's job is the one currently running, cancel it.
        if self.current_task and session == self.current_task.session:
            self.logger.info('cancelling running task')
            self.current_task.job.cancel()

    def add_task(self, session, job):
        '''Schedule coroutine *job* on behalf of *session*.'''
        assert session in self.sessions
        task = asyncio.ensure_future(job)
        self.tasks.put_nowait(self.AsyncTask(session, task))

    async def run_tasks(self):
        '''Asynchronously run through the task queue.'''
        while True:
            task = await self.tasks.get()
            try:
                if task.session in self.sessions:
                    self.current_task = task
                    await task.job
                else:
                    # The session went away before its job ran.
                    task.job.cancel()
            except asyncio.CancelledError:
                self.logger.info('cancelled task noted')
            except Exception:
                # Getting here should probably be considered a bug and fixed
                traceback.print_exc()
            finally:
                self.current_task = None

    def irc_peers(self):
        return self.irc.peers

    def session_count(self):
        return len(self.sessions)

    def info(self):
        '''Returned in the RPC 'getinfo' call.'''
        address_count = sum(len(session.hash168s)
                            for session in self.sessions
                            if isinstance(session, ElectrumX))
        return {
            'blocks': self.bp.height,
            'peers': len(self.irc_peers()),
            'sessions': self.session_count(),
            'watched': address_count,
            'cached': 0,
        }

    def sessions_info(self):
        '''Returned to the RPC 'sessions' call.'''
        now = time.time()
        return [(session.kind,
                 session.peername(),
                 len(session.hash168s),
                 'RPC' if isinstance(session, LocalRPC) else session.client,
                 session.recv_count, session.recv_size,
                 session.send_count, session.send_size,
                 session.error_count,
                 now - session.start)
                for session in self.sessions]
class Session(JSONRPC):
    '''Base class of ElectrumX JSON session protocols.'''

    def __init__(self, manager, bp, env, kind):
        super().__init__()
        self.manager = manager
        self.bp = bp
        self.env = env
        self.daemon = bp.daemon
        self.coin = bp.coin
        self.kind = kind
        # Addresses (hash168s) this session has subscribed to.
        self.hash168s = set()
        self.client = 'unknown'

    def connection_made(self, transport):
        '''Handle an incoming client connection.'''
        super().connection_made(transport)
        self.logger.info('connection from {}'.format(self.peername()))
        self.manager.add_session(self)

    def connection_lost(self, exc):
        '''Handle client disconnection.'''
        super().connection_lost(exc)
        # Only log sessions that erred or were notably chatty.
        if self.error_count or self.send_size >= 250000:
            self.logger.info('{} disconnected. '
                             'Sent {:,d} bytes in {:,d} messages {:,d} errors'
                             .format(self.peername(), self.send_size,
                                     self.send_count, self.error_count))
        self.manager.remove_session(self)

    def method_handler(self, method):
        '''Return the handler that will handle the RPC method.'''
        return self.handlers.get(method)

    def on_json_request(self, request):
        '''Queue the request for asynchronous handling.'''
        self.manager.add_task(self, self.handle_json_request(request))

    def peername(self):
        '''Return "host:port" for the peer, or "unknown".'''
        info = self.peer_info
        return 'unknown' if not info else '{}:{}'.format(info[0], info[1])

    def tx_hash_from_param(self, param):
        '''Raise an RPCError if the parameter is not a valid transaction
        hash.'''
        if isinstance(param, str) and len(param) == 64:
            try:
                bytes.fromhex(param)
                return param
            except ValueError:
                pass
        # FIX: RPCError is not imported in this module; the bare name was a
        # NameError at runtime.  Use the inner class inherited from JSONRPC.
        raise self.RPCError('parameter should be a transaction hash: {}'
                            .format(param))

    def hash168_from_param(self, param):
        '''Convert an address parameter to a hash168 or raise RPCError.'''
        if isinstance(param, str):
            try:
                return self.coin.address_to_hash168(param)
            except Exception:
                # FIX: was a bare "except:", which would also swallow
                # KeyboardInterrupt / SystemExit.
                pass
        raise self.RPCError('parameter should be a valid address: {}'
                            .format(param))

    def non_negative_integer_from_param(self, param):
        '''Coerce *param* to a non-negative int or raise RPCError.'''
        try:
            param = int(param)
        except (ValueError, TypeError):
            # FIX: a TypeError (e.g. param is None or a list) previously
            # escaped and crashed the handler instead of yielding an RPC
            # error.
            pass
        else:
            if param >= 0:
                return param
        raise self.RPCError('param should be a non-negative integer: {}'
                            .format(param))

    def extract_hash168(self, params):
        '''Require exactly one params entry; return it as a hash168.'''
        if len(params) == 1:
            return self.hash168_from_param(params[0])
        raise self.RPCError('params should contain a single address: {}'
                            .format(params))

    def extract_non_negative_integer(self, params):
        '''Require exactly one params entry; return it as an int.'''
        if len(params) == 1:
            return self.non_negative_integer_from_param(params[0])
        raise self.RPCError('params should contain a non-negative integer: {}'
                            .format(params))

    def require_empty_params(self, params):
        '''Raise RPCError if params is non-empty.'''
        if params:
            raise self.RPCError('params should be empty: {}'.format(params))
class ElectrumX(Session):
    '''A TCP server that handles incoming Electrum connections.'''

    def __init__(self, *args):
        super().__init__(*args)
        self.subscribe_headers = False
        self.subscribe_height = False
        self.notified_height = None
        # RPC method table: "prefix.suffix" -> bound method; dots in the
        # suffix become underscores in the method name.
        rpcs = [
            ('blockchain',
             'address.get_balance address.get_history address.get_mempool '
             'address.get_proof address.listunspent address.subscribe '
             'block.get_header block.get_chunk estimatefee headers.subscribe '
             'numblocks.subscribe relayfee transaction.broadcast '
             'transaction.get transaction.get_merkle utxo.get_address'),
            ('server',
             'banner donation_address peers.subscribe version'),
        ]
        self.handlers = {'.'.join([prefix, suffix]):
                         getattr(self, suffix.replace('.', '_'))
                         for prefix, suffixes in rpcs
                         for suffix in suffixes.split()}

    @classmethod
    def notify(cls, sessions, height, touched):
        '''Push height and touched-address notifications to sessions.

        Shared payloads are built lazily, at most once per call.
        '''
        headers_payload = height_payload = None
        for session in sessions:
            if height != session.notified_height:
                session.notified_height = height
                if session.subscribe_headers:
                    if headers_payload is None:
                        headers_payload = json_notification_payload(
                            'blockchain.headers.subscribe',
                            (session.electrum_header(height), ),
                        )
                    session.send_json(headers_payload)
                if session.subscribe_height:
                    if height_payload is None:
                        height_payload = json_notification_payload(
                            'blockchain.numblocks.subscribe',
                            (height, ),
                        )
                    session.send_json(height_payload)
            hash168_to_address = session.coin.hash168_to_address
            for hash168 in session.hash168s.intersection(touched):
                address = hash168_to_address(hash168)
                status = session.address_status(hash168)
                payload = json_notification_payload(
                    'blockchain.address.subscribe', (address, status))
                session.send_json(payload)

    def height(self):
        '''Return the block processor's current height.'''
        return self.bp.height

    def current_electrum_header(self):
        '''Used as response to a headers subscription request.'''
        return self.electrum_header(self.height())

    def electrum_header(self, height):
        '''Return the deserialised header at the given height.'''
        if not 0 <= height <= self.height():
            # FIX: RPCError is not imported in this module; use the inner
            # class inherited from JSONRPC (bare name was a NameError).
            raise self.RPCError('height {:,d} out of range'.format(height))
        header = self.bp.read_headers(height, 1)
        return self.coin.electrum_header(header, height)

    def address_status(self, hash168):
        '''Returns status as 32 bytes.'''
        # Note history is ordered and mempool unordered in electrum-server
        # For mempool, height is -1 if unconfirmed txins, otherwise 0
        history = self.bp.get_history(hash168)
        mempool = self.bp.mempool_transactions(hash168)
        status = ''.join('{}:{:d}:'.format(hash_to_str(tx_hash), height)
                         for tx_hash, height in history)
        status += ''.join('{}:{:d}:'.format(hex_hash, -unconfirmed)
                          for hex_hash, tx_fee, unconfirmed in mempool)
        if status:
            return sha256(status.encode()).hex()
        return None

    async def tx_merkle(self, tx_hash, height):
        '''tx_hash is a hex string.'''
        hex_hashes = await self.daemon.block_hex_hashes(height, 1)
        block = await self.daemon.deserialised_block(hex_hashes[0])
        tx_hashes = block['tx']
        # This will throw if the tx_hash is bad
        pos = tx_hashes.index(tx_hash)

        # Build the merkle branch bottom-up: pair with the sibling at each
        # level, duplicating the last hash on odd-length levels.
        idx = pos
        hashes = [hex_str_to_hash(txh) for txh in tx_hashes]
        merkle_branch = []
        while len(hashes) > 1:
            if len(hashes) & 1:
                hashes.append(hashes[-1])
            idx = idx - 1 if (idx & 1) else idx + 1
            merkle_branch.append(hash_to_str(hashes[idx]))
            idx //= 2
            hashes = [double_sha256(hashes[n] + hashes[n + 1])
                      for n in range(0, len(hashes), 2)]
        return {"block_height": height, "merkle": merkle_branch, "pos": pos}

    def get_history(self, hash168):
        '''Return confirmed history followed by mempool transactions.'''
        # Note history is ordered and mempool unordered in electrum-server
        # For mempool, height is -1 if unconfirmed txins, otherwise 0
        history = self.bp.get_history(hash168, limit=None)
        mempool = self.bp.mempool_transactions(hash168)
        conf = tuple({'tx_hash': hash_to_str(tx_hash), 'height': height}
                     for tx_hash, height in history)
        unconf = tuple({'tx_hash': tx_hash, 'height': -unconfirmed, 'fee': fee}
                       for tx_hash, fee, unconfirmed in mempool)
        return conf + unconf

    def get_chunk(self, index):
        '''Return header chunk as hex.  Index is a non-negative integer.'''
        chunk_size = self.coin.CHUNK_SIZE
        next_height = self.height() + 1
        start_height = min(index * chunk_size, next_height)
        count = min(next_height - start_height, chunk_size)
        return self.bp.read_headers(start_height, count).hex()

    def get_balance(self, hash168):
        '''Return confirmed and unconfirmed balances for an address.'''
        confirmed = self.bp.get_balance(hash168)
        unconfirmed = self.bp.mempool_value(hash168)
        return {'confirmed': confirmed, 'unconfirmed': unconfirmed}

    def list_unspent(self, hash168):
        '''Return the sorted UTXOs of an address as dicts.'''
        utxos = self.bp.get_utxos_sorted(hash168)
        return tuple({'tx_hash': hash_to_str(utxo.tx_hash),
                      'tx_pos': utxo.tx_pos, 'height': utxo.height,
                      'value': utxo.value}
                     for utxo in utxos)

    # --- blockchain commands

    async def address_get_balance(self, params):
        hash168 = self.extract_hash168(params)
        return self.get_balance(hash168)

    async def address_get_history(self, params):
        hash168 = self.extract_hash168(params)
        return self.get_history(hash168)

    async def address_get_mempool(self, params):
        hash168 = self.extract_hash168(params)
        raise self.RPCError('get_mempool is not yet implemented')

    async def address_get_proof(self, params):
        hash168 = self.extract_hash168(params)
        raise self.RPCError('get_proof is not yet implemented')

    async def address_listunspent(self, params):
        hash168 = self.extract_hash168(params)
        return self.list_unspent(hash168)

    async def address_subscribe(self, params):
        hash168 = self.extract_hash168(params)
        self.hash168s.add(hash168)
        return self.address_status(hash168)

    async def block_get_chunk(self, params):
        index = self.extract_non_negative_integer(params)
        return self.get_chunk(index)

    async def block_get_header(self, params):
        height = self.extract_non_negative_integer(params)
        return self.electrum_header(height)

    async def estimatefee(self, params):
        return await self.daemon.estimatefee(params)

    async def headers_subscribe(self, params):
        self.require_empty_params(params)
        self.subscribe_headers = True
        return self.current_electrum_header()

    async def numblocks_subscribe(self, params):
        self.require_empty_params(params)
        self.subscribe_height = True
        return self.height()

    async def relayfee(self, params):
        '''The minimum fee a low-priority tx must pay in order to be accepted
        to the daemon's memory pool.'''
        self.require_empty_params(params)
        return await self.daemon.relayfee()

    async def transaction_broadcast(self, params):
        '''Pass through the parameters to the daemon.

        An ugly API: current Electrum clients only pass the raw
        transaction in hex and expect error messages to be returned in
        the result field.  And the server shouldn't be doing the client's
        user interface job here.
        '''
        try:
            tx_hash = await self.daemon.sendrawtransaction(params)
            self.logger.info('sent tx: {}'.format(tx_hash))
            return tx_hash
        except DaemonError as e:
            error = e.args[0]
            message = error['message']
            self.logger.info('sendrawtransaction: {}'.format(message))
            if 'non-mandatory-script-verify-flag' in message:
                return (
                    'Your client produced a transaction that is not accepted '
                    'by the network any more. Please upgrade to Electrum '
                    '2.5.1 or newer.'
                )
            return (
                'The transaction was rejected by network rules. ({})\n[{}]'
                .format(message, params[0])
            )

    async def transaction_get(self, params):
        '''Return the serialized raw transaction.'''
        # For some reason Electrum passes a height. Don't require it
        # in anticipation it might be dropped in the future.
        if 1 <= len(params) <= 2:
            tx_hash = self.tx_hash_from_param(params[0])
            return await self.daemon.getrawtransaction(tx_hash)
        raise self.RPCError('params wrong length: {}'.format(params))

    async def transaction_get_merkle(self, params):
        if len(params) == 2:
            tx_hash = self.tx_hash_from_param(params[0])
            height = self.non_negative_integer_from_param(params[1])
            return await self.tx_merkle(tx_hash, height)
        raise self.RPCError('params should contain a transaction hash and height')

    async def utxo_get_address(self, params):
        if len(params) == 2:
            tx_hash = self.tx_hash_from_param(params[0])
            index = self.non_negative_integer_from_param(params[1])
            tx_hash = hex_str_to_hash(tx_hash)
            hash168 = self.bp.get_utxo_hash168(tx_hash, index)
            if hash168:
                return self.coin.hash168_to_address(hash168)
            return None
        raise self.RPCError('params should contain a transaction hash and index')

    # --- server commands

    async def banner(self, params):
        '''Return the server banner.'''
        self.require_empty_params(params)
        banner = 'Welcome to Electrum!'
        if self.env.banner_file:
            try:
                with codecs.open(self.env.banner_file, 'r', 'utf-8') as f:
                    banner = f.read()
            except Exception as e:
                # Best effort: fall back to the default banner on any error.
                self.logger.error('reading banner file {}: {}'
                                  .format(self.env.banner_file, e))
        return banner

    async def donation_address(self, params):
        '''Return the donation address as a string.

        If none is specified return the empty string.
        '''
        self.require_empty_params(params)
        return self.env.donation_address

    async def peers_subscribe(self, params):
        '''Returns the peer (ip, host, ports) tuples.

        Despite the name electrum-server does not treat this as a
        subscription.
        '''
        self.require_empty_params(params)
        return list(self.manager.irc_peers().values())

    async def version(self, params):
        '''Return the server version as a string.'''
        if len(params) == 2:
            self.client = str(params[0])
            self.protocol_version = params[1]
        return VERSION
class LocalRPC(Session):
    '''A local TCP RPC server for querying status.'''

    def __init__(self, *args):
        super().__init__(*args)
        # Each command name maps directly to the same-named method below.
        command_names = 'getinfo sessions numsessions peers numpeers'
        self.handlers = {name: getattr(self, name)
                         for name in command_names.split()}

    async def getinfo(self, params):
        return self.manager.info()

    async def sessions(self, params):
        return self.manager.sessions_info()

    async def numsessions(self, params):
        return self.manager.session_count()

    async def peers(self, params):
        return self.manager.irc_peers()

    async def numpeers(self, params):
        return len(self.manager.irc_peers())
| {"/electrumx_server.py": ["/server/env.py", "/server/protocol.py"], "/server/protocol.py": ["/lib/jsonrpc.py", "/server/block_processor.py", "/server/version.py"], "/server/block_processor.py": ["/server/db.py"]} |
68,164 | ywzqhl/electrumx | refs/heads/master | /lib/jsonrpc.py | # Copyright (c) 2016, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Class for handling JSON RPC 2.0 connections, server or client.'''
import asyncio
import json
import numbers
import time
from lib.util import LoggedClass
def json_result_payload(result, id_):
    '''Return a JSON-RPC 2.0 success payload for request *id_*.'''
    # We should not respond to notifications
    assert id_ is not None
    payload = {'jsonrpc': '2.0', 'id': id_}
    payload['error'] = None
    payload['result'] = result
    return payload
def json_error_payload(message, code, id_=None):
    '''Return a JSON-RPC 2.0 error payload (id defaults to None).'''
    return {
        'jsonrpc': '2.0',
        'error': {'message': message, 'code': code},
        'result': None,
        'id': id_,
    }
def json_notification_payload(method, params):
    '''Return a JSON-RPC 2.0 notification payload (id is always None).'''
    payload = {'jsonrpc': '2.0', 'id': None}
    payload['method'] = method
    payload['params'] = params
    return payload
class JSONRPC(asyncio.Protocol, LoggedClass):
'''Manages a JSONRPC connection.
Assumes JSON messages are newline-separated and that newlines
cannot appear in the JSON other than to separate lines.
Derived classes need to implement the synchronous functions
on_json_request() and method_handler(). They probably also want
to override connection_made() and connection_lost() but should be
sure to call the implementation in this base class first.
on_json_request() is passed a JSON request as a python object
after decoding. It should arrange to pass on to the asynchronous
handle_json_request() method.
method_handler() takes a method string and should return a function
that can be passed a parameters array, or None for an unknown method.
Handlers should raise an RPCError on error.
'''
# See http://www.jsonrpc.org/specification
PARSE_ERROR = -32700
INVALID_REQUEST = -32600
METHOD_NOT_FOUND = -32601
INVALID_PARAMS = -32602
INTERNAL_ERROR = -32603
# Backwards-compatible alias: the constant was originally misspelt, but
# send_json() refers to the correct spelling INTERNAL_ERROR.
INTERAL_ERROR = INTERNAL_ERROR
ID_TYPES = (type(None), str, numbers.Number)

class RPCError(Exception):
    '''RPC handlers raise this error.'''
    def __init__(self, msg, code=-1, **kw_args):
        super().__init__(**kw_args)
        self.msg = msg
        # FIX: was the bare expression "self.code" — a no-op that never
        # stored the error code on the exception.
        self.code = code
def __init__(self):
    super().__init__()
    # Session start time; used for per-session statistics.
    self.start = time.time()
    self.transport = None
    # Parts of an incomplete JSON line.  We buffer them until
    # getting a newline.
    self.parts = []
    # recv_count is JSON messages not calls to data_received()
    self.recv_count = 0
    self.recv_size = 0
    self.send_count = 0
    self.send_size = 0
    self.error_count = 0
    self.peer_info = None
def connection_made(self, transport):
    '''Handle an incoming client connection.'''
    self.transport = transport
    # Cache the peer address for later logging/stat reporting.
    self.peer_info = transport.get_extra_info('peername')
def connection_lost(self, exc):
    '''Handle client disconnection.'''
    # Nothing to clean up at this level; subclasses override.
    pass
def data_received(self, data):
    '''Handle incoming data (synchronously).

    Requests end in newline characters.  Pass complete requests to
    decode_message for handling.
    '''
    self.recv_size += len(data)
    while True:
        # Find the next newline; everything before it completes a message.
        npos = data.find(ord('\n'))
        if npos == -1:
            # No terminator yet: buffer the partial line for next time.
            self.parts.append(data)
            break
        self.recv_count += 1
        # Split off the completed fragment; keep the remainder in `data`.
        tail, data = data[:npos], data[npos + 1:]
        # Join any previously buffered partial-line fragments.
        parts, self.parts = self.parts, []
        parts.append(tail)
        self.decode_message(b''.join(parts))
def decode_message(self, message):
    '''Decode a binary message and queue it for asynchronous handling.

    Messages that cannot be decoded are logged and dropped.
    '''
    try:
        text = message.decode()
    except UnicodeDecodeError as e:
        msg = 'cannot decode binary bytes: {}'.format(e)
        self.logger.warning(msg)
        self.send_json_error(msg, self.PARSE_ERROR)
        return
    try:
        request = json.loads(text)
    except json.JSONDecodeError as e:
        msg = 'cannot decode JSON: {}'.format(e)
        self.logger.warning(msg)
        self.send_json_error(msg, self.PARSE_ERROR)
        return
    self.on_json_request(request)
def send_json_notification(self, method, params):
    '''Send a JSON notification (a message with no id).'''
    return self.send_json(json_notification_payload(method, params))
def send_json_result(self, result, id_):
    '''Send a JSON result for the request with the given id.'''
    return self.send_json(json_result_payload(result, id_))
def send_json_error(self, message, code, id_=None):
    '''Send a JSON error and bump the session's error counter.'''
    self.error_count += 1
    return self.send_json(json_error_payload(message, code, id_))
def send_json(self, payload):
    '''Serialize a payload as one JSON line and write it to the transport.

    Returns False when the transport is closing, True otherwise.
    '''
    if self.transport.is_closing():
        # Confirmed this happens, sometimes a lot
        return False

    try:
        encoded = (json.dumps(payload) + '\n').encode()
    except TypeError:
        # Something in the payload was not JSON-serializable.
        msg = 'JSON encoding failure: {}'.format(payload)
        self.logger.error(msg)
        return self.send_json_error(msg, self.INTERNAL_ERROR,
                                    payload.get('id'))

    self.send_count += 1
    self.send_size += len(encoded)
    self.transport.write(encoded)
    return True
async def handle_json_request(self, request):
    '''Asynchronously handle a JSON request.

    Handles batch requests.  Returns True if the request response
    was sent (or if nothing was sent because the request was a
    notification).  Returns False if the send was aborted because
    the connection is closing.
    '''
    if isinstance(request, list):
        # BUG FIX: batch_request_payload is a coroutine and must be
        # awaited; previously the un-awaited coroutine object itself
        # was passed to send_json, which cannot serialize it.
        payload = await self.batch_request_payload(request)
    else:
        payload = await self.single_request_payload(request)

    if not payload:
        return True
    return self.send_json(payload)
async def batch_request_payload(self, batch):
    '''Return the JSON payload corresponding to a batch JSON request.

    Each non-notification item contributes one entry to the returned
    list; an empty batch is itself an error.
    '''
    # Batches must have at least one request.
    if not batch:
        return json_error_payload('empty request list',
                                  self.INVALID_REQUEST)

    # PYTHON 3.6: use asynchronous comprehensions when supported
    payload = []
    # BUG FIX: the loop previously iterated over the undefined name
    # 'request' (a NameError on any non-empty batch); it must iterate
    # the 'batch' argument.
    for item in batch:
        item_payload = await self.single_request_payload(item)
        if item_payload:
            payload.append(item_payload)
    return payload
async def single_request_payload(self, request):
    '''Return the JSON payload corresponding to a single JSON request.

    Return None if the request is a notification.
    '''
    if not isinstance(request, dict):
        return json_error_payload('request must be a dict',
                                  self.INVALID_REQUEST)

    id_ = request.get('id')
    if not isinstance(id_, self.ID_TYPES):
        return json_error_payload('invalid id: {}'.format(id_),
                                  self.INVALID_REQUEST)

    try:
        result = await self.method_result(request.get('method'),
                                          request.get('params', []))
        payload = None if id_ is None else json_result_payload(result, id_)
    except self.RPCError as e:
        payload = None if id_ is None else json_error_payload(e.msg, e.code,
                                                              id_)
    return payload
async def method_result(self, method, params):
    '''Validate method and params, dispatch to the handler, and return
    its awaited result.  Raises RPCError on any validation failure.'''
    if not isinstance(method, str):
        raise self.RPCError('invalid method: {}'.format(method),
                            self.INVALID_REQUEST)
    if not isinstance(params, list):
        raise self.RPCError('params should be an array',
                            self.INVALID_REQUEST)

    handler = self.method_handler(method)
    if not handler:
        raise self.RPCError('unknown method: {}'.format(method),
                            self.METHOD_NOT_FOUND)

    return await handler(params)
def on_json_request(self, request):
    '''Subclass responsibility: process one decoded JSON request.'''
    raise NotImplementedError('on_json_request in class {}'
                              .format(self.__class__.__name__))
def method_handler(self, method):
    '''Subclass responsibility: return the handler for a method name.'''
    raise NotImplementedError('method_handler in class {}'
                              .format(self.__class__.__name__))
| {"/electrumx_server.py": ["/server/env.py", "/server/protocol.py"], "/server/protocol.py": ["/lib/jsonrpc.py", "/server/block_processor.py", "/server/version.py"], "/server/block_processor.py": ["/server/db.py"]} |
68,165 | ywzqhl/electrumx | refs/heads/master | /server/db.py | # Copyright (c) 2016, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Interface to the blockchain database.'''
import array
import ast
import os
from struct import pack, unpack
from bisect import bisect_right
from collections import namedtuple
from lib.util import chunks, LoggedClass
from lib.hash import double_sha256, hash_to_str
from server.storage import open_db
UTXO = namedtuple("UTXO", "tx_num tx_pos tx_hash height value")
class DB(LoggedClass):
    '''Simple wrapper of the backend database for querying.

    Performs no DB update, though the DB will be cleaned on opening if
    it was shutdown uncleanly.
    '''

    # DB format versions this software can read.
    VERSIONS = [2]

    class MissingUTXOError(Exception):
        '''Raised if a mempool tx input UTXO couldn't be found.'''

    class DBError(Exception):
        '''Raised on general DB errors generally indicating corruption.'''

    def __init__(self, env):
        '''Open the database and metadata files for env's coin/network
        and load the recorded chain state.'''
        super().__init__()
        self.env = env
        self.coin = env.coin

        self.logger.info('switching current directory to {}'
                         .format(env.db_dir))
        os.chdir(env.db_dir)

        # Open DB and metadata files.  Record some of its state.
        db_name = '{}-{}'.format(self.coin.NAME, self.coin.NET)
        self.db = open_db(db_name, env.db_engine)
        if self.db.is_new:
            self.logger.info('created new {} database {}'
                             .format(env.db_engine, db_name))
        else:
            self.logger.info('successfully opened {} database {}'
                             .format(env.db_engine, db_name))
        self.read_state()

        create = self.db_height == -1
        self.headers_file = self.open_file('headers', create)
        self.txcount_file = self.open_file('txcount', create)
        # Tx hashes are sharded across fixed-size files of this size.
        self.tx_hash_file_size = 16 * 1024 * 1024

        # tx_counts[N] has the cumulative number of txs at the end of
        # height N.  So tx_counts[0] is 1 - the genesis coinbase
        self.tx_counts = array.array('I')
        self.txcount_file.seek(0)
        self.tx_counts.fromfile(self.txcount_file, self.db_height + 1)
        if self.tx_counts:
            assert self.db_tx_count == self.tx_counts[-1]
        else:
            assert self.db_tx_count == 0

    def read_state(self):
        '''Load chain state from the DB, or initialise it for a new DB.

        Raises DBError if the stored state is unreadable, has an
        unhandled version, or is for a different coin.
        '''
        if self.db.is_new:
            self.db_height = -1
            self.db_tx_count = 0
            self.db_tip = b'\0' * 32
            self.flush_count = 0
            self.utxo_flush_count = 0
            self.wall_time = 0
            self.first_sync = True
        else:
            state = self.db.get(b'state')
            if state:
                state = ast.literal_eval(state.decode())
            if not isinstance(state, dict):
                raise self.DBError('failed reading state from DB')
            db_version = state.get('db_version', 0)
            if db_version not in self.VERSIONS:
                raise self.DBError('your DB version is {} but this software '
                                   'only handles versions {}'
                                   .format(db_version, self.VERSIONS))
            if state['genesis'] != self.coin.GENESIS_HASH:
                # BUG FIX: the error message previously formatted the
                # non-existent key state['genesis_hash'] (write_state
                # stores 'genesis'), raising KeyError instead of the
                # intended DBError.
                raise self.DBError('DB genesis hash {} does not match coin {}'
                                   .format(state['genesis'],
                                           self.coin.GENESIS_HASH))
            self.db_height = state['height']
            self.db_tx_count = state['tx_count']
            self.db_tip = state['tip']
            self.flush_count = state['flush_count']
            self.utxo_flush_count = state['utxo_flush_count']
            self.wall_time = state['wall_time']
            self.first_sync = state['first_sync']

    def write_state(self, batch):
        '''Write chain state to the batch.'''
        state = {
            'genesis': self.coin.GENESIS_HASH,
            'height': self.db_height,
            'tx_count': self.db_tx_count,
            'tip': self.db_tip,
            'flush_count': self.flush_count,
            'utxo_flush_count': self.utxo_flush_count,
            'wall_time': self.wall_time,
            'first_sync': self.first_sync,
            'db_version': max(self.VERSIONS),
        }
        batch.put(b'state', repr(state).encode())

    def open_file(self, filename, create=False):
        '''Open the file name.  Return its handle.

        If create is true, a missing file is created; otherwise
        FileNotFoundError propagates.
        '''
        try:
            return open(filename, 'rb+')
        except FileNotFoundError:
            if create:
                return open(filename, 'wb+')
            raise

    def fs_read_headers(self, start, count):
        '''Return count raw headers starting at height start.

        Raises DBError if the requested range is not wholly on disk.
        '''
        disk_count = min(count, self.db_height + 1 - start)
        if start < 0 or count < 0 or disk_count != count:
            raise self.DBError('{:,d} headers starting at {:,d} not on disk'
                               .format(count, start))
        if disk_count:
            header_len = self.coin.HEADER_LEN
            self.headers_file.seek(start * header_len)
            return self.headers_file.read(disk_count * header_len)
        return b''

    def fs_tx_hash(self, tx_num):
        '''Return a pair (tx_hash, tx_height) for the given tx number.

        If the tx_height is not on disk, returns (None, tx_height).'''
        tx_height = bisect_right(self.tx_counts, tx_num)
        if tx_height > self.db_height:
            return None, tx_height
        # 32 bytes of hash per tx; locate the shard file and offset.
        file_pos = tx_num * 32
        file_num, offset = divmod(file_pos, self.tx_hash_file_size)
        filename = 'hashes{:04d}'.format(file_num)
        with self.open_file(filename) as f:
            f.seek(offset)
            return f.read(32), tx_height

    def fs_block_hashes(self, height, count):
        '''Return the double-SHA256 hashes of count headers from height.'''
        headers = self.fs_read_headers(height, count)
        # FIXME: move to coins.py
        hlen = self.coin.HEADER_LEN
        return [double_sha256(header) for header in chunks(headers, hlen)]

    @staticmethod
    def _resolve_limit(limit):
        '''Convert a user-facing limit to an internal countdown; None
        means unlimited (-1 never reaches 0 when decremented).'''
        if limit is None:
            return -1
        assert isinstance(limit, int) and limit >= 0
        return limit

    def get_history(self, hash168, limit=1000):
        '''Generator that returns an unpruned, sorted list of (tx_hash,
        height) tuples of confirmed transactions that touched the address,
        earliest in the blockchain first.  Includes both spending and
        receiving transactions.  By default yields at most 1000 entries.
        Set limit to None to get them all.
        '''
        limit = self._resolve_limit(limit)
        prefix = b'H' + hash168
        for key, hist in self.db.iterator(prefix=prefix):
            # Each value is a packed array of 4-byte tx numbers.
            a = array.array('I')
            a.frombytes(hist)
            for tx_num in a:
                if limit == 0:
                    return
                yield self.fs_tx_hash(tx_num)
                limit -= 1

    def get_balance(self, hash168):
        '''Returns the confirmed balance of an address.'''
        return sum(utxo.value for utxo in self.get_utxos(hash168, limit=None))

    def get_utxos(self, hash168, limit=1000):
        '''Generator that yields all UTXOs for an address sorted in no
        particular order.  By default yields at most 1000 entries.
        Set limit to None to get them all.
        '''
        limit = self._resolve_limit(limit)
        s_unpack = unpack
        prefix = b'u' + hash168
        for db_key, db_value in self.db.iterator(prefix=prefix):
            if limit == 0:
                return
            limit -= 1
            # Key tail is <tx_num:4><tx_pos:2>; value is an 8-byte amount.
            tx_num, tx_pos = s_unpack('<IH', db_key[-6:])
            value, = unpack('<Q', db_value)
            tx_hash, height = self.fs_tx_hash(tx_num)
            yield UTXO(tx_num, tx_pos, tx_hash, height, value)

    def get_utxos_sorted(self, hash168):
        '''Returns all the UTXOs for an address sorted by height and
        position in the block.'''
        return sorted(self.get_utxos(hash168, limit=None))

    def get_utxo_hash168(self, tx_hash, index):
        '''Returns the hash168 for a UTXO.

        Used only for electrum client command-line requests.
        '''
        hash168 = None
        if 0 <= index <= 65535:
            idx_packed = pack('<H', index)
            hash168, tx_num_packed = self.db_hash168(tx_hash, idx_packed)
        return hash168

    def db_hash168(self, tx_hash, idx_packed):
        '''Return (hash168, tx_num_packed) for the given TXO.

        Both are None if not found.'''
        # The 4 is the COMPRESSED_TX_HASH_LEN
        key = b'h' + tx_hash[:4] + idx_packed
        db_value = self.db.get(key)
        if db_value:
            # Each 25-byte entry is 21 bytes of hash168 + 4 bytes of
            # tx number.
            assert len(db_value) % 25 == 0

            # Find which entry, if any, the TX_HASH matches.
            for n in range(0, len(db_value), 25):
                tx_num_packed = db_value[n + 21: n + 25]
                tx_num, = unpack('<I', tx_num_packed)
                hash, height = self.fs_tx_hash(tx_num)
                if hash == tx_hash:
                    return db_value[n:n + 21], tx_num_packed

        return None, None

    def db_utxo_lookup(self, tx_hash, tx_idx):
        '''Given a prevout return a (hash168, value) pair.

        Raises MissingUTXOError if the UTXO is not found.  Used by the
        mempool code.
        '''
        idx_packed = pack('<H', tx_idx)
        hash168, tx_num_packed = self.db_hash168(tx_hash, idx_packed)
        if not hash168:
            # This can happen when the daemon is a block ahead of us
            # and has mempool txs spending new txs in that block
            raise self.MissingUTXOError

        key = b'u' + hash168 + tx_num_packed + idx_packed
        db_value = self.db.get(key)
        if not db_value:
            raise self.DBError('UTXO {} / {:,d} in one table only'
                               .format(hash_to_str(tx_hash), tx_idx))
        value, = unpack('<Q', db_value)
        return hash168, value
| {"/electrumx_server.py": ["/server/env.py", "/server/protocol.py"], "/server/protocol.py": ["/lib/jsonrpc.py", "/server/block_processor.py", "/server/version.py"], "/server/block_processor.py": ["/server/db.py"]} |
68,166 | ywzqhl/electrumx | refs/heads/master | /server/block_processor.py | # Copyright (c) 2016, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Block prefetcher and chain processor.'''
import array
import asyncio
import itertools
import os
from struct import pack, unpack
import time
from bisect import bisect_left
from collections import defaultdict
from functools import partial
from server.daemon import Daemon, DaemonError
from lib.hash import hash_to_str
from lib.tx import Deserializer
from lib.util import chunks, LoggedClass
import server.db
from server.storage import open_db
# Limits single address history to ~ 65536 * HIST_ENTRIES_PER_KEY entries
HIST_ENTRIES_PER_KEY = 1024
# 4 bytes per history entry (packed 'I' array values).
HIST_VALUE_BYTES = HIST_ENTRIES_PER_KEY * 4
def formatted_time(t):
    '''Return a number of seconds as a string in days, hours, mins and
    secs.'''
    t = int(t)
    days, remainder = divmod(t, 86400)
    hours, remainder = divmod(remainder, 3600)
    mins, secs = divmod(remainder, 60)
    return '{:d}d {:02d}h {:02d}m {:02d}s'.format(days, hours, mins, secs)
class ChainError(Exception):
    '''Raised on blockchain-consistency errors during block processing.'''
class Prefetcher(LoggedClass):
    '''Prefetches blocks (in the forward direction only).'''

    def __init__(self, daemon, height):
        super().__init__()
        self.daemon = daemon
        # Serializes clear() against _caught_up()'s queue updates.
        self.semaphore = asyncio.Semaphore()
        self.queue = asyncio.Queue()
        # Approximate number of raw-block bytes sitting in the queue.
        self.queue_size = 0
        # Height of the most recently fetched block.
        self.fetched_height = height
        self.mempool_hashes = []
        # Target cache size.  Has little effect on sync time.
        self.target_cache_size = 10 * 1024 * 1024
        # First fetch to be 10 blocks
        self.ave_size = self.target_cache_size // 10

    async def clear(self, height):
        '''Clear prefetched blocks and restart from the given height.

        Used in blockchain reorganisations.  This coroutine can be
        called asynchronously to the _prefetch coroutine so we must
        synchronize.
        '''
        with await self.semaphore:
            while not self.queue.empty():
                self.queue.get_nowait()
            self.queue_size = 0
            self.fetched_height = height

    async def get_blocks(self):
        '''Returns a list of prefetched blocks and the mempool.'''
        blocks, height, size = await self.queue.get()
        self.queue_size -= size
        # Only report the mempool when this batch brings us level with
        # the daemon's cached height; otherwise the caller gets None.
        if height == self.daemon.cached_height():
            return blocks, self.mempool_hashes
        else:
            return blocks, None

    async def main_loop(self):
        '''Loop forever polling for more blocks.'''
        self.logger.info('starting daemon poll loop')
        while True:
            try:
                if await self._caught_up():
                    # Caught up with the daemon - poll at a relaxed pace.
                    await asyncio.sleep(5)
                else:
                    await asyncio.sleep(0)
            except DaemonError as e:
                self.logger.info('ignoring daemon error: {}'.format(e))

    async def _caught_up(self):
        '''Poll for new blocks and mempool state.

        Mempool is only queried if caught up with daemon.'''
        with await self.semaphore:
            blocks, size = await self._prefetch()
            self.fetched_height += len(blocks)
            caught_up = self.fetched_height == self.daemon.cached_height()
            if caught_up:
                self.mempool_hashes = await self.daemon.mempool_hashes()

            # Wake up block processor if we have something
            if blocks or caught_up:
                self.queue.put_nowait((blocks, self.fetched_height, size))
                self.queue_size += size

            return caught_up

    async def _prefetch(self):
        '''Prefetch blocks unless the prefetch queue is full.'''
        if self.queue_size >= self.target_cache_size:
            return [], 0

        daemon_height = await self.daemon.height()
        cache_room = self.target_cache_size // self.ave_size

        # Try and catch up all blocks but limit to room in cache.
        # Constrain count to between 0 and 4000 regardless
        count = min(daemon_height - self.fetched_height, cache_room)
        count = min(4000, max(count, 0))
        if not count:
            return [], 0

        first = self.fetched_height + 1
        hex_hashes = await self.daemon.block_hex_hashes(first, count)
        blocks = await self.daemon.raw_blocks(hex_hashes)

        size = sum(len(block) for block in blocks)

        # Update our recent average block size estimate
        if count >= 10:
            self.ave_size = size // count
        else:
            # Blend a small sample into the running average.
            self.ave_size = (size + (10 - count) * self.ave_size) // 10

        return blocks, size
class ChainReorg(Exception):
    '''Signals that a blockchain reorganisation was detected.'''
class MemPool(LoggedClass):
    '''Representation of the daemon's mempool.

    Updated regularly in caught-up state.  Goal is to enable efficient
    response to the value() and transactions() calls.

    To that end we maintain the following maps:

       tx_hash -> [txin_pairs, txout_pairs, unconfirmed]
       hash168 -> set of all tx hashes in which the hash168 appears

    A pair is a (hash168, value) tuple.  Unconfirmed is true if any of the
    tx's txins are unconfirmed.  tx hashes are hex strings.
    '''

    def __init__(self, bp):
        super().__init__()
        self.txs = {}
        self.hash168s = defaultdict(set)  # None can be a key
        # Block processor - provides daemon, coin and UTXO lookups.
        self.bp = bp
        # -1 marks the first (initial) mempool import.
        self.count = -1

    async def update(self, hex_hashes):
        '''Update state given the current mempool to the passed set of
        hashes.

        Remove transactions that are no longer in our mempool.
        Request new transactions we don't have then add to our mempool.
        '''
        hex_hashes = set(hex_hashes)
        touched = set()
        # NOTE(review): missing_utxos is tested and logged below but is
        # never appended to anywhere, so that logging branch is dead
        # code - confirm whether dropped txs should be recorded here.
        missing_utxos = []

        initial = self.count < 0
        if initial:
            self.logger.info('beginning import of {:,d} mempool txs'
                             .format(len(hex_hashes)))

        # Remove gone items
        gone = set(self.txs).difference(hex_hashes)
        for hex_hash in gone:
            txin_pairs, txout_pairs, unconfirmed = self.txs.pop(hex_hash)
            hash168s = set(hash168 for hash168, value in txin_pairs)
            hash168s.update(hash168 for hash168, value in txout_pairs)
            for hash168 in hash168s:
                self.hash168s[hash168].remove(hex_hash)
                # Drop empty sets so hash168s holds only live entries.
                if not self.hash168s[hash168]:
                    del self.hash168s[hash168]
            touched.update(hash168s)

        # Get the raw transactions for the new hashes.  Ignore the
        # ones the daemon no longer has (it will return None).  Put
        # them into a dictionary of hex hash to deserialized tx.
        hex_hashes.difference_update(self.txs)
        raw_txs = await self.bp.daemon.getrawtransactions(hex_hashes)
        if initial:
            self.logger.info('analysing {:,d} mempool txs'
                             .format(len(raw_txs)))
        new_txs = {hex_hash: Deserializer(raw_tx).read_tx()
                   for hex_hash, raw_tx in zip(hex_hashes, raw_txs)
                   if raw_tx}
        del raw_txs, hex_hashes

        # The mempool is unordered, so process all outputs first so
        # that looking for inputs has full info.
        script_hash168 = self.bp.coin.hash168_from_script()
        db_utxo_lookup = self.bp.db_utxo_lookup

        def txout_pair(txout):
            # (hash168, value) pair for one tx output.
            return (script_hash168(txout.pk_script), txout.value)

        for n, (hex_hash, tx) in enumerate(new_txs.items()):
            # Yield to process e.g. signals
            if n % 500 == 0:
                await asyncio.sleep(0)
            txout_pairs = [txout_pair(txout) for txout in tx.outputs]
            self.txs[hex_hash] = (None, txout_pairs, None)

        def txin_info(txin):
            # ((hash168, value), unconfirmed) for one tx input,
            # consulting our own mempool before the DB.
            hex_hash = hash_to_str(txin.prev_hash)
            mempool_entry = self.txs.get(hex_hash)
            if mempool_entry:
                return mempool_entry[1][txin.prev_idx], True
            pair = db_utxo_lookup(txin.prev_hash, txin.prev_idx)
            return pair, False

        if initial:
            next_log = time.time()
            self.logger.info('processed outputs, now examining inputs. '
                             'This can take some time...')

        # Now add the inputs
        for n, (hex_hash, tx) in enumerate(new_txs.items()):
            # Yield to process e.g. signals
            if n % 50 == 0:
                await asyncio.sleep(0)

            if initial and time.time() > next_log:
                next_log = time.time() + 20
                self.logger.info('{:,d} done ({:d}%)'
                                 .format(n, int(n / len(new_txs) * 100)))

            txout_pairs = self.txs[hex_hash][1]
            try:
                infos = (txin_info(txin) for txin in tx.inputs)
                txin_pairs, unconfs = zip(*infos)
            except self.bp.MissingUTXOError:
                # Drop this TX.  If other mempool txs depend on it
                # it's harmless - next time the mempool is refreshed
                # they'll either be cleaned up or the UTXOs will no
                # longer be missing.
                del self.txs[hex_hash]
                continue
            self.txs[hex_hash] = (txin_pairs, txout_pairs, any(unconfs))

            # Update touched and self.hash168s for the new tx
            for hash168, value in txin_pairs:
                self.hash168s[hash168].add(hex_hash)
                touched.add(hash168)
            for hash168, value in txout_pairs:
                self.hash168s[hash168].add(hex_hash)
                touched.add(hash168)

        if missing_utxos:
            self.logger.info('{:,d} txs had missing UTXOs; probably the '
                             'daemon is a block or two ahead of us.'
                             .format(len(missing_utxos)))
            first = ', '.join('{} / {:,d}'
                              .format(hash_to_str(txin.prev_hash),
                                      txin.prev_idx)
                              for txin in sorted(missing_utxos)[:3])
            self.logger.info('first ones are {}'.format(first))

        self.count += 1
        if self.count % 25 == 0 or gone:
            self.count = 0
            self.logger.info('{:,d} txs touching {:,d} addresses'
                             .format(len(self.txs), len(self.hash168s)))

        # Might include a None
        return touched

    def transactions(self, hash168):
        '''Generate (hex_hash, tx_fee, unconfirmed) tuples for mempool
        entries for the hash168.

        unconfirmed is True if any txin is unconfirmed.
        '''
        for hex_hash in self.hash168s[hash168]:
            txin_pairs, txout_pairs, unconfirmed = self.txs[hex_hash]
            # Fee is total inputs minus total outputs.
            tx_fee = (sum(v for hash168, v in txin_pairs)
                      - sum(v for hash168, v in txout_pairs))
            yield (hex_hash, tx_fee, unconfirmed)

    def value(self, hash168):
        '''Return the unconfirmed amount in the mempool for hash168.

        Can be positive or negative.
        '''
        value = 0
        for hex_hash in self.hash168s[hash168]:
            txin_pairs, txout_pairs, unconfirmed = self.txs[hex_hash]
            value -= sum(v for h168, v in txin_pairs if h168 == hash168)
            value += sum(v for h168, v in txout_pairs if h168 == hash168)
        return value
class BlockProcessor(server.db.DB):
'''Process blocks and update the DB state to match.
Employ a prefetcher to prefetch blocks in batches for processing.
Coordinate backing up in case of chain reorganisations.
'''
def __init__(self, env):
    '''Initialise processing state from the DB and the environment.'''
    super().__init__(env)

    # These are our state as we move ahead of DB state
    self.height = self.db_height
    self.tip = self.db_tip
    self.tx_count = self.db_tx_count

    self.daemon = Daemon(env.daemon_url, env.debug)
    self.daemon.debug_set_height(self.height)
    self.mempool = MemPool(self)
    # Addresses (hash168s) touched since the last caught-up cycle.
    self.touched = set()

    # Meta
    self.utxo_MB = env.utxo_MB
    self.hist_MB = env.hist_MB
    self.next_cache_check = 0
    self.reorg_limit = env.reorg_limit

    # Headers and tx_hashes have one entry per block
    self.history = defaultdict(partial(array.array, 'I'))
    self.history_size = 0
    self.prefetcher = Prefetcher(self.daemon, self.height)
    self.last_flush = time.time()
    self.last_flush_tx_count = self.tx_count

    # Caches of unflushed items
    self.headers = []
    self.tx_hashes = []

    # UTXO cache
    self.utxo_cache = {}
    self.db_cache = {}
    self.utxo_cache_spends = 0
    self.db_deletes = 0

    # Log state
    self.logger.info('{}/{} height: {:,d} tx count: {:,d} '
                     'flush count: {:,d} utxo flush count: {:,d} '
                     'sync time: {}'
                     .format(self.coin.NAME, self.coin.NET, self.height,
                             self.tx_count, self.flush_count,
                             self.utxo_flush_count,
                             formatted_time(self.wall_time)))
    self.logger.info('reorg limit of {:,d} blocks'
                     .format(self.reorg_limit))
    self.logger.info('flushing UTXO cache at {:,d} MB'
                     .format(self.utxo_MB))
    self.logger.info('flushing history cache at {:,d} MB'
                     .format(self.hist_MB))

    self.clean_db()
def start(self):
    '''Returns a future that starts the block processor when awaited.'''
    coros = (self.main_loop(), self.prefetcher.main_loop())
    return asyncio.gather(*coros)
async def main_loop(self):
    '''Main loop for block processing.

    Safely flushes the DB on clean shutdown.
    '''
    try:
        while True:
            await self._wait_for_update()
            await asyncio.sleep(0)  # Yield
    except asyncio.CancelledError:
        # Persist all cached state before propagating cancellation.
        self.flush(True)
        raise
async def _wait_for_update(self):
    '''Wait for the prefetcher to deliver blocks or a mempool update.

    Blocks are only processed in the forward direction.  The
    prefetcher only provides a non-None mempool when caught up.
    '''
    blocks, mempool_hashes = await self.prefetcher.get_blocks()

    # Strip the unspendable genesis coinbase.
    if self.height == -1:
        blocks[0] = blocks[0][:self.coin.HEADER_LEN] + bytes(1)

    caught_up = mempool_hashes is not None
    try:
        for block in blocks:
            self.advance_block(block, caught_up)
            await asyncio.sleep(0)  # Yield
        if caught_up:
            await self.caught_up(mempool_hashes)
        self.touched = set()
    except ChainReorg:
        await self.handle_chain_reorg()
async def caught_up(self, mempool_hashes):
    '''Called after each daemon poll if caught up.'''
    # Caught up to daemon height.  Flush everything as queries
    # are performed on the DB and not in-memory.
    self.flush(True)
    if self.first_sync:
        self.first_sync = False
        self.logger.info('synced to height {:,d}'.format(self.height))
    self.touched.update(await self.mempool.update(mempool_hashes))
async def handle_chain_reorg(self):
    '''Back out blocks to the last common height, then reset the
    prefetcher to resume from there.'''
    # First get all state on disk
    self.logger.info('chain reorg detected')
    self.flush(True)
    self.logger.info('finding common height...')

    hashes = await self.reorg_hashes()
    # Reverse and convert to hex strings.
    hex_hashes = [hash_to_str(h) for h in reversed(hashes)]
    for batch in chunks(hex_hashes, 50):
        raw_blocks = await self.daemon.raw_blocks(batch)
        self.backup_blocks(raw_blocks)
    self.logger.info('backed up to height {:,d}'.format(self.height))

    await self.prefetcher.clear(self.height)
    self.logger.info('prefetcher reset')
async def reorg_hashes(self):
    '''Return the list of hashes to back up because of a reorg.

    The hashes are returned in order of increasing height.'''
    def match_pos(hashes1, hashes2):
        # Index of the first position at which the two lists agree,
        # -1 if they agree nowhere.
        for n, (hash1, hash2) in enumerate(zip(hashes1, hashes2)):
            if hash1 == hash2:
                return n
        return -1

    start = self.height - 1
    count = 1
    while start > 0:
        # Compare our recorded hashes with the daemon's over an
        # exponentially growing window until a common hash is found.
        hashes = self.fs_block_hashes(start, count)
        hex_hashes = [hash_to_str(hash) for hash in hashes]
        d_hex_hashes = await self.daemon.block_hex_hashes(start, count)
        n = match_pos(hex_hashes, d_hex_hashes)
        if n >= 0:
            # Everything up to and including the match is common.
            start += n + 1
            break
        count = min(count * 2, start)
        start -= count

    # Hashes differ from height 'start'
    count = (self.height - start) + 1

    self.logger.info('chain was reorganised for {:,d} blocks from '
                     'height {:,d} to height {:,d}'
                     .format(count, start, start + count - 1))

    return self.fs_block_hashes(start, count)
def clean_db(self):
    '''Clean out stale DB items.

    Stale DB items are excess history flushed since the most
    recent UTXO flush (only happens on unclean shutdown), and aged
    undo information.
    '''
    if self.flush_count < self.utxo_flush_count:
        raise ChainError('DB corrupt: flush_count < utxo_flush_count')

    with self.db.write_batch() as batch:
        unclean = self.flush_count > self.utxo_flush_count
        if unclean:
            self.logger.info('DB shut down uncleanly. Scanning for '
                             'excess history flushes...')
            self.remove_excess_history(batch)
            self.utxo_flush_count = self.flush_count
        self.remove_stale_undo_items(batch)
        self.flush_state(batch)
def remove_excess_history(self, batch):
    '''Delete history entries whose flush id postdates the last UTXO
    flush (written during an unclean shutdown).'''
    # The last two key bytes are the big-endian flush id.
    stale = [key for key, hist in self.db.iterator(prefix=b'H')
             if unpack('>H', key[-2:])[0] > self.utxo_flush_count]

    self.logger.info('deleting {:,d} history entries'
                     .format(len(stale)))
    for key in stale:
        batch.delete(key)
def remove_stale_undo_items(self, batch):
    '''Delete undo entries at heights at or below the reorg-limit
    cutoff.'''
    cutoff = self.db_height - self.reorg_limit
    stale = []
    for key, hist in self.db.iterator(prefix=b'U'):
        # The last four key bytes are the big-endian height; keys
        # iterate in height order, so stop at the first recent one.
        height, = unpack('>I', key[-4:])
        if height > cutoff:
            break
        stale.append(key)

    self.logger.info('deleting {:,d} stale undo entries'
                     .format(len(stale)))
    for key in stale:
        batch.delete(key)
def flush_state(self, batch):
    '''Accumulate wall time and write chain state into the batch.'''
    flush_time = time.time()
    # Add the elapsed time since the previous flush before resetting.
    self.wall_time += flush_time - self.last_flush
    self.last_flush = flush_time
    self.last_flush_tx_count = self.tx_count
    self.write_state(batch)
def assert_flushed(self):
    '''Asserts state is fully flushed.'''
    # In-memory counters and caches must exactly match DB state.
    assert self.tx_count == self.db_tx_count
    assert not self.history
    assert not self.utxo_cache
    assert not self.db_cache
def flush(self, flush_utxos=False, flush_history=None):
    '''Flush out cached state.

    History is always flushed.  UTXOs are flushed if flush_utxos.
    flush_history, when passed, replaces the normal history-flushing
    callable (NOTE(review): presumably supplied when backing up during
    a reorg - confirm against backup_blocks).
    '''
    if self.height == self.db_height:
        # Nothing has advanced; state must already be clean.
        assert flush_history is None
        self.assert_flushed()
        return

    self.flush_count += 1
    flush_start = time.time()
    last_flush = self.last_flush
    tx_diff = self.tx_count - self.last_flush_tx_count
    show_stats = self.first_sync

    if self.height > self.db_height:
        # Moving forward: use the standard history flush.
        assert flush_history is None
        flush_history = self.flush_history

    with self.db.write_batch() as batch:
        # History first - fast and frees memory.  Flush state last
        # as it reads the wall time.
        flush_history(batch)
        if flush_utxos:
            self.flush_utxos(batch)
        self.flush_state(batch)
        self.logger.info('committing transaction...')

    # Update and put the wall time again - otherwise we drop the
    # time it took to commit the batch
    self.flush_state(self.db)

    self.logger.info('flush #{:,d} to height {:,d} txs: {:,d} took {:,d}s'
                     .format(self.flush_count, self.height, self.tx_count,
                             int(self.last_flush - flush_start)))

    # Catch-up stats
    if show_stats:
        daemon_height = self.daemon.cached_height()
        tx_per_sec = int(self.tx_count / self.wall_time)
        this_tx_per_sec = 1 + int(tx_diff / (self.last_flush - last_flush))
        if self.height > self.coin.TX_COUNT_HEIGHT:
            tx_est = (daemon_height - self.height) * self.coin.TX_PER_BLOCK
        else:
            tx_est = ((daemon_height - self.coin.TX_COUNT_HEIGHT)
                      * self.coin.TX_PER_BLOCK
                      + (self.coin.TX_COUNT - self.tx_count))

        # Damp the enthusiasm
        realism = 2.0 - 0.9 * self.height / self.coin.TX_COUNT_HEIGHT
        tx_est *= max(realism, 1.0)

        self.logger.info('tx/sec since genesis: {:,d}, '
                         'since last flush: {:,d}'
                         .format(tx_per_sec, this_tx_per_sec))
        self.logger.info('sync time: {} ETA: {}'
                         .format(formatted_time(self.wall_time),
                                 formatted_time(tx_est / this_tx_per_sec)))
def flush_history(self, batch):
    '''Write the in-memory history cache into the batch and reset it.'''
    flush_start = time.time()
    flush_id = pack('>H', self.flush_count)

    for hash168, hist in self.history.items():
        # Key layout: b'H' + hash168 + 2-byte big-endian flush id.
        batch.put(b'H' + hash168 + flush_id, hist.tobytes())

    self.logger.info('flushed {:,d} history entries for {:,d} addrs '
                     'in {:,d}s'
                     .format(self.history_size, len(self.history),
                             int(time.time() - flush_start)))

    self.history = defaultdict(partial(array.array, 'I'))
    self.history_size = 0
def fs_flush(self):
    '''Flush the things stored on the filesystem.'''
    blocks_done = len(self.headers)
    prior_tx_count = (self.tx_counts[self.db_height]
                      if self.db_height >= 0 else 0)
    cur_tx_count = self.tx_counts[-1] if self.tx_counts else 0
    txs_done = cur_tx_count - prior_tx_count

    # Sanity checks: cached blocks/counts must agree with our heights.
    assert self.db_height + blocks_done == self.height
    assert len(self.tx_hashes) == blocks_done
    assert len(self.tx_counts) == self.height + 1
    assert cur_tx_count == self.tx_count, \
        'cur: {:,d} new: {:,d}'.format(cur_tx_count, self.tx_count)

    # First the headers
    headers = b''.join(self.headers)
    header_len = self.coin.HEADER_LEN
    self.headers_file.seek((self.db_height + 1) * header_len)
    self.headers_file.write(headers)
    self.headers_file.flush()

    # Then the tx counts (the array slice supports the buffer
    # protocol, so it is written as raw bytes)
    self.txcount_file.seek((self.db_height + 1) * self.tx_counts.itemsize)
    self.txcount_file.write(self.tx_counts[self.db_height + 1:])
    self.txcount_file.flush()

    # Finally the hashes
    hashes = memoryview(b''.join(itertools.chain(*self.tx_hashes)))
    assert len(hashes) % 32 == 0
    assert len(hashes) // 32 == txs_done
    cursor = 0
    file_pos = prior_tx_count * 32
    while cursor < len(hashes):
        # Hash storage is sharded into fixed-size files; a write may
        # straddle a shard boundary, hence the chunking.
        file_num, offset = divmod(file_pos, self.tx_hash_file_size)
        size = min(len(hashes) - cursor, self.tx_hash_file_size - offset)
        filename = 'hashes{:04d}'.format(file_num)
        with self.open_file(filename, create=True) as f:
            f.seek(offset)
            f.write(hashes[cursor:cursor + size])
        cursor += size
        file_pos += size

    os.sync()

    self.tx_hashes = []
    self.headers = []
def backup_history(self, batch, hash168s):
    '''Remove history entries with tx numbers >= self.tx_count for the
    given addresses (used when backing out blocks).'''
    self.logger.info('backing up history to height {:,d} tx_count {:,d}'
                     .format(self.height, self.tx_count))
    assert not self.history

    nremoves = 0
    for hash168 in sorted(hash168s):
        prefix = b'H' + hash168
        deletes = []
        puts = {}
        # Walk this address's entries newest-first: entries wholly past
        # the cutoff are deleted; the first partially-affected entry is
        # truncated and rewritten, after which earlier entries are safe.
        for key, hist in self.db.iterator(prefix=prefix, reverse=True):
            a = array.array('I')
            a.frombytes(hist)
            # Remove all history entries >= self.tx_count
            idx = bisect_left(a, self.tx_count)
            nremoves += len(a) - idx
            if idx > 0:
                puts[key] = a[:idx].tobytes()
                break
            deletes.append(key)

        for key in deletes:
            batch.delete(key)
        for key, value in puts.items():
            batch.put(key, value)

    self.logger.info('removed {:,d} history entries from {:,d} addresses'
                     .format(nremoves, len(hash168s)))
def cache_sizes(self):
    '''Returns the approximate size of the cache, in MB.'''
    # Good average estimates based on traversal of subobjects and
    # requesting size from Python (see deep_getsizeof).  For
    # whatever reason Python O/S mem usage is typically +30% or
    # more, so we scale our already bloated object sizes.
    one_MB = int(1048576 / 1.3)
    utxo_bytes = len(self.utxo_cache) * 187
    db_bytes = len(self.db_cache) * 105
    hist_bytes = len(self.history) * 180 + self.history_size * 4
    hash_bytes = (self.tx_count - self.db_tx_count) * 74

    utxo_MB = (db_bytes + utxo_bytes + hash_bytes) // one_MB
    hist_MB = hist_bytes // one_MB

    self.logger.info('UTXOs: {:,d} deletes: {:,d} '
                     'UTXOs {:,d}MB hist {:,d}MB'
                     .format(len(self.utxo_cache), self.db_deletes,
                             utxo_MB, hist_MB))
    self.logger.info('our height: {:,d} daemon height: {:,d}'
                     .format(self.height, self.daemon.cached_height()))
    return utxo_MB, hist_MB
def undo_key(self, height):
    '''DB key for undo information at the given height.'''
    # b'U' prefix followed by the 4-byte big-endian height.
    return b'U' + pack('>I', height)
def write_undo_info(self, height, undo_info):
    '''Write out undo information for the current height.'''
    key = self.undo_key(height)
    self.db.put(key, undo_info)
def read_undo_info(self, height):
    '''Read undo information from a file for the current height.'''
    key = self.undo_key(height)
    return self.db.get(key)
def fs_advance_block(self, header, tx_hashes, txs):
    '''Update unflushed FS state for a new block.'''
    # Cumulative tx count at the end of the previous block.
    prior = self.tx_counts[-1] if self.tx_counts else 0

    # Cache the new header, tx hashes and cumulative tx count
    self.headers.append(header)
    self.tx_hashes.append(tx_hashes)
    self.tx_counts.append(prior + len(txs))
    def advance_block(self, block, update_touched):
        '''Advance the chain by one raw block.

        Raises ChainReorg if the block does not extend the current
        tip.  Stores undo information while a reorg back to this
        height is still plausible, and flushes the caches when their
        estimated size exceeds the configured limits.
        '''
        # We must update the FS cache before calling advance_txs() as
        # the UTXO cache uses the FS cache via get_tx_hash() to
        # resolve compressed key collisions
        header, tx_hashes, txs = self.coin.read_block(block)
        prev_hash, header_hash = self.coin.header_hashes(header)
        if prev_hash != self.tip:
            raise ChainReorg
        touched = set()
        self.fs_advance_block(header, tx_hashes, txs)
        self.tip = header_hash
        self.height += 1
        undo_info = self.advance_txs(tx_hashes, txs, touched)
        # Keep undo info only while the daemon's height is within the
        # reorg limit of ours; deeper reorgs are not supported.
        if self.daemon.cached_height() - self.height <= self.reorg_limit:
            self.write_undo_info(self.height, b''.join(undo_info))
        # Check if we're getting full and time to flush?
        now = time.time()
        if now > self.next_cache_check:
            # Rate-limit the size estimate to once per minute.
            self.next_cache_check = now + 60
            utxo_MB, hist_MB = self.cache_sizes()
            if utxo_MB >= self.utxo_MB or hist_MB >= self.hist_MB:
                # Flush the UTXO set as well only when it overflowed.
                self.flush(utxo_MB >= self.utxo_MB)
        if update_touched:
            self.touched.update(touched)
    def advance_txs(self, tx_hashes, txs, touched):
        '''Apply one block's transactions to the UTXO cache and history.

        Spends each non-coinbase input, adds each spendable output,
        and appends the tx number to the history of every address
        (hash168) a tx touches.  *touched* is updated in place with
        those addresses.  Returns the list of spent-input cache
        values, in block order, for write_undo_info().
        '''
        put_utxo = self.utxo_cache.__setitem__
        spend_utxo = self.spend_utxo
        undo_info = []
        # Use local vars for speed in the loops
        history = self.history
        tx_num = self.tx_count
        script_hash168 = self.coin.hash168_from_script()
        s_pack = pack
        for tx, tx_hash in zip(txs, tx_hashes):
            hash168s = set()
            tx_numb = s_pack('<I', tx_num)
            # Spend the inputs
            if not tx.is_coinbase:
                for txin in tx.inputs:
                    cache_value = spend_utxo(txin.prev_hash, txin.prev_idx)
                    undo_info.append(cache_value)
                    # First 21 bytes of a cache value are the hash168
                    hash168s.add(cache_value[:21])
            # Add the new UTXOs
            for idx, txout in enumerate(tx.outputs):
                # Get the hash168.  Ignore unspendable outputs
                hash168 = script_hash168(txout.pk_script)
                if hash168:
                    hash168s.add(hash168)
                    put_utxo(tx_hash + s_pack('<H', idx),
                             hash168 + tx_numb + s_pack('<Q', txout.value))
            for hash168 in hash168s:
                history[hash168].append(tx_num)
            self.history_size += len(hash168s)
            touched.update(hash168s)
            tx_num += 1
        self.tx_count = tx_num
        return undo_info
    def backup_blocks(self, blocks):
        '''Backup the blocks and flush.
        The blocks should be in order of decreasing height.
        A flush is performed once the blocks are backed up.
        '''
        self.logger.info('backing up {:,d} blocks'.format(len(blocks)))
        self.assert_flushed()
        touched = set()
        for block in blocks:
            header, tx_hashes, txs = self.coin.read_block(block)
            prev_hash, header_hash = self.coin.header_hashes(header)
            # Each block undone must be the chain tip at that moment.
            if header_hash != self.tip:
                raise ChainError('backup block {} is not tip {} at height {:,d}'
                                 .format(hash_to_str(header_hash),
                                         hash_to_str(self.tip), self.height))
            # backup_txs reads self.height, so undo txs before
            # decrementing it below.
            self.backup_txs(tx_hashes, txs, touched)
            self.tip = prev_hash
            assert self.height >= 0
            self.height -= 1
        # We only backup when fully flushed, so no unflushed FS state
        # should remain.
        assert not self.headers
        assert not self.tx_hashes
        self.logger.info('backed up to height {:,d}'.format(self.height))
        self.touched.update(touched)
        # Flush history only for the addresses the undone blocks touched.
        flush_history = partial(self.backup_history, hash168s=touched)
        self.flush(True, flush_history=flush_history)
    def backup_txs(self, tx_hashes, txs, touched):
        '''Undo one block's transactions using its stored undo info.

        Removes every UTXO the block created and restores every UTXO
        its inputs consumed.  *touched* collects the hash168s of all
        affected addresses.
        '''
        # Prevout values, in order down the block (coinbase first if present)
        # undo_info is in reverse block order
        undo_info = self.read_undo_info(self.height)
        n = len(undo_info)
        # Use local vars for speed in the loops
        s_pack = pack
        put_utxo = self.utxo_cache.__setitem__
        spend_utxo = self.spend_utxo
        rtxs = reversed(txs)
        rtx_hashes = reversed(tx_hashes)
        for tx_hash, tx in zip(rtx_hashes, rtxs):
            # Spend the outputs
            for idx, txout in enumerate(tx.outputs):
                cache_value = spend_utxo(tx_hash, idx)
                # First 21 bytes of the value are the hash168
                touched.add(cache_value[:21])
            # Restore the inputs
            if not tx.is_coinbase:
                # Walk undo entries backwards; each is 33 bytes:
                # hash168 (21) + tx_num (4) + value (8).
                for txin in reversed(tx.inputs):
                    n -= 33
                    undo_item = undo_info[n:n + 33]
                    put_utxo(txin.prev_hash + s_pack('<H', txin.prev_idx),
                             undo_item)
                    touched.add(undo_item[:21])
        # All undo info must be consumed exactly.
        assert n == 0
        self.tx_count -= len(txs)
'''An in-memory UTXO cache, representing all changes to UTXO state
since the last DB flush.
We want to store millions of these in memory for optimal
performance during initial sync, because then it is possible to
spend UTXOs without ever going to the database (other than as an
entry in the address history, and there is only one such entry per
TX not per UTXO). So store them in a Python dictionary with
binary keys and values.
Key: TX_HASH + TX_IDX (32 + 2 = 34 bytes)
Value: HASH168 + TX_NUM + VALUE (21 + 4 + 8 = 33 bytes)
That's 67 bytes of raw data. Python dictionary overhead means
each entry actually uses about 187 bytes of memory. So almost
11.5 million UTXOs can fit in 2GB of RAM. There are approximately
42 million UTXOs on bitcoin mainnet at height 433,000.
Semantics:
add: Add it to the cache dictionary.
spend: Remove it if in the cache dictionary. Otherwise it's
been flushed to the DB. Each UTXO is responsible for two
entries in the DB. Mark them for deletion in the next
cache flush.
The UTXO database format has to be able to do two things efficiently:
1. Given an address be able to list its UTXOs and their values
so its balance can be efficiently computed.
2. When processing transactions, for each prevout spent - a (tx_hash,
idx) pair - we have to be able to remove it from the DB. To send
notifications to clients we also need to know any address it paid
to.
To this end we maintain two "tables", one for each point above:
1. Key: b'u' + address_hash168 + tx_num + tx_idx
Value: the UTXO value as a 64-bit unsigned integer
2. Key: b'h' + compressed_tx_hash + tx_idx
Value: [address_hash168 + tx_num]
The compressed tx hash is just the first few bytes of the hash of
the tx in which the UTXO was created. As this is not unique there
    will be potential collisions when saving and looking up UTXOs;
hence why the second table has a list as its value. The collision
can be resolved with the tx_num. The collision rate is low (<0.1%).
'''
    def spend_utxo(self, tx_hash, tx_idx):
        '''Spend a UTXO and return the 33-byte value.
        If the UTXO is not in the cache it must be on disk.  We store
        all UTXOs so not finding one indicates a logic error or DB
        corruption.
        '''
        # Fast track is it being in the cache
        idx_packed = pack('<H', tx_idx)
        cache_value = self.utxo_cache.pop(tx_hash + idx_packed, None)
        if cache_value:
            self.utxo_cache_spends += 1
            return cache_value
        # Spend it from the DB.  Read the UTXO through the cache
        # because compressed keys can collide.
        # The 4 is the COMPRESSED_TX_HASH_LEN
        db_key = b'h' + tx_hash[:4] + idx_packed
        db_value = self.db_cache_get(db_key)
        if db_value:
            # FIXME: this matches what we did previously but until we store
            # all UTXOs isn't safe
            if len(db_value) == 25:
                # Single entry: assume it is ours without resolving
                # the tx_num (see the FIXME above).
                udb_key = b'u' + db_value + idx_packed
                utxo_value_packed = self.db.get(udb_key)
                if utxo_value_packed:
                    # Remove the UTXO from both tables
                    self.db_deletes += 1
                    # A None cache entry marks the key for deletion at
                    # the next flush.
                    self.db_cache[db_key] = None
                    self.db_cache[udb_key] = None
                    return db_value + utxo_value_packed
                # Fall through to below loop for error
            # Collision list entries are 25 bytes: hash168 (21) + tx_num (4).
            assert len(db_value) % 25 == 0
            # Find which entry, if any, the TX_HASH matches.
            for n in range(0, len(db_value), 25):
                tx_num, = unpack('<I', db_value[n + 21:n + 25])
                hash, height = self.get_tx_hash(tx_num)
                if hash == tx_hash:
                    match = db_value[n:n+25]
                    udb_key = b'u' + match + idx_packed
                    utxo_value_packed = self.db.get(udb_key)
                    if utxo_value_packed:
                        # Remove the UTXO from both tables
                        self.db_deletes += 1
                        # Drop just the matching 25-byte entry from the
                        # collision list.
                        self.db_cache[db_key] = db_value[:n] + db_value[n+25:]
                        self.db_cache[udb_key] = None
                        return match + utxo_value_packed
                    raise self.DBError('UTXO {} / {:,d} not found in "u" table'
                                       .format(hash_to_str(tx_hash), tx_idx))
        raise ChainError('UTXO {} / {:,d} not found in "h" table'
                         .format(hash_to_str(tx_hash), tx_idx))
def db_cache_get(self, key):
'''Fetch a 'h' value from the DB through our write cache.'''
value = self.db_cache.get(key)
if value:
return value
return self.db.get(key)
    def flush_utxos(self, batch):
        '''Flush the cached DB writes and UTXO set to the batch.'''
        # Care is needed because the writes generated by flushing the
        # UTXO state may have keys in common with our write cache or
        # may be in the DB already.
        self.logger.info('flushing {:,d} blocks with {:,d} txs'
                         .format(self.height - self.db_height,
                                 self.tx_count - self.db_tx_count))
        self.logger.info('UTXO cache adds: {:,d} spends: {:,d} '
                         'DB spends: {:,d}'
                         .format(len(self.utxo_cache) + self.utxo_cache_spends,
                                 self.utxo_cache_spends,
                                 self.db_deletes))
        fs_flush_start = time.time()
        self.fs_flush()
        fs_flush_end = time.time()
        self.logger.info('FS flush took {:.1f} seconds'
                         .format(fs_flush_end - fs_flush_start))
        collisions = 0
        new_utxos = len(self.utxo_cache)
        for cache_key, cache_value in self.utxo_cache.items():
            # First write to the hash168 lookup table
            # The 4 is the COMPRESSED_TX_HASH_LEN
            db_key = b'h' + cache_key[:4] + cache_key[-2:]
            prior_value = self.db_cache_get(db_key)
            if prior_value:   # Should rarely happen
                collisions += 1
                self.db_cache[db_key] = prior_value + cache_value[:25]
            else:
                self.db_cache[db_key] = cache_value[:25]
            # Next write the UTXO table
            db_key = b'u' + cache_value[:25] + cache_key[-2:]
            self.db_cache[db_key] = cache_value[-8:]
        # GC-ing this now can only help the levelDB write.
        self.utxo_cache = {}
        # Now we can update to the batch.
        for key, value in self.db_cache.items():
            if value:
                batch.put(key, value)
            else:  # b'' or None
                batch.delete(key)
        # NOTE(review): `adds` and `collisions` are computed but never
        # used or logged -- candidates for logging or removal.
        adds = new_utxos + self.utxo_cache_spends
        self.db_cache = {}
        self.utxo_cache_spends = self.db_deletes = 0
        self.utxo_flush_count = self.flush_count
        self.db_tx_count = self.tx_count
        self.db_height = self.height
        self.db_tip = self.tip
        self.logger.info('UTXO flush took {:.1f} seconds'
                         .format(time.time() - fs_flush_end))
def read_headers(self, start, count):
# Read some from disk
disk_count = min(count, self.db_height + 1 - start)
result = self.fs_read_headers(start, disk_count)
count -= disk_count
start += disk_count
# The rest from memory
if count:
start -= self.db_height + 1
if not (count >= 0 and start + count <= len(self.headers)):
raise ChainError('{:,d} headers starting at {:,d} not on disk'
.format(count, start))
result += b''.join(self.headers[start: start + count])
return result
def get_tx_hash(self, tx_num):
'''Returns the tx_hash and height of a tx number.'''
tx_hash, tx_height = self.fs_tx_hash(tx_num)
# Is this unflushed?
if tx_hash is None:
tx_hashes = self.tx_hashes[tx_height - (self.db_height + 1)]
tx_hash = tx_hashes[tx_num - self.tx_counts[tx_height - 1]]
return tx_hash, tx_height
def mempool_transactions(self, hash168):
'''Generate (hex_hash, tx_fee, unconfirmed) tuples for mempool
entries for the hash168.
unconfirmed is True if any txin is unconfirmed.
'''
return self.mempool.transactions(hash168)
def mempool_value(self, hash168):
'''Return the unconfirmed amount in the mempool for hash168.
Can be positive or negative.
'''
return self.mempool.value(hash168)
| {"/electrumx_server.py": ["/server/env.py", "/server/protocol.py"], "/server/protocol.py": ["/lib/jsonrpc.py", "/server/block_processor.py", "/server/version.py"], "/server/block_processor.py": ["/server/db.py"]} |
68,172 | sensu-watson/watshogi | refs/heads/master | /game.py | from board import *
class GameRoop:
    '''USI game-phase loop: reads commands from stdin and drives Board.

    NOTE(review): "Roop" is presumably a misspelling of "Loop"; the
    name is kept because main.py references the class by it.
    '''
    def __init__(self):
        self.current_board = Board()

    def roop(self):
        '''Read commands until response() signals the game is over.'''
        keep_going = True
        while keep_going:
            keep_going = self.response(input().split())

    def response(self, command_list):
        '''Handle one USI command; return False when the game ends.'''
        command = command_list[0]
        if command == 'position':
            if command_list[1] == 'startpos':
                # Tokens after the 'moves' keyword (index 2) are the moves.
                moves = list(command_list[3:])
                if len(moves) % 2 == 0:
                    # Even number played: the engine moves next as sente.
                    self.current_board.set_startpos()
                else:
                    self.current_board.set_startpos(engine_turn = 'gote')
                self.current_board.generate_state(moves)
        if command == 'go':
            best_move, _ = self.current_board.calc_best_move()
            print('bestmove ' + best_move)
        if command == 'gameover':
            return False
        return True
if __name__ == '__main__':
    # Intentionally a no-op: game.py is driven by main.py, not run directly.
    pass
| {"/game.py": ["/board.py"], "/main.py": ["/game.py"], "/board_test.py": ["/board.py"], "/main_test.py": ["/main.py"]} |
68,173 | sensu-watson/watshogi | refs/heads/master | /main.py | from game import *
class SoftwareInformation:
    '''Static engine identity strings reported during the USI handshake.'''
    software_name = 'Watshogi'
    software_author = 'Masaru Watanabe'

    def response_softwarename(self):
        '''Print the USI "id name" line.'''
        print('id name ' + self.software_name)

    def response_softwareauthor(self):
        '''Print the USI "id author" line.

        Bug fix: the USI protocol requires "id author <author>"; this
        previously printed "id name" for the author line as well.
        '''
        print('id author ' + self.software_author)
class MainRoop:
    '''Top-level USI command loop: handshake, then hand off to GameRoop.'''

    def __init__(self):
        self.swInfo = SoftwareInformation()
        self.game = GameRoop()

    def roop(self):
        '''Read stdin commands until response() asks to stop.'''
        running = True
        while running:
            running = self.response(input().split())

    def response(self, command_list):
        '''Handle one USI command; return False on "quit".'''
        command = command_list[0]
        if command == 'usi':
            # Identify the engine, then acknowledge the handshake.
            self.swInfo.response_softwarename()
            self.swInfo.response_softwareauthor()
            print('usiok')
        if command == 'isready':
            print('readyok')
        if command == 'usinewgame':
            self.game.roop()
        if command == 'quit':
            return False
        return True
if __name__ == '__main__':
    # Script entry point: start the engine's USI command loop.
    mainRoop = MainRoop()
    mainRoop.roop()
| {"/game.py": ["/board.py"], "/main.py": ["/game.py"], "/board_test.py": ["/board.py"], "/main_test.py": ["/main.py"]} |
68,174 | sensu-watson/watshogi | refs/heads/master | /board_test.py | import unittest
from board import *
class BoardTest(unittest.TestCase):
    '''Unit tests for Board: move generation, drops, evaluation and
    position setup/parsing.'''
    def test_calc_best_move(self):
        myboard = Board()
        myboard.set_startpos()
        ret_str, ret_int = myboard.calc_best_move()
        self.assertIsInstance(ret_str, str)
        self.assertIsInstance(ret_int, int)
    def test_evaluation_value(self):
        myboard = Board()
        myboard.set_startpos()
        ret = myboard.evaluation_value()
        exc_g = -10000
        exc_l = 10000
        self.assertGreater(ret, exc_g)
        self.assertLess(ret, exc_l)
    def test_puttable_piece(self):
        myboard = Board()
        myboard.set_startpos(
            startpos =
            'PPP3PPP/9/9/9/9/9/9/9/9',
            startbelongings = 'PLNSGR'
        )
        geo = '3c'
        ret = myboard.puttable_piece(geo)
        exc = ['L*', 'N*', 'S*', 'G*', 'R*']
        self.assertCountEqual(ret, exc)
    def test_movable_place_promote_r_1(self):
        myboard = Board()
        myboard.set_startpos(
            startpos =
            '9/9/9/9/9/4r4/9/9/9'
        )
        geo = '5f'
        ret = myboard.movable_place_promote_r(geo)
        exc = ['5e', '5d', '5c', '5b', '5a',
               '4f', '3f', '2f', '1f',
               '6f', '7f', '8f', '9f',
               '5g', '5h', '5i',
               '4e', '4g', '6e', '6g']
        self.assertCountEqual(ret, exc)
    def test_movable_place_promote_b_1(self):
        myboard = Board()
        myboard.set_startpos(
            startpos =
            '9/9/9/9/9/4b4/9/9/9'
        )
        geo = '5f'
        ret = myboard.movable_place_promote_b(geo)
        exc = ['4e', '3d', '2c', '1b',
               '6e', '7d', '8c', '9b',
               '4g', '3h', '2i',
               '6g', '7h', '8i',
               '5e', '5g', '4f', '6f']
        self.assertCountEqual(ret, exc)
    def test_movable_place_k_1(self):
        myboard = Board()
        myboard.set_startpos(
            startpos =
            '9/9/9/9/4k4/9/9/9/9'
        )
        geo = '5e'
        ret = myboard.movable_place_k(geo)
        exc = ['4d', '5d', '6d',
               '4e', '6e',
               '4f', '5f', '6f']
        self.assertCountEqual(ret, exc)
    def test_movable_place_r_1(self):
        myboard = Board()
        myboard.set_startpos(
            startpos =
            '9/9/9/9/9/4r4/9/9/9'
        )
        geo = '5f'
        ret = myboard.movable_place_r(geo)
        exc = ['5e', '5d', '5c', '5b', '5a',
               '4f', '3f', '2f', '1f',
               '6f', '7f', '8f', '9f',
               '5g', '5h', '5i']
        self.assertCountEqual(ret, exc)
    def test_movable_place_b_1(self):
        myboard = Board()
        myboard.set_startpos(
            startpos =
            '9/9/9/9/9/4b4/9/9/9'
        )
        geo = '5f'
        ret = myboard.movable_place_b(geo)
        exc = ['4e', '3d', '2c', '1b',
               '6e', '7d', '8c', '9b',
               '4g', '3h', '2i',
               '6g', '7h', '8i']
        self.assertCountEqual(ret, exc)
    def test_movable_place_g_1(self):
        myboard = Board()
        myboard.set_startpos(
            startpos =
            '9/9/9/9/4g4/9/9/9/9'
        )
        geo = '5e'
        ret = myboard.movable_place_g(geo)
        exc = ['5d', '6e', '4e', '4f', '5f', '6f']
        self.assertCountEqual(ret, exc)
    def test_movable_place_s_1(self):
        myboard = Board()
        myboard.set_startpos(
            startpos =
            '9/9/9/9/4s4/9/9/9/9'
        )
        geo = '5e'
        ret = myboard.movable_place_s(geo)
        exc = ['4d', '6d', '4f', '5f', '6f']
        self.assertCountEqual(ret, exc)
    def test_movable_place_n_1(self):
        myboard = Board()
        myboard.set_startpos(
            startpos =
            '4n4/9/9/9/9/9/9/9/9'
        )
        geo = '5a'
        # Fixed: previously called movable_place_N (the sente variant)
        # and never asserted anything; a gote knight on 5a jumps to
        # 4c and 6c, matching the expectation below.
        ret = myboard.movable_place_n(geo)
        exc = ['4c', '6c']
        self.assertCountEqual(ret, exc)
    def test_movable_place_l_1(self):
        myboard = Board()
        myboard.set_startpos(
            startpos =
            '4l4/9/9/9/9/9/9/9/9'
        )
        geo = '5a'
        ret = myboard.movable_place_l(geo)
        exc = ['5b', '5c', '5d', '5e', '5f', '5g', '5h', '5i']
        self.assertCountEqual(ret, exc)
    def test_movable_place_p_1(self):
        myboard = Board()
        myboard.set_startpos(
            startpos =
            '9/9/9/9/4p4/9/9/9/9'
        )
        geo = '5e'
        ret = myboard.movable_place_p(geo)
        exc = ['5f']
        self.assertCountEqual(ret, exc)
    def test_movable_place_promote_R_1(self):
        myboard = Board()
        myboard.set_startpos(
            startpos =
            '9/9/9/9/9/4R4/9/9/9'
        )
        geo = '5f'
        ret = myboard.movable_place_promote_R(geo)
        exc = ['5e', '5d', '5c', '5b', '5a',
               '4f', '3f', '2f', '1f',
               '6f', '7f', '8f', '9f',
               '5g', '5h', '5i',
               '4e', '4g', '6e', '6g']
        self.assertCountEqual(ret, exc)
    def test_movable_place_promote_B_1(self):
        myboard = Board()
        myboard.set_startpos(
            startpos =
            '9/9/9/9/9/4B4/9/9/9'
        )
        geo = '5f'
        ret = myboard.movable_place_promote_B(geo)
        exc = ['4e', '3d', '2c', '1b',
               '6e', '7d', '8c', '9b',
               '4g', '3h', '2i',
               '6g', '7h', '8i',
               '5e', '5g', '4f', '6f']
        self.assertCountEqual(ret, exc)
    def test_movable_place_K_1(self):
        myboard = Board()
        myboard.set_startpos(
            startpos =
            '9/9/9/9/4K4/9/9/9/9'
        )
        geo = '5e'
        ret = myboard.movable_place_K(geo)
        exc = ['4d', '5d', '6d',
               '4e', '6e',
               '4f', '5f', '6f']
        self.assertCountEqual(ret, exc)
    def test_movable_place_R_1(self):
        myboard = Board()
        myboard.set_startpos(
            startpos =
            '9/9/9/9/9/4R4/9/9/9'
        )
        geo = '5f'
        ret = myboard.movable_place_R(geo)
        exc = ['5e', '5d', '5c', '5b', '5a',
               '4f', '3f', '2f', '1f',
               '6f', '7f', '8f', '9f',
               '5g', '5h', '5i']
        self.assertCountEqual(ret, exc)
    def test_movable_place_B_1(self):
        myboard = Board()
        myboard.set_startpos(
            startpos =
            '9/9/9/9/9/4B4/9/9/9'
        )
        geo = '5f'
        ret = myboard.movable_place_B(geo)
        exc = ['4e', '3d', '2c', '1b',
               '6e', '7d', '8c', '9b',
               '4g', '3h', '2i',
               '6g', '7h', '8i']
        self.assertCountEqual(ret, exc)
    def test_movable_place_G_1(self):
        myboard = Board()
        myboard.set_startpos(
            startpos =
            '9/9/9/9/4G4/9/9/9/9'
        )
        geo = '5e'
        ret = myboard.movable_place_G(geo)
        exc = ['4d', '5d', '6d', '4e', '6e', '5f']
        self.assertCountEqual(ret, exc)
    def test_movable_place_S_1(self):
        myboard = Board()
        myboard.set_startpos(
            startpos =
            '9/9/9/9/4S4/9/9/9/9'
        )
        geo = '5e'
        ret = myboard.movable_place_S(geo)
        exc = ['4d', '5d', '6d', '4f', '6f']
        self.assertCountEqual(ret, exc)
    def test_movable_place_N_1(self):
        myboard = Board()
        myboard.set_startpos(
            startpos =
            '9/9/9/9/9/9/9/9/4N4'
        )
        geo = '5i'
        ret = myboard.movable_place_N(geo)
        exc = ['4g', '6g']
        self.assertCountEqual(ret, exc)
    def test_movable_place_N_2(self):
        myboard = Board()
        myboard.set_startpos(
            startpos =
            '9/9/9/9/9/9/3P1p3/9/4N4'
        )
        geo = '5i'
        ret = myboard.movable_place_N(geo)
        exc = ['4g']
        self.assertCountEqual(ret, exc)
    def test_movable_place_L_1(self):
        myboard = Board()
        myboard.set_startpos()
        geo = '9i'
        ret = myboard.movable_place_L(geo)
        exc = ['9h']
        self.assertCountEqual(ret, exc)
    def test_movable_place_L_2(self):
        myboard = Board()
        myboard.set_startpos(
            startpos =
            '4k4/9/4p4/9/9/9/4P4/9/4K3L'
        )
        geo = '1i'
        ret = myboard.movable_place_L(geo)
        exc = ['1a', '1b', '1c', '1d', '1e', '1f', '1g', '1h']
        self.assertCountEqual(ret, exc)
    def test_movable_place_L_3(self):
        myboard = Board()
        myboard.set_startpos(
            startpos =
            '4k4/9/4p3p/9/9/9/4P4/9/4K3L'
        )
        geo = '1i'
        ret = myboard.movable_place_L(geo)
        exc = ['1c', '1d', '1e', '1f', '1g', '1h']
        self.assertCountEqual(ret, exc)
    def test_movable_place_P_1(self):
        myboard = Board()
        myboard.set_startpos()
        geo = '7g'
        ret = myboard.movable_place_P(geo)
        exc = ['7f']
        self.assertCountEqual(ret, exc)
    def test_movable_place_P_2(self):
        myboard = Board()
        myboard.set_startpos(
            startpos =
            '4k4/9/4p4/4l4/9/4L4/4P4/9/4K4'
        )
        geo = '5g'
        ret = myboard.movable_place_P(geo)
        exc = []
        self.assertCountEqual(ret, exc)
    def test_is_exist_piece_1(self):
        myboard = Board()
        myboard.set_startpos()
        geo = '7f'
        ret = myboard.is_exist_piece(geo)
        exc = False
        self.assertEqual(ret, exc)
    def test_is_exist_piece_2(self):
        myboard = Board()
        myboard.set_startpos()
        geo = '7g'
        ret = myboard.is_exist_piece(geo)
        exc = True
        self.assertEqual(ret, exc)
    def test_is_exist_turnplayer_piece_1(self):
        myboard = Board()
        myboard.set_startpos()
        geo = '7f'
        ret = myboard.is_exist_turnplayer_piece(geo)
        exc = False
        self.assertEqual(ret, exc)
    def test_is_exist_turnplayer_piece_2(self):
        myboard = Board()
        myboard.set_startpos()
        geo = '7g'
        ret = myboard.is_exist_turnplayer_piece(geo)
        exc = True
        self.assertEqual(ret, exc)
    def test_is_exist_turnplayer_piece_3(self):
        myboard = Board()
        myboard.set_startpos()
        geo = '3c'
        ret = myboard.is_exist_turnplayer_piece(geo)
        exc = False
        self.assertEqual(ret, exc)
    def test_generate_next_move(self):
        myboard = Board()
        myboard.set_startpos(
            startpos =
            '4k4/9/4p4/9/9/9/4P4/9/4K4',
            starttempo = '0'
        )
        exc = ['5g5f', '5i6i', '5i6h', '5i5h', '5i4i', '5i4h']
        ret = myboard.generate_next_move()
        self.assertCountEqual(ret, exc)
    def test_board_reverse_geo_1(self):
        myboard = Board()
        myboard.set_startpos()
        i = 5
        j = 2
        exc = '7f'
        ret = myboard.board_reverse_geo(i, j)
        self.assertEqual(ret, exc)
    def test_generate_state(self):
        myboard = Board()
        myboard.set_startpos()
        moves = ['7g7f', '3c3d', '8h2b+']
        exc = {'board':
               [['l', 'n', 's', 'g', 'k', 'g', 's', 'n', 'l'],
                [' ', 'r', ' ', ' ', ' ', ' ', ' ', 'B+', ' '],
                ['p', 'p', 'p', 'p', 'p', 'p', ' ', 'p', 'p'],
                [' ', ' ', ' ', ' ', ' ', ' ', 'p', ' ', ' '],
                [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
                [' ', ' ', 'P', ' ', ' ', ' ', ' ', ' ', ' '],
                ['P', 'P', ' ', 'P', 'P', 'P', 'P', 'P', 'P'],
                [' ', ' ', ' ', ' ', ' ', ' ', ' ', 'R', ' '],
                ['L', 'N', 'S', 'G', 'K', 'G', 'S', 'N', 'L']
                ],
               'sente_hand':{
                   'R': 0,
                   'B': 1,
                   'G': 0,
                   'S': 0,
                   'N': 0,
                   'L': 0,
                   'P': 0,
               },
               'gote_hand':{
                   'R': 0,
                   'B': 0,
                   'G': 0,
                   'S': 0,
                   'N': 0,
                   'L': 0,
                   'P': 0,
               },
               'turn_player':'gote',
               'tempo':3}
        ret = myboard.generate_state(moves)
        self.assertEqual(ret, exc)
    def test_toggle_player_1(self):
        myboard = Board()
        myboard.set_startpos()
        exc = 'gote'
        ret = myboard.toggle_player()
        self.assertEqual(ret, exc)
    def test_toggle_player_2(self):
        myboard = Board()
        myboard.set_startpos()
        exc = 'sente'
        _ = myboard.toggle_player()
        ret = myboard.toggle_player()
        self.assertEqual(ret, exc)
    def test_board_geo_1(self):
        myboard = Board()
        myboard.set_startpos()
        geo = '7f'
        exc_i = 5
        exc_j = 2
        ret_i, ret_j = myboard.board_geo(geo)
        self.assertEqual(ret_i, exc_i)
        self.assertEqual(ret_j, exc_j)
    def test_move_peace_1(self):
        myboard = Board()
        myboard.set_startpos()
        move = '7g7f'
        exc = {'board':
               [['l', 'n', 's', 'g', 'k', 'g', 's', 'n', 'l'],
                [' ', 'r', ' ', ' ', ' ', ' ', ' ', 'b', ' '],
                ['p', 'p', 'p', 'p', 'p', 'p', 'p', 'p', 'p'],
                [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
                [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
                [' ', ' ', 'P', ' ', ' ', ' ', ' ', ' ', ' '],
                ['P', 'P', ' ', 'P', 'P', 'P', 'P', 'P', 'P'],
                [' ', 'B', ' ', ' ', ' ', ' ', ' ', 'R', ' '],
                ['L', 'N', 'S', 'G', 'K', 'G', 'S', 'N', 'L']
                ],
               'sente_hand':{
                   'R': 0,
                   'B': 0,
                   'G': 0,
                   'S': 0,
                   'N': 0,
                   'L': 0,
                   'P': 0,
               },
               'gote_hand':{
                   'R': 0,
                   'B': 0,
                   'G': 0,
                   'S': 0,
                   'N': 0,
                   'L': 0,
                   'P': 0,
               },
               'turn_player':'gote',
               'tempo':1}
        ret = myboard.move_peace(move)
        self.assertEqual(ret, exc)
    def test_move_peace_2(self):
        myboard = Board()
        # Fixed: the return value of set_startpos() was assigned to
        # `ret` and immediately overwritten below; drop the assignment.
        myboard.set_startpos(
            startpos =
            'lnsgkgsnl/1r5b1/pppppp1pp/6p2/9/2P6/PP1PPPPPP/1B5R1/LNSGKGSNL',
            starttempo = '2'
        )
        move = '8h2b+'
        exc = {'board':
               [['l', 'n', 's', 'g', 'k', 'g', 's', 'n', 'l'],
                [' ', 'r', ' ', ' ', ' ', ' ', ' ', 'B+', ' '],
                ['p', 'p', 'p', 'p', 'p', 'p', ' ', 'p', 'p'],
                [' ', ' ', ' ', ' ', ' ', ' ', 'p', ' ', ' '],
                [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
                [' ', ' ', 'P', ' ', ' ', ' ', ' ', ' ', ' '],
                ['P', 'P', ' ', 'P', 'P', 'P', 'P', 'P', 'P'],
                [' ', ' ', ' ', ' ', ' ', ' ', ' ', 'R', ' '],
                ['L', 'N', 'S', 'G', 'K', 'G', 'S', 'N', 'L']
                ],
               'sente_hand':{
                   'R': 0,
                   'B': 1,
                   'G': 0,
                   'S': 0,
                   'N': 0,
                   'L': 0,
                   'P': 0,
               },
               'gote_hand':{
                   'R': 0,
                   'B': 0,
                   'G': 0,
                   'S': 0,
                   'N': 0,
                   'L': 0,
                   'P': 0,
               },
               'turn_player':'gote',
               'tempo':3}
        ret = myboard.move_peace(move)
        self.assertEqual(ret, exc)
    def test_set_startpos_1(self):
        myboard = Board()
        startpos = 'lnsgkgsnl/1r5b1/ppppppppp/9/9/9/PPPPPPPPP/1B5R1/LNSGKGSNL'
        startplayer = 'b'
        startbelongings = '-'
        starttempo = '0'
        exc = {'board':
               [['l', 'n', 's', 'g', 'k', 'g', 's', 'n', 'l'],
                [' ', 'r', ' ', ' ', ' ', ' ', ' ', 'b', ' '],
                ['p', 'p', 'p', 'p', 'p', 'p', 'p', 'p', 'p'],
                [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
                [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
                [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
                ['P', 'P', 'P', 'P', 'P', 'P', 'P', 'P', 'P'],
                [' ', 'B', ' ', ' ', ' ', ' ', ' ', 'R', ' '],
                ['L', 'N', 'S', 'G', 'K', 'G', 'S', 'N', 'L']
                ],
               'sente_hand':{
                   'R': 0,
                   'B': 0,
                   'G': 0,
                   'S': 0,
                   'N': 0,
                   'L': 0,
                   'P': 0,
               },
               'gote_hand':{
                   'R': 0,
                   'B': 0,
                   'G': 0,
                   'S': 0,
                   'N': 0,
                   'L': 0,
                   'P': 0,
               },
               'turn_player':'sente',
               'tempo':0,
               'engine_turn':'sente'}
        ret = myboard.set_startpos(
            startpos, startplayer, startbelongings, starttempo)
        self.assertEqual(ret, exc)
    def test_set_startpos_2(self):
        myboard = Board()
        startpos = 'lnsgkgsnl/9/ppppppppp/9/9/9/PPPPPPPPP/1B5R1/LNSGKGSNL'
        startplayer = 'w'
        startbelongings = 'r2b'
        starttempo = '1'
        exc = {'board':
               [['l', 'n', 's', 'g', 'k', 'g', 's', 'n', 'l'],
                [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
                ['p', 'p', 'p', 'p', 'p', 'p', 'p', 'p', 'p'],
                [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
                [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
                [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
                ['P', 'P', 'P', 'P', 'P', 'P', 'P', 'P', 'P'],
                [' ', 'B', ' ', ' ', ' ', ' ', ' ', 'R', ' '],
                ['L', 'N', 'S', 'G', 'K', 'G', 'S', 'N', 'L']
                ],
               'sente_hand':{
                   'R': 0,
                   'B': 0,
                   'G': 0,
                   'S': 0,
                   'N': 0,
                   'L': 0,
                   'P': 0,
               },
               'gote_hand':{
                   'R': 1,
                   'B': 2,
                   'G': 0,
                   'S': 0,
                   'N': 0,
                   'L': 0,
                   'P': 0,
               },
               'turn_player':'gote',
               'tempo':1,
               'engine_turn':'sente'}
        ret = myboard.set_startpos(
            startpos = startpos, startplayer = startplayer,
            startbelongings = startbelongings, starttempo = starttempo)
        self.assertEqual(ret, exc)
if __name__ == '__main__':
    # Run the Board test suite when executed directly.
    unittest.main()
| {"/game.py": ["/board.py"], "/main.py": ["/game.py"], "/board_test.py": ["/board.py"], "/main_test.py": ["/main.py"]} |
68,175 | sensu-watson/watshogi | refs/heads/master | /board.py | import copy
import threading
class Board:
def __init__(self, board = None, sente_hand = None,
gote_hand = None, turn_player = None, tempo = None,
next_move = None, engine_turn = None, depth = 3):
self.board = board
self.sente_hand = sente_hand
self.gote_hand = gote_hand
self.turn_player = turn_player
self.tempo = tempo
self.next_move = next_move
self.engine_turn = engine_turn
self.depth = depth
    def calc_best_move(self):
        '''Search self.depth plies ahead and return (best_move, score).

        Scores every candidate in self.next_move by recursing on a
        deep copy of the position, one thread per candidate, then
        picks the maximum (engine to move) or minimum (opponent to
        move).  At depth 0 returns (None, static evaluation).
        '''
        def calc_next(k):
            # Score candidate move k and record it in the shared
            # self.next_move dict (each thread writes a distinct key).
            """nextboard = Board(board = copy.deepcopy(self.board),
                sente_hand = copy.deepcopy(self.sente_hand),
                gote_hand = copy.deepcopy(self.gote_hand),
                turn_player = self.turn_player,
                tempo = self.tempo,
                engine_turn = self.engine_turn,
                depth = self.depth - 1)"""
            nextboard = copy.deepcopy(self)
            nextboard.depth -= 1
            try:
                nextboard.move_peace(k)
            except KeyError:
                # NOTE(review): a KeyError from move_peace is scored as
                # a decisive result (presumably a king capture) -- confirm.
                if self.engine_turn == self.turn_player:
                    self.next_move[k] = 9999
                    return 9999
                else:
                    self.next_move[k] = -9999
                    return -9999
            _, ret = nextboard.calc_best_move()
            self.next_move[k] = ret
            return ret
        if self.depth == 0:
            return None, self.evaluation_value()
        self.generate_next_move()
        # NOTE(review): next_turn_player and move are assigned but never
        # used below.
        next_turn_player = 'sente' if self.turn_player == 'gote' else 'gote'
        move = {}
        th = {}
        # One worker thread per candidate move.
        for k in self.next_move.keys():
            th[k] = threading.Thread(target=calc_next, name=k, args=(k,))
            th[k].start()
        for k in self.next_move.keys():
            th[k].join()
        #for k in self.next_move.keys():
        #    self.next_move[k] = calc_next(k)
        if self.engine_turn == self.turn_player:
            sort_next_move = sorted(self.next_move.items(), key=lambda x:x[1], reverse=True)
        else:
            sort_next_move = sorted(self.next_move.items(), key=lambda x:x[1])
        best_move = sort_next_move[0][0]
        return best_move, self.next_move[best_move]
def evaluation_value(self):
pointsum = 0
for i in range(0,9):
for j in range(0, 9):
piece = self.board[i][j]
if piece == ' ':
pass
elif piece == 'P':
pointsum += 1
elif piece == 'L':
pointsum += 3
elif piece == 'N':
pointsum += 4
elif piece == 'S':
pointsum += 5
elif piece == 'G':
pointsum += 6
elif piece == 'B':
pointsum += 8
elif piece == 'R':
pointsum += 10
elif piece == 'K':
pass
elif piece == 'P+':
pointsum += 7
elif piece == 'L+':
pointsum += 6
elif piece == 'N+':
pointsum += 6
elif piece == 'S+':
pointsum += 6
elif piece == 'B+':
pointsum += 10
elif piece == 'R+':
pointsum += 12
elif piece == 'p':
pointsum -= 1
elif piece == 'l':
pointsum -= 3
elif piece == 'n':
pointsum -= 4
elif piece == 's':
pointsum -= 5
elif piece == 'g':
pointsum -= 6
elif piece == 'b':
pointsum -= 8
elif piece == 'r':
pointsum -= 10
elif piece == 'k':
pass
elif piece == 'p+':
pointsum -= 7
elif piece == 'l+':
pointsum -= 6
elif piece == 'n+':
pointsum -= 6
elif piece == 's+':
pointsum -= 6
elif piece == 'b+':
pointsum -= 10
elif piece == 'r+':
pointsum -= 12
pointsum += self.sente_hand['R'] * 10
pointsum += self.sente_hand['B'] * 8
pointsum += self.sente_hand['S'] * 5
pointsum += self.sente_hand['N'] * 4
pointsum += self.sente_hand['L'] * 3
pointsum += self.sente_hand['P'] * 1
pointsum -= self.gote_hand['R'] * 10
pointsum -= self.gote_hand['B'] * 8
pointsum -= self.gote_hand['S'] * 5
pointsum -= self.gote_hand['N'] * 4
pointsum -= self.gote_hand['L'] * 3
pointsum -= self.gote_hand['P'] * 1
if self.engine_turn == 'sente':
self.can_move_sente()
else:
pointsum *= -1
self.can_move_gote()
self.generate_next_move()
return pointsum * 100 + len(self.next_move)
#return pointsum
def puttable_piece(self, geo = None, i = None, j = None):
if geo:
i, j = self.board_geo(geo)
retlist = []
if self.is_exist_piece(i = i, j = j):
return retlist
if self.turn_player == 'sente':
hand = self.sente_hand
else:
hand = self.gote_hand
for k,v in hand.items():
if v > 0:
if i < 1 and k == 'P':
continue
if i < 1 and k == 'L':
continue
if i < 2 and k == 'N':
continue
if k == 'P':
isnopawn = True
for l in range(9):
if self.board[l][j] == 'P' and self.turn_player == 'sente':
isnopawn = False
if self.board[l][j] == 'p' and self.turn_player == 'gote':
isnopawn = False
if isnopawn == False:
continue
retlist.append(k + '*')
return retlist
def movable_place_P(self, geo = None, i = None, j = None):
if geo:
i, j = self.board_geo(geo)
if self.is_exist_turnplayer_piece(i = i-1, j = j):
return []
return [self.board_reverse_geo(i-1,j)]
def movable_place_L(self, geo = None, i = None, j = None):
if geo:
i, j = self.board_geo(geo)
retlist=[]
for k in range(i-1, -1, -1):
if self.is_exist_piece(i = k, j = j):
if not self.is_exist_turnplayer_piece(i = k, j = j):
retlist.append(self.board_reverse_geo(k,j))
return retlist
else:
retlist.append(self.board_reverse_geo(k,j))
return retlist
def movable_place_N(self, geo = None, i = None, j = None):
if geo:
i, j = self.board_geo(geo)
retlist=[]
if not self.is_exist_turnplayer_piece(i = i-2, j = j-1):
retlist.append(self.board_reverse_geo(i-2,j-1))
if not self.is_exist_turnplayer_piece(i = i-2, j = j+1):
retlist.append(self.board_reverse_geo(i-2,j+1))
return retlist
def movable_place_S(self, geo = None, i = None, j = None):
if geo:
i, j = self.board_geo(geo)
retlist=[]
for k in range(j-1, j+2):
if not self.is_exist_turnplayer_piece(i = i-1, j = k):
retlist.append(self.board_reverse_geo(i-1,k))
if not self.is_exist_turnplayer_piece(i = i+1, j = j-1):
retlist.append(self.board_reverse_geo(i+1,j-1))
if not self.is_exist_turnplayer_piece(i = i+1, j = j+1):
retlist.append(self.board_reverse_geo(i+1,j+1))
return retlist
def movable_place_G(self, geo = None, i = None, j = None):
if geo:
i, j = self.board_geo(geo)
retlist=[]
for k in range(j-1, j+2):
if not self.is_exist_turnplayer_piece(i = i-1, j = k):
retlist.append(self.board_reverse_geo(i-1,k))
if not self.is_exist_turnplayer_piece(i = i, j = j-1):
retlist.append(self.board_reverse_geo(i,j-1))
if not self.is_exist_turnplayer_piece(i = i, j = j+1):
retlist.append(self.board_reverse_geo(i,j+1))
if not self.is_exist_turnplayer_piece(i = i+1, j = j):
retlist.append(self.board_reverse_geo(i+1,j))
return retlist
def movable_place_B(self, geo = None, i = None, j = None):
    """Bishop: slide along the four diagonals.  Each ray closes at the first
    occupied square, which is kept only when it holds an enemy piece
    (off-board squares read as occupied-by-own and close the ray silently).

    The distance-major scan (k outer, direction inner) reproduces the
    original output ordering exactly.
    """
    if geo:
        i, j = self.board_geo(geo)
    retlist = []
    # Insertion order mirrors the original left-up/left-down/right-up/right-down flags.
    rays = {(-1, 1): True, (1, 1): True, (-1, -1): True, (1, -1): True}
    for k in range(1, 9):
        for (di, dj), ray_open in rays.items():
            if not ray_open:
                continue
            a = i + di * k
            b = j + dj * k
            if not self.is_exist_piece(i = a, j = b):
                retlist.append(self.board_reverse_geo(a, b))
            else:
                rays[(di, dj)] = False
                if not self.is_exist_turnplayer_piece(i = a, j = b):
                    retlist.append(self.board_reverse_geo(a, b))
    return retlist
def movable_place_R(self, geo = None, i = None, j = None):
    """Rook: slide along the four orthogonals.  Each ray closes at the first
    occupied square, kept only when it holds an enemy piece.

    Distance-major scan preserves the original up/left/right/down ordering.
    """
    if geo:
        i, j = self.board_geo(geo)
    retlist = []
    rays = {(-1, 0): True, (0, 1): True, (0, -1): True, (1, 0): True}
    for k in range(1, 9):
        for (di, dj), ray_open in rays.items():
            if not ray_open:
                continue
            a = i + di * k
            b = j + dj * k
            if not self.is_exist_piece(i = a, j = b):
                retlist.append(self.board_reverse_geo(a, b))
            else:
                rays[(di, dj)] = False
                if not self.is_exist_turnplayer_piece(i = a, j = b):
                    retlist.append(self.board_reverse_geo(a, b))
    return retlist
def movable_place_K(self, geo = None, i = None, j = None):
    """King: all eight neighbouring squares not held by the mover's pieces."""
    if geo:
        i, j = self.board_geo(geo)
    retlist = []
    # Row above, the two side squares, then the row below (original order).
    for di, dj in ((-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1),
                   (1, -1), (1, 0), (1, 1)):
        if not self.is_exist_turnplayer_piece(i = i + di, j = j + dj):
            retlist.append(self.board_reverse_geo(i + di, j + dj))
    return retlist
def movable_place_promote_B(self, geo = None, i = None, j = None):
    """Promoted bishop (horse): bishop rays plus one-step king moves,
    de-duplicated via a set (iteration order therefore unspecified, as in
    the original)."""
    if geo:
        i, j = self.board_geo(geo)
    moves = set(self.movable_place_B(i = i, j = j))
    moves.update(self.movable_place_K(i = i, j = j))
    return list(moves)
def movable_place_promote_R(self, geo = None, i = None, j = None):
    """Promoted rook (dragon): rook rays plus one-step king moves,
    de-duplicated via a set."""
    if geo:
        i, j = self.board_geo(geo)
    moves = set(self.movable_place_R(i = i, j = j))
    moves.update(self.movable_place_K(i = i, j = j))
    return list(moves)
def movable_place_p(self, geo = None, i = None, j = None):
    """Gote pawn: one square down the board (toward row 8), unless blocked
    by the mover's own piece."""
    if geo:
        i, j = self.board_geo(geo)
    dest_row = i + 1
    if self.is_exist_turnplayer_piece(i = dest_row, j = j):
        return []
    return [self.board_reverse_geo(dest_row, j)]
def movable_place_l(self, geo = None, i = None, j = None):
    """Gote lance: slide toward row 8 until the first occupied square,
    which is included only as a capture."""
    if geo:
        i, j = self.board_geo(geo)
    retlist = []
    for row in range(i + 1, 9):
        if not self.is_exist_piece(i = row, j = j):
            retlist.append(self.board_reverse_geo(row, j))
            continue
        if not self.is_exist_turnplayer_piece(i = row, j = j):
            retlist.append(self.board_reverse_geo(row, j))
        break
    return retlist
def movable_place_n(self, geo = None, i = None, j = None):
    """Gote knight: the two jump squares two rows down, one column aside."""
    if geo:
        i, j = self.board_geo(geo)
    retlist = []
    for dj in (-1, 1):
        if not self.is_exist_turnplayer_piece(i = i + 2, j = j + dj):
            retlist.append(self.board_reverse_geo(i + 2, j + dj))
    return retlist
def movable_place_s(self, geo = None, i = None, j = None):
    """Gote silver: three forward (downward) squares plus both rear diagonals."""
    if geo:
        i, j = self.board_geo(geo)
    retlist = []
    for di, dj in ((1, -1), (1, 0), (1, 1), (-1, -1), (-1, 1)):
        if not self.is_exist_turnplayer_piece(i = i + di, j = j + dj):
            retlist.append(self.board_reverse_geo(i + di, j + dj))
    return retlist
def movable_place_g(self, geo = None, i = None, j = None):
    """Gote gold: forward (downward) three, sideways two, and straight back."""
    if geo:
        i, j = self.board_geo(geo)
    retlist = []
    for di, dj in ((1, -1), (1, 0), (1, 1), (0, -1), (0, 1), (-1, 0)):
        if not self.is_exist_turnplayer_piece(i = i + di, j = j + dj):
            retlist.append(self.board_reverse_geo(i + di, j + dj))
    return retlist
def movable_place_b(self, geo = None, i = None, j = None):
    """Gote bishop: identical sliding pattern to the sente bishop."""
    coords = self.board_geo(geo) if geo else (i, j)
    return self.movable_place_B(i = coords[0], j = coords[1])
def movable_place_r(self, geo = None, i = None, j = None):
    """Gote rook: identical sliding pattern to the sente rook."""
    coords = self.board_geo(geo) if geo else (i, j)
    return self.movable_place_R(i = coords[0], j = coords[1])
def movable_place_k(self, geo = None, i = None, j = None):
    """Gote king: same step pattern as the sente king."""
    coords = self.board_geo(geo) if geo else (i, j)
    return self.movable_place_K(i = coords[0], j = coords[1])
def movable_place_promote_b(self, geo = None, i = None, j = None):
    """Gote horse: same move set as the sente promoted bishop."""
    coords = self.board_geo(geo) if geo else (i, j)
    return self.movable_place_promote_B(i = coords[0], j = coords[1])
def movable_place_promote_r(self, geo = None, i = None, j = None):
    """Gote dragon: same move set as the sente promoted rook."""
    coords = self.board_geo(geo) if geo else (i, j)
    return self.movable_place_promote_R(i = coords[0], j = coords[1])
def is_exist_turnplayer_piece(self, geo = None, i = None, j = None):
    """True when (i, j) holds a piece of the side to move.

    Off-board coordinates also report True so move generators treat them
    as unreachable.  Sente pieces are upper-case, gote lower-case; a blank
    cell (' ') is neither.
    """
    if geo:
        i, j = self.board_geo(geo)
    if not (0 <= i <= 8 and 0 <= j <= 8):
        return True
    first_char = self.board[i][j][0]
    if self.turn_player == 'sente':
        return first_char.isupper()
    return first_char.islower()
def is_exist_piece(self, geo = None, i = None, j = None):
    """True when (i, j) is occupied by any piece; off-board also reports True."""
    if geo:
        i, j = self.board_geo(geo)
    if not (0 <= i <= 8 and 0 <= j <= 8):
        return True
    return self.board[i][j] != ' '
def can_move_sente(self):
    """List every pseudo-legal sente (Black) move in USI notation.

    Empty squares yield drop moves (piece + '*' + square, e.g. 'P*5e');
    occupied squares yield board moves (origin + destination, with an
    extra '+'-suffixed entry when promotion is possible).  Sente's
    promotion zone is board rows 0-2.  Side effect: the moves are also
    cached as the keys of self.next_move (all values 0).
    """
    retlist = []
    for i in range(0,9):
        for j in range(0, 9):
            geo = self.board_reverse_geo(i = i,j = j)
            if self.board[i][j] == ' ':
                # Drops: every hand piece legally placeable on this square.
                movlist = self.puttable_piece(i = i, j = j)
                for x in movlist:
                    retlist.append(x + geo)
            elif self.board[i][j] == 'P':
                movlist = self.movable_place_P(i = i, j = j)
                for x in movlist:
                    # A pawn may stay unpromoted only if it won't reach row 0.
                    if i != 1:
                        retlist.append(geo + x)
                    # Leaving from rows 0-3 puts the destination in the zone.
                    if i < 4:
                        retlist.append(geo + x + '+')
            elif self.board[i][j] == 'L':
                movlist = self.movable_place_L(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    # A lance may not remain unpromoted on the last rank.
                    if a != 0:
                        retlist.append(geo + x)
                    if a < 3:
                        retlist.append(geo + x + '+')
            elif self.board[i][j] == 'N':
                movlist = self.movable_place_N(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    # Knights must promote when landing on the last two ranks.
                    if a > 1:
                        retlist.append(geo + x)
                    if a < 3:
                        retlist.append(geo + x + '+')
            elif self.board[i][j] == 'S':
                movlist = self.movable_place_S(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    retlist.append(geo + x)
                    # Promote when moving into or out of the zone.
                    if a < 3 or i < 3:
                        retlist.append(geo + x + '+')
            elif self.board[i][j] == 'G':
                movlist = self.movable_place_G(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    retlist.append(geo + x)
            elif self.board[i][j] == 'B':
                movlist = self.movable_place_B(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    retlist.append(geo + x)
                    if a < 3 or i < 3:
                        retlist.append(geo + x + '+')
            elif self.board[i][j] == 'R':
                movlist = self.movable_place_R(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    retlist.append(geo + x)
                    if a < 3 or i < 3:
                        retlist.append(geo + x + '+')
            elif self.board[i][j] == 'K':
                movlist = self.movable_place_K(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    retlist.append(geo + x)
            # Promoted pawn / lance / knight / silver all move like a gold.
            elif self.board[i][j] == 'P+':
                movlist = self.movable_place_G(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    retlist.append(geo + x)
            elif self.board[i][j] == 'L+':
                movlist = self.movable_place_G(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    retlist.append(geo + x)
            elif self.board[i][j] == 'N+':
                movlist = self.movable_place_G(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    retlist.append(geo + x)
            elif self.board[i][j] == 'S+':
                movlist = self.movable_place_G(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    retlist.append(geo + x)
            elif self.board[i][j] == 'B+':
                movlist = self.movable_place_promote_B(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    retlist.append(geo + x)
            elif self.board[i][j] == 'R+':
                movlist = self.movable_place_promote_R(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    retlist.append(geo + x)
    # Cache the move list as (insertion-ordered) dict keys for later lookup.
    retdic = {}
    for x in retlist:
        retdic[x] = 0
    self.next_move = retdic
    return retlist
def can_move_gote(self):
    """List every pseudo-legal gote (White) move in USI notation.

    Mirror image of can_move_sente: gote pieces are lower-case, advance
    toward row 8, and promote in board rows 6-8.  Side effect: the moves
    are also cached as the keys of self.next_move (all values 0).
    """
    retlist = []
    for i in range(0,9):
        for j in range(0, 9):
            geo = self.board_reverse_geo(i = i,j = j)
            if self.board[i][j] == ' ':
                # Drops: every hand piece legally placeable on this square.
                movlist = self.puttable_piece(i = i, j = j)
                for x in movlist:
                    retlist.append(x + geo)
            elif self.board[i][j] == 'p':
                movlist = self.movable_place_p(i = i, j = j)
                for x in movlist:
                    # A pawn may stay unpromoted only if it won't reach row 8.
                    if i != 7:
                        retlist.append(geo + x)
                    if i > 4:
                        retlist.append(geo + x + '+')
            elif self.board[i][j] == 'l':
                movlist = self.movable_place_l(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    # A lance may not remain unpromoted on the last rank.
                    if a != 8:
                        retlist.append(geo + x)
                    if a > 5:
                        retlist.append(geo + x + '+')
            elif self.board[i][j] == 'n':
                movlist = self.movable_place_n(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    # Knights must promote when landing on the last two ranks.
                    if a < 7:
                        retlist.append(geo + x)
                    if a > 5:
                        retlist.append(geo + x + '+')
            elif self.board[i][j] == 's':
                movlist = self.movable_place_s(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    retlist.append(geo + x)
                    # Promote when moving into or out of the zone.
                    if a > 5 or i > 5:
                        retlist.append(geo + x + '+')
            elif self.board[i][j] == 'g':
                movlist = self.movable_place_g(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    retlist.append(geo + x)
            elif self.board[i][j] == 'b':
                movlist = self.movable_place_b(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    retlist.append(geo + x)
                    if a > 5 or i > 5:
                        retlist.append(geo + x + '+')
            elif self.board[i][j] == 'r':
                movlist = self.movable_place_r(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    retlist.append(geo + x)
                    if a > 5 or i > 5:
                        retlist.append(geo + x + '+')
            elif self.board[i][j] == 'k':
                movlist = self.movable_place_k(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    retlist.append(geo + x)
            # Promoted pawn / lance / knight / silver all move like a gold.
            elif self.board[i][j] == 'p+':
                movlist = self.movable_place_g(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    retlist.append(geo + x)
            elif self.board[i][j] == 'l+':
                movlist = self.movable_place_g(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    retlist.append(geo + x)
            elif self.board[i][j] == 'n+':
                movlist = self.movable_place_g(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    retlist.append(geo + x)
            elif self.board[i][j] == 's+':
                movlist = self.movable_place_g(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    retlist.append(geo + x)
            elif self.board[i][j] == 'b+':
                movlist = self.movable_place_promote_b(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    retlist.append(geo + x)
            elif self.board[i][j] == 'r+':
                movlist = self.movable_place_promote_r(i = i, j = j)
                for x in movlist:
                    a, b = self.board_geo(x)
                    retlist.append(geo + x)
    # Cache the move list as (insertion-ordered) dict keys for later lookup.
    retdic = {}
    for x in retlist:
        retdic[x] = 0
    self.next_move = retdic
    return retlist
def generate_next_move(self):
    """Enumerate the side-to-move's candidate moves (delegates by turn_player)."""
    if self.turn_player == 'sente':
        return self.can_move_sente()
    return self.can_move_gote()
def generate_state(self, moves):
    """Apply each move in order, then return a snapshot of the position.

    The snapshot dict exposes the live board/hand objects (not copies).
    """
    for mv in moves:
        self.move_peace(mv)
    return {
        'board': self.board,
        'sente_hand': self.sente_hand,
        'gote_hand': self.gote_hand,
        'turn_player': self.turn_player,
        'tempo': self.tempo,
    }
def toggle_player(self):
    """Flip the side to move and return the new value ('sente' or 'gote')."""
    self.turn_player = 'sente' if self.turn_player == 'gote' else 'gote'
    return self.turn_player
def board_geo(self, geo):
    """Convert a USI square such as '7g' into board indices.

    Returns (row, col): the rank letter a-i maps to row 0-8 and the file
    digit maps to col = 9 - digit, so '9a' -> (0, 0) and '1i' -> (8, 8).
    An unknown rank letter leaves the row unbound (UnboundLocalError on
    return), matching the original's behaviour.
    """
    col = 9 - int(geo[0])
    rank_index = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4,
                  'f': 5, 'g': 6, 'h': 7, 'i': 8}
    if geo[1] in rank_index:
        row = rank_index[geo[1]]
    return row, col
def board_reverse_geo(self, i, j):
    """Inverse of board_geo: indices (i, j) back to USI notation.

    (0, 0) -> '9a', (8, 8) -> '1i'.  An out-of-range row leaves the letter
    unbound (UnboundLocalError), matching the original's behaviour.
    """
    rank_letter = {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e',
                   5: 'f', 6: 'g', 7: 'h', 8: 'i'}
    vertical = str(9 - j)
    if i in rank_letter:
        horizontal = rank_letter[i]
    return vertical + horizontal
def move_peace(self, move):
    """Apply one USI move and return a snapshot of the resulting state.

    `move` is either a board move ('7g7f', trailing '+' to promote) or a
    drop ('P*5e').  Mutates the board and hands in place, flips the turn
    and increments the move counter.  ('peace' is this project's spelling
    of 'piece'.)
    """
    if move[0:1].isdigit():
        # Board move: lift the piece off its origin square.
        before_i, before_j = self.board_geo(move[0:2])
        move_peace = self.board[before_i][before_j]
        self.board[before_i][before_j] = ' '
    else:
        # Drop: hand keys are always upper-case; the placed piece keeps
        # the mover's case (lower for gote).
        if self.turn_player == 'sente':
            move_peace = move[0:1]
            self.sente_hand[move[0:1]]-=1
        else:
            move_peace = move[0:1].lower()
            self.gote_hand[move[0:1]]-=1
    after_i, after_j = self.board_geo(move[2:4])
    take_peace = self.board[after_i][after_j]
    if take_peace != ' ':
        # Captures join the capturer's hand unpromoted and upper-cased.
        if self.turn_player == 'sente':
            self.sente_hand[take_peace[0:1].upper()]+=1
        else:
            self.gote_hand[take_peace[0:1].upper()]+=1
    if len(move) == 5:
        # A 5-char move carries a trailing '+': place the promoted form.
        self.board[after_i][after_j] = move_peace + '+'
    else:
        self.board[after_i][after_j] = move_peace
    self.toggle_player()
    self.tempo+=1
    retdic = {}
    retdic['board'] = self.board
    retdic['sente_hand'] = self.sente_hand
    retdic['gote_hand'] = self.gote_hand
    retdic['turn_player'] = self.turn_player
    retdic['tempo'] = self.tempo
    return retdic
def set_startpos(
    self,
    startpos = 'lnsgkgsnl/1r5b1/ppppppppp/9/9/9/PPPPPPPPP/1B5R1/LNSGKGSNL',
    startplayer = 'b',
    startbelongings = '-',
    starttempo = '0',
    engine_turn = 'sente'):
    """Initialise the position from SFEN-style fields and return a snapshot.

    startpos: board rows separated by '/', digits meaning runs of empty
    squares; startplayer: 'b' (sente) or anything else (gote);
    startbelongings: pieces in hand ('-' for none, counts of 1 implicit);
    starttempo: move counter as a string.
    """
    self.engine_turn = engine_turn
    # Expand each run-length digit into that many blanks so every row
    # becomes exactly 9 characters.
    startpos = startpos.replace('9', '         ')
    startpos = startpos.replace('8', '        ')
    startpos = startpos.replace('7', '       ')
    startpos = startpos.replace('6', '      ')
    startpos = startpos.replace('5', '     ')
    startpos = startpos.replace('4', '    ')
    startpos = startpos.replace('3', '   ')
    startpos = startpos.replace('2', '  ')
    startpos = startpos.replace('1', ' ')
    posstr_array = startpos.split('/')
    board = []
    for x in posstr_array:
        board.append(list(x))
    self.board = board
    if startplayer == 'b':
        turn_player = 'sente'
    else:
        turn_player = 'gote'
    self.turn_player = turn_player
    sente_hand = {'R': 0,'B': 0,'G': 0,'S': 0,'N': 0,'L': 0,'P': 0}
    gote_hand = {'R': 0,'B': 0,'G': 0,'S': 0,'N': 0,'L': 0,'P': 0}
    if startbelongings == '-':
        pass
    else:
        # SFEN omits a count of 1 (e.g. 'P2l' means one P, two l).  Scan
        # the reversed string and record where an implicit '1' must be
        # inserted: every letter not followed (in reverse) by a digit.
        # NOTE(review): this only handles single-digit counts; hands with
        # 10+ of a piece would parse wrongly — confirm whether that can occur.
        tempstr = startbelongings[::-1]
        isdigit = True
        input_position = []
        for i in range(len(tempstr)):
            if isdigit:
                isdigit = False
            else:
                if tempstr[i].isdigit():
                    isdigit = True
                else:
                    input_position.append(i)
                    isdigit = False
        if not isdigit:
            input_position.append(len(tempstr))
        # Insert from the highest index down so earlier offsets stay valid.
        input_position = input_position[::-1]
        templist = list(tempstr)
        for x in input_position:
            templist.insert(x, '1')
        tempstr = ''.join(templist)
        startbelongings = tempstr[::-1]
        # Now the string is strict (count, piece) pairs: upper-case goes
        # to the sente hand, lower-case to gote (keys stored upper-case).
        templist = list(startbelongings)
        for i in range(0, len(templist), 2):
            num = templist[i]
            peace = templist[i+1]
            if peace.isupper():
                sente_hand[peace] = int(num)
            else:
                gote_hand[peace.upper()] = int(num)
    self.sente_hand = sente_hand
    self.gote_hand = gote_hand
    tempo = int(starttempo)
    self.tempo = int(tempo)
    retdic = {}
    retdic['board'] = self.board
    retdic['sente_hand'] = self.sente_hand
    retdic['gote_hand'] = self.gote_hand
    retdic['turn_player'] = self.turn_player
    retdic['tempo'] = self.tempo
    retdic['engine_turn'] = self.engine_turn
    return retdic
# No standalone behaviour: this module is only meant to be imported.
if __name__ == '__main__':
    pass
| {"/game.py": ["/board.py"], "/main.py": ["/game.py"], "/board_test.py": ["/board.py"], "/main_test.py": ["/main.py"]} |
68,176 | sensu-watson/watshogi | refs/heads/master | /main_test.py | import unittest
from main import *
class MainRoopTest(unittest.TestCase):
    """Smoke test: the engine main loop answers the USI handshake."""

    def setUp(self):
        # Fix 1: the file uses `from main import *`, so the module name
        # `main` itself is never bound — `main.MainRoop()` raised NameError.
        self.mainRoop = MainRoop()
        # Fix 2: the original called the bare name `mainRoop`, which does
        # not exist; the instance lives on `self`.
        self.mainRoop.response(['usi'])
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| {"/game.py": ["/board.py"], "/main.py": ["/game.py"], "/board_test.py": ["/board.py"], "/main_test.py": ["/main.py"]} |
68,184 | thanhtd91/Deep-Knowledge-Tracing-On-Skills-With-Limited-Data | refs/heads/master | /my_keras_lstm.py | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
import pandas as pd
import tensorflow as tf
import keras
from keras import backend as K
K.tensorflow_backend._get_available_gpus()
from keras.models import Model, load_model
from keras.layers import LSTM, Input, Dense, TimeDistributed
from keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
import csv
import argparse
import keras.losses
import keras.metrics
import sys
import numpy
numpy.set_printoptions(threshold=sys.maxsize)
from keras.backend.tensorflow_backend import set_session, clear_session, get_session
from math import fsum
from sklearn.metrics import roc_auc_score
import numpy.ma as ma
# Reset Keras Session
def reset_keras():
    """Dispose of the current TF/Keras session and install a fresh one
    configured with on-demand GPU memory growth."""
    old_session = get_session()
    clear_session()
    old_session.close()
    get_session()  # original also re-fetched a session here before reconfiguring
    cfg = tf.ConfigProto()
    cfg.gpu_options.allow_growth = True
    set_session(tf.Session(config=cfg))
    print("Keras backend has been reset")
reset_keras()

# Default output directory for trained models (overridable via --data_path).
data_path = "C:\\Users\Romain\Documents\POLYTECH NANTES\INFO4\STAGE MONTREAL\Projet DKT\Deep-Knowledge-Tracing-On-Skills-With-Limited-Data\Models"
parser = argparse.ArgumentParser()
parser.add_argument('--run_opt', type=int, default=1, help='An integer: 1 to train, 2 to test')
parser.add_argument('--data_path', type=str, default=data_path, help='The full path of the training data')
args = parser.parse_args()
if args.data_path:
    data_path = args.data_path

lstm_out = 82 # number of nodes in the output layer
batchSize = 50 # mini-batch size
look_back = 197 # sequence window length (hidden-layer width in the original note)
inputsize = 162 # number of nodes in the input layer (2 * skills)
skills = 81 # number of distinct skills assessed per student
def prepross (xs):
    """Re-encode each step into a 2*skills one-hot vector.

    For a row x, the skill index is argmax(x[1:]) and the last element is
    the answer; slot 2*skill + answer is set to 1, all others to 0.
    """
    encoded = []
    for row in xs:
        slot = [0] * (skills * 2)
        skill_idx = np.argmax(row[1:])
        slot[skill_idx * 2 + int(row[-1])] = 1
        encoded.append(slot)
    return np.array(encoded)
def create_dataset(dataset, look_back=1):
    """Slide a window of length look_back over every student sequence.

    Returns (X, Y): X holds the prepross-encoded input windows, Y the raw
    windows shifted forward by one step.
    """
    dataX = []
    dataY = []
    for seq in range(len(dataset)):
        for start in range(len(dataset[seq]) - look_back - 1):
            dataX.append(prepross(dataset[seq, start:(start + look_back)]))
            dataY.append(dataset[seq, start + 1:(start + look_back + 1)])
    return np.array(dataX), np.array(dataY)
def accur(y_true, y_pred):
    """Accuracy on the practised skill only: the one-hot in y_true[...,:-1]
    selects one prediction per step, rounded and compared to the answer
    stored in y_true[..., -1]."""
    relevant = K.sum(y_true[:, :, 0:-1] * y_pred, axis=2)
    return K.mean(K.equal(K.round(relevant), y_true[:, :, -1]))
#keras.metrics.accur = accur
def loss_function(y_true, y_pred):
    """Weighted binary cross-entropy on the practised skill.

    y_true packs [skill one-hot | observed answer] on the last axis; the
    one-hot selects the single relevant prediction per step.  Steps that
    exercise skill 12, 16, 61, 74 or 77 have their cross-entropy counted
    extra times with weights a, b, c, d, e respectively.

    NOTE(review): a-e are module-level globals bound inside the training
    sweep below; calling this function before that loop raises NameError.
    """
    obs = y_true[:,:,-1]
    # Keep only the predicted probability of the skill actually practised.
    temp = y_true[:,:,0:-1] * y_pred
    rel_pred = K.sum(temp, axis=2)
    # rep12 = y_true[:,:,-1] * y_true[:,:,12]
    # rep16 = y_true[:,:,-1] * y_true[:,:,16]
    # rep61 = y_true[:,:,-1] * y_true[:,:,61]
    # rep74 = y_true[:,:,-1] * y_true[:,:,74]
    rep77 = y_true[:,:,-1] * y_true[:,:,77]  # only used for its zeros_like shape below
    zero = tf.constant(0, dtype=tf.float32)
    # Masks: nonzero where the step's one-hot selects the given skill
    # (and the prediction itself is nonzero).
    mask12 = tf.not_equal(temp[:,:,12], zero)
    mask16 = tf.not_equal(temp[:,:,16], zero)
    mask61 = tf.not_equal(temp[:,:,61], zero)
    mask74 = tf.not_equal(temp[:,:,74], zero)
    mask77 = tf.not_equal(temp[:,:,77], zero)
    return K.binary_crossentropy(rel_pred, obs)\
    + a*tf.where(mask12, K.binary_crossentropy(rel_pred, obs), tf.zeros_like(rep77))\
    + b*tf.where(mask16, K.binary_crossentropy(rel_pred, obs), tf.zeros_like(rep77))\
    + c*tf.where(mask61, K.binary_crossentropy(rel_pred, obs), tf.zeros_like(rep77))\
    + d*tf.where(mask74, K.binary_crossentropy(rel_pred, obs), tf.zeros_like(rep77))\
    + e*tf.where(mask77, K.binary_crossentropy(rel_pred, obs), tf.zeros_like(rep77))
#keras.losses.loss_function = loss_function

# The commented block below documents how X_data.npy / Y_data.npy were
# originally produced from rawData.csv; the arrays are now loaded from disk.
#df = pd.read_csv('rawData.csv', header=None)
#data = (df.values)[:,1:]
#data = np.array([np.array([y[1:-1].split(', ') for y in x ]) for x in data])
#
##new_data =[]
##for i in range(len(data)):
##    inds = [i for i in range(len(data[i]))]
##    shuffle(inds)
##    new_data.append(data[i,inds])
##data = np.array(new_data)
#
#print(data.shape)
#
#X_data, Y_data = create_dataset(data, look_back)
#np.save('X_data.npy',X_data)
#np.save('Y_data.npy',Y_data)

X_data = np.load('X_data.npy')
Y_data = np.load('Y_data.npy')

# 90/10 train/test split, then 80/20 train/validation split of the remainder.
X_train1, X_test, Y_train1, Y_test1 = train_test_split(X_data,Y_data, test_size = 0.10, random_state = 42)
X_train, X_val, Y_train2, Y_val1 = train_test_split(X_train1,Y_train1, test_size = 0.20, random_state = 42)
# Targets drop column 0.  NOTE(review): presumably an index/ID column — confirm.
Y_train = Y_train2[:,:,1:]
Y_test = Y_test1[:,:,1:]
Y_val = Y_val1[:,:,1:]
print(X_train.shape,Y_train.shape)
print(X_val.shape,Y_val.shape)
print(X_test.shape,Y_test.shape)
if args.run_opt == 1:
    # Sweep the extra loss weights (a..e) applied to skills 12/16/61/74/77;
    # one model is trained, saved and evaluated per weight combination.
    for a,b,c,d,e in ([10, 10, 10, 10, 10],[20,20,20,20,20],[30, 30, 30, 30, 30],[10,10,10,10,30],[30, 30, 30, 30, 10]):
        #define model
        lstm_layer = LSTM(81, batch_input_shape=(batchSize, look_back, inputsize), return_sequences=True)
        comment_input = Input(shape=(look_back,inputsize,),dtype='float32')
        x = lstm_layer(comment_input)
        preds = TimeDistributed(Dense(81, activation='sigmoid'))(x)
        model = Model(inputs=comment_input,outputs=preds)
        model.compile(loss= loss_function, optimizer='adam', metrics=[accur])
        print(model.summary())
        checkpointer = ModelCheckpoint(filepath=data_path + '/model-{epoch:02d}.hdf5', verbose=1)
        num_epochs = 50
        history = model.fit(X_train, Y_train, validation_data=(X_val, Y_val), epochs = num_epochs, batch_size=batchSize)
        #callbacks=[checkpointer]
        model.save(data_path + "/final_model_DKT_mask_12" + str(a) + "_16" + str(b) + "_61" + str(c) + "_74" + str(d) + "_77" + str(e) + ".hdf5")
        scores = model.evaluate(X_test, Y_test, verbose=1)
        print('Test loss:', scores[0])
        print('Test accuracy:', scores[1])
        #save results
        res = [("test loss","test accuracy","learning loss","learning accuracy","validation loss","validation accuracy","12 a","16 b","61 c","74 d","77 e")]
        #res=[]
        res.append((scores[0],scores[1],history.history.get('loss')[-1],history.history.get('accur')[-1],history.history.get('val_loss')[-1],history.history.get('val_accur')[-1],a,b,c,d,e))
        #write results (appending, so repeated runs accumulate rows)
        with open('resultats_DKT_masque.csv', "a") as csv_file:
            writer = csv.writer(csv_file, delimiter=',')
            for line in res:
                writer.writerow(line)

        # Kept for reference: alternative flow that reloads saved models
        # for evaluation instead of using the freshly trained one.
        #    reset_keras()
        #
        #    model_names = []
        #    pred_file_names = []
        #    for a,b,c,d,e in ([10, 10, 10, 10, 10],[30, 0, 0, 0, 0],[10,0,0,0,0],[0, 30, 30, 30, 30],[0, 10, 10, 10, 10]):
        #        model_names.append("/final_model_DKT_mask_22" + str(a) + "_25" + str(b) + "_29" + str(c) + "_31" + str(d) + "_71" + str(e) + ".hdf5")
        #        pred_file_names.append("AUC_skill_DKT_mask_22" + str(a) + "_25" + str(b) + "_29" + str(c) + "_31" + str(d) + "_71" + str(e) + ".txt")
        #
        #    for m,p in zip(model_names,pred_file_names):
        #
        #        model = load_model(data_path + m)
        #        print(model.summary())

        liste_auc = []
        # Global AUC over every observed step.
        pred = model.predict(X_test)
        temp = Y_test.astype(np.float)[:,:,:-1] * pred
        y_true = ((Y_test[:,:,-1]).astype(np.float)).ravel()
        y_pred = (np.sum(temp, axis=2)).ravel()
        print("AUC = ")
        print(roc_auc_score(y_true.ravel(), y_pred.ravel()))
        liste_auc.append("AUC = "+str(roc_auc_score(y_true.ravel(), y_pred.ravel())))
        # Per-skill AUC: mask away the steps that do not exercise the skill.
        for id_skill in range(0,81):
            zero = 0
            mask4 = ((np.equal(temp[:,:,id_skill], zero)).astype(int)).ravel()
            pred_skill = ma.array((temp[:,:,id_skill]).ravel(), mask=mask4)
            reel_skill = ma.array(((Y_test[:,:,-1]).astype(np.float)).ravel(), mask=mask4)
            pred_skill = pred_skill.compressed()
            reel_skill = reel_skill.compressed()
            print(pred_skill)
            print(reel_skill)
            print("AUC_"+str(id_skill)+" = ")
            print(roc_auc_score(reel_skill.ravel(), pred_skill.ravel()))
            liste_auc.append("AUC_"+str(id_skill)+" = "+str(roc_auc_score(reel_skill.ravel(), pred_skill.ravel())))
        file_name = "AUC/" + "AUC_skill_DKT_mask_12" + str(a) + "_16" + str(b) + "_61" + str(c) + "_74" + str(d) + "_77" + str(e) + ".txt"
        with open(file_name, "w") as file:
            for line in liste_auc:
                file.write(line)
        reset_keras()
| {"/constructBNdata.py": ["/Bayesian_Network.py"]} |
68,185 | thanhtd91/Deep-Knowledge-Tracing-On-Skills-With-Limited-Data | refs/heads/master | /readBNdata.py | import pandas as pd
import numpy as np
from numpy import argmax
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
# Bayesian-network export (first column is a row index and is dropped).
# NOTE(review): this `kb` is rebound to [] further down and never used — confirm.
data = pd.read_csv('bn_data.csv')
kb = (data.values)[:,1:]

# Raw per-student responses, one column per question.
data = pd.read_csv('rawData.csv',sep=";")
responses = data.values
# Fixed permutation of the 48 question columns.
# NOTE(review): its purpose is undocumented; presumably reorders columns
# to match the presentation/BN ordering — confirm against the data source.
ind = [0, 41, 26, 14, 21, 1, 34, 42, 15, 31, 13, 33, 36, 20, 16, 45, 29, 9, 5, 3, 35, 18, 6, 2, 10, 30, 44, 19, 27, 40, 23, 8, 24, 28, 37, 39, 46, 25, 22, 4, 7, 17, 11, 47, 32, 12, 43, 38]
responses = responses[:,ind]
# 16 knowledge components and the 48 question identifiers (3 per component pair).
names = ['MPP_FFD','MPP_FMD','MTT_FFD','MTT_FMD','AC_FMA','DA_FMA','AC_FFA','DA_FFA','MPP_CCF','MTT_CCF','AC_CCF','DA_CCF','MPP_A','MTT_A','AC_A','DA_A']
questions = ['MPP_FFD_1','MPP_FFD_2','MPP_FFD_3','MPP_FMD_1','MPP_FMD_2','MPP_FMD_3','MPP_CCF_1','MPP_CCF_2','MPP_CCF_3','MPP_A_1','MPP_A_2','MPP_A_3','MTT_FFD_1','MTT_FFD_2','MTT_FFD_3','MTT_FMD_1','MTT_FMD_2','MTT_FMD_3','MTT_CCF_1','MTT_CCF_2','MTT_CCF_3','MTT_A_1','MTT_A_2','MTT_A_3','AC_FMA_1','AC_FMA_2','AC_FMA_3','AC_FFA_1','AC_FFA_2','AC_FFA_3','AC_CCF_1','AC_CCF_2','AC_CCF_3','AC_A_1','AC_A_2','AC_A_3','DA_FMA_1','DA_FMA_2','DA_FMA_3','DA_FFA_1','DA_FFA_2','DA_FFA_3','DA_CCF_1','DA_CCF_2','DA_CCF_3','DA_A_1','DA_A_2','DA_A_3']
questions = np.array(questions)[ind]
def f (rep):
    """Map a 0 response to -1; leave any other value unchanged.

    (Defined here but not referenced elsewhere in this script.)
    """
    return -1 if rep == 0 else rep
# Build, per student, the list of steps encoded as a 16-way knowledge-
# component one-hot with the response appended as the 17th element.
kb_time = []
# NOTE(review): this rebinding discards the csv-derived kb loaded above.
kb=[]
temp = []
for student in range(len(responses)):
    for i,rep in zip(questions, responses[student]):
        # Substring match of the component name inside the question id;
        # the check order avoids shorter names shadowing longer ones.
        if names[0] in i :
            temp = [1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,rep]
        elif names[1] in i :
            temp = [0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,rep]
        elif names[8] in i :
            temp = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,rep]
        elif names[12] in i :
            temp = [0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,rep]
        elif names[2] in i :
            temp = [0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,rep]
        elif names[3] in i :
            temp = [0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,rep]
        elif names[9] in i :
            temp = [0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,rep]
        elif names[13] in i :
            temp = [0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,rep]
        elif names[4] in i :
            temp = [0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,rep]
        elif names[6] in i :
            temp = [0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,rep]
        elif names[10] in i :
            temp = [0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,rep]
        elif names[14] in i :
            temp = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,rep]
        elif names[5] in i :
            temp = [0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,rep]
        elif names[7] in i :
            temp = [0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,rep]
        elif names[11] in i :
            temp = [0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,rep]
        elif names[15] in i :
            temp = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,rep]
        kb.append(temp)
    kb_time.append(kb)
    kb=[]
df = pd.DataFrame(kb_time)
df.to_csv('rawData_kn.csv')
| {"/constructBNdata.py": ["/Bayesian_Network.py"]} |
68,186 | thanhtd91/Deep-Knowledge-Tracing-On-Skills-With-Limited-Data | refs/heads/master | /DKT.py | import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score
import tensorflow as tf
import keras
from keras import backend as K
from sklearn.metrics import classification_report
from sklearn.feature_extraction.text import CountVectorizer
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM, TimeDistributed
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
import re
#from livelossplot import PlotLossesKeras
import sys, os, re, csv, codecs, numpy as np, pandas as pd
from keras.layers import GRU
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation
from keras.layers import Bidirectional, GlobalMaxPool1D
from keras.models import Model
from keras import initializers, regularizers, constraints, optimizers, layers
from keras.layers import Input, Dense,multiply
from keras.layers.core import *
from keras.layers.recurrent import LSTM
from keras.models import *
from os import listdir
from os.path import isfile, join
import matplotlib.pyplot as plt
from random import randint
import tensorflow as tf
import datetime
import attention_expert as atte
from random import shuffle
""" wordsList = np.load('wordsList.npy')
print('Loaded the word list!')
wordsList = wordsList.tolist() #Originally loaded as numpy array
wordsList = [word.decode('UTF-8') for word in wordsList] #Encode words as UTF-8
wordVectors = np.load('wordVectors.npy')
print ('Loaded the word vectors!') """
lstm_out = 17 # number of nodes in the output layer
batchSize = 20 # mini-batch size
look_back = 46 # sequence window length (hidden-layer width in the original note)
inputsize = 32 # number of nodes in the input layer (2 * skills)
skills = 16 # number of distinct skills assessed per student
def prepross (xs):
    """Re-encode each step into a 2*skills one-hot vector.

    For a row x, the skill index is argmax(x) and the last element is the
    answer; slot 2*skill + answer is set to 1, all others to 0.
    """
    encoded = []
    for row in xs:
        slot = [0] * (skills * 2)
        slot[int(np.argmax(row)) * 2 + int(row[-1])] = 1
        encoded.append(slot)
    return np.array(encoded)
def create_dataset(dataset, choix, look_back=1):
    """Slide a window of length look_back over every student sequence.

    When choix is True the input windows are re-encoded with prepross;
    otherwise they are kept raw.  Returns (X, Y) with Y being the raw
    windows shifted forward by one step.
    """
    dataX = []
    dataY = []
    for seq in range(len(dataset)):
        for start in range(len(dataset[seq]) - look_back - 1):
            window = dataset[seq, start:(start + look_back)]
            dataX.append(prepross(window) if choix else window)
            dataY.append(dataset[seq, start + 1:(start + look_back + 1)])
    return np.array(dataX), np.array(dataY)
def loss_function(y_true, y_pred):
    """Weighted binary cross-entropy on the practised skill.

    y_true packs [skill one-hot (16) | observed answer] on the last axis;
    the one-hot selects the single relevant prediction per step.  Steps
    exercising skill 4, 5, 6, 7, 10, 11, 14 or 15 have their
    cross-entropy counted an extra 10 times.
    """
    #f = lambda x : tf.where(x[0]>0, tf.where(x[1]>= 0.5,1,0), 0)
    """ f = lambda x : abs(x) > 0
    g = lambda x : abs(x) >= 0.5
    rel_pred = tf.cond(y_true>0, lambda : tf.cond(y_pred>= 0.5,1,0), lambda : 0) """
    rep4 = y_true[:,:,-1] * y_true[:,:,4]  # only used for its zeros_like shape below
    # rep5 = y_true[:,:,-1] * y_true[:,:,5]
    # rep6 = y_true[:,:,-1] * y_true[:,:,6]
    # rep7 = y_true[:,:,-1] * y_true[:,:,7]
    # rep10 = y_true[:,:,-1] * y_true[:,:,10]
    # rep11 = y_true[:,:,-1] * y_true[:,:,11]
    # rep14 = y_true[:,:,-1] * y_true[:,:,14]
    # rep15 = y_true[:,:,-1] * y_true[:,:,15]
    obs = y_true[:,:,-1]
    # Keep only the predicted probability of the skill actually practised.
    temp = y_true[:,:,0:-1] * y_pred
    rel_pred = K.sum(temp, axis=2)
    zero = tf.constant(0, dtype=tf.float32)
    # Masks: nonzero where the step's one-hot selects the given skill
    # (and the prediction itself is nonzero).
    mask4 = tf.not_equal(temp[:,:,4], zero)
    mask5 = tf.not_equal(temp[:,:,5], zero)
    mask6 = tf.not_equal(temp[:,:,6], zero)
    mask7 = tf.not_equal(temp[:,:,7], zero)
    mask10 = tf.not_equal(temp[:,:,10], zero)
    mask11 = tf.not_equal(temp[:,:,11], zero)
    mask14 = tf.not_equal(temp[:,:,14], zero)
    mask15 = tf.not_equal(temp[:,:,15], zero)
    # keras implementation does a mean on the last dimension (axis=-1) which
    # it assumes is a singleton dimension. But in our context that would
    # be wrong.
    #+ 10*K.binary_crossentropy(rep5,(temp[:,:,5])) + 10*( K.binary_crossentropy(rep4, (temp[:,:,4])) + K.binary_crossentropy(rep6, (temp[:,:,6])) + K.binary_crossentropy(rep7, (temp[:,:,7])) + K.binary_crossentropy(rep10, (temp[:,:,10])) + K.binary_crossentropy(rep11, (temp[:,:,11])) + K.binary_crossentropy(rep14, (temp[:,:,14])) + K.binary_crossentropy(rep15, (temp[:,:,15])) )
    return K.binary_crossentropy(rel_pred, obs) + 10*(tf.where(mask4, K.binary_crossentropy(rel_pred, obs),\
    tf.zeros_like(rep4)) + tf.where(mask5, K.binary_crossentropy(rel_pred, obs), \
    tf.zeros_like(rep4)) + tf.where(mask6, K.binary_crossentropy(rel_pred, obs), \
    tf.zeros_like(rep4))+ tf.where(mask7, K.binary_crossentropy(rel_pred, obs), \
    tf.zeros_like(rep4))+tf.where(mask10, K.binary_crossentropy(rel_pred, obs),\
    tf.zeros_like(rep4))+tf.where(mask11, K.binary_crossentropy(rel_pred, obs),\
    tf.zeros_like(rep4))+tf.where(mask14, K.binary_crossentropy(rel_pred, obs),\
    tf.zeros_like(rep4))+tf.where(mask15, K.binary_crossentropy(rel_pred, obs),\
    tf.zeros_like(rep4)))
def accur(y_true, y_pred):
    """Accuracy restricted to the skill actually attempted at each step.

    ``y_true`` packs a one-hot skill selector in channels [0:-1] and the
    observed correctness bit in channel -1; ``y_pred`` holds per-skill
    probabilities.  Multiplying selects the probability of the attempted
    skill, which is rounded and compared to the label.
    """
    selected = y_true[:, :, 0:-1] * y_pred
    skill_prob = K.sum(selected, axis=2)
    hits = K.equal(K.round(skill_prob), y_true[:, :, -1])
    return K.mean(hits)
# ---------------------------------------------------------------------------
# Data loading: BN expert predictions + raw student interaction sequences.
# ---------------------------------------------------------------------------
df = pd.read_csv('bn_data.csv')
# Drop the index column; rows are students, cells are stringified lists.
bn = (df.values)[:,1:]
# Strip brackets and split each "[v1, v2, ...]" cell into tokens.
# NOTE(review): uses y[1:-2] here vs y[1:-1] below -- presumably the BN file
# carries one extra trailing character; confirm against bn_data.csv.
bn = np.array([np.array([ y[1:-2].split(', ') for y in x ]) for x in bn])
print("bn : ", bn.shape)
print(bn[293])
df = pd.read_csv('rawData_kn.csv')
data = (df.values)[:,1:]
data = np.array([np.array([ y[1:-1].split(', ') for y in x ]) for x in data])
print("data : ",data.shape)
print(data[293])
# Shuffle the within-student order of interactions.
new_data =[]
for i in range(len(data)):
    # NOTE: the comprehension variable `i` does not leak in Python 3, so the
    # outer loop index is preserved; `data[i]` below still uses the outer `i`.
    inds = [i for i in range(len(data[i]))]
    shuffle(inds)
    new_data.append(data[i,inds])
data = np.array(new_data)
print(data.shape)
# Window the sequences: BN stream kept raw, student stream one-hot encoded.
X_bn, Y_bn = create_dataset(bn, False, look_back )
X_data, Y_data = create_dataset(data, True, look_back )
# Concatenate student features with BN expert features on the last axis.
X = np.concatenate((X_data,X_bn), axis=-1)
print("taille des données = {}".format(X.shape))
print(X[0])
# Shuffle samples with one shared permutation for X and Y.
ind_list = [i for i in range(len(X))]
shuffle(ind_list)
X_new = X[ind_list, :]
Y_new = Y_data[ind_list,]
# 10% test, then 20% of the remainder as validation.
X_train1, X_test1, Y_train1, Y_test = train_test_split(X_new,Y_new, test_size = 0.10, random_state = 42)
X_train2, X_val1, Y_train, Y_val = train_test_split(X_train1,Y_train1, test_size = 0.20, random_state = 42)
# Split each tensor back into student-model inputs and expert (BN) inputs.
X_train = X_train2[:,:,0:inputsize]
X_expert_train = X_train2[:,:,inputsize:inputsize +skills]
X_test = X_test1[:,:,0:inputsize]
X_expert_test = X_test1[:,:,inputsize:inputsize +skills]
X_val = X_val1[:,:,0:inputsize]
X_expert_val = X_val1[:,:,inputsize:inputsize +skills]
print(X_train.shape,X_expert_train.shape,Y_train.shape)
print(X_val.shape,X_expert_val.shape,Y_val.shape)
print(X_test.shape,X_expert_test.shape,Y_test.shape)
# define model
lstm_layer = LSTM(lstm_out-1, batch_input_shape=(batchSize, look_back, inputsize), return_sequences=True)
comment_input = Input(shape=(look_back,inputsize,), dtype='float32')
x = lstm_layer(comment_input)
# Expert input is declared but only the LSTM branch feeds the output head.
expert_input = Input(shape=(look_back,16,), dtype='float32')
#merged = atte.AttentionE(16)([x,expert_input])
preds = TimeDistributed(Dense(16, activation='sigmoid'))(x)
model = Model(inputs=[comment_input,expert_input],
              outputs=preds)
model.compile(loss= loss_function,
              optimizer='adam',
              metrics=[accur])
print(model.summary())
#callbacks = [ PlotLossesKeras()]
history = model.fit([X_train,X_expert_train], Y_train, validation_data=([X_val,X_expert_val], Y_val),epochs = 500, batch_size=batchSize)
scores = model.evaluate([X_test,X_expert_test], Y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# make predictions
testPredict = model.predict([X_test,X_expert_test])
# NOTE(review): np.float was removed in NumPy 1.24; these lines need the
# builtin `float` on modern NumPy.
temp = Y_test[:,:,0:-1].astype(np.float) * testPredict
print(classification_report(Y_test[:,1,-1].astype(np.float) * Y_test[:,1,4].astype(np.float), np.round(temp[:,1,4])))
print(classification_report(Y_test[:,0,-1].astype(np.float) * Y_test[:,0,7].astype(np.float), np.round(temp[:,0,7])))
print(Y_test[:,1,4])
print(Y_test[:,0,4])
print(Y_test.shape)
""" for (i,j) in zip (testPredict[0:1,0:10,:], Y_test[0:1,0:10,:]):
print("{} - {}".format(i,j)) """
""" np.save('testPredict.npy',testPredict[0:1,:,:])
np.save('y_test.npy',Y_test[0:1,:,:]) """
""" df = pd.DataFrame(testPredict[1])
df.to_csv('testPredict4.csv')
df = pd.DataFrame(Y_test[1])
df.to_csv('y_test4.csv')
df = pd.DataFrame(testPredict[2])
df.to_csv('testPredict5.csv')
df = pd.DataFrame(Y_test[2])
df.to_csv('y_test5.csv') """
| {"/constructBNdata.py": ["/Bayesian_Network.py"]} |
# ===== file: /Bayesian_Network.py (thanhtd91/Deep-Knowledge-Tracing-On-Skills-With-Limited-Data) =====
from pybbn.graph.dag import Bbn
from pybbn.graph.edge import Edge, EdgeType
from pybbn.graph.jointree import EvidenceBuilder
from pybbn.graph.node import BbnNode
from pybbn.graph.variable import Variable
from pybbn.pptc.inferencecontroller import InferenceController
# create the nodes
# Each BbnNode takes (Variable(id, name, states), flattened CPT).  With the
# ['on', 'off'] states, a 2-value list is a root prior, a 4-value list
# conditions on one parent and an 8-value list on two parents -- presumably
# in pybbn's parent ordering; confirm against the edges added below.
a = BbnNode(Variable(0, 'Competence_a_limplication', ['on', 'off']), [0.5, 0.5])
b = BbnNode(Variable(1, 'Causal_Factuel', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
c = BbnNode(Variable(2, 'Abstrait', ['on', 'off']), [0.9, 0.1, 0.1, 0.9])
d = BbnNode(Variable(3, 'CF_Inhiber_PETnonQ', ['on', 'off']), [0.9, 0.1, 0.4, 0.6])
e = BbnNode(Variable(4, 'CF_gestion_3_modeles', ['on', 'off']), [0.9, 0.1, 0.4, 0.6])
f = BbnNode(Variable(5, 'A_Inhiber_PETnonQ', ['on', 'off']), [0.9, 0.1, 0.4, 0.6])
g = BbnNode(Variable(6, 'A_gestion_3_modeles', ['on', 'off']), [0.9, 0.1, 0.4, 0.6])
h = BbnNode(Variable(7, 'MPP_FMD', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
i = BbnNode(Variable(8, 'MTT_FMD', ['on', 'off']), [0.95, 0.05, 0.6, 0.4, 0.7, 0.3, 0.4, 0.6])
j = BbnNode(Variable(9, 'CF_genere_nonPETQ', ['on', 'off']), [0.9, 0.1, 0.2, 0.8])
k = BbnNode(Variable(10, 'A_genere_nonPETQ', ['on', 'off']), [0.9, 0.1, 0.2, 0.8])
l = BbnNode(Variable(11, 'AC_FMA', ['on', 'off']), [0.9, 0.1, 0.4, 0.6])
m = BbnNode(Variable(12, 'DA_FFA', ['on', 'off']), [0.9, 0.1, 0.4, 0.6])
n = BbnNode(Variable(13, 'MTT_A', ['on', 'off']), [0.95, 0.05, 0.6, 0.4, 0.3, 0.7, 0.4, 0.6])
u = BbnNode(Variable(20, 'MPP_A', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
o = BbnNode(Variable(14, 'AC_A', ['on', 'off']), [0.9, 0.1, 0.4, 0.6])
p = BbnNode(Variable(15, 'DA_A', ['on', 'off']), [0.9, 0.1, 0.4, 0.6])
q = BbnNode(Variable(16, 'MPP_FFD', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
r = BbnNode(Variable(17, 'MTT_FFD', ['on', 'off']), [0.95, 0.05, 0.6, 0.4, 0.7, 0.3, 0.3, 0.7])
s = BbnNode(Variable(18, 'AC_FFA', ['on', 'off']), [0.9, 0.1, 0.4, 0.6])
t = BbnNode(Variable(19, 'DA_FMA', ['on', 'off']), [0.9, 0.1, 0.4, 0.6])
a1 = BbnNode(Variable(73, 'Causal_Conter_Factuel', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
a2 = BbnNode(Variable(74, 'CCF_Inhiber_PETnonQ', ['on', 'off']), [0.9, 0.1, 0.4, 0.6])
a3 = BbnNode(Variable(75, 'CCF_genere_nonPETQ', ['on', 'off']), [0.9, 0.1, 0.2, 0.8])
a4 = BbnNode(Variable(76, 'CCF_gestion_3_modeles', ['on', 'off']), [0.9, 0.1, 0.4, 0.6])
a5 = BbnNode(Variable(21, 'MPP_CCF', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
a6 = BbnNode(Variable(22, 'MTT_CCF', ['on', 'off']), [0.95, 0.05, 0.6, 0.4, 0.7, 0.3, 0.3, 0.7])
a7 = BbnNode(Variable(23, 'AC_CCF', ['on', 'off']), [0.9, 0.1, 0.4, 0.6])
a8 = BbnNode(Variable(24, 'DA_CCF', ['on', 'off']), [0.9, 0.1, 0.4, 0.6])
# Many alternative antecedents (was: "Beaucoup d'antécédents alternatifs")
q1 = BbnNode(Variable(25, 'MPP_FMD_1', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q6 = BbnNode(Variable(26, 'MPP_FMD_2', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q10 = BbnNode(Variable(27, 'MPP_FMD_3', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q3 = BbnNode(Variable(28, 'MTT_FMD_1', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q5 = BbnNode(Variable(29, 'MTT_FMD_2', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q11 = BbnNode(Variable(30, 'MTT_FMD_3', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q2 = BbnNode(Variable(31, 'AC_FMA_1', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q8 = BbnNode(Variable(32, 'AC_FMA_2', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q9 = BbnNode(Variable(33, 'AC_FMA_3', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q4 = BbnNode(Variable(34, 'DA_FMA_1', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q7 = BbnNode(Variable(35, 'DA_FMA_2', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q12 = BbnNode(Variable(36, 'DA_FMA_3', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
# Few alternative antecedents (was: "Peu d'antécédents alternatifs")
q15 = BbnNode(Variable(37, 'MPP_FFD_1', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q17 = BbnNode(Variable(38, 'MPP_FFD_2', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q23 = BbnNode(Variable(39, 'MPP_FFD_3', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q14 = BbnNode(Variable(40, 'MTT_FFD_1', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q18 = BbnNode(Variable(41, 'MTT_FFD_2', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q24 = BbnNode(Variable(42, 'MTT_FFD_3', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q16 = BbnNode(Variable(43, 'AC_FFA_1', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q19 = BbnNode(Variable(44, 'AC_FFA_2', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q21 = BbnNode(Variable(45, 'AC_FFA_3', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q13 = BbnNode(Variable(46, 'DA_FFA_1', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q20 = BbnNode(Variable(47, 'DA_FFA_2', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q22 = BbnNode(Variable(48, 'DA_FFA_3', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
# Abstract consequent (was: "Conséquent abstrait")
q27 = BbnNode(Variable(49, 'MPP_A_1', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q32 = BbnNode(Variable(50, 'MPP_A_2', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q36 = BbnNode(Variable(51, 'MPP_A_3', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q28 = BbnNode(Variable(52, 'MTT_A_1', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q29 = BbnNode(Variable(53, 'MTT_A_2', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q35 = BbnNode(Variable(54, 'MTT_A_3', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q26 = BbnNode(Variable(55, 'AC_A_1', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q31 = BbnNode(Variable(56, 'AC_A_2', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q34 = BbnNode(Variable(57, 'AC_A_3', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q25 = BbnNode(Variable(58, 'DA_A_1', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q30 = BbnNode(Variable(59, 'DA_A_2', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
q33 = BbnNode(Variable(60, 'DA_A_3', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
# Fully abstract premise (was: "Prémisse complètement abstrait")
qCCF1 = BbnNode(Variable(61, 'MPP_CCF_1', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
qCCF2 = BbnNode(Variable(62, 'MPP_CCF_2', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
qCCF3 = BbnNode(Variable(63, 'MPP_CCF_3', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
qCCF4 = BbnNode(Variable(64, 'MTT_CCF_1', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
qCCF5 = BbnNode(Variable(65, 'MTT_CCF_2', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
qCCF6 = BbnNode(Variable(66, 'MTT_CCF_3', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
qCCF7 = BbnNode(Variable(67, 'AC_CCF_1', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
qCCF8 = BbnNode(Variable(68, 'AC_CCF_2', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
qCCF9 = BbnNode(Variable(69, 'AC_CCF_3', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
qCCF10 = BbnNode(Variable(70, 'DA_CCF_1', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
qCCF11 = BbnNode(Variable(71, 'DA_CCF_2', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
qCCF12 = BbnNode(Variable(72, 'DA_CCF_3', ['on', 'off']), [0.9, 0.1, 0.3, 0.7])
# create the network structure
# Assemble the network from the nodes above.
# BUGFIX: the original chain called .add_node(q6) twice and never added q7
# ('DA_FMA_2'), even though an edge (t -> q7) is created below; the duplicate
# call is now .add_node(q7).
# NOTE(review): several edges below use EdgeType.UNDIRECTED inside a Bbn --
# presumably intentional for pybbn's question/leaf handling; confirm against
# the pybbn docs before changing.
bbn = Bbn() \
    .add_node(a) \
    .add_node(b) \
    .add_node(c) \
    .add_node(d) \
    .add_node(e) \
    .add_node(f) \
    .add_node(g) \
    .add_node(h) \
    .add_node(i) \
    .add_node(j) \
    .add_node(k) \
    .add_node(l) \
    .add_node(m) \
    .add_node(n) \
    .add_node(o) \
    .add_node(p) \
    .add_node(q) \
    .add_node(r) \
    .add_node(s) \
    .add_node(t) \
    .add_node(u) \
    .add_node(a1) \
    .add_node(a2) \
    .add_node(a3) \
    .add_node(a4) \
    .add_node(a5) \
    .add_node(a6) \
    .add_node(a7) \
    .add_node(a8) \
    .add_node(q1) \
    .add_node(q2) \
    .add_node(q3) \
    .add_node(q4) \
    .add_node(q5) \
    .add_node(q6) \
    .add_node(q7) \
    .add_node(q8) \
    .add_node(q9) \
    .add_node(q10) \
    .add_node(q11) \
    .add_node(q12) \
    .add_node(q13) \
    .add_node(q14) \
    .add_node(q15) \
    .add_node(q16) \
    .add_node(q17) \
    .add_node(q18) \
    .add_node(q19) \
    .add_node(q20) \
    .add_node(q21) \
    .add_node(q22) \
    .add_node(q23) \
    .add_node(q24) \
    .add_node(q25) \
    .add_node(q26) \
    .add_node(q27) \
    .add_node(q28) \
    .add_node(q29) \
    .add_node(q30) \
    .add_node(q31) \
    .add_node(q32) \
    .add_node(q33) \
    .add_node(q34) \
    .add_node(q35) \
    .add_node(q36) \
    .add_node(qCCF1) \
    .add_node(qCCF2) \
    .add_node(qCCF3) \
    .add_node(qCCF4) \
    .add_node(qCCF5) \
    .add_node(qCCF6) \
    .add_node(qCCF7) \
    .add_node(qCCF8) \
    .add_node(qCCF9) \
    .add_node(qCCF10) \
    .add_node(qCCF11) \
    .add_node(qCCF12) \
    .add_edge(Edge(a, b, EdgeType.DIRECTED)) \
    .add_edge(Edge(a, a1, EdgeType.DIRECTED)) \
    .add_edge(Edge(a1, a2, EdgeType.DIRECTED)) \
    .add_edge(Edge(a1, a4, EdgeType.DIRECTED)) \
    .add_edge(Edge(a4, a3, EdgeType.DIRECTED)) \
    .add_edge(Edge(a4, a6, EdgeType.DIRECTED)) \
    .add_edge(Edge(a3, a7, EdgeType.DIRECTED)) \
    .add_edge(Edge(a3, a8, EdgeType.DIRECTED)) \
    .add_edge(Edge(a, c, EdgeType.DIRECTED)) \
    .add_edge(Edge(b, d, EdgeType.DIRECTED)) \
    .add_edge(Edge(b, e, EdgeType.DIRECTED)) \
    .add_edge(Edge(c, f, EdgeType.DIRECTED)) \
    .add_edge(Edge(c, g, EdgeType.DIRECTED)) \
    .add_edge(Edge(d, h, EdgeType.DIRECTED)) \
    .add_edge(Edge(d, i, EdgeType.DIRECTED)) \
    .add_edge(Edge(e, i, EdgeType.DIRECTED)) \
    .add_edge(Edge(e, j, EdgeType.DIRECTED)) \
    .add_edge(Edge(g, k, EdgeType.DIRECTED)) \
    .add_edge(Edge(j, l, EdgeType.DIRECTED)) \
    .add_edge(Edge(j, m, EdgeType.DIRECTED)) \
    .add_edge(Edge(f, u, EdgeType.DIRECTED)) \
    .add_edge(Edge(f, n, EdgeType.DIRECTED)) \
    .add_edge(Edge(g, n, EdgeType.DIRECTED)) \
    .add_edge(Edge(k, o, EdgeType.DIRECTED)) \
    .add_edge(Edge(k, p, EdgeType.DIRECTED)) \
    .add_edge(Edge(d, q, EdgeType.DIRECTED)) \
    .add_edge(Edge(d, r, EdgeType.DIRECTED)) \
    .add_edge(Edge(e, r, EdgeType.DIRECTED)) \
    .add_edge(Edge(j, s, EdgeType.DIRECTED)) \
    .add_edge(Edge(j, t, EdgeType.DIRECTED)) \
    .add_edge(Edge(h, q1, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(h, q6, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(h, q10, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(i, q3, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(i, q5, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(i, q11, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(l, q2, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(l, q8, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(l, q9, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(t, q4, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(t, q7, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(t, q12, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(q, q15, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(q, q17, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(q, q23, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(r, q14, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(r, q18, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(r, q24, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(s, q16, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(s, q19, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(s, q21, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(m, q13, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(m, q20, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(m, q22, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(u, q27, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(u, q32, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(u, q36, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(n, q28, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(n, q29, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(n, q35, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(o, q26, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(o, q31, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(o, q34, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(p, q25, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(p, q30, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(p, q33, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(a2, a6, EdgeType.DIRECTED)) \
    .add_edge(Edge(a2, a5, EdgeType.DIRECTED)) \
    .add_edge(Edge(a5, qCCF1, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(a5, qCCF2, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(a5, qCCF3, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(a6, qCCF4, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(a6, qCCF5, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(a6, qCCF6, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(a7, qCCF7, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(a7, qCCF8, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(a7, qCCF9, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(a8, qCCF10, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(a8, qCCF11, EdgeType.UNDIRECTED)) \
    .add_edge(Edge(a8, qCCF12, EdgeType.UNDIRECTED))
def initialize_bn():
    """Compile the module-level ``bbn`` into a join tree ready for inference."""
    return InferenceController.apply(bbn)
def insert_evidence(name, value, gate, bn):
    """Attach an observation to node ``name`` and return the updated tree.

    ``gate`` is the observed state ('on'/'off') and ``value`` its likelihood;
    ``bn`` is the join tree produced by ``initialize_bn``.
    """
    target = bn.get_bbn_node_by_name(name)
    evidence = EvidenceBuilder().with_node(target).with_evidence(gate, value).build()
    bn.set_observation(evidence)
    return bn
def get_knowledge(bn):
    """Read the current marginal of every tracked skill node.

    Returns ``(vector, bn)`` where ``vector`` lists, in the fixed ``names``
    order, the first potential entry's value for each node (its 'on'
    probability as exposed by pybbn).
    """
    names = ['MPP_FFD','MPP_FMD','MTT_FFD','MTT_FMD','AC_FMA','DA_FMA','AC_FFA','DA_FFA','MPP_CCF','MTT_CCF','AC_CCF','DA_CCF','MPP_A','MTT_A','AC_A','DA_A']

    def _p_on(skill_name):
        # First entry of the node's potential, as in the original code.
        node = bn.get_bbn_node_by_name(skill_name)
        return bn.get_bbn_potential(node).entries[0].value

    knowledge_vector = [_p_on(skill) for skill in names]
    return knowledge_vector, bn
| {"/constructBNdata.py": ["/Bayesian_Network.py"]} |
# ===== file: /DKT_assessment_data.py =====
import os
# Silence TensorFlow info/warning logs before TF is imported elsewhere.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
import pandas as pd
import tensorflow as tf
import keras
from keras import backend as K
# Enumerate GPUs early so the Keras TF backend is bound up front.
K.tensorflow_backend._get_available_gpus()
from keras.models import Model, load_model
from keras.layers import LSTM, Input, Dense, TimeDistributed
from keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
import csv
import argparse
import keras.losses
import keras.metrics
import sys
import numpy
# Print full arrays instead of truncated summaries (useful for debugging).
numpy.set_printoptions(threshold=sys.maxsize)
from keras.backend.tensorflow_backend import set_session, clear_session, get_session
from math import fsum
# Reset Keras Session
def reset_keras():
    """Tear down the current TF/Keras session and install a fresh one.

    The new session enables ``allow_growth`` so TensorFlow grabs GPU memory
    incrementally instead of reserving it all at once.
    """
    stale = get_session()
    clear_session()
    stale.close()
    get_session()  # original also re-fetched a session here before replacing it
    fresh_config = tf.ConfigProto()
    fresh_config.gpu_options.allow_growth = True
    set_session(tf.Session(config=fresh_config))
    print("Keras backend has been reset")
reset_keras()
# Default location for model artifacts; overridable via --data_path.
data_path = "Models_Ange"
parser = argparse.ArgumentParser()
parser.add_argument('--run_opt', type=int, default=1, help='An integer: 1 to train, 2 to test')
parser.add_argument('--data_path', type=str, default=data_path, help='The full path of the training data')
args = parser.parse_args()
if args.data_path:
    data_path = args.data_path
# Hyperparameters (comments translated from French).
lstm_out = 82  # number of nodes in the output layer
batchSize = 50  # size of the data batches
look_back = 197  # original comment said "hidden layer" -- NOTE(review): this is used as the sequence window length below
inputsize = 162  # number of nodes in the input layer
skills = 81  # number of distinct skills assessed for the students
def prepross(xs):
    """One-hot encode (skill, answer) steps into rows of width ``skills * 2``.

    Each input row is assumed to carry a skill indicator starting at index 1
    and the answer bit in its last position -- TODO confirm against the
    caller's data layout.  Slot ``skill * 2 + answer`` is set to 1.
    """
    width = skills * 2
    encoded = []
    for row in xs:
        slot = int(np.argmax(row[1:])) * 2 + int(row[-1])
        one_hot = [0] * width
        one_hot[slot] = 1
        encoded.append(one_hot)
    return np.array(encoded)
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)):
for j in range(len(dataset[i]) - look_back-1) :
dataX.append(prepross(dataset[i,j:(j+look_back)]))
dataY.append(dataset[i , j+1:(j+ look_back+1)])
return np.array(dataX), np.array(dataY)
def accur(y_true, y_pred):
    """Accuracy on the attempted skill (label bit lives in channel -1)."""
    picked = K.sum(y_true[:, :, 0:-1] * y_pred, axis=2)
    return K.mean(K.equal(K.round(picked), y_true[:, :, -1]))
#keras.metrics.accur = accur
def loss_function(y_true, y_pred):
    """Binary cross-entropy on the attempted skill, with skill 77 up-weighted.

    ``y_true`` channels: [0:-1] one-hot skill selector, [-1] observed answer.
    The second term re-counts the loss (x20) on timesteps where skill 77 was
    attempted and its selected probability is non-zero -- presumably to
    counter class imbalance; confirm with the experiment notes.

    Returns a tensor shaped like (batch, time).
    """
    obs = y_true[:, :, -1]
    temp = y_true[:, :, 0:-1] * y_pred
    # Probability the model assigned to the one attempted skill per step.
    rel_pred = K.sum(temp, axis=2)
    rep77 = y_true[:, :, -1] * y_true[:, :, 77]
    zero = tf.constant(0, dtype=tf.float32)
    mask77 = tf.not_equal(temp[:, :, 77], zero)
    # Dead code removed: masks for skills 12/16/61/74 and the commented-out
    # rep12/rep16/rep61/rep74 tensors were never used in the returned loss.
    base = K.binary_crossentropy(rel_pred, obs)
    return base + 20 * tf.where(mask77, base, tf.zeros_like(rep77))
#keras.losses.loss_function = loss_function
#df = pd.read_csv('rawData.csv', header=None)
#data = (df.values)[:,1:]
#data = np.array([np.array([y[1:-1].split(', ') for y in x ]) for x in data])
#
##new_data =[]
##for i in range(len(data)):
## inds = [i for i in range(len(data[i]))]
## shuffle(inds)
## new_data.append(data[i,inds])
##data = np.array(new_data)
#
#print(data.shape)
#
#X_data, Y_data = create_dataset(data, look_back)
#np.save('X_data.npy',X_data)
#np.save('Y_data.npy',Y_data)
if args.run_opt == 1:
    # Pre-windowed training data produced by the (commented-out) prep step.
    X_data = np.load('X_data.npy')
    Y_data = np.load('Y_data.npy')
    # 10% held-out test, then 20% of the remainder for validation.
    X_train1, X_test, Y_train1, Y_test1 = train_test_split(X_data, Y_data, test_size = 0.10, random_state = 42)
    X_train, X_val, Y_train2, Y_val1 = train_test_split(X_train1, Y_train1, test_size = 0.20, random_state = 42)
    # Drop the first target channel (targets keep 81 skill channels + answer).
    Y_train = Y_train2[:,:,1:]
    Y_test = Y_test1[:,:,1:]
    Y_val = Y_val1[:,:,1:]
    # One LSTM layer over the interaction window, one sigmoid head per skill.
    lstm_layer = LSTM(81, batch_input_shape=(batchSize, look_back, inputsize), return_sequences=True)
    comment_input = Input(shape=(look_back,inputsize,),dtype='float32')
    x = lstm_layer(comment_input)
    preds = TimeDistributed(Dense(81, activation='sigmoid'))(x)
    model = Model(inputs=comment_input,outputs=preds)
    model.compile(loss= loss_function, optimizer='adam', metrics=[accur])
    print(model.summary())
    num_epochs = 2
    print(X_train.shape,Y_train.shape)
    print(X_val.shape,Y_val.shape)
    print(X_test.shape,Y_test.shape)
    history = model.fit(X_train, Y_train, validation_data=(X_val, Y_val), epochs = num_epochs, batch_size=batchSize)
    model.save("final_model_DKT_mask_Ange.hdf5")
    scores = model.evaluate(X_test, Y_test, verbose=1)
    print('Test loss:', scores[0])
    print('Test accuracy:', scores[1])
    # Global evaluation via AUC (area under the ROC curve).
    import numpy as np
    from sklearn.metrics import roc_auc_score
    pred = model.predict(X_test)
    # FIX: np.float was removed in NumPy 1.24; the builtin float is the same
    # dtype (float64), so behavior is unchanged.
    temp = Y_test.astype(float)[:,:,:-1] * pred
    y_true = ((Y_test[:,:,-1]).astype(float)).ravel()
    y_pred = (np.sum(temp, axis=2)).ravel()
    print("AUC = ")
    print(roc_auc_score(y_true.ravel(), y_pred.ravel()))
    # Per-skill AUC for skill 77.
    zero = 0
    # mask is 1 where skill 77 was NOT attempted; ma.array drops those entries.
    mask4 = ((np.equal(temp[:,:,77], zero)).astype(int)).ravel()
    import numpy.ma as ma
    pred_77 = ma.array((temp[:,:,77]).ravel(), mask=mask4)
    reel_77 = ma.array(((Y_test[:,:,-1]).astype(float)).ravel(), mask=mask4)
    pred_77 = pred_77.compressed()
    reel_77 = reel_77.compressed()
    print(pred_77)
    print(reel_77)
    print("AUC_77 = ")
    print(roc_auc_score(reel_77.ravel(), pred_77.ravel()))
| {"/constructBNdata.py": ["/Bayesian_Network.py"]} |
# ===== file: /graphe.py =====
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
""" proba = np.load('testPredict.npy')
questions = np.load('y_test.npy') """
labels = ['MPP_FFD','MPP_FMD','MTT_FFD','MTT_FMD','AC_FMA','DA_FMA','AC_FFA','DA_FFA','MPP_CCF','MTT_CCF','AC_CCF','DA_CCF','MPP_A','MTT_A','AC_A','DA_A']
questions = pd.read_csv("y_test40.csv")
questions = questions.iloc[:,1:]
names = []
for question in questions.values:
pos = np.argmax(question[0:-1])
skill = labels[pos]
rep = question[-1]
names.append((skill,rep))
print((np.array(names)).shape)
# import the data directly into a pandas dataframe
nba = pd.read_csv("testPredict40.csv")
nba = nba.iloc[:,1:]
#print(nba)
# remove index title
#nba.index.name = names
ids = {}
keys = range(len(names))
values = names
for i in keys:
ids[i] = values[i]
#ids = {0:names[0],1:names[1], 2:names[2],3:names[3],4:names[4],5:names[5],6:names[6],7:names[7],8:names[8],9:names[9],10:names[10],11:names[11],12:names[12],13:names[13],14:names[14],15:names[15],16:names[16],17:names[17],18:names[18],19:names[19]}
nba.rename(index=ids, inplace=True)
# normalize data columns
nba_norm = nba #(nba - nba.mean()) / (nba.max() - nba.min())
# relabel columns
nba_norm.columns = labels
# set appropriate font and dpi
sns.set(font_scale=0.6)
sns.set_style({"savefig.dpi": 10000})
# plot it out
ax = sns.heatmap(nba, cmap=plt.cm.Blues, annot=True, linewidths=.1)
# set the x-axis labels on the top
ax.xaxis.tick_top()
# rotate the x-axis labels
plt.xticks(rotation=90)
# get figure (usually obtained via "fig,ax=plt.subplots()" with matplotlib)
fig = ax.get_figure()
# specify dimensions and save
fig.set_size_inches(10, 10)
fig.savefig("nba40.png") | {"/constructBNdata.py": ["/Bayesian_Network.py"]} |
# ===== file: /DKT_BN.py =====
import numpy as np
import gc
import pandas as pd
from sklearn.metrics import roc_auc_score
import tensorflow as tf
import keras
from keras import backend as K
from sklearn.metrics import classification_report
from sklearn.feature_extraction.text import CountVectorizer
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM, TimeDistributed
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
import re
#from livelossplot import PlotLossesKeras
import sys, os, re, csv, codecs, numpy as np, pandas as pd
from keras.layers import GRU
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation
from keras.layers import Bidirectional, GlobalMaxPool1D
from keras.models import Model
from keras import initializers, regularizers, constraints, optimizers, layers
from keras.layers import Input, Dense,multiply
from keras.layers.core import *
from keras.layers.recurrent import LSTM
from keras.models import *
from os import listdir
from os.path import isfile, join
import matplotlib.pyplot as plt
from random import randint
import tensorflow as tf
import datetime
import attention_bn as att
from random import shuffle
""" wordsList = np.load('wordsList.npy')
print('Loaded the word list!')
wordsList = wordsList.tolist() #Originally loaded as numpy array
wordsList = [word.decode('UTF-8') for word in wordsList] #Encode words as UTF-8
wordVectors = np.load('wordVectors.npy')
print ('Loaded the word vectors!') """
# Hyperparameters for the DKT+BN model.
lstm_out = 17  # LSTM units are lstm_out-1 = 16 below (one per skill)
batchSize = 200  # training batch size
look_back = 30  # sliding-window length in timesteps
inputsize = 32  # input feature width -- matches skills * 2 (one-hot skill x answer)
skills = 16  # number of distinct skills tracked
def prepross(xs):
    """One-hot encode each (skill, answer) step into a width ``skills * 2`` row.

    The attempted skill index is taken as ``argmax`` over the whole row
    (NOTE(review): the sibling DKT_assessment script uses ``argmax(x[1:])``
    instead -- confirm which layout this file's data actually has); the
    answer bit is the row's last element.  Slot ``skill * 2 + answer`` is 1.
    """
    result = []
    for x in xs:
        skill = np.argmax(x)
        # Removed unused local `a = x[-1]`; the answer bit is read directly.
        pos = skill * 2 + int(x[-1])
        xt = [0 for _ in range(0, skills * 2)]
        xt[pos] = 1
        result.append(xt)
    return np.array(result)
# convert an array of values into a dataset matrix
def create_dataset(dataset, choix, look_back=1):
    """Slide a window of length ``look_back`` over every sequence.

    When ``choix`` is True the X windows are re-encoded with ``prepross``;
    otherwise the raw slice is kept.  Y is always the window shifted one
    step forward (next-step targets).
    """
    xs, ys = [], []
    for seq_idx in range(len(dataset)):
        seq_len = len(dataset[seq_idx])
        for start in range(seq_len - look_back - 1):
            window = dataset[seq_idx, start:(start + look_back)]
            if choix == True:
                window = prepross(window)
            xs.append(window)
            ys.append(dataset[seq_idx, start + 1:(start + look_back + 1)])
    return np.array(xs), np.array(ys)
def loss_function(y_true, y_pred):
    """Binary cross-entropy between the probability predicted for the
    attempted skill and the observed answer.

    ``y_true`` channels: [0:-1] one-hot skill selector, [-1] answer bit.
    Earlier experiments re-weighted skills 4-8/10/11/14/15 (that term is
    commented out in the original return), so the dead mask4..mask15 and
    rep4 tensors plus the stray triple-quoted lambda sketch have been
    removed; the returned value is unchanged.

    Returns a tensor shaped like (batch, time); keras' binary_crossentropy
    is used directly so no trailing mean over a singleton axis is taken.
    """
    obs = y_true[:, :, -1]
    temp = y_true[:, :, 0:-1] * y_pred
    # Probability assigned to the one attempted skill at each timestep.
    rel_pred = K.sum(temp, axis=2)
    return K.binary_crossentropy(rel_pred, obs)
def accur(y_true, y_pred):
    """Mean accuracy on the attempted skill (channel -1 holds the label)."""
    attempted = y_true[:, :, 0:-1] * y_pred
    prob = K.sum(attempted, axis=2)
    label = y_true[:, :, -1]
    return K.mean(K.equal(K.round(prob), label))
def accur2(y_true, y_pred, i):
    """NumPy accuracy pair for one skill index ``i``.

    Returns ``(masked_acc, overall_acc)``: accuracy restricted to timesteps
    where skill ``i`` was attempted with a non-zero selected probability,
    and accuracy over all timesteps.
    """
    selected = y_true[:, :, 0:-1] * y_pred
    skill_mask = selected[:, :, i] > 0
    predictions = np.round(np.sum(selected, axis=2))
    observations = y_true[:, :, -1]
    masked_acc = np.mean(np.equal(predictions[skill_mask], observations[skill_mask]))
    overall_acc = np.mean(np.equal(predictions, observations))
    return (masked_acc, overall_acc)
# Load BN expert predictions and raw student sequences (stringified lists).
df = pd.read_csv('bn_data.csv')
bn = (df.values)[:,1:]
# NOTE(review): y[1:-2] here vs y[1:-1] below -- presumably the BN cells have
# one extra trailing character; confirm against bn_data.csv.
bn = np.array([np.array([ y[1:-2].split(', ') for y in x ]) for x in bn])
print(bn.shape)
df = pd.read_csv('rawData_kn.csv')
data = (df.values)[:,1:]
data = np.array([np.array([ y[1:-1].split(', ') for y in x ]) for x in data])
print(data.shape)
""" new_data =[]
for i in range(len(data)):
inds = [i for i in range(len(data[i]))]
shuffle(inds)
new_data.append(data[i,inds])
data = np.array(new_data)
print(data.shape) """
# Window both streams; only the student stream is one-hot encoded.
X_bn, Y_bn = create_dataset(bn, False, look_back )
X_data, Y_data = create_dataset(data, True, look_back )
print("taille des Y_data = {}".format(Y_data.shape))
print("taille des Y_bn = {}".format(Y_bn.shape))
# Baseline: how well the BN alone predicts the observed answers.
# NOTE(review): np.float was removed in NumPy 1.24; needs builtin float there.
print(accur2(Y_data.astype(np.float),Y_bn.astype(np.float),5))
# Concatenate student inputs with the BN's next-step predictions.
X = np.concatenate((X_data,Y_bn), axis=-1)
print("taille des données = {}".format(X.shape))
ind_list = [i for i in range(len(X))]
#shuffle(ind_list)
X_new = X[ind_list, :]
Y_new = Y_data[ind_list,]
# 20% test, then 20% validation; ordering preserved (shuffle=False).
X_train1, X_test1, Y_train1, Y_test = train_test_split(X_new,Y_new, test_size = 0.20,shuffle=False)
X_train2, X_val1, Y_train, Y_val = train_test_split(X_train1,Y_train1, test_size = 0.20,shuffle=False)
# Split features back into student-model inputs and expert (BN) inputs.
X_train = X_train2[:,:,0:inputsize]
X_expert_train = X_train2[:,:,inputsize:inputsize +skills]
X_test = X_test1[:,:,0:inputsize]
X_expert_test = X_test1[:,:,inputsize:inputsize +skills]
X_val = X_val1[:,:,0:inputsize]
X_expert_val = X_val1[:,:,inputsize:inputsize +skills]
print(X_train.shape,X_expert_train.shape,Y_train.shape)
print(X_val.shape,X_expert_val.shape,Y_val.shape)
print(X_test.shape,X_expert_test.shape,Y_test.shape)
# define model
lstm_layer = LSTM(lstm_out-1, batch_input_shape=(batchSize, look_back, inputsize), return_sequences=True)
comment_input = Input(shape=(look_back,inputsize,), dtype='float32')
x = lstm_layer(comment_input)
# Expert input declared; the attention merge is currently disabled.
expert_input = Input(shape=(look_back,16,), dtype='float32')
#x = att.Attention(look_back)([x,expert_input])
preds = TimeDistributed(Dense(16, activation='sigmoid'))(x)
model = Model(inputs=[comment_input,expert_input],
              outputs=preds)
model.compile(loss= loss_function,
              optimizer='adam',
              metrics=[accur])
#print(model.summary())
# Snapshot the initial weights so each repeated run below starts fresh.
initial_weights = model.get_weights()
model.save_weights('initial_weights.h5')
model.load_weights('initial_weights.h5')
def toutAcc(y, x):
    """Per-skill (masked, overall) accuracy pairs for the BN-related skills.

    Evaluates ``accur2`` on skill indices 4-8, 10, 11, 14 and 15, in that
    order, and returns the list of result tuples.
    """
    return [accur2(y, x, idx) for idx in (4, 5, 6, 7, 8, 10, 11, 14, 15)]
#callbacks = [ PlotLossesKeras()]
# Monte-Carlo cross-validation: 10 random 80/20 splits, retraining from the
# saved initial weights each round, then average the per-skill accuracies.
tab = []
for i in range (10):
    X_train1, X_test1, Y_train1, Y_test = train_test_split(X_new,Y_new, test_size = 0.20,shuffle=True)
    X_train2, X_val1, Y_train, Y_val = train_test_split(X_train1,Y_train1, test_size = 0.20,shuffle=True)
    # First `inputsize` channels are exercise features, the next `skills`
    # channels hold the expert/BN knowledge estimates.
    X_train = X_train2[:,:,0:inputsize]
    X_expert_train = X_train2[:,:,inputsize:inputsize +skills]
    X_test = X_test1[:,:,0:inputsize]
    X_expert_test = X_test1[:,:,inputsize:inputsize +skills]
    X_val = X_val1[:,:,0:inputsize]
    X_expert_val = X_val1[:,:,inputsize:inputsize +skills]
    history = model.fit([X_train,X_expert_train], Y_train, validation_data=([X_val,X_expert_val], Y_val),epochs = 30, batch_size=batchSize,verbose=0)
    testPredict = model.predict([X_test,X_expert_test])
    # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24 --
    # use the builtin float, which is what the alias resolved to.
    alpha = toutAcc(Y_test.astype(float),testPredict)
    tab.append(alpha)
    #model.set_weights(initial_weights)
    # Reset to the stored initial weights so every round starts fresh.
    model.load_weights('initial_weights.h5')
    #keras.backend.clear_session()
print(np.mean(tab,axis=0))
# NOTE(review): this evaluates on the split of the *last* round only.
scores = model.evaluate([X_test,X_expert_test], Y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# make predictions
# Disabled diagnostic / export snippets, kept as no-op triple-quoted strings.
# NOTE(review): they still reference the removed np.float alias and would
# need updating (-> float) before being re-enabled.
""" testPredict = model.predict([X_new[:,:,0:inputsize],X_new[:,:,inputsize:inputsize +skills]])
print(accur2(Y_new.astype(np.float),testPredict),5) """
""" temp = Y_test[:,:,0:-1].astype(np.float) * testPredict
print(classification_report(Y_test[:,1,-1].astype(np.float) * Y_test[:,1,4].astype(np.float), np.round(temp[:,1,4])))
print(classification_report(Y_test[:,0,-1].astype(np.float) * Y_test[:,0,7].astype(np.float), np.round(temp[:,0,7])))
"""
""" for (i,j) in zip (testPredict[0:1,0:10,:], Y_test[0:1,0:10,:]):
print("{} - {}".format(i,j)) """
""" np.save('testPredict.npy',testPredict[0:1,:,:])
np.save('y_test.npy',Y_test[0:1,:,:]) """
""" df = pd.DataFrame(testPredict[1])
df.to_csv('testPredict4.csv')
df = pd.DataFrame(Y_test[1])
df.to_csv('y_test4.csv')
df = pd.DataFrame(testPredict[2])
df.to_csv('testPredict5.csv')
df = pd.DataFrame(Y_test[2])
df.to_csv('y_test5.csv') """
| {"/constructBNdata.py": ["/Bayesian_Network.py"]} |
68,191 | thanhtd91/Deep-Knowledge-Tracing-On-Skills-With-Limited-Data | refs/heads/master | /constructBNdata.py | import Bayesian_Network as BN
import pandas as pd
import numpy as np
""" rb = BN.initialize_bn()
kb,rb = BN.get_knowledge(rb)
print(kb)
rb = BN.insert_evidence('MPP_CCF_1',1,'on',rb)
rb = BN.insert_evidence('MPP_CCF_2',1,'on',rb)
rb = BN.insert_evidence('MPP_CCF_3',1,'on',rb)
#rb = BN.insert_evidence('Competence_a_limplication',1,'off',rb)
kb,rb = BN.get_knowledge(rb)
print(kb) """
# Build per-student, per-question knowledge trajectories by replaying each
# student's answers as evidence into a fresh Bayesian network, then export
# the trajectories to bn_data.csv.
data = pd.read_csv('rawData.csv',sep=";")
responses = data.values
questions = list(data.columns.values)
print(questions)
# Randomize the column (question) replay order.
# NOTE(review): only `responses` is reindexed by `ind`; `questions` keeps its
# original order, so the zip below pairs each question name with a *different*
# question's answers after the shuffle -- confirm this is intentional.
# NOTE(review): 48 is hard-coded; presumably the CSV has exactly 48 columns.
ind = list(range(48))
np.random.shuffle(ind)
print(ind)
responses = responses[:,ind]
kb_time = []
kb=[]
for student in range(len(responses)):
    # Fresh network per student.
    rb = BN.initialize_bn()
    for question,rep in zip(questions, responses[student]):
        if rep == 1 :
            rb = BN.insert_evidence(question,1,'on',rb)
        else :
            rb = BN.insert_evidence(question,1,'off',rb)
        # Knowledge snapshot after every inserted answer.
        temp, rb = BN.get_knowledge(rb)
        kb.append(temp)
    kb_time.append(kb)
    kb=[]
df = pd.DataFrame(kb_time)
df.to_csv('bn_data.csv')
| {"/constructBNdata.py": ["/Bayesian_Network.py"]} |
68,195 | iamantony/images2grey | refs/heads/master | /images2grey.py | __author__ = 'Antony Cherepanov'
import argparse
import os
import multiprocessing
from PIL import Image
def main():
    """Entry point: parse CLI arguments, validate them and run conversion."""
    src_dir, dst_dir = parse_arguments()
    if check_arguments(src_dir, dst_dir) is True:
        start(src_dir, dst_dir)
    else:
        print("Invalid arguments. Try again!")
def parse_arguments():
    """Read the command line: a source folder plus an optional save folder.

    :return [tuple] of (folder, save_to) strings
    """
    cli = argparse.ArgumentParser(
        description="Multi-thread python app for transformation of color "
                    "images to grayscale images.")
    cli.add_argument("folder",
                     help="absolute path to the folder with images to "
                          "transform")
    cli.add_argument("-s", "--save_to",
                     help="path to the folder where greyscale images "
                          "should be saved",
                     default="")
    parsed = cli.parse_args()
    return parsed.folder, parsed.save_to
def check_arguments(t_folder, t_save_folder):
    """Validate the CLI arguments, creating the save folder when needed.

    :param t_folder: [string] - absolute path to the folder with images
    :param t_save_folder: [string] - absolute path for greyscale output
        (may be empty, meaning "convert in place")
    :return [bool] True if arguments are OK.
    """
    # Source folder must exist.
    if not check_existing_folder(t_folder):
        print("Error: Invalid path to folder with images - " + t_folder)
        return False
    # Optional save folder: must be an absolute path; created if missing.
    if len(t_save_folder) > 0:
        if not check_folder_path(t_save_folder):
            print("Error: Invalid path to folder for greyscale images - " +
                  t_save_folder)
            return False
        if not os.path.exists(t_save_folder):
            os.makedirs(t_save_folder)
    return True
def check_existing_folder(t_path):
    """Return True when *t_path* is an absolute path to an existing folder.

    :param t_path: [string] - candidate folder path
    :return: [bool]
    """
    return (os.path.isabs(t_path)
            and os.path.exists(t_path)
            and os.path.isdir(t_path))
def check_folder_path(t_path):
    """Return True when *t_path* could serve as a folder path (is absolute).

    :param t_path: [string] - candidate folder path
    :return: [bool]
    """
    return os.path.isabs(t_path)
def start(t_folder, t_save_folder):
    """ Start transformation process
    :param t_folder: [string] - absolute path to the folder with images to
    transform
    :param t_save_folder: [string] - absolute path to folder for greyscale
    images
    """
    images = get_images_paths(t_folder)
    # One worker process per CPU core; each consumes its own chunk of paths.
    cores_num = multiprocessing.cpu_count()
    img_chunks = list_split(images, cores_num)
    jobs = list()
    for i in range(cores_num):
        thread = multiprocessing.Process(target=greyscale,
                                         args=(next(img_chunks), t_save_folder))
        jobs.append(thread)
        thread.start()
    # Block until every worker has finished.
    for thread in jobs:
        thread.join()
def get_images_paths(t_folder):
    """ Check if folder contains images (on the first level) and return
    their paths
    :param t_folder: [string] - absolute path to the folder
    :return: [list] with the absolute paths of the images in folder
    """
    if not os.path.isdir(t_folder):
        return list()
    image_extensions = ("jpg", "jpeg", "bmp", "png", "gif", "tiff")
    images = list()
    # Only regular files whose extension looks like an image are kept.
    for entry in os.listdir(t_folder):
        file_path = os.path.join(t_folder, entry)
        if os.path.isfile(file_path) and \
                get_extension(file_path) in image_extensions:
            images.append(file_path)
    return images
def get_extension(t_path):
    """ Get extension of the file
    :param t_path: [string] - path or name of the file
    :return: [string] lower-cased text after the last dot, or the whole
    string when no dot is present
    """
    return t_path.rsplit('.', 1)[-1].lower()
def list_split(t_list, t_size):
    """ Generator that splits a list into t_size chunks.
    :param t_list: [list] - list of elements
    :param t_size: [int] - number of chunks
    :return generator of lists; the final chunk absorbs any remainder
    """
    chunk = len(t_list) // t_size
    for idx in range(t_size - 1):
        yield t_list[idx * chunk:(idx + 1) * chunk]
    yield t_list[(t_size - 1) * chunk:]
def greyscale(t_images, t_save_folder):
    """ Transform color images to greyscale images
    :param t_images: [list] - list of paths to the images
    :param t_save_folder: [string] - absolute path to folder for greyscale
    images; when empty the originals are overwritten in place
    :return [list] of paths to created greyscale images
    """
    out_paths = list()
    for img_path in t_images:
        print("Transforming " + img_path)
        img = Image.open(img_path)
        grey = img.convert("L")
        folder, stem, ext = parse_image_path(img_path)
        if len(t_save_folder) > 0:
            folder = t_save_folder
        target = "{path}{sep}{name}.{ext}".format(path=folder, name=stem,
                                                  sep=str(os.sep), ext=ext)
        grey.save(target)
        out_paths.append(target)
        img.close()
    return out_paths
def parse_image_path(t_img_path):
    """ Parse path to image and return it's parts: path, image name, extension
    :param t_img_path: [string] - path to image
    :return: [tuple] of strings (directory, stem, extension); extension is
    the text after the last dot, stem everything before it
    """
    folder, _, filename = t_img_path.rpartition(os.sep)
    stem, _, extension = filename.rpartition('.')
    return folder, stem, extension
if __name__ == '__main__':
main() | {"/test/test_images2grey.py": ["/images2grey.py"]} |
68,196 | iamantony/images2grey | refs/heads/master | /test/test_images2grey.py | __author__ = 'Antony Cherepanov'
import unittest
import os
import shutil
import images2grey
from PIL import Image
SCRIPT_FOLDER = os.path.dirname(os.path.abspath(__file__))
TEST_SAVE_FOLDER = SCRIPT_FOLDER + str(os.sep) + "test_folder"
BACKUP_FOLDER = SCRIPT_FOLDER + str(os.sep) + "backup"
TEST_IMG_1 = "test1.png"
TEST_IMG_2 = "test2.bmp"
TEST_GREY_IMG = "test_grey.jpg"
TEST_IMG_PATH_1 = SCRIPT_FOLDER + str(os.sep) + TEST_IMG_1
TEST_IMG_PATH_2 = SCRIPT_FOLDER + str(os.sep) + TEST_IMG_2
TEST_GREY_IMG_PATH = SCRIPT_FOLDER + str(os.sep) + TEST_GREY_IMG
class CheckArgumentsTest(unittest.TestCase):
    """Tests for images2grey.check_arguments()."""
    def test_valid_folder(self):
        # An existing absolute folder with no save folder is accepted.
        self.assertTrue(images2grey.check_arguments(SCRIPT_FOLDER, ""))
    def test_invalid_folder(self):
        self.assertFalse(
            images2grey.check_arguments("/invalid/folder", ""))
    def test_invalid_save_folder(self):
        # Relative save paths must be rejected.
        self.assertFalse(images2grey.check_arguments(SCRIPT_FOLDER, "../fake/"))
    def test_save_folder_creation(self):
        # A valid but missing save folder is created on the fly.
        self.assertTrue(images2grey.check_arguments(SCRIPT_FOLDER,
                                                    TEST_SAVE_FOLDER))
    def tearDown(self):
        # Remove the folder test_save_folder_creation may have created.
        if os.path.exists(TEST_SAVE_FOLDER):
            try:
                shutil.rmtree(TEST_SAVE_FOLDER, True)
            except Exception as err:
                print("Error during folder remove: {0}".format(err))
                return
class GetImagesPathsTest(unittest.TestCase):
    """Tests for images2grey.get_images_paths()."""
    def test_invalid_folder(self):
        self.assertEqual(images2grey.get_images_paths("../fake/"), list())
    def test_valid_folder(self):
        expected = [TEST_IMG_PATH_1, TEST_IMG_PATH_2, TEST_GREY_IMG_PATH]
        for found in images2grey.get_images_paths(SCRIPT_FOLDER):
            self.assertTrue(found in expected)
class GetExtensionTest(unittest.TestCase):
    """Tests for images2grey.get_extension()."""
    def test_get_extension(self):
        cases = ((TEST_IMG_PATH_1, "png"),
                 (TEST_IMG_PATH_2, "bmp"),
                 (TEST_GREY_IMG_PATH, "jpg"))
        for path, expected in cases:
            self.assertEqual(images2grey.get_extension(path), expected)
    def test_get_extension_several_dots(self):
        tricky = SCRIPT_FOLDER + str(os.sep) + "test.hey.jpeg"
        self.assertEqual(images2grey.get_extension(tricky), "jpeg")
class ParsePathTest(unittest.TestCase):
    """Tests for images2grey.parse_image_path()."""
    def test_get_parts(self):
        self.assertEqual(images2grey.parse_image_path(TEST_IMG_PATH_1),
                         (SCRIPT_FOLDER, "test1", "png"))
        # Dots in the stem belong to the name, not the extension.
        dotted = SCRIPT_FOLDER + str(os.sep) + "test.hey.png"
        self.assertEqual(images2grey.parse_image_path(dotted),
                         (SCRIPT_FOLDER, "test.hey", "png"))
class ListSplitTest(unittest.TestCase):
    """Tests for the images2grey.list_split() generator."""
    def test_simple_list(self):
        data = [1, 2, 3]
        chunks = images2grey.list_split(data, len(data))
        for idx, chunk in enumerate(chunks):
            self.assertEqual(chunk, data[idx:idx + 1])
    def test_big_list(self):
        data = [1, 2, 3, 4, 5]
        chunks = images2grey.list_split(data, 3)
        self.assertEqual(next(chunks), data[0:1])
        self.assertEqual(next(chunks), data[1:2])
        # The last chunk absorbs the remainder.
        self.assertEqual(next(chunks), data[2:])
        self.assertRaises(StopIteration, lambda: next(chunks))
class ImageGreyscaleTest(unittest.TestCase):
    """Tests for images2grey.greyscale(); backs up the fixture images in
    setUp and restores them in tearDown because in-place conversion
    overwrites the originals."""
    def setUp(self):
        if not os.path.exists(BACKUP_FOLDER):
            os.makedirs(BACKUP_FOLDER)
        shutil.copy(TEST_IMG_PATH_1, BACKUP_FOLDER)
        shutil.copy(TEST_IMG_PATH_2, BACKUP_FOLDER)
        shutil.copy(TEST_GREY_IMG_PATH, BACKUP_FOLDER)
    def test_greyscale(self):
        # In-place conversion: result paths equal the input paths.
        paths = [TEST_IMG_PATH_1, TEST_IMG_PATH_2, TEST_GREY_IMG_PATH]
        result_paths = images2grey.greyscale(paths, "")
        for path in result_paths:
            self.assertTrue(path in paths)
            self.assertTrue(self.check_greyscale(path))
    def test_greyscale_with_path(self):
        # Conversion into a separate output folder.
        paths = [TEST_IMG_PATH_1, TEST_IMG_PATH_2, TEST_GREY_IMG_PATH]
        os.makedirs(TEST_SAVE_FOLDER)
        result_paths = images2grey.greyscale(paths, TEST_SAVE_FOLDER)
        self.assertEqual(len(result_paths), len(paths))
        for path in result_paths:
            self.assertTrue(self.check_greyscale(path))
    def check_greyscale(self, t_path):
        # Helper (not a test): True when every pixel is single-channel.
        img = Image.open(t_path)
        img.load()
        width, height = img.size
        for wdt in range(width):
            for hgt in range(height):
                # If pixel if grey, getpixel() will return one int value.
                # If it's color pixel, we will get tuple of integers.
                pixel = img.getpixel((wdt, hgt))
                if not isinstance(pixel, int):
                    img.close()
                    return False
        img.close()
        return True
    def tearDown(self):
        # Restore the originals from the backup and clean up temp folders.
        shutil.copy(BACKUP_FOLDER + str(os.sep) + TEST_IMG_1, SCRIPT_FOLDER)
        shutil.copy(BACKUP_FOLDER + str(os.sep) + TEST_IMG_2, SCRIPT_FOLDER)
        shutil.copy(BACKUP_FOLDER + str(os.sep) + TEST_GREY_IMG, SCRIPT_FOLDER)
        if os.path.exists(BACKUP_FOLDER):
            shutil.rmtree(BACKUP_FOLDER, True)
        if os.path.exists(TEST_SAVE_FOLDER):
            shutil.rmtree(TEST_SAVE_FOLDER, True)
class StartTest(unittest.TestCase):
    """End-to-end test of images2grey.start() over the fixture images."""
    def setUp(self):
        # Back up the fixtures; restored in tearDown.
        if not os.path.exists(BACKUP_FOLDER):
            os.makedirs(BACKUP_FOLDER)
        shutil.copy(TEST_IMG_PATH_1, BACKUP_FOLDER)
        shutil.copy(TEST_IMG_PATH_2, BACKUP_FOLDER)
        shutil.copy(TEST_GREY_IMG_PATH, BACKUP_FOLDER)
    def test_start_slicing(self):
        # start() must convert all three fixture images into the save folder,
        # exercising the multi-process chunking along the way.
        os.makedirs(TEST_SAVE_FOLDER)
        images2grey.start(SCRIPT_FOLDER, TEST_SAVE_FOLDER)
        images = images2grey.get_images_paths(TEST_SAVE_FOLDER)
        self.assertEqual(len(images), 3)
        for path in images:
            self.assertTrue(self.check_greyscale(path))
    def check_greyscale(self, t_path):
        # Helper (not a test): True when every pixel is single-channel.
        img = Image.open(t_path)
        img.load()
        width, height = img.size
        for wdt in range(width):
            for hgt in range(height):
                # If pixel if grey, getpixel() will return one int value.
                # If it's color pixel, we will get tuple of integers.
                pixel = img.getpixel((wdt, hgt))
                if not isinstance(pixel, int):
                    img.close()
                    return False
        img.close()
        return True
    def tearDown(self):
        # Restore fixtures and remove temp folders.
        shutil.copy(BACKUP_FOLDER + str(os.sep) + TEST_IMG_1, SCRIPT_FOLDER)
        shutil.copy(BACKUP_FOLDER + str(os.sep) + TEST_IMG_2, SCRIPT_FOLDER)
        shutil.copy(BACKUP_FOLDER + str(os.sep) + TEST_GREY_IMG, SCRIPT_FOLDER)
        if os.path.exists(BACKUP_FOLDER):
            shutil.rmtree(BACKUP_FOLDER, True)
        if os.path.exists(TEST_SAVE_FOLDER):
            shutil.rmtree(TEST_SAVE_FOLDER, True)
if __name__ == "__main__":
unittest.main()
| {"/test/test_images2grey.py": ["/images2grey.py"]} |
68,197 | mloecher/tag_tracking | refs/heads/main | /tagsim/sim_fullmotion.py | import numpy as np
from scipy.interpolate import griddata
from scipy import interpolate
from scipy.signal.windows import kaiser
from .base_im_generation import grab_random_model
from .SimObject import SimObject
from .SimPSD import SimInstant
from .interp2d import interpolate2Dpoints, interpolate2Dpoints_f, interpolate2Dpoints_fc
from .interp_temp2d import interpolate_temp1D
import sys
import os
from .pygrid_internal import pygrid as pygrid
from .pygrid_internal import c_grid as c_grid
from .pygrid_internal import utils as pygrid_utils
def get_full_motion_im(ke=0.15, seed=-1, Nt=25, Nt0 = 25, inflow_range=None, fixed_input=None,
                       beta_lims = [0, 40], blur_chance=0.0, noise_scale = 0.0, basepath = "./image_db/",
                       SNR_range = (10, 30),
                       mode = 'gridtag', use_gpu=False, N_im = 256, do_density = True,
                       random_pts = False, new_sim = True):
    """Simulate a full cine of tagged (or DENSE) MR images with known motion.

    Builds (or receives via `fixed_input`) a random anatomical model, runs
    the pulse-sequence simulation at `Nt` cardiac phases, tracks a set of
    ground-truth points through the analytic motion model, and grids a
    noisy image for each phase.

    Returns a dict with keys "ims" (magnitude images), "pts" (x/y motion
    paths of the tracked points), "tag_mask", "tag_inflow", "all_imc"
    (complex images) and "all_im_pc" (phase-cycled DENSE images).

    NOTE(review): the `noise_scale` parameter is overwritten below from
    `SNR_range`, so the passed-in value is ignored.  `beta_lims` and
    `blur_chance` are only referenced from commented-out code in proc_im.
    `beta_lims` is also a mutable default argument (read-only here, but
    worth replacing with a tuple).
    """
    # We can gived fixed inputs here rather than calling the random generator
    if fixed_input is None:
        r, s, t1, t2, final_mask, r_a, r_b, theta, t_sim0, img0, inflow_mask, xmod, ymod, descaler = grab_random_model(
            seed=seed, inflow_range=inflow_range, Nt = Nt0, basepath = basepath
        )
    else:
        r, s, t1, t2, final_mask, r_a, r_b, theta, t_sim0, img0, inflow_mask, xmod, ymod, descaler = fixed_input
    # Generate the list of time points [ms]
    acq_loc = np.arange(0, Nt) * 1000 / Nt + 10.0
    # Make the simulator object and PSD objects
    sim_object = SimObject()
    sim_object.gen_from_generator(r, s, t1, t2)
    simulator = SimInstant(sim_object, use_gpu=use_gpu)
    if (mode == 'gridtag'):
        if new_sim:
            # Random tagging-RF scaling for augmentation variety.
            scale_tagrf = np.random.uniform(0.8, 1.2)
            simulator.sample_tagging_smn_PSD(ke=ke, acq_loc=acq_loc, scale_tagrf=scale_tagrf)
        else:
            simulator.sample_tagging1331_v2_PSD(ke=ke, acq_loc=acq_loc)
    elif (mode == 'DENSE'):
        simulator.sample_DENSE_PSD(ke=ke, kd = 0.0, acq_loc=acq_loc)
    # Run the PSD simulation, and the extra phase cycles for DENSE if needed
    acqs0 = simulator.run()
    if (mode == 'DENSE'):
        # Two additional RF phase cycles (excluding the 0 and 2*pi endpoints).
        extra_theta = np.linspace(0, 2*np.pi, 4)[1:-1]
        extra_acq = []
        for theta_i in extra_theta:
            simulator = SimInstant(sim_object, use_gpu=use_gpu)
            simulator.sample_DENSE_PSD(rf_dir = [np.cos(theta_i), np.sin(theta_i), 0], ke=ke, kd = 0.0, acq_loc=acq_loc)
            extra_acq.append(simulator.run())
    ###### The following code is all to generate the initial tracking points
    # It is hard coded right now for all tag intersections, but we could do:
    # - The point subsampling here i.e. only pick 10 or 100 points for speed
    # - Select random points
    if random_pts:
        rand_scale = 0.9
        rand_N = 5000
        xpts = rand_scale * np.random.rand(rand_N) - rand_scale/2.0
        ypts = rand_scale * np.random.rand(rand_N) - rand_scale/2.0
    else:
        # Tag-intersection spacing in normalized [-0.5, 0.5] coordinates.
        scaler = 1e-3 / ke / sim_object.fov[0] / np.sqrt(2)
        Ntag = np.ceil(0.5 / scaler) + 1
        rr = np.arange(-Ntag, Ntag).astype(np.float32)
        xpts0, ypts0 = np.meshgrid(rr, rr)
        xpts0 = xpts0.ravel()
        ypts0 = ypts0.ravel()
        # 45-degree rotation places the points on the tag-line crossings.
        xpts = (xpts0 + ypts0) * (1e-3 / ke / sim_object.fov[0] / np.sqrt(2))
        ypts = (xpts0 - ypts0) * (1e-3 / ke / sim_object.fov[0] / np.sqrt(2))
        # 0.45 instead of 0.5 for less edge points
        ind = (np.abs(xpts) <= 0.45) & (np.abs(ypts) <= 0.45)
        xpts = xpts[ind]
        ypts = ypts[ind]
    # Same points expressed in pixel coordinates of the model maps.
    xpts_s = (np.array(xpts) + 0.5) * final_mask.shape[0]
    ypts_s = (np.array(ypts) + 0.5) * final_mask.shape[1]
    ##### Now we sample the cartesian maps for various variables at the tracking points
    # Primarily the motion path variables, and the masks
    tag_ra = interpolate2Dpoints_fc(
        r_a.astype(np.float32),
        xpts_s.astype(np.float32),
        ypts_s.astype(np.float32)
    )
    tag_rb = interpolate2Dpoints_fc(
        r_b.astype(np.float32),
        xpts_s.astype(np.float32),
        ypts_s.astype(np.float32)
    )
    tag_theta = interpolate2Dpoints_fc(
        theta.astype(np.float32),
        xpts_s.astype(np.float32),
        ypts_s.astype(np.float32)
    )
    # Per-frame displacement modifiers, sampled at every tracked point.
    tag_xmod = []
    tag_ymod = []
    for i in range(Nt):
        temp_x = interpolate2Dpoints_fc(
            xmod[i].astype(np.float32),
            xpts_s.astype(np.float32),
            ypts_s.astype(np.float32)
        )
        temp_y = interpolate2Dpoints_fc(
            ymod[i].astype(np.float32),
            xpts_s.astype(np.float32),
            ypts_s.astype(np.float32)
        )
        tag_xmod.append(temp_x)
        tag_ymod.append(temp_y)
    tag_xmod = np.array(tag_xmod)
    tag_ymod = np.array(tag_ymod)
    tag_mask = interpolate2Dpoints_fc(
        final_mask.astype(np.float32),
        xpts_s.astype(np.float32),
        ypts_s.astype(np.float32)
    )
    if inflow_mask is not None:
        tag_inflow = interpolate2Dpoints_fc(
            inflow_mask.astype(np.float32),
            xpts_s.astype(np.float32),
            ypts_s.astype(np.float32)
        )
    else:
        tag_inflow = None
    tidx_acq = acq_loc / 1000 * t_sim0.size # 0:Nt style indexing for acq_loc
    tag_xmod_acq = interpolate_temp1D(tag_xmod, tidx_acq)
    tag_ymod_acq = interpolate_temp1D(tag_ymod, tidx_acq)
    # t_sim will be the non linear motion times (t_sim0), sampled at the correct acq_loc
    # The final array will be in the range 0-2*pi, and will be used for the x y terms next
    t_sim = acq_loc / 1000 * t_sim0.size
    xx = np.arange(t_sim0.size+1)
    f = interpolate.interp1d(xx, np.append(t_sim0, t_sim0[0]+(2*np.pi)))
    t_sim = f(t_sim)
    # This generates the motion paths for the tracked points, following the same code used to generate the
    # sim object motion paths
    t_sim_i = np.tile(t_sim[:, np.newaxis], [1, tag_ra.size])
    tag_ra_up = np.tile(tag_ra[np.newaxis, :], [t_sim.shape[0], 1])
    tag_rb_up = np.tile(tag_rb[np.newaxis, :], [t_sim.shape[0], 1])
    # Elliptical trajectory rotated by tag_theta plus the per-frame modifiers.
    ell_x = tag_ra_up * (np.cos(t_sim_i) - 1.0)
    ell_y = tag_rb_up * np.sin(t_sim_i)
    dx = np.cos(tag_theta) * ell_x - np.sin(tag_theta) * ell_y + tag_xmod_acq
    dy = np.sin(tag_theta) * ell_x + np.cos(tag_theta) * ell_y + tag_ymod_acq
    # So these are our true motion paths for training
    xpts_motion = np.tile(xpts[np.newaxis, :], [t_sim.shape[0], 1]) + dx * descaler
    ypts_motion = np.tile(ypts[np.newaxis, :], [t_sim.shape[0], 1]) + dy * descaler
    ##### Now we generate the images
    all_im = np.zeros((Nt, N_im, N_im))
    all_imc = np.zeros((Nt, N_im, N_im), np.complex64)
    all_im_pc = np.zeros((Nt, N_im, N_im), np.complex64)
    dens_mod = 1.0
    if do_density:
        dd = get_dens(acqs0[0][0], use_gpu = use_gpu)
        dens_mod = np.median(dd)
    # NOTE(review): this overwrites the `noise_scale` argument (see docstring).
    noise_scale = 0.3*256*256/N_im/np.random.uniform(SNR_range[0], SNR_range[1])
    kaiser_range = [2,6]
    kaiser_beta = np.random.rand() * (kaiser_range[1] - kaiser_range[0]) + kaiser_range[0]
    for ii in range(Nt):
        # Generate the images without any noise or artifacts
        if do_density:
            dd = get_dens(acqs0[ii][0], use_gpu = use_gpu)
            dd = dens_mod / (dd + dens_mod * .01)
        else:
            dd = np.ones(acqs0[0][0].shape[0], np.float32)
        im0 = sim_object.grid_im_from_M(acqs0[ii][0], acqs0[ii][1], N_im = N_im, use_gpu = use_gpu, dens = dd)
        im0 = proc_im(im0, N_im, noise_scale, kaiser_beta)
        if (mode == 'DENSE'):
            extra_im = []
            for acq in extra_acq:
                im_temp = sim_object.grid_im_from_M(acq[ii][0], acq[ii][1], N_im = N_im, use_gpu = use_gpu, dens = dd)
                im_temp = proc_im(im_temp, N_im, noise_scale, kaiser_beta)
                extra_im.append(im_temp)
        # Generates a phase cycled image for DENSE
        if (mode == 'DENSE'):
            im_pc = im0.copy()
            for i in range(len(extra_im)):
                im_pc += np.conj(np.exp(1j * extra_theta[i])) * extra_im[i]
            all_im_pc[ii] = im_pc
        all_imc[ii] = im0
        all_im[ii] = np.abs(im0)
    return {
        "ims": all_im,
        "pts": (xpts_motion, ypts_motion),
        "tag_mask": tag_mask,
        "tag_inflow": tag_inflow,
        "all_imc": all_imc,
        "all_im_pc": all_im_pc,
    }
# Adds k-space noise and random blurring to a simulated image.
# TODO: make this a sub function with some more control over things, so it
# can also be applied to the DENSE images.  Also: for DENSE should the phase
# cycled image have artifacts?  Possibly not, though then we are learning
# denoising too . . .
def proc_im(im, N_im = 256, noise_scale = 50, kaiser_beta = 4):
    """Corrupt *im* with complex k-space noise and Kaiser-window blurring.

    The image is FFT'd to k-space, white complex Gaussian noise scaled by
    *noise_scale* is added, a separable Kaiser window (shape *kaiser_beta*,
    length *N_im*) is applied, and the result is transformed back.
    """
    kspace = np.fft.ifftshift(np.fft.fft2(np.fft.fftshift(im)))
    noise = np.random.standard_normal(kspace.shape) + 1j * np.random.standard_normal(kspace.shape)
    kspace += noise_scale * noise
    win = kaiser(N_im, kaiser_beta, sym=False)
    kspace *= np.outer(win, win)
    return np.fft.ifftshift(np.fft.ifft2(np.fft.fftshift(kspace)))
def get_dens(pos, N_im=256, oversamp=2.0, krad=1.5, nthreads = 0, use_gpu = False, scaler = 0.8):
    """Estimate a gridding density weight per k-space sample position.

    Grids unit-weight data from the trajectory onto an image and samples it
    back at the trajectory points; the magnitude of that round trip
    approximates the local sampling density at each point of `pos`.

    :param pos: (N, >=2) array of kx/ky sample positions.
    NOTE(review): `nthreads` is accepted but never used.
    NOTE(review): the final `return np.abs(dd)` follows this body.
    """
    gridder = pygrid.Gridder(
        imsize=(N_im, N_im), grid_dims=2, over_samp=oversamp, krad=krad, use_gpu=use_gpu
    )
    kx_all = pos[:, 0].astype(np.float32)
    ky_all = pos[:, 1].astype(np.float32)
    # Uniform (all-ones) density weights for the round trip.
    dens = np.ones_like(kx_all)
    traj = np.stack((kx_all, ky_all, np.zeros_like(ky_all)), 1).astype(np.float32) * scaler
    MM = np.ones_like(kx_all).astype(np.complex64)
    # k-space -> image with unit data . . .
    out = None
    if use_gpu:
        out = gridder.cu_k2im(MM.astype(np.complex64), traj, dens, imspace=True)
    else:
        out = gridder.k2im(MM.astype(np.complex64), traj, dens, imspace=True)
    # . . . and back to the trajectory: |dd| ~ local sampling density.
    dd = None
    if use_gpu:
        dd = gridder.cu_im2k(out, traj, dens, imspace=True)
    else:
        dd = gridder.im2k(out, traj, dens, imspace=True)
    # Disabled second refinement pass, kept for reference:
    # dd = np.abs(dd)
    # dd0 = dd.copy()
    # out = None
    # if use_gpu:
    # out = gridder.cu_k2im(MM.astype(np.complex64), traj, dd, imspace=True)
    # else:
    # out = gridder.k2im(MM.astype(np.complex64), traj, dd, imspace=True)
    # dd = None
    # if use_gpu:
    # dd = gridder.cu_im2k(out, traj, dens, imspace=True)
    # else:
    # dd = gridder.im2k(out, traj, dens, imspace=True)
    # dd = dd0 / np.abs(dd)
return np.abs(dd) | {"/tagsim/sim_fullmotion.py": ["/tagsim/base_im_generation.py", "/tagsim/SimObject.py", "/tagsim/SimPSD.py"], "/torch_track/train.py": ["/torch_track/loss_utils.py", "/torch_track/utils.py", "/torch_track/datagen.py"], "/torch_track/network_resnet2.py": ["/torch_track/network_utils.py"], "/tagsim/sim_cardiacmotion.py": ["/tagsim/base_im_generation.py", "/tagsim/sim_fullmotion.py", "/tagsim/SimObject.py", "/tagsim/SimPSD.py"], "/tagsim/pygrid_internal/pygrid.py": ["/tagsim/pygrid_internal/grid_kernel.py", "/tagsim/pygrid_internal/utils.py"], "/tagsim/SimPSD.py": ["/tagsim/SimObject.py"]} |
68,198 | mloecher/tag_tracking | refs/heads/main | /torch_track/train.py | from timeit import default_timer as timer
from .loss_utils import multi_loss, weighted_mse_loss, two_loss
from .utils import get_ref_plots
from .datagen import Dataset, do_all_batch_aug
import torch
import torch.optim as optim
import torch.nn as nn
from collections import defaultdict
import copy
import pickle
import numpy as np
def test_training(model, optimizer, device, train_set, valid_set, load_train_params, load_valid_params, name='',
                  history = None, n_epochs = 60, print_network = True, batch_stride = 1000, weighted_loss = False,
                  writer = None, epoch_scheduler = None, batch_scheduler = None, loss_func = None, save_val = False):
    """Run the full train/validate loop for `n_epochs` epochs.

    Tracks metrics in `history`, keeps a deep copy of the best-validation
    weights, and optionally saves them to './{name}_bestval.pt'.

    :param history: optional dict of lists to append metrics to; a fresh
        defaultdict(list) is created when None.
    :return: the `history` dict.
    """
    # FIX: `history` used to default to a module-level defaultdict(list),
    # a mutable default shared (and polluted) across calls.
    if history is None:
        history = defaultdict(list)
    if 'best_loss_validate' not in history:
        history['best_loss_validate'] = 999999999.0
    train_gen = torch.utils.data.DataLoader(train_set, **load_train_params)
    valid_gen = torch.utils.data.DataLoader(valid_set, **load_valid_params)
    if print_network:
        print(model)
    else:
        print('Starting')
    train_iter = iter(train_gen)
    # Loss selection: optional weighted MSE or L1 variants, MSE by default.
    if weighted_loss:
        criterion = weighted_mse_loss
    elif loss_func == 'smoothl1':
        print('Using Smooth L1 Loss')
        criterion = nn.SmoothL1Loss()
    elif loss_func == 'l1':
        print('Using L1 Loss')
        criterion = nn.L1Loss()
    else:
        criterion = nn.MSELoss()
    history['name'] = name
    print('Total Batches:', len(train_iter), ' Batch Stride:', batch_stride)
    for epoch in range(n_epochs):
        print('----- Epoch {:02d} -----'.format(epoch))
        train_set.shuffle()
        train_iter = iter(train_gen)
        train(train_iter, model, criterion, optimizer, epoch, device, history, batch_stride=batch_stride, writer = writer, scheduler = batch_scheduler)
        validate(valid_gen, model, criterion, optimizer, epoch, device, history, writer = writer)
        # Track the best validation loss and snapshot its weights.
        if history['loss_val'][-1] < history['best_loss_validate']:
            history['best_state_validate'] = copy.deepcopy(model.state_dict())
            history['best_loss_validate'] = history['loss_val'][-1]
            print('New Best Validate: {:.2e}'.format(history['best_loss_validate']))
            if save_val:
                save_name = './%s_bestval.pt' % name
                torch.save(model.state_dict(), save_name)
        # FIX: guard writer like train()/validate() do; the old unconditional
        # flush crashed with the default writer=None.
        if writer is not None:
            writer.flush()
        if epoch_scheduler is not None:
            epoch_scheduler.step()
    return history
def train(loader_iter, model, criterion, optimizer, epoch, device, history, batch_stride = None, scheduler=None, writer = None, verbose = 1):
    """Run `batch_stride` optimization steps and record epoch-level metrics.

    :param loader_iter: iterator yielding (x, y_true) batches.
    :param history: dict of lists; per-batch and per-epoch metrics appended.
    :param batch_stride: number of batches to consume (defaults to all).
    :param scheduler: optional per-batch LR scheduler, stepped fractionally.
    :param writer: optional tensorboard SummaryWriter.
    """
    if batch_stride is None:
        batch_stride = len(loader_iter)
    start_time = timer()
    model.train()
    running_loss = 0.0
    running_mse = 0.0
    running_mae = 0.0
    for i in range(batch_stride):
        x, y_true = next(loader_iter)
        x, y_true = x.to(device), y_true.to(device)
        optimizer.zero_grad()
        y_pred = model(x)
        loss = criterion(y_pred, y_true)
        loss.backward()
        optimizer.step()
        if scheduler is not None:
            # Fractional epoch, for schedulers that accept continuous steps.
            scheduler.step(epoch + i/batch_stride)
        running_loss += loss.item()
        # Secondary metrics (MSE / MAE) tracked alongside the training loss.
        t_mse, t_mae = two_loss(y_pred, y_true)
        running_mse += t_mse.item()
        running_mae += t_mae.item()
        # Exponentially smoothed per-batch loss plus the current LR, kept in
        # `history` for plotting outside tensorboard.
        lr_step = optimizer.state_dict()["param_groups"][0]["lr"]
        smoothing = 0.5
        if len(history['batch_loss']) == 0:
            smooth_loss = loss.item()
        else:
            smooth_loss = smoothing * history['batch_loss'][-1] + (1.0 - smoothing) * loss.item()
        history['batch_loss'].append(smooth_loss)
        history['batch_lr'].append(lr_step)
    # FIX: the means were divided by the final loop index (batch_stride - 1),
    # inflating every metric and raising ZeroDivisionError for
    # batch_stride == 1; divide by the number of batches processed.
    final_loss = running_loss / batch_stride
    final_mse = running_mse / batch_stride
    final_mae = running_mae / batch_stride
    history['loss_train'].append(final_loss)
    history['mse_train'].append(final_mse)
    history['mae_train'].append(final_mae)
    if writer is not None:
        writer.add_scalar('Loss/train', final_loss, epoch)
        writer.add_scalar('MSE/train', final_mse, epoch)
        writer.add_scalar('MAE/train', final_mae, epoch)
    total_time = timer() - start_time
    if verbose:
        print(' Loss: {:.2e} MSE:{:.2e} MAE:{:.2e} {:d} [{:.1f} sec] LR = {:.2e}'.format(final_loss, final_mse, final_mae, batch_stride, total_time, lr_step))
def validate(loader, model, criterion, optimizer, epoch, device, history, writer = None):
    """Evaluate `model` on `loader` without gradients and log epoch metrics.

    Appends the mean loss / MSE / MAE to `history` and, when a tensorboard
    `writer` is supplied, logs them under the 'validate' tags.
    """
    start_time = timer()
    model.eval()
    running_loss = 0.0
    running_mse = 0.0
    running_mae = 0.0
    num_batches = 0
    with torch.no_grad():
        for x, y_true in loader:
            x, y_true = x.to(device), y_true.to(device)
            y_pred = model(x)
            loss = criterion(y_pred, y_true)
            running_loss += loss.item()
            t_mse, t_mae = two_loss(y_pred, y_true)
            running_mse += t_mse.item()
            running_mae += t_mae.item()
            num_batches += 1
    # FIX: the means were divided by the last enumerate index
    # (len(loader) - 1), inflating every metric and failing for a
    # single-batch loader; divide by the number of batches processed.
    final_loss = running_loss / num_batches
    final_mse = running_mse / num_batches
    final_mae = running_mae / num_batches
    history['loss_val'].append(final_loss)
    history['mse_val'].append(final_mse)
    history['mae_val'].append(final_mae)
    if writer is not None:
        writer.add_scalar('Loss/validate', final_loss, epoch)
        writer.add_scalar('MSE/validate', final_mse, epoch)
        writer.add_scalar('MAE/validate', final_mae, epoch)
    total_time = timer() - start_time
    print('Validation Loss: {:.2e} MSE:{:.2e} MAE:{:.2e} {:d} [{:.1f} sec]'.format(final_loss, final_mse, final_mae, num_batches, total_time))
| {"/tagsim/sim_fullmotion.py": ["/tagsim/base_im_generation.py", "/tagsim/SimObject.py", "/tagsim/SimPSD.py"], "/torch_track/train.py": ["/torch_track/loss_utils.py", "/torch_track/utils.py", "/torch_track/datagen.py"], "/torch_track/network_resnet2.py": ["/torch_track/network_utils.py"], "/tagsim/sim_cardiacmotion.py": ["/tagsim/base_im_generation.py", "/tagsim/sim_fullmotion.py", "/tagsim/SimObject.py", "/tagsim/SimPSD.py"], "/tagsim/pygrid_internal/pygrid.py": ["/tagsim/pygrid_internal/grid_kernel.py", "/tagsim/pygrid_internal/utils.py"], "/tagsim/SimPSD.py": ["/tagsim/SimObject.py"]} |
68,199 | mloecher/tag_tracking | refs/heads/main | /torch_track/network_resnet2.py | import torch
import torch.nn as nn
from torch.hub import load_state_dict_from_url
from .network_utils import AddCoords
def conv1xNxN(in_planes, out_planes, stride=1, ksize = 3, padding=None):
    """Spatial-only 3-D convolution: kernel (1, ksize, ksize), no bias.

    Convolves over the two in-plane axes and leaves the leading axis
    untouched; padding defaults to ksize // 2 so the spatial size is
    preserved at stride 1.
    """
    pad = ksize // 2 if padding is None else padding
    return nn.Conv3d(in_planes, out_planes,
                     kernel_size=[1, ksize, ksize],
                     stride=[1, stride, stride],
                     padding=[0, pad, pad],
                     bias=False)
def convNx1x1(in_planes, out_planes, stride=1, ksize = 3, padding=None):
    """Temporal-only 3-D convolution: kernel (ksize, 1, 1), no bias.

    Convolves along the leading axis only; padding defaults to ksize // 2
    so the temporal length is preserved at stride 1.
    """
    pad = ksize // 2 if padding is None else padding
    return nn.Conv3d(in_planes, out_planes,
                     kernel_size=[ksize, 1, 1],
                     stride=[stride, 1, 1],
                     padding=[pad, 0, 0],
                     bias=False)
def conv1x1x1(in_planes, out_planes, stride=1):
    """Pointwise 1x1x1 3-D convolution (channel mixing), bias-free.

    Used as the residual downsample projection in ResNet2._make_layer.
    """
    return nn.Conv3d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlockST(nn.Module):
    """Residual block with factorised spatio-temporal convolutions.

    Each of the two conv stages is split into a spatial (1xNxN) conv
    followed by a temporal (Nx1x1) conv, with norm + activation applied
    *before* every conv (pre-activation ordering).
    """
    # Output-channel multiplier, kept for ResNet-style API parity.
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 norm_layer=None, activation=None):
        super(BasicBlockST, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm3d
        if activation is None:
            activation = nn.ReLU(inplace=True)
        self.activation = activation
        # Optional projection for the identity path when shapes change.
        self.downsample = downsample
        self.stride = stride
        # Stage 1: strided spatial conv then strided temporal conv.
        # NOTE(review): `stride` is passed to both conv1a and conv1b, so a
        # stride-2 block downsamples time as well as space -- confirm intent.
        self.bn1a = norm_layer(inplanes)
        self.conv1a = conv1xNxN(inplanes, planes, stride)
        self.bn1b = norm_layer(planes)
        self.conv1b = convNx1x1(planes, planes, stride)
        # Stage 2: unstrided spatial + temporal convs.
        self.bn2a = norm_layer(planes)
        self.conv2a = conv1xNxN(planes, planes)
        self.bn2b = norm_layer(planes)
        self.conv2b = convNx1x1(planes, planes)
    def forward(self, x):
        # Pre-activation residual: (BN -> act -> conv) x 4, then add.
        identity = x
        out = self.bn1a(x)
        out = self.activation(out)
        out = self.conv1a(out)
        out = self.bn1b(out)
        out = self.activation(out)
        out = self.conv1b(out)
        out = self.bn2a(out)
        out = self.activation(out)
        out = self.conv2a(out)
        out = self.bn2b(out)
        out = self.activation(out)
        out = self.conv2b(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        return out
class ResNet2(nn.Module):
    def __init__(self, layers, block=None, num_classes=50, width_per_group=64,
                 norm_layer=None, input_channels=1, do_coordconv = False,
                 fc_shortcut = False, activation = 'relu', init_width = 64):
        """Spatio-temporal ResNet backbone.

        :param layers: four ints -- number of residual blocks per stage.
        :param block: block class (defaults to BasicBlockST).
        :param do_coordconv: append 3 coordinate channels after the stem.
        :param fc_shortcut: add a pooled skip connection into the FC input.
        :param init_width: channel width of the stem / first stage.
        """
        super(ResNet2, self).__init__()
        self.do_coordconv = do_coordconv
        self.fc_shortcut = fc_shortcut
        if block is None:
            block = BasicBlockST
        if activation == 'relu':
            activation = nn.ReLU(inplace=True)
        elif activation == 'elu':
            activation = nn.ELU(inplace=True)
        if norm_layer is None:
            norm_layer = nn.BatchNorm3d
        self._norm_layer = norm_layer
        self.activation = activation
        self.inplanes = init_width
        self.base_width = width_per_group
        # With coordconv the stem outputs 3 fewer channels so that the total
        # after AddCoords still equals init_width.
        extra_layers = 0
        if self.do_coordconv:
            extra_layers = 3
            self.add_coords = AddCoords(3)
        # Stem: wide spatial conv then temporal conv.
        self.conv1a = conv1xNxN(input_channels, self.inplanes, ksize=7)
        self.bn1b = norm_layer(self.inplanes)
        self.conv1b = convNx1x1(self.inplanes, self.inplanes-extra_layers, ksize=3)
        if self.fc_shortcut:
            self.avgpool0 = nn.AdaptiveAvgPool3d((2, 2, 2))
        # Four residual stages; width doubles and resolution halves per stage.
        self.layer1 = self._make_layer(block, init_width, layers[0])
        self.layer2 = self._make_layer(block, 2*init_width, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 4*init_width, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 8*init_width, layers[3], stride=2)
        # Head: final norm/act, global average pool, linear classifier.
        self.bn_fin = norm_layer(8*init_width)
        self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
        self.fc = nn.Linear(8*init_width * block.expansion, num_classes)
        self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.BatchNorm3d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
norm_layer = self._norm_layer
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
# norm_layer(planes * block.expansion),
conv1x1x1(self.inplanes, planes * block.expansion, stride),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, norm_layer, activation = self.activation))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, norm_layer=norm_layer, activation = self.activation))
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1a(x)
x = self.bn1b(x)
x = self.activation(x)
x = self.conv1b(x)
if self.do_coordconv:
x = self.add_coords(x)
if self.fc_shortcut:
skip = self.avgpool0(x)
skip = torch.flatten(skip, 1)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.bn_fin(x)
x = self.activation(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
if self.fc_shortcut:
x += skip
x = self.fc(x)
return x
def forward(self, x):
return self._forward_impl(x) | {"/tagsim/sim_fullmotion.py": ["/tagsim/base_im_generation.py", "/tagsim/SimObject.py", "/tagsim/SimPSD.py"], "/torch_track/train.py": ["/torch_track/loss_utils.py", "/torch_track/utils.py", "/torch_track/datagen.py"], "/torch_track/network_resnet2.py": ["/torch_track/network_utils.py"], "/tagsim/sim_cardiacmotion.py": ["/tagsim/base_im_generation.py", "/tagsim/sim_fullmotion.py", "/tagsim/SimObject.py", "/tagsim/SimPSD.py"], "/tagsim/pygrid_internal/pygrid.py": ["/tagsim/pygrid_internal/grid_kernel.py", "/tagsim/pygrid_internal/utils.py"], "/tagsim/SimPSD.py": ["/tagsim/SimObject.py"]} |
68,200 | mloecher/tag_tracking | refs/heads/main | /tagsim/SimObject.py | import hdf5storage
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import sys, os
from .pygrid_internal import pygrid as pygrid
from .pygrid_internal import c_grid as c_grid
from .pygrid_internal import utils as pygrid_utils
class SimObject:
    """Time-resolved point-cloud phantom: positions plus signal and T1/T2 maps.

    Positions `r` are (Nt, Npoints, 3) in normalized FOV units (roughly
    -0.5..0.5); the motion cycle is periodic with `period` milliseconds.

    Fixes vs. original: the deprecated `np.bool` / `np.complex` aliases
    (removed in NumPy >= 1.24) are replaced by the builtin types — same
    dtypes, no behavior change on older NumPy.
    """

    def __init__(self):
        self.Nt = 24  # Number of timeframes in the dataset
        self.period = 1000  # periodic phantom interval in [ms]
        self.dt = self.period / self.Nt
        self.tt = np.arange(self.Nt) * self.dt
        self.fov = np.array([360, 360, 8.0]) * 1.0e-3  # FOV in meters (x, y, z)

    def gen_from_generator(self, r, s, t1, t2, Nz=4):
        """Populate the phantom from externally generated arrays.

        Parameters
        ----------
        r : (Nt, Npoints, 3) positions per timeframe
        s : (Npoints,) baseline signal
        t1, t2 : relaxation times (scalar or per-point)
        Nz : unused; kept for backward compatibility
        """
        self.r = r
        self.sig0 = s
        self.T1 = t1
        self.T2 = t2
        self.Nt = self.r.shape[0]  # Number of timeframes in the dataset
        self.period = 1000  # periodic phantom interval in [ms]
        self.dt = self.period / self.Nt
        self.tt = np.arange(self.Nt) * self.dt

    def gen_standard_cardiac(self, Nz=8, NN=512, t1=800, t2=80, inflow=0):
        """Build the standard cardiac phantom: background image points,
        myocardium points from packaged data, and (optionally) a moving blood
        pool whose points get a short `inflow` T1.

        Parameters
        ----------
        Nz : number of z slices of phantom points
        NN : background image grid size (points per side)
        t1, t2 : relaxation times [ms] for background points
        inflow : if > 0, add a blood pool and use this as its T1 [ms]
        """
        zz = np.linspace(-0.5, 0.5, Nz)
        data_loc = os.path.dirname(__file__) + "/cardiac_data/outdata_v7.npz"
        data = np.load(data_loc)
        x_myo = data["xa"].T
        y_myo = data["ya"].T
        z_myo = data["za"].T
        # Normalize myocardium coordinates into the phantom's unit FOV.
        x_myo /= np.abs(x_myo).max() * 9
        y_myo /= np.abs(y_myo).max() * 9
        z_myo = (z_myo - 8) / 16
        # Blood-pool radius per frame = innermost myocardial x near y ~ 0.
        blood_rad = []
        for ind_t in range(x_myo.shape[0]):
            blood_rad.append(np.abs(x_myo[ind_t, np.abs(y_myo[ind_t]) < 0.005]).min())
        blood_rad = np.array(blood_rad)
        r_blood = make_blood_pool(blood_rad, Nz=Nz)
        # Shift heart (myocardium + blood) off-center.
        myo_pos = np.array([0.06, -0.14])
        myo_rad = np.array([x_myo.max() - x_myo.min(), y_myo.max() - y_myo.min()]) / 2.0
        x_myo += myo_pos[0]
        y_myo += myo_pos[1]
        r_blood[:, :, 0] += myo_pos[0]
        r_blood[:, :, 1] += myo_pos[1]
        s_blood = np.ones(r_blood.shape[1]) / Nz / 4
        r_myo = np.stack((x_myo, y_myo, z_myo), 2)
        s_myo = np.ones(r_myo.shape[1]) / r_myo.shape[1] / 200 * Nz * NN * NN
        # s_myo = np.ones(r_myo.shape[1]) * 0.2
        # Static background image gives per-point signal weights.
        data_loc = os.path.dirname(__file__) + "/cardiac_data/SAX_cavity_dark.png"
        img = Image.open(data_loc).convert("L")
        img = img.crop((10, 20, 170, 180))
        img = img.resize((NN, NN))
        img = np.asarray(img).astype("float64")
        img /= img.max()
        y, x = np.meshgrid(
            np.linspace(-0.5, 0.5, NN, False),
            np.linspace(-0.5, 0.5, NN, False),
            indexing="ij",
        )
        x = x.ravel()
        x = np.tile(x[np.newaxis, np.newaxis, :], [self.tt.shape[0], Nz, 1])
        y = y.ravel()
        y = np.tile(y[np.newaxis, np.newaxis, :], [self.tt.shape[0], Nz, 1])
        z = np.tile(zz[np.newaxis, :, np.newaxis], [y.shape[0], 1, y.shape[2]])
        x = np.reshape(x, (self.tt.shape[0], -1))
        y = np.reshape(y, (self.tt.shape[0], -1))
        z = np.reshape(z, (self.tt.shape[0], -1))
        # Carve out the elliptical heart region from the static background.
        rad = np.sqrt(
            ((x[0] - myo_pos[0]) / myo_rad[0]) ** 2.0
            + ((y[0] - myo_pos[1]) / myo_rad[1]) ** 2.0
        )
        # `bool` replaces the removed `np.bool` alias (NumPy >= 1.24).
        mask = np.ones(x.shape[1], bool)
        mask[(rad < 1.0)] = False
        s = np.tile(img[np.newaxis, :], [Nz, 1]).ravel()
        r = np.stack((x, y, z), 2)
        s = s[mask]
        r = r[:, mask, :]
        if inflow > 0:
            r_all = np.concatenate((r, r_myo, r_blood), 1)
            s_all = np.concatenate((s, s_myo, s_blood))
        else:
            r_all = np.concatenate((r, r_myo), 1)
            s_all = np.concatenate((s, s_myo))
        self.r = r_all
        self.sig0 = s_all
        self.T1 = np.ones_like(self.sig0) * t1
        self.T2 = np.ones_like(self.sig0) * t2
        self.r_myo = r_myo
        if inflow > 0:
            # Blood points were appended last; give them the inflow T1.
            self.T1[-s_blood.size:] = inflow

    def shift_positions(self, dt):
        """Shift every timeframe's positions forward by `dt` ms (periodic)."""
        r_new = np.zeros_like(self.r)
        for i in range(self.r.shape[0]):
            t0 = self.tt[i]
            t1 = t0 + dt
            r_new[i] = self.get_pos_time(t1)
        self.r = r_new

    def get_pos_time(self, p_time):
        """Linearly interpolate positions at time `p_time` [ms] (wraps periodically)."""
        p_time = p_time % self.period
        for i in range(self.tt.size):
            if self.tt[i] > p_time:
                lo = i - 1
                hi = i
                lo_mod = 1 - (p_time - self.tt[lo]) / self.dt
                hi_mod = (p_time - self.tt[lo]) / self.dt
                break
            elif i == (self.tt.size - 1):
                # Past the last sample: wrap to frame 0.
                lo = i
                hi = 0
                lo_mod = 1 - (p_time - self.tt[lo]) / self.dt
                hi_mod = (p_time - self.tt[lo]) / self.dt
        pos = self.r[lo] * lo_mod + self.r[hi] * hi_mod
        return pos

    def get_pos_time_r(self, p_time, r_in):
        """Same as `get_pos_time`, but interpolating a caller-supplied position array."""
        p_time = p_time % self.period
        for i in range(self.tt.size):
            if self.tt[i] > p_time:
                lo = i - 1
                hi = i
                lo_mod = 1 - (p_time - self.tt[lo]) / self.dt
                hi_mod = (p_time - self.tt[lo]) / self.dt
                break
            elif i == (self.tt.size - 1):
                lo = i
                hi = 0
                lo_mod = 1 - (p_time - self.tt[lo]) / self.dt
                hi_mod = (p_time - self.tt[lo]) / self.dt
        pos = r_in[lo] * lo_mod + r_in[hi] * hi_mod
        return pos

    def grid_im_from_M(self, pos, M, N_im=256, w=64, oversamp=4.0, krad=1.5, nthreads = 0, use_gpu = False, dens = None):
        """Reconstruct a complex image by gridding point magnetization.

        `pos` (Npoints, >=2) supplies normalized x/y; `M` (Npoints, >=2) holds
        the real/imag magnetization components. Uses the project's pygrid
        gridder (CPU or GPU path).
        """
        gridder = pygrid.Gridder(
            imsize=(N_im, N_im), grid_dims=2, over_samp=oversamp, krad=krad, use_gpu=use_gpu
        )
        kx_all = pos[:, 0].astype(np.float32)
        ky_all = pos[:, 1].astype(np.float32)
        if dens is None:
            dens = np.ones_like(kx_all)
        else:
            dens = dens.astype(np.float32)
        traj = np.stack((kx_all, ky_all, np.zeros_like(ky_all)), 1).astype(np.float32)
        MM = M[:, 0] + 1j * M[:, 1]
        out = None
        if use_gpu:
            out = gridder.cu_k2im(MM.astype(np.complex64), traj, dens, imspace=True)
        else:
            out = gridder.k2im(MM.astype(np.complex64), traj, dens, imspace=True)
        return out

    def get_im_from_M(self, pos, M, N_im=512, w=64):
        """Direct (nearest-pixel) image formation followed by a k-space crop.

        Bins each point's complex magnetization into an N_im grid, crops
        k-space to 2w x 2w, and returns the low-resolution complex image.
        """
        xx = pos[:, 0]
        yy = pos[:, 1]
        # `complex` replaces the removed `np.complex` alias (NumPy >= 1.24).
        im = np.zeros((N_im, N_im), complex)
        rx = np.round(N_im * (xx + 0.5)).astype("int")
        ry = np.round(N_im * (yy + 0.5)).astype("int")
        for i in range(M.shape[0]):
            if (
                ry[i] >= 0
                and ry[i] < im.shape[0]
                and rx[i] >= 0
                and rx[i] < im.shape[1]
            ):
                im[ry[i], rx[i]] += M[i, 0] + 1j * M[i, 1]
        k = np.fft.ifftshift(np.fft.fftn(np.fft.fftshift(im))) / np.sqrt(N_im / w)
        k = k[
            k.shape[0] // 2 - w : k.shape[0] // 2 + w,
            k.shape[1] // 2 - w : k.shape[1] // 2 + w,
        ]
        im2 = np.fft.ifftshift(np.fft.ifftn(np.fft.fftshift(k)))
        return im2
def make_blood_pool(blood_rad, density=100, Nz=4):
    """Build per-timeframe point clouds filling a circular blood pool.

    Parameters
    ----------
    blood_rad : (Nt,) pool radius per timeframe (normalized units)
    density : points per side of the sampling grid
    Nz : number of z slices (z spans -0.5..0.5)

    Returns
    -------
    (Nt, Nz*Npts, 3) stacked x/y/z positions.
    """
    n_frames = blood_rad.size
    slice_z = np.linspace(-0.5, 0.5, Nz)
    xs, ys = [], []
    for rad_t in blood_rad:
        # Sample a square slightly larger than the pool, keep in-circle points.
        half = 1.1 * rad_t
        axis = np.linspace(-half, half, density)
        gx, gy = np.meshgrid(axis, axis)
        gx, gy = gx.ravel(), gy.ravel()
        keep = np.sqrt(gx * gx + gy * gy) < rad_t
        xs.append(np.tile(gx[keep][np.newaxis, :], [Nz, 1]))
        ys.append(np.tile(gy[keep][np.newaxis, :], [Nz, 1]))
    x = np.array(xs)
    y = np.array(ys)
    z = np.tile(slice_z[np.newaxis, :, np.newaxis], [y.shape[0], 1, y.shape[2]])
    flat = [a.reshape(n_frames, -1) for a in (x, y, z)]
    return np.stack(flat, 2)
if __name__ == "__main__":
    # Library-only module: no standalone behavior yet.
    print('Nothing in __main__ right now')
| {"/tagsim/sim_fullmotion.py": ["/tagsim/base_im_generation.py", "/tagsim/SimObject.py", "/tagsim/SimPSD.py"], "/torch_track/train.py": ["/torch_track/loss_utils.py", "/torch_track/utils.py", "/torch_track/datagen.py"], "/torch_track/network_resnet2.py": ["/torch_track/network_utils.py"], "/tagsim/sim_cardiacmotion.py": ["/tagsim/base_im_generation.py", "/tagsim/sim_fullmotion.py", "/tagsim/SimObject.py", "/tagsim/SimPSD.py"], "/tagsim/pygrid_internal/pygrid.py": ["/tagsim/pygrid_internal/grid_kernel.py", "/tagsim/pygrid_internal/utils.py"], "/tagsim/SimPSD.py": ["/tagsim/SimObject.py"]} |
68,201 | mloecher/tag_tracking | refs/heads/main | /torch_track/datagen.py | import torch
import h5py
import numpy as np
# Data generator, with augmentations
class Dataset(torch.utils.data.Dataset):
    """Tag-tracking patch dataset backed by an HDF5 file.

    Yields `(X, y)` (or `(X, y, ID)` when `output_index` is set) where X is a
    (1, 25, *dim) float32 frame stack and y the flattened patch paths scaled
    by `path_scale`. Optional augmentation and two normalization modes.
    """

    def __init__(self, list_idx, data_filename, dim=(32, 32), path_scale = 256,
                 debug = False, normmode=0, output_index = False, do_augs = False):
        self.list_idx = list_idx
        self.data_filename = data_filename
        self.dim = dim
        self.path_scale = path_scale
        self.debug = debug
        self.normmode = normmode
        self.output_index = output_index
        self.do_augs = do_augs

    def shuffle(self):
        """Randomly permute the sample index list in place."""
        self.list_idx = np.random.permutation(self.list_idx)

    def __len__(self):
        return len(self.list_idx)

    def __getitem__(self, index):
        # A fresh h5py handle per item keeps the dataset safe for
        # multiprocessing workers; the overhead is small in practice.
        sample_id = self.list_idx[index]
        with h5py.File(self.data_filename, 'r') as source:
            targets = source['patch_paths'][sample_id, :, :].ravel() * self.path_scale
            frames = source['patch_ims'][sample_id, :, :, :]
        if self.do_augs:
            frames, targets = do_all_augs(frames, targets)
        if self.normmode == 0:
            # Min/max normalize into [-1, 1].
            lo, hi = frames.min(), frames.max()
            frames = (frames - lo) / (hi - lo) * 2.0 - 1.0
        elif self.normmode == 1:
            # Zero-mean, unit-variance normalization.
            frames = (frames - frames.mean()) / frames.std()
        batch = np.empty((1, 25, *self.dim), np.float32)
        batch[0] = frames
        if self.output_index:
            return batch, targets, sample_id
        return batch, targets
def aug_rotate(X, Y, k = 1):
    """Rotate the frame stack by k quarter-turns and transform targets to match.

    X is a (T, H, W) stack rotated in the spatial axes; Y is a length-50
    vector of [first-half, second-half] path components.
    """
    X_rot = np.rot90(X, k, axes=(1, 2))
    Y_rot = Y.copy()
    # Per quarter-turn: swap the two halves of Y and negate the new second half.
    for _ in range(k):
        Y_rot = np.concatenate((Y_rot[25:], -Y_rot[:25]))
    return X_rot, Y_rot
def aug_flip(X, Y, k = 1):
    """Flip the frame stack along spatial axis k (0 = no-op) and negate the
    matching half of the path targets."""
    X_f = X.copy()
    Y_f = Y.copy()
    if k in (1, 2):
        X_f = np.flip(X_f, k)
        # Axis 1 (rows) negates the second half of Y; axis 2 (columns) the first.
        half = slice(25, None) if k == 1 else slice(None, 25)
        Y_f[half] *= -1.0
    return X_f, Y_f
def aug_linear(X, Y, axis = 0, scale = 0.5):
    """Add a linear intensity ramp along one axis of the frame stack.

    Targets are returned unchanged. `axis` must be 0, 1 or 2.
    """
    X_mod = X.copy()
    Y_out = Y.copy()
    # The 0.3 factor keeps a scale in [0, 1] at a sensible ramp amplitude.
    ramp = 0.3 * X_mod.mean() * np.linspace(-scale, scale, X_mod.shape[axis])
    # Broadcast the 1-D ramp along the chosen axis.
    bshape = [1, 1, 1]
    bshape[axis] = -1
    X_mod += ramp.reshape(bshape)
    return X_mod, Y_out
def aug_noise(X, Y, scale = 0.5):
    """Add zero-mean Gaussian noise scaled by the mean image intensity.

    Targets are returned unchanged (as a copy).
    """
    noisy = X.copy()
    # The 0.1 factor keeps a scale in [0, 1] at a sensible noise amplitude.
    noisy += 0.1 * scale * noisy.mean() * np.random.standard_normal(noisy.shape)
    return noisy, Y.copy()
def aug_temp_blur(X, Y, tf = 0, scale = 1.0):
    """Blur one timeframe with its four temporal neighbours.

    Neighbours +-1 get weight `scale`, +-2 get `scale/2`; the result is
    renormalized. `tf` is clamped to 22 so tf+2 stays in range; negative
    offsets wrap to the end of the (periodic) cycle. Targets are unchanged.
    """
    blurred = X.copy()
    t = min(tf, 22)
    neighbours = scale * (blurred[t - 1] + blurred[t + 1]) \
        + 0.5 * scale * (blurred[t - 2] + blurred[t + 2])
    blurred[t] = (blurred[t] + neighbours) / (1.0 + 3.0 * scale)
    return blurred, Y.copy()
from tagsim_git.interp_temp2d import interpolate_temp2D, interpolate_temp
def aug_temp_interp(X, Y, scale = 1.0):
    """Randomly jitter the temporal sampling grid and re-interpolate frames/targets.

    Fix: `np.float` (removed in NumPy >= 1.24) replaced by the builtin
    `float` dtype — identical float64 result on older NumPy.

    Parameters
    ----------
    X : (25, H, W) frame stack
    Y : flattened path targets
    scale : jitter amplitude in [0, 1]
    """
    # Nominal sample times 0..24 with Gaussian jitter; frame 0 stays anchored
    # and times are clamped to be non-negative.
    tt = np.arange(25, dtype=float)
    jitter = 0.3 * scale * np.random.standard_normal(25)
    jitter[0] = 0
    tt += jitter
    tt[tt < 0] = 0
    X1 = interpolate_temp2D(X, tt)
    Y1 = interpolate_temp(Y, tt)
    return X1, Y1
def do_all_augs(XX, YY):
    """Run the full augmentation chain with freshly drawn random parameters.

    Both the stage order (geometric -> intensity -> temporal) and the order in
    which random parameters are drawn are significant and preserved.
    """
    stages = (
        (aug_rotate, lambda: (np.random.randint(4),)),
        (aug_flip, lambda: (np.random.randint(3),)),
        (aug_linear, lambda: (np.random.randint(3), np.random.rand())),
        (aug_noise, lambda: (np.random.rand(),)),
        (aug_temp_blur, lambda: (np.random.randint(25), np.random.rand())),
        (aug_temp_interp, lambda: (np.random.rand(),)),
    )
    for stage, draw_args in stages:
        XX, YY = stage(XX, YY, *draw_args())
    return XX, YY
def do_all_batch_aug(XXb, YYb):
    """Augment every sample of a batch in place (channel 0 only)."""
    for sample_idx in range(XXb.shape[0]):
        aug_im, aug_y = do_all_augs(XXb[sample_idx, 0], YYb[sample_idx])
        XXb[sample_idx, 0] = aug_im
        YYb[sample_idx] = aug_y
68,202 | mloecher/tag_tracking | refs/heads/main | /torch_track/utils_anim.py | import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from matplotlib import animation, rc
from IPython.display import HTML
# Jupyter notebook compatible animation
class TagAnimator:
    """Jupyter-compatible animation of an image series with tag-point overlay.

    `ims` is squeezed to (Nt, H, W). `tags` (if given) is a flat array whose
    first half holds x coordinates and second half y coordinates — presumably
    indexed per frame via xp[i]/yp[i]; verify against callers. One green '+'
    marker is advanced frame by frame over the static red 'x' markers.
    """
    def __init__(self, ims, tags, figsize=[8, 8], nframes=25, interval=50, scale = 1.0, shift = None, clim=None):
        print("Starting animation class . . . ", flush=True)
        # Default shift centers the overlay on the image.
        if shift is None:
            shift = ims.shape[-1]/2.0
        self.ims = np.squeeze(ims)
        if tags is None:
            self.tags = tags
            self.plot_tags = False
        else:
            self.tags = np.squeeze(tags)
            self.plot_tags = True
        self.fig, self.axarr = plt.subplots(1, 1, squeeze=False, figsize=figsize)
        # Remove all margins so the image fills the figure.
        self.fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)
        self.im = self.axarr[0, 0].imshow(self.ims[0], cmap="gray", clim=clim)
        if self.plot_tags:
            # First half of `tags` = x, second half = y; map to pixel coords.
            self.tagmid = tags.size // 2
            xp = np.array(tags[:self.tagmid]) * scale + shift
            yp = np.array(tags[self.tagmid:]) * scale + shift
            # All points as small red crosses (static).
            self.pts_all, = self.axarr[0, 0].plot(
                xp, yp, linestyle="None", marker="x", markeredgecolor="r", mew=2, markersize=4
            )
            # Single highlighted point, advanced each frame in animate().
            self.pts_big, = self.axarr[0, 0].plot(
                xp[0], yp[0], linestyle="None", marker="+", markeredgecolor="g", mew=4, markersize=12
            )
            self.xp = xp
            self.yp = yp
        else:
            self.xp = 0
            self.yp = 0
            self.pts_big = None
        print("Making animation . . . ", flush=True)
        self.anim = animation.FuncAnimation(
            self.fig,
            self.animate,
            init_func=self.init,
            frames=nframes,
            interval=interval,
            blit=True,
        )
        # Close so only the animation (not a static figure) renders in notebooks.
        plt.close()
    def init(self):
        # Blit init: reset artists to frame 0 and return all redrawn artists.
        self.im.set_data(self.ims[0])
        if self.plot_tags:
            self.pts_big.set_data(self.xp[0], self.yp[0])
            return [self.im, self.pts_big]
        else:
            return [self.im,]
    def animate(self, i):
        # Per-frame update: image i plus the highlighted point for frame i.
        self.im.set_data(self.ims[i])
        if self.plot_tags:
            self.pts_big.set_data(self.xp[i], self.yp[i])
            return [self.im, self.pts_big]
        else:
            return [self.im,]
class Animate_FullIm:
    """Animate a full image series with up to two sets of per-frame points.

    `tags_in`/`tags_in2` may arrive in any axis order; each is normalized to
    (time, dim, points) by locating the axis of length 2 (dim) and the axis
    of length 25 (time). NOTE(review): this assumes exactly 25 timeframes and
    that no other axis has length 2 or 25 — verify against callers. Also,
    `auto_crop` reads `tags`, so it requires `tags_in` to be provided.
    """
    def __init__(self, ims, tags_in=None, tags_in2=None, figsize=(8,8), dpi=100, clim_scale=(0.0, 1.0), interval=50, auto_crop = 10):
        print("Starting animation class . . . ", flush=True)
        self.ims = np.squeeze(ims)
        Nt = self.ims.shape[0]
        if tags_in is None:
            self.tags = tags_in
            self.plot_tags = False
        else:
            tags = np.squeeze(tags_in)
            if not tags.ndim == 3:
                print('ERROR: tags should be 3 dims, not', tags.ndim)
                return
            # Reorder axes to (time, dim, points), identifying axes by length.
            tags_shape = np.array(tags.shape)
            idx_dim = np.argwhere(tags_shape==2)[0][0]
            idx_time = np.argwhere(tags_shape==25)[0][0]
            idx_pts = list(range(tags.ndim))
            idx_pts.remove(idx_dim)
            idx_pts.remove(idx_time)
            idx_pts = idx_pts[0]
            tags = tags.transpose([idx_time, idx_dim, idx_pts])
            self.tags = tags
            self.plot_tags = True
        if tags_in2 is None:
            self.tags2 = tags_in2
            self.plot_tags2 = False
        else:
            # Same normalization for the second point set.
            tags2 = np.squeeze(tags_in2)
            if not tags2.ndim == 3:
                print('ERROR: tags should be 3 dims, not', tags2.ndim)
                return
            tags2_shape = np.array(tags2.shape)
            idx_dim2 = np.argwhere(tags2_shape==2)[0][0]
            idx_time2 = np.argwhere(tags2_shape==25)[0][0]
            idx_pts2 = list(range(tags2.ndim))
            idx_pts2.remove(idx_dim2)
            idx_pts2.remove(idx_time2)
            idx_pts2 = idx_pts2[0]
            tags2 = tags2.transpose([idx_time2, idx_dim2, idx_pts2])
            self.tags2 = tags2
            self.plot_tags2 = True
        self.fig, self.axarr = plt.subplots(1, 1, squeeze=False, figsize=figsize)
        # Remove all margins so the image fills the figure.
        self.fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)
        # Color limits as fractions of the image intensity range.
        clim = [self.ims.min() + clim_scale[0] * self.ims.max(), clim_scale[1] * self.ims.max()]
        self.im = self.axarr[0, 0].imshow(self.ims[0], cmap="gray", clim=clim, extent=(0, self.ims.shape[2], self.ims.shape[1], 0))
        if auto_crop is not None:
            # Crop the view to the tracked points plus an `auto_crop` margin.
            self.axarr[0, 0].set_xlim([tags[:,0,:].min()-auto_crop, tags[:,0,:].max()+auto_crop])
            self.axarr[0, 0].set_ylim([tags[:,1,:].max()+auto_crop, tags[:,1,:].min()-auto_crop])
        if self.plot_tags:
            # Red '+' markers for the first point set.
            self.pts_plot, = self.axarr[0, 0].plot(
                self.tags[0,0], self.tags[0,1], linestyle="None",
                marker="+", fillstyle='full', markeredgecolor="#ff1744", markerfacecolor ='#ff1744',mew=3, markersize=15
            )
        else:
            self.pts_plot = None
        if self.plot_tags2:
            # Green 'x' markers for the second point set.
            self.pts_plot2, = self.axarr[0, 0].plot(
                self.tags2[0,0], self.tags2[0,1], linestyle="None",
                marker="x", fillstyle='full', markeredgecolor="#76ff03", markerfacecolor ='#76ff03',mew=3, markersize=10
            )
        else:
            self.pts_plot2 = None
        print("Making animation . . . ", flush=True)
        self.anim = animation.FuncAnimation(
            self.fig,
            self.animate,
            init_func=self.init,
            frames=Nt,
            interval=interval,
            blit=True,
        )
        # Close so only the animation (not a static figure) renders in notebooks.
        plt.close()
    def init(self):
        # Blit init: reset every artist to frame 0.
        self.im.set_data(self.ims[0])
        out = [self.im,]
        if self.plot_tags:
            self.pts_plot.set_data(self.tags[0,0], self.tags[0,1])
            out.append(self.pts_plot)
        if self.plot_tags2:
            self.pts_plot2.set_data(self.tags2[0,0], self.tags2[0,1])
            out.append(self.pts_plot2)
        return out
    def animate(self, i):
        # Per-frame update of the image and both overlays (when present).
        self.im.set_data(self.ims[i])
        out = [self.im,]
        if self.plot_tags:
            self.pts_plot.set_data(self.tags[i,0], self.tags[i,1])
            out.append(self.pts_plot)
        if self.plot_tags2:
            self.pts_plot2.set_data(self.tags2[i,0], self.tags2[i,1])
            out.append(self.pts_plot2)
        return out
68,203 | mloecher/tag_tracking | refs/heads/main | /tagsim/sim_cardiacmotion.py | import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import splev, splrep
from scipy.signal import triang, windows
from scipy import ndimage, interpolate
from skimage.filters import gaussian
from skimage.transform import rescale
from skimage.morphology import binary_dilation, disk
from .base_im_generation import gen_motion_params, get_temporal_waveform, get_temporal_waveform2, make_random_mask, get_random_image, map_1Dpoly
from .sim_fullmotion import get_dens, proc_im
from .SimObject import SimObject
from .SimPSD import SimInstant
from .interp2d import interpolate2Dpoints_fc
from .interp_temp2d import interpolate_temp1D
def get_random_heart(NN = 768,
                Nt = 25,
                N_im = 256,
                basepath="./image_db/",
                rseed=None,
                seed = -1,
                img_thresh=0.20,
                t1_lims=(900, 1200),
                t2_lims=(40, 70),
                SNR_range = (10, 30),
                use_gpu = True,
                ke = 0.12,
                mode = 'gridtag',
                do_density=True,
                new_sim = True,
                random_pts = False,):
    """Simulate a randomized tagged (or DENSE) cardiac image series with
    ground-truth tracking points.

    Builds a randomized background + moving heart phantom, runs the Bloch
    simulator with a tagging/DENSE pulse sequence, grids the acquisitions to
    images, and returns the image series together with analytic motion paths
    for the tracked points.

    Fixes vs. original: the `np.int` alias (removed in NumPy >= 1.24) is
    replaced by builtin `int`; the removed `ndimage.morphology` namespace is
    replaced by the equivalent top-level `ndimage` functions; a duplicate
    `"t"` key in the returned dict (whose first value was silently discarded)
    has been removed — the returned value (`t_sim`) is unchanged.

    Returns a dict with the image series ("ims"/"all_imc"/"all_im_pc"),
    tracked point paths ("pts"), masks, and the heart-motion parameters.
    """
    ##### Build the phantom: random background image/mask + generated heart
    final_mask = make_random_mask(NN=NN, rseed=rseed)
    img = get_random_image(basepath, NN=NN, seed=seed)
    res_heart = gen_heart(NN = NN, N_im=N_im)
    r_heart = res_heart['r']
    # Builtin `int` replaces the removed `np.int` alias (NumPy >= 1.24).
    r0_NN = np.rint((r_heart[0]+0.5) * NN).astype(int)
    mask_myo = np.zeros((NN,NN))
    mask_myo[r0_NN[:,0], r0_NN[:,1]] = 1.0
    mask_myo_init = mask_myo.copy()
    mask_blood0 = ndimage.binary_fill_holes(mask_myo)
    # mask_blood0 = ndimage.binary_erosion(mask_blood0, disk(2))
    # Background points: bright enough, inside the random mask, outside the heart.
    img_mask = img > img_thresh
    final_mask = final_mask & img_mask & ~mask_blood0
    s = img
    s = s[final_mask]
    # Per-point elliptical motion parameters sampled from the heart generator maps.
    t = res_heart['t']
    r_a = res_heart['r_a'][final_mask]
    r_b = res_heart['r_b'][final_mask]
    theta = res_heart['theta'][final_mask]
    xmod = res_heart['xmod'][:, final_mask]
    ymod = res_heart['ymod'][:, final_mask]
    ell_x = r_a[None, :] * (np.cos(t) - 1.0)[:,None]
    ell_y = r_b[None, :] * np.sin(t)[:,None]
    dx = np.cos(theta)[None,:] * ell_x - np.sin(theta)[None,:] * ell_y + xmod
    dy = np.sin(theta)[None,:] * ell_x + np.cos(theta)[None,:] * ell_y + ymod
    # dx *= 0.0
    # dy *= 0.0
    mesh_range = np.arange(NN)/NN - 0.5
    xx, yy = np.meshgrid(mesh_range, mesh_range, indexing = 'ij')
    x = xx[final_mask]
    y = yy[final_mask]
    x = x[None, :] + dx
    y = y[None, :] + dy
    z = np.zeros_like(x)
    r = np.stack([x, y, z], 2)
    # Relaxation times mapped from image intensity; heart gets uniform draws.
    t1 = map_1Dpoly(s, t1_lims)
    t2 = map_1Dpoly(s, t2_lims)
    s_heart = np.ones(r_heart.shape[1]) * np.random.uniform(0.5, 0.7)
    t1_heart = np.ones(r_heart.shape[1]) * np.random.uniform(t1_lims[0], t1_lims[1])
    t2_heart = np.ones(r_heart.shape[1]) * np.random.uniform(t2_lims[0], t2_lims[1])
    ##### Per-timeframe cavity masks (used to hide static points inside the cavity)
    NNa = NN//2
    all_static_mask = np.ones([Nt, NN, NN])
    for it in range(Nt):
        r_NN = np.rint((r_heart[it]+0.5) * NNa).astype(int)
        mask_myo = np.zeros((NNa,NNa))
        mask_myo[r_NN[:,0], r_NN[:,1]] = 1.0
        mask_blood = ndimage.binary_fill_holes(mask_myo)
        mask_blood = rescale(mask_blood, 2, 1)
        mask_myo = rescale(mask_myo, 2, 1)
        mask_cavity = mask_blood - mask_myo
        mask_cavity = ndimage.binary_dilation(mask_cavity)
        all_static_mask[it] -= mask_cavity
    mask_blood0d = ndimage.binary_dilation(mask_blood0, disk(5), iterations = 3)
    all_point_mask = []
    for it in range(Nt):
        m_temp = all_static_mask[it][mask_blood0d > 0.5]
        m_temp = ~(m_temp > 0.5)
        all_point_mask.append(m_temp)
    # Static blood-pool points, repeated for all frames.
    x_blood = xx[mask_blood0d > 0.5]
    y_blood = yy[mask_blood0d > 0.5]
    z_blood = np.zeros_like(x_blood)
    r0_blood = np.stack([x_blood, y_blood, z_blood], 1)
    r_blood = np.tile(r0_blood, [Nt,1,1])
    s_blood = np.ones(r_blood.shape[1]) * np.random.uniform(0.20, 0.40)
    t1_blood = np.ones(r_blood.shape[1]) * np.random.uniform(30, 60)
    t2_blood = np.ones(r_blood.shape[1]) * np.random.uniform(10, 20)
    # Combined phantom: [blood, background, myocardium] (order matters below).
    r_all = np.concatenate([r_blood, r, r_heart], 1)
    s_all = np.concatenate([s_blood, s, s_heart])
    t1_all = np.concatenate([t1_blood, t1, t1_heart])
    t2_all = np.concatenate([t2_blood, t2, t2_heart])
    ##### Run the pulse-sequence simulation
    acq_loc = np.arange(0, Nt) * 1000 / Nt + 10.0
    sim_object = SimObject()
    sim_object.gen_from_generator(r_all, s_all, t1_all, t2_all)
    simulator = SimInstant(sim_object, use_gpu=use_gpu)
    if (mode == 'gridtag'):
        if new_sim:
            scale_tagrf = np.random.uniform(0.8, 1.2)
            simulator.sample_tagging_smn_PSD(ke=ke, acq_loc=acq_loc, scale_tagrf=scale_tagrf)
        else:
            simulator.sample_tagging1331_v2_PSD(ke=ke, acq_loc=acq_loc)
    elif (mode == 'DENSE'):
        simulator.sample_DENSE_PSD(ke=ke, kd = 0.0, acq_loc=acq_loc)
    acqs0 = simulator.run()
    # For DENSE specifically run the phase cycling acquisitions
    if (mode == 'DENSE'):
        extra_theta = np.linspace(0, 2*np.pi, 4)[1:-1]
        extra_acq = []
        for theta_i in extra_theta:
            simulator = SimInstant(sim_object, use_gpu=use_gpu)
            simulator.sample_DENSE_PSD(rf_dir = [np.cos(theta_i), np.sin(theta_i), 0], ke=ke, kd = 0.0, acq_loc=acq_loc)
            extra_acq.append(simulator.run())
    # Drop blood-pool points that fall inside the cavity at each frame
    # (the blood points occupy the first all_point_mask[it].size slots).
    for it in range(Nt):
        point_mask = np.ones(s_all.size)
        point_mask[:all_point_mask[it].size] = all_point_mask[it]
        acqs0[it][0] = acqs0[it][0][point_mask > 0.5, :]
        acqs0[it][1] = acqs0[it][1][point_mask > 0.5, :]
        if (mode == 'DENSE'):
            for acq in extra_acq:
                acq[it][0] = acq[it][0][point_mask > 0.5, :]
                acq[it][1] = acq[it][1][point_mask > 0.5, :]
    ###### The following code is all to generate the initial tracking points
    # It is hard coded right now for all tag intersections, but we could do:
    # - The point subsampling here i.e. only pick 10 or 100 points for speed
    # - Select random points
    if random_pts:
        rand_scale = 0.75
        rand_N = 10000
        xpts = rand_scale * np.random.rand(rand_N) - rand_scale/2.0
        ypts = rand_scale * np.random.rand(rand_N) - rand_scale/2.0
        temp_xs = (np.array(xpts) + 0.5) * final_mask.shape[0]
        temp_ys = (np.array(ypts) + 0.5) * final_mask.shape[1]
        tag_mask_temp = interpolate2Dpoints_fc(
            mask_myo_init.astype(np.float32),
            temp_ys.astype(np.float32),
            temp_xs.astype(np.float32)
        )
    else:
        # Tag intersections on a 45-degree lattice with spacing set by ke and FOV.
        scaler = 1e-3 / ke / sim_object.fov[0] / np.sqrt(2)
        Ntag = np.ceil(0.5 / scaler) + 1
        rr = np.arange(-Ntag, Ntag).astype(np.float32)
        xpts0, ypts0 = np.meshgrid(rr, rr, indexing = 'ij')
        xpts0 = xpts0.ravel()
        ypts0 = ypts0.ravel()
        xpts = (xpts0 + ypts0) * (1e-3 / ke / sim_object.fov[0] / np.sqrt(2))
        ypts = (xpts0 - ypts0) * (1e-3 / ke / sim_object.fov[0] / np.sqrt(2))
        ind = (np.abs(xpts) <= 0.45) & (np.abs(ypts) <= 0.45)
        xpts = xpts[ind]
        ypts = ypts[ind]
    xpts_s = (np.array(xpts) + 0.5) * final_mask.shape[0]
    ypts_s = (np.array(ypts) + 0.5) * final_mask.shape[1]
    ##### Now we sample the cartesian maps for various variables at the tracking points
    # Primarily the motion path variables, and the masks
    tag_ra = interpolate2Dpoints_fc(
        res_heart['r_a'].astype(np.float32),
        ypts_s.astype(np.float32),
        xpts_s.astype(np.float32)
    )
    tag_rb = interpolate2Dpoints_fc(
        res_heart['r_b'].astype(np.float32),
        ypts_s.astype(np.float32),
        xpts_s.astype(np.float32)
    )
    tag_theta = interpolate2Dpoints_fc(
        res_heart['theta'].astype(np.float32),
        ypts_s.astype(np.float32),
        xpts_s.astype(np.float32)
    )
    tag_xmod = []
    tag_ymod = []
    for i in range(Nt):
        temp_x = interpolate2Dpoints_fc(
            res_heart['xmod'][i].astype(np.float32),
            ypts_s.astype(np.float32),
            xpts_s.astype(np.float32)
        )
        temp_y = interpolate2Dpoints_fc(
            res_heart['ymod'][i].astype(np.float32),
            ypts_s.astype(np.float32),
            xpts_s.astype(np.float32)
        )
        tag_xmod.append(temp_x)
        tag_ymod.append(temp_y)
    tag_xmod = np.array(tag_xmod)
    tag_ymod = np.array(tag_ymod)
    tag_mask = interpolate2Dpoints_fc(
        mask_myo_init.astype(np.float32),
        ypts_s.astype(np.float32),
        xpts_s.astype(np.float32)
    )
    tidx_acq = acq_loc / 1000 * t.size # 0:Nt style indexing for acq_loc
    tag_xmod_acq = interpolate_temp1D(tag_xmod, tidx_acq)
    tag_ymod_acq = interpolate_temp1D(tag_ymod, tidx_acq)
    # t_sim will be the non linear motion times, sampled at the correct acq_loc
    # The final array will be in the range 0-2*pi, and will be used for the x y terms next
    t_sim = acq_loc / 1000 * t.size
    xx = np.arange(t.size+1)
    f = interpolate.interp1d(xx, np.append(t, t[0]+(2*np.pi)))
    t_sim = f(t_sim)
    # This generates the motion paths for the tracked points, following the same
    # code used to generate the sim object motion paths
    t_sim_i = np.tile(t_sim[:, np.newaxis], [1, tag_ra.size])
    tag_ra_up = np.tile(tag_ra[np.newaxis, :], [t_sim.shape[0], 1])
    tag_rb_up = np.tile(tag_rb[np.newaxis, :], [t_sim.shape[0], 1])
    ell_x = tag_ra_up * (np.cos(t_sim_i) - 1.0)
    ell_y = tag_rb_up * np.sin(t_sim_i)
    dx = np.cos(tag_theta) * ell_x - np.sin(tag_theta) * ell_y + tag_xmod_acq
    dy = np.sin(tag_theta) * ell_x + np.cos(tag_theta) * ell_y + tag_ymod_acq
    # So these are our true motion paths for training
    xpts_motion = np.tile(xpts[np.newaxis, :], [t_sim.shape[0], 1]) + dx
    ypts_motion = np.tile(ypts[np.newaxis, :], [t_sim.shape[0], 1]) + dy
    ##### Now we generate the images
    all_im = np.zeros((Nt, N_im, N_im))
    all_imc = np.zeros((Nt, N_im, N_im), np.complex64)
    all_im_pc = np.zeros((Nt, N_im, N_im), np.complex64)
    dens_mod = 1.0
    if do_density:
        dd = get_dens(acqs0[0][0], use_gpu = use_gpu)
        dens_mod = np.median(dd)
    noise_scale = 0.3*256*256/N_im/np.random.uniform(SNR_range[0], SNR_range[1])
    kaiser_range = [2,6]
    kaiser_beta = np.random.rand() * (kaiser_range[1] - kaiser_range[0]) + kaiser_range[0]
    for ii in range(Nt):
        # Generate the images without any noise or artifacts
        if do_density:
            dd = get_dens(acqs0[ii][0], use_gpu = use_gpu)
            dd = dens_mod / (dd + dens_mod * .1)
        else:
            dd = np.ones(acqs0[0][0].shape[0], np.float32)
        im0 = sim_object.grid_im_from_M(acqs0[ii][0], acqs0[ii][1], N_im = N_im, use_gpu = use_gpu, dens = dd)
        im0 = proc_im(im0, N_im, noise_scale, kaiser_beta)
        if (mode == 'DENSE'):
            extra_im = []
            for acq in extra_acq:
                im_temp = sim_object.grid_im_from_M(acq[ii][0], acq[ii][1], N_im = N_im, use_gpu = use_gpu, dens = dd)
                im_temp = proc_im(im_temp, N_im, noise_scale, kaiser_beta)
                extra_im.append(im_temp)
        # Generates a phase cycled image for DENSE
        if (mode == 'DENSE'):
            im_pc = im0.copy()
            for i in range(len(extra_im)):
                im_pc += np.conj(np.exp(1j * extra_theta[i])) * extra_im[i]
            all_im_pc[ii] = im_pc
        all_imc[ii] = im0
        all_im[ii] = np.abs(im0)
    return {
        "ims": all_im,
        "pts": (xpts_motion, ypts_motion),
        "tag_mask": tag_mask,
        "all_imc": all_imc,
        "all_im_pc": all_im_pc,
        "acqs": acqs0,
        # A duplicate "t" key previously shadowed res_heart['t'] here; the
        # surviving value (t_sim) is returned unchanged.
        "t": t_sim,
        "r_a": res_heart['r_a'],
        "r_b": res_heart['r_b'],
        "theta": res_heart['theta'],
        "xmod": res_heart['xmod'],
        "ymod": res_heart['ymod'],
        "full_mask": res_heart['full_mask'],
        "r0_lv": res_heart['r0_lv'],
    }
# This is more of a gaussian dilation type thing now
def blur_outer(im, mask, blur=4.0):
    """Extend `im` outside `mask` with a Gaussian falloff of the nearest
    in-mask value (closer to a Gaussian dilation than a blur).

    Pixels with mask > 0.5 keep their original values; every other pixel
    takes the value of its nearest in-mask pixel, attenuated by
    exp(-(d/blur)^2) where d is the distance to that pixel.
    """
    # Distance to — and index of — the nearest in-mask pixel, per pixel.
    dist, nearest = ndimage.distance_transform_edt(mask < 0.5, return_indices=True)
    falloff = np.exp(-(dist / blur) ** 2.0)
    extended = falloff * im[nearest[0], nearest[1]]
    extended[mask > 0.5] = im[mask > 0.5]
    return extended
def gen_heart(Np = 10, N_up=100, LV_smooth = .02, NN = 512, Nt = 25, N_im = 256, motion_blur = 1.0):
    """Generate a random synthetic heart (LV + RV) geometry plus a dense
    point-cloud motion trajectory over one cardiac cycle.

    Args:
        Np: number of random control points used to draw each contour.
        N_up: number of points each contour is spline-upsampled to.
        LV_smooth: smoothing factor for the periodic LV spline fit.
        NN: side length of the square working grid for masks/motion fields.
        Nt: number of time frames in the generated motion.
        N_im: nominal output image size (unused here; kept for interface
            compatibility with callers).
        motion_blur: Gaussian sigma used to spatially smooth motion fields.

    Returns:
        dict with keys:
            'r': (Nt, Npts, 3) point trajectories (z component is zero),
            't': temporal waveform (0..2pi),
            'r_a', 'r_b': ellipse-radius motion parameter fields (NN x NN),
            'theta'/'theta_c': contraction direction field (angle / complex),
            'xmod', 'ymod': (Nt, NN, NN) additive perturbation fields,
            'full_mask': (NN, NN) label mask (1 = LV, 2 = RV),
            'r0_lv': initial LV point coordinates.
    """
    # Generate points on a circle, with random variations to theta and radius, i.e. LV
    theta0 = (np.random.random(Np) + np.arange(Np)) * 2.0 * np.pi / Np
    rad0 = np.ones_like(theta0) + np.random.random(Np) * 0.5
    rad0 *= 0.5
    # Close the contour by repeating the first point one revolution later
    theta0 = np.hstack([theta0, theta0[0] + 2 * np.pi])
    rad0 = np.hstack([rad0, rad0[0]])
    x0 = rad0 * np.cos(theta0)
    y0 = rad0 * np.sin(theta0)
    # Interpolate up to N_up points, with some smoothing
    theta0_up = np.linspace(0, 2*np.pi, N_up) + theta0[0]
    spl = splrep(theta0, x0, per=True, s = LV_smooth)
    x0_up = splev(theta0_up, spl)
    spl = splrep(theta0, y0, per=True, s = LV_smooth)
    y0_up = splev(theta0_up, spl)
    # Select points on the LV curve to be RV insertion points
    c0_ind = 0
    c1_ind = int(N_up / (2.5 + np.random.rand()))
    c0 = np.array([x0_up[c0_ind], y0_up[c0_ind]])
    c1 = np.array([x0_up[c1_ind], y0_up[c1_ind]])
    # Get the location of the center of the RV, called "offset" here
    offset_theta = theta0_up[c0_ind] + (theta0_up[c1_ind] - theta0_up[c0_ind]) / 2.0
    offset_rad = np.array([x0_up[c1_ind//2], y0_up[c1_ind//2]])
    offset_rad = np.linalg.norm(offset_rad)
    offset_x = offset_rad * np.cos(offset_theta)
    offset_y = offset_rad * np.sin(offset_theta)
    offset = np.array([offset_x, offset_y])
    # Get the position of the insertion points, relative to the RV center "offset"
    # The negative and 2pi parts are to make sure the path goes in the right direction
    p1_start = c0 - offset
    rad1_start = np.linalg.norm(p1_start)
    theta1_start = np.arctan2(p1_start[1], p1_start[0])
    p1_end = c1 - offset
    rad1_end = np.linalg.norm(p1_end)
    theta1_end = np.arctan2(p1_end[1], p1_end[0])
    if theta1_end < 0:
        theta1_end += 2* np.pi
    # Generate the path and upsample it, very similar to the LV, but not periodic
    theta1 = np.linspace(theta1_start, theta1_end, Np)
    theta1[1:-1] += 0.2 * (np.random.random(Np-2) - 0.5)
    rad1 = np.ones_like(theta1) * offset_rad
    # The triangle window bulges the RV free wall outward a little
    rad1[1:-1] *= (1 + 0.2*triang(Np-2, sym=False))
    rad1[0] = rad1_start
    rad1[-1] = rad1_end
    x1 = rad1 * np.cos(theta1)
    y1 = rad1 * np.sin(theta1)
    theta1_up = np.linspace(theta1_start, theta1_end, N_up)
    spl = splrep(theta1, x1, per=False)
    x1_up = splev(theta1_up, spl)
    spl = splrep(theta1, y1, per=False)
    y1_up = splev(theta1_up, spl)
    x1_up += offset[0]
    y1_up += offset[1]
    # Make a mask of the regions on an NN x NN grid spanning [-0.5, 0.5)
    mesh_range = np.arange(NN)/NN - 0.5
    xx, yy = np.meshgrid(mesh_range, mesh_range, indexing = 'ij')
    im_coords = np.array([xx.ravel(), yy.ravel()])
    # Size of the heart (basically LV diameter), entered as pixels in a 256
    # image, then scaled to the -0.5..0.5 coordinate system with the /256
    size_scale = np.random.uniform(32.0, 45.0) / 256.0
    # Randomly rotate everything
    theta = 2 * np.pi * np.random.rand()
    c, s = np.cos(theta), np.sin(theta)
    R = np.array(((c, -s), (s, c)))
    # apply the rotation
    offset_s = offset * size_scale
    offset_s_r = R@offset_s
    x0_up_r = c * x0_up + -s * y0_up
    y0_up_r = s * x0_up + c * y0_up
    x1_up_r = c * x1_up + -s * y1_up
    y1_up_r = s * x1_up + c * y1_up
    # Scale the LV and RV paths
    lv_path = np.array([x0_up_r, y0_up_r]) * size_scale
    rv_path = np.array([x1_up_r, y1_up_r]) * size_scale
    # LV thickness (actually radius)
    lv_width = np.random.uniform(3.5, 6.0) / 256.0
    # Find grid points within lv_width of the LV path
    diff = im_coords[:, None, :] - lv_path[:, :, None]
    diff = np.hypot(diff[0], diff[1])
    min_dist = np.min(diff, axis=0)
    min_dist = np.reshape(min_dist, [NN, NN])
    lv_mask = min_dist < lv_width
    # RV_width is a little (random amount) smaller than LV
    rv_width = lv_width - 2.5*np.random.rand()/256.0
    diff = im_coords[:, None, :] - rv_path[:, :, None]
    diff = np.hypot(diff[0], diff[1])
    min_dist = np.min(diff, axis=0)
    min_dist = np.reshape(min_dist, [NN, NN])
    rv_mask = min_dist < rv_width
    # Get rid of parts of the RV mask that are in the LV mask
    rv_mask = rv_mask & ~lv_mask
    # full mask == 1 for LV, and == 2 for RV
    full_mask = lv_mask + 2*rv_mask
    # Now onto the motion generation
    ##################
    # This gives the radius from 1 (endocardium) to 0 (epicardium) for LV
    r0_lv = im_coords[:, lv_mask.ravel()>0].T
    lv_rad = np.hypot(r0_lv[:,0], r0_lv[:,1])
    lv_rad -= lv_rad.min()
    lv_rad /= lv_rad.max()
    lv_rad = 1.0 - lv_rad
    # This gives the radius from 1 (endocardium) to 0 (epicardium) for RV
    r0_rv = im_coords[:, rv_mask.ravel()>0].T
    rv_rad = np.hypot(r0_rv[:,0]-offset_s_r[0], r0_rv[:,1]-offset_s_r[1])
    rv_rad -= rv_rad.min()
    rv_rad /= rv_rad.max()
    rv_rad = 1.0 - rv_rad
    # Get all initial points together (LV first, then RV)
    r0 = np.vstack([r0_lv, r0_rv])
    init_rad = np.hypot(r0[:,0], r0[:,1])
    init_theta = np.arctan2(r0[:,1], r0[:,0])
    # Generate the motion parameters that define general contraction motion
    r_a = np.random.uniform(0.0, 0.006) * np.ones_like(init_rad)
    r_a[:r0_lv.shape[0]] += np.random.uniform(0.003, 0.008)*lv_rad
    r_a[r0_lv.shape[0]:] += np.random.uniform(0.04, 0.14)*(init_rad[r0_lv.shape[0]:]**2.0)*rv_rad
    r_a[r0_lv.shape[0]:] += np.random.uniform(0.003, 0.008)*rv_rad
    r_b = 0.75 * r_a * np.random.rand()
    # Add some random twist by changing the direction away from center of LV
    theta_mod = np.random.rand() - 0.5
    theta_c = init_theta + theta_mod
    # Blur and add perturbations to the motion
    ##################
    # Temporal waveform 0 to 2pi, generator picked at random from two options
    temp_method = np.random.randint(2)
    if temp_method == 0:
        t = get_temporal_waveform(Nt)
    elif temp_method == 1:
        t = get_temporal_waveform2(Nt)
    # Smooth random perturbation fields plus extra polynomial coefficient maps
    r_a2, r_b2, theta2, extra_p2 = gen_motion_params(NN=NN, extra_poly=4)
    r_a2 = (r_a2 - r_a2.mean()) * np.random.uniform(.010, .030)
    r_b2 = (r_b2 - r_b2.mean()) * np.random.uniform(.005, .015)
    theta2 = (theta2 - theta2.mean()) * np.random.uniform(0.10, 0.20)
    # Temporal envelope follows the waveform's rate of change, plus noise
    filt = np.hstack([0, np.diff(t)])[:, None, None]
    filt += .01 * np.random.standard_normal(filt.shape)
    xx0 = np.linspace(0, 1, Nt)[:, np.newaxis, np.newaxis] + np.random.uniform(-1.0, 1.0)
    xx1 = np.linspace(0, 1, Nt)[:, np.newaxis, np.newaxis] + np.random.uniform(-1.0, 1.0)
    xx2 = np.linspace(0, 1, Nt)[:, np.newaxis, np.newaxis] + np.random.uniform(-1.0, 1.0)
    xx3 = np.linspace(0, 1, Nt)[:, np.newaxis, np.newaxis] + np.random.uniform(-1.0, 1.0)
    p0, p1 = extra_p2[0][np.newaxis], extra_p2[1][np.newaxis]
    xmod = (p0 * xx0**1.0 + p1 * xx1**2.0) * filt * np.random.uniform(0.015, 0.025)
    p2, p3 = extra_p2[2][np.newaxis], extra_p2[3][np.newaxis]
    ymod = (p2 * xx2**1.0 + p3 * xx3**2.0) * filt * np.random.uniform(0.015, 0.025)
    # Point image indexes
    # (np.int / np.complex were removed in NumPy 1.24 -- use the builtins)
    r0_NN = np.rint((r0+0.5) * NN).astype(int)
    # Put existing motion fields into image versions
    mask_NN = np.zeros((NN,NN))
    mask_NN[r0_NN[:,0], r0_NN[:,1]] = 1.0
    r_a_NN = np.zeros((NN,NN))
    r_a_NN[r0_NN[:,0], r0_NN[:,1]] = r_a.copy()
    r_b_NN = np.zeros((NN,NN))
    r_b_NN[r0_NN[:,0], r0_NN[:,1]] = r_b.copy()
    theta_c_NN = np.zeros((NN,NN), complex)
    theta_c_NN[r0_NN[:,0], r0_NN[:,1]] = np.exp(1j*theta_c).copy()
    r_a_NN += r_a2 * mask_NN
    r_b_NN += r_b2 * mask_NN
    theta_c_NN.real += theta2 * mask_NN
    theta_c_NN.imag += theta2 * mask_NN
    # Normalized-convolution style blur: blur field and mask, then divide
    mask_NN_b = gaussian(mask_NN, motion_blur, preserve_range=True) + 1e-16
    r_a_NN = gaussian(r_a_NN, motion_blur, preserve_range=True) / mask_NN_b
    r_b_NN = gaussian(r_b_NN, motion_blur, preserve_range=True) / mask_NN_b
    theta_c_NN.real = gaussian(theta_c_NN.real, motion_blur, preserve_range=True) / mask_NN_b
    theta_c_NN.imag = gaussian(theta_c_NN.imag, motion_blur, preserve_range=True) / mask_NN_b
    xmod_out = np.zeros_like(xmod)
    ymod_out = np.zeros_like(ymod)
    for it in range(Nt):
        xmod[it] *= mask_NN
        xmod[it] = gaussian(xmod[it], motion_blur, preserve_range=True) / mask_NN_b
        xmod[it] *= mask_NN
        xmod_out[it] = blur_outer(xmod[it], mask_NN)
        ymod[it] *= mask_NN
        ymod[it] = gaussian(ymod[it], motion_blur, preserve_range=True) / mask_NN_b
        ymod[it] *= mask_NN
        ymod_out[it] = blur_outer(ymod[it], mask_NN)
    r_a_NN *= mask_NN
    r_b_NN *= mask_NN
    theta_c_NN.real *= mask_NN
    theta_c_NN.imag *= mask_NN
    # Extrapolate the fields outside the point cloud so lookups stay smooth
    r_a_out = blur_outer(r_a_NN, mask_NN)
    r_b_out = blur_outer(r_b_NN, mask_NN)
    theta_c_out = np.zeros_like(theta_c_NN)
    theta_c_out.real = blur_outer(theta_c_NN.real, mask_NN)
    theta_c_out.imag = blur_outer(theta_c_NN.imag, mask_NN)
    # Scale relative to the reference 512 grid size
    scaler = NN/512
    r_a_out *= scaler
    r_b_out *= scaler
    theta_c_out *= scaler
    xmod_out *= scaler
    ymod_out *= scaler
    # Sample the blurred fields back at the individual point locations
    r_a_ff = r_a_out[r0_NN[:,0], r0_NN[:,1]]
    r_b_ff = r_b_out[r0_NN[:,0], r0_NN[:,1]]
    theta_c_ff = np.angle(theta_c_out[r0_NN[:,0], r0_NN[:,1]])
    xmod_ff = xmod_out[:, r0_NN[:,0], r0_NN[:,1]]
    ymod_ff = ymod_out[:, r0_NN[:,0], r0_NN[:,1]]
    # Compute actual pointwise motion: each point traces a small rotated ellipse
    ell_x = r_a_ff[None, :] * (np.cos(t) - 1.0)[:,None]
    ell_y = r_b_ff[None, :] * np.sin(t)[:,None]
    dx = np.cos(theta_c_ff)[None,:] * ell_x - np.sin(theta_c_ff)[None,:] * ell_y + xmod_ff
    dy = np.sin(theta_c_ff)[None,:] * ell_x + np.cos(theta_c_ff)[None,:] * ell_y + ymod_ff
    # Final point cloud motion paths for RV and LV (append a zero z column)
    r = r0[None,...] + np.stack((dx, dy),2)
    r = np.concatenate([ r, np.zeros_like(r[:,:,:1]) ], 2)
    return {'r': r,
            't': t,
            'r_a': r_a_out,
            'r_b': r_b_out,
            'theta': np.angle(theta_c_out),
            'theta_c': theta_c_out,
            'xmod': xmod_out,
            'ymod': ymod_out,
            'full_mask': full_mask,
            'r0_lv': r0_lv}
import torch
import torch.nn as nn
import numpy as np
# CoordConv class from https://github.com/walsvid/CoordConv/blob/master/coordconv.py
# I actually changed it almost completely because torch has a meshgrid function already, so no real point in not using it
# CoordConv class from https://github.com/walsvid/CoordConv/blob/master/coordconv.py
# I actually changed it almost completely because torch has a meshgrid function already, so no real point in not using it
class AddCoords(nn.Module):
    """Prepend normalized coordinate channels (CoordConv) to a tensor.

    For rank 2, two channels (y, x), each spanning exactly [-1, 1] along its
    own axis, are concatenated in front of the input channels; for rank 3
    there are three (z, y, x). With ``with_r=True`` an additional radius
    channel sqrt(sum of squared coords) is prepended as well.
    """

    def __init__(self, rank, with_r=False, use_cuda=True):
        """
        Args:
            rank: spatial dimensionality of the input (2 or 3).
            with_r: also prepend a radial-distance channel.
            use_cuda: kept for interface compatibility; device placement
                actually follows the input tensor in forward().
        """
        super(AddCoords, self).__init__()
        self.rank = rank
        self.with_r = with_r
        self.use_cuda = use_cuda

    def forward(self, input_tensor):
        """Return input_tensor with coordinate channels concatenated at dim 1.

        Raises:
            NotImplementedError: if self.rank is not 2 or 3.
        """
        if self.rank == 2:
            batch_size_shape, channel_in_shape, dim_y, dim_x = input_tensor.shape
            yy_range = torch.arange(dim_y) - dim_y/2.0
            xx_range = torch.arange(dim_x) - dim_x/2.0
            # This scales to -1 to 1 (the +1/(N-1) recenters after the
            # floor-ish N/2 shift so the endpoints land exactly on +-1)
            yy_range /= (dim_y-1)/2.0
            xx_range /= (dim_x-1)/2.0
            yy_range += 1/(dim_y-1)
            # BUG FIX: was 1/(dim_y-1); the x axis must use its own length,
            # otherwise non-square inputs got a skewed x channel (the 3D
            # branch below already did this correctly).
            xx_range += 1/(dim_x-1)
            grid_yy, grid_xx = torch.meshgrid(yy_range, xx_range)
            grid_yy = grid_yy.repeat(batch_size_shape, 1, 1, 1)
            grid_xx = grid_xx.repeat(batch_size_shape, 1, 1, 1)
            grid_yy = grid_yy.to(input_tensor.device)
            grid_xx = grid_xx.to(input_tensor.device)
            out = torch.cat([grid_yy, grid_xx, input_tensor], dim=1)
            if self.with_r:
                rr = torch.sqrt(torch.pow(grid_yy, 2) + torch.pow(grid_xx, 2))
                out = torch.cat([rr, out], dim=1)
        elif self.rank == 3:
            batch_size_shape, channel_in_shape, dim_z, dim_y, dim_x = input_tensor.shape
            zz_range = torch.arange(dim_z) - dim_z/2.0
            yy_range = torch.arange(dim_y) - dim_y/2.0
            xx_range = torch.arange(dim_x) - dim_x/2.0
            # This scales to -1 to 1
            zz_range /= (dim_z-1)/2.0
            yy_range /= (dim_y-1)/2.0
            xx_range /= (dim_x-1)/2.0
            zz_range += 1/(dim_z-1)
            yy_range += 1/(dim_y-1)
            xx_range += 1/(dim_x-1)
            grid_zz, grid_yy, grid_xx = torch.meshgrid(zz_range, yy_range, xx_range)
            grid_zz = grid_zz.repeat(batch_size_shape, 1, 1, 1, 1)
            grid_yy = grid_yy.repeat(batch_size_shape, 1, 1, 1, 1)
            grid_xx = grid_xx.repeat(batch_size_shape, 1, 1, 1, 1)
            grid_zz = grid_zz.to(input_tensor.device)
            grid_yy = grid_yy.to(input_tensor.device)
            grid_xx = grid_xx.to(input_tensor.device)
            out = torch.cat([grid_zz, grid_yy, grid_xx, input_tensor], dim=1)
            if self.with_r:
                rr = torch.sqrt(torch.pow(grid_zz, 2) + torch.pow(grid_yy, 2) + torch.pow(grid_xx, 2))
                out = torch.cat([rr, out], dim=1)
        else:
            raise NotImplementedError
        return out
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from matplotlib import animation, rc
from IPython.display import HTML
import pandas as pd
import seaborn as sns
import torch
# Make plots to quickly compare model data to the reference data
def get_ref_plots(ref_data, model, device, batch_size = 16, dpi = 200):
    """Run `model` over the reference set and render four comparison plots.

    Args:
        ref_data: dict with numpy arrays 'x' (inputs) and 'y' (targets).
        model: torch module mapping x batches to y predictions.
        device: torch device to run inference on.
        batch_size: inference batch size.
        dpi: resolution of the rendered matplotlib figures.

    Returns:
        Four uint8 RGB arrays in CHW layout (each thumbnailed to <= 512 px):
        scatter regression, hexbin regression, per-frame error line plot,
        and a Bland-Altman style agreement plot.
    """
    model.eval()
    all_y1 = []
    N = ref_data['x'].shape[0]
    # NOTE(review): when N is an exact multiple of batch_size this loops one
    # extra, empty batch -- confirm model(x) tolerates an empty batch.
    N_batches = N // batch_size + 1
    with torch.no_grad():
        for i in range(N_batches):
            x = ref_data['x'][i*batch_size:(i+1)*batch_size]
            y_true = ref_data['y'][i*batch_size:(i+1)*batch_size]
            x = torch.from_numpy(x).to(device)
            y_true = torch.from_numpy(y_true).to(device)
            # print(x.shape)
            y_pred = model(x)
            all_y1.append(y_pred.detach().cpu().numpy())
    all_y1 = np.vstack(all_y1)
    all_y0 = ref_data['y']
    # Reshape to (N, 2, 25): presumably 2 coordinate components x 25 frames
    # (25 is hard-coded to match the tracker output length) -- TODO confirm
    all_y0 = all_y0.reshape([all_y0.shape[0], 2, 25])
    all_y1 = all_y1.reshape([all_y1.shape[0], 2, 25])
    ####### Plot 1: true vs. predicted scatter with identity line
    sns.set_context('poster')
    sns.set_style("ticks")
    fig = plt.figure(figsize=(12,12), dpi = dpi)
    plt.scatter(all_y0.ravel(), all_y1.ravel(), marker='x', color='r', alpha=0.2)
    plt.plot([-10, 10], [-10, 10])
    plt.axhline(color = '0.5', linestyle=':', zorder = 0)
    plt.axvline(color = '0.5', linestyle=':', zorder = 0)
    fig.tight_layout()
    # To remove the huge white borders
    # fig.gca().margins(0)
    fig.canvas.draw()
    # NOTE(review): tostring_rgb() is deprecated in newer matplotlib
    # (replaced by buffer_rgba) -- verify against the pinned version.
    image_from_plot = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    regression1 = image_from_plot.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    plt.close()
    ####### Plot 2: same comparison as a log-scaled hexbin density
    fig = plt.figure(figsize=(12,12), dpi = dpi)
    plt.hexbin(all_y0.ravel(), all_y1.ravel(), gridsize = 40, cmap='Reds', bins='log', extent=(-10,10,-10,10))
    plt.ylim(-10,10)
    plt.xlim(-10,10)
    plt.plot([-10, 10], [-10, 10], 'g:')
    fig.tight_layout()
    # To remove the huge white borders
    # fig.gca().margins(0)
    fig.canvas.draw()
    image_from_plot = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    regression2 = image_from_plot.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    plt.close()
    ####### Plot 3: mean Euclidean error per frame (+- sd band)
    sns.set_style("whitegrid")
    fig = plt.figure(figsize=(10,8), dpi = dpi)
    diff = all_y0 - all_y1
    diff = np.hypot(diff[:,0], diff[:,1])
    df = pd.DataFrame(diff).melt()
    # NOTE(review): ci='sd' is deprecated in newer seaborn (errorbar='sd')
    sns.lineplot(x="variable", y="value", data=df, ci='sd')
    plt.ylim((0,4))
    fig.tight_layout()
    # To remove the huge white borders
    # fig.gca().margins(0)
    fig.canvas.draw()
    image_from_plot = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    regression3 = image_from_plot.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    plt.close()
    ####### Plot 4: Bland-Altman agreement (mean vs. difference)
    sns.set_context('poster')
    sns.set_style("ticks")
    fig = plt.figure(figsize=(10,6), dpi = dpi)
    data0, data1 = all_y0.ravel(), all_y1.ravel()
    mean = np.mean([data0, data1], axis=0)
    diff = data0 - data1  # Difference between data1 and data2
    md = np.mean(diff)  # Mean of the difference
    sd = np.std(diff, axis=0)  # Standard deviation of the difference
    plt.scatter(mean, diff, marker='x', color='#4527a0', alpha=0.1)
    plt.axhline(md, color='gray', linestyle='--')
    # 1.96*sd lines mark the conventional 95% limits of agreement
    plt.axhline(md + 1.96*sd, color='gray', linestyle=':')
    plt.axhline(md - 1.96*sd, color='gray', linestyle=':')
    plt.xlim(-10, 10)
    plt.ylim(-4, 4)
    sns.despine()
    # Annotate the numeric value of each horizontal line at the right margin
    plt.gca().annotate('{:.2f}'.format(np.round(md, 2)),
                       xy=(12, md),
                       horizontalalignment='right',
                       verticalalignment='center',
                       fontsize=24,
                       xycoords='data',
                       annotation_clip=False)
    plt.gca().annotate('{:.2f}'.format(np.round(md + 1.96*sd, 2)),
                       xy=(12, md + 1.96*sd),
                       horizontalalignment='right',
                       verticalalignment='center',
                       fontsize=24,
                       xycoords='data',
                       annotation_clip=False)
    plt.gca().annotate('{:.2f}'.format(np.round(md - 1.96*sd, 2)),
                       xy=(12, md - 1.96*sd),
                       horizontalalignment='right',
                       verticalalignment='center',
                       fontsize=24,
                       xycoords='data',
                       annotation_clip=False)
    fig.tight_layout()
    fig.canvas.draw()
    image_from_plot = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    regression4 = image_from_plot.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    plt.close()
    # Downscale each rendered figure to <= 512 px and convert to CHW layout
    image = Image.fromarray(regression1, 'RGB')
    image.thumbnail((512,512), Image.BICUBIC)
    regression1 = np.array(image).transpose([2,0,1])
    image = Image.fromarray(regression2, 'RGB')
    image.thumbnail((512,512), Image.BICUBIC)
    regression2 = np.array(image).transpose([2,0,1])
    image = Image.fromarray(regression3, 'RGB')
    image.thumbnail((512,512), Image.BICUBIC)
    regression3 = np.array(image).transpose([2,0,1])
    image = Image.fromarray(regression4, 'RGB')
    image.thumbnail((512,512), Image.BICUBIC)
    regression4 = np.array(image).transpose([2,0,1])
    return regression1, regression2, regression3, regression4
# Make an figure with image data, and 1 or 2 tracks overlayed on top
def plot_lines(ims, tags0, tags1 = None, figsize=[8, 8], nframes=25, scale = 1.0, shift = None):
    """Show the first frame of `ims` with one or two point tracks overlaid.

    Each tag array is flat: all x coordinates followed by all y coordinates.
    Points are mapped into pixel units via `scale` and `shift`; tags0 is
    drawn in green, tags1 (optional) in red.

    NOTE(review): `figsize=[8, 8]` is a mutable default argument -- harmless
    here since it is never mutated, but a tuple would be safer. `nframes`
    is unused in the visible portion of this function.
    """
    ims = np.squeeze(ims)
    if shift is None:
        # Default shift: image center (track coords presumably centered at 0
        # -- TODO confirm against callers)
        shift = ims.shape[-1]/2.0
    fig, axarr = plt.subplots(1, 1, squeeze=False, figsize=figsize)
    # fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)
    fig.subplots_adjust(0, 0, 1, 1)
    im = axarr[0, 0].imshow(ims[0], cmap="gray")
    tags0 = np.squeeze(np.array(tags0))
    # First half of the flat tag vector is x, second half is y
    tagmid = tags0.size // 2
    xp = np.array(tags0[:tagmid]) * scale + shift
    yp = np.array(tags0[tagmid:]) * scale + shift
    pts_all, = axarr[0, 0].plot(
        xp, yp, color='g', marker="x", markeredgecolor="g", mew=2, markersize=6
    )
    if tags1 is not None:
        # Optional second track (e.g. a reference vs. a prediction)
        tags1 = np.squeeze(np.array(tags1))
        tagmid = tags1.size // 2
        xp = np.array(tags1[:tagmid]) * scale + shift
        yp = np.array(tags1[tagmid:]) * scale + shift
        pts_all, = axarr[0, 0].plot(
            xp, yp, color='r', marker="x", markeredgecolor="r", mew=2, markersize=6
        )
    # Hide the axes and frame so only the image + overlay are visible
    axarr[0, 0].axes.get_xaxis().set_visible(False)
    axarr[0, 0].axes.get_yaxis().set_visible(False)
    axarr[0, 0].set_frame_on(False)
| {"/tagsim/sim_fullmotion.py": ["/tagsim/base_im_generation.py", "/tagsim/SimObject.py", "/tagsim/SimPSD.py"], "/torch_track/train.py": ["/torch_track/loss_utils.py", "/torch_track/utils.py", "/torch_track/datagen.py"], "/torch_track/network_resnet2.py": ["/torch_track/network_utils.py"], "/tagsim/sim_cardiacmotion.py": ["/tagsim/base_im_generation.py", "/tagsim/sim_fullmotion.py", "/tagsim/SimObject.py", "/tagsim/SimPSD.py"], "/tagsim/pygrid_internal/pygrid.py": ["/tagsim/pygrid_internal/grid_kernel.py", "/tagsim/pygrid_internal/utils.py"], "/tagsim/SimPSD.py": ["/tagsim/SimObject.py"]} |
import numpy as np
class GridKernel:
    """Gridding-kernel construction and deappodization windows.

    Attributes:
        kx: kernel sample radii; the second half extends past the kernel
            edge and pairs with the zero padding in ky.
        ky: kernel values at those radii (zero over the padded half).
        Dx, Dy: per-axis deappodization coordinates and windows, one entry
            per dimension of grid_params["imsize_os"].
    """

    def __init__(self, grid_params):
        self.krad = grid_params["krad"]
        self.grid_mod = grid_params["grid_mod"]
        self.grid_params = grid_params
        self.calc_kernel(grid_params)
        self.fourier_demod(grid_params)

    def calc_kernel(self, grid_params):
        """Dispatch to the builder named by grid_params['kernel_type']."""
        builders = {
            "kb": self.calc_kernel_kb,
            "tri": self.calc_kernel_tri,
            "ones": self.calc_kernel_ones,
            "gauss": self.calc_kernel_gauss,
        }
        builder = builders.get(grid_params["kernel_type"])
        if builder is not None:
            builder(grid_params)

    def _store_kernel(self, radii, values, grid_mod):
        """Append a zero tail past the kernel edge and store kx/ky."""
        self.kx = np.concatenate((radii, radii + radii[-1] + radii[1]))
        self.ky = np.concatenate((values, np.zeros(grid_mod)))

    def calc_kernel_kb(self, grid_params):
        """Kaiser-Bessel kernel (Beatty et al. formulation).

        Sets the self.kx and self.ky attributes.
        """
        over_samp = grid_params["over_samp"]
        kr = grid_params["krad"]
        kw0 = 2.0 * kr / over_samp
        beta = np.pi * np.sqrt((kw0 * (over_samp - 0.5)) ** 2 - 0.8)
        radii = np.linspace(0, kr, grid_params["grid_mod"])
        bessel_arg = np.sqrt(1 - (radii / kr) ** 2)
        values = np.i0(beta * bessel_arg)
        values = values / values[0]  # normalize so the center value is 1
        self._store_kernel(radii, values, grid_params["grid_mod"])

    def calc_kernel_tri(self, grid_params):
        """Triangle kernel. Sets the self.kx and self.ky attributes."""
        kr = grid_params["krad"]
        radii = np.linspace(0, kr, grid_params["grid_mod"])
        values = 1.0 - radii / kr
        self._store_kernel(radii, values, grid_params["grid_mod"])

    def calc_kernel_ones(self, grid_params):
        """Flat (all-ones) kernel. Sets the self.kx and self.ky attributes."""
        kr = grid_params["krad"]
        radii = np.linspace(0, kr, grid_params["grid_mod"])
        values = np.ones(radii.size)
        self._store_kernel(radii, values, grid_params["grid_mod"])

    def calc_kernel_gauss(self, grid_params):
        """Gaussian kernel (sigma = krad/3). Sets self.kx and self.ky."""
        kr = grid_params["krad"]
        radii = np.linspace(0, kr, grid_params["grid_mod"])
        sig = grid_params["krad"] / 3
        values = 1.0 / (sig * np.sqrt(2 * np.pi)) * np.exp(-0.5 * (radii / sig) ** 2.0)
        self._store_kernel(radii, values, grid_params["grid_mod"])

    def fourier_demod(self, grid_params):
        """Fourier-transform the kernel into per-axis deappodization windows.

        For each oversampled image dimension, evaluates the kernel's DTFT on
        that axis and stores the coordinates/window in self.Dx / self.Dy.
        """
        self.Dx = []
        self.Dy = []
        for axis_len in grid_params["imsize_os"]:
            coords = np.arange(axis_len) - axis_len // 2.0
            window = np.zeros(coords.size, np.complex64)
            # Sum the cosine series term by term (factor 2: radial kernel
            # samples stand in for both the +kx and -kx contributions)
            for k in range(1, self.kx.size):
                window += self.ky[k] * 2 * np.exp(2 * 1j * np.pi * coords / axis_len * self.kx[k])
            window = window.real
            window = window + self.ky[0]
            window = window / self.kx.size
            self.Dx.append(coords)
            self.Dy.append(window)

    def apply_deapp(self, A):
        """Deappodize image A in place by dividing out the separable window.

        No size checks are performed; A must match imsize_os.
        """
        if self.grid_params['grid_dims'] == 2:
            sep_window = self.Dy[1][np.newaxis, :] * self.Dy[0][:, np.newaxis]
            A /= sep_window
        elif self.grid_params['grid_dims'] == 3:
            sep_window = (self.Dy[2][np.newaxis, np.newaxis, :]
                          * self.Dy[1][np.newaxis, :, np.newaxis]
                          * self.Dy[0][:, np.newaxis, np.newaxis])
            A /= sep_window
import numpy as np
from .c_grid import c_grid, c_igrid
from .grid_kernel import GridKernel
from .utils import roundup4, zeropad, crop, check_traj_dens
from time import time
import os
try:
import cupy as cp
CUDA_AVAILABLE = True
except ImportError:
CUDA_AVAILABLE = False
class Gridder:
    """This is the main gridding class

    It can mostly just be called with Gridder(imsize=(Nz, Ny, Nx)) or imsize=(Ny,Nx)
    """

    def __init__(self, **kwargs):
        """Build grid parameters, the gridding kernel, and (optionally) the
        CUDA kernels.

        Keyword Args:
            use_gpu (bool): prepare CUDA kernels when cupy is available.
            kernel_type (str): 'kb', 'tri', 'ones' or 'gauss'.
            krad (float): kernel radius in grid units.
            grid_mod (int): number of kernel lookup-table samples.
            over_samp (float): grid oversampling factor.
            grid_dims (int): 2 or 3 (overridden by len(imsize) below).
            imsize (tuple): output image size per dimension.
        """
        self.traj = 0
        self.dens = 0
        self.use_gpu = bool(kwargs.get("use_gpu", True))
        self.grid_params = {}
        self.grid_params["kernel_type"] = kwargs.get("kernel_type", "kb")
        self.grid_params["krad"] = kwargs.get("krad", 1.5)
        self.grid_params["grid_mod"] = kwargs.get("grid_mod", 32)
        self.grid_params["over_samp"] = kwargs.get("over_samp", 1.5)
        self.grid_params["grid_dims"] = kwargs.get("grid_dims", 2)
        self.grid_params["imsize"] = list(kwargs.get("imsize", (256, 256)))
        # Round each gridded dimension up to a multiple of 4
        for i in range(1, self.grid_params["grid_dims"] + 1):
            self.grid_params["imsize"][-i] = roundup4(self.grid_params["imsize"][-i])
        # Basically get rid of manual control of grid_dims
        if self.grid_params["grid_dims"] != len(self.grid_params["imsize"]):
            self.grid_params["grid_dims"] = len(self.grid_params["imsize"])
            # print('imsize a different dimension than grid_dims, going with grid_dims = %d' % self.grid_params["grid_dims"])
        # Oversampled grid size (again rounded up to a multiple of 4)
        self.grid_params["imsize_os"] = list(self.grid_params["imsize"])
        for i in range(1, self.grid_params["grid_dims"] + 1):
            self.grid_params["imsize_os"][-i] = roundup4(
                self.grid_params["imsize_os"][-i] * self.grid_params["over_samp"]
            )
        self.kernel = GridKernel(self.grid_params)
        # TODO: there should be user control as well, in case cupy is installed without GPU
        # there is functionality to do this built into cupy, but then cupy needs to be installed
        # which I am not sure is a given for all users I might be dealing with
        if CUDA_AVAILABLE and self.use_gpu:
            self.prep_cuda()

    def prep_cuda(self):
        """ Reads in compute kernels for CUDA and copies som eof the smaller fixed items
        onto the GPU.
        """
        # # Load the cuda .ptx file, this isnt really any faster than the RawKernel implementation
        # # but maybe less compiling involved?
        # cubin_filename = os.path.join(os.path.dirname(__file__), "cuda", "cu_gridder_standalone.ptx")
        # cuda_module = cp.RawModule(path = cubin_filename)
        # self.igrid3d_kern = cuda_module.get_function("igrid3d")
        # self.grid2d_kern = cuda_module.get_function("grid2d")
        # self.grid3d_kern = cuda_module.get_function("grid3d")
        # self.deapp2_kern = cuda_module.get_function("deapp2")
        # self.deapp3_kern = cuda_module.get_function("deapp3")
        # Load in all the necessary kernels with cupy.RawKernel()
        code_filename = os.path.join(os.path.dirname(__file__), "cuda", "cu_gridder.cu")
        with open(code_filename, "r") as f:
            code = f.read()
        self.igrid2d_kern = cp.RawKernel(code, "igrid2d")
        self.igrid3d_kern = cp.RawKernel(code, "igrid3d")
        self.grid2d_kern = cp.RawKernel(code, "grid2d")
        self.grid3d_kern = cp.RawKernel(code, "grid3d")
        self.deapp2_kern = cp.RawKernel(code, "deapp2")
        self.deapp3_kern = cp.RawKernel(code, "deapp3")
        # Transfer gridding kernel and image dimensions to GPU
        self.kernel_g = cp.asarray(self.kernel.ky.astype(np.float32))
        self.N_g = cp.asarray(self.grid_params["imsize_os"])
        # Transfer 1D deapp windows onto the GPU
        self.D0_g = cp.asarray(self.kernel.Dy[0])
        self.D1_g = cp.asarray(self.kernel.Dy[1])
        if self.grid_params["grid_dims"] == 3:
            self.D2_g = cp.asarray(self.kernel.Dy[2])
        # print('Prepped CUDA')

    def set_traj_dens(self, traj, dens):
        """Sets object trajectory and densty arrays, and ends to GPU if CUDA is on

        Args:
            traj (float32 ndarray): trajectory, will eventually get reshaped to (Nx3)
            dens (float32 ndarray): density, will eventually get reshaped to (Nx1)

        Inputs go through a checker that should get any formatting problems sorted out,
        but its not guaranteed
        """
        self.data_shape = dens.shape
        self.traj, self.dens = check_traj_dens(traj, dens)
        if CUDA_AVAILABLE and self.use_gpu:
            self.traj_g = cp.asarray(self.traj)
            self.dens_g = cp.asarray(self.dens)

    def cu_im2k(self, im, traj=None, dens=None, transfer_cpu=True, imspace = False):
        """GPU image -> k-space (inverse gridding).

        Args:
            im: input image (converted to complex64).
            traj, dens: optional per-call trajectory/density; otherwise the
                arrays from a prior set_traj_dens() are used.
            transfer_cpu: return a host ndarray instead of a cupy array.
            imspace: if True, `im` is FFT'd from image space first.

        Returns:
            k-space data shaped like the density array.
        """
        # Set trajectory and density, ideally this is done previously, though it doesn't really matter
        if traj is not None or dens is not None:
            # NOTE(review): this branch assumes BOTH traj and dens are given;
            # dens=None here would fail on dens.shape -- confirm callers.
            self.data_shape = dens.shape
            traj, dens = check_traj_dens(traj, dens)
            traj_g = cp.asarray(traj)
            dens_g = cp.asarray(dens)
        else:
            traj_g = self.traj_g
            dens_g = self.dens_g
        # Transfer image to the gpu and zeropad by the oversampling factor
        im_g = cp.asarray(im.astype(np.complex64))
        if imspace:
            im_g = cp.fft.ifftshift(cp.fft.ifftn(cp.fft.fftshift(im_g)))
        im_g = zeropad(im_g, self.grid_params["imsize_os"], use_gpu=True)
        # Apply GPU deappodization
        if self.grid_params["grid_dims"] == 3:
            self.deapp3_kern(
                (64, 64, 64), (8, 8, 8), (im_g, self.N_g, self.D0_g, self.D1_g, self.D2_g)
            )
        elif self.grid_params["grid_dims"] == 2:
            self.deapp2_kern(
                (64, 64), (8, 8), (im_g, self.N_g, self.D0_g, self.D1_g)
            )
        # Perform FFT with cupy (cuFFT)
        im_g = cp.fft.ifftshift(cp.fft.fftn(cp.fft.fftshift(im_g)))
        if self.grid_params["grid_dims"] == 3:
            kern_grid = self.igrid3d_kern
        elif self.grid_params["grid_dims"] == 2:
            kern_grid = self.igrid2d_kern
        # Run through the CUDA inverse gridding kernel
        kdata_g = cu_igrid(
            im_g, self.grid_params, traj_g, dens_g, self.kernel_g, self.N_g, kern_grid, self.data_shape
        )
        # Transfer back to the host if desired
        if transfer_cpu:
            out = kdata_g.get()
        else:
            out = kdata_g
        return out

    def cu_k2im(self, data, traj=None, dens=None, transfer_cpu=True, imspace = False):
        """GPU k-space -> image (gridding reconstruction).

        Mirrors cu_im2k: grid, FFT, deappodize, crop to imsize; `imspace`
        returns the FFT of the image rather than the image itself.
        """
        if traj is not None or dens is not None:
            traj, dens = check_traj_dens(traj, dens)
            traj_g = cp.asarray(traj)
            dens_g = cp.asarray(dens)
        else:
            traj_g = self.traj_g
            dens_g = self.dens_g
        # t0 = time()
        data_g = cp.asarray(data.astype(np.complex64))
        if self.grid_params["grid_dims"] == 3:
            kern_grid = self.grid3d_kern
        elif self.grid_params["grid_dims"] == 2:
            kern_grid = self.grid2d_kern
        im_g = cu_grid(
            data_g, self.grid_params, traj_g, dens_g, self.kernel_g, self.N_g, kern_grid
        )
        # t1 = time()
        im_g = cp.fft.ifftshift(cp.fft.ifftn(cp.fft.fftshift(im_g)))
        # t2 = time()
        if self.grid_params["grid_dims"] == 3:
            self.deapp3_kern(
                (64, 64, 64), (8, 8, 8), (im_g, self.N_g, self.D0_g, self.D1_g, self.D2_g)
            )
        elif self.grid_params["grid_dims"] == 2:
            self.deapp2_kern(
                (64, 64), (8, 8), (im_g, self.N_g, self.D0_g, self.D1_g)
            )
        # t3 = time()
        im_g = crop(im_g, self.grid_params["imsize"], use_gpu=True)
        # t4 = time()
        if imspace:
            im_g = cp.fft.ifftshift(cp.fft.fftn(cp.fft.fftshift(im_g)))
        if transfer_cpu:
            out = im_g.get()
        else:
            out = im_g
        # t5 = time()
        # return out, (t0, t1, t2, t3, t4, t5)
        return out

    def im2k(self, im, traj=None, dens=None, nthreads=0, imspace = False):
        """CPU image -> k-space via the C gridding extension."""
        if traj is None:
            traj = self.traj
        if dens is None:
            dens = self.dens
        traj, dens = check_traj_dens(traj, dens)
        if imspace:
            im = np.fft.ifftshift(np.fft.ifftn(np.fft.fftshift(im)))
        im = zeropad(im, self.grid_params["imsize_os"])
        self.kernel.apply_deapp(im)
        im = np.fft.ifftshift(np.fft.fftn(np.fft.fftshift(im)))
        kdata = c_igrid(im, self.grid_params, traj, dens, self.kernel.ky, nthreads)
        # kdata = cu_igrid(im, self.grid_params, traj, dens, self.kernel.ky)
        return kdata

    def k2im(self, data, traj=None, dens=None, nthreads=0, imspace = False):
        """CPU k-space -> image via the C gridding extension."""
        if traj is None:
            traj = self.traj
        if dens is None:
            dens = self.dens
        traj, dens = check_traj_dens(traj, dens)
        im = c_grid(data, self.grid_params, traj, dens, self.kernel.ky, nthreads)
        im = np.fft.ifftshift(np.fft.ifftn(np.fft.fftshift(im)))
        self.kernel.apply_deapp(im)
        im = crop(im, self.grid_params["imsize"])
        if imspace:
            im = np.fft.ifftshift(np.fft.fftn(np.fft.fftshift(im)))
        return im
def cu_igrid(kspace_g, grid_params, traj_g, dens_g, kernel_g, N_g, cu_kernel, data_shape = None):
    """Launch the CUDA inverse-gridding kernel (gridded k-space -> sample values).

    All ``*_g`` arguments are expected to already live on the GPU (cupy
    arrays).  The output buffer defaults to the shape of the density array
    unless ``data_shape`` is given explicitly.
    """
    n_points = len(dens_g)
    out_shape = dens_g.shape if data_shape is None else data_shape
    data_g = cp.zeros(out_shape, np.complex64)
    # fixed launch configuration: 4096 blocks x 128 threads
    cu_kernel(
        (4096,),
        (128,),
        (
            data_g,
            kspace_g,
            traj_g,
            dens_g,
            n_points,
            N_g,
            np.float32(grid_params["krad"]),
            grid_params["grid_mod"],
            kernel_g,
        ),
    )
    return data_g
def cu_grid(data_g, grid_params, traj_g, dens_g, kernel_g, N_g, cu_kernel):
    """Launch the CUDA gridding kernel (sample values -> oversampled k-space grid).

    Real and imaginary parts are accumulated in two separate float32
    buffers and combined into one complex array at the end.
    """
    n_points = len(dens_g)
    grid_shape = grid_params["imsize_os"]
    real_part = cp.zeros(grid_shape, np.float32)
    imag_part = cp.zeros(grid_shape, np.float32)
    cu_kernel(
        (4096,),
        (128,),
        (
            data_g,
            real_part,
            imag_part,
            traj_g,
            dens_g,
            n_points,
            N_g,
            np.float32(grid_params["krad"]),
            grid_params["grid_mod"],
            kernel_g,
        ),
    )
    return real_part + 1j * imag_part
| {"/tagsim/sim_fullmotion.py": ["/tagsim/base_im_generation.py", "/tagsim/SimObject.py", "/tagsim/SimPSD.py"], "/torch_track/train.py": ["/torch_track/loss_utils.py", "/torch_track/utils.py", "/torch_track/datagen.py"], "/torch_track/network_resnet2.py": ["/torch_track/network_utils.py"], "/tagsim/sim_cardiacmotion.py": ["/tagsim/base_im_generation.py", "/tagsim/sim_fullmotion.py", "/tagsim/SimObject.py", "/tagsim/SimPSD.py"], "/tagsim/pygrid_internal/pygrid.py": ["/tagsim/pygrid_internal/grid_kernel.py", "/tagsim/pygrid_internal/utils.py"], "/tagsim/SimPSD.py": ["/tagsim/SimObject.py"]} |
68,208 | mloecher/tag_tracking | refs/heads/main | /tagsim/base_im_generation.py | import numpy as np
import os
import random
from PIL import Image
from scipy import interpolate
from skimage.morphology import opening, disk
from skimage.filters import gaussian
from scipy.signal import windows
from .perlin import generate_perlin_noise_2d
# This is a function that needs to be removed eventually, the temporal waveform
# should probably be based on the speed
def get_temporal_waveform(Nt):
    """Build a random monotone temporal phase waveform of length ``Nt``.

    A random quadratic rate curve gets a cosine-window "burst" added to it;
    the cumulative sum is then normalized so the phase ends at 2*pi.
    NOTE: slated for removal -- the waveform should eventually be derived
    from the motion speed instead.
    """
    rate = np.hstack((0.0, gen_1D_poly(Nt, [0, 1], order=2, scale=1.0)))
    # burst occupies a random 50-90% of the cycle
    burst_len = int((np.random.rand() * (0.9 - 0.5) + 0.5) * Nt)
    burst = windows.cosine(burst_len)
    burst = np.hstack((0.0, burst, np.zeros(Nt - burst.size)))
    rate += 10.0 * np.random.rand() * burst
    phase = np.cumsum(rate)
    phase *= 2 * np.pi / phase[-1]
    return phase[:-1]
# Same as above, I think I can do better than this, but this fives the temporal
# plot
def get_temporal_waveform2(Nt):
    """Build a second flavor of random temporal phase waveform of length ``Nt``.

    The instantaneous rate is a cosine burst followed by a low plateau with
    a hamming bump, tapered down with a tanh ramp and dusted with gaussian
    noise; its cumulative sum is normalized to end at 2*pi and truncated
    back to ``Nt`` samples.
    """
    n_total = Nt + np.random.randint(10)
    # main burst covers a random 40-90% of the base length
    burst_len = int(np.random.uniform(0.4, 0.9) * Nt)
    burst = windows.cosine(burst_len)
    if np.random.rand() < 0.8:  # usually drop the leading sample
        burst = burst[1:]
    n_tail = n_total - burst.size
    plateau_h = np.random.uniform(0.0, 0.3)
    tail = np.ones(n_tail) * plateau_h
    tail += np.random.uniform(0.0, 0.1) * windows.hamming(n_tail)
    # clamp the falling half of the burst so it never dips below the plateau
    clamped = np.where(burst < plateau_h, plateau_h, burst)
    burst[burst_len // 2:] = clamped[burst_len // 2:]
    burst[-1] = (burst[-2] + plateau_h) / 1.9
    rate = np.hstack((0.0, burst, tail))
    # smooth tanh ramp down to zero over the last few samples
    ramp = (np.tanh(np.linspace(np.pi, -np.random.rand() * np.pi, np.random.randint(3, 7))) + 1) / 2
    ramp = np.hstack([ramp, np.zeros(np.random.randint(3))])
    ramp = np.hstack((np.ones(rate.size - ramp.size), ramp))
    rate *= ramp
    rate += np.random.uniform(0.0, 0.03) * np.random.standard_normal(rate.size)
    rate[0] = 0
    phase = np.cumsum(rate)
    phase *= 2 * np.pi / phase[-1]
    return phase[:Nt]
# Generate a random 1D function by random polynomial coefficients
def gen_1D_poly(Np, lims, order=3, scale = 1.0):
    """Sample a random polynomial curve of ``Np`` points.

    Coefficients are drawn from a normal distribution (times ``scale``)
    and the resulting curve is rescaled into a random window inside
    ``lims``.
    """
    x = np.linspace(0, 1, Np)
    exponents = np.arange(order + 1)
    # Vandermonde-style design matrix: columns are x**0 .. x**order
    A = np.column_stack([x ** p for p in exponents])
    coeff = scale * np.random.standard_normal(exponents.size)
    b = A @ coeff
    span = lims[1] - lims[0]
    win = np.random.rand() * span
    cen = np.random.rand() * (span - win) + lims[0] + win / 2
    # normalize to [0, 1], then map into [cen - win/2, cen + win/2]
    b -= b.min()
    b /= b.max()
    b *= win
    b += cen - win / 2
    return b
# Generate a random 1D poly like above, and use it to map input values to output
# values (i.e. for T1 and T2 calcualtion of the phantom from grayscale values)
def map_1Dpoly(xnew, lims, order=3, Np=256):
    """Map values in ``xnew`` (expected within [0, 1]) through a random polynomial.

    Used e.g. to derive T1/T2 maps from grayscale intensities: a random
    polynomial lookup table spanning a random window inside ``lims`` is
    built on ``Np`` samples and linearly interpolated at ``xnew``.
    """
    x = np.linspace(-1, 1, Np)
    exponents = np.arange(order + 1)
    A = np.column_stack([x ** p for p in exponents])
    lut = A @ np.random.standard_normal(exponents.size)
    span = lims[1] - lims[0]
    win = np.random.rand() * span
    cen = np.random.rand() * (span - win) + lims[0] + win / 2
    # normalize to [0, 1], then map into the random target window
    lut -= lut.min()
    lut /= lut.max()
    lut *= win
    lut += cen - win / 2
    sample_x = np.linspace(0, 1, lut.size)
    return interpolate.interp1d(sample_x, lut)(xnew)
# Generate random 2D field, with random polynomial coefficients, and then
# additional perlin noise.
# TODO: Add control over te perlin noise, random amplitude to it
def gen_2Dpoly(NN=256, shift=True, fit_order = 3):
    """Sample a random smooth 2D field: low-order 2D polynomial plus perlin noise.

    The field is scaled so its total range is 1.  With ``shift`` it is
    additionally offset so that its maximum equals 1.
    TODO: expose control over the perlin component (random amplitude etc.).
    """
    x = np.linspace(-1, 1, NN) + np.random.uniform(-0.5, 0.5)
    y = np.linspace(-1, 1, NN) + np.random.uniform(-0.5, 0.5)
    xv, yv = np.meshgrid(x, y, sparse=False, indexing="xy")
    # keep only exponent pairs with total degree <= fit_order
    px, py = np.meshgrid(range(fit_order + 1), range(fit_order + 1), indexing="ij")
    keep = (px + py) <= fit_order
    px, py = px[keep], py[keep]
    n_terms = px.size
    A = np.zeros((xv.size, n_terms))
    for i in range(n_terms):
        # NOTE: terms are x**px + y**py (summed, not multiplied), as in the original
        A[:, i] = (xv ** px[i] + yv ** py[i]).ravel()
    b = (A @ np.random.standard_normal(n_terms)).reshape(NN, NN)
    # overlay perlin noise at a random resolution of 2 or 4 cells per axis
    res = np.random.choice([2, 4], 2)
    b += 4.0 * generate_perlin_noise_2d((NN, NN), res)
    b /= b.max() - b.min()
    if shift:
        b += 1 - b.max()
    return b
# Generates a ellipse with random values within the given ranges
# This is mostly the same as the next function, they should be combined
#
# Essentially, the "body"-like big ellipse in the image
def gen_outer_ellipse(offset_lim=(-0.15, 0.15), radius_lim=(0.8, 1.2), NN=256):
    """Random filled ellipse mask (the large "body"-like ellipse).

    Center offsets and the two radii are drawn uniformly from the given
    limits; returns a boolean (NN, NN) mask that is True inside the ellipse.
    """
    def _draw(lim):
        return np.random.rand() * (lim[1] - lim[0]) + lim[0]

    ox, oy = _draw(offset_lim), _draw(offset_lim)
    rx, ry = _draw(radius_lim), _draw(radius_lim)
    xx, yy = np.meshgrid(
        np.linspace((-1 + ox) / rx, (1 + ox) / rx, NN),
        np.linspace((-1 + oy) / ry, (1 + oy) / ry, NN),
        indexing="xy",
    )
    return xx * xx + yy * yy <= 1
# Generates a ellipse with random values within the given ranges
# This is mostly the same as the previous function, they should be combined; the
# only difference is that this one negates the mask, excluding the inner portion
#
# Essentially, the random holes in the image
def gen_inner_ellipse(offset_lim=(-0.35, 0.35), radius_lim=(0.05, 0.2), NN=256):
    """Random ellipse "hole": boolean (NN, NN) mask that is False inside.

    Mirror of gen_outer_ellipse with the mask negated, so ANDing it with
    another mask punches a hole in it.
    """
    def _draw(lim):
        return np.random.rand() * (lim[1] - lim[0]) + lim[0]

    ox, oy = _draw(offset_lim), _draw(offset_lim)
    rx, ry = _draw(radius_lim), _draw(radius_lim)
    xx, yy = np.meshgrid(
        np.linspace((-1 + ox) / rx, (1 + ox) / rx, NN),
        np.linspace((-1 + oy) / ry, (1 + oy) / ry, NN),
        indexing="xy",
    )
    return ~(xx * xx + yy * yy <= 1)
# Generates a annulus ellipse mask, (ellipse with some thickness)
def gen_hollow_ellipse(
    offset_lim=(-0.25, 0.25),
    radius_lim=(0.2, 0.4),
    drad_lim=(0.08, 0.15),
    outer_lim=(0.1, 0.3),
    NN=256,
):
    """Random annulus ("hollow ellipse") mask of shape (NN, NN).

    Three concentric ellipses share a random center: the ring between the
    inner and main radii, and everything outside a third, larger ellipse,
    are True; the rest is False.

    Args:
        offset_lim: uniform range for the x/y center offset.
        radius_lim: uniform range for the main ellipse radii.
        drad_lim: uniform range for the ring thickness (subtracted).
        outer_lim: uniform range for the outer exclusion margin (added).
        NN: output mask size per side.

    Returns:
        Boolean ndarray of shape (NN, NN).
    """
    ox = np.random.rand() * (offset_lim[1] - offset_lim[0]) + offset_lim[0]
    oy = np.random.rand() * (offset_lim[1] - offset_lim[0]) + offset_lim[0]
    rx = np.random.rand() * (radius_lim[1] - radius_lim[0]) + radius_lim[0]
    ry = np.random.rand() * (radius_lim[1] - radius_lim[0]) + radius_lim[0]
    xx, yy = np.meshgrid(
        np.linspace((-1 + ox) / rx, (1 + ox) / rx, NN),
        np.linspace((-1 + oy) / ry, (1 + oy) / ry, NN),
        indexing="xy",
    )
    mask1 = xx * xx + yy * yy <= 1
    # inner ellipse: main radii minus a random ring thickness
    drx_inner = np.random.rand() * (drad_lim[1] - drad_lim[0]) + drad_lim[0]
    dry_inner = np.random.rand() * (drad_lim[1] - drad_lim[0]) + drad_lim[0]
    rx_inner = rx - drx_inner
    ry_inner = ry - dry_inner
    xx, yy = np.meshgrid(
        np.linspace((-1 + ox) / rx_inner, (1 + ox) / rx_inner, NN),
        np.linspace((-1 + oy) / ry_inner, (1 + oy) / ry_inner, NN),
        indexing="xy",
    )
    mask2 = xx * xx + yy * yy <= 1
    # outer ellipse: main radii plus a random margin; True OUTSIDE it
    drx_outer = np.random.rand() * (outer_lim[1] - outer_lim[0]) + outer_lim[0]
    dry_outer = np.random.rand() * (outer_lim[1] - outer_lim[0]) + outer_lim[0]
    rx_outer = rx + drx_outer
    ry_outer = ry + dry_outer
    xx, yy = np.meshgrid(
        np.linspace((-1 + ox) / rx_outer, (1 + ox) / rx_outer, NN),
        np.linspace((-1 + oy) / ry_outer, (1 + oy) / ry_outer, NN),
        indexing="xy",
    )
    mask3 = xx * xx + yy * yy > 1
    # ring (mask1 minus mask2) plus far-outside region (mask3).
    # Fix: np.int was removed in NumPy 1.24 -- use the builtin int instead.
    return (mask1.astype(int) - mask2.astype(int) + mask3.astype(int)) > 0
# Calls all of the mask functions randomly and concatenates them
def make_random_mask(odds=(0.5, 0.5, 0.5), NN=512, rseed=None):
    """Randomly compose the ellipse masks into one (NN, NN) boolean mask.

    Each entry of ``odds`` is the probability of SKIPPING the corresponding
    shape (outer ellipse, hollow ellipse, then one inner "hole" per
    remaining entry).  All generated masks are ANDed together.

    Args:
        odds: per-shape skip probabilities; entries beyond the second each
            add a potential inner hole.
        NN: mask size per side.
        rseed: optional seed applied to both ``random`` and ``np.random``.

    Returns:
        Boolean ndarray of shape (NN, NN).
    """
    if rseed is not None:
        random.seed(rseed)
        np.random.seed(rseed)
    all_mask = []
    all_mask.append(np.ones((NN, NN)))
    pick = np.random.rand()
    if pick > odds[0]:
        all_mask.append(gen_outer_ellipse(NN=NN))
    pick = np.random.rand()
    if pick > odds[1]:
        all_mask.append(gen_hollow_ellipse(NN=NN))
    for odd in odds[2:]:
        pick = np.random.rand()
        if pick > odd:
            all_mask.append(gen_inner_ellipse(NN=NN))
    # Fix: np.bool was removed in NumPy 1.24 -- the builtin bool is equivalent.
    all_mask = np.array(all_mask).astype(bool)
    final_mask = np.all(all_mask, 0)
    return final_mask
# This generates the underlying motion fields for the full volume simulation
def gen_motion_params(NN=256, rseed=None, extra_poly = 0):
    """Generate the underlying smooth motion fields for the full-volume sim.

    Returns per-pixel ellipse-path parameters (two radius fields ``r_a``,
    ``r_b`` and a rotation-angle field ``theta``) plus ``extra_poly``
    additional random fields, all attenuated by a soft radial window so
    the motion dies out away from the image center.
    """
    if rseed is not None:
        np.random.seed(rseed)
    yy, xx = np.meshgrid(
        np.linspace(-1, 1, NN, False), np.linspace(-1, 1, NN, False), indexing="ij"
    )
    rr = np.sqrt(xx * xx + yy * yy)
    # soft radial rolloff: ~1 near the center, ~0 beyond the random cutoff
    a_range = (2, 8)
    b_range = (1.1, 1.5)
    beta = np.random.rand() * (a_range[1] - a_range[0]) + a_range[0]
    cutoff = np.random.rand() * (b_range[1] - b_range[0]) + b_range[0]
    filt = np.reshape(
        0.5 + 1.0 / np.pi * np.arctan(beta * (cutoff - rr.ravel()) / cutoff), rr.shape
    )
    r_a = gen_2Dpoly(NN=NN) * filt
    # the extra scale factor controls how round the path is
    # (smaller numbers mean less round)
    roundness = gen_2Dpoly(NN=NN) * filt
    r_b = np.random.rand() * roundness
    theta_scale = np.random.rand()
    theta_field = (gen_2Dpoly(NN=NN) * 2 * np.pi) - np.pi
    theta = theta_scale * theta_field + np.random.rand() * 2 * np.pi
    extra_p = [gen_2Dpoly(NN=NN, shift=False) * filt for _ in range(extra_poly)]
    return r_a, r_b, theta, extra_p
# Load in the ppm files (from an image database that is nicely more medical
# image like)
def load_coil100(ppm_file):
    """Load a COIL-100 style PNM/PPM file and return a grayscale PIL image.

    Parses the PNM header by hand (magic, width, height, maxsample) and
    reads the raw pixel payload with numpy.

    NOTE(review): the P4 branch sets maxsample=1 which also changes the
    expected header length -- confirm against the actual dataset files.
    NOTE(review): Image.fromarray(..., mode="RGB") assumes a 3-channel
    array; the samples==1 path produces a 2-D array, which would not be
    valid RGB input -- verify single-channel files never reach this point.
    """
    with open(ppm_file, "rb") as fd:
        pnm = type("pnm", (object,), {})  ## create an empty container
        # first header line holds (at least) the magic number
        pnm.header = fd.readline().decode("ascii")
        pnm.magic = pnm.header.split()[0]
        pnm.maxsample = 1 if (pnm.magic == "P4") else 0
        # keep reading until the header has magic + width + height
        # (+ maxsample for non-P4), skipping '#' comment lines
        while len(pnm.header.split()) < 3 + (1, 0)[pnm.maxsample]:
            s = fd.readline().decode("ascii")
            if len(s) and s[0] != "#":
                pnm.header += s
            else:
                pnm.header += ""
        pnm.width, pnm.height = [int(item) for item in pnm.header.split()[1:3]]
        pnm.samples = 3 if (pnm.magic == "P6") else 1
        if pnm.maxsample == 0:
            pnm.maxsample = int(pnm.header.split()[3])
        # raw payload: 8-bit when maxsample < 256, else little-endian 16-bit
        pnm.pixels = np.fromfile(
            fd,
            count=pnm.width * pnm.height * pnm.samples,
            dtype="u1" if pnm.maxsample < 256 else "<u2",
        )
        pnm.pixels = (
            pnm.pixels.reshape(pnm.height, pnm.width)
            if pnm.samples == 1
            else pnm.pixels.reshape(pnm.height, pnm.width, pnm.samples)
        )
    # normalize to 0-255, wrap as RGB, then collapse to grayscale ("L")
    im2 = Image.fromarray((pnm.pixels / pnm.pixels.max() * 255).astype("uint8"), mode="RGB")
    im2 = im2.convert("L")
    return im2
# Pick a random image from the given folder, load it into a numpy array, and
# scale to a max of 1.0
def get_random_image(basepath, NN=256, seed=-1):
    """Load a random grayscale image from ``basepath`` resized to (NN, NN).

    Walks the directory tree for .ppm/.jpg/.JPEG files; a non-negative
    ``seed`` deterministically indexes into the (walk-ordered) file list
    instead of picking at random.  Pixel values are scaled to max 1.0.
    """
    candidates = []
    for root, _dirs, files in os.walk(basepath):
        candidates.extend(
            os.path.join(root, f)
            for f in files
            if f.endswith((".ppm", ".jpg", ".JPEG"))
        )
    path = candidates[seed] if seed >= 0 else random.choice(candidates)
    if path.endswith(".ppm"):
        pic = load_coil100(path)  # hand-rolled PNM loader
    else:
        pic = Image.open(path).convert("L")
    arr = np.asarray(pic.resize((NN, NN))).astype("float64")
    return arr / arr.max()
# Generate a random image and its corresponding motion parameters, and MR
# parameters
# The output of this is used for putting into the MR simulator to get
# cine images
def grab_random_model(
    NN=512,
    Nt=25,
    img_thresh=0.15,
    t1_lims=(900, 1200),
    t2_lims=(80, 400),
    basepath="./image_db/",
    seed=-1,
    inflow_range=None,
    rseed=None,
    mseed=None
):
    """Generate a random phantom: image, motion trajectories, and MR parameters.

    Combines a random mask, random motion fields, a random source image,
    and random temporal waveforms into the per-point inputs consumed by
    the MR simulator to produce cine images.

    Args:
        NN: image/mask size per side.
        Nt: number of time frames.
        img_thresh: grayscale threshold used to mask out dark pixels.
        t1_lims, t2_lims: output ranges for the random T1/T2 maps.
        basepath: image database root passed to get_random_image.
        seed: deterministic image index (>= 0) or random choice (-1).
        inflow_range: optional (lo, hi) grayscale band treated as inflowing
            blood (gets short T1/T2 assigned at the end).
        rseed: seed for the mask and for the final T1/T2/inflow draws.
        mseed: seed for the motion fields.

    Returns:
        (r, s, t1, t2, final_mask0, r_a0, r_b0, theta0, t0, img,
        inflow_mask, xmod0, ymod0, descaler) where r is the (Nt, Npts, 3)
        trajectory, s/t1/t2 are per-point signal and relaxation values,
        and descaler is the global motion rescale factor applied.
    """
    final_mask = make_random_mask(NN=NN, rseed=rseed)
    # per-pixel ellipse-path motion fields (+4 extra fields for drift terms)
    r_a, r_b, theta, extra_p = gen_motion_params(NN=NN, rseed=mseed, extra_poly=4)
    r_a *= 0.04
    r_b *= 0.04
    # temporal envelope for the extra drift motion; zero at t=0 and kept
    # above ~0.2 in the second half of the cycle
    filt = windows.tukey(Nt, 0.3)[:, np.newaxis, np.newaxis]
    filt[0] = 0.0
    for i in range(Nt//2, Nt):
        if filt[i] < 0.2:
            filt[i] = (0.2 + filt[i]) / 2
    # linear + quadratic in time drift fields, randomly scaled
    xx = np.linspace(-1, 1, Nt)[:, np.newaxis, np.newaxis]
    p0, p1 = extra_p[0][np.newaxis], extra_p[1][np.newaxis]
    xmod = (p0 * xx**1.0 + p1 * xx**2.0) * filt * (.02 + .01 * np.random.standard_normal())
    p2, p3 = extra_p[2][np.newaxis], extra_p[3][np.newaxis]
    ymod = (p2 * xx**1.0 + p3 * xx**2.0) * filt * (.02 + .01 * np.random.standard_normal())
    # keep full-field copies for the return value before masking
    r_a0 = r_a.copy()
    r_b0 = r_b.copy()
    theta0 = theta.copy()
    img = get_random_image(basepath, NN=NN, seed=seed)
    # drop dark pixels from the mask, then flatten everything to points
    img_mask = img > img_thresh
    final_mask = final_mask & img_mask
    final_mask0 = final_mask.copy()
    final_mask = final_mask.ravel()
    s = img.ravel()
    s = s[final_mask]
    if inflow_range is not None:
        # smooth + threshold + open the grayscale band to get a clean
        # "blood pool" region
        # NOTE(review): skimage renamed opening's `selem` kwarg to
        # `footprint` (removed in 0.20) -- confirm the pinned version
        inflow_mask = (img > inflow_range[0]) & (img < inflow_range[1])
        ii1 = gaussian(inflow_mask, 20.0)
        if ii1.max() > 0.0:
            ii1 /= ii1.max()
            ii1 = ii1 > 0.5
            ii1 = opening(ii1, selem=disk(5))
            inflow_mask = ii1.copy()
            inflow_lin = inflow_mask.ravel()
            inflow_lin = inflow_lin[final_mask]
        else:
            inflow_mask = None
    else:
        inflow_mask = None
    # pick one of the two random temporal phase waveforms
    temp_method = np.random.randint(2)
    if temp_method == 0:
        t2 = get_temporal_waveform(Nt)
    elif temp_method == 1:
        t2 = get_temporal_waveform2(Nt)
    t = t2.copy()
    t0 = t.copy()
    t = np.tile(t[:, np.newaxis], [1, s.size])
    # restrict all motion fields to the masked points
    r_a = r_a.ravel()
    r_a = r_a[final_mask]
    r_b = r_b.ravel()
    r_b = r_b[final_mask]
    theta = theta.ravel()
    theta = theta[final_mask]
    xmod0 = xmod.copy()
    ymod0 = ymod.copy()
    xmod = np.reshape(xmod, [Nt, -1])
    ymod = np.reshape(ymod, [Nt, -1])
    xmod = xmod[:, final_mask]
    ymod = ymod[:, final_mask]
    r_a = np.tile(r_a[np.newaxis, :], [t.shape[0], 1])
    r_b = np.tile(r_b[np.newaxis, :], [t.shape[0], 1])
    # elliptical path per point, rotated by theta, plus the drift terms
    ell_x = r_a * (np.cos(t) - 1.0)
    ell_y = r_b * np.sin(t)
    dx = np.cos(theta) * ell_x - np.sin(theta) * ell_y + xmod
    dy = np.sin(theta) * ell_x + np.cos(theta) * ell_y + ymod
    # rest positions on a [-0.5, 0.5) grid, masked and tiled over time
    y, x = np.meshgrid(
        np.linspace(-0.5, 0.5, NN, False), np.linspace(-0.5, 0.5, NN, False), indexing="ij"
    )
    x = x.ravel()
    x = x[final_mask]
    x = np.tile(x[np.newaxis, :], [t.shape[0], 1])
    y = y.ravel()
    y = y[final_mask]
    y = np.tile(y[np.newaxis, :], [t.shape[0], 1])
    z = np.zeros_like(x)
    x0 = x.copy()
    y0 = y.copy()
    z0 = z.copy()
    # globally rescale motion so peak displacement stays under 10 pixels
    # (in 256-pixel units)
    max_displace = np.hypot(dx, dy).max()
    displace_lim = 10/256
    descaler = 1.0
    if max_displace > displace_lim:
        descaler = displace_lim / max_displace
        dx *= descaler
        dy *= descaler
    x += dx
    y += dy
    r = np.stack((x, y, z), 2)
    # re-seed so the T1/T2/inflow draws are reproducible given rseed
    if rseed is not None:
        np.random.seed(rseed)
    t1 = map_1Dpoly(s, t1_lims)
    t2 = map_1Dpoly(s, t2_lims)
    if inflow_mask is not None:
        # inflowing blood: bright-ish signal, very short T1/T2
        s[inflow_lin > 0] = np.random.uniform(0.20, 0.50)
        t1[inflow_lin > 0] = np.random.uniform(30, 60)
        t2[inflow_lin > 0] = np.random.uniform(10, 20)
    return r, s, t1, t2, final_mask0, r_a0, r_b0, theta0, t0, img, inflow_mask, xmod0, ymod0, descaler
| {"/tagsim/sim_fullmotion.py": ["/tagsim/base_im_generation.py", "/tagsim/SimObject.py", "/tagsim/SimPSD.py"], "/torch_track/train.py": ["/torch_track/loss_utils.py", "/torch_track/utils.py", "/torch_track/datagen.py"], "/torch_track/network_resnet2.py": ["/torch_track/network_utils.py"], "/tagsim/sim_cardiacmotion.py": ["/tagsim/base_im_generation.py", "/tagsim/sim_fullmotion.py", "/tagsim/SimObject.py", "/tagsim/SimPSD.py"], "/tagsim/pygrid_internal/pygrid.py": ["/tagsim/pygrid_internal/grid_kernel.py", "/tagsim/pygrid_internal/utils.py"], "/tagsim/SimPSD.py": ["/tagsim/SimObject.py"]} |
68,209 | mloecher/tag_tracking | refs/heads/main | /tagsim/notebooks/save_gifs.py | import subprocess, shutil
def save_gif(input_folder, save_folder = None, save_name = 'out.gif', ww=256, framerate=12):
    """Render the PNG frames in ``input_folder`` into a palette-optimized GIF.

    Requires ``ffmpeg`` on the PATH; frames must be named frame_000.png,
    frame_001.png, ...  A temporary GIF is written next to the frames and
    then moved to ``save_folder`` + ``save_name``.  Returns the completed
    subprocess handle.
    """
    if save_folder is None:
        save_folder = input_folder
    tmp_gif = input_folder + 'aa_out.gif'
    filt = '[0:v]scale=w=%d:h=-2,split [a][b];[a] palettegen=stats_mode=diff [p];[b][p] paletteuse=new=1' % ww
    cmd = ['ffmpeg', '-y', '-r', '%d' % framerate, '-i', input_folder + 'frame_%03d.png',
           '-filter_complex', filt, tmp_gif]
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    proc.communicate()  # wait for ffmpeg to finish
    shutil.move(tmp_gif, save_folder + save_name)
    return proc
def save_gif_windows(input_folder, save_folder = None, save_name = 'out.gif', ww=256, framerate=12):
    """Windows variant of save_gif using a hard-coded ffmpeg install path.

    Identical to save_gif except the ffmpeg binary is expected at
    C:\\ffmpeg\\bin\\ffmpeg.exe.  Returns the completed subprocess handle.
    """
    if save_folder is None:
        save_folder = input_folder
    tmp_gif = input_folder + 'aa_out.gif'
    filt = '[0:v]scale=w=%d:h=-2,split [a][b];[a] palettegen=stats_mode=diff [p];[b][p] paletteuse=new=1' % ww
    cmd = ['C:\\ffmpeg\\bin\\ffmpeg.exe', '-y', '-r', '%d' % framerate, '-i', input_folder + 'frame_%03d.png',
           '-filter_complex', filt, tmp_gif]
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    proc.communicate()  # wait for ffmpeg to finish
    shutil.move(tmp_gif, save_folder + save_name)
    return proc
68,210 | mloecher/tag_tracking | refs/heads/main | /tagsim/pygrid_internal/utils.py | import numpy as np
try:
import cupy as cp
CUDA_AVAILABLE = True
except ImportError:
CUDA_AVAILABLE = False
def roundup2(x):
    """Round ``x`` up to the nearest multiple of 2."""
    half = np.ceil(x / 2.0)
    return 2 * int(half)
def roundup4(x):
    """Round ``x`` up to the nearest multiple of 4.

    Fix: this previously duplicated roundup2's body (ceil(x / 2) * 2),
    rounding up to a multiple of 2 despite the name -- an apparent
    copy-paste error.
    """
    return int(np.ceil(x / 4.0)) * 4
def zeropad(A, out_shape, use_gpu=False):
    """Center-embed array ``A`` into a larger zero array of ``out_shape``.

    Supports 2D and 3D targets.  If no padding is needed the input is
    returned unchanged (not copied).  With ``use_gpu`` the output is
    allocated with cupy instead of numpy.
    """
    out_shape = np.array(out_shape)
    in_shape = np.array(A.shape)
    # extra room per axis, split into leading (pad0) / trailing (pad1);
    # only the trailing dims of A are compared, but mismatched ranks are
    # not actually supported by the assignments below
    diff_shape = out_shape - in_shape[-len(out_shape):]
    pad0 = diff_shape // 2
    pad1 = diff_shape - pad0
    if (pad1 == 0).sum() == len(pad1):
        return A  # nothing to pad
    if len(out_shape) != len(in_shape):
        print("ERROR: Cant zeropad with unequal dimensions")
    xp = cp if use_gpu else np
    out = xp.zeros(out_shape, A.dtype)
    if len(out_shape) == 2:
        out[pad0[0]:-pad1[0], pad0[1]:-pad1[1]] = A
    elif len(out_shape) == 3:
        out[pad0[0]:-pad1[0], pad0[1]:-pad1[1], pad0[2]:-pad1[2]] = A
    return out
def crop(A, out_shape, use_gpu=False):
    """Center-crop array ``A`` down to ``out_shape`` (2D or 3D).

    If no cropping is needed the input is returned unchanged (not copied).
    For 2D/3D the result is a VIEW into ``A``, not a copy.

    Note: ``use_gpu`` only matters in the unsupported-rank fallback, since
    slicing never allocates; it is kept for interface symmetry with
    ``zeropad``.
    """
    out_shape = np.array(out_shape)
    in_shape = np.array(A.shape)
    # excess per axis, split into leading (pad0) / trailing (pad1)
    diff_shape = in_shape[-len(out_shape):] - out_shape
    pad0 = diff_shape // 2
    pad1 = diff_shape - pad0
    if (pad1 == 0).sum() == len(pad1):
        # No crop needed
        return A
    if len(out_shape) != len(in_shape):
        # Fix: message previously said "zeropad" (copy-paste from zeropad)
        print("ERROR: Cant crop with unequal dimensions")
    # Fix: removed a zeros allocation that was immediately overwritten by
    # the slice below -- pure wasted work
    if len(out_shape) == 2:
        out = A[pad0[0]:-pad1[0], pad0[1]:-pad1[1]]
    elif len(out_shape) == 3:
        out = A[pad0[0]:-pad1[0], pad0[1]:-pad1[1], pad0[2]:-pad1[2]]
    else:
        # previous behavior for unsupported ranks: return all zeros
        out = cp.zeros(out_shape, A.dtype) if use_gpu else np.zeros(out_shape, A.dtype)
    return out
def check_traj_dens(traj, dens):
    """Normalize trajectory/density arrays to (N, 3) / (N,) float32 layout.

    Transposes (2|3, N) trajectories into point-major order, appends a
    zero kz column to purely 2D trajectories, flattens both arrays, and
    warns if their lengths disagree.

    Args:
        traj (float ndarray): trajectory, reshaped to (N, 3).
        dens (float ndarray): density, flattened to (N,).

    Returns:
        (traj, dens) in the canonical layout.
    """
    if traj.dtype != np.float32:
        traj = traj.astype(np.float32)
    if dens.dtype != np.float32:
        dens = dens.astype(np.float32)
    # component-major (2|3, N) layout -> point-major, contiguous memory
    if traj.shape[0] in (2, 3):
        traj = np.ascontiguousarray(traj.T)
    # pad a zero kz column for 2D trajectories
    if traj.shape[-1] == 2:
        zeros_col = np.zeros_like(traj[..., -1:])
        traj = np.concatenate((traj, zeros_col), axis=traj.ndim - 1)
    traj = traj.reshape(-1, 3)
    dens = dens.ravel()
    if traj.shape[0] != len(dens):
        print("ERROR: traj and dens don't have matching sizes")
    return traj, dens
| {"/tagsim/sim_fullmotion.py": ["/tagsim/base_im_generation.py", "/tagsim/SimObject.py", "/tagsim/SimPSD.py"], "/torch_track/train.py": ["/torch_track/loss_utils.py", "/torch_track/utils.py", "/torch_track/datagen.py"], "/torch_track/network_resnet2.py": ["/torch_track/network_utils.py"], "/tagsim/sim_cardiacmotion.py": ["/tagsim/base_im_generation.py", "/tagsim/sim_fullmotion.py", "/tagsim/SimObject.py", "/tagsim/SimPSD.py"], "/tagsim/pygrid_internal/pygrid.py": ["/tagsim/pygrid_internal/grid_kernel.py", "/tagsim/pygrid_internal/utils.py"], "/tagsim/SimPSD.py": ["/tagsim/SimObject.py"]} |
68,211 | mloecher/tag_tracking | refs/heads/main | /tagsim/setup_xcode.py | from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy
import sys
def is_platform_windows():
    """True when running on Windows."""
    return "win32" == sys.platform
def is_platform_mac():
    """True when running on macOS."""
    return "darwin" == sys.platform
def is_platform_linux():
    """True when running on Linux (platform string starts with 'linux')."""
    return sys.platform[:5] == "linux"
# Compiler/linker flags: MSVC needs /openmp to enable OpenMP; other
# toolchains get an empty flag here (note: NOT -fopenmp, unlike
# pygrid_internal/setup.py -- OpenMP is effectively off on non-Windows)
if is_platform_windows():
    extra_compile_args = ['/openmp']
    extra_link_args = []
else:
    extra_compile_args = [""]
    extra_link_args = [""]
# --- interp2d compile ---
# Cython extension for 2D spatial interpolation
print(' ')
print('***********************************')
print('******** Building interp2d ********')
print('***********************************')
print(' ')
ext_modules = [Extension("interp2d",
                         ["src_interp/interp2d.pyx"],
                         extra_compile_args=extra_compile_args,
                         extra_link_args=extra_link_args,
                         include_dirs=[numpy.get_include()])]
setup(
    name = 'interp2d',
    cmdclass = {'build_ext': build_ext},
    ext_modules = ext_modules
)

# --- interp_temp2d compile ---
# Cython extension for temporal 2D interpolation
print(' ')
print('***********************************')
print('******** Building interp_temp2d ********')
print('***********************************')
print(' ')
ext_modules = [Extension("interp_temp2d",
                         ["src_interp/interp_temp2d.pyx"],
                         extra_compile_args=extra_compile_args,
                         extra_link_args=extra_link_args,
                         include_dirs=[numpy.get_include()])]
setup(
    name = 'interp_temp2d',
    cmdclass = {'build_ext': build_ext},
    ext_modules = ext_modules
)

# --- pygrid3 compile ---
# C++ gridding extension (xcode-specific source variant)
print(' ')
print('**********************************')
print('******** Building pygrid3 ********')
print('**********************************')
print(' ')
ext_modules = [Extension("pygrid_internal.c_grid",
                         ["./pygrid_internal/src/c_grid_xcode.pyx"],
                         language="c++",
                         extra_compile_args=extra_compile_args,
                         extra_link_args=extra_link_args,
                         include_dirs=["./pygrid_internal/src/", numpy.get_include()],
                         library_dirs=["./pygrid_internal/src/"])]
setup(
    name = 'c_grid',
    cmdclass = {'build_ext': build_ext},
    ext_modules = ext_modules
)
68,212 | mloecher/tag_tracking | refs/heads/main | /tagsim/pygrid_internal/setup.py | import os, sys
def is_platform_windows():
    """True when running on Windows."""
    return "win32" == sys.platform
def is_platform_mac():
    """True when running on macOS."""
    return "darwin" == sys.platform
def is_platform_linux():
    """True when running on Linux (platform string starts with 'linux')."""
    return sys.platform[:5] == "linux"
try:
from setuptools import setup
from setuptools import Extension
except ImportError:
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import numpy
# Build configuration for the C++ gridding extension
sourcefiles = ["./src/c_grid.pyx"]
include_dirs = [".", "./src", numpy.get_include()]
library_dirs = [".", "./src"]
# OpenMP flags: /openmp for MSVC, -fopenmp for gcc/clang
if is_platform_windows():
    extra_compile_args = ['/openmp']
    extra_link_args = []
else:
    extra_compile_args = ["-fopenmp"]
    extra_link_args = ["-fopenmp"]
extensions = [
    Extension(
        "c_grid",
        sourcefiles,
        language="c++",
        include_dirs=include_dirs,
        library_dirs=library_dirs,
        extra_compile_args=extra_compile_args,
        extra_link_args=extra_link_args,
    )
]
# language_level follows the running interpreter (2 or 3)
setup(name="c_grid",
      ext_modules=cythonize(extensions,
                            compiler_directives={'language_level' : sys.version_info[0]})
      )
| {"/tagsim/sim_fullmotion.py": ["/tagsim/base_im_generation.py", "/tagsim/SimObject.py", "/tagsim/SimPSD.py"], "/torch_track/train.py": ["/torch_track/loss_utils.py", "/torch_track/utils.py", "/torch_track/datagen.py"], "/torch_track/network_resnet2.py": ["/torch_track/network_utils.py"], "/tagsim/sim_cardiacmotion.py": ["/tagsim/base_im_generation.py", "/tagsim/sim_fullmotion.py", "/tagsim/SimObject.py", "/tagsim/SimPSD.py"], "/tagsim/pygrid_internal/pygrid.py": ["/tagsim/pygrid_internal/grid_kernel.py", "/tagsim/pygrid_internal/utils.py"], "/tagsim/SimPSD.py": ["/tagsim/SimObject.py"]} |
68,213 | mloecher/tag_tracking | refs/heads/main | /tagsim/SimPSD.py | import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
import pprint
from .SimObject import SimObject
try:
import cupy as cp
HAS_CUPY = True
except:
HAS_CUPY = False
class PosTime:
    """Time-interpolated particle positions for a SimObject.

    Holds the per-frame position array ``r`` (optionally on the GPU) and
    linearly interpolates between the two frames bracketing a requested
    time, wrapping around at the motion period.
    """

    def __init__(self, sim_object, use_gpu = False):
        self.use_gpu = use_gpu
        self.tt = sim_object.tt.copy()    # frame time stamps
        self.period = sim_object.period   # full motion cycle length
        self.dt = sim_object.dt           # frame spacing
        if HAS_CUPY and self.use_gpu:
            self.r = cp.asarray(sim_object.r, cp.float32)
            self.pos = cp.zeros_like(self.r[0], cp.float32)
        else:
            self.r = sim_object.r.copy()
            self.pos = np.zeros_like(self.r[0])

    def calc_pos(self, p_time):
        """Fill ``self.pos`` with positions linearly interpolated at ``p_time``."""
        p_time = p_time % self.period
        for idx in range(self.tt.size):
            if self.tt[idx] > p_time:
                lo, hi = idx - 1, idx
            elif idx == (self.tt.size - 1):
                # past the last frame stamp: interpolate toward frame 0
                lo, hi = idx, 0
            else:
                continue
            frac = (p_time - self.tt[lo]) / self.dt
            self.pos[:] = self.r[lo] * (1 - frac) + self.r[hi] * frac
            break
class InstantRF:
    """Instantaneous RF rotation event.

    Rotates magnetization by ``flip`` degrees about ``dirvec``, optionally
    restricted to spins whose z position lies inside ``profile`` (a
    (lo, hi) slab).
    """

    def __init__(self, dirvec=(1.0, 0.0, 0.0), flip=90, profile=None, use_gpu = False):
        self.type = "rf"
        self.use_gpu = use_gpu
        # Fix: np.float was removed in NumPy 1.24; builtin float is equivalent
        self.dirvec = np.array(dirvec).astype(float)
        self.dirvec /= np.linalg.norm(self.dirvec)
        self.flip = flip
        self.profile = profile
        rads = flip * np.pi / 180.0
        rotvec = rads * self.dirvec
        # Fix: Rotation.as_dcm() was removed in SciPy 1.6; as_matrix() is
        # the renamed equivalent
        self.rot = R.from_rotvec(rotvec).as_matrix()
        # check the cheap flag first (also avoids touching HAS_CUPY when unused)
        if self.use_gpu and HAS_CUPY:
            self.rot = cp.asarray(self.rot, dtype=cp.float32)

    def apply(self, M, sim_object, postime, t):
        """Rotate magnetization ``M`` (N, 3) in place at time ``t``.

        Returns None (no acquisition output).
        """
        if self.use_gpu and HAS_CUPY:
            xp = cp.get_array_module(M)
        else:
            xp = np
        postime.calc_pos(t)
        posz = postime.pos[:, 2]
        if not self.profile is None:
            # slab-selective: only rotate spins inside the z profile
            mask = xp.ones_like(M[:, 0])
            mask = (posz > self.profile[0]) & (posz < self.profile[1])
            M[mask > 0, :] = xp.matmul(self.rot, M[mask > 0, :].T).T
        else:
            M[:] = xp.matmul(self.rot, M.T).T
        return None
class InstantGrad:
    """Instantaneous gradient event: applies position-dependent transverse phase.

    Each spin's transverse magnetization is rotated by
    theta = (pos . dirvec * fov) * M0 * 267.522, where M0 is the gradient
    zeroth moment in [mT * ms / m] (see the tagging PSD builders) and
    267.522 is the proton gyromagnetic ratio in matching units.
    """

    def __init__(self, dirvec=(1, 0, 0), M0=11.74, use_gpu = False):
        self.type = "grad"
        self.use_gpu = use_gpu
        # Fix: np.float was removed in NumPy 1.24; builtin float is equivalent
        self.dirvec = np.array(dirvec).astype(float)
        self.dirvec /= np.linalg.norm(self.dirvec)
        self.M0 = M0  # gradient zeroth moment [mT * ms / m]
        # check the cheap flag first (also avoids touching HAS_CUPY when unused)
        if self.use_gpu and HAS_CUPY:
            self.dirvec = cp.asarray(self.dirvec[None, :], dtype=cp.float32)

    def apply(self, M, sim_object, postime, t):
        """Rotate transverse magnetization in place by the position-dependent phase.

        Returns None (no acquisition output).
        """
        if self.use_gpu and HAS_CUPY:
            xp = cp.get_array_module(M)
        else:
            xp = np
        postime.calc_pos(t)
        fov = xp.asarray(sim_object.fov[None, :], dtype=xp.float32)
        # projection of each (fov-scaled) spin position onto the gradient axis
        rr = (postime.pos * self.dirvec * fov).sum(1)
        theta = rr * self.M0 * 267.522
        M_new = xp.zeros_like(M)
        M_new[:, 0] = M[:, 0] * xp.cos(theta) - M[:, 1] * xp.sin(theta)
        M_new[:, 1] = M[:, 0] * xp.sin(theta) + M[:, 1] * xp.cos(theta)
        M_new[:, 2] = M[:, 2]
        M[:] = M_new
        return None
class InstantAcq:
    """Instantaneous readout event: snapshots positions and magnetization."""

    def __init__(self, use_gpu = False):
        self.type = "acq"
        self.use_gpu = use_gpu

    def apply(self, M, sim_object, postime, t):
        """Return [positions, magnetization] copies at time ``t`` (always on the CPU)."""
        postime.calc_pos(t)
        if HAS_CUPY and self.use_gpu:
            # move GPU arrays back to host memory
            return [cp.asnumpy(postime.pos), cp.asnumpy(M)]
        return [postime.pos.copy(), M.copy()]
class InstantSpoil:
    """Instantaneous ideal spoiler: destroys all transverse magnetization."""

    def __init__(self, use_gpu = False):
        self.type = "spoil"
        self.use_gpu = use_gpu

    def apply(self, M, sim_object, postime, t):
        """Zero Mx/My in place; Mz is untouched.  Returns None."""
        M[:, 0:2] *= 0.0
        return None
class SimInstant:
    def __init__(self, sim_object, use_gpu = False, cu_device = 0, acq_rephase = True):
        """Set up a Bloch simulation over a list of instantaneous events.

        Args:
            sim_object: supplies sig0/T1/T2 maps and the motion state
                (r, tt, period, dt) consumed by PosTime.
            use_gpu: move magnetization and relaxation maps to the GPU
                (requires cupy).
            cu_device: CUDA device index selected when use_gpu is set.
            acq_rephase: if True, acquired magnetization is rotated 90
                degrees in the transverse plane before being returned
                (applied in run()).
        """
        self.psd = []  # list of (event, time) pairs, filled by set_psd / the PSD builders
        self.sim_object = sim_object
        self.use_gpu = use_gpu
        self.acq_rephase = acq_rephase
        self.init_tag = False   # set by tagging PSD builders; adds recovery time in run()
        self.ss_params = {}     # steady-state prep: keys 'flip', 'dt', 'Nss'
        # equilibrium magnetization: fully longitudinal, scaled by spin density
        self.Meq = np.zeros((sim_object.sig0.size, 3))
        self.Meq[:, 2] = sim_object.sig0.copy()
        self.M = self.Meq.copy()
        if HAS_CUPY and self.use_gpu:
            cp.cuda.Device(cu_device).use()
            self.M = cp.asarray(self.M, dtype=cp.float32)
            self.Meq = cp.asarray(self.Meq, dtype=cp.float32)
            self.T1 = cp.asarray(self.sim_object.T1, dtype=cp.float32)
            self.T2 = cp.asarray(self.sim_object.T2, dtype=cp.float32)
        else:
            self.T1 = self.sim_object.T1.copy()
            self.T2 = self.sim_object.T2.copy()
        self.postime = PosTime(sim_object, use_gpu = self.use_gpu)
def relaxation(self, dt):
if HAS_CUPY and self.use_gpu:
xp = cp.get_array_module(self.M)
else:
xp = np
self.M[:,0] = self.M[:,0] * xp.exp(-dt / self.T2)
self.M[:,1] = self.M[:,1] * xp.exp(-dt / self.T2)
self.M[:,2] = self.Meq[:,2] - (self.Meq[:,2] - self.M[:,2]) * xp.exp(-dt / self.T1)
def steady_state(self):
if self.ss_params:
flip = self.ss_params['flip']
dt = self.ss_params['dt']
Nss = self.ss_params['Nss']
rf = InstantRF(flip=flip, use_gpu = self.use_gpu)
spoil = InstantSpoil(use_gpu = self.use_gpu)
for i in range(Nss):
rf.apply(self.M, self.sim_object, self.postime, 0)
self.relaxation(dt)
spoil.apply(self.M, self.sim_object, self.postime, 0)
# self.relaxation(dt)
    def run(self):
        """Execute the event list and return all acquisitions.

        Sorts ``self.psd`` by event time, applies free relaxation between
        consecutive events, and collects whatever acq events return.  With
        ``acq_rephase`` each acquired magnetization is rotated -90 degrees
        about z (x <- y, y <- -x) before being stored.

        Returns:
            list of [positions, magnetization] pairs, one per acq event.
        """
        acqs = []
        self.steady_state()
        if self.init_tag:
            # extra recovery time after a tagging preparation
            self.relaxation(300.0)
        self.psd.sort(key=lambda x: x[1]) # Make sure PSD is sorted by timing
        current_time = 0
        for event in self.psd:
            dt = event[1] - current_time
            if dt > 0:
                self.relaxation(dt)
            current_time = event[1]
            out = event[0].apply(self.M, self.sim_object, self.postime, current_time)
            if out is not None:
                if self.acq_rephase:
                    # rotate the transverse plane: x <- y, y <- -x
                    out_new = np.zeros_like(out[1])
                    out_new[:,0] = out[1][:,1]
                    out_new[:,1] = -out[1][:,0]
                    out_new[:,2] = out[1][:,2]
                    out[1][:] = out_new[:]
                acqs.append(out)
        return acqs
def set_psd(self, psd, ss_params = None):
self.psd = psd
if ss_params is not None:
self.ss_params = ss_params
########################
# PSDs
########################
def sample_tagging_11_PSD(self, ke=0.1, acq_loc=[100]):
    """Build a 1-1 SPAMM-style grid-tagging PSD in two diagonal directions.

    Appends RF/gradient/spoil tagging events for [1, 1, 0] and [1, -1, 0]
    directions, then an imaging block (RF, acquisition, spoil) at each time
    in acq_loc. Also configures self.ss_params and sets self.init_tag so
    run() inserts a recovery delay before tagging.
    """
    M0_ke = 1e3 * 2 * ke * np.pi / 267.522 # [mT * ms / m]
    print('M0_ke =', M0_ke)
    ff = np.array([61.0,])
    flip_tt = np.arange(20) * .02
    # First tag direction: RF-grad-RF along [1, 1, 0], then spoil.
    self.psd.append((InstantRF(flip=ff[0], use_gpu = self.use_gpu), flip_tt[0]))
    self.psd.append((InstantGrad([1, 1, 0], M0=M0_ke, use_gpu = self.use_gpu), flip_tt[1]))
    self.psd.append((InstantRF(flip=ff[0], use_gpu = self.use_gpu), flip_tt[2]))
    self.psd.append((InstantSpoil(use_gpu = self.use_gpu), flip_tt[9]))
    # self.psd.append((InstantGrad([1, 1, 0], M0=M0_spoil, use_gpu = self.use_gpu), flip_tt[7]))
    # Second tag direction: same pattern along [1, -1, 0].
    self.psd.append((InstantRF(flip=ff[0], use_gpu = self.use_gpu), flip_tt[10]))
    self.psd.append((InstantGrad([1, -1, 0], M0=M0_ke, use_gpu = self.use_gpu), flip_tt[11]))
    self.psd.append((InstantRF(flip=ff[0], use_gpu = self.use_gpu), flip_tt[12]))
    self.psd.append((InstantSpoil(use_gpu = self.use_gpu), flip_tt[19]))
    # self.psd.append((InstantGrad([1, -1, 0], M0=M0_spoil, use_gpu = self.use_gpu), flip_tt[15]))
    # Imaging: small-flip RF, acquisition, spoil at each requested time.
    base_flip = 14
    for t_acq in acq_loc:
        self.psd.append((InstantRF(flip=base_flip, use_gpu = self.use_gpu), t_acq))
        self.psd.append((InstantAcq(use_gpu = self.use_gpu), t_acq + 1))
        self.psd.append((InstantSpoil(use_gpu = self.use_gpu), t_acq + 2))
    self.ss_params['flip'] = base_flip
    self.ss_params['dt'] = 40
    self.ss_params['Nss'] = 30
    self.init_tag = True
def sample_tagging_smn_PSD(self, ke=0.1, acq_loc=[100], scale_tagrf = 1.0):
    """Composite-RF (10-20-40-20-10 degree) tagging PSD in two diagonal
    directions, plus imaging blocks at each time in acq_loc.

    scale_tagrf uniformly scales the tagging RF flip angles. Also configures
    self.ss_params and sets self.init_tag for a pre-tag recovery delay.
    """
    M0_ke = 1e3 * 2 * ke * np.pi / 267.522 # [mT * ms / m]
    # print('M0_ke =', M0_ke)
    ff = scale_tagrf * np.array([10.0, 20.0, 40.0])
    flip_tt = np.arange(20) * 0.1
    # First tag direction: interleaved RF train and gradients along [1, 1, 0].
    self.psd.append((InstantRF(flip=ff[0], use_gpu = self.use_gpu), flip_tt[0]))
    self.psd.append((InstantGrad([1, 1, 0], M0=M0_ke, use_gpu = self.use_gpu), flip_tt[1]))
    self.psd.append((InstantRF(flip=ff[1], use_gpu = self.use_gpu), flip_tt[2]))
    self.psd.append((InstantGrad([1, 1, 0], M0=M0_ke, use_gpu = self.use_gpu), flip_tt[3]))
    self.psd.append((InstantRF(flip=ff[2], use_gpu = self.use_gpu), flip_tt[4]))
    self.psd.append((InstantGrad([1, 1, 0], M0=M0_ke, use_gpu = self.use_gpu), flip_tt[5]))
    self.psd.append((InstantRF(flip=ff[1], use_gpu = self.use_gpu), flip_tt[6]))
    self.psd.append((InstantGrad([1, 1, 0], M0=M0_ke, use_gpu = self.use_gpu), flip_tt[7]))
    self.psd.append((InstantRF(flip=ff[0], use_gpu = self.use_gpu), flip_tt[8]))
    self.psd.append((InstantSpoil(use_gpu = self.use_gpu), flip_tt[9]))
    # Second tag direction: same RF train with gradients along [1, -1, 0].
    self.psd.append((InstantRF(flip=ff[0], use_gpu = self.use_gpu), flip_tt[10]))
    self.psd.append((InstantGrad([1, -1, 0], M0=M0_ke, use_gpu = self.use_gpu), flip_tt[11]))
    self.psd.append((InstantRF(flip=ff[1], use_gpu = self.use_gpu), flip_tt[12]))
    self.psd.append((InstantGrad([1, -1, 0], M0=M0_ke, use_gpu = self.use_gpu), flip_tt[13]))
    self.psd.append((InstantRF(flip=ff[2], use_gpu = self.use_gpu), flip_tt[14]))
    self.psd.append((InstantGrad([1, -1, 0], M0=M0_ke, use_gpu = self.use_gpu), flip_tt[15]))
    self.psd.append((InstantRF(flip=ff[1], use_gpu = self.use_gpu), flip_tt[16]))
    self.psd.append((InstantGrad([1, -1, 0], M0=M0_ke, use_gpu = self.use_gpu), flip_tt[17]))
    self.psd.append((InstantRF(flip=ff[0], use_gpu = self.use_gpu), flip_tt[18]))
    self.psd.append((InstantSpoil(use_gpu = self.use_gpu), flip_tt[19]))
    # Imaging: small-flip RF, acquisition, spoil at each requested time.
    base_flip = 14
    for t_acq in acq_loc:
        self.psd.append((InstantRF(flip=base_flip, use_gpu = self.use_gpu), t_acq))
        self.psd.append((InstantAcq(use_gpu = self.use_gpu), t_acq + 1))
        self.psd.append((InstantSpoil(use_gpu = self.use_gpu), t_acq + 2))
    self.ss_params['flip'] = base_flip
    self.ss_params['dt'] = 40
    self.ss_params['Nss'] = 100
    self.init_tag = True
def sample_tagging1331_v2_PSD(self, ke=0.1, acq_loc=[100]):
    """1-3-3-1 binomial tagging PSD; the second direction uses RF applied
    about a different axis (dirvec=(0, 1, 0)).

    Also configures self.ss_params and sets self.init_tag.
    """
    M0_ke = 1e3 * 2 * ke * np.pi / 267.522 # [mT * ms / m]
    # NOTE(review): M0_spoil is computed but unused (the gradient-spoil lines
    # below are commented out).
    M0_spoil = 1e3 * 2 * 100.0 * np.pi / 267.522 # [mT * ms / m]
    ff = 100.0
    flip_tt = np.arange(16) * .05
    # First tag direction: 1-3-3-1 RF weights with gradients along [1, 1, 0].
    self.psd.append((InstantRF(flip=ff / 8, use_gpu = self.use_gpu), flip_tt[0]))
    self.psd.append((InstantGrad([1, 1, 0], M0=M0_ke, use_gpu = self.use_gpu), flip_tt[1]))
    self.psd.append((InstantRF(flip=3 * ff / 8, use_gpu = self.use_gpu), flip_tt[2]))
    self.psd.append((InstantGrad([1, 1, 0], M0=M0_ke, use_gpu = self.use_gpu), flip_tt[3]))
    self.psd.append((InstantRF(flip=3 * ff / 8, use_gpu = self.use_gpu), flip_tt[4]))
    self.psd.append((InstantGrad([1, 1, 0], M0=M0_ke, use_gpu = self.use_gpu), flip_tt[5]))
    self.psd.append((InstantRF(flip=ff / 8, use_gpu = self.use_gpu), flip_tt[6]))
    self.psd.append((InstantSpoil(use_gpu = self.use_gpu), flip_tt[7]))
    # self.psd.append((InstantGrad([1, 1, 0], M0=M0_spoil, use_gpu = self.use_gpu), flip_tt[7]))
    # Second tag direction: RF about (0, 1, 0) with gradients along [1, -1, 0].
    self.psd.append((InstantRF(flip=ff / 8, dirvec=(0.0, 1.0, 0.0), use_gpu = self.use_gpu), flip_tt[8]))
    self.psd.append((InstantGrad([1, -1, 0], M0=M0_ke, use_gpu = self.use_gpu), flip_tt[9]))
    self.psd.append((InstantRF(flip=3 * ff / 8, dirvec=(0.0, 1.0, 0.0), use_gpu = self.use_gpu), flip_tt[10]))
    self.psd.append((InstantGrad([1, -1, 0], M0=M0_ke, use_gpu = self.use_gpu), flip_tt[11]))
    self.psd.append((InstantRF(flip=3 * ff / 8, dirvec=(0.0, 1.0, 0.0), use_gpu = self.use_gpu), flip_tt[12]))
    self.psd.append((InstantGrad([1, -1, 0], M0=M0_ke, use_gpu = self.use_gpu), flip_tt[13]))
    self.psd.append((InstantRF(flip=ff / 8, dirvec=(0.0, 1.0, 0.0), use_gpu = self.use_gpu), flip_tt[14]))
    self.psd.append((InstantSpoil(use_gpu = self.use_gpu), flip_tt[15]))
    # self.psd.append((InstantGrad([1, -1, 0], M0=M0_spoil, use_gpu = self.use_gpu), flip_tt[15]))
    # Imaging: small-flip RF, acquisition, spoil at each requested time.
    base_flip = 14
    for t_acq in acq_loc:
        self.psd.append((InstantRF(flip=base_flip, use_gpu = self.use_gpu), t_acq))
        self.psd.append((InstantAcq(use_gpu = self.use_gpu), t_acq + 1))
        self.psd.append((InstantSpoil(use_gpu = self.use_gpu), t_acq + 2))
    self.ss_params['flip'] = base_flip
    self.ss_params['dt'] = 40
    self.ss_params['Nss'] = 30
    self.init_tag = True
def sample_DENSE_PSD(self, rf_dir=(1.0, 0.0, 0.0), ke=0.1, ke_dir=(1.0, 0.0, 0.0), kd=0.08, acq_loc=[100]):
    """DENSE-style PSD: 90-encode-90 preparation, then per-readout decode blocks."""
    gpu = self.use_gpu
    # Encoding strengths converted to gradient moments [mT * ms / m].
    M0_ke = 1e3 * 2 * ke * np.pi / 267.522
    M0_kd = 1e3 * 2 * kd * np.pi / 267.522
    # Preparation: excite, in-plane encode, through-plane dephase, tip back, spoil.
    self.psd.extend([
        (InstantRF(flip=90, use_gpu=gpu), 0),
        (InstantGrad(dirvec=ke_dir, M0=M0_ke, use_gpu=gpu), .01),
        (InstantGrad(dirvec=[0, 0, 1], M0=M0_kd, use_gpu=gpu), .02),
        (InstantRF(dirvec=rf_dir, flip=90, use_gpu=gpu), .03),
        (InstantSpoil(use_gpu=gpu), .04),
    ])
    # Readouts: re-excite, re-apply encode/dephase gradients, acquire, spoil.
    for t_acq in acq_loc:
        self.psd.extend([
            (InstantRF(flip=20, use_gpu=gpu), t_acq),
            (InstantGrad(dirvec=ke_dir, M0=M0_ke, use_gpu=gpu), t_acq + .01),
            (InstantGrad(dirvec=[0, 0, 1], M0=M0_kd, use_gpu=gpu), t_acq + .02),
            (InstantAcq(use_gpu=gpu), t_acq + .03),
            (InstantSpoil(use_gpu=gpu), t_acq + .04),
        ])
########################
# PSDs that need a use_gpu update
########################
def sample_tagging_1D_PSD(self, ke=0.1, acq_loc=[100]):
    """1-1 SPAMM tagging along x only, followed by imaging blocks at acq_loc.

    (This is one of the PSDs that still lacks a use_gpu argument on its events.)
    """
    M0_ke = 1e3 * 2 * ke * np.pi / 267.522  # [mT * ms / m]
    # Tag prep: 45-grad-45 along [1, 0, 0], then spoil.
    self.psd.extend([
        (InstantRF(flip=45), 0),
        (InstantGrad([1, 0, 0], M0=M0_ke), 1),
        (InstantRF(flip=45), 2),
        (InstantSpoil(), 3),
    ])
    # Imaging: small-flip RF, acquisition, spoil at each requested time.
    for t_acq in acq_loc:
        self.psd.extend([
            (InstantRF(flip=20), t_acq),
            (InstantAcq(), t_acq + 1),
            (InstantSpoil(), t_acq + 2),
        ])
def sample_tagging_PSD(self, ke=0.1, acq_loc=[100]):
    """1-1 SPAMM grid tagging along x then y, followed by imaging blocks."""
    M0_ke = 1e3 * 2 * ke * np.pi / 267.522  # [mT * ms / m]
    # Two identical 45-grad-45-spoil preps, one per tag direction.
    t = 0
    for grad_dir in ([1, 0, 0], [0, 1, 0]):
        self.psd.append((InstantRF(flip=45), t))
        self.psd.append((InstantGrad(grad_dir, M0=M0_ke), t + 1))
        self.psd.append((InstantRF(flip=45), t + 2))
        self.psd.append((InstantSpoil(), t + 3))
        t += 4
    # Imaging: small-flip RF, acquisition, spoil at each requested time.
    for t_acq in acq_loc:
        self.psd.append((InstantRF(flip=20), t_acq))
        self.psd.append((InstantAcq(), t_acq + 1))
        self.psd.append((InstantSpoil(), t_acq + 2))
def sample_tagging1331_PSD(self, ke=0.1, acq_loc=[100]):
    """1-3-3-1 binomial grid tagging in the two diagonal directions, then imaging."""
    M0_ke = 1e3 * 2 * ke * np.pi / 267.522  # [mT * ms / m]
    # Binomial 1-3-3-1 weighting of a total 90-degree flip.
    tag_flips = [90 / 8, 3 * 90 / 8, 3 * 90 / 8, 90 / 8]
    t = 0
    for grad_dir in ([1, 1, 0], [1, -1, 0]):
        # RF pulses interleaved with tagging gradients, one time unit apart.
        for i, flip in enumerate(tag_flips):
            self.psd.append((InstantRF(flip=flip), t))
            t += 1
            if i < len(tag_flips) - 1:
                self.psd.append((InstantGrad(grad_dir, M0=M0_ke), t))
                t += 1
        self.psd.append((InstantSpoil(), t))
        t += 1
    # Imaging: small-flip RF, acquisition, spoil at each requested time.
    for t_acq in acq_loc:
        self.psd.append((InstantRF(flip=20), t_acq))
        self.psd.append((InstantAcq(), t_acq + 1))
        self.psd.append((InstantSpoil(), t_acq + 2))
def sample_tagging1331_v1_PSD(self, ke=0.1, acq_loc=[100]):
    """1-3-3-1 binomial grid tagging (two diagonal directions) with events
    spaced .01 time units apart, then imaging blocks at acq_loc.
    """
    M0_ke = 1e3 * 2 * ke * np.pi / 267.522 # [mT * ms / m]
    # First tag direction: 1-3-3-1 RF weights with gradients along [1, 1, 0].
    self.psd.append((InstantRF(flip=90 / 8), 0))
    self.psd.append((InstantGrad([1, 1, 0], M0=M0_ke), .01))
    self.psd.append((InstantRF(flip=3 * 90 / 8), .02))
    self.psd.append((InstantGrad([1, 1, 0], M0=M0_ke), .03))
    self.psd.append((InstantRF(flip=3 * 90 / 8), .04))
    self.psd.append((InstantGrad([1, 1, 0], M0=M0_ke), .05))
    self.psd.append((InstantRF(flip=90 / 8), .06))
    self.psd.append((InstantSpoil(), .07))
    # Second tag direction: same pattern along [1, -1, 0].
    self.psd.append((InstantRF(flip=90 / 8), .08))
    self.psd.append((InstantGrad([1, -1, 0], M0=M0_ke), .09))
    self.psd.append((InstantRF(flip=3 * 90 / 8), .1))
    self.psd.append((InstantGrad([1, -1, 0], M0=M0_ke), .11))
    self.psd.append((InstantRF(flip=3 * 90 / 8), .12))
    self.psd.append((InstantGrad([1, -1, 0], M0=M0_ke), .13))
    self.psd.append((InstantRF(flip=90 / 8), .14))
    self.psd.append((InstantSpoil(), .15))
    # Imaging: small-flip RF, acquisition, spoil at each requested time.
    for t_acq in acq_loc:
        self.psd.append((InstantRF(flip=20), t_acq))
        self.psd.append((InstantAcq(), t_acq + .01))
        self.psd.append((InstantSpoil(), t_acq + .02))
def sample_tagging_lin_PSD(self, direction=[1, 0, 0], ke=0.125, acq_loc=[100], profile = None):
    """1-3-3-1 binomial line tagging along a single configurable direction,
    then imaging blocks at acq_loc.

    profile is forwarded to the imaging InstantRF (slice/excitation profile).
    NOTE(review): tag events are .01 apart but acquisition uses t_acq + 1 /
    t_acq + 2 -- confirm the mixed spacing is intentional.
    """
    M0_ke = 1e3 * 2 * ke * np.pi / 267.522 # [mT * ms / m]
    self.psd.append((InstantRF(flip=90 / 8), 0))
    self.psd.append((InstantGrad(direction, M0=M0_ke), .01))
    self.psd.append((InstantRF(flip=3 * 90 / 8), .02))
    self.psd.append((InstantGrad(direction, M0=M0_ke), .03))
    self.psd.append((InstantRF(flip=3 * 90 / 8), .04))
    self.psd.append((InstantGrad(direction, M0=M0_ke), .05))
    self.psd.append((InstantRF(flip=90 / 8), .06))
    self.psd.append((InstantSpoil(), .07))
    # self.psd.append((InstantRF(flip=90 / 8), .08))
    # self.psd.append((InstantGrad([1, -1, 0], M0=M0_ke), .09))
    # self.psd.append((InstantRF(flip=3 * 90 / 8), .1))
    # self.psd.append((InstantGrad([1, -1, 0], M0=M0_ke), .11))
    # self.psd.append((InstantRF(flip=3 * 90 / 8), .12))
    # self.psd.append((InstantGrad([1, -1, 0], M0=M0_ke), .13))
    # self.psd.append((InstantRF(flip=90 / 8), .14))
    # self.psd.append((InstantSpoil(), .15))
    # Imaging: small-flip RF (optionally profiled), acquisition, spoil.
    for t_acq in acq_loc:
        self.psd.append((InstantRF(flip=20, profile = profile), t_acq))
        self.psd.append((InstantAcq(), t_acq + 1))
        self.psd.append((InstantSpoil(), t_acq + 2))
def sample_tagging_lin_PSD2(self, rf_dir=(1.0, 0.0, 0.0), direction=[1, 0, 0], ke=0.125, acq_loc=[100]):
    """90-grad-90 line tagging with a configurable tip-back RF axis, then imaging."""
    M0_ke = 1e3 * 2 * ke * np.pi / 267.522  # [mT * ms / m]
    # Prep: excite, encode along `direction`, tip back about rf_dir, spoil.
    self.psd.extend([
        (InstantRF(flip=90), 0),
        (InstantGrad(direction, M0=M0_ke), .01),
        (InstantRF(dirvec=rf_dir, flip=90), .02),
        (InstantSpoil(), .03),
    ])
    # Imaging: small-flip RF, acquisition, spoil at each requested time.
    for t_acq in acq_loc:
        self.psd.extend([
            (InstantRF(flip=20), t_acq),
            (InstantAcq(), t_acq + 1),
            (InstantSpoil(), t_acq + 2),
        ])
# Placeholder entry point: this module is currently import-only.
if __name__ == "__main__":
    print('Nothing in __main__ right now')
| {"/tagsim/sim_fullmotion.py": ["/tagsim/base_im_generation.py", "/tagsim/SimObject.py", "/tagsim/SimPSD.py"], "/torch_track/train.py": ["/torch_track/loss_utils.py", "/torch_track/utils.py", "/torch_track/datagen.py"], "/torch_track/network_resnet2.py": ["/torch_track/network_utils.py"], "/tagsim/sim_cardiacmotion.py": ["/tagsim/base_im_generation.py", "/tagsim/sim_fullmotion.py", "/tagsim/SimObject.py", "/tagsim/SimPSD.py"], "/tagsim/pygrid_internal/pygrid.py": ["/tagsim/pygrid_internal/grid_kernel.py", "/tagsim/pygrid_internal/utils.py"], "/tagsim/SimPSD.py": ["/tagsim/SimObject.py"]} |
68,214 | mloecher/tag_tracking | refs/heads/main | /tagsim/utils.py |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation, rc
class TagAnimator:
    """Matplotlib animation of a cine image stack with an optional tag-point overlay."""
    def __init__(self, ims, tags, figsize=[8, 8], nframes=25, interval=50, scale = 1.0, shift = None, clim=None):
        """Set up figure, artists, and the FuncAnimation.

        Parameters
        ----------
        ims :
            Image stack; squeezed, first axis treated as the frame index.
        tags :
            Flat array of tag coordinates (first half x, second half y),
            or None to animate the images only.
        figsize, nframes, interval, clim :
            Standard matplotlib figure/animation options.
        scale, shift :
            Tag coordinates are mapped as coord * scale + shift; shift
            defaults to half the image width (centers normalized coords).
        """
        print("Starting animation class . . . ", flush=True)
        if shift is None:
            shift = ims.shape[-1]/2.0
        self.ims = np.squeeze(ims)
        if tags is None:
            self.tags = tags
            self.plot_tags = False
        else:
            self.tags = np.squeeze(tags)
            self.plot_tags = True
        self.fig, self.axarr = plt.subplots(1, 1, squeeze=False, figsize=figsize)
        self.fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)
        self.im = self.axarr[0, 0].imshow(self.ims[0], cmap="gray", clim=clim)
        if self.plot_tags:
            # First half of the flat tag array is x, second half is y.
            self.tagmid = tags.size // 2
            xp = np.array(tags[:self.tagmid]) * scale + shift
            yp = np.array(tags[self.tagmid:]) * scale + shift
            # All tag points drawn as red x's; one highlighted green '+'
            # is moved from frame to frame in animate().
            self.pts_all, = self.axarr[0, 0].plot(
                xp, yp, linestyle="None", marker="x", markeredgecolor="r", mew=2, markersize=4
            )
            self.pts_big, = self.axarr[0, 0].plot(
                xp[0], yp[0], linestyle="None", marker="+", markeredgecolor="g", mew=4, markersize=12
            )
            self.xp = xp
            self.yp = yp
        else:
            self.xp = 0
            self.yp = 0
            self.pts_big = None
        print("Making animation . . . ", flush=True)
        self.anim = animation.FuncAnimation(
            self.fig,
            self.animate,
            init_func=self.init,
            frames=nframes,
            interval=interval,
            blit=True,
        )
        # Close the figure so only the animation object is displayed/saved.
        plt.close()
    def init(self):
        """Blit init callback: reset image (and highlight point) to frame 0."""
        self.im.set_data(self.ims[0])
        if self.plot_tags:
            self.pts_big.set_data(self.xp[0], self.yp[0])
            return [self.im, self.pts_big]
        else:
            return [self.im,]
    def animate(self, i):
        """Per-frame callback: show frame i and move the highlighted tag point."""
        self.im.set_data(self.ims[i])
        if self.plot_tags:
            self.pts_big.set_data(self.xp[i], self.yp[i])
            return [self.im, self.pts_big]
        else:
            return [self.im,]
def get_patch_path(ims, path, is_scaled = False, width=32):
    """Extract a subpixel-centered patch stack around the first track point.

    Uses a Fourier-domain phase ramp to shift the crop to the fractional
    position of path[:, 0]. Returns (patch stack of size width x width per
    frame, path expressed relative to its first point).
    """
    half = width // 2
    if path.ndim == 1:
        path = path[:, None]
    # Convert normalized [-0.5, 0.5) coordinates to pixel units when needed.
    if is_scaled:
        pix_path = path
    else:
        pix_path = path + 0.5
        pix_path[1] *= ims.shape[-2]
        pix_path[0] *= ims.shape[-1]
    pad = ((0, 0), (half + 1, half + 1), (half + 1, half + 1))
    padded = np.pad(ims, pad_width=pad, mode='constant')
    pos1 = pix_path[1, 0]
    ipos1 = int(pos1)
    pos0 = pix_path[0, 0]
    ipos0 = int(pos0)
    # Crop a (width + 2)-wide window at the integer part of the position;
    # the extra 1-pixel border is trimmed after the subpixel shift.
    win = padded[:, ipos1:ipos1 + 2 * half + 2, ipos0:ipos0 + 2 * half + 2]
    kwin = np.fft.ifftshift(np.fft.fftn(np.fft.fftshift(win, axes=(1, 2)), axes=(1, 2)), axes=(1, 2))
    freqs = 2 * np.pi * np.arange(-(half + 1), half + 1) / width
    fy, fx = np.meshgrid(freqs, freqs, indexing='ij')
    # Linear phase ramps implement the fractional-pixel shift in k-space.
    kwin *= np.exp(1j * fx[np.newaxis, ...] * (pos0 - ipos0))
    kwin *= np.exp(1j * fy[np.newaxis, ...] * (pos1 - ipos1))
    shifted = np.abs(np.fft.ifftshift(np.fft.ifftn(np.fft.fftshift(kwin, axes=(1, 2)), axes=(1, 2)), axes=(1, 2)))
    patches = shifted[:, 1:-1, 1:-1]
    rel_path = path - path[:, 0][:, np.newaxis]
    return patches, rel_path
68,215 | mloecher/tag_tracking | refs/heads/main | /torch_track/loss_utils.py | import torch
import torch.nn as nn
# Custom loss functions; none of these ended up being used -- candidates for deletion.
def custom_mse_loss(test, target):
    """Mean squared error: summed squared differences over the element count."""
    sq_err = (test - target) ** 2
    return sq_err.sum() / test.data.nelement()
def weighted_mse_loss(test, target):
    """MSE with per-point weights in [1, 2] scaled by true displacement magnitude."""
    pred = torch.reshape(test, [test.shape[0], 2, -1])
    truth = torch.reshape(target, [target.shape[0], 2, -1])
    # Ground-truth displacement magnitude at each point.
    mag = torch.hypot(truth[:, 0], truth[:, 1])
    # Normalize per sample to [0, 1], then shift to [1, 2] so every point counts.
    peak = torch.max(mag, 1)[0]
    weights = mag / peak[:, None] + 1.0
    # Weight the residuals before squaring.
    err = (pred - truth) * weights[:, None, :]
    return (err ** 2).sum() / pred.data.nelement()
def multi_loss(test, target):
    """Evaluate all four tracked losses; returns (MSE, MAE, custom MSE, weighted MSE)."""
    mse = nn.MSELoss()(test, target)
    mae = nn.L1Loss()(test, target)
    custom = custom_mse_loss(test, target)
    weighted = weighted_mse_loss(test, target)
    return mse, mae, custom, weighted
def two_loss(test, target):
    """Return the (MSE, MAE) pair between prediction and target."""
    return nn.MSELoss()(test, target), nn.L1Loss()(test, target)
| {"/tagsim/sim_fullmotion.py": ["/tagsim/base_im_generation.py", "/tagsim/SimObject.py", "/tagsim/SimPSD.py"], "/torch_track/train.py": ["/torch_track/loss_utils.py", "/torch_track/utils.py", "/torch_track/datagen.py"], "/torch_track/network_resnet2.py": ["/torch_track/network_utils.py"], "/tagsim/sim_cardiacmotion.py": ["/tagsim/base_im_generation.py", "/tagsim/sim_fullmotion.py", "/tagsim/SimObject.py", "/tagsim/SimPSD.py"], "/tagsim/pygrid_internal/pygrid.py": ["/tagsim/pygrid_internal/grid_kernel.py", "/tagsim/pygrid_internal/utils.py"], "/tagsim/SimPSD.py": ["/tagsim/SimObject.py"]} |
68,223 | kader-digital/bitcoin-wallet | refs/heads/master | /wallet.py | class Wallet ():
def __init__(self):
    """Create a new BTC wallet from a freshly generated mnemonic seed."""
    # Imported lazily so the module can load without pywallet installed.
    from pywallet import wallet as w
    # Attribute defaults; filled in by parse_response().
    self.coin = ''
    self.private_key = ''
    self.public_key = ''
    self.address = ''
    # Mnemonic seed used to derive the wallet.
    self.seed = w.generate_mnemonic()
    self.wallet = w.create_wallet(network='BTC',seed=self.seed, children=0)
    # Copy the relevant wallet-dict entries onto attributes.
    self.parse_response()
def parse_response(self):
    """Populate coin/private_key/public_key/address from the wallet dict.

    Uses exact key lookups. The previous substring matching
    (`'private_key' in key`) also matched pywallet's extended keys
    ('xprivate_key'/'xpublic_key'), so attributes could be overwritten
    with the wrong value depending on dict iteration order.
    Missing keys leave the attributes' current values in place.
    """
    self.coin = self.wallet.get('coin', self.coin)
    self.private_key = self.wallet.get('private_key', self.private_key)
    self.public_key = self.wallet.get('public_key', self.public_key)
    self.address = self.wallet.get('address', self.address)
| {"/crypto.py": ["/wallet.py"]} |
68,224 | kader-digital/bitcoin-wallet | refs/heads/master | /crypto.py | import wallet
from tkinter import *
# Generate a new wallet, persist its credentials, and show them in a Tk window.
btc_wallet = wallet.Wallet()

# Persist the credentials so the user can recover the wallet later.
filename = 'credentials.txt'
with open(filename, 'w') as f:
    f.write('currency: ' + btc_wallet.coin + '\n')
    f.write('private key: ' + btc_wallet.private_key + '\n')
    f.write('public key: ' + btc_wallet.public_key + '\n')
    f.write('address: ' + btc_wallet.address + '\n')
    f.write('seed: ' + btc_wallet.seed + '\n')
    # NOTE: the `with` block closes the file automatically; the previous
    # explicit f.close() inside the block was redundant and was removed.

# Simple read-only Tk window displaying the wallet credentials.
root = Tk()
label_currency = Label(root, text='Currency: ' + btc_wallet.coin)
label_currency.grid(row=0, sticky='w')
label_address = Label(root, text='Address: ' + btc_wallet.address)
label_address.grid(row=1, sticky='w')
label_private_key = Label(root, text='Private Key: ' + btc_wallet.private_key)
label_private_key.grid(row=2, sticky='w')
label_public_key = Label(root, text='Public Key: ' + btc_wallet.public_key)
label_public_key.grid(row=3, sticky='w')
label_seed = Label(root, text='seed: ' + btc_wallet.seed)
label_seed.grid(row=4, sticky='w')
label_balance = Label(root, text='Balance: ' + '0.00000000')
label_balance.grid(row=5, sticky='w')
sending_btc_entry = Entry(root)
sending_btc_entry.grid(row=6, column=0, sticky='w')
send_button = Button(root, text='send')
send_button.grid(row=6, sticky='w')

# mainloop() blocks until the window is closed. The previous
# `while running: root.mainloop(); if root.quit: running = False` wrapper
# always exited after one pass (root.quit is a bound method and therefore
# always truthy), so a single call is equivalent and clearer.
root.mainloop()
| {"/crypto.py": ["/wallet.py"]} |
68,226 | disbr007/os_obia | refs/heads/main | /lib.py | import copy
import logging
import numpy as np
import os
import pathlib
from pathlib import Path, PurePath
import subprocess
from subprocess import PIPE
import typing
from typing import Union
from osgeo import gdal, ogr, osr
import geopandas as gpd
import rasterio as rio
from rasterio.features import shapes
from rasterio.fill import fillnodata
import rasterio.mask
from skimage.segmentation import quickshift
# Set up logger
logger = logging.getLogger(__name__)
# logger.setLevel(logging.INFO)
# ch = logging.StreamHandler()
# formatter = logging.Formatter(
# '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# ch.setFormatter(formatter)
# logger.addHandler(ch)
def log_me():
    """Emit one INFO and one WARNING record to verify logger configuration."""
    for emit, msg in ((logger.info, 'Test'), (logger.warning, 'warning')):
        emit(msg)
def run_subprocess(command, log=True):
    """Run a shell command, streaming stdout to the logger or into a list.

    Parameters
    ----------
    command : str
        Command line executed with shell=True.
        SECURITY NOTE: shell=True means `command` is interpreted by the
        shell -- never pass untrusted input here.
    log : bool
        True to log each stdout line as it arrives; False to collect the
        decoded lines and return them instead.

    Returns
    -------
    list of str
        Decoded stdout lines (empty when log=True).
    """
    proc = subprocess.Popen(command, stdout=PIPE, stderr=PIPE, shell=True)
    response = []
    for line in iter(proc.stdout.readline, b''):  # replace '' with b'' for Python 3
        if log:
            logger.info(line.decode())
        else:
            response.append(line.decode())
    # Wait for the process to exit; remaining output/err only logged at debug.
    output, error = proc.communicate()
    if log:
        logger.debug('Output: {}'.format(output.decode()))
        logger.debug('Err: {}'.format(error.decode()))
    return response
def clean4cmdline(command):
    """Collapse newlines and runs of whitespace into single spaces."""
    # str.split() with no argument splits on any whitespace run, so after
    # flattening newlines we simply rejoin the tokens with single spaces.
    flattened = command.replace('\n', ' ')
    return ' '.join(flattened.split())
def create_grm_outname(img=None, out_seg=None, out_dir=None,
                       criterion='bs', threshold=None, niter=0,
                       speed=0, spectral=0.5, spatial=0.5,
                       out_format='vector', name_only=False):
    """Build the output path for a GRM segmentation from its parameters.

    Parameters
    ----------
    img : str
        Input image path; used to derive the output name/dir when out_seg
        is not given.
    out_seg : str, optional
        Explicit output path; bypasses name construction.
    out_dir : str, optional
        Output directory; defaults to the image's directory.
    criterion, threshold, niter, speed, spectral, spatial :
        Segmentation parameters encoded into the filename ('.' -> 'x').
    out_format : str
        'vector' converts the extension to .shp when name_only is set.
    name_only : bool
        True to return the name with the vector extension applied.

    Returns
    -------
    str
        The constructed (or pass-through) output path.
    """
    if out_seg is None:
        if out_dir is None:
            out_dir = os.path.dirname(img)
        out_name = os.path.basename(img).split('.')[0]
        # Encode parameters into the filename; '.' -> 'x' keeps it filesystem-safe.
        out_name = '{}_{}t{}ni{}s{}spec{}spat{}.tif'.format(out_name, criterion,
                                                            str(threshold).replace('.', 'x'),
                                                            niter, speed,
                                                            str(spectral).replace('.', 'x'),
                                                            str(spatial).replace('.', 'x'))
        out_seg = os.path.join(out_dir, out_name)
    if name_only and out_format == 'vector':
        # BUGFIX: swap only the file extension. The previous
        # out_seg.replace('tif', 'shp') rewrote every 'tif' substring in the
        # path (e.g. a directory named 'tifs'), not just the extension.
        out_seg = os.path.splitext(out_seg)[0] + '.shp'
    return out_seg
def detect_ogr_driver(ogr_ds: str, name_only: bool = False) -> typing.Tuple[gdal.Driver, str]:
    """
    Autodetect the appropriate driver for an OGR datasource.

    Parameters
    ----------
    ogr_ds : OGR datasource
        Path to OGR datasource.
    name_only : bool
        True to return the name of the driver, else the ogr.Driver object
        will be returned.

    Returns
    -------
    (driver, layer_name) tuple; layer_name is None for non-database formats
    (shapefile, GeoJSON). Unknown extensions fall back to 'ESRI Shapefile'.
    """
    # Driver names
    FileGDB = 'FileGDB'
    OpenFileGDB = 'OpenFileGDB'
    # Suffixes
    GPKG = '.gpkg'
    SHP = '.shp'
    GEOJSON = '.geojson'
    GDB = '.gdb'
    supported_drivers = [gdal.GetDriver(i).GetDescription()
                         for i in range(gdal.GetDriverCount())]
    # Prefer the (ESRI SDK) FileGDB driver when this GDAL build includes it.
    if FileGDB in supported_drivers:
        gdb_driver = FileGDB
    else:
        gdb_driver = OpenFileGDB
    # OGR driver lookup table
    driver_lut = {
        GEOJSON: 'GeoJSON',
        SHP: 'ESRI Shapefile',
        GPKG: 'GPKG',
        GDB: gdb_driver
    }
    layer = None
    # Check if in-memory datasource
    if isinstance(ogr_ds, PurePath):
        ogr_ds = str(ogr_ds)
    if isinstance(ogr_ds, ogr.DataSource):
        driver = 'Memory'
    elif 'vsimem' in ogr_ds:
        driver = 'ESRI Shapefile'
    else:
        # Check if extension in look up table. For database containers
        # (GPKG/GDB) the layer name is the path stem.
        if GPKG in ogr_ds:
            drv_sfx = GPKG
            layer = Path(ogr_ds).stem
        elif GDB in ogr_ds:
            drv_sfx = GDB
            layer = Path(ogr_ds).stem
        else:
            drv_sfx = Path(ogr_ds).suffix
        if drv_sfx in driver_lut.keys():
            driver = driver_lut[drv_sfx]
        else:
            logger.warning("""Unsupported driver extension {}
                        Defaulting to 'ESRI Shapefile'""".format(drv_sfx))
            driver = driver_lut[SHP]
    logger.debug('Driver autodetected: {}'.format(driver))
    if not name_only:
        try:
            driver = ogr.GetDriverByName(driver)
        except ValueError as e:
            logger.error('ValueError with driver_name: {}'.format(driver))
            logger.error('OGR DS: {}'.format(ogr_ds))
            raise e
    return driver, layer
def read_vec(vec_path: str, **kwargs) -> gpd.GeoDataFrame:
    """Load any supported vector format (shp/geojson/gpkg/gdb) into a GeoDataFrame."""
    driver, layer = detect_ogr_driver(vec_path, name_only=True)
    if layer is None:
        # File-based formats: read the path directly.
        return gpd.read_file(vec_path, driver=driver, **kwargs)
    # Database-style containers (GPKG/GDB): open the parent and select the layer.
    return gpd.read_file(Path(vec_path).parent, layer=layer, driver=driver, **kwargs)
def write_gdf(src_gdf, out_footprint, to_str_cols=None,
              out_format=None,
              nan_to=None,
              precision=None,
              overwrite=True,
              **kwargs):
    """
    Write a GeoDataFrame to file, handling common format quirks.

    Handles overwrite behavior, rounding, NaN replacement and list/dict
    column stringification (shapefile only), CRS reprojection for GeoJSON,
    and driver/layer detection for shapefile, GeoJSON, GPKG, and FileGDB.

    Parameters
    ----------
    src_gdf : gpd.GeoDataFrame
        Data to write; a deep copy is taken so the caller's frame is untouched.
    out_footprint : str or Path
        Destination path; the driver is detected from its extension.
    to_str_cols : list, optional
        Columns holding lists/dicts to serialize as comma-joined strings
        (applied for shapefile output only).
    out_format :
        NOTE(review): accepted but never read -- candidate for removal.
    nan_to : optional
        Replacement value for NaNs (shapefile output only).
    precision : int, optional
        Round all numeric columns to this many decimals.
    overwrite : bool
        Overwrite an existing file; if False and the file exists, skip writing.

    Returns
    -------
    None
    TODO: Add different handling for different formats, e.g. does gpkg allow datetime/NaN?
    """
    # Drivers
    ESRI_SHAPEFILE = 'ESRI Shapefile'
    GEOJSON = 'GeoJSON'
    GPKG = 'GPKG'
    OPEN_FILE_GDB = 'OpenFileGDB'
    FILE_GBD = 'FileGDB'
    gdf = copy.deepcopy(src_gdf)
    if not isinstance(out_footprint, pathlib.PurePath):
        out_footprint = Path(out_footprint)
    # Format agnostic functions
    # Remove if exists and overwrite
    if out_footprint.exists():
        if overwrite:
            logger.warning('Overwriting existing file: '
                           '{}'.format(out_footprint))
            os.remove(out_footprint)
        else:
            logger.warning('Out file exists and overwrite not specified, '
                           'skipping writing.')
            return None
    # Round if precision
    if precision:
        gdf = gdf.round(decimals=precision)
    logger.debug('Writing to file: {}'.format(out_footprint))
    # Get driver and layer name. Layer will be none for non database formats
    driver, layer = detect_ogr_driver(out_footprint, name_only=True)
    if driver == ESRI_SHAPEFILE:
        # convert NaNs to empty string
        if nan_to:
            gdf = gdf.replace(np.nan, nan_to, regex=True)
        # Convert columns that store lists to strings
        if to_str_cols:
            for col in to_str_cols:
                logger.debug('Converting to string field: {}'.format(col))
                gdf[col] = [','.join(map(str, l)) if isinstance(l, (dict, list))
                            and len(l) > 0 else '' for l in gdf[col]]
    # Write out in format specified
    if driver in [ESRI_SHAPEFILE, GEOJSON]:
        if driver == GEOJSON:
            # GeoJSON is WGS84 by spec; reproject anything else.
            if gdf.crs != 4326:
                logger.warning('Attempting to write GeoDataFrame with non-WGS84 '
                               'CRS to GeoJSON. Reprojecting to WGS84.')
                gdf = gdf.to_crs('epsg:4326')
        gdf.to_file(out_footprint, driver=driver, **kwargs)
    elif driver in [GPKG, OPEN_FILE_GDB, FILE_GBD]:
        # Database containers: write as a named layer inside the parent.
        gdf.to_file(str(out_footprint.parent), layer=layer, driver=driver, **kwargs)
    else:
        logger.error('Unsupported driver: {}'.format(driver))
def rio_polygonize(img: str, out_vec: str = None, band: int = 1, mask_value=None):
    """Vectorize a raster band into polygons of constant value.

    Parameters
    ----------
    img : str
        Path to the input raster.
    out_vec : str, optional
        If given, the resulting polygons are also written to this vector file.
    band : int
        1-based band index to polygonize.
    mask_value : numeric, optional
        If given, only cells equal to this value are polygonized.

    Returns
    -------
    gpd.GeoDataFrame
        Polygons with their source raster value in the 'raster_val' column.
    """
    logger.info('Polygonizing: {}'.format(img))
    with rio.Env():
        with rio.open(img) as src:
            arr = src.read(band)
            src_crs = src.crs
            if mask_value is not None:
                mask = arr == mask_value
            else:
                mask = None
            # rasterio.features.shapes yields (geometry, value) pairs; wrap
            # them into GeoJSON-like feature dicts. Materialized inside the
            # `with` because the lazy generator reads src.transform.
            results = ({'properties': {'raster_val': v},
                        'geometry': s}
                       for i, (s, v) in
                       enumerate(shapes(arr,
                                        mask=mask,
                                        transform=src.transform)))
            geoms = list(results)
    gdf = gpd.GeoDataFrame.from_features(geoms, crs=src_crs)
    if out_vec:
        logger.info('Writing polygons to: {}'.format(out_vec))
        write_gdf(gdf, out_vec)
    return gdf
def write_array(array, out_path, ds, stacked=False, fmt='GTiff',
                dtype=None, nodata_val=None):
    """
    Write a 2D or 3D array to a new raster, copying georeferencing from ds.

    Parameters
    ----------
    array : np.ndarray or np.ma.MaskedArray
        2D (rows, cols) or 3D (bands, rows, cols) array to write. Masked
        cells are filled with the NoData value.
    out_path : str
        Path of the raster to create.
    ds : gdal.Dataset
        Source dataset providing geotransform, projection, size, and defaults.
    stacked : bool
        Treat the array as 3D (bands first); set automatically for 3D input.
    fmt : str
        GDAL driver name.
    dtype : int, optional
        GDAL data type; defaults to the type of ds band 1.
    nodata_val : numeric, optional
        NoData value; defaults to ds band 1's NoData, else -9999.

    Raises
    ------
    ValueError
        If the array is not 2D or 3D.
    """
    dims = len(array.shape)
    if dims == 3:
        depth, rows, cols = array.shape
        stacked = True
    elif dims == 2:
        rows, cols = array.shape
        depth = 1
    else:
        # Previously fell through with rows/cols unbound -> NameError later.
        raise ValueError('write_array expects a 2D or 3D array, '
                         'got {} dims'.format(dims))
    # Handle NoData value.
    # BUGFIX: the original inner check `if nodata_val is not None` inside
    # `if nodata_val is None` could never be true (and the call was missing
    # the `ds.` prefix), so the source NoData value was never read.
    if nodata_val is None:
        nodata_val = ds.GetRasterBand(1).GetNoDataValue()
        if nodata_val is None:
            logger.warning('Unable to determine NoData value, using -9999')
            nodata_val = -9999
    # Handle dtype: fall back to the source band's data type.
    if not dtype:
        dtype = ds.GetRasterBand(1).DataType
    # Create output file with the source's dimensions and georeferencing.
    driver = gdal.GetDriverByName(fmt)
    geotransform = ds.GetGeoTransform()
    try:
        logger.info(f'Creating raster at: {out_path}')
        dst_ds = driver.Create(out_path, ds.RasterXSize, ds.RasterYSize,
                               bands=depth,
                               eType=dtype)
    except Exception:
        # Narrowed from a bare `except:`; re-raise so failures are not
        # followed by a confusing NameError on the unbound dst_ds.
        logger.error('Error creating: {}'.format(out_path))
        raise
    dst_ds.SetGeoTransform(geotransform)
    prj = osr.SpatialReference()
    prj.ImportFromWkt(ds.GetProjectionRef())
    dst_ds.SetProjection(prj.ExportToWkt())
    # Write each layer of the array as a band.
    for i in range(depth):
        band = i + 1
        lyr = array[i, :, :] if stacked else array
        if isinstance(lyr, np.ma.MaskedArray):
            # Fill masked cells with the NoData value (the stacked branch
            # previously used the array's default fill value instead).
            lyr = lyr.filled(nodata_val)
        dst_ds.GetRasterBand(band).WriteArray(lyr)
        dst_ds.GetRasterBand(band).SetNoDataValue(nodata_val)
    # Dereference to flush and close the dataset.
    dst_ds = None
    logger.info('Writing complete.')
68,227 | disbr007/os_obia | refs/heads/main | /.ipynb_checkpoints/calc_zonal_stats-checkpoint.py | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 18 14:00:03 2020
@author: disbr007
"""
import argparse
import json
import logging
import os
import matplotlib.pyplot as plt
import numpy as np
import sys
import pandas as pd
import geopandas as gpd
# import fiona
# import rasterio
from rasterstats import zonal_stats
from skimage.feature import greycomatrix, greycoprops
from lib import detect_ogr_driver, read_vec, write_gdf
logger = logging.getLogger(__name__)
# custom_stat_fxn = {
# 'glcm': calc_glcm
# }
# def calc_glcm(patch, distance = [5], angles=[0], levels=)
def load_stats_dict(stats_json):
    """Parse a zonal-stats spec into parallel lists.

    Parameters
    ----------
    stats_json : str or dict
        Path to a JSON file, or an already-loaded dict, of the form
        {name: {'path': raster_path, 'stats': [...], 'bands': [...](optional)}}.

    Returns
    -------
    tuple of lists
        (rasters, names, stats, bands); a bands entry is None when absent.

    Raises
    ------
    FileNotFoundError
        If stats_json is a path that does not exist.
    TypeError
        If stats_json is neither a str path nor a dict.
    """
    if isinstance(stats_json, str):
        # BUGFIX: a missing file was previously only logged, after which the
        # unbound `data` raised a confusing NameError. Raise explicitly.
        if not os.path.exists(stats_json):
            raise FileNotFoundError(
                'Zonal stats file not found: {}'.format(stats_json))
        with open(stats_json) as jf:
            data = json.load(jf)
    elif isinstance(stats_json, dict):
        data = stats_json
    else:
        raise TypeError('stats_json must be a path or dict, '
                        'got {}'.format(type(stats_json)))
    names = []
    rasters = []
    stats = []
    bands = []
    for n, d in data.items():
        names.append(n)
        rasters.append(d['path'])
        stats.append(d['stats'])
        # 'bands' is optional per entry; None keeps list positions aligned.
        bands.append(d.get('bands'))
    return rasters, names, stats, bands
def calc_compactness(geometry):
    """Polsby-Popper compactness: 4*pi*area / perimeter^2 (1.0 for a circle)."""
    perimeter = geometry.boundary.length
    return (4 * np.pi * geometry.area) / perimeter ** 2
def apply_compactness(gdf, out_field='compactness'):
    """Add a Polsby-Popper compactness column computed from each geometry."""
    gdf[out_field] = gdf.geometry.apply(calc_compactness)
    return gdf
def calc_roundness(geometry):
    """Circularity: perimeter^2 / (4*pi*area); 1.0 for a circle, larger otherwise."""
    return geometry.length ** 2 / (4 * np.pi * geometry.area)
def apply_roundness(gdf, out_field='roundness'):
    """Add a circularity (roundness) column computed from each geometry."""
    gdf[out_field] = gdf.geometry.apply(calc_roundness)
    return gdf
def compute_stats(gdf, raster, name=None,
                  stats=None,
                  custom_stats=None, band=None,
                  renamer=None):
    """
    Compute zonal statistics for each polygon in a GeoDataFrame.

    Parameters
    ----------
    gdf : gpd.GeoDataFrame
        Polygons to compute statistics over.
    raster : os.path.abspath | rasterio.raster
        Raster to compute statistics from.
    name : str, optional
        Prefix for the new stat columns when renamer is not supplied
        (columns become '<name>_<stat>').
    stats : list, optional
        Statistic names understood by rasterstats (min, max, median, sum,
        std, unique, range, percentile_<q>, ...). Defaults to
        ['mean', 'min', 'max', 'std'].
    custom_stats : dict, optional
        Passed to zonal_stats as add_stats ({stat_name: callable}).
    band : int, optional
        Raster band to compute statistics on.
    renamer : dict, optional
        Explicit {stat: new_column_name} mapping; overrides name.

    Returns
    -------
    gpd.GeoDataFrame
        The input geodataframe with the stat columns joined on.
    """
    if stats is None:
        stats = ['mean', 'min', 'max', 'std']
    logger.info('Computing {} on raster:\n\t{}'.format(' '.join(stats), raster))
    if renamer is None:
        renamer = {x: '{}_{}'.format(name, x) for x in stats}
    if band:
        logger.info('Band: {}'.format(band))
        # Join zonal stats (one row per polygon) back onto the frame by index.
        gdf = gdf.join(pd.DataFrame(zonal_stats(gdf['geometry'], raster,
                                                stats=stats,
                                                add_stats=custom_stats,
                                                band=band))
                       .rename(columns=renamer),
                       how='left')
    else:
        stats_df = pd.DataFrame(zonal_stats(gdf['geometry'], raster,
                                            stats=stats,
                                            add_stats=custom_stats))
        # logger.info('Stats DF Cols: {}'.format(stats_df.columns))
        # logger.info('GDF cols: {}'.format(gdf.columns))
        gdf = gdf.join(stats_df.rename(columns=renamer), how='left')
    return gdf
def calc_zonal_stats(shp, rasters,
                     names=None,
                     stats=None,
                     area=True,
                     compactness=False,
                     roundness=False,
                     out_path=None):
    """
    Calculate zonal statistics on the given vector file
    for each raster provided.

    Parameters
    ----------
    shp : os.path.abspath or gpd.GeoDataFrame
        Vector file (or already-loaded frame) whose features the
        statistics are computed for.
    rasters : list or os.path.abspath
        List of rasters to compute zonal statistics for.
        Or path to .txt file of raster~name pairs (one per line)
        or path to .json file of
        name: {path: /path/to/raster.tif, stats: ['mean']}.
        or dict of same format as json
    names : list, optional
        List of names to use as prefixes for created stats. Order
        is order of rasters. Derived from the raster file names
        when omitted.
    stats : list, optional
        Statistics to calculate. Defaults to
        ['min', 'max', 'mean', 'count', 'median'].
    area : bool
        True to also compute area of each feature in units of
        projection.
    compactness : bool
        True to also compute compactness of each object.
    roundness : bool
        True to also compute roundness of each object.
    out_path : os.path.abspath, optional
        Path to write vector file with computed stats. Default is to
        add '_stats' suffix before file extension (requires *shp*
        to be a path).

    Returns
    -------
    gpd.GeoDataFrame
        The segments with statistics columns added.

    Raises
    ------
    FileNotFoundError
        If any of the rasters does not exist.
    ValueError
        If *shp* is a GeoDataFrame and no *out_path* was given.
    """
    # Avoid a mutable default argument for stats.
    if stats is None:
        stats = ['min', 'max', 'mean', 'count', 'median']
    # Load data
    if isinstance(shp, gpd.GeoDataFrame):
        seg = shp
    else:
        logger.info('Reading in segments from: {}...'.format(shp))
        seg = read_vec(shp)
    logger.info('Segments found: {:,}'.format(len(seg)))
    # Determine rasters input type. `bands` stays None until a
    # dict/json input provides per-raster band lists; previously it
    # was left undefined for .txt/direct-path inputs and crashed the
    # zip() below with a NameError.
    bands = None
    if isinstance(rasters[0], dict):
        rasters, names, stats, bands = load_stats_dict(rasters[0])
    else:
        if len(rasters) == 1 and os.path.exists(rasters[0]):
            ext = os.path.splitext(rasters[0])[1]
            if ext == '.txt':
                # Text file of 'raster~name' pairs, one per line.
                logger.info('Reading rasters from text file: '
                            '{}'.format(rasters[0]))
                with open(rasters[0], 'r') as src:
                    content = src.readlines()
                rasters = [c.strip() for c in content]
                rasters, names = zip(*(r.split("~") for r in rasters))
                # The original call formatted into a string without a
                # placeholder, silently dropping the raster list.
                logger.info('Located rasters:\n{}'.format('\n'.join(rasters)))
                for r, n in zip(rasters, names):
                    logger.info('{}: {}'.format(n, r))
            elif ext == '.json':
                logger.info('Reading rasters from json file:'
                            ' {}'.format(rasters[0]))
                rasters, names, stats, bands = load_stats_dict(rasters[0])
        if bands is None:
            # Paths passed directly (or via .txt): replicate the flat
            # stats list, one copy per raster. This now also covers a
            # single direct raster path, which previously kept the flat
            # list and mis-paired stats in the zip() below.
            stats = [stats for _ in rasters]
    if bands is None:
        bands = [None for _ in rasters]
    if names is None:
        # Fall back to the raster file names as column prefixes.
        names = [os.path.splitext(os.path.basename(r))[0] for r in rasters]
    # Confirm all rasters exist before starting
    for r in rasters:
        if not os.path.exists(r):
            logger.error('Raster does not exist: {}'.format(r))
            raise FileNotFoundError('Raster does not exist: {}'.format(r))
    # Iterate rasters and compute stats for each
    for r, n, s, bs in zip(rasters, names, stats, bands):
        if bs is None:
            # Split custom stat functions from built-in options
            accepted_stats = ['min', 'max', 'median', 'sum', 'std', 'mean',
                              'unique', 'range', 'majority']
            stats_acc = [k for k in s if k in accepted_stats
                         or k.startswith('percentile_')]
            # Any other key would be a name:custom_fxn pair -- mapping
            # names to functions is not implemented yet, so just warn.
            # (The original filtered the wrong variable, `stats`, here.)
            custom_stats = [k for k in s if k not in accepted_stats
                            and not k.startswith('percentile_')]
            if custom_stats:
                logger.warning('Custom stats not supported yet, '
                               'skipping: {}'.format(custom_stats))
            seg = compute_stats(gdf=seg, raster=r, name=n,
                                stats=stats_acc)
        else:
            # Compute stats for each band
            for b in bs:
                stats_dict = {x: '{}b{}_{}'.format(n, b, x) for x in s}
                seg = compute_stats(gdf=seg, raster=r,
                                    stats=stats_dict,
                                    renamer=stats_dict,
                                    band=b)
    # Area recording
    if area:
        seg['area_zs'] = seg.geometry.area
    # Compactness: Polsby-Popper Score -- 1 = circle
    if compactness:
        seg = apply_compactness(seg)
    if roundness:
        seg = apply_roundness(seg)
    # Write segments with stats to new vector file
    if not out_path:
        if isinstance(shp, gpd.GeoDataFrame):
            # Cannot derive a default path from an in-memory frame.
            raise ValueError('out_path is required when shp is a '
                             'GeoDataFrame.')
        out_path = os.path.join(
            os.path.dirname(shp),
            '{}_stats.shp'.format(os.path.basename(shp).split('.')[0]))
    out_dir = os.path.dirname(out_path)
    if out_dir and not os.path.exists(out_dir):
        os.makedirs(out_dir)
    logger.info('Writing segments with statistics to: {}'.format(out_path))
    write_gdf(seg, out_path)
    return seg
if __name__ == '__main__':
    # CLI entry point: mirrors the parameters of calc_zonal_stats().
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input_shp',
                        type=os.path.abspath,
                        help='Vector file to compute zonal statistics for the features in.')
    parser.add_argument('-o', '--out_path',
                        type=os.path.abspath,
                        help="""Path to write vector file with computed stats. Default is to
                                add '_stats' suffix before file extension.""")
    parser.add_argument('-r', '--rasters',
                        nargs='+',
                        type=os.path.abspath,
                        help="""List of rasters to compute zonal statistics
                                for, or path to .txt file of raster paths
                                (one per line) or path to .json file in format:
                                    {"name":
                                        {"path": "C:\\raster",
                                         "stats": ["mean", "min"]}
                                     }""")
    parser.add_argument('-n', '--names',
                        type=str,
                        nargs='+',
                        help="""List of names to use as prefixes for created stats fields.
                                Length must match number of rasters supplied. Order is
                                the order of the rasters to apply prefix names for. E.g.:
                                'ndvi' -> 'ndvi_mean', 'ndvi_min', etc.""")
    parser.add_argument('-s', '--stats',
                        type=str,
                        nargs='+',
                        default=['min', 'max', 'mean', 'count', 'median'],
                        help='List of statistics to compute.')
    parser.add_argument('-a', '--area',
                        action='store_true',
                        help='Use to compute an area field.')
    parser.add_argument('-c', '--compactness',
                        action='store_true',
                        help='Use to compute a compactness field.')
    parser.add_argument('-rd', '--roundness',
                        action='store_true',
                        help='Use to compute a roundness field.')
    args = parser.parse_args()
    # Delegate all work to the library function above.
    calc_zonal_stats(shp=args.input_shp,
                     rasters=args.rasters,
                     names=args.names,
                     stats=args.stats,
                     area=args.area,
                     compactness=args.compactness,
                     roundness=args.roundness,
                     out_path=args.out_path)
logger.info('Done.') | {"/.ipynb_checkpoints/calc_zonal_stats-checkpoint.py": ["/lib.py"]} |
68,240 | SulaMueller/2018AntiScatterGrid | refs/heads/main | /smSTLAfile.py | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 12 16:24:30 2018
DESCRIPTION: turns given input into STLA File
* input should be array created by smFacets
-> array with normals and vertice saved as linear array
@author: Sula Mueller
"""
from decimal import Decimal
''' turns float into string (exp format, as requested by stla file reader)
REMARK: '%.4E' specifies number of digits/ precision '''
def formatIntoDecimalString(number):
    """Format *number* in scientific notation with 4 decimal digits."""
    formatted = '%.4E' % Decimal(number)
    return formatted
''' createSTLAFile
DESCRIPTION: turns given input into STLA File
INPUT:
* 1D array with all values stored in linear fashion
-> array with normals and vertice saved as linear array
* path: location to save created file (eg: 'C:/ASG/gridfiles/')
end with / or \
* name: of file without extension, eg: 'example'
OUTPUT: .stl file saved at specified location '''
def createSTLAFile(path, name, Facets):
    """Write the flat facet list to an ASCII STL (.stl) file.

    Parameters
    ----------
    path : str
        Directory to save the created file in (e.g. 'C:/ASG/gridfiles').
    name : str
        File name without extension, e.g. 'example'; also used as the
        solid name in the STL header.
    Facets : sequence of float
        Normals and vertices stored linearly: 12 values per facet
        (normal + 3 vertices).
    """
    num_facets = len(Facets) // 12  # 12 values define one facet
    print(str(num_facets), 'facets')  # to inform user ^^
    ''' FORMAT ALL THE NUMBERS: '''
    F = [formatIntoDecimalString(v) for v in Facets]
    # os.path.join handles the separator -- no hard-coded backslash,
    # so forward-slash paths work too.
    filename = os.path.join(path, name + '.stl')
    # 'with' guarantees the file is closed even if writing fails.
    with open(filename, 'w') as file:
        ''' HEAD: '''
        file.write('solid ' + name + '\n')
        ''' CONTENT: one facet per 12 consecutive values '''
        for i in range(0, num_facets):
            # Slice the 12 values of this facet instead of the original
            # quadratic F.pop(0) chain.
            v = F[i * 12:(i + 1) * 12]
            file.write(' facet normal {} {} {}\n'
                       ' outer loop\n'
                       ' vertex {} {} {}\n'
                       ' vertex {} {} {}\n'
                       ' vertex {} {} {}\n'
                       ' endloop\n'
                       ' endfacet\n'.format(*v))
        ''' FOOT: '''
        file.write('endsolid')
    print('Done')
    return
68,241 | SulaMueller/2018AntiScatterGrid | refs/heads/main | /smFacets.py | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 13 08:31:13 2018
@author: Sula Mueller
DESCRIPTION: turns information about leaf angles into Facets for STLA File
# each facet is defined by: normal, vertex1, vertex2, vertex3
* normal: nx, ny, nz; always defined in POSITIVE direction
(routine flips them automatically for faces facing opposite)
* vertex: x,y,z
=> 12 values for 1 triangle
# need 2 triangles to define rectangle
# all values are stored in linear fashion -> [nx,ny,nz,v0,v1...,nx,...]
# leaves orientated along x-axis ("rho-leaves") are continuous, "alpha-leaves"
are segmented to avoid overleafing
"""
import math as m
''' getD
* leaves are tilted -> upper edges are shifted by dx, dz
* getD to calculate that shift
* works in x,z alike '''
def getD(angle, h_grid):
    """Shift of a leaf's upper edge caused by its tilt *angle*.

    A perpendicular leaf (angle == pi/2) has no shift; otherwise the
    top is displaced by -h_grid / tan(angle). Works in x and z alike.
    """
    if angle == m.pi / 2:
        return 0
    return -(h_grid / m.tan(angle))
''' appends values in vector v to existing array F '''
def appendVector(F, v):
    """Append every element of *v* to list *F* (in place) and return F.

    Uses list.extend instead of the original element-by-element loop.
    """
    F.extend(v)
    return F
''' flips 3-dimensional vector to turn into its negative '''
def neg(n):
    """Return the negation of 3-vector *n* as a new list."""
    x, y, z = n[0], n[1], n[2]
    return [-x, -y, -z]
''' appends F by values for two triangles forming a rectangle '''
def appendRectangle(F, n, x0, x1, x2, x3):
    """Append the two triangles (normal + 3 vertices each) forming one
    rectangle to the flat facet list *F* (in place) and return it."""
    for triple in (n, x0, x1, x2,   # first triangle
                   n, x1, x3, x2):  # second triangle
        for value in triple:
            F.append(value)
    return F
''' returns 6x2 surfaces of "cube" defined by 8 points and 3 normals '''
def CubeFromPoints(n_a, n_y, n_b, x0, x1, x2, x3, x0_, x1_, x2_, x3_):
    """6x2 surfaces of a "cube" defined by 8 points and 3 face normals."""
    faces = [(neg(n_a), x0, x1, x2, x3),
             (n_a, x0_, x1_, x2_, x3_),
             (neg(n_y), x0, x0_, x1, x1_),
             (n_y, x2, x2_, x3, x3_),
             (neg(n_b), x0, x0_, x2, x2_),
             (n_b, x1, x1_, x3, x3_)]
    F = []
    for normal, a, b, c, d in faces:
        F = appendRectangle(F, normal, a, b, c, d)
    return F
''' same as CubeFromPoints with one pair of faces not parallel (b)
-> need one more normal '''
def PolyederFromPoints(n_a, n_y, n_b0, n_b1, x0, x1, x2, x3, x0_, x1_, x2_,
                       x3_):
    """Polyeder with one parallel face pair (a): delegate to the
    irregular variant with the a-normal reused for both a-faces."""
    return irregPolyederFromPoints(n_a, n_a, n_y, n_b0, n_b1,
                                   x0, x1, x2, x3, x0_, x1_, x2_, x3_)
''' irregPolyederFromPoints
* returns 6x2 surfaces of irregular "cube" defined by 8 points and 5 normals
* 2 pairs of faces NOT parallel, third pair IS parallel (top, bottom)
* a,b: if alpha-> a=x, b=z
if rho -> a=z, b=x '''
def irregPolyederFromPoints(n_a0, n_a1, n_y, n_b0, n_b1, x0, x1, x2, x3, x0_,
                            x1_, x2_, x3_):
    """6x2 surfaces of an irregular "cube": two face pairs (a, b) are
    not parallel, the third pair (top/bottom, normal n_y) is."""
    faces = [(neg(n_a0), x0, x1, x2, x3),
             (n_a1, x0_, x1_, x2_, x3_),
             (neg(n_y), x0, x0_, x1, x1_),
             (n_y, x2, x2_, x3, x3_),
             (neg(n_b0), x0, x0_, x2, x2_),
             (n_b1, x1, x1_, x3, x3_)]
    F = []
    for normal, a, b, c, d in faces:
        F = appendRectangle(F, normal, a, b, c, d)
    return F
''' FacetizeOneLeafRho
DESCRIPTION:
returns facet values of leaf orientated along x-axis (rho-orientation)
INPUT:
dx0 -> tilt of left (small) edge face
dx1 -> tilt of right (small) edge face
dz -> tilt of entire leaf (difference between bottom, top coordinates)
rho -> angle of tilt (causing dz)
X_0 -> [X[0],Z[i]] -> left front bottom point (i... leaf)
X_1 -> [X[end]+th_grid,Z[i]] -> right front bottom point '''
def FacetizeOneLeafRho(dx0, dx1, dz, rho, X_0, X_1, h_grid, th_grid):
    """Facet values (flat list) for one continuous leaf along x ("rho").

    dx0/dx1 -- tilt of the left/right (small) edge faces
    dz      -- shift of the top edge caused by the leaf tilt angle rho
    X_0/X_1 -- [x, z] bottom coordinates of the left/right front corners
    """
    ''' determine normals: '''
    abs_n0 = m.sqrt(h_grid*h_grid + dx0*dx0)  # length of normals...
    abs_n1 = m.sqrt(h_grid*h_grid + dx1*dx1)  # ...for normalization
    # normalized normal of left (small) edge face:
    n_x0 = [h_grid/abs_n0, -dx0/abs_n0, 0]
    # normalized normal of right (small) edge face:
    n_x1 = [h_grid/abs_n1, -dx1/abs_n1, 0]
    n_y = [0, 1, 0]
    n_z = [0, -m.cos(rho), m.sin(rho)]  # pointing to positive z
    ''' determine 8 edge points of leaf: '''
    x0 = [X_0[0], 0, X_0[1]]
    x1 = [X_1[0], 0, X_1[1]]
    x2 = [X_0[0]+dx0, h_grid, X_0[1]+dz]
    x3 = [X_1[0]+dx1, h_grid, X_1[1]+dz]
    # +th_grid: back face of this leaf (leaf thickness along z)
    x0_ = [X_0[0], 0, X_0[1]+th_grid]
    x1_ = [X_1[0], 0, X_1[1]+th_grid]
    x2_ = [X_0[0]+dx0, h_grid, X_0[1]+dz+th_grid]
    x3_ = [X_1[0]+dx1, h_grid, X_1[1]+dz+th_grid]
    return PolyederFromPoints(n_z, n_y, n_x0, n_x1, x0, x1, x2, x3, x0_, x1_,
                              x2_, x3_)
''' FacetizeOneLeafAlpha
DESCRIPTION:
returns facet values of leaf oriented along z-axis (alpha-orientation)
INPUT:
dz0 -> tilt of front (small) edge face
dz1 -> tilt of back (small) edge face
alpha -> angle of tilt (causing dx)
X_0 -> [X[j], Z[i]+th_grid] -> left front bottom point
Z[i]+th_grid: cuts them short in z to avoid overleafing with
rho-leaves
X_1 -> [X[j], Z[i+1]] -> left back bottom point
REMARK: i -> row (corresponding rho leaf), j -> column '''
def FacetizeOneLeafAlpha(dz0, dz1, alpha, X_0, X_1, h_grid, th_grid):
    """Facet values (flat list) for one segmented leaf along z ("alpha").

    dz0/dz1 -- tilt of the front/back (small) edge faces
    X_0/X_1 -- [x, z] bottom coordinates of the left front/back corners
               (callers pass Z[i]+th_grid to stay clear of rho-leaves)
    """
    # tilt of entire leaf (difference between bottom, top coordinates):
    dx = getD(alpha, h_grid)
    ''' determine normals: '''
    n_x = [m.sin(alpha), -m.cos(alpha), 0]  # pointing to positive x
    n_y = [0, 1, 0]
    abs_n0 = m.sqrt(h_grid*h_grid + dz0*dz0)  # length of normals...
    abs_n1 = m.sqrt(h_grid*h_grid + dz1*dz1)  # ...for normalization
    # normalized normal of front (small) edge face:
    n_z0 = [0, -dz0/abs_n0, h_grid/abs_n0]
    # normalized normal of back (small) edge face:
    n_z1 = [0, -dz1/abs_n1, h_grid/abs_n1]
    ''' determine 8 edge points of leaf: '''
    x0 = [X_0[0], 0, X_0[1]]
    x1 = [X_1[0], 0, X_1[1]]
    x2 = [X_0[0]+dx, h_grid, X_0[1]+dz0]
    x3 = [X_1[0]+dx, h_grid, X_1[1]+dz1]
    # +th_grid: right face of this leaf (leaf thickness along x)
    x0_ = [X_0[0]+th_grid, 0, X_0[1]]
    x1_ = [X_1[0]+th_grid, 0, X_1[1]]
    x2_ = [X_0[0]+th_grid+dx, h_grid, X_0[1]+dz0]
    x3_ = [X_1[0]+th_grid+dx, h_grid, X_1[1]+dz1]
    return PolyederFromPoints(n_x, n_y, n_z0, n_z1, x0, x1, x2, x3, x0_, x1_,
                              x2_, x3_)
''' FacetizeGapRho
DESCRIPTION:
to totally fill the gap between 2 tiles with one (thick) leaf
melts last leaf of previous tile and first leaf of next tile
returns Facet vector for that structure
INPUT:
dx0 -> tilt of left (small) edge face
dx1 -> tilt of right (small) edge face
dz -> tilt of front face ("current leaf")
dz1 -> tilt of back face ("next leaf" (-> i+1))
rho/ rho1 -> angles of tilt (causing dzs) of front/back faces
X_0 -> [X[0],Z[i]] -> left front bottom point (i... "current leaf")
X_1 -> [X[end]+th_grid, Z[i]] -> right front bottom point
X_4 -> [X[0], Z[i+1]+th_grid] -> left back bottom point ("next leaf") '''
def FacetizeGapRho(dx0, dx1, dz, dz1, rho, rho1, X_0, X_1, X_4_, h_grid):
    """Facets for one thick rho-leaf filling the gap between two tiles:
    melts the last leaf of the previous tile and the first leaf of the
    next tile into one solid.

    dz/rho   -- tilt/angle of the front face ("current" leaf i)
    dz1/rho1 -- tilt/angle of the back face ("next" leaf i+1)
    X_4_     -- [x, z] bottom point of the back face (caller already
                added +th_grid of the next leaf to its z)
    """
    ''' determine normals: '''
    abs_n0 = m.sqrt(h_grid*h_grid + dx0*dx0)  # length of normals...
    abs_n1 = m.sqrt(h_grid*h_grid + dx1*dx1)  # ...for normalization
    # normalized normal of left (small) edge face:
    n_x0 = [h_grid/abs_n0, -dx0/abs_n0, 0]
    # normalized normal of right (small) edge face:
    n_x1 = [h_grid/abs_n1, -dx1/abs_n1, 0]
    n_y = [0, 1, 0]
    n_z0 = [0, -m.cos(rho), m.sin(rho)]  # pointing to positive z
    n_z1 = [0, -m.cos(rho1), m.sin(rho1)]  # pointing to positive z
    ''' determine 8 edge points of leaf: '''
    x0 = [X_0[0], 0, X_0[1]]
    x1 = [X_1[0], 0, X_1[1]]
    x2 = [X_0[0]+dx0, h_grid, X_0[1]+dz]
    x3 = [X_1[0]+dx1, h_grid, X_1[1]+dz]
    # back face sits at the next leaf's z (thickness already included):
    x4_ = [X_0[0], 0, X_4_[1]]
    x5_ = [X_1[0], 0, X_4_[1]]
    x6_ = [X_0[0]+dx0, h_grid, X_4_[1]+dz1]
    x7_ = [X_1[0]+dx1, h_grid, X_4_[1]+dz1]
    return irregPolyederFromPoints(n_z0, n_z1, n_y, n_x0, n_x1, x0, x1, x2, x3,
                                   x4_, x5_, x6_, x7_)
''' FacetizeGapAlpha
DESCRIPTION:
to totally fill the gap between 2 tiles with one (thick) leaf
melts last leaf of previous tile and first leaf of next tile
returns Facet vector for that structure
INPUT:
dz0 -> tilt of front (small) edge face
dz1 -> tilt of back (small) edge face
alpha/ alpha1 -> angles of tilt (causing dx) of right/ left faces
X_0 -> [X[j], Z[i]+th_grid] -> left front bottom point (j -> "current
leaf")
X_1 -> [X[j], Z[i+1]] -> left back bottom point
X_4 -> [X[j+1]+th_grid, Z[i]+th_grid] -> right front bottom point (of "next
leaf")
i... row (corresponding rho leaf), j... column '''
def FacetizeGapAlpha(dz0, dz1, alpha, alpha1, X_0, X_1, X_4_, h_grid):
    """Facets for one thick alpha-leaf filling the gap between two tiles:
    melts the last leaf of the previous tile and the first leaf of the
    next tile into one solid.

    dz0/dz1      -- tilt of the front/back (small) edge faces
    alpha/alpha1 -- tilt angles of the left ("current" leaf j) and
                    right ("next" leaf j+1) faces
    X_4_         -- [x, z] bottom point of the right face (caller
                    already added +th_grid of the next leaf to its x)
    """
    # tilt of left face (difference between bottom, top coordinates):
    dx = getD(alpha, h_grid)
    # tilt of right face (difference between bottom, top coordinates):
    dx1 = getD(alpha1, h_grid)
    ''' determine normals: '''
    n_x0 = [m.sin(alpha), -m.cos(alpha), 0]  # pointing to positive x
    n_x1 = [m.sin(alpha1), -m.cos(alpha1), 0]  # pointing to positive x
    n_y = [0, 1, 0]
    abs_n0 = m.sqrt(h_grid*h_grid + dz0*dz0)  # length of normals...
    abs_n1 = m.sqrt(h_grid*h_grid + dz1*dz1)  # ...for normalization
    # normalized normal of front (small) edge face:
    n_z0 = [0, -dz0/abs_n0, h_grid/abs_n0]
    # normalized normal of back (small) edge face:
    n_z1 = [0, -dz1/abs_n1, h_grid/abs_n1]
    ''' determine 8 edge points of leaf: '''
    x0 = [X_0[0], 0, X_0[1]]
    x1 = [X_1[0], 0, X_1[1]]
    x2 = [X_0[0]+dx, h_grid, X_0[1]+dz0]
    x3 = [X_1[0]+dx, h_grid, X_1[1]+dz1]
    # right face sits at the next leaf's x (thickness already included):
    x4_ = [X_4_[0], 0, X_0[1]]
    x5_ = [X_4_[0], 0, X_1[1]]
    x6_ = [X_4_[0]+dx1, h_grid, X_0[1]+dz0]
    x7_ = [X_4_[0]+dx1, h_grid, X_1[1]+dz1]
    return irregPolyederFromPoints(n_x0, n_x1, n_y, n_z0, n_z1, x0, x1, x2, x3,
                                   x4_, x5_, x6_, x7_)
''' FacetizeEntireGrid
DESCRIPTION:
turns information about leaf angles into Facets for STLA File
returns entire Facets vector of ALL facets in linear fashion
-> [nx, ny ,nz, v0, v1..., nx,...]
INPUT:
Angles (from smAngles.determineAllAnglesAndCoordinates)
Grid Ratio, thickness of grid leaves '''
def FacetizeEntireGrid(Angles, GR, th_grid):
    """Turn the leaf angles/coordinates into one flat facet list
    [nx,ny,nz, v0..., nx,...] for the STLA writer.

    Angles  -- output of smAngles.determineAllAnglesAndCoordinates
    GR      -- grid ratio (leaf height h_grid = th_grid * GR)
    th_grid -- thickness of the grid leaves
    """
    ''' INITIALIZE: '''
    alphas = Angles[0]  # per output of determineAllAnglesAndCoordinates
    rhos = Angles[1]
    X = Angles[2]
    Z = Angles[3]
    N_P = Angles[4]
    a = len(alphas)
    r = len(rhos)
    h_grid = th_grid*GR
    F = []  # Facets vector with all values
    ''' EDGE RECYCLING: reuse tilt of leaves '''
    # tilt of first/last alpha leaf determines tilt of small edge faces
    # of rho-leaves (dx0, dx1):
    dx0 = getD(alphas[0], h_grid)
    dx1 = getD(alphas[a-1], h_grid)
    # tilt of "current" rho-leaf determines tilt of small front edge face
    # of alpha-leaves (dz0):
    dz0 = getD(rhos[0], h_grid)
    # REMARK: dz1 will be calculated FIRST for back edge face of alpha-leaves,
    # then reused for next rho-leaf with dz0 = dz1 at end of alpha-loop
    skip_Flag_rho = False
    # gapfiller already includes next leaf, next leaf will be skipped
    ''' RHO LEAVES: '''
    for i in range(0, r):  # z
        if skip_Flag_rho:  # skip all leaves that are included in "gap-filler"
            skip_Flag_rho = False  # continue with leaf after that
        else:
            rho = rhos[i]  # from input Angles
            if ((i+1)%(N_P[1]+1)==0 and (i<r-1) ):  # gap, but not last leaf
                rho1 = rhos[i+1]  # of "next" leaf (other side of gap)
                dz1 = getD(rho1, h_grid)
                F = appendVector(F, FacetizeGapRho(dx0, dx1, dz0, dz1, rho,
                                                   rho1, [X[0], Z[i]],
                                                   [X[a-1]+th_grid, Z[i]],
                                                   [X[0], Z[i+1]+th_grid],
                                                   h_grid))
                dz0 = dz1  # for use in alpha leaves and reuse next rho leaf
                skip_Flag_rho = True  # to skip next leaf
                # (because already included in edge)
            else:  # normal leaf
                F = appendVector(F, FacetizeOneLeafRho(dx0, dx1, dz0, rho,
                                                       [X[0], Z[i]],
                                                       [X[a-1]+th_grid, Z[i]],
                                                       h_grid, th_grid))
            if i==r-1: return F  # return here, because last rho-leaf
            # can't be boardered by more alpha-leaves
            ''' ALPHA LEAVES: included in rho loop because dont need them if
            gap filled '''
            if skip_Flag_rho:
                i = i+1  # so that alpha leave boarder to second "leaf" of gap
                # NOTE(review): rebinding i here only affects the rest of
                # THIS iteration; the for loop resets i on the next pass.
                # That appears intended (skip_Flag_rho does the skipping),
                # but confirm.
            dz1 = getD(rhos[i+1], h_grid)  # needed for back (small) edge face
            # reuse it later for rho
            skip_Flag_alpha = False  # x-direction has tile gaps too
            for j in range(0, a):  # x
                alpha = alphas[j]
                if ( (j+1)%(N_P[0]+1)==0 and (j<a-1)):  # gap
                    alpha1 = alphas[j+1]
                    # of "next" leaf; other side of gap
                    F = appendVector(F, FacetizeGapAlpha(dz0, dz1, alpha,
                                                         alpha1,
                                                         [X[j], Z[i]+th_grid],
                                                         [X[j], Z[i+1]],
                                                         [X[j+1]+th_grid,
                                                          Z[i]+th_grid],
                                                         h_grid))
                    skip_Flag_alpha = True
                else:
                    if skip_Flag_alpha:  # skip next leaf
                        skip_Flag_alpha = False
                    else:
                        F = appendVector(F, FacetizeOneLeafAlpha(dz0, dz1,
                                                                 alpha,
                                                                 [X[j],
                                                                  Z[i]+th_grid],
                                                                 [X[j],Z[i+1]],
                                                                 h_grid,
                                                                 th_grid))
            dz0 = dz1  # for reuse in rho leaves
68,242 | SulaMueller/2018AntiScatterGrid | refs/heads/main | /smLines.py | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 28 12:00:03 2018
DESCRIPTION: turns information about leaf angles into Lines and Boundaries for
IGES File
INPUT: Angles from smAngles
OUTPUT: Matrix -> NxM = (N_P_total+1)*12 x 6
each entry [:,1:6] represents one line [:,[x0,y0,z0,x1,y1,z1]]
12 lines for each pixel: 4 bottom lines, 4 vertical lines, 4 top lines
12 lines for outer edges
@author: Sula Mueller
"""
import numpy as np
import math as m
''' getD
* leaves are tilted -> upper edges are shifted by dx, dz
* getD to calculate that shift
* works in x,z alike '''
def getD (angle, h_grid):
    """Upper-edge shift of a tilted leaf over height *h_grid*.

    Zero for a perpendicular leaf (angle == pi/2), otherwise
    h_grid / tan(angle). NOTE: opposite sign convention from
    smFacets.getD, which returns the negated shift.
    """
    if angle == m.pi/2:
        return 0  # 90 degrees -> perpendicular; no tilt
    return h_grid / m.tan(angle)
'''getPixelPoints
DESCRIPTION: returns array of the 8 corner points surrounding one pixel
INPUT: coordinates of 4 bottom points ([x0,z0], [x0,z1], [x1,z1], [x1,z0])
x0, z0 should be +th_grid
dx, dz -> tilt of upper points
OUTPUT: (8,3) point array '''
def getPixelPoints (x0,x1,z0,z1, dx0,dx1,dz0,dz1, h_grid):
    """8 corner points of one pixel cell as an (8, 3) float array:
    4 bottom points (y = 0) then 4 top points (y = h_grid), the top
    ones shifted by the leaf tilts dx*/dz*."""
    bottom = [[x0, 0, z0],
              [x1, 0, z0],
              [x1, 0, z1],
              [x0, 0, z1]]
    top = [[x0 + dx0, h_grid, z0 + dz0],
           [x1 + dx1, h_grid, z0 + dz0],
           [x1 + dx1, h_grid, z1 + dz1],
           [x0 + dx0, h_grid, z1 + dz1]]
    return np.array(bottom + top, dtype=float)
'''getPixelLines
DESCRIPTION: returns line array of all lines surrounding pixels
INPUT: coordinates of 4 bottom points ([x0,z0], [x0,z1], [x1,z1], [x1,z0])
x0, z0 should be +th_grid
dx, dz -> tilt of upper points
OUTPUT: line array
REMARK: DON'T do for last alpha,rho (extra routine for outer lines)'''
def getPixelLines (x0,x1,z0,z1, dx0,dx1,dz0,dz1, h_grid):
    """12 edge lines of one pixel cell as a (12, 6) float array
    [x0,y0,z0, x1,y1,z1]: bottom ring, vertical edges, top ring.
    NOT for the last alpha/rho leaf (outer edges have their own routine).
    """
    # bottom corners (y = 0) and tilted top corners (y = h_grid)
    b0, b1, b2, b3 = [x0, 0, z0], [x1, 0, z0], [x1, 0, z1], [x0, 0, z1]
    t0 = [x0 + dx0, h_grid, z0 + dz0]
    t1 = [x1 + dx1, h_grid, z0 + dz0]
    t2 = [x1 + dx1, h_grid, z1 + dz1]
    t3 = [x0 + dx0, h_grid, z1 + dz1]
    lines = [b0 + b1, b1 + b2, b2 + b3, b3 + b0,   # bottom ring
             b0 + t0, b1 + t1, b2 + t2, b3 + t3,   # vertical edges
             t0 + t1, t1 + t2, t2 + t3, t3 + t0]   # top ring
    return np.array(lines, dtype=float)
''' getAllLinesLoop
DESCRIPTION: turns information about leaf angles into Lines for IGES File
INPUT: Angles from smAngles
OUTPUT Lines: Matrix [(N_P_total+1)*12, 6]
each entry [:, 1:6] represents one line [:,[x0,y0,z0,x1,y1,z1]]
12 lines for each pixel: 4 bottom lines, 4 vertical lines, 4 top lines
all lines linearly stored (12 entries belong together)
inner vs outer:
inner: lines directly boardering the pixels
outer: edges of entire grid (stored last in same array)
def getAllLinesLoop (Angles, GR, th_grid):
#INITIALIZE:
alphas = Angles[0] # per output of determineAllAnglesAndCoordinates
rhos = Angles[1]
X = Angles[2]
Z = Angles[3]
N_P = Angles[4]
a = len(alphas)
r = len(rhos)
h_grid = th_grid*GR
# N_P+1 angles on tile -> (a-1)/ (N_P[0] +1) = N_Tile
N_P_x = ((a-1)/ (N_P[0] +1)) * N_P[0]
N_P_z = ((r-1)/ (N_P[1] +1)) * N_P[1]
N_P_total = N_P_x * N_P_z
L = np.zeros[(N_P_total+1)*12,6] # Lines array
index = 0 # to count entries in L (despite gaps)
#INNER LINES:
dx00 = getD(alphas[0], h_grid) # tilt of left outer edge
dx0 = dx00 # tilt of first leaf = tilt of outer edge
dz00 = getD(rhos[0], h_grid) # tilt of front outer edge
dz0 = dz00 # tilt of first leaf = tilt of outer edge
for i in range(0,r-1): #z
# start with left edge of pixel, include right edge in loop
# -> need to exclude last angle = right edge from looping
if (i+1)%(N_P[1]+1)==0: # if gap
dz0 = getD(rhos[i+1], h_grid)
# do nothing
else:
dz1 = getD(rhos[i+1], h_grid) # tilt of right edge
for j in range(0,a-1): #x
if (j+1)%(N_P[0]+1)==0: # if gap
dx0 = getD(alphas[j+1], h_grid)
# do nothing
else:
dx1 = getD(alphas[j+1], h_grid)
L[index:index+4,:] = getBottomLines(X[j]+th_grid,X[j+1],
Z[i]+th_grid,Z[i+1])
L[index+4:index+8,:] =getVerticalLines(X[j]+th_grid,X[j+1],
Z[i]+th_grid,Z[i+1],
dx0,dx1,dz0,dz1,
h_grid)
L[index+8:index+12,:] = getTopLines(X[j]+th_grid,X[j+1],
Z[i]+th_grid,Z[i+1],
dx0,dx1,dz0,dz1,h_grid)
index = index + 12
dx0 = dx1 #tilt of le edge, reuse for ri edge of next pixel
dz0 = dz1 # tilt of back edge, reuse for front edge of next pixel
#OUTER LINES:
dx0 = dx00
dz0 = dz00
# dx1, dz1 can be directly reused
L[index:index+4,:] = getBottomLines (X[0],X[-1]+th_grid,
Z[0],Z[-1]+th_grid)
L[index+4:index+8,:] = getVerticalLines(X[0],X[-1]+th_grid, Z[0],
Z[-1]+th_grid, dx0,dx1,dz0,dz1,
h_grid)
L[index+8:index+12,:] = getTopLines(X[0],X[-1]+th_grid, Z[0],Z[-1]+th_grid,
dx0,dx1,dz0,dz1, h_grid)
return L'''
| {"/smPsection.py": ["/smFacets.py"], "/ASGmain.py": ["/smParam2File.py"], "/smIGESfile.py": ["/smPsection.py"], "/smParam2File.py": ["/smAngles.py", "/smFacets.py", "/smSTLAfile.py", "/smIGESfile.py"]} |
68,243 | SulaMueller/2018AntiScatterGrid | refs/heads/main | /smAngles.py | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 12 09:24:32 2018
@author: Sula Mueller
DESCRIPTION: calculates all collimator leaf angles for each leaf
detailed description: see MAIN ROUTINE determineAllAnglesAndCoordinates
"""
import numpy as np
import math as m
''' determineX
determine position (coordinate) of (left) edge of current alpha-leaf
i/j: current tile/ pixel
other input: scalar, only value for x-direction '''
def determineX(EAD, edge_Module, edge_Element, delta_e, delta_p, i, j):
    """x-coordinate of the (left) edge of alpha-leaf j on tile i.

    All arguments are scalars (x-direction values only)."""
    leaf_offset = i * delta_e + j * delta_p
    return EAD + edge_Module + edge_Element + leaf_offset
''' determineZ
determine position (coordinate) of (front) edge of current rho-leaf
k/m/n: current tile/ pixel/ module
other input: scalar, only value for x-direction
difference to determineX: can also consider different rows of modules '''
def determineZ(EAD, size_M, gap_M, edge_Module, edge_Element, delta_e,
               delta_p, k, m, n):
    """z-coordinate of the (front) edge of rho-leaf m on tile k of
    module n. All arguments are scalars (z-direction values only);
    modules are assumed appended along positive z."""
    module_offset = n * (size_M + gap_M)
    leaf_offset = k * delta_e + m * delta_p
    return EAD + module_offset + edge_Module + edge_Element + leaf_offset
''' determineAngle
calculate angle of leaf
INPUT: given distance of current edge to centerline (center of module)
OUTPUT: angle (can be alpha or rho) '''
def determineAngle(b, SDD):
    """Leaf angle (alpha or rho) for an edge at distance *b* from the
    module center: pi/2 for b == 0, else arctan(SDD / b)."""
    return m.pi/2 if b == 0 else np.arctan(SDD/b)
''' determineAnglesSparseInfo
does the same as determineAllAnglesAndCoordinates with simplifications:
* edges, gaps = 0
* isotropic: N_E,P [x] = N_E,P [z] -> scalar input
* does NOT consider thickness of grid for coordinate calculations ->
will be off by th_grid/2 (probably low influence) '''
def determineAnglesSparseInfo(SDD, size_M, N_E, N_P, n):
    """Simplified wrapper around determineAllAnglesAndCoordinates:
    all edges/gaps zero, isotropic element/pixel counts. Grid-leaf
    thickness is ignored, so coordinates are off by th_grid/2."""
    half_module = size_M / 2
    return determineAllAnglesAndCoordinates(
        SDD, [half_module, half_module], [size_M, size_M],
        [0, 0], [0, 0], [0, 0],
        [0, 0], [0, 0], [0, 0],
        [N_E, N_E], [N_P, N_P], n)
''' MAIN ROUTINE: determineAllAnglesAndCoordinates
# DESCRIPTION:
calculates angles at specified coordinates:
edges of pixels, tiles and module
saves angle and position for each leaf (left, front positions)
considers leaves of grid -> makes 1 entry more than total number of pixels
in row
# INPUTS:
vectors with arg[0] = arg[x], arg[1] = arg[z]
M... Module, E... Element, P... Pixel
SDD... Source detector difference
EAD... Edge Axis Difference -> the distance from central axis (beam) to
actual edge of module (given as NEGATIVE value)
"edge": edge of element <-> edge of next functional unit,
"gap": distance between elements of same kind
edge_M/E: _0-> CS origin side, _1 -> other side, positive x,z
(if not specified, assume _0 -> on CO side)
for several (n) modules in z: assume, they are appended at positive z
# OUTPUT:
[0]: angles for leaves in z-direction
[1]: angles for leaves in x-direction
[2]: x-coordinates of (front) leaf edges in z-direction
[3]: z-coordinates of (left) leaf edges in x-direction
REMARK: coordinates of back and right side leaf edges derived from
known leaf thickness
[4]: number of pixels on tile (needed later for filling of gaps between
tiles) '''
def determineAllAnglesAndCoordinates(SDD, EAD, size_M, edge_M0, edge_M1,
                                     gap_M, edge_E0, edge_E1, gap_E,
                                     N_E, N_P, n):
    """
    Calculate the leaf angle and edge coordinate for every leaf of the
    grid (left edges of alpha-leaves along x, front edges of rho-leaves
    along z). See the comment block above for the argument conventions
    (2-vectors with arg[0] = x, arg[1] = z) and the output layout
    [alphas, rhos, X, Z, N_P].
    """
    # Counts may arrive as floats; normalize once so both np.zeros
    # sizes and loop ranges work. (Previously only the x-direction was
    # cast, so float z-counts broke np.zeros/range.)
    n_e = [int(N_E[0]), int(N_E[1])]
    n_p = [int(N_P[0]), int(N_P[1])]
    alphas = np.zeros(n_e[0]*(n_p[0]+1))  # angles of leaves along z
    X = np.zeros(n_e[0]*(n_p[0]+1))       # (front) edges of these leaves
    rhos = np.zeros(n_e[1]*(n_p[1]+1))    # angles of leaves in x-direction
    Z = np.zeros(n_e[1]*(n_p[1]+1))       # (left) edges of these leaves
    delta_e = []  # distance between 2 Element center points
    delta_e.append((size_M[0]-edge_M0[0]-edge_M1[0])/n_e[0])  # in x
    delta_e.append((size_M[1]-edge_M0[1]-edge_M1[1])/n_e[1])  # in z
    delta_p = []  # distance between 2 Pixel center points
    delta_p.append((delta_e[0]-gap_E[0]-edge_E0[0]-edge_E1[0])/n_p[0])  # in x
    delta_p.append((delta_e[1]-gap_E[1]-edge_E0[1]-edge_E1[1])/n_p[1])  # in z
    # in x:
    for i in range(0, n_e[0]):  # for each tile
        for j in range(0, n_p[0]+1):  # for each pixel edge
            x = determineX(EAD[0], edge_M0[0], edge_E0[0], delta_e[0],
                           delta_p[0], i, j)
            # x: position/coordinate of (left) edge of current leaf
            alphas[i*(n_p[0]+1)+j] = determineAngle(x, SDD)
            X[i*(n_p[0]+1)+j] = x
    # in z:
    for i in range(0, n_e[1]):  # for each tile
        for j in range(0, n_p[1]+1):  # for each pixel edge
            # BUG FIX: pass the z-component gap_M[1]; the whole vector
            # was passed before, which made determineZ add a list to a
            # number (TypeError) for any module index n > 0.
            z = determineZ(EAD[1], size_M[1], gap_M[1], edge_M0[1],
                           edge_E0[1], delta_e[1], delta_p[1], i, j, n)
            # z: position/coordinate of (front) edge of current leaf
            rhos[i*(n_p[1]+1)+j] = determineAngle(z, SDD)
            Z[i*(n_p[1]+1)+j] = z
    return [alphas, rhos, X, Z, N_P]
68,244 | SulaMueller/2018AntiScatterGrid | refs/heads/main | /smPsection.py | # -*- coding: utf-8 -*-
"""
Created on Tue May 8 17:13:01 2018
DESCRIPTION: creates P section for IGES File
INPUT: Angles as determined by smAngles -> determineAllAnglesAndCoordinates
MAIN ROUTINE: allP_Entries(Angles, GR, th_grid)
REMARK: nextline, entry_index
from specifications of IGES file:
* entry_index:
* specifies the entry, is identical for entire P entry
* independent of how many lines have been used up for that entry
* only uses odd numbers (hence +2 for each entry)
* entry_index can be used as "pointer" (DE) to that entry
* line counter ("nextline"):
* continuously counts used up lines
* independent of which P entry it is used for
* set variable "nextline" = number of used up lines
-> "nextline" is line index for next entry (needed in D-section)
SECTIONS:
0: Basics (P_Entry, Points, Lines)
I: Surfaces directly boardering Pixels
II: Edge faces of entire grid
III: Bottom and Top surface
A: between pixels ("alpha")
B: between pixel rows ("rho")
IV: Main routine
@author: Sula Mueller
"""
'''---------------------------------------------------------------------------
SECTION 0: BASICS '''
import numpy as np
from smFacets import getD
''' creates 7 digit string from value x and enough spaces (right justified) '''
# eigth in block is section entry ('D', or 'P')
def spaced7(x):
    """Right-justify str(x) in a 7-character field (the eighth column
    in the block holds the section letter, e.g. 'D' or 'P')."""
    return str(x).rjust(7)
''' creates 8 digit string from value x and enough spaces (right justified) '''
def spaced8(x):
    """Right-justify str(x) in an 8-character field."""
    return str(x).rjust(8)
''' reverses direction of line '''
def reverseLine(L):
    """Swap the endpoints of a line [x0,y0,z0, x1,y1,z1] (new list)."""
    return [L[k] for k in (3, 4, 5, 0, 1, 2)]
''' P_Entry
DESCRIPTION: creates one "entry" for P section from given parameters
INPUT: entry_index -> index of entry (independent of used up lines)
nextline -> line counter for first line
typ -> type of entity as integer
params -> parameters of entity (if point: [x,y,z], if line: P1, P2)
pointFlag -> true, if point, line or curve, false if surface
OUTPUT:
P, alternating P[0+3i] -> string as written in P section (incl. '/n')
P[1+3i] -> type as integer
P[2+3i] -> line index for NEXT entry ("nextline") '''
def P_Entry(entry_index, nextline, typ, params, pointFlag):
    """Build one entry of the IGES P section.

    entry_index -- odd DE pointer of this entry (identical on every line
                   the entry occupies)
    nextline    -- P-section line counter for the entry's first line
    typ         -- IGES entity type number (110 line, 116 point, 118 ruled
                   surface, 141 boundary, 143 bounded surface, ...)
    params      -- entity parameters, written with str() exactly as given
    pointFlag   -- True for points/lines/curves (trailer '0,0;'),
                   False for surfaces (trailer '0,1,43;')
    Returns [string, typ, nextline_of_next_entry]; string holds one or more
    72-column lines (64 data columns + 8-char 'P' block + 7-char line
    number), each terminated by '\\n'.
    """
    nextline = int(nextline)
    p = str(typ) + ','            # comma-joined parameter record
    for value in params:
        p = p + str(value) + ','
    if pointFlag:
        p = p + '0,0;'            # line scheme for points, lines and curves
    else:
        p = p + '0,1,43;'         # line scheme for surfaces
    # columns 65-73: right-justified entry_index, section letter, line no.
    estring = str(entry_index).rjust(8) + 'P'
    string = ''
    while len(p) > 64:
        # split after the last comma that still fits into the 64 columns
        ind = p.rfind(',', 0, 64)
        if ind == -1:
            # ROBUSTNESS FIX: a single parameter longer than 63 characters
            # contains no comma to split at; the original looped forever
            # here -- hard-split at the column limit instead
            ind = 63
        pp = p[0:ind+1]           # sliced-off part
        p = p[ind+1:]             # leftover part
        string = (string + pp.ljust(64) + estring
                  + str(nextline).rjust(7) + '\n')
        nextline = nextline + 1
    # last part (or the only line if the record was short enough):
    string = string + p.ljust(64) + estring + str(nextline).rjust(7) + '\n'
    return [string, typ, int(nextline + 1)]
''' RuledSurface
DESCRIPTION: returns surface as P_Entry
INPUT: L -> pointers of 2 antiparallel lines on that surface
OUTPUT: 1 surface as P_Entry '''
def RuledSurface(L, entry_index, nextline):
    """Ruled surface (type 118) swept between the two antiparallel lines
    whose DE pointers are given in L."""
    return P_Entry(entry_index, nextline, 118, [L[0], L[1], 1, 0], False)
''' BoundedSurfaces
DESCRIPTION: gets P_Entries of bounded surfaces
INPUT: SP -> pointers to unbounded surfaces
BP -> pointers to boundaries
OUTPUT: P_Entries '''
def BoundedSurfaces(SP, BP, entry_index, nextline):
    """Bounded surfaces (type 143), pairing each unbounded surface pointer
    in SP with the matching boundary pointer in BP (equal lengths)."""
    entries = []
    for sp, bp in zip(SP, BP):
        entries = entries + P_Entry(entry_index, nextline, 143,
                                    [0, sp, 1, bp], False)
        nextline = int(entries[-1])
        entry_index = entry_index + 2  # P entries use odd indices only
    return entries
''' BoundedSurface
DESCRIPTION: gets P_Entry of 1 bounded surface
INPUT: SP -> pointer to unbounded surface
BP -> pointer to boundary
OUTPUT: P_Entry '''
def BoundedSurface(SP, BP, entry_index, nextline):
    """Single bounded surface (type 143) from one surface pointer SP and
    one boundary pointer BP."""
    return P_Entry(entry_index, nextline, 143, [0, SP, 1, BP], False)
'''---------------------------------------------------------------------------
SECTION I: PIXELS '''
''' PixelPoints
DESCRIPTION: returns line array of all points surrounding pixels
INPUT: coordinates of 4 bottom points ([x0,z0], [x0,z1], [x1,z1], [x1,z0])
(x0, z0 should be +th_grid)
dx, dz -> tilt of upper points
h_grid -> height of grid
OUTPUT: line array [8,3] '''
def PixelPoints(x0, x1, z0, z1, dx0, dx1, dz0, dz1, h_grid):
    """Return the 8 corner points of one pixel as an [8,3] array.

    Rows 0-3: bottom corners at y=0, rows 4-7: top corners at y=h_grid,
    shifted by the tilts dx*/dz*. Coordinates are rounded to 4 decimals
    via '%.4f' before numpy converts them back to float.
    """
    pts = np.zeros((8, 3))
    bottom = [(x0, z0), (x1, z0), (x1, z1), (x0, z1)]
    for k, (px, pz) in enumerate(bottom):
        pts[k, :] = ["%.4f" % px, 0, "%.4f" % pz]
    top = [(x0 + dx0, z0 + dz0), (x1 + dx1, z0 + dz0),
           (x1 + dx1, z1 + dz1), (x0 + dx0, z1 + dz1)]
    for k, (px, pz) in enumerate(top):
        pts[k + 4, :] = ["%.4f" % px, h_grid, "%.4f" % pz]
    return pts
''' PixelLines
DESCRIPTION: returns line array of all lines surrounding pixels
INPUT: coordinates of 4 bottom points ([x0,z0], [x0,z1], [x1,z1], [x1,z0])
(x0, z0 should be +th_grid)
dx, dz -> tilt of upper points
h_grid -> height of grid
OUTPUT: line array [12,6]
REMARKS:
* DON'T do for last alpha,rho since need i+1
* "%.4f"% to round on 4 digits '''
def PixelLines(x0, x1, z0, z1, dx0, dx1, dz0, dz1, h_grid):
    """Return the 12 edge lines of one pixel as a [12,6] array.

    Rows 0-3: bottom ring, rows 4-7: vertical edges, rows 8-11: top ring.
    Each row is [x1,y1,z1, x2,y2,z2] with x/z rounded to 4 decimals.
    """
    def f(v):
        return "%.4f" % v
    lines = np.zeros((12, 6))
    bottom = [(x0, z0), (x1, z0), (x1, z1), (x0, z1)]
    top = [(x0 + dx0, z0 + dz0), (x1 + dx1, z0 + dz0),
           (x1 + dx1, z1 + dz1), (x0 + dx0, z1 + dz1)]
    for k in range(4):
        bx, bz = bottom[k]
        nx, nz = bottom[(k + 1) % 4]
        lines[k, :] = [f(bx), 0, f(bz), f(nx), 0, f(nz)]          # bottom
        tx, tz = top[k]
        lines[k + 4, :] = [f(bx), 0, f(bz), f(tx), h_grid, f(tz)]  # vertical
        ux, uz = top[(k + 1) % 4]
        lines[k + 8, :] = [f(tx), h_grid, f(tz),
                           f(ux), h_grid, f(uz)]                   # top
    return lines
''' PixelRuledSurfaces
DESCRIPTION: returns array of all surfaces boardering a pixel
INPUT: LP -> pointers to all lines boardering pixel
OUTPUT: 4 surfaces as P_Entries '''
def PixelRuledSurfaces(LP, entry_index, nextline):
    """Four ruled surfaces (type 118) around one pixel.

    Pairs each bottom line LP[0..3] with the top line directly above it
    LP[8..11]; emitted in the order front, right, back, left.
    """
    entries = []
    for k in range(4):
        entries = entries + P_Entry(entry_index, nextline, 118,
                                    [LP[k], LP[k + 8], 0, 0], False)
        nextline = int(entries[-1])
        entry_index = entry_index + 2  # P entries use odd indices only
    return entries
''' PixelBoundaries
DESCRIPTION: gets the boundaries of 4 pixel boardering surfaces from Lines
INPUT: LP -> pointers to all lines boardering pixel
SP -> pointers to 4 unbounded surfaces
OUTPUT: 4 boundaries for 1 pixel as P_Entries '''
def PixelBoundaries(LP, SP, entry_index, nextline):
    """Four boundaries (type 141) for the pixel-bordering surfaces.

    For face i (front, right, back, left) the boundary is built from:
    bottom line LP[i], vertical LP[4+i], top LP[8+i] (reversed) and the
    preceding vertical LP[4+(i+3)%4] (reversed).
    """
    entries = []
    for i in range(4):
        params = [0, 0, SP[i], 4,
                  LP[i], 0, 0,
                  LP[4 + i], 0, 0,
                  LP[8 + i], 1, 0,
                  LP[4 + (i + 3) % 4], 1, 0]
        entries = entries + P_Entry(entry_index, nextline, 141, params, True)
        nextline = int(entries[-1])
        entry_index = entry_index + 2  # P entries use odd indices only
    return entries
''' P_EntriesOnePixel
DESCRIPTION:
creates ALL P_Entries of points, lines and bounded surfaces boardering one
pixel
INPUT:
values -> x0, x1, z0, z1, dx0, dx1, dz0, dz1, h_grid
= X[j]+th_grid, X[j+1], Z[i]+th_grid, Z[i+1], dx0, dx1, dz0, dz1, h_grid
OUTPUT: 32 P_Entries:
4 bottom points
4 top points
4 bottom lines
4 vertical lines
4 top lines
4 ruled, unbounded surfaces (created by sweeping bottom, top lines)
4 boundaries of those (created by lines)
4 bounded surfaces (created by surfaces and corresponding boundaries)
1 P_Entry = 3 array elements:
P[0+3i] -> string as written in P section
P[1+3i] -> type as integer
P[2+3i] -> line index for NEXT entry ("nextline")
LP -> pointers to all lines boardering pixel '''
def P_EntriesOnePixel(entry_index, nextline, values):
    """Emit all P-section entries (points, lines, surfaces) for one pixel.

    values = [x0, x1, z0, z1, dx0, dx1, dz0, dz1, h_grid]; see the header
    comment above for the full output layout.
    Returns [P, LP] with LP = DE pointers of the 12 pixel edge lines.
    """
    P = [] # output
    LP = np.zeros(12, dtype=int) # pointers for lines from entry_index
    ''' points as [8,3] '''
    Points = PixelPoints(values[0],values[1],values[2],values[3],values[4],
                         values[5],values[6],values[7],values[8])
    pointFlag = True
    for i in range(0,8): # write Points as P_Entries:
        P = P + P_Entry(entry_index, nextline, 116, Points[i], pointFlag)
        nextline = int(P[-1])
        entry_index = entry_index + 2 # per definition of P section
    ''' lines as [12,6] '''
    Lines = PixelLines(values[0],values[1],values[2],values[3],values[4],
                       values[5],values[6],values[7],values[8])
    for i in range(0,12): # write Lines as P_Entries:
        LP[i] = entry_index
        P = P + P_Entry(entry_index, nextline, 110, Lines[i], pointFlag)
        nextline = int(P[-1])
        entry_index = entry_index + 2 # per definition of P section
    ''' ruled surfaces '''
    P = P + PixelRuledSurfaces(LP, entry_index, nextline)
    e = entry_index
    SP = [e, e+2, e+4, e+6] # pointers for surfaces as entry_indice
    nextline = int(P[-1])
    entry_index = entry_index + 8 # +2*(4 surfaces)
    ''' boundaries for inner surfaces '''
    # NOTE(review): the boundary entries are commented out, yet BP below
    # still reserves their entry indices and BoundedSurfaces references
    # those (never-written) entries -- looks like a debugging leftover;
    # confirm whether the PixelBoundaries call should be re-enabled.
    #P = P + PixelBoundaries(LP, SP, entry_index, nextline)
    e = entry_index
    BP = [e, e+2, e+4, e+6] # pointers for boundaries as entry_indice
    nextline = int(P[-1])
    entry_index = entry_index + 8 # +2*(4 boundaries)
    ''' bounded surfaces '''
    P = P + BoundedSurfaces(SP, BP, entry_index, nextline)
    return [P, LP]
'''---------------------------------------------------------------------------
SECTION II: EDGES OF GRID '''
''' OuterEdgesLines
DESCRIPTION:
creates P_Entries of points and front/back lines of outer edges of grid
equivalent to P_EntriesOnePixel, but exclude left, right lines
(otherwise would have redundant definition of lines)
(boundaries & surfaces defined in OuterEdgesSurfaces)
INPUT:
values -> x0, x1, z0, z1, dx0, dx1, dz0, dz1, h_grid
OUTPUT:
P -> P_entries of points and lines, except left/ right lines
oeL -> pointers to [bottom front+back, vertical, top front+back] lines '''
def OuterEdgesLines(entry_index, nextline, values):
    """Points and lines of the grid's outer edges (surfaces come later).

    Same as P_EntriesOnePixel's point/line part, but the left/right ring
    lines are skipped to avoid redundant definitions.
    Returns [entries, oeL] with oeL = pointers to
    [bottom front, bottom back, 4 verticals, top front, top back].
    """
    entries = []
    oeL = np.zeros(8, dtype=int)  # line pointers (entry indices)
    pts = PixelPoints(*values)    # the 8 outer corner points
    for i in range(8):
        entries = entries + P_Entry(entry_index, nextline, 116, pts[i], True)
        nextline = int(entries[-1])
        entry_index = entry_index + 2  # P entries use odd indices only
    allL = PixelLines(*values)
    keep = [0, 2, 4, 5, 6, 7, 8, 10]  # drop left/right bottom & top edges
    for k, idx in enumerate(keep):
        oeL[k] = entry_index
        entries = entries + P_Entry(entry_index, nextline, 110,
                                    allL[idx], True)
        nextline = int(entries[-1])
        entry_index = entry_index + 2  # P entries use odd indices only
    return [entries, oeL]
''' OuterEdgesRuledSurf
DESCRIPTION: returns array of outer edge surfaces of grid
INPUT: oeL -> pointers to all lines [bottom front+back, vert, top front+back]
OUTPUT: 4 surfaces as P_Entries '''
def OuterEdgesRuledSurfs(oeL, entry_index, nextline):
    """Four ruled surfaces (type 118) for the outer edge faces of the grid,
    each swept between two neighbouring vertical corner lines oeL[2..5];
    emitted in the order front, right, back, left."""
    corner_pairs = [(oeL[2], oeL[3]), (oeL[3], oeL[4]),
                    (oeL[4], oeL[5]), (oeL[5], oeL[2])]
    entries = []
    for a, b in corner_pairs:
        entries = entries + P_Entry(entry_index, nextline, 118,
                                    [a, b, 0, 0], False)
        nextline = int(entries[-1])
        entry_index = entry_index + 2  # P entries use odd indices only
    return entries
''' OuterEdgesRLboundary
DESCRIPTION: gets boundary for right or left surface of outer edges of grid
INPUT: vLP -> pointers to vertical lines (bottom to top)
bLP -> pointers to all bottom lines (many partials, front to back)
tLP -> pointers to all top lines (many partials, front to back)
SP -> pointers to unbounded surfaces
OUTPUT: boundary for outer edge face as P_Entry
REMARK: can also be used for [bottom, top] surfaces between pixel rows '''
def OuterEdgesRLboundary(vLP, bLP, tLP, SP, entry_index, nextline):
    """Boundary (type 141) for a right/left outer edge face of the grid.

    vLP -- pointers to the 2 vertical edge lines (bottom-to-top traversal)
    bLP -- bottom partial line pointer(s), scalar or sequence, front->back
    tLP -- top partial line pointer(s), scalar or sequence, front->back
    SP  -- pointer to the unbounded surface being bounded
    Also reused for the [bottom, top] surfaces between pixel rows.
    """
    pointFlag = True
    lenC = 2 # number of partial curves, initialize with 2 vertical lines
    ''' vertical 1: '''
    params = [vLP[0],1,0]
    ''' bottom: '''
    # BUGFIX: was isinstance(bLP, np.int32), which is False for plain
    # Python ints and for the platform-default 64-bit numpy integer on
    # Linux/macOS, so scalar pointers fell into the len() branch and
    # crashed; np.integer covers every numpy integer width
    if isinstance(bLP, (int, np.integer)):
        params = np.concatenate((params, [bLP,0,0]))
        lenC = lenC + 1
    else:
        lenLP = len(bLP)
        lenC = lenC + lenLP
        for i in range(0, lenLP):
            params = np.concatenate((params, [bLP[i],0,0]))
    ''' vertical 2: '''
    params = np.concatenate((params, [vLP[1],0,0]))
    ''' top: '''
    if isinstance(tLP, (int, np.integer)):
        # NOTE(review): the scalar case uses orientation flag 0 while the
        # list branch below uses 1 (reversed) -- looks inconsistent with
        # the traversal direction; confirm against the IGES output
        params = np.concatenate((params, [tLP,0,0]))
        lenC = lenC + 1
    else:
        lenLP = len(tLP)
        lenC = lenC + lenLP
        for i in range(0, lenLP):
            # BUGFIX: was tLP[-i]; since -0 indexes element 0, the reversed
            # walk started at the wrong end (t[0], t[-1], ..., t[1]);
            # -(i+1) walks t[-1] down to t[0] as intended
            params = np.concatenate((params, [tLP[-(i+1)],1,0]))
    ''' first part of boundary definition: '''
    params = np.concatenate(([0,0,SP,lenC], params))
    return P_Entry(entry_index, nextline, 141, params, pointFlag)
''' OuterEdgesBoundaries
DESCRIPTION: gets the boundaries of 4 outer edge surfaces of grid
INPUT: oeL -> pointers to all lines except left/ right lines
l/r b/t L, -> pointers to left/right bottom/top lines of edges
SP -> pointers to 4 unbounded surfaces
OUTPUT: 4 boundaries for outer edges as P_Entries '''
def OuterEdgesBoundaries(oeL, lbL, rbL, ltL, rtL, SP, entry_index, nextline):
    """Boundaries (type 141) of the 4 outer edge faces of the grid.

    oeL layout (from OuterEdgesLines): [bottom front, bottom back,
    4 vertical corner lines, top front, top back]; lbL/rbL/ltL/rtL are the
    left/right bottom/top partial edge lines collected along the grid.
    """
    pointFlag = True
    # front: bottom front line, two verticals, top front line (reversed)
    # NOTE(review): which vertical (oeL[2..5]) belongs to which face is not
    # obvious from here -- verify pairing against OuterEdgesLines' ordering
    params = [0,0,SP[0],4,oeL[0],0,0,oeL[2],0,0,oeL[6],1,0,oeL[5],1,0]
    P = P_Entry(entry_index, nextline, 141, params, pointFlag)
    nextline = int(P[-1])
    entry_index = entry_index + 2 # per definition of P section
    # right: assembled from the many partial bottom/top lines
    vL = [oeL[2], oeL[3]] # pointers to vertical lines of right edge
    P = P + OuterEdgesRLboundary(vL, rbL, rtL, SP[1], entry_index, nextline)
    nextline = int(P[-1])
    entry_index = entry_index + 2 # per definition of P section
    # back: bottom back line, two verticals, top back line (reversed)
    params = [0,0,SP[2],4,oeL[1],0,0,oeL[4],0,0,oeL[7],1,0,oeL[3],1,0]
    P = P + P_Entry(entry_index, nextline, 141, params, pointFlag)
    nextline = int(P[-1])
    entry_index = entry_index + 2 # per definition of P section
    # left: assembled from the many partial bottom/top lines
    vL = [oeL[4], oeL[5]] # pointers to vertical lines of left edge
    return (P + OuterEdgesRLboundary(vL, lbL, ltL, SP[3], entry_index,
                                     nextline))
''' OuterEdgesSurfaces
DESCRIPTION: wraps up surface routines to create 4 outer edge surfaces of grid
INPUT: oeLP -> pointers of all lines except left/ right lines
oelrLP -> pointers to left/ rigth lines of edges
OUTPUT: four bounded surfaces of outer edges as P_Entries '''
def OuterEdgesSurfaces(oeL, edgeRL, SP, entry_index, nextline):
    """Create the 4 bounded outer edge surfaces of the grid.

    oeL    -- pointers to the outer edge lines (left/right lines excluded)
    edgeRL -- interleaved pointers to the partial left/right edge lines,
              in repeating groups of [lbL, rbL, ltL, rtL]
    NOTE(review): the SP parameter is overwritten below before ever being
    read -- it is effectively dead; callers pass btSP, which is ignored.
    """
    lenE = len(edgeRL)
    lbL = edgeRL[0:lenE:4] # pointers to left bottom partial lines
    rbL = edgeRL[1:lenE:4] # pointers to right bottom partial lines
    ltL = edgeRL[2:lenE:4] # pointers to left top partial lines
    rtL = edgeRL[3:lenE:4] # pointers to right top partial lines
    ''' ruled surfaces '''
    P = OuterEdgesRuledSurfs(oeL, entry_index, nextline)
    e = entry_index
    SP = [e, e+2, e+4, e+6] # pointers for surfaces
    nextline = int(P[-1])
    entry_index = entry_index + 8 # +2*(4 surfaces)
    ''' boundaries for surfaces '''
    P = P + OuterEdgesBoundaries(oeL, lbL, rbL, ltL, rtL, SP, entry_index,
                                 nextline)
    e = entry_index
    BP = [e, e+2, e+4, e+6] # pointers for boundaries
    nextline = int(P[-1])
    entry_index = entry_index + 8 # +2*(4 boundaries)
    ''' bounded surfaces '''
    return P + BoundedSurfaces(SP, BP, entry_index, nextline)
'''---------------------------------------------------------------------------
SECTION III: BOTTOM & TOP '''
''' PART A: ALPHA '''
''' BotTopAlpPartialLineParams
DESCRIPTION: gets additional lines surrounding [bottom, top] surfaces between
pixels
INPUT: coordinates of 4 bottom points ([x0,z0], [x0,z1], [x1,z1], [x1,z0]):
[X[j], X[j]+th_grid, Z[i]+th_grid, Z[i+1]]
dx, dz -> tilt of upper points
h_grid -> height of grid
OUTPUT: line array [4,6] '''
def BotTopAlpPartialLineParams(x0, x1, z0, z1, dx0, dx1, dz0, dz1, h_grid):
    """Coordinates of the 4 extra partial lines around the [bottom, top]
    surfaces between pixels, as a [4,6] array:
    rows 0/1 bottom (front/back), rows 2/3 top (front/back)."""
    out = np.zeros((4, 6))
    segs = [
        (x0, 0, z0, x1, 0, z0),
        (x0, 0, z1, x1, 0, z1),
        (x0 + dx0, h_grid, z0 + dz0, x1 + dx1, h_grid, z0 + dz0),
        (x0 + dx0, h_grid, z1 + dz1, x1 + dx1, h_grid, z1 + dz1),
    ]
    for k, (ax, ay, az, bx, by, bz) in enumerate(segs):
        out[k, :] = ["%.4f" % ax, ay, "%.4f" % az,
                     "%.4f" % bx, by, "%.4f" % bz]
    return out
''' BotTopAlpBoundaries
DESCRIPTION: gets the boundaries of [bottom, top] surfaces between pixels
INPUT: parL -> pointers to partial lines between pixels
alps -> pointers to [left bottom, right bottom, l top, r top] lines of
space in between
SP -> pointers to unbounded surfaces
OUTPUT: 2 boundaries as P_Entries '''
def BotTopAlpBoundaries(parL, alps, SP, entry_index, nextline):
    """Boundaries (type 141) of the [bottom, top] surfaces between pixels.

    parL -- pointers to the 4 partial lines (0/1 bottom, 2/3 top)
    alps -- pointers to [left bottom, right bottom, left top, right top]
            lines of the space in between
    """
    entries = []
    for base in (0, 2):  # base 0 = bottom face, base 2 = top face
        params = [0, 0, SP[base // 2], 4,
                  parL[base], 1, 0,
                  alps[base], 0, 0,
                  parL[base + 1], 0, 0,
                  alps[base + 1], 0, 0]
        entries = entries + P_Entry(entry_index, nextline, 141, params, True)
        nextline = int(entries[-1])
        entry_index = entry_index + 2  # P entries use odd indices only
    return entries
''' BotTopAlphaSurfaces
DESCRIPTION: gets all P_entries of [bottom, top] surfaces between pixels
INPUT: alps -> pointers to [left bottom, right bottom, l top, r top] lines of
space in between
SP -> pointers to unbounded surfaces
values -> [X[j], X[j]+th_grid, Z[i]+th_grid, Z[i+1], dx0, dx1, dz0, dz1,
h_grid]
OUTPUT: P -> all P_Entries for both surfaces
parLP -> pointers to additional partial lines between pixels '''
def BotTopAlphaSurfaces(alps, SP, entry_index, nextline, values):
    """All P entries for the [bottom, top] surfaces between two pixels:
    4 partial lines, 2 boundaries, 2 bounded surfaces.

    Returns [P, parLP] with parLP = pointers to the 4 new partial lines.
    """
    P = [] # output
    parLP = np.zeros(4, dtype=int) # pointers for lines from entry_index
    ''' additional partial lines: '''
    parL = BotTopAlpPartialLineParams(values[0],values[1],values[2],values[3],
                                      values[4],values[5],values[6],values[7],
                                      values[8])
    pointFlag = True
    for i in range(0,4): # write parL as P_Entries:
        parLP[i] = entry_index
        P = P + P_Entry(entry_index, nextline, 110, parL[i], pointFlag)
        nextline = int(P[-1])
        entry_index = entry_index + 2 # per definition of P section
    ''' boundaries: '''
    # reserve the next two odd entry indices for the boundaries BEFORE
    # writing them, so the bounded surfaces below can point at them
    BP = [entry_index, entry_index+2] # pointers for boundaries
    P = P + BotTopAlpBoundaries(parLP, alps, SP, entry_index, nextline)
    nextline = int(P[-1])
    entry_index = entry_index + 4 # + 2*(2 boundaries)
    ''' bounded surfaces '''
    P = P + BoundedSurfaces(SP, BP, entry_index, nextline)
    return [P, parLP]
''' PART B: RHO '''
''' BotTopRhoPartialPointParams
DESCRIPTION: returns line array of 4 additional points for partial lines
INPUT: coordinates of 2 bottom points ([x0,z1], [x1,z1])
[X[0], X[-1]+th_grid, Z[i]+th_grid]
dx, dz -> tilt of upper points
h_grid -> height of grid
OUTPUT: line array [4,3] '''
def BotTopRhoPartialPointParams(x0, x1, z1, dx0, dx1, dz1, h_grid):
    """4 extra points for the partial lines between pixel rows, [4,3]:
    bottom right, bottom left, top right, top left (all at z1)."""
    pts = np.zeros((4, 3))
    coords = [(x1, 0, z1),
              (x0, 0, z1),
              (x1 + dx1, h_grid, z1 + dz1),
              (x0 + dx0, h_grid, z1 + dz1)]
    for k, (px, py, pz) in enumerate(coords):
        pts[k, :] = ["%.4f" % px, py, "%.4f" % pz]
    return pts
''' BotTopRhoPartialLineParams
DESCRIPTION: gets [bl, br, tl, tr] edge lines of surfaces between pixel rows
INPUT: coordinates of 4 bottom points ([x0,z0], [x0,z1], [x1,z1], [x1,z0]):
[X[0], X[-1]+th_grid, Z[i], Z[i]+th_grid]
dx, dz -> tilt of upper points
h_grid -> height of grid
OUTPUT: line array [4,6] '''
def BotTopRhoPartialLineParams(x0, x1, z0, z1, dx00, dx11, dz0, dz1, h_grid):
    """[bottom-left, bottom-right, top-left, top-right] edge lines of the
    surfaces between pixel rows, as a [4,6] array; each line runs from z0
    to z1 at constant x."""
    out = np.zeros((4, 6))
    segs = [
        (x0, 0, z0, x0, 0, z1),
        (x1, 0, z0, x1, 0, z1),
        (x0 + dx00, h_grid, z0 + dz0, x0 + dx00, h_grid, z1 + dz1),
        (x1 + dx11, h_grid, z0 + dz0, x1 + dx11, h_grid, z1 + dz1),
    ]
    for k, (ax, ay, az, bx, by, bz) in enumerate(segs):
        out[k, :] = ["%.4f" % ax, ay, "%.4f" % az,
                     "%.4f" % bx, by, "%.4f" % bz]
    return out
''' BotTopEdgePartialLines
DESCRIPTION: gets [bottom left, bo right, top le, top re] partial lines at end
of pixel rows
INPUT: values -> [X[j], X[j]+th_grid, Z[i]+th_grid, Z[i+1], dx0, dx1, dz0, dz1,
h_grid]
* coordinates of 4 bottom points ([x0,z0], [x0,z1], [x1,z1], [x1,z0])
* dx, dz -> tilt of upper points
* h_grid -> height of grid
OUTPUT: P -> P_Entries for 4 partial lines at edges of grid,
parLP -> pointers to [bl, br, tl, tr] partial lines '''
def BotTopEdgePartialLines(entry_index, nextline, values):
    """P entries for 4 extra points plus the [bl, br, tl, tr] partial edge
    lines at the end of a pixel row.

    values = [x0, x1, z0, z1, dx0, dx1, dz0, dz1, h_grid].
    Returns [P, parLP] with parLP = pointers to the 4 partial lines.
    """
    P = [] # output
    pointFlag = True
    ''' additional points: '''
    # point helper takes (x0, x1, z1, dx0, dx1, dz1, h_grid) -- note that
    # values[2] (z0) and values[6] (dz0) are intentionally skipped
    Points = BotTopRhoPartialPointParams(values[0],values[1],values[3],
                                         values[4],values[5],values[7],
                                         values[8])
    for i in range(0,4): # write Points as P_Entries:
        P = P + P_Entry(entry_index, nextline, 116, Points[i], pointFlag)
        nextline = int(P[-1])
        entry_index = entry_index + 2
    parLP = np.zeros(4, dtype=int) # pointers for lines from entry_index
    parL = BotTopRhoPartialLineParams(values[0],values[1],values[2],values[3],
                                      values[4],values[5],values[6],values[7],
                                      values[8])
    for i in range(0,4): # write parL as P_Entries:
        parLP[i] = entry_index
        P = P + P_Entry(entry_index, nextline, 110, parL[i], pointFlag)
        nextline = int(P[-1])
        entry_index = entry_index + 2
    return [P, parLP]
''' BotTopRhoBoundaries
DESCRIPTION: gets two boundaries of [bottom, top] surfaces between pixel rows
INPUT: rhoE -> pointers to left/right edge lines of surfaces between pixel rows
rho F/B b/t -> pointers to collection of front/back bottom/top lines of
space in between pixel rows
SP -> pointers to unbounded [bottom, top] surfaces
OUTPUT: 2 boundaries as P_Entries '''
def BotTopRhoBoundaries(rhoE, rhoFb, rhoBb, rhoFt, rhoBt, SP, entry_index,
                        nextline):
    """Two boundaries (type 141) for the [bottom, top] surfaces between
    pixel rows; each is assembled by OuterEdgesRLboundary from the edge
    lines rhoE and the front/back partial-line collections."""
    # bottom boundary from the bottom edge lines rhoE[0..1]
    entries = OuterEdgesRLboundary([rhoE[0], rhoE[1]], rhoFb, rhoBb, SP[0],
                                   entry_index, nextline)
    nextline = int(entries[-1])
    entry_index = entry_index + 2  # P entries use odd indices only
    # top boundary from the top edge lines rhoE[2..3]
    return entries + OuterEdgesRLboundary([rhoE[2], rhoE[3]], rhoFt, rhoBt,
                                          SP[1], entry_index, nextline)
''' BotTopRhoSurfaces
DESCRIPTION: gets all P_entries of [bottom, top] surfaces between pixels
INPUT: rhoE -> pointers to [left bottom, right bottom, l top, r top] lines of
surface in between pixel rows
rho F/B b/t -> front/back bottom/top collection of partial lines along
surface in between pixel rows (ordered, left to right)
btSP -> pointers to unbounded bottom/ top surfaces
OUTPUT: boundaries and bounded surfaces as P_Entries for both surfaces '''
def BotTopRhoSurfaces(rhoE, rhoFb, rhoBb, rhoFt, rhoBt, SP, entry_index,
                      nextline):
    """Boundaries plus bounded surfaces for the [bottom, top] faces
    between pixel rows, as P entries."""
    # reserve the boundary entry indices before writing them, so the
    # bounded surfaces can reference them
    boundary_ptrs = [entry_index, entry_index + 2]
    entries = BotTopRhoBoundaries(rhoE, rhoFb, rhoBb, rhoFt, rhoBt, SP,
                                  entry_index, nextline)
    nextline = int(entries[-1])
    entry_index = entry_index + 4  # + 2*(2 boundaries)
    return entries + BoundedSurfaces(SP, boundary_ptrs, entry_index, nextline)
'''---------------------------------------------------------------------------
SECTION IV: MAIN ROUTINE '''
''' allP_Entries
creates entire P section as list of entries
OUTPUT:
P, alternating:
P[0] -> first nextline = 1
P[1+3i] -> string as written in P section
P[2+3i] -> type as integer
P[3+3i] -> line index for NEXT entry ("nextline") '''
def allP_Entries(Angles, GR, th_grid):
    """Create the entire IGES P section as a list of entries.

    Angles  -- output of smAngles.determineAllAnglesAndCoordinates:
               [alphas, rhos, X, Z, N_P]
    GR      -- grid ratio (height = th_grid * GR)
    th_grid -- thickness of the grid leaves
    Returns a flat list: P[0] is the initial line counter (1), then
    repeating triples [string, type, nextline] per entry (see P_Entry).
    """
    ''' INITIALIZATION: '''
    entry_index = 1 # entry P, stays same value for each element
                    # (even if covering several lines)
                    # can be used as pointer for this entry
    nextline = 1 # line counter (continually counting used up lines)
    alphas = Angles[0] # per output of determineAllAnglesAndCoordinates
    rhos = Angles[1]
    X = Angles[2] # positions corresponding to alphas
    Z = Angles[3] # positions corresponding to rhos
    N_P = Angles[4] # number of pixels on tile [x,z]
    h_grid = th_grid*GR # height of grid
    a = len(alphas)
    r = len(rhos)
    P = [1] # entries list -> output
    # initialize with 1 so that in createIGESfile D section, can iterate linesp
    gapRho = False
    gapAlpha = False
    ''' OUTER EDGE LINES (I): ''' # surfaces later (II)
    dx00 = getD(alphas[0], h_grid) # tilt of left outer edge
    dx11 = getD(alphas[-1], h_grid) # tilt of right outer edge
    dz00 = getD(rhos[0], h_grid) # tilt of front outer edge
    dz11 = getD(rhos[-1], h_grid) # tilt of back outer edge
    values = [X[0], X[-1]+th_grid, Z[0], Z[-1]+th_grid, dx00, dx11, dz00, dz11,
              h_grid]
    [P1, oeL] = OuterEdgesLines(entry_index, nextline, values)
    # oeL: pointers to 6 lines of outer edges (right/ left edges excluded)
    P = P + P1
    p = int(len(P1)/3) # number of additional entries
    nextline = int(P[-1])
    entry_index = entry_index + p*2
    ''' BOTTOM & TOP RULED SURFACE: '''
    # NOTE(review): 17/19 and 29/31 are hard-coded DE pointers relying on
    # the outer edge entries being written first in a fixed order
    LBottom = [17,19] # pointers to 2 antiparallel lines on bottom surface
    LTop = [29,31] # pointers to 2 antiparallel lines on top surface
    # outer edges are first entries in P section:
    # 8 points + 4 bottom lines + 4 vertical lines + 4 top lines
    # only odd numbers are used
    bSP = entry_index # pointer to bottom surface
    P = P + RuledSurface(LBottom, entry_index, nextline)
    nextline = int(P[-1])
    entry_index = entry_index + 2
    btSP = [bSP, entry_index] # pointers to [bottom, top] surfaces
    P = P + RuledSurface(LTop, entry_index, nextline)
    nextline = int(P[-1])
    entry_index = entry_index + 2
    ''' INNER PIXELS + BOTTOM & TOP BOUNDED SURFACES '''
    dx0 = dx00 # tilt of first alpha leaf = tilt of left outer edge
    dz0 = dz00 # tilt of first rho leaf = tilt of front outer edge
    dxgap = 0 # for alpha gaps, need three dxs simultaneously
    dzgap = 0 # for rho gaps, need three dzs simultaneously
    # rhoFb/rhoFt start as SCALAR pointers; OuterEdgesRLboundary handles
    # the scalar and the sequence case separately
    rhoFb = oeL[0] # pointers to current front edge(s) of rho bottom face
    rhoFt = oeL[6] # pointers to current front edge(s) of rho top face
    edgeRL = np.zeros(0, int) # pointers to partial l/r edge lines of grid
    for i in range(0, r-1): # z
        # start with left edge of pixel, include right edge in loop
        # r-1 -> need to exclude last angle (= right edge) from looping
        rhoBb = np.zeros(0, int) # pointers to current back edge(s)
        rhoBt = np.zeros(0, int) # bottom/ top
        # = pointers to front edges of pixels and lines in between
        nextrhoFb = np.zeros(0, int) # pointers to next front edge(s)
        nextrhoFt = np.zeros(0, int) # bottom/ top
        # = pointers to back lines of pixels and lines in between
        if (i+1)%(N_P[1]+1)==0: # if gap
            dzgap = getD(rhos[i+1], h_grid)
            gapRho = True
            # do nothing
        else:
            # NOTE(review): dz1 is first assigned here; if the very first
            # row were a gap row this branch still runs later, but values
            # built before any assignment would raise UnboundLocalError --
            # confirm input guarantees at least one non-gap row/column
            dz1 = getD(rhos[i+1], h_grid) # tilt of right edge
            ''' right/ left edges of rho leaf between pixel rows: '''
            values = [X[0], X[-1]+th_grid, Z[i], Z[i]+th_grid, dx00, dx11, dz0,
                      dz0, h_grid] # on leaf, dz0 = dz1
            if gapRho: # if last was gap
                values = [X[0], X[-1]+th_grid, Z[i-1], Z[i]+th_grid, dx00,
                          dx11, dz0, dzgap, h_grid] # for gap, dz0 ~= dz1
            [P1, rhoE] = BotTopEdgePartialLines(entry_index, nextline, values)
            # rhoE: pointers to [bl, br, tl, tr] lines of surface between pixel
            # rows
            P = P + P1
            p = int(len(P1)/3) # number of additional entries
            nextline = int(P[-1])
            entry_index = entry_index + p*2
            edgeRL = np.concatenate((edgeRL, rhoE))
            ''' right/ left lines at end of pixel rows: '''
            values = [X[0], X[-1]+th_grid, Z[i]+th_grid, Z[i+1], dx00, dx11,
                      dz0, dz1, h_grid]
            if gapRho: # if last was gap
                gapRho = False
                values = [X[0], X[-1]+th_grid, Z[i]+th_grid, Z[i+1], dx00,
                          dx11, dzgap, dz1, h_grid]
            [P1, rhoL] = BotTopEdgePartialLines(entry_index, nextline, values)
            # rhoL: pointers to [bottom left, bo right, top le, top ri] lines
            # of surface at end of pixel row
            P = P + P1
            p = int(len(P1)/3) # number of additional entries
            nextline = int(P[-1])
            entry_index = entry_index + p*2
            edgeRL = np.concatenate((edgeRL, rhoL))
            alpLb = rhoL[0]
            alpLt = rhoL[2]
            for j in range(0, a-1): # x
                if (j+1)%(N_P[0]+1)==0: # if gap
                    dxgap = dx0
                    dx0 = getD(alphas[j+1], h_grid)
                    gapAlpha = True
                    # do nothing
                else:
                    dx1 = getD(alphas[j+1], h_grid)
                    ''' pixel boardering surfaces: '''
                    values = [X[j]+th_grid, X[j+1], Z[i]+th_grid, Z[i+1], dx0,
                              dx1, dz0, dz1, h_grid]
                    [P1, L1] = P_EntriesOnePixel(entry_index, nextline, values)
                    P = P + P1
                    p = int(len(P1)/3) # number of additional entries
                    nextline = int(P[-1])
                    entry_index = entry_index + p*2
                    ''' (alpha) bottom and top faces between pixels: '''
                    alpRb = L1[3]
                    alpRt = L1[11]
                    values = [X[j], X[j]+th_grid, Z[i]+th_grid, Z[i+1], dx0,
                              dx0, dz0, dz1, h_grid] # dx1 = dx0 if on leaf
                    if gapAlpha: # if last was gap
                        gapAlpha = False
                        values = [X[j-1], X[j]+th_grid, Z[i]+th_grid, Z[i+1],
                                  dxgap, dx0, dz0, dz1, h_grid] # dx1 ~= dx0
                    alps = [alpLb, alpRb, alpLt, alpRt]
                    # pointers to lines in pixels that bind edge faces
                    [P1, pL] = BotTopAlphaSurfaces(alps, btSP, entry_index,
                                                   nextline, values)
                    P = P + P1
                    p = int(len(P1)/3) # number of additional entries
                    nextline = int(P[-1])
                    entry_index = entry_index + p*2
                    rhoBb = np.concatenate((rhoBb, [pL[0],L1[0]]))
                    rhoBt = np.concatenate((rhoBt, [pL[2],L1[8]]))
                    nextrhoFb = np.concatenate((nextrhoFb, [pL[1], L1[2]]))
                    nextrhoFt = np.concatenate((nextrhoFt, [pL[3], L1[10]]))
                    alpLb = L1[1] # left bottom edge of next face
                    alpLt = L1[9] # left top edge of next face
                    dx0 = dx1
                    # tilt of left edge, reuse for right edge of next pixel
            ''' last alpha leaf bottom and top face: '''
            # NOTE(review): uses loop variable j after the j-loop ends --
            # requires a >= 2 or this raises NameError
            alpRb = rhoL[1]
            alpRt = rhoL[3]
            alps = [alpLb, alpRb, alpLt, alpRt]
            values = [X[j+1], X[j+1]+th_grid, Z[i]+th_grid, Z[i+1], dx0, dx0,
                      dz0, dz1, h_grid]
            [P1, pL] = BotTopAlphaSurfaces(alps, btSP, entry_index, nextline,
                                           values)
            P = P + P1
            p = int(len(P1)/3) # number of additional entries
            nextline = int(P[-1])
            entry_index = entry_index + p*2
            rhoBb = np.concatenate((rhoBb, [pL[0]]))
            rhoBt = np.concatenate((rhoBt, [pL[2]]))
            nextrhoFb = np.concatenate((nextrhoFb, [pL[1]]))
            nextrhoFt = np.concatenate((nextrhoFt, [pL[3]]))
            ''' rho leaf bottom and top face between pixel rows: '''
            P1 = BotTopRhoSurfaces(rhoE, rhoFb, rhoBb, rhoFt, rhoBt, btSP,
                                   entry_index, nextline)
            P = P + P1
            p = int(len(P1)/3) # number of additional entries
            nextline = int(P[-1])
            entry_index = entry_index + p*2
            rhoFb = nextrhoFb
            rhoFt = nextrhoFt
            dz0 = dz1 # tilt of back edge, reuse for front edge of next pixel
            dx0 = dx00 # reset to first leaf
    ''' last rho leaf bottom and top face: '''
    values = [X[0], X[-1]+th_grid, Z[-1], Z[-1]+th_grid, dx00, dx11, dz0, dz0,
              h_grid]
    [P1, rhoE] = BotTopEdgePartialLines(entry_index, nextline, values)
    # rhoE: pointers to [bl, br, tl, tr] lines of surface between pixel
    # rows
    P = P + P1
    p = int(len(P1)/3) # number of additional entries
    nextline = int(P[-1])
    entry_index = entry_index + p*2
    edgeRL = np.concatenate((edgeRL, rhoE))
    rhoBb = oeL[1] # pointer to bottom back edge of grid
    rhoBt = oeL[7] # pointer to top back edge of grid
    P1 = BotTopRhoSurfaces(rhoE, rhoFb, rhoBb, rhoFt, rhoBt, btSP, entry_index,
                           nextline)
    P = P + P1
    p = int(len(P1)/3) # number of additional entries
    nextline = int(P[-1])
    entry_index = entry_index + p*2
    ''' OUTER EDGE SURFACES (II): '''
    return P + OuterEdgesSurfaces(oeL, edgeRL, btSP, entry_index, nextline)
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 20 17:49:47 2018
@author: Sula Mueller
DESCRIPTION:
creates STLA or IGES file from parameters
input: Input-Parameter-File.txt saved in same directory
to create STLA or IGES file please do the following steps:
I. INSERT values in Input_Parameter_File.txt
II. SAVE file
III. RUN this module
"""
import os
from smParam2File import readInputFromText, readAsMatrix, readAsIntMatrix
from smParam2File import createSTLAfromGridParams, createIGESfromGridParams
#-------------------------------------------------------------
''' I. READ INPUT FILE '''
thispath = os.path.dirname(os.path.realpath(__file__))
# BUGFIX: the path was built as thispath + '\Input-Parameter-File.txt',
# hard-coding a Windows backslash separator ('\I' is also an invalid
# escape sequence in recent Python versions); os.path.join is portable
with open(os.path.join(thispath, 'Input-Parameter-File.txt'), 'r') \
        as inputfile:
    inputtext = inputfile.read()  # total file as string
#-------------------------------------------------------------
''' II. SET PARAMETERS '''
''' OUTPUT FILE '''
GRIDTYPE = readInputFromText(inputtext, 'GRIDTYPE')
if GRIDTYPE == '2D':
    grid = 0  # use grid as parameter specifying the gridtype
elif GRIDTYPE in ('x', 'X'):
    grid = 1
elif GRIDTYPE in ('z', 'Z'):
    grid = -1
else:
    # BUGFIX: previously this only printed and fell through, leaving
    # 'grid' undefined so the script crashed later with a NameError;
    # fail immediately with the same message instead
    raise ValueError('Error: enter dimension of 1D grid')
path = readInputFromText(inputtext, 'OUTpath')
if path == 'THIS':
    path = thispath  # 'THIS' means: write next to this script
name = readInputFromText(inputtext, 'OUTname')
FILETYPE = readInputFromText(inputtext, 'FILETYPE')
''' GRID PARAMETERS '''
GR = float(readInputFromText(inputtext, 'GridRatio'))
th_grid = float(readInputFromText(inputtext, 'th_grid'))
''' SYSTEM PARAMETERS '''
SDD = float(readInputFromText(inputtext, 'SDD'))
EAD = readAsMatrix(inputtext, 'EdgeAxisDifference', grid)
''' MODULE '''
size_M = readAsMatrix(inputtext, 'size_M', 0)
# 0: need matrix for either gridtype
edge_M0 = readAsMatrix(inputtext, 'edge_M0', grid)
edge_M1 = readAsMatrix(inputtext, 'edge_M1', grid)
gap_M = float(readInputFromText(inputtext, 'gap_M'))
n = int(readInputFromText(inputtext, 'nnn'))
''' ELEMENT (TILE) '''
edge_E0 = readAsMatrix(inputtext, 'edge_T0', grid)
edge_E1 = readAsMatrix(inputtext, 'edge_T1', grid)
gap_E = readAsMatrix(inputtext, 'gap_T', grid)
N_E = readAsIntMatrix(inputtext, 'N_T', grid)
''' PIXEL '''
N_P = readAsIntMatrix(inputtext, 'N_P', grid)
if size_M == [0, 0]:
    # module size not given explicitly -> derive it from the pixel layout
    size_P = readAsMatrix(inputtext, 'size_P', grid)
    gap_P = readAsMatrix(inputtext, 'gap_P', grid)
    # BUGFIX: the x-dimension used size_P[1] (copy-paste from the
    # z-dimension below); the pixel size along x is size_P[0]
    size_M = ([edge_M0[0] + edge_M1[0] + N_E[0]*(edge_E0[0] + edge_E1[0]
              + gap_E[0] + N_P[0]*(size_P[0]+gap_P[0])-gap_P[0])-gap_E[0],
              edge_M0[1] + edge_M1[1] + N_E[1]*(edge_E0[1] + edge_E1[1]
              + gap_E[1] + N_P[1]*(size_P[1]+gap_P[1]) -gap_P[1]) -gap_E[1]])
    print('size of M: ', str(size_M[0]), ' ', str(size_M[1]))
#-------------------------------------------------------------
''' III. CREATE OUTPUT FILE '''
if FILETYPE in ('STLA', 'stla', '.stl', 'stl'):
    createSTLAfromGridParams(path, name, GR, th_grid, SDD, EAD, size_M,
                             edge_M0, edge_M1, gap_M, edge_E0, edge_E1,
                             gap_E, N_E, N_P, n)
if FILETYPE in ('IGES', 'iges', '.igs', 'igs'):
    createIGESfromGridParams(path, name, GR, th_grid, SDD, EAD, size_M,
                             edge_M0, edge_M1, gap_M, edge_E0, edge_E1, gap_E,
                             N_E, N_P, n)
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 28 07:12:22 2018
DESCRIPTION: turns given input into IGES File
INPUT: Angles as determined by smAngles -> determineAllAnglesAndCoordinates
OUTPUT: .igs file
@author: Sula Mueller
"""
import time
from smPsection import allP_Entries
''' creates 7 digit string from value x and enough spaces (right justified) '''
# eigth in block is section entry ('D', or 'P')
def spaced7(x):
    """Return str(x) right-justified in a 7-character, space-padded field."""
    return str(x).rjust(7)
''' creates 8 digit string from value x and enough spaces (right justified) '''
def spaced8(x):
    """Return str(x) right-justified in an 8-character, space-padded field."""
    return str(x).rjust(8)
''' creates string length ls from value x and enough spaces
(right justified) '''
def spacedx(x, ls):
    """Return str(x) right-justified in a field of *ls* characters."""
    return str(x).rjust(ls)
def getDateAndTime():  # needed for G section
    """Return the local date and time formatted as 'YYYYMMDD.HHMMSS'.

    This is the timestamp string embedded in the IGES Global (G) section.
    time.strftime zero-pads every field, which replaces the original's
    manual per-field '0' + str(...) padding.
    """
    return time.strftime('%Y%m%d.%H%M%S', time.localtime(time.time()))
''' createIGESFile
DESCRIPTION: creates IGES file name.igs, saves at path, as specified by .igs
guidelines
INPUT:
Angles (from smAngles.determineAllAnglesAndCoordinates), Grid Ratio,
thickness of grid leaves
OUTPUT: file '''
def createIGESFile(path, name, Angles, GR, th_grid):
    """Write an IGES (.igs) file describing the anti-scatter grid.

    Emits the five IGES sections in order: S (start), G (global header),
    D (directory entries, two lines per entity), P (parameter data, built
    first by smPsection.allP_Entries because the D section points into it),
    and T (terminator).

    NOTE(review): indentation was reconstructed from control flow while
    reviewing; confirm against the original file.
    """
    file = open(path + '/' + name + '.igs', 'w')
    ''' S SECTION: '''
    file.write(72*' ' + 'S 1' + '\n')
    ''' G SECTION: '''
    n = len(name) + 4  # +4 because .igs
    NAME = name.upper()
    p = len(path) + n
    dt = getDateAndTime()
    # IGES Hollerith-style header string (nH prefixes give field lengths).
    g = ('1H,,1H;,'+str(n)+'H'+NAME+'.IGS,'+str(p)+'H'+path+name+
         '.igs,10HASGCreator,3H1.1,32,8,23,11,52,'+str(n)+'H'+NAME+
         '.IGS,1.,1,2HMM,4,0.7,15H'+dt+
         ',0.1000000000E-003,4747.868164,,,11,,,;')
    g = g + (72-len(g)%72)*' '  # to fill up last line with spaces
    len_g = int(len(g)/72)  # number of lines g fills
    for i in range(0, len_g):  # each line
        # NOTE(review): the slice g[i*72:i*72+71] yields only 71 characters,
        # so the 72nd character of every G line is dropped -- this looks like
        # an off-by-one ((i+1)*72 expected); verify against the IGES spec.
        file.write(g[i*72:i*72+71] + ' G ' + str(i+1) + '\n')
    ''' CREATE P SECTION: '''
    # needs to be created first because pointers point to entities
    # (but write to file later; after D section)
    # use functions defined in smPsection
    P = allP_Entries(Angles, GR, th_grid)  # from smPsection.py
    # P[0] -> first nextline = 1
    # P[1+3i] -> string as written in P section
    # P[2+3i] -> type as integer
    # P[3+3i] -> line index for NEXT entry ("nextline")
    lenP = int((len(P)-1)/3)  # number of actual P entries
    # -1 since first entry initialized as 1 (= first "nextline" for first line)
    # /3 since alternating strings, "nextlines", types
    print ('\n' + 'number of P entries: ' + str(lenP))
    ''' D SECTION: '''
    p = spaced8(1)  # pointer for entry in P section (P[1] initialized as '1')
    for i in range(0, lenP):
        linesp = spaced8(P[3*i+3]-P[3*i])
        # number of lines in P for this entity
        x = spaced7(2*i+1)  # line index of current line
        y = spaced7(2*i+2)  # line index of 2nd line (its a 2 line paragraph)
        typ = spaced8(P[3*i+2])
        file.write(typ + p +
                   ' 0 1 0 0 0 0 0D'
                   + x + '\n')
        file.write(typ + ' 1' + ' 4' + linesp +
                   ' 0 1D' + y + '\n')
        p = spaced8(P[3*i+3])  # get pointer for next line as nextline
    ''' WRITE P SECTION: '''
    for i in range(0, lenP):
        file.write(P[3*i+1])
    ''' T SECTION: '''
    # Terminator records the line counts of each preceding section.
    file.write('S 1G ' + str(len_g) + 'D' + spaced7(lenP*2) + 'P'
               + spaced7(P[-1]-1) + 40*' ' + 'T 1')
    file.close()
    print('Done')
return | {"/smPsection.py": ["/smFacets.py"], "/ASGmain.py": ["/smParam2File.py"], "/smIGESfile.py": ["/smPsection.py"], "/smParam2File.py": ["/smAngles.py", "/smFacets.py", "/smSTLAfile.py", "/smIGESfile.py"]} |
68,247 | SulaMueller/2018AntiScatterGrid | refs/heads/main | /smParam2File.py | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 13 14:55:30 2018
@author: Sula Mueller
DESCRIPTION:
defines functions used by ASGmain
"""
from smAngles import determineAnglesSparseInfo
from smAngles import determineAllAnglesAndCoordinates
from smFacets import FacetizeEntireGrid
from smSTLAfile import createSTLAFile
from smIGESfile import createIGESFile
''' readInputFromText
DESCRIPTION: extracts value/ input from given file-string by designation of
value
INPUT:
* inputtext: content of Input_Parameter_File as string
* valuename: name of value that gets extracted (eg 'FILETYPE' or 'SDD')
OUTPUT:
* value of designated valuename (eg 'STLA' or '120')
* output always as string '''
def readInputFromText(inputtext, valuename):
    """Extract the value assigned to *valuename* from the parameter text.

    Expects an assignment of the form ``<valuename> = <value>`` and returns
    the value as a string (e.g. 'STLA' or '120').

    Raises ValueError when *valuename* (or the '=' after it) is missing --
    the original silently misused find()'s -1 in that case. Scanning to the
    end of the text (instead of the original's ``[i:-1]``) also makes a
    value on the very last character readable.
    """
    i = inputtext.find(valuename)  # index of valuename
    if i == -1:
        raise ValueError(f"'{valuename}' not found in input text")
    i = inputtext.find('=', i)  # index of = after valuename
    if i == -1:
        raise ValueError(f"no '=' after '{valuename}' in input text")
    tokens = inputtext[i:].split()  # ['=', <value>, ...]
    return tokens[1]
''' readAsMatrix
DESCRIPTION: extracts value/ input from given file-string by designation of
value
INPUT:
* inputtext: content of Input_Parameter_File as string
* valuename: name of value that gets extracted (eg 'N_P')
* OneDgrid: 0 -> 2D grid
1 -> 1D with leaves along x-axis
-1 -> 1D with leaves along z-axis
OUTPUT:
* value of designated valuename (eg [32,32])
* output always as 1x2 matrix type float
* if 1D grid: values of OTHER dimension are set to 0 '''
def readAsMatrix(inputtext, valuename, gridtype):
    """Read *valuename* from the parameter text as a 1x2 float matrix.

    gridtype:  0 -> 2D grid, parse '[x, z]' (';' also accepted as separator)
               1 -> 1D grid, leaves along x-axis -> [0, value]
              -1 -> 1D grid, leaves along z-axis -> [value, 0]

    Returns [0.0, 0.0] for an empty bracket pair '[]'.

    Fixes three defects of the original: str.replace's result was discarded
    (strings are immutable, so ';' separators were never normalised); the
    empty-input check compared the bracket *contents* against the literal
    '[]' and could never match; and that branch returned the *string*
    '[0,0]' although callers compare against the list [0, 0].
    """
    if gridtype == 1:  # 1D with leaves along x-axis
        return [0, float(readInputFromText(inputtext, valuename))]
    if gridtype == -1:  # 1D with leaves along z-axis
        return [float(readInputFromText(inputtext, valuename)), 0]
    # else: 2D grid -- take the text between the [ ] following the name
    i = inputtext.find(valuename)
    i = inputtext.find('[', i)
    j = inputtext.find(']', i)
    substring = inputtext[i + 1:j]
    substring = substring.replace(';', ',')  # keep the replacement!
    parts = substring.split(',')
    if len(parts) < 2 or not parts[0].strip():
        return [0.0, 0.0]  # empty brackets -> default matrix
    # float() tolerates surrounding whitespace on its own
    return [float(parts[0]), float(parts[1])]
''' readAsIntMatrix
same as readAsMatrix but output is matrix of integers
if 1D grid: values of OTHER dimension are set to 1
(readAsIntMatrix gets only called by N_E, N_P -> treat as if 1 pixel) '''
def readAsIntMatrix(inputtext, valuename, gridtype):
    """Read *valuename* from the parameter text as a 1x2 integer matrix.

    Same parsing as readAsMatrix, but values become ints, and for 1D grids
    the unused dimension is set to 1 (callers N_E/N_P treat it as 1 pixel).

    Fixes the same defects as readAsMatrix: the discarded str.replace
    result, the never-matching '[]' check, and the string '[0,0]' being
    returned where callers expect a list.
    """
    if gridtype == 1:  # 1D with leaves along x-axis
        return [1, int(readInputFromText(inputtext, valuename))]
    if gridtype == -1:  # 1D with leaves along z-axis
        return [int(readInputFromText(inputtext, valuename)), 1]
    # else: 2D grid -- take the text between the [ ] following the name
    i = inputtext.find(valuename)
    i = inputtext.find('[', i)
    j = inputtext.find(']', i)
    substring = inputtext[i + 1:j]
    substring = substring.replace(';', ',')  # keep the replacement!
    parts = substring.split(',')
    if len(parts) < 2 or not parts[0].strip():
        return [0, 0]  # empty brackets -> default matrix
    # int() tolerates surrounding whitespace on its own
    return [int(parts[0]), int(parts[1])]
''' does same as createSTLAfromGridParams with simpler geometry '''
def createSTLAfromGridParams_Sparse(path, name, GR, th_grid, SDD, delta_M, N_E,
                                    N_P, n):
    """Like createSTLAfromGridParams, but from the reduced (sparse) geometry.

    Pipeline: angles (smAngles) -> facets (smFacets) -> ASCII STL file.
    """
    Angles = determineAnglesSparseInfo(SDD, delta_M, N_E, N_P, n)
    Facets = FacetizeEntireGrid(Angles, GR, th_grid)
    createSTLAFile(path, name, Facets)
    return
def createSTLAfromGridParams(path, name, GR, th_grid, SDD, EAD, size_M,
                             edge_M0, edge_M1, gap_M, edge_E0, edge_E1, gap_E,
                             N_E, N_P, n):
    """Compute all grid angles/coordinates, facetize them and write an
    ASCII STL file to path/name.

    Pipeline: angles (smAngles) -> facets (smFacets) -> STL (smSTLAfile).
    """
    Angles = determineAllAnglesAndCoordinates(SDD, EAD, size_M, edge_M0,
                                              edge_M1, gap_M, edge_E0,
                                              edge_E1, gap_E, N_E, N_P, n)
    Facets = FacetizeEntireGrid(Angles, GR, th_grid)
    createSTLAFile(path, name, Facets)
    return
def createIGESfromGridParams(path, name, GR, th_grid, SDD, EAD, size_M,
                             edge_M0, edge_M1, gap_M, edge_E0, edge_E1, gap_E,
                             N_E, N_P, n):
    """Compute all grid angles/coordinates and write them as an IGES file.

    Pipeline: angles (smAngles) -> IGES entities/file (smIGESfile).
    """
    Angles = determineAllAnglesAndCoordinates(SDD, EAD, size_M, edge_M0,
                                              edge_M1, gap_M, edge_E0, edge_E1,
                                              gap_E, N_E, N_P, n)
    createIGESFile(path, name, Angles, GR, th_grid)
return | {"/smPsection.py": ["/smFacets.py"], "/ASGmain.py": ["/smParam2File.py"], "/smIGESfile.py": ["/smPsection.py"], "/smParam2File.py": ["/smAngles.py", "/smFacets.py", "/smSTLAfile.py", "/smIGESfile.py"]} |
68,250 | Omay-Edekar/gameOfLife | refs/heads/master | /arrayBackedGrids.py | """
Pygame base template for opening a window
Sample Python/Pygame Programs
Simpson College Computer Science
http://programarcadegames.com/
http://simpson.edu/computer-science/
Explanation video: http://youtu.be/vRB_983kUMc
"""
import pygame
# Demo: 10x10 click-to-fill grid rendered with pygame.
# NOTE(review): indentation reconstructed from control flow; confirm
# against the original file.
# Define some colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
# Define some variables
grid = [[0 for x in range(10)] for y in range(10)]  # 0 = empty, 1 = filled
grid[1][5] = 1
pygame.init()
# Set the width and height of the screen [width, height]
size = (255, 255)
width = 20   # cell width in px
height = 20  # cell height in px
margin = 5   # gap between cells in px
screen = pygame.display.set_mode(size)
pygame.display.set_caption("My Game")
# Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# -------- Main Program Loop -----------
while not done:
    click = False
    # --- Main event loop
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
        if event.type == pygame.MOUSEBUTTONDOWN:
            click = True
    # --- Game logic should go here
    if click == True:
        # Convert pixel position to grid cell and mark it filled.
        # NOTE(review): row divides by `width` and column by `height`;
        # harmless here because both are 20, but swapped if they differ.
        pos = pygame.mouse.get_pos()
        row = pos[1]//(width + margin)
        column = pos[0]//(height + margin)
        grid[row][column] = 1
        print("Row:", row, " Column:", column)
    # --- Screen-clearing code goes here
    # Here, we clear the screen to black. Don't put other drawing commands
    # above this, or they will be erased with this command.
    # If you want a background image, replace this clear with blit'ing the
    # background image.
    screen.fill(BLACK)
    # --- Drawing code should go here
    for row in range(10):
        for column in range(10):
            color = WHITE
            if grid[row][column] == 1:
                color = GREEN
            pygame.draw.rect(screen, color, (0 + column*width + (column+1)*margin, 0 + row*width + (row+1)*margin, width, height))
    # --- Go ahead and update the screen with what we've drawn.
    pygame.display.flip()
    # --- Limit to 60 frames per second
    clock.tick(60)
# Close the window and quit.
pygame.quit() | {"/gameOfLife.py": ["/variables.py", "/functions.py", "/classes.py"], "/functions.py": ["/variables.py", "/classes.py"], "/classes.py": ["/functions.py", "/variables.py"]} |
68,251 | Omay-Edekar/gameOfLife | refs/heads/master | /variables.py | import pygame
import sys
import os
pygame.init()
def resource_path(relative):
    """Resolve a data-file path, honouring PyInstaller's _MEIPASS unpack dir."""
    base = getattr(sys, "_MEIPASS", None)
    if base is not None:
        return os.path.join(base, relative)
    return os.path.join(relative)
# Shared mutable game state: this module acts as a global namespace that
# gameOfLife.py, functions.py and classes.py read and write.
# initialize pygame variables
width = 720
heigth = 720  # (sic) "height" misspelt, but the name is used repo-wide
fps = 60
screen = pygame.display.set_mode((width, heigth))
clock = pygame.time.Clock()
# Color Constants
WHITE = (248, 248, 255)
BLACK = (53, 56, 57)
GREY = (192, 192, 192)
LIGHTBLACK = (83, 86, 87)
BROWN = (150, 81, 16)
# Counting Variables
queue = 0            # frame counter used to pace auto-play stepping
phase = 1            # current UI phase (1=title .. 5=go-to-generation)
rows = 0
cols = 0
generationsLeft = 0  # total number of generations to simulate
generation = 0       # index of the generation currently displayed
population = 0
maxPopulation = 0
minPopulation = 0
storagePopulation = []     # per-generation population history
storageMaxPopulation = []  # running maximum per generation
storageMinPopulation = []  # running minimum per generation
# Boolean Variables
done = False           # main-loop exit flag
not_generated = True   # generations not yet simulated
grid_creation = False  # widgets/grids for phase 4 already built
manual = False         # user draws the start state by hand
created = False        # start state finalised
play = False           # auto-play mode active
# Random Variables
font = resource_path(os.path.join("open-sans","OpenSans-Regular.ttf")) | {"/gameOfLife.py": ["/variables.py", "/functions.py", "/classes.py"], "/functions.py": ["/variables.py", "/classes.py"], "/classes.py": ["/functions.py", "/variables.py"]} |
68,252 | Omay-Edekar/gameOfLife | refs/heads/master | /gameOfLife.py | import pygame
import random
import time
import variables
import functions
import classes
# Main driver: a phase-based state machine drawn every frame.
#   phase 1: title screen     phase 2: random/manual choice
#   phase 3: size prompts     phase 4: simulate + browse generations
#   phase 5: jump to a specific generation
# NOTE(review): indentation of this script was reconstructed from control
# flow while reviewing; confirm nesting (especially in phases 3 and 4)
# against the original file.
random.seed()
pygame.init()
pygame.display.set_caption("The Game of Life")
# Widgets shared across phases (all drawn onto variables.screen).
start_button = classes.Button(180, variables.heigth*5/8, 360, 90, "Start", 45, variables.BLACK, variables.LIGHTBLACK, variables.WHITE)
random_button = classes.Button(120, variables.heigth*5/6, 180, 90, "Random", 45, variables.BLACK, variables.LIGHTBLACK, variables.WHITE)
manual_button = classes.Button(420, variables.heigth*5/6, 180, 90, "Manual", 45, variables.BLACK, variables.LIGHTBLACK, variables.WHITE)
rows_inputBox = classes.inputBox(variables.width/2, variables.heigth/2, 200, 100, "rows", 80, variables.WHITE, variables.BLACK, variables.BLACK)
cols_inputBox = classes.inputBox(variables.width/2, variables.heigth/2, 200, 100, "columns", 80, variables.WHITE, variables.BLACK, variables.BLACK)
generationsLeft_inputBox = classes.inputBox(variables.width/2, variables.heigth/2, 200, 100, "generations", 80, variables.WHITE, variables.BLACK, variables.BLACK)
generation_inputBox = classes.inputBox(variables.width/2, variables.heigth/2, 200, 100, "generation", 80, variables.WHITE, variables.BLACK, variables.BLACK)
# -------- Main Program Loop -----------
while not variables.done:
    # --- Main event loop
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            variables.done = True
    # --- Screen-clearing code goes here
    # Here, we clear the screen to white. Don't put other drawing commands
    # above this, or they will be erased with this command.
    # If you want a background image, replace this clear with blit'ing the
    # background image.
    variables.screen.fill(variables.WHITE)
    # --- Drawing code should go here
    if variables.phase == 1:
        # Title screen: wait for Start.
        variables.screen.fill(variables.WHITE)
        functions.render_text(80, "The Game of Life", variables.BLACK, variables.width/2, variables.heigth*3/8)
        start_button.click(functions.add_to_phase)
    if variables.phase == 2:
        # Choose random or manual start state.
        variables.screen.fill(variables.WHITE)
        random_button.click(functions.add_to_phase)
        manual_button.click(functions.set_manual)
    if variables.phase == 3:
        # Prompt for grid size and generation count (blocking input boxes).
        # rows/cols get +2 for the -1 frame border around the playfield.
        variables.screen.fill(variables.WHITE)
        variables.rows = int(rows_inputBox.ask())
        if variables.rows != 0:
            variables.rows += 2
        variables.screen.fill(variables.WHITE)
        variables.cols = int(cols_inputBox.ask())
        if variables.cols != 0:
            variables.cols += 2
        variables.screen.fill(variables.WHITE)
        variables.generationsLeft = int(generationsLeft_inputBox.ask())
        if variables.generationsLeft != 0:
            functions.sets_phase_to(4)
    if variables.phase == 4:
        if variables.not_generated:
            if variables.grid_creation == False:
                # One-time construction of grids and phase-4 widgets.
                grid = [[0 for i in range(variables.cols)] for j in range(variables.rows)]
                storageGrid = [[[0 for i in range(variables.cols)] for j in range(variables.rows)] for k in range(variables.generationsLeft)]
                displayGrid = classes.grid(variables.rows, variables.cols, variables.BLACK, variables.WHITE, variables.GREY)
                previous_button = classes.Button((displayGrid.width + 2*displayGrid.margin), ((displayGrid.rows-1)*displayGrid.height + displayGrid.rows*displayGrid.margin), displayGrid.width, displayGrid.height, "Previous", displayGrid.height/5, variables.BLACK, variables.LIGHTBLACK, variables.WHITE)
                next_button = classes.Button(((displayGrid.cols-2)*displayGrid.width + (displayGrid.cols-1)*displayGrid.margin), ((displayGrid.rows-1)*displayGrid.height + displayGrid.rows*displayGrid.margin), displayGrid.width, displayGrid.height, "Next", displayGrid.height/3, variables.BLACK, variables.LIGHTBLACK, variables.WHITE)
                play_button = classes.Button((previous_button.x + next_button.x)/2, ((displayGrid.rows-1)*displayGrid.height + displayGrid.rows*displayGrid.margin), displayGrid.width, displayGrid.height, "Play", displayGrid.height/3, variables.BLACK, variables.LIGHTBLACK, variables.WHITE)
                pause_button = classes.Button((previous_button.x + next_button.x)/2, ((displayGrid.rows-1)*displayGrid.height + displayGrid.rows*displayGrid.margin), displayGrid.width, displayGrid.height, "Pause", displayGrid.height/3, variables.BLACK, variables.LIGHTBLACK, variables.WHITE)
                go_to_button = classes.Button((previous_button.x + next_button.x)/2, displayGrid.margin, displayGrid.width, displayGrid.height, "Go To", displayGrid.height/3, variables.BLACK, variables.LIGHTBLACK, variables.WHITE)
                done_button = classes.Button((previous_button.x + next_button.x)/2, ((displayGrid.rows-1)*displayGrid.height + displayGrid.rows*displayGrid.margin), displayGrid.width, displayGrid.height, "Done", displayGrid.height/3, variables.BLACK, variables.LIGHTBLACK, variables.WHITE)
                variables.grid_creation = True
            if variables.manual:
                # Manual editing: frame the grid, toggle the clicked cell,
                # snapshot into generation 0, redraw, and wait for Done.
                # NOTE(review): initGrid(..., False) re-zeroes interior cells
                # on every frame -- confirm this does not clobber earlier
                # manual toggles (grid is copied to storageGrid[0] below).
                functions.initGrid(grid, storageGrid, variables.rows, variables.cols, variables.generation, False)
                displayGrid.toggleGrid(grid)
                for row in range(variables.rows):
                    for col in range(variables.cols):
                        storageGrid[0][row][col] = grid[row][col]
                variables.screen.fill(variables.GREY)
                displayGrid.displayGrid(storageGrid, variables.generation)
                done_button.click(functions.toggle_created)
            else:
                functions.initGrid(grid, storageGrid, variables.rows, variables.cols, variables.generation, True)
                variables.created = True
            if variables.created:
                if variables.manual:
                    # Seed the population statistics from the hand-drawn state.
                    for row in range(variables.rows):
                        for col in range(variables.cols):
                            if grid[row][col] == 1:
                                variables.population += 1
                    variables.maxPopulation = variables.population
                    variables.minPopulation = variables.population
                    variables.storagePopulation.append(variables.population)
                    variables.storageMaxPopulation.append(variables.population)
                    variables.storageMinPopulation.append(variables.population)
                # Simulate every remaining generation up front.
                for i in range(variables.generationsLeft-1):
                    variables.generation += 1
                    functions.processGeneration(grid, storageGrid, variables.rows, variables.cols, variables.generation)
                variables.generation = 0
                variables.not_generated = False
        if variables.not_generated == False:
            # Browse mode: show the stored generation plus its statistics.
            variables.screen.fill(variables.GREY)
            displayGrid.displayGrid(storageGrid, variables.generation)
            displayGrid.displayInfo()
            if variables.play == False:
                previous_button.click(functions.remove_from_generation)
                next_button.click(functions.add_to_generation)
                play_button.click(functions.toggle_play)
                go_to_button.click(functions.add_to_phase)
            elif variables.play == True:
                # Auto-play: advance one generation every fps/3 frames.
                if variables.queue == variables.fps/3:
                    functions.add_to_generation()
                    variables.queue = 0
                else:
                    variables.queue += 1
                pause_button.click(functions.toggle_play)
    if variables.phase == 5:
        # Ask for a generation number and return to browse mode.
        variables.screen.fill(variables.WHITE)
        tempGeneration = variables.generation
        variables.generation = int(generation_inputBox.ask())-1
        functions.sets_phase_to(4)
    # --- Go ahead and update the screen with what we've drawn.
    pygame.display.flip()
    # --- Limit to 60 frames per second
    variables.clock.tick(variables.fps)
pygame.quit() | {"/gameOfLife.py": ["/variables.py", "/functions.py", "/classes.py"], "/functions.py": ["/variables.py", "/classes.py"], "/classes.py": ["/functions.py", "/variables.py"]} |
68,253 | Omay-Edekar/gameOfLife | refs/heads/master | /functions.py | import pygame
import os
import sys
import random
import variables
import classes
# Pygame Logic Functions
_image_library = {}
def get_image(path):
    """Load an image through pygame, caching results in _image_library.

    The cache key is the path as given; separators are canonicalised for
    the current OS only when actually loading from disk.
    """
    global _image_library
    cached = _image_library.get(path)
    if cached is not None:
        return cached
    normalised = path.replace('/', os.sep).replace('\\', os.sep)
    loaded = pygame.image.load(normalised)
    _image_library[path] = loaded
    return loaded
def text_object(text, font, color):
    """Render *text* with *font* and return (surface, bounding rect)."""
    rendered = font.render(text, True, color)
    return rendered, rendered.get_rect()
def render_text(font_size, text, color, x, y):
    """Draw *text* centred at (x, y) on the shared screen."""
    chosen_font = pygame.font.Font(variables.font, font_size)
    surface, rect = text_object(text, chosen_font, color)
    rect.center = (x, y)
    variables.screen.blit(surface, rect)
def get_key():
    """Block until a key is pressed and return its pygame key code.

    Busy-waits on the event queue; every non-KEYDOWN event is discarded.
    """
    while True:
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                return event.key
            else:
                pass  # drop mouse/motion/etc. events while waiting
def get_click():
    """Return True if the next pending event is a mouse-button press.

    NOTE(review): this returns False as soon as the *first* non-click event
    arrives (a click queued behind it is missed) and only busy-waits while
    the queue is empty -- so it behaves as a "was the screen just clicked"
    poll rather than a blocking wait; confirm that is the intent.
    """
    while True:
        for event in pygame.event.get():
            if event.type == pygame.MOUSEBUTTONDOWN:
                return True
            else:
                return False
def set_manual():
    """Switch to manual grid editing and advance to the next phase."""
    variables.manual = True
    variables.phase = variables.phase + 1
def add_to_phase():
    """Advance the UI state machine by one phase."""
    variables.phase = variables.phase + 1
def sets_phase_to(num):
    """Jump the UI state machine directly to phase *num*."""
    variables.phase = num
def add_to_generation():
    """Step the displayed generation forward, wrapping past the last one."""
    nxt = variables.generation + 1
    variables.generation = 0 if nxt == variables.generationsLeft else nxt
def remove_from_generation():
    """Step the displayed generation backward, wrapping before the first."""
    prev = variables.generation - 1
    variables.generation = variables.generationsLeft - 1 if prev < 0 else prev
def toggle_play():
    """Flip the global play/pause flag (idiomatic `not` instead of if/else)."""
    variables.play = not variables.play
def toggle_created():
    """Flip the global 'start state finalised' flag (idiomatic `not`)."""
    variables.created = not variables.created
# def
# Game of Life Logic Functions
def countNeighbors(grid, rows, cols, x, y):
    """Count live (value 1) neighbours of cell (x, y).

    Out-of-range positions, the cell itself and frame cells (-1) are
    all ignored.
    """
    total = 0
    for nx in range(max(x - 1, 0), min(x + 2, rows)):
        for ny in range(max(y - 1, 0), min(y + 2, cols)):
            if (nx, ny) == (x, y):
                continue
            value = grid[nx][ny]
            if value != -1:
                total += value
    return total
def processGeneration(grid, storageGrid, rows, cols, generation):
    """Advance *grid* one Conway generation in place and archive it.

    Standard rules on interior cells; frame cells (-1) are preserved.
    The new grid is copied into storageGrid[generation], and the
    module-wide population statistics in `variables` (population,
    max/minPopulation and the three storage lists) are updated.

    NOTE(review): indentation reconstructed from control flow; confirm
    against the original file.
    """
    # First pass: compute the next state into a scratch grid.
    tempGrid = [[0 for x in range(cols)] for x in range(rows)]
    for i in range(rows):
        for j in range(cols):
            neighbors = countNeighbors(grid, rows, cols, i, j)
            if (grid[i][j] == -1):
                tempGrid[i][j] = -1
            elif (grid[i][j] == 1 and (neighbors < 2 or neighbors > 3)):
                tempGrid[i][j] = 0  # under-/over-population
            elif (grid[i][j] == 0 and (neighbors == 3)):
                tempGrid[i][j] = 1  # reproduction
            else:
                tempGrid[i][j] = grid[i][j]  # survival / stays dead
    # Second pass: copy back and recount the population.
    variables.population = 0
    for i in range(rows):
        for j in range(cols):
            if (grid[i][j] == -1):
                continue
            elif (tempGrid[i][j] == 1):
                variables.population += 1
            grid[i][j] = tempGrid[i][j]
    if (variables.population > variables.maxPopulation):
        variables.maxPopulation = variables.population
    if (variables.population < variables.minPopulation or variables.minPopulation == 0):
        variables.minPopulation = variables.population
    variables.storagePopulation.append(variables.population)
    variables.storageMaxPopulation.append(variables.maxPopulation)
    variables.storageMinPopulation.append(variables.minPopulation)
    # Archive this generation for later browsing.
    for i in range(rows):
        for j in range(cols):
            storageGrid[generation][i][j] = grid[i][j]
def initGrid(grid, storageGrid, rows, cols, generation, not_manual):
    """Initialise *grid* in place and archive it into storageGrid[generation].

    Border cells become -1 (frame). With not_manual=True, interior cells
    are seeded alive (1) with probability 1/3 and the statistics in
    `variables` are initialised; with not_manual=False, interior cells are
    reset to 0 (dead) for manual editing.

    NOTE(review): indentation reconstructed from control flow; confirm
    against the original file.
    """
    for i in range(rows):
        for j in range(cols):
            if (i == 0 or i == rows-1 or j == 0 or j == cols-1):
                grid[i][j] = -1
            elif not_manual:
                # randint(1, 3) % 3 == 0 one time in three -> alive
                cell = random.randint(1, 3) % 3
                if (cell == 0):
                    grid[i][j] = 1
                    variables.population += 1
                else:
                    grid[i][j] = 0
            else:
                grid[i][j] = 0
    if not_manual:
        variables.maxPopulation = variables.population
        variables.minPopulation = variables.population
        variables.storagePopulation.append(variables.population)
        variables.storageMaxPopulation.append(variables.maxPopulation)
        variables.storageMinPopulation.append(variables.minPopulation)
    for i in range(rows):
        for j in range(cols):
            storageGrid[generation][i][j] = grid[i][j]
def printGrid(grid, rows, cols, generation, population, maxPopulation, minPopulation):
    """Print a generation header plus the grid as ASCII ('-' marks frame cells)."""
    print("\nGeneration: ", generation+1, "\nCurrent Population: ", population, "\nMaximum Population: ", maxPopulation, "\nMinimum Population: ", minPopulation, '\n', end = '')
    for r in range(rows):
        for c in range(cols):
            cell = grid[r][c]
            # -1 renders as '-'; every other value renders as str(value)
            token = '-' if cell == -1 else str(cell)
            print(token, ' ', end='')
        print('')
68,254 | Omay-Edekar/gameOfLife | refs/heads/master | /classes.py | import pygame
import time
import functions
import variables
class Button(pygame.Rect):
    """Clickable rectangular button drawn on the shared screen.

    NOTE(review): indentation reconstructed from control flow; confirm
    against the original file.
    """

    def __init__(self, x, y, width, height, message, font_size, inactive_color, active_color, text_color):
        super().__init__(x, y, width, height)
        self.message = message            # label text
        self.font_size = int(font_size)
        self.inactive_color = inactive_color  # fill when not hovered
        self.active_color = active_color      # fill when hovered
        self.text_color = text_color

    def click(self, action=None):
        """Draw the button (hover-aware) and invoke *action* if clicked.

        Must be called every frame; the hover state decides the fill
        colour and functions.get_click() polls for a press.
        """
        mouse_pos = pygame.mouse.get_pos()
        if self.collidepoint(mouse_pos):
            pygame.draw.rect(variables.screen, self.active_color, (self.x, self.y, self.width, self.height))
            if functions.get_click() and action is not None:
                action()
        else:
            pygame.draw.rect(variables.screen, self.inactive_color, (self.x, self.y, self.width, self.height))
        # Label is drawn centred on top of the rectangle in both states.
        button_font = pygame.font.Font(variables.font, self.font_size)
        text_surface, text_rect = functions.text_object(self.message, button_font, self.text_color)
        text_rect.center = ((self.x + (self.width / 2)), (self.y + (self.height / 2)))
        variables.screen.blit(text_surface, text_rect)
class inputBox(pygame.Rect):
    """Blocking numeric input prompt drawn on the shared screen.

    `ask()` loops until Return is pressed, echoing typed digits inside a
    boxed field whose layout is sized with "dummy" renders of the widest
    expected text.

    NOTE(review): indentation reconstructed from control flow; confirm
    against the original file.
    """

    def __init__(self, x, y, width, height, message, font_size, box_color, border_color, text_color):
        super().__init__(x, y, width, height)
        self.message = message      # field name; "generation" selects the short prompt
        self.font_size = font_size
        self.box_color = box_color
        self.border_color = border_color
        self.text_color = text_color

    def display(self, string):
        """Redraw the prompt with *string* as the current typed value."""
        if self.message == "generation":
            # Short "Go to <generation>" prompt plus the valid maximum.
            dist = .55
            fontMiddle = pygame.font.Font(variables.font, self.font_size)
            text_surface_middle, text_rect_middle = functions.text_object("Go to", fontMiddle, self.text_color)
            text_rect_middle.center = (self.x, self.y - self.font_size * dist)
            variables.screen.blit(text_surface_middle, text_rect_middle)
            functions.render_text(45, ''.join(["Max generation: ", str(variables.generationsLeft)]), variables.BLACK, variables.width/2, variables.heigth * 5/6)
        else:
            # Generic "Input the number of <message>" prompt.
            dist = 1.1
            fontTop = pygame.font.Font(variables.font, self.font_size)
            text_surface_top, text_rect_top = functions.text_object("Input the", fontTop, self.text_color)
            text_rect_top.center = (self.x, self.y - self.font_size * dist)
            fontMiddle = pygame.font.Font(variables.font, self.font_size)
            text_surface_middle, text_rect_middle = functions.text_object("number of", fontMiddle, self.text_color)
            text_rect_middle.center = (self.x, self.y)
            variables.screen.blit(text_surface_top, text_rect_top)
            variables.screen.blit(text_surface_middle, text_rect_middle)
        # The dummy renders below only measure text extents: dummy1 sizes
        # the widest expected value ("777"), dummy2 the empty field, and
        # their difference positions the editable box and its border.
        dummy1FontBottom = pygame.font.Font(variables.font, self.font_size)
        dummy1_text_surface_bottom, dummy1_text_rect_bottom = functions.text_object(''.join([self.message, ": 777"]), dummy1FontBottom, self.text_color)
        dummy1_text_rect_bottom.center = (self.x, self.y + self.font_size * dist)
        dummy2FontBottom = pygame.font.Font(variables.font, self.font_size)
        dummy2_text_surface_bottom, dummy2_text_rect_bottom = functions.text_object(''.join([self.message, ": "]), dummy2FontBottom, self.text_color)
        dummy2_text_rect_bottom.top = dummy1_text_rect_bottom.top
        dummy2_text_rect_bottom.left = dummy1_text_rect_bottom.left
        fontBottom = pygame.font.Font(variables.font, self.font_size)
        text_surface_bottom, text_rect_bottom = functions.text_object(''.join([self.message, ": ", string]), fontBottom, self.text_color)
        text_rect_bottom.top = dummy1_text_rect_bottom.top
        text_rect_bottom.left = dummy1_text_rect_bottom.left
        pygame.draw.rect(variables.screen, variables.WHITE, (dummy2_text_rect_bottom.right - self.font_size/6, text_rect_bottom.top + self.font_size/6, dummy1_text_rect_bottom.right - dummy2_text_rect_bottom.right + self.font_size/6, text_rect_bottom.height - self.font_size/3), 0)
        pygame.draw.rect(variables.screen, variables.BLACK, (dummy2_text_rect_bottom.right - self.font_size/6, text_rect_bottom.top + self.font_size/6, (dummy1_text_rect_bottom.right - dummy2_text_rect_bottom.right + self.font_size/6) * 1.1, (text_rect_bottom.height - self.font_size/3) * 1.1), 1)
        variables.screen.blit(text_surface_bottom, text_rect_bottom)
        pygame.display.flip()

    def ask(self):
        """Block until a non-empty numeric string is entered; return it.

        Accepts digit keys only (key codes <= 127 whose character is
        numeric); Backspace deletes, Return submits.
        """
        string = ""
        variables.screen.fill(variables.WHITE)
        self.display(string)
        while True:
            variables.screen.fill(variables.WHITE)
            inkey = functions.get_key()
            if inkey == pygame.K_BACKSPACE:
                string = string[:-1]
            elif inkey == pygame.K_RETURN and string != "":
                break
            elif inkey <= 127 and chr(inkey).isnumeric():
                string += chr(inkey)
            self.display(string)
        return string
class grid():
    """Renders a Game-of-Life board on the shared screen and handles clicks.

    Cell sizes are derived from the window size so the whole board fits;
    the margin shrinks as the board grows.

    NOTE(review): indentation reconstructed from control flow; confirm
    against the original file.
    """

    def __init__(self, rows, cols, alive_color, dead_color, frame_color):
        super().__init__()
        self.rows = rows
        self.cols = cols
        # Smaller gaps for bigger boards so everything stays on screen.
        if rows >= 100 or cols >= 100 : self.margin = 1
        elif rows >= 25 or cols >= 25 : self.margin = 5
        else : self.margin = 10
        self.width = (variables.width-self.margin-cols*self.margin)/(cols)
        self.height = (variables.heigth-self.margin-rows*self.margin)/(rows)
        self.alive_color = alive_color
        self.dead_color = dead_color
        self.frame_color = frame_color

    def displayGrid(self, grid, generation):
        """Draw the stored grid of the given generation, one rect per cell."""
        for row in range(self.rows):
            for col in range(self.cols):
                if grid[generation][row][col] == -1:
                    color = self.frame_color
                elif grid[generation][row][col] == 0:
                    color = self.dead_color
                elif grid[generation][row][col] == 1:
                    color = self.alive_color
                pygame.draw.rect(variables.screen, color, (0 + col*self.width + (col+1)*self.margin, 0 + row*self.height + (row+1)*self.margin, self.width, self.height))

    def displayInfo(self):
        """Overlay generation number and population statistics on the frame.

        The "dummy" render below measures the widest possible population
        text so the three statistics lines are left-aligned together.
        """
        fontGen = pygame.font.Font(variables.font, int(self.height/2))
        text_surface_gen, text_rect_gen = functions.text_object(''.join(["Gen: ", str(variables.generation+1)]), fontGen, variables.WHITE)
        text_rect_gen.left = self.width + 2*self.margin
        text_rect_gen.centery = .5*self.height + self.margin
        dummyFontPop = pygame.font.Font(variables.font, int(self.height/3))
        dummy_text_surface_pop, dummy_text_rect_pop = functions.text_object(''.join(["Population: ", str(self.rows*self.cols)]), dummyFontPop, variables.WHITE)
        dummy_text_rect_pop.right = self.width*(self.cols-1) + (self.cols-1)*self.margin + self.margin
        dummy_text_rect_pop.centery = .5*self.height + self.margin
        fontPop = pygame.font.Font(variables.font, int(self.height/3))
        text_surface_pop, text_rect_pop = functions.text_object(''.join(["Population: ", str(variables.storagePopulation[variables.generation])]), fontPop, variables.WHITE)
        text_rect_pop.left = dummy_text_rect_pop.left
        text_rect_pop.centery = dummy_text_rect_pop.centery - (self.height/3) * 1.1
        text_surface_maxPop, text_rect_maxPop = functions.text_object(''.join(["Max Population: ", str(variables.storageMaxPopulation[variables.generation])]), fontPop, variables.WHITE)
        text_rect_maxPop.left = dummy_text_rect_pop.left
        text_rect_maxPop.centery = dummy_text_rect_pop.centery
        text_surface_minPop, text_rect_minPop = functions.text_object(''.join(["Min Population: ", str(variables.storageMinPopulation[variables.generation])]), fontPop, variables.WHITE)
        text_rect_minPop.left = dummy_text_rect_pop.left
        text_rect_minPop.centery = dummy_text_rect_pop.centery + (self.height/3) * 1.1
        variables.screen.blit(text_surface_gen, text_rect_gen)
        variables.screen.blit(text_surface_pop, text_rect_pop)
        variables.screen.blit(text_surface_maxPop, text_rect_maxPop)
        variables.screen.blit(text_surface_minPop, text_rect_minPop)

    def toggleGrid(self, grid):
        """Flip the clicked cell between alive (1) and dead (0) in place.

        Frame cells (-1) are left unchanged.
        NOTE(review): row divides by self.width and column by self.height
        -- the two look swapped (cf. displayGrid, where width goes with
        columns); only harmless when cells are square. Verify.
        """
        if functions.get_click():
            pos = pygame.mouse.get_pos()
            row = int(pos[1]/(self.width + self.margin))
            column = int(pos[0]/(self.height + self.margin))
            if grid[row][column] == 1:
                grid[row][column] = 0
            elif grid[row][column] == 0:
grid[row][column] = 1 | {"/gameOfLife.py": ["/variables.py", "/functions.py", "/classes.py"], "/functions.py": ["/variables.py", "/classes.py"], "/classes.py": ["/functions.py", "/variables.py"]} |
68,255 | Omay-Edekar/gameOfLife | refs/heads/master | /asciiGameOfLife.py | import random
def countNeighbors(grid, rows, cols, x, y):
    """Sum the live (1) neighbours of (x, y); -1 frame cells don't count."""
    total = 0
    for dx in (-1, 0, 1):
        for dy in (-1, 0, 1):
            if dx == 0 and dy == 0:
                continue
            i, j = x + dx, y + dy
            if 0 <= i < rows and 0 <= j < cols and grid[i][j] != -1:
                total += grid[i][j]
    return total
def processGeneration(grid, storageGrid, rows, cols, generation,
                      population=0, maxPopulation=0, minPopulation=0):
    """Advance *grid* one Conway generation in place and archive it.

    Standard rules on interior cells; frame cells (-1) are preserved. The
    new grid is copied into storageGrid[generation].

    The trailing statistics parameters now default to 0 so the existing
    5-argument call sites in this script work (the original 8-parameter
    signature made them raise TypeError). Because ints are immutable, the
    in-function updates could never reach the caller, so the freshly
    computed values are returned as (population, maxPopulation,
    minPopulation); callers that ignore the return value are unaffected.
    """
    # First pass: compute the next state into a scratch grid.
    tempGrid = [[0 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        for j in range(cols):
            neighbors = countNeighbors(grid, rows, cols, i, j)
            if grid[i][j] == -1:
                tempGrid[i][j] = -1
            elif grid[i][j] == 1 and (neighbors < 2 or neighbors > 3):
                tempGrid[i][j] = 0  # under-/over-population
            elif grid[i][j] == 0 and neighbors == 3:
                tempGrid[i][j] = 1  # reproduction
            else:
                tempGrid[i][j] = grid[i][j]  # survival / stays dead
    # Second pass: copy back and recount the population.
    population = 0
    for i in range(rows):
        for j in range(cols):
            if grid[i][j] == -1:
                continue
            if tempGrid[i][j] == 1:
                population += 1
            grid[i][j] = tempGrid[i][j]
    if population > maxPopulation:
        maxPopulation = population
    if population < minPopulation or minPopulation == 0:
        minPopulation = population
    # Archive this generation for later printing.
    for i in range(rows):
        for j in range(cols):
            storageGrid[generation][i][j] = grid[i][j]
    return population, maxPopulation, minPopulation
def initGrid(grid, storageGrid, rows, cols, generation,
             population=0, maxPopulation=0, minPopulation=0):
    """Fill *grid* with a random start state in place and archive it.

    Border cells become -1 (frame); interior cells are alive (1) with
    probability 1/3. The result is copied into storageGrid[generation].

    The trailing statistics parameters now default to 0 so the existing
    5-argument call site in this script works (the original 8-parameter
    signature made it raise TypeError). Because ints are immutable, the
    in-function updates could never reach the caller, so the computed
    values are returned as (population, maxPopulation, minPopulation);
    callers that ignore the return value are unaffected.
    """
    for i in range(rows):
        for j in range(cols):
            if i == 0 or i == rows - 1 or j == 0 or j == cols - 1:
                grid[i][j] = -1
            else:
                # randint(1, 3) % 3 == 0 one time in three -> alive
                cell = random.randint(1, 3) % 3
                if cell == 0:
                    grid[i][j] = 1
                    population += 1
                else:
                    grid[i][j] = 0
    maxPopulation = population
    minPopulation = population
    for i in range(rows):
        for j in range(cols):
            storageGrid[generation][i][j] = grid[i][j]
    return population, maxPopulation, minPopulation
def printGrid(grid, rows, cols, generation):
    """Print one generation header plus the grid as '-', '0', '1' symbols.

    Reads the module globals population / maxPopulation / minPopulation
    for the header line.
    """
    print("\nGeneration: ", generation+1, "\nCurrent Population: ", population, "\nMaximum Population: ", maxPopulation, "\nMinimum Population: ", minPopulation, '\n', end = '')
    for row in range(rows):
        for col in range(cols):
            value = grid[row][col]
            if value == -1:
                print("-", ' ', end = '')
            elif value == 0:
                print('0', ' ', end = '')
            elif value == 1:
                print("1", ' ', end = '')
            else:
                print(value, ' ', end = '')
        print('')
# --- interactive driver -------------------------------------------------
random.seed()
generation = 0
population = 0
maxPopulation = 0
minPopulation = 0

rows = int(input("Welcome to the Game of Life!\nInput the number of rows: "))
cols = int(input("Input the number of columns: "))
generationsLeft = int(input("Input the number of generations: "))

# Pad the board with a one-cell -1 border on every side.
rows += 2
cols += 2
grid = [[0 for i in range(cols)] for i in range(rows)]
storageGrid = [[[0 for i in range(cols)] for i in range(rows)] for i in range(generationsLeft)]

# BUG FIX: initGrid/processGeneration are defined with the three
# population counters as parameters; the original calls omitted them and
# raised TypeError. Pass them explicitly.
initGrid(grid, storageGrid, rows, cols, generation, population, maxPopulation, minPopulation)
# printGrid(grid, rows, cols, generation)
for i in range(generationsLeft-1):
    generation += 1
    processGeneration(grid, storageGrid, rows, cols, generation, population, maxPopulation, minPopulation)
    # printGrid(grid, rows, cols, generation)

# Replay every stored generation as an ASCII grid.
for i in range(generationsLeft):
    print("Generation: ", i+1)
    for j in range(rows):
        for k in range(cols):
            if (storageGrid[i][j][k] == -1):
                print('-', ' ', end = '')
            elif (storageGrid[i][j][k] == 0):
                print('0', ' ', end = '')
            elif (storageGrid[i][j][k] == 1):
                print('1', ' ', end = '')
            else:
                print(storageGrid[i][j][k], ' ', end = '')
        print('')
    print("-"*2*cols)
print("\nThank you for playing the Game of Life!")
68,256 | szerhoudi/ActiveLearning | refs/heads/master | /strategies/strategies.py | import numpy as np
from libact.base.interfaces import QueryStrategy, ContinuousModel, ProbabilisticModel
from libact.utils import zip
from sklearn.metrics.pairwise import cosine_similarity
class CMBSampling(QueryStrategy):
    """Combined margin/diversity query strategy.

    Scores each unlabeled instance as a convex combination (weight
    ``lmbda``) of its decision-boundary distance and a cosine-similarity
    diversity term, then queries the minimizer.
    """

    def __init__(self, *args, **kwargs):
        super(CMBSampling, self).__init__(*args, **kwargs)
        self.model = kwargs.pop('model', None)
        self.lmbda = kwargs.pop('lmbda', 0.5)
        if self.model is None:
            raise TypeError(
                "__init__() missing required keyword-only argument: 'model'"
            )
        if not isinstance(self.model, (ContinuousModel, ProbabilisticModel)):
            raise TypeError(
                "model has to be a ContinuousModel or ProbabilisticModel"
            )
        self.model.train(self.dataset)

    def make_query(self, return_score=False):
        """Return the id of the next instance to label.

        With ``return_score=True``, also return [(entry_id, score), ...].
        """
        ds = self.dataset
        self.model.train(ds)
        entry_ids, X_pool = zip(*ds.get_unlabeled_entries())

        if isinstance(self.model, ProbabilisticModel):
            decision = self.model.predict_proba(X_pool)
        elif isinstance(self.model, ContinuousModel):
            decision = self.model.predict_real(X_pool)

        # For multi-class output keep only the two largest per-row values.
        if np.shape(decision)[1] > 2:
            decision = -(np.partition(-decision, 2, axis=1)[:, :2])

        # DIST term: distance between the two top decision values.
        dist = np.abs(decision[:, 0] - decision[:, 1])
        # DIV term: negated cosine similarity between the two columns.
        top = [np.array(decision[:, 0]).tolist()]
        runner_up = [np.array(decision[:, 1]).tolist()]
        div = -np.max(cosine_similarity(top, runner_up), axis=1)

        score = (self.lmbda * dist) + ((1 - self.lmbda) * div)
        ask_id = np.argmin(score)
        if return_score:
            return entry_ids[ask_id], list(zip(entry_ids, score))
        return entry_ids[ask_id]
class USampling(QueryStrategy):
    """Uncertainty sampling, max-margin ('mm') variant.

    Queries the unlabeled instance whose two top decision values are
    closest together.
    """

    def __init__(self, *args, **kwargs):
        super(USampling, self).__init__(*args, **kwargs)
        self.model = kwargs.pop('model', None)
        if self.model is None:
            raise TypeError(
                "__init__() missing required keyword-only argument: 'model'"
            )
        if not isinstance(self.model, (ContinuousModel, ProbabilisticModel)):
            raise TypeError(
                "model has to be a ContinuousModel or ProbabilisticModel"
            )
        self.model.train(self.dataset)
        self.method = kwargs.pop('method', 'mm')

    def make_query(self, return_score=False):
        """Return the id of the most uncertain unlabeled instance.

        With ``return_score=True``, also return [(entry_id, score), ...].
        """
        ds = self.dataset
        self.model.train(ds)
        entry_ids, X_pool = zip(*ds.get_unlabeled_entries())

        if isinstance(self.model, ProbabilisticModel):
            decision = self.model.predict_proba(X_pool)
        elif isinstance(self.model, ContinuousModel):
            decision = self.model.predict_real(X_pool)

        # Alternative rules ('sm', 'entropy') were disabled upstream; only
        # the max-margin rule is active. NOTE(review): any other `method`
        # value leaves `score` unbound and raises NameError below.
        if self.method == 'mm':  # max margin
            margin = np.partition(-decision, 1, axis=1)
            score = -np.abs(margin[:, 0] - margin[:, 1])

        ask_id = np.argmax(score)
        if return_score:
            return entry_ids[ask_id], list(zip(entry_ids, score))
        return entry_ids[ask_id]
| {"/task_balanced_dataset.py": ["/strategies/strategies.py"]} |
68,257 | szerhoudi/ActiveLearning | refs/heads/master | /task_balanced_dataset.py | """
Active Learning - choosing queries strategies
(UncertaintySampling (Max Margin) - CMB Sampling : Combination of active learning algorithms
(distance-based (DIST), diversity-based (DIV)))
"""
import copy
import time
import codecs
import random
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
try:
from sklearn.model_selection import train_test_split
except ImportError:
from sklearn.cross_validation import train_test_split
from strategies.strategies import USampling, CMBSampling
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from libact.base.dataset import Dataset
from libact.models import LogisticRegression, SVM
from libact.query_strategies import UncertaintySampling, RandomSampling, QueryByCommittee, HintSVM
def openfile_txt(filepath):
    """Read a UTF-16 text file and split it into record chunks on '"\\n['."""
    with open(filepath, 'r', encoding='utf16') as handle:
        return handle.read().split('"\n[')
def simulate_w4v(tweet_id):
    """Look up the precomputed vector for *tweet_id*.

    Uses the module globals ``ids_list``/``vectors_list`` populated by
    ``main``.
    """
    return vectors_list[ids_list.index(tweet_id)]
def get_vectors_list(filepath):
    """Parse the vectors dump into two parallel lists.

    Each record is ``<vector-string>;<tweet-id>``. Returns
    (vector strings, tweet ids).
    """
    vectors_list_x, ids_list_x = [], []
    with open(filepath, 'r', encoding='utf16') as f:
        file = f.read().split('"\n[')
        for line in file:
            # Normalize whitespace left by the dump format before splitting.
            parts = line.replace('\n', '').replace('  ', ' ').replace('  ', ' ').replace('  ', ' ').split(";")
            vectors_list_x.append(parts[0])
            ids_list_x.append(parts[1].replace(' ', ''))
    return vectors_list_x, ids_list_x
def define_label(tweet_id):
    """Return 1 when *tweet_id* appears in the positive-tweet CSV, else 0.

    Relies on the module global ``pos_filepath`` set up by ``main``.
    """
    with open(pos_filepath, 'r', encoding='utf16') as handle:
        next(handle)  # skip the header row
        for row in handle.readlines():
            candidate = row.split(";")[0].replace('"', '')
            if tweet_id in candidate:
                label = 1
                break
            label = 0
    return label
def define_tweet_by_id(line_id):
    """Return the tweet text stored on line *line_id* of the corpus CSV.

    Reads the module global ``csv_filepath``; the third ';'-separated
    column holds the text.
    """
    with open(csv_filepath, 'r', encoding='utf16') as handle:
        for index, row in enumerate(handle):
            if index == line_id:
                tweet = row.split(";")[2]
            elif index > line_id:
                break
    return tweet
def randomize(X, y):
    """Shuffle X and y with one shared random permutation."""
    order = np.random.permutation(y.shape[0])
    return X[order], y[order]
def build_dataset(file):
    """Convert parsed dump records into (labels, vectors) numpy arrays.

    Each record is [vector-string, tweet-id]; the label comes from the
    positive-tweet CSV via define_label.
    """
    target, data = [], []
    for line in file:
        z = np.array(define_label(line[1].replace(' ', '')))
        target.append(z)
        # Parse "[v1 v2 ...]" into a float vector.
        # NOTE(review): np.fromstring is deprecated in favor of np.fromstring's
        # replacement np.frombuffer / np.fromstring(sep=...) successor.
        x = np.fromstring(line[0].replace(']', '').replace('[', '').replace('  ', ' '), sep=' ')
        data.append(x)
    target = np.asarray(target)
    data = np.asarray(data)
    return target, data
def balance_dataset():
    """Sample 1000 positive and 1000 negative vector records and shuffle.

    Reads the module globals ``pos_filepath_txt``/``neg_filepath_txt``.
    Returns a list of [vector-string, tweet-id] records.
    """
    file_pos_ids, file_neg_ids = [], []
    file_pos = openfile_txt(pos_filepath_txt)
    for line in file_pos:
        # Normalize whitespace left by the dump format before splitting.
        parts = line.replace('\n', '').replace('  ', ' ').replace('  ', ' ').replace('  ', ' ').split(";")
        file_pos_ids.append(parts)
    pos_part = random.sample(file_pos_ids, 1000)
    file_neg = openfile_txt(neg_filepath_txt)
    for line in file_neg:
        parts = line.replace('\n', '').replace('  ', ' ').replace('  ', ' ').replace('  ', ' ').split(";")
        file_neg_ids.append(parts)
    neg_part = random.sample(file_neg_ids, 1000)
    balanced_txt_file = pos_part+neg_part
    random.shuffle(balanced_txt_file)
    # print(balanced_txt_file)
    return balanced_txt_file
def simulate_human_decision(line_id):
    """Simulate the human oracle: true label of the tweet on *line_id*.

    Reads the module global ``csv_filepath``; the first ';'-separated
    column is the tweet id, which define_label maps to 0/1.
    """
    with open(csv_filepath, 'r', encoding='utf16') as handle:
        for index, row in enumerate(handle):
            if index == line_id:
                label = define_label(row.split(";")[0])
            elif index > line_id:
                break
    return label
def split_train_test(file):
    """Build a libact train/test split with the first 50 labels revealed.

    Re-draws the 80/20 stratified split until at least 25 of the 50
    seeded labels are positive, so the initial labeled pool is roughly
    balanced. NOTE(review): loops forever if the data cannot yield 25
    positives among the first 50 training rows.
    """
    labels_and_vectors = build_dataset(file)
    n_labeled = 50
    X = labels_and_vectors[1]
    y = labels_and_vectors[0]
    print(np.shape(X))
    print(np.shape(y))
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y)
    while (np.sum(y_train[:n_labeled]) < 25):
        X_rand, y_rand = randomize(X, y)
        X_train, X_test, y_train, y_test = train_test_split(X_rand, y_rand, test_size=0.2, stratify=y_rand)
    # Only the first n_labeled training labels are exposed; the rest are None.
    print(np.concatenate([y_train[:n_labeled], [None] * (len(y_train) - n_labeled)]))
    trn_ds = Dataset(X_train, np.concatenate([y_train[:n_labeled], [None] * (len(y_train) - n_labeled)]))
    tst_ds = Dataset(X_test, y_test)
    return trn_ds, tst_ds
def main():
    """Run the active-learning benchmark.

    Builds a balanced labeled/unlabeled tweet dataset, then compares six
    query strategies (least confident, max margin, CMB distance/diversity,
    random, vote entropy, KL divergence) over `quota` simulated labeling
    rounds, live-plotting each strategy's test error and logging every
    query to a timestamped text file.
    """
    global pos_filepath, neg_filepath_txt, pos_filepath_txt, dataset_filepath, csv_filepath, vectors_list, ids_list
    # NOTE(review): hard-coded absolute paths — only runnable as-is on the
    # original author's machine.
    dataset_filepath = "/Users/dndesign/Desktop/active_learning/vecteurs_et_infos/vectors_2015.txt"
    csv_filepath = "/Users/dndesign/Desktop/active_learning/donnees/corpus_2015_id-time-text.csv"
    pos_filepath = "/Users/dndesign/Desktop/active_learning/donnees/oriane_pos_id-time-text.csv"
    pos_filepath_txt = "/Users/dndesign/Desktop/active_learning/vecteurs_et_infos/vectors_2015_pos.txt"
    neg_filepath_txt = "/Users/dndesign/Desktop/active_learning/vecteurs_et_infos/vectors_2015_neg.txt"
    vectors_list, ids_list = get_vectors_list(dataset_filepath)
    timestr = time.strftime("%Y%m%d_%H%M%S")
    text_file = codecs.open("task_" + str(timestr) + ".txt", "w", "utf-8")
    print("Loading data...")
    text_file.write("Loading data...\n")
    # Open this file
    t0 = time.time()
    file = openfile_txt(dataset_filepath)
    num_lines = sum(1 for line in file)
    print("Treating " + str(num_lines) + " entries...")
    text_file.write("Treating : %s entries...\n" % str(num_lines))
    # Number of queries to ask human to label
    quota = 100
    # Per-strategy test-error histories (index 5 intentionally unused).
    E_out1, E_out2, E_out3, E_out4, E_out6, E_out7 = [], [], [], [], [], []
    balanced_file = balance_dataset()
    trn_ds, tst_ds = split_train_test(balanced_file)
    # model = SVM(kernel='linear')
    # model = LogisticRegression()
    model = RandomForestClassifier()
    ''' UncertaintySampling (Least Confident)
        UncertaintySampling : it queries the instances about which
        it is least certain how to label
        Least Confident : it queries the instance whose posterior
        probability of being positive is nearest 0.5
    '''
    qs = UncertaintySampling(trn_ds, method='lc', model=LogisticRegression(C=.01))
    # model.train(trn_ds)
    model.fit(trn_ds.format_sklearn()[0], trn_ds.format_sklearn()[1])
    predicted = model.predict(tst_ds.format_sklearn()[0])
    score = accuracy_score(tst_ds.format_sklearn()[1], predicted)
    E_out1 = np.append(E_out1, 1 - score)
    # E_out1 = np.append(E_out1, 1 - model.score(tst_ds))
    ''' UncertaintySampling (Max Margin)
    '''
    trn_ds2 = copy.deepcopy(trn_ds)
    qs2 = USampling(trn_ds2, method='mm', model=SVM(kernel='linear'))
    # model.train(trn_ds2)
    model.fit(trn_ds.format_sklearn()[0], trn_ds.format_sklearn()[1])
    predicted = model.predict(tst_ds.format_sklearn()[0])
    score = accuracy_score(tst_ds.format_sklearn()[1], predicted)
    E_out2 = np.append(E_out2, 1 - score)
    # E_out2 = np.append(E_out2, 1 - model.score(tst_ds))
    ''' CMB Sampling
        Combination of active learning algorithms (distance-based (DIST), diversity-based (DIV))
    '''
    trn_ds3 = copy.deepcopy(trn_ds)
    qs3 = CMBSampling(trn_ds3, model=SVM(kernel='linear'))
    # model.train(trn_ds3)
    model.fit(trn_ds.format_sklearn()[0], trn_ds.format_sklearn()[1])
    predicted = model.predict(tst_ds.format_sklearn()[0])
    score = accuracy_score(tst_ds.format_sklearn()[1], predicted)
    E_out3 = np.append(E_out3, 1 - score)
    # E_out3 = np.append(E_out3, 1 - model.score(tst_ds))
    ''' Random Sampling
        Random : it chooses randomly a query
    '''
    trn_ds4 = copy.deepcopy(trn_ds)
    qs4 = RandomSampling(trn_ds4, random_state=1126)
    # model.train(trn_ds4)
    model.fit(trn_ds.format_sklearn()[0], trn_ds.format_sklearn()[1])
    predicted = model.predict(tst_ds.format_sklearn()[0])
    score = accuracy_score(tst_ds.format_sklearn()[1], predicted)
    E_out4 = np.append(E_out4, 1 - score)
    # E_out4 = np.append(E_out4, 1 - model.score(tst_ds))
    ''' QueryByCommittee (Vote Entropy)
        QueryByCommittee : it keeps a committee of classifiers and queries
        the instance that the committee members disagree, it also examines
        unlabeled examples and selects only those that are most informative
        for labeling
        Vote Entropy : a way of measuring disagreement
        Disadvantage : it does not consider the committee members' class
        distributions. It also misses some informative unlabeled examples
        to label
    '''
    trn_ds6 = copy.deepcopy(trn_ds)
    qs6 = QueryByCommittee(trn_ds6, disagreement='vote',
                           models=[LogisticRegression(C=1.0),
                                   LogisticRegression(C=0.01),
                                   LogisticRegression(C=100)],
                           random_state=1126)
    # model.train(trn_ds6)
    model.fit(trn_ds.format_sklearn()[0], trn_ds.format_sklearn()[1])
    predicted = model.predict(tst_ds.format_sklearn()[0])
    score = accuracy_score(tst_ds.format_sklearn()[1], predicted)
    E_out6 = np.append(E_out6, 1 - score)
    # E_out6 = np.append(E_out6, 1 - model.score(tst_ds))
    ''' QueryByCommittee (Kullback-Leibler Divergence)
        QueryByCommittee : it examines unlabeled examples and selects only
        those that are most informative for labeling
        Disadvantage : it misses some examples on which committee members
        disagree
    '''
    trn_ds7 = copy.deepcopy(trn_ds)
    qs7 = QueryByCommittee(trn_ds7, disagreement='kl_divergence',
                           models=[LogisticRegression(C=1.0),
                                   LogisticRegression(C=0.01),
                                   LogisticRegression(C=100)],
                           random_state=1126)
    # model.train(trn_ds7)
    model.fit(trn_ds.format_sklearn()[0], trn_ds.format_sklearn()[1])
    predicted = model.predict(tst_ds.format_sklearn()[0])
    score = accuracy_score(tst_ds.format_sklearn()[1], predicted)
    E_out7 = np.append(E_out7, 1 - score)
    # E_out7 = np.append(E_out7, 1 - model.score(tst_ds))
    # Set up the live error plot; one line per strategy.
    with sns.axes_style("darkgrid"):
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        query_num = np.arange(0, 1)
        p1, = ax.plot(query_num, E_out1, 'red')
        p2, = ax.plot(query_num, E_out2, 'blue')
        p3, = ax.plot(query_num, E_out3, 'green')
        p4, = ax.plot(query_num, E_out4, 'orange')
        p6, = ax.plot(query_num, E_out6, 'black')
        p7, = ax.plot(query_num, E_out7, 'purple')
        plt.legend(
            ('Least Confident', 'Max Margin', 'Distance Diversity CMB', 'Random Sampling', 'Vote Entropy', 'KL Divergence'),
            loc=4)
        plt.ylabel('Accuracy')
        plt.xlabel('Number of Queries')
        plt.title('Active Learning - Query choice strategies')
        plt.ylim([0, 1])
        plt.show(block=False)
    # Main loop: each round, every strategy picks one tweet, gets the
    # simulated label, retrains, and the plot is refreshed.
    for i in range(quota):
        print("\n#################################################")
        print("Query number " + str(i) + " : ")
        print("#################################################\n")
        text_file.write("\n#################################################\n")
        text_file.write("Query number %s : " % str(i))
        text_file.write("\n#################################################\n")
        ask_id = qs.make_query()
        print("\033[4mUsing Uncertainty Sampling (Least confident) :\033[0m")
        print("Tweet :" + define_tweet_by_id(ask_id), end='', flush=True)
        print("Simulating human response : " + str(simulate_human_decision(ask_id)) + " \n")
        text_file.write("Using Uncertainty Sampling (Least confident) :\n")
        text_file.write("Tweet : %s \n" % str(define_tweet_by_id(ask_id)))
        text_file.write("Simulating human response : %s \n\n" % str(simulate_human_decision(ask_id)))
        trn_ds.update(ask_id, simulate_human_decision(ask_id))
        # model.train(trn_ds)
        model.fit(trn_ds.format_sklearn()[0], trn_ds.format_sklearn()[1])
        predicted = model.predict(tst_ds.format_sklearn()[0])
        score = accuracy_score(tst_ds.format_sklearn()[1], predicted)
        E_out1 = np.append(E_out1, 1 - score)
        # E_out1 = np.append(E_out1, 1 - model.score(tst_ds))
        ask_id = qs2.make_query()
        print("\033[4mUsing Uncertainty Sampling (Max Margin) :\033[0m")
        print("Tweet :" + define_tweet_by_id(ask_id), end='', flush=True)
        print("Simulating human response : " + str(simulate_human_decision(ask_id)) + " \n")
        text_file.write("Using Uncertainty Sampling (Smallest Margin) :\n")
        text_file.write("Tweet : %s \n" % str(define_tweet_by_id(ask_id)))
        text_file.write("Simulating human response : %s \n\n" % str(simulate_human_decision(ask_id)))
        trn_ds2.update(ask_id, simulate_human_decision(ask_id))
        # model.train(trn_ds2)
        model.fit(trn_ds.format_sklearn()[0], trn_ds.format_sklearn()[1])
        predicted = model.predict(tst_ds.format_sklearn()[0])
        score = accuracy_score(tst_ds.format_sklearn()[1], predicted)
        E_out2 = np.append(E_out2, 1 - score)
        # E_out2 = np.append(E_out2, 1 - model.score(tst_ds))
        ask_id = qs3.make_query()
        print("\033[4mUsing CMB Distance-Diversity Sampling :\033[0m")
        print("Tweet :" + define_tweet_by_id(ask_id), end='', flush=True)
        print("Simulating human response : " + str(simulate_human_decision(ask_id)) + " \n")
        text_file.write("Using Uncertainty Sampling (Entropy) :\n")
        text_file.write("Tweet : %s \n" % str(define_tweet_by_id(ask_id)))
        text_file.write("Simulating human response : %s \n\n" % str(simulate_human_decision(ask_id)))
        trn_ds3.update(ask_id, simulate_human_decision(ask_id))
        # model.train(trn_ds3)
        model.fit(trn_ds.format_sklearn()[0], trn_ds.format_sklearn()[1])
        predicted = model.predict(tst_ds.format_sklearn()[0])
        score = accuracy_score(tst_ds.format_sklearn()[1], predicted)
        E_out3 = np.append(E_out3, 1 - score)
        # E_out3 = np.append(E_out3, 1 - model.score(tst_ds))
        ask_id = qs4.make_query()
        print("\033[4mUsing Random Sampling :\033[0m")
        print("Tweet :" + define_tweet_by_id(ask_id), end='', flush=True)
        print("Simulating human response : " + str(simulate_human_decision(ask_id)) + " \n")
        text_file.write("Using Random Sampling :\n")
        text_file.write("Tweet : %s \n" % str(define_tweet_by_id(ask_id)))
        text_file.write("Simulating human response : %s \n\n" % str(simulate_human_decision(ask_id)))
        trn_ds4.update(ask_id, simulate_human_decision(ask_id))
        # model.train(trn_ds4)
        model.fit(trn_ds.format_sklearn()[0], trn_ds.format_sklearn()[1])
        predicted = model.predict(tst_ds.format_sklearn()[0])
        score = accuracy_score(tst_ds.format_sklearn()[1], predicted)
        E_out4 = np.append(E_out4, 1 - score)
        # E_out4 = np.append(E_out4, 1 - model.score(tst_ds))
        ask_id = qs6.make_query()
        print("\033[4mUsing QueryByCommittee (Vote Entropy) :\033[0m")
        print("Tweet :" + define_tweet_by_id(ask_id), end='', flush=True)
        print("Simulating human response : " + str(simulate_human_decision(ask_id)) + " \n")
        text_file.write("Using QueryByCommittee (Vote Entropy) :\n")
        text_file.write("Tweet : %s \n" % str(define_tweet_by_id(ask_id)))
        text_file.write("Simulating human response : %s \n\n" % str(simulate_human_decision(ask_id)))
        trn_ds6.update(ask_id, simulate_human_decision(ask_id))
        # model.train(trn_ds6)
        model.fit(trn_ds.format_sklearn()[0], trn_ds.format_sklearn()[1])
        predicted = model.predict(tst_ds.format_sklearn()[0])
        score = accuracy_score(tst_ds.format_sklearn()[1], predicted)
        E_out6 = np.append(E_out6, 1 - score)
        # E_out6 = np.append(E_out6, 1 - model.score(tst_ds))
        ask_id = qs7.make_query()
        print("\033[4mUsing QueryByCommittee (KL Divergence) :\033[0m")
        print("Tweet :" + define_tweet_by_id(ask_id), end='', flush=True)
        print("Simulating human response : " + str(simulate_human_decision(ask_id)) + " \n")
        text_file.write("Using QueryByCommittee (KL Divergence) :\n")
        text_file.write("Tweet : %s \n" % str(define_tweet_by_id(ask_id)))
        text_file.write("Simulating human response : %s \n\n" % str(simulate_human_decision(ask_id)))
        trn_ds7.update(ask_id, simulate_human_decision(ask_id))
        # model.train(trn_ds7)
        model.fit(trn_ds.format_sklearn()[0], trn_ds.format_sklearn()[1])
        predicted = model.predict(tst_ds.format_sklearn()[0])
        score = accuracy_score(tst_ds.format_sklearn()[1], predicted)
        E_out7 = np.append(E_out7, 1 - score)
        # E_out7 = np.append(E_out7, 1 - model.score(tst_ds))
        # Refresh the live plot with the new error points.
        ax.set_xlim((0, i + 1))
        ax.set_ylim((0, max(max(E_out1), max(E_out2), max(E_out3), max(E_out4), max(E_out6), max(E_out7)) + 0.1))
        query_num = np.arange(0, i + 2)
        p1.set_xdata(query_num)
        p1.set_ydata(E_out1)
        p2.set_xdata(query_num)
        p2.set_ydata(E_out2)
        p3.set_xdata(query_num)
        p3.set_ydata(E_out3)
        p4.set_xdata(query_num)
        p4.set_ydata(E_out4)
        p6.set_xdata(query_num)
        p6.set_ydata(E_out6)
        p7.set_xdata(query_num)
        p7.set_ydata(E_out7)
        plt.draw()
    t2 = time.time()
    time_total = t2 - t0
    print("\n\n\n#################################################\n")
    print("Execution time : %fs \n\n" % time_total)
    text_file.write("\n\n\n#################################################\n")
    text_file.write("Execution time : %fs \n" % time_total)
    text_file.close()
    input("Press any key to save the plot...")
    plt.savefig('task_' + str(timestr) + '.png')
    print("Done")
if __name__ == '__main__':
    main()
| {"/task_balanced_dataset.py": ["/strategies/strategies.py"]} |
68,260 | bygreencn/pywren | refs/heads/master | /pywren/wren.py | from __future__ import absolute_import
import boto3
import botocore
from six import reraise
import json
import base64
from threading import Thread
try:
from six.moves import cPickle as pickle
except:
import pickle
from pywren.wrenconfig import *
from pywren import wrenconfig, wrenutil, runtime
import enum
from multiprocessing.pool import ThreadPool
import time
from pywren import s3util, version
import logging
import botocore
import glob2
import os
from pywren.cloudpickle import serialize
from pywren import invokers
from tblib import pickling_support
pickling_support.install()
logger = logging.getLogger(__name__)
class JobState(enum.Enum):
    """Lifecycle states for a dispatched pywren call."""
    new = 1       # future created, nothing dispatched yet
    invoked = 2   # remote invocation issued
    running = 3   # worker has picked the job up
    success = 4   # result available
    error = 5     # remote execution failed
def default_executor(**kwargs):
    """Pick an executor factory from the PYWREN_EXECUTOR env variable.

    Recognized values: 'lambda' (default), 'remote'/'standalone',
    'dummy'. Any other value falls back to the lambda executor.
    """
    executor_str = os.environ.get('PYWREN_EXECUTOR', 'lambda')
    if executor_str in ('remote', 'standalone'):
        return remote_executor(**kwargs)
    if executor_str == 'dummy':
        return dummy_executor(**kwargs)
    return lambda_executor(**kwargs)
def lambda_executor(config=None, job_max_runtime=280):
    """Build an Executor that dispatches jobs to AWS Lambda."""
    if config is None:
        config = wrenconfig.default()
    region = config['account']['aws_region']
    function_name = config['lambda']['function_name']
    bucket = config['s3']['bucket']
    prefix = config['s3']['pywren_prefix']
    invoker = invokers.LambdaInvoker(region, function_name)
    return Executor(region, bucket, prefix, invoker, config,
                    job_max_runtime)
def dummy_executor(config=None, job_max_runtime=100):
    """Build an Executor backed by the no-op DummyInvoker (for testing).

    BUG FIX: default_executor forwards **kwargs to this factory; the
    original zero-argument signature raised TypeError whenever a config
    (or any keyword) was supplied. Accept the same keywords as the other
    executor factories, with the previous hard-coded runtime (100s) as
    the default.
    """
    if config is None:
        config = wrenconfig.default()
    AWS_REGION = config['account']['aws_region']
    S3_BUCKET = config['s3']['bucket']
    S3_PREFIX = config['s3']['pywren_prefix']
    invoker = invokers.DummyInvoker()
    return Executor(AWS_REGION, S3_BUCKET, S3_PREFIX, invoker, config,
                    job_max_runtime)
def remote_executor(config=None, job_max_runtime=3600):
    """Build an Executor that dispatches jobs via SQS to standalone workers."""
    if config is None:
        config = wrenconfig.default()
    region = config['account']['aws_region']
    queue_name = config['standalone']['sqs_queue_name']
    bucket = config['s3']['bucket']
    prefix = config['s3']['pywren_prefix']
    invoker = invokers.SQSInvoker(region, queue_name)
    return Executor(region, bucket, prefix, invoker, config,
                    job_max_runtime)
class Executor(object):
    """Dispatches pywren jobs through an invoker and tracks them via S3.

    Serializes the function and its data, uploads both to S3, asks the
    invoker to launch workers, and returns ResponseFuture objects that
    poll S3 for status/results.

    Theoretically will allow for cross-AZ invocations
    """

    def __init__(self, aws_region, s3_bucket, s3_prefix,
                 invoker, config, job_max_runtime):
        self.aws_region = aws_region
        self.s3_bucket = s3_bucket
        self.s3_prefix = s3_prefix
        self.config = config
        self.session = botocore.session.get_session()
        self.invoker = invoker
        self.s3client = self.session.create_client('s3', region_name=aws_region)
        self.job_max_runtime = job_max_runtime

        runtime_bucket = config['runtime']['s3_bucket']
        runtime_key = config['runtime']['s3_key']
        # Fail fast if the configured runtime doesn't match this interpreter.
        if not runtime.runtime_key_valid(runtime_bucket, runtime_key):
            raise Exception("The indicated runtime: s3://{}/{} is not approprite for this python version".format(runtime_bucket, runtime_key))

    def create_mod_data(self, mod_paths):
        """Read every .py file under mod_paths into {relative_path: source}."""
        module_data = {}
        # load mod paths
        for m in mod_paths:
            if os.path.isdir(m):
                files = glob2.glob(os.path.join(m, "**/*.py"))
                pkg_root = os.path.dirname(m)
            else:
                pkg_root = os.path.dirname(m)
                files = [m]
            for f in files:
                # Key is the path relative to the package root.
                # FIX: use a context manager; the original leaked open file
                # handles (one per module file).
                with open(f, 'r') as source_file:
                    module_data[f[len(pkg_root)+1:]] = source_file.read()

        return module_data

    def put_data(self, s3_data_key, data_str,
                 callset_id, call_id):
        """Upload one call's pickled data blob to (bucket, key)."""
        # put on s3 -- FIXME right now this takes 2x as long
        self.s3client.put_object(Bucket=s3_data_key[0],
                                 Key=s3_data_key[1],
                                 Body=data_str)

        logger.info("call_async {} {} s3 upload complete {}".format(callset_id, call_id, s3_data_key))

    def invoke_with_keys(self, s3_func_key, s3_data_key, s3_output_key,
                         s3_status_key,
                         callset_id, call_id, extra_env,
                         extra_meta, data_byte_range, use_cached_runtime,
                         host_job_meta, job_max_runtime,
                         overwrite_invoke_args=None):
        """Assemble the worker argument dict and fire one invocation.

        Returns a ResponseFuture in the `invoked` state.
        """
        arg_dict = {'func_key': s3_func_key,
                    'data_key': s3_data_key,
                    'output_key': s3_output_key,
                    'status_key': s3_status_key,
                    'callset_id': callset_id,
                    'job_max_runtime': job_max_runtime,
                    'data_byte_range': data_byte_range,
                    'call_id': call_id,
                    'use_cached_runtime': use_cached_runtime,
                    'runtime_s3_bucket': self.config['runtime']['s3_bucket'],
                    'runtime_s3_key': self.config['runtime']['s3_key'],
                    'pywren_version': version.__version__}

        if extra_env is not None:
            logger.debug("Extra environment vars {}".format(extra_env))
            arg_dict['extra_env'] = extra_env

        if extra_meta is not None:
            # sanity
            # FIX: dict.iteritems() is Python 2 only; items() works on both
            # and previously raised AttributeError on Python 3.
            for k, v in extra_meta.items():
                if k in arg_dict:
                    raise ValueError("Key {} already in dict".format(k))
                arg_dict[k] = v

        host_submit_time = time.time()
        arg_dict['host_submit_time'] = host_submit_time

        logger.info("call_async {} {} lambda invoke ".format(callset_id, call_id))
        lambda_invoke_time_start = time.time()

        # overwrite explicit args, mostly used for testing via injection
        if overwrite_invoke_args is not None:
            arg_dict.update(overwrite_invoke_args)

        # do the invocation
        self.invoker.invoke(arg_dict)

        host_job_meta['lambda_invoke_timestamp'] = lambda_invoke_time_start
        host_job_meta['lambda_invoke_time'] = time.time() - lambda_invoke_time_start

        host_job_meta.update(self.invoker.config())

        logger.info("call_async {} {} lambda invoke complete".format(callset_id, call_id))

        host_job_meta.update(arg_dict)

        fut = ResponseFuture(call_id, callset_id, host_job_meta,
                             self.s3_bucket, self.s3_prefix,
                             self.aws_region)

        fut._set_state(JobState.invoked)

        return fut

    def call_async(self, func, data, extra_env=None,
                   extra_meta=None):
        """Invoke func(data) remotely; returns a single ResponseFuture."""
        return self.map(func, [data], extra_env, extra_meta)[0]

    def agg_data(self, data_strs):
        """Concatenate pickled blobs; return (blob, inclusive byte ranges)."""
        ranges = []
        pos = 0
        for datum in data_strs:
            l = len(datum)
            ranges.append((pos, pos + l - 1))
            pos += l
        return b"".join(data_strs), ranges

    def map(self, func, iterdata, extra_env=None, extra_meta=None,
            invoke_pool_threads=64, data_all_as_one=True,
            use_cached_runtime=True, overwrite_invoke_args=None):
        """
        Invoke func over each element of iterdata remotely; returns a list
        of ResponseFutures (one per element, in order).

        # FIXME work with an actual iterable instead of just a list

        data_all_as_one : upload the data as a single s3 object; fewer
        tcp transactions (good) but potentially higher latency for workers (bad)

        use_cached_runtime : if runtime has been cached, use that. When set
        to False, redownloads runtime.
        """

        host_job_meta = {}

        pool = ThreadPool(invoke_pool_threads)
        callset_id = s3util.create_callset_id()
        data = list(iterdata)

        ### pickle func and all data (to capture module dependencies
        serializer = serialize.SerializeIndependent()
        func_and_data_ser, mod_paths = serializer([func] + data)

        func_str = func_and_data_ser[0]
        data_strs = func_and_data_ser[1:]
        data_size_bytes = sum(len(x) for x in data_strs)
        s3_agg_data_key = None
        host_job_meta['aggregated_data_in_s3'] = False
        host_job_meta['data_size_bytes'] = data_size_bytes

        if data_size_bytes < wrenconfig.MAX_AGG_DATA_SIZE and data_all_as_one:
            # Small enough: one aggregate S3 object, workers fetch byte ranges.
            s3_agg_data_key = s3util.create_agg_data_key(self.s3_bucket,
                                                         self.s3_prefix, callset_id)
            agg_data_bytes, agg_data_ranges = self.agg_data(data_strs)
            agg_upload_time = time.time()
            self.s3client.put_object(Bucket=s3_agg_data_key[0],
                                     Key=s3_agg_data_key[1],
                                     Body=agg_data_bytes)
            host_job_meta['agg_data_in_s3'] = True
            host_job_meta['data_upload_time'] = time.time() - agg_upload_time
            host_job_meta['data_upload_timestamp'] = time.time()
        else:
            # FIXME add warning that you wanted data all as one but
            # it exceeded max data size
            pass

        module_data = self.create_mod_data(mod_paths)
        func_str_encoded = wrenutil.bytes_to_b64str(func_str)
        #debug_foo = {'func' : func_str_encoded,
        #             'module_data' : module_data}

        #pickle.dump(debug_foo, open("/tmp/py35.debug.pickle", 'wb'))
        ### Create func and upload
        func_module_str = json.dumps({'func': func_str_encoded,
                                      'module_data': module_data})
        host_job_meta['func_module_str_len'] = len(func_module_str)

        func_upload_time = time.time()
        s3_func_key = s3util.create_func_key(self.s3_bucket, self.s3_prefix,
                                             callset_id)
        self.s3client.put_object(Bucket=s3_func_key[0],
                                 Key=s3_func_key[1],
                                 Body=func_module_str)
        host_job_meta['func_upload_time'] = time.time() - func_upload_time
        host_job_meta['func_upload_timestamp'] = time.time()

        def invoke(data_str, callset_id, call_id, s3_func_key,
                   host_job_meta,
                   s3_agg_data_key=None, data_byte_range=None):
            # Per-call worker run by the thread pool: upload data (unless
            # aggregated) and fire the invocation.
            s3_data_key, s3_output_key, s3_status_key \
                = s3util.create_keys(self.s3_bucket,
                                     self.s3_prefix,
                                     callset_id, call_id)

            host_job_meta['job_invoke_timestamp'] = time.time()

            if s3_agg_data_key is None:
                data_upload_time = time.time()
                self.put_data(s3_data_key, data_str,
                              callset_id, call_id)
                data_upload_time = time.time() - data_upload_time
                host_job_meta['data_upload_time'] = data_upload_time
                host_job_meta['data_upload_timestamp'] = time.time()

                data_key = s3_data_key
            else:
                data_key = s3_agg_data_key

            return self.invoke_with_keys(s3_func_key, data_key,
                                         s3_output_key,
                                         s3_status_key,
                                         callset_id, call_id, extra_env,
                                         extra_meta, data_byte_range,
                                         use_cached_runtime, host_job_meta.copy(),
                                         self.job_max_runtime,
                                         overwrite_invoke_args=overwrite_invoke_args)

        N = len(data)
        call_result_objs = []
        for i in range(N):
            call_id = "{:05d}".format(i)

            data_byte_range = None
            if s3_agg_data_key is not None:
                data_byte_range = agg_data_ranges[i]

            cb = pool.apply_async(invoke, (data_strs[i], callset_id,
                                           call_id, s3_func_key,
                                           host_job_meta.copy(),
                                           s3_agg_data_key,
                                           data_byte_range))

            logger.info("map {} {} apply async".format(callset_id, call_id))

            call_result_objs.append(cb)

        res = [c.get() for c in call_result_objs]
        pool.close()
        pool.join()

        logger.info("map invoked {} {} pool join".format(callset_id, call_id))

        # FIXME take advantage of the callset to return a lot of these

        # note these are just the invocation futures

        return res

    def reduce(self, function, list_of_futures,
               extra_env=None, extra_meta=None):
        """
        Apply a function across all futures.

        # FIXME change to lazy iterator
        """
        #if self.invoker.TIME_LIMIT:
        # NOTE(review): `wait` and `ALL_COMPLETED` are not imported in this
        # module as shown — confirm they are provided by pywren's wait
        # helper, otherwise this line raises NameError.
        wait(list_of_futures, return_when=ALL_COMPLETED) # avoid race condition

        def reduce_func(fut_list):
            # FIXME speed this up for big reduce
            accum_list = []
            for f in fut_list:
                accum_list.append(f.result())
            return function(accum_list)

        return self.call_async(reduce_func, list_of_futures,
                               extra_env=extra_env, extra_meta=extra_meta)

    def get_logs(self, future, verbose=True):
        """Fetch the CloudWatch log lines belonging to one future's request.

        Returns a list of (timestamp_ms, message) tuples between the
        START and REPORT markers of the future's aws_request_id.
        """

        logclient = boto3.client('logs', region_name=self.aws_region)

        log_group_name = future.run_status['log_group_name']
        log_stream_name = future.run_status['log_stream_name']

        aws_request_id = future.run_status['aws_request_id']

        log_events = logclient.get_log_events(
            logGroupName=log_group_name,
            logStreamName=log_stream_name,)

        if verbose: # FIXME use logger
            print("log events returned")
        this_events_logs = []
        in_this_event = False
        for event in log_events['events']:
            start_string = "START RequestId: {}".format(aws_request_id)
            end_string = "REPORT RequestId: {}".format(aws_request_id)

            message = event['message'].strip()
            timestamp = int(event['timestamp'])
            if verbose:
                print(timestamp, message)
            if start_string in message:
                in_this_event = True
            elif end_string in message:
                in_this_event = False
                this_events_logs.append((timestamp, message))

            if in_this_event:
                this_events_logs.append((timestamp, message))

        return this_events_logs
# this really should not be a global singleton FIXME
# Module-level S3 client shared by the helper functions below; created at
# import time with boto3's default region resolution.
global_s3_client = boto3.client('s3') # , region_name = AWS_REGION)
def get_call_status(callset_id, call_id,
                    AWS_S3_BUCKET=wrenconfig.AWS_S3_BUCKET,
                    AWS_S3_PREFIX=wrenconfig.AWS_S3_PREFIX,
                    AWS_REGION=wrenconfig.AWS_REGION, s3=None):
    """
    Read the status record for one call from S3.

    Returns the decoded status dict, or None when the status object has
    not been written yet (i.e. the call is still running).
    """
    _, _, status_key = s3util.create_keys(AWS_S3_BUCKET,
                                          AWS_S3_PREFIX,
                                          callset_id, call_id)
    client = s3 if s3 is not None else global_s3_client
    try:
        response = client.get_object(Bucket=status_key[0], Key=status_key[1])
    except botocore.exceptions.ClientError as e:
        # A missing key just means the call has not finished yet.
        if e.response['Error']['Code'] == "NoSuchKey":
            return None
        raise
    return json.loads(response['Body'].read().decode('ascii'))
def get_call_output(callset_id, call_id,
                    AWS_S3_BUCKET=wrenconfig.AWS_S3_BUCKET,
                    AWS_S3_PREFIX=wrenconfig.AWS_S3_PREFIX,
                    AWS_REGION=wrenconfig.AWS_REGION, s3=None):
    """
    Download and unpickle the output object for one call.
    """
    _, output_key, _ = s3util.create_keys(AWS_S3_BUCKET,
                                          AWS_S3_PREFIX,
                                          callset_id, call_id)
    if s3 is None:
        s3 = global_s3_client
    response = s3.get_object(Bucket=output_key[0], Key=output_key[1])
    # NOTE: unpickling data fetched from S3 — acceptable only because
    # this bucket is written by our own handler, never untrusted input.
    return pickle.loads(response['Body'].read())
class ResponseFuture(object):
    """
    Future-like handle for a single dispatched call.

    Status and output are polled from S3.  The interface loosely follows
    concurrent.futures.Future; cancellation is not supported.
    """

    # Seconds to sleep between S3 status polls while blocking in result().
    GET_RESULT_SLEEP_SECS = 4

    def __init__(self, call_id, callset_id, invoke_metadata,
                 s3_bucket, s3_prefix, aws_region):
        self.call_id = call_id
        self.callset_id = callset_id
        self._state = JobState.new
        self.s3_bucket = s3_bucket
        self.s3_prefix = s3_prefix
        self.aws_region = aws_region
        # Copy so later updates here can't mutate the caller's dict.
        self._invoke_metadata = invoke_metadata.copy()
        self.status_query_count = 0

    def _set_state(self, new_state):
        ## FIXME add state machine
        self._state = new_state

    def cancel(self):
        raise NotImplementedError("Cannot cancel dispatched jobs")

    def cancelled(self):
        raise NotImplementedError("Cannot cancel dispatched jobs")

    def running(self):
        raise NotImplementedError()

    def done(self):
        """Return True if the call has reached a terminal state."""
        if self._state in [JobState.success, JobState.error]:
            return True
        if self.result(check_only=True) is None:
            return False
        return True

    def result(self, timeout=None, check_only=False, throw_except=True):
        """
        Return the value returned by the call.

        From the python docs:

        Return the value returned by the call. If the call hasn't yet
        completed then this method will wait up to timeout seconds. If
        the call hasn't completed in timeout seconds then a
        TimeoutError will be raised. timeout can be an int or float. If
        timeout is not specified or None then there is no limit to the
        wait time.

        If the future is cancelled before completing then CancelledError
        will be raised.

        If the call raised then this method will raise the same exception.

        :param check_only: return None immediately when the remote status
            object is not yet available, instead of polling until done.
        :param throw_except: when False, a remote failure is recorded but
            not re-raised; None is returned instead.
        """
        if self._state == JobState.new:
            raise ValueError("job not yet invoked")

        if self._state == JobState.success:
            return self._return_val

        if self._state == JobState.error:
            raise self._exception

        call_status = get_call_status(self.callset_id, self.call_id,
                                      AWS_S3_BUCKET=self.s3_bucket,
                                      AWS_S3_PREFIX=self.s3_prefix,
                                      AWS_REGION=self.aws_region)
        self.status_query_count += 1

        ## FIXME implement timeout
        if timeout is not None:
            raise NotImplementedError()

        if check_only is True:
            if call_status is None:
                return None

        # Poll S3 until the remote handler writes the status object.
        while call_status is None:
            time.sleep(self.GET_RESULT_SLEEP_SECS)
            call_status = get_call_status(self.callset_id, self.call_id,
                                          AWS_S3_BUCKET=self.s3_bucket,
                                          AWS_S3_PREFIX=self.s3_prefix,
                                          AWS_REGION=self.aws_region)
            self.status_query_count += 1

        self._invoke_metadata['status_done_timestamp'] = time.time()
        self._invoke_metadata['status_query_count'] = self.status_query_count

        if call_status['exception'] is not None:
            # The wrenhandler itself raised before running user code.
            exception_str = call_status['exception']
            exception_args = call_status['exception_args']
            if exception_args[0] == "WRONGVERSION":
                # BUGFIX: message previously read "remove expected version".
                raise Exception(
                    "Pywren version mismatch: remote expected version {}, "
                    "local library is version {}".format(exception_args[2],
                                                         exception_args[3]))
            elif exception_args[0] == "OUTATIME":
                raise Exception("process ran out of time")
            else:
                raise Exception(exception_str, *exception_args)

        call_output_time = time.time()
        call_invoker_result = get_call_output(self.callset_id, self.call_id,
                                              AWS_S3_BUCKET=self.s3_bucket,
                                              AWS_S3_PREFIX=self.s3_prefix,
                                              AWS_REGION=self.aws_region)
        call_output_time_done = time.time()
        # BUGFIX: this used to compute done - done, which is always 0.0.
        self._invoke_metadata['download_output_time'] = \
            call_output_time_done - call_output_time
        self._invoke_metadata['download_output_timestamp'] = call_output_time_done

        call_success = call_invoker_result['success']
        logger.info("ResponseFuture.result() {} {} call_success {}".format(
            self.callset_id, self.call_id, call_success))

        self._call_invoker_result = call_invoker_result
        if call_success:
            self._return_val = call_invoker_result['result']
            self._state = JobState.success
        else:
            self._exception = call_invoker_result['result']
            self._traceback = (call_invoker_result['exc_type'],
                               call_invoker_result['exc_value'],
                               call_invoker_result['exc_traceback'])
            self._state = JobState.error

        self.run_status = call_status  # remote-side status information
        self.invoke_status = self._invoke_metadata  # local status information

        if call_success:
            return self._return_val
        elif call_success == False and throw_except:  # noqa: E712 keep exact check
            reraise(*self._traceback)
        return None

    def exception(self, timeout=None):
        raise NotImplementedError()

    def add_done_callback(self, fn):
        raise NotImplementedError()
# Sentinel values for wait()'s return_when argument, mirroring the
# concurrent.futures API (ANY_COMPLETED is not implemented yet;
# ALWAYS means "poll once and return immediately").
ALL_COMPLETED = 1
ANY_COMPLETED = 2
ALWAYS = 3
def wait(fs, return_when=ALL_COMPLETED, THREADPOOL_SIZE=64,
         WAIT_DUR_SEC=5):
    """
    Wait for the Future instances (possibly created by different
    Executor instances) given by fs to complete.  Returns a 2-tuple of
    lists: the first contains the futures that completed before the wait
    finished, the second the still-uncompleted futures.

    this will eventually provide an optimization for checking if a large
    number of futures have completed without too much network traffic
    by exploiting the callset

    http://pythonhosted.org/futures/#concurrent.futures.wait
    """
    total = len(fs)

    if return_when == ALWAYS:
        # Single non-blocking poll.
        return _wait(fs, THREADPOOL_SIZE)
    if return_when == ANY_COMPLETED:
        raise NotImplementedError()
    if return_when != ALL_COMPLETED:
        raise ValueError()

    # Poll until every future is done, sleeping between rounds.
    done_count = 0
    while done_count < total:
        dones, not_dones = _wait(fs, THREADPOOL_SIZE)
        done_count = len(dones)
        if done_count == total:
            return dones, not_dones
        time.sleep(WAIT_DUR_SEC)
def _wait(fs, THREADPOOL_SIZE):
    """
    One non-blocking pass over `fs`: determine which futures are done.

    Queries S3 once for the set of finished call ids in the callset,
    forces result download for newly-finished futures via a thread pool,
    and returns (done, not_done).
    """
    terminal = [JobState.success, JobState.error]

    pending = [f for f in fs if f._state not in terminal]
    if not pending:
        return fs, []

    # The single-listing optimization below assumes all pending futures
    # belong to the same callset.
    callsets = set([f.callset_id for f in pending])
    if len(callsets) > 1:
        raise NotImplementedError()
    callset_id = callsets.pop()  # FIXME assume only one

    probe = pending[0]  # any pending future carries the bucket/prefix (hack)
    finished_ids = set(s3util.get_callset_done(probe.s3_bucket,
                                               probe.s3_prefix,
                                               callset_id))

    done, not_done, newly_done = [], [], []
    for f in fs:
        if f._state in terminal:
            # Already resolved locally; nothing to fetch.
            done.append(f)
        elif f.call_id in finished_ids:
            newly_done.append(f)
            done.append(f)
        else:
            not_done.append(f)

    def fetch(f):
        # Force the result download; remote errors are recorded, not raised.
        f.result(throw_except=False)

    pool = ThreadPool(THREADPOOL_SIZE)
    pool.map(fetch, newly_done)
    pool.close()
    pool.join()

    return done, not_done
def log_test():
    """Emit one info-level record to verify that logging is wired up."""
    logger.info("logging from pywren.wren")
| {"/pywren/wren.py": ["/pywren/__init__.py"], "/pywren/__init__.py": ["/pywren/wren.py", "/pywren/version.py"], "/tests/test_modules.py": ["/pywren/__init__.py"]} |
68,261 | bygreencn/pywren | refs/heads/master | /pywren/__init__.py | from __future__ import absolute_import
from pywren.wren import default_executor, wait, lambda_executor, dummy_executor, remote_executor
from pywren import wrenlogging
from pywren.version import __version__
import logging
import os
if "PYWREN_LOGLEVEL" in os.environ:
log_level = os.environ['PYWREN_LOGLEVEL']
wrenlogging.default_config(log_level)
# FIXME there has to be a better way to disable noisy boto logs
logging.getLogger('boto').setLevel(logging.CRITICAL)
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
SOURCE_DIR = os.path.dirname(os.path.abspath(__file__))
| {"/pywren/wren.py": ["/pywren/__init__.py"], "/pywren/__init__.py": ["/pywren/wren.py", "/pywren/version.py"], "/tests/test_modules.py": ["/pywren/__init__.py"]} |
68,262 | bygreencn/pywren | refs/heads/master | /pywren/version.py | # we're following the version number guidelines
# from https://www.python.org/dev/peps/pep-0386/
__version__ = "0.1rc0"
| {"/pywren/wren.py": ["/pywren/__init__.py"], "/pywren/__init__.py": ["/pywren/wren.py", "/pywren/version.py"], "/tests/test_modules.py": ["/pywren/__init__.py"]} |
68,263 | bygreencn/pywren | refs/heads/master | /tests/test_modules.py | """
Test our ability to import other modules
"""
import pytest
import time
import boto3
import uuid
import numpy as np
import time
import pywren
import subprocess
import logging
import unittest
import numpy as np
import extmodule
class SimpleAsync(unittest.TestCase):
    """Smoke test: a remote call can use a helper from an external module."""

    def setUp(self):
        self.wrenexec = pywren.default_executor()

    def test_simple(self):
        x = 1.0

        def foo(arg):
            return extmodule.foo_add(arg)

        future = self.wrenexec.call_async(foo, x)
        self.assertEqual(future.result(), 2.0)
| {"/pywren/wren.py": ["/pywren/__init__.py"], "/pywren/__init__.py": ["/pywren/wren.py", "/pywren/version.py"], "/tests/test_modules.py": ["/pywren/__init__.py"]} |
68,264 | MaksimLion/python-base-algorithms | refs/heads/master | /binary_search.py | import random
from time_tracker import time_checker
@time_checker
def binary_search(sorted_list, item):
    """
    Return the index of `item` in ascending `sorted_list`, or None if absent.

    Classic iterative binary search.  Fixes in this version:
    - `right_border` starts at len-1 (the old `len(...)` with an inclusive
      comparison could index out of range, e.g. on an empty list);
    - the borders move past the midpoint (`±1`); the old code set them TO
      the midpoint, so the search looped forever when the item was missing;
    - an explicit `None` is returned when the item is not found.
    """
    left_border = 0
    right_border = len(sorted_list) - 1  # inclusive upper bound
    while left_border <= right_border:
        middle_value = (left_border + right_border) // 2
        if sorted_list[middle_value] == item:
            return middle_value
        if sorted_list[middle_value] > item:
            right_border = middle_value - 1  # discard the upper half
        else:
            left_border = middle_value + 1   # discard the lower half
    return None
68,265 | MaksimLion/python-base-algorithms | refs/heads/master | /time_tracker.py | import time
def time_checker(func):
    """
    Decorator that prints the wall-clock duration of each call to `func`.

    Fixes in this version:
    - the wrapper accepts any signature via *args/**kwargs (it used to
      hard-code exactly two positional arguments, breaking every other
      function shape);
    - functools.wraps preserves the wrapped function's name and docstring.
    """
    from functools import wraps  # local import keeps the module's deps unchanged

    @wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        print(time.time() - start_time)
        return result

    return wrapper
68,266 | MaksimLion/python-base-algorithms | refs/heads/master | /selection_sort.py | def selection_sort(sort_list):
sorted_list = list()
for i in range(len(sort_list)):
min_value_index = sort_list.index(min(sort_list))
sorted_list.append(sort_list.pop(min_value_index))
return sorted_list
| {"/binary_search.py": ["/time_tracker.py"]} |
68,267 | hvac/hvac-cli | refs/heads/master | /hvac_cli/cli.py | import hvac
class CLI(object):
    """Thin wrapper holding an authenticated hvac client built from CLI args."""

    def __init__(self, args):
        self.args = args
        self.open_vault()

    def list_mounts(self):
        """Return the mounted secrets engines as a dict."""
        return self.vault.sys.list_mounted_secrets_engines()['data']

    def open_vault(self):
        """Create the hvac client according to the parsed arguments."""
        args = self.args
        if args.tls_skip_verify:
            # TLS verification explicitly disabled (discouraged).
            verify = False
        elif args.ca_cert:
            verify = args.ca_cert  # path to a custom CA bundle
        else:
            verify = True
        self.vault = hvac.Client(url=args.address,
                                 token=args.token,
                                 cert=(args.client_cert, args.client_key),
                                 verify=verify)
68,268 | hvac/hvac-cli | refs/heads/master | /hvac_cli/version.py | import pbr.version
__all__ = ['__version__']
version_info = pbr.version.VersionInfo('hvac-cli')
__version__ = version_info.version_string()
| {"/tests/test_kv.py": ["/hvac_cli/kv.py"], "/hvac_cli/cmd.py": ["/hvac_cli/version.py"], "/tests/test_status_cmd.py": ["/hvac_cli/cmd.py"], "/hvac_cli/kv.py": ["/hvac_cli/cli.py"], "/tests/test_kv_cmd.py": ["/hvac_cli/cmd.py"], "/hvac_cli/status.py": ["/hvac_cli/cli.py"], "/tests/test_cli.py": ["/hvac_cli/cli.py"]} |
68,269 | hvac/hvac-cli | refs/heads/master | /tests/test_modified_environ.py | # coding: utf-8
# copy/pasted from https://github.com/laurent-laporte-pro/stackoverflow-q2059482/blob/master/tests/test_environ_ctx.py # noqa
import os
from tests.modified_environ import modified_environ
def setup_method(test_method):
    """Ensure MODIFIED_ENVIRON is absent before each test runs."""
    if 'MODIFIED_ENVIRON' in os.environ:
        del os.environ['MODIFIED_ENVIRON']
def teardown_method(test_method):
    """Remove MODIFIED_ENVIRON after each test so state can't leak."""
    if 'MODIFIED_ENVIRON' in os.environ:
        del os.environ['MODIFIED_ENVIRON']
def test_modified_environ__no_args():
with modified_environ():
pass
def test_modified_environ__inserted():
with modified_environ(MODIFIED_ENVIRON="inserted"):
assert os.environ['MODIFIED_ENVIRON'] == "inserted"
assert 'MODIFIED_ENVIRON' not in os.environ
def test_modified_environ__updated():
os.environ['MODIFIED_ENVIRON'] = "value"
with modified_environ(MODIFIED_ENVIRON="updated"):
assert os.environ['MODIFIED_ENVIRON'] == "updated"
assert os.environ['MODIFIED_ENVIRON'] == "value"
def test_modified_environ__deleted():
os.environ['MODIFIED_ENVIRON'] = "value"
with modified_environ('MODIFIED_ENVIRON'):
assert 'MODIFIED_ENVIRON' not in os.environ
assert os.environ['MODIFIED_ENVIRON'] == "value"
def test_modified_environ__deleted_missing():
with modified_environ('MODIFIED_ENVIRON'):
assert 'MODIFIED_ENVIRON' not in os.environ
assert os.environ['MODIFIED_ENVIRON'] == "value"
| {"/tests/test_kv.py": ["/hvac_cli/kv.py"], "/hvac_cli/cmd.py": ["/hvac_cli/version.py"], "/tests/test_status_cmd.py": ["/hvac_cli/cmd.py"], "/hvac_cli/kv.py": ["/hvac_cli/cli.py"], "/tests/test_kv_cmd.py": ["/hvac_cli/cmd.py"], "/hvac_cli/status.py": ["/hvac_cli/cli.py"], "/tests/test_cli.py": ["/hvac_cli/cli.py"]} |
68,270 | hvac/hvac-cli | refs/heads/master | /tests/test_kv.py | import copy
import logging
import hvac
from hvac_cli.kv import KVCLI, kvcli_factory, ReadSecretVersion, SecretVersion
import mock
import pytest
def test_kvcli_factory(mocker, caplog):
caplog.set_level(logging.INFO, 'hvac_cli')
mocker.patch('hvac_cli.kv.kvcli_factory')
mocker.patch('hvac_cli.kv.CLI.list_mounts', side_effect=ValueError)
args = mock.MagicMock()
args.kv_version = None
with pytest.raises(ValueError):
kvcli_factory(args, args)
assert 'failed to read sys/mount to determine' in caplog.text
def test_sanitize_do_nothing():
args = mock.MagicMock()
args.no_workaround_6282 = None
status = {'version': '1.1.0'}
assert KVCLI.sanitize('a/b/c', status, args) == 'a/b/c'
path = 'éà'
assert KVCLI.sanitize(path, status, args) == path
def test_sanitize_user_friendly(caplog):
caplog.set_level(logging.INFO, 'hvac_cli')
path = '|'.join(["-{:02x}-{}".format(i, chr(i)) for i in range(128)])
expected = ('-00-_|-01-_|-02-_|-03-_|-04-_|-05-_|-06-_|-07-_|'
'-08-_|-09-_|-0a-_|-0b-_|-0c-_|-0d-_|-0e-_|-0f-_|'
'-10-_|-11-_|-12-_|-13-_|-14-_|-15-_|-16-_|-17-_|'
'-18-_|-19-_|-1a-_|-1b-_|-1c-_|-1d-_|-1e-_|-1f-_|'
'-20- |-21-!|-22-"|-23-_|-24-$|-25-_|-26-&|-27-\'|'
'-28-_|-29-)|-2a-_|-2b-_|-2c-,|-2d--|-2e-.|-2f-/|'
'-30-0|-31-1|-32-2|-33-3|-34-4|-35-5|-36-6|-37-7|'
'-38-8|-39-9|-3a-:|-3b-;|-3c-<|-3d-=|-3e->|-3f-?|'
'-40-@|-41-A|-42-B|-43-C|-44-D|-45-E|-46-F|-47-G|'
'-48-H|-49-I|-4a-J|-4b-K|-4c-L|-4d-M|-4e-N|-4f-O|'
'-50-P|-51-Q|-52-R|-53-S|-54-T|-55-U|-56-V|-57-W|'
'-58-X|-59-Y|-5a-Z|-5b-_|-5c-_|-5d-]|-5e-^|-5f-_|'
'-60-`|-61-a|-62-b|-63-c|-64-d|-65-e|-66-f|-67-g|'
'-68-h|-69-i|-6a-j|-6b-k|-6c-l|-6d-m|-6e-n|-6f-o|'
'-70-p|-71-q|-72-r|-73-s|-74-t|-75-u|-76-v|-77-w|'
'-78-x|-79-y|-7a-z|-7b-{|-7c-||-7d-}|-7e-~|-7f-_')
args = mock.MagicMock()
args.no_workaround_6282 = None
status = {'version': '1.1.0'}
assert KVCLI.sanitize(path, status, args) == expected
assert 'bug 6282 was fixed' in caplog.text
assert 'issues/6282' in caplog.text
assert 'replace control characters' in caplog.text
caplog.clear()
status = {'version': '1.0.3'}
assert KVCLI.sanitize(path, status, args) == expected
assert 'bug 6282 was fixed' not in caplog.text
assert 'issues/6282' in caplog.text
assert 'replace control characters' in caplog.text
caplog.clear()
expected = ('-00-_|-01-_|-02-_|-03-_|-04-_|-05-_|-06-_|-07-_|'
'-08-_|-09-_|-0a-_|-0b-_|-0c-_|-0d-_|-0e-_|-0f-_|'
'-10-_|-11-_|-12-_|-13-_|-14-_|-15-_|-16-_|-17-_|'
'-18-_|-19-_|-1a-_|-1b-_|-1c-_|-1d-_|-1e-_|-1f-_|'
'-20- |-21-!|-22-"|-23-#|-24-$|-25-_|-26-&|-27-\'|'
'-28-(|-29-)|-2a-*|-2b-+|-2c-,|-2d--|-2e-.|-2f-/|'
'-30-0|-31-1|-32-2|-33-3|-34-4|-35-5|-36-6|-37-7|'
'-38-8|-39-9|-3a-:|-3b-;|-3c-<|-3d-=|-3e->|-3f-?|'
'-40-@|-41-A|-42-B|-43-C|-44-D|-45-E|-46-F|-47-G|'
'-48-H|-49-I|-4a-J|-4b-K|-4c-L|-4d-M|-4e-N|-4f-O|'
'-50-P|-51-Q|-52-R|-53-S|-54-T|-55-U|-56-V|-57-W|'
'-58-X|-59-Y|-5a-Z|-5b-[|-5c-\\|-5d-]|-5e-^|-5f-_|'
'-60-`|-61-a|-62-b|-63-c|-64-d|-65-e|-66-f|-67-g|'
'-68-h|-69-i|-6a-j|-6b-k|-6c-l|-6d-m|-6e-n|-6f-o|'
'-70-p|-71-q|-72-r|-73-s|-74-t|-75-u|-76-v|-77-w|'
'-78-x|-79-y|-7a-z|-7b-{|-7c-||-7d-}|-7e-~|-7f-_')
status = {'version': '1.1.0'}
args.no_workaround_6282 = True
assert KVCLI.sanitize(path, status, args) == expected
assert 'bug 6282 was fixed' not in caplog.text
assert 'issues/6282' not in caplog.text
assert 'replace control characters' in caplog.text
caplog.clear()
def test_sanitize_bug_6213(caplog):
caplog.set_level(logging.INFO, 'hvac_cli')
args = mock.MagicMock()
args.no_workaround_6282 = None
status = {'version': '1.1.0'}
path = 'A B /C / D '
assert KVCLI.sanitize(path, status, args) == 'A B/C/ D'
assert 'issues/6213' in caplog.text
def mount_kv(vault_server, mount_point, kv_version):
    """(Re)mount a KV secrets engine of the requested version for a test."""
    client = hvac.Client(url=vault_server['http'], token=vault_server['token'])
    # Drop any engine left over from a previous test before mounting fresh.
    client.sys.disable_secrets_engine(path=mount_point)
    client.sys.enable_secrets_engine(
        backend_type='kv', path=mount_point, options={'version': kv_version})
@pytest.mark.parametrize("kv_version", ['1', '2'])
def test_kv_version(vault_server, kv_version):
mount_point = 'mysecrets'
mount_kv(vault_server, mount_point, kv_version)
CLI_args = mock.MagicMock()
CLI_args.token = vault_server['token']
CLI_args.dry_run = None
CLI_args.address = vault_server['http']
KV_args = mock.MagicMock()
KV_args.kv_version = kv_version
KV_args.mount_point = mount_point
kv = kvcli_factory(CLI_args, KV_args)
secret_key = 'my/key'
secret_value = {'field': 'value'}
version = None
kv.create_or_update_secret(secret_key, secret_value, cas=None)
assert kv.read_secret(secret_key, version) == secret_value
kv.erase()
with pytest.raises(hvac.exceptions.InvalidPath):
kv.read_secret(secret_key, version)
def test_read_secret_version_v1(vault_server):
mount_point = 'mysecrets'
mount_kv(vault_server, mount_point, '1')
CLI_args = mock.MagicMock()
CLI_args.token = vault_server['token']
CLI_args.dry_run = None
CLI_args.address = vault_server['http']
KV_args = mock.MagicMock()
KV_args.kv_version = None
KV_args.mount_point = mount_point
kv = kvcli_factory(CLI_args, KV_args)
secret_key = 'my/key'
secret_value = {'field': 'value'}
version = None
with pytest.raises(SecretVersion):
kv.create_or_update_secret(secret_key, secret_value, cas=1)
kv.create_or_update_secret(secret_key, secret_value, cas=None)
assert kv.read_secret(secret_key, version) == secret_value
with pytest.raises(ReadSecretVersion):
kv.read_secret(secret_key, '0')
kv.erase()
with pytest.raises(hvac.exceptions.InvalidPath):
kv.read_secret(secret_key, version)
def test_read_secret_version_v2(vault_server):
mount_point = 'mysecrets'
mount_kv(vault_server, mount_point, '2')
CLI_args = mock.MagicMock()
CLI_args.token = vault_server['token']
CLI_args.dry_run = None
CLI_args.address = vault_server['http']
KV_args = mock.MagicMock()
KV_args.kv_version = None
KV_args.mount_point = mount_point
KV_args.rewrite_key = True
KV_args.no_workaround_6282 = None
kv = kvcli_factory(CLI_args, KV_args)
secret_key = 'my/key'
secret_value = {'field': 'value'}
kv.create_or_update_secret(secret_key, secret_value, cas=None)
assert kv.read_secret(secret_key, None) == secret_value
assert kv.read_secret(secret_key, 1) == secret_value
with pytest.raises(hvac.exceptions.InvalidPath):
kv.read_secret(secret_key, 2)
with pytest.raises(hvac.exceptions.InvalidRequest):
kv.create_or_update_secret(secret_key, secret_value, cas=0)
with pytest.raises(hvac.exceptions.InvalidRequest):
kv.create_or_update_secret(secret_key, secret_value, cas=2)
kv.create_or_update_secret(secret_key, secret_value, cas=1)
assert kv.read_secret(secret_key, 2) == secret_value
kv.erase()
with pytest.raises(hvac.exceptions.InvalidPath):
kv.read_secret(secret_key, None)
def test_metadata_v1(vault_server):
mount_point = 'mysecrets'
mount_kv(vault_server, mount_point, '1')
CLI_args = mock.MagicMock()
CLI_args.token = vault_server['token']
CLI_args.dry_run = None
CLI_args.address = vault_server['http']
KV_args = mock.MagicMock()
KV_args.kv_version = None
KV_args.mount_point = mount_point
kv = kvcli_factory(CLI_args, KV_args)
secret_key = 'my/key'
secret_value = {'field': 'value'}
kv.create_or_update_secret(secret_key, secret_value, cas=None)
with pytest.raises(SecretVersion):
kv.read_secret_metadata(secret_key)
with pytest.raises(SecretVersion):
kv.update_metadata(secret_key, None, None)
def test_metadata_v2(vault_server, caplog):
mount_point = 'mysecrets'
mount_kv(vault_server, mount_point, '2')
CLI_args = mock.MagicMock()
CLI_args.token = vault_server['token']
CLI_args.dry_run = None
CLI_args.address = vault_server['http']
KV_args = mock.MagicMock()
KV_args.kv_version = None
KV_args.mount_point = mount_point
kv = kvcli_factory(CLI_args, KV_args)
secret_key = 'my/key'
secret_value = {'field': 'value'}
kv.create_or_update_secret(secret_key, secret_value, cas=None)
kv.update_metadata(secret_key, None, None)
metadata = kv.read_secret_metadata(secret_key)
assert metadata['data']['max_versions'] == 0
assert metadata['data']['cas_required'] is False
max_versions = 5
cas_required = True
metadata = kv.update_metadata(secret_key, max_versions, cas_required)
assert metadata['data']['max_versions'] == max_versions
assert metadata['data']['cas_required'] == cas_required
kv.delete_metadata_and_all_versions(secret_key)
with pytest.raises(hvac.exceptions.InvalidPath):
kv.read_secret_metadata(secret_key)
caplog.clear()
with pytest.raises(hvac.exceptions.InvalidPath):
kv.read_secret_metadata('doesnotexist')
assert 'failed to read metadata' in caplog.text
def test_delete_version_v1(vault_server):
mount_point = 'mysecrets'
mount_kv(vault_server, mount_point, '1')
CLI_args = mock.MagicMock()
CLI_args.token = vault_server['token']
CLI_args.dry_run = None
CLI_args.address = vault_server['http']
KV_args = mock.MagicMock()
KV_args.kv_version = None
KV_args.mount_point = mount_point
kv = kvcli_factory(CLI_args, KV_args)
secret_key = 'my/key'
secret_value = {'field': 'value'}
kv.create_or_update_secret(secret_key, secret_value, cas=None)
with pytest.raises(SecretVersion):
kv.delete(secret_key, versions='1')
assert kv.delete(secret_key, versions=None) == 0
with pytest.raises(hvac.exceptions.InvalidPath):
kv.read_secret(secret_key, None)
with pytest.raises(SecretVersion):
kv.undelete(secret_key, versions='1')
def test_delete_version_v2(vault_server):
mount_point = 'mysecrets'
mount_kv(vault_server, mount_point, '2')
CLI_args = mock.MagicMock()
CLI_args.token = vault_server['token']
CLI_args.dry_run = None
CLI_args.address = vault_server['http']
KV_args = mock.MagicMock()
KV_args.kv_version = None
KV_args.mount_point = mount_point
kv = kvcli_factory(CLI_args, KV_args)
secret_key = 'my/key'
secret_value = {'field': 'value'}
for i in ('1', '2', '3'):
secret_value = {'field': i}
kv.create_or_update_secret(secret_key, secret_value, cas=None)
versions = kv.read_secret_metadata(secret_key)['data']['versions']
assert not versions[i]['deletion_time']
assert not versions[i]['destroyed']
assert kv.delete(secret_key, versions=None) == 0
versions = kv.read_secret_metadata(secret_key)['data']['versions']
assert versions['3']['deletion_time']
assert not versions['3']['destroyed']
assert kv.delete(secret_key, versions=['1', '2']) == 0
versions = kv.read_secret_metadata(secret_key)['data']['versions']
for i in ('1', '2'):
assert versions[i]['deletion_time']
assert not versions[i]['destroyed']
with pytest.raises(hvac.exceptions.InvalidPath):
kv.read_secret(secret_key, i)
assert kv.undelete(secret_key, versions=['1', '2']) == 0
versions = kv.read_secret_metadata(secret_key)['data']['versions']
for i in ('1', '2'):
assert not versions[i]['deletion_time']
assert not versions[i]['destroyed']
assert kv.read_secret(secret_key, i) == {'field': i}
def test_patch_version_v1(vault_server):
mount_point = 'mysecrets'
mount_kv(vault_server, mount_point, '1')
CLI_args = mock.MagicMock()
CLI_args.token = vault_server['token']
CLI_args.dry_run = None
CLI_args.address = vault_server['http']
KV_args = mock.MagicMock()
KV_args.kv_version = None
KV_args.mount_point = mount_point
kv = kvcli_factory(CLI_args, KV_args)
secret_key = 'my/key'
secret_value = {'field': 'value'}
kv.create_or_update_secret(secret_key, secret_value, cas=None)
with pytest.raises(SecretVersion):
kv.patch(secret_key, secret_value)
def test_patch_version_v2(vault_server):
mount_point = 'mysecrets'
mount_kv(vault_server, mount_point, '2')
CLI_args = mock.MagicMock()
CLI_args.token = vault_server['token']
CLI_args.dry_run = None
CLI_args.address = vault_server['http']
KV_args = mock.MagicMock()
KV_args.kv_version = None
KV_args.mount_point = mount_point
KV_args.rewrite_key = True
KV_args.no_workaround_6282 = None
kv = kvcli_factory(CLI_args, KV_args)
secret_key = 'my/key'
secret_value = {
'override': 'before',
'preserved': 'before',
}
kv.create_or_update_secret(secret_key, secret_value, cas=None)
assert kv.read_secret(secret_key, None) == secret_value
override_secret_value = {
'override': 'after',
'something': 'else',
}
kv.patch(secret_key, override_secret_value)
expected = copy.copy(secret_value)
expected.update(override_secret_value)
assert kv.read_secret(secret_key, None) == expected
def test_destroy_version_v1(vault_server):
mount_point = 'mysecrets'
mount_kv(vault_server, mount_point, '1')
CLI_args = mock.MagicMock()
CLI_args.token = vault_server['token']
CLI_args.dry_run = None
CLI_args.address = vault_server['http']
KV_args = mock.MagicMock()
KV_args.kv_version = None
KV_args.mount_point = mount_point
kv = kvcli_factory(CLI_args, KV_args)
secret_key = 'my/key'
secret_value = {'field': 'value'}
kv.create_or_update_secret(secret_key, secret_value, cas=None)
with pytest.raises(SecretVersion):
kv.destroy(secret_key, versions='1')
def test_destroy_version_v2(vault_server):
mount_point = 'mysecrets'
mount_kv(vault_server, mount_point, '2')
CLI_args = mock.MagicMock()
CLI_args.token = vault_server['token']
CLI_args.dry_run = None
CLI_args.address = vault_server['http']
KV_args = mock.MagicMock()
KV_args.kv_version = None
KV_args.mount_point = mount_point
kv = kvcli_factory(CLI_args, KV_args)
secret_key = 'my/key'
secret_value = {'field': 'value'}
for i in ('1', '2', '3'):
secret_value = {'field': i}
kv.create_or_update_secret(secret_key, secret_value, cas=None)
versions = kv.read_secret_metadata(secret_key)['data']['versions']
assert not versions[i]['deletion_time']
assert not versions[i]['destroyed']
assert kv.destroy(secret_key, versions=[1, 2]) == 0
versions = kv.read_secret_metadata(secret_key)['data']['versions']
for i in ('1', '2'):
assert not versions[i]['deletion_time']
assert versions[i]['destroyed']
with pytest.raises(hvac.exceptions.InvalidPath):
kv.read_secret(secret_key, i)
def test_rollback_version_v1(vault_server):
mount_point = 'mysecrets'
mount_kv(vault_server, mount_point, '1')
CLI_args = mock.MagicMock()
CLI_args.token = vault_server['token']
CLI_args.dry_run = None
CLI_args.address = vault_server['http']
KV_args = mock.MagicMock()
KV_args.kv_version = None
KV_args.mount_point = mount_point
kv = kvcli_factory(CLI_args, KV_args)
secret_key = 'my/key'
secret_value = {'field': 'value'}
kv.create_or_update_secret(secret_key, secret_value, cas=None)
with pytest.raises(SecretVersion):
kv.rollback(secret_key, version='1')
def test_rollback_version_v2(vault_server, caplog):
caplog.set_level(logging.INFO, 'hvac_cli')
mount_point = 'mysecrets'
mount_kv(vault_server, mount_point, '2')
CLI_args = mock.MagicMock()
CLI_args.token = vault_server['token']
CLI_args.dry_run = None
CLI_args.address = vault_server['http']
KV_args = mock.MagicMock()
KV_args.kv_version = None
KV_args.mount_point = mount_point
kv = kvcli_factory(CLI_args, KV_args)
secret_key = 'my/key'
secret_value = {'field': 'value'}
for i in ('1', '2', '3'):
secret_value = {'field': i}
kv.create_or_update_secret(secret_key, secret_value, cas=None)
with pytest.raises(hvac.exceptions.InvalidPath):
kv.read_secret(secret_key, '4')
kv.rollback(secret_key, '2')
assert kv.read_secret(secret_key, '4') == {'field': '2'}
caplog.clear()
with pytest.raises(hvac.exceptions.InvalidPath):
kv.rollback(secret_key, '20')
assert 'at version 20' in caplog.text
def test_dry_run_version_v1(vault_server):
mount_point = 'mysecrets'
mount_kv(vault_server, mount_point, '1')
CLI_args = mock.MagicMock()
CLI_args.token = vault_server['token']
CLI_args.address = vault_server['http']
KV_args = mock.MagicMock()
KV_args.kv_version = None
KV_args.mount_point = mount_point
kv = kvcli_factory(CLI_args, KV_args)
secret_key = 'my/key'
secret_value = {'field': 'value'}
CLI_args.dry_run = True
kv.create_or_update_secret(secret_key, secret_value, cas=None)
with pytest.raises(hvac.exceptions.InvalidPath):
kv.read_secret(secret_key, None)
CLI_args.dry_run = None
kv.create_or_update_secret(secret_key, secret_value, cas=None)
assert kv.read_secret(secret_key, None) == secret_value
CLI_args.dry_run = True
assert kv.delete(secret_key, versions=None) == 0
assert kv.read_secret(secret_key, None) == secret_value
def test_dry_run_version_v2(vault_server):
mount_point = 'mysecrets'
mount_kv(vault_server, mount_point, '2')
CLI_args = mock.MagicMock()
CLI_args.token = vault_server['token']
CLI_args.address = vault_server['http']
KV_args = mock.MagicMock()
KV_args.kv_version = None
KV_args.mount_point = mount_point
KV_args.rewrite_key = True
KV_args.no_workaround_6282 = None
kv = kvcli_factory(CLI_args, KV_args)
secret_key = 'my/key'
secret_value = {'field': 'value'}
CLI_args.dry_run = True
kv.create_or_update_secret(secret_key, secret_value, cas=None)
with pytest.raises(hvac.exceptions.InvalidPath):
kv.read_secret(secret_key, None)
CLI_args.dry_run = None
kv.create_or_update_secret(secret_key, secret_value, cas=None)
kv.create_or_update_secret(secret_key, secret_value, cas=None)
assert kv.read_secret(secret_key, None) == secret_value
CLI_args.dry_run = True
secret_patch = {'other': 'value'}
kv.patch(secret_key, secret_patch)
assert kv.read_secret(secret_key, None) == secret_value
assert kv.delete(secret_key, versions=None) == 0
assert kv.read_secret(secret_key, None) == secret_value
assert kv.destroy(secret_key, versions='2') == 0
assert kv.read_secret(secret_key, '2') == secret_value
assert kv.delete_metadata_and_all_versions(secret_key) == 0
assert kv.read_secret(secret_key, None) == secret_value
existing_metadata = kv.read_secret_metadata(secret_key)['data']
max_versions = 5
cas_required = True
metadata = kv.update_metadata(secret_key, max_versions, cas_required)['data']
assert existing_metadata['max_versions'] == metadata['max_versions']
assert existing_metadata['cas_required'] == metadata['cas_required']
with pytest.raises(hvac.exceptions.InvalidPath):
kv.read_secret(secret_key, '3')
assert kv.rollback(secret_key, '1') == 0
with pytest.raises(hvac.exceptions.InvalidPath):
kv.read_secret(secret_key, '3')
| {"/tests/test_kv.py": ["/hvac_cli/kv.py"], "/hvac_cli/cmd.py": ["/hvac_cli/version.py"], "/tests/test_status_cmd.py": ["/hvac_cli/cmd.py"], "/hvac_cli/kv.py": ["/hvac_cli/cli.py"], "/tests/test_kv_cmd.py": ["/hvac_cli/cmd.py"], "/hvac_cli/status.py": ["/hvac_cli/cli.py"], "/tests/test_cli.py": ["/hvac_cli/cli.py"]} |
68,271 | hvac/hvac-cli | refs/heads/master | /hvac_cli/cmd.py | import sys
import os
from cliff.app import App
from cliff.commandmanager import CommandManager
from hvac_cli.version import __version__
# Fallback Vault server address used when neither VAULT_ADDR nor
# VAULT_AGENT_ADDR is present in the environment.
DEFAULT_VAULT_ADDR = 'http://127.0.0.1:8200'
class HvacApp(App):
    """cliff application entry point for hvac-cli.

    Registers sub-commands from the ``hvac_cli`` entry-point namespace
    and declares the connection options (Vault address, token, TLS
    material) shared by every sub-command.
    """

    def __init__(self):
        # py3-style super(); the project is Python-3 only, so the
        # explicit two-argument form was redundant.
        super().__init__(
            description="""
            hvac-cli is CLI to Hashicorp Vault with additional features.
            It does not support extensions that are not available
            as Free Software such as namespaces, Sentinel, Policy Overrides
            or Multi-factor Authentication (MFA).
            """,
            version=__version__,
            command_manager=CommandManager('hvac_cli'),
            deferred_help=True,
        )

    def build_option_parser(self, description, version, argparse_kwargs=None):
        """Extend cliff's base option parser with the Vault options.

        Returns the parser so cliff (and the tests) can call
        ``parse_args`` on it directly.
        """
        parser = super().build_option_parser(description, version, argparse_kwargs)
        self.set_parser_arguments(parser)
        return parser

    @staticmethod
    def set_parser_arguments(parser):
        """Add the global Vault connection arguments to *parser*.

        Defaults are sourced from the usual Vault environment variables
        at parser-construction time.
        """
        parser.add_argument(
            '--dry-run',
            action='store_true',
            help='Show what would be done but do nothing'
        )
        parser.add_argument(
            '--token',
            required=False,
            default=os.getenv('VAULT_TOKEN'),
            help=('Vault token. It will be prompted interactively if unset. '
                  'This can also be specified via the VAULT_TOKEN environment variable.')
        )
        parser.add_argument(
            '--address', '--agent-address',
            # VAULT_AGENT_ADDR deliberately takes precedence over VAULT_ADDR.
            default=os.getenv('VAULT_AGENT_ADDR', os.getenv('VAULT_ADDR', DEFAULT_VAULT_ADDR)),
            required=False,
            dest='address',
            help=('Address of the Vault server or the Vault agent. '
                  '--agent-address was introduced with vault 1.1.0. '
                  'This can also be specified via the VAULT_ADDR '
                  'or the VAULT_AGENT_ADDR environment variable. '
                  'If both VAULT_AGENT_ADDR and VAULT_ADDR are in the environment '
                  'VAULT_AGENT_ADDR has precedence')
        )
        parser.add_argument(
            '--tls-skip-verify',
            action='store_true',
            # Any non-empty value of VAULT_SKIP_VERIFY enables skipping;
            # bool() expresses the old "True if ... else False" directly.
            default=bool(os.getenv('VAULT_SKIP_VERIFY', False)),
            required=False,
            help=('Disable verification of TLS certificates. Using this option is highly '
                  'discouraged and decreases the security of data transmissions to and from '
                  'the Vault server. The default is false. '
                  'This can also be specified via the VAULT_SKIP_VERIFY environment variable.')
        )
        parser.add_argument(
            '--ca-cert',
            default=os.getenv('VAULT_CACERT'),
            required=False,
            help=('Path on the local disk to a single PEM-encoded CA certificate to verify '
                  'the Vault server\'s SSL certificate. '
                  'This can also be specified via the VAULT_CACERT environment variable. ')
        )
        parser.add_argument(
            '--client-cert',
            default=os.getenv('VAULT_CLIENT_CERT'),
            required=False,
            help=('Path on the local disk to a single PEM-encoded CA certificate to use '
                  'for TLS authentication to the Vault server. If this flag is specified, '
                  '--client-key is also required. '
                  'This can also be specified via the VAULT_CLIENT_CERT environment variable.')
        )
        parser.add_argument(
            '--client-key',
            default=os.getenv('VAULT_CLIENT_KEY'),
            required=False,
            help=('Path on the local disk to a single PEM-encoded private key matching the '
                  'client certificate from -client-cert. '
                  'This can also be specified via the VAULT_CLIENT_KEY environment variable.')
        )
def main(argv=None):
    """Console-script entry point.

    Args:
        argv: command-line arguments (defaults to ``sys.argv[1:]``).
            Resolved at call time rather than import time, so changes
            to ``sys.argv`` made after this module is imported are
            honoured (the old early-bound default froze it at import).

    Returns:
        The cliff application exit status.
    """
    if argv is None:
        argv = sys.argv[1:]
    return HvacApp().run(argv)
| {"/tests/test_kv.py": ["/hvac_cli/kv.py"], "/hvac_cli/cmd.py": ["/hvac_cli/version.py"], "/tests/test_status_cmd.py": ["/hvac_cli/cmd.py"], "/hvac_cli/kv.py": ["/hvac_cli/cli.py"], "/tests/test_kv_cmd.py": ["/hvac_cli/cmd.py"], "/hvac_cli/status.py": ["/hvac_cli/cli.py"], "/tests/test_cli.py": ["/hvac_cli/cli.py"]} |
68,272 | hvac/hvac-cli | refs/heads/master | /tests/test_cmd.py | from hvac_cli import cmd
import pytest
from tests.modified_environ import modified_environ
def test_help(capsys):
    """--help must exit and advertise the bash-completion command."""
    with pytest.raises(SystemExit):
        cmd.main(['--help'])
    out = capsys.readouterr().out
    assert 'print bash completion command' in out
def test_parse_args_agent_address():
    """Resolution order of --address / --agent-address and their env vars."""
    token_value = 'TOKEN'

    def parse(extra_cli, *unset, **env):
        # Build a fresh parser under a controlled environment and parse
        # --token plus whatever extra CLI arguments the case needs.
        with modified_environ(*unset, **env):
            parser = cmd.HvacApp().build_option_parser('DESCRIPTION', 'version-1')
            return parser.parse_args(['--token', token_value] + extra_cli)

    # No env, no flag: falls back to the built-in default address.
    args = parse([], 'VAULT_AGENT_ADDR')
    assert args.address == cmd.DEFAULT_VAULT_ADDR

    # An explicit --agent-address flag wins.
    addr = 'ADDR'
    args = parse(['--agent-address', addr], 'VAULT_AGENT_ADDR')
    assert args.address == addr

    # VAULT_AGENT_ADDR has precedence over VAULT_ADDR.
    args = parse([], VAULT_ADDR='SHOULD BE IGNORED', VAULT_AGENT_ADDR=addr)
    assert args.address == addr
def test_parse_args():
    """Defaults, environment-variable pickup, and CLI override."""
    token_value = 'TOKEN'
    all_vars = ('VAULT_ADDR', 'VAULT_SKIP_VERIFY', 'VAULT_CACERT',
                'VAULT_CLIENT_CERT', 'VAULT_CLIENT_KEY')

    def parse(extra_cli, *unset, **env):
        # Fresh parser under a controlled environment; always pass --token.
        with modified_environ(*unset, **env):
            parser = cmd.HvacApp().build_option_parser('DESCRIPTION', 'version-1')
            return parser.parse_args(['--token', token_value] + extra_cli)

    # Clean environment: every option falls back to its default.
    args = parse([], *all_vars)
    assert args.token == token_value
    assert args.address == cmd.DEFAULT_VAULT_ADDR
    assert args.tls_skip_verify is False
    assert args.ca_cert is None
    assert args.client_cert is None
    assert args.client_key is None

    # Every option can be sourced from its environment variable.
    addr = 'ADDR'
    cacert = 'CACERT'
    client_cert = 'CLIENT_CERT'
    client_key = 'CLIENT_KEY'
    args = parse(
        [],
        VAULT_ADDR=addr,
        VAULT_SKIP_VERIFY='yes',
        VAULT_CACERT=cacert,
        VAULT_CLIENT_CERT=client_cert,
        VAULT_CLIENT_KEY=client_key,
    )
    assert args.token == token_value
    assert args.address == addr
    assert args.tls_skip_verify is True
    assert args.ca_cert == cacert
    assert args.client_cert == client_cert
    assert args.client_key == client_key

    # A CLI flag supplies the value when the environment is unset.
    args = parse(['--ca-cert', cacert], *all_vars)
    assert args.ca_cert == cacert
| {"/tests/test_kv.py": ["/hvac_cli/kv.py"], "/hvac_cli/cmd.py": ["/hvac_cli/version.py"], "/tests/test_status_cmd.py": ["/hvac_cli/cmd.py"], "/hvac_cli/kv.py": ["/hvac_cli/cli.py"], "/tests/test_kv_cmd.py": ["/hvac_cli/cmd.py"], "/hvac_cli/status.py": ["/hvac_cli/cli.py"], "/tests/test_cli.py": ["/hvac_cli/cli.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.