max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
naver_oauth/user/oauth/providers/naver.py | bluebamus/django_miscellaneous_book | 0 | 6622651 | <filename>naver_oauth/user/oauth/providers/naver.py
from django.conf import settings
from django.contrib.auth import login
import requests
class SingletonInstane:
    """Simple singleton base class (class name typo kept: it is public API).

    Do not call the constructor directly; call ``instance()``.  The first
    call builds the object, then ``instance`` is rebound to an accessor so
    every later call returns the same shared object.
    """
    __instance = None

    @classmethod
    def __getInstance(cls):
        # Accessor that ``instance`` is rebound to after first creation.
        return cls.__instance

    @classmethod
    def instance(cls, *args, **kargs):
        """Create the singleton on first call and return it thereafter."""
        cls.__instance = cls(*args, **kargs)
        # Rebind so subsequent calls skip construction entirely.
        cls.instance = cls.__getInstance
        return cls.__instance
class NaverClient(SingletonInstane):
    """Thin client for the Naver OAuth2 login API.

    Provides two operations: exchanging an authorization code for an
    access token, and fetching the authenticated user's profile.  The
    class holds no per-instance state, so it is used as a singleton (see
    SingletonInstane): obtain it via ``NaverClient.instance()`` and share
    the one object between NaverLoginMixin and any other consumer.

    Reference: https://developers.naver.com/docs/login/web/web.md
    """
    # Application credentials, configured in Django settings.
    client_id = settings.NAVER_CLIENT_ID
    secret_key = settings.NAVER_SECRET_KEY
    # OAuth2 "authorization code" grant flow.
    grant_type = 'authorization_code'
    # Naver API endpoints.
    auth_url = 'https://nid.naver.com/oauth2.0/token'
    profile_url = 'https://openapi.naver.com/v1/nid/me'

    def get_access_token(self, state, code):
        """Exchange the authorization ``code`` (with CSRF ``state``) for tokens.

        Returns ``(ok, payload)`` where ``ok`` is the HTTP success flag and
        ``payload`` is the JSON body decoded into a dict (token data on
        success, error info otherwise).  Raises if the body is not JSON.
        """
        res = requests.get(self.auth_url, params={'client_id': self.client_id, 'client_secret': self.secret_key,
                                                  'grant_type': self.grant_type, 'state': state, 'code': code})
        return res.ok, res.json()

    def get_profile(self, access_token, token_type='Bearer'):
        """Fetch the user's profile using the issued access token.

        The token is sent as an ``Authorization: <token_type> <token>``
        header.  Naver reports success with resultcode '00'; returns
        ``(True, profile_dict)`` on success, ``(False, message)`` otherwise.
        """
        res = requests.get(self.profile_url, headers={'Authorization': '{} {}'.format(token_type, access_token)}).json()
        if res.get('resultcode') != '00':
            return False, res.get('message')
        else:
            return True, res.get('response')
# NaverLoginMixin 에서 네이버의 api를 구현한 네이버 클라이언트를 naver_client 클래스변수로 추가했습니다.
# 네이버의 인증토큰 발급과 프로필 정보를 가져오는 두 가지의 기능을 제공합니다.
'''
naver_client로부터 token_infos 객체를 전달받는데 token_infos 객체는 아래와 같은 키를 갖는 딕셔너리 객체입니다.
1. error - 에러코드
2. error_description - 에러메시지
3. access_token - 인증토큰
4. refresh_token - 인증토큰 재발급토큰
5. expires_in - 인증토큰 만료기한(초)
6. token_type - 인증토큰 사용하는 api 호출시 인증방식(Authorization 헤더 타입)
'''
class NaverLoginMixin:
    """View mixin implementing Naver social login (sign-up and login)."""

    # Shared API client; NaverClient is a singleton, so other classes can
    # safely reuse the very same object.
    naver_client = NaverClient.instance()

    def login_with_naver(self, state, code):
        """Run the whole Naver login flow for an OAuth callback.

        Returns ``(True, user)`` on success, ``(False, message)`` on any
        failure (token issuing, profile fetch or validation).
        """
        # 1. Exchange the authorization code for tokens.  ``token_infos``
        # is a dict carrying: error / error_description, access_token,
        # refresh_token, expires_in (seconds) and token_type (the
        # Authorization header scheme for subsequent API calls).
        is_success, token_infos = self.naver_client.get_access_token(state, code)
        if not is_success:
            return False, '{} [{}]'.format(token_infos.get('error_desc'), token_infos.get('error'))
        access_token = token_infos.get('access_token')
        refresh_token = token_infos.get('refresh_token')
        expires_in = token_infos.get('expires_in')
        token_type = token_infos.get('token_type')
        # 2. Fetch the Naver profile (email, name) needed for sign-up.
        is_success, profiles = self.get_naver_profile(access_token, token_type)
        if not is_success:
            return False, profiles
        # 3. Social login doubles as sign-up: look the user up by e-mail
        # and create one if missing.  New users get an unusable random
        # password via set_password(None) -- they authenticate through
        # Naver -- and are activated immediately, since Naver has already
        # verified the e-mail.  Existing users may keep logging in either
        # with email/password or through Naver.
        user, created = self.model.objects.get_or_create(email=profiles.get('email'))
        if created:
            user.set_password(None)
            user.username = profiles.get('name')
            user.is_active = True
            user.save()
        # 4. Log the user in.  The default ModelBackend authenticates with
        # username/password, which social login cannot provide, so a
        # dedicated backend path is handed to auth's login().
        login(self.request, user, 'naver_oauth.user.oauth.backends.NaverBackend')
        # 5. Store the token data in the session so future Naver API calls
        # can act with the user's authorization without re-login; the
        # refresh token allows renewing an expired access token (if that
        # fails as well, the user should simply be logged out).
        self.set_session(access_token=access_token, refresh_token=refresh_token, expires_in=expires_in, token_type=token_type)
        return True, user

    def get_naver_profile(self, access_token, token_type):
        """Fetch the profile and validate required fields.

        The profile only contains fields the user consented to share, so
        every entry of ``self.required_profiles`` (e.g. email, name) must
        be present; otherwise an error message asking for consent is
        returned as ``(False, message)``.
        """
        is_success, profiles = self.naver_client.get_profile(access_token, token_type)
        if not is_success:
            return False, profiles
        for profile in self.required_profiles:
            if profile not in profiles:
                return False, '{}은 필수정보입니다. 정보제공에 동의해주세요.'.format(profile)
        return True, profiles
# 네이버의 api를 호출할 때 requests 라이브러리를 사용하여 호출하도록 했습니다.
# requests 는 파이썬의 표준 http 클라이언트보다 사용하기 간편하고, 무엇보다 직관적입니다.
# requests 라이브러리를 먼저 설치하세요.
# pip install requests
# reference : https://developers.naver.com/docs/login/web/web.md
# requests 모듈의 사용법을 알려드리면 get, post, put, delete 등의 함수들이 구현되어 있고,
# 각각의 함수는 함수명과 동일한 http 메소드로 요청을 합니다.
# 첫번째 위치 인자는 url 이고 그 외 파라미터는 keyword 인자로 전달하면 됩니다.
# get_profile 메소드에서 headers 라는 파라미터가 사용되는데
# http 헤더의 값을 딕셔너리 형태로 전달하면 됩니다.
# Authorization 헤더를 token_type(bearer) 와 인증토큰을 조합한 값으로 추가했습니다.
# 각 함수 반환데이터는 json 메소드를 통해 본문의 내용을 딕셔너리 형태로 반환해 줄 수도 있습니다.
# 물론 본문이 json 타입이 아닐 경우 에러가 발생합니다.
| <filename>naver_oauth/user/oauth/providers/naver.py
from django.conf import settings
from django.contrib.auth import login
import requests
class SingletonInstane:
__instance = None
@classmethod
def __getInstance(cls):
return cls.__instance
@classmethod
def instance(cls, *args, **kargs):
cls.__instance = cls(*args, **kargs)
cls.instance = cls.__getInstance
return cls.__instance
class NaverClient(SingletonInstane):
client_id = settings.NAVER_CLIENT_ID
secret_key = settings.NAVER_SECRET_KEY
grant_type = 'authorization_code'
auth_url = 'https://nid.naver.com/oauth2.0/token'
profile_url = 'https://openapi.naver.com/v1/nid/me'
# singleton 이라는 패턴 사용
# 첫번째 생성자 호출 때만 객체만 생성시키고 이후 생성자 호출부터는 먼저 생성된 객체를 공유하게 하는 방식
# NaverClient 클래스를 NaverLoginMixin 뿐만 아니라 다른 클래스에서도 공유하며 사용할 수 있습니다.
# NaverClient 객체는 인스턴스변수가 없기 때문에 하나의 객체를 서로 공유하더라도 문제가 발생하지 않습니다.
# 이렇게 인스턴스변수가 존재하지 않으나 여러 클래스에서 유틸리티처럼 사용하는 클래스의 경우
# 싱글턴 패턴을 많이 사용합니다.
# 객체를 생성하는 비용이 줄어 서버의 가용성을 높이는 좋은 패턴이며 가장 간단한 방법을 구현함
# * 일반적으로 싱글턴은 생성자가 아니라 명시적으로 getInstance 라는 static 메소드를 제공해서 객체를 생성합니다.
# getInstance 를 사용하지 않고 생성자를 사용해 객체를 생성하면 에러를 발생시켜
# 싱글턴으로 구현되었음을 개발자에게 알려주는 것이죠.
# 원래 싱글턴 객체에 인스턴스변수를 추가하거나 클래스변수를 변경하면 안됩니다.
# __instance = None
# @classmethod
# def __getInstance(cls):
# return cls.__instance
# @classmethod
# def instance(cls, *args, **kargs):
# cls.__instance = cls(*args, **kargs)
# cls.instance = cls.__getInstance
# return cls.__instance
# def __new__(cls, *args, **kwargs):
# if not isinstance(cls.__instance, cls):
# cls.__instance = super().__new__(cls, *args, **kwargs)
# # cls.__instance = object.__new__(cls, *args, **kwargs)
# return cls.__instance
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
'''
get_profile 메소드에서 headers 라는 파라미터가 사용되는데
http 헤더의 값을 딕셔너리 형태로 전달하면 됩니다.
Authorization 헤더를 token_type(bearer) 와 인증토큰을 조합한 값으로 추가했습니다.
각 함수 반환데이터는 json 메소드를 통해 본문의 내용을 딕셔너리 형태로 반환해 줄 수도 있습니다.
물론 본문이 json 타입이 아닐 경우 에러가 발생합니다.
'''
def get_access_token(self, state, code):
res = requests.get(self.auth_url, params={'client_id': self.client_id, 'client_secret': self.secret_key,
'grant_type': self.grant_type, 'state': state, 'code': code})
return res.ok, res.json()
def get_profile(self, access_token, token_type='Bearer'):
res = requests.get(self.profile_url, headers={'Authorization': '{} {}'.format(token_type, access_token)}).json()
if res.get('resultcode') != '00':
return False, res.get('message')
else:
return True, res.get('response')
# NaverLoginMixin 에서 네이버의 api를 구현한 네이버 클라이언트를 naver_client 클래스변수로 추가했습니다.
# 네이버의 인증토큰 발급과 프로필 정보를 가져오는 두 가지의 기능을 제공합니다.
'''
naver_client로부터 token_infos 객체를 전달받는데 token_infos 객체는 아래와 같은 키를 갖는 딕셔너리 객체입니다.
1. error - 에러코드
2. error_description - 에러메시지
3. access_token - 인증토큰
4. refresh_token - 인증토큰 재발급토큰
5. expires_in - 인증토큰 만료기한(초)
6. token_type - 인증토큰 사용하는 api 호출시 인증방식(Authorization 헤더 타입)
'''
class NaverLoginMixin:
naver_client = NaverClient.instance()
def login_with_naver(self, state, code):
# 인증토근 발급
is_success, token_infos = self.naver_client.get_access_token(state, code)
if not is_success:
return False, '{} [{}]'.format(token_infos.get('error_desc'), token_infos.get('error'))
access_token = token_infos.get('access_token')
refresh_token = token_infos.get('refresh_token')
expires_in = token_infos.get('expires_in')
token_type = token_infos.get('token_type')
# 네이버 프로필 얻기
is_success, profiles = self.get_naver_profile(access_token, token_type)
if not is_success:
return False, profiles
# 프로필정보까지 정상적으로 받아오면 사용자 모델에서 get_or_create 메소드를 통해
# 동일한 이메일의 사용자가 있는 지 확인 후 없으면 새로 생성합니다.
# 소셜로그인은 가입과 로그인을 동시에 제공하는 것이 더 좋습니다.
# 이미 가입되어 있는 사용자라면 회원정보(이름)만 수정하면 되고,
# 가입되어 있지 않은 케이스라면 새로 회원정보를 생성해서 가입시켜 줍니다.
# 소셜로그인은 로컬 비밀번호가 필요없기 때문에 새로 사용자 데이터가 추가되는 경우라면
# set_password(None) 메소드를 통해 랜덤한 비밀번호를 생성해서 저장합니다.
# 이미 소셜로그인을 통해서 이메일에 대한 인증도 되었으니 is_active 값도 활성화 시켜주고
# 저장을 하면 가입이 완료입니다.
# 만일 이미 가입되어 있던 사용자라면 이메일과 비밀번호로도 로그인이 가능하고
# 네이버 소셜로그인으로도 로그인이 가능합니다.
# 사용자 생성 또는 업데이트
user, created = self.model.objects.get_or_create(email=profiles.get('email'))
if created: # 사용자 생성할 경우
user.set_password(None)
user.username = profiles.get('name')
user.is_active = True
user.save()
# 가입된 이후에 로그인처리까지 해줘야 합니다.
# 로그인은 auth 프레임워크의 login 함수를 이용합니다.
# login 함수는 사용자 데이터와 로그인처리를 해줄 인증백엔드의 경로가 필요합니다.
# 기본 인증모듈인 'django.contrib.auth.backends.ModelBackend' 는 username(email) 과 비밀번호를 이용해서
# 인증처리를 하는데 소셜로그인은 비밀번호를 전달받을 수가 없습니다.
# 어쩔 수 없이 소셜로그인을 위한 인증백엔드를 추가로 구현해줘야 합니다.
# 로그인
login(self.request, user, 'naver_oauth.user.oauth.backends.NaverBackend') # NaverBackend 를 통한 인증 시도
# login(self.request, user, NaverBackend)
# 소셜로그인의 마지막은 세션정보에 인증토큰정보를 추가하는 것입니다.
# 현재는 인증토큰이 필요없지만 네이버 api를 이용한 기능을 제공할 경우도 있습니다.
# 이 때 사용자의 인증토큰이 있어야만 사용자의 권한으로 네이버 서비스 api 기능들을 제공할 수 있는데
# 매번 재로그인을 할 수 없으니 인증토큰과 그 외 정보들을 세션에 저장합니다.
# 인증토큰 재발급토큰(refresh_token)도 함께 저장을 해야 인증토큰이 만료가 되더라도
# 재발급토큰으로 다시 인증토큰을 갱신할 수 있습니다.
# 만일 재발급토큰도 만료가 되었거나 문제가 있어서 인증토큰을 갱신할 수 없다면 로그아웃 처리 해주면 됩니다.
# 세션데이터 추가
self.set_session(access_token=access_token, refresh_token=refresh_token, expires_in=expires_in, token_type=token_type)
return True, user
# 인증토큰이 정상적으로 발급되었다면 회원가입을 위해 이메일과 사용자의 이름을 받아야 하는데,
# 네이버에서 profile api도 제공해주기 때문에 이것을 이용해서 받아오면 됩니다.
# get_naver_profile 메소드는 api를 통해 받아 온 프로필 정보를 검증하는 역할을 합니다.
# 프로필 정보는 사용자가 제공항목에 선택한 값들과 사용자의 id 값만 전달되는데
# 만일 이메일이나 이름을 선택하지 않은 경우 에러메시지를 반환하도록 했습니다.
def get_naver_profile(self, access_token, token_type):
is_success, profiles = self.naver_client.get_profile(access_token, token_type)
if not is_success:
return False, profiles
for profile in self.required_profiles:
if profile not in profiles:
return False, '{}은 필수정보입니다. 정보제공에 동의해주세요.'.format(profile)
return True, profiles
# 네이버의 api를 호출할 때 requests 라이브러리를 사용하여 호출하도록 했습니다.
# requests 는 파이썬의 표준 http 클라이언트보다 사용하기 간편하고, 무엇보다 직관적입니다.
# requests 라이브러리를 먼저 설치하세요.
# pip install requests
# reference : https://developers.naver.com/docs/login/web/web.md
# requests 모듈의 사용법을 알려드리면 get, post, put, delete 등의 함수들이 구현되어 있고,
# 각각의 함수는 함수명과 동일한 http 메소드로 요청을 합니다.
# 첫번째 위치 인자는 url 이고 그 외 파라미터는 keyword 인자로 전달하면 됩니다.
# get_profile 메소드에서 headers 라는 파라미터가 사용되는데
# http 헤더의 값을 딕셔너리 형태로 전달하면 됩니다.
# Authorization 헤더를 token_type(bearer) 와 인증토큰을 조합한 값으로 추가했습니다.
# 각 함수 반환데이터는 json 메소드를 통해 본문의 내용을 딕셔너리 형태로 반환해 줄 수도 있습니다.
# 물론 본문이 json 타입이 아닐 경우 에러가 발생합니다.
| ko | 0.999866 | # singleton 이라는 패턴 사용 # 첫번째 생성자 호출 때만 객체만 생성시키고 이후 생성자 호출부터는 먼저 생성된 객체를 공유하게 하는 방식 # NaverClient 클래스를 NaverLoginMixin 뿐만 아니라 다른 클래스에서도 공유하며 사용할 수 있습니다. # NaverClient 객체는 인스턴스변수가 없기 때문에 하나의 객체를 서로 공유하더라도 문제가 발생하지 않습니다. # 이렇게 인스턴스변수가 존재하지 않으나 여러 클래스에서 유틸리티처럼 사용하는 클래스의 경우 # 싱글턴 패턴을 많이 사용합니다. # 객체를 생성하는 비용이 줄어 서버의 가용성을 높이는 좋은 패턴이며 가장 간단한 방법을 구현함 # * 일반적으로 싱글턴은 생성자가 아니라 명시적으로 getInstance 라는 static 메소드를 제공해서 객체를 생성합니다. # getInstance 를 사용하지 않고 생성자를 사용해 객체를 생성하면 에러를 발생시켜 # 싱글턴으로 구현되었음을 개발자에게 알려주는 것이죠. # 원래 싱글턴 객체에 인스턴스변수를 추가하거나 클래스변수를 변경하면 안됩니다. # __instance = None # @classmethod # def __getInstance(cls): # return cls.__instance # @classmethod # def instance(cls, *args, **kargs): # cls.__instance = cls(*args, **kargs) # cls.instance = cls.__getInstance # return cls.__instance # def __new__(cls, *args, **kwargs): # if not isinstance(cls.__instance, cls): # cls.__instance = super().__new__(cls, *args, **kwargs) # # cls.__instance = object.__new__(cls, *args, **kwargs) # return cls.__instance # def __init__(self, *args, **kwargs): # super().__init__(*args, **kwargs) get_profile 메소드에서 headers 라는 파라미터가 사용되는데 http 헤더의 값을 딕셔너리 형태로 전달하면 됩니다. Authorization 헤더를 token_type(bearer) 와 인증토큰을 조합한 값으로 추가했습니다. 각 함수 반환데이터는 json 메소드를 통해 본문의 내용을 딕셔너리 형태로 반환해 줄 수도 있습니다. 물론 본문이 json 타입이 아닐 경우 에러가 발생합니다. # NaverLoginMixin 에서 네이버의 api를 구현한 네이버 클라이언트를 naver_client 클래스변수로 추가했습니다. # 네이버의 인증토큰 발급과 프로필 정보를 가져오는 두 가지의 기능을 제공합니다. naver_client로부터 token_infos 객체를 전달받는데 token_infos 객체는 아래와 같은 키를 갖는 딕셔너리 객체입니다. 1. error - 에러코드 2. error_description - 에러메시지 3. access_token - 인증토큰 4. refresh_token - 인증토큰 재발급토큰 5. expires_in - 인증토큰 만료기한(초) 6. token_type - 인증토큰 사용하는 api 호출시 인증방식(Authorization 헤더 타입) # 인증토근 발급 # 네이버 프로필 얻기 # 프로필정보까지 정상적으로 받아오면 사용자 모델에서 get_or_create 메소드를 통해 # 동일한 이메일의 사용자가 있는 지 확인 후 없으면 새로 생성합니다. # 소셜로그인은 가입과 로그인을 동시에 제공하는 것이 더 좋습니다. # 이미 가입되어 있는 사용자라면 회원정보(이름)만 수정하면 되고, # 가입되어 있지 않은 케이스라면 새로 회원정보를 생성해서 가입시켜 줍니다. 
# 소셜로그인은 로컬 비밀번호가 필요없기 때문에 새로 사용자 데이터가 추가되는 경우라면 # set_password(None) 메소드를 통해 랜덤한 비밀번호를 생성해서 저장합니다. # 이미 소셜로그인을 통해서 이메일에 대한 인증도 되었으니 is_active 값도 활성화 시켜주고 # 저장을 하면 가입이 완료입니다. # 만일 이미 가입되어 있던 사용자라면 이메일과 비밀번호로도 로그인이 가능하고 # 네이버 소셜로그인으로도 로그인이 가능합니다. # 사용자 생성 또는 업데이트 # 사용자 생성할 경우 # 가입된 이후에 로그인처리까지 해줘야 합니다. # 로그인은 auth 프레임워크의 login 함수를 이용합니다. # login 함수는 사용자 데이터와 로그인처리를 해줄 인증백엔드의 경로가 필요합니다. # 기본 인증모듈인 'django.contrib.auth.backends.ModelBackend' 는 username(email) 과 비밀번호를 이용해서 # 인증처리를 하는데 소셜로그인은 비밀번호를 전달받을 수가 없습니다. # 어쩔 수 없이 소셜로그인을 위한 인증백엔드를 추가로 구현해줘야 합니다. # 로그인 # NaverBackend 를 통한 인증 시도 # login(self.request, user, NaverBackend) # 소셜로그인의 마지막은 세션정보에 인증토큰정보를 추가하는 것입니다. # 현재는 인증토큰이 필요없지만 네이버 api를 이용한 기능을 제공할 경우도 있습니다. # 이 때 사용자의 인증토큰이 있어야만 사용자의 권한으로 네이버 서비스 api 기능들을 제공할 수 있는데 # 매번 재로그인을 할 수 없으니 인증토큰과 그 외 정보들을 세션에 저장합니다. # 인증토큰 재발급토큰(refresh_token)도 함께 저장을 해야 인증토큰이 만료가 되더라도 # 재발급토큰으로 다시 인증토큰을 갱신할 수 있습니다. # 만일 재발급토큰도 만료가 되었거나 문제가 있어서 인증토큰을 갱신할 수 없다면 로그아웃 처리 해주면 됩니다. # 세션데이터 추가 # 인증토큰이 정상적으로 발급되었다면 회원가입을 위해 이메일과 사용자의 이름을 받아야 하는데, # 네이버에서 profile api도 제공해주기 때문에 이것을 이용해서 받아오면 됩니다. # get_naver_profile 메소드는 api를 통해 받아 온 프로필 정보를 검증하는 역할을 합니다. # 프로필 정보는 사용자가 제공항목에 선택한 값들과 사용자의 id 값만 전달되는데 # 만일 이메일이나 이름을 선택하지 않은 경우 에러메시지를 반환하도록 했습니다. # 네이버의 api를 호출할 때 requests 라이브러리를 사용하여 호출하도록 했습니다. # requests 는 파이썬의 표준 http 클라이언트보다 사용하기 간편하고, 무엇보다 직관적입니다. # requests 라이브러리를 먼저 설치하세요. # pip install requests # reference : https://developers.naver.com/docs/login/web/web.md # requests 모듈의 사용법을 알려드리면 get, post, put, delete 등의 함수들이 구현되어 있고, # 각각의 함수는 함수명과 동일한 http 메소드로 요청을 합니다. # 첫번째 위치 인자는 url 이고 그 외 파라미터는 keyword 인자로 전달하면 됩니다. # get_profile 메소드에서 headers 라는 파라미터가 사용되는데 # http 헤더의 값을 딕셔너리 형태로 전달하면 됩니다. # Authorization 헤더를 token_type(bearer) 와 인증토큰을 조합한 값으로 추가했습니다. # 각 함수 반환데이터는 json 메소드를 통해 본문의 내용을 딕셔너리 형태로 반환해 줄 수도 있습니다. # 물론 본문이 json 타입이 아닐 경우 에러가 발생합니다. | 2.04624 | 2 |
documentation/models/DOC_HSE.py | ElNahoko/HSE_ARNOSH | 1 | 6622652 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from odoo import models, fields, api
class DOCHSE(models.Model):
    """HSE document: a binary file attached to a category."""
    _name = 'document'
    _rec_name = 'categorie'
    categorie = fields.Many2one(
        'document.categorie',
        string="Catégorie",
        required=False, )
    description = fields.Char(
        string="Description")
    # Raw file content (base64-encoded by Odoo's Binary field).
    Document_binaire = fields.Binary(
        string="Fichier",)
    # Original filename of the uploaded file.
    Nom_Document = fields.Char(
        string="Filename",
        required=False, )

    @api.multi
    def print_doc(self):
        """Render the document QWeb report for the selected records."""
        return self.env.ref('documentation.action_report_doc').report_action(self)
class DOCUMENTCATEGORIE(models.Model):
    """Document category, stamped with its creation time."""
    _name = 'document.categorie'
    _rec_name = 'categorie'
    categorie = fields.Char(
        string="catégorie",
        required=False, )
    # Creation time: defaults to "now" and is read-only afterwards.
    date = fields.Datetime(
        string="Date",
        default=lambda s: fields.Datetime.now(),
        invisible=False,
        readonly=True,
        required=False, )
| # -*- coding: utf-8 -*-
from odoo import models, fields, api
class DOCHSE(models.Model):
_name = 'document'
_rec_name = 'categorie'
categorie = fields.Many2one(
'document.categorie',
string="Catégorie",
required=False, )
description = fields.Char(
string="Description")
Document_binaire = fields.Binary(
string="Fichier",)
Nom_Document = fields.Char(
string="Filename",
required=False, )
@api.multi
def print_doc(self):
return self.env.ref('documentation.action_report_doc').report_action(self)
class DOCUMENTCATEGORIE(models.Model):
_name = 'document.categorie'
_rec_name = 'categorie'
categorie = fields.Char(
string="catégorie",
required=False, )
date = fields.Datetime(
string="Date",
default=lambda s: fields.Datetime.now(),
invisible=False,
readonly=True,
required=False, ) | en | 0.769321 | # -*- coding: utf-8 -*- | 2.089961 | 2 |
tuenti/tuenti_challenge_4/qualification/3_gambler_cup/gambler_cup.py | GMadorell/programming-challenges | 0 | 6622653 | #!/usr/bin/env python
"""
Problem description.
"""
from __future__ import division
import sys
import math
class GamblerCupSolver(object):
def __init__(self, output_file=sys.stdout):
self.__output_file = output_file
def solve(self, instances):
solutions = []
for instance in instances:
solutions.append(self.solve_instance(instance))
for i, solution in enumerate(solutions, start=1):
newline_needed = True if i != len(solutions) else False
if solution.is_integer():
self.__output_file.write("{1:.0f}{2}".format(i, solution, "\n" if newline_needed else ""))
else:
self.__output_file.write("{1:.2f}{2}".format(i, solution, "\n" if newline_needed else ""))
def solve_instance(self, instance):
return math.sqrt(instance.x * instance.x + instance.y * instance.y)
class GamblerCupInstance(object):
    """Container for one problem instance: a point (x, y)."""

    def __init__(self):
        # Coordinates are filled in by the parser after construction.
        self.x = self.y = None
class GamblerCupParser(object):
    """Parses problem instances from stdin.

    Expected input: the first line holds the number of samples, each
    following line holds two integers "x y".
    """

    def __init__(self):
        # List comprehension instead of map(): the original subscripted the
        # map() result, which is not indexable on Python 3.
        data = [line.strip() for line in sys.stdin.readlines()]
        # The original read int(data[0][0]) -- only the first character --
        # which broke for sample counts of 10 or more.
        self.amount_samples = int(data[0])
        self.data = data[1:]
        self.instances = []
        self.parse()

    def parse(self):
        """Populate self.instances from the raw data lines."""
        for line in self.data:
            row = line.split()
            instance = GamblerCupInstance()
            instance.x = int(row[0])
            instance.y = int(row[1])
            self.instances.append(instance)

    def get_data_as_type(self, type_):
        """Return the data rows with every element converted to type_.

        Rows are strings, so "elements" are individual characters (the
        original map()-based behaviour, returned as concrete lists).
        """
        return [[type_(element) for element in row] for row in self.data]
if __name__ == "__main__":
    # Entry point: read instances from stdin, solve them, print solutions.
    parser = GamblerCupParser()
    solver = GamblerCupSolver()
solver.solve(parser.instances) | #!/usr/bin/env python
"""
Problem description.
"""
from __future__ import division
import sys
import math
class GamblerCupSolver(object):
def __init__(self, output_file=sys.stdout):
self.__output_file = output_file
def solve(self, instances):
solutions = []
for instance in instances:
solutions.append(self.solve_instance(instance))
for i, solution in enumerate(solutions, start=1):
newline_needed = True if i != len(solutions) else False
if solution.is_integer():
self.__output_file.write("{1:.0f}{2}".format(i, solution, "\n" if newline_needed else ""))
else:
self.__output_file.write("{1:.2f}{2}".format(i, solution, "\n" if newline_needed else ""))
def solve_instance(self, instance):
return math.sqrt(instance.x * instance.x + instance.y * instance.y)
class GamblerCupInstance(object):
def __init__(self):
self.x = None
self.y = None
class GamblerCupParser(object):
def __init__(self):
data = sys.stdin.readlines()
data = map(lambda s: s.strip(), data)
self.amount_samples = int(data[0][0])
self.data = data[1:]
self.instances = []
self.parse()
def parse(self):
"""
This method should populate the instances list.
"""
for line in self.data:
row = line.strip().split()
instance = GamblerCupInstance()
instance.x = int(row[0])
instance.y = int(row[1])
self.instances.append(instance)
def get_data_as_type(self, type_):
return map(lambda row: map(lambda element: type_(element), row), self.data)
if __name__ == "__main__":
parser = GamblerCupParser()
solver = GamblerCupSolver()
solver.solve(parser.instances) | en | 0.621619 | #!/usr/bin/env python Problem description. This method should populate the instances list. | 3.219369 | 3 |
machina/apps/forum/receivers.py | OneRainbowDev/django-machina | 1 | 6622654 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db.models import F
from django.dispatch import receiver
from machina.apps.forum.signals import forum_viewed
@receiver(forum_viewed)
def update_forum_redirects_counter(sender, forum, user, request, response, **kwargs):
    """
    Receiver to handle the update of the link redirects counter associated with link forums.
    """
    # Only count views of link-type forums, and only when the forum has
    # redirect tracking data (presumably the link_redirects option -- TODO
    # confirm against the Forum model).
    if forum.is_link and forum.link_redirects:
        # F() expression: the increment runs atomically in SQL, avoiding
        # read-modify-write races between concurrent requests.
        forum.link_redirects_count = F('link_redirects_count') + 1
        # NOTE(review): after save(), the attribute still holds the F()
        # expression rather than the new value; refresh_from_db() before
        # reading it again.
        forum.save()
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db.models import F
from django.dispatch import receiver
from machina.apps.forum.signals import forum_viewed
@receiver(forum_viewed)
def update_forum_redirects_counter(sender, forum, user, request, response, **kwargs):
"""
Receiver to handle the update of the link redirects counter associated with link forums.
"""
if forum.is_link and forum.link_redirects:
forum.link_redirects_count = F('link_redirects_count') + 1
forum.save()
| en | 0.887596 | # -*- coding: utf-8 -*- Receiver to handle the update of the link redirects counter associated with link forums. | 2.045086 | 2 |
game/utils/files.py | m4tx/hackthespace | 8 | 6622655 | import shutil
def cat_files(out_path: str, *in_files: str):
    """Concatenate the given input files into a single output file.

    The inputs are copied in the order given, as raw bytes, so this works
    for binary and text files alike.
    """
    with open(out_path, 'wb') as destination:
        for source_path in in_files:
            with open(source_path, 'rb') as source:
                shutil.copyfileobj(source, destination)
| import shutil
def cat_files(out_path: str, *in_files: str):
with open(out_path, 'wb') as out_file:
for in_path in in_files:
with open(in_path, 'rb') as in_file:
shutil.copyfileobj(in_file, out_file)
| none | 1 | 2.750143 | 3 | |
text_blind_watermark/text_blind_watermark.py | guofei9987/blind_watermark_text | 53 | 6622656 | import random
class TextBlindWatermark:
    """Hides a text watermark inside carrier text using chr(127) markers.

    The watermark bytes are XOR-scrambled with a password-seeded random
    stream, rendered as a bit string framed by sentinel '1' bits, and each
    '1' bit is encoded by inserting chr(127) after the corresponding
    carrier character.
    """

    def __init__(self, password):
        self.password = password
        self.text = None
        self.wm_bin = None

    def read_wm(self, watermark):
        """Scramble the watermark and store it as a framed bit string."""
        random.seed(self.password)
        scrambled = (
            format(byte ^ random.randint(0, 255), '08b')
            for byte in watermark.encode('utf-8')
        )
        # Sentinel '1' bits at both ends; extraction strips everything
        # outside the first and last '1'.
        self.wm_bin = '1{}1'.format(''.join(scrambled))

    def read_text(self, text):
        """Store the carrier text."""
        self.text = text

    def embed(self):
        """Return the carrier text with the watermark bits embedded."""
        wm_bin, text = self.wm_bin, self.text
        len_wm_bin, len_text = len(wm_bin), len(text)
        assert len_text > len_wm_bin, "文本至少{},实际{}".format(len_wm_bin, len_text)
        # TODO: wrap the watermark around for texts much longer than it
        pieces = []
        for idx, ch in enumerate(text):
            pieces.append(ch)
            if idx < len_wm_bin and wm_bin[idx] == "1":
                pieces.append(chr(127))
        return ''.join(pieces)

    def extract(self, text_embed):
        """Recover and descramble the watermark from embedded text."""
        bits = []
        pos = 0
        while pos < len(text_embed):
            if text_embed[pos] == chr(127):
                # Marker: a '1' bit; skip the marker and the next char.
                bits.append('1')
                pos += 2
            else:
                bits.append('0')
                pos += 1
        bit_str = ''.join(bits)
        # Strip everything outside the sentinel '1' bits.
        start = bit_str.find("1")
        end = len(bit_str) - bit_str[::-1].find("1")
        payload = bit_str[start + 1:end - 1]
        random.seed(self.password)
        return bytes(
            int(payload[8 * i:8 * i + 8], base=2) ^ random.randint(0, 255)
            for i in range(len(payload) // 8)
        ).decode('utf-8')
def embed(sentence, wm, password):
    """Embed watermark *wm* into *sentence*, scrambled with *password*.

    The watermark bytes are XOR-scrambled with a password-seeded random
    stream, framed by sentinel '1' bits, and each '1' bit is marked by a
    chr(127) inserted after the corresponding sentence character.
    """
    random.seed(password)
    scrambled = [format(byte ^ random.randint(0, 255), '08b')
                 for byte in wm.encode('utf-8')]
    # Sentinel '1' bits at both ends; extraction strips everything outside.
    wm_bin = '1' + ''.join(scrambled) + '1'
    len_bin_text = len(wm_bin)
    len_sentence = len(sentence)
    assert len_sentence > len_bin_text, "文本长度太短了,至少{}".format(len_bin_text)
    # TODO: wrap the watermark around for sentences much longer than it
    chunks = []
    for idx, ch in enumerate(sentence):
        chunks.append(ch)
        if idx < len_bin_text and wm_bin[idx] == "1":
            chunks.append(chr(127))
    return ''.join(chunks)
def extract(sentence_embed, password):
    """Recover the watermark that :func:`embed` hid in *sentence_embed*."""
    bits = []
    pos = 0
    while pos < len(sentence_embed):
        if sentence_embed[pos] == chr(127):
            # Marker char: a '1' bit; skip the marker and the next char.
            bits.append('1')
            pos += 2
        else:
            bits.append('0')
            pos += 1
    bit_str = ''.join(bits)
    # Strip everything outside the sentinel '1' bits.
    start = bit_str.find("1")
    end = len(bit_str) - bit_str[::-1].find("1")
    payload = bit_str[start + 1:end - 1]
    random.seed(password)
    descrambled = [int(payload[8 * i:8 * i + 8], base=2) ^ random.randint(0, 255)
                   for i in range(len(payload) // 8)]
    return bytes(descrambled).decode('utf-8')
| import random
class TextBlindWatermark:
def __init__(self, password):
self.password = password
self.text, self.wm_bin = None, None
def read_wm(self, watermark):
random.seed(self.password)
wm_bin = [format(i ^ random.randint(0, 255), '08b') for i in watermark.encode('utf-8')] # 8位2进制格式
# 头尾各放一个1。提取过程中把首尾的0去掉。
self.wm_bin = '1' + ''.join(wm_bin) + '1'
def read_text(self, text):
self.text = text
def embed(self):
wm_bin, text = self.wm_bin, self.text
# 打入水印
len_wm_bin, len_text = len(wm_bin), len(text)
assert len_text > len_wm_bin, "文本至少{},实际{}".format(len_wm_bin, len_text)
# TODO:循环嵌入
sentence_embed = ""
for idx in range(len_text):
sentence_embed += text[idx]
if idx < len_wm_bin:
if wm_bin[idx] == "1":
sentence_embed += chr(127)
return sentence_embed
def extract(self, text_embed):
wm_extract_bin = ""
idx = 0
while idx < len(text_embed):
if text_embed[idx] != chr(127):
idx += 1
wm_extract_bin += '0'
else:
idx += 2
wm_extract_bin += '1'
first_zero = wm_extract_bin.find("1")
last_zero = len(wm_extract_bin) - wm_extract_bin[::-1].find("1")
wm_extract_bin = wm_extract_bin[first_zero + 1:last_zero - 1]
random.seed(self.password)
return bytes([int(wm_extract_bin[8 * i:8 * i + 8], base=2) ^ random.randint(0, 255) for i in
range(len(wm_extract_bin) // 8)]).decode('utf-8')
def embed(sentence, wm, password):
random.seed(password)
wm_bin = [format(i ^ random.randint(0, 255), '08b') for i in wm.encode('utf-8')] # 8位2进制格式
# 头尾各放一个1。提取过程中把首尾的0去掉。
wm_bin = '1' + ''.join(wm_bin) + '1'
# 打入水印
len_bin_text = len(wm_bin)
len_sentence = len(sentence)
assert len_sentence > len_bin_text, "文本长度太短了,至少{}".format(len_bin_text)
# TODO:循环嵌入
# 嵌入水印
sentence_embed = ""
for idx in range(len_sentence):
sentence_embed += sentence[idx]
if idx < len_bin_text:
if wm_bin[idx] == "1":
sentence_embed += chr(127)
return sentence_embed
def extract(sentence_embed, password):
wm_extract_bin = ""
idx = 0
while idx < len(sentence_embed):
if sentence_embed[idx] != chr(127):
idx += 1
wm_extract_bin += '0'
else:
idx += 2
wm_extract_bin += '1'
first_zero = wm_extract_bin.find("1")
last_zero = len(wm_extract_bin) - wm_extract_bin[::-1].find("1")
wm_extract_bin = wm_extract_bin[first_zero + 1:last_zero - 1]
random.seed(password)
s_out = bytes([int(wm_extract_bin[8 * i:8 * i + 8], base=2) ^ random.randint(0, 255) for i in
range(len(wm_extract_bin) // 8)]).decode('utf-8')
return s_out
| zh | 0.901575 | # 8位2进制格式 # 头尾各放一个1。提取过程中把首尾的0去掉。 # 打入水印 # TODO:循环嵌入 # 8位2进制格式 # 头尾各放一个1。提取过程中把首尾的0去掉。 # 打入水印 # TODO:循环嵌入 # 嵌入水印 | 2.876472 | 3 |
src/main.gyp | pwnall/sanctum | 49 | 6622657 | <reponame>pwnall/sanctum
{
'includes': [
'common.gypi',
],
'targets': [
{
'target_name': 'tests',
'type': 'none',
'dependencies': [
'bare/bare.gyp:bare_tests',
'crypto/crypto.gyp:crypto_tests',
'monitor/monitor.gyp:monitor_tests',
],
},
{
'target_name': 'libs',
'type': 'none',
'dependencies': [
'bare/bare.gyp:bare',
'crypto/crypto.gyp:crypto',
#'monitor/monitor.gyp:monitor',
],
}
],
}
| {
'includes': [
'common.gypi',
],
'targets': [
{
'target_name': 'tests',
'type': 'none',
'dependencies': [
'bare/bare.gyp:bare_tests',
'crypto/crypto.gyp:crypto_tests',
'monitor/monitor.gyp:monitor_tests',
],
},
{
'target_name': 'libs',
'type': 'none',
'dependencies': [
'bare/bare.gyp:bare',
'crypto/crypto.gyp:crypto',
#'monitor/monitor.gyp:monitor',
],
}
],
} | en | 0.357274 | #'monitor/monitor.gyp:monitor', | 1.10824 | 1 |
acceptance_tests/test_full_chain.py | pleasedontbelong/raincoat | 0 | 6622658 | import traceback
import sh
from raincoat import __version__
def main():
    """Run raincoat against the sample project and check its report.

    Note that this test is excluded from coverage because coverage should be
    for unit tests.
    """
    # Exit code 1 is expected: raincoat is supposed to find the
    # intentionally-planted issues in the test project.
    result = sh.raincoat(
        "acceptance_tests/test_project", exclude="*ignored*",
        _ok_code=1)
    output = result.stdout.decode("utf-8")
    try:
        check_output(output)
    except AssertionError:
        # Dump the whole output so a failing assertion is debuggable.
        print("Full output:\n", output)
        raise
    print("Ok")
def check_output(output):
    """Assert that the raincoat report contains every expected finding
    (and none of the excluded ones)."""
    details = ("raincoat == 0.1.4 vs {} "
               "@ raincoat/_acceptance_test.py:use_umbrella "
               "(from acceptance_tests/test_project/__init__.py:7)").format(
        __version__)
    assert details in output
    assert "_acceptance_test.py:Umbrella.open" in output
    # space left intentionally at the end to not match the previous line
    assert "_acceptance_test.py:Umbrella " in output
    assert "_acceptance_test.py:whole module" in output
    assert "- umbrella.keep_over_me()" in output
    assert "+ action(umbrella)" in output
    # The excluded ("*ignored*") project file must not be reported.
    assert "ignored" not in output
    assert "27754" not in output
    assert "Ticket #25981 has been merged in Django" in output
    assert "peopledoc/raincoat@a35df1d vs master branch" in output
    assert ("non_existant does not exist in raincoat/_acceptance_test.py"
            in output)
    assert "raincoat/non_existant.py does not exist" in output
if __name__ == '__main__':
main()
| import traceback
import sh
from raincoat import __version__
def main():
"""
Note that this test is excluded from coverage because coverage should be
for unit tests.
"""
result = sh.raincoat(
"acceptance_tests/test_project", exclude="*ignored*",
_ok_code=1)
output = result.stdout.decode("utf-8")
try:
check_output(output)
except AssertionError:
print("Full output:\n", output)
raise
print("Ok")
def check_output(output):
details = ("raincoat == 0.1.4 vs {} "
"@ raincoat/_acceptance_test.py:use_umbrella "
"(from acceptance_tests/test_project/__init__.py:7)").format(
__version__)
assert details in output
assert "_acceptance_test.py:Umbrella.open" in output
# space left intentionally at the end to not match the previous line
assert "_acceptance_test.py:Umbrella " in output
assert "_acceptance_test.py:whole module" in output
assert "- umbrella.keep_over_me()" in output
assert "+ action(umbrella)" in output
assert "ignored" not in output
assert "27754" not in output
assert "Ticket #25981 has been merged in Django" in output
assert "peopledoc/raincoat@a35df1d vs master branch" in output
assert ("non_existant does not exist in raincoat/_acceptance_test.py"
in output)
assert "raincoat/non_existant.py does not exist" in output
if __name__ == '__main__':
main()
| en | 0.964645 | Note that this test is excluded from coverage because coverage should be for unit tests. # space left intentionally at the end to not match the previous line #25981 has been merged in Django" in output | 2.351742 | 2 |
content/apps.py | japsu/tracontent | 0 | 6622659 | from django.apps import AppConfig
class ContentAppConfig(AppConfig):
name = 'content'
verbose_name = 'Sisältö'
| from django.apps import AppConfig
class ContentAppConfig(AppConfig):
name = 'content'
verbose_name = 'Sisältö'
| none | 1 | 1.085894 | 1 | |
taiseia101/core/base_dev_service.py | slee124565/pytwseia | 3 | 6622660 |
from .base_obj import *
import struct
import logging
logger = logging.getLogger(__name__)
class DataTypeCode(BaseObject):
"""deprecated by ValueTypeCode"""
ENUM16 = 0x01
ENUM16_BIT = 0x06
UNIT8 = 0x0a
UNIT16 = 0x0b
UINT32 = 0x0c
UINT64 = 0x0d
INT8 = 0x0f
INT16 = 0x10
INT32 = 0x11
TIME_MD = 0x14
TIME_HM = 0x15
TIME_MS = 0x16
TIME_YMDHMS = 0x17
TIME_YMDHM = 0x18
STR = 0x20
class DeviceBaseService(BasePdu):
cmd_type_code = None # read or write
# service_data = None
high_byte = None
low_byte = None
def __init__(self, pdu=None):
if type(pdu) is list:
super(DeviceBaseService, self).__init__(pdu)
self.cmd_type_code = CmdTypeCode.get_code(pdu[2])
self.service_id = pdu[2] & 0x7f
self.high_byte = pdu[3]
self.low_byte = pdu[4]
def __str__(self):
serv_param_code_cls = getattr(self.__class__, 'ParamCode', None)
if self.high_byte == 0xff and self.low_byte == 0xff:
value_text = 'ERROR'
else:
value_text = self.get_value()
if serv_param_code_cls:
value_text = serv_param_code_cls.text(self.get_value())
return '{}({}, {})'.format(self.__class__.__name__,
CmdTypeCode.text(self.cmd_type_code),
value_text)
@classmethod
def read_txt_cmd(cls):
raise Exception('{} classmethod read_txt_cmd not implemented'.format(cls.__name__))
@classmethod
def write_txt_cmd(cls, param=None):
raise Exception('{} classmethod write_txt_cmd not implemented'.format(cls.__name__))
def get_value(self):
return self.high_byte * 0x100 + self.low_byte
def to_spec(self):
cmd_info = 'R'
if self.cmd_type_code == CmdTypeCode.WRITE:
cmd_info = 'RW'
return '{}(code 0x{:02x}, {}, min {}, max {})'.format(
self.__class__.__name__, self.service_id, cmd_info, self.high_byte, self.low_byte)
def to_pdu(self):
return [((self.cmd_type_code << 7) | self.service_id),
self.high_byte, self.low_byte]
def to_json(self):
data = {
'cmd_type_code': self.cmd_type_code,
'service_id': self.service_id,
# 'high_byte': self.high_byte,
# 'low_byte': self.low_byte
}
if self.high_byte is not None:
data['high_byte'] = self.high_byte
if self.low_byte is not None:
data['low_byte'] = self.low_byte
return data
class DeviceDataService(DeviceBaseService):
"""for device with data_kind_code is DeviceBaseService.DataKindCode.MULTIPLE"""
data_type_id = None
# data_len = None
# datas = None
data_pdu = None
def __init__(self, pdu=None):
super(DeviceDataService, self).__init__(pdu)
# self.high_byte = self.low_byte = None
self.data_type_id = pdu[1]
self.data_pdu = pdu[2:]
def to_json(self):
data = super(DeviceDataService, self).to_json()
if self.data_type_id is not None:
data['data_type_id'] = self.data_type_id
if self.data_pdu is not None:
data['data_hex'] = get_byte_list_hex_str(self.data_pdu)
return data
class DeviceEnum16Service(DeviceBaseService):
pass
class DeviceCommonOnOffService(DeviceEnum16Service):
class ParamCode(BaseObject):
OFF = 0
ON = 1
class DeviceFeatureLevelService(DeviceEnum16Service):
class ParamCode(BaseObject):
LEVEL_0 = 0
LEVEL_1 = 1
LEVEL_2 = 2
LEVEL_3 = 3
LEVEL_4 = 4
LEVEL_5 = 5
LEVEL_6 = 6
LEVEL_7 = 7
LEVEL_8 = 8
LEVEL_9 = 9
LEVEL_10 = 10
LEVEL_11 = 11
LEVEL_12 = 12
LEVEL_13 = 13
LEVEL_14 = 14
LEVEL_15 = 15
@classmethod
def text(cls, code):
return 'level {}'.format(code)
class DeviceEnum16BitService(DeviceBaseService):
def get_value(self):
value = super(DeviceEnum16BitService, self).get_value()
return '{:016}'.format(value)
def get_enum_bit_value(self, bit_index):
if 0 <= bit_index <= 7:
mask = 1 << bit_index
return (self.low_byte & mask) >> bit_index
elif 8 <= bit_index <= 15:
mask = 1 << (bit_index-8)
return (self.low_byte & mask) >> (bit_index-8)
else:
raise ValueError('{} bit_index {} error'.format(self.__class__.__name__, bit_index))
class DeviceUint8Service(DeviceBaseService):
def get_value(self):
return struct.unpack('B', chr(self.low_byte))[0]
class DeviceUint16Service(DeviceBaseService):
pass
class DeviceInt8Service(DeviceBaseService):
def get_value(self):
return struct.unpack('b', chr(self.low_byte))[0]
class DeviceTimeMDService(DeviceBaseService):
def get_value(self):
return self.high_byte, self.low_byte
def get_month_value(self):
return self.high_byte
def get_day_value(self):
return self.low_byte
class DeviceTimeHMService(DeviceBaseService):
def get_value(self):
return self.high_byte, self.low_byte
def get_hour_value(self):
return self.high_byte
def get_minute_value(self):
return self.low_byte
class DeviceTimeMSService(DeviceBaseService):
def get_value(self):
return self.high_byte, self.low_byte
def get_minute_value(self):
return self.high_byte
def get_second_value(self):
return self.low_byte
|
from .base_obj import *
import struct
import logging
logger = logging.getLogger(__name__)
class DataTypeCode(BaseObject):
"""deprecated by ValueTypeCode"""
ENUM16 = 0x01
ENUM16_BIT = 0x06
UNIT8 = 0x0a
UNIT16 = 0x0b
UINT32 = 0x0c
UINT64 = 0x0d
INT8 = 0x0f
INT16 = 0x10
INT32 = 0x11
TIME_MD = 0x14
TIME_HM = 0x15
TIME_MS = 0x16
TIME_YMDHMS = 0x17
TIME_YMDHM = 0x18
STR = 0x20
class DeviceBaseService(BasePdu):
cmd_type_code = None # read or write
# service_data = None
high_byte = None
low_byte = None
def __init__(self, pdu=None):
if type(pdu) is list:
super(DeviceBaseService, self).__init__(pdu)
self.cmd_type_code = CmdTypeCode.get_code(pdu[2])
self.service_id = pdu[2] & 0x7f
self.high_byte = pdu[3]
self.low_byte = pdu[4]
def __str__(self):
serv_param_code_cls = getattr(self.__class__, 'ParamCode', None)
if self.high_byte == 0xff and self.low_byte == 0xff:
value_text = 'ERROR'
else:
value_text = self.get_value()
if serv_param_code_cls:
value_text = serv_param_code_cls.text(self.get_value())
return '{}({}, {})'.format(self.__class__.__name__,
CmdTypeCode.text(self.cmd_type_code),
value_text)
@classmethod
def read_txt_cmd(cls):
raise Exception('{} classmethod read_txt_cmd not implemented'.format(cls.__name__))
@classmethod
def write_txt_cmd(cls, param=None):
raise Exception('{} classmethod write_txt_cmd not implemented'.format(cls.__name__))
def get_value(self):
return self.high_byte * 0x100 + self.low_byte
def to_spec(self):
cmd_info = 'R'
if self.cmd_type_code == CmdTypeCode.WRITE:
cmd_info = 'RW'
return '{}(code 0x{:02x}, {}, min {}, max {})'.format(
self.__class__.__name__, self.service_id, cmd_info, self.high_byte, self.low_byte)
def to_pdu(self):
return [((self.cmd_type_code << 7) | self.service_id),
self.high_byte, self.low_byte]
def to_json(self):
data = {
'cmd_type_code': self.cmd_type_code,
'service_id': self.service_id,
# 'high_byte': self.high_byte,
# 'low_byte': self.low_byte
}
if self.high_byte is not None:
data['high_byte'] = self.high_byte
if self.low_byte is not None:
data['low_byte'] = self.low_byte
return data
class DeviceDataService(DeviceBaseService):
"""for device with data_kind_code is DeviceBaseService.DataKindCode.MULTIPLE"""
data_type_id = None
# data_len = None
# datas = None
data_pdu = None
def __init__(self, pdu=None):
super(DeviceDataService, self).__init__(pdu)
# self.high_byte = self.low_byte = None
self.data_type_id = pdu[1]
self.data_pdu = pdu[2:]
def to_json(self):
data = super(DeviceDataService, self).to_json()
if self.data_type_id is not None:
data['data_type_id'] = self.data_type_id
if self.data_pdu is not None:
data['data_hex'] = get_byte_list_hex_str(self.data_pdu)
return data
class DeviceEnum16Service(DeviceBaseService):
pass
class DeviceCommonOnOffService(DeviceEnum16Service):
class ParamCode(BaseObject):
OFF = 0
ON = 1
class DeviceFeatureLevelService(DeviceEnum16Service):
class ParamCode(BaseObject):
LEVEL_0 = 0
LEVEL_1 = 1
LEVEL_2 = 2
LEVEL_3 = 3
LEVEL_4 = 4
LEVEL_5 = 5
LEVEL_6 = 6
LEVEL_7 = 7
LEVEL_8 = 8
LEVEL_9 = 9
LEVEL_10 = 10
LEVEL_11 = 11
LEVEL_12 = 12
LEVEL_13 = 13
LEVEL_14 = 14
LEVEL_15 = 15
@classmethod
def text(cls, code):
return 'level {}'.format(code)
class DeviceEnum16BitService(DeviceBaseService):
def get_value(self):
value = super(DeviceEnum16BitService, self).get_value()
return '{:016}'.format(value)
def get_enum_bit_value(self, bit_index):
if 0 <= bit_index <= 7:
mask = 1 << bit_index
return (self.low_byte & mask) >> bit_index
elif 8 <= bit_index <= 15:
mask = 1 << (bit_index-8)
return (self.low_byte & mask) >> (bit_index-8)
else:
raise ValueError('{} bit_index {} error'.format(self.__class__.__name__, bit_index))
class DeviceUint8Service(DeviceBaseService):
def get_value(self):
return struct.unpack('B', chr(self.low_byte))[0]
class DeviceUint16Service(DeviceBaseService):
pass
class DeviceInt8Service(DeviceBaseService):
def get_value(self):
return struct.unpack('b', chr(self.low_byte))[0]
class DeviceTimeMDService(DeviceBaseService):
def get_value(self):
return self.high_byte, self.low_byte
def get_month_value(self):
return self.high_byte
def get_day_value(self):
return self.low_byte
class DeviceTimeHMService(DeviceBaseService):
def get_value(self):
return self.high_byte, self.low_byte
def get_hour_value(self):
return self.high_byte
def get_minute_value(self):
return self.low_byte
class DeviceTimeMSService(DeviceBaseService):
def get_value(self):
return self.high_byte, self.low_byte
def get_minute_value(self):
return self.high_byte
def get_second_value(self):
return self.low_byte
| en | 0.67366 | deprecated by ValueTypeCode # read or write # service_data = None # 'high_byte': self.high_byte, # 'low_byte': self.low_byte for device with data_kind_code is DeviceBaseService.DataKindCode.MULTIPLE # data_len = None # datas = None # self.high_byte = self.low_byte = None | 2.354253 | 2 |
tests/test_id_lib.py | beyond-blockchain/bbc1-lib-std | 1 | 6622661 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import pytest
import sys
import time
sys.path.extend(["../"])
from bbc1.lib import id_lib
from bbc1.lib.app_support_lib import TransactionLabel
from bbc1.core import bbc_app
from bbc1.core import bbclib
from bbc1.core.bbc_config import DEFAULT_CORE_PORT
@pytest.fixture()
def default_domain_id():
domain_id = bbclib.get_new_id("test_id_lib", include_timestamp=False)
tmpclient = bbc_app.BBcAppClient(port=DEFAULT_CORE_PORT, multiq=False, loglevel="all")
tmpclient.domain_setup(domain_id)
tmpclient.callback.synchronize()
tmpclient.unregister_from_core()
return domain_id
def test_default_map_creation(default_domain_id):
NUM_KEYPAIRS = 5
idPubkeyMap = id_lib.BBcIdPublickeyMap(default_domain_id)
user_id, keypairs = idPubkeyMap.create_user_id(num_pubkeys=NUM_KEYPAIRS)
assert len(keypairs) == NUM_KEYPAIRS
for i in range(NUM_KEYPAIRS):
assert idPubkeyMap.is_mapped(user_id, keypairs[i].public_key) == True
def test_map_update(default_domain_id):
idPubkeyMap = id_lib.BBcIdPublickeyMap(default_domain_id)
user_id, keypairs = idPubkeyMap.create_user_id(num_pubkeys=1)
assert len(keypairs) == 1
assert idPubkeyMap.is_mapped(user_id, keypairs[0].public_key) == True
public_keys = []
for i in range(3):
keypair = bbclib.KeyPair()
keypair.generate()
public_keys.append(keypair.public_key)
tx = idPubkeyMap.update(user_id, public_keys_to_replace=public_keys,
keypair=keypairs[0])
assert idPubkeyMap.is_mapped(user_id, keypairs[0].public_key) == False
for i in range(3):
assert idPubkeyMap.is_mapped(user_id, public_keys[i]) == True
def test_get_map(default_domain_id):
NUM_KEYPAIRS = 3
idPubkeyMap = id_lib.BBcIdPublickeyMap(default_domain_id)
user_id, keypairs = idPubkeyMap.create_user_id(num_pubkeys=NUM_KEYPAIRS)
assert len(keypairs) == NUM_KEYPAIRS
public_keys, key_types = idPubkeyMap.get_mapped_public_keys(user_id)
assert len(public_keys) == NUM_KEYPAIRS
assert len(key_types) == NUM_KEYPAIRS
for i in range(NUM_KEYPAIRS):
assert bytes(keypairs[i].public_key) == public_keys[i]
assert keypairs[i].curvetype == key_types[i]
def test_map_creation_with_pubkeys(default_domain_id):
NUM_KEYPAIRS = 3
public_keys = []
for i in range(NUM_KEYPAIRS):
keypair = bbclib.KeyPair()
keypair.generate()
public_keys.append(keypair.public_key)
idPubkeyMap = id_lib.BBcIdPublickeyMap(default_domain_id)
user_id, keypairs = idPubkeyMap.create_user_id(public_keys=public_keys)
assert len(keypairs) == 0
for i in range(NUM_KEYPAIRS):
assert idPubkeyMap.is_mapped(user_id, public_keys[i]) == True
def test_map_eval(default_domain_id):
idPubkeyMap = id_lib.BBcIdPublickeyMap(default_domain_id)
label_group_id = bbclib.get_new_id('test_label_group',
include_timestamp=False)
label_id = TransactionLabel.create_label_id('test_map_eval:c', '0')
label = TransactionLabel(label_group_id, label_id)
user_id, keypairs0 = idPubkeyMap.create_user_id(label=label)
time0 = int(time.time())
print("\n2-second interval.")
time.sleep(2)
keypairs1 = []
public_keys = []
for i in range(3):
kp = bbclib.KeyPair()
kp.generate()
keypairs1.append(kp)
public_keys.append(keypairs1[i].public_key)
label_id = TransactionLabel.create_label_id('test_map_eval:u', '0')
label = TransactionLabel(label_group_id, label_id)
tx = idPubkeyMap.update(user_id, public_keys_to_add=public_keys,
keypair=keypairs0[0], label=label)
time1 = int(time.time())
print("2-second interval.")
time.sleep(2)
tx = idPubkeyMap.update(user_id, public_keys_to_remove=public_keys,
keypair=keypairs0[0])
time2 = int(time.time())
print("2-second interval.")
time.sleep(2)
tx = idPubkeyMap.update(user_id, public_keys_to_replace=public_keys,
keypair=keypairs0[0])
time3 = int(time.time())
public_keys, key_types = idPubkeyMap.get_mapped_public_keys(user_id, time3)
assert len(public_keys) == 3
assert len(key_types) == 3
for keypair in keypairs1:
assert bytes(keypair.public_key) in public_keys
assert idPubkeyMap.is_mapped(user_id, keypairs0[0].public_key, time3) == False
assert idPubkeyMap.is_mapped(user_id, keypairs1[0].public_key, time3) == True
assert idPubkeyMap.is_mapped(user_id, keypairs1[1].public_key, time3) == True
assert idPubkeyMap.is_mapped(user_id, keypairs1[2].public_key, time3) == True
public_keys, key_types = idPubkeyMap.get_mapped_public_keys(user_id, time2)
assert len(public_keys) == 1
assert len(key_types) == 1
for keypair in keypairs0:
assert bytes(keypair.public_key) in public_keys
assert idPubkeyMap.is_mapped(user_id, keypairs0[0].public_key, time2) == True
assert idPubkeyMap.is_mapped(user_id, keypairs1[0].public_key, time2) == False
assert idPubkeyMap.is_mapped(user_id, keypairs1[1].public_key, time2) == False
assert idPubkeyMap.is_mapped(user_id, keypairs1[2].public_key, time2) == False
public_keys, key_types = idPubkeyMap.get_mapped_public_keys(user_id, time1)
assert len(public_keys) == 4
assert len(key_types) == 4
for keypair in keypairs0:
assert bytes(keypair.public_key) in public_keys
for keypair in keypairs1:
assert bytes(keypair.public_key) in public_keys
assert idPubkeyMap.is_mapped(user_id, keypairs0[0].public_key, time1) == True
assert idPubkeyMap.is_mapped(user_id, keypairs1[0].public_key, time1) == True
assert idPubkeyMap.is_mapped(user_id, keypairs1[1].public_key, time1) == True
assert idPubkeyMap.is_mapped(user_id, keypairs1[2].public_key, time1) == True
public_keys, key_types = idPubkeyMap.get_mapped_public_keys(user_id, time0)
assert len(public_keys) == 1
assert len(key_types) == 1
for keypair in keypairs0:
assert bytes(keypair.public_key) in public_keys
assert idPubkeyMap.is_mapped(user_id, keypairs0[0].public_key, time0) == True
assert idPubkeyMap.is_mapped(user_id, keypairs1[0].public_key, time0) == False
assert idPubkeyMap.is_mapped(user_id, keypairs1[1].public_key, time0) == False
assert idPubkeyMap.is_mapped(user_id, keypairs1[2].public_key, time0) == False
idPubkeyMap._BBcIdPublickeyMap__clear_local_database(user_id)
print("cleared local database entries for the user for reconstruction.")
public_keys, key_types = idPubkeyMap.get_mapped_public_keys(user_id)
assert len(public_keys) == 3
assert len(key_types) == 3
for keypair in keypairs1:
assert bytes(keypair.public_key) in public_keys
for key_type in key_types:
assert key_type == bbclib.DEFAULT_CURVETYPE
assert idPubkeyMap.is_mapped(user_id, keypairs0[0].public_key) == False
assert idPubkeyMap.is_mapped(user_id, keypairs1[0].public_key) == True
assert idPubkeyMap.is_mapped(user_id, keypairs1[1].public_key) == True
assert idPubkeyMap.is_mapped(user_id, keypairs1[2].public_key) == True
idPubkeyMap.close()
def test_key_types(default_domain_id):
idPubkeyMap = id_lib.BBcIdPublickeyMap(default_domain_id,
default_key_type=bbclib.KeyType.ECDSA_SECP256k1)
user_id, keypairs = idPubkeyMap.create_user_id(num_pubkeys=1)
assert len(keypairs) == 1
assert idPubkeyMap.is_mapped(user_id, keypairs[0].public_key) == True
public_keys, key_types = idPubkeyMap.get_mapped_public_keys(user_id)
assert len(key_types) == 1
assert key_types[0] == bbclib.KeyType.ECDSA_SECP256k1
public_keys = []
key_types = []
for i in range(3):
if i == 1:
key_type = bbclib.KeyType.ECDSA_SECP256k1
else:
key_type = bbclib.KeyType.ECDSA_P256v1
keypair = bbclib.KeyPair(key_type)
keypair.generate()
public_keys.append(keypair.public_key)
key_types.append(key_type)
tx = idPubkeyMap.update(user_id, public_keys_to_replace=public_keys,
key_types_to_replace=key_types, keypair=keypairs[0])
assert idPubkeyMap.is_mapped(user_id, keypairs[0].public_key) == False
for i in range(3):
assert idPubkeyMap.is_mapped(user_id, public_keys[i]) == True
public_keys, key_types = idPubkeyMap.get_mapped_public_keys(user_id)
assert len(key_types) == 3
for i, key_type in enumerate(key_types):
if i == 1:
assert key_type == bbclib.KeyType.ECDSA_SECP256k1
else:
assert key_type == bbclib.KeyType.ECDSA_P256v1
# end of tests/test_id_lib.py
| # -*- coding: utf-8 -*-
import pytest
import sys
import time
sys.path.extend(["../"])
from bbc1.lib import id_lib
from bbc1.lib.app_support_lib import TransactionLabel
from bbc1.core import bbc_app
from bbc1.core import bbclib
from bbc1.core.bbc_config import DEFAULT_CORE_PORT
@pytest.fixture()
def default_domain_id():
domain_id = bbclib.get_new_id("test_id_lib", include_timestamp=False)
tmpclient = bbc_app.BBcAppClient(port=DEFAULT_CORE_PORT, multiq=False, loglevel="all")
tmpclient.domain_setup(domain_id)
tmpclient.callback.synchronize()
tmpclient.unregister_from_core()
return domain_id
def test_default_map_creation(default_domain_id):
NUM_KEYPAIRS = 5
idPubkeyMap = id_lib.BBcIdPublickeyMap(default_domain_id)
user_id, keypairs = idPubkeyMap.create_user_id(num_pubkeys=NUM_KEYPAIRS)
assert len(keypairs) == NUM_KEYPAIRS
for i in range(NUM_KEYPAIRS):
assert idPubkeyMap.is_mapped(user_id, keypairs[i].public_key) == True
def test_map_update(default_domain_id):
idPubkeyMap = id_lib.BBcIdPublickeyMap(default_domain_id)
user_id, keypairs = idPubkeyMap.create_user_id(num_pubkeys=1)
assert len(keypairs) == 1
assert idPubkeyMap.is_mapped(user_id, keypairs[0].public_key) == True
public_keys = []
for i in range(3):
keypair = bbclib.KeyPair()
keypair.generate()
public_keys.append(keypair.public_key)
tx = idPubkeyMap.update(user_id, public_keys_to_replace=public_keys,
keypair=keypairs[0])
assert idPubkeyMap.is_mapped(user_id, keypairs[0].public_key) == False
for i in range(3):
assert idPubkeyMap.is_mapped(user_id, public_keys[i]) == True
def test_get_map(default_domain_id):
NUM_KEYPAIRS = 3
idPubkeyMap = id_lib.BBcIdPublickeyMap(default_domain_id)
user_id, keypairs = idPubkeyMap.create_user_id(num_pubkeys=NUM_KEYPAIRS)
assert len(keypairs) == NUM_KEYPAIRS
public_keys, key_types = idPubkeyMap.get_mapped_public_keys(user_id)
assert len(public_keys) == NUM_KEYPAIRS
assert len(key_types) == NUM_KEYPAIRS
for i in range(NUM_KEYPAIRS):
assert bytes(keypairs[i].public_key) == public_keys[i]
assert keypairs[i].curvetype == key_types[i]
def test_map_creation_with_pubkeys(default_domain_id):
NUM_KEYPAIRS = 3
public_keys = []
for i in range(NUM_KEYPAIRS):
keypair = bbclib.KeyPair()
keypair.generate()
public_keys.append(keypair.public_key)
idPubkeyMap = id_lib.BBcIdPublickeyMap(default_domain_id)
user_id, keypairs = idPubkeyMap.create_user_id(public_keys=public_keys)
assert len(keypairs) == 0
for i in range(NUM_KEYPAIRS):
assert idPubkeyMap.is_mapped(user_id, public_keys[i]) == True
def test_map_eval(default_domain_id):
idPubkeyMap = id_lib.BBcIdPublickeyMap(default_domain_id)
label_group_id = bbclib.get_new_id('test_label_group',
include_timestamp=False)
label_id = TransactionLabel.create_label_id('test_map_eval:c', '0')
label = TransactionLabel(label_group_id, label_id)
user_id, keypairs0 = idPubkeyMap.create_user_id(label=label)
time0 = int(time.time())
print("\n2-second interval.")
time.sleep(2)
keypairs1 = []
public_keys = []
for i in range(3):
kp = bbclib.KeyPair()
kp.generate()
keypairs1.append(kp)
public_keys.append(keypairs1[i].public_key)
label_id = TransactionLabel.create_label_id('test_map_eval:u', '0')
label = TransactionLabel(label_group_id, label_id)
tx = idPubkeyMap.update(user_id, public_keys_to_add=public_keys,
keypair=keypairs0[0], label=label)
time1 = int(time.time())
print("2-second interval.")
time.sleep(2)
tx = idPubkeyMap.update(user_id, public_keys_to_remove=public_keys,
keypair=keypairs0[0])
time2 = int(time.time())
print("2-second interval.")
time.sleep(2)
tx = idPubkeyMap.update(user_id, public_keys_to_replace=public_keys,
keypair=keypairs0[0])
time3 = int(time.time())
public_keys, key_types = idPubkeyMap.get_mapped_public_keys(user_id, time3)
assert len(public_keys) == 3
assert len(key_types) == 3
for keypair in keypairs1:
assert bytes(keypair.public_key) in public_keys
assert idPubkeyMap.is_mapped(user_id, keypairs0[0].public_key, time3) == False
assert idPubkeyMap.is_mapped(user_id, keypairs1[0].public_key, time3) == True
assert idPubkeyMap.is_mapped(user_id, keypairs1[1].public_key, time3) == True
assert idPubkeyMap.is_mapped(user_id, keypairs1[2].public_key, time3) == True
public_keys, key_types = idPubkeyMap.get_mapped_public_keys(user_id, time2)
assert len(public_keys) == 1
assert len(key_types) == 1
for keypair in keypairs0:
assert bytes(keypair.public_key) in public_keys
assert idPubkeyMap.is_mapped(user_id, keypairs0[0].public_key, time2) == True
assert idPubkeyMap.is_mapped(user_id, keypairs1[0].public_key, time2) == False
assert idPubkeyMap.is_mapped(user_id, keypairs1[1].public_key, time2) == False
assert idPubkeyMap.is_mapped(user_id, keypairs1[2].public_key, time2) == False
public_keys, key_types = idPubkeyMap.get_mapped_public_keys(user_id, time1)
assert len(public_keys) == 4
assert len(key_types) == 4
for keypair in keypairs0:
assert bytes(keypair.public_key) in public_keys
for keypair in keypairs1:
assert bytes(keypair.public_key) in public_keys
assert idPubkeyMap.is_mapped(user_id, keypairs0[0].public_key, time1) == True
assert idPubkeyMap.is_mapped(user_id, keypairs1[0].public_key, time1) == True
assert idPubkeyMap.is_mapped(user_id, keypairs1[1].public_key, time1) == True
assert idPubkeyMap.is_mapped(user_id, keypairs1[2].public_key, time1) == True
public_keys, key_types = idPubkeyMap.get_mapped_public_keys(user_id, time0)
assert len(public_keys) == 1
assert len(key_types) == 1
for keypair in keypairs0:
assert bytes(keypair.public_key) in public_keys
assert idPubkeyMap.is_mapped(user_id, keypairs0[0].public_key, time0) == True
assert idPubkeyMap.is_mapped(user_id, keypairs1[0].public_key, time0) == False
assert idPubkeyMap.is_mapped(user_id, keypairs1[1].public_key, time0) == False
assert idPubkeyMap.is_mapped(user_id, keypairs1[2].public_key, time0) == False
idPubkeyMap._BBcIdPublickeyMap__clear_local_database(user_id)
print("cleared local database entries for the user for reconstruction.")
public_keys, key_types = idPubkeyMap.get_mapped_public_keys(user_id)
assert len(public_keys) == 3
assert len(key_types) == 3
for keypair in keypairs1:
assert bytes(keypair.public_key) in public_keys
for key_type in key_types:
assert key_type == bbclib.DEFAULT_CURVETYPE
assert idPubkeyMap.is_mapped(user_id, keypairs0[0].public_key) == False
assert idPubkeyMap.is_mapped(user_id, keypairs1[0].public_key) == True
assert idPubkeyMap.is_mapped(user_id, keypairs1[1].public_key) == True
assert idPubkeyMap.is_mapped(user_id, keypairs1[2].public_key) == True
idPubkeyMap.close()
def test_key_types(default_domain_id):
idPubkeyMap = id_lib.BBcIdPublickeyMap(default_domain_id,
default_key_type=bbclib.KeyType.ECDSA_SECP256k1)
user_id, keypairs = idPubkeyMap.create_user_id(num_pubkeys=1)
assert len(keypairs) == 1
assert idPubkeyMap.is_mapped(user_id, keypairs[0].public_key) == True
public_keys, key_types = idPubkeyMap.get_mapped_public_keys(user_id)
assert len(key_types) == 1
assert key_types[0] == bbclib.KeyType.ECDSA_SECP256k1
public_keys = []
key_types = []
for i in range(3):
if i == 1:
key_type = bbclib.KeyType.ECDSA_SECP256k1
else:
key_type = bbclib.KeyType.ECDSA_P256v1
keypair = bbclib.KeyPair(key_type)
keypair.generate()
public_keys.append(keypair.public_key)
key_types.append(key_type)
tx = idPubkeyMap.update(user_id, public_keys_to_replace=public_keys,
key_types_to_replace=key_types, keypair=keypairs[0])
assert idPubkeyMap.is_mapped(user_id, keypairs[0].public_key) == False
for i in range(3):
assert idPubkeyMap.is_mapped(user_id, public_keys[i]) == True
public_keys, key_types = idPubkeyMap.get_mapped_public_keys(user_id)
assert len(key_types) == 3
for i, key_type in enumerate(key_types):
if i == 1:
assert key_type == bbclib.KeyType.ECDSA_SECP256k1
else:
assert key_type == bbclib.KeyType.ECDSA_P256v1
# end of tests/test_id_lib.py | en | 0.635209 | # -*- coding: utf-8 -*- # end of tests/test_id_lib.py | 1.857847 | 2 |
FictionTools/amitools/test/unit/libmgr_mgr.py | polluks/Puddle-BuildTools | 38 | 6622662 | import logging
import pytest
from amitools.vamos.log import log_libmgr, log_exec
from amitools.vamos.libcore import LibCtx
from amitools.vamos.libmgr import LibManager, LibMgrCfg, LibCfg
from amitools.vamos.machine import Machine
from amitools.vamos.mem import MemoryAlloc
from amitools.vamos.lib.lexec.ExecLibCtx import ExecLibCtx
from amitools.vamos.lib.dos.DosLibCtx import DosLibCtx
from amitools.vamos.lib.ExecLibrary import ExecLibrary
from amitools.vamos.lib.DosLibrary import DosLibrary
from amitools.vamos.lib.VamosTestLibrary import VamosTestLibrary
from amitools.vamos.lib.VamosTestDevice import VamosTestDevice
from amitools.vamos.loader import SegmentLoader
def setup(path_mgr=None):
log_libmgr.setLevel(logging.INFO)
log_exec.setLevel(logging.INFO)
machine = Machine()
# machine.show_instr(True)
sp = machine.get_ram_begin() - 4
alloc = MemoryAlloc.for_machine(machine)
segloader = SegmentLoader(alloc, path_mgr)
cfg = LibMgrCfg()
mgr = LibManager(machine, alloc, segloader, cfg)
# setup ctx map
cpu = machine.get_cpu()
mem = machine.get_mem()
cpu_type = machine.get_cpu_type()
exec_ctx = ExecLibCtx(machine, alloc, segloader, path_mgr)
mgr.add_ctx("exec.library", exec_ctx)
mgr.add_impl_cls("exec.library", ExecLibrary)
dos_ctx = DosLibCtx(machine, alloc, segloader, path_mgr, None, None)
mgr.add_ctx("dos.library", dos_ctx)
mgr.add_impl_cls("dos.library", DosLibrary)
mgr.add_impl_cls("vamostest.library", VamosTestLibrary)
mgr.add_impl_cls("vamostestdev.device", VamosTestDevice)
return machine, alloc, mgr, sp, cfg
def libmgr_mgr_bootstrap_shutdown_test():
machine, alloc, mgr, sp, cfg = setup()
# bootstrap exec
exec_vlib = mgr.bootstrap_exec()
exec_base = exec_vlib.get_addr()
exec_lib = exec_vlib.get_library()
# make sure exec is in place
vmgr = mgr.vlib_mgr
assert vmgr.get_vlib_by_name("exec.library") == exec_vlib
assert vmgr.get_vlib_by_addr(exec_base) == exec_vlib
assert exec_vlib.get_ctx() == vmgr.ctx_map.get_ctx("exec.library")
assert exec_lib.open_cnt == 1
assert machine.get_mem().r32(4) == exec_base
# we can't expunge exec
assert not mgr.expunge_lib(exec_base)
# shutdown
left = mgr.shutdown()
assert left == 0
# exec is now gone and mem is sane
assert alloc.is_all_free()
def libmgr_mgr_open_fail_test():
class PathMgrMock:
def ami_to_sys_path(self, lock, ami_path, mustExist=True):
return None
pm = PathMgrMock()
machine, alloc, mgr, sp, cfg = setup(path_mgr=pm)
mgr.bootstrap_exec()
# open non-existing lib
lib_base = mgr.open_lib("blubber.library")
assert lib_base == 0
# shutdown
left = mgr.shutdown()
assert left == 0
# exec is now gone and mem is sane
assert alloc.is_all_free()
def libmgr_mgr_open_vlib_test():
machine, alloc, mgr, sp, cfg = setup()
exec_vlib = mgr.bootstrap_exec()
# make vamos test lib
test_base = mgr.open_lib("vamostest.library")
assert test_base > 0
vmgr = mgr.vlib_mgr
test_vlib = vmgr.get_vlib_by_addr(test_base)
assert test_vlib
assert type(test_vlib.get_ctx()) == LibCtx
assert vmgr.get_vlib_by_name("vamostest.library") == test_vlib
impl = test_vlib.get_impl()
assert impl
assert impl.get_cnt() == 1
lib = test_vlib.get_library()
assert lib.version == impl.get_version()
mgr.close_lib(test_base)
assert impl.get_cnt() is None
# shutdown
left = mgr.shutdown()
assert left == 0
assert alloc.is_all_free()
def libmgr_mgr_open_vlib_dev_test():
machine, alloc, mgr, sp, cfg = setup()
exec_vlib = mgr.bootstrap_exec()
# make vamos test lib
test_base = mgr.open_lib("vamostestdev.device")
assert test_base > 0
vmgr = mgr.vlib_mgr
test_vlib = vmgr.get_vlib_by_addr(test_base)
assert test_vlib
assert type(test_vlib.get_ctx()) == LibCtx
assert vmgr.get_vlib_by_name("vamostestdev.device") == test_vlib
impl = test_vlib.get_impl()
assert impl
assert impl.get_cnt() == 1
lib = test_vlib.get_library()
assert lib.version == impl.get_version()
mgr.close_lib(test_base)
assert impl.get_cnt() is None
# shutdown
left = mgr.shutdown()
assert left == 0
assert alloc.is_all_free()
def libmgr_mgr_open_vlib_fake_fd_test():
machine, alloc, mgr, sp, cfg = setup()
exec_vlib = mgr.bootstrap_exec()
# make vamos test lib
cfg.add_lib_cfg(
"dos.library", LibCfg(create_mode=LibCfg.CREATE_MODE_FAKE, force_version=40)
)
test_base = mgr.open_lib("dos.library", 36)
assert test_base > 0
vmgr = mgr.vlib_mgr
test_vlib = vmgr.get_vlib_by_addr(test_base)
assert test_vlib
assert vmgr.get_vlib_by_name("dos.library") == test_vlib
impl = test_vlib.get_impl()
assert impl is None
lib = test_vlib.get_library()
assert lib.version == 40
mgr.close_lib(test_base)
# shutdown
left = mgr.shutdown()
assert left == 0
assert alloc.is_all_free()
def libmgr_mgr_open_vlib_fake_no_fd_test():
machine, alloc, mgr, sp, cfg = setup()
exec_vlib = mgr.bootstrap_exec()
# make vamos test lib
cfg.add_lib_cfg(
"foo.library",
LibCfg(
create_mode=LibCfg.CREATE_MODE_FAKE, force_version=40, num_fake_funcs=10
),
)
test_base = mgr.open_lib("foo.library", 36)
assert test_base > 0
vmgr = mgr.vlib_mgr
test_vlib = vmgr.get_vlib_by_addr(test_base)
assert test_vlib
assert vmgr.get_vlib_by_name("foo.library") == test_vlib
impl = test_vlib.get_impl()
assert impl is None
lib = test_vlib.get_library()
assert lib.version == 40
assert lib.neg_size == 68
mgr.close_lib(test_base)
# shutdown
left = mgr.shutdown()
assert left == 0
assert alloc.is_all_free()
def libmgr_mgr_open_vlib_too_new_test():
machine, alloc, mgr, sp, cfg = setup()
exec_vlib = mgr.bootstrap_exec()
# make vamos test lib
cfg.add_lib_cfg(
"dos.library", LibCfg(create_mode=LibCfg.CREATE_MODE_FAKE, force_version=40)
)
test_base = mgr.open_lib("dos.library", 41)
assert test_base == 0
# shutdown
left = mgr.shutdown()
assert left == 0
assert alloc.is_all_free()
class ALibHelper(object):
def __init__(self, lib_file, lib_name):
class PathMgrMock:
def ami_to_sys_path(self, lock, ami_path, mustExist=True):
if ami_path == "LIBS:" + lib_name:
return lib_file
pm = PathMgrMock()
self.machine, self.alloc, self.mgr, self.sp, self.cfg = setup(path_mgr=pm)
self.mgr.bootstrap_exec()
self.cfg.add_lib_cfg(
"dos.library", LibCfg(create_mode=LibCfg.CREATE_MODE_FAKE, force_version=40)
)
self.dos_base = self.mgr.open_lib("dos.library")
def shutdown(self):
# close dos
self.mgr.close_lib(self.dos_base)
# expunge lib
left = self.mgr.shutdown(run_sp=self.sp)
assert left == 0
assert self.alloc.is_all_free()
def open_alib(lib_file, lib_name, ok=True, version=0, mode=None):
h = ALibHelper(lib_file, lib_name)
mgr = h.mgr
# setup config
if mode:
h.cfg.add_lib_cfg(lib_name, LibCfg(create_mode=mode))
# open_lib
lib_base = mgr.open_lib(lib_name, run_sp=h.sp, version=version)
if not ok:
assert lib_base == 0
h.shutdown()
return
assert lib_base > 0
amgr = mgr.alib_mgr
assert amgr.is_base_addr(lib_base)
lib_info = amgr.get_lib_info_for_name(lib_name)
assert lib_info
assert lib_info.is_base_addr(lib_base)
# close lib
seglist = mgr.close_lib(lib_base, run_sp=h.sp)
assert not amgr.is_base_addr(lib_base)
assert seglist == 0
lib_info = amgr.get_lib_info_for_name(lib_name)
assert lib_info
assert not lib_info.is_base_addr(lib_base)
# expunge lib
load_addr = lib_info.get_load_addr()
seglist = mgr.expunge_lib(load_addr, run_sp=h.sp)
assert seglist > 0
assert not amgr.is_load_addr(lib_base)
lib_info = amgr.get_lib_info_for_name(lib_name)
assert not lib_info
# shutdown
h.shutdown()
def libmgr_mgr_open_alib_libnix_test(buildlibnix):
lib_file = buildlibnix.make_lib("testnix")
lib_name = "testnix.library"
open_alib(lib_file, lib_name)
def libmgr_mgr_open_alib_libsc_test(buildlibsc):
lib_file = buildlibsc.make_lib("testsc")
lib_name = "testsc.library"
open_alib(lib_file, lib_name)
def libmgr_mgr_open_alib_libnix_ver_fail_test(buildlibnix):
lib_file = buildlibnix.make_lib("testnix")
lib_name = "testnix.library"
open_alib(lib_file, lib_name, ok=False, version=99)
def libmgr_mgr_open_alib_libsc_ver_fail_test(buildlibsc):
lib_file = buildlibsc.make_lib("testsc")
lib_name = "testsc.library"
open_alib(lib_file, lib_name, ok=False, version=99)
def libmgr_mgr_open_alib_libnix_mode_native_test(buildlibnix):
lib_file = buildlibnix.make_lib("testnix")
lib_name = "testnix.library"
open_alib(lib_file, lib_name, mode=LibCfg.CREATE_MODE_AMIGA)
def libmgr_mgr_open_alib_libsc_mode_native_test(buildlibsc):
lib_file = buildlibsc.make_lib("testsc")
lib_name = "testsc.library"
open_alib(lib_file, lib_name, mode=LibCfg.CREATE_MODE_AMIGA)
def libmgr_mgr_open_alib_libnix_mode_off_test(buildlibnix):
lib_file = buildlibnix.make_lib("testnix")
lib_name = "testnix.library"
open_alib(lib_file, lib_name, ok=False, mode=LibCfg.CREATE_MODE_OFF)
def libmgr_mgr_open_alib_libsc_mode_off_test(buildlibsc):
lib_file = buildlibsc.make_lib("testsc")
lib_name = "testsc.library"
open_alib(lib_file, lib_name, ok=False, mode=LibCfg.CREATE_MODE_OFF)
| import logging
import pytest
from amitools.vamos.log import log_libmgr, log_exec
from amitools.vamos.libcore import LibCtx
from amitools.vamos.libmgr import LibManager, LibMgrCfg, LibCfg
from amitools.vamos.machine import Machine
from amitools.vamos.mem import MemoryAlloc
from amitools.vamos.lib.lexec.ExecLibCtx import ExecLibCtx
from amitools.vamos.lib.dos.DosLibCtx import DosLibCtx
from amitools.vamos.lib.ExecLibrary import ExecLibrary
from amitools.vamos.lib.DosLibrary import DosLibrary
from amitools.vamos.lib.VamosTestLibrary import VamosTestLibrary
from amitools.vamos.lib.VamosTestDevice import VamosTestDevice
from amitools.vamos.loader import SegmentLoader
def setup(path_mgr=None):
log_libmgr.setLevel(logging.INFO)
log_exec.setLevel(logging.INFO)
machine = Machine()
# machine.show_instr(True)
sp = machine.get_ram_begin() - 4
alloc = MemoryAlloc.for_machine(machine)
segloader = SegmentLoader(alloc, path_mgr)
cfg = LibMgrCfg()
mgr = LibManager(machine, alloc, segloader, cfg)
# setup ctx map
cpu = machine.get_cpu()
mem = machine.get_mem()
cpu_type = machine.get_cpu_type()
exec_ctx = ExecLibCtx(machine, alloc, segloader, path_mgr)
mgr.add_ctx("exec.library", exec_ctx)
mgr.add_impl_cls("exec.library", ExecLibrary)
dos_ctx = DosLibCtx(machine, alloc, segloader, path_mgr, None, None)
mgr.add_ctx("dos.library", dos_ctx)
mgr.add_impl_cls("dos.library", DosLibrary)
mgr.add_impl_cls("vamostest.library", VamosTestLibrary)
mgr.add_impl_cls("vamostestdev.device", VamosTestDevice)
return machine, alloc, mgr, sp, cfg
def libmgr_mgr_bootstrap_shutdown_test():
machine, alloc, mgr, sp, cfg = setup()
# bootstrap exec
exec_vlib = mgr.bootstrap_exec()
exec_base = exec_vlib.get_addr()
exec_lib = exec_vlib.get_library()
# make sure exec is in place
vmgr = mgr.vlib_mgr
assert vmgr.get_vlib_by_name("exec.library") == exec_vlib
assert vmgr.get_vlib_by_addr(exec_base) == exec_vlib
assert exec_vlib.get_ctx() == vmgr.ctx_map.get_ctx("exec.library")
assert exec_lib.open_cnt == 1
assert machine.get_mem().r32(4) == exec_base
# we can't expunge exec
assert not mgr.expunge_lib(exec_base)
# shutdown
left = mgr.shutdown()
assert left == 0
# exec is now gone and mem is sane
assert alloc.is_all_free()
def libmgr_mgr_open_fail_test():
class PathMgrMock:
def ami_to_sys_path(self, lock, ami_path, mustExist=True):
return None
pm = PathMgrMock()
machine, alloc, mgr, sp, cfg = setup(path_mgr=pm)
mgr.bootstrap_exec()
# open non-existing lib
lib_base = mgr.open_lib("blubber.library")
assert lib_base == 0
# shutdown
left = mgr.shutdown()
assert left == 0
# exec is now gone and mem is sane
assert alloc.is_all_free()
def libmgr_mgr_open_vlib_test():
machine, alloc, mgr, sp, cfg = setup()
exec_vlib = mgr.bootstrap_exec()
# make vamos test lib
test_base = mgr.open_lib("vamostest.library")
assert test_base > 0
vmgr = mgr.vlib_mgr
test_vlib = vmgr.get_vlib_by_addr(test_base)
assert test_vlib
assert type(test_vlib.get_ctx()) == LibCtx
assert vmgr.get_vlib_by_name("vamostest.library") == test_vlib
impl = test_vlib.get_impl()
assert impl
assert impl.get_cnt() == 1
lib = test_vlib.get_library()
assert lib.version == impl.get_version()
mgr.close_lib(test_base)
assert impl.get_cnt() is None
# shutdown
left = mgr.shutdown()
assert left == 0
assert alloc.is_all_free()
def libmgr_mgr_open_vlib_dev_test():
machine, alloc, mgr, sp, cfg = setup()
exec_vlib = mgr.bootstrap_exec()
# make vamos test lib
test_base = mgr.open_lib("vamostestdev.device")
assert test_base > 0
vmgr = mgr.vlib_mgr
test_vlib = vmgr.get_vlib_by_addr(test_base)
assert test_vlib
assert type(test_vlib.get_ctx()) == LibCtx
assert vmgr.get_vlib_by_name("vamostestdev.device") == test_vlib
impl = test_vlib.get_impl()
assert impl
assert impl.get_cnt() == 1
lib = test_vlib.get_library()
assert lib.version == impl.get_version()
mgr.close_lib(test_base)
assert impl.get_cnt() is None
# shutdown
left = mgr.shutdown()
assert left == 0
assert alloc.is_all_free()
def libmgr_mgr_open_vlib_fake_fd_test():
machine, alloc, mgr, sp, cfg = setup()
exec_vlib = mgr.bootstrap_exec()
# make vamos test lib
cfg.add_lib_cfg(
"dos.library", LibCfg(create_mode=LibCfg.CREATE_MODE_FAKE, force_version=40)
)
test_base = mgr.open_lib("dos.library", 36)
assert test_base > 0
vmgr = mgr.vlib_mgr
test_vlib = vmgr.get_vlib_by_addr(test_base)
assert test_vlib
assert vmgr.get_vlib_by_name("dos.library") == test_vlib
impl = test_vlib.get_impl()
assert impl is None
lib = test_vlib.get_library()
assert lib.version == 40
mgr.close_lib(test_base)
# shutdown
left = mgr.shutdown()
assert left == 0
assert alloc.is_all_free()
def libmgr_mgr_open_vlib_fake_no_fd_test():
machine, alloc, mgr, sp, cfg = setup()
exec_vlib = mgr.bootstrap_exec()
# make vamos test lib
cfg.add_lib_cfg(
"foo.library",
LibCfg(
create_mode=LibCfg.CREATE_MODE_FAKE, force_version=40, num_fake_funcs=10
),
)
test_base = mgr.open_lib("foo.library", 36)
assert test_base > 0
vmgr = mgr.vlib_mgr
test_vlib = vmgr.get_vlib_by_addr(test_base)
assert test_vlib
assert vmgr.get_vlib_by_name("foo.library") == test_vlib
impl = test_vlib.get_impl()
assert impl is None
lib = test_vlib.get_library()
assert lib.version == 40
assert lib.neg_size == 68
mgr.close_lib(test_base)
# shutdown
left = mgr.shutdown()
assert left == 0
assert alloc.is_all_free()
def libmgr_mgr_open_vlib_too_new_test():
machine, alloc, mgr, sp, cfg = setup()
exec_vlib = mgr.bootstrap_exec()
# make vamos test lib
cfg.add_lib_cfg(
"dos.library", LibCfg(create_mode=LibCfg.CREATE_MODE_FAKE, force_version=40)
)
test_base = mgr.open_lib("dos.library", 41)
assert test_base == 0
# shutdown
left = mgr.shutdown()
assert left == 0
assert alloc.is_all_free()
class ALibHelper(object):
def __init__(self, lib_file, lib_name):
class PathMgrMock:
def ami_to_sys_path(self, lock, ami_path, mustExist=True):
if ami_path == "LIBS:" + lib_name:
return lib_file
pm = PathMgrMock()
self.machine, self.alloc, self.mgr, self.sp, self.cfg = setup(path_mgr=pm)
self.mgr.bootstrap_exec()
self.cfg.add_lib_cfg(
"dos.library", LibCfg(create_mode=LibCfg.CREATE_MODE_FAKE, force_version=40)
)
self.dos_base = self.mgr.open_lib("dos.library")
def shutdown(self):
# close dos
self.mgr.close_lib(self.dos_base)
# expunge lib
left = self.mgr.shutdown(run_sp=self.sp)
assert left == 0
assert self.alloc.is_all_free()
def open_alib(lib_file, lib_name, ok=True, version=0, mode=None):
h = ALibHelper(lib_file, lib_name)
mgr = h.mgr
# setup config
if mode:
h.cfg.add_lib_cfg(lib_name, LibCfg(create_mode=mode))
# open_lib
lib_base = mgr.open_lib(lib_name, run_sp=h.sp, version=version)
if not ok:
assert lib_base == 0
h.shutdown()
return
assert lib_base > 0
amgr = mgr.alib_mgr
assert amgr.is_base_addr(lib_base)
lib_info = amgr.get_lib_info_for_name(lib_name)
assert lib_info
assert lib_info.is_base_addr(lib_base)
# close lib
seglist = mgr.close_lib(lib_base, run_sp=h.sp)
assert not amgr.is_base_addr(lib_base)
assert seglist == 0
lib_info = amgr.get_lib_info_for_name(lib_name)
assert lib_info
assert not lib_info.is_base_addr(lib_base)
# expunge lib
load_addr = lib_info.get_load_addr()
seglist = mgr.expunge_lib(load_addr, run_sp=h.sp)
assert seglist > 0
assert not amgr.is_load_addr(lib_base)
lib_info = amgr.get_lib_info_for_name(lib_name)
assert not lib_info
# shutdown
h.shutdown()
def libmgr_mgr_open_alib_libnix_test(buildlibnix):
lib_file = buildlibnix.make_lib("testnix")
lib_name = "testnix.library"
open_alib(lib_file, lib_name)
def libmgr_mgr_open_alib_libsc_test(buildlibsc):
lib_file = buildlibsc.make_lib("testsc")
lib_name = "testsc.library"
open_alib(lib_file, lib_name)
def libmgr_mgr_open_alib_libnix_ver_fail_test(buildlibnix):
lib_file = buildlibnix.make_lib("testnix")
lib_name = "testnix.library"
open_alib(lib_file, lib_name, ok=False, version=99)
def libmgr_mgr_open_alib_libsc_ver_fail_test(buildlibsc):
lib_file = buildlibsc.make_lib("testsc")
lib_name = "testsc.library"
open_alib(lib_file, lib_name, ok=False, version=99)
def libmgr_mgr_open_alib_libnix_mode_native_test(buildlibnix):
lib_file = buildlibnix.make_lib("testnix")
lib_name = "testnix.library"
open_alib(lib_file, lib_name, mode=LibCfg.CREATE_MODE_AMIGA)
def libmgr_mgr_open_alib_libsc_mode_native_test(buildlibsc):
lib_file = buildlibsc.make_lib("testsc")
lib_name = "testsc.library"
open_alib(lib_file, lib_name, mode=LibCfg.CREATE_MODE_AMIGA)
def libmgr_mgr_open_alib_libnix_mode_off_test(buildlibnix):
lib_file = buildlibnix.make_lib("testnix")
lib_name = "testnix.library"
open_alib(lib_file, lib_name, ok=False, mode=LibCfg.CREATE_MODE_OFF)
def libmgr_mgr_open_alib_libsc_mode_off_test(buildlibsc):
lib_file = buildlibsc.make_lib("testsc")
lib_name = "testsc.library"
open_alib(lib_file, lib_name, ok=False, mode=LibCfg.CREATE_MODE_OFF)
| en | 0.65299 | # machine.show_instr(True) # setup ctx map # bootstrap exec # make sure exec is in place # we can't expunge exec # shutdown # exec is now gone and mem is sane # open non-existing lib # shutdown # exec is now gone and mem is sane # make vamos test lib # shutdown # make vamos test lib # shutdown # make vamos test lib # shutdown # make vamos test lib # shutdown # make vamos test lib # shutdown # close dos # expunge lib # setup config # open_lib # close lib # expunge lib # shutdown | 1.798156 | 2 |
himlar_dp_prep/dp_provisioner.py | tanzr/himlar-dp-prep | 0 | 6622663 | <reponame>tanzr/himlar-dp-prep
import logging
import argparse
from keystoneclient.auth.identity import v3
from keystoneclient import session
from keystoneclient.v3 import client
from grampg import PasswordGenerator
ADMIN_NAME = 'admin'
PROJECT_NAME = 'admin'
DEFAULT_DOMAIN_NAME = 'default'
DP_DOMAIN_NAME = 'dataporten'
MEMBER_ROLE_NAME = '_member_'
log = logging.getLogger(__name__)
def group_name(user_id):
return '{}-group'.format(user_id)
def proj_name(user_id):
return user_id.lower()
def local_user_name(user_id):
return user_id.lower()
def make_password():
gen = PasswordGenerator()
return (gen.of().some('numbers').some('lower_letters').some('upper_letters')
.length(16).done().generate())
class DpProvisioner(object):
def __init__(self, config):
self.member_role_name = config['member_role_name']
self.with_local_user = config.get('with_local_user')
self.local_pw = None
dp_domain_name = config['dp_domain_name']
keystone_cachain = config.get('keystone_cachain')
auth = v3.Password(auth_url=config['url'],
username=config['username'],
password=config['password'],
project_name=config['project_name'],
user_domain_name=config['user_domain_name'],
project_domain_name=config['project_domain_name'])
sess = session.Session(auth=auth,verify=keystone_cachain)
self.ks = client.Client(session=sess)
domains = self.ks.domains.list(name=dp_domain_name)
if len(domains) == 1:
self.domain = domains[0]
else:
raise ValueError("Expecting unique '{}' domain".format(dp_domain_name))
def del_resources(self, user_id):
local_users = self.ks.users.list(name=local_user_name(user_id), domain=self.domain)
groups = self.ks.groups.list(name=group_name(user_id), domain=self.domain)
projs = self.ks.projects.list(name=proj_name(user_id), domain=self.domain)
for l in local_users:
self.ks.users.delete(l.id)
log.info("deleted local user %s", l.id)
for p in projs:
self.ks.projects.delete(p.id)
log.info("deleted project %s", p.id)
for g in groups:
self.ks.groups.delete(g.id)
log.info("deleted group %s", g.id)
def is_provisioned(self, user_id):
groups = self.ks.groups.list(name=group_name(user_id), domain=self.domain)
if len(groups) > 0:
group = groups[0]
roles = self.ks.role_assignments.list(group=group)
return any(['project' in r.scope for r in roles])
else:
return False
def grant_membership(self, proj, group):
member_roles = self.ks.roles.list(name=self.member_role_name)
if len(member_roles) == 1:
member_role = member_roles[0]
else:
raise ValueError("Expecting unique '{}' role".format(self.member_role_name))
self.ks.roles.grant(role=member_role, project=proj, group=group)
log.debug("role assignments for %s: %s",
group.name, self.ks.role_assignments.list(group=group))
def provision(self, user_id):
gname = group_name(user_id)
pname = proj_name(user_id)
lname = local_user_name(user_id)
groups = self.ks.groups.list(name=gname, domain=self.domain)
projs = self.ks.projects.list(name=pname, domain=self.domain)
if len(groups) < 1:
group = self.ks.groups.create(name=gname, domain=self.domain)
log.info("group created: %s", group.id)
else:
group = groups[0]
if len(projs) < 1:
proj = self.ks.projects.create(name=pname, domain=self.domain)
log.info("project created: %s", proj.id)
else:
proj = projs[0]
self.grant_membership(proj, group)
if self.with_local_user:
self.local_pw = make_password()
user = self.ks.users.create(name=lname, domain=self.domain,
project=proj, email=user_id, password=self.local_pw)
log.info("local user created: %s", user.id)
self.ks.users.add_to_group(user, group)
return dict(local_user_name=lname,
local_pw=self.local_pw)
if __name__ == '__main__':
DESCRIPTION = "Dataporten provisioner for Openstack"
AUTH_URL = 'http://10.0.3.11:5000/v3'
EMAIL_JK = '<EMAIL>'
def parse_args():
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('--id', default=EMAIL_JK,
help="Identity to provision for")
parser.add_argument('--pw', help='Password')
parser.add_argument('--url', default=AUTH_URL, help="Keystone url")
parser.add_argument('--project-name', default=PROJECT_NAME, help="Admin project name")
parser.add_argument('--dp-domain-name', default=DP_DOMAIN_NAME, help="Dataporten domain name")
parser.add_argument('--delete', default=0, type=int,
help="Set to 1 to delete resources")
parser.add_argument('--provision', default=1, type=int,
help="Set to 1 to provision")
parser.add_argument('--with-local-user', default=1, type=int,
help="Set to 1 to provision a local user for local access")
return parser.parse_args()
def make_config(args):
return dict(url=args.url,
password=<PASSWORD>,
username=ADMIN_NAME,
project_name=args.project_name,
dp_domain_name=args.dp_domain_name,
user_domain_name=DEFAULT_DOMAIN_NAME,
project_domain_name=DEFAULT_DOMAIN_NAME,
member_role_name=MEMBER_ROLE_NAME,
with_local_user=args.with_local_user)
args = parse_args()
config = make_config(args)
logging.basicConfig(level=logging.INFO)
prov = DpProvisioner(config)
if args.delete:
prov.del_resources(args.id)
if args.provision:
prov.provision(args.id)
if prov.local_pw:
print('password for local user: {}'.format(prov.local_pw))
| import logging
import argparse
from keystoneclient.auth.identity import v3
from keystoneclient import session
from keystoneclient.v3 import client
from grampg import PasswordGenerator
ADMIN_NAME = 'admin'
PROJECT_NAME = 'admin'
DEFAULT_DOMAIN_NAME = 'default'
DP_DOMAIN_NAME = 'dataporten'
MEMBER_ROLE_NAME = '_member_'
log = logging.getLogger(__name__)
def group_name(user_id):
return '{}-group'.format(user_id)
def proj_name(user_id):
return user_id.lower()
def local_user_name(user_id):
return user_id.lower()
def make_password():
gen = PasswordGenerator()
return (gen.of().some('numbers').some('lower_letters').some('upper_letters')
.length(16).done().generate())
class DpProvisioner(object):
def __init__(self, config):
self.member_role_name = config['member_role_name']
self.with_local_user = config.get('with_local_user')
self.local_pw = None
dp_domain_name = config['dp_domain_name']
keystone_cachain = config.get('keystone_cachain')
auth = v3.Password(auth_url=config['url'],
username=config['username'],
password=config['password'],
project_name=config['project_name'],
user_domain_name=config['user_domain_name'],
project_domain_name=config['project_domain_name'])
sess = session.Session(auth=auth,verify=keystone_cachain)
self.ks = client.Client(session=sess)
domains = self.ks.domains.list(name=dp_domain_name)
if len(domains) == 1:
self.domain = domains[0]
else:
raise ValueError("Expecting unique '{}' domain".format(dp_domain_name))
def del_resources(self, user_id):
local_users = self.ks.users.list(name=local_user_name(user_id), domain=self.domain)
groups = self.ks.groups.list(name=group_name(user_id), domain=self.domain)
projs = self.ks.projects.list(name=proj_name(user_id), domain=self.domain)
for l in local_users:
self.ks.users.delete(l.id)
log.info("deleted local user %s", l.id)
for p in projs:
self.ks.projects.delete(p.id)
log.info("deleted project %s", p.id)
for g in groups:
self.ks.groups.delete(g.id)
log.info("deleted group %s", g.id)
def is_provisioned(self, user_id):
groups = self.ks.groups.list(name=group_name(user_id), domain=self.domain)
if len(groups) > 0:
group = groups[0]
roles = self.ks.role_assignments.list(group=group)
return any(['project' in r.scope for r in roles])
else:
return False
def grant_membership(self, proj, group):
member_roles = self.ks.roles.list(name=self.member_role_name)
if len(member_roles) == 1:
member_role = member_roles[0]
else:
raise ValueError("Expecting unique '{}' role".format(self.member_role_name))
self.ks.roles.grant(role=member_role, project=proj, group=group)
log.debug("role assignments for %s: %s",
group.name, self.ks.role_assignments.list(group=group))
def provision(self, user_id):
gname = group_name(user_id)
pname = proj_name(user_id)
lname = local_user_name(user_id)
groups = self.ks.groups.list(name=gname, domain=self.domain)
projs = self.ks.projects.list(name=pname, domain=self.domain)
if len(groups) < 1:
group = self.ks.groups.create(name=gname, domain=self.domain)
log.info("group created: %s", group.id)
else:
group = groups[0]
if len(projs) < 1:
proj = self.ks.projects.create(name=pname, domain=self.domain)
log.info("project created: %s", proj.id)
else:
proj = projs[0]
self.grant_membership(proj, group)
if self.with_local_user:
self.local_pw = make_password()
user = self.ks.users.create(name=lname, domain=self.domain,
project=proj, email=user_id, password=self.local_pw)
log.info("local user created: %s", user.id)
self.ks.users.add_to_group(user, group)
return dict(local_user_name=lname,
local_pw=self.local_pw)
if __name__ == '__main__':
DESCRIPTION = "Dataporten provisioner for Openstack"
AUTH_URL = 'http://10.0.3.11:5000/v3'
EMAIL_JK = '<EMAIL>'
def parse_args():
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('--id', default=EMAIL_JK,
help="Identity to provision for")
parser.add_argument('--pw', help='Password')
parser.add_argument('--url', default=AUTH_URL, help="Keystone url")
parser.add_argument('--project-name', default=PROJECT_NAME, help="Admin project name")
parser.add_argument('--dp-domain-name', default=DP_DOMAIN_NAME, help="Dataporten domain name")
parser.add_argument('--delete', default=0, type=int,
help="Set to 1 to delete resources")
parser.add_argument('--provision', default=1, type=int,
help="Set to 1 to provision")
parser.add_argument('--with-local-user', default=1, type=int,
help="Set to 1 to provision a local user for local access")
return parser.parse_args()
def make_config(args):
return dict(url=args.url,
password=<PASSWORD>,
username=ADMIN_NAME,
project_name=args.project_name,
dp_domain_name=args.dp_domain_name,
user_domain_name=DEFAULT_DOMAIN_NAME,
project_domain_name=DEFAULT_DOMAIN_NAME,
member_role_name=MEMBER_ROLE_NAME,
with_local_user=args.with_local_user)
args = parse_args()
config = make_config(args)
logging.basicConfig(level=logging.INFO)
prov = DpProvisioner(config)
if args.delete:
prov.del_resources(args.id)
if args.provision:
prov.provision(args.id)
if prov.local_pw:
print('password for local user: {}'.format(prov.local_pw)) | none | 1 | 2.20003 | 2 | |
model/app/__init__.py | Temiloluwa/language-modelling | 0 | 6622664 | <filename>model/app/__init__.py<gh_stars>0
from flask import Flask
from flask_restful import Api
from flask_cors import CORS
app = Flask(__name__)
api = Api(app)
cors = CORS(app, resources={r"/generatewords": {"origins": "*"}})
from app import views | <filename>model/app/__init__.py<gh_stars>0
from flask import Flask
from flask_restful import Api
from flask_cors import CORS
app = Flask(__name__)
api = Api(app)
cors = CORS(app, resources={r"/generatewords": {"origins": "*"}})
from app import views | none | 1 | 1.878348 | 2 | |
Desafios 2/m01/ex035.py | joaquimjfernandes/Curso-de-Python | 0 | 6622665 | print('\033[32;1mDESAFIO 35 - Analisando Triângulos\033[m')
print('\033[32;1mALUNO:\033[m \033[36;1m<NAME>\033[m')
print('-=' * 20)
print(' \033[34;1mANALISADOR DE TRIÂNGULO\033[m')
print('-=' * 20)
r1 = float(input('Primeiro Segmento: '))
r2 = float(input('Segundo Segmento: '))
r3 = float(input('Terceiro Segmento: '))
if r1 < r2 + r3 and r2 < r1 + r3 and r3 < r1 + r2:
print('OS Segmentos acima \033[33;1mPODEM FORMAR\033[m um Triângulo!')
else:
print('OS Segmentos acima \033[31;1mNÃO PODEM FORMAR\033[m um Triângulo!')
| print('\033[32;1mDESAFIO 35 - Analisando Triângulos\033[m')
print('\033[32;1mALUNO:\033[m \033[36;1m<NAME>\033[m')
print('-=' * 20)
print(' \033[34;1mANALISADOR DE TRIÂNGULO\033[m')
print('-=' * 20)
r1 = float(input('Primeiro Segmento: '))
r2 = float(input('Segundo Segmento: '))
r3 = float(input('Terceiro Segmento: '))
if r1 < r2 + r3 and r2 < r1 + r3 and r3 < r1 + r2:
print('OS Segmentos acima \033[33;1mPODEM FORMAR\033[m um Triângulo!')
else:
print('OS Segmentos acima \033[31;1mNÃO PODEM FORMAR\033[m um Triângulo!')
| none | 1 | 3.829519 | 4 | |
scrappers/parser.py | foukonana/news_media_scrappers | 2 | 6622666 | import gc
import re
import numpy as np
from tqdm import tqdm
from requests import get
from bs4 import BeautifulSoup
class base_parser():
def __init__(self):
self.article_links = []
self.article_main_bodies = []
self.article_titles = []
self.article_subtitles = []
self.article_tags = []
self.article_upload_times = []
self.article_update_times = []
self.article_authors = []
def _clear(self):
# clear the lists again to avoid memory issues
self.article_links = []
self.article_main_bodies = []
self.article_titles = []
self.article_subtitles = []
self.article_tags = []
self.article_upload_times = []
self.article_update_times = []
self.article_authors = []
self.data=None
self.articles_df = None
gc.collect()
def parse(self):
raise NotImplementedError
def save_articles(self, links_df, filename):
i=1
for row_index, link in tqdm(links_df.iterrows(), total=links_df.shape[0]):
try:
if not self.parse():
continue
except Exception as e:
print(e)
else:
self.article_links += [self.article_link]
self.article_main_bodies += [self.article_main]
self.article_titles += [self.article_title]
self.article_subtitles += [self.article_subtitle]
self.article_tags += [self.article_main_tags]
self.article_upload_times += [self.article_upl_time]
self.article_update_times += [self.article_upd_time]
self.article_authors += [self.article_author]
# write the data into parts
if ((row_index+1) % 1200 == 0) or (row_index == links_df.shape[0]-1):
data = {'links': self.article_links,
'title': self.article_titles,
'subtitle': self.article_subtitles,
'main_text': self.article_main_bodies,
'tags': self.article_tags,
'author': self.article_authors,
'upload_time': self.article_upload_times,
'update_time': self.article_update_times,
}
try:
articles_df = pd.DataFrame(data=data)
except Exception as e:
print(e)
else:
articles_df.to_csv(filename, index=False)
i+=1
self._clear()
class kontra(base_parser):
def parse(self, link):
article_link = link.values[0]
article_response = get(article_link)
if article_response.status_code == 200:
article_soup = BeautifulSoup(article_response.text, 'html.parser')
else:
return False
self.article_main = str(article_soup.find_all('div', property='schema:text')[0])
# remove HTML from text
self.article_main = re.sub('\xa0|\n', ' ', re.compile(r'<[^>]+>').sub('', self.article_main)).strip()
self.article_title = str(article_soup.find_all('title')).split('<title>')[1].split('|')[0].rstrip()
self.article_subtitle = self.article_main.split('.')[0]
self.article_main_tags = [str(tag).split('>')[1].split('<')[0] for tag in article_soup.find_all('a', property='schema:about')]
self.article_upl_time = str(article_soup.find_all('span', property='schema:dateCreated')).split('"')[3]
# there is no update/ modification time for articles in Kontra
self.article_upd_time = np.nan
self.article_author=np.nan
return True
# example usage
#
# parser = kontra()
# kontra.save_articles(links_df = df, filename = f"data/kontra_part_{i}.csv")
#
| import gc
import re
import numpy as np
from tqdm import tqdm
from requests import get
from bs4 import BeautifulSoup
class base_parser():
def __init__(self):
self.article_links = []
self.article_main_bodies = []
self.article_titles = []
self.article_subtitles = []
self.article_tags = []
self.article_upload_times = []
self.article_update_times = []
self.article_authors = []
def _clear(self):
# clear the lists again to avoid memory issues
self.article_links = []
self.article_main_bodies = []
self.article_titles = []
self.article_subtitles = []
self.article_tags = []
self.article_upload_times = []
self.article_update_times = []
self.article_authors = []
self.data=None
self.articles_df = None
gc.collect()
def parse(self):
raise NotImplementedError
def save_articles(self, links_df, filename):
i=1
for row_index, link in tqdm(links_df.iterrows(), total=links_df.shape[0]):
try:
if not self.parse():
continue
except Exception as e:
print(e)
else:
self.article_links += [self.article_link]
self.article_main_bodies += [self.article_main]
self.article_titles += [self.article_title]
self.article_subtitles += [self.article_subtitle]
self.article_tags += [self.article_main_tags]
self.article_upload_times += [self.article_upl_time]
self.article_update_times += [self.article_upd_time]
self.article_authors += [self.article_author]
# write the data into parts
if ((row_index+1) % 1200 == 0) or (row_index == links_df.shape[0]-1):
data = {'links': self.article_links,
'title': self.article_titles,
'subtitle': self.article_subtitles,
'main_text': self.article_main_bodies,
'tags': self.article_tags,
'author': self.article_authors,
'upload_time': self.article_upload_times,
'update_time': self.article_update_times,
}
try:
articles_df = pd.DataFrame(data=data)
except Exception as e:
print(e)
else:
articles_df.to_csv(filename, index=False)
i+=1
self._clear()
class kontra(base_parser):
def parse(self, link):
article_link = link.values[0]
article_response = get(article_link)
if article_response.status_code == 200:
article_soup = BeautifulSoup(article_response.text, 'html.parser')
else:
return False
self.article_main = str(article_soup.find_all('div', property='schema:text')[0])
# remove HTML from text
self.article_main = re.sub('\xa0|\n', ' ', re.compile(r'<[^>]+>').sub('', self.article_main)).strip()
self.article_title = str(article_soup.find_all('title')).split('<title>')[1].split('|')[0].rstrip()
self.article_subtitle = self.article_main.split('.')[0]
self.article_main_tags = [str(tag).split('>')[1].split('<')[0] for tag in article_soup.find_all('a', property='schema:about')]
self.article_upl_time = str(article_soup.find_all('span', property='schema:dateCreated')).split('"')[3]
# there is no update/ modification time for articles in Kontra
self.article_upd_time = np.nan
self.article_author=np.nan
return True
# example usage
#
# parser = kontra()
# kontra.save_articles(links_df = df, filename = f"data/kontra_part_{i}.csv")
#
| en | 0.478683 | # clear the lists again to avoid memory issues # write the data into parts # remove HTML from text # there is no update/ modification time for articles in Kontra # example usage # # parser = kontra() # kontra.save_articles(links_df = df, filename = f"data/kontra_part_{i}.csv") # | 2.616316 | 3 |
tests/test_init.py | neroks/mutmut | 0 | 6622667 | <reponame>neroks/mutmut
from pytest import raises
from mutmut import (
partition_node_list,
name_mutation,
Context,
)
def test_partition_node_list_no_nodes():
with raises(AssertionError):
partition_node_list([], None)
def test_name_mutation_simple_mutants():
assert name_mutation(None, 'True') == 'False'
def test_context_exclude_line():
context = Context(
source="__import__('pkg_resources').declare_namespace(__name__)\n"
)
assert context.exclude_line() is True
context = Context(source="__all__ = ['hi']\n")
assert context.exclude_line() is True
| from pytest import raises
from mutmut import (
partition_node_list,
name_mutation,
Context,
)
def test_partition_node_list_no_nodes():
with raises(AssertionError):
partition_node_list([], None)
def test_name_mutation_simple_mutants():
assert name_mutation(None, 'True') == 'False'
def test_context_exclude_line():
context = Context(
source="__import__('pkg_resources').declare_namespace(__name__)\n"
)
assert context.exclude_line() is True
context = Context(source="__all__ = ['hi']\n")
assert context.exclude_line() is True | none | 1 | 2.11923 | 2 | |
main.py | CalebABG/VMU931-IMU | 0 | 6622668 | <filename>main.py
#!/usr/bin/python
"""
Project relies on PySerial package
- pip install pyserial
- https://pypi.org/project/pyserial/
Python 2 and 3 Compatibility:
- pip install future
- https://python-future.org/compatible_idioms.html
"""
import serial
import vmu931_utils
import signal
import sys
import time
from builtins import print, input
# True when running under Python 2 (kept alongside the `future` compatibility shims).
OLD_PYTHON = sys.version_info[0] < 3

# Main-loop flag; the loop exits via `break` on the user's 'exit' command.
running = True

# Serial port the VMU931 IMU is attached to.
imuPort = 'COM8'

# Set by the Ctrl+C handler: pause data polling and execute the entered command.
execute_command = False
command = None


def millis():
    """Return the current wall-clock time in whole milliseconds."""
    return int(time.time() * 1000.0)


time_now = millis()
time_duration = 200  # milliseconds between "waiting for status" progress dots

# serial.Serial(port='COM8', baudrate=115200, timeout=1, rtscts=1)
# NOTE: use the single `imuPort` constant instead of repeating the 'COM8' literal.
imuSerial = serial.Serial(port=imuPort, baudrate=115200)
# Ctrl+C command handler
# Ctrl+C command handler
def imu_command_handler(signum, frame):
    """SIGINT handler: prompt the user for a command for the main loop.

    Sets the module-level `execute_command` flag and stores the lower-cased
    command string in `command`; the main loop consumes both.
    """
    global execute_command, command
    execute_command = True
    print(vmu931_utils.IMU_Instructions)
    command = input("Please Enter One of the Listed Commands: ")
    # NOTE(review): input() always returns a str, so this None guard looks
    # unreachable -- kept as-is to preserve behaviour.
    if command is not None:
        command = command.lower()
# Handle ctrl+C
signal.signal(signal.SIGINT, imu_command_handler)
if __name__ == "__main__":
    try:
        # Always request the status first, DON'T try to 'set' interfaces without first
        # getting the status from the IMU
        vmu931_utils.get_imu_status(imuSerial)
        # Poll until the IMU reports its status, printing a progress dot at
        # most once every time_duration milliseconds.
        print("Waiting to Get Device Status: ")
        while vmu931_utils.Device_Status is None:
            if millis() > time_now + time_duration:
                time_now = millis()
                print(".", end='')
                vmu931_utils.get_imu_data(imuSerial)
            if vmu931_utils.Device_Status is not None:
                break
        # Enable only the accelerometer and gyroscope streams.
        print("\n\nGot Device Status, Setting IMU Interface Values")
        vmu931_utils.set_imu_interface(imuSerial, 'accel', True)
        vmu931_utils.set_imu_interface(imuSerial, 'gyro', True)
        vmu931_utils.set_imu_interface(imuSerial, 'quat', False)
        vmu931_utils.set_imu_interface(imuSerial, 'mag', False)
        vmu931_utils.set_imu_interface(imuSerial, 'euler', False)
        # Main loop: stream IMU data until Ctrl+C queues a command
        # (see imu_command_handler above), then execute that command.
        print("IMU Interface Values Set, Grabbing IMU Data")
        while running:
            try:
                if not execute_command:
                    data = vmu931_utils.get_imu_data(imuSerial)
                else:
                    if command == 'exit':
                        break
                    elif command.startswith('set'):
                        # "set <interface> on|off"
                        args = command.split(" ")
                        if len(args) == 3:
                            vmu931_utils.set_imu_interface(imuSerial, args[1],
                                                           args[2].lower() == 'on')
                    elif command.startswith('res'):
                        # "res accel|gyro <resolution>"
                        args = command.split(" ")
                        if len(args) == 3:
                            if args[1] == 'accel':
                                vmu931_utils.set_accelerometer_resolution(imuSerial, int(args[2]))
                            elif args[1] == 'gyro':
                                vmu931_utils.set_gyro_resolution(imuSerial, int(args[2]))
                    elif command.startswith('get'):
                        # "get status"
                        args = command.split(" ")
                        if len(args) == 2 and args[1].lower() == 'status':
                            vmu931_utils.get_imu_status(imuSerial)
                    elif command.startswith('debug'):
                        # "debug on|off"
                        args = command.split(" ")
                        if len(args) == 2:
                            vmu931_utils.Debug = args[1].lower() == 'on'
                    elif command.startswith('errors'):
                        # "errors on|off"
                        args = command.split(" ")
                        if len(args) == 2:
                            vmu931_utils.ShowErrors = args[1].lower() == 'on'
                    execute_command = False
            except Exception as ex:
                # BUG FIX: the original `except()` used an empty exception
                # tuple, which matches *no* exception, so this handler never
                # ran and errors escaped the loop uncaught.
                running = False
                print(ex)
                break
    except Exception as err:
        # BUG FIX: same empty-tuple `except()` problem as above.
        running = False
        print(err)
    finally:
        if imuSerial is not None:
            imuSerial.close()
| <filename>main.py
#!/usr/bin/python
"""
Project relies on PySerial package
- pip install pyserial
- https://pypi.org/project/pyserial/
Python 2 and 3 Compatibility:
- pip install future
- https://python-future.org/compatible_idioms.html
"""
import serial
import vmu931_utils
import signal
import sys
import time
from builtins import print, input
OLD_PYTHON = sys.version_info[0] < 3
running = True
# Serial port the VMU931 is attached to; referenced below so the port is
# configured in exactly one place (it was previously hard-coded twice).
imuPort = 'COM8'
execute_command = False
command = None


def millis():
    """Return the current wall-clock time in integer milliseconds."""
    # PEP 8 (E731): a def is preferred over assigning a lambda to a name.
    return int(time.time() * 1000.0)


time_now = millis()
time_duration = 200  # milliseconds
# serial.Serial(port='COM8', baudrate=115200, timeout=1, rtscts=1)
imuSerial = serial.Serial(port=imuPort, baudrate=115200)
# Ctrl+C command handler
def imu_command_handler(signum, frame):
    """SIGINT handler: prompt the user for a command for the main loop.

    Sets the module-level `execute_command` flag and stores the lower-cased
    command string in `command`; the main loop consumes both.
    """
    global execute_command, command
    execute_command = True
    print(vmu931_utils.IMU_Instructions)
    command = input("Please Enter One of the Listed Commands: ")
    # NOTE(review): input() always returns a str, so this None guard looks
    # unreachable -- kept as-is to preserve behaviour.
    if command is not None:
        command = command.lower()
# Handle ctrl+C
signal.signal(signal.SIGINT, imu_command_handler)
if __name__ == "__main__":
    try:
        # Always request the status first, DON'T try to 'set' interfaces without first
        # getting the status from the IMU
        vmu931_utils.get_imu_status(imuSerial)
        # Poll until the IMU reports its status, printing a progress dot at
        # most once every time_duration milliseconds.
        print("Waiting to Get Device Status: ")
        while vmu931_utils.Device_Status is None:
            if millis() > time_now + time_duration:
                time_now = millis()
                print(".", end='')
                vmu931_utils.get_imu_data(imuSerial)
            if vmu931_utils.Device_Status is not None:
                break
        # Enable only the accelerometer and gyroscope streams.
        print("\n\nGot Device Status, Setting IMU Interface Values")
        vmu931_utils.set_imu_interface(imuSerial, 'accel', True)
        vmu931_utils.set_imu_interface(imuSerial, 'gyro', True)
        vmu931_utils.set_imu_interface(imuSerial, 'quat', False)
        vmu931_utils.set_imu_interface(imuSerial, 'mag', False)
        vmu931_utils.set_imu_interface(imuSerial, 'euler', False)
        # Main loop: stream IMU data until Ctrl+C queues a command
        # (see imu_command_handler above), then execute that command.
        print("IMU Interface Values Set, Grabbing IMU Data")
        while running:
            try:
                if not execute_command:
                    data = vmu931_utils.get_imu_data(imuSerial)
                else:
                    if command == 'exit':
                        break
                    elif command.startswith('set'):
                        # "set <interface> on|off"
                        args = command.split(" ")
                        if len(args) == 3:
                            vmu931_utils.set_imu_interface(imuSerial, args[1],
                                                           args[2].lower() == 'on')
                    elif command.startswith('res'):
                        # "res accel|gyro <resolution>"
                        args = command.split(" ")
                        if len(args) == 3:
                            if args[1] == 'accel':
                                vmu931_utils.set_accelerometer_resolution(imuSerial, int(args[2]))
                            elif args[1] == 'gyro':
                                vmu931_utils.set_gyro_resolution(imuSerial, int(args[2]))
                    elif command.startswith('get'):
                        # "get status"
                        args = command.split(" ")
                        if len(args) == 2 and args[1].lower() == 'status':
                            vmu931_utils.get_imu_status(imuSerial)
                    elif command.startswith('debug'):
                        # "debug on|off"
                        args = command.split(" ")
                        if len(args) == 2:
                            vmu931_utils.Debug = args[1].lower() == 'on'
                    elif command.startswith('errors'):
                        # "errors on|off"
                        args = command.split(" ")
                        if len(args) == 2:
                            vmu931_utils.ShowErrors = args[1].lower() == 'on'
                    execute_command = False
            except Exception as ex:
                # BUG FIX: the original `except()` used an empty exception
                # tuple, which matches *no* exception, so this handler never
                # ran and errors escaped the loop uncaught.
                running = False
                print(ex)
                break
    except Exception as err:
        # BUG FIX: same empty-tuple `except()` problem as above.
        running = False
        print(err)
    finally:
        if imuSerial is not None:
            imuSerial.close()
| en | 0.466946 | #!/usr/bin/python Project relies on PySerial package - pip install pyserial - https://pypi.org/project/pyserial/ Python 2 and 3 Compatibility: - pip install future - https://python-future.org/compatible_idioms.html # milliseconds # serial.Serial(port='COM8', baudrate=115200, timeout=1, rtscts=1) # Ctrl+C command handler # Handle ctrl+C # Always request the status first, DON'T try to 'set' interfaces without first # getting the status from the IMU | 2.908463 | 3 |
cart/urls.py | doctsystems/jaguarete-ecommerce | 0 | 6622669 | from django.urls import path
from .views import cart_add, cart_detalle, cart_eliminar, cart_clear
urlpatterns = [
path("", cart_detalle, name="detalle"),
path("add/<int:producto_id>/", cart_add, name="add"),
path("eliminar/<int:producto_id>/", cart_eliminar, name="eliminar"),
path("clear/", cart_clear, name="clear"),
]
| from django.urls import path
from .views import cart_add, cart_detalle, cart_eliminar, cart_clear
urlpatterns = [
path("", cart_detalle, name="detalle"),
path("add/<int:producto_id>/", cart_add, name="add"),
path("eliminar/<int:producto_id>/", cart_eliminar, name="eliminar"),
path("clear/", cart_clear, name="clear"),
]
| none | 1 | 1.56714 | 2 | |
quots/migrations/0004_auto_20170402_1832.py | GSByeon/openhgsenti | 29 | 6622670 | <reponame>GSByeon/openhgsenti
# -*- coding: utf-8 -*-
# Generated by Django 1.11b1 on 2017-04-02 09:32
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('quots', '0003_auto_20170402_1826'),
]
operations = [
migrations.AlterField(
model_name='in',
name='orderer',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='quots.Inner'),
),
migrations.AlterField(
model_name='out',
name='parent',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='quots.In'),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11b1 on 2017-04-02 09:32
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('quots', '0003_auto_20170402_1826'),
]
operations = [
migrations.AlterField(
model_name='in',
name='orderer',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='quots.Inner'),
),
migrations.AlterField(
model_name='out',
name='parent',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='quots.In'),
),
] | en | 0.804655 | # -*- coding: utf-8 -*- # Generated by Django 1.11b1 on 2017-04-02 09:32 | 1.42063 | 1 |
src/api.py | computer-geek64/swamphacks | 0 | 6622671 | #!/usr/bin/python3
# api.py
import os
import json
import math
import gmplot
import pymongo
from datetime import datetime
from data import mongo
from data import predict
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
from flask import Flask, jsonify, redirect, request, render_template, url_for, safe_join, send_from_directory
# Flask application setup; configuration values (HOME, API_NAME, MongoDB
# credentials, IP, PORT) are loaded from the "config" module.
app = Flask(__name__, template_folder="templates")
app.config.from_object("config")
# Rate limiter keyed on the caller's remote address.
limiter = Limiter(app, key_func=get_remote_address)
# Home
@app.route(app.config["HOME"], methods=["GET"])
def get_home():
    """Landing page: greet the caller."""
    return "Welcome to " + app.config["API_NAME"] + "!", 200
# API Endpoint
@app.route(safe_join(app.config["HOME"], "api", "alrt", "v1"), methods=["GET"])
def api_endpoint():
    """API v1 root: greet the caller."""
    return "Welcome to " + app.config["API_NAME"] + "\'s endpoint!", 200
# Update Location
@app.route(safe_join(app.config["HOME"], "api", "alrt", "v1", "update_location"), methods=["POST"])
@limiter.limit("1/second")
def update_location():
    """Store a posted geolocation fix (rate-limited to one per second).

    Expects a JSON body with coords.latitude, coords.longitude and a
    millisecond timestamp; the timestamp is converted to seconds.
    """
    data = json.loads(request.data.decode())
    user_data = {"lat": data["coords"]["latitude"], "lon": data["coords"]["longitude"], "time": data["timestamp"] / 1000}
    # NOTE(review): the user id is hard-coded -- presumably a demo
    # placeholder; confirm before production use.
    mongo.add_user_documents([user_data], "sample_user0")
    return "Success!", 200
# Get users in danger
@app.route(safe_join(app.config["HOME"], "api", "alrt", "v1", "get_users"), methods=["GET"])
def get_users():
    """Return the list of user ids present in the alerts.users collection."""
    client = pymongo.MongoClient("mongodb+srv://" + app.config["MONGODB_USER"] + ":" + app.config["MONGODB_PASS"] + "@alrt-ypzt7.mongodb.net/test?retryWrites=true&w=majority")
    db = client["alerts"]
    user_collection = db["users"]
    results = list(user_collection.find())
    client.close()
    return jsonify([x["user"] for x in results]), 200
# Get plotting coordinate points
@app.route(safe_join(app.config["HOME"], "api", "alrt", "v1", "points", "<string:user>"), methods=["GET"])
def get_points(user):
    """Render a map for a user with an alert, or return their last fix.

    If `user` has an entry in alerts.users: plot the last known location,
    a search circle whose radius grows with time since the last fix, the
    predicted position and the disaster location, and return the generated
    map.html.  Otherwise return the user's most recent lat/lon as JSON.
    """
    client = pymongo.MongoClient("mongodb+srv://" + app.config["MONGODB_USER"] + ":" + app.config["MONGODB_PASS"] + "@alrt-ypzt7.mongodb.net/test?retryWrites=true&w=majority")
    data = list(client["alerts"]["users"].find())
    results = [x["user"] for x in data]
    if user in results:
        data = data[results.index(user)]
        db = client["users"]
        user_collection = db[user]
        # Full location history, oldest first.
        results = list(user_collection.find().sort("time", pymongo.ASCENDING))
        x = []
        y = []
        for result in results:
            x.append(result["lat"])
            y.append(result["lon"])
        # Hours since the last known fix; the search radius grows with the
        # elapsed time (empirical formula; the 1609.34 factor below suggests
        # the radius is in miles -- TODO confirm).
        time_elapsed = (datetime.now().timestamp() - data["last_location"]["time"]) / 60 / 60
        radius = 4 * time_elapsed - (2 ** (time_elapsed / 4 - 2)) / math.log(2) + 1 / (4 * math.log(2))
        predicted_point = predict.predict_point(x, y, radius, (data["disaster"]["lat"], data["disaster"]["lon"]))
        # NOTE(review): json_response is built but never returned (the JSON
        # return below is commented out) -- dead code kept for reference.
        json_response = {"latitudeP": predicted_point[0], "longitudeP": predicted_point[1], "latitudeL": x[-1], "longitudeL": y[-1], "latitudeD": data["disaster"]["lat"], "longitudeD": data["disaster"]["lon"], "radius": radius}
        client.close()
        gmap = gmplot.GoogleMapPlotter(x[-1], y[-1], 16)
        gmap.circle(x[-1], y[-1], radius * 1609.34, "blue")  # miles -> metres
        gmap.marker(x[-1], y[-1], "blue")
        gmap.marker(predicted_point[0], predicted_point[1], "green")
        gmap.marker(data["disaster"]["lat"], data["disaster"]["lon"], "red")
        gmap.draw(os.path.join(os.path.dirname(__file__), "map.html"))
        # NOTE(review): `filename=` was renamed to `path=` in Flask 2.x;
        # confirm against the pinned Flask version.
        return send_from_directory(os.path.dirname(__file__), filename="map.html"), 200
        # return jsonify(json_response), 200
    results = client["users"][user].find().sort("time", pymongo.DESCENDING).limit(1)[0]
    client.close()
    return jsonify({"latitudeL": results["lat"], "longitudeL": results["lon"]}), 200
# Error handlers
@app.errorhandler(404)
def error_404(e):
    """Render the custom 404 page."""
    return render_template("404.html"), 404
@app.errorhandler(400)
def error_400(e):
    """Plain-text 400 response."""
    return "HTTP 400 - Bad Request", 400
@app.errorhandler(500)
def error_500(e):
    """Plain-text 500 response."""
    return "HTTP 500 - Internal Server Error", 500
if __name__ == "__main__":
    # Bind host and port from the config module.
    app.run(app.config["IP"], app.config["PORT"])
| #!/usr/bin/python3
# api.py
import os
import json
import math
import gmplot
import pymongo
from datetime import datetime
from data import mongo
from data import predict
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
from flask import Flask, jsonify, redirect, request, render_template, url_for, safe_join, send_from_directory
# Flask application setup; configuration values (HOME, API_NAME, MongoDB
# credentials, IP, PORT) are loaded from the "config" module.
app = Flask(__name__, template_folder="templates")
app.config.from_object("config")
# Rate limiter keyed on the caller's remote address.
limiter = Limiter(app, key_func=get_remote_address)
# Home
@app.route(app.config["HOME"], methods=["GET"])
def get_home():
    """Landing page: greet the caller."""
    return "Welcome to " + app.config["API_NAME"] + "!", 200
# API Endpoint
@app.route(safe_join(app.config["HOME"], "api", "alrt", "v1"), methods=["GET"])
def api_endpoint():
    """API v1 root: greet the caller."""
    return "Welcome to " + app.config["API_NAME"] + "\'s endpoint!", 200
# Update Location
@app.route(safe_join(app.config["HOME"], "api", "alrt", "v1", "update_location"), methods=["POST"])
@limiter.limit("1/second")
def update_location():
    """Store a posted geolocation fix (rate-limited to one per second).

    Expects a JSON body with coords.latitude, coords.longitude and a
    millisecond timestamp; the timestamp is converted to seconds.
    """
    data = json.loads(request.data.decode())
    user_data = {"lat": data["coords"]["latitude"], "lon": data["coords"]["longitude"], "time": data["timestamp"] / 1000}
    # NOTE(review): the user id is hard-coded -- presumably a demo
    # placeholder; confirm before production use.
    mongo.add_user_documents([user_data], "sample_user0")
    return "Success!", 200
# Get users in danger
@app.route(safe_join(app.config["HOME"], "api", "alrt", "v1", "get_users"), methods=["GET"])
def get_users():
    """Return the list of user ids present in the alerts.users collection."""
    client = pymongo.MongoClient("mongodb+srv://" + app.config["MONGODB_USER"] + ":" + app.config["MONGODB_PASS"] + "@alrt-ypzt7.mongodb.net/test?retryWrites=true&w=majority")
    db = client["alerts"]
    user_collection = db["users"]
    results = list(user_collection.find())
    client.close()
    return jsonify([x["user"] for x in results]), 200
# Get plotting coordinate points
@app.route(safe_join(app.config["HOME"], "api", "alrt", "v1", "points", "<string:user>"), methods=["GET"])
def get_points(user):
    """Render a map for a user with an alert, or return their last fix.

    If `user` has an entry in alerts.users: plot the last known location,
    a search circle whose radius grows with time since the last fix, the
    predicted position and the disaster location, and return the generated
    map.html.  Otherwise return the user's most recent lat/lon as JSON.
    """
    client = pymongo.MongoClient("mongodb+srv://" + app.config["MONGODB_USER"] + ":" + app.config["MONGODB_PASS"] + "@alrt-ypzt7.mongodb.net/test?retryWrites=true&w=majority")
    data = list(client["alerts"]["users"].find())
    results = [x["user"] for x in data]
    if user in results:
        data = data[results.index(user)]
        db = client["users"]
        user_collection = db[user]
        # Full location history, oldest first.
        results = list(user_collection.find().sort("time", pymongo.ASCENDING))
        x = []
        y = []
        for result in results:
            x.append(result["lat"])
            y.append(result["lon"])
        # Hours since the last known fix; the search radius grows with the
        # elapsed time (empirical formula; the 1609.34 factor below suggests
        # the radius is in miles -- TODO confirm).
        time_elapsed = (datetime.now().timestamp() - data["last_location"]["time"]) / 60 / 60
        radius = 4 * time_elapsed - (2 ** (time_elapsed / 4 - 2)) / math.log(2) + 1 / (4 * math.log(2))
        predicted_point = predict.predict_point(x, y, radius, (data["disaster"]["lat"], data["disaster"]["lon"]))
        # NOTE(review): json_response is built but never returned (the JSON
        # return below is commented out) -- dead code kept for reference.
        json_response = {"latitudeP": predicted_point[0], "longitudeP": predicted_point[1], "latitudeL": x[-1], "longitudeL": y[-1], "latitudeD": data["disaster"]["lat"], "longitudeD": data["disaster"]["lon"], "radius": radius}
        client.close()
        gmap = gmplot.GoogleMapPlotter(x[-1], y[-1], 16)
        gmap.circle(x[-1], y[-1], radius * 1609.34, "blue")  # miles -> metres
        gmap.marker(x[-1], y[-1], "blue")
        gmap.marker(predicted_point[0], predicted_point[1], "green")
        gmap.marker(data["disaster"]["lat"], data["disaster"]["lon"], "red")
        gmap.draw(os.path.join(os.path.dirname(__file__), "map.html"))
        # NOTE(review): `filename=` was renamed to `path=` in Flask 2.x;
        # confirm against the pinned Flask version.
        return send_from_directory(os.path.dirname(__file__), filename="map.html"), 200
        # return jsonify(json_response), 200
    results = client["users"][user].find().sort("time", pymongo.DESCENDING).limit(1)[0]
    client.close()
    return jsonify({"latitudeL": results["lat"], "longitudeL": results["lon"]}), 200
# Error handlers
@app.errorhandler(404)
def error_404(e):
    """Render the custom 404 page."""
    return render_template("404.html"), 404
@app.errorhandler(400)
def error_400(e):
    """Plain-text 400 response."""
    return "HTTP 400 - Bad Request", 400
@app.errorhandler(500)
def error_500(e):
    """Plain-text 500 response."""
    return "HTTP 500 - Internal Server Error", 500
if __name__ == "__main__":
    # Bind host and port from the config module.
    app.run(app.config["IP"], app.config["PORT"])
| en | 0.290383 | #!/usr/bin/python3 # api.py # Home # API Endpoint # Update Location # Get users in danger # Get plotting coordinate points # return jsonify(json_response), 200 # Error handlers | 2.464311 | 2 |
website.py | SurajMalpani/Movie-Trailer-website | 0 | 6622672 | import media # import media.py file to access the class Movie definitions
import fresh_tomatoes # import fresh_tomatoes.py file to generate webpage
# Build the Movie objects (title, storyline, poster image URL, YouTube
# trailer URL) and render the trailer page with fresh_tomatoes.
argo = media.Movie("Argo",
                   "Acting under the cover of a Hollywood producer scouting a "
                   "location for a science fiction film, a CIA agent launches "
                   "a dangerous operation to rescue six Americans in Tehran "
                   "during the U.S. hostage crisis in Iran in 1980.",
                   "https://upload.wikimedia.org/wikipedia/en/e/e1/Argo2012Poster.jpg",  # NOQA
                   "https://www.youtube.com/watch?v=JW3WfSFgrVY")
before_sunrise = media.Movie("Before Sunrise",
                             "While travelling on a train in Europe, Jesse,"
                             " meets Celine On his last day in Europe before"
                             " returning to the US, he decides to spend"
                             " his remaining hours with her.",
                             "http://t0.gstatic.com/images?q=tbn:ANd9GcR1P-mPnVeKEZbHA_p0gC0osmQqYhHCOGeXcQDIeqkd0xevmmSW",  # NOQA
                             "https://www.youtube.com/watch?v=25v7N34d5HE")
seven_pounds = media.Movie("Seven Pounds",
                           "<NAME>, an IRS agent,"
                           " embarks on an extraordinary journey in order to "
                           "change the lives of seven strangers.",
                           "http://www.gstatic.com/tv/thumb/movieposters/175217/p175217_p_v8_af.jpg",  # NOQA
                           "https://www.youtube.com/watch?v=hvu2F6t26hs")
stuck_in_love = media.Movie("Stuck in Love",
                            "A successful writer (<NAME>) tries to "
                            "reconnect with his two children "
                            "(<NAME>, <NAME>) after his divorce.",
                            "http://www.gstatic.com/tv/thumb/movieposters/9816875/p9816875_p_v8_aa.jpg",  # NOQA
                            "https://www.youtube.com/watch?v=ORKb_Vqbz9U")
k_pax = media.Movie("K-PAX",
                    "Robert, a mentally unstable man,"
                    " tries to convince the hospital staff that he is from "
                    "a planet called K-PAX.",
                    "http://www.gstatic.com/tv/thumb/movieposters/28596/p28596_p_v8_ae.jpg",  # NOQA
                    "https://www.youtube.com/watch?v=bVfHNhBcMTw")
interstellar = media.Movie("Interstellar",
                           "In the future, Earth is becoming uninhabitable. "
                           "Ex-NASA pilot Cooper, along with a team,"
                           " is sent on a planet exploration mission to "
                           "report which planet can sustain life.",
                           "http://t1.gstatic.com/images?q=tbn:ANd9GcRf61mker2o4KH3CbVE7Zw5B1-VogMH8LfZHEaq3UdCMLxARZAB",  # NOQA
                           "https://www.youtube.com/watch?v=zSWdZVtXT7E")
# Display order on the generated page.
movies = [argo, before_sunrise, interstellar, seven_pounds,
          k_pax, stuck_in_love]
fresh_tomatoes.open_movies_page(movies)
| import media # import media.py file to access the class Movie definitions
import fresh_tomatoes # import fresh_tomatoes.py file to generate webpage
# Build the Movie objects (title, storyline, poster image URL, YouTube
# trailer URL) and render the trailer page with fresh_tomatoes.
argo = media.Movie("Argo",
                   "Acting under the cover of a Hollywood producer scouting a "
                   "location for a science fiction film, a CIA agent launches "
                   "a dangerous operation to rescue six Americans in Tehran "
                   "during the U.S. hostage crisis in Iran in 1980.",
                   "https://upload.wikimedia.org/wikipedia/en/e/e1/Argo2012Poster.jpg",  # NOQA
                   "https://www.youtube.com/watch?v=JW3WfSFgrVY")
before_sunrise = media.Movie("Before Sunrise",
                             "While travelling on a train in Europe, Jesse,"
                             " meets Celine On his last day in Europe before"
                             " returning to the US, he decides to spend"
                             " his remaining hours with her.",
                             "http://t0.gstatic.com/images?q=tbn:ANd9GcR1P-mPnVeKEZbHA_p0gC0osmQqYhHCOGeXcQDIeqkd0xevmmSW",  # NOQA
                             "https://www.youtube.com/watch?v=25v7N34d5HE")
seven_pounds = media.Movie("Seven Pounds",
                           "<NAME>, an IRS agent,"
                           " embarks on an extraordinary journey in order to "
                           "change the lives of seven strangers.",
                           "http://www.gstatic.com/tv/thumb/movieposters/175217/p175217_p_v8_af.jpg",  # NOQA
                           "https://www.youtube.com/watch?v=hvu2F6t26hs")
stuck_in_love = media.Movie("Stuck in Love",
                            "A successful writer (<NAME>) tries to "
                            "reconnect with his two children "
                            "(<NAME>, <NAME>) after his divorce.",
                            "http://www.gstatic.com/tv/thumb/movieposters/9816875/p9816875_p_v8_aa.jpg",  # NOQA
                            "https://www.youtube.com/watch?v=ORKb_Vqbz9U")
k_pax = media.Movie("K-PAX",
                    "Robert, a mentally unstable man,"
                    " tries to convince the hospital staff that he is from "
                    "a planet called K-PAX.",
                    "http://www.gstatic.com/tv/thumb/movieposters/28596/p28596_p_v8_ae.jpg",  # NOQA
                    "https://www.youtube.com/watch?v=bVfHNhBcMTw")
interstellar = media.Movie("Interstellar",
                           "In the future, Earth is becoming uninhabitable. "
                           "Ex-NASA pilot Cooper, along with a team,"
                           " is sent on a planet exploration mission to "
                           "report which planet can sustain life.",
                           "http://t1.gstatic.com/images?q=tbn:ANd9GcRf61mker2o4KH3CbVE7Zw5B1-VogMH8LfZHEaq3UdCMLxARZAB",  # NOQA
                           "https://www.youtube.com/watch?v=zSWdZVtXT7E")
# Display order on the generated page.
movies = [argo, before_sunrise, interstellar, seven_pounds,
          k_pax, stuck_in_love]
fresh_tomatoes.open_movies_page(movies)
| en | 0.474773 | # import media.py file to access the class Movie definitions # import fresh_tomatoes.py file to generate webpage # NOQA # NOQA # NOQA # NOQA # NOQA # NOQA | 3.178295 | 3 |
AtCoder/ABC106/D.py | takaaki82/Java-Lessons | 1 | 6622673 | <reponame>takaaki82/Java-Lessons
# AtCoder ABC106 D: for each query (p, q), count trains whose whole
# section [l, r] lies inside [p, q].
N, M, Q = map(int, input().split())
# x[l][r] first counts trains with endpoints (l, r).
x = [[0 for i in range(N + 1)] for j in range(N + 1)]
for i in range(M):
    l, r = map(int, input().split())
    x[l][r] += 1
# Turn x into a 2-D prefix sum: x[i][j] = trains with l <= i and r <= j.
for i in range(1, N + 1):
    for j in range(1, N + 1):
        x[i][j] += x[i - 1][j] + x[i][j - 1] - x[i - 1][j - 1]
for i in range(Q):
    p, q = map(int, input().split())
    # Trains with r <= q, minus those that also start before p.
    ans = x[N][q] - x[p - 1][q]
    print(ans)
# AtCoder ABC106 D: for each query (p, q), count trains whose whole
# section [l, r] lies inside [p, q].
N, M, Q = map(int, input().split())
# x[l][r] first counts trains with endpoints (l, r).
x = [[0 for i in range(N + 1)] for j in range(N + 1)]
for i in range(M):
    l, r = map(int, input().split())
    x[l][r] += 1
# Turn x into a 2-D prefix sum: x[i][j] = trains with l <= i and r <= j.
for i in range(1, N + 1):
    for j in range(1, N + 1):
        x[i][j] += x[i - 1][j] + x[i][j - 1] - x[i - 1][j - 1]
for i in range(Q):
    p, q = map(int, input().split())
    # Trains with r <= q, minus those that also start before p.
    ans = x[N][q] - x[p - 1][q]
    print(ans)
snoopy/apps.py | Pradeek/django-snoopy | 5 | 6622674 | <reponame>Pradeek/django-snoopy
from django.apps import AppConfig
class SnoopyConfig(AppConfig):
    """Django application configuration for the snoopy app."""
    # Dotted module path of the application.
    name = 'snoopy'
    # Human-readable name (shown e.g. in the Django admin).
    verbose_name = "Snoopy"
| from django.apps import AppConfig
class SnoopyConfig(AppConfig):
    """Django application configuration for the snoopy app."""
    # Dotted module path of the application.
    name = 'snoopy'
    # Human-readable name (shown e.g. in the Django admin).
    verbose_name = "Snoopy"
lm-tools/interpolate-lm.py | senarvi/senarvi-speech | 6 | 6622675 | <filename>lm-tools/interpolate-lm.py<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Computes the optimal mixture (in terms of devel text perplexity) of language
# models, and creates an interpolated language model using SRILM.
import argparse
import sys
import tempfile
import subprocess
import re
from filetypes import TextFileType
# Command-line interface.
parser = argparse.ArgumentParser()
parser.add_argument('input', type=str, nargs='+', help='two or more input models')
parser.add_argument('--output', type=str, default='-', help='output model path (default is stdout)')
parser.add_argument('--write-weights', type=TextFileType('w'), default=None, help='write the optimized weights to this file, one per line')
parser.add_argument('--order', type=int, default=3, help='output n-gram model order (default is 3)')
parser.add_argument('--classes', type=str, default=None, help='interpret the language models as class models and read the classes from the given file')
parser.add_argument('--unk', action='store_true', help='include unknown word token')
parser.add_argument('--map-unk', metavar='TOKEN', type=str, default='<unk>', help='specify the out-of-vocabulary token (default is <unk>)')
parser.add_argument('--opt-perp', type=str, dest='tuning_text', default=None, help='a development text for tuning interpolation weights')
parser.add_argument('--ngram-cmd', type=str, default='ngram', help='SRILM ngram executable (default is "ngram")')
args = parser.parse_args()
num_components = len(args.input)
if num_components < 2:
    sys.stderr.write("Expecting two or more input models.\n")
    sys.exit(1)
# Uniform weight: the starting point for optimization, and the final
# weights when no tuning text is given.
equal_lambda = 1.0 / num_components
if args.tuning_text is not None:
    # Produce per-model perplexity listings on the tuning text with
    # "ngram -ppl -debug 2", one temporary file per component model.
    ppl_files = []
    for model_path in args.input:
        ppl_files.append(tempfile.NamedTemporaryFile())
        command = [args.ngram_cmd,
                   '-order', str(args.order),
                   '-lm', model_path,
                   '-map-unk', args.map_unk,
                   '-ppl', args.tuning_text,
                   '-debug', '2']
        if not args.classes is None:
            command.extend(['-classes', args.classes,
                            '-simple-classes'])
        if args.unk:
            command.append('-unk')
        print(' '.join(command))
        ppl_files[-1].write(subprocess.check_output(command))
        ppl_files[-1].flush()
    # Let SRILM's compute-best-mix optimize the interpolation weights,
    # starting from the uniform mixture.
    lambda_arg = 'lambda=' + ' '.join([str(equal_lambda)] * num_components)
    command = ['compute-best-mix', lambda_arg ]
    command.extend([x.name for x in ppl_files])
    print(' '.join(command))
    output = subprocess.check_output(command).decode('utf-8')
    # Parse the optimized weights out of the tool's textual report.
    matches = re.search(r'best lambda \(([0-9.e\- ]+)\)', output)
    if not matches:
        sys.stderr.write("Couldn't parse compute-best-mix output.\n")
        sys.stderr.write("Output was:\n")
        sys.stderr.write(output)
        sys.exit(1)
    lambdas = matches.group(1).split(' ')
else:
    # No tuning text: fall back to equal weights.
    lambdas = [str(equal_lambda)] * num_components
if args.write_weights is not None:
    args.write_weights.write('\n'.join(lambdas) + '\n')
# Build the interpolated model with "ngram -mix-lm"; the first two models
# use -lm/-mix-lm, further components use -mix-lmN/-mix-lambdaN.
command = [args.ngram_cmd,
           '-order', str(args.order),
           '-write-lm', args.output,
           '-lm', args.input[0],
           '-map-unk', args.map_unk,
           '-lambda', lambdas[0],
           '-mix-lm', args.input[1]]
if not args.classes is None:
    command.extend(['-classes', args.classes,
                    '-simple-classes'])
if args.unk:
    command.append('-unk')
for i in range(2, num_components):
    command.append('-mix-lm' + str(i))
    command.append(args.input[i])
    command.append('-mix-lambda' + str(i))
    command.append(lambdas[i])
print(' '.join(command))
subprocess.check_call(command)
| <filename>lm-tools/interpolate-lm.py<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Computes the optimal mixture (in terms of devel text perplexity) of language
# models, and creates an interpolated language model using SRILM.
import argparse
import sys
import tempfile
import subprocess
import re
from filetypes import TextFileType
# Command-line interface.
parser = argparse.ArgumentParser()
parser.add_argument('input', type=str, nargs='+', help='two or more input models')
parser.add_argument('--output', type=str, default='-', help='output model path (default is stdout)')
parser.add_argument('--write-weights', type=TextFileType('w'), default=None, help='write the optimized weights to this file, one per line')
parser.add_argument('--order', type=int, default=3, help='output n-gram model order (default is 3)')
parser.add_argument('--classes', type=str, default=None, help='interpret the language models as class models and read the classes from the given file')
parser.add_argument('--unk', action='store_true', help='include unknown word token')
parser.add_argument('--map-unk', metavar='TOKEN', type=str, default='<unk>', help='specify the out-of-vocabulary token (default is <unk>)')
parser.add_argument('--opt-perp', type=str, dest='tuning_text', default=None, help='a development text for tuning interpolation weights')
parser.add_argument('--ngram-cmd', type=str, default='ngram', help='SRILM ngram executable (default is "ngram")')
args = parser.parse_args()
num_components = len(args.input)
if num_components < 2:
    sys.stderr.write("Expecting two or more input models.\n")
    sys.exit(1)
# Uniform weight: the starting point for optimization, and the final
# weights when no tuning text is given.
equal_lambda = 1.0 / num_components
if args.tuning_text is not None:
    # Produce per-model perplexity listings on the tuning text with
    # "ngram -ppl -debug 2", one temporary file per component model.
    ppl_files = []
    for model_path in args.input:
        ppl_files.append(tempfile.NamedTemporaryFile())
        command = [args.ngram_cmd,
                   '-order', str(args.order),
                   '-lm', model_path,
                   '-map-unk', args.map_unk,
                   '-ppl', args.tuning_text,
                   '-debug', '2']
        if not args.classes is None:
            command.extend(['-classes', args.classes,
                            '-simple-classes'])
        if args.unk:
            command.append('-unk')
        print(' '.join(command))
        ppl_files[-1].write(subprocess.check_output(command))
        ppl_files[-1].flush()
    # Let SRILM's compute-best-mix optimize the interpolation weights,
    # starting from the uniform mixture.
    lambda_arg = 'lambda=' + ' '.join([str(equal_lambda)] * num_components)
    command = ['compute-best-mix', lambda_arg ]
    command.extend([x.name for x in ppl_files])
    print(' '.join(command))
    output = subprocess.check_output(command).decode('utf-8')
    # Parse the optimized weights out of the tool's textual report.
    matches = re.search(r'best lambda \(([0-9.e\- ]+)\)', output)
    if not matches:
        sys.stderr.write("Couldn't parse compute-best-mix output.\n")
        sys.stderr.write("Output was:\n")
        sys.stderr.write(output)
        sys.exit(1)
    lambdas = matches.group(1).split(' ')
else:
    # No tuning text: fall back to equal weights.
    lambdas = [str(equal_lambda)] * num_components
if args.write_weights is not None:
    args.write_weights.write('\n'.join(lambdas) + '\n')
# Build the interpolated model with "ngram -mix-lm"; the first two models
# use -lm/-mix-lm, further components use -mix-lmN/-mix-lambdaN.
command = [args.ngram_cmd,
           '-order', str(args.order),
           '-write-lm', args.output,
           '-lm', args.input[0],
           '-map-unk', args.map_unk,
           '-lambda', lambdas[0],
           '-mix-lm', args.input[1]]
if not args.classes is None:
    command.extend(['-classes', args.classes,
                    '-simple-classes'])
if args.unk:
    command.append('-unk')
for i in range(2, num_components):
    command.append('-mix-lm' + str(i))
    command.append(args.input[i])
    command.append('-mix-lambda' + str(i))
    command.append(lambdas[i])
print(' '.join(command))
subprocess.check_call(command)
| en | 0.744506 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Computes the optimal mixture (in terms of devel text perplexity) of language # models, and creates an interpolated language model using SRILM. | 2.606217 | 3 |
vte/generate.py | textshell/flatpak-terminal-zoo | 0 | 6622676 | <reponame>textshell/flatpak-terminal-zoo
#! /usr/bin/env python3
from string import Template
VTE_TEMPLATE = Template(r"""
{
"app-id": "de.uchuujin.fp.termzoo.vte${vte_version_b}",
"runtime": "org.gnome.Platform",
"runtime-version": "${vte_runtime}",
"sdk": "org.gnome.Sdk",
"command": "vte${vte_api}",
"finish-args": ["--socket=x11", "--device=dri", "--talk-name=org.freedesktop.Flatpak"],
"modules": [
{
"name": "vte",
"buildsystem": "autotools",
"build-options": {
"cxxflags-override": true,
"cxxflags": ""
},
"sources": [
{
"type": "archive",
"url": "https://download.gnome.org/core/${vte_file}",
"sha512": "${vte_sha}"
}
]
},
{
"name": "scripts",
"buildsystem": "simple",
"build-commands": [
"install -D run-in-host /app/bin/run-in-host"
],
"sources": [
{
"type": "file",
"path": "../run-in-host"
}
]
}
]
}
""")
VTE_MESON_TEMPLATE = Template(r"""
{
"app-id": "de.uchuujin.fp.termzoo.vte${vte_version_b}",
"runtime": "org.gnome.Platform",
"runtime-version": "${vte_runtime}",
"sdk": "org.gnome.Sdk",
"command": "vte${vte_api}",
"build-options" : {
"cxxflags": "-fno-exceptions"
},
"finish-args": ["--socket=x11", "--device=dri", "--talk-name=org.freedesktop.Flatpak"],
"modules": [
{
"name": "vte",
"buildsystem": "meson",
"sources": [
{
"type": "archive",
"url": "https://download.gnome.org/core/${vte_file}",
"sha512": "${vte_sha}"
}
]
},
{
"name": "scripts",
"buildsystem": "simple",
"build-commands": [
"install -D run-in-host /app/bin/run-in-host"
],
"sources": [
{
"type": "file",
"path": "../run-in-host"
}
]
}
]
}
""")
# Catalogue of vte releases to generate flatpak manifests for.
# Each entry is: (release version, binary/API suffix appended to the "vte"
# command name, tarball path under https://download.gnome.org/core/,
# sha512 checksum of that tarball).
vte_versions = (
    ('0.28.0', '', '3.0/3.0.0/sources/vte-0.28.0.tar.bz2', '5f9549ee09dd1d100bf0e90f81b12e8237ba4cedd79cf2fc33edb81edb5796ff23a88563a12ae808cdc057ae2ef508999426b36a4985fef6dc008431f1aa38f0'),
    ('0.28.1', '', '3.1/3.1.5/sources/vte-0.28.1.tar.xz', 'f76d5d10da069ee2a6235abc5a7db883dd2f582a58b935e8cd8150b4af8215db3bfddef103485a07c0d23608e5eba8e462d73e177d2a61a7bd97a69dd3cbf2af'),
    ('0.30.0', '2_90', '3.2/3.2.0/sources/vte-0.30.0.tar.xz', 'bcd35227100f326b5260db31239537f19604ce2ac7f735c4d39485640fd4f5c18bbc2298ccefcca657b8707e51e1028e145d0f367b67102ee89ddf2a6a7b914d'),
    ('0.30.1', '2_90', '3.2/3.2.2/sources/vte-0.30.1.tar.xz', 'b7e601f207fe87c56826b093904a27f90e51e8992707d5978ec058fb91c1c790287af8bc2594b8149f4a69223cff2f2c73bf13a43507f4ac37df7c3faee69e60'),
    ('0.32.0', '2_90', '3.4/3.4.0/sources/vte-0.32.0.tar.xz', '729e1dd7261fd0394a7a4a566b2076c49ae9f90505c701e663cbcd6833891a20b9a2120b4334694aaa2757312e7f973488bde6c02b3d2681eb4e116fc2842ee0'),
    ('0.32.1', '2_90', '3.4/3.4.1/sources/vte-0.32.1.tar.xz', '8c4d63f33ba99e8c39ee5fe6b0998dd89aa7415d35765cee4c42116e7cdb8aabc958471b68cc88a35c937949a9043678ce4ae74541476b36d5617491e8da6224'),
    ('0.32.2', '2_90', '3.5/3.5.5/sources/vte-0.32.2.tar.xz', '0346a3a85de8bfc84fce37f073154ee7af746b8872e7970f4f58e11cf261311def714100370c2eb371beecba5d22207dfdada7b0df7fee00f32035e93b9e8808'),
    ('0.34.0', '2_90', '3.6/3.6.0/sources/vte-0.34.0.tar.xz', '80c2f1c6b310324c05fe1c2373583280a7c32950ce4601151a9e81f24bf0636260ec2f02979437c99e5e9f916301ea694f2d40083f974fe92b8624618c2c0a73'),
    ('0.34.1', '2_90', '3.6/3.6.1/sources/vte-0.34.1.tar.xz', 'a80f74a9699c2c06b4bf269adf16298724c07d749fffc1c1dd7f3f5a89d3bb08f4d3db3aa888dbc3dea4be75846eb5ab6c794e661cc5c192fca9235aff293cbb'),
    ('0.34.2', '2_90', '3.6/3.6.2/sources/vte-0.34.2.tar.xz', '4a89f149e9c101ed31b76086de1a05287d9c2a750ee098792fa9508b8712fa065a63e60d50cfb4a060991c2f6f182ddb91d3f21d4c7baf61337f08dc1a960a35'),
    ('0.34.3', '2_90', '3.8/3.8.0/sources/vte-0.34.3.tar.xz', '6eecde8d4f03eabd5710850b4f559d1f55ab98a201b6611428025579a6d43d52faa80d6f6607a6b93e42b5efda597f2b8507312113b939574ff7b73a3bd134ef'),
    ('0.34.4', '2_90', '3.8/3.8.1/sources/vte-0.34.4.tar.xz', '2f1dce6ed47835b098c37351fa665ddbfd8719a4275672925d8030655cd96412ec7f3edc23e9d94a3e035a36969c74c5186d1689dcf1e1111071158e64d2360f'),
    ('0.34.5', '2_90', '3.8/3.8.2/sources/vte-0.34.5.tar.xz', 'bf49cffe9f5bf243aada353adda1d8f86d75883bed343792a16d69d8956e9fc6f846cd1c317c7b2b03ba446de9c645cba52384190d5343f5d000a4557b40fb53'),
    ('0.34.6', '2_90', '3.9/3.9.3/sources/vte-0.34.6.tar.xz', '185703bdb8d4d46c1f340af8daa41194fcd28fdffafc77f190e776350f4bd875000e94b6cc481da19cb1f8da26ae2d757f4a29b703833e71fa20fcc5ccb329fd'),
    ('0.34.7', '2_90', '3.9/3.9.5/sources/vte-0.34.7.tar.xz', 'a122814f7fee4202c0bc4862798c98b078e5b185eb8c3c11959001b33bd5e029579f7337f85bdffb3f8a7b22af3a6763a2353ecff16a8e5c8b941431adabc5e0'),
    ('0.34.8', '2_90', '3.10/3.10.0/sources/vte-0.34.8.tar.xz', 'e4a7b691d0affcb05c463e0eeeab42591e13679f8b8cd042869b2b91aaa82c24222734f68767e3246f2554f8fca481b35383239ecfdb39e5865fc9b8b3b8479b'),
    ('0.34.9', '2_90', '3.10/3.10.2/sources/vte-0.34.9.tar.xz', '57fff7d5916bcd8a8a13e3cf050f85ed78e9154fdf2510a6d6f594f938c6d56f512089c6bc3e06c9123d35d17f2eb3b3814a5407635027ec0116c25f73034688'),
    ('0.36.0', '2_90', '3.12/3.12.1/sources/vte-0.36.0.tar.xz', '7666fdb2d3458390b154add7da781cb973498b019d859a3396797102dc4840d9bdbae44c83257a3c67609e658173dc0d5ff382b0343a5bf3c6db43e04392893c'),
    ('0.36.2', '2_90', '3.12/3.12.2/sources/vte-0.36.2.tar.xz', 'fafd368b95918024e6324c81f4fa6c35ad15138ad44af2f92164492d9773b127202d11824c9def4a26e17de44336f42e9c8906330dd300795c280ca068381632'),
    ('0.38.0', '-2.91', '3.14/3.14.0/sources/vte-0.38.0.tar.xz', '624d8c9e7d4429c21830d4bdbd325d3b9a75e35ecad0494fe6051ae339745c59ab656af4989d632e638402bc1fedf4c4f69387d39bf731bd6eabaccf682b1bc0'),
    ('0.38.1', '-2.91', '3.15/3.15.1/sources/vte-0.38.1.tar.xz', 'd6e616d2f327ac6c28ad9ac11f0e7a081a5659b9ad90dd3246fa4240a8642ed1ffe87502ab4307527e03789195103cc33d3f783f2d89e7c866c0cc8d5cd0e24c'),
    ('0.38.2', '-2.91', '3.14/3.14.2/sources/vte-0.38.2.tar.xz', '4c493e18cca4b50d66640d282d7d33a471d1ac4bd2dd929b059b829a42fed852d202669751b266adf7291502203e26c513b6852b3035458d41a433b900b0c6bb'),
    ('0.40.0', '-2.91', '3.16/3.16.1/sources/vte-0.40.0.tar.xz', 'f7ff28cdefc80e7fa5d876b8cba5d396fd98aa13c21a6cd320ac4042a8747e67ebf7a7c13ddab7bebb6b839231aebcc4fc25be9f0cc6c55859886c7948d4ac79'),
    ('0.40.2', '-2.91', '3.16/3.16.2/sources/vte-0.40.2.tar.xz', '<KEY>'),  # FIXME: sha512 is a redacted '<KEY>' placeholder -- restore the real checksum before use
    ('0.42.0', '-2.91', '3.18/3.18.0/sources/vte-0.42.0.tar.xz', 'e2b2c00c81af05cdd5d99fd2de4fcb9019cffc5bd8b59878b7676cf7104c79a0c095b28d9a62586e3a531e00b80ba70062352ca1d4e96a902fef8d8f1121df49'),
    ('0.42.1', '-2.91', '3.18/3.18.1/sources/vte-0.42.1.tar.xz', '4cf917d3f343973dcb816a341276bfab7784a9f4dc6f8fb402193e9a9062503ac192ccc274fe92bb20a17ac182d21fd2268bf9c1ddea817f730db5612e3b21c0'),
    ('0.43.0', '-2.91', '3.18/3.18.2/sources/vte-0.43.0.tar.xz', 'fabe336996fd49ac08fc347f87e2b6169a875bff5570c3e0276271e0efcb215d206c6663d961ae604ee23ea668cbcacdc0664c06ec626e0a5ee7120cc57417fc'),
    ('0.44.0', '-2.91', '3.20/3.20.0/sources/vte-0.44.0.tar.xz', 'c190ba6cd4785fc16b1982517a0fcfe2935e50082acec095bdb5d56467b4952fdd48340776c2a4ecef4da847a668a56bca4599801b00f5090927a0e5f31d2c3a'),
    ('0.44.1', '-2.91', '3.20/3.20.1/sources/vte-0.44.1.tar.xz', '1fd352ea111cc13f8e7b2acae374e2fbd9d5025f6cb28b193164024594a5df12c9418bcdff11ff3247a9b785d6584c484a242c22df6a98afc3f0dfa1f716499c'),
    ('0.44.2', '-2.91', '3.20/3.20.2/sources/vte-0.44.2.tar.xz', '98db3c1528d5f458164e2d30056cd816e5943d9c569551878119e79d4fbca1865e52667393bf298f32fd54710d1b268a5aac125222ecb29ce854522be3776356'),
    ('0.46.0', '-2.91', '3.22/3.22.1/sources/vte-0.46.0.tar.xz', '543cdba5c51c5384e54fc372924c2667ded952cbc8ffafb7ff62f8643c6a7e2440439109eb12378ed70b0e0a256d3ef97d6da004dd8088d36bccdd7fa16593f9'),
    ('0.46.1', '-2.91', '3.22/3.22.2/sources/vte-0.46.1.tar.xz', '04b3f8ce922c4326d92bef605a0dbe195764946cd5f1acf28fd6d69c0cdb2ee661cc7e424436c72380da5d0250790ae658ac49d761b567dea88d92157285889d'),
    ('0.47.90', '-2.91', '3.24/3.24.0/sources/vte-0.47.90.tar.xz', 'c36310486b0575b26330879d2ca222ce4ca36af7659ec13113b209a897371da7ce0ff758f2c0fc5a9d42b7fd60caae8603aa564a2a5f58159979e4a9388a688b'),
    ('0.48.2', '-2.91', '3.24/3.24.1/sources/vte-0.48.2.tar.xz', 'cbb2835618c554d72a790e16f1ac5b3c06a8a810d8d517c475ed7ca46eeb56d7c9f9226918e13c5c84c04ce1ccb5467e62af7c4453f317a0aad197a4c179d76a'),
    ('0.48.3', '-2.91', '3.24/3.24.2/sources/vte-0.48.3.tar.xz', '3037b61a759cfcf56838bc7804df5a211da416bff9ddc5791f8a8d5157b90926985cfe57d7edbab42de64945d5e668470fe4129a218fb9c7af546648e1660c72'),
    ('0.50.0', '-2.91', '3.26/3.26.0/sources/vte-0.50.0.tar.xz', 'ac05622ecf92115cf6aef1ef7805b0ef19856d65d2dfc9792b149443aeee4c9bbfb871c600c9be8b0f4baac4a143e70412208d0a510cb53f13b2caf2caa33587'),
    ('0.50.1', '-2.91', '3.27/3.27.1/sources/vte-0.50.1.tar.xz', 'd5e9c7990ddb6ee789d4f4f8df05c23d794791e8381266551cf6574658ee8fd6adc4c7b9ac6aadcf957887b0ba13f7f89002fd820c8d35c789bba918414bd330'),
    ('0.50.2', '-2.91', '3.27/3.27.3/sources/vte-0.50.2.tar.xz', 'a1b12c74bedb167bf2a470294c566198c224c90be9b5c50366ef18d9542921f6cb2dc2329afd82f7342279c3eebd4ef5dfd753b4feb9d4e3e194cb878b48a7a2'),
    ('0.52.0', '-2.91', '3.28/3.28.0/sources/vte-0.52.0.tar.xz', '2f8b1efc7c73c4e74070d3bfcb33e61672d6ed70a352eed2c43198f8c3ffb513f6ed98dcf822dbd55d31d914c7f9dc157b29f8e4781705ee2c9ddb0e43c6e5fa'),
    ('0.52.1', '-2.91', '3.29/3.29.1/sources/vte-0.52.1.tar.xz', 'a1de54950cdcac9afccc1b13bd71b65ad1d6f93055d0b005b4a15b6f84f55029848cf8f2f9155cf3e6edc69fe973529fd4313f59af74fc1035aebd4c0b85655f'),
    ('0.54.0', '-2.91', '3.30/3.30.0/sources/vte-0.54.0.tar.xz', '69dd0caae4eacc179f84eccf98625a31140286beca4244a8f06420bd36ac62dcaddf9e9d8b114549ca97927e94b562df3e7daa9fad971484f05ebdd1c0f7b787'),
    ('0.54.1', '-2.91', '3.30/3.30.1/sources/vte-0.54.1.tar.xz', '5cb866e75c35e1b5728d17b0433641ceb4837495d59185a04fde9dd8c9849ab905a6b2718a50b27cc70323c7a6c5db31a808816576c30d7f18ece889a329fb61'),
    ('0.54.2', '-2.91', '3.30/3.30.2/sources/vte-0.54.2.tar.xz', '214ec69110d6ad6caa9bc41fb741874bfcf27f20f34d3ae745b13903c574f9c854f7e0dadbae3dbc8ce04c5a6eb818a433c50569c1ef802501a9a078385f23fc'),
    ('0.55.0', '-2.91', '3.31/3.31.2/sources/vte-0.55.0.tar.xz', '972bb4e616bc9436482dc938c31dcf62e8e00eba375554e4049485fbd1dc2f31f657fc6cd83777788781decb3b50559ee982684c8c50a139743e239dbfe078b1'),
    ('0.56.0', '-2.91', '3.32/3.32.0/sources/vte-0.56.0.tar.xz', 'f366ed4a28a896ba919184b50a55ee110eae43127847f34c049684bcb9b81327d1b72e9d871b2a5b7f7fa12f3f4aa721ac3d770770b600dca9c433cb2c674915'),
    ('0.56.3', '-2.91', '3.32/3.32.2/sources/vte-0.56.3.tar.xz', 'f78b3d532ca47e53c1bb51db6780697ce4692d493c0030d2dc4beb63a2595e44a43eb409ee31b94e4551eae259ac1baa8f06825a02fd66df8352e192f4ea1988'),
    ('0.57.0', '-2.91', '3.33/3.33.2/sources/vte-0.57.0.tar.xz', '87788ed44b39d57cf6d0ff99046ab575c8a410a0713e8f7404ada1239a1691f687b689a0b692f1bfe84ba7c38308382da409bab0780b1168d0ba99bbc0eb7b4f'),
    ('0.57.3', '-2.91', '3.33/3.33.4/sources/vte-0.57.3.tar.xz', 'f5496fd2b24af8d8eb895adaea59ee5ed4250c12a97745e025aacebca2d709901ae84befca58a3c5f1a54014a97af460ea53f844b1b1b9e32e192cc5883ecfed'),
    ('0.58.0', '-2.91', '3.34/3.34.0/sources/vte-0.58.0.tar.xz', '4d0fc725e0c71921b3d235d434600ad3c0807d5c0e7bd62fb782d857254db334bb851b75382c9613a5af753b5d6a1c05b174731427a8560b9b14101b3cc38c06'),
    ('0.58.1', '-2.91', '3.34/3.34.1/sources/vte-0.58.1.tar.xz', '1f795731fbb7ee76c4274562d5a55668c3b8ecad5a00ff83c762b0a2517ccffb85e796e937407d46e6bdb64327759eabc5878455d1d66cb1b8ff8b6060a4b1b7'),
    ('0.59.9', '-2.91', '3.35/3.35.1/sources/vte-0.59.0.tar.xz', '533d1e87a699137a33a6ddb82bf0f010925ba578974e1f6c87bae0b497309dd84c3cb2f5f6884f34f7fbcfad94fbaa07eb3a80387ee9f16b5f3f0ea2679e7376'),  # NOTE(review): version says 0.59.9 but the tarball is vte-0.59.0 -- confirm this is intentional
    ('0.60.0', '-2.91', '3.36/3.36.0/sources/vte-0.60.0.tar.xz', '8c1a80ba90fa1c1f4b5ec1a1d3793af79c04fbbad4acecba094db79771555b1689017864bd81bee4366f9ef363f629f20731bac998d994b9bfa37ee59e9e58b0'),
    ('0.60.1', '-2.91', '3.36/3.36.1/sources/vte-0.60.1.tar.xz', '123a8fcc14f4dba450411f95f43eb60108fee95c328d0e7331c9366d96ba2caa548dece3e95a8b779dda19d322d6879d02abc6ac68e36450e4e72f17a0963c30'),
    ('0.60.2', '-2.91', '3.36/3.36.2/sources/vte-0.60.2.tar.xz', '801ac727cab33d2c3f4ba4d86bf7f19a82628acd2739196f24c85d038ba6bcc6a67239aac09141b8e0119a67f199ff8a8c653641a8e9aea1e8ab68bfd16017db'),
    ('0.60.3', '-2.91', '3.36/3.36.3/sources/vte-0.60.3.tar.xz', '3694fe711e0b3eb9d6ba37ad8036f5d3cca4265635ed7afcde750a8445b17f820d1c55b557d0ea1c8a5a45e5408915d8da2ffd65b4d397c6582f288812ae1f18'),
)
# Emit one flatpak manifest file per release listed in vte_versions.
# Fixes the `not x in y` anti-idiom (PEP 8: use `x not in y`) and names the
# branch condition so the autotools/meson split is readable.
for vte_version, vte_api, vte_file, vte_sha in vte_versions:
    # Dots are replaced because the version is embedded in the app-id.
    vte_version_b = vte_version.replace('.', '_')
    with open('de.uchuujin.fp.termzoo.vte{}.json'.format(vte_version_b), "w") as f:
        # The 0.2x-0.5x series built with autotools, except these late
        # pre-releases, which are handled by the meson template below.
        autotools_era = (vte_version[:3] in ('0.2', '0.3', '0.4', '0.5')
                         and vte_version not in ('0.57.0', '0.57.3', '0.58.0', '0.58.1', '0.59.9'))
        if autotools_era:
            # Autotools-era releases are pinned to an older GNOME runtime;
            # the earliest releases (empty API suffix) get 3.28 instead.
            vte_runtime = '3.28' if vte_api == '' else '3.30'
            f.write(VTE_TEMPLATE.substitute(vte_version=vte_version, vte_api=vte_api, vte_runtime=vte_runtime,
                                            vte_version_b=vte_version_b, vte_file=vte_file, vte_sha=vte_sha))
        else:
            vte_runtime = '3.36'
            f.write(VTE_MESON_TEMPLATE.substitute(vte_version=vte_version, vte_api=vte_api, vte_runtime=vte_runtime,
                                                  vte_version_b=vte_version_b, vte_file=vte_file, vte_sha=vte_sha))
| #! /usr/bin/env python3
from string import Template
VTE_TEMPLATE = Template(r"""
{
"app-id": "de.uchuujin.fp.termzoo.vte${vte_version_b}",
"runtime": "org.gnome.Platform",
"runtime-version": "${vte_runtime}",
"sdk": "org.gnome.Sdk",
"command": "vte${vte_api}",
"finish-args": ["--socket=x11", "--device=dri", "--talk-name=org.freedesktop.Flatpak"],
"modules": [
{
"name": "vte",
"buildsystem": "autotools",
"build-options": {
"cxxflags-override": true,
"cxxflags": ""
},
"sources": [
{
"type": "archive",
"url": "https://download.gnome.org/core/${vte_file}",
"sha512": "${vte_sha}"
}
]
},
{
"name": "scripts",
"buildsystem": "simple",
"build-commands": [
"install -D run-in-host /app/bin/run-in-host"
],
"sources": [
{
"type": "file",
"path": "../run-in-host"
}
]
}
]
}
""")
VTE_MESON_TEMPLATE = Template(r"""
{
"app-id": "de.uchuujin.fp.termzoo.vte${vte_version_b}",
"runtime": "org.gnome.Platform",
"runtime-version": "${vte_runtime}",
"sdk": "org.gnome.Sdk",
"command": "vte${vte_api}",
"build-options" : {
"cxxflags": "-fno-exceptions"
},
"finish-args": ["--socket=x11", "--device=dri", "--talk-name=org.freedesktop.Flatpak"],
"modules": [
{
"name": "vte",
"buildsystem": "meson",
"sources": [
{
"type": "archive",
"url": "https://download.gnome.org/core/${vte_file}",
"sha512": "${vte_sha}"
}
]
},
{
"name": "scripts",
"buildsystem": "simple",
"build-commands": [
"install -D run-in-host /app/bin/run-in-host"
],
"sources": [
{
"type": "file",
"path": "../run-in-host"
}
]
}
]
}
""")
# Catalogue of vte releases to generate flatpak manifests for.
# Each entry is: (release version, binary/API suffix appended to the "vte"
# command name, tarball path under https://download.gnome.org/core/,
# sha512 checksum of that tarball).
vte_versions = (
    ('0.28.0', '', '3.0/3.0.0/sources/vte-0.28.0.tar.bz2', '5f9549ee09dd1d100bf0e90f81b12e8237ba4cedd79cf2fc33edb81edb5796ff23a88563a12ae808cdc057ae2ef508999426b36a4985fef6dc008431f1aa38f0'),
    ('0.28.1', '', '3.1/3.1.5/sources/vte-0.28.1.tar.xz', 'f76d5d10da069ee2a6235abc5a7db883dd2f582a58b935e8cd8150b4af8215db3bfddef103485a07c0d23608e5eba8e462d73e177d2a61a7bd97a69dd3cbf2af'),
    ('0.30.0', '2_90', '3.2/3.2.0/sources/vte-0.30.0.tar.xz', 'bcd35227100f326b5260db31239537f19604ce2ac7f735c4d39485640fd4f5c18bbc2298ccefcca657b8707e51e1028e145d0f367b67102ee89ddf2a6a7b914d'),
    ('0.30.1', '2_90', '3.2/3.2.2/sources/vte-0.30.1.tar.xz', 'b7e601f207fe87c56826b093904a27f90e51e8992707d5978ec058fb91c1c790287af8bc2594b8149f4a69223cff2f2c73bf13a43507f4ac37df7c3faee69e60'),
    ('0.32.0', '2_90', '3.4/3.4.0/sources/vte-0.32.0.tar.xz', '729e1dd7261fd0394a7a4a566b2076c49ae9f90505c701e663cbcd6833891a20b9a2120b4334694aaa2757312e7f973488bde6c02b3d2681eb4e116fc2842ee0'),
    ('0.32.1', '2_90', '3.4/3.4.1/sources/vte-0.32.1.tar.xz', '8c4d63f33ba99e8c39ee5fe6b0998dd89aa7415d35765cee4c42116e7cdb8aabc958471b68cc88a35c937949a9043678ce4ae74541476b36d5617491e8da6224'),
    ('0.32.2', '2_90', '3.5/3.5.5/sources/vte-0.32.2.tar.xz', '0346a3a85de8bfc84fce37f073154ee7af746b8872e7970f4f58e11cf261311def714100370c2eb371beecba5d22207dfdada7b0df7fee00f32035e93b9e8808'),
    ('0.34.0', '2_90', '3.6/3.6.0/sources/vte-0.34.0.tar.xz', '80c2f1c6b310324c05fe1c2373583280a7c32950ce4601151a9e81f24bf0636260ec2f02979437c99e5e9f916301ea694f2d40083f974fe92b8624618c2c0a73'),
    ('0.34.1', '2_90', '3.6/3.6.1/sources/vte-0.34.1.tar.xz', 'a80f74a9699c2c06b4bf269adf16298724c07d749fffc1c1dd7f3f5a89d3bb08f4d3db3aa888dbc3dea4be75846eb5ab6c794e661cc5c192fca9235aff293cbb'),
    ('0.34.2', '2_90', '3.6/3.6.2/sources/vte-0.34.2.tar.xz', '4a89f149e9c101ed31b76086de1a05287d9c2a750ee098792fa9508b8712fa065a63e60d50cfb4a060991c2f6f182ddb91d3f21d4c7baf61337f08dc1a960a35'),
    ('0.34.3', '2_90', '3.8/3.8.0/sources/vte-0.34.3.tar.xz', '6eecde8d4f03eabd5710850b4f559d1f55ab98a201b6611428025579a6d43d52faa80d6f6607a6b93e42b5efda597f2b8507312113b939574ff7b73a3bd134ef'),
    ('0.34.4', '2_90', '3.8/3.8.1/sources/vte-0.34.4.tar.xz', '2f1dce6ed47835b098c37351fa665ddbfd8719a4275672925d8030655cd96412ec7f3edc23e9d94a3e035a36969c74c5186d1689dcf1e1111071158e64d2360f'),
    ('0.34.5', '2_90', '3.8/3.8.2/sources/vte-0.34.5.tar.xz', 'bf49cffe9f5bf243aada353adda1d8f86d75883bed343792a16d69d8956e9fc6f846cd1c317c7b2b03ba446de9c645cba52384190d5343f5d000a4557b40fb53'),
    ('0.34.6', '2_90', '3.9/3.9.3/sources/vte-0.34.6.tar.xz', '185703bdb8d4d46c1f340af8daa41194fcd28fdffafc77f190e776350f4bd875000e94b6cc481da19cb1f8da26ae2d757f4a29b703833e71fa20fcc5ccb329fd'),
    ('0.34.7', '2_90', '3.9/3.9.5/sources/vte-0.34.7.tar.xz', 'a122814f7fee4202c0bc4862798c98b078e5b185eb8c3c11959001b33bd5e029579f7337f85bdffb3f8a7b22af3a6763a2353ecff16a8e5c8b941431adabc5e0'),
    ('0.34.8', '2_90', '3.10/3.10.0/sources/vte-0.34.8.tar.xz', 'e4a7b691d0affcb05c463e0eeeab42591e13679f8b8cd042869b2b91aaa82c24222734f68767e3246f2554f8fca481b35383239ecfdb39e5865fc9b8b3b8479b'),
    ('0.34.9', '2_90', '3.10/3.10.2/sources/vte-0.34.9.tar.xz', '57fff7d5916bcd8a8a13e3cf050f85ed78e9154fdf2510a6d6f594f938c6d56f512089c6bc3e06c9123d35d17f2eb3b3814a5407635027ec0116c25f73034688'),
    ('0.36.0', '2_90', '3.12/3.12.1/sources/vte-0.36.0.tar.xz', '7666fdb2d3458390b154add7da781cb973498b019d859a3396797102dc4840d9bdbae44c83257a3c67609e658173dc0d5ff382b0343a5bf3c6db43e04392893c'),
    ('0.36.2', '2_90', '3.12/3.12.2/sources/vte-0.36.2.tar.xz', 'fafd368b95918024e6324c81f4fa6c35ad15138ad44af2f92164492d9773b127202d11824c9def4a26e17de44336f42e9c8906330dd300795c280ca068381632'),
    ('0.38.0', '-2.91', '3.14/3.14.0/sources/vte-0.38.0.tar.xz', '624d8c9e7d4429c21830d4bdbd325d3b9a75e35ecad0494fe6051ae339745c59ab656af4989d632e638402bc1fedf4c4f69387d39bf731bd6eabaccf682b1bc0'),
    ('0.38.1', '-2.91', '3.15/3.15.1/sources/vte-0.38.1.tar.xz', 'd6e616d2f327ac6c28ad9ac11f0e7a081a5659b9ad90dd3246fa4240a8642ed1ffe87502ab4307527e03789195103cc33d3f783f2d89e7c866c0cc8d5cd0e24c'),
    ('0.38.2', '-2.91', '3.14/3.14.2/sources/vte-0.38.2.tar.xz', '4c493e18cca4b50d66640d282d7d33a471d1ac4bd2dd929b059b829a42fed852d202669751b266adf7291502203e26c513b6852b3035458d41a433b900b0c6bb'),
    ('0.40.0', '-2.91', '3.16/3.16.1/sources/vte-0.40.0.tar.xz', 'f7ff28cdefc80e7fa5d876b8cba5d396fd98aa13c21a6cd320ac4042a8747e67ebf7a7c13ddab7bebb6b839231aebcc4fc25be9f0cc6c55859886c7948d4ac79'),
    ('0.40.2', '-2.91', '3.16/3.16.2/sources/vte-0.40.2.tar.xz', '<KEY>'),  # FIXME: sha512 is a redacted '<KEY>' placeholder -- restore the real checksum before use
    ('0.42.0', '-2.91', '3.18/3.18.0/sources/vte-0.42.0.tar.xz', 'e2b2c00c81af05cdd5d99fd2de4fcb9019cffc5bd8b59878b7676cf7104c79a0c095b28d9a62586e3a531e00b80ba70062352ca1d4e96a902fef8d8f1121df49'),
    ('0.42.1', '-2.91', '3.18/3.18.1/sources/vte-0.42.1.tar.xz', '4cf917d3f343973dcb816a341276bfab7784a9f4dc6f8fb402193e9a9062503ac192ccc274fe92bb20a17ac182d21fd2268bf9c1ddea817f730db5612e3b21c0'),
    ('0.43.0', '-2.91', '3.18/3.18.2/sources/vte-0.43.0.tar.xz', 'fabe336996fd49ac08fc347f87e2b6169a875bff5570c3e0276271e0efcb215d206c6663d961ae604ee23ea668cbcacdc0664c06ec626e0a5ee7120cc57417fc'),
    ('0.44.0', '-2.91', '3.20/3.20.0/sources/vte-0.44.0.tar.xz', 'c190ba6cd4785fc16b1982517a0fcfe2935e50082acec095bdb5d56467b4952fdd48340776c2a4ecef4da847a668a56bca4599801b00f5090927a0e5f31d2c3a'),
    ('0.44.1', '-2.91', '3.20/3.20.1/sources/vte-0.44.1.tar.xz', '1fd352ea111cc13f8e7b2acae374e2fbd9d5025f6cb28b193164024594a5df12c9418bcdff11ff3247a9b785d6584c484a242c22df6a98afc3f0dfa1f716499c'),
    ('0.44.2', '-2.91', '3.20/3.20.2/sources/vte-0.44.2.tar.xz', '98db3c1528d5f458164e2d30056cd816e5943d9c569551878119e79d4fbca1865e52667393bf298f32fd54710d1b268a5aac125222ecb29ce854522be3776356'),
    ('0.46.0', '-2.91', '3.22/3.22.1/sources/vte-0.46.0.tar.xz', '543cdba5c51c5384e54fc372924c2667ded952cbc8ffafb7ff62f8643c6a7e2440439109eb12378ed70b0e0a256d3ef97d6da004dd8088d36bccdd7fa16593f9'),
    ('0.46.1', '-2.91', '3.22/3.22.2/sources/vte-0.46.1.tar.xz', '04b3f8ce922c4326d92bef605a0dbe195764946cd5f1acf28fd6d69c0cdb2ee661cc7e424436c72380da5d0250790ae658ac49d761b567dea88d92157285889d'),
    ('0.47.90', '-2.91', '3.24/3.24.0/sources/vte-0.47.90.tar.xz', 'c36310486b0575b26330879d2ca222ce4ca36af7659ec13113b209a897371da7ce0ff758f2c0fc5a9d42b7fd60caae8603aa564a2a5f58159979e4a9388a688b'),
    ('0.48.2', '-2.91', '3.24/3.24.1/sources/vte-0.48.2.tar.xz', 'cbb2835618c554d72a790e16f1ac5b3c06a8a810d8d517c475ed7ca46eeb56d7c9f9226918e13c5c84c04ce1ccb5467e62af7c4453f317a0aad197a4c179d76a'),
    ('0.48.3', '-2.91', '3.24/3.24.2/sources/vte-0.48.3.tar.xz', '3037b61a759cfcf56838bc7804df5a211da416bff9ddc5791f8a8d5157b90926985cfe57d7edbab42de64945d5e668470fe4129a218fb9c7af546648e1660c72'),
    ('0.50.0', '-2.91', '3.26/3.26.0/sources/vte-0.50.0.tar.xz', 'ac05622ecf92115cf6aef1ef7805b0ef19856d65d2dfc9792b149443aeee4c9bbfb871c600c9be8b0f4baac4a143e70412208d0a510cb53f13b2caf2caa33587'),
    ('0.50.1', '-2.91', '3.27/3.27.1/sources/vte-0.50.1.tar.xz', 'd5e9c7990ddb6ee789d4f4f8df05c23d794791e8381266551cf6574658ee8fd6adc4c7b9ac6aadcf957887b0ba13f7f89002fd820c8d35c789bba918414bd330'),
    ('0.50.2', '-2.91', '3.27/3.27.3/sources/vte-0.50.2.tar.xz', 'a1b12c74bedb167bf2a470294c566198c224c90be9b5c50366ef18d9542921f6cb2dc2329afd82f7342279c3eebd4ef5dfd753b4feb9d4e3e194cb878b48a7a2'),
    ('0.52.0', '-2.91', '3.28/3.28.0/sources/vte-0.52.0.tar.xz', '2f8b1efc7c73c4e74070d3bfcb33e61672d6ed70a352eed2c43198f8c3ffb513f6ed98dcf822dbd55d31d914c7f9dc157b29f8e4781705ee2c9ddb0e43c6e5fa'),
    ('0.52.1', '-2.91', '3.29/3.29.1/sources/vte-0.52.1.tar.xz', 'a1de54950cdcac9afccc1b13bd71b65ad1d6f93055d0b005b4a15b6f84f55029848cf8f2f9155cf3e6edc69fe973529fd4313f59af74fc1035aebd4c0b85655f'),
    ('0.54.0', '-2.91', '3.30/3.30.0/sources/vte-0.54.0.tar.xz', '69dd0caae4eacc179f84eccf98625a31140286beca4244a8f06420bd36ac62dcaddf9e9d8b114549ca97927e94b562df3e7daa9fad971484f05ebdd1c0f7b787'),
    ('0.54.1', '-2.91', '3.30/3.30.1/sources/vte-0.54.1.tar.xz', '5cb866e75c35e1b5728d17b0433641ceb4837495d59185a04fde9dd8c9849ab905a6b2718a50b27cc70323c7a6c5db31a808816576c30d7f18ece889a329fb61'),
    ('0.54.2', '-2.91', '3.30/3.30.2/sources/vte-0.54.2.tar.xz', '214ec69110d6ad6caa9bc41fb741874bfcf27f20f34d3ae745b13903c574f9c854f7e0dadbae3dbc8ce04c5a6eb818a433c50569c1ef802501a9a078385f23fc'),
    ('0.55.0', '-2.91', '3.31/3.31.2/sources/vte-0.55.0.tar.xz', '972bb4e616bc9436482dc938c31dcf62e8e00eba375554e4049485fbd1dc2f31f657fc6cd83777788781decb3b50559ee982684c8c50a139743e239dbfe078b1'),
    ('0.56.0', '-2.91', '3.32/3.32.0/sources/vte-0.56.0.tar.xz', 'f366ed4a28a896ba919184b50a55ee110eae43127847f34c049684bcb9b81327d1b72e9d871b2a5b7f7fa12f3f4aa721ac3d770770b600dca9c433cb2c674915'),
    ('0.56.3', '-2.91', '3.32/3.32.2/sources/vte-0.56.3.tar.xz', 'f78b3d532ca47e53c1bb51db6780697ce4692d493c0030d2dc4beb63a2595e44a43eb409ee31b94e4551eae259ac1baa8f06825a02fd66df8352e192f4ea1988'),
    ('0.57.0', '-2.91', '3.33/3.33.2/sources/vte-0.57.0.tar.xz', '87788ed44b39d57cf6d0ff99046ab575c8a410a0713e8f7404ada1239a1691f687b689a0b692f1bfe84ba7c38308382da409bab0780b1168d0ba99bbc0eb7b4f'),
    ('0.57.3', '-2.91', '3.33/3.33.4/sources/vte-0.57.3.tar.xz', 'f5496fd2b24af8d8eb895adaea59ee5ed4250c12a97745e025aacebca2d709901ae84befca58a3c5f1a54014a97af460ea53f844b1b1b9e32e192cc5883ecfed'),
    ('0.58.0', '-2.91', '3.34/3.34.0/sources/vte-0.58.0.tar.xz', '4d0fc725e0c71921b3d235d434600ad3c0807d5c0e7bd62fb782d857254db334bb851b75382c9613a5af753b5d6a1c05b174731427a8560b9b14101b3cc38c06'),
    ('0.58.1', '-2.91', '3.34/3.34.1/sources/vte-0.58.1.tar.xz', '1f795731fbb7ee76c4274562d5a55668c3b8ecad5a00ff83c762b0a2517ccffb85e796e937407d46e6bdb64327759eabc5878455d1d66cb1b8ff8b6060a4b1b7'),
    ('0.59.9', '-2.91', '3.35/3.35.1/sources/vte-0.59.0.tar.xz', '533d1e87a699137a33a6ddb82bf0f010925ba578974e1f6c87bae0b497309dd84c3cb2f5f6884f34f7fbcfad94fbaa07eb3a80387ee9f16b5f3f0ea2679e7376'),  # NOTE(review): version says 0.59.9 but the tarball is vte-0.59.0 -- confirm this is intentional
    ('0.60.0', '-2.91', '3.36/3.36.0/sources/vte-0.60.0.tar.xz', '8c1a80ba90fa1c1f4b5ec1a1d3793af79c04fbbad4acecba094db79771555b1689017864bd81bee4366f9ef363f629f20731bac998d994b9bfa37ee59e9e58b0'),
    ('0.60.1', '-2.91', '3.36/3.36.1/sources/vte-0.60.1.tar.xz', '123a8fcc14f4dba450411f95f43eb60108fee95c328d0e7331c9366d96ba2caa548dece3e95a8b779dda19d322d6879d02abc6ac68e36450e4e72f17a0963c30'),
    ('0.60.2', '-2.91', '3.36/3.36.2/sources/vte-0.60.2.tar.xz', '801ac727cab33d2c3f4ba4d86bf7f19a82628acd2739196f24c85d038ba6bcc6a67239aac09141b8e0119a67f199ff8a8c653641a8e9aea1e8ab68bfd16017db'),
    ('0.60.3', '-2.91', '3.36/3.36.3/sources/vte-0.60.3.tar.xz', '3694fe711e0b3eb9d6ba37ad8036f5d3cca4265635ed7afcde750a8445b17f820d1c55b557d0ea1c8a5a45e5408915d8da2ffd65b4d397c6582f288812ae1f18'),
)
# Write one flatpak manifest per release in vte_versions, picking the
# autotools or meson template and the matching GNOME runtime version.
for vte_version, vte_api, vte_file, vte_sha in vte_versions:
    vte_version_b = vte_version.replace('.', '_')
    with open('de.uchuujin.fp.termzoo.vte{}.json'.format(vte_version_b), "w") as f:
        old_series = vte_version[:3] in ('0.2', '0.3', '0.4', '0.5')
        meson_exception = vte_version in ('0.57.0', '0.57.3', '0.58.0', '0.58.1', '0.59.9')
        if old_series and not meson_exception:
            # Autotools-era release: older runtime; the earliest releases
            # (empty API suffix) use 3.28 rather than 3.30.
            template = VTE_TEMPLATE
            vte_runtime = '3.28' if vte_api == '' else '3.30'
        else:
            template = VTE_MESON_TEMPLATE
            vte_runtime = '3.36'
        f.write(template.substitute(vte_version=vte_version, vte_api=vte_api, vte_runtime=vte_runtime,
                                    vte_version_b=vte_version_b, vte_file=vte_file, vte_sha=vte_sha))
src/oci/cloud_guard/models/security_zone_target_details.py | pabs3/oci-python-sdk | 0 | 6622677 | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .target_details import TargetDetails
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class SecurityZoneTargetDetails(TargetDetails):
    """
    Target details used when a Cloud Guard target is a security zone.
    """

    def __init__(self, **kwargs):
        """
        Initializes a new SecurityZoneTargetDetails object with values from keyword arguments.
        The default value of the
        :py:attr:`~oci.cloud_guard.models.SecurityZoneTargetDetails.target_resource_type`
        attribute of this class is ``SECURITY_ZONE`` and it should not be changed.

        Supported keyword arguments (one per property of this class):

        :param target_resource_type:
            Value for the target_resource_type property. Allowed values are:
            "COMPARTMENT", "ERPCLOUD", "HCMCLOUD", "SECURITY_ZONE"
        :type target_resource_type: str
        :param security_zone_id:
            Value for the security_zone_id property.
        :type security_zone_id: str
        :param security_zone_display_name:
            Value for the security_zone_display_name property.
        :type security_zone_display_name: str
        :param target_security_zone_recipes:
            Value for the target_security_zone_recipes property.
        :type target_security_zone_recipes: list[oci.cloud_guard.models.SecurityRecipe]
        """
        # Serialization metadata: property name -> swagger type.
        self.swagger_types = {
            'target_resource_type': 'str',
            'security_zone_id': 'str',
            'security_zone_display_name': 'str',
            'target_security_zone_recipes': 'list[SecurityRecipe]'
        }
        # Serialization metadata: property name -> JSON field name.
        self.attribute_map = {
            'target_resource_type': 'targetResourceType',
            'security_zone_id': 'securityZoneId',
            'security_zone_display_name': 'securityZoneDisplayName',
            'target_security_zone_recipes': 'targetSecurityZoneRecipes'
        }

        # Backing fields, declared in the same order as swagger_types.
        self._target_resource_type = None
        self._security_zone_id = None
        self._security_zone_display_name = None
        self._target_security_zone_recipes = None
        # This subtype's discriminator value is fixed.
        self._target_resource_type = 'SECURITY_ZONE'

    @property
    def security_zone_id(self):
        """
        The OCID of the security zone this compartment is associated with.

        :return: The security_zone_id of this SecurityZoneTargetDetails.
        :rtype: str
        """
        return self._security_zone_id

    @security_zone_id.setter
    def security_zone_id(self, security_zone_id):
        """
        Sets security_zone_id: the OCID of the security zone to associate
        this compartment with.

        :param security_zone_id: The security_zone_id of this SecurityZoneTargetDetails.
        :type: str
        """
        self._security_zone_id = security_zone_id

    @property
    def security_zone_display_name(self):
        """
        The display name of the security zone this compartment is associated with.

        :return: The security_zone_display_name of this SecurityZoneTargetDetails.
        :rtype: str
        """
        return self._security_zone_display_name

    @security_zone_display_name.setter
    def security_zone_display_name(self, security_zone_display_name):
        """
        Sets security_zone_display_name: the name of the security zone to
        associate this compartment with.

        :param security_zone_display_name: The security_zone_display_name of this SecurityZoneTargetDetails.
        :type: str
        """
        self._security_zone_display_name = security_zone_display_name

    @property
    def target_security_zone_recipes(self):
        """
        The security zone recipes this compartment is associated with.

        :return: The target_security_zone_recipes of this SecurityZoneTargetDetails.
        :rtype: list[oci.cloud_guard.models.SecurityRecipe]
        """
        return self._target_security_zone_recipes

    @target_security_zone_recipes.setter
    def target_security_zone_recipes(self, target_security_zone_recipes):
        """
        Sets target_security_zone_recipes: the list of security zone recipes
        to associate this compartment with.

        :param target_security_zone_recipes: The target_security_zone_recipes of this SecurityZoneTargetDetails.
        :type: list[oci.cloud_guard.models.SecurityRecipe]
        """
        self._target_security_zone_recipes = target_security_zone_recipes

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        # Equal iff the other object exists and all attributes match.
        return other is not None and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
| # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .target_details import TargetDetails
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class SecurityZoneTargetDetails(TargetDetails):
"""
Details about Security Zone Target.
"""
def __init__(self, **kwargs):
"""
Initializes a new SecurityZoneTargetDetails object with values from keyword arguments. The default value of the :py:attr:`~oci.cloud_guard.models.SecurityZoneTargetDetails.target_resource_type` attribute
of this class is ``SECURITY_ZONE`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param target_resource_type:
The value to assign to the target_resource_type property of this SecurityZoneTargetDetails.
Allowed values for this property are: "COMPARTMENT", "ERPCLOUD", "HCMCLOUD", "SECURITY_ZONE"
:type target_resource_type: str
:param security_zone_id:
The value to assign to the security_zone_id property of this SecurityZoneTargetDetails.
:type security_zone_id: str
:param security_zone_display_name:
The value to assign to the security_zone_display_name property of this SecurityZoneTargetDetails.
:type security_zone_display_name: str
:param target_security_zone_recipes:
The value to assign to the target_security_zone_recipes property of this SecurityZoneTargetDetails.
:type target_security_zone_recipes: list[oci.cloud_guard.models.SecurityRecipe]
"""
self.swagger_types = {
'target_resource_type': 'str',
'security_zone_id': 'str',
'security_zone_display_name': 'str',
'target_security_zone_recipes': 'list[SecurityRecipe]'
}
self.attribute_map = {
'target_resource_type': 'targetResourceType',
'security_zone_id': 'securityZoneId',
'security_zone_display_name': 'securityZoneDisplayName',
'target_security_zone_recipes': 'targetSecurityZoneRecipes'
}
self._target_resource_type = None
self._security_zone_id = None
self._security_zone_display_name = None
self._target_security_zone_recipes = None
self._target_resource_type = 'SECURITY_ZONE'
@property
def security_zone_id(self):
"""
Gets the security_zone_id of this SecurityZoneTargetDetails.
The OCID of the security zone to associate this compartment with.
:return: The security_zone_id of this SecurityZoneTargetDetails.
:rtype: str
"""
return self._security_zone_id
@security_zone_id.setter
def security_zone_id(self, security_zone_id):
"""
Sets the security_zone_id of this SecurityZoneTargetDetails.
The OCID of the security zone to associate this compartment with.
:param security_zone_id: The security_zone_id of this SecurityZoneTargetDetails.
:type: str
"""
self._security_zone_id = security_zone_id
@property
def security_zone_display_name(self):
"""
Gets the security_zone_display_name of this SecurityZoneTargetDetails.
The name of the security zone to associate this compartment with.
:return: The security_zone_display_name of this SecurityZoneTargetDetails.
:rtype: str
"""
return self._security_zone_display_name
    @security_zone_display_name.setter
    def security_zone_display_name(self, security_zone_display_name):
        """
        Sets the security_zone_display_name of this SecurityZoneTargetDetails.
        The name of the security zone to associate this compartment with.

        The value is stored as-is without validation.

        :param security_zone_display_name: The security_zone_display_name of this SecurityZoneTargetDetails.
        :type: str
        """
        self._security_zone_display_name = security_zone_display_name
    @property
    def target_security_zone_recipes(self):
        """
        Gets the target_security_zone_recipes of this SecurityZoneTargetDetails.
        The list of security zone recipes to associate this compartment with.

        Returns the stored list itself (not a copy), so mutating the
        returned list mutates the model.

        :return: The target_security_zone_recipes of this SecurityZoneTargetDetails.
        :rtype: list[oci.cloud_guard.models.SecurityRecipe]
        """
        return self._target_security_zone_recipes
    @target_security_zone_recipes.setter
    def target_security_zone_recipes(self, target_security_zone_recipes):
        """
        Sets the target_security_zone_recipes of this SecurityZoneTargetDetails.
        The list of security zone recipes to associate this compartment with.

        The list reference is stored as-is (no defensive copy).

        :param target_security_zone_recipes: The target_security_zone_recipes of this SecurityZoneTargetDetails.
        :type: list[oci.cloud_guard.models.SecurityRecipe]
        """
        self._target_security_zone_recipes = target_security_zone_recipes
    def __repr__(self):
        # Delegate to `formatted_flat_dict`, the OCI SDK helper that renders
        # every model attribute as a readable flattened dict.  It is
        # presumably imported at the top of this module (not visible in
        # this chunk) -- confirm before moving this method.
        return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| en | 0.695344 | # coding: utf-8 # Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. # noqa: F401 Details about Security Zone Target. Initializes a new SecurityZoneTargetDetails object with values from keyword arguments. The default value of the :py:attr:`~oci.cloud_guard.models.SecurityZoneTargetDetails.target_resource_type` attribute of this class is ``SECURITY_ZONE`` and it should not be changed. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param target_resource_type: The value to assign to the target_resource_type property of this SecurityZoneTargetDetails. Allowed values for this property are: "COMPARTMENT", "ERPCLOUD", "HCMCLOUD", "SECURITY_ZONE" :type target_resource_type: str :param security_zone_id: The value to assign to the security_zone_id property of this SecurityZoneTargetDetails. :type security_zone_id: str :param security_zone_display_name: The value to assign to the security_zone_display_name property of this SecurityZoneTargetDetails. :type security_zone_display_name: str :param target_security_zone_recipes: The value to assign to the target_security_zone_recipes property of this SecurityZoneTargetDetails. :type target_security_zone_recipes: list[oci.cloud_guard.models.SecurityRecipe] Gets the security_zone_id of this SecurityZoneTargetDetails. The OCID of the security zone to associate this compartment with. :return: The security_zone_id of this SecurityZoneTargetDetails. :rtype: str Sets the security_zone_id of this SecurityZoneTargetDetails. The OCID of the security zone to associate this compartment with. :param security_zone_id: The security_zone_id of this SecurityZoneTargetDetails. 
:type: str Gets the security_zone_display_name of this SecurityZoneTargetDetails. The name of the security zone to associate this compartment with. :return: The security_zone_display_name of this SecurityZoneTargetDetails. :rtype: str Sets the security_zone_display_name of this SecurityZoneTargetDetails. The name of the security zone to associate this compartment with. :param security_zone_display_name: The security_zone_display_name of this SecurityZoneTargetDetails. :type: str Gets the target_security_zone_recipes of this SecurityZoneTargetDetails. The list of security zone recipes to associate this compartment with. :return: The target_security_zone_recipes of this SecurityZoneTargetDetails. :rtype: list[oci.cloud_guard.models.SecurityRecipe] Sets the target_security_zone_recipes of this SecurityZoneTargetDetails. The list of security zone recipes to associate this compartment with. :param target_security_zone_recipes: The target_security_zone_recipes of this SecurityZoneTargetDetails. :type: list[oci.cloud_guard.models.SecurityRecipe] | 2.112314 | 2 |
tasks.py | guilatrova/school-api-load-test | 0 | 6622678 | import time
from locust import TaskSet, task
from factories import create_questions
class OnStartSetupDataMixin:
    """Mixin that seeds the API with fixture data before a Locust TaskSet runs.

    Creates students, a teacher, a class, two enrollments, a quiz and one
    assignment, storing the returned ids on the instance for the tasks to use.
    Requires ``self.client`` (provided by Locust's TaskSet).
    """

    def post_json_get_id(self, url, payload):
        """POST ``payload`` as JSON to ``url`` and return the created object's id."""
        response = self.client.post(url, json=payload)
        time.sleep(0.1)  # Required to avoid connection error
        return response.json()['id']

    def setup_people(self):
        """Create the student and teacher fixtures, remembering their ids."""
        self.student = self.post_json_get_id("/students/", { 'name': 'Guilherme' })
        self.new_comer = self.post_json_get_id("/students/", { 'name': '<NAME>' })
        self.waiting_assignment = self.post_json_get_id("/students/", { 'name': '<NAME>' })
        self.teacher = self.post_json_get_id("/teachers/", { 'name': 'Kwan' })

    def setup_classes(self):
        """Create one class and enroll two of the students in it."""
        self.school_class = self.post_json_get_id("/classes/", { 'name': 'Load Testing', 'teacher': self.teacher })
        self.student_enrollment = self.post_json_get_id("/students/{}/classes/".format(self.student), { 'student': self.student, 'school_class': self.school_class, 'semester': '2018-01-01' })
        # BUG FIX: this enrollment was POSTed to self.student's URL even though
        # the payload enrolls self.waiting_assignment; target the correct student.
        self.waiting_assignment_enrollment = self.post_json_get_id("/students/{}/classes/".format(self.waiting_assignment), { 'student': self.waiting_assignment, 'school_class': self.school_class, 'semester': '2018-01-01' })

    def setup_quizzes_and_assignments(self):
        """Create a quiz for the class and assign it to the first student."""
        self.quiz = self.post_json_get_id("/quizzes/", { 'school_class': self.school_class, 'questions': create_questions(10) })
        self.assignment = self.post_json_get_id("/students/{}/assignments/".format(self.student), { 'quiz': self.quiz, 'enrollment': self.student_enrollment })

    def on_start(self):
        """Locust hook: build all fixtures before any task executes."""
        self.setup_people()
        self.setup_classes()
        self.setup_quizzes_and_assignments()
class TeacherTaskSet(OnStartSetupDataMixin, TaskSet):
    """Simulated teacher behaviour: create classes/quizzes, assign and grade."""

    @task
    def create_teacher(self):
        self.client.post("/teachers/", json={'name': 'Mary'})

    @task
    def create_class(self):
        payload = {'name': 'Managing Great Companies', 'teacher': self.teacher}
        self.client.post("/classes/", json=payload)

    @task
    def create_quiz(self):
        new_questions = create_questions(5)
        self.client.post("/quizzes/", json={'school_class': self.school_class, 'questions': new_questions})

    @task
    def assign_quiz_to_student(self):
        payload = {'quiz': self.quiz, 'enrollment': self.waiting_assignment_enrollment}
        self.client.post(f"/students/{self.waiting_assignment}/assignments/", json=payload)

    @task
    def check_assignment_status(self):
        self.client.get(f"/assignments/{self.assignment}")

    @task
    def check_students_grades(self):
        self.client.get(f"/assignments/reports/student-grades/?teacher={self.teacher}&semester=2018-01-01")
class StudentTaskSet(OnStartSetupDataMixin, TaskSet):
    """Simulated student behaviour: enroll and browse classes/assignments."""

    @task
    def create_student(self):
        self.client.post("/students/", json={'name': 'Jhon'})

    @task
    def check_classes(self):
        self.client.get(f"/students/{self.student}/classes/")

    @task
    def check_assignments(self):
        self.client.get(f"/students/{self.student}/assignments/")

    @task
    def enroll_in_class(self):
        enrollment = {'student': self.new_comer, 'school_class': self.school_class, 'semester': '2018-01-01'}
        self.client.post(f"/students/{self.new_comer}/classes/", json=enrollment)

    @task
    def check_assignment_result(self):
        self.client.get(f"/assignments/{self.assignment}/")
| import time
from locust import TaskSet, task
from factories import create_questions
class OnStartSetupDataMixin:
def post_json_get_id(self, url, payload):
response = self.client.post(url, json=payload)
time.sleep(0.1) #Required to avoid connection error
return response.json()['id']
def setup_people(self):
self.student = self.post_json_get_id("/students/", { 'name': 'Guilherme' })
self.new_comer = self.post_json_get_id("/students/", { 'name': '<NAME>' })
self.waiting_assignment = self.post_json_get_id("/students/", { 'name': '<NAME>' })
self.teacher = self.post_json_get_id("/teachers/", { 'name': 'Kwan' })
def setup_classes(self):
self.school_class = self.post_json_get_id("/classes/", { 'name': 'Load Testing', 'teacher': self.teacher })
self.student_enrollment = self.post_json_get_id("/students/{}/classes/".format(self.student), { 'student': self.student, 'school_class': self.school_class, 'semester': '2018-01-01' })
self.waiting_assignment_enrollment = self.post_json_get_id("/students/{}/classes/".format(self.student), { 'student': self.waiting_assignment, 'school_class': self.school_class, 'semester': '2018-01-01' })
def setup_quizzes_and_assignments(self):
self.quiz = self.post_json_get_id("/quizzes/", { 'school_class': self.school_class, 'questions': create_questions(10) })
self.assignment = self.post_json_get_id("/students/{}/assignments/".format(self.student), { 'quiz': self.quiz, 'enrollment': self.student_enrollment })
def on_start(self):
self.setup_people()
self.setup_classes()
self.setup_quizzes_and_assignments()
class TeacherTaskSet(OnStartSetupDataMixin, TaskSet):
@task
def create_teacher(self):
self.client.post("/teachers/", json={ 'name': 'Mary' })
@task
def create_class(self):
self.client.post("/classes/", json={ 'name': 'Managing Great Companies', 'teacher': self.teacher })
@task
def create_quiz(self):
questions = create_questions(5)
self.client.post("/quizzes/", json={ 'school_class': self.school_class, 'questions': questions })
@task
def assign_quiz_to_student(self):
self.client.post("/students/{}/assignments/".format(self.waiting_assignment), json={ 'quiz': self.quiz, 'enrollment': self.waiting_assignment_enrollment })
@task
def check_assignment_status(self):
self.client.get("/assignments/{}".format(str(self.assignment)))
@task
def check_students_grades(self):
self.client.get("/assignments/reports/student-grades/?teacher={}&semester=2018-01-01".format(self.teacher))
class StudentTaskSet(OnStartSetupDataMixin, TaskSet):
@task
def create_student(self):
self.client.post("/students/", json={ 'name': 'Jhon' })
@task
def check_classes(self):
self.client.get("/students/{}/classes/".format(self.student))
@task
def check_assignments(self):
self.client.get("/students/{}/assignments/".format(self.student))
@task
def enroll_in_class(self):
data = { 'student': self.new_comer, 'school_class': self.school_class, 'semester': '2018-01-01' }
self.client.post("/students/{}/classes/".format(self.new_comer), json=data)
@task
def check_assignment_result(self):
self.client.get("/assignments/{}/".format(str(self.assignment)))
| en | 0.794519 | #Required to avoid connection error | 2.511381 | 3 |
leetcode_python/Breadth-First-Search/bus-routes.py | yennanliu/Python_basics | 0 | 6622679 | """
815. Bus Routes
Hard
You are given an array routes representing bus routes where routes[i] is a bus route that the ith bus repeats forever.
For example, if routes[0] = [1, 5, 7], this means that the 0th bus travels in the sequence 1 -> 5 -> 7 -> 1 -> 5 -> 7 -> 1 -> ... forever.
You will start at the bus stop source (You are not on any bus initially), and you want to go to the bus stop target. You can travel between bus stops by buses only.
Return the least number of buses you must take to travel from source to target. Return -1 if it is not possible.
Example 1:
Input: routes = [[1,2,7],[3,6,7]], source = 1, target = 6
Output: 2
Explanation: The best strategy is take the first bus to the bus stop 7, then take the second bus to the bus stop 6.
Example 2:
Input: routes = [[7,12],[4,5,15],[6],[15,19],[9,12,13]], source = 15, target = 12
Output: -1
Constraints:
1 <= routes.length <= 500.
1 <= routes[i].length <= 105
All the values of routes[i] are unique.
sum(routes[i].length) <= 105
0 <= routes[i][j] < 106
0 <= source, target < 106
"""
# V0
# IDEA : BFS + GRAPH
class Solution(object):
    # IDEA : BFS + GRAPH
    def numBusesToDestination(self, routes, S, T):
        """Return the minimum number of buses needed to ride from stop S to
        stop T, or -1 if T is unreachable.

        BFS over stops; BFS depth counts buses boarded.  A route is cleared
        once fully expanded so no bus is boarded twice.
        NOTE: mutates ``routes`` in place (expanded routes are emptied).
        """
        # Fix: this file never imports the `collections` module itself (it
        # only does `from collections import deque` further down), so the
        # original `collections.defaultdict` raised NameError at runtime.
        from collections import defaultdict
        # edge case: already at the destination, no bus needed
        if S == T:
            return 0
        to_routes = defaultdict(set)   # stop -> indices of routes serving it
        for i, route in enumerate(routes):
            for j in route:
                to_routes[j].add(i)
        bfs = [(S, 0)]                 # (stop, buses taken so far)
        seen = set([S])
        for stop, bus in bfs:
            if stop == T:
                return bus
            for i in to_routes[stop]:
                for j in routes[i]:
                    if j not in seen:
                        bfs.append((j, bus + 1))
                        seen.add(j)
                routes[i] = []         # route fully expanded ("seen")
        return -1
# V1
# http://zxi.mytechroad.com/blog/graph/leetcode-815-bus-routes/
# https://www.youtube.com/watch?v=vEcm5farBls
# https://blog.csdn.net/weixin_44617992/article/details/112388066
# C++
# class Solution {
# public:
# int numBusesToDestination(vector<vector<int>>& routes, int S, int T) {
# if (S == T) return 0;
#
# unordered_map<int, vector<int>> m;
# for (int i = 0; i < routes.size(); ++i)
# for (const int stop : routes[i])
# m[stop].push_back(i);
#
# vector<int> visited(routes.size(), 0);
# queue<int> q;
# q.push(S);
# int buses = 0;
#
# while (!q.empty()) {
# int size = q.size();
# ++buses;
# while (size--) {
# int curr = q.front(); q.pop();
# for (const int bus : m[curr]) {
# if (visited[bus]) continue;
# visited[bus] = 1;
# for (int stop : routes[bus]) {
# if (stop == T) return buses;
# q.push(stop);
# }
# }
# }
# }
# return -1;
# }
# };
# V1'
# IDEA : BFS
# https://leetcode.com/problems/bus-routes/discuss/122771/C%2B%2BJavaPython-BFS-Solution
# IDEA :
# The first part loop on routes and record stop to routes mapping in to_route.
# The second part is general bfs. Take a stop from queue and find all connected route.
# The hashset seen record all visited stops and we won't check a stop for twice.
# We can also use a hashset to record all visited routes, or just clear a route after visit.
class Solution(object):
    # IDEA : BFS (stop-level), see
    # https://leetcode.com/problems/bus-routes/discuss/122771/C%2B%2BJavaPython-BFS-Solution
    def numBusesToDestination(self, routes, S, T):
        """Minimum number of buses from stop S to stop T, or -1 if impossible.

        Same stop-level BFS as V0; the S == T case falls out naturally since
        the start node is dequeued with a bus count of 0.
        NOTE: mutates ``routes`` in place.
        """
        # Fix: `collections` itself is never imported in this file (only
        # `from collections import deque`), so `collections.defaultdict`
        # raised NameError; import defaultdict locally instead.
        from collections import defaultdict
        to_routes = defaultdict(set)
        for i, route in enumerate(routes):
            for j in route:
                to_routes[j].add(i)
        bfs = [(S, 0)]
        seen = set([S])
        for stop, bus in bfs:
            if stop == T:
                return bus
            for i in to_routes[stop]:
                for j in routes[i]:
                    if j not in seen:
                        bfs.append((j, bus + 1))
                        seen.add(j)
                routes[i] = []  # seen route
        return -1
# V1''
# IDEA : BFS
# https://leetcode.com/problems/bus-routes/discuss/151289/Python-BFS-With-Explanation
# Reference: https://leetcode.com/problems/bus-routes/discuss/122712/Simple-Java-Solution-using-BFS
from collections import deque
class Solution:
    # BFS over "number of buses taken" levels: each BFS level corresponds to
    # boarding one more bus, and the frontier holds every stop reachable with
    # that many rides.  A bus route is only ever boarded once.
    def numBusesToDestination(self, routes, S, T):
        """
        :type routes: List[List[int]]
        :type S: int
        :type T: int
        :rtype: int
        """
        # Already at the terminal: no bus needed.
        if S == T:
            return 0
        # Map each stop to the list of bus indices that serve it.
        buses_at = {}
        for bus_idx, stop_list in enumerate(routes):
            for s in stop_list:
                buses_at.setdefault(s, []).append(bus_idx)
        frontier = deque([S])
        boarded = set()   # bus routes already taken; never taken twice
        rides = 0
        while frontier:
            rides += 1
            # Expand exactly one BFS level (all stops reachable so far).
            for _ in range(len(frontier)):
                here = frontier.popleft()
                for bus_idx in buses_at[here]:
                    if bus_idx in boarded:
                        continue
                    boarded.add(bus_idx)
                    for s in routes[bus_idx]:
                        if s == T:
                            return rides
                        frontier.append(s)
        return -1
# V1'''
# IDEA : GRAPH + BFS
# https://leetcode.com/problems/bus-routes/discuss/269514/Python-Graph-BFS
# We can view each bus route as a node. If two routes share at least one stop, then there is an edge between them. Based on that, we can build an adjacent-list graph g.
# Then to get the minimal number of bus routes to go from S to T, we can use BFS. The source node is any node containing stop S and destination node is any node containing stop T. The distance between source nodes and destination nodes is the number of bus routes. The distance starts from 1. (If S and T is on at same bus routes, we return 1).
# A corner case is that S == T, we should return 0.
class Solution(object):
    # Route-graph BFS: nodes are bus routes; two routes are adjacent when
    # they share at least one stop.  The answer is the BFS distance (counted
    # from 1) between any route containing S and any route containing T.
    def numBusesToDestination(self, routes, S, T):
        if S == T:
            return 0
        stop_sets = [set(r) for r in routes]
        n = len(stop_sets)
        adjacency = [set() for _ in range(n)]
        for a in range(n):
            for b in range(a):
                if stop_sets[a] & stop_sets[b]:
                    adjacency[a].add(b)
                    adjacency[b].add(a)
        visited = set(i for i, r in enumerate(stop_sets) if S in r)
        goals = set(i for i, r in enumerate(stop_sets) if T in r)
        queue = [(node, 1) for node in visited]
        for node, dist in queue:
            if node in goals:
                return dist
            for nxt in adjacency[node]:
                if nxt not in visited:
                    visited.add(nxt)
                    queue.append((nxt, dist + 1))
        return -1
# V1'''''
# IDEA : BFS
# https://leetcode.com/problems/bus-routes/solution/
class Solution(object):
    # IDEA : route-graph BFS (nodes = bus routes, edge = shared stop)
    def numBusesToDestination(self, routes, S, T):
        """Minimum number of buses from stop S to stop T, or -1 if impossible.

        Python 3 fixes over the original:
          * ``routes = map(set, routes)`` returned a one-shot iterator which
            was then both iterated by ``enumerate`` and indexed as
            ``routes[j]`` -- a TypeError on Python 3; materialize a list.
          * ``collections`` itself is never imported in this file (only
            ``deque``), so ``collections.defaultdict`` raised NameError;
            import defaultdict locally.
        """
        from collections import defaultdict
        if S == T:
            return 0
        routes = [set(r) for r in routes]
        graph = defaultdict(set)
        for i, r1 in enumerate(routes):
            for j in range(i + 1, len(routes)):
                r2 = routes[j]
                if r1 & r2:          # routes share at least one stop
                    graph[i].add(j)
                    graph[j].add(i)
        seen, targets = set(), set()
        for node, route in enumerate(routes):
            if S in route:
                seen.add(node)
            if T in route:
                targets.add(node)
        queue = [(node, 1) for node in seen]
        for node, depth in queue:
            if node in targets:
                return depth
            for nei in graph[node]:
                if nei not in seen:
                    seen.add(nei)
                    queue.append((nei, depth + 1))
        return -1
# V2 | """
815. Bus Routes
Hard
You are given an array routes representing bus routes where routes[i] is a bus route that the ith bus repeats forever.
For example, if routes[0] = [1, 5, 7], this means that the 0th bus travels in the sequence 1 -> 5 -> 7 -> 1 -> 5 -> 7 -> 1 -> ... forever.
You will start at the bus stop source (You are not on any bus initially), and you want to go to the bus stop target. You can travel between bus stops by buses only.
Return the least number of buses you must take to travel from source to target. Return -1 if it is not possible.
Example 1:
Input: routes = [[1,2,7],[3,6,7]], source = 1, target = 6
Output: 2
Explanation: The best strategy is take the first bus to the bus stop 7, then take the second bus to the bus stop 6.
Example 2:
Input: routes = [[7,12],[4,5,15],[6],[15,19],[9,12,13]], source = 15, target = 12
Output: -1
Constraints:
1 <= routes.length <= 500.
1 <= routes[i].length <= 105
All the values of routes[i] are unique.
sum(routes[i].length) <= 105
0 <= routes[i][j] < 106
0 <= source, target < 106
"""
# V0
# IDEA : BFS + GRAPH
class Solution(object):
def numBusesToDestination(self, routes, S, T):
# edge case:
if S == T:
return 0
to_routes = collections.defaultdict(set)
for i, route in enumerate(routes):
for j in route:
to_routes[j].add(i)
bfs = [(S, 0)]
seen = set([S])
for stop, bus in bfs:
if stop == T:
return bus
for i in to_routes[stop]:
for j in routes[i]:
if j not in seen:
bfs.append((j, bus + 1))
seen.add(j)
routes[i] = [] # seen route
return -1
# V1
# http://zxi.mytechroad.com/blog/graph/leetcode-815-bus-routes/
# https://www.youtube.com/watch?v=vEcm5farBls
# https://blog.csdn.net/weixin_44617992/article/details/112388066
# C++
# class Solution {
# public:
# int numBusesToDestination(vector<vector<int>>& routes, int S, int T) {
# if (S == T) return 0;
#
# unordered_map<int, vector<int>> m;
# for (int i = 0; i < routes.size(); ++i)
# for (const int stop : routes[i])
# m[stop].push_back(i);
#
# vector<int> visited(routes.size(), 0);
# queue<int> q;
# q.push(S);
# int buses = 0;
#
# while (!q.empty()) {
# int size = q.size();
# ++buses;
# while (size--) {
# int curr = q.front(); q.pop();
# for (const int bus : m[curr]) {
# if (visited[bus]) continue;
# visited[bus] = 1;
# for (int stop : routes[bus]) {
# if (stop == T) return buses;
# q.push(stop);
# }
# }
# }
# }
# return -1;
# }
# };
# V1'
# IDEA : BFS
# https://leetcode.com/problems/bus-routes/discuss/122771/C%2B%2BJavaPython-BFS-Solution
# IDEA :
# The first part loop on routes and record stop to routes mapping in to_route.
# The second part is general bfs. Take a stop from queue and find all connected route.
# The hashset seen record all visited stops and we won't check a stop for twice.
# We can also use a hashset to record all visited routes, or just clear a route after visit.
class Solution(object):
def numBusesToDestination(self, routes, S, T):
to_routes = collections.defaultdict(set)
for i, route in enumerate(routes):
for j in route:
to_routes[j].add(i)
bfs = [(S, 0)]
seen = set([S])
for stop, bus in bfs:
if stop == T: return bus
for i in to_routes[stop]:
for j in routes[i]:
if j not in seen:
bfs.append((j, bus + 1))
seen.add(j)
routes[i] = [] # seen route
return -1
# V1''
# IDEA : BFS
# https://leetcode.com/problems/bus-routes/discuss/151289/Python-BFS-With-Explanation
# Reference: https://leetcode.com/problems/bus-routes/discuss/122712/Simple-Java-Solution-using-BFS
from collections import deque
class Solution:
# This is a very good BFS problem.
# In BFS, we need to traverse all positions in each level firstly, and then go to the next level.
# Our task is to figure out:
# 1. What is the level in this problem?
# 2. What is the position we want in this problem?
# 3. How to traverse all positions in a level?
#
# For this problem:
# 1. The level is each time to take bus.
# 2. The position is all of the stops you can reach for taking one time of bus.
# 3. Using a queue to record all of the stops can be arrived for each time you take buses.
def numBusesToDestination(self, routes, S, T):
"""
:type routes: List[List[int]]
:type S: int
:type T: int
:rtype: int
"""
# You already at the terminal, so you needn't take any bus.
if S == T: return 0
# You need to record all the buses you can take at each stop so that you can find out all
# of the stops you can reach when you take one time of bus.
# the key is stop and the value is all of the buses you can take at this stop.
stopBoard = {}
for bus, stops in enumerate(routes):
for stop in stops:
if stop not in stopBoard:
stopBoard[stop] = [bus]
else:
stopBoard[stop].append(bus)
# The queue is to record all of the stops you can reach when you take one time of bus.
queue = deque([S])
# Using visited to record the buses that have been taken before, because you needn't to take them again.
visited = set()
res = 0
while queue:
# take one time of bus.
res += 1
# In order to traverse all of the stops you can reach for this time, you have to traverse
# all of the stops you can reach in last time.
pre_num_stops = len(queue)
for _ in range(pre_num_stops):
curStop = queue.popleft()
# Each stop you can take at least one bus, you need to traverse all of the buses at this stop
# in order to get all of the stops can be reach at this time.
for bus in stopBoard[curStop]:
# if the bus you have taken before, you needn't take it again.
if bus in visited: continue
visited.add(bus)
for stop in routes[bus]:
if stop == T: return res
queue.append(stop)
return -1
# V1'''
# IDEA : GRAPH + BFS
# https://leetcode.com/problems/bus-routes/discuss/269514/Python-Graph-BFS
# We can view each bus route as a node. If two routes share at least one stop, then there is an edge between them. Based on that, we can build an adjacent-list graph g.
# Then to get the minimal number of bus routes to go from S to T, we can use BFS. The source node is any node containing stop S and destination node is any node containing stop T. The distance between source nodes and destination nodes is the number of bus routes. The distance starts from 1. (If S and T is on at same bus routes, we return 1).
# A corner case is that S == T, we should return 0.
class Solution(object):
def numBusesToDestination(self, routes, S, T):
if S == T: return 0
routes, n = [set(r) for r in routes], len(routes)
g = [set() for _ in range(n)]
for i in range(n):
for j in range(i):
if set(routes[i]) & set(routes[j]):
g[i].add(j), g[j].add(i)
seen, dst = set(i for i,r in enumerate(routes) if S in r), set(i for i,r in enumerate(routes) if T in r)
q = [(x, 1) for x in seen]
for x, d in q:
if x in dst: return d
for y in g[x]:
if y not in seen: seen.add(y), q.append((y, d+1))
return -1
# V1'''''
# IDEA : BFS
# https://leetcode.com/problems/bus-routes/solution/
class Solution(object):
def numBusesToDestination(self, routes, S, T):
if S == T: return 0
routes = map(set, routes)
graph = collections.defaultdict(set)
for i, r1 in enumerate(routes):
for j in range(i+1, len(routes)):
r2 = routes[j]
if any(r in r2 for r in r1):
graph[i].add(j)
graph[j].add(i)
seen, targets = set(), set()
for node, route in enumerate(routes):
if S in route: seen.add(node)
if T in route: targets.add(node)
queue = [(node, 1) for node in seen]
for node, depth in queue:
if node in targets: return depth
for nei in graph[node]:
if nei not in seen:
seen.add(nei)
queue.append((nei, depth+1))
return -1
# V2 | en | 0.866819 | 815. Bus Routes Hard You are given an array routes representing bus routes where routes[i] is a bus route that the ith bus repeats forever. For example, if routes[0] = [1, 5, 7], this means that the 0th bus travels in the sequence 1 -> 5 -> 7 -> 1 -> 5 -> 7 -> 1 -> ... forever. You will start at the bus stop source (You are not on any bus initially), and you want to go to the bus stop target. You can travel between bus stops by buses only. Return the least number of buses you must take to travel from source to target. Return -1 if it is not possible. Example 1: Input: routes = [[1,2,7],[3,6,7]], source = 1, target = 6 Output: 2 Explanation: The best strategy is take the first bus to the bus stop 7, then take the second bus to the bus stop 6. Example 2: Input: routes = [[7,12],[4,5,15],[6],[15,19],[9,12,13]], source = 15, target = 12 Output: -1 Constraints: 1 <= routes.length <= 500. 1 <= routes[i].length <= 105 All the values of routes[i] are unique. sum(routes[i].length) <= 105 0 <= routes[i][j] < 106 0 <= source, target < 106 # V0 # IDEA : BFS + GRAPH # edge case: # seen route # V1 # http://zxi.mytechroad.com/blog/graph/leetcode-815-bus-routes/ # https://www.youtube.com/watch?v=vEcm5farBls # https://blog.csdn.net/weixin_44617992/article/details/112388066 # C++ # class Solution { # public: # int numBusesToDestination(vector<vector<int>>& routes, int S, int T) { # if (S == T) return 0; # # unordered_map<int, vector<int>> m; # for (int i = 0; i < routes.size(); ++i) # for (const int stop : routes[i]) # m[stop].push_back(i); # # vector<int> visited(routes.size(), 0); # queue<int> q; # q.push(S); # int buses = 0; # # while (!q.empty()) { # int size = q.size(); # ++buses; # while (size--) { # int curr = q.front(); q.pop(); # for (const int bus : m[curr]) { # if (visited[bus]) continue; # visited[bus] = 1; # for (int stop : routes[bus]) { # if (stop == T) return buses; # q.push(stop); # } # } # } # } # return -1; # } # }; # V1' # IDEA : BFS # 
https://leetcode.com/problems/bus-routes/discuss/122771/C%2B%2BJavaPython-BFS-Solution # IDEA : # The first part loop on routes and record stop to routes mapping in to_route. # The second part is general bfs. Take a stop from queue and find all connected route. # The hashset seen record all visited stops and we won't check a stop for twice. # We can also use a hashset to record all visited routes, or just clear a route after visit. # seen route # V1'' # IDEA : BFS # https://leetcode.com/problems/bus-routes/discuss/151289/Python-BFS-With-Explanation # Reference: https://leetcode.com/problems/bus-routes/discuss/122712/Simple-Java-Solution-using-BFS # This is a very good BFS problem. # In BFS, we need to traverse all positions in each level firstly, and then go to the next level. # Our task is to figure out: # 1. What is the level in this problem? # 2. What is the position we want in this problem? # 3. How to traverse all positions in a level? # # For this problem: # 1. The level is each time to take bus. # 2. The position is all of the stops you can reach for taking one time of bus. # 3. Using a queue to record all of the stops can be arrived for each time you take buses. :type routes: List[List[int]] :type S: int :type T: int :rtype: int # You already at the terminal, so you needn't take any bus. # You need to record all the buses you can take at each stop so that you can find out all # of the stops you can reach when you take one time of bus. # the key is stop and the value is all of the buses you can take at this stop. # The queue is to record all of the stops you can reach when you take one time of bus. # Using visited to record the buses that have been taken before, because you needn't to take them again. # take one time of bus. # In order to traverse all of the stops you can reach for this time, you have to traverse # all of the stops you can reach in last time. 
# Each stop you can take at least one bus, you need to traverse all of the buses at this stop # in order to get all of the stops can be reach at this time. # if the bus you have taken before, you needn't take it again. # V1''' # IDEA : GRAPH + BFS # https://leetcode.com/problems/bus-routes/discuss/269514/Python-Graph-BFS # We can view each bus route as a node. If two routes share at least one stop, then there is an edge between them. Based on that, we can build an adjacent-list graph g. # Then to get the minimal number of bus routes to go from S to T, we can use BFS. The source node is any node containing stop S and destination node is any node containing stop T. The distance between source nodes and destination nodes is the number of bus routes. The distance starts from 1. (If S and T is on at same bus routes, we return 1). # A corner case is that S == T, we should return 0. # V1''''' # IDEA : BFS # https://leetcode.com/problems/bus-routes/solution/ # V2 | 3.794514 | 4 |
smartcliapp/informer.py | smartlegionlab/smartcliapp | 2 | 6622680 | # -*- coding: utf-8 -*-
# --------------------------------------------------------
# Licensed under the terms of the BSD 3-Clause License
# (see LICENSE for details).
# Copyright © 2018-2021, <NAME>
# All rights reserved.
# --------------------------------------------------------
from smartprinter.printers import Printer
class Informer:
    """
    Console informer for CLI applications.

    Subclass and override the class attributes (name, title, description,
    copyright, url, msg, version) with your application's details; the
    show_* helpers print them framed by a repeated fill character via the
    shared smartprinter Printer.
    """
    # Shared Printer instance used for all console output.
    printer = Printer()
    # Application metadata -- override these in subclasses.
    name = ''
    title = ''
    description = ''
    copyright = ''
    url = ''
    msg = ''
    version = '0.0.0'
    @classmethod
    def show_head(cls, char='*'):
        """Displays a header with information when the application starts.

        :param char: fill/frame character forwarded to the printer.
        """
        cls.printer.smart.echo(char=char)
        cls.printer.smart.echo(cls.title, char=char)
        cls.printer.smart.echo(cls.description, char=char)
    @classmethod
    def show_footer(cls, char='*'):
        """Displays a footer with information when the application ends.

        :param char: fill/frame character forwarded to the printer.
        """
        cls.printer.smart.echo(cls.url, char=char)
        cls.printer.smart.echo(cls.copyright, char=char)
        cls.printer.smart.echo(char=char)
| # -*- coding: utf-8 -*-
# --------------------------------------------------------
# Licensed under the terms of the BSD 3-Clause License
# (see LICENSE for details).
# Copyright © 2018-2021, <NAME>
# All rights reserved.
# --------------------------------------------------------
from smartprinter.printers import Printer
class Informer:
    """
    Informer

    - Override the attributes to yours.
    """
    # Printer used for all banner output; shared by all subclasses.
    printer = Printer()
    # Application metadata; subclasses override these with real values.
    name = ''
    title = ''
    description = ''
    copyright = ''
    url = ''
    msg = ''
    version = '0.0.0'
    @classmethod
    def show_head(cls, char='*'):
        """Displays a header with information when the application starts."""
        # Opening separator line, then title and description framed by `char`.
        cls.printer.smart.echo(char=char)
        cls.printer.smart.echo(cls.title, char=char)
        cls.printer.smart.echo(cls.description, char=char)
    @classmethod
    def show_footer(cls, char='*'):
        """Displays a footer with information when the application ends."""
        # URL and copyright framed by `char`, then a closing separator line.
        cls.printer.smart.echo(cls.url, char=char)
        cls.printer.smart.echo(cls.copyright, char=char)
        cls.printer.smart.echo(char=char)
| en | 0.668829 | # -*- coding: utf-8 -*- # -------------------------------------------------------- # Licensed under the terms of the BSD 3-Clause License # (see LICENSE for details). # Copyright © 2018-2021, <NAME> # All rights reserved. # -------------------------------------------------------- Informer - Override the attributes to yours. Displays a header with information when the application starts. Displays a footer with information when the application ends. | 2.958591 | 3 |
ObitSystem/ObitTalk/python/AIPS.py | sarrvesh/Obit | 5 | 6622681 | # Copyright (C) 2005 Joint Institute for VLBI in Europe
# Copyright (C) 2007,2019 Associated Universities, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This module provides the AIPSDisk and AIPS classes. Together they
provide some basic infrastructure used by the AIPSTask and AIPSData
modules.
"""
# Generic Python stuff.
from __future__ import absolute_import
import os
from AIPSUtil import *
# Available proxies.
import LocalProxy
from six.moves.xmlrpc_client import ServerProxy
from six.moves import range
class AIPSDisk:
    """A single (possibly remote) AIPS disk.

    Holds the one-based AIPS disk number and the URL of the proxy
    through which the disk is reached; local disks have a URL of None.
    """
    def __init__(self, url, disk, dirname):
        self.url = url          # proxy URL, or None for a local disk
        self.disk = disk        # one-based AIPS disk number
        self.dirname = dirname  # directory backing this disk
    def proxy(self):
        """Return the proxy object used to access this AIPS disk."""
        return ServerProxy(self.url) if self.url else LocalProxy
class AIPS:
    """Container for several AIPS-related default values.

    NOTE: the class body below executes at import time: it first asks
    Obit for the disks already configured, then scans the ``DAnn``
    environment variables for any further disk directories.
    """
    # Default AIPS user ID.
    userno = 0
    # List of available proxies.
    proxies = [ LocalProxy ]
    # AIPS disk mapping.
    disks = [ None ] # Disk numbers are one-based.
    # Check for disks already in the system
    import OErr, Obit
    err = OErr.OErr()
    numb = Obit.AIPSGetNumDisk(err.me)
    disk = 0
    # Register the `numb` disks that Obit already knows about.
    for i in range(0,numb):
        disk += 1;
        disks.append(AIPSDisk(None, disk, Obit.AIPSGetDirname(disk,err.me)))
    # AIPS seems to support a maximum of 35 disks.
    # Scan DAnn environment variables (DA01, DA02, ...) for extra areas;
    # stop at the first missing variable.
    for i in range(1, 35-numb):
        disk +=1
        # NOTE(review): dirname is fetched before the existence check; when
        # the variable is missing the loop breaks, so the None is never used.
        area = 'DA' + ehex(disk, 2, '0')
        dirname = os.getenv(area)
        if not area in os.environ:
            break
        disks.append(AIPSDisk(None, disk, dirname))
        continue  # redundant: this is already the last statement of the loop
    # Message log.
    log = None
    # Debug log.
    debuglog = None
| # Copyright (C) 2005 Joint Institute for VLBI in Europe
# Copyright (C) 2007,2019 Associated Universities, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This module provides the AIPSDisk and AIPS classes. Together they
provide some basic infrastructure used by the AIPSTask and AIPSData
modules.
"""
# Generic Python stuff.
from __future__ import absolute_import
import os
from AIPSUtil import *
# Available proxies.
import LocalProxy
from six.moves.xmlrpc_client import ServerProxy
from six.moves import range
class AIPSDisk:
    """One AIPS disk, either local or remote.

    An instance records the one-based disk number, the backing
    directory, and the URL of the proxy serving the disk.  A URL of
    None marks a local disk handled by the in-process LocalProxy.
    """
    def __init__(self, url, disk, dirname):
        self.url = url
        self.disk = disk
        self.dirname = dirname
    def proxy(self):
        """Return the proxy through which this AIPS disk is accessed."""
        if not self.url:
            # Local disk: no server round-trip needed.
            return LocalProxy
        return ServerProxy(self.url)
class AIPS:
    """Container for several AIPS-related default values.

    NOTE: the class body below executes at import time: it first asks
    Obit for the disks already configured, then scans the ``DAnn``
    environment variables for any further disk directories.
    """
    # Default AIPS user ID.
    userno = 0
    # List of available proxies.
    proxies = [ LocalProxy ]
    # AIPS disk mapping.
    disks = [ None ] # Disk numbers are one-based.
    # Check for disks already in the system
    import OErr, Obit
    err = OErr.OErr()
    numb = Obit.AIPSGetNumDisk(err.me)
    disk = 0
    # Register the `numb` disks that Obit already knows about.
    for i in range(0,numb):
        disk += 1;
        disks.append(AIPSDisk(None, disk, Obit.AIPSGetDirname(disk,err.me)))
    # AIPS seems to support a maximum of 35 disks.
    # Scan DAnn environment variables (DA01, DA02, ...) for extra areas;
    # stop at the first missing variable.
    for i in range(1, 35-numb):
        disk +=1
        # NOTE(review): dirname is fetched before the existence check; when
        # the variable is missing the loop breaks, so the None is never used.
        area = 'DA' + ehex(disk, 2, '0')
        dirname = os.getenv(area)
        if not area in os.environ:
            break
        disks.append(AIPSDisk(None, disk, dirname))
        continue  # redundant: this is already the last statement of the loop
    # Message log.
    log = None
    # Debug log.
    debuglog = None
| en | 0.862994 | # Copyright (C) 2005 Joint Institute for VLBI in Europe # Copyright (C) 2007,2019 Associated Universities, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA This module provides the AIPSDisk and AIPS classes. Together they provide some basic infrastructure used by the AIPSTask and AIPSData modules. # Generic Python stuff. # Available proxies. Class representing a (possibly remote) AIPS disk. An instance of this class stores an AIPS disk number and the URL of the proxy through which it can be accessed. For local AIPS disks the URL will be None. Return the proxy through which this AIPS disk can be accessed. Container for several AIPS-related default values. # Default AIPS user ID. # List of available proxies. # AIPS disk mapping. # Disk numbers are one-based. # Check for disks already in the system # AIPS seems to support a maximum of 35 disks. # Message log. # Debug log. | 2.059915 | 2 |
packages/ipylintotype/src/ipylintotype/diagnosers/pylint_diagnoser.py | deathbeds/lintotype | 18 | 6622682 | <reponame>deathbeds/lintotype
import contextlib
import io
import re
import typing as typ
from pathlib import Path
from tempfile import TemporaryDirectory
import pylint.lint
import traitlets
from .. import shapes
from .diagnoser import Diagnoser, InteractiveShell, IPythonDiagnoser
_re_pylint = r"^(.):\s*(\d+),\s(\d+):\s*(.*?)\s*\((.*)\)$"
_help_pylint_args = (
f"https://docs.pylint.org/en/{pylint.__version__}/run.html#command-line-options"
)
_pylint_severity = {
"W": Diagnoser.Severity.warning,
"E": Diagnoser.Severity.error,
"C": Diagnoser.Severity.info,
"R": Diagnoser.Severity.hint,
}
class PyLintDiagnoser(IPythonDiagnoser):
    """Lints notebook cell code with ``pylint`` and reports diagnostics.

    The cell sources are transformed and written to a temporary file,
    pylint is run on that file, and pylint's textual report is parsed
    back into diagnostic dictionaries.
    """
    entry_point = traitlets.Unicode(default_value=pylint.__name__)
    args = traitlets.List(traitlets.Unicode(), help=_help_pylint_args)
    @traitlets.default("args")
    def _default_ignore(self):
        # Trailing newlines are an artifact of writing the concatenated
        # cell code to a file, so that check is silenced by default.
        rules = ["trailing-newlines"]
        return [f"""--disable={",".join(rules)}"""]
    def run(
        self,
        cell_id: typ.Text,
        code: typ.List[shapes.Cell],
        metadata: typ.Dict[str, typ.Dict[str, typ.Any]],
        shell: InteractiveShell,
        *args,
        **kwargs,
    ) -> shapes.Annotations:
        """Run pylint over *code* and return ``{"diagnostics": [...]}``
        with line numbers mapped back into the cell identified by
        *cell_id* via the offsets from ``transform_for_diagnostics``.
        """
        out = io.StringIO()
        err = io.StringIO()
        transformed_code, line_offsets = self.transform_for_diagnostics(code, shell)
        with TemporaryDirectory() as td:
            code_file = Path(td) / "code.py"
            code_file.write_text(transformed_code)
            # Capture pylint's report, which it prints to stdout.
            with contextlib.redirect_stdout(out), contextlib.redirect_stderr(err):
                try:
                    pylint.lint.Run(list(self.args) + [str(code_file)])
                except SystemExit:
                    # pylint.lint.Run() finishes via sys.exit(); absorb
                    # only that exit so the report below can be parsed.
                    pass
                except Exception:
                    # Best effort: a pylint crash simply yields no
                    # diagnostics.  (The old bare ``except`` also
                    # swallowed KeyboardInterrupt; this does not.)
                    pass
        outs = out.getvalue()
        diagnostics: typ.List[shapes.Diagnostic] = []
        for severity, line, col, msg, rule in re.findall(_re_pylint, outs, flags=re.M):
            # Map the line in the concatenated file back into the cell.
            line = int(line) - line_offsets[cell_id]
            col = int(col)
            diagnostics.append(
                {
                    "message": msg.strip(),
                    "source": self.entry_point,
                    "code": rule,
                    "severity": _pylint_severity.get(severity, self.Severity.error),
                    "range": {
                        "start": dict(line=line - 1, character=col - 1),
                        "end": dict(line=line - 1, character=col),
                    },
                }
            )
        return dict(diagnostics=diagnostics)
| import contextlib
import io
import re
import typing as typ
from pathlib import Path
from tempfile import TemporaryDirectory
import pylint.lint
import traitlets
from .. import shapes
from .diagnoser import Diagnoser, InteractiveShell, IPythonDiagnoser
_re_pylint = r"^(.):\s*(\d+),\s(\d+):\s*(.*?)\s*\((.*)\)$"
_help_pylint_args = (
f"https://docs.pylint.org/en/{pylint.__version__}/run.html#command-line-options"
)
_pylint_severity = {
"W": Diagnoser.Severity.warning,
"E": Diagnoser.Severity.error,
"C": Diagnoser.Severity.info,
"R": Diagnoser.Severity.hint,
}
class PyLintDiagnoser(IPythonDiagnoser):
    """Lints notebook cell code with ``pylint`` and reports diagnostics.

    The cell sources are transformed and written to a temporary file,
    pylint is run on that file, and pylint's textual report is parsed
    back into diagnostic dictionaries.
    """
    entry_point = traitlets.Unicode(default_value=pylint.__name__)
    args = traitlets.List(traitlets.Unicode(), help=_help_pylint_args)
    @traitlets.default("args")
    def _default_ignore(self):
        # Trailing newlines are an artifact of writing the concatenated
        # cell code to a file, so that check is silenced by default.
        rules = ["trailing-newlines"]
        return [f"""--disable={",".join(rules)}"""]
    def run(
        self,
        cell_id: typ.Text,
        code: typ.List[shapes.Cell],
        metadata: typ.Dict[str, typ.Dict[str, typ.Any]],
        shell: InteractiveShell,
        *args,
        **kwargs,
    ) -> shapes.Annotations:
        """Run pylint over *code* and return ``{"diagnostics": [...]}``
        with line numbers mapped back into the cell identified by
        *cell_id* via the offsets from ``transform_for_diagnostics``.
        """
        out = io.StringIO()
        err = io.StringIO()
        transformed_code, line_offsets = self.transform_for_diagnostics(code, shell)
        with TemporaryDirectory() as td:
            tdp = Path(td)
            code_file = tdp / "code.py"
            code_file.write_text(transformed_code)
            # Capture pylint's report, which it prints to stdout.
            with contextlib.redirect_stdout(out), contextlib.redirect_stderr(err):
                try:
                    res = pylint.lint.Run(list(self.args) + [str(code_file)])
                except:
                    # NOTE(review): bare except — presumably here to absorb
                    # the SystemExit raised by pylint.lint.Run(), but it also
                    # swallows KeyboardInterrupt and real errors.  Consider
                    # narrowing to ``except SystemExit``.
                    pass
        outs = out.getvalue()
        errs = err.getvalue()
        matches = re.findall(_re_pylint, outs, flags=re.M)
        diagnostics = []  # type: typ.List[shapes.Diagnostic]
        for severity, line, col, msg, rule in matches:
            # Map the line in the concatenated file back into the cell.
            line = int(line) - line_offsets[cell_id]
            col = int(col)
            diagnostics.append(
                {
                    "message": msg.strip(),
                    "source": self.entry_point,
                    "code": rule,
                    "severity": _pylint_severity.get(severity, self.Severity.error),
                    "range": {
                        "start": dict(line=line - 1, character=col - 1),
                        "end": dict(line=line - 1, character=col),
                    },
                }
            )
        return dict(diagnostics=diagnostics)
tests/testmodels.py | sunshiding/cca_zoo-1 | 1 | 6622683 | <filename>tests/testmodels.py
import itertools
from unittest import TestCase
import numpy as np
import scipy.sparse as sp
from sklearn.utils.validation import check_random_state
from cca_zoo.models import CCA, PLS, CCA_ALS, SCCA, PMD, ElasticCCA, rCCA, KCCA, KTCCA, MCCA, GCCA, TCCA, SCCA_ADMM, \
SpanCCA, SWCCA
class TestModels(TestCase):
    """Consistency tests for the cca_zoo model zoo.

    Unregularized CCA solvers are expected to agree with each other on
    random data, fully regularized linear solvers are expected to match
    PLS, and the sparse / tensor / cross-validated estimators are mostly
    exercised as smoke tests.
    """

    def setUp(self):
        # Fixed seed keeps the decimal-place comparisons below stable.
        self.rng = check_random_state(0)
        self.X = self.rng.rand(500, 20)
        self.Y = self.rng.rand(500, 21)
        self.Z = self.rng.rand(500, 22)
        self.X_sp = sp.random(500, 20, density=0.5, random_state=self.rng)
        self.Y_sp = sp.random(500, 21, density=0.5, random_state=self.rng)

    def tearDown(self):
        pass

    def test_unregularized_methods(self):
        """All unregularized CCA solvers should give the same correlations."""
        latent_dims = 2
        wrap_cca = CCA(latent_dims=latent_dims).fit(self.X, self.Y)
        wrap_iter = CCA_ALS(latent_dims=latent_dims, tol=1e-9, random_state=self.rng,
                            stochastic=False).fit(self.X, self.Y)
        wrap_gcca = GCCA(latent_dims=latent_dims).fit(self.X, self.Y)
        wrap_mcca = MCCA(latent_dims=latent_dims).fit(self.X, self.Y)
        wrap_kcca = KCCA(latent_dims=latent_dims).fit(self.X, self.Y)
        corr_cca = wrap_cca.score(self.X, self.Y)
        corr_iter = wrap_iter.score(self.X, self.Y)
        corr_gcca = wrap_gcca.score(self.X, self.Y)
        corr_mcca = wrap_mcca.score(self.X, self.Y)
        corr_kcca = wrap_kcca.score(self.X, self.Y)
        # Score outputs must have shape (n_samples, latent_dims).
        self.assertTrue(wrap_iter.scores[0].shape == (self.X.shape[0], latent_dims))
        self.assertTrue(wrap_gcca.scores[0].shape == (self.X.shape[0], latent_dims))
        self.assertTrue(wrap_mcca.scores[0].shape == (self.X.shape[0], latent_dims))
        self.assertTrue(wrap_kcca.scores[0].shape == (self.X.shape[0], latent_dims))
        # The correlations from each unregularized method must agree.
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_cca, corr_iter, decimal=2))
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_iter, corr_mcca, decimal=2))
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_iter, corr_gcca, decimal=2))
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_iter, corr_kcca, decimal=2))

    def test_sparse_input(self):
        """The same agreement should hold when fitting on sparse matrices."""
        latent_dims = 2
        wrap_cca = CCA(latent_dims=latent_dims, centre=False).fit(self.X_sp, self.Y_sp)
        wrap_iter = CCA_ALS(latent_dims=latent_dims, tol=1e-9, stochastic=False,
                            centre=False).fit(self.X_sp, self.Y_sp)
        wrap_gcca = GCCA(latent_dims=latent_dims, centre=False).fit(self.X_sp, self.Y_sp)
        wrap_mcca = MCCA(latent_dims=latent_dims, centre=False).fit(self.X_sp, self.Y_sp)
        wrap_kcca = KCCA(latent_dims=latent_dims, centre=False).fit(self.X_sp, self.Y_sp)
        # Fitting is on sparse data; scoring uses the dense arrays.
        corr_cca = wrap_cca.score(self.X, self.Y)
        corr_iter = wrap_iter.score(self.X, self.Y)
        corr_gcca = wrap_gcca.score(self.X, self.Y)
        corr_mcca = wrap_mcca.score(self.X, self.Y)
        corr_kcca = wrap_kcca.score(self.X, self.Y)
        # Score outputs must have shape (n_samples, latent_dims).
        self.assertTrue(wrap_iter.scores[0].shape == (self.X_sp.shape[0], latent_dims))
        self.assertTrue(wrap_gcca.scores[0].shape == (self.X_sp.shape[0], latent_dims))
        self.assertTrue(wrap_mcca.scores[0].shape == (self.X_sp.shape[0], latent_dims))
        self.assertTrue(wrap_kcca.scores[0].shape == (self.X_sp.shape[0], latent_dims))
        # The correlations from each unregularized method must agree.
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_cca, corr_iter, decimal=2))
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_iter, corr_mcca, decimal=2))
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_iter, corr_gcca, decimal=2))
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_iter, corr_kcca, decimal=2))

    def test_unregularized_multi(self):
        """Unregularized methods should also agree for more than two views."""
        latent_dims = 2
        wrap_cca = rCCA(latent_dims=latent_dims).fit(self.X, self.Y, self.Z)
        wrap_iter = CCA_ALS(latent_dims=latent_dims, stochastic=False,
                            tol=1e-12).fit(self.X, self.Y, self.Z)
        wrap_gcca = GCCA(latent_dims=latent_dims).fit(self.X, self.Y, self.Z)
        wrap_mcca = MCCA(latent_dims=latent_dims).fit(self.X, self.Y, self.Z)
        wrap_kcca = KCCA(latent_dims=latent_dims).fit(self.X, self.Y, self.Z)
        corr_cca = wrap_cca.score(self.X, self.Y, self.Z)
        corr_iter = wrap_iter.score(self.X, self.Y, self.Z)
        corr_gcca = wrap_gcca.score(self.X, self.Y, self.Z)
        corr_mcca = wrap_mcca.score(self.X, self.Y, self.Z)
        corr_kcca = wrap_kcca.score(self.X, self.Y, self.Z)
        # Score outputs must have shape (n_samples, latent_dims).
        self.assertTrue(wrap_iter.scores[0].shape == (self.X.shape[0], latent_dims))
        self.assertTrue(wrap_gcca.scores[0].shape == (self.X.shape[0], latent_dims))
        self.assertTrue(wrap_mcca.scores[0].shape == (self.X.shape[0], latent_dims))
        self.assertTrue(wrap_kcca.scores[0].shape == (self.X.shape[0], latent_dims))
        # The correlations from each unregularized method must agree.
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_cca, corr_iter, decimal=1))
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_cca, corr_mcca, decimal=2))
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_cca, corr_gcca, decimal=2))
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_cca, corr_kcca, decimal=2))

    def test_regularized_methods(self):
        """At maximum regularisation the linear methods should match PLS."""
        latent_dims = 2
        c = 1
        wrap_kernel = KCCA(latent_dims=latent_dims, c=[c, c],
                           kernel=['linear', 'linear']).fit(self.X, self.Y)
        wrap_pls = PLS(latent_dims=latent_dims).fit(self.X, self.Y)
        wrap_gcca = GCCA(latent_dims=latent_dims, c=[c, c]).fit(self.X, self.Y)
        wrap_mcca = MCCA(latent_dims=latent_dims, c=[c, c]).fit(self.X, self.Y)
        wrap_rCCA = rCCA(latent_dims=latent_dims, c=[c, c]).fit(self.X, self.Y)
        corr_gcca = wrap_gcca.score(self.X, self.Y)
        corr_mcca = wrap_mcca.score(self.X, self.Y)
        corr_kernel = wrap_kernel.score(self.X, self.Y)
        corr_pls = wrap_pls.score(self.X, self.Y)
        corr_rcca = wrap_rCCA.score(self.X, self.Y)
        # NOTE(review): the GCCA-vs-PLS check was left disabled; confirm
        # whether it is expected to hold before re-enabling it:
        # self.assertIsNone(np.testing.assert_array_almost_equal(corr_pls, corr_gcca, decimal=2))
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_pls, corr_mcca, decimal=1))
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_pls, corr_kernel, decimal=1))
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_pls, corr_rcca, decimal=1))

    def test_non_negative_methods(self):
        """Smoke test: positivity-constrained variants fit without error."""
        latent_dims = 2
        wrap_nnelasticca = ElasticCCA(latent_dims=latent_dims, tol=1e-9, positive=True,
                                      l1_ratio=[0.5, 0.5], c=[1e-4, 1e-5]).fit(self.X, self.Y)
        wrap_als = CCA_ALS(latent_dims=latent_dims, tol=1e-9).fit(self.X, self.Y)
        wrap_nnals = CCA_ALS(latent_dims=latent_dims, tol=1e-9, positive=True).fit(self.X, self.Y)
        wrap_nnscca = SCCA(latent_dims=latent_dims, tol=1e-9, positive=True,
                           c=[1e-4, 1e-5]).fit(self.X, self.Y)

    def test_sparse_methods(self):
        """Smoke test: sparsity-inducing methods and their grid searches run."""
        latent_dims = 2
        c1 = [1, 3]
        c2 = [1, 3]
        param_candidates = {'c': list(itertools.product(c1, c2))}
        wrap_pmd = PMD(latent_dims=latent_dims, random_state=self.rng).gridsearch_fit(
            self.X, self.Y, param_candidates=param_candidates, verbose=True, plot=True)
        c1 = [1e-4, 1e-5]
        c2 = [1e-4, 1e-5]
        param_candidates = {'c': list(itertools.product(c1, c2))}
        wrap_scca = SCCA(latent_dims=latent_dims, random_state=self.rng).gridsearch_fit(
            self.X, self.Y, param_candidates=param_candidates, verbose=True)
        wrap_elastic = ElasticCCA(latent_dims=latent_dims, random_state=self.rng).gridsearch_fit(
            self.X, self.Y, param_candidates=param_candidates, verbose=True)
        corr_pmd = wrap_pmd.score(self.X, self.Y)
        corr_scca = wrap_scca.score(self.X, self.Y)
        corr_elastic = wrap_elastic.score(self.X, self.Y)
        wrap_scca_admm = SCCA_ADMM(c=[1e-4, 1e-4]).fit(self.X, self.Y)
        wrap_scca = SCCA(c=[1e-4, 1e-4]).fit(self.X, self.Y)

    def test_weighted_GCCA_methods(self):
        """GCCA extensions: view weighting and observation weighting."""
        latent_dims = 2
        c = 0
        wrap_unweighted_gcca = GCCA(latent_dims=latent_dims, c=[c, c]).fit(self.X, self.Y)
        wrap_deweighted_gcca = GCCA(latent_dims=latent_dims, c=[c, c],
                                    view_weights=[0.5, 0.5]).fit(self.X, self.Y)
        corr_unweighted_gcca = wrap_unweighted_gcca.score(self.X, self.Y)
        corr_deweighted_gcca = wrap_deweighted_gcca.score(self.X, self.Y)
        # Observation weighting: mark samples 200.. of view 0 as unobserved.
        K = np.ones((2, self.X.shape[0]))
        K[0, 200:] = 0
        wrap_unobserved_gcca = GCCA(latent_dims=latent_dims, c=[c, c]).fit(self.X, self.Y, K=K)
        # Uniform view weights must not change the correlations.
        self.assertIsNone(np.testing.assert_array_almost_equal(
            corr_unweighted_gcca, corr_deweighted_gcca, decimal=1))

    def test_TCCA(self):
        """Tensor CCA and its kernel variant should agree."""
        latent_dims = 2
        wrap_tcca = TCCA(latent_dims=latent_dims, c=[0.2, 0.2]).fit(self.X, self.Y)
        wrap_ktcca = KTCCA(latent_dims=latent_dims, c=[0.2, 0.2]).fit(self.X, self.Y)
        corr_tcca = wrap_tcca.score(self.X, self.Y)
        corr_ktcca = wrap_ktcca.score(self.X, self.Y)
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_tcca, corr_ktcca, decimal=1))

    def test_cv_fit(self):
        """Smoke test: cross-validated grid-search fits run end to end."""
        latent_dims = 2
        c1 = [0.1, 0.2]
        c2 = [0.1, 0.2]
        param_candidates = {'c': list(itertools.product(c1, c2))}
        wrap_unweighted_gcca = GCCA(latent_dims=latent_dims).gridsearch_fit(
            self.X, self.Y, folds=5, param_candidates=param_candidates, plot=True, jobs=3)
        wrap_deweighted_gcca = GCCA(latent_dims=latent_dims, view_weights=[0.5, 0.5]).gridsearch_fit(
            self.X, self.Y, folds=2, param_candidates=param_candidates)
        wrap_mcca = MCCA(latent_dims=latent_dims).gridsearch_fit(
            self.X, self.Y, folds=2, param_candidates=param_candidates)

    def test_l0(self):
        """L0-style methods should honour the requested support sizes."""
        wrap_span_cca = SpanCCA(latent_dims=1, regularisation='l0', c=[2, 2]).fit(self.X, self.Y)
        wrap_swcca = SWCCA(latent_dims=1, c=[2, 2], sample_support=5).fit(self.X, self.Y)
        # c=[2, 2] requests exactly two non-zero weights per view.
        self.assertEqual((np.abs(wrap_span_cca.weights[0]) > 1e-5).sum(), 2)
        self.assertEqual((np.abs(wrap_span_cca.weights[1]) > 1e-5).sum(), 2)
        self.assertEqual((np.abs(wrap_swcca.weights[0]) > 1e-5).sum(), 2)
        self.assertEqual((np.abs(wrap_swcca.weights[1]) > 1e-5).sum(), 2)
        # sample_support=5 requests exactly five non-zero sample weights.
        self.assertEqual((np.abs(wrap_swcca.loop.sample_weights) > 1e-5).sum(), 5)
| <filename>tests/testmodels.py
import itertools
from unittest import TestCase
import numpy as np
import scipy.sparse as sp
from sklearn.utils.validation import check_random_state
from cca_zoo.models import CCA, PLS, CCA_ALS, SCCA, PMD, ElasticCCA, rCCA, KCCA, KTCCA, MCCA, GCCA, TCCA, SCCA_ADMM, \
SpanCCA, SWCCA
class TestModels(TestCase):
    """Consistency tests for the cca_zoo model zoo.

    Unregularized CCA solvers are expected to agree with each other on
    random data, fully regularized linear solvers are expected to match
    PLS, and the sparse / tensor / cross-validated estimators are mostly
    exercised as smoke tests.
    """
    def setUp(self):
        # Fixed seed keeps the decimal-place comparisons below stable.
        self.rng = check_random_state(0)
        self.X = self.rng.rand(500, 20)
        self.Y = self.rng.rand(500, 21)
        self.Z = self.rng.rand(500, 22)
        self.X_sp = sp.random(500, 20, density=0.5, random_state=self.rng)
        self.Y_sp = sp.random(500, 21, density=0.5, random_state=self.rng)
    def tearDown(self):
        pass
    def test_unregularized_methods(self):
        """All unregularized CCA solvers should give the same correlations."""
        # Tests unregularized CCA methods. The idea is that all of these should give the same result.
        latent_dims = 2
        wrap_cca = CCA(latent_dims=latent_dims).fit(self.X, self.Y)
        wrap_iter = CCA_ALS(latent_dims=latent_dims, tol=1e-9, random_state=self.rng, stochastic=False).fit(self.X,
                                                                                                            self.Y)
        wrap_gcca = GCCA(latent_dims=latent_dims).fit(self.X, self.Y)
        wrap_mcca = MCCA(latent_dims=latent_dims).fit(self.X, self.Y)
        wrap_kcca = KCCA(latent_dims=latent_dims).fit(self.X, self.Y)
        corr_cca = wrap_cca.score(self.X, self.Y)
        corr_iter = wrap_iter.score(self.X, self.Y)
        corr_gcca = wrap_gcca.score(self.X, self.Y)
        corr_mcca = wrap_mcca.score(self.X, self.Y)
        corr_kcca = wrap_kcca.score(self.X, self.Y)
        # Check the score outputs are the right shape
        self.assertTrue(wrap_iter.scores[0].shape == (self.X.shape[0], latent_dims))
        self.assertTrue(wrap_gcca.scores[0].shape == (self.X.shape[0], latent_dims))
        self.assertTrue(wrap_mcca.scores[0].shape == (self.X.shape[0], latent_dims))
        self.assertTrue(wrap_kcca.scores[0].shape == (self.X.shape[0], latent_dims))
        # Check the correlations from each unregularized method are the same
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_cca, corr_iter, decimal=2))
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_iter, corr_mcca, decimal=2))
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_iter, corr_gcca, decimal=2))
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_iter, corr_kcca, decimal=2))
    def test_sparse_input(self):
        """The same agreement should hold when fitting on sparse matrices."""
        # Tests unregularized CCA methods. The idea is that all of these should give the same result.
        latent_dims = 2
        wrap_cca = CCA(latent_dims=latent_dims, centre=False).fit(self.X_sp, self.Y_sp)
        wrap_iter = CCA_ALS(latent_dims=latent_dims, tol=1e-9, stochastic=False, centre=False).fit(self.X_sp, self.Y_sp)
        wrap_gcca = GCCA(latent_dims=latent_dims, centre=False).fit(self.X_sp, self.Y_sp)
        wrap_mcca = MCCA(latent_dims=latent_dims, centre=False).fit(self.X_sp, self.Y_sp)
        wrap_kcca = KCCA(latent_dims=latent_dims, centre=False).fit(self.X_sp, self.Y_sp)
        # Fitting is on sparse data; scoring uses the dense arrays.
        corr_cca = wrap_cca.score(self.X, self.Y)
        corr_iter = wrap_iter.score(self.X, self.Y)
        corr_gcca = wrap_gcca.score(self.X, self.Y)
        corr_mcca = wrap_mcca.score(self.X, self.Y)
        corr_kcca = wrap_kcca.score(self.X, self.Y)
        # Check the score outputs are the right shape
        self.assertTrue(wrap_iter.scores[0].shape == (self.X_sp.shape[0], latent_dims))
        self.assertTrue(wrap_gcca.scores[0].shape == (self.X_sp.shape[0], latent_dims))
        self.assertTrue(wrap_mcca.scores[0].shape == (self.X_sp.shape[0], latent_dims))
        self.assertTrue(wrap_kcca.scores[0].shape == (self.X_sp.shape[0], latent_dims))
        # Check the correlations from each unregularized method are the same
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_cca, corr_iter, decimal=2))
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_iter, corr_mcca, decimal=2))
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_iter, corr_gcca, decimal=2))
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_iter, corr_kcca, decimal=2))
    def test_unregularized_multi(self):
        """Unregularized methods should also agree for more than two views."""
        # Tests unregularized CCA methods for more than 2 views. The idea is that all of these should give the same result.
        latent_dims = 2
        wrap_cca = rCCA(latent_dims=latent_dims).fit(self.X, self.Y, self.Z)
        wrap_iter = CCA_ALS(latent_dims=latent_dims, stochastic=False, tol=1e-12).fit(self.X, self.Y,
                                                                                      self.Z)
        wrap_gcca = GCCA(latent_dims=latent_dims).fit(self.X, self.Y, self.Z)
        wrap_mcca = MCCA(latent_dims=latent_dims).fit(self.X, self.Y, self.Z)
        wrap_kcca = KCCA(latent_dims=latent_dims).fit(self.X, self.Y, self.Z)
        corr_cca = wrap_cca.score(self.X, self.Y, self.Z)
        corr_iter = wrap_iter.score(self.X, self.Y, self.Z)
        corr_gcca = wrap_gcca.score(self.X, self.Y, self.Z)
        corr_mcca = wrap_mcca.score(self.X, self.Y, self.Z)
        corr_kcca = wrap_kcca.score(self.X, self.Y, self.Z)
        # Check the score outputs are the right shape
        self.assertTrue(wrap_iter.scores[0].shape == (self.X.shape[0], latent_dims))
        self.assertTrue(wrap_gcca.scores[0].shape == (self.X.shape[0], latent_dims))
        self.assertTrue(wrap_mcca.scores[0].shape == (self.X.shape[0], latent_dims))
        self.assertTrue(wrap_kcca.scores[0].shape == (self.X.shape[0], latent_dims))
        # Check the correlations from each unregularized method are the same
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_cca, corr_iter, decimal=1))
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_cca, corr_mcca, decimal=2))
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_cca, corr_gcca, decimal=2))
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_cca, corr_kcca, decimal=2))
    def test_regularized_methods(self):
        """At maximum regularisation the linear methods should match PLS."""
        # Test that linear regularized methods match PLS solution when using maximum regularisation.
        latent_dims = 2
        c = 1
        wrap_kernel = KCCA(latent_dims=latent_dims, c=[c, c], kernel=['linear', 'linear']).fit(self.X,
                                                                                               self.Y)
        wrap_pls = PLS(latent_dims=latent_dims).fit(self.X, self.Y)
        wrap_gcca = GCCA(latent_dims=latent_dims, c=[c, c]).fit(self.X, self.Y)
        wrap_mcca = MCCA(latent_dims=latent_dims, c=[c, c]).fit(self.X, self.Y)
        wrap_rCCA = rCCA(latent_dims=latent_dims, c=[c, c]).fit(self.X, self.Y)
        corr_gcca = wrap_gcca.score(self.X, self.Y)
        corr_mcca = wrap_mcca.score(self.X, self.Y)
        corr_kernel = wrap_kernel.score(self.X, self.Y)
        corr_pls = wrap_pls.score(self.X, self.Y)
        corr_rcca = wrap_rCCA.score(self.X, self.Y)
        # NOTE(review): the GCCA-vs-PLS check was left disabled; confirm
        # whether it is expected to hold before re-enabling it:
        # self.assertIsNone(np.testing.assert_array_almost_equal(corr_pls, corr_gcca, decimal=2))
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_pls, corr_mcca, decimal=1))
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_pls, corr_kernel, decimal=1))
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_pls, corr_rcca, decimal=1))
    def test_non_negative_methods(self):
        """Smoke test: positivity-constrained variants fit without error."""
        latent_dims = 2
        wrap_nnelasticca = ElasticCCA(latent_dims=latent_dims, tol=1e-9, positive=True, l1_ratio=[0.5, 0.5],
                                      c=[1e-4, 1e-5]).fit(self.X, self.Y)
        wrap_als = CCA_ALS(latent_dims=latent_dims, tol=1e-9).fit(self.X, self.Y)
        wrap_nnals = CCA_ALS(latent_dims=latent_dims, tol=1e-9, positive=True).fit(self.X, self.Y)
        wrap_nnscca = SCCA(latent_dims=latent_dims, tol=1e-9, positive=True, c=[1e-4, 1e-5]).fit(self.X, self.Y)
    def test_sparse_methods(self):
        """Smoke test: sparsity-inducing methods and their grid searches run."""
        # Test sparsity inducing methods. At the moment just checks running.
        latent_dims = 2
        c1 = [1, 3]
        c2 = [1, 3]
        param_candidates = {'c': list(itertools.product(c1, c2))}
        wrap_pmd = PMD(latent_dims=latent_dims, random_state=self.rng).gridsearch_fit(self.X, self.Y,
                                                                                      param_candidates=param_candidates,
                                                                                      verbose=True, plot=True)
        c1 = [1e-4, 1e-5]
        c2 = [1e-4, 1e-5]
        param_candidates = {'c': list(itertools.product(c1, c2))}
        wrap_scca = SCCA(latent_dims=latent_dims, random_state=self.rng).gridsearch_fit(self.X, self.Y,
                                                                                        param_candidates=param_candidates,
                                                                                        verbose=True)
        wrap_elastic = ElasticCCA(latent_dims=latent_dims, random_state=self.rng).gridsearch_fit(self.X, self.Y,
                                                                                                 param_candidates=param_candidates,
                                                                                                 verbose=True)
        corr_pmd = wrap_pmd.score(self.X, self.Y)
        corr_scca = wrap_scca.score(self.X, self.Y)
        corr_elastic = wrap_elastic.score(self.X, self.Y)
        wrap_scca_admm = SCCA_ADMM(c=[1e-4, 1e-4]).fit(self.X, self.Y)
        wrap_scca = SCCA(c=[1e-4, 1e-4]).fit(self.X, self.Y)
    def test_weighted_GCCA_methods(self):
        """GCCA extensions: view weighting and observation weighting."""
        # Test the 'fancy' additions to GCCA i.e. the view weighting and observation weighting.
        latent_dims = 2
        c = 0
        wrap_unweighted_gcca = GCCA(latent_dims=latent_dims, c=[c, c]).fit(self.X, self.Y)
        wrap_deweighted_gcca = GCCA(latent_dims=latent_dims, c=[c, c], view_weights=[0.5, 0.5]).fit(
            self.X, self.Y)
        corr_unweighted_gcca = wrap_unweighted_gcca.score(self.X, self.Y)
        corr_deweighted_gcca = wrap_deweighted_gcca.score(self.X, self.Y)
        # Observation weighting: mark samples 200.. of view 0 as unobserved.
        K = np.ones((2, self.X.shape[0]))
        K[0, 200:] = 0
        wrap_unobserved_gcca = GCCA(latent_dims=latent_dims, c=[c, c]).fit(self.X, self.Y, K=K)
        # Uniform view weights must not change the correlations.
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_unweighted_gcca, corr_deweighted_gcca, decimal=1))
    def test_TCCA(self):
        """Tensor CCA and its kernel variant should agree."""
        # Tests tensor CCA methods
        latent_dims = 2
        wrap_tcca = TCCA(latent_dims=latent_dims, c=[0.2, 0.2]).fit(self.X, self.Y)
        wrap_ktcca = KTCCA(latent_dims=latent_dims, c=[0.2, 0.2]).fit(self.X, self.Y)
        corr_tcca = wrap_tcca.score(self.X, self.Y)
        corr_ktcca = wrap_ktcca.score(self.X, self.Y)
        self.assertIsNone(np.testing.assert_array_almost_equal(corr_tcca, corr_ktcca, decimal=1))
    def test_cv_fit(self):
        """Smoke test: cross-validated grid-search fits run end to end."""
        # Test the CV method
        latent_dims = 2
        c1 = [0.1, 0.2]
        c2 = [0.1, 0.2]
        param_candidates = {'c': list(itertools.product(c1, c2))}
        wrap_unweighted_gcca = GCCA(latent_dims=latent_dims).gridsearch_fit(self.X, self.Y, folds=5,
                                                                            param_candidates=param_candidates,
                                                                            plot=True, jobs=3)
        wrap_deweighted_gcca = GCCA(latent_dims=latent_dims, view_weights=[0.5, 0.5]).gridsearch_fit(
            self.X, self.Y, folds=2, param_candidates=param_candidates)
        wrap_mcca = MCCA(latent_dims=latent_dims).gridsearch_fit(
            self.X, self.Y, folds=2, param_candidates=param_candidates)
    def test_l0(self):
        """L0-style methods should honour the requested support sizes."""
        wrap_span_cca = SpanCCA(latent_dims=1, regularisation='l0', c=[2, 2]).fit(self.X, self.Y)
        wrap_swcca = SWCCA(latent_dims=1, c=[2, 2], sample_support=5).fit(self.X, self.Y)
        # c=[2, 2] requests exactly two non-zero weights per view.
        self.assertEqual((np.abs(wrap_span_cca.weights[0]) > 1e-5).sum(), 2)
        self.assertEqual((np.abs(wrap_span_cca.weights[1]) > 1e-5).sum(), 2)
        self.assertEqual((np.abs(wrap_swcca.weights[0]) > 1e-5).sum(), 2)
        self.assertEqual((np.abs(wrap_swcca.weights[1]) > 1e-5).sum(), 2)
        # sample_support=5 requests exactly five non-zero sample weights.
        self.assertEqual((np.abs(wrap_swcca.loop.sample_weights) > 1e-5).sum(), 5)
        # NOTE(review): stray debug print left in the test.
        print()
| en | 0.883873 | # Tests unregularized CCA methods. The idea is that all of these should give the same result. # Check the score outputs are the right shape # Check the correlations from each unregularized method are the same # Tests unregularized CCA methods. The idea is that all of these should give the same result. # Check the score outputs are the right shape # Check the correlations from each unregularized method are the same # Tests unregularized CCA methods for more than 2 views. The idea is that all of these should give the same result. # Check the score outputs are the right shape # Check the correlations from each unregularized method are the same # Test that linear regularized methods match PLS solution when using maximum regularisation. # Check the correlations from each unregularized method are the same # self.assertIsNone(np.testing.assert_array_almost_equal(corr_pls, corr_gcca, decimal=2)) # Test sparsity inducing methods. At the moment just checks running. # Test the 'fancy' additions to GCCA i.e. the view weighting and observation weighting. # Check the correlations from each unregularized method are the same # Tests tensor CCA methods # Test the CV method | 2.2539 | 2 |
MUSCIMarker/cropobject_view.py | penestia/muscimarker-python3 | 6 | 6622684 | <filename>MUSCIMarker/cropobject_view.py
"""This module implements a class that..."""
from __future__ import division
from __future__ import print_function, unicode_literals
import logging
import os
import uuid
from builtins import str
import scipy.misc
from kivy.app import App
from kivy.core.window import Window
from kivy.properties import ListProperty, BooleanProperty, NumericProperty
from kivy.properties import ObjectProperty
from kivy.uix.label import Label
from kivy.uix.listview import SelectableView, CompositeListItem
from kivy.uix.spinner import Spinner
from kivy.uix.togglebutton import ToggleButton
from muscima.cropobject import split_cropobject_on_connected_components
from past.utils import old_div
import MUSCIMarker.tracker as tr
from MUSCIMarker.utils import InspectionPopup, keypress_to_dispatch_key
__version__ = "0.0.1"
__author__ = "<NAME>."
# Should behave like a ToggleButton.
# Important difference from ListItemButton:
#
# * Colors defined at initialization time,
# * Text is empty
class CropObjectView(SelectableView, ToggleButton):
"""The view to an individual CropObject. Implements interface for CropObject
manipulation.
Selection
---------
The ``CropObjectView`` is selectable by clicking. Keyboard shortcuts only work
when the button is selected.
Mouse interaction
-----------------
Once selected, the CropObject can be dragged around [NOT IMPLEMENTED].
Keyboard shortcuts
------------------
If the CropObjectView handles a key press event, it will not propagate.
The available keyboard shortcuts are:
* Backspace: Remove the CropObject
* Escape: Unselect
* Arrow keys: move the CropObject by 1 editor-scale pixel.
* Arrow keys + alt: move the CropObject by 1 display pixel. (Finest.)
* Arrow keys + shift: stretch the CropObject by 1 editor-scale pixel.
* Arrow keys + alt + shift: stretch the CropObject by 1 display pixel. (Finest.)
* i: toggle info label
* c: change class selection
"""
selected_color = ListProperty([1., 0., 0., 0.5])
deselected_color = ListProperty([1., 0., 0., 0.3])
cropobject = ObjectProperty()
_info_label_shown = BooleanProperty(False)
info_label = ObjectProperty(None, allownone=True)
_mlclass_selection_spinner_shown = BooleanProperty(False)
mlclass_selection_spinner = ObjectProperty(None, allownone=True)
_height_scaling_factor = NumericProperty(1.0)
_width_scaling_factor = NumericProperty(1.0)
_editor_scale = NumericProperty(1.0)
    def __init__(self, selectable_cropobject, rgb, alpha=0.25, **kwargs):
        """
        :param selectable_cropobject: The intermediate-level CropObject representation,
            with recomputed dimension.
        :param rgb: Base color of the view, as an (r, g, b) triplet.
        :param alpha: Opacity of the deselected color; the selected color uses
            three times this value, capped at 1.0.
        :param kwargs: Passed through to the Kivy widget constructor.
        :return:
        """
        super(CropObjectView, self).__init__(**kwargs)
        self.text = ''   # We don't want any text showing up
        r, g, b = rgb
        self.selected_color = r, g, b, min([1.0, alpha * 3.0])
        self.deselected_color = r, g, b, alpha
        self.alpha = alpha  # Recorded for future color changes on class change
        # Overriding the default button color and border behavior
        self.background_color = self.deselected_color
        self.background_normal = ''
        self.background_down = ''
        self.border = 0, 0, 0, 0
        # Overriding default release
        self.always_release = False
        self.cropobject = selectable_cropobject
        self.is_selected = selectable_cropobject.is_selected
        # Here, we position the CropObjectView.
        self.size = self.cropobject.width, self.cropobject.height
        self.size_hint = (None, None)
        # NOTE: cropobject coordinates are numpy-style (x = row, y = column),
        # so (y, x) maps onto Kivy's (horizontal, vertical) pos.
        self.pos = self.cropobject.y, self.cropobject.x
        # Display-to-model size ratios, used to convert movement/stretch deltas.
        self._height_scaling_factor = old_div(self.height, float(self._model_counterpart.height))
        self._width_scaling_factor = old_div(self.width, float(self._model_counterpart.width))
        self._editor_scale = App.get_running_app().editor_scale
        # If the underlying cropobject has a mask, render that mask
        # (currently a no-op; see render_mask()).
        if self._model_counterpart.mask is not None:
            self.render_mask()
        self.register_event_type('on_key_captured')
        self.create_bindings()
    def create_bindings(self):
        # Key events arrive via the Window (this widget has no keyboard focus
        # of its own); pos/size changes refresh the info label in the palette.
        Window.bind(on_key_down=self.on_key_down)
        Window.bind(on_key_up=self.on_key_up)
        self.bind(pos=self.update_info_label)
        self.bind(size=self.update_info_label)
        self.bind(height=self.update_info_label)
        self.bind(width=self.update_info_label)
        App.get_running_app().bind(editor_scale=self.setter('_editor_scale'))

    def remove_bindings(self):
        # Must mirror create_bindings() exactly -- stale Window bindings keep
        # a deleted widget alive and let it keep capturing key events.
        Window.unbind(on_key_down=self.on_key_down)
        Window.unbind(on_key_up=self.on_key_up)
        self.unbind(pos=self.update_info_label)
        self.unbind(size=self.update_info_label)
        self.unbind(height=self.update_info_label)
        self.unbind(width=self.update_info_label)
        App.get_running_app().unbind(editor_scale=self.setter('_editor_scale'))
def update_color(self, rgb):
r, g, b = rgb
self.selected_color = r, g, b, min([1.0, self.alpha * 3.0])
self.deselected_color = r, g, b, self.alpha
if self.is_selected:
self.background_color = self.selected_color
else:
self.background_color = self.deselected_color
    def render_mask(self):
        """NOT IMPLEMENTED
        Rendering a mask in Kivy is difficult. (Can Mesh do nonconvex?)"""
        # Intentionally a no-op: the view only renders the bounding box.
        pass
##########################################################################
# Touch processing
def on_touch_down(self, touch):
if touch.is_double_tap:
if self.collide_point(*touch.pos):
renderer = App.get_running_app().cropobject_list_renderer
renderer.view.select_class(self._model_counterpart.clsname)
return True
return super(CropObjectView, self).on_touch_down(touch)
##########################################################################
# Keyboard event processing: the core UI of the CropObjectView
    def on_key_down(self, window, key, scancode, codepoint, modifier):
        """This method is one of the primary User Interfaces: keyboard
        shortcuts to manipulate a selected CropObject.

        Only reacts when this view is selected. Always returns False so the
        event keeps propagating to other listeners; when the key was actually
        processed, the ``on_key_captured`` event is dispatched instead.

        :param window: The Window that emitted the event.
        :param key: Numeric key code.
        :param scancode: Platform scancode.
        :param codepoint: Unicode character, if any.
        :param modifier: List of active modifiers (e.g. ``['alt', 'shift']``).
        :return: False (the event is never consumed here).
        """
        if not self.is_selected:
            return False
        # Normalize (key, modifiers) into a single dispatch string,
        # e.g. '99+shift' -- see keypress_to_dispatch_key().
        dispatch_key = keypress_to_dispatch_key(key, scancode, codepoint, modifier)
        is_handled = self.handle_dispatch_key(dispatch_key)
        if is_handled:
            self.dispatch('on_key_captured')
        return False
    def handle_dispatch_key(self, dispatch_key):
        """Does the "heavy lifting" in keyboard controls: responds to a dispatch key.
        Decoupling this into a separate method facilitates giving commands to
        the ListView programmatically, not just through user input,
        and this way makes automation easier.

        :param dispatch_key: A string of the form e.g. ``109+alt,shift``: the ``key``
            number, ``+``, and comma-separated modifiers.

        :returns: True if the dispatch key got handled, False if there is
            no response defined for the given dispatch key.
        """
        # Deletion
        if dispatch_key == '8':  # Backspace: delete the object from the model
            self.remove_from_model()
        elif dispatch_key == '8+alt':  # Backspace+alt: delete only its edges
            self._model.graph.remove_obj_edges(self.objid)
        # Unselect
        elif dispatch_key == '27':  # Escape
            # Simple deselection is not enough because of the adapter's
            # handle_selection() method; dispatching 'on_release' keeps the
            # adapter's selection bookkeeping in sync.
            if self.is_selected:
                self.dispatch('on_release')
        # Moving around (currently disabled: the handlers only log)
        elif dispatch_key == '273':  # Up arrow
            logging.info('CropObjectView: handling move up: DISABLED')
            #self.move(vertical=1)
        elif dispatch_key == '274':  # Down arrow
            logging.info('CropObjectView: handling move down: DISABLED')
            #self.move(vertical=-1)
        elif dispatch_key == '275':  # Right arrow
            logging.info('CropObjectView: handling move right: DISABLED')
            #self.move(horizontal=1)
        elif dispatch_key == '276':  # Left arrow
            logging.info('CropObjectView: handling move left: DISABLED')
            #self.move(horizontal=-1)
        # Fine-grained moving around (disabled)
        elif dispatch_key == '273+alt':  # Up arrow
            logging.info('CropObjectView: handling move_fine up: DISABLED')
            #self.move_fine(vertical=1)
        elif dispatch_key == '274+alt':  # Down arrow
            logging.info('CropObjectView: handling move_fine down: DISABLED')
            #self.move_fine(vertical=-1)
        elif dispatch_key == '275+alt':  # Right arrow
            logging.info('CropObjectView: handling move_fine right: DISABLED')
            #self.move_fine(horizontal=1)
        elif dispatch_key == '276+alt':  # Left arrow
            logging.info('CropObjectView: handling move_fine left: DISABLED')
            #self.move_fine(horizontal=-1)
        # Coarse-grained stretching (disabled)
        elif dispatch_key == '273+shift':  # Up arrow
            logging.info('CropObjectView: handling stretch up: DISABLED')
            #self.stretch(vertical=1)
        elif dispatch_key == '274+shift':  # Down arrow
            logging.info('CropObjectView: handling stretch down: DISABLED')
            #self.stretch(vertical=-1)
        elif dispatch_key == '275+shift':  # Right arrow
            logging.info('CropObjectView: handling stretch right: DISABLED')
            #self.stretch(horizontal=1)
        elif dispatch_key == '276+shift':  # Left arrow
            logging.info('CropObjectView: handling stretch left: DISABLED')
            #self.stretch(horizontal=-1)
        # Fine-grained stretching (disabled)
        elif dispatch_key == '273+alt,shift':  # Up arrow
            logging.info('CropObjectView: handling stretch_fine up: DISABLED')
            #self.stretch_fine(vertical=1)
        elif dispatch_key == '274+alt,shift':  # Down arrow
            logging.info('CropObjectView: handling stretch_fine down: DISABLED')
            #self.stretch_fine(vertical=-1)
        elif dispatch_key == '275+alt,shift':  # Right arrow
            logging.info('CropObjectView: handling stretch_fine right: DISABLED')
            #self.stretch_fine(horizontal=1)
        elif dispatch_key == '276+alt,shift':  # Left arrow
            logging.info('CropObjectView: handling stretch_fine left: DISABLED')
            #self.stretch_fine(horizontal=-1)
        # Change class
        elif dispatch_key == '99':  # c: toggle the class-selection spinner
            logging.info('CropObjectView: handling mlclass selection')
            self.toggle_class_selection()
        elif dispatch_key == '99+shift':  # C: copy class to app-wide selection
            logging.info('CropObjectView: cloning mlclass to app')
            self.clone_class_to_app()
        # Hide relationships
        elif dispatch_key == '104+alt':  # h+alt: toggle edge visibility
            logging.info('CropObjectView: handling hiding relationships')
            self.toggle_hide_relationships()
        # Inspect CropObjects
        elif dispatch_key == '105':  # i: popup with the symbol's mask
            logging.info('CropObjectView: handling inspection')
            self.inspect()
        elif dispatch_key == '120':  # x: split into connected components
            logging.info('CropObjectView: handling split to connected components')
            self.split()
        else:
            # The key is not recognized by the CropObjectView, try others.
            return False
        # If we got here, the key has been caught and processed.
        # Current policy: if any CropObjectView captures a key signal, it will
        # propagate past the CropObjectListView (see on_key_down, which always
        # returns False).
        return True
    def on_key_up(self, window, key, scancode, *args, **kwargs):
        # Key releases are not used; never consume them.
        return False

    def on_key_captured(self, *largs):
        """Default handler for on_key_captured event."""
        pass
# TODO: Remove this (replaced from utils)
# @staticmethod
# def keypress_to_dispatch_key(key, scancode, codepoint, modifiers):
# """Converts the key_down event data into a single string for more convenient
# keyboard shortcut dispatch."""
# if modifiers:
# return '{0}+{1}'.format(key, ','.join(sorted(modifiers)))
# else:
# return '{0}'.format(key)
##########################################################################
# Accessing the model & the cropobject in the model, so that the user
# can manipulate the underlying data through the CropObjectView.
    @property
    def _model(self):
        # The single annotation model owned by the running app.
        return App.get_running_app().annot_model

    @property
    def _model_counterpart(self):
        # The authoritative CropObject in the model that this view displays
        # (self.cropobject is only an intermediate copy).
        return self._model.cropobjects[self.cropobject.objid]

    @property
    def objid(self):
        # Object ID, taken from the model counterpart.
        return self._model_counterpart.objid
##########################################################################
# Class selection
@tr.Tracker(track_names=['self'],
transformations={'self': [lambda s: ('objid', s._model_counterpart.objid),
lambda s: ('clsname', s._model_counterpart.clsname)]},
fn_name='CropObjectView.toggle_class_selection',
tracker_name='editing')
def toggle_class_selection(self):
if self._mlclass_selection_spinner_shown:
self.destroy_mlclass_selection_spinner()
else:
self.create_class_selection()
    def create_class_selection(self):
        """Build the MLClass-selection Spinner and attach it to this view."""
        logging.info('CropObjectView\t{0}: show_class_selection() fired.'
                     ''.format(self.cropobject.objid))
        self.mlclass_selection_spinner = Spinner(
            id='mlclass_cropobject_selection_spinner_{0}'.format(self.cropobject.objid),
            pos=self.pos,
            text='{0}'.format(self.cropobject.clsname),
            font_size=15,
            # Class names ordered by their clsid, not alphabetically.
            values=sorted(list(self._model.mlclasses_by_name.keys()),
                          key=lambda k: self._model.mlclasses_by_name[k].clsid),
            # Counter-scale by the editor zoom so the spinner keeps a
            # roughly constant on-screen size.
            width=old_div(300, self._editor_scale),
            height=old_div(50, self._editor_scale),
            size_hint=(None, None),
            # is_open=True,
        )
        self.mlclass_selection_spinner.bind(text=self.do_class_selection)
        self.add_widget(self.mlclass_selection_spinner)
        self._mlclass_selection_spinner_shown = True
    @tr.Tracker(track_names=['self', 'text'],
                transformations={'self': [lambda s: ('objid', s._model_counterpart.objid),
                                          lambda s: ('clsname', s._model_counterpart.clsname)]},
                fn_name='CropObjectView.do_class_selection',
                tracker_name='editing')
    def do_class_selection(self, spinner_widget, clsname):
        """Spinner ``text`` callback: apply the chosen class, then close
        the spinner.

        :param spinner_widget: The Spinner that fired (unused).
        :param clsname: The newly selected class name.
        """
        logging.info('CropObjectView\t{0}: do_class_selection() fired.'
                     ''.format(self.cropobject.objid))
        # No-op reassignments (same class) are skipped.
        if clsname != self.cropobject.clsname:
            self.set_mlclass(clsname=clsname)
        self.destroy_mlclass_selection_spinner()
def set_mlclass(self, clsname):
# This should be wrapped in some cropobject's set_class method.
self._model_counterpart.clsname = clsname
self.cropobject.clsname = clsname
# We should also check that the new class name is consistent
# with the edges...
self.update_info_label()
# Update color
rgb = tuple([float(x) for x in self._model.mlclasses_by_name[clsname].color])
self.update_color(rgb)
    def destroy_mlclass_selection_spinner(self, *args, **kwargs):
        """Detach the class-selection spinner and reset the bookkeeping."""
        self.remove_widget(self.mlclass_selection_spinner)
        self.mlclass_selection_spinner = None
        self._mlclass_selection_spinner_shown = False
##########################################################################
# Info panel: displaying information about the view in the info palette
def toggle_info_panel(self):
# Info panel!
if self._info_label_shown:
self.destroy_info_label()
else:
self.create_info_label()
    def create_info_label(self):
        """Create the info Label and add it to the app's tool info palette."""
        info_label = Label(text=self.get_info_label_text())
        _info_palette = App.get_running_app()._get_tool_info_palette()
        info_label.size_hint = (1.0, None)
        # Full palette width, fixed 35-unit height per label.
        info_label.size = (self.parent.size[0], 35)
        self.info_label = info_label
        _info_palette.add_widget(self.info_label)
        self._info_label_shown = True
    def destroy_info_label(self, *args, **kwargs):
        """Remove the info label from the tool info palette and forget it."""
        App.get_running_app()._get_tool_info_palette().remove_widget(self.info_label)
        self._info_label_shown = False
        self.info_label = None
    def get_debug_info_label_text(self):
        """Verbose multi-line debug description: model (M.) vs. editor (E.)
        coordinates and the vertical/horizontal scaling (S.) factors."""
        e_cropobject = self.cropobject
        output_lines = list()
        output_lines.append('objid: {0}'.format(e_cropobject.objid))
        output_lines.append('cls: {0}'.format(e_cropobject.clsname))
        output_lines.append('M.x, M.y: {0:.2f}, {1:.2f}'
                            ''.format(self._model_counterpart.x,
                                      self._model_counterpart.y))
        output_lines.append('M.w, M.h: {0:.2f}, {1:.2f}'
                            ''.format(self._model_counterpart.width,
                                      self._model_counterpart.height))
        if self._model_counterpart.mask is None:
            output_lines.append('Mask.nnz: None')
        else:
            # Number of non-zero mask pixels.
            output_lines.append('Mask.nnz: {0}'.format(self._model_counterpart.mask.sum()))
        output_lines.append('E.x, E.y: {0:.2f}, {1:.2f}'.format(self.x, self.y))
        output_lines.append('E.w, E.h: {0:.2f}, {1:.2f}'.format(self.width,
                                                                self.height))
        output_lines.append('S.V, S.H: {0:.2f}, {1:.2f}'
                            ''.format(self._height_scaling_factor,
                                      self._width_scaling_factor))
        return '\n'.join(output_lines)
    def get_info_label_text(self):
        """One-line summary for the info palette: '(objid) clsname', extended
        with pitch / duration / onset fields when the object carries data."""
        c = self._model_counterpart
        text = '({0}) {1}'.format(c.objid, c.clsname)
        if c.data is not None:
            logging.debug('Creating info label for object {0}:'
                          ' data {1}'.format(c.uid, c.data))
            pitch_text = ''
            if 'pitch_step' in c.data:
                pitch_text = '{0}'.format(c.data['pitch_step'])
            elif 'normalized_pitch_step' in c.data:
                pitch_text = '{0}'.format(c.data['normalized_pitch_step'])
            # NOTE(review): if only pitch_octave is present, pitch_text ends
            # up as the bare octave number -- presumably intended; confirm.
            if 'pitch_octave' in c.data:
                pitch_text += '{0}'.format(c.data['pitch_octave'])
            if pitch_text:
                text += ' | {0}'.format(pitch_text)
            if 'duration_beats' in c.data:
                text += ' | {0:.2f}'.format(c.data['duration_beats'])
            if 'onset_beats' in c.data:
                text += ' | {0:.3f}'.format(c.data['onset_beats'])
        return text
def update_info_label(self, *args):
if self.info_label is not None:
self.info_label.text = self.get_info_label_text()
##########################################################################
    def remove_from_model(self):
        """Delete the underlying CropObject from the annotation model and
        neutralize this view (deselect, unbind, disable)."""
        logging.info('CropObjectView.remove_from_model(): called on objid {0}'
                     ''.format(self.cropobject.objid))
        # Problem here: the cropobject gets deleted, but the widget stays
        # alive, so it keeps capturing events. This is (a) a memory leak,
        # (b) causes crashes.
        # Easy workaround: unselect self first. This does not fix the memory
        # leak, but at least the 'invisible' CropObjectView will not
        # capture any events.
        self.ensure_deselected()
        # The bindings to Window may be keeping the widget alive, so drop
        # them explicitly before the model deletion.
        self.remove_bindings()
        # Deactivate the widget so it does nothing even if it survives.
        self.disabled = True
        self._model.remove_cropobject(self.cropobject.objid)
##########################################################################
# Movement & scaling
@tr.Tracker(track_names=['self', 'vertical', 'horizontal'],
transformations={'self': [lambda s: ('objid', s._model_counterpart.objid),
lambda s: ('clsname', s._model_counterpart.clsname)]},
fn_name='CropObjectView.move',
tracker_name='editing')
def move(self, vertical=0, horizontal=0):
"""Move the underlying CropObject.
NOTE: How to deal with CropObjects that have a mask? Roll it?
In the current implementation, there is no listener inside the model
for individual CropObjects, so there is no propagation of the change
to the view. We currently work around this by simply moving the view
as well, but this will not work when the underlying CropObject is moved
by some other means.
"""
logging.info('CropObjectView {0}: moving vertical={1}, horizontal={2}'
''.format(self.cropobject.objid, vertical, horizontal))
c = self._model_counterpart
# The CropObjects in the model are kept in the Numpy world.
c.x += vertical #* self._height_scaling_factor
c.y += horizontal #* self._height_scaling_factor
if c.mask is not None:
logging.warn('CropObjectView {0}: Moving a CropObject invalidates its mask!')
self._model.add_cropobject(c)
self.move_view(vertical=vertical, horizontal=horizontal)
def move_view(self, vertical=0, horizontal=0):
logging.info('CropObjectView {0}: moving view vertical={1}, horizontal={2}'
''.format(self.cropobject.objid, vertical, horizontal))
self.pos = (self.pos[0] + horizontal * self._width_scaling_factor,
self.pos[1] + vertical * self._width_scaling_factor)
    def move_fine(self, vertical=0, horizontal=0):
        """Move the underlying CropObject by one display pixel's worth
        per unit of ``vertical``/``horizontal``.

        In the current implementation, there is no listener inside the model
        for individual CropObjects, so there is no propagation of the change
        to the view. We currently work around this by simply moving the view
        as well, but this will not work when the underlying CropObject is moved
        by some other means.
        """
        logging.info('CropObjectView {0}: moving vertical={1}, horizontal={2}'
                     ''.format(self.cropobject.objid, vertical, horizontal))
        c = self._model_counterpart
        # The CropObjects in the model are kept in the Numpy world
        # (c.x = row / vertical, c.y = column / horizontal).
        c.x += vertical * self._height_scaling_factor / self._editor_scale
        # NOTE(review): the horizontal step also uses _height_scaling_factor;
        # _width_scaling_factor looks intended here -- confirm before changing.
        c.y += horizontal * self._height_scaling_factor / self._editor_scale
        self._model.add_cropobject(c)
        self.move_view_fine(vertical=vertical, horizontal=horizontal)
    def move_view_fine(self, vertical=0, horizontal=0):
        """Move the view by raw display pixels (divided by the editor zoom)."""
        logging.info('CropObjectView {0}: moving view vertical={1}, horizontal={2}'
                     ''.format(self.cropobject.objid, vertical, horizontal))
        self.pos = (self.pos[0] + old_div(horizontal, self._editor_scale),# / self._width_scaling_factor),
                    self.pos[1] + old_div(vertical, self._editor_scale))# / self._width_scaling_factor))
    @tr.Tracker(track_names=['self', 'vertical', 'horizontal'],
                transformations={'self': [lambda s: ('objid', s._model_counterpart.objid),
                                          lambda s: ('clsname', s._model_counterpart.clsname)]},
                fn_name='CropObjectView.stretch',
                tracker_name='editing')
    def stretch(self, vertical=0, horizontal=0):
        """Stretch the underlying CropObject. Does NOT change its position.
        Cannot make the CropObject smaller than 1 in either dimension.
        See :meth:`move` for a discussion on linking the model action and view."""
        logging.info('CropObjectView {0}: stretching vertical={1}, horizontal={2}'
                     ''.format(self.cropobject.objid, vertical, horizontal))
        c = self._model_counterpart
        # Guards prevent collapsing the object to zero/negative size.
        if c.width + horizontal > 0:
            c.width += horizontal #* self._width_scaling_factor
        if c.height + vertical > 0:
            c.height += vertical #* self._height_scaling_factor
        self._model.add_cropobject(c)
        self.stretch_view(vertical=vertical, horizontal=horizontal)
    def stretch_view(self, vertical=0, horizontal=0):
        """Resize the view by model pixels converted to display pixels."""
        logging.info('CropObjectView {0}: stretching view vertical={1}, horizontal={2}'
                     ''.format(self.cropobject.objid, vertical, horizontal))
        if self.width + horizontal > 0:
            self.width += horizontal * self._width_scaling_factor
        if self.height + vertical > 0:
            self.height += vertical * self._height_scaling_factor
    def stretch_fine(self, vertical=0, horizontal=0):
        """Stretch the underlying CropObject by one display pixel's worth.
        Does NOT change its position.
        Cannot make the CropObject smaller than 1 in either dimension.
        See :meth:`move` for a discussion on linking the model action and view."""
        logging.info('CropObjectView {0}: stretching vertical={1}, horizontal={2}'
                     ''.format(self.cropobject.objid, vertical, horizontal))
        c = self._model_counterpart
        if c.width + horizontal > 0:
            c.width += horizontal * self._width_scaling_factor / self._editor_scale
        if c.height + vertical > 0:
            c.height += vertical * self._height_scaling_factor / self._editor_scale
        self._model.add_cropobject(c)
        self.stretch_view_fine(vertical=vertical, horizontal=horizontal)
    def stretch_view_fine(self, vertical=0, horizontal=0):
        """Resize the view by raw display pixels (divided by the editor zoom)."""
        logging.info('CropObjectView {0}: stretching view vertical={1}, horizontal={2}'
                     ''.format(self.cropobject.objid, vertical, horizontal))
        if self.width + horizontal > 0:
            self.width += old_div(horizontal, self._editor_scale)# / self._width_scaling_factor)
        if self.height + vertical > 0:
            self.height += old_div(vertical, self._editor_scale)# / self._height_scaling_factor)
##########################################################################
# Split
    @tr.Tracker(track_names=['self', 'ratio'],
                transformations={'self': [lambda s: ('objid', s._model_counterpart.objid),
                                          lambda s: ('clsname', s._model_counterpart.clsname)]},
                fn_name='CropObjectView.split',
                tracker_name='editing')
    def split(self):
        """Split the CropObject according to its mask.

        Each connected component of the mask becomes a new CropObject;
        if there is only one component, nothing happens.
        """
        # NOTE(review): the decorator tracks 'ratio', but split() has no
        # such parameter -- verify against tr.Tracker's semantics.
        _next_objid = self._model.get_next_cropobject_id()
        new_cropobjects = split_cropobject_on_connected_components(self._model_counterpart,
                                                                   next_objid=_next_objid)
        if len(new_cropobjects) == 1:
            return
        # Replace the original object with its components.
        self.remove_from_model()
        for c in new_cropobjects:
            self._model.add_cropobject(c)
##########################################################################
# Clone class
    @tr.Tracker(track_names=['self'],
                transformations={'self': [lambda s: ('objid', s._model_counterpart.objid),
                                          lambda s: ('clsname', s._model_counterpart.clsname)]},
                fn_name='CropObjectView.clone_class_to_app',
                tracker_name='editing')
    def clone_class_to_app(self):
        """Make this object's class the app-wide currently selected class."""
        App.get_running_app().currently_selected_mlclass_name = self._model_counterpart.clsname
##########################################################################
# Hide relationships
    @tr.Tracker(track_names=['self'],
                transformations={'self': [lambda s: ('objid', s._model_counterpart.objid),
                                          lambda s: ('clsname', s._model_counterpart.clsname),
                                          lambda s: ('inlinks', s._model_counterpart.inlinks),
                                          lambda s: ('outlinks', s._model_counterpart.outlinks)]},
                fn_name='CropObjectView.hide_relationships',
                tracker_name='editing')
    def hide_relationships(self):
        """Mask (hide) all graph edges attached to this object."""
        edges = self.collect_all_edges()
        App.get_running_app().graph_renderer.mask(edges)
@tr.Tracker(track_names=['self'],
transformations={'self': [lambda s: ('objid', s._model_counterpart.objid),
lambda s: ('clsname', s._model_counterpart.clsname),
lambda s: ('inlinks', s._model_counterpart.inlinks),
lambda s: ('outlinks', s._model_counterpart.outlinks)]},
fn_name='CropObjectView.hide_relationships',
tracker_name='editing')
def show_relationships(self):
edges = self.collect_all_edges()
App.get_running_app().graph_renderer.unmask(edges)
def toggle_hide_relationships(self):
# A very private toggle switch that keeps track of whether
# the relationships are hidden or visible.
graph_renderer = App.get_running_app().graph_renderer
edges = self.collect_all_edges()
if graph_renderer.are_all_masked(edges):
self.show_relationships()
else:
self.hide_relationships()
def collect_all_edges(self):
edges = []
for i in self._model_counterpart.inlinks:
edges.append((i, self.objid))
for o in self._model_counterpart.outlinks:
edges.append((self.objid, o))
return edges
##########################################################################
# Inspect mask
@tr.Tracker(track_names=['self'],
transformations={'self': [lambda s: ('objid', s._model_counterpart.objid),
lambda s: ('clsname', s._model_counterpart.clsname)]},
fn_name='CropObjectView.clone_class_to_app',
tracker_name='editing')
def inspect(self):
"""Shows the symbol's exact mask in the context of its bounding box
in a popup."""
# Create crop
image = self._model.image
crop = self._model_counterpart.project_to(image).astype('float32')
t, l, b, r = self._model_counterpart.bounding_box
background_crop = image[t:b, l:r].astype('float32')
combined_crop = (old_div(crop, 2.0)) + (old_div(background_crop, 2.0))
# Save image
app = App.get_running_app()
tmp_dir = app.tmp_dir
fname = str(uuid.uuid4()) + '.png'
full_path = os.path.join(tmp_dir, fname)
scipy.misc.imsave(full_path, combined_crop, )
# Make popup with the crop
data_text = self._model_counterpart.data_display_text()
popup = InspectionPopup(
data_text=data_text,
title='Inspecting obj. {0} | clsname: {1} | bbox: {2}'
''.format(self.objid,
self._model_counterpart.clsname,
self._model_counterpart.bounding_box)
+ '\n\n______________________________________\nDATA\n\n' + data_text,
source=full_path)
# Bind to delete the temp file on cancel()
def __safe_unlink(fname):
if os.path.exists(full_path):
os.unlink(full_path)
popup.bind(on_dismiss=lambda x: __safe_unlink(x))
popup.open()
##########################################################################
# Copied over from ListItemButton
    @tr.Tracker(track_names=['self'],
                transformations={'self': [lambda s: ('objid', s._model_counterpart.objid),
                                          lambda s: ('clsname', s._model_counterpart.clsname)]},
                fn_name='CropObjectView.select',
                tracker_name='editing')
    def select(self, *args):
        """Visually select: highlight the background, show the info label,
        and propagate selection to a CompositeListItem parent, if any."""
        self.background_color = self.selected_color
        if not self._info_label_shown:
            self.create_info_label()
        if isinstance(self.parent, CompositeListItem):
            self.parent.select_from_child(self, *args)
        super(CropObjectView, self).select(*args)
    @tr.Tracker(track_names=['self'],
                transformations={'self': [lambda s: ('objid', s._model_counterpart.objid),
                                          lambda s: ('clsname', s._model_counterpart.clsname)]},
                fn_name='CropObjectView.deselect',
                tracker_name='editing')
    def deselect(self, *args):
        """Only handles self.is_selected, not the 'on_release'
        dispatch that the ListAdapter uses to maintain selection!
        Use ensure_deselected() instead."""
        # Tear down the transient UI tied to the selected state.
        if self._info_label_shown:
            self.destroy_info_label()
        if self._mlclass_selection_spinner_shown:
            self.destroy_mlclass_selection_spinner()
        self.background_color = self.deselected_color
        if isinstance(self.parent, CompositeListItem):
            self.parent.deselect_from_child(self, *args)
        super(CropObjectView, self).deselect(*args)
# def do_deselect(self):
# """Proper deselection that will be reflected in a ListAdapter
# containing this view."""
# if self.is_selected:
# self.dispatch('do_release')
    def ensure_selected(self):
        """Proper selection that will be reflected in a ListAdapter
        containing this view."""
        # Dispatching 'on_release' toggles the adapter's selection bookkeeping.
        if not self.is_selected:
            self.dispatch('on_release')

    def ensure_deselected(self):
        """Proper unselection that will be reflected in a ListAdapter
        containing this view."""
        if self.is_selected:
            self.dispatch('on_release')

    def select_from_composite(self, *args):
        # Visual-only selection triggered by a composite parent.
        self.background_color = self.selected_color

    def deselect_from_composite(self, *args):
        # Visual-only deselection triggered by a composite parent.
        self.background_color = self.deselected_color
# For logging/debugging multi-selection only.
#
# def on_is_selected(self, instance, pos):
# logging.info('CropObjectView\t{0}: is_selected changed to {1}'
# ''.format(self.cropobject.objid, self.is_selected))
#
# def on_press(self):
# logging.info('CropObjectView.on_press()\t{0}: Fired'
# ''.format(self.cropobject.objid))
# return super(CropObjectView, self).on_press()
#
# def on_release(self):
# logging.info('CropObjectView.on_release()\t{0}: Fired'
# ''.format(self.cropobject.objid))
# return super(CropObjectView, self).on_release()
#
# def on_touch_up(self, touch):
# if touch.grab_current is not self:
# logging.info('CropObjectView.on_touch_up()\t{0}: touch {1} is FOREIGN'
# ''.format(self.cropobject.objid, touch))
# else:
# logging.info('CropObjectView.on_touch_up()\t{0}: touch {1} is MINE'
# ''.format(self.cropobject.objid, touch))
# return super(CropObjectView, self).on_touch_up(touch)
| <filename>MUSCIMarker/cropobject_view.py
"""This module implements a class that..."""
from __future__ import division
from __future__ import print_function, unicode_literals
import logging
import os
import uuid
from builtins import str
import scipy.misc
from kivy.app import App
from kivy.core.window import Window
from kivy.properties import ListProperty, BooleanProperty, NumericProperty
from kivy.properties import ObjectProperty
from kivy.uix.label import Label
from kivy.uix.listview import SelectableView, CompositeListItem
from kivy.uix.spinner import Spinner
from kivy.uix.togglebutton import ToggleButton
from muscima.cropobject import split_cropobject_on_connected_components
from past.utils import old_div
import MUSCIMarker.tracker as tr
from MUSCIMarker.utils import InspectionPopup, keypress_to_dispatch_key
__version__ = "0.0.1"
__author__ = "<NAME>."
# Should behave like a ToggleButton.
# Important difference from ListItemButton:
#
# * Colors defined at initialization time,
# * Text is empty
class CropObjectView(SelectableView, ToggleButton):
    """The view to an individual CropObject. Implements interface for CropObject
    manipulation.

    Selection
    ---------

    The ``CropObjectView`` is selectable by clicking. Keyboard shortcuts only work
    when the button is selected.

    Mouse interaction
    -----------------

    Once selected, the CropObject can be dragged around [NOT IMPLEMENTED].

    Keyboard shortcuts
    ------------------

    If the CropObjectView handles a key press event, it will not propagate.
    The available keyboard shortcuts are:

    * Backspace: Remove the CropObject
    * Escape: Unselect
    * Arrow keys: move the CropObject by 1 editor-scale pixel.
    * Arrow keys + alt: move the CropObject by 1 display pixel. (Finest.)
    * Arrow keys + shift: stretch the CropObject by 1 editor-scale pixel.
    * Arrow keys + alt + shift: stretch the CropObject by 1 display pixel. (Finest.)
    * i: toggle info label
    * c: change class selection
    """
    # RGBA background colors for the two button states. The selected alpha
    # is three times the base alpha, capped at 1.0 (see __init__).
    selected_color = ListProperty([1., 0., 0., 0.5])
    deselected_color = ListProperty([1., 0., 0., 0.3])

    # The intermediate-level (selectable) CropObject that this view renders.
    cropobject = ObjectProperty()

    # Info label shown in the tool info palette while this view is selected.
    _info_label_shown = BooleanProperty(False)
    info_label = ObjectProperty(None, allownone=True)

    # Spinner widget used to change the MLClass of this CropObject.
    _mlclass_selection_spinner_shown = BooleanProperty(False)
    mlclass_selection_spinner = ObjectProperty(None, allownone=True)

    # Ratios between view (display) size and model CropObject size,
    # recomputed in __init__; used to convert movement/stretch deltas.
    _height_scaling_factor = NumericProperty(1.0)
    _width_scaling_factor = NumericProperty(1.0)

    # Mirrors the running app's current editor zoom level (kept in sync
    # via a binding set up in create_bindings()).
    _editor_scale = NumericProperty(1.0)
def __init__(self, selectable_cropobject, rgb, alpha=0.25, **kwargs):
    """
    :param selectable_cropobject: The intermediate-level CropObject representation,
        with recomputed dimension.

    :param rgb: Base color of the view, as an (r, g, b) triple.

    :param alpha: Opacity of the deselected color. The selected color uses
        three times this value, capped at 1.0.

    :param kwargs: Forwarded to the Kivy superclass constructor.

    :return:
    """
    # logging.debug('Render: Initializing CropObjectView with args: c={0},'
    #               ' rgb={1}, alpha={2}'.format(selectable_cropobject, rgb, alpha))
    super(CropObjectView, self).__init__(**kwargs)
    self.text = ''   # We don't want any text showing up

    r, g, b = rgb
    self.selected_color = r, g, b, min([1.0, alpha * 3.0])
    self.deselected_color = r, g, b, alpha
    self.alpha = alpha  # Recorded for future color changes on class change

    # Overriding the default button color and border behavior
    self.background_color = self.deselected_color
    self.background_normal = ''
    self.background_down = ''
    self.border = 0, 0, 0, 0

    # Overriding default release
    self.always_release = False

    self.cropobject = selectable_cropobject
    self.is_selected = selectable_cropobject.is_selected

    # Here, we position the CropObjectView.
    # NOTE(review): pos swaps the model's (x, y) -- the model apparently
    # lives in the numpy (row, column) world and the view in Kivy's (x, y);
    # see the comment in move(). Confirm before relying on this.
    self.size = self.cropobject.width, self.cropobject.height
    self.size_hint = (None, None)
    self.pos = self.cropobject.y, self.cropobject.x
    # View-to-model size ratios, used to convert editing deltas.
    self._height_scaling_factor = old_div(self.height, float(self._model_counterpart.height))
    self._width_scaling_factor = old_div(self.width, float(self._model_counterpart.width))
    # self.pos_hint = {'x': self.cropobject.x, 'y': self.cropobject.y }
    # self.pos_hint = {'x': 0, 'y': 0 }
    # self.group = self.cropobject.objid

    self._editor_scale = App.get_running_app().editor_scale

    # If the underlying cropobject has a mask, render that mask
    if self._model_counterpart.mask is not None:
        self.render_mask()

    self.register_event_type('on_key_captured')
    self.create_bindings()
def create_bindings(self):
    """Hook this view up to window keyboard events, info-label geometry
    updates, and the running app's editor-scale property."""
    Window.bind(on_key_down=self.on_key_down)
    Window.bind(on_key_up=self.on_key_up)
    for prop_name in ('pos', 'size', 'height', 'width'):
        self.bind(**{prop_name: self.update_info_label})
    App.get_running_app().bind(editor_scale=self.setter('_editor_scale'))

def remove_bindings(self):
    """Inverse of :meth:`create_bindings`; called before the view is retired."""
    Window.unbind(on_key_down=self.on_key_down)
    Window.unbind(on_key_up=self.on_key_up)
    for prop_name in ('pos', 'size', 'height', 'width'):
        self.unbind(**{prop_name: self.update_info_label})
    App.get_running_app().unbind(editor_scale=self.setter('_editor_scale'))

def update_color(self, rgb):
    """Recompute the selected/deselected colors from a new base color and
    repaint the background according to the current selection state."""
    r, g, b = rgb
    self.selected_color = r, g, b, min([1.0, self.alpha * 3.0])
    self.deselected_color = r, g, b, self.alpha
    self.background_color = (self.selected_color
                             if self.is_selected
                             else self.deselected_color)

def render_mask(self):
    """NOT IMPLEMENTED

    Rendering a mask in Kivy is difficult. (Can Mesh do nonconvex?)"""
    pass
##########################################################################
# Touch processing

def on_touch_down(self, touch):
    """On a double tap inside this view, make the app's class list select
    this CropObject's class; otherwise defer to the ToggleButton behavior."""
    if touch.is_double_tap and self.collide_point(*touch.pos):
        list_renderer = App.get_running_app().cropobject_list_renderer
        list_renderer.view.select_class(self._model_counterpart.clsname)
        return True
    return super(CropObjectView, self).on_touch_down(touch)

##########################################################################
# Keyboard event processing: the core UI of the CropObjectView

def on_key_down(self, window, key, scancode, codepoint, modifier):
    """This method is one of the primary User Interfaces: keyboard
    shortcuts to manipulate a selected CropObject.

    :param window: The Kivy window dispatching the event.
    :param key: Numeric key code.
    :param scancode: Hardware scancode of the pressed key.
    :param codepoint: Unicode character for the key, if any.
    :param modifier: List of active modifiers (e.g. ['alt', 'shift']).
    :return: Always False, so the event keeps propagating.
    """
    # Only a selected view responds to keyboard shortcuts.
    if not self.is_selected:
        return False
    dispatch_key = keypress_to_dispatch_key(key, scancode, codepoint, modifier)
    if self.handle_dispatch_key(dispatch_key):
        self.dispatch('on_key_captured')
    return False
def handle_dispatch_key(self, dispatch_key):
    """Does the "heavy lifting" in keyboard controls: responds to a dispatch key.

    Decoupling this from the raw key event makes it possible to drive the
    view programmatically (e.g. for automation), not just from user input.

    :param dispatch_key: A string of the form e.g. ``109+alt,shift``: the ``key``
        number, ``+``, and comma-separated modifiers.

    :returns: True if the dispatch key got handled, False if there is
        no response defined for the given dispatch key.
    """
    # Arrow-key editing (move/stretch, coarse/fine) is currently disabled;
    # the tables below only produce the corresponding log messages.
    arrow_directions = {'273': 'up', '274': 'down',
                        '275': 'right', '276': 'left'}
    modifier_actions = {'': 'move', 'alt': 'move_fine',
                        'shift': 'stretch', 'alt,shift': 'stretch_fine'}
    key, _, modifiers = dispatch_key.partition('+')

    if dispatch_key == '8':             # Backspace: delete the CropObject
        self.remove_from_model()
    elif dispatch_key == '8+alt':       # Alt+Backspace: delete its edges only
        self._model.graph.remove_obj_edges(self.objid)
    elif dispatch_key == '27':          # Escape: deselect
        # Plain deselect() is not enough, because the list adapter's
        # handle_selection() tracks selection through 'on_release'.
        if self.is_selected:
            self.dispatch('on_release')
    elif key in arrow_directions and modifiers in modifier_actions:
        logging.info('CropObjectView: handling {0} {1}: DISABLED'
                     ''.format(modifier_actions[modifiers],
                               arrow_directions[key]))
    elif dispatch_key == '99':          # c: toggle the class-selection spinner
        logging.info('CropObjectView: handling mlclass selection')
        self.toggle_class_selection()
    elif dispatch_key == '99+shift':    # C: copy this class to the app
        logging.info('CropObjectView: cloning mlclass to app')
        self.clone_class_to_app()
    elif dispatch_key == '104+alt':     # Alt+h: toggle relationship visibility
        logging.info('CropObjectView: handling hiding relationships')
        self.toggle_hide_relationships()
    elif dispatch_key == '105':         # i: inspect the mask in a popup
        logging.info('CropObjectView: handling inspection')
        self.inspect()
    elif dispatch_key == '120':         # x: split into connected components
        logging.info('CropObjectView: handling split to connected components')
        self.split()
    else:
        # The key is not recognized by the CropObjectView, try others.
        return False

    # The key has been caught and processed. Note that this does not stop
    # propagation past the CropObjectListView: if any CropObjectView
    # captures a key signal, it still propagates onward.
    return True
def on_key_up(self, window, key, scancode, *args, **kwargs):
    """Key releases are never consumed by this view."""
    return False

def on_key_captured(self, *largs):
    """Default (no-op) handler for the custom 'on_key_captured' event."""
    pass
# TODO: Remove this (replaced from utils)
# @staticmethod
# def keypress_to_dispatch_key(key, scancode, codepoint, modifiers):
# """Converts the key_down event data into a single string for more convenient
# keyboard shortcut dispatch."""
# if modifiers:
# return '{0}+{1}'.format(key, ','.join(sorted(modifiers)))
# else:
# return '{0}'.format(key)
##########################################################################
# Accessing the model & the cropobject in the model, so that the user
# can manipulate the underlying data through the CropObjectView.

@property
def _model(self):
    # The single annotation model held by the running app.
    return App.get_running_app().annot_model

@property
def _model_counterpart(self):
    # The authoritative CropObject in the model. ``self.cropobject`` is only
    # the intermediate, view-side representation.
    return self._model.cropobjects[self.cropobject.objid]

@property
def objid(self):
    # Stable integer ID of the underlying CropObject.
    return self._model_counterpart.objid
##########################################################################
# Class selection

@tr.Tracker(track_names=['self'],
            transformations={'self': [lambda s: ('objid', s._model_counterpart.objid),
                                      lambda s: ('clsname', s._model_counterpart.clsname)]},
            fn_name='CropObjectView.toggle_class_selection',
            tracker_name='editing')
def toggle_class_selection(self):
    """Show the MLClass spinner if it is hidden, hide it if it is shown."""
    if not self._mlclass_selection_spinner_shown:
        self.create_class_selection()
    else:
        self.destroy_mlclass_selection_spinner()

def create_class_selection(self):
    """Build a spinner offering all MLClass names (ordered by clsid)
    and attach it to this view."""
    logging.info('CropObjectView\t{0}: show_class_selection() fired.'
                 ''.format(self.cropobject.objid))
    mlclasses = self._model.mlclasses_by_name
    class_names = sorted(list(mlclasses.keys()),
                         key=lambda name: mlclasses[name].clsid)
    spinner = Spinner(
        id='mlclass_cropobject_selection_spinner_{0}'.format(self.cropobject.objid),
        pos=self.pos,
        text='{0}'.format(self.cropobject.clsname),
        font_size=15,
        values=class_names,
        width=old_div(300, self._editor_scale),
        height=old_div(50, self._editor_scale),
        size_hint=(None, None),
    )
    spinner.bind(text=self.do_class_selection)
    self.mlclass_selection_spinner = spinner
    self.add_widget(spinner)
    self._mlclass_selection_spinner_shown = True

@tr.Tracker(track_names=['self', 'text'],
            transformations={'self': [lambda s: ('objid', s._model_counterpart.objid),
                                      lambda s: ('clsname', s._model_counterpart.clsname)]},
            fn_name='CropObjectView.do_class_selection',
            tracker_name='editing')
def do_class_selection(self, spinner_widget, clsname):
    """Spinner callback: apply the chosen class name, then close the spinner."""
    logging.info('CropObjectView\t{0}: do_class_selection() fired.'
                 ''.format(self.cropobject.objid))
    if self.cropobject.clsname != clsname:
        self.set_mlclass(clsname=clsname)
    self.destroy_mlclass_selection_spinner()

def set_mlclass(self, clsname):
    """Write the new class name to both the model-side and the view-side
    CropObject, then refresh the info label and the view's color."""
    # This should be wrapped in some cropobject's set_class method.
    self._model_counterpart.clsname = clsname
    self.cropobject.clsname = clsname
    # We should also check that the new class name is consistent
    # with the edges...
    self.update_info_label()
    new_color = self._model.mlclasses_by_name[clsname].color
    self.update_color(tuple(float(channel) for channel in new_color))

def destroy_mlclass_selection_spinner(self, *args, **kwargs):
    """Detach the class-selection spinner and forget it."""
    self.remove_widget(self.mlclass_selection_spinner)
    self.mlclass_selection_spinner = None
    self._mlclass_selection_spinner_shown = False
##########################################################################
# Info panel: displaying information about the view in the info palette

def toggle_info_panel(self):
    """Show the info label if it is hidden, hide it if it is shown."""
    if not self._info_label_shown:
        self.create_info_label()
    else:
        self.destroy_info_label()

def create_info_label(self):
    """Add a one-line summary label for this CropObject to the app's
    tool info palette."""
    label = Label(text=self.get_info_label_text())
    label.size_hint = (1.0, None)
    label.size = (self.parent.size[0], 35)
    self.info_label = label
    App.get_running_app()._get_tool_info_palette().add_widget(label)
    self._info_label_shown = True

def destroy_info_label(self, *args, **kwargs):
    """Remove this view's label from the info palette and forget it."""
    palette = App.get_running_app()._get_tool_info_palette()
    palette.remove_widget(self.info_label)
    self._info_label_shown = False
    self.info_label = None
def get_debug_info_label_text(self):
    """Return a multi-line debug summary: model-side (M.) and view-side (E.)
    geometry, mask size, and the view/model scaling factors (S.)."""
    view_side = self.cropobject
    model_side = self._model_counterpart
    lines = ['objid: {0}'.format(view_side.objid),
             'cls: {0}'.format(view_side.clsname),
             'M.x, M.y: {0:.2f}, {1:.2f}'.format(model_side.x, model_side.y),
             'M.w, M.h: {0:.2f}, {1:.2f}'.format(model_side.width,
                                                 model_side.height)]
    if model_side.mask is None:
        lines.append('Mask.nnz: None')
    else:
        lines.append('Mask.nnz: {0}'.format(model_side.mask.sum()))
    lines.append('E.x, E.y: {0:.2f}, {1:.2f}'.format(self.x, self.y))
    lines.append('E.w, E.h: {0:.2f}, {1:.2f}'.format(self.width, self.height))
    lines.append('S.V, S.H: {0:.2f}, {1:.2f}'.format(self._height_scaling_factor,
                                                     self._width_scaling_factor))
    return '\n'.join(lines)
def get_info_label_text(self):
    """Return a one-line summary: '(objid) clsname', optionally extended
    with pitch / duration / onset fields from the CropObject's data."""
    cropobj = self._model_counterpart
    text = '({0}) {1}'.format(cropobj.objid, cropobj.clsname)
    if cropobj.data is None:
        return text
    logging.debug('Creating info label for object {0}:'
                  ' data {1}'.format(cropobj.uid, cropobj.data))
    pitch_text = ''
    if 'pitch_step' in cropobj.data:
        pitch_text = '{0}'.format(cropobj.data['pitch_step'])
    elif 'normalized_pitch_step' in cropobj.data:
        pitch_text = '{0}'.format(cropobj.data['normalized_pitch_step'])
    if 'pitch_octave' in cropobj.data:
        pitch_text += '{0}'.format(cropobj.data['pitch_octave'])
    if pitch_text:
        text += ' | {0}'.format(pitch_text)
    if 'duration_beats' in cropobj.data:
        text += ' | {0:.2f}'.format(cropobj.data['duration_beats'])
    if 'onset_beats' in cropobj.data:
        text += ' | {0:.3f}'.format(cropobj.data['onset_beats'])
    return text

def update_info_label(self, *args):
    """Refresh the info label text, if the label is currently shown."""
    if self.info_label is None:
        return
    self.info_label.text = self.get_info_label_text()
##########################################################################

def remove_from_model(self):
    """Remove the underlying CropObject from the annotation model and
    deactivate this view.

    The view widget itself is *not* destroyed here (see the comments
    below); it is deselected, unbound and disabled so that it can no
    longer capture events.
    """
    logging.info('CropObjectView.remove_from_model(): called on objid {0}'
                 ''.format(self.cropobject.objid))
    # Problem here: the cropobject gets deleted, but the widget stays
    # alive, so it keeps capturing events. This is (a) a memory leak,
    # (b) causes crashes.
    # Easy workaround: unselect self first. This does not fix the memory
    # leak, but at least the 'invisible' CropObjectView will not
    # capture any events.
    self.ensure_deselected()
    # Another workaround: schedule self-deletion for slightly later,
    # after the widget gets removed from the call stack.
    # The problem persists also with widget deletion...
    # After clear()-ing the current CropObjectList, the CropObjectView
    # widgets stay alive!
    # What if the bindings to Window are keeping the widget alive?
    self.remove_bindings()
    # Let's at least deactivate it, so it doesn't do anything.
    # This, however, won't help upon clearing the widgets...
    self.disabled = True
    self._model.remove_cropobject(self.cropobject.objid)
##########################################################################
# Movement & scaling

@tr.Tracker(track_names=['self', 'vertical', 'horizontal'],
            transformations={'self': [lambda s: ('objid', s._model_counterpart.objid),
                                      lambda s: ('clsname', s._model_counterpart.clsname)]},
            fn_name='CropObjectView.move',
            tracker_name='editing')
def move(self, vertical=0, horizontal=0):
    """Move the underlying CropObject.

    NOTE: How to deal with CropObjects that have a mask? Roll it?

    In the current implementation, there is no listener inside the model
    for individual CropObjects, so there is no propagation of the change
    to the view. We currently work around this by simply moving the view
    as well, but this will not work when the underlying CropObject is moved
    by some other means.

    :param vertical: Signed vertical delta (numpy-world rows).
    :param horizontal: Signed horizontal delta (numpy-world columns).
    """
    logging.info('CropObjectView {0}: moving vertical={1}, horizontal={2}'
                 ''.format(self.cropobject.objid, vertical, horizontal))
    c = self._model_counterpart
    # The CropObjects in the model are kept in the Numpy world.
    c.x += vertical   #* self._height_scaling_factor
    c.y += horizontal #* self._height_scaling_factor
    if c.mask is not None:
        # BUGFIX: the original called the deprecated logging.warn() with an
        # unfilled '{0}' placeholder, so the objid never appeared in the log.
        logging.warning('CropObjectView {0}: Moving a CropObject invalidates'
                        ' its mask!'.format(self.cropobject.objid))
    self._model.add_cropobject(c)
    self.move_view(vertical=vertical, horizontal=horizontal)
def move_view(self, vertical=0, horizontal=0):
    """Move only the view widget (not the model), converting the given
    numpy-world deltas to display pixels via the scaling factors.

    :param vertical: Signed vertical delta (numpy-world rows).
    :param horizontal: Signed horizontal delta (numpy-world columns).
    """
    logging.info('CropObjectView {0}: moving view vertical={1}, horizontal={2}'
                 ''.format(self.cropobject.objid, vertical, horizontal))
    # BUGFIX: the vertical component was scaled by _width_scaling_factor;
    # stretch_view() scales vertical deltas by the height factor, so the
    # same convention is applied here.
    self.pos = (self.pos[0] + horizontal * self._width_scaling_factor,
                self.pos[1] + vertical * self._height_scaling_factor)
def move_fine(self, vertical=0, horizontal=0):
    """Move the underlying CropObject by display pixels (the finest step).

    In the current implementation, there is no listener inside the model
    for individual CropObjects, so there is no propagation of the change
    to the view. We currently work around this by simply moving the view
    as well, but this will not work when the underlying CropObject is moved
    by some other means.

    :param vertical: Signed vertical delta, in display pixels.
    :param horizontal: Signed horizontal delta, in display pixels.
    """
    logging.info('CropObjectView {0}: moving vertical={1}, horizontal={2}'
                 ''.format(self.cropobject.objid, vertical, horizontal))
    c = self._model_counterpart
    # The CropObjects in the model are kept in the Numpy world.
    c.x += vertical * self._height_scaling_factor / self._editor_scale
    # BUGFIX: the horizontal component was scaled by the *height* factor;
    # stretch_fine() scales horizontal deltas by the width factor, so the
    # same convention is applied here.
    c.y += horizontal * self._width_scaling_factor / self._editor_scale
    self._model.add_cropobject(c)
    self.move_view_fine(vertical=vertical, horizontal=horizontal)
def move_view_fine(self, vertical=0, horizontal=0):
    """Shift only the view widget by display pixels, corrected for the
    current editor zoom."""
    logging.info('CropObjectView {0}: moving view vertical={1}, horizontal={2}'
                 ''.format(self.cropobject.objid, vertical, horizontal))
    new_x = self.pos[0] + old_div(horizontal, self._editor_scale)
    new_y = self.pos[1] + old_div(vertical, self._editor_scale)
    self.pos = (new_x, new_y)

@tr.Tracker(track_names=['self', 'vertical', 'horizontal'],
            transformations={'self': [lambda s: ('objid', s._model_counterpart.objid),
                                      lambda s: ('clsname', s._model_counterpart.clsname)]},
            fn_name='CropObjectView.stretch',
            tracker_name='editing')
def stretch(self, vertical=0, horizontal=0):
    """Stretch the underlying CropObject. Does NOT change its position.
    Cannot make the CropObject smaller than 1 in either dimension.

    See :meth:`move` for a discussion on linking the model action and view."""
    logging.info('CropObjectView {0}: stretching vertical={1}, horizontal={2}'
                 ''.format(self.cropobject.objid, vertical, horizontal))
    c = self._model_counterpart
    if 0 < c.width + horizontal:
        c.width += horizontal
    if 0 < c.height + vertical:
        c.height += vertical
    self._model.add_cropobject(c)
    self.stretch_view(vertical=vertical, horizontal=horizontal)

def stretch_view(self, vertical=0, horizontal=0):
    """Resize only the view widget, scaling the given numpy-world deltas
    to display pixels."""
    logging.info('CropObjectView {0}: stretching view vertical={1}, horizontal={2}'
                 ''.format(self.cropobject.objid, vertical, horizontal))
    if 0 < self.width + horizontal:
        self.width += horizontal * self._width_scaling_factor
    if 0 < self.height + vertical:
        self.height += vertical * self._height_scaling_factor

def stretch_fine(self, vertical=0, horizontal=0):
    """Stretch the underlying CropObject by display pixels (finest step).
    Does NOT change its position; cannot shrink below 1 in either dimension.

    See :meth:`move` for a discussion on linking the model action and view."""
    logging.info('CropObjectView {0}: stretching vertical={1}, horizontal={2}'
                 ''.format(self.cropobject.objid, vertical, horizontal))
    c = self._model_counterpart
    if 0 < c.width + horizontal:
        c.width += horizontal * self._width_scaling_factor / self._editor_scale
    if 0 < c.height + vertical:
        c.height += vertical * self._height_scaling_factor / self._editor_scale
    self._model.add_cropobject(c)
    self.stretch_view_fine(vertical=vertical, horizontal=horizontal)

def stretch_view_fine(self, vertical=0, horizontal=0):
    """Resize only the view widget by display pixels, corrected for the
    current editor zoom."""
    logging.info('CropObjectView {0}: stretching view vertical={1}, horizontal={2}'
                 ''.format(self.cropobject.objid, vertical, horizontal))
    if 0 < self.width + horizontal:
        self.width += old_div(horizontal, self._editor_scale)
    if 0 < self.height + vertical:
        self.height += old_div(vertical, self._editor_scale)
##########################################################################
# Split

# NOTE(review): the tracker lists 'ratio' in track_names, but split() takes
# no such parameter -- confirm against tr.Tracker whether unknown names are
# silently ignored.
@tr.Tracker(track_names=['self', 'ratio'],
            transformations={'self': [lambda s: ('objid', s._model_counterpart.objid),
                                      lambda s: ('clsname', s._model_counterpart.clsname)]},
            fn_name='CropObjectView.split',
            tracker_name='editing')
def split(self):
    """Split the CropObject according to its mask.

    Replaces the current CropObject in the model by one CropObject per
    connected component of its mask; a no-op when the mask has a single
    connected component.
    """
    _next_objid = self._model.get_next_cropobject_id()
    new_cropobjects = split_cropobject_on_connected_components(self._model_counterpart,
                                                               next_objid=_next_objid)
    # Only one connected component: nothing to split.
    if len(new_cropobjects) == 1:
        return
    # Remove first, then add: remove_from_model() also deactivates this view
    # so that it stops capturing events.
    self.remove_from_model()
    for c in new_cropobjects:
        self._model.add_cropobject(c)
##########################################################################
# Clone class

@tr.Tracker(track_names=['self'],
            transformations={'self': [lambda s: ('objid', s._model_counterpart.objid),
                                      lambda s: ('clsname', s._model_counterpart.clsname)]},
            fn_name='CropObjectView.clone_class_to_app',
            tracker_name='editing')
def clone_class_to_app(self):
    """Make this CropObject's class the app's currently selected MLClass."""
    app = App.get_running_app()
    app.currently_selected_mlclass_name = self._model_counterpart.clsname

##########################################################################
# Hide relationships

@tr.Tracker(track_names=['self'],
            transformations={'self': [lambda s: ('objid', s._model_counterpart.objid),
                                      lambda s: ('clsname', s._model_counterpart.clsname),
                                      lambda s: ('inlinks', s._model_counterpart.inlinks),
                                      lambda s: ('outlinks', s._model_counterpart.outlinks)]},
            fn_name='CropObjectView.hide_relationships',
            tracker_name='editing')
def hide_relationships(self):
    """Mask all graph edges incident on this CropObject in the renderer."""
    App.get_running_app().graph_renderer.mask(self.collect_all_edges())
@tr.Tracker(track_names=['self'],
            transformations={'self': [lambda s: ('objid', s._model_counterpart.objid),
                                      lambda s: ('clsname', s._model_counterpart.clsname),
                                      lambda s: ('inlinks', s._model_counterpart.inlinks),
                                      lambda s: ('outlinks', s._model_counterpart.outlinks)]},
            # BUGFIX: was 'CropObjectView.hide_relationships' (copy-paste),
            # which mislabeled show events in the tracking log.
            fn_name='CropObjectView.show_relationships',
            tracker_name='editing')
def show_relationships(self):
    """Unmask all graph edges incident on this CropObject in the renderer."""
    edges = self.collect_all_edges()
    App.get_running_app().graph_renderer.unmask(edges)
def toggle_hide_relationships(self):
    """Show this CropObject's relationships if they are all hidden,
    hide them otherwise."""
    # The graph renderer itself keeps track of which edges are
    # currently masked; we just query it.
    renderer = App.get_running_app().graph_renderer
    if renderer.are_all_masked(self.collect_all_edges()):
        self.show_relationships()
    else:
        self.hide_relationships()

def collect_all_edges(self):
    """Return all graph edges incident on this CropObject,
    as (from_objid, to_objid) pairs."""
    counterpart = self._model_counterpart
    inbound = [(i, self.objid) for i in counterpart.inlinks]
    outbound = [(self.objid, o) for o in counterpart.outlinks]
    return inbound + outbound
##########################################################################
# Inspect mask

@tr.Tracker(track_names=['self'],
            transformations={'self': [lambda s: ('objid', s._model_counterpart.objid),
                                      lambda s: ('clsname', s._model_counterpart.clsname)]},
            # BUGFIX: was 'CropObjectView.clone_class_to_app' (copy-paste),
            # which mislabeled inspection events in the tracking log.
            fn_name='CropObjectView.inspect',
            tracker_name='editing')
def inspect(self):
    """Shows the symbol's exact mask in the context of its bounding box
    in a popup."""
    # Create crop: blend the projected mask with the underlying image.
    image = self._model.image
    crop = self._model_counterpart.project_to(image).astype('float32')
    t, l, b, r = self._model_counterpart.bounding_box
    background_crop = image[t:b, l:r].astype('float32')
    combined_crop = (old_div(crop, 2.0)) + (old_div(background_crop, 2.0))

    # Save image to a unique temp file the popup can display.
    # NOTE(review): scipy.misc.imsave was removed in scipy >= 1.2; if scipy
    # is ever upgraded, this call must be replaced (e.g. imageio.imwrite).
    app = App.get_running_app()
    tmp_dir = app.tmp_dir
    fname = str(uuid.uuid4()) + '.png'
    full_path = os.path.join(tmp_dir, fname)
    scipy.misc.imsave(full_path, combined_crop)

    # Make popup with the crop
    data_text = self._model_counterpart.data_display_text()
    popup = InspectionPopup(
        data_text=data_text,
        title='Inspecting obj. {0} | clsname: {1} | bbox: {2}'
              ''.format(self.objid,
                        self._model_counterpart.clsname,
                        self._model_counterpart.bounding_box)
              + '\n\n______________________________________\nDATA\n\n' + data_text,
        source=full_path)

    # Delete the temp file when the popup is dismissed.
    # BUGFIX: the old helper took a ``fname`` argument it then ignored
    # (the bind lambda actually passed the popup instance); it always
    # operated on ``full_path``, so make that explicit.
    def _safe_unlink(*_):
        if os.path.exists(full_path):
            os.unlink(full_path)
    popup.bind(on_dismiss=_safe_unlink)
    popup.open()
##########################################################################
# Copied over from ListItemButton

@tr.Tracker(track_names=['self'],
            transformations={'self': [lambda s: ('objid', s._model_counterpart.objid),
                                      lambda s: ('clsname', s._model_counterpart.clsname)]},
            fn_name='CropObjectView.select',
            tracker_name='editing')
def select(self, *args):
    """Paint the selected color, show the info label, and propagate the
    selection to a composite parent and to the Kivy superclass."""
    self.background_color = self.selected_color
    if not self._info_label_shown:
        self.create_info_label()
    parent = self.parent
    if isinstance(parent, CompositeListItem):
        parent.select_from_child(self, *args)
    super(CropObjectView, self).select(*args)

@tr.Tracker(track_names=['self'],
            transformations={'self': [lambda s: ('objid', s._model_counterpart.objid),
                                      lambda s: ('clsname', s._model_counterpart.clsname)]},
            fn_name='CropObjectView.deselect',
            tracker_name='editing')
def deselect(self, *args):
    """Only handles self.is_selected, not the 'on_release'
    dispatch that the ListAdapter uses to maintain selection!
    Use ensure_deselected() instead."""
    # Tear down selection-dependent widgets first.
    if self._info_label_shown:
        self.destroy_info_label()
    if self._mlclass_selection_spinner_shown:
        self.destroy_mlclass_selection_spinner()
    self.background_color = self.deselected_color
    parent = self.parent
    if isinstance(parent, CompositeListItem):
        parent.deselect_from_child(self, *args)
    super(CropObjectView, self).deselect(*args)
# def do_deselect(self):
# """Proper deselection that will be reflected in a ListAdapter
# containing this view."""
# if self.is_selected:
# self.dispatch('do_release')
def ensure_selected(self):
    """Proper selection that will be reflected in a ListAdapter
    containing this view."""
    if self.is_selected:
        return
    self.dispatch('on_release')

def ensure_deselected(self):
    """Proper unselection that will be reflected in a ListAdapter
    containing this view."""
    if not self.is_selected:
        return
    self.dispatch('on_release')
def select_from_composite(self, *args):
    """The composite parent selected us: just repaint."""
    self.background_color = self.selected_color

def deselect_from_composite(self, *args):
    """The composite parent deselected us: just repaint."""
    color = self.deselected_color
    self.background_color = color
# For logging/debugging multi-selection only.
#
# def on_is_selected(self, instance, pos):
# logging.info('CropObjectView\t{0}: is_selected changed to {1}'
# ''.format(self.cropobject.objid, self.is_selected))
#
# def on_press(self):
# logging.info('CropObjectView.on_press()\t{0}: Fired'
# ''.format(self.cropobject.objid))
# return super(CropObjectView, self).on_press()
#
# def on_release(self):
# logging.info('CropObjectView.on_release()\t{0}: Fired'
# ''.format(self.cropobject.objid))
# return super(CropObjectView, self).on_release()
#
# def on_touch_up(self, touch):
# if touch.grab_current is not self:
# logging.info('CropObjectView.on_touch_up()\t{0}: touch {1} is FOREIGN'
# ''.format(self.cropobject.objid, touch))
# else:
# logging.info('CropObjectView.on_touch_up()\t{0}: touch {1} is MINE'
# ''.format(self.cropobject.objid, touch))
# return super(CropObjectView, self).on_touch_up(touch)
| en | 0.650098 | This module implements a class that... # Should behave like a ToggleButton. # Important difference from ListItemButton: # # * Colors defined at initialization time, # * Text is empty The view to an individual CropObject. Implements interface for CropObject manipulation. Selection --------- The ``CropObjectView`` is selectable by clicking. Keyboard shortcuts only work when the button is selected. Mouse interaction ----------------- Once selected, the CropObject can be dragged around [NOT IMPLEMENTED]. Keyboard shortcuts ------------------ If the CropObjectView handles a key press event, it will not propagate. The available keyboard shortcuts are: * Backspace: Remove the CropObject * Escape: Unselect * Arrow keys: move the CropObject by 1 editor-scale pixel. * Arrow keys + alt: move the CropObject by 1 display pixel. (Finest.) * Arrow keys + shift: stretch the CropObject by 1 editor-scale pixel. * Arrow keys + alt + shift: stretch the CropObject by 1 display pixel. (Finest.) * i: toggle info label * c: change class selection :param selectable_cropobject: The intermediate-level CropObject represnetation, with recomputed dimension. :param rgb: :param alpha: Works for deselected color, when selected, multiplied by 1.5 :param kwargs: :return: # logging.debug('Render: Initializing CropObjectView with args: c={0},' # ' rgb={1}, alpha={2}'.format(selectable_cropobject, rgb, alpha)) # We don't want any text showing up # Recorded for future color changes on class change # Overriding the default button color and border behavior # Overriding default release # Here, we position the CropObjectView. 
# self.pos_hint = {'x': self.cropobject.x, 'y': self.cropobject.y } # self.pos_hint = {'x': 0, 'y': 0 } # self.group = self.cropobject.objid # If the underlying cropobject has a mask, render that mask # logging.info('Creating bindings for COV {0}'.format(self)) # logging.info('Removing bindings for COV {0}'.format(self)) NOT IMPLEMENTED Rendering a mask in Kivy is difficult. (Can Mesh do nonconvex?) ########################################################################## # Touch processing ########################################################################## # Keyboard event processing: the core UI of the CropObjectView This method is one of the primary User Interfaces: keyboard shortcuts to manipulate a selected CropObject. :param window: :param key: :param scancode: :param codepoint: :param modifier: :return: # if self.cropobject.objid < 50: # logging.info('CropObjectView: Key caught by CropObjectView {0}: {1}' # ''.format(self, # (key, scancode, codepoint, modifier))) # Get the dispatch key # ------------ #logging.info('CropObjectView: Handling key {0}, self.is_selected={1},' # ' self.cropobject={2}' # ''.format(dispatch_key, self.is_selected, str(self.cropobject.objid))) Does the "heavy lifting" in keyboard controls: responds to a dispatch key. Decoupling this into a separate method facillitates giving commands to the ListView programmatically, not just through user input, and this way makes automation easier. :param dispatch_key: A string of the form e.g. ``109+alt,shift``: the ``key`` number, ``+``, and comma-separated modifiers. :returns: True if the dispatch key got handled, False if there is no response defined for the given dispatch key. # Deletion # Delete # Delete attachments # Unselect # Escape # logging.info('CropObjectView\t{0}: handling deselect + state to \'normal\'' # ''.format(self.objid)) # Simple deselection is not enough because of the adapter handle_selection() # method. 
# self.deselect() # ...called from the adapter's handle_selection() # Moving around # Up arrow #self.move(vertical=1) # Down arrow #self.move(vertical=-1) # Right arrow #self.move(horizontal=1) # Left arrow #self.move(horizontal=-1) # Fine-grained moving around # Up arrow #self.move_fine(vertical=1) # Down arrow #self.move_fine(vertical=-1) # Right arrow #self.move_fine(horizontal=1) # Left arrow #self.move_fine(horizontal=-1) # Coarse-grained stretching # Up arrow #self.stretch(vertical=1) # Down arrow #self.stretch(vertical=-1) # Right arrow #self.stretch(horizontal=1) # Left arrow #self.stretch(horizontal=-1) # Fine-grained stretching # Up arrow #self.stretch_fine(vertical=1) # Down arrow #self.stretch_fine(vertical=-1) # Right arrow #self.stretch_fine(horizontal=1) # Left arrow #self.stretch_fine(horizontal=-1) # Change class # c # Hide relationships # h # Inspect CropObjects # i #self.toggle_info_panel() # x # The key is not recognized by the CropObjectView, try others. # If we got here, the key has been caught and processed. # However, maybe we want to do the operation with other selected objects # as well. # On the other hand: this makes things propagate past the CropObjectViews, # so for example Escape unselects all CropObjects *and* quits the application. # Therefore, the CropObjectListView should "block" these signals # from propagating further. # Current policy: if any CropObjectView captures a key signal, it will propagate # past the CropObjectListView. Default handler for on_key_captured event. 
# TODO: Remove this (replaced from utils) # @staticmethod # def keypress_to_dispatch_key(key, scancode, codepoint, modifiers): # """Converts the key_down event data into a single string for more convenient # keyboard shortcut dispatch.""" # if modifiers: # return '{0}+{1}'.format(key, ','.join(sorted(modifiers))) # else: # return '{0}'.format(key) ########################################################################## # Accessing the model & the cropobject in the model, so that the user # can manipulate the underlying data through the CropObjectView. ########################################################################## # Class selection # is_open=True, # self.mlclass_selection_spinner.option_cls.height = 37 # This should be wrapped in some cropobject's set_class method. # We should also check that the new class name is consistent # with the edges... # Update color ########################################################################## # Info panel: displaying information about the view in the info palette # Info panel! # logging.debug('CropObjectView.create_info_label() called.') # logging.debug('CropObjectView.destroy_info_label() called.') # duration_text = None # if 'duration_beats' in c.data: # duration_text = '{0:.2f}'.format(c.data['duration_beats']) # if duration_text is not None: # text += ' | {0}'.format(duration_text) ########################################################################## # Problem here: the cropobject gets deleted, but the widget stays # alive, so it keeps capturing events. This is (a) a memory leak, # (b) causes crashes. # Easy workaround: unselect self first. This does not fix the memory # leak, but at least the 'invisible' CropObjectView will not # capture any events. # Another workaround: schedule self-deletion for slightly later, # after the widget gets removed from the call stack. # The problem persists also with widget deletion... 
# After clear()-ing the current CropObjectList, the CropObjectView # widgets stay alive! # What if the bindings to Window are keeping the widget alive? # Let's at least deactivate it, so it doesn't do anything. # This, however, won't help upon clearing the widgets... ########################################################################## # Movement & scaling Move the underlying CropObject. NOTE: How to deal with CropObjects that have a mask? Roll it? In the current implementation, there is no listener inside the model for individual CropObjects, so there is no propagation of the change to the view. We currently work around this by simply moving the view as well, but this will not work when the underlying CropObject is moved by some other means. # The CropObjects in the model are kept in the Numpy world. #* self._height_scaling_factor #* self._height_scaling_factor Move the underlying CropObject. In the current implementation, there is no listener inside the model for individual CropObjects, so there is no propagation of the change to the view. We currently work around this by simply moving the view as well, but this will not work when the underlying CropObject is moved by some other means. # The CropObjects in the model are kept in the Numpy world. # / self._width_scaling_factor), # / self._width_scaling_factor)) Stretch the underlying CropObject. Does NOT change its position. Cannot make the CropObject smaller than 1 in either dimension. See :meth:`move` for a discussion on linking the model action and view. #* self._width_scaling_factor #* self._height_scaling_factor Stretch the underlying CropObject. Does NOT change its position. Cannot make the CropObject smaller than 1 in either dimension. See :meth:`move` for a discussion on linking the model action and view. # / self._width_scaling_factor) # / self._height_scaling_factor) ########################################################################## # Split Split the CropObject according to its mask. 
########################################################################## # Clone class ########################################################################## # Hide relationships # A very private toggle switch that keeps track of whether # the relationships are hidden or visible. ########################################################################## # Inspect mask Shows the symbol's exact mask in the context of its bounding box in a popup. # Create crop # Save image # Make popup with the crop # Bind to delete the temp file on cancel() ########################################################################## # Copied over from ListItemButton # logging.debug('CropObjectView\t{0}: called selection' # ''.format(self.cropobject.objid)) Only handles self.is_selected, not the 'on_release' dispatch that the ListAdapter uses to maintain selection! Use ensure_deselected() instead. # logging.debug('CropObjectView\t{0}: called deselection' # ''.format(self.cropobject.objid)) # logging.debug('CropObjectView.deselect: info label shown? {0}' # ''.format(self._info_label_shown)) # logging.debug('CropObjectView.deselect: destroying info label.') # def do_deselect(self): # """Proper deselection that will be reflected in a ListAdapter # containing this view.""" # if self.is_selected: # self.dispatch('do_release') Proper selection that will be reflected in a ListAdapter containing this view. Proper unselection that will be reflected in a ListAdapter containing this view. # For logging/debugging multi-selection only. 
# # def on_is_selected(self, instance, pos): # logging.info('CropObjectView\t{0}: is_selected changed to {1}' # ''.format(self.cropobject.objid, self.is_selected)) # # def on_press(self): # logging.info('CropObjectView.on_press()\t{0}: Fired' # ''.format(self.cropobject.objid)) # return super(CropObjectView, self).on_press() # # def on_release(self): # logging.info('CropObjectView.on_release()\t{0}: Fired' # ''.format(self.cropobject.objid)) # return super(CropObjectView, self).on_release() # # def on_touch_up(self, touch): # if touch.grab_current is not self: # logging.info('CropObjectView.on_touch_up()\t{0}: touch {1} is FOREIGN' # ''.format(self.cropobject.objid, touch)) # else: # logging.info('CropObjectView.on_touch_up()\t{0}: touch {1} is MINE' # ''.format(self.cropobject.objid, touch)) # return super(CropObjectView, self).on_touch_up(touch) | 2.613813 | 3 |
test/test_wrapper.py | urish/wrapped_rgb_mixer | 5 | 6622685 | import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, FallingEdge, ClockCycles
from rgb_mixer.test.encoder import Encoder
@cocotb.test()
async def test_wrapper(dut):
clock = Clock(dut.wb_clk_i, 10, units="ns")
cocotb.fork(clock.start())
clocks_per_phase = 5
encoder = Encoder(dut.wb_clk_i, dut.io_in[8], dut.io_in[9], clocks_per_phase = clocks_per_phase, noise_cycles = 0)
dut.wb_rst_i <= 1
await ClockCycles(dut.wb_clk_i, 5)
dut.wb_rst_i <= 0
dut.la_data_in <= 0
# count up with encoder with project inactive
for i in range(clocks_per_phase * 2 * 255):
await encoder.update(1)
# pause
await ClockCycles(dut.wb_clk_i, 100)
# activate project
dut.active <= 1
# reset it
dut.la_data_in <= 1 << 0
await ClockCycles(dut.wb_clk_i, 1)
dut.la_data_in <= 0 << 0
await ClockCycles(dut.wb_clk_i, 1)
# count up with encoder while project is active
for i in range(clocks_per_phase * 2 * 255):
await encoder.update(1)
await ClockCycles(dut.wb_clk_i, 1000)
| import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, FallingEdge, ClockCycles
from rgb_mixer.test.encoder import Encoder
@cocotb.test()
async def test_wrapper(dut):
clock = Clock(dut.wb_clk_i, 10, units="ns")
cocotb.fork(clock.start())
clocks_per_phase = 5
encoder = Encoder(dut.wb_clk_i, dut.io_in[8], dut.io_in[9], clocks_per_phase = clocks_per_phase, noise_cycles = 0)
dut.wb_rst_i <= 1
await ClockCycles(dut.wb_clk_i, 5)
dut.wb_rst_i <= 0
dut.la_data_in <= 0
# count up with encoder with project inactive
for i in range(clocks_per_phase * 2 * 255):
await encoder.update(1)
# pause
await ClockCycles(dut.wb_clk_i, 100)
# activate project
dut.active <= 1
# reset it
dut.la_data_in <= 1 << 0
await ClockCycles(dut.wb_clk_i, 1)
dut.la_data_in <= 0 << 0
await ClockCycles(dut.wb_clk_i, 1)
# count up with encoder while project is active
for i in range(clocks_per_phase * 2 * 255):
await encoder.update(1)
await ClockCycles(dut.wb_clk_i, 1000)
| en | 0.791003 | # count up with encoder with project inactive # pause # activate project # reset it # count up with encoder while project is active | 2.247041 | 2 |
allsix.py | getmykhan/Data-Visualization-Tool | 0 | 6622686 | def graphplot(dfcol1,dfcol2,path):
plt.subplot(321)
plt.plot(dfcol1,dfcol2) #line chart
plt.title(path)
# plt.show()
plt.subplot(322)
plt.bar(dfcol1,dfcol2) #bar chart
plt.title(path)
plt.subplot(323)
plt.hist(dfcol1,dfcol2) #histogram
plt.title(path)
plt.subplot(324)
plt.scatter(dfcol1,dfcol2) #scatter plot
plt.subplot(325)
plt.stackplot(dfcol1,dfcol2) #stack chart
slices=[6,8]
plt.subplot(326)
plt.pie(slices)
plt.show() | def graphplot(dfcol1,dfcol2,path):
plt.subplot(321)
plt.plot(dfcol1,dfcol2) #line chart
plt.title(path)
# plt.show()
plt.subplot(322)
plt.bar(dfcol1,dfcol2) #bar chart
plt.title(path)
plt.subplot(323)
plt.hist(dfcol1,dfcol2) #histogram
plt.title(path)
plt.subplot(324)
plt.scatter(dfcol1,dfcol2) #scatter plot
plt.subplot(325)
plt.stackplot(dfcol1,dfcol2) #stack chart
slices=[6,8]
plt.subplot(326)
plt.pie(slices)
plt.show() | en | 0.502737 | #line chart # plt.show() #bar chart #histogram #scatter plot #stack chart | 2.780679 | 3 |
windows-mlps-lrsignal/src/data_management/dataset.py | Amir-Mehrpanah/RRFLab | 0 | 6622687 | <reponame>Amir-Mehrpanah/RRFLab<gh_stars>0
import glob
import logging
import torch
from torch.utils.data import Dataset
import pandas as pd
import numpy as np
import os
class SlidingWindowDataset(Dataset):
"""Sliding Window Dataset [Symbol,NumParWin,WinWidth]."""
def __init__(self, dynamic_config):
self.demo = dynamic_config['demo']
stock_data_path = glob.glob(os.path.join(dynamic_config['data_root_path'],
'*.csv'))
temp_stock_data = []
temp_stock_data_len = []
i = -1
while len(temp_stock_data) < dynamic_config['num_symbols']:
i += 1
temp = pd.read_csv(stock_data_path[i])
if dynamic_config['verbose'] > 1:
print(stock_data_path[i])
temp = temp.sort_values(by=['date'])
temp = temp.drop(columns=['Name',
'date',
'low',
'open',
'high',
'volume']).to_numpy()
if len(temp) < dynamic_config['min_stock_data_length']:
if dynamic_config['verbose'] > 0:
logging.warning(stock_data_path[i] +
' skipped data of length = ' +
str(len(temp)))
continue
temp_stock_data_len.append(len(temp))
temp_stock_data.append(temp)
min_length = np.min(temp_stock_data_len)
temp_stock_data = [x[0:min_length] for x in temp_stock_data]
self.stock_data = np.concatenate(temp_stock_data, axis=1)
self.forward_window_lengths = dynamic_config['forward_window_lengths']
self.backward_window_lengths = dynamic_config['backward_window_lengths']
self.offset = max(dynamic_config['backward_window_lengths'])
self.num_parallel_windows = dynamic_config['num_parallel_windows']
self.num_symbols = dynamic_config['num_symbols']
if dynamic_config['signal_function_args'] is not None:
self.signal_function = dynamic_config['signal_function'](**dynamic_config['signal_function_args'])
else:
self.signal_function = dynamic_config['signal_function']()
# self.transform = Config.transform
# print('dataset shape:', self.stock_data.shape)
def __len__(self):
return len(self.stock_data) - \
max(self.backward_window_lengths) - \
max(self.forward_window_lengths)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
backward_windows, forward_windows = self.__read_window(idx)
signals = self.__compute_signals(forward_windows)
signals = torch.tensor(signals, dtype=torch.long)
backward_windows = self.to_list_tensor(backward_windows)
# if self.transform:
# backward_window = self.transform(backward_window)
return backward_windows, signals, forward_windows
@staticmethod
def to_list_tensor(data):
return [torch.tensor(datum, dtype=torch.double) for datum in data]
def __compute_signals(self, forward_windows):
signals = []
for i in range(self.num_symbols):
signals.append([])
for j in range(self.num_parallel_windows):
signals[i].append(self.signal_function(forward_windows[j][i, :]))
return np.concatenate(signals, 1).squeeze(2).transpose()
def __read_window(self, idx):
backward_windows = []
forward_windows = []
for length_idx in range(self.num_parallel_windows):
backward_windows.append(
self.stock_data[idx + self.offset -
self.backward_window_lengths[length_idx]:
idx + self.offset, :].T)
forward_windows.append(self.stock_data[idx + self.offset:
idx + self.offset +
self.forward_window_lengths[length_idx], :].T)
return backward_windows, forward_windows
| import glob
import logging
import torch
from torch.utils.data import Dataset
import pandas as pd
import numpy as np
import os
class SlidingWindowDataset(Dataset):
"""Sliding Window Dataset [Symbol,NumParWin,WinWidth]."""
def __init__(self, dynamic_config):
self.demo = dynamic_config['demo']
stock_data_path = glob.glob(os.path.join(dynamic_config['data_root_path'],
'*.csv'))
temp_stock_data = []
temp_stock_data_len = []
i = -1
while len(temp_stock_data) < dynamic_config['num_symbols']:
i += 1
temp = pd.read_csv(stock_data_path[i])
if dynamic_config['verbose'] > 1:
print(stock_data_path[i])
temp = temp.sort_values(by=['date'])
temp = temp.drop(columns=['Name',
'date',
'low',
'open',
'high',
'volume']).to_numpy()
if len(temp) < dynamic_config['min_stock_data_length']:
if dynamic_config['verbose'] > 0:
logging.warning(stock_data_path[i] +
' skipped data of length = ' +
str(len(temp)))
continue
temp_stock_data_len.append(len(temp))
temp_stock_data.append(temp)
min_length = np.min(temp_stock_data_len)
temp_stock_data = [x[0:min_length] for x in temp_stock_data]
self.stock_data = np.concatenate(temp_stock_data, axis=1)
self.forward_window_lengths = dynamic_config['forward_window_lengths']
self.backward_window_lengths = dynamic_config['backward_window_lengths']
self.offset = max(dynamic_config['backward_window_lengths'])
self.num_parallel_windows = dynamic_config['num_parallel_windows']
self.num_symbols = dynamic_config['num_symbols']
if dynamic_config['signal_function_args'] is not None:
self.signal_function = dynamic_config['signal_function'](**dynamic_config['signal_function_args'])
else:
self.signal_function = dynamic_config['signal_function']()
# self.transform = Config.transform
# print('dataset shape:', self.stock_data.shape)
def __len__(self):
return len(self.stock_data) - \
max(self.backward_window_lengths) - \
max(self.forward_window_lengths)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
backward_windows, forward_windows = self.__read_window(idx)
signals = self.__compute_signals(forward_windows)
signals = torch.tensor(signals, dtype=torch.long)
backward_windows = self.to_list_tensor(backward_windows)
# if self.transform:
# backward_window = self.transform(backward_window)
return backward_windows, signals, forward_windows
@staticmethod
def to_list_tensor(data):
return [torch.tensor(datum, dtype=torch.double) for datum in data]
def __compute_signals(self, forward_windows):
signals = []
for i in range(self.num_symbols):
signals.append([])
for j in range(self.num_parallel_windows):
signals[i].append(self.signal_function(forward_windows[j][i, :]))
return np.concatenate(signals, 1).squeeze(2).transpose()
def __read_window(self, idx):
backward_windows = []
forward_windows = []
for length_idx in range(self.num_parallel_windows):
backward_windows.append(
self.stock_data[idx + self.offset -
self.backward_window_lengths[length_idx]:
idx + self.offset, :].T)
forward_windows.append(self.stock_data[idx + self.offset:
idx + self.offset +
self.forward_window_lengths[length_idx], :].T)
return backward_windows, forward_windows | en | 0.233422 | Sliding Window Dataset [Symbol,NumParWin,WinWidth]. # self.transform = Config.transform # print('dataset shape:', self.stock_data.shape) # if self.transform: # backward_window = self.transform(backward_window) | 2.691761 | 3 |
exercises/ex20.py | ramachandrajr/lpthw | 0 | 6622688 | <reponame>ramachandrajr/lpthw<filename>exercises/ex20.py
# Import argv to get arguments
from sys import argv
# Unpacking
script, input_file = argv
# A function to print the whole file.
def print_all(f):
print f.read()
# read is used to read the whole file.
# Function with a single argument
def rewind(f):
# Sets pointer to the start of the file.
f.seek(0)
# RJ
# =====
def seek(num, f):
f.seek(num)
# function that takes two arguments
def print_a_line(line_count, f):
# Prints first argument and then reads a
# single line from file.
print line_count, f.readline()
# Open a file.
current_file = open(input_file)
print "First let's print the whole file:\n"
# Prints the whole file.
print_all(current_file)
print "Now let's rewind, kind of like a tape."
# Takes the file poiter to the start
rewind(current_file)
print "Let's print three lines:"
# We'll print First line number as 1
current_line = 1
# Prints line number and the line
print_a_line(current_line, current_file)
# Increment current line number
# current_line = 2
current_line =+ 1
# Prints line number and the line
print_a_line(current_line, current_file)
seek(current_file.tell() + 10, current_file)
# Increment current line number
# current_line = 3
current_line =+ 1
# Prints line number and the line
print_a_line(current_line, current_file)
# 5
# ====
# x+=n is used as a short hand notation of x = x + n
| # Import argv to get arguments
from sys import argv
# Unpacking
script, input_file = argv
# A function to print the whole file.
def print_all(f):
print f.read()
# read is used to read the whole file.
# Function with a single argument
def rewind(f):
# Sets pointer to the start of the file.
f.seek(0)
# RJ
# =====
def seek(num, f):
f.seek(num)
# function that takes two arguments
def print_a_line(line_count, f):
# Prints first argument and then reads a
# single line from file.
print line_count, f.readline()
# Open a file.
current_file = open(input_file)
print "First let's print the whole file:\n"
# Prints the whole file.
print_all(current_file)
print "Now let's rewind, kind of like a tape."
# Takes the file poiter to the start
rewind(current_file)
print "Let's print three lines:"
# We'll print First line number as 1
current_line = 1
# Prints line number and the line
print_a_line(current_line, current_file)
# Increment current line number
# current_line = 2
current_line =+ 1
# Prints line number and the line
print_a_line(current_line, current_file)
seek(current_file.tell() + 10, current_file)
# Increment current line number
# current_line = 3
current_line =+ 1
# Prints line number and the line
print_a_line(current_line, current_file)
# 5
# ====
# x+=n is used as a short hand notation of x = x + n | en | 0.839338 | # Import argv to get arguments # Unpacking # A function to print the whole file. # read is used to read the whole file. # Function with a single argument # Sets pointer to the start of the file. # RJ # ===== # function that takes two arguments # Prints first argument and then reads a # single line from file. # Open a file. # Prints the whole file. # Takes the file poiter to the start # We'll print First line number as 1 # Prints line number and the line # Increment current line number # current_line = 2 # Prints line number and the line # Increment current line number # current_line = 3 # Prints line number and the line # 5 # ==== # x+=n is used as a short hand notation of x = x + n | 4.483949 | 4 |
test/CheckTexinfo.py | sanel/ledger | 3,509 | 6622689 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import re
import os
import argparse
from os.path import *
from subprocess import Popen, PIPE
from CheckOptions import CheckOptions
class CheckTexinfo (CheckOptions):
def __init__(self, args):
CheckOptions.__init__(self, args)
self.option_pattern = '^@item\s+--([-A-Za-z]+)'
self.function_pattern = '^@defun\s+([-A-Za-z_]+)'
self.source_file = join(self.source, 'doc', 'ledger3.texi')
self.source_type = 'texinfo'
def find_functions(self, filename):
functions = set()
state_normal = 0
state_function = 1
state = state_normal
function = None
fun_doc = str()
fun_example = False
item_regex = re.compile(self.function_pattern)
itemx_regex = re.compile('^@defunx')
example_regex = re.compile('^@smallexample\s+@c\s+command:')
fix_regex = re.compile('FIX')
comment_regex = re.compile('^\s*@c')
for line in open(filename):
line = line.strip()
if state == state_normal:
match = item_regex.match(line)
if match:
state = state_function
function = match.group(1)
elif state == state_function:
if line == '@end defun':
if function and fun_example and len(fun_doc) and not fix_regex.search(fun_doc):
functions.add(function)
state = state_normal
fun_example = None
fun_doc = str()
elif itemx_regex.match(line):
continue
elif example_regex.match(line):
fun_example = True
elif not comment_regex.match(line):
fun_doc += line
return functions
def find_options(self, filename):
options = set()
state_normal = 0
state_option_table = 1
state = state_normal
option = None
opt_doc = str()
item_regex = re.compile(self.option_pattern)
itemx_regex = re.compile('^@itemx')
fix_regex = re.compile('FIX')
comment_regex = re.compile('^\s*@c')
for line in open(filename):
line = line.strip()
if state == state_normal:
if line == '@ftable @option':
state = state_option_table
elif state == state_option_table:
if line == '@end ftable':
if option and len(opt_doc) and not fix_regex.search(opt_doc):
options.add(option)
state = state_normal
option = None
continue
match = item_regex.match(line)
if match:
if option and len(opt_doc) and not fix_regex.search(opt_doc):
options.add(option)
option = match.group(1)
opt_doc = str()
elif itemx_regex.match(line):
continue
elif not comment_regex.match(line):
opt_doc += line
return options
if __name__ == "__main__":
def getargs():
parser = argparse.ArgumentParser(prog='CheckTexinfo',
description='Check that ledger options are documented in the texinfo manual')
parser.add_argument('-l', '--ledger',
dest='ledger',
type=str,
action='store',
required=True,
help='the path to the ledger executable to test with')
parser.add_argument('-s', '--source',
dest='source',
type=str,
action='store',
required=True,
help='the path to the top level ledger source directory')
return parser.parse_args()
args = getargs()
script = CheckTexinfo(args)
status = script.main()
sys.exit(status)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import re
import os
import argparse
from os.path import *
from subprocess import Popen, PIPE
from CheckOptions import CheckOptions
class CheckTexinfo (CheckOptions):
def __init__(self, args):
CheckOptions.__init__(self, args)
self.option_pattern = '^@item\s+--([-A-Za-z]+)'
self.function_pattern = '^@defun\s+([-A-Za-z_]+)'
self.source_file = join(self.source, 'doc', 'ledger3.texi')
self.source_type = 'texinfo'
def find_functions(self, filename):
functions = set()
state_normal = 0
state_function = 1
state = state_normal
function = None
fun_doc = str()
fun_example = False
item_regex = re.compile(self.function_pattern)
itemx_regex = re.compile('^@defunx')
example_regex = re.compile('^@smallexample\s+@c\s+command:')
fix_regex = re.compile('FIX')
comment_regex = re.compile('^\s*@c')
for line in open(filename):
line = line.strip()
if state == state_normal:
match = item_regex.match(line)
if match:
state = state_function
function = match.group(1)
elif state == state_function:
if line == '@end defun':
if function and fun_example and len(fun_doc) and not fix_regex.search(fun_doc):
functions.add(function)
state = state_normal
fun_example = None
fun_doc = str()
elif itemx_regex.match(line):
continue
elif example_regex.match(line):
fun_example = True
elif not comment_regex.match(line):
fun_doc += line
return functions
def find_options(self, filename):
options = set()
state_normal = 0
state_option_table = 1
state = state_normal
option = None
opt_doc = str()
item_regex = re.compile(self.option_pattern)
itemx_regex = re.compile('^@itemx')
fix_regex = re.compile('FIX')
comment_regex = re.compile('^\s*@c')
for line in open(filename):
line = line.strip()
if state == state_normal:
if line == '@ftable @option':
state = state_option_table
elif state == state_option_table:
if line == '@end ftable':
if option and len(opt_doc) and not fix_regex.search(opt_doc):
options.add(option)
state = state_normal
option = None
continue
match = item_regex.match(line)
if match:
if option and len(opt_doc) and not fix_regex.search(opt_doc):
options.add(option)
option = match.group(1)
opt_doc = str()
elif itemx_regex.match(line):
continue
elif not comment_regex.match(line):
opt_doc += line
return options
if __name__ == "__main__":
def getargs():
parser = argparse.ArgumentParser(prog='CheckTexinfo',
description='Check that ledger options are documented in the texinfo manual')
parser.add_argument('-l', '--ledger',
dest='ledger',
type=str,
action='store',
required=True,
help='the path to the ledger executable to test with')
parser.add_argument('-s', '--source',
dest='source',
type=str,
action='store',
required=True,
help='the path to the top level ledger source directory')
return parser.parse_args()
args = getargs()
script = CheckTexinfo(args)
status = script.main()
sys.exit(status)
| en | 0.352855 | #!/usr/bin/env python # -*- coding: utf-8 -*- | 2.624059 | 3 |
habitica/test_habiticaAPIClient.py | mate3241/github-rpg | 22 | 6622690 | <gh_stars>10-100
from unittest import TestCase
from httmock import HTTMock, all_requests, response
from habitica.habitica_api import HabiticaAPIClient
class TestHabiticaAPIClient(TestCase):
def test_get_existing_todo_task(self):
get_task_url_path = '/api/v3/tasks/alias'
@all_requests
def requests_mock(url, request):
if url.path == get_task_url_path:
return '{"success":true,"data":{"_id":"2b774d70-ec8b-41c1-8967-eb6b13d962ba","text":"test task","alias":"alias","type":"todo","priority":1.5,"completed":false,"id":"2b774d70-ec8b-41c1-8967-eb6b13d962ba"}}'
return '{ "success": false, "NotFound": "The specified task could not be found." }'
with HTTMock(requests_mock):
task = HabiticaAPIClient().get_todo_task('alias')
self.assertEqual(task.type, 'todo', 'task should be todo')
self.assertFalse(task.done, 'task should be done')
self.assertEqual(task.difficulty, 1.5, 'task difficulty should be 1.5')
self.assertEqual(task.alias, 'alias', 'task alias should be alias')
self.assertEqual(task.id, '2b774d70-ec8b-41c1-8967-eb6b13d962ba', 'task id should be "2b774d70-ec8b-41c1-8967-eb6b13d962ba"')
def test_get_existing_todo_task(self):
@all_requests
def requests_mock(url, request):
return response(404, '{ "success": false, "error": "NotFound" }')
with HTTMock(requests_mock):
self.assertIsNone(HabiticaAPIClient().get_todo_task('alias'), 'returned task should be None')
def test_get_existing_todo_task(self):
@all_requests
def requests_mock(url, request):
return response(404, '{ "success": false, "error": "NotFound" }')
with HTTMock(requests_mock):
self.assertIsNone(HabiticaAPIClient().get_todo_task('alias'), 'returned task should be None')
| from unittest import TestCase
from httmock import HTTMock, all_requests, response
from habitica.habitica_api import HabiticaAPIClient
class TestHabiticaAPIClient(TestCase):
def test_get_existing_todo_task(self):
get_task_url_path = '/api/v3/tasks/alias'
@all_requests
def requests_mock(url, request):
if url.path == get_task_url_path:
return '{"success":true,"data":{"_id":"2b774d70-ec8b-41c1-8967-eb6b13d962ba","text":"test task","alias":"alias","type":"todo","priority":1.5,"completed":false,"id":"2b774d70-ec8b-41c1-8967-eb6b13d962ba"}}'
return '{ "success": false, "NotFound": "The specified task could not be found." }'
with HTTMock(requests_mock):
task = HabiticaAPIClient().get_todo_task('alias')
self.assertEqual(task.type, 'todo', 'task should be todo')
self.assertFalse(task.done, 'task should be done')
self.assertEqual(task.difficulty, 1.5, 'task difficulty should be 1.5')
self.assertEqual(task.alias, 'alias', 'task alias should be alias')
self.assertEqual(task.id, '2b774d70-ec8b-41c1-8967-eb6b13d962ba', 'task id should be "2b774d70-ec8b-41c1-8967-eb6b13d962ba"')
def test_get_existing_todo_task(self):
@all_requests
def requests_mock(url, request):
return response(404, '{ "success": false, "error": "NotFound" }')
with HTTMock(requests_mock):
self.assertIsNone(HabiticaAPIClient().get_todo_task('alias'), 'returned task should be None')
def test_get_existing_todo_task(self):
@all_requests
def requests_mock(url, request):
return response(404, '{ "success": false, "error": "NotFound" }')
with HTTMock(requests_mock):
self.assertIsNone(HabiticaAPIClient().get_todo_task('alias'), 'returned task should be None') | none | 1 | 2.32572 | 2 | |
2.linked-list/single-linked-list/reverse-even-nodes/reverse_odd_list.py | tienduy-nguyen/coderust | 0 | 6622691 | class ListNode:
def __init__(self, val, next=None):
self.val = val
self.next = next
def reverse_odd(self, head):
dummy1 = odd = ListNode(0)
dummy2 = even = ListNode(0)
while head:
odd.next = head
even.next = head.next
odd = odd.next
even = even.next
head = head.next.next if even else None
prev = None
current = dummy2.next
while current:
next = current.next
current.next = prev
prev = current
current = next
odd.next = dummy2
return dummy1.next
| class ListNode:
def __init__(self, val, next=None):
self.val = val
self.next = next
def reverse_odd(self, head):
dummy1 = odd = ListNode(0)
dummy2 = even = ListNode(0)
while head:
odd.next = head
even.next = head.next
odd = odd.next
even = even.next
head = head.next.next if even else None
prev = None
current = dummy2.next
while current:
next = current.next
current.next = prev
prev = current
current = next
odd.next = dummy2
return dummy1.next
| none | 1 | 3.778013 | 4 | |
lib/decorator.py | ligulfzhou/PyBaseProject | 2 | 6622692 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pdb
import hashlib
from lib import utils
from tornado.options import options
from sqlalchemy.orm import class_mapper
def model2dict(model):
    """Map a SQLAlchemy model instance to a plain ``{column: value}`` dict.

    Falsy input (e.g. ``None`` from a missed lookup) yields an empty dict.
    """
    if not model:
        return {}
    column_names = class_mapper(model.__class__).columns.keys()
    return {name: getattr(model, name) for name in column_names}
def model_to_dict(func):
    """Decorator: convert the wrapped function's single-model return value
    into a plain dict via :func:`model2dict`.
    """
    @functools.wraps(func)  # preserve the wrapped function's name/docstring
    def wrap(*args, **kwargs):
        return model2dict(func(*args, **kwargs))
    return wrap
def models_to_list(func):
    """Decorator: convert the wrapped function's iterable of model instances
    into a list of plain dicts via :func:`model2dict`.
    """
    @functools.wraps(func)  # preserve the wrapped function's name/docstring
    def wrap(*args, **kwargs):
        return [model2dict(model) for model in func(*args, **kwargs)]
    return wrap
def filter_update_data(func):
    """Decorator: strip falsy values (except the number 0) from the ``data``
    keyword argument before calling *func*.

    Values like ``''``, ``None``, ``[]`` are dropped; ``0`` is kept because
    of the ``value == 0`` escape hatch. NOTE(review): ``False == 0`` in
    Python, so ``False`` survives the filter too -- confirm that is intended.
    """
    def wrap(*args, **kwargs):
        if 'data' in kwargs:
            data = kwargs['data']
            # Dict comprehension instead of dict([...]) -- same filter.
            kwargs['data'] = {key: value for key, value in data.items()
                              if value or value == 0}
        return func(*args, **kwargs)
    return wrap
def check_param_sign(func):
    """Decorator: verify the md5 ``sign`` of all request arguments.

    The expected signature is md5 over ``k1=v1&k2=v2...`` with keys sorted
    alphabetically (``sign`` itself excluded); raises errcode 10003 on a
    missing or mismatched signature. Skipped entirely in debug mode or when
    the request has no arguments.

    NOTE(review): md5 is cryptographically weak and ``!=`` is not a
    constant-time comparison (``hmac.compare_digest`` would be) -- consider
    hardening if this guards anything sensitive.
    """
    def wrap(*args, **kw):
        # Signature checking is disabled while debugging.
        if options.debug:
            return func(*args, **kw)
        self = args[0]  # decorated method's handler instance
        params = self.request.arguments
        if not params:
            return func(*args, **kw)
        # Tornado supplies each argument as a list of bytes; keep the first.
        params = {i: j[0].decode() for i, j in params.items()}
        sign = params.get('sign', '')
        if not sign:
            raise utils.APIError(errcode=10003)
        del params['sign']
        # Canonical form: sorted "key=value" pairs joined with '&'.
        params_str = '&'.join(['%s=%s'%(i, params[i]) for i in sorted(list(params.keys()))])
        if sign != hashlib.md5(params_str.encode()).hexdigest():
            raise utils.APIError(errcode=10003)
        return func(*args, **kw)
    return wrap
def login_required(func):
    """Decorator: reject the request (errcode 40001) unless the current
    user is logged in."""
    def wrap(*args, **kw):
        handler = args[0]
        if not handler.current_user['login']:
            raise utils.APIError(errcode=40001)
        return func(*args, **kw)
    return wrap
def permission_required(roles=()):
    """Decorator factory: require login (errcode 40001) and, when *roles*
    is non-empty, membership of the user's ``role_code`` in *roles*
    (errcode 40003).

    Fix: the default was the mutable ``[]``; an immutable tuple avoids the
    shared-mutable-default pitfall while keeping identical falsy semantics.
    """
    def decorator(func):
        def wrap(*args, **kw):
            self = args[0]
            # 'login' arrives as a string/number; 0 means anonymous.
            login = int(self.current_user['login'])
            if not login:
                raise utils.APIError(errcode=40001)
            if roles and self.current_user['role_code'] not in roles:
                raise utils.APIError(errcode=40003)
            return func(*args, **kw)
        return wrap
    return decorator
def user_required(func):
    """Any authenticated role (user, incharger, personel) may call *func*."""
    allowed = ['user', 'incharger', 'personel']
    return permission_required(roles=allowed)(func)
def fzr_required(func):
    """Only the 'incharger' (person in charge, fzr) role may call *func*."""
    allowed = ['incharger']
    return permission_required(roles=allowed)(func)
def personel_required(func):
    """Only the 'personel' role may call *func*."""
    allowed = ['personel']
    return permission_required(roles=allowed)(func)
def manager_required(func):
    """Management only: both 'personel' and 'incharger' (fzr) roles qualify."""
    allowed = ['personel', 'incharger']
    return permission_required(roles=allowed)(func)
# def forbid_frequent_api_call(seconds=[]):
# def decorator(func):
# def wrap(*args, **kw):
# self = args[0]
# sign = self.get_argument('sign', '')
# user_id = self.get_argument('user_id', 0)
# path = self.request.path
# key = '%s_%s_%s' % (sign, user_id, path)
# if not ctrl.rs.setnx(key, 1, seconds):
# raise utils.APIError(errcode=10001, errmsg='调用太多次')
# return func(*args, **kw)
# return wrap
# return decorator
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pdb
import hashlib
from lib import utils
from tornado.options import options
from sqlalchemy.orm import class_mapper
def model2dict(model):
    """Serialize a SQLAlchemy model instance into a plain ``dict``.

    Falsy input (e.g. ``None``) yields an empty dict.
    """
    if not model:
        return {}
    column_names = class_mapper(model.__class__).columns.keys()
    return {name: getattr(model, name) for name in column_names}
def model_to_dict(func):
    """Decorator: convert the wrapped function's model return value to a dict."""
    def wrap(*args, **kwargs):
        return model2dict(func(*args, **kwargs))
    return wrap
def models_to_list(func):
    """Decorator: convert a sequence of models returned by *func* into a
    list of dicts."""
    def wrap(*args, **kwargs):
        return [model2dict(model) for model in func(*args, **kwargs)]
    return wrap
def filter_update_data(func):
    """Decorator: strip falsy values (except the number 0) from the ``data``
    keyword argument before calling *func*.

    Values like ``''``, ``None``, ``[]`` are dropped; ``0`` is kept because
    of the ``value == 0`` escape hatch. NOTE(review): ``False == 0`` in
    Python, so ``False`` survives the filter too -- confirm that is intended.
    """
    def wrap(*args, **kwargs):
        if 'data' in kwargs:
            data = kwargs['data']
            # Dict comprehension instead of dict([...]) -- same filter.
            kwargs['data'] = {key: value for key, value in data.items()
                              if value or value == 0}
        return func(*args, **kwargs)
    return wrap
def check_param_sign(func):
    """Decorator: verify the md5 ``sign`` of all request arguments.

    The expected signature is md5 over ``k1=v1&k2=v2...`` with keys sorted
    alphabetically (``sign`` itself excluded); raises errcode 10003 on a
    missing or mismatched signature. Skipped entirely in debug mode or when
    the request has no arguments.

    NOTE(review): md5 is cryptographically weak and ``!=`` is not a
    constant-time comparison (``hmac.compare_digest`` would be) -- consider
    hardening if this guards anything sensitive.
    """
    def wrap(*args, **kw):
        # Signature checking is disabled while debugging.
        if options.debug:
            return func(*args, **kw)
        self = args[0]  # decorated method's handler instance
        params = self.request.arguments
        if not params:
            return func(*args, **kw)
        # Tornado supplies each argument as a list of bytes; keep the first.
        params = {i: j[0].decode() for i, j in params.items()}
        sign = params.get('sign', '')
        if not sign:
            raise utils.APIError(errcode=10003)
        del params['sign']
        # Canonical form: sorted "key=value" pairs joined with '&'.
        params_str = '&'.join(['%s=%s'%(i, params[i]) for i in sorted(list(params.keys()))])
        if sign != hashlib.md5(params_str.encode()).hexdigest():
            raise utils.APIError(errcode=10003)
        return func(*args, **kw)
    return wrap
def login_required(func):
    """Decorator: reject the request (errcode 40001) unless the current
    user is logged in."""
    def wrap(*args, **kw):
        handler = args[0]
        if not handler.current_user['login']:
            raise utils.APIError(errcode=40001)
        return func(*args, **kw)
    return wrap
def permission_required(roles=()):
    """Decorator factory: require login (errcode 40001) and, when *roles*
    is non-empty, membership of the user's ``role_code`` in *roles*
    (errcode 40003).

    Fix: the default was the mutable ``[]``; an immutable tuple avoids the
    shared-mutable-default pitfall while keeping identical falsy semantics.
    """
    def decorator(func):
        def wrap(*args, **kw):
            self = args[0]
            # 'login' arrives as a string/number; 0 means anonymous.
            login = int(self.current_user['login'])
            if not login:
                raise utils.APIError(errcode=40001)
            if roles and self.current_user['role_code'] not in roles:
                raise utils.APIError(errcode=40003)
            return func(*args, **kw)
        return wrap
    return decorator
def user_required(func):
    """Any authenticated role (user, incharger, personel) may call *func*."""
    allowed = ['user', 'incharger', 'personel']
    return permission_required(roles=allowed)(func)
def fzr_required(func):
    """Only the 'incharger' (person in charge, fzr) role may call *func*."""
    allowed = ['incharger']
    return permission_required(roles=allowed)(func)
def personel_required(func):
    """Only the 'personel' role may call *func*."""
    allowed = ['personel']
    return permission_required(roles=allowed)(func)
def manager_required(func):
    """Management only: both 'personel' and 'incharger' (fzr) roles qualify."""
    allowed = ['personel', 'incharger']
    return permission_required(roles=allowed)(func)
# def forbid_frequent_api_call(seconds=[]):
# def decorator(func):
# def wrap(*args, **kw):
# self = args[0]
# sign = self.get_argument('sign', '')
# user_id = self.get_argument('user_id', 0)
# path = self.request.path
# key = '%s_%s_%s' % (sign, user_id, path)
# if not ctrl.rs.setnx(key, 1, seconds):
# raise utils.APIError(errcode=10001, errmsg='调用太多次')
# return func(*args, **kw)
# return wrap
# return decorator
| en | 0.138358 | #!/usr/bin/env python # -*- coding: utf-8 -*- manager: 管理 personel / fzr 都有权限 # def forbid_frequent_api_call(seconds=[]): # def decorator(func): # def wrap(*args, **kw): # self = args[0] # sign = self.get_argument('sign', '') # user_id = self.get_argument('user_id', 0) # path = self.request.path # key = '%s_%s_%s' % (sign, user_id, path) # if not ctrl.rs.setnx(key, 1, seconds): # raise utils.APIError(errcode=10001, errmsg='调用太多次') # return func(*args, **kw) # return wrap # return decorator | 2.403484 | 2 |
tests/unittests/test_log_filtering_functions.py | anandagopal6/azure-functions-python-worker | 277 | 6622693 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import typing
from azure_functions_worker import testutils
from azure_functions_worker.testutils import TESTS_ROOT, remove_path
HOST_JSON_TEMPLATE_WITH_LOGLEVEL_INFO = """\
{
"version": "2.0",
"logging": {
"logLevel": {
"default": "Information"
}
},
"functionTimeout": "00:05:00"
}
"""
class TestLogFilteringFunctions(testutils.WebHostTestCase):
    """This class is for testing the logger behavior in Python Worker when
    dealing with customer's log and system's log. Here's a list of expected
    behaviors:
                   local_console   customer_app_insight   functions_kusto_table
    system_log     false           false                  true
    customer_log   true            true                   false
    Please ensure the following unit test cases align with the expectations
    """

    @classmethod
    def setUpClass(cls):
        # Write a host.json that caps the log level at "Information" so the
        # debug-suppression assertions below are meaningful.
        host_json = TESTS_ROOT / cls.get_script_dir() / 'host.json'
        with open(host_json, 'w+') as f:
            f.write(HOST_JSON_TEMPLATE_WITH_LOGLEVEL_INFO)
        super(TestLogFilteringFunctions, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        # Remove the generated host.json so other suites start clean.
        host_json = TESTS_ROOT / cls.get_script_dir() / 'host.json'
        remove_path(host_json)
        super(TestLogFilteringFunctions, cls).tearDownClass()

    @classmethod
    def get_script_dir(cls):
        # Folder containing the function apps exercised by this suite.
        return testutils.UNIT_TESTS_FOLDER / 'log_filtering_functions'

    def test_debug_logging(self):
        # Host output produced by this request is checked by
        # check_log_debug_logging -- presumably matched by name and invoked
        # by WebHostTestCase with the captured output; verify in testutils.
        r = self.webhost.request('GET', 'debug_logging')
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.text, 'OK-debug')

    def check_log_debug_logging(self, host_out: typing.List[str]):
        self.assertIn('logging info', host_out)
        self.assertIn('logging warning', host_out)
        self.assertIn('logging error', host_out)
        # See HOST_JSON_TEMPLATE_WITH_LOGLEVEL_INFO, debug log is disabled
        self.assertNotIn('logging debug', host_out)

    def test_debug_with_user_logging(self):
        r = self.webhost.request('GET', 'debug_user_logging')
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.text, 'OK-user-debug')

    def check_log_debug_with_user_logging(self, host_out: typing.List[str]):
        self.assertIn('logging info', host_out)
        self.assertIn('logging warning', host_out)
        self.assertIn('logging error', host_out)
        # See HOST_JSON_TEMPLATE_WITH_LOGLEVEL_INFO, debug log is disabled
        self.assertNotIn('logging debug', host_out)

    def test_info_with_sdk_logging(self):
        """Invoke a HttpTrigger sdk_logging which contains logging invocation
        via the azure.functions logger. This should be treated as system logs,
        which means the log should not be displayed in local console.
        """
        r = self.webhost.request('GET', 'sdk_logging')
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.text, 'OK-sdk-logger')

    def check_log_info_with_sdk_logging(self, host_out: typing.List[str]):
        # See TestLogFilteringFunctions docstring
        # System log should not be captured in console
        self.assertNotIn('sdk_logger info', host_out)
        self.assertNotIn('sdk_logger warning', host_out)
        self.assertNotIn('sdk_logger error', host_out)
        self.assertNotIn('sdk_logger debug', host_out)

    def test_info_with_sdk_submodule_logging(self):
        """Invoke a HttpTrigger sdk_submodule_logging which contains logging
        invocation via the azure.functions logger. This should be treated as
        system logs.
        """
        r = self.webhost.request('GET', 'sdk_submodule_logging')
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.text, 'OK-sdk-submodule-logging')

    def check_log_info_with_sdk_submodule_logging(self,
                                                  host_out: typing.List[str]):
        # See TestLogFilteringFunctions docstring
        # System log should not be captured in console
        self.assertNotIn('sdk_submodule_logger info', host_out)
        self.assertNotIn('sdk_submodule_logger warning', host_out)
        self.assertNotIn('sdk_submodule_logger error', host_out)
        self.assertNotIn('sdk_submodule_logger debug', host_out)
| # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import typing
from azure_functions_worker import testutils
from azure_functions_worker.testutils import TESTS_ROOT, remove_path
HOST_JSON_TEMPLATE_WITH_LOGLEVEL_INFO = """\
{
"version": "2.0",
"logging": {
"logLevel": {
"default": "Information"
}
},
"functionTimeout": "00:05:00"
}
"""
class TestLogFilteringFunctions(testutils.WebHostTestCase):
    """This class is for testing the logger behavior in Python Worker when
    dealing with customer's log and system's log. Here's a list of expected
    behaviors:
                   local_console   customer_app_insight   functions_kusto_table
    system_log     false           false                  true
    customer_log   true            true                   false
    Please ensure the following unit test cases align with the expectations
    """

    @classmethod
    def setUpClass(cls):
        # Write a host.json that caps the log level at "Information" so the
        # debug-suppression assertions below are meaningful.
        host_json = TESTS_ROOT / cls.get_script_dir() / 'host.json'
        with open(host_json, 'w+') as f:
            f.write(HOST_JSON_TEMPLATE_WITH_LOGLEVEL_INFO)
        super(TestLogFilteringFunctions, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        # Remove the generated host.json so other suites start clean.
        host_json = TESTS_ROOT / cls.get_script_dir() / 'host.json'
        remove_path(host_json)
        super(TestLogFilteringFunctions, cls).tearDownClass()

    @classmethod
    def get_script_dir(cls):
        # Folder containing the function apps exercised by this suite.
        return testutils.UNIT_TESTS_FOLDER / 'log_filtering_functions'

    def test_debug_logging(self):
        # Host output produced by this request is checked by
        # check_log_debug_logging -- presumably matched by name and invoked
        # by WebHostTestCase with the captured output; verify in testutils.
        r = self.webhost.request('GET', 'debug_logging')
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.text, 'OK-debug')

    def check_log_debug_logging(self, host_out: typing.List[str]):
        self.assertIn('logging info', host_out)
        self.assertIn('logging warning', host_out)
        self.assertIn('logging error', host_out)
        # See HOST_JSON_TEMPLATE_WITH_LOGLEVEL_INFO, debug log is disabled
        self.assertNotIn('logging debug', host_out)

    def test_debug_with_user_logging(self):
        r = self.webhost.request('GET', 'debug_user_logging')
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.text, 'OK-user-debug')

    def check_log_debug_with_user_logging(self, host_out: typing.List[str]):
        self.assertIn('logging info', host_out)
        self.assertIn('logging warning', host_out)
        self.assertIn('logging error', host_out)
        # See HOST_JSON_TEMPLATE_WITH_LOGLEVEL_INFO, debug log is disabled
        self.assertNotIn('logging debug', host_out)

    def test_info_with_sdk_logging(self):
        """Invoke a HttpTrigger sdk_logging which contains logging invocation
        via the azure.functions logger. This should be treated as system logs,
        which means the log should not be displayed in local console.
        """
        r = self.webhost.request('GET', 'sdk_logging')
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.text, 'OK-sdk-logger')

    def check_log_info_with_sdk_logging(self, host_out: typing.List[str]):
        # See TestLogFilteringFunctions docstring
        # System log should not be captured in console
        self.assertNotIn('sdk_logger info', host_out)
        self.assertNotIn('sdk_logger warning', host_out)
        self.assertNotIn('sdk_logger error', host_out)
        self.assertNotIn('sdk_logger debug', host_out)

    def test_info_with_sdk_submodule_logging(self):
        """Invoke a HttpTrigger sdk_submodule_logging which contains logging
        invocation via the azure.functions logger. This should be treated as
        system logs.
        """
        r = self.webhost.request('GET', 'sdk_submodule_logging')
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.text, 'OK-sdk-submodule-logging')

    def check_log_info_with_sdk_submodule_logging(self,
                                                  host_out: typing.List[str]):
        # See TestLogFilteringFunctions docstring
        # System log should not be captured in console
        self.assertNotIn('sdk_submodule_logger info', host_out)
        self.assertNotIn('sdk_submodule_logger warning', host_out)
        self.assertNotIn('sdk_submodule_logger error', host_out)
        self.assertNotIn('sdk_submodule_logger debug', host_out)
| en | 0.745005 | # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. \ { "version": "2.0", "logging": { "logLevel": { "default": "Information" } }, "functionTimeout": "00:05:00" } This class is for testing the logger behavior in Python Worker when dealing with customer's log and system's log. Here's a list of expected behaviors: local_console customer_app_insight functions_kusto_table system_log false false true customer_log true true false Please ensure the following unit test cases align with the expectations # See HOST_JSON_TEMPLATE_WITH_LOGLEVEL_INFO, debug log is disabled # See HOST_JSON_TEMPLATE_WITH_LOGLEVEL_INFO, debug log is disabled Invoke a HttpTrigger sdk_logging which contains logging invocation via the azure.functions logger. This should be treated as system logs, which means the log should not be displayed in local console. # See TestLogFilteringFunctions docstring # System log should not be captured in console Invoke a HttpTrigger sdk_submodule_logging which contains logging invocation via the azure.functions logger. This should be treated as system logs. # See TestLogFilteringFunctions docstring # System log should not be captured in console | 2.285564 | 2 |
src/spiegelib/features/fft.py | spiegel-lib/spiegel | 10 | 6622694 | #!/usr/bin/env python
"""
Fast Fourier Transform (FFT)
"""
import numpy as np
from spiegelib import AudioBuffer
from spiegelib.features.features_base import FeaturesBase
import spiegelib.core.utils as utils
class FFT(FeaturesBase):
    """
    Args:
        fft_size (int, optional): Size of FFT to use. If set, the input is
            truncated when it is longer than the FFT size and zero-padded
            when it is shorter. Defaults to None, so the FFT matches the
            input length. (Docstring fix: parameter was misspelled
            ``fft_sze`` and the truncate/pad description was inverted.)
        output (str, optional): output type, must be one of ['complex', 'magnitude',
            'power', 'magnitude_phase', 'power_phase'] Defaults to 'complex'.
        scale_axis (int, tuple, None): When applying scaling, determines which dimensions
            scaling be applied along. Defaults to None, which will flatten results and
            calculate scaling variables on that.
        kwargs: Keyword arguments, see :class:`spiegelib.features.features_base.FeaturesBase`.
    """

    def __init__(self, fft_size=None, output='complex', scale_axis=None, **kwargs):
        """
        Constructor
        """
        # FFT is time summarized, so no time slices are used; default to
        # normalizing the entire result rather than per-bin.
        super().__init__(scale_axis=scale_axis, **kwargs)
        self.fft_size = fft_size

        if output not in utils.spectrum_types:
            raise TypeError('output must be one of %s' % utils.spectrum_types)

        self.output = output
        self.dtype = np.float32
        self.complex_dtype = np.complex64

    def get_features(self, audio):
        """
        Run FFT on audio buffer.

        Args:
            audio (:ref:`AudioBuffer <audio_buffer>`): input audio

        Returns:
            np.ndarray: Results of FFT. Format depends on output type set during\
                construction.
        """
        if not isinstance(audio, AudioBuffer):
            raise TypeError('audio must be AudioBuffer, received %s' % type(audio))

        # Keep only the non-redundant half of the spectrum (real input is
        # conjugate-symmetric). Bug fix: the number of retained bins must be
        # derived from the actual FFT length (self.fft_size when set), not
        # from the raw input length -- otherwise zero-padded or truncated
        # transforms were sliced incorrectly.
        buffer = audio.get_audio()
        n_fft = self.fft_size if self.fft_size is not None else len(buffer)
        n_output = n_fft // 2 + 1

        # Run Fast Fourier Transform
        spectrum = np.fft.fft(buffer, n=self.fft_size)[0:n_output]
        features = utils.convert_spectrum(spectrum, self.output, dtype=self.dtype,
                                          complex_dtype=self.complex_dtype)
        return features
| #!/usr/bin/env python
"""
Fast Fourier Transform (FFT)
"""
import numpy as np
from spiegelib import AudioBuffer
from spiegelib.features.features_base import FeaturesBase
import spiegelib.core.utils as utils
class FFT(FeaturesBase):
    """
    Args:
        fft_size (int, optional): Size of FFT to use. If set, the input is
            truncated when it is longer than the FFT size and zero-padded
            when it is shorter. Defaults to None, so the FFT matches the
            input length. (Docstring fix: parameter was misspelled
            ``fft_sze`` and the truncate/pad description was inverted.)
        output (str, optional): output type, must be one of ['complex', 'magnitude',
            'power', 'magnitude_phase', 'power_phase'] Defaults to 'complex'.
        scale_axis (int, tuple, None): When applying scaling, determines which dimensions
            scaling be applied along. Defaults to None, which will flatten results and
            calculate scaling variables on that.
        kwargs: Keyword arguments, see :class:`spiegelib.features.features_base.FeaturesBase`.
    """

    def __init__(self, fft_size=None, output='complex', scale_axis=None, **kwargs):
        """
        Constructor
        """
        # FFT is time summarized, so no time slices are used; default to
        # normalizing the entire result rather than per-bin.
        super().__init__(scale_axis=scale_axis, **kwargs)
        self.fft_size = fft_size

        if output not in utils.spectrum_types:
            raise TypeError('output must be one of %s' % utils.spectrum_types)

        self.output = output
        self.dtype = np.float32
        self.complex_dtype = np.complex64

    def get_features(self, audio):
        """
        Run FFT on audio buffer.

        Args:
            audio (:ref:`AudioBuffer <audio_buffer>`): input audio

        Returns:
            np.ndarray: Results of FFT. Format depends on output type set during\
                construction.
        """
        if not isinstance(audio, AudioBuffer):
            raise TypeError('audio must be AudioBuffer, received %s' % type(audio))

        # Keep only the non-redundant half of the spectrum (real input is
        # conjugate-symmetric). Bug fix: the number of retained bins must be
        # derived from the actual FFT length (self.fft_size when set), not
        # from the raw input length -- otherwise zero-padded or truncated
        # transforms were sliced incorrectly.
        buffer = audio.get_audio()
        n_fft = self.fft_size if self.fft_size is not None else len(buffer)
        n_output = n_fft // 2 + 1

        # Run Fast Fourier Transform
        spectrum = np.fft.fft(buffer, n=self.fft_size)[0:n_output]
        features = utils.convert_spectrum(spectrum, self.output, dtype=self.dtype,
                                          complex_dtype=self.complex_dtype)
        return features
| en | 0.689745 | #!/usr/bin/env python Fast Fourier Transform (FFT) Args: fft_sze (int, optional): Size of FFT to use. If set, will truncate input if input is smaller than FFT size. If FFT size is larger than input, will zero-pad. Defaults to None, so FFT will be the size of input. output (str, optional): output type, must be one of ['complex', 'magnitude', 'power', 'magnitude_phase', 'power_phase'] Defaults to 'complex'. scale_axis (int, tuple, None): When applying scaling, determines which dimensions scaling be applied along. Defaults to None, which will flatten results and calculate scaling variables on that. kwargs: Keyword arguments, see :class:`spiegelib.features.features_base.FeaturesBase`. Contructor # Setup feature base class -- FFT is time summarized, so no # time slices are used, defaults to normalizing the entire result # as opposed to normalizing across each bin separately. Run FFT on audio buffer. Args: audio (:ref:`AudioBuffer <audio_buffer>`): input audio Returns: np.ndarray: Results of FFT. Format depends on output type set during\ construction. # Determine the length of audio input and determine number of FFT # samples to keep. # Run Fast Fourier Transform | 2.776815 | 3 |
src/file_item.py | ferranferri/exif_classifier | 0 | 6622695 | <reponame>ferranferri/exif_classifier<filename>src/file_item.py
import os
import hashlib
import piexif
import shutil
import logging
class FileItem:
    """A file on disk addressed by an absolute path.

    Supports MD5-based content equality, EXIF creation-date lookup and
    copying with duplicate/collision handling.
    """

    def __init__(self, source_path):
        """Wrap *source_path*; raises ValueError unless it is absolute."""
        if not os.path.isabs(source_path):
            raise ValueError("Path must be absolute!!\n >>" + source_path)
        self.source_path = source_path
        self.dest_path = ''   # filled in by copy_to()
        self.exif_dic = {}    # cached EXIF dict from creation_date()
        logging.basicConfig(level=logging.CRITICAL)
        self.logger = logging.getLogger("FILE_ITEM")

    @staticmethod
    def __file_md5(fname):
        """MD5 hex digest of a file's contents, read in 4 KiB chunks."""
        hash_md5 = hashlib.md5()
        with open(fname, "rb") as f:
            for chunk in iter(lambda: f.read(4096), b""):
                hash_md5.update(chunk)
        return hash_md5.hexdigest()

    def exists(self):
        """True if the source file currently exists."""
        return os.path.exists(self.source_path)

    def full_file_name(self):
        """Absolute source path."""
        return self.source_path

    def name(self):
        """Base name (last path component) of the source file."""
        return self.source_path.split(os.sep)[-1]

    def directory(self):
        """Absolute path of the directory containing the source file."""
        return os.path.abspath(os.path.join(self.source_path, os.pardir))

    def creation_date(self):
        """EXIF DateTimeOriginal (tag 36867) of the source image."""
        self.exif_dic = piexif.load(self.source_path)
        return self.exif_dic['Exif'][36867]

    def equals(self, fi2):
        """Content equality: True when both files hash to the same MD5."""
        a = self.__file_md5(self.source_path)
        assert fi2.exists()
        b = self.__file_md5(fi2.full_file_name())
        return a == b

    def __create_dir_recursive(self, path):
        # NOTE(review): unused -- os.makedirs already creates parents.
        if not os.path.exists(path):
            self.__create_dir_recursive(os.path.join(path, os.pardir))
            os.makedirs(path)

    def get_destination_path(self):
        """Destination chosen by the last copy_to() call ('' before that)."""
        return self.dest_path

    def copy_to(self, path):
        """Copy the source file to *path*.

        A *path* ending in a separator is treated as a directory (the
        original base name is kept); otherwise it is the full target file
        name. If the target exists and is identical, nothing is copied; if
        it differs, a '_1' suffix is appended and the copy is retried.

        Returns (destination_path, file_exists_flag).
        """
        final_path = path
        file_exists = False
        if not os.path.exists(final_path):
            if final_path.endswith('/') or final_path.endswith('\\'):
                # Treat as a directory: create it and keep our file name.
                os.makedirs(final_path)
                shutil.copy(self.source_path, os.path.join(final_path, self.name()))
                self.dest_path = os.path.join(final_path, self.name())
            else:
                final_folder = os.path.abspath(os.path.join(final_path, os.pardir))
                # os.path.basename is portable; the original split on '/'
                # only, which broke Windows paths.
                name = os.path.basename(final_path)
                if not os.path.exists(final_folder):
                    os.makedirs(final_folder)
                shutil.copy(self.source_path, os.path.join(final_folder, name))
                self.dest_path = os.path.join(final_folder, name)
        else:
            file_exists = True
            self.logger.warning("A file with the same name already exists")
            fi = FileItem(final_path)
            if self.equals(fi):
                self.dest_path = fi.full_file_name()
                self.logger.info("The file is the same. No copies are made")
            else:
                # os.path.splitext handles names with zero or several dots;
                # the original .split(".") raised ValueError for those.
                name, extension = os.path.splitext(fi.name())
                directory = fi.directory()
                self.dest_path = os.path.join(directory, name + '_1' + extension)
                self.copy_to(self.dest_path)
                self.logger.warning("Two different files with the same name exists. Changing destination name")
        return self.dest_path, file_exists
| import os
import hashlib
import piexif
import shutil
import logging
class FileItem:
    """A file on disk addressed by an absolute path.

    Supports MD5-based content equality, EXIF creation-date lookup and
    copying with duplicate/collision handling.
    """

    def __init__(self, source_path):
        """Wrap *source_path*; raises ValueError unless it is absolute."""
        if not os.path.isabs(source_path):
            raise ValueError("Path must be absolute!!\n >>" + source_path)
        self.source_path = source_path
        self.dest_path = ''   # filled in by copy_to()
        self.exif_dic = {}    # cached EXIF dict from creation_date()
        logging.basicConfig(level=logging.CRITICAL)
        self.logger = logging.getLogger("FILE_ITEM")

    @staticmethod
    def __file_md5(fname):
        """MD5 hex digest of a file's contents, read in 4 KiB chunks."""
        hash_md5 = hashlib.md5()
        with open(fname, "rb") as f:
            for chunk in iter(lambda: f.read(4096), b""):
                hash_md5.update(chunk)
        return hash_md5.hexdigest()

    def exists(self):
        """True if the source file currently exists."""
        return os.path.exists(self.source_path)

    def full_file_name(self):
        """Absolute source path."""
        return self.source_path

    def name(self):
        """Base name (last path component) of the source file."""
        return self.source_path.split(os.sep)[-1]

    def directory(self):
        """Absolute path of the directory containing the source file."""
        return os.path.abspath(os.path.join(self.source_path, os.pardir))

    def creation_date(self):
        """EXIF DateTimeOriginal (tag 36867) of the source image."""
        self.exif_dic = piexif.load(self.source_path)
        return self.exif_dic['Exif'][36867]

    def equals(self, fi2):
        """Content equality: True when both files hash to the same MD5."""
        a = self.__file_md5(self.source_path)
        assert fi2.exists()
        b = self.__file_md5(fi2.full_file_name())
        return a == b

    def __create_dir_recursive(self, path):
        # NOTE(review): unused -- os.makedirs already creates parents.
        if not os.path.exists(path):
            self.__create_dir_recursive(os.path.join(path, os.pardir))
            os.makedirs(path)

    def get_destination_path(self):
        """Destination chosen by the last copy_to() call ('' before that)."""
        return self.dest_path

    def copy_to(self, path):
        """Copy the source file to *path*.

        A *path* ending in a separator is treated as a directory (the
        original base name is kept); otherwise it is the full target file
        name. If the target exists and is identical, nothing is copied; if
        it differs, a '_1' suffix is appended and the copy is retried.

        Returns (destination_path, file_exists_flag).
        """
        final_path = path
        file_exists = False
        if not os.path.exists(final_path):
            if final_path.endswith('/') or final_path.endswith('\\'):
                # Treat as a directory: create it and keep our file name.
                os.makedirs(final_path)
                shutil.copy(self.source_path, os.path.join(final_path, self.name()))
                self.dest_path = os.path.join(final_path, self.name())
            else:
                final_folder = os.path.abspath(os.path.join(final_path, os.pardir))
                # os.path.basename is portable; the original split on '/'
                # only, which broke Windows paths.
                name = os.path.basename(final_path)
                if not os.path.exists(final_folder):
                    os.makedirs(final_folder)
                shutil.copy(self.source_path, os.path.join(final_folder, name))
                self.dest_path = os.path.join(final_folder, name)
        else:
            file_exists = True
            self.logger.warning("A file with the same name already exists")
            fi = FileItem(final_path)
            if self.equals(fi):
                self.dest_path = fi.full_file_name()
                self.logger.info("The file is the same. No copies are made")
            else:
                # os.path.splitext handles names with zero or several dots;
                # the original .split(".") raised ValueError for those.
                name, extension = os.path.splitext(fi.name())
                directory = fi.directory()
                self.dest_path = os.path.join(directory, name + '_1' + extension)
                self.copy_to(self.dest_path)
                self.logger.warning("Two different files with the same name exists. Changing destination name")
        return self.dest_path, file_exists
gpulimit/gpulimit_core/__init__.py | MendelXu/gpu-limit | 5 | 6622696 | <filename>gpulimit/gpulimit_core/__init__.py
from .run_task_core import task_manage
from .socket_utils import send_all, recv_all, send_all_str, recv_all_str | <filename>gpulimit/gpulimit_core/__init__.py
from .run_task_core import task_manage
from .socket_utils import send_all, recv_all, send_all_str, recv_all_str | none | 1 | 1.143604 | 1 | |
tests/shredding.py | 0x00-0x00/shemutils | 3 | 6622697 | #!/bin/bash
# Shred (securely overwrite and delete) every .txt file under files/.
import os
from shemutils.shred import Shredder

TARGET_EXT = ".txt"

shredder = Shredder()
for root, dirs, files in os.walk("files/"):
    for fname in files:
        base, ext = os.path.splitext(fname)
        if ext != TARGET_EXT:
            continue
        abspath = os.path.join(root, fname)
        # remove=True deletes the file after overwriting; v=True is verbose.
        shredder.shred(abspath, remove=True, v=True)
#!/usr/bin/env python
# Fix: the shebang said #!/bin/bash although this is a Python script.
# Shred (securely overwrite and delete) every .txt file under files/.
import os
from shemutils.shred import Shredder

TARGET_EXT = ".txt"

shredder = Shredder()
for root, dirs, files in os.walk("files/"):
    for fname in files:
        base, ext = os.path.splitext(fname)
        if ext != TARGET_EXT:
            continue
        abspath = os.path.join(root, fname)
        # remove=True deletes the file after overwriting; v=True is verbose.
        shredder.shred(abspath, remove=True, v=True)
| en | 0.272457 | #!/bin/bash | 2.682787 | 3 |
labelling.py | elisakathrin/Financial-Time-Series-Forecasting-using-CNN | 1 | 6622698 | Method 1: Using daily adjusted barriers based on daily volatility
# Label creation if only using daily data
import pandas as pd
def get_daily_volatility(df, span0=20):
    """Exponentially-weighted std of daily close-to-close returns.

    Returns (full_frame, frame_with_NaN_rows_dropped); the leading rows are
    NaN until the EWM window has data.
    """
    returns = df.close.pct_change()  # simple percentage returns
    # 20 days (about one trading month) EWM std as the volatility estimate
    volatility = returns.ewm(span=span0).std().to_frame("volatility")
    return volatility, volatility.dropna()
def adjust_data(df, volatilities_raw):
    """Drop the rows of *df* whose raw volatility is NaN (EWM warm-up)."""
    keep = volatilities_raw['volatility'].notna()
    return df[keep]
def get_barriers(df, volatilities, upper_lower_multipliers):
    """Per-day profit-taking (top) and stop-loss (bottom) price barriers.

    The barrier for day i+1 is close[i] * (1 +/- multiplier * vol[i]);
    row 0 gets a placeholder 0. A non-positive multiplier disables that
    barrier (all-NaN column).

    Bug fix: the disabled-barrier branches referenced an undefined name
    ``prices``; they now build the NaN series from ``df.index``.
    """
    barriers = df[['close', 'high', 'low']].copy()
    barriers['volatility'] = volatilities['volatility']
    top_barrier = [0]
    bottom_barrier = [0]
    for i in range(len(barriers) - 1):
        vol = volatilities.volatility.iloc[i]
        # top (profit taking) barrier
        if upper_lower_multipliers[0] > 0:
            top_barrier.append(barriers.close.iloc[i]
                               + barriers.close.iloc[i] * upper_lower_multipliers[0] * vol)
        else:
            # disabled: all NaN (was: pd.Series(index=prices.index) -> NameError)
            top_barrier = pd.Series(index=df.index)
        # bottom (stop loss) barrier
        if upper_lower_multipliers[1] > 0:
            bottom_barrier.append(barriers.close.iloc[i]
                                  - barriers.close.iloc[i] * upper_lower_multipliers[1] * vol)
        else:
            bottom_barrier = pd.Series(index=df.index)
    barriers['top_barrier'] = top_barrier
    barriers['bottom_barrier'] = bottom_barrier
    return barriers
def get_labels_daily(df, upper_lower_multipliers):
    """Triple-barrier daily labels.

    top_barrier: profit taking limit
    bottom_barrier: stop loss limit
    daily volatility: 20-day EWM estimate used to size the barriers

    The label for day i looks one day ahead: +1 if the next day's high
    reaches the top barrier, -1 if the next day's low reaches the bottom
    barrier, 0 otherwise. The last row has no lookahead and is labelled 0.

    Bug fixes vs. the original:
      * when both barriers were hit on the same day a second label was
        appended for the same row, corrupting the label column;
      * only len(df) - 1 labels were produced, so assigning them to the
        DataFrame raised a length-mismatch error.

    Returns (barriers_df, label_series, fraction_of_double_barrier_days).
    """
    daily_volatility_raw, daily_volatility_clean = get_daily_volatility(df)
    df = adjust_data(df, daily_volatility_raw)
    barriers_df = get_barriers(df=df, volatilities=daily_volatility_clean,
                               upper_lower_multipliers=upper_lower_multipliers)
    labels = []
    nr_double_labels = 0
    for i in range(len(barriers_df.index) - 1):
        hit_top = barriers_df.high.iloc[i + 1] >= barriers_df.top_barrier.iloc[i + 1]
        hit_bottom = barriers_df.low.iloc[i + 1] <= barriers_df.bottom_barrier.iloc[i + 1]
        if hit_top:
            labels.append(1)
        elif hit_bottom:
            labels.append(-1)
        else:
            labels.append(0)
        if hit_top and hit_bottom:
            # Both barriers touched the same day; top wins, but count it.
            nr_double_labels += 1
    labels.append(0)  # final row: no next day to look at
    perc_double_labels = round(nr_double_labels / len(df), 4)
    barriers_df['label'] = labels
    return barriers_df, barriers_df.label, perc_double_labels
| Method 1: Using daily adjusted barriers based on daily volatility
# Label creation if only using daily data
import pandas as pd
def get_daily_volatility(df, span0=20):
    """Exponentially-weighted std of daily close-to-close returns.

    Returns (full_frame, frame_with_NaN_rows_dropped); the leading rows are
    NaN until the EWM window has data.
    """
    returns = df.close.pct_change()  # simple percentage returns
    # 20 days (about one trading month) EWM std as the volatility estimate
    volatility = returns.ewm(span=span0).std().to_frame("volatility")
    return volatility, volatility.dropna()
def adjust_data(df, volatilities_raw):
    """Drop the rows of *df* whose raw volatility is NaN (EWM warm-up)."""
    keep = volatilities_raw['volatility'].notna()
    return df[keep]
def get_barriers(df, volatilities, upper_lower_multipliers):
    """Per-day profit-taking (top) and stop-loss (bottom) price barriers.

    The barrier for day i+1 is close[i] * (1 +/- multiplier * vol[i]);
    row 0 gets a placeholder 0. A non-positive multiplier disables that
    barrier (all-NaN column).

    Bug fix: the disabled-barrier branches referenced an undefined name
    ``prices``; they now build the NaN series from ``df.index``.
    """
    barriers = df[['close', 'high', 'low']].copy()
    barriers['volatility'] = volatilities['volatility']
    top_barrier = [0]
    bottom_barrier = [0]
    for i in range(len(barriers) - 1):
        vol = volatilities.volatility.iloc[i]
        # top (profit taking) barrier
        if upper_lower_multipliers[0] > 0:
            top_barrier.append(barriers.close.iloc[i]
                               + barriers.close.iloc[i] * upper_lower_multipliers[0] * vol)
        else:
            # disabled: all NaN (was: pd.Series(index=prices.index) -> NameError)
            top_barrier = pd.Series(index=df.index)
        # bottom (stop loss) barrier
        if upper_lower_multipliers[1] > 0:
            bottom_barrier.append(barriers.close.iloc[i]
                                  - barriers.close.iloc[i] * upper_lower_multipliers[1] * vol)
        else:
            bottom_barrier = pd.Series(index=df.index)
    barriers['top_barrier'] = top_barrier
    barriers['bottom_barrier'] = bottom_barrier
    return barriers
def get_labels_daily(df, upper_lower_multipliers):
    """Triple-barrier daily labels.

    top_barrier: profit taking limit
    bottom_barrier: stop loss limit
    daily volatility: 20-day EWM estimate used to size the barriers

    The label for day i looks one day ahead: +1 if the next day's high
    reaches the top barrier, -1 if the next day's low reaches the bottom
    barrier, 0 otherwise. The last row has no lookahead and is labelled 0.

    Bug fixes vs. the original:
      * when both barriers were hit on the same day a second label was
        appended for the same row, corrupting the label column;
      * only len(df) - 1 labels were produced, so assigning them to the
        DataFrame raised a length-mismatch error.

    Returns (barriers_df, label_series, fraction_of_double_barrier_days).
    """
    daily_volatility_raw, daily_volatility_clean = get_daily_volatility(df)
    df = adjust_data(df, daily_volatility_raw)
    barriers_df = get_barriers(df=df, volatilities=daily_volatility_clean,
                               upper_lower_multipliers=upper_lower_multipliers)
    labels = []
    nr_double_labels = 0
    for i in range(len(barriers_df.index) - 1):
        hit_top = barriers_df.high.iloc[i + 1] >= barriers_df.top_barrier.iloc[i + 1]
        hit_bottom = barriers_df.low.iloc[i + 1] <= barriers_df.bottom_barrier.iloc[i + 1]
        if hit_top:
            labels.append(1)
        elif hit_bottom:
            labels.append(-1)
        else:
            labels.append(0)
        if hit_top and hit_bottom:
            # Both barriers touched the same day; top wins, but count it.
            nr_double_labels += 1
    labels.append(0)  # final row: no next day to look at
    perc_double_labels = round(nr_double_labels / len(df), 4)
    barriers_df['label'] = labels
    return barriers_df, barriers_df.label, perc_double_labels
| en | 0.706922 | # Label creation if only using daily data # simple percentage returns # 20 days, a month EWM's std as boundary #set it to NaNs #set the bottom barrier #set it to NaNs top_barrier: profit taking limit bottom_barrier:stop loss limit daily_volatiliy: average daily volatility based on 20-day moving average barriers_df: DataFrame containing top and bottom barriers on a per-day base | 3.108845 | 3 |
OSPEXinPython/editTime.py | LAbdrakhmanovaOBSPM/OSPEX-Object-Spectral-Executive-in-Python | 0 | 6622699 | <reponame>LAbdrakhmanovaOBSPM/OSPEX-Object-Spectral-Executive-in-Python
from tkinter import *
from astropy.io import fits
import re
import pandas as pd
import plotting
import background_plot
import warnings
import second
import editInterval
class EditTimeWindow():
    """Toplevel Tk window for selecting background time intervals.

    ``bkgTimeInterv`` and ``defaultTime`` are class-level (shared) state:
    the currently chosen background interval and the StringVar backing the
    "current interval" button label.
    """
    bkgTimeInterv = None
    defaultTime = None

    def __init__(self, energyBin):
        """Build the selection window for the given energy band."""
        self.top1 = Toplevel()
        self.top1.title('Select Time Intervals for Background')
        self.top1.geometry("480x500")
        # Header labels with usage instructions.
        Label(self.top1,
              text="Select Time Intervals for Background",
              fg="black",
              font="Helvetica 8").pack()
        Label(self.top1,
              text="for Energy Band " + str(energyBin),
              fg="black",
              font="Helvetica 8").pack()
        Label(self.top1,
              text="Left/right click to define start/end of intervals",
              fg="black",
              font="Helvetica 8").pack()
        Label(self.top1,
              text="Left double click on a plotted interval for editing options",
              fg="black",
              font="Helvetica 8").pack()
        #########################################################################################
        ## First frame: current-interval display plus edit/delete buttons.
        ##
        self.frame1 = LabelFrame(self.top1, relief=RAISED, borderwidth=2)
        self.frame1.place(relx=0.05, rely=0.17, relheight=0.25, relwidth=0.9)
        """ Current Intervals"""
        self.lblCurrentIntervals = Label(self.frame1, text="Current Intervals:")
        self.lblCurrentIntervals.place(relx=0.01, rely=0.25)
        # Show the shared background interval if one was already chosen.
        timeInterv = str(EditTimeWindow.bkgTimeInterv) if EditTimeWindow.bkgTimeInterv is not None else 'None'  # str(energyBin)
        print('time intervallllllllllll', timeInterv)
        EditTimeWindow.defaultTime = StringVar()
        EditTimeWindow.defaultTime.set(timeInterv)
        self.CurrentInterval = Button(self.frame1, textvariable = EditTimeWindow.defaultTime)
        self.CurrentInterval.place(relx=0.28, rely=0.25)
        self.lblIntervals = Label(self.frame1, text="#Intervals = 1")
        self.lblIntervals.place(relx=0.7, rely=0.25)
        # Disabled until an interval is actually selected in the plot.
        self.DeleteSelectedInterv = Button(self.frame1, text="Delete selected interval", state=DISABLED)
        self.DeleteSelectedInterv.place(relx=0.01, rely=0.7)
        self.EditSelectedInterv = Button(self.frame1, text="Edit selected interval ...", command=lambda: self.editSelectedInterval(self.top1))  #, command=self.editSelectedInterval)
        self.EditSelectedInterv.place(relx=0.35, rely=0.7)
        self.EditInterv = Button(self.frame1, text="Edit interval ...", state=DISABLED)
        self.EditInterv.place(relx=0.7, rely=0.7)
        ##
        ## #################################################################################
        ## Second frame: cursor-selection options and editing parameters.
        self.frame2 = LabelFrame(self.top1, relief=RAISED, borderwidth=2)
        self.frame2.place(relx=0.05, rely=0.42, relheight=0.34, relwidth=0.9)
        self.OptionsCursor = Label(self.frame2, text="Options for cursor selection:")
        self.OptionsCursor.place(relx=0.03, rely=0.07)
        self.ContiguousInterv = Checkbutton(self.frame2, text="Contiguous intervals", state=NORMAL)
        self.ContiguousInterv.place(relx=0.01, rely=0.22)
        # NOTE(review): same label text as ContiguousInterv above — looks
        # like a copy/paste placeholder; confirm the intended option text.
        self.ff = Checkbutton(self.frame2, text="Contiguous intervals", state=NORMAL)
        self.ff.place(relx=0.01, rely=0.37)
        self.EditOptionParam = Label(self.frame2, text="Editing Option Parameters:")
        self.EditOptionParam.place(relx=0.45, rely=0.07)
        self.SubIntervalName = Label(self.frame2, text="# Sub-intervals(N):")
        self.SubIntervalName.place(relx=0.45, rely=0.22)
        self.SubInterval = Entry(self.frame2, width=7)
        self.SubInterval.place(relx=0.72, rely=0.22)
        self.LenghtSubIntervalName = Label(self.frame2, text="Length of Sub-intervals:")
        self.LenghtSubIntervalName.place(relx=0.45, rely=0.44)
        self.LenghtSubInterval = Entry(self.frame2, width=7)
        self.LenghtSubInterval.place(relx=0.77, rely=0.44)
        self.DataBinsName = Label(self.frame2, text="# Data Bins per Sub-interval(M):")
        self.DataBinsName.place(relx=0.45, rely=0.66)
        self.DataBins = Entry(self.frame2, width=7)
        self.DataBins.place(relx=0.86, rely=0.66)
        ##################################################################################
        # Bottom action buttons.
        self.AdjustData = Button(self.top1, text="Adjust to Data boundaries")
        self.AdjustData.place(relx=0.17, rely=0.8)
        self.DisplayCurrent = Button(self.top1, text="Display current")
        self.DisplayCurrent.place(relx=0.52, rely=0.8)
        self.DeleteAll = Button(self.top1, text="Delete all")
        self.DeleteAll.place(relx=0.75, rely=0.8)
        self.Help = Button(self.top1, text="Help")
        self.Help.place(relx=0.27, rely=0.9)
        self.Cancel = Button(self.top1, text="Cancel")
        self.Cancel.place(relx=0.38, rely=0.9)
        self.AcceptClose = Button(self.top1, text="Accept and Close", command=self.quit)
        self.AcceptClose.place(relx=0.54, rely=0.9)

    def quit(self):
        """Close this selection window."""
        self.top1.destroy()

    def editSelectedInterval(self, parent):
        """Open the interval-editing dialog attached to *parent*."""
        editInterval.EditSelectedInterval(parent)
| from tkinter import *
from astropy.io import fits
import re
import pandas as pd
import plotting
import background_plot
import warnings
import second
import editInterval
class EditTimeWindow():
"""Class to create a Select Time Window"""
bkgTimeInterv = None
defaultTime = None
def __init__(self, energyBin):
self.top1 = Toplevel()
self.top1.title('Select Time Intervals for Background')
self.top1.geometry("480x500")
Label(self.top1,
text="Select Time Intervals for Background",
fg="black",
font="Helvetica 8").pack()
Label(self.top1,
text="for Energy Band " + str(energyBin),
fg="black",
font="Helvetica 8").pack()
Label(self.top1,
text="Left/right click to define start/end of intervals",
fg="black",
font="Helvetica 8").pack()
Label(self.top1,
text="Left double click on a plotted interval for editing options",
fg="black",
font="Helvetica 8").pack()
#########################################################################################
## """ First frame """
##
self.frame1 = LabelFrame(self.top1, relief=RAISED, borderwidth=2)
self.frame1.place(relx=0.05, rely=0.17, relheight=0.25, relwidth=0.9)
""" Current Intervals"""
self.lblCurrentIntervals = Label(self.frame1, text="Current Intervals:")
self.lblCurrentIntervals.place(relx=0.01, rely=0.25)
timeInterv = str(EditTimeWindow.bkgTimeInterv) if EditTimeWindow.bkgTimeInterv is not None else 'None' # str(energyBin)
print('time intervallllllllllll', timeInterv)
EditTimeWindow.defaultTime = StringVar()
EditTimeWindow.defaultTime.set(timeInterv)
self.CurrentInterval = Button(self.frame1, textvariable = EditTimeWindow.defaultTime)
self.CurrentInterval.place(relx=0.28, rely=0.25)
self.lblIntervals = Label(self.frame1, text="#Intervals = 1")
self.lblIntervals.place(relx=0.7, rely=0.25)
self.DeleteSelectedInterv = Button(self.frame1, text="Delete selected interval", state=DISABLED)
self.DeleteSelectedInterv.place(relx=0.01, rely=0.7)
self.EditSelectedInterv = Button(self.frame1, text="Edit selected interval ...", command=lambda: self.editSelectedInterval(self.top1)) #, command=self.editSelectedInterval)
self.EditSelectedInterv.place(relx=0.35, rely=0.7)
self.EditInterv = Button(self.frame1, text="Edit interval ...", state=DISABLED)
self.EditInterv.place(relx=0.7, rely=0.7)
##
## #################################################################################
## """ Second frame """
self.frame2 = LabelFrame(self.top1, relief=RAISED, borderwidth=2)
self.frame2.place(relx=0.05, rely=0.42, relheight=0.34, relwidth=0.9)
self.OptionsCursor = Label(self.frame2, text="Options for cursor selection:")
self.OptionsCursor.place(relx=0.03, rely=0.07)
self.ContiguousInterv = Checkbutton(self.frame2, text="Contiguous intervals", state=NORMAL)
self.ContiguousInterv.place(relx=0.01, rely=0.22)
self.ff = Checkbutton(self.frame2, text="Contiguous intervals", state=NORMAL)
self.ff.place(relx=0.01, rely=0.37)
self.EditOptionParam = Label(self.frame2, text="Editing Option Parameters:")
self.EditOptionParam.place(relx=0.45, rely=0.07)
self.SubIntervalName = Label(self.frame2, text="# Sub-intervals(N):")
self.SubIntervalName.place(relx=0.45, rely=0.22)
self.SubInterval = Entry(self.frame2, width=7)
self.SubInterval.place(relx=0.72, rely=0.22)
self.LenghtSubIntervalName = Label(self.frame2, text="Length of Sub-intervals:")
self.LenghtSubIntervalName.place(relx=0.45, rely=0.44)
self.LenghtSubInterval = Entry(self.frame2, width=7)
self.LenghtSubInterval.place(relx=0.77, rely=0.44)
self.DataBinsName = Label(self.frame2, text="# Data Bins per Sub-interval(M):")
self.DataBinsName.place(relx=0.45, rely=0.66)
self.DataBins = Entry(self.frame2, width=7)
self.DataBins.place(relx=0.86, rely=0.66)
##################################################################################
self.AdjustData = Button(self.top1, text="Adjust to Data boundaries")
self.AdjustData.place(relx=0.17, rely=0.8)
self.DisplayCurrent = Button(self.top1, text="Display current")
self.DisplayCurrent.place(relx=0.52, rely=0.8)
self.DeleteAll = Button(self.top1, text="Delete all")
self.DeleteAll.place(relx=0.75, rely=0.8)
self.Help = Button(self.top1, text="Help")
self.Help.place(relx=0.27, rely=0.9)
self.Cancel = Button(self.top1, text="Cancel")
self.Cancel.place(relx=0.38, rely=0.9)
self.AcceptClose = Button(self.top1, text="Accept and Close", command=self.quit)
self.AcceptClose.place(relx=0.54, rely=0.9)
def quit(self):
self.top1.destroy()
def editSelectedInterval(self, parent):
editInterval.EditSelectedInterval(parent) | de | 0.659032 | Class to create a Select Time Window ######################################################################################### ## """ First frame """ ## Current Intervals # str(energyBin) #, command=self.editSelectedInterval) ## ## ################################################################################# ## """ Second frame """ ################################################################################## | 2.721233 | 3 |
examples/nrf24l01_simple_test.py | nRF24/CircuitPython_nRF24L01 | 3 | 6622700 | <gh_stars>1-10
"""
Simple example of using the RF24 class.
"""
import time
import struct
import board
from digitalio import DigitalInOut
# if running this on a ATSAMD21 M0 based board
# from circuitpython_nrf24l01.rf24_lite import RF24
from circuitpython_nrf24l01.rf24 import RF24
# invalid default values for scoping
SPI_BUS, CSN_PIN, CE_PIN = (None, None, None)

try:  # on Linux
    import spidev

    SPI_BUS = spidev.SpiDev()  # for a faster interface on linux
    CSN_PIN = 0  # use CE0 on default bus (even faster than using any pin)
    CE_PIN = DigitalInOut(board.D22)  # using pin gpio22 (BCM numbering)

except ImportError:  # on CircuitPython only
    # using board.SPI() automatically selects the MCU's
    # available SPI pins, board.SCK, board.MOSI, board.MISO
    SPI_BUS = board.SPI()  # init spi bus object

    # change these (digital output) pins accordingly
    CE_PIN = DigitalInOut(board.D4)
    CSN_PIN = DigitalInOut(board.D5)

# initialize the nRF24L01 on the spi bus object
nrf = RF24(SPI_BUS, CSN_PIN, CE_PIN)
# On Linux, csn value is a bit coded
# 0 = bus 0, CE0   # SPI bus 0 is enabled by default
# 10 = bus 1, CE0  # enable SPI bus 1 prior to running this
# 21 = bus 2, CE1  # enable SPI bus 2 prior to running this

# set the Power Amplifier level to -12 dBm since this test example is
# usually run with nRF24L01 transceivers in close proximity
nrf.pa_level = -12

# addresses needs to be in a buffer protocol object (bytearray)
address = [b"1Node", b"2Node"]

# to use different addresses on a pair of radios, we need a variable to
# uniquely identify which address this radio will use to transmit
# 0 uses address[0] to transmit, 1 uses address[1] to transmit
radio_number = bool(
    int(input("Which radio is this? Enter '0' or '1'. Defaults to '0' ") or 0)
)

# set TX address of RX node into the TX pipe
nrf.open_tx_pipe(address[radio_number])  # always uses pipe 0

# set RX address of TX node into an RX pipe
nrf.open_rx_pipe(1, address[not radio_number])  # using pipe 1

# using the python keyword global is bad practice. Instead we'll use a 1 item
# list to store our float number for the payloads sent
payload = [0.0]

# uncomment the following 3 lines for compatibility with TMRh20 library
# nrf.allow_ask_no_ack = False
# nrf.dynamic_payloads = False
# nrf.payload_length = 4
def master(count=5):
    """Transmit ``count`` packets, one per second, each carrying an
    incrementing little-endian float taken from ``payload[0]``."""
    nrf.listen = False  # radio must be in TX mode to send
    remaining = count
    while remaining:
        # Serialize the float payload as 4 little-endian bytes ("<f").
        packet = struct.pack("<f", payload[0])
        t_start = time.monotonic_ns()
        ok = nrf.send(packet)
        t_end = time.monotonic_ns()
        if ok:
            print(
                "Transmission successful! Time to Transmit:",
                f"{(t_end - t_start) / 1000} us. Sent: {payload[0]}"
            )
        else:
            print("send() failed or timed out")
        payload[0] += 0.01
        time.sleep(1)
        remaining -= 1
def slave(timeout=6):
    """Polls the radio and prints the received value. This method expires
    after 6 seconds of no received transmission"""
    nrf.listen = True  # enter RX mode (powers up the radio)
    idle_since = time.monotonic()
    while time.monotonic() - idle_since < timeout:
        if not nrf.available():
            continue
        # Details of the pending payload, captured before draining it.
        payload_size = nrf.any()
        pipe_number = nrf.pipe
        raw = nrf.read()  # pops one payload from the RX FIFO, clears irq_dr
        # "<f" = little-endian float; [:4] drops the zero padding present
        # when dynamic payloads are disabled.
        payload[0] = struct.unpack("<f", raw[:4])[0]
        print(f"Received {payload_size} bytes on pipe {pipe_number}: {payload[0]}")
        idle_since = time.monotonic()  # reset the idle timer
    # Recommended behavior is to idle in TX (standby) mode.
    nrf.listen = False
def set_role():
    """Set the role using stdin stream. Timeout arg for slave() can be
    specified using a space delimiter (e.g. 'R 10' calls `slave(10)`)
    """
    prompt = (
        "*** Enter 'R' for receiver role.\n"
        "*** Enter 'T' for transmitter role.\n"
        "*** Enter 'Q' to quit example.\n"
    )
    tokens = (input(prompt) or "?").split()
    choice = tokens[0].upper()
    if choice.startswith("R"):
        # Optional second token is the receive timeout in seconds.
        slave(*[int(tok) for tok in tokens[1:2]])
        return True
    if choice.startswith("T"):
        # Optional second token is the number of packets to send.
        master(*[int(tok) for tok in tokens[1:2]])
        return True
    if choice.startswith("Q"):
        nrf.power = False
        return False
    print(tokens[0], "is an unrecognized input. Please try again.")
    return set_role()
print(" nRF24L01 Simple test")

if __name__ == "__main__":
    # Run interactively until the user quits or interrupts.
    try:
        while set_role():
            pass  # continue example until 'Q' is entered
    except KeyboardInterrupt:
        print(" Keyboard Interrupt detected. Powering down radio...")
        nrf.power = False
else:
    # Imported as a module: show how to drive the example manually.
    print(" Run slave() on receiver\n Run master() on transmitter")
| """
Simple example of using the RF24 class.
"""
import time
import struct
import board
from digitalio import DigitalInOut
# if running this on a ATSAMD21 M0 based board
# from circuitpython_nrf24l01.rf24_lite import RF24
from circuitpython_nrf24l01.rf24 import RF24
# invalid default values for scoping
SPI_BUS, CSN_PIN, CE_PIN = (None, None, None)
try: # on Linux
import spidev
SPI_BUS = spidev.SpiDev() # for a faster interface on linux
CSN_PIN = 0 # use CE0 on default bus (even faster than using any pin)
CE_PIN = DigitalInOut(board.D22) # using pin gpio22 (BCM numbering)
except ImportError: # on CircuitPython only
# using board.SPI() automatically selects the MCU's
# available SPI pins, board.SCK, board.MOSI, board.MISO
SPI_BUS = board.SPI() # init spi bus object
# change these (digital output) pins accordingly
CE_PIN = DigitalInOut(board.D4)
CSN_PIN = DigitalInOut(board.D5)
# initialize the nRF24L01 on the spi bus object
nrf = RF24(SPI_BUS, CSN_PIN, CE_PIN)
# On Linux, csn value is a bit coded
# 0 = bus 0, CE0 # SPI bus 0 is enabled by default
# 10 = bus 1, CE0 # enable SPI bus 2 prior to running this
# 21 = bus 2, CE1 # enable SPI bus 1 prior to running this
# set the Power Amplifier level to -12 dBm since this test example is
# usually run with nRF24L01 transceivers in close proximity
nrf.pa_level = -12
# addresses needs to be in a buffer protocol object (bytearray)
address = [b"1Node", b"2Node"]
# to use different addresses on a pair of radios, we need a variable to
# uniquely identify which address this radio will use to transmit
# 0 uses address[0] to transmit, 1 uses address[1] to transmit
radio_number = bool(
int(input("Which radio is this? Enter '0' or '1'. Defaults to '0' ") or 0)
)
# set TX address of RX node into the TX pipe
nrf.open_tx_pipe(address[radio_number]) # always uses pipe 0
# set RX address of TX node into an RX pipe
nrf.open_rx_pipe(1, address[not radio_number]) # using pipe 1
# using the python keyword global is bad practice. Instead we'll use a 1 item
# list to store our float number for the payloads sent
payload = [0.0]
# uncomment the following 3 lines for compatibility with TMRh20 library
# nrf.allow_ask_no_ack = False
# nrf.dynamic_payloads = False
# nrf.payload_length = 4
def master(count=5): # count = 5 will only transmit 5 packets
"""Transmits an incrementing integer every second"""
nrf.listen = False # ensures the nRF24L01 is in TX mode
while count:
# use struct.pack to packetize your data
# into a usable payload
buffer = struct.pack("<f", payload[0])
# "<f" means a single little endian (4 byte) float value.
start_timer = time.monotonic_ns() # start timer
result = nrf.send(buffer)
end_timer = time.monotonic_ns() # end timer
if not result:
print("send() failed or timed out")
else:
print(
"Transmission successful! Time to Transmit:",
f"{(end_timer - start_timer) / 1000} us. Sent: {payload[0]}"
)
payload[0] += 0.01
time.sleep(1)
count -= 1
def slave(timeout=6):
"""Polls the radio and prints the received value. This method expires
after 6 seconds of no received transmission"""
nrf.listen = True # put radio into RX mode and power up
start = time.monotonic()
while (time.monotonic() - start) < timeout:
if nrf.available():
# grab information about the received payload
payload_size, pipe_number = (nrf.any(), nrf.pipe)
# fetch 1 payload from RX FIFO
buffer = nrf.read() # also clears nrf.irq_dr status flag
# expecting a little endian float, thus the format string "<f"
# buffer[:4] truncates padded 0s if dynamic payloads are disabled
payload[0] = struct.unpack("<f", buffer[:4])[0]
# print details about the received packet
print(f"Received {payload_size} bytes on pipe {pipe_number}: {payload[0]}")
start = time.monotonic()
# recommended behavior is to keep in TX mode while idle
nrf.listen = False # put the nRF24L01 is in TX mode
def set_role():
"""Set the role using stdin stream. Timeout arg for slave() can be
specified using a space delimiter (e.g. 'R 10' calls `slave(10)`)
"""
user_input = (
input(
"*** Enter 'R' for receiver role.\n"
"*** Enter 'T' for transmitter role.\n"
"*** Enter 'Q' to quit example.\n"
)
or "?"
)
user_input = user_input.split()
if user_input[0].upper().startswith("R"):
slave(*[int(x) for x in user_input[1:2]])
return True
if user_input[0].upper().startswith("T"):
master(*[int(x) for x in user_input[1:2]])
return True
if user_input[0].upper().startswith("Q"):
nrf.power = False
return False
print(user_input[0], "is an unrecognized input. Please try again.")
return set_role()
print(" nRF24L01 Simple test")
if __name__ == "__main__":
try:
while set_role():
pass # continue example until 'Q' is entered
except KeyboardInterrupt:
print(" Keyboard Interrupt detected. Powering down radio...")
nrf.power = False
else:
print(" Run slave() on receiver\n Run master() on transmitter") | en | 0.80548 | Simple example of using the RF24 class. # if running this on a ATSAMD21 M0 based board # from circuitpython_nrf24l01.rf24_lite import RF24 # invalid default values for scoping # on Linux # for a faster interface on linux # use CE0 on default bus (even faster than using any pin) # using pin gpio22 (BCM numbering) # on CircuitPython only # using board.SPI() automatically selects the MCU's # available SPI pins, board.SCK, board.MOSI, board.MISO # init spi bus object # change these (digital output) pins accordingly # initialize the nRF24L01 on the spi bus object # On Linux, csn value is a bit coded # 0 = bus 0, CE0 # SPI bus 0 is enabled by default # 10 = bus 1, CE0 # enable SPI bus 2 prior to running this # 21 = bus 2, CE1 # enable SPI bus 1 prior to running this # set the Power Amplifier level to -12 dBm since this test example is # usually run with nRF24L01 transceivers in close proximity # addresses needs to be in a buffer protocol object (bytearray) # to use different addresses on a pair of radios, we need a variable to # uniquely identify which address this radio will use to transmit # 0 uses address[0] to transmit, 1 uses address[1] to transmit # set TX address of RX node into the TX pipe # always uses pipe 0 # set RX address of TX node into an RX pipe # using pipe 1 # using the python keyword global is bad practice. Instead we'll use a 1 item # list to store our float number for the payloads sent # uncomment the following 3 lines for compatibility with TMRh20 library # nrf.allow_ask_no_ack = False # nrf.dynamic_payloads = False # nrf.payload_length = 4 # count = 5 will only transmit 5 packets Transmits an incrementing integer every second # ensures the nRF24L01 is in TX mode # use struct.pack to packetize your data # into a usable payload # "<f" means a single little endian (4 byte) float value. # start timer # end timer Polls the radio and prints the received value. 
This method expires after 6 seconds of no received transmission # put radio into RX mode and power up # grab information about the received payload # fetch 1 payload from RX FIFO # also clears nrf.irq_dr status flag # expecting a little endian float, thus the format string "<f" # buffer[:4] truncates padded 0s if dynamic payloads are disabled # print details about the received packet # recommended behavior is to keep in TX mode while idle # put the nRF24L01 is in TX mode Set the role using stdin stream. Timeout arg for slave() can be specified using a space delimiter (e.g. 'R 10' calls `slave(10)`) # continue example until 'Q' is entered | 2.990403 | 3 |
tfx/tools/cli/pip_utils.py | Anon-Artist/tfx | 1,813 | 6622701 | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for pip commands."""
import subprocess
import sys
from typing import List
def get_package_names() -> List[str]:
  """Returns the names of packages installed in the current environment.

  Runs ``pip freeze --local`` and extracts the package name from each
  ``name==version`` line, skipping blank lines, comments and lines without
  a ``==`` pin (e.g. editable ``-e ...`` installs).
  """
  freeze_output = subprocess.check_output(
      [sys.executable, '-m', 'pip', 'freeze', '--local']).decode('utf-8')
  result = []
  # Iterate per line; the previous str.split() also split on spaces WITHIN
  # a line (e.g. "-e git+... #egg=name"), shredding it into bogus tokens.
  for line in freeze_output.splitlines():
    line = line.strip()
    if not line or line.startswith('#') or '==' not in line:
      continue
    result.append(line.split('==')[0])
  return result
| # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for pip commands."""
import subprocess
import sys
from typing import List
def get_package_names() -> List[str]:
freeze_output = subprocess.check_output(
[sys.executable, '-m', 'pip', 'freeze', '--local']).decode('utf-8')
result = []
for line in freeze_output.split():
line = line.lstrip()
if not line or line[0] == '#' or '==' not in line:
continue
result.append(line.split('==')[0])
return result
| en | 0.855917 | # Copyright 2020 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Wrapper for pip commands. | 2.090432 | 2 |
lmdb/home/views.py | huzaifafaruqui/Movies-Website | 11 | 6622702 | <filename>lmdb/home/views.py
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.core.urlresolvers import reverse
from .models import Movie
from django.contrib.auth import logout
from .forms import *
from django.contrib import messages
from django.core.mail import send_mail
# Create your views here.
from django.shortcuts import render
def advanced(request):
    """Advanced search view: filter movies by title substring, actors,
    genres and a minimum average rating, then render the matches.
    Falls back to rendering the (possibly blank) form on GET/invalid POST."""
    form = SearchForm(request.POST or None)
    if form.is_valid():
        f1 = form.cleaned_data['title'].lower()
        f2 = form.cleaned_data['actors']
        min_rating = form.cleaned_data['minr']
        genres = form.cleaned_data['genres']
        # Pre-compute pks meeting the rating floor in Python —
        # average_rating is presumably computed on the model (not a DB
        # column), so it cannot be filtered in the ORM query; confirm.
        m2 = [x.pk for x in Movie.objects.all() if x.average_rating >= min_rating]
        # (title matches OR actor matches) AND genre matches AND rating floor.
        movies = (Movie.objects.filter(title__icontains=f1) | Movie.objects.filter(actors__in=f2)).filter(
            genre__in=genres).filter(pk__in=m2)
        context = {'movies': movies}
        return render(request, 'search_results.html', context)
    context = {
        'form': form
    }
    return render(request, "advanced_search.html", context)
def search(request):
    """Simple title search.

    Expects a ``q`` GET parameter. Renders the matching movies when a
    non-empty query is given; otherwise redirects home (with an error
    message when the parameter was present but blank).

    Fixes a fall-through in the original: when ``q`` was absent from the
    query string, no branch returned and the view returned ``None``, which
    is an invalid Django response.
    """
    q = request.GET.get('q')
    if q:
        movies = Movie.objects.filter(title__icontains=q)
        return render(request, 'search_results.html',
                      {'movies': movies, 'query': q})
    if 'q' in request.GET:
        # Parameter supplied but empty: prompt the user to type something.
        messages.error(request, "Enter something!")
    return HttpResponseRedirect('/')
def register_page(request):
    """Handle user registration.

    On a valid POST: create the user, send a welcome e-mail and redirect
    home. On GET (or invalid POST) render the registration form.

    Fixes a defect in the original: trailing commas after the username and
    password assignments turned both values into 1-tuples, corrupting the
    created account's credentials.
    """
    if request.method == 'POST':
        form = RegistrationForm(request.POST)
        if form.is_valid():
            email = form.cleaned_data['email']
            username = form.cleaned_data['username']
            # NOTE(review): '<PASSWORD>' looks like a redacted field key —
            # restore the real cleaned_data key for the password field.
            password = form.cleaned_data['<PASSWORD>']
            User.objects.create_user(username=username, password=password, email=email,
                                     first_name=form.cleaned_data['name'])
            send_mail(subject='Welcome to LMDB', message='Greetings!', from_email='<EMAIL>',
                      recipient_list=[email])
            return HttpResponseRedirect('/')
    else:
        form = RegistrationForm()
    return render(request, 'registration/register.html', ({'form': form}))
def logout_page(request):
    """Log the current user out and redirect to the home page."""
    logout(request)
    return HttpResponseRedirect('/')
def base(request):
    """Home page: render every movie together with the current user."""
    return render(request, "index.html", {
        'movies': Movie.objects.all(),
        'user': request.user,
    })
def movie_page(request, pk):
    """Detail page for a single movie; 404s when the id is unknown."""
    movie = get_object_or_404(Movie, id=pk)
    # avg_stars = movie.objects.annotate(Avg('rating__stars'))
    context = {
        'movie': movie,
        # 'stars':avg_stars
    }
    return render(request, "movie.html", context)
def list_comments(request, pk):
    """Render every comment for one movie (all_comments.html).

    NOTE(review): body is identical to movie_page except for the template —
    consider sharing a helper if both views keep evolving together.
    """
    movie = get_object_or_404(Movie, id=pk)
    # avg_stars = movie.objects.annotate(Avg('rating__stars'))
    context = {
        'movie': movie,
        # 'stars':avg_stars
    }
    return render(request, "all_comments.html", context)
def add_comment_to_movie(request, id):
    """Attach a comment from the logged-in user to a movie.

    Enforces one comment per user per movie. On GET shows the comment
    form; on a valid POST saves the comment and redirects back to the
    movie's detail page.
    """
    movie = get_object_or_404(Movie, id=id)
    # Reject a second comment by the same user on the same movie.
    x = Comment.objects.filter(movie=movie, author=request.user)
    if len(x) > 0:
        messages.error(request, "User already commented!")
        return HttpResponseRedirect('/movie/%d/' % movie.id)
    if request.method == "POST":
        form = CommentForm(request.POST)
        if form.is_valid():
            # Defer saving so the movie/author foreign keys can be set first.
            comment = form.save(commit=False)
            comment.movie = movie
            comment.author = request.user
            comment.save()
            return HttpResponseRedirect('/movie/%d/' % movie.id)
    else:
        form = CommentForm()
    return render(request, 'add_comment_to_movie.html', {'form': form})
| <filename>lmdb/home/views.py
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.core.urlresolvers import reverse
from .models import Movie
from django.contrib.auth import logout
from .forms import *
from django.contrib import messages
from django.core.mail import send_mail
# Create your views here.
from django.shortcuts import render
def advanced(request):
form = SearchForm(request.POST or None)
if form.is_valid():
f1 = form.cleaned_data['title'].lower()
f2 = form.cleaned_data['actors']
min_rating = form.cleaned_data['minr']
genres = form.cleaned_data['genres']
m2 = [x.pk for x in Movie.objects.all() if x.average_rating >= min_rating]
movies = (Movie.objects.filter(title__icontains=f1) | Movie.objects.filter(actors__in=f2)).filter(
genre__in=genres).filter(pk__in=m2)
context = {'movies': movies}
return render(request, 'search_results.html', context)
context = {
'form': form
}
return render(request, "advanced_search.html", context)
def search(request):
error = False
if 'q' in request.GET:
q = request.GET['q']
if not q:
error = True
else:
movies = Movie.objects.filter(title__icontains=q)
return render(request, 'search_results.html',
{'movies': movies, 'query': q})
if error is True:
messages.error(request, "Enter something!")
return HttpResponseRedirect('/')
def register_page(request):
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
username = form.cleaned_data['username'],
password = form.cleaned_data['<PASSWORD>'],
User.objects.create_user(username=username, password=password, email=email,
first_name=form.cleaned_data['name'])
send_mail(subject='Welcome to LMDB', message='Greetings!', from_email='<EMAIL>',
recipient_list=[email])
return HttpResponseRedirect('/')
else:
form = RegistrationForm()
return render(request, 'registration/register.html', ({'form': form}))
def logout_page(request):
logout(request)
return HttpResponseRedirect('/')
def base(request):
movies = Movie.objects.all()
context = {
'movies': movies,
'user': request.user
}
return render(request, "index.html", context)
def movie_page(request, pk):
movie = get_object_or_404(Movie, id=pk)
# avg_stars = movie.objects.annotate(Avg('rating__stars'))
context = {
'movie': movie,
# 'stars':avg_stars
}
return render(request, "movie.html", context)
def list_comments(request, pk):
movie = get_object_or_404(Movie, id=pk)
# avg_stars = movie.objects.annotate(Avg('rating__stars'))
context = {
'movie': movie,
# 'stars':avg_stars
}
return render(request, "all_comments.html", context)
def add_comment_to_movie(request, id):
movie = get_object_or_404(Movie, id=id)
x = Comment.objects.filter(movie=movie, author=request.user)
if len(x) > 0:
messages.error(request, "User already commented!")
return HttpResponseRedirect('/movie/%d/' % movie.id)
if request.method == "POST":
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.movie = movie
comment.author = request.user
comment.save()
return HttpResponseRedirect('/movie/%d/' % movie.id)
else:
form = CommentForm()
return render(request, 'add_comment_to_movie.html', {'form': form})
| en | 0.494022 | # Create your views here. # avg_stars = movie.objects.annotate(Avg('rating__stars')) # 'stars':avg_stars # avg_stars = movie.objects.annotate(Avg('rating__stars')) # 'stars':avg_stars | 2.235627 | 2 |
tests/test_clip_version.py | gowithfloat/clippy | 2 | 6622703 | <filename>tests/test_clip_version.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for clip.py
"""
import unittest
from clippy import begin_clippy
__version__ = "0.0.1"
class TestClip(unittest.TestCase):
    """Tests for the ``--version`` handling of ``begin_clippy``."""

    def test_begin_version(self):
        """``--version`` should terminate with exit status 0."""
        with self.assertRaises(SystemExit) as err:
            begin_clippy(["some_module", "--version"])
        self.assertEqual(err.exception.code, 0)

if __name__ == "__main__":
    unittest.main()
| <filename>tests/test_clip_version.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for clip.py
"""
import unittest
from clippy import begin_clippy
__version__ = "0.0.1"
class TestClip(unittest.TestCase):
def test_begin_version(self):
with self.assertRaises(SystemExit) as err:
begin_clippy(["some_module", "--version"])
self.assertEqual(err.exception.code, 0)
if __name__ == "__main__":
unittest.main()
| en | 0.499875 | #!/usr/bin/env python # -*- coding: utf-8 -*- Tests for clip.py | 2.362566 | 2 |
gdal/swig/python/scripts/tests/gdal2tiles/test_reproject_dataset.py | jpapadakis/gdal | 18 | 6622704 | from unittest import mock, TestCase
from osgeo import gdal, osr
import gdal2tiles
class AttrDict(dict):
    """A dict whose entries are also reachable as attributes."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Pointing __dict__ at the mapping itself makes attribute access
        # and item access share the same storage.
        self.__dict__ = self
class ReprojectDatasetTest(TestCase):
    """Unit tests for ``gdal2tiles.reproject_dataset``."""

    def setUp(self):
        """Prepare default gdal2tiles options and the two SRS fixtures."""
        self.DEFAULT_OPTIONS = {
            'verbose': True,
            'resampling': 'near',
            'title': '',
            'url': '',
        }
        self.DEFAULT_ATTRDICT_OPTIONS = AttrDict(self.DEFAULT_OPTIONS)
        # Web-mercator (EPSG:3857) and geodetic WGS84 (EPSG:4326) references.
        self.mercator_srs = osr.SpatialReference()
        self.mercator_srs.ImportFromEPSG(3857)
        self.geodetic_srs = osr.SpatialReference()
        self.geodetic_srs.ImportFromEPSG(4326)

    def test_raises_if_no_from_or_to_srs(self):
        """Missing either the source or destination SRS raises GDALError."""
        with self.assertRaises(gdal2tiles.GDALError):
            gdal2tiles.reproject_dataset(None, None, self.mercator_srs)
        with self.assertRaises(gdal2tiles.GDALError):
            gdal2tiles.reproject_dataset(None, self.mercator_srs, None)

    def test_returns_dataset_unchanged_if_in_destination_srs_and_no_gcps(self):
        """A dataset already in the target SRS without GCPs is returned as-is."""
        from_ds = mock.MagicMock()
        from_ds.GetGCPCount = mock.MagicMock(return_value=0)
        to_ds = gdal2tiles.reproject_dataset(from_ds, self.mercator_srs, self.mercator_srs)
        self.assertEqual(from_ds, to_ds)

    @mock.patch('gdal2tiles.gdal', spec=gdal)
    def test_returns_warped_vrt_dataset_when_from_srs_different_from_to_srs(self, mock_gdal):
        """Differing SRS must route through gdal.AutoCreateWarpedVRT with both WKTs."""
        mock_gdal.AutoCreateWarpedVRT = mock.MagicMock(spec=gdal.Dataset)
        from_ds = mock.MagicMock(spec=gdal.Dataset)
        from_ds.GetGCPCount = mock.MagicMock(return_value=0)
        gdal2tiles.reproject_dataset(from_ds, self.mercator_srs, self.geodetic_srs)
        mock_gdal.AutoCreateWarpedVRT.assert_called_with(from_ds,
                                                         self.mercator_srs.ExportToWkt(),
                                                         self.geodetic_srs.ExportToWkt())
# --- duplicate copy of test_reproject_dataset.py follows (concatenation artifact) ---
from unittest import mock, TestCase
from osgeo import gdal, osr
import gdal2tiles
class AttrDict(dict):
    """A dict whose keys can also be read and set as attributes."""
    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        # Share storage between the mapping and the instance namespace.
        self.__dict__ = self
class ReprojectDatasetTest(TestCase):
def setUp(self):
self.DEFAULT_OPTIONS = {
'verbose': True,
'resampling': 'near',
'title': '',
'url': '',
}
self.DEFAULT_ATTRDICT_OPTIONS = AttrDict(self.DEFAULT_OPTIONS)
self.mercator_srs = osr.SpatialReference()
self.mercator_srs.ImportFromEPSG(3857)
self.geodetic_srs = osr.SpatialReference()
self.geodetic_srs.ImportFromEPSG(4326)
def test_raises_if_no_from_or_to_srs(self):
with self.assertRaises(gdal2tiles.GDALError):
gdal2tiles.reproject_dataset(None, None, self.mercator_srs)
with self.assertRaises(gdal2tiles.GDALError):
gdal2tiles.reproject_dataset(None, self.mercator_srs, None)
def test_returns_dataset_unchanged_if_in_destination_srs_and_no_gcps(self):
from_ds = mock.MagicMock()
from_ds.GetGCPCount = mock.MagicMock(return_value=0)
to_ds = gdal2tiles.reproject_dataset(from_ds, self.mercator_srs, self.mercator_srs)
self.assertEqual(from_ds, to_ds)
@mock.patch('gdal2tiles.gdal', spec=gdal)
def test_returns_warped_vrt_dataset_when_from_srs_different_from_to_srs(self, mock_gdal):
mock_gdal.AutoCreateWarpedVRT = mock.MagicMock(spec=gdal.Dataset)
from_ds = mock.MagicMock(spec=gdal.Dataset)
from_ds.GetGCPCount = mock.MagicMock(return_value=0)
gdal2tiles.reproject_dataset(from_ds, self.mercator_srs, self.geodetic_srs)
mock_gdal.AutoCreateWarpedVRT.assert_called_with(from_ds,
self.mercator_srs.ExportToWkt(),
self.geodetic_srs.ExportToWkt())
# --- micropsi_server/tests/test_json_api.py (Doik/micropsi2) ---
# (extraction metadata rows converted to comments)
import pytest
import json
import re
def assert_success(response):
    """Assert that a JSON-RPC response reports success and carries data."""
    body = response.json_body
    assert body['status'] == 'success'
    assert 'data' in body
def assert_failure(response):
    """Assert that a JSON-RPC response reports an error and carries data."""
    body = response.json_body
    assert body['status'] == 'error'
    assert 'data' in body
def test_generate_uid(app):
    """generate_uid returns a lowercase hexadecimal identifier."""
    response = app.get_json('/rpc/generate_uid')
    assert_success(response)
    # re.match anchors at the start only, so this just checks the uid
    # begins with hex characters.
    assert re.match('[a-f0-9]+', response.json_body['data']) is not None
def test_create_and_invalidate_auth_token(app):
    """A created auth token is registered in the user's sessions and is
    removed again when invalidated."""
    response = app.post_json('/rpc/create_auth_token', params={
        "user": "Pytest User",
        "password": "<PASSWORD>"  # NOTE(review): anonymised placeholder -- restore the real test password
    })
    assert_success(response)
    from micropsi_server.micropsi_app import usermanager
    token = response.json_body['data']
    # The token must be tracked server-side as an active session.
    assert token in usermanager.users['Pytest User']['sessions']
    response = app.post_json('/rpc/invalidate_auth_token', params={
        "token": token
    })
    assert_success(response)
    assert token not in usermanager.users['Pytest User']['sessions']
def test_get_nodenet_metadata(app, test_nodenet, node):
    """Metadata contains the type registries and nodespaces, but not the
    (potentially large) node/link payloads themselves."""
    response = app.get_json('/rpc/get_nodenet_metadata?nodenet_uid=%s' % test_nodenet)
    assert_success(response)
    data = response.json_body['data']
    assert 'nodetypes' in data
    assert 'native_modules' in data
    assert 'engine' in data
    assert 'nodespaces' in data
    # Nodes and links are deliberately excluded from the metadata call.
    assert 'nodes' not in data
    assert 'links' not in data
    assert data['current_step'] == 0
    assert data['uid'] == test_nodenet
def test_new_nodenet(app, engine):
app.set_auth()
response = app.post_json('/rpc/new_nodenet', params={
'name': 'FooBarTestNet',
'engine': engine
})
assert_success(response)
uid = response.json_body['data']
assert uid is not None
response = app.get_json('/rpc/get_nodenet_metadata?nodenet_uid=%s' % uid)
assert_success(response)
assert response.json_body['data']['name'] == 'FooBarTestNet'
assert response.json_body['data']['engine'] == engine
def test_get_available_nodenets(app, test_nodenet):
response = app.get_json('/rpc/get_available_nodenets?user_id=Pytest User')
assert_success(response)
assert test_nodenet in response.json_body['data']
def test_delete_nodenet(app, test_nodenet):
    """Deleting a nodenet removes it from the list of available nodenets."""
    app.set_auth()
    payload = {"nodenet_uid": test_nodenet}
    response = app.post_json('/rpc/delete_nodenet', params=payload)
    assert_success(response)
    listing = app.get_json('/rpc/get_available_nodenets?user_id=Pytest User')
    assert test_nodenet not in listing.json_body['data']
def test_set_nodenet_properties(app, test_nodenet, default_world):
app.set_auth()
response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, nodenet_name="new_name", worldadapter="Default", world_uid=default_world))
assert_success(response)
response = app.get_json('/rpc/get_nodenet_metadata?nodenet_uid=%s' % test_nodenet)
data = response.json_body['data']
assert data['name'] == 'new_name'
assert data['worldadapter'] == 'Default'
def test_set_node_state(app, test_nodenet, resourcepath):
import os
app.set_auth()
# create a native module:
nodetype_file = os.path.join(resourcepath, 'nodetypes', 'Test', 'testnode.py')
with open(nodetype_file, 'w') as fp:
fp.write("""nodetype_definition = {
"name": "Testnode",
"slottypes": ["gen", "foo", "bar"],
"nodefunction_name": "testnodefunc",
"gatetypes": ["gen", "foo", "bar"],
"symbol": "t"}
def testnodefunc(netapi, node=None, **prams):
return 17
""")
response = app.post_json('/rpc/reload_code')
assert_success(response)
response = app.post_json('/rpc/add_node', params={
'nodenet_uid': test_nodenet,
'type': 'Testnode',
'position': [23, 23, 12],
'nodespace': None,
'name': ''
})
assert_success(response)
uid = response.json_body['data']
response = app.post_json('/rpc/set_node_state', params={
'nodenet_uid': test_nodenet,
'node_uid': uid,
'state': {'foo': 'bar'}
})
assert_success(response)
response = app.post_json('/rpc/get_nodes', params={"nodenet_uid": test_nodenet})
assert response.json_body['data']['nodes'][uid]['state'] == {'foo': 'bar'}
def test_set_node_activation(app, test_nodenet, node):
response = app.post_json('/rpc/set_node_activation', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'activation': '0.734'
})
assert_success(response)
response = app.post_json('/rpc/get_nodes', params={"nodenet_uid": test_nodenet})
activation = response.json_body['data']['nodes'][node]['activation']
assert float("%.3f" % activation) == 0.734
def test_start_calculation(app, default_nodenet):
app.set_auth()
response = app.post_json('/rpc/start_calculation', params=dict(nodenet_uid=default_nodenet))
assert_success(response)
response = app.get_json('/rpc/get_nodenet_metadata?nodenet_uid=%s' % default_nodenet)
assert response.json_body['data']['is_active']
def test_start_calculation_with_condition(app, default_nodenet):
import time
app.set_auth()
response = app.post_json('/rpc/set_runner_condition', params={
'nodenet_uid': default_nodenet,
'steps': '2'
})
assert_success(response)
assert response.json_body['data']['step'] == 2
response = app.post_json('/rpc/start_calculation', params=dict(nodenet_uid=default_nodenet))
assert_success(response)
time.sleep(1)
response = app.get_json('/rpc/get_nodenet_metadata?nodenet_uid=%s' % default_nodenet)
assert not response.json_body['data']['is_active']
assert response.json_body['data']['current_step'] == 2
response = app.post_json('/rpc/remove_runner_condition', params=dict(nodenet_uid=default_nodenet))
assert_success(response)
def test_get_runner_properties(app):
app.set_auth()
response = app.get_json('/rpc/get_runner_properties')
assert_success(response)
assert 'timestep' in response.json_body['data']
assert 'infguard' in response.json_body['data']
def test_set_runner_properties(app):
app.set_auth()
response = app.post_json('/rpc/set_runner_properties', params=dict(timestep=123, infguard=False))
assert_success(response)
response = app.get_json('/rpc/get_runner_properties')
assert_success(response)
assert response.json_body['data']['timestep'] == 123
assert not response.json_body['data']['infguard']
def test_get_is_calculation_running(app, default_nodenet):
response = app.get_json('/rpc/get_is_calculation_running?nodenet_uid=%s' % default_nodenet)
assert_success(response)
assert not response.json_body['data']
def test_stop_calculation(app, default_nodenet):
app.set_auth()
response = app.post_json('/rpc/start_calculation', params=dict(nodenet_uid=default_nodenet))
assert_success(response)
response = app.get_json('/rpc/get_is_calculation_running?nodenet_uid=%s' % default_nodenet)
assert_success(response)
assert response.json_body['data']
response = app.post_json('/rpc/stop_calculation', params=dict(nodenet_uid=default_nodenet))
assert_success(response)
response = app.get_json('/rpc/get_is_calculation_running?nodenet_uid=%s' % default_nodenet)
assert_success(response)
assert not response.json_body['data']
def test_step_calculation(app, default_nodenet):
app.set_auth()
response = app.get_json('/rpc/get_nodenet_metadata?nodenet_uid=%s' % default_nodenet)
assert response.json_body['data']['current_step'] == 0
response = app.post_json('/rpc/step_calculation', params={
"nodenet_uid": default_nodenet
})
assert_success(response)
assert response.json_body['data'] == 1
response = app.get_json('/rpc/get_nodenet_metadata?nodenet_uid=%s' % default_nodenet)
assert response.json_body['data']['current_step'] == 1
def test_get_calculation_state(app, test_nodenet, default_world, node):
    """get_calculation_state aggregates nodenet, monitor, log and world
    data in a single call while the runner is active."""
    from time import sleep
    app.set_auth()
    response = app.get_json('/rpc/get_nodenet_metadata?nodenet_uid=%s' % test_nodenet)
    assert response.json_body['data']['current_step'] == 0
    response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, nodenet_name="new_name", worldadapter="Default", world_uid=default_world))
    response = app.post_json('/rpc/add_gate_monitor', params={
        'nodenet_uid': test_nodenet,
        'node_uid': node,
        'gate': 'sub',
    })
    monitor_uid = response.json_body['data']
    response = app.post_json('/rpc/step_calculation', params={
        "nodenet_uid": test_nodenet
    })
    assert_success(response)
    response = app.post_json('/rpc/start_calculation', params={
        "nodenet_uid": test_nodenet
    })
    assert_success(response)
    # Give the runner thread a moment to advance nodenet and world.
    sleep(1)
    response = app.post_json('/rpc/get_calculation_state', params={
        'nodenet_uid': test_nodenet,
        'nodenet': {
            'nodespaces': [None],
            'step': -1,
        },
        'monitors': {
            'logger': ['system', 'world', 'nodenet'],
            'after': 0,
            'monitor_from': 2,
            'monitor_count': 2
        },
        'world': {
            'step': -1
        }
    })
    data = response.json_body['data']
    assert data['current_nodenet_step'] > 0
    assert data['current_world_step'] > 0
    assert data['calculation_running']
    assert 'servertime' in data['monitors']['logs']
    assert 'logs' in data['monitors']['logs']
    # monitor_from/monitor_count=2 limits the returned value window to 2 steps.
    assert len(data['monitors']['monitors'][monitor_uid]['values']) == 2
    assert test_nodenet in data['world']['agents']
    assert data['world']['current_step'] > 0
def test_revert_nodenet(app, test_nodenet, default_world):
app.set_auth()
response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, nodenet_name="new_name", worldadapter="Default", world_uid=default_world))
assert_success(response)
response = app.post_json('/rpc/revert_nodenet', params={
"nodenet_uid": test_nodenet
})
assert_success(response)
response = app.get_json('/rpc/get_nodenet_metadata?nodenet_uid=%s' % test_nodenet)
data = response.json_body['data']
assert data['name'] == 'Testnet'
assert data['worldadapter'] is None
def test_revert_both(app, test_nodenet, default_world):
app.set_auth()
app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, worldadapter="Default", world_uid=default_world))
for i in range(5):
app.post_json('/rpc/step_calculation', params={
"nodenet_uid": test_nodenet
})
res = app.post_json('/rpc/get_calculation_state', params={"nodenet_uid": test_nodenet})
assert res.json_body['data']['current_nodenet_step'] > 0
assert res.json_body['data']['current_world_step'] > 0
app.post_json('/rpc/revert_calculation', params={
"nodenet_uid": test_nodenet
})
res = app.post_json('/rpc/get_calculation_state', params={"nodenet_uid": test_nodenet})
assert res.json_body['data']['current_nodenet_step'] == 0
assert res.json_body['data']['current_world_step'] == 0
def test_revert_and_reload(app, test_nodenet, default_world, resourcepath):
import os
app.set_auth()
app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, worldadapter="Default", world_uid=default_world))
for i in range(5):
app.post_json('/rpc/step_calculation', params={
"nodenet_uid": test_nodenet
})
res = app.post_json('/rpc/get_calculation_state', params={"nodenet_uid": test_nodenet})
nodetype_file = os.path.join(resourcepath, 'nodetypes', 'Test', 'testnode.py')
with open(nodetype_file, 'w') as fp:
fp.write("""nodetype_definition = {
"name": "Testnode",
"slottypes": ["gen", "foo", "bar"],
"nodefunction_name": "testnodefunc",
"gatetypes": ["gen", "foo", "bar"],
"symbol": "t"}
def testnodefunc(netapi, node=None, **prams):\r\n return 17
""")
app.post_json('/rpc/reload_and_revert', params={"nodenet_uid": test_nodenet})
res = app.post_json('/rpc/get_calculation_state', params={"nodenet_uid": test_nodenet})
assert res.json_body['data']['current_nodenet_step'] == 0
assert res.json_body['data']['current_world_step'] == 0
response = app.get_json('/rpc/get_available_node_types?nodenet_uid=%s' % test_nodenet)
assert "Testnode" in response.json_body['data']['native_modules']
def test_save_nodenet(app, test_nodenet, default_world):
    """Saved nodenet properties must survive a subsequent revert."""
    app.set_auth()
    response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, nodenet_name="new_name", worldadapter="Default", world_uid=default_world))
    assert_success(response)
    response = app.post_json('/rpc/save_nodenet', params={"nodenet_uid": test_nodenet})
    assert_success(response)
    # Revert should restore the saved state, not the fixture defaults.
    response = app.post_json('/rpc/revert_nodenet', params={"nodenet_uid": test_nodenet})
    assert_success(response)
    response = app.get_json('/rpc/get_nodenet_metadata?nodenet_uid=%s' % test_nodenet)
    data = response.json_body['data']
    assert data['name'] == 'new_name'
    assert data['worldadapter'] == 'Default'
    # now delete the nodenet, to get default state back.
    app.post_json('/rpc/delete_nodenet', params={"nodenet_uid": test_nodenet})
def test_export_nodenet(app, test_nodenet, node):
response = app.get_json('/rpc/export_nodenet?nodenet_uid=%s' % test_nodenet)
assert_success(response)
data = json.loads(response.json_body['data'])
assert data['name'] == 'Testnet'
assert data['nodes'][node]['type'] == 'Pipe'
assert 'links' in data
def test_import_nodenet(app, test_nodenet, node):
app.set_auth()
response = app.get_json('/rpc/export_nodenet?nodenet_uid=%s' % test_nodenet)
data = json.loads(response.json_body['data'])
del data['uid']
response = app.post_json('/rpc/import_nodenet', params={
'nodenet_data': json.dumps(data)
})
assert_success(response)
uid = response.json_body['data']
assert uid is not None
response = app.get_json('/rpc/get_nodenet_metadata?nodenet_uid=%s' % uid)
assert response.json_body['data']['name'] == data['name']
assert response.json_body['data']['world'] == data['world']
assert response.json_body['data']['worldadapter'] == data['worldadapter']
response = app.post_json('/rpc/get_nodes', params={"nodenet_uid": uid})
assert list(response.json_body['data']['nodes'].keys()) == [node]
response = app.post_json('/rpc/delete_nodenet', params={"nodenet_uid": uid})
def test_merge_nodenet(app, test_nodenet, engine, node):
app.set_auth()
response = app.get_json('/rpc/export_nodenet?nodenet_uid=%s' % test_nodenet)
data = json.loads(response.json_body['data'])
response = app.post_json('/rpc/new_nodenet', params={
'name': 'ImporterNet',
'engine': engine,
'worldadapter': 'Default',
'owner': 'Pytest User'
})
uid = response.json_body['data']
data['uid'] = uid
response = app.post_json('/rpc/merge_nodenet', params={
'nodenet_uid': uid,
'nodenet_data': json.dumps(data)
})
assert_success(response)
response = app.post_json('/rpc/get_nodes', params={"nodenet_uid": uid})
assert len(list(response.json_body['data']['nodes'].keys())) == 1
response = app.get_json('/rpc/get_nodenet_metadata?nodenet_uid=%s' % uid)
assert response.json_body['data']['name'] == 'ImporterNet'
response = app.post_json('/rpc/delete_nodenet', params={"nodenet_uid": uid})
###################################################
##
##
## WORLD
##
##
###################################################
def test_get_available_worlds(app, default_world):
response = app.get_json('/rpc/get_available_worlds')
assert_success(response)
assert default_world in response.json_body['data']
def test_get_available_worlds_for_user(app, default_world):
response = app.get_json('/rpc/get_available_worlds?user_id=Pytest User')
assert_success(response)
assert default_world in response.json_body['data']
# TODO: get_nodenet_properties is missing.
def test_get_world_properties(app, default_world):
response = app.get_json('/rpc/get_world_properties?world_uid=%s' % default_world)
assert_success(response)
data = response.json_body['data']
assert data['uid'] == default_world
assert data['name'] == "World of Pain"
assert 'available_worldadapters' in data
assert 'available_worldobjects' in data
def test_get_worldadapters(app, default_world):
response = app.get_json('/rpc/get_worldadapters?world_uid=%s' % default_world)
assert_success(response)
assert 'Default' in response.json_body['data']
def test_get_world_objects(app, default_world):
response = app.get_json('/rpc/get_world_objects?world_uid=%s' % default_world)
assert_success(response)
assert response.json_body['data'] == {}
def test_add_worldobject(app, default_world):
response = app.post_json('/rpc/add_worldobject', params={
'world_uid': default_world,
'type': 'TestObject',
'position': [10, 10],
'name': 'TestObject'
})
assert_success(response)
uid = response.json_body['data']
assert uid is not None
response = app.get_json('/rpc/get_world_objects?world_uid=%s' % default_world)
assert uid in response.json_body['data']
def test_delete_worldobject(app, default_world):
response = app.post_json('/rpc/add_worldobject', params={
'world_uid': default_world,
'type': 'TestObject',
'position': [10, 10],
'name': 'TestObject'
})
uid = response.json_body['data']
response = app.post_json('/rpc/delete_worldobject', params={
'world_uid': default_world,
'object_uid': uid
})
assert_success(response)
response = app.get_json('/rpc/get_world_objects?world_uid=%s' % default_world)
assert uid not in response.json_body['data']
def test_set_worldobject_properties(app, default_world):
response = app.post_json('/rpc/add_worldobject', params={
'world_uid': default_world,
'type': 'TestObject',
'position': [10, 10],
'name': 'TestObject'
})
uid = response.json_body['data']
response = app.post_json('/rpc/set_worldobject_properties', params={
'world_uid': default_world,
'uid': uid,
'position': [20, 20],
'orientation': 27,
'name': 'edited'
})
assert_success(response)
response = app.get_json('/rpc/get_world_objects?world_uid=%s' % default_world)
data = response.json_body['data']
assert data[uid]['position'] == [20, 20]
assert data[uid]['orientation'] == 27
assert data[uid]['name'] == 'edited'
def test_get_world_view(app, default_world):
response = app.get_json('/rpc/get_world_view?world_uid=%s&step=0' % default_world)
assert_success(response)
assert 'agents' in response.json_body['data']
assert 'objects' in response.json_body['data']
assert response.json_body['data']['current_step'] == 0
assert 'step' not in response.json_body['data']
def test_set_worldagent_properties(app, default_world, default_nodenet):
# create agent.
app.set_auth()
response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=default_nodenet, worldadapter="Default", world_uid=default_world))
response = app.post_json('/rpc/set_worldagent_properties', params={
'world_uid': default_world,
'uid': default_nodenet,
'position': [23, 23],
'orientation': 37,
'name': 'Sepp'
})
assert_success(response)
response = app.get_json('/rpc/get_world_view?world_uid=%s&step=0' % default_world)
data = response.json_body['data']['agents'][default_nodenet]
assert data['position'] == [23, 23]
assert data['orientation'] == 37
assert data['name'] == 'Sepp'
def test_new_world(app):
app.set_auth()
response = app.post_json('/rpc/new_world', params={
'world_name': 'FooBarTestWorld',
'world_type': 'DefaultWorld'
})
assert_success(response)
uid = response.json_body['data']
response = app.get_json('/rpc/get_available_worlds?user_id=Pytest User')
assert uid in response.json_body['data']
def test_get_available_world_types(app):
response = app.get_json('/rpc/get_available_world_types')
assert_success(response)
data = response.json_body['data']
assert 'DefaultWorld' in data
assert data['DefaultWorld']['config'] == []
def test_delete_world(app, default_world):
response = app.post_json('/rpc/delete_world', params={"world_uid": default_world})
assert_success(response)
response = app.get_json('/rpc/get_available_worlds?user_id=Pytest User')
assert default_world not in response.json_body['data']
def test_set_world_properties(app, default_world):
app.set_auth()
response = app.post_json('/rpc/set_world_properties', params={
'world_uid': default_world,
'world_name': 'asdf',
'owner': 'Pytest User'
})
assert_success(response)
response = app.get_json('/rpc/get_world_properties?world_uid=%s' % default_world)
assert response.json_body['data']['name'] == "asdf"
response = app.get_json('/rpc/get_available_worlds')
assert response.json_body['data'][default_world]['name'] == 'asdf'
def test_revert_world(app, default_world):
app.set_auth()
response = app.post_json('/rpc/add_worldobject', params={
'world_uid': default_world,
'type': 'TestObject',
'position': [10, 10],
'name': 'Testtree'
})
response = app.post_json('/rpc/revert_world', params={'world_uid': default_world})
assert_success(response)
response = app.get_json('/rpc/get_world_view?world_uid=%s&step=0' % default_world)
data = response.json_body['data']
assert data['objects'] == {}
def test_save_world(app, default_world):
app.set_auth()
response = app.post_json('/rpc/add_worldobject', params={
'world_uid': default_world,
'type': 'TestObject',
'position': [10, 10],
'name': 'Testtree'
})
uid = response.json_body['data']
response = app.post_json('/rpc/save_world', params={"world_uid": default_world})
assert_success(response)
response = app.post_json('/rpc/revert_world', params={"world_uid": default_world})
response = app.get_json('/rpc/get_world_view?world_uid=%s&step=0' % default_world)
data = response.json_body['data']
assert uid in data['objects']
# delete the world, to get the default state back
app.post_json('/rpc/delete_world', params={"world_uid": default_world})
def test_export_world(app, default_world):
response = app.get_json('/rpc/export_world?world_uid=%s' % default_world)
assert_success(response)
export_data = json.loads(response.json_body['data'])
assert export_data['uid'] == default_world
assert export_data['name'] == 'World of Pain'
assert export_data['objects'] == {}
assert export_data['agents'] == {}
assert export_data['owner'] == 'Pytest User'
assert export_data['current_step'] == 0
assert export_data['world_type'] == 'DefaultWorld'
def test_import_world(app, default_world):
    """Exporting a world, renaming it and re-importing it yields a new
    world with the edited name and a fresh uid."""
    response = app.get_json('/rpc/export_world?world_uid=%s' % default_world)
    data = json.loads(response.json_body['data'])
    del data['uid']
    data['name'] = 'Copied Pain'
    response = app.post_json('/rpc/import_world', params={
        'worlddata': json.dumps(data)
    })
    assert_success(response)
    uid = response.json_body['data']
    response = app.get_json('/rpc/export_world?world_uid=%s' % uid)
    data = json.loads(response.json_body['data'])
    assert data['owner'] == 'Pytest User'
    # The re-imported world must carry the name we set before importing
    # (the previous '<NAME>' literal was anonymisation residue and could
    # never match).
    assert data['name'] == 'Copied Pain'
    assert data['objects'] == {}
    assert data['agents'] == {}
    assert uid != default_world
###################################################
##
##
## MONITORS
##
##
###################################################
def test_get_monitor_data_all(app, test_nodenet):
response = app.get_json('/rpc/get_monitor_data?nodenet_uid=%s' % test_nodenet)
assert_success(response)
assert response.json_body['data']['monitors'] == {}
def test_add_gate_monitor(app, test_nodenet, node):
response = app.post_json('/rpc/add_gate_monitor', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'gate': 'sub'
})
assert_success(response)
uid = response.json_body['data']
response = app.get_json('/rpc/get_monitor_data', params={
'nodenet_uid': test_nodenet
})
assert response.json_body['data']['monitors'][uid]['node_uid'] == node
assert response.json_body['data']['monitors'][uid]['target'] == 'sub'
assert response.json_body['data']['monitors'][uid]['type'] == 'gate'
assert response.json_body['data']['monitors'][uid]['values'] == {}
@pytest.mark.engine("dict_engine")
@pytest.mark.engine("numpy_engine")
def test_add_slot_monitor(app, test_nodenet, node):
response = app.post_json('/rpc/add_slot_monitor', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'slot': 'gen',
'name': 'Foobar'
})
assert_success(response)
uid = response.json_body['data']
response = app.get_json('/rpc/get_monitor_data', params={
'nodenet_uid': test_nodenet
})
assert response.json_body['data']['monitors'][uid]['name'] == 'Foobar'
assert response.json_body['data']['monitors'][uid]['node_uid'] == node
assert response.json_body['data']['monitors'][uid]['target'] == 'gen'
assert response.json_body['data']['monitors'][uid]['type'] == 'slot'
assert response.json_body['data']['monitors'][uid]['values'] == {}
def test_add_link_monitor(app, test_nodenet, node):
response = app.post_json('/rpc/add_link_monitor', params={
'nodenet_uid': test_nodenet,
'source_node_uid': node,
'gate_type': 'gen',
'target_node_uid': node,
'slot_type': 'gen',
'name': 'LinkWeight'
})
assert_success(response)
uid = response.json_body['data']
response = app.get_json('/rpc/get_monitor_data', params={
'nodenet_uid': test_nodenet
})
assert response.json_body['data']['monitors'][uid]['name'] == 'LinkWeight'
assert response.json_body['data']['monitors'][uid]['source_node_uid'] == node
assert response.json_body['data']['monitors'][uid]['gate_type'] == 'gen'
assert response.json_body['data']['monitors'][uid]['target_node_uid'] == node
assert response.json_body['data']['monitors'][uid]['slot_type'] == 'gen'
def test_add_custom_monitor(app, test_nodenet):
response = app.post_json('/rpc/add_custom_monitor', params={
'nodenet_uid': test_nodenet,
'function': 'return len(netapi.get_nodes())',
'name': 'nodecount'
})
assert_success(response)
uid = response.json_body['data']
response = app.get_json('/rpc/get_monitor_data', params={
'nodenet_uid': test_nodenet
})
assert response.json_body['data']['monitors'][uid]['name'] == 'nodecount'
def test_add_group_monitor_by_name(app, test_nodenet):
app.set_auth()
uids = []
for i in range(3):
response = app.post_json('/rpc/add_node', params={
'nodenet_uid': test_nodenet,
'type': 'Neuron',
'position': [23, 23, 12],
'nodespace': None,
'name': 'Testnode %d' % i
})
uids.append(response.json_body['data'])
response = app.post_json('/rpc/add_group_monitor', {
'nodenet_uid': test_nodenet,
'name': 'testmonitor',
'nodespace': None,
'node_name_prefix': 'Testnode',
'gate': 'gen'
})
mon_uid = response.json_body['data']
response = app.get_json('/rpc/get_monitor_data', params={
'nodenet_uid': test_nodenet
})
assert response.json_body['data']['monitors'][mon_uid]['name'] == 'testmonitor'
assert response.json_body['data']['monitors'][mon_uid]['node_uids'] == uids
def test_add_group_monitor_by_ids(app, test_nodenet):
app.set_auth()
uids = []
for i in range(3):
response = app.post_json('/rpc/add_node', params={
'nodenet_uid': test_nodenet,
'type': 'Neuron',
'position': [23, 23, 12],
'nodespace': None,
'name': 'Testnode %d' % i
})
uids.append(response.json_body['data'])
response = app.post_json('/rpc/add_group_monitor', {
'nodenet_uid': test_nodenet,
'name': 'testmonitor',
'nodespace': None,
'node_uids': uids,
'gate': 'gen'
})
mon_uid = response.json_body['data']
response = app.get_json('/rpc/get_monitor_data', params={
'nodenet_uid': test_nodenet
})
assert response.json_body['data']['monitors'][mon_uid]['name'] == 'testmonitor'
assert response.json_body['data']['monitors'][mon_uid]['node_uids'] == uids
def test_remove_monitor(app, test_nodenet, node):
response = app.post_json('/rpc/add_gate_monitor', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'gate': 'gen'
})
uid = response.json_body['data']
response = app.post_json('/rpc/remove_monitor', params={
'nodenet_uid': test_nodenet,
'monitor_uid': uid
})
assert_success(response)
response = app.get_json('/rpc/get_monitor_data', params={
'nodenet_uid': test_nodenet
})
assert uid not in response.json_body['data']['monitors']
def test_clear_monitor(app, test_nodenet, node):
response = app.post_json('/rpc/add_gate_monitor', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'gate': 'gen'
})
uid = response.json_body['data']
response = app.post_json('/rpc/clear_monitor', params={
'nodenet_uid': test_nodenet,
'monitor_uid': uid
})
assert_success(response)
###################################################
##
##
## NODENET
##
##
###################################################
def test_get_nodespace_list(app, test_nodenet, node):
response = app.get_json('/rpc/get_nodespace_list?nodenet_uid=%s' % test_nodenet)
assert_success(response)
rootid = list(response.json_body['data'].keys())[0]
assert response.json_body['data'][rootid]['name'] == 'Root'
assert response.json_body['data'][rootid]['parent'] is None
assert node in response.json_body['data'][rootid]['nodes']
def test_get_nodespace_activations(app, test_nodenet, node):
response = app.post_json('/rpc/get_nodespace_activations', params={
'nodenet_uid': test_nodenet,
'nodespaces': [None],
'last_call_step': -1
})
assert_success(response)
assert node not in response.json_body['data']['activations']
response = app.post_json('/rpc/set_node_activation', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'activation': -1
})
response = app.post_json('/rpc/get_nodespace_activations', params={
'nodenet_uid': test_nodenet,
'nodespaces': [None],
'last_call_step': -1
})
assert response.json_body['data']['activations'][node][0] == -1
def test_get_node(app, test_nodenet, node):
response = app.get_json('/rpc/get_node?nodenet_uid=%s&node_uid=%s' % (test_nodenet, node))
assert_success(response)
assert response.json_body['data']['type'] == 'Pipe'
def test_add_node(app, test_nodenet):
app.set_auth()
response = app.post_json('/rpc/add_node', params={
'nodenet_uid': test_nodenet,
'type': 'Pipe',
'position': [23, 42, 13],
'nodespace': None,
'name': 'N2',
'parameters': {'wait': "3"}
})
assert_success(response)
uid = response.json_body['data']
response = app.get_json('/rpc/get_node?nodenet_uid=%s&node_uid=%s' % (test_nodenet, uid))
assert response.json_body['data']['name'] == 'N2'
assert int(response.json_body['data']['parameters']['wait']) == 3
def test_add_nodespace(app, test_nodenet):
    """A created nodespace is listed as a nodespace, not as a node."""
    app.set_auth()
    response = app.post_json('/rpc/add_nodespace', params={
        'nodenet_uid': test_nodenet,
        'nodespace': None,
        'name': 'nodespace'
    })
    assert_success(response)
    uid = response.json_body['data']
    response = app.post_json('/rpc/get_nodes', params={"nodenet_uid": test_nodenet})
    assert uid in response.json_body['data']['nodespaces']
    assert uid not in response.json_body['data']['nodes']
def test_clone_nodes(app, test_nodenet, node):
    """Cloning in 'all' mode copies the node with offset position and its links."""
    app.set_auth()
    response = app.post_json('/rpc/clone_nodes', params={
        'nodenet_uid': test_nodenet,
        'node_uids': [node],
        'clone_mode': 'all',
        'nodespace': None,
        'offset': [23, 23, 23]
    })
    assert_success(response)
    # NOTE: rebinds `node` from the fixture uid to the clone's data dict
    node = list(response.json_body['data'].values())[0]
    assert node['name'] == 'N1'
    assert node['position'] == [33, 33, 33]
    # the self-link of the original was cloned as a self-link of the clone
    assert node['links']['gen'][0]['target_node_uid'] == node['uid']
def test_set_node_positions(app, test_nodenet, node):
    """A repositioned node reports its new coordinates when fetched again."""
    app.set_auth()
    new_position = [42, 23, 11]
    resp = app.post_json('/rpc/set_node_positions', params={
        'nodenet_uid': test_nodenet,
        'positions': {node: new_position}
    })
    assert_success(resp)
    resp = app.get_json('/rpc/get_node?nodenet_uid=%s&node_uid=%s' % (test_nodenet, node))
    assert resp.json_body['data']['position'] == new_position
def test_set_node_name(app, test_nodenet, node):
    """Renaming a node persists the new name."""
    app.set_auth()
    resp = app.post_json('/rpc/set_node_name', params={
        'nodenet_uid': test_nodenet,
        'node_uid': node,
        'name': 'changed'
    })
    assert_success(resp)
    resp = app.get_json('/rpc/get_node?nodenet_uid=%s&node_uid=%s' % (test_nodenet, node))
    assert resp.json_body['data']['name'] == 'changed'
def test_delete_node(app, test_nodenet, node):
    """Deleting the only node leaves the nodenet with an empty node map."""
    app.set_auth()
    resp = app.post_json('/rpc/delete_nodes', params={
        'nodenet_uid': test_nodenet,
        'node_uids': [node]
    })
    assert_success(resp)
    resp = app.post_json('/rpc/get_nodes', params={"nodenet_uid": test_nodenet})
    assert resp.json_body['data']['nodes'] == {}
def test_delete_nodespace(app, test_nodenet, node):
    """A freshly created nodespace can be deleted and disappears from the listing."""
    app.set_auth()
    response = app.post_json('/rpc/add_nodespace', params={
        'nodenet_uid': test_nodenet,
        'nodespace': None,
        'name': 'nodespace'
    })
    uid = response.json_body['data']
    response = app.post_json('/rpc/delete_nodespace', params={
        'nodenet_uid': test_nodenet,
        'nodespace': uid
    })
    assert_success(response)
    response = app.post_json('/rpc/get_nodes', params={"nodenet_uid": test_nodenet})
    assert uid not in response.json_body['data']['nodespaces']
def test_align_nodes(app, test_nodenet):
    """Auto-aligning a nodespace moves an unlinked node away from its position."""
    app.set_auth()
    # TODO: Why does autoalign only move a node if it has no links?
    response = app.post_json('/rpc/add_node', params={
        'nodenet_uid': test_nodenet,
        'type': 'Neuron',
        'position': [5, 5, 0],
        'nodespace': None,
        'name': 'N2'
    })
    uid = response.json_body['data']
    response = app.post_json('/rpc/align_nodes', params={
        'nodenet_uid': test_nodenet,
        'nodespace': None
    })
    assert_success(response)
    response = app.get_json('/rpc/get_node?nodenet_uid=%s&node_uid=%s' % (test_nodenet, uid))
    # only asserts that the node moved, not where it ended up
    assert response.json_body['data']['position'] != [5, 5]
def test_get_available_node_types(app, test_nodenet):
    """The builtin node types are listed for the nodenet."""
    resp = app.get_json('/rpc/get_available_node_types?nodenet_uid=%s' % test_nodenet)
    assert_success(resp)
    nodetypes = resp.json_body['data']['nodetypes']
    for expected in ('Pipe', 'Neuron', 'Sensor'):
        assert expected in nodetypes
def test_get_available_native_module_types(app, test_nodenet, engine):
    """Without custom code loaded, no native module types exist."""
    resp = app.get_json('/rpc/get_available_native_module_types?nodenet_uid=%s' % test_nodenet)
    assert_success(resp)
    assert resp.json_body['data'] == {}
def test_set_node_parameters(app, test_nodenet):
    """Node parameters set via RPC round-trip through get_node."""
    app.set_auth()
    # add activator
    response = app.post_json('/rpc/add_node', params={
        'nodenet_uid': test_nodenet,
        'type': 'Activator',
        'nodespace': None,
        'position': [23, 42, 0],
    })
    uid = response.json_body['data']
    response = app.post_json('/rpc/set_node_parameters', params={
        'nodenet_uid': test_nodenet,
        'node_uid': uid,
        'parameters': {'type': 'sub'}
    })
    assert_success(response)
    response = app.get_json('/rpc/get_node?nodenet_uid=%s&node_uid=%s' % (test_nodenet, uid))
    assert response.json_body['data']['parameters']['type'] == 'sub'
def test_set_gate_configuration(app, test_nodenet, node):
    """Gate function and its parameters can be set; empty values fall back to defaults."""
    app.set_auth()
    response = app.post_json('/rpc/set_gate_configuration', params={
        'nodenet_uid': test_nodenet,
        'node_uid': node,
        'gate_type': 'gen',
        'gatefunction': 'sigmoid',
        'gatefunction_parameters': {
            'bias': '1'
        }
    })
    assert_success(response)
    response = app.get_json('/rpc/get_node', params={
        'nodenet_uid': test_nodenet,
        'node_uid': node,
    })
    data = response.json_body['data']
    assert data['gate_configuration']['gen']['gatefunction'] == 'sigmoid'
    # string '1' was coerced to the numeric parameter value 1
    assert data['gate_configuration']['gen']['gatefunction_parameters'] == {'bias': 1}
    # setting a non-value leads to using the default
    response = app.post_json('/rpc/set_gate_configuration', params={
        'nodenet_uid': test_nodenet,
        'node_uid': node,
        'gate_type': 'gen',
        'gatefunction': 'sigmoid',
        'gatefunction_parameters': {
            'bias': ''
        }
    })
    response = app.get_json('/rpc/get_node', params={
        'nodenet_uid': test_nodenet,
        'node_uid': node,
    })
    data = response.json_body['data']
    assert data['gate_configuration']['gen']['gatefunction'] == 'sigmoid'
    # sigmoid's default bias is 0 (see test_get_available_gatefunctions)
    assert data['gate_configuration']['gen']['gatefunction_parameters'] == {'bias': 0}
def test_get_available_gatefunctions(app, test_nodenet):
    """Each available gate function is reported with its default parameters."""
    response = app.get_json('/rpc/get_available_gatefunctions', params={'nodenet_uid': test_nodenet})
    funcs = response.json_body['data']
    # parameterless gate functions map to an empty dict
    assert funcs['identity'] == {}
    assert funcs['absolute'] == {}
    assert funcs['one_over_x'] == {}
    assert funcs['sigmoid'] == {'bias': 0}
    assert funcs['elu'] == {'bias': 0}
    assert funcs['relu'] == {'bias': 0}
    assert funcs['threshold'] == {
        'minimum': 0,
        'maximum': 1,
        'amplification': 1,
        'threshold': 0
    }
def test_get_available_datasources(app, test_nodenet, default_world):
    """After wiring a worldadapter, its datasources become available."""
    app.set_auth()
    # attach the Default worldadapter first
    app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, world_uid=default_world, worldadapter="Default"))
    resp = app.get_json('/rpc/get_available_datasources?nodenet_uid=%s' % test_nodenet)
    assert_success(resp)
    sources = resp.json_body['data']
    assert 'static_on' in sources
    assert 'static_off' in sources
def test_get_available_datatargets(app, test_nodenet, default_world):
    """After wiring a worldadapter, its datatargets become available."""
    app.set_auth()
    app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, world_uid=default_world, worldadapter="Default"))
    resp = app.get_json('/rpc/get_available_datatargets?nodenet_uid=%s' % test_nodenet)
    assert_success(resp)
    assert 'echo' in resp.json_body['data']
def test_bind_datasource_to_sensor(app, test_nodenet, default_world):
    """A sensor bound to a datasource records it in its parameters."""
    app.set_auth()
    response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, world_uid=default_world, worldadapter="Default"))
    response = app.post_json('/rpc/add_node', params={
        'nodenet_uid': test_nodenet,
        'type': 'Sensor',
        'position': [23, 42, 13],
        'nodespace': None,
    })
    uid = response.json_body['data']
    response = app.post_json('/rpc/bind_datasource_to_sensor', params={
        'nodenet_uid': test_nodenet,
        'sensor_uid': uid,
        'datasource': 'static_on'
    })
    assert_success(response)
    response = app.get_json('/rpc/get_node?nodenet_uid=%s&node_uid=%s' % (test_nodenet, uid))
    assert response.json_body['data']['parameters']['datasource'] == 'static_on'
def test_bind_datatarget_to_actuator(app, test_nodenet, default_world):
    """An actuator bound to a datatarget records it in its parameters."""
    app.set_auth()
    response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, world_uid=default_world, worldadapter="Default"))
    response = app.post_json('/rpc/add_node', params={
        'nodenet_uid': test_nodenet,
        'type': 'Actuator',
        'position': [23, 42, 13],
        'nodespace': None,
    })
    uid = response.json_body['data']
    response = app.post_json('/rpc/bind_datatarget_to_actuator', params={
        'nodenet_uid': test_nodenet,
        'actuator_uid': uid,
        'datatarget': 'echo'
    })
    assert_success(response)
    response = app.get_json('/rpc/get_node?nodenet_uid=%s&node_uid=%s' % (test_nodenet, uid))
    assert response.json_body['data']['parameters']['datatarget'] == 'echo'
def test_add_link(app, test_nodenet, node):
    """A new sub->gen self-link is created with the requested weight."""
    app.set_auth()
    response = app.post_json('/rpc/add_link', params={
        'nodenet_uid': test_nodenet,
        'source_node_uid': node,
        'gate_type': 'sub',
        'target_node_uid': node,
        'slot_type': 'gen',
        'weight': 0.7
    })
    assert_success(response)
    uid = response.json_body['data']
    assert uid is not None
    response = app.post_json('/rpc/get_nodes', params={"nodenet_uid": test_nodenet})
    data = response.json_body['data']
    assert data['nodes'][node]['links']['sub'][0]['target_node_uid'] == node
    # round to tolerate float representation of the stored weight
    assert round(data['nodes'][node]['links']['sub'][0]['weight'], 3) == 0.7
def test_set_link_weight(app, test_nodenet, node):
    """Changing a link weight is persisted on the gen self-link."""
    app.set_auth()
    response = app.post_json('/rpc/set_link_weight', params={
        'nodenet_uid': test_nodenet,
        'source_node_uid': node,
        'gate_type': "gen",
        'target_node_uid': node,
        'slot_type': "gen",
        'weight': 0.345
    })
    assert_success(response)
    response = app.post_json('/rpc/get_nodes', params={"nodenet_uid": test_nodenet})
    data = response.json_body['data']
    # format to 3 decimals to tolerate float representation of the stored weight
    assert float("%.3f" % data['nodes'][node]['links']['gen'][0]['weight']) == 0.345
def test_get_links_for_nodes(app, test_nodenet, node):
    """Link queries for a node report that node as a link source."""
    resp = app.post_json('/rpc/get_links_for_nodes', params={
        'nodenet_uid': test_nodenet,
        'node_uids': [node]
    })
    assert_success(resp)
    first_link = list(resp.json_body['data']['links'])[0]
    assert first_link['source_node_uid'] == node
def test_delete_link(app, test_nodenet, node):
    """Deleting the gen self-link removes it from the node's link listing."""
    app.set_auth()
    response = app.post_json('/rpc/delete_link', params={
        'nodenet_uid': test_nodenet,
        'source_node_uid': node,
        'gate_type': "gen",
        'target_node_uid': node,
        'slot_type': "gen"
    })
    assert_success(response)
    response = app.post_json('/rpc/get_nodes', params={"nodenet_uid": test_nodenet})
    data = response.json_body['data']
    # Bug fix: this line was a bare comparison whose result was discarded,
    # so the test never actually verified the deletion. Assert it.
    assert data['nodes'][node]['links'] == {}
def test_reload_code(app, test_nodenet, resourcepath):
    """reload_code picks up a freshly written native-module definition file."""
    app.set_auth()
    # create a native module:
    import os
    nodetype_file = os.path.join(resourcepath, 'nodetypes', 'Test', 'testnode.py')
    with open(nodetype_file, 'w') as fp:
        fp.write("""nodetype_definition = {
    "name": "Testnode",
    "slottypes": ["gen", "foo", "bar"],
    "nodefunction_name": "testnodefunc",
    "gatetypes": ["gen", "foo", "bar"],
    "symbol": "t"}
def testnodefunc(netapi, node=None, **prams):\r\n return 17
""")
    response = app.post_json('/rpc/reload_code')
    assert_success(response)
    response = app.get_json('/rpc/get_available_node_types?nodenet_uid=%s' % test_nodenet)
    data = response.json_body['data']['native_modules']['Testnode']
    assert data['nodefunction_name'] == "testnodefunc"
    assert data['gatetypes'] == ['gen', 'foo', 'bar']
    assert data['slottypes'] == ['gen', 'foo', 'bar']
    assert data['name'] == 'Testnode'
def test_user_prompt_response(app, test_nodenet, resourcepath):
    """A native module can raise a user prompt; the response values reach the node."""
    app.set_auth()
    # create a native module:
    import os
    nodetype_file = os.path.join(resourcepath, 'nodetypes', 'Test', 'testnode.py')
    with open(nodetype_file, 'w') as fp:
        fp.write("""nodetype_definition = {
    "name": "Testnode",
    "slottypes": ["gen", "foo", "bar"],
    "gatetypes": ["gen", "foo", "bar"],
    "nodefunction_name": "testnodefunc",
    "user_prompts": {
        "promptident": {
            "callback": "user_prompt_callback",
            "parameters": [
                {"name": "foo", "description": "value for foo", "default": 23},
                {"name": "bar", "description": "value for bar", "default": 42}
            ]
        }
    }
}
def testnodefunc(netapi, node=None, **prams):
    if not hasattr(node, 'foo'):
        node.foo = 0
        node.bar = 1
        netapi.show_user_prompt(node, "promptident")
    node.get_gate("foo").gate_function(node.foo)
    node.get_gate("bar").gate_function(node.bar)
def user_prompt_callback(netapi, node, user_prompt_params):
    \"\"\"Elaborate explanation as to what this user prompt is for\"\"\"
    node.foo = int(user_prompt_params['foo'])
    node.bar = int(user_prompt_params['bar'])
""")
    response = app.post_json('/rpc/reload_code')
    assert_success(response)
    response = app.post_json('/rpc/add_node', params={
        'nodenet_uid': test_nodenet,
        'type': 'Testnode',
        'position': [23, 23],
        'nodespace': None,
        'name': 'Testnode'
    })
    assert_success(response)
    uid = response.json_body['data']
    # first step triggers the prompt from within the nodefunction
    response = app.post_json('/rpc/step_calculation', params={"nodenet_uid": test_nodenet})
    assert_success(response)
    response = app.post_json('/rpc/get_calculation_state', {'nodenet_uid': test_nodenet})
    assert_success(response)
    prompt_data = response.json_body['data']['user_prompt']
    assert prompt_data['key'] == 'promptident'
    assert prompt_data['node']['uid'] == uid
    assert len(prompt_data['parameters']) == 2
    # answer the prompt; the callback coerces the string values to ints
    response = app.post_json('/rpc/user_prompt_response', {
        'nodenet_uid': test_nodenet,
        'node_uid': uid,
        'key': prompt_data['key'],
        'parameters': {
            'foo': '77',
            'bar': '99'
        },
        'resume_nodenet': False
    })
    assert_success(response)
    response = app.post_json('/rpc/step_calculation', {"nodenet_uid": test_nodenet})
    assert_success(response)
    response = app.post_json('/rpc/get_nodes', params={"nodenet_uid": test_nodenet})
    data = response.json_body['data']
    # the second step emitted the values set by the prompt callback
    assert data['nodes'][uid]['gate_activations']['foo'] == 77
    assert data['nodes'][uid]['gate_activations']['bar'] == 99
def test_set_logging_levels(app):
    """Log levels set through the RPC are applied to the Python loggers."""
    resp = app.post_json('/rpc/set_logging_levels', params={
        'logging_levels': {
            'system': 'INFO',
            'world': 'DEBUG',
        }
    })
    assert_success(resp)
    import logging
    assert logging.getLogger('system').getEffectiveLevel() == logging.INFO
    assert logging.getLogger('world').getEffectiveLevel() == logging.DEBUG
def test_get_logger_messages(app, default_nodenet):
    """The system logger endpoint returns servertime and a list of log entries."""
    # Bug fix: the query string contained a stray ')' (logger=system)),
    # so messages were requested for the non-existent logger "system)".
    response = app.get_json('/rpc/get_logger_messages?logger=system')
    assert_success(response)
    assert 'servertime' in response.json_body['data']
    assert type(response.json_body['data']['logs']) == list
def test_get_nodenet_logger_messages(app, test_nodenet):
    """Agent log entries carry the nodenet step; system entries have no step."""
    import logging
    logging.getLogger('agent.%s' % test_nodenet).warning('asdf')
    logging.getLogger('system').warning('foobar')
    response = app.get_json('/rpc/get_logger_messages?logger=system&logger=agent.%s' % test_nodenet)
    assert 'servertime' in response.json_body['data']
    netlog = syslog = None
    # pick one entry from each requested logger
    for item in response.json_body['data']['logs']:
        if item['logger'] == 'system':
            syslog = item
        elif item['logger'].startswith('agent'):
            netlog = item
    assert netlog['step'] == 0
    assert syslog['step'] is None
def test_get_monitoring_info(app, test_nodenet):
    """Combined monitoring info bundles logs, monitors and the current step."""
    url = '/rpc/get_monitoring_info?nodenet_uid=%s&logger=system&logger=world&monitor_from=3&monitor_count=10' % test_nodenet
    resp = app.get_json(url)
    assert_success(resp)
    data = resp.json_body['data']
    assert 'logs' in data
    assert 'current_step' in data
    assert data['monitors'] == {}
    assert 'servertime' in data['logs']
    assert data['logs']['logs'] == []
@pytest.mark.engine("theano_engine")
def test_get_benchmark_info(app, test_nodenet):
    """benchmark_info returns whatever micropsi_core's benchmark_system produces."""
    from unittest import mock
    # patch the benchmark so the test doesn't actually run a benchmark
    with mock.patch("micropsi_core.benchmark_system.benchmark_system", return_value="testbench") as benchmock:
        response = app.get_json('/rpc/benchmark_info')
        assert_success(response)
        assert response.json_body['data']['benchmark'] == 'testbench'
def test_400(app):
    """An unknown argument yields a failure with a helpful message."""
    app.set_auth()
    resp = app.get_json('/rpc/get_nodenet_metadata?foobar', expect_errors=True)
    assert_failure(resp)
    assert "unexpected keyword argument" in resp.json_body['data']
def test_401(app, default_nodenet):
    """Without authentication, privileged calls are rejected."""
    app.unset_auth()
    resp = app.post_json('/rpc/delete_nodenet', params={"nodenet_uid": default_nodenet}, expect_errors=True)
    assert_failure(resp)
    assert 'Insufficient permissions' in resp.json_body['data']
def test_404(app):
    """Calling a non-existent RPC function fails cleanly."""
    resp = app.get_json('/rpc/notthere?foo=bar', expect_errors=True)
    assert_failure(resp)
    assert resp.json_body['data'] == "Function not found"
def test_405(app, default_nodenet):
    """Using GET on a POST-only endpoint is rejected."""
    resp = app.get_json('/rpc/delete_nodenet?nodenet_uid=%s' % default_nodenet, expect_errors=True)
    assert_failure(resp)
    assert resp.json_body['data'] == "Method not allowed"
def test_500(app):
    """Internal errors report the message and include a traceback."""
    resp = app.get_json('/rpc/generate_uid?foo=bar', expect_errors=True)
    assert_failure(resp)
    assert "unexpected keyword argument" in resp.json_body['data']
    assert resp.json_body['traceback'] is not None
def test_get_recipes(app, default_nodenet, resourcepath):
    """A recipe file written to the resource path appears with its parameters."""
    app.set_auth()
    import os
    os.mkdir(os.path.join(resourcepath, 'recipes', 'Test'))
    recipe_file = os.path.join(resourcepath, 'recipes', 'Test', 'recipes.py')
    with open(recipe_file, 'w') as fp:
        fp.write("""
def foobar(netapi, quatsch=23):
    return {'quatsch': quatsch}
""")
    response = app.post_json('/rpc/reload_code')
    response = app.get_json('/rpc/get_available_recipes')
    data = response.json_body['data']
    assert 'foobar' in data
    # the netapi argument is implicit; only 'quatsch' is a recipe parameter
    assert len(data['foobar']['parameters']) == 1
    assert data['foobar']['parameters'][0]['name'] == 'quatsch'
    assert data['foobar']['parameters'][0]['default'] == 23
def test_run_recipes(app, test_nodenet, resourcepath):
    """Running a recipe with an empty parameter value uses the declared default."""
    app.set_auth()
    import os
    os.mkdir(os.path.join(resourcepath, 'recipes', 'Test'))
    recipe_file = os.path.join(resourcepath, 'recipes', 'Test', 'recipes.py')
    with open(recipe_file, 'w') as fp:
        fp.write("""
def foobar(netapi, quatsch=23):
    return {'quatsch': quatsch}
""")
    response = app.post_json('/rpc/reload_code')
    response = app.post_json('/rpc/run_recipe', {
        'nodenet_uid': test_nodenet,
        'name': 'foobar',
        'parameters': {
            'quatsch': ''
        }
    })
    data = response.json_body['data']
    # empty string falls back to the recipe's default of 23
    assert data['quatsch'] == 23
def test_get_agent_dashboard(app, test_nodenet, node, default_world):
    """The agent dashboard reports aggregate info such as the node count."""
    app.set_auth()
    app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, worldadapter="Default", world_uid=default_world))
    resp = app.get_json('/rpc/get_agent_dashboard?nodenet_uid=%s' % test_nodenet)
    assert resp.json_body['data']['count_nodes'] == 1
def test_nodenet_data_structure(app, test_nodenet, resourcepath, node):
    """The full nodenet data structure survives a save/revert round-trip unchanged,
    and the various per-entity endpoints agree with the aggregate calculation state."""
    app.set_auth()
    import os
    nodetype_file = os.path.join(resourcepath, 'nodetypes', 'Test', 'testnode.py')
    with open(nodetype_file, 'w') as fp:
        fp.write("""nodetype_definition = {
    "name": "Testnode",
    "slottypes": ["gen", "foo", "bar"],
    "nodefunction_name": "testnodefunc",
    "gatetypes": ["gen", "foo", "bar"],
    "symbol": "t"}
def testnodefunc(netapi, node=None, **prams):\r\n return 17
""")
    response = app.post_json('/rpc/reload_code')
    response = app.post_json('/rpc/add_nodespace', params={
        'nodenet_uid': test_nodenet,
        'nodespace': None,
        'name': 'Test-Node-Space'
    })
    nodespace_uid = response.json_body['data']
    response = app.post_json('/rpc/add_node', params={
        'nodenet_uid': test_nodenet,
        'type': 'Pipe',
        'position': [42, 42, 23],
        'nodespace': nodespace_uid,
        'name': 'N2'
    })
    n2_uid = response.json_body['data']
    response = app.post_json('/rpc/add_gate_monitor', params={
        'nodenet_uid': test_nodenet,
        'node_uid': node,
        'gate': 'gen',
        'name': 'Testmonitor',
        'color': '#332211'
    })
    monitor_uid = response.json_body['data']
    response = app.get_json('/rpc/get_nodenet_metadata?nodenet_uid=%s' % test_nodenet)
    metadata = response.json_body['data']
    # state before save must equal state after save + revert
    response_1 = app.post_json('/rpc/get_calculation_state', params={'nodenet_uid': test_nodenet, 'nodenet': {'nodespaces': [None]}, 'monitors': True})
    response = app.post_json('/rpc/save_nodenet', params={"nodenet_uid": test_nodenet})
    response = app.post_json('/rpc/revert_nodenet', params={"nodenet_uid": test_nodenet})
    response_2 = app.post_json('/rpc/get_calculation_state', params={'nodenet_uid': test_nodenet, 'nodenet': {'nodespaces': [None]}, 'monitors': True})
    assert response_1.json_body['data']['nodenet'] == response_2.json_body['data']['nodenet']
    assert response_1.json_body['data']['monitors']['monitors'] == response_2.json_body['data']['monitors']['monitors']
    data = response_2.json_body['data']
    # Monitors
    response = app.get_json('/rpc/get_monitor_data?nodenet_uid=%s' % test_nodenet)
    monitor_data = response.json_body['data']['monitors'][monitor_uid]
    assert data['monitors']['monitors'][monitor_uid]['name'] == 'Testmonitor'
    assert data['monitors']['monitors'][monitor_uid]['node_uid'] == node
    assert data['monitors']['monitors'][monitor_uid]['target'] == 'gen'
    assert data['monitors']['monitors'][monitor_uid]['type'] == 'gate'
    assert data['monitors']['monitors'][monitor_uid]['uid'] == monitor_uid
    assert data['monitors']['monitors'][monitor_uid]['values'] == {}
    assert data['monitors']['monitors'][monitor_uid]['color'] == '#332211'
    assert data['monitors']['monitors'][monitor_uid] == monitor_data
    # Nodes
    response = app.get_json('/rpc/get_node?nodenet_uid=%s&node_uid=%s' % (test_nodenet, node))
    node_data = response.json_body['data']
    # only the root nodespace was requested, so N2 and the sub-space are absent
    assert node in data['nodenet']['nodes']
    assert n2_uid not in data['nodenet']['nodes']
    assert nodespace_uid not in data['nodenet']['nodes']
    # gates
    for key in ['gen', 'por', 'ret', 'sub', 'sur', 'cat', 'exp']:
        assert data['nodenet']['nodes'][node]['gate_activations'][key] == 0
    assert data['nodenet']['nodes'][node]['parameters']['expectation'] == 1
    assert data['nodenet']['nodes'][node]['parameters']['wait'] == 10
    assert data['nodenet']['nodes'][node]['position'] == [10, 10, 10]
    assert data['nodenet']['nodes'][node]['type'] == "Pipe"
    assert 'links' not in data
    assert node_data['parameters']['expectation'] == 1
    assert node_data['parameters']['wait'] == 10
    assert node_data['position'] == [10, 10, 10]
    assert node_data['type'] == "Pipe"
    # Links
    for link in data['nodenet']['nodes'][node]['links']['gen']:
        assert link['weight'] == 1
        assert link['target_node_uid'] == node
        assert link['target_slot_name'] == 'gen'
    # Nodespaces
    # assert data['nodenet']['nodespaces'][nodespace_uid]['index'] == 3
    assert data['nodenet']['nodespaces'][nodespace_uid]['name'] == 'Test-Node-Space'
    # assert data['nodenet']['nodespaces'][nodespace_uid]['parent_nodespace'] == 'Root'
    # Nodetypes
    response = app.get_json('/rpc/get_available_node_types?nodenet_uid=%s' % test_nodenet)
    node_type_data = response.json_body['data']
    assert 'gatetypes' not in metadata['nodetypes']['Comment']
    assert 'slottypes' not in metadata['nodetypes']['Comment']
    for key in ['Pipe', 'Neuron', 'Actuator']:
        assert 'gatetypes' in metadata['nodetypes'][key]
        assert 'slottypes' in metadata['nodetypes'][key]
    assert 'slottypes' in metadata['nodetypes']['Activator']
    assert 'gatetypes' not in metadata['nodetypes']['Activator']
    assert 'slottypes' not in metadata['nodetypes']['Sensor']
    assert 'gatetypes' in metadata['nodetypes']['Sensor']
    assert metadata['nodetypes'] == node_type_data['nodetypes']
    # Native Modules
    response = app.get_json('/rpc/get_available_native_module_types?nodenet_uid=%s' % test_nodenet)
    native_module_data = response.json_body['data']
    assert metadata['native_modules']['Testnode']['gatetypes'] == ['gen', 'foo', 'bar']
    assert metadata['native_modules']['Testnode']['name'] == 'Testnode'
    assert metadata['native_modules']['Testnode']['nodefunction_name'] == 'testnodefunc'
    assert metadata['native_modules']['Testnode']['slottypes'] == ['gen', 'foo', 'bar']
    assert metadata['native_modules']['Testnode']['symbol'] == 't'
    assert metadata['native_modules'] == native_module_data
    # Nodenet
    assert metadata['current_step'] == 0  # TODO:
    assert 'step' not in data  # current_step && step?
    assert metadata['version'] == 2
    assert metadata['world'] is None
    assert metadata['worldadapter'] is None
def test_get_state_diff(app, test_nodenet, node):
    """The state diff lists nodes changed since the requested step."""
    from micropsi_core import runtime
    nodenet = runtime.nodenets[test_nodenet]
    runtime.step_nodenet(test_nodenet)
    response = app.post_json('/rpc/get_calculation_state', params={
        'nodenet_uid': test_nodenet,
        'nodenet_diff': {
            'nodespaces': [None],
            'step': 0,
        }
    })
    data = response.json_body['data']['nodenet_diff']
    assert 'activations' in data
    assert 'changes' in data
    assert node in data['changes']['nodes_dirty']
    # create a new node after step 1 so only it shows up in the next diff
    node2 = nodenet.create_node("Neuron", None, [10, 10], name="node2")
    runtime.step_nodenet(test_nodenet)
    response = app.post_json('/rpc/get_calculation_state', params={
        'nodenet_uid': test_nodenet,
        'nodenet_diff': {
            'nodespaces': [None],
            'step': 1,
        }
    })
    data = response.json_body['data']['nodenet_diff']
    assert [node2] == list(data['changes']['nodes_dirty'].keys())
def test_get_nodenet_diff(app, test_nodenet, node):
    """get_nodenet_changes reports dirty nodes since a given step."""
    from micropsi_core import runtime
    nodenet = runtime.nodenets[test_nodenet]
    runtime.step_nodenet(test_nodenet)
    response = app.post_json('/rpc/get_nodenet_changes', params={
        'nodenet_uid': test_nodenet,
        'nodespaces': [None],
        'since_step': 0
    })
    data = response.json_body['data']
    assert 'activations' in data
    assert 'changes' in data
    assert node in data['changes']['nodes_dirty']
    # a node created after step 1 is the only change reported since step 1
    node2 = nodenet.create_node("Neuron", None, [10, 10], name="node2")
    runtime.step_nodenet(test_nodenet)
    response = app.post_json('/rpc/get_nodenet_changes', params={
        'nodenet_uid': test_nodenet,
        'nodespaces': [None],
        'since_step': 1
    })
    data = response.json_body['data']
    assert [node2] == list(data['changes']['nodes_dirty'].keys())
def test_get_operations(app):
    """Available operations include autoalign with its selection constraints."""
    resp = app.get_json('/rpc/get_available_operations')
    data = resp.json_body['data']
    for info in data['autoalign']['selection']:
        if info['nodetypes'] == ['Nodespace']:
            assert info['mincount'] == 1
            assert info['maxcount'] == -1
        else:
            assert info['mincount'] == 2
            assert info['maxcount'] == -1
            assert info['nodetypes'] == []
def test_run_operation(app, test_nodenet, node):
    """Running the autoalign operation on the root nodespace succeeds."""
    payload = {
        'nodenet_uid': test_nodenet,
        'name': 'autoalign',
        'parameters': {},
        'selection_uids': [None]
    }
    resp = app.post_json('/rpc/run_operation', payload)
    assert resp.json_body['status'] == 'success'
@pytest.mark.engine("theano_engine")
def test_flow_modules(app, runtime, test_nodenet, resourcepath):
    """End-to-end flow-module test: wire modules between worldadapter datasources
    and datatargets, step the net, and verify doubled data and flow/unflow behavior."""
    import os
    import numpy as np
    with open(os.path.join(resourcepath, 'worlds.json'), 'w') as fp:
        fp.write("""{"worlds":["flowworld.py"],"worldadapters":["flowworld.py"]}""")
    with open(os.path.join(resourcepath, 'flowworld.py'), 'w') as fp:
        fp.write("""
import numpy as np
from micropsi_core.world.world import World
from micropsi_core.world.worldadapter import ArrayWorldAdapter
class FlowWorld(World):
    supported_worldadapters = ["SimpleArrayWA"]
class SimpleArrayWA(ArrayWorldAdapter):
    def __init__(self, world, **kwargs):
        super().__init__(world, **kwargs)
        self.add_flow_datasource("foo", shape=(2,3))
        self.add_flow_datatarget("bar", shape=(2,3))
        self.update_data_sources_and_targets()
    def update_data_sources_and_targets(self):
        for key in self.flow_datatargets:
            self.flow_datatarget_feedbacks[key] = np.copy(self.flow_datatargets[key])
        for key in self.flow_datasources:
            self.flow_datasources[key][:] = np.random.rand(*self.flow_datasources[key].shape)
""")
    with open(os.path.join(resourcepath, 'nodetypes', 'double.py'), 'w') as fp:
        fp.write("""
nodetype_definition = {
    "flow_module": True,
    "implementation": "theano",
    "name": "Double",
    "build_function_name" : "double",
    "inputs": ["inputs"],
    "outputs": ["outputs"],
    "inputdims": [2]
}
def double(inputs, netapi, node, parameters):
    return inputs * 2
""")
    app.set_auth()
    nodenet = runtime.nodenets[test_nodenet]
    netapi = nodenet.netapi
    runtime.reload_code()
    res, wuid = runtime.new_world("FlowWorld", "FlowWorld")
    runtime.set_nodenet_properties(test_nodenet, worldadapter="SimpleArrayWA", world_uid=wuid)
    worldadapter = nodenet.worldadapter_instance
    datasource_uid = nodenet.worldadapter_flow_nodes['datasources']
    datatarget_uid = nodenet.worldadapter_flow_nodes['datatargets']
    # create one flow_module, wire to sources & targets
    result = app.post_json('/rpc/add_node', {
        'nodenet_uid': test_nodenet,
        'type': 'Double',
        'position': [200, 200, 0],
        'nodespace': None,
        'name': 'Double'})
    assert_success(result)
    flow_uid = result.json_body['data']
    # a self-activating Neuron drives the flow module via its sub slot
    source = netapi.create_node("Neuron", None, "Source")
    source.activation = 1
    netapi.link(source, 'gen', source, 'gen')
    netapi.link(source, 'gen', netapi.get_node(flow_uid), 'sub')
    outward = {
        'nodenet_uid': test_nodenet,
        'source_uid': flow_uid,
        'source_output': 'outputs',
        'target_uid': 'worldadapter',
        'target_input': 'bar'
    }
    result = app.post_json('/rpc/flow', outward)
    assert_success(result)
    inward = {
        'nodenet_uid': test_nodenet,
        'source_uid': 'worldadapter',
        'source_output': 'foo',
        'target_uid': flow_uid,
        'target_input': 'inputs',
    }
    result = app.post_json('/rpc/flow', inward)
    assert_success(result)
    sources = np.array(np.random.randn(2, 3))
    worldadapter.flow_datasources['foo'][:] = sources
    runtime.step_nodenet(test_nodenet)
    # one Double module in the path: datatarget feedback is the input times 2
    assert np.all(worldadapter.get_flow_datatarget_feedback('bar') == sources * 2)
    response = app.post_json('/rpc/get_calculation_state', params={'nodenet_uid': test_nodenet, 'nodenet': {'nodespaces': [None]}, 'monitors': True})
    data = response.json_body['data']
    assert data['nodenet']['nodes'][flow_uid]
    assert data['nodenet']['nodes'][flow_uid]['activation'] == 1.0
    assert data['nodenet']['nodes'][datasource_uid]['activation'] == 1.0
    assert data['nodenet']['nodes'][datatarget_uid]['activation'] == 1.0
    # disconnect first flow_module from datatargets, create a second one, and chain them
    result = app.post_json('/rpc/unflow', outward)
    assert_success(result)
    double2 = netapi.create_node("Double", None, "double2")
    netapi.link(source, 'gen', double2, 'sub')
    netapi.flow(double2, 'outputs', 'worldadapter', 'bar')
    result = app.post_json('/rpc/flow', {
        'nodenet_uid': test_nodenet,
        'source_uid': flow_uid,
        'source_output': 'outputs',
        'target_uid': double2.uid,
        'target_input': 'inputs'
    })
    assert_success(result)
    sources[:] = worldadapter.flow_datasources['foo']
    runtime.step_nodenet(test_nodenet)
    # two chained Double modules: input times 4
    assert np.all(worldadapter.get_flow_datatarget_feedback('bar') == sources * 4)
    # disconnect the two flow_modules
    result = app.post_json('/rpc/unflow', {
        'nodenet_uid': test_nodenet,
        'source_uid': flow_uid,
        'source_output': 'outputs',
        'target_uid': double2.uid,
        'target_input': 'inputs'
    })
    runtime.step_nodenet(test_nodenet)
    # broken chain: nothing flows to the datatarget anymore
    assert np.all(worldadapter.get_flow_datatarget_feedback('bar') == np.zeros(worldadapter.flow_datatargets['bar'].shape))
def test_start_behavior(app, default_nodenet):
    """start_behavior runs the nodenet until the step condition is met, then stops."""
    result = app.post_json('/rpc/start_behavior', {'nodenet_uid': default_nodenet, 'condition': {'steps': 3}})
    assert_success(result)
    token = result.json_body['data']['token']
    result = app.get_json('/rpc/get_behavior_state?token=%s' % token)
    assert_success(result)
    # still running right after the start
    assert result.json_body['data']
    import time
    time.sleep(1)
    result = app.get_json('/rpc/get_behavior_state?token=%s' % token)
    assert_success(result)
    from micropsi_core import runtime
    # after the wait, the 3-step condition has been reached and the run ended
    assert not result.json_body['data']
    assert runtime.nodenets[default_nodenet].current_step == 3
    assert not runtime.nodenets[default_nodenet].is_active
def test_abort_behavior(app, default_nodenet):
    """abort_behavior stops a running behavior before its condition is met."""
    result = app.post_json('/rpc/start_behavior', {'nodenet_uid': default_nodenet, 'condition': {'steps': 500}})
    assert_success(result)
    token = result.json_body['data']['token']
    result = app.post_json('/rpc/abort_behavior', params={"token": token})
    assert_success(result)
    result = app.get_json('/rpc/get_behavior_state?token=%s' % token)
    assert_success(result)
    assert not result.json_body['data']
    from micropsi_core import runtime
    # aborted well before the 500-step condition
    assert runtime.nodenets[default_nodenet].current_step < 500
    assert not runtime.nodenets[default_nodenet].is_active
def test_gate_activation_is_persisted(app, runtime, test_nodenet, resourcepath):
    """Gate activations survive a save/revert cycle of the nodenet."""
    import os
    with open(os.path.join(resourcepath, 'nodetypes', 'foobar.py'), 'w') as fp:
        fp.write("""nodetype_definition = {
    "name": "foobar",
    "nodefunction_name": "foobar",
    "slottypes": ["gen", "foo", "bar"],
    "gatetypes": ["gen", "foo", "bar"]
}
def foobar(node, netapi, **_):
    node.get_gate('gen').gate_function(0.1)
    node.get_gate('foo').gate_function(0.3)
    node.get_gate('bar').gate_function(0.5)
""")
    res, err = runtime.reload_code()
    netapi = runtime.nodenets[test_nodenet].netapi
    source = netapi.create_node("Neuron")
    target = netapi.create_node("Neuron")
    netapi.link(source, 'gen', target, 'gen')
    source.activation = 0.73
    foobar = netapi.create_node("foobar")
    ns_uid = netapi.get_nodespace(None).uid
    # step once so the gates fire, then persist and revert to the saved state
    runtime.step_nodenet(test_nodenet)
    runtime.save_nodenet(test_nodenet)
    runtime.revert_nodenet(test_nodenet)
    result = app.post_json('/rpc/get_nodes', {
        'nodenet_uid': test_nodenet,
        'nodespaces': [ns_uid],
        'include_links': True
    })
    data = result.json_body['data']['nodes']
    # target received the source's activation; source itself decayed to 0
    assert round(data[target.uid]['gate_activations']['gen'], 2) == 0.73
    assert round(data[source.uid]['gate_activations']['gen'], 2) == 0
    assert round(data[foobar.uid]['gate_activations']['gen'], 2) == 0.1
    assert round(data[foobar.uid]['gate_activations']['foo'], 2) == 0.3
    assert round(data[foobar.uid]['gate_activations']['bar'], 2) == 0.5
# ----------------------------------------------------------------------
import pytest
import json
import re
def assert_success(response):
    """Assert that *response* is a successful micropsi RPC reply with a data payload."""
    body = response.json_body
    assert body['status'] == 'success'
    assert 'data' in body
def assert_failure(response):
    """Assert that *response* is a failed micropsi RPC reply that still carries data."""
    body = response.json_body
    assert body['status'] == 'error'
    assert 'data' in body
def test_generate_uid(app):
    """The generate_uid RPC must return a purely hexadecimal identifier."""
    response = app.get_json('/rpc/generate_uid')
    assert_success(response)
    # fullmatch instead of match: re.match() only anchors at the start, so a
    # value like "ab!!garbage" would previously have passed this check.
    assert re.fullmatch('[a-f0-9]+', response.json_body['data']) is not None
def test_create_and_invalidate_auth_token(app):
    """Auth tokens appear in the user's session list and vanish once invalidated."""
    response = app.post_json('/rpc/create_auth_token', params={
        "user": "Pytest User",
        # NOTE(review): "<PASSWORD>" is a placeholder left by data
        # sanitization -- restore the real test-user password here.
        "password": "<PASSWORD>"
    })
    assert_success(response)
    from micropsi_server.micropsi_app import usermanager
    token = response.json_body['data']
    # the freshly created token is tracked as an open session for the user
    assert token in usermanager.users['Pytest User']['sessions']
    response = app.post_json('/rpc/invalidate_auth_token', params={
        "token": token
    })
    assert_success(response)
    # invalidating removes the session entry again
    assert token not in usermanager.users['Pytest User']['sessions']
def test_get_nodenet_metadata(app, test_nodenet, node):
response = app.get_json('/rpc/get_nodenet_metadata?nodenet_uid=%s' % test_nodenet)
assert_success(response)
data = response.json_body['data']
assert 'nodetypes' in data
assert 'native_modules' in data
assert 'engine' in data
assert 'nodespaces' in data
assert 'nodes' not in data
assert 'links' not in data
assert data['current_step'] == 0
assert data['uid'] == test_nodenet
def test_new_nodenet(app, engine):
app.set_auth()
response = app.post_json('/rpc/new_nodenet', params={
'name': 'FooBarTestNet',
'engine': engine
})
assert_success(response)
uid = response.json_body['data']
assert uid is not None
response = app.get_json('/rpc/get_nodenet_metadata?nodenet_uid=%s' % uid)
assert_success(response)
assert response.json_body['data']['name'] == 'FooBarTestNet'
assert response.json_body['data']['engine'] == engine
def test_get_available_nodenets(app, test_nodenet):
response = app.get_json('/rpc/get_available_nodenets?user_id=Pytest User')
assert_success(response)
assert test_nodenet in response.json_body['data']
def test_delete_nodenet(app, test_nodenet):
app.set_auth()
response = app.post_json('/rpc/delete_nodenet', params={
"nodenet_uid": test_nodenet
})
assert_success(response)
response = app.get_json('/rpc/get_available_nodenets?user_id=Pytest User')
assert test_nodenet not in response.json_body['data']
def test_set_nodenet_properties(app, test_nodenet, default_world):
app.set_auth()
response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, nodenet_name="new_name", worldadapter="Default", world_uid=default_world))
assert_success(response)
response = app.get_json('/rpc/get_nodenet_metadata?nodenet_uid=%s' % test_nodenet)
data = response.json_body['data']
assert data['name'] == 'new_name'
assert data['worldadapter'] == 'Default'
def test_set_node_state(app, test_nodenet, resourcepath):
import os
app.set_auth()
# create a native module:
nodetype_file = os.path.join(resourcepath, 'nodetypes', 'Test', 'testnode.py')
with open(nodetype_file, 'w') as fp:
fp.write("""nodetype_definition = {
"name": "Testnode",
"slottypes": ["gen", "foo", "bar"],
"nodefunction_name": "testnodefunc",
"gatetypes": ["gen", "foo", "bar"],
"symbol": "t"}
def testnodefunc(netapi, node=None, **prams):
return 17
""")
response = app.post_json('/rpc/reload_code')
assert_success(response)
response = app.post_json('/rpc/add_node', params={
'nodenet_uid': test_nodenet,
'type': 'Testnode',
'position': [23, 23, 12],
'nodespace': None,
'name': ''
})
assert_success(response)
uid = response.json_body['data']
response = app.post_json('/rpc/set_node_state', params={
'nodenet_uid': test_nodenet,
'node_uid': uid,
'state': {'foo': 'bar'}
})
assert_success(response)
response = app.post_json('/rpc/get_nodes', params={"nodenet_uid": test_nodenet})
assert response.json_body['data']['nodes'][uid]['state'] == {'foo': 'bar'}
def test_set_node_activation(app, test_nodenet, node):
response = app.post_json('/rpc/set_node_activation', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'activation': '0.734'
})
assert_success(response)
response = app.post_json('/rpc/get_nodes', params={"nodenet_uid": test_nodenet})
activation = response.json_body['data']['nodes'][node]['activation']
assert float("%.3f" % activation) == 0.734
def test_start_calculation(app, default_nodenet):
app.set_auth()
response = app.post_json('/rpc/start_calculation', params=dict(nodenet_uid=default_nodenet))
assert_success(response)
response = app.get_json('/rpc/get_nodenet_metadata?nodenet_uid=%s' % default_nodenet)
assert response.json_body['data']['is_active']
def test_start_calculation_with_condition(app, default_nodenet):
import time
app.set_auth()
response = app.post_json('/rpc/set_runner_condition', params={
'nodenet_uid': default_nodenet,
'steps': '2'
})
assert_success(response)
assert response.json_body['data']['step'] == 2
response = app.post_json('/rpc/start_calculation', params=dict(nodenet_uid=default_nodenet))
assert_success(response)
time.sleep(1)
response = app.get_json('/rpc/get_nodenet_metadata?nodenet_uid=%s' % default_nodenet)
assert not response.json_body['data']['is_active']
assert response.json_body['data']['current_step'] == 2
response = app.post_json('/rpc/remove_runner_condition', params=dict(nodenet_uid=default_nodenet))
assert_success(response)
def test_get_runner_properties(app):
app.set_auth()
response = app.get_json('/rpc/get_runner_properties')
assert_success(response)
assert 'timestep' in response.json_body['data']
assert 'infguard' in response.json_body['data']
def test_set_runner_properties(app):
app.set_auth()
response = app.post_json('/rpc/set_runner_properties', params=dict(timestep=123, infguard=False))
assert_success(response)
response = app.get_json('/rpc/get_runner_properties')
assert_success(response)
assert response.json_body['data']['timestep'] == 123
assert not response.json_body['data']['infguard']
def test_get_is_calculation_running(app, default_nodenet):
    """A freshly loaded nodenet must not report a running calculation."""
    url = '/rpc/get_is_calculation_running?nodenet_uid=%s' % default_nodenet
    reply = app.get_json(url)
    assert_success(reply)
    assert not reply.json_body['data']
def test_stop_calculation(app, default_nodenet):
app.set_auth()
response = app.post_json('/rpc/start_calculation', params=dict(nodenet_uid=default_nodenet))
assert_success(response)
response = app.get_json('/rpc/get_is_calculation_running?nodenet_uid=%s' % default_nodenet)
assert_success(response)
assert response.json_body['data']
response = app.post_json('/rpc/stop_calculation', params=dict(nodenet_uid=default_nodenet))
assert_success(response)
response = app.get_json('/rpc/get_is_calculation_running?nodenet_uid=%s' % default_nodenet)
assert_success(response)
assert not response.json_body['data']
def test_step_calculation(app, default_nodenet):
    """A single step_calculation call advances current_step from 0 to 1."""
    app.set_auth()
    metadata_url = '/rpc/get_nodenet_metadata?nodenet_uid=%s' % default_nodenet
    reply = app.get_json(metadata_url)
    assert reply.json_body['data']['current_step'] == 0
    reply = app.post_json('/rpc/step_calculation', params={
        "nodenet_uid": default_nodenet
    })
    assert_success(reply)
    # step_calculation returns the new step count directly
    assert reply.json_body['data'] == 1
    reply = app.get_json(metadata_url)
    assert reply.json_body['data']['current_step'] == 1
def test_get_calculation_state(app, test_nodenet, default_world, node):
from time import sleep
app.set_auth()
response = app.get_json('/rpc/get_nodenet_metadata?nodenet_uid=%s' % test_nodenet)
assert response.json_body['data']['current_step'] == 0
response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, nodenet_name="new_name", worldadapter="Default", world_uid=default_world))
response = app.post_json('/rpc/add_gate_monitor', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'gate': 'sub',
})
monitor_uid = response.json_body['data']
response = app.post_json('/rpc/step_calculation', params={
"nodenet_uid": test_nodenet
})
assert_success(response)
response = app.post_json('/rpc/start_calculation', params={
"nodenet_uid": test_nodenet
})
assert_success(response)
sleep(1)
response = app.post_json('/rpc/get_calculation_state', params={
'nodenet_uid': test_nodenet,
'nodenet': {
'nodespaces': [None],
'step': -1,
},
'monitors': {
'logger': ['system', 'world', 'nodenet'],
'after': 0,
'monitor_from': 2,
'monitor_count': 2
},
'world': {
'step': -1
}
})
data = response.json_body['data']
assert data['current_nodenet_step'] > 0
assert data['current_world_step'] > 0
assert data['calculation_running']
assert 'servertime' in data['monitors']['logs']
assert 'logs' in data['monitors']['logs']
assert len(data['monitors']['monitors'][monitor_uid]['values']) == 2
assert test_nodenet in data['world']['agents']
assert data['world']['current_step'] > 0
def test_revert_nodenet(app, test_nodenet, default_world):
app.set_auth()
response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, nodenet_name="new_name", worldadapter="Default", world_uid=default_world))
assert_success(response)
response = app.post_json('/rpc/revert_nodenet', params={
"nodenet_uid": test_nodenet
})
assert_success(response)
response = app.get_json('/rpc/get_nodenet_metadata?nodenet_uid=%s' % test_nodenet)
data = response.json_body['data']
assert data['name'] == 'Testnet'
assert data['worldadapter'] is None
def test_revert_both(app, test_nodenet, default_world):
app.set_auth()
app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, worldadapter="Default", world_uid=default_world))
for i in range(5):
app.post_json('/rpc/step_calculation', params={
"nodenet_uid": test_nodenet
})
res = app.post_json('/rpc/get_calculation_state', params={"nodenet_uid": test_nodenet})
assert res.json_body['data']['current_nodenet_step'] > 0
assert res.json_body['data']['current_world_step'] > 0
app.post_json('/rpc/revert_calculation', params={
"nodenet_uid": test_nodenet
})
res = app.post_json('/rpc/get_calculation_state', params={"nodenet_uid": test_nodenet})
assert res.json_body['data']['current_nodenet_step'] == 0
assert res.json_body['data']['current_world_step'] == 0
def test_revert_and_reload(app, test_nodenet, default_world, resourcepath):
import os
app.set_auth()
app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, worldadapter="Default", world_uid=default_world))
for i in range(5):
app.post_json('/rpc/step_calculation', params={
"nodenet_uid": test_nodenet
})
res = app.post_json('/rpc/get_calculation_state', params={"nodenet_uid": test_nodenet})
nodetype_file = os.path.join(resourcepath, 'nodetypes', 'Test', 'testnode.py')
with open(nodetype_file, 'w') as fp:
fp.write("""nodetype_definition = {
"name": "Testnode",
"slottypes": ["gen", "foo", "bar"],
"nodefunction_name": "testnodefunc",
"gatetypes": ["gen", "foo", "bar"],
"symbol": "t"}
def testnodefunc(netapi, node=None, **prams):\r\n return 17
""")
app.post_json('/rpc/reload_and_revert', params={"nodenet_uid": test_nodenet})
res = app.post_json('/rpc/get_calculation_state', params={"nodenet_uid": test_nodenet})
assert res.json_body['data']['current_nodenet_step'] == 0
assert res.json_body['data']['current_world_step'] == 0
response = app.get_json('/rpc/get_available_node_types?nodenet_uid=%s' % test_nodenet)
assert "Testnode" in response.json_body['data']['native_modules']
def test_save_nodenet(app, test_nodenet, default_world):
app.set_auth()
response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, nodenet_name="new_name", worldadapter="Default", world_uid=default_world))
assert_success(response)
response = app.post_json('/rpc/save_nodenet', params={"nodenet_uid": test_nodenet})
assert_success(response)
response = app.post_json('/rpc/revert_nodenet', params={"nodenet_uid": test_nodenet})
assert_success(response)
response = app.get_json('/rpc/get_nodenet_metadata?nodenet_uid=%s' % test_nodenet)
data = response.json_body['data']
assert data['name'] == 'new_name'
assert data['worldadapter'] == 'Default'
# now delete the nodenet, to get default state back.
app.post_json('/rpc/delete_nodenet', params={"nodenet_uid": test_nodenet})
def test_export_nodenet(app, test_nodenet, node):
response = app.get_json('/rpc/export_nodenet?nodenet_uid=%s' % test_nodenet)
assert_success(response)
data = json.loads(response.json_body['data'])
assert data['name'] == 'Testnet'
assert data['nodes'][node]['type'] == 'Pipe'
assert 'links' in data
def test_import_nodenet(app, test_nodenet, node):
app.set_auth()
response = app.get_json('/rpc/export_nodenet?nodenet_uid=%s' % test_nodenet)
data = json.loads(response.json_body['data'])
del data['uid']
response = app.post_json('/rpc/import_nodenet', params={
'nodenet_data': json.dumps(data)
})
assert_success(response)
uid = response.json_body['data']
assert uid is not None
response = app.get_json('/rpc/get_nodenet_metadata?nodenet_uid=%s' % uid)
assert response.json_body['data']['name'] == data['name']
assert response.json_body['data']['world'] == data['world']
assert response.json_body['data']['worldadapter'] == data['worldadapter']
response = app.post_json('/rpc/get_nodes', params={"nodenet_uid": uid})
assert list(response.json_body['data']['nodes'].keys()) == [node]
response = app.post_json('/rpc/delete_nodenet', params={"nodenet_uid": uid})
def test_merge_nodenet(app, test_nodenet, engine, node):
app.set_auth()
response = app.get_json('/rpc/export_nodenet?nodenet_uid=%s' % test_nodenet)
data = json.loads(response.json_body['data'])
response = app.post_json('/rpc/new_nodenet', params={
'name': 'ImporterNet',
'engine': engine,
'worldadapter': 'Default',
'owner': 'Pytest User'
})
uid = response.json_body['data']
data['uid'] = uid
response = app.post_json('/rpc/merge_nodenet', params={
'nodenet_uid': uid,
'nodenet_data': json.dumps(data)
})
assert_success(response)
response = app.post_json('/rpc/get_nodes', params={"nodenet_uid": uid})
assert len(list(response.json_body['data']['nodes'].keys())) == 1
response = app.get_json('/rpc/get_nodenet_metadata?nodenet_uid=%s' % uid)
assert response.json_body['data']['name'] == 'ImporterNet'
response = app.post_json('/rpc/delete_nodenet', params={"nodenet_uid": uid})
###################################################
##
##
## WORLD
##
##
###################################################
def test_get_available_worlds(app, default_world):
response = app.get_json('/rpc/get_available_worlds')
assert_success(response)
assert default_world in response.json_body['data']
def test_get_available_worlds_for_user(app, default_world):
response = app.get_json('/rpc/get_available_worlds?user_id=Pytest User')
assert_success(response)
assert default_world in response.json_body['data']
# TODO: get_nodenet_properties is missing.
def test_get_world_properties(app, default_world):
response = app.get_json('/rpc/get_world_properties?world_uid=%s' % default_world)
assert_success(response)
data = response.json_body['data']
assert data['uid'] == default_world
assert data['name'] == "World of Pain"
assert 'available_worldadapters' in data
assert 'available_worldobjects' in data
def test_get_worldadapters(app, default_world):
response = app.get_json('/rpc/get_worldadapters?world_uid=%s' % default_world)
assert_success(response)
assert 'Default' in response.json_body['data']
def test_get_world_objects(app, default_world):
response = app.get_json('/rpc/get_world_objects?world_uid=%s' % default_world)
assert_success(response)
assert response.json_body['data'] == {}
def test_add_worldobject(app, default_world):
response = app.post_json('/rpc/add_worldobject', params={
'world_uid': default_world,
'type': 'TestObject',
'position': [10, 10],
'name': 'TestObject'
})
assert_success(response)
uid = response.json_body['data']
assert uid is not None
response = app.get_json('/rpc/get_world_objects?world_uid=%s' % default_world)
assert uid in response.json_body['data']
def test_delete_worldobject(app, default_world):
response = app.post_json('/rpc/add_worldobject', params={
'world_uid': default_world,
'type': 'TestObject',
'position': [10, 10],
'name': 'TestObject'
})
uid = response.json_body['data']
response = app.post_json('/rpc/delete_worldobject', params={
'world_uid': default_world,
'object_uid': uid
})
assert_success(response)
response = app.get_json('/rpc/get_world_objects?world_uid=%s' % default_world)
assert uid not in response.json_body['data']
def test_set_worldobject_properties(app, default_world):
response = app.post_json('/rpc/add_worldobject', params={
'world_uid': default_world,
'type': 'TestObject',
'position': [10, 10],
'name': 'TestObject'
})
uid = response.json_body['data']
response = app.post_json('/rpc/set_worldobject_properties', params={
'world_uid': default_world,
'uid': uid,
'position': [20, 20],
'orientation': 27,
'name': 'edited'
})
assert_success(response)
response = app.get_json('/rpc/get_world_objects?world_uid=%s' % default_world)
data = response.json_body['data']
assert data[uid]['position'] == [20, 20]
assert data[uid]['orientation'] == 27
assert data[uid]['name'] == 'edited'
def test_get_world_view(app, default_world):
response = app.get_json('/rpc/get_world_view?world_uid=%s&step=0' % default_world)
assert_success(response)
assert 'agents' in response.json_body['data']
assert 'objects' in response.json_body['data']
assert response.json_body['data']['current_step'] == 0
assert 'step' not in response.json_body['data']
def test_set_worldagent_properties(app, default_world, default_nodenet):
# create agent.
app.set_auth()
response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=default_nodenet, worldadapter="Default", world_uid=default_world))
response = app.post_json('/rpc/set_worldagent_properties', params={
'world_uid': default_world,
'uid': default_nodenet,
'position': [23, 23],
'orientation': 37,
'name': 'Sepp'
})
assert_success(response)
response = app.get_json('/rpc/get_world_view?world_uid=%s&step=0' % default_world)
data = response.json_body['data']['agents'][default_nodenet]
assert data['position'] == [23, 23]
assert data['orientation'] == 37
assert data['name'] == 'Sepp'
def test_new_world(app):
app.set_auth()
response = app.post_json('/rpc/new_world', params={
'world_name': 'FooBarTestWorld',
'world_type': 'DefaultWorld'
})
assert_success(response)
uid = response.json_body['data']
response = app.get_json('/rpc/get_available_worlds?user_id=Pytest User')
assert uid in response.json_body['data']
def test_get_available_world_types(app):
response = app.get_json('/rpc/get_available_world_types')
assert_success(response)
data = response.json_body['data']
assert 'DefaultWorld' in data
assert data['DefaultWorld']['config'] == []
def test_delete_world(app, default_world):
response = app.post_json('/rpc/delete_world', params={"world_uid": default_world})
assert_success(response)
response = app.get_json('/rpc/get_available_worlds?user_id=Pytest User')
assert default_world not in response.json_body['data']
def test_set_world_properties(app, default_world):
app.set_auth()
response = app.post_json('/rpc/set_world_properties', params={
'world_uid': default_world,
'world_name': 'asdf',
'owner': 'Pytest User'
})
assert_success(response)
response = app.get_json('/rpc/get_world_properties?world_uid=%s' % default_world)
assert response.json_body['data']['name'] == "asdf"
response = app.get_json('/rpc/get_available_worlds')
assert response.json_body['data'][default_world]['name'] == 'asdf'
def test_revert_world(app, default_world):
app.set_auth()
response = app.post_json('/rpc/add_worldobject', params={
'world_uid': default_world,
'type': 'TestObject',
'position': [10, 10],
'name': 'Testtree'
})
response = app.post_json('/rpc/revert_world', params={'world_uid': default_world})
assert_success(response)
response = app.get_json('/rpc/get_world_view?world_uid=%s&step=0' % default_world)
data = response.json_body['data']
assert data['objects'] == {}
def test_save_world(app, default_world):
app.set_auth()
response = app.post_json('/rpc/add_worldobject', params={
'world_uid': default_world,
'type': 'TestObject',
'position': [10, 10],
'name': 'Testtree'
})
uid = response.json_body['data']
response = app.post_json('/rpc/save_world', params={"world_uid": default_world})
assert_success(response)
response = app.post_json('/rpc/revert_world', params={"world_uid": default_world})
response = app.get_json('/rpc/get_world_view?world_uid=%s&step=0' % default_world)
data = response.json_body['data']
assert uid in data['objects']
# delete the world, to get the default state back
app.post_json('/rpc/delete_world', params={"world_uid": default_world})
def test_export_world(app, default_world):
response = app.get_json('/rpc/export_world?world_uid=%s' % default_world)
assert_success(response)
export_data = json.loads(response.json_body['data'])
assert export_data['uid'] == default_world
assert export_data['name'] == 'World of Pain'
assert export_data['objects'] == {}
assert export_data['agents'] == {}
assert export_data['owner'] == 'Pytest User'
assert export_data['current_step'] == 0
assert export_data['world_type'] == 'DefaultWorld'
def test_import_world(app, default_world):
    """Importing an exported world yields a new world with the edited name."""
    response = app.get_json('/rpc/export_world?world_uid=%s' % default_world)
    data = json.loads(response.json_body['data'])
    # drop the uid so the import creates a fresh world instead of colliding
    del data['uid']
    data['name'] = 'Copied Pain'
    response = app.post_json('/rpc/import_world', params={
        'worlddata': json.dumps(data)
    })
    assert_success(response)
    uid = response.json_body['data']
    response = app.get_json('/rpc/export_world?world_uid=%s' % uid)
    data = json.loads(response.json_body['data'])
    assert data['owner'] == 'Pytest User'
    # was '<NAME>' (a data-sanitization placeholder); the name set above
    # before the import is the value the re-export must carry
    assert data['name'] == 'Copied Pain'
    assert data['objects'] == {}
    assert data['agents'] == {}
    # the import created a distinct world, not a reference to the original
    assert uid != default_world
###################################################
##
##
## MONITORS
##
##
###################################################
def test_get_monitor_data_all(app, test_nodenet):
response = app.get_json('/rpc/get_monitor_data?nodenet_uid=%s' % test_nodenet)
assert_success(response)
assert response.json_body['data']['monitors'] == {}
def test_add_gate_monitor(app, test_nodenet, node):
response = app.post_json('/rpc/add_gate_monitor', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'gate': 'sub'
})
assert_success(response)
uid = response.json_body['data']
response = app.get_json('/rpc/get_monitor_data', params={
'nodenet_uid': test_nodenet
})
assert response.json_body['data']['monitors'][uid]['node_uid'] == node
assert response.json_body['data']['monitors'][uid]['target'] == 'sub'
assert response.json_body['data']['monitors'][uid]['type'] == 'gate'
assert response.json_body['data']['monitors'][uid]['values'] == {}
# NOTE(review): stacking two @pytest.mark.engine decorators is suspicious --
# marker lookups like get_closest_marker() only see one of them, so presumably
# only one engine is actually exercised. Verify how the engine fixture reads
# these marks; a single mark carrying both engine names may be intended.
@pytest.mark.engine("dict_engine")
@pytest.mark.engine("numpy_engine")
def test_add_slot_monitor(app, test_nodenet, node):
    """add_slot_monitor registers a named slot monitor with empty values."""
    response = app.post_json('/rpc/add_slot_monitor', params={
        'nodenet_uid': test_nodenet,
        'node_uid': node,
        'slot': 'gen',
        'name': 'Foobar'
    })
    assert_success(response)
    uid = response.json_body['data']
    response = app.get_json('/rpc/get_monitor_data', params={
        'nodenet_uid': test_nodenet
    })
    assert response.json_body['data']['monitors'][uid]['name'] == 'Foobar'
    assert response.json_body['data']['monitors'][uid]['node_uid'] == node
    assert response.json_body['data']['monitors'][uid]['target'] == 'gen'
    assert response.json_body['data']['monitors'][uid]['type'] == 'slot'
    # no calculation step has run yet, so there are no recorded values
    assert response.json_body['data']['monitors'][uid]['values'] == {}
def test_add_link_monitor(app, test_nodenet, node):
response = app.post_json('/rpc/add_link_monitor', params={
'nodenet_uid': test_nodenet,
'source_node_uid': node,
'gate_type': 'gen',
'target_node_uid': node,
'slot_type': 'gen',
'name': 'LinkWeight'
})
assert_success(response)
uid = response.json_body['data']
response = app.get_json('/rpc/get_monitor_data', params={
'nodenet_uid': test_nodenet
})
assert response.json_body['data']['monitors'][uid]['name'] == 'LinkWeight'
assert response.json_body['data']['monitors'][uid]['source_node_uid'] == node
assert response.json_body['data']['monitors'][uid]['gate_type'] == 'gen'
assert response.json_body['data']['monitors'][uid]['target_node_uid'] == node
assert response.json_body['data']['monitors'][uid]['slot_type'] == 'gen'
def test_add_custom_monitor(app, test_nodenet):
response = app.post_json('/rpc/add_custom_monitor', params={
'nodenet_uid': test_nodenet,
'function': 'return len(netapi.get_nodes())',
'name': 'nodecount'
})
assert_success(response)
uid = response.json_body['data']
response = app.get_json('/rpc/get_monitor_data', params={
'nodenet_uid': test_nodenet
})
assert response.json_body['data']['monitors'][uid]['name'] == 'nodecount'
def test_add_group_monitor_by_name(app, test_nodenet):
app.set_auth()
uids = []
for i in range(3):
response = app.post_json('/rpc/add_node', params={
'nodenet_uid': test_nodenet,
'type': 'Neuron',
'position': [23, 23, 12],
'nodespace': None,
'name': 'Testnode %d' % i
})
uids.append(response.json_body['data'])
response = app.post_json('/rpc/add_group_monitor', {
'nodenet_uid': test_nodenet,
'name': 'testmonitor',
'nodespace': None,
'node_name_prefix': 'Testnode',
'gate': 'gen'
})
mon_uid = response.json_body['data']
response = app.get_json('/rpc/get_monitor_data', params={
'nodenet_uid': test_nodenet
})
assert response.json_body['data']['monitors'][mon_uid]['name'] == 'testmonitor'
assert response.json_body['data']['monitors'][mon_uid]['node_uids'] == uids
def test_add_group_monitor_by_ids(app, test_nodenet):
app.set_auth()
uids = []
for i in range(3):
response = app.post_json('/rpc/add_node', params={
'nodenet_uid': test_nodenet,
'type': 'Neuron',
'position': [23, 23, 12],
'nodespace': None,
'name': 'Testnode %d' % i
})
uids.append(response.json_body['data'])
response = app.post_json('/rpc/add_group_monitor', {
'nodenet_uid': test_nodenet,
'name': 'testmonitor',
'nodespace': None,
'node_uids': uids,
'gate': 'gen'
})
mon_uid = response.json_body['data']
response = app.get_json('/rpc/get_monitor_data', params={
'nodenet_uid': test_nodenet
})
assert response.json_body['data']['monitors'][mon_uid]['name'] == 'testmonitor'
assert response.json_body['data']['monitors'][mon_uid]['node_uids'] == uids
def test_remove_monitor(app, test_nodenet, node):
response = app.post_json('/rpc/add_gate_monitor', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'gate': 'gen'
})
uid = response.json_body['data']
response = app.post_json('/rpc/remove_monitor', params={
'nodenet_uid': test_nodenet,
'monitor_uid': uid
})
assert_success(response)
response = app.get_json('/rpc/get_monitor_data', params={
'nodenet_uid': test_nodenet
})
assert uid not in response.json_body['data']['monitors']
def test_clear_monitor(app, test_nodenet, node):
response = app.post_json('/rpc/add_gate_monitor', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'gate': 'gen'
})
uid = response.json_body['data']
response = app.post_json('/rpc/clear_monitor', params={
'nodenet_uid': test_nodenet,
'monitor_uid': uid
})
assert_success(response)
###################################################
##
##
## NODENET
##
##
###################################################
def test_get_nodespace_list(app, test_nodenet, node):
response = app.get_json('/rpc/get_nodespace_list?nodenet_uid=%s' % test_nodenet)
assert_success(response)
rootid = list(response.json_body['data'].keys())[0]
assert response.json_body['data'][rootid]['name'] == 'Root'
assert response.json_body['data'][rootid]['parent'] is None
assert node in response.json_body['data'][rootid]['nodes']
def test_get_nodespace_activations(app, test_nodenet, node):
response = app.post_json('/rpc/get_nodespace_activations', params={
'nodenet_uid': test_nodenet,
'nodespaces': [None],
'last_call_step': -1
})
assert_success(response)
assert node not in response.json_body['data']['activations']
response = app.post_json('/rpc/set_node_activation', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'activation': -1
})
response = app.post_json('/rpc/get_nodespace_activations', params={
'nodenet_uid': test_nodenet,
'nodespaces': [None],
'last_call_step': -1
})
assert response.json_body['data']['activations'][node][0] == -1
def test_get_node(app, test_nodenet, node):
response = app.get_json('/rpc/get_node?nodenet_uid=%s&node_uid=%s' % (test_nodenet, node))
assert_success(response)
assert response.json_body['data']['type'] == 'Pipe'
def test_add_node(app, test_nodenet):
app.set_auth()
response = app.post_json('/rpc/add_node', params={
'nodenet_uid': test_nodenet,
'type': 'Pipe',
'position': [23, 42, 13],
'nodespace': None,
'name': 'N2',
'parameters': {'wait': "3"}
})
assert_success(response)
uid = response.json_body['data']
response = app.get_json('/rpc/get_node?nodenet_uid=%s&node_uid=%s' % (test_nodenet, uid))
assert response.json_body['data']['name'] == 'N2'
assert int(response.json_body['data']['parameters']['wait']) == 3
def test_add_nodespace(app, test_nodenet):
app.set_auth()
response = app.post_json('/rpc/add_nodespace', params={
'nodenet_uid': test_nodenet,
'nodespace': None,
'name': 'nodespace'
})
assert_success(response)
uid = response.json_body['data']
response = app.post_json('/rpc/get_nodes', params={"nodenet_uid": test_nodenet})
assert uid in response.json_body['data']['nodespaces']
assert uid not in response.json_body['data']['nodes']
def test_clone_nodes(app, test_nodenet, node):
    """Cloning a self-linked node copies its name, offsets its position and relinks it to itself."""
    app.set_auth()
    response = app.post_json('/rpc/clone_nodes', params={
        'nodenet_uid': test_nodenet,
        'node_uids': [node],
        'clone_mode': 'all',
        'nodespace': None,
        'offset': [23, 23, 23]
    })
    assert_success(response)
    # use a distinct name for the clone so the `node` fixture argument
    # is not shadowed
    clone = list(response.json_body['data'].values())[0]
    assert clone['name'] == 'N1'
    assert clone['position'] == [33, 33, 33]
    # the original gen->gen self-link must now point at the clone itself
    assert clone['links']['gen'][0]['target_node_uid'] == clone['uid']
def test_set_node_positions(app, test_nodenet, node):
app.set_auth()
response = app.post_json('/rpc/set_node_positions', params={
'nodenet_uid': test_nodenet,
'positions': {node: [42, 23, 11]}
})
assert_success(response)
response = app.get_json('/rpc/get_node?nodenet_uid=%s&node_uid=%s' % (test_nodenet, node))
assert response.json_body['data']['position'] == [42, 23, 11]
def test_set_node_name(app, test_nodenet, node):
app.set_auth()
response = app.post_json('/rpc/set_node_name', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'name': 'changed'
})
assert_success(response)
response = app.get_json('/rpc/get_node?nodenet_uid=%s&node_uid=%s' % (test_nodenet, node))
assert response.json_body['data']['name'] == 'changed'
def test_delete_node(app, test_nodenet, node):
app.set_auth()
response = app.post_json('/rpc/delete_nodes', params={
'nodenet_uid': test_nodenet,
'node_uids': [node]
})
assert_success(response)
response = app.post_json('/rpc/get_nodes', params={"nodenet_uid": test_nodenet})
assert response.json_body['data']['nodes'] == {}
def test_delete_nodespace(app, test_nodenet, node):
app.set_auth()
response = app.post_json('/rpc/add_nodespace', params={
'nodenet_uid': test_nodenet,
'nodespace': None,
'name': 'nodespace'
})
uid = response.json_body['data']
response = app.post_json('/rpc/delete_nodespace', params={
'nodenet_uid': test_nodenet,
'nodespace': uid
})
assert_success(response)
response = app.post_json('/rpc/get_nodes', params={"nodenet_uid": test_nodenet})
assert uid not in response.json_body['data']['nodespaces']
def test_align_nodes(app, test_nodenet):
    """autoalign must move an unlinked node away from its initial position."""
    app.set_auth()
    # TODO: Why does autoalign only move a node if it has no links?
    response = app.post_json('/rpc/add_node', params={
        'nodenet_uid': test_nodenet,
        'type': 'Neuron',
        'position': [5, 5, 0],
        'nodespace': None,
        'name': 'N2'
    })
    uid = response.json_body['data']
    response = app.post_json('/rpc/align_nodes', params={
        'nodenet_uid': test_nodenet,
        'nodespace': None
    })
    assert_success(response)
    response = app.get_json('/rpc/get_node?nodenet_uid=%s&node_uid=%s' % (test_nodenet, uid))
    # compare against the full 3d position the node was created with: the old
    # comparison against [5, 5] could never fail, because a three-element
    # position list is always != a two-element list.
    assert response.json_body['data']['position'] != [5, 5, 0]
def test_get_available_node_types(app, test_nodenet):
response = app.get_json('/rpc/get_available_node_types?nodenet_uid=%s' % test_nodenet)
assert_success(response)
assert 'Pipe' in response.json_body['data']['nodetypes']
assert 'Neuron' in response.json_body['data']['nodetypes']
assert 'Sensor' in response.json_body['data']['nodetypes']
def test_get_available_native_module_types(app, test_nodenet, engine):
response = app.get_json('/rpc/get_available_native_module_types?nodenet_uid=%s' % test_nodenet)
assert_success(response)
assert response.json_body['data'] == {}
def test_set_node_parameters(app, test_nodenet):
app.set_auth()
# add activator
response = app.post_json('/rpc/add_node', params={
'nodenet_uid': test_nodenet,
'type': 'Activator',
'nodespace': None,
'position': [23, 42, 0],
})
uid = response.json_body['data']
response = app.post_json('/rpc/set_node_parameters', params={
'nodenet_uid': test_nodenet,
'node_uid': uid,
'parameters': {'type': 'sub'}
})
assert_success(response)
response = app.get_json('/rpc/get_node?nodenet_uid=%s&node_uid=%s' % (test_nodenet, uid))
assert response.json_body['data']['parameters']['type'] == 'sub'
def test_set_gate_configuration(app, test_nodenet, node):
app.set_auth()
response = app.post_json('/rpc/set_gate_configuration', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'gate_type': 'gen',
'gatefunction': 'sigmoid',
'gatefunction_parameters': {
'bias': '1'
}
})
assert_success(response)
response = app.get_json('/rpc/get_node', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
})
data = response.json_body['data']
assert data['gate_configuration']['gen']['gatefunction'] == 'sigmoid'
assert data['gate_configuration']['gen']['gatefunction_parameters'] == {'bias': 1}
# setting a non-value leads to using the default
response = app.post_json('/rpc/set_gate_configuration', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'gate_type': 'gen',
'gatefunction': 'sigmoid',
'gatefunction_parameters': {
'bias': ''
}
})
response = app.get_json('/rpc/get_node', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
})
data = response.json_body['data']
assert data['gate_configuration']['gen']['gatefunction'] == 'sigmoid'
assert data['gate_configuration']['gen']['gatefunction_parameters'] == {'bias': 0}
def test_get_available_gatefunctions(app, test_nodenet):
response = app.get_json('/rpc/get_available_gatefunctions', params={'nodenet_uid': test_nodenet})
funcs = response.json_body['data']
assert funcs['identity'] == {}
assert funcs['absolute'] == {}
assert funcs['one_over_x'] == {}
assert funcs['sigmoid'] == {'bias': 0}
assert funcs['elu'] == {'bias': 0}
assert funcs['relu'] == {'bias': 0}
assert funcs['threshold'] == {
'minimum': 0,
'maximum': 1,
'amplification': 1,
'threshold': 0
}
def test_get_available_datasources(app, test_nodenet, default_world):
app.set_auth()
# set worldadapter
response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, world_uid=default_world, worldadapter="Default"))
response = app.get_json('/rpc/get_available_datasources?nodenet_uid=%s' % test_nodenet)
assert_success(response)
assert 'static_on' in response.json_body['data']
assert 'static_off' in response.json_body['data']
def test_get_available_datatargets(app, test_nodenet, default_world):
app.set_auth()
response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, world_uid=default_world, worldadapter="Default"))
response = app.get_json('/rpc/get_available_datatargets?nodenet_uid=%s' % test_nodenet)
assert_success(response)
assert 'echo' in response.json_body['data']
def test_bind_datasource_to_sensor(app, test_nodenet, default_world):
app.set_auth()
response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, world_uid=default_world, worldadapter="Default"))
response = app.post_json('/rpc/add_node', params={
'nodenet_uid': test_nodenet,
'type': 'Sensor',
'position': [23, 42, 13],
'nodespace': None,
})
uid = response.json_body['data']
response = app.post_json('/rpc/bind_datasource_to_sensor', params={
'nodenet_uid': test_nodenet,
'sensor_uid': uid,
'datasource': 'static_on'
})
assert_success(response)
response = app.get_json('/rpc/get_node?nodenet_uid=%s&node_uid=%s' % (test_nodenet, uid))
assert response.json_body['data']['parameters']['datasource'] == 'static_on'
def test_bind_datatarget_to_actuator(app, test_nodenet, default_world):
app.set_auth()
response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, world_uid=default_world, worldadapter="Default"))
response = app.post_json('/rpc/add_node', params={
'nodenet_uid': test_nodenet,
'type': 'Actuator',
'position': [23, 42, 13],
'nodespace': None,
})
uid = response.json_body['data']
response = app.post_json('/rpc/bind_datatarget_to_actuator', params={
'nodenet_uid': test_nodenet,
'actuator_uid': uid,
'datatarget': 'echo'
})
assert_success(response)
response = app.get_json('/rpc/get_node?nodenet_uid=%s&node_uid=%s' % (test_nodenet, uid))
assert response.json_body['data']['parameters']['datatarget'] == 'echo'
def test_add_link(app, test_nodenet, node):
app.set_auth()
response = app.post_json('/rpc/add_link', params={
'nodenet_uid': test_nodenet,
'source_node_uid': node,
'gate_type': 'sub',
'target_node_uid': node,
'slot_type': 'gen',
'weight': 0.7
})
assert_success(response)
uid = response.json_body['data']
assert uid is not None
response = app.post_json('/rpc/get_nodes', params={"nodenet_uid": test_nodenet})
data = response.json_body['data']
assert data['nodes'][node]['links']['sub'][0]['target_node_uid'] == node
assert round(data['nodes'][node]['links']['sub'][0]['weight'], 3) == 0.7
def test_set_link_weight(app, test_nodenet, node):
app.set_auth()
response = app.post_json('/rpc/set_link_weight', params={
'nodenet_uid': test_nodenet,
'source_node_uid': node,
'gate_type': "gen",
'target_node_uid': node,
'slot_type': "gen",
'weight': 0.345
})
assert_success(response)
response = app.post_json('/rpc/get_nodes', params={"nodenet_uid": test_nodenet})
data = response.json_body['data']
assert float("%.3f" % data['nodes'][node]['links']['gen'][0]['weight']) == 0.345
def test_get_links_for_nodes(app, test_nodenet, node):
response = app.post_json('/rpc/get_links_for_nodes', params={
'nodenet_uid': test_nodenet,
'node_uids': [node]
})
assert_success(response)
link = list(response.json_body['data']['links'])[0]
assert link['source_node_uid'] == node
def test_delete_link(app, test_nodenet, node):
app.set_auth()
response = app.post_json('/rpc/delete_link', params={
'nodenet_uid': test_nodenet,
'source_node_uid': node,
'gate_type': "gen",
'target_node_uid': node,
'slot_type': "gen"
})
assert_success(response)
response = app.post_json('/rpc/get_nodes', params={"nodenet_uid": test_nodenet})
data = response.json_body['data']
data['nodes'][node]['links'] == {}
def test_reload_code(app, test_nodenet, resourcepath):
app.set_auth()
# create a native module:
import os
nodetype_file = os.path.join(resourcepath, 'nodetypes', 'Test', 'testnode.py')
with open(nodetype_file, 'w') as fp:
fp.write("""nodetype_definition = {
"name": "Testnode",
"slottypes": ["gen", "foo", "bar"],
"nodefunction_name": "testnodefunc",
"gatetypes": ["gen", "foo", "bar"],
"symbol": "t"}
def testnodefunc(netapi, node=None, **prams):\r\n return 17
""")
response = app.post_json('/rpc/reload_code')
assert_success(response)
response = app.get_json('/rpc/get_available_node_types?nodenet_uid=%s' % test_nodenet)
data = response.json_body['data']['native_modules']['Testnode']
assert data['nodefunction_name'] == "testnodefunc"
assert data['gatetypes'] == ['gen', 'foo', 'bar']
assert data['slottypes'] == ['gen', 'foo', 'bar']
assert data['name'] == 'Testnode'
def test_user_prompt_response(app, test_nodenet, resourcepath):
app.set_auth()
# create a native module:
import os
nodetype_file = os.path.join(resourcepath, 'nodetypes', 'Test', 'testnode.py')
with open(nodetype_file, 'w') as fp:
fp.write("""nodetype_definition = {
"name": "Testnode",
"slottypes": ["gen", "foo", "bar"],
"gatetypes": ["gen", "foo", "bar"],
"nodefunction_name": "testnodefunc",
"user_prompts": {
"promptident": {
"callback": "user_prompt_callback",
"parameters": [
{"name": "foo", "description": "value for foo", "default": 23},
{"name": "bar", "description": "value for bar", "default": 42}
]
}
}
}
def testnodefunc(netapi, node=None, **prams):
if not hasattr(node, 'foo'):
node.foo = 0
node.bar = 1
netapi.show_user_prompt(node, "promptident")
node.get_gate("foo").gate_function(node.foo)
node.get_gate("bar").gate_function(node.bar)
def user_prompt_callback(netapi, node, user_prompt_params):
\"\"\"Elaborate explanation as to what this user prompt is for\"\"\"
node.foo = int(user_prompt_params['foo'])
node.bar = int(user_prompt_params['bar'])
""")
response = app.post_json('/rpc/reload_code')
assert_success(response)
response = app.post_json('/rpc/add_node', params={
'nodenet_uid': test_nodenet,
'type': 'Testnode',
'position': [23, 23],
'nodespace': None,
'name': 'Testnode'
})
assert_success(response)
uid = response.json_body['data']
response = app.post_json('/rpc/step_calculation', params={"nodenet_uid": test_nodenet})
assert_success(response)
response = app.post_json('/rpc/get_calculation_state', {'nodenet_uid': test_nodenet})
assert_success(response)
prompt_data = response.json_body['data']['user_prompt']
assert prompt_data['key'] == 'promptident'
assert prompt_data['node']['uid'] == uid
assert len(prompt_data['parameters']) == 2
response = app.post_json('/rpc/user_prompt_response', {
'nodenet_uid': test_nodenet,
'node_uid': uid,
'key': prompt_data['key'],
'parameters': {
'foo': '77',
'bar': '99'
},
'resume_nodenet': False
})
assert_success(response)
response = app.post_json('/rpc/step_calculation', {"nodenet_uid": test_nodenet})
assert_success(response)
response = app.post_json('/rpc/get_nodes', params={"nodenet_uid": test_nodenet})
data = response.json_body['data']
assert data['nodes'][uid]['gate_activations']['foo'] == 77
assert data['nodes'][uid]['gate_activations']['bar'] == 99
def test_set_logging_levels(app):
response = app.post_json('/rpc/set_logging_levels', params={
'logging_levels': {
'system': 'INFO',
'world': 'DEBUG',
}
})
assert_success(response)
import logging
assert logging.getLogger('world').getEffectiveLevel() == logging.DEBUG
assert logging.getLogger('system').getEffectiveLevel() == logging.INFO
def test_get_logger_messages(app, default_nodenet):
response = app.get_json('/rpc/get_logger_messages?logger=system)')
assert_success(response)
assert 'servertime' in response.json_body['data']
assert type(response.json_body['data']['logs']) == list
def test_get_nodenet_logger_messages(app, test_nodenet):
import logging
logging.getLogger('agent.%s' % test_nodenet).warning('asdf')
logging.getLogger('system').warning('foobar')
response = app.get_json('/rpc/get_logger_messages?logger=system&logger=agent.%s' % test_nodenet)
assert 'servertime' in response.json_body['data']
netlog = syslog = None
for item in response.json_body['data']['logs']:
if item['logger'] == 'system':
syslog = item
elif item['logger'].startswith('agent'):
netlog = item
assert netlog['step'] == 0
assert syslog['step'] is None
def test_get_monitoring_info(app, test_nodenet):
response = app.get_json('/rpc/get_monitoring_info?nodenet_uid=%s&logger=system&logger=world&monitor_from=3&monitor_count=10' % test_nodenet)
assert_success(response)
assert 'logs' in response.json_body['data']
assert 'current_step' in response.json_body['data']
assert response.json_body['data']['monitors'] == {}
assert 'servertime' in response.json_body['data']['logs']
assert response.json_body['data']['logs']['logs'] == []
@pytest.mark.engine("theano_engine")
def test_get_benchmark_info(app, test_nodenet):
from unittest import mock
with mock.patch("micropsi_core.benchmark_system.benchmark_system", return_value="testbench") as benchmock:
response = app.get_json('/rpc/benchmark_info')
assert_success(response)
assert response.json_body['data']['benchmark'] == 'testbench'
def test_400(app):
app.set_auth()
response = app.get_json('/rpc/get_nodenet_metadata?foobar', expect_errors=True)
assert_failure(response)
assert "unexpected keyword argument" in response.json_body['data']
def test_401(app, default_nodenet):
app.unset_auth()
response = app.post_json('/rpc/delete_nodenet', params={"nodenet_uid": default_nodenet}, expect_errors=True)
assert_failure(response)
assert 'Insufficient permissions' in response.json_body['data']
def test_404(app):
response = app.get_json('/rpc/notthere?foo=bar', expect_errors=True)
assert_failure(response)
assert response.json_body['data'] == "Function not found"
def test_405(app, default_nodenet):
response = app.get_json('/rpc/delete_nodenet?nodenet_uid=%s' % default_nodenet, expect_errors=True)
assert_failure(response)
assert response.json_body['data'] == "Method not allowed"
def test_500(app):
response = app.get_json('/rpc/generate_uid?foo=bar', expect_errors=True)
assert_failure(response)
assert "unexpected keyword argument" in response.json_body['data']
assert response.json_body['traceback'] is not None
def test_get_recipes(app, default_nodenet, resourcepath):
app.set_auth()
import os
os.mkdir(os.path.join(resourcepath, 'recipes', 'Test'))
recipe_file = os.path.join(resourcepath, 'recipes', 'Test', 'recipes.py')
with open(recipe_file, 'w') as fp:
fp.write("""
def foobar(netapi, quatsch=23):
return {'quatsch': quatsch}
""")
response = app.post_json('/rpc/reload_code')
response = app.get_json('/rpc/get_available_recipes')
data = response.json_body['data']
assert 'foobar' in data
assert len(data['foobar']['parameters']) == 1
assert data['foobar']['parameters'][0]['name'] == 'quatsch'
assert data['foobar']['parameters'][0]['default'] == 23
def test_run_recipes(app, test_nodenet, resourcepath):
app.set_auth()
import os
os.mkdir(os.path.join(resourcepath, 'recipes', 'Test'))
recipe_file = os.path.join(resourcepath, 'recipes', 'Test', 'recipes.py')
with open(recipe_file, 'w') as fp:
fp.write("""
def foobar(netapi, quatsch=23):
return {'quatsch': quatsch}
""")
response = app.post_json('/rpc/reload_code')
response = app.post_json('/rpc/run_recipe', {
'nodenet_uid': test_nodenet,
'name': 'foobar',
'parameters': {
'quatsch': ''
}
})
data = response.json_body['data']
assert data['quatsch'] == 23
def test_get_agent_dashboard(app, test_nodenet, node, default_world):
app.set_auth()
response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, worldadapter="Default", world_uid=default_world))
response = app.get_json('/rpc/get_agent_dashboard?nodenet_uid=%s' % test_nodenet)
data = response.json_body['data']
assert data['count_nodes'] == 1
def test_nodenet_data_structure(app, test_nodenet, resourcepath, node):
app.set_auth()
import os
nodetype_file = os.path.join(resourcepath, 'nodetypes', 'Test', 'testnode.py')
with open(nodetype_file, 'w') as fp:
fp.write("""nodetype_definition = {
"name": "Testnode",
"slottypes": ["gen", "foo", "bar"],
"nodefunction_name": "testnodefunc",
"gatetypes": ["gen", "foo", "bar"],
"symbol": "t"}
def testnodefunc(netapi, node=None, **prams):\r\n return 17
""")
response = app.post_json('/rpc/reload_code')
response = app.post_json('/rpc/add_nodespace', params={
'nodenet_uid': test_nodenet,
'nodespace': None,
'name': 'Test-Node-Space'
})
nodespace_uid = response.json_body['data']
response = app.post_json('/rpc/add_node', params={
'nodenet_uid': test_nodenet,
'type': 'Pipe',
'position': [42, 42, 23],
'nodespace': nodespace_uid,
'name': 'N2'
})
n2_uid = response.json_body['data']
response = app.post_json('/rpc/add_gate_monitor', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'gate': 'gen',
'name': 'Testmonitor',
'color': '#332211'
})
monitor_uid = response.json_body['data']
response = app.get_json('/rpc/get_nodenet_metadata?nodenet_uid=%s' % test_nodenet)
metadata = response.json_body['data']
response_1 = app.post_json('/rpc/get_calculation_state', params={'nodenet_uid': test_nodenet, 'nodenet': {'nodespaces': [None]}, 'monitors': True})
response = app.post_json('/rpc/save_nodenet', params={"nodenet_uid": test_nodenet})
response = app.post_json('/rpc/revert_nodenet', params={"nodenet_uid": test_nodenet})
response_2 = app.post_json('/rpc/get_calculation_state', params={'nodenet_uid': test_nodenet, 'nodenet': {'nodespaces': [None]}, 'monitors': True})
assert response_1.json_body['data']['nodenet'] == response_2.json_body['data']['nodenet']
assert response_1.json_body['data']['monitors']['monitors'] == response_2.json_body['data']['monitors']['monitors']
data = response_2.json_body['data']
# Monitors
response = app.get_json('/rpc/get_monitor_data?nodenet_uid=%s' % test_nodenet)
monitor_data = response.json_body['data']['monitors'][monitor_uid]
assert data['monitors']['monitors'][monitor_uid]['name'] == 'Testmonitor'
assert data['monitors']['monitors'][monitor_uid]['node_uid'] == node
assert data['monitors']['monitors'][monitor_uid]['target'] == 'gen'
assert data['monitors']['monitors'][monitor_uid]['type'] == 'gate'
assert data['monitors']['monitors'][monitor_uid]['uid'] == monitor_uid
assert data['monitors']['monitors'][monitor_uid]['values'] == {}
assert data['monitors']['monitors'][monitor_uid]['color'] == '#332211'
assert data['monitors']['monitors'][monitor_uid] == monitor_data
# Nodes
response = app.get_json('/rpc/get_node?nodenet_uid=%s&node_uid=%s' % (test_nodenet, node))
node_data = response.json_body['data']
assert node in data['nodenet']['nodes']
assert n2_uid not in data['nodenet']['nodes']
assert nodespace_uid not in data['nodenet']['nodes']
# gates
for key in ['gen', 'por', 'ret', 'sub', 'sur', 'cat', 'exp']:
assert data['nodenet']['nodes'][node]['gate_activations'][key] == 0
assert data['nodenet']['nodes'][node]['parameters']['expectation'] == 1
assert data['nodenet']['nodes'][node]['parameters']['wait'] == 10
assert data['nodenet']['nodes'][node]['position'] == [10, 10, 10]
assert data['nodenet']['nodes'][node]['type'] == "Pipe"
assert 'links' not in data
assert node_data['parameters']['expectation'] == 1
assert node_data['parameters']['wait'] == 10
assert node_data['position'] == [10, 10, 10]
assert node_data['type'] == "Pipe"
# Links
for link in data['nodenet']['nodes'][node]['links']['gen']:
assert link['weight'] == 1
assert link['target_node_uid'] == node
assert link['target_slot_name'] == 'gen'
# Nodespaces
# assert data['nodenet']['nodespaces'][nodespace_uid]['index'] == 3
assert data['nodenet']['nodespaces'][nodespace_uid]['name'] == 'Test-Node-Space'
# assert data['nodenet']['nodespaces'][nodespace_uid]['parent_nodespace'] == 'Root'
# Nodetypes
response = app.get_json('/rpc/get_available_node_types?nodenet_uid=%s' % test_nodenet)
node_type_data = response.json_body['data']
assert 'gatetypes' not in metadata['nodetypes']['Comment']
assert 'slottypes' not in metadata['nodetypes']['Comment']
for key in ['Pipe', 'Neuron', 'Actuator']:
assert 'gatetypes' in metadata['nodetypes'][key]
assert 'slottypes' in metadata['nodetypes'][key]
assert 'slottypes' in metadata['nodetypes']['Activator']
assert 'gatetypes' not in metadata['nodetypes']['Activator']
assert 'slottypes' not in metadata['nodetypes']['Sensor']
assert 'gatetypes' in metadata['nodetypes']['Sensor']
assert metadata['nodetypes'] == node_type_data['nodetypes']
# Native Modules
response = app.get_json('/rpc/get_available_native_module_types?nodenet_uid=%s' % test_nodenet)
native_module_data = response.json_body['data']
assert metadata['native_modules']['Testnode']['gatetypes'] == ['gen', 'foo', 'bar']
assert metadata['native_modules']['Testnode']['name'] == 'Testnode'
assert metadata['native_modules']['Testnode']['nodefunction_name'] == 'testnodefunc'
assert metadata['native_modules']['Testnode']['slottypes'] == ['gen', 'foo', 'bar']
assert metadata['native_modules']['Testnode']['symbol'] == 't'
assert metadata['native_modules'] == native_module_data
# Nodenet
assert metadata['current_step'] == 0 # TODO:
assert 'step' not in data # current_step && step?
assert metadata['version'] == 2
assert metadata['world'] is None
assert metadata['worldadapter'] is None
def test_get_state_diff(app, test_nodenet, node):
from micropsi_core import runtime
nodenet = runtime.nodenets[test_nodenet]
runtime.step_nodenet(test_nodenet)
response = app.post_json('/rpc/get_calculation_state', params={
'nodenet_uid': test_nodenet,
'nodenet_diff': {
'nodespaces': [None],
'step': 0,
}
})
data = response.json_body['data']['nodenet_diff']
assert 'activations' in data
assert 'changes' in data
assert node in data['changes']['nodes_dirty']
node2 = nodenet.create_node("Neuron", None, [10, 10], name="node2")
runtime.step_nodenet(test_nodenet)
response = app.post_json('/rpc/get_calculation_state', params={
'nodenet_uid': test_nodenet,
'nodenet_diff': {
'nodespaces': [None],
'step': 1,
}
})
data = response.json_body['data']['nodenet_diff']
assert [node2] == list(data['changes']['nodes_dirty'].keys())
def test_get_nodenet_diff(app, test_nodenet, node):
from micropsi_core import runtime
nodenet = runtime.nodenets[test_nodenet]
runtime.step_nodenet(test_nodenet)
response = app.post_json('/rpc/get_nodenet_changes', params={
'nodenet_uid': test_nodenet,
'nodespaces': [None],
'since_step': 0
})
data = response.json_body['data']
assert 'activations' in data
assert 'changes' in data
assert node in data['changes']['nodes_dirty']
node2 = nodenet.create_node("Neuron", None, [10, 10], name="node2")
runtime.step_nodenet(test_nodenet)
response = app.post_json('/rpc/get_nodenet_changes', params={
'nodenet_uid': test_nodenet,
'nodespaces': [None],
'since_step': 1
})
data = response.json_body['data']
assert [node2] == list(data['changes']['nodes_dirty'].keys())
def test_get_operations(app):
response = app.get_json('/rpc/get_available_operations')
data = response.json_body['data']
for selectioninfo in data['autoalign']['selection']:
if selectioninfo['nodetypes'] == ['Nodespace']:
assert selectioninfo['mincount'] == 1
assert selectioninfo['maxcount'] == -1
else:
assert selectioninfo['mincount'] == 2
assert selectioninfo['maxcount'] == -1
assert selectioninfo['nodetypes'] == []
def test_run_operation(app, test_nodenet, node):
response = app.post_json('/rpc/run_operation', {
'nodenet_uid': test_nodenet,
'name': 'autoalign',
'parameters': {},
'selection_uids': [None]
})
assert response.json_body['status'] == 'success'
@pytest.mark.engine("theano_engine")
def test_flow_modules(app, runtime, test_nodenet, resourcepath):
import os
import numpy as np
with open(os.path.join(resourcepath, 'worlds.json'), 'w') as fp:
fp.write("""{"worlds":["flowworld.py"],"worldadapters":["flowworld.py"]}""")
with open(os.path.join(resourcepath, 'flowworld.py'), 'w') as fp:
fp.write("""
import numpy as np
from micropsi_core.world.world import World
from micropsi_core.world.worldadapter import ArrayWorldAdapter
class FlowWorld(World):
supported_worldadapters = ["SimpleArrayWA"]
class SimpleArrayWA(ArrayWorldAdapter):
def __init__(self, world, **kwargs):
super().__init__(world, **kwargs)
self.add_flow_datasource("foo", shape=(2,3))
self.add_flow_datatarget("bar", shape=(2,3))
self.update_data_sources_and_targets()
def update_data_sources_and_targets(self):
for key in self.flow_datatargets:
self.flow_datatarget_feedbacks[key] = np.copy(self.flow_datatargets[key])
for key in self.flow_datasources:
self.flow_datasources[key][:] = np.random.rand(*self.flow_datasources[key].shape)
""")
with open(os.path.join(resourcepath, 'nodetypes', 'double.py'), 'w') as fp:
fp.write("""
nodetype_definition = {
"flow_module": True,
"implementation": "theano",
"name": "Double",
"build_function_name" : "double",
"inputs": ["inputs"],
"outputs": ["outputs"],
"inputdims": [2]
}
def double(inputs, netapi, node, parameters):
return inputs * 2
""")
app.set_auth()
nodenet = runtime.nodenets[test_nodenet]
netapi = nodenet.netapi
runtime.reload_code()
res, wuid = runtime.new_world("FlowWorld", "FlowWorld")
runtime.set_nodenet_properties(test_nodenet, worldadapter="SimpleArrayWA", world_uid=wuid)
worldadapter = nodenet.worldadapter_instance
datasource_uid = nodenet.worldadapter_flow_nodes['datasources']
datatarget_uid = nodenet.worldadapter_flow_nodes['datatargets']
# create one flow_module, wire to sources & targets
result = app.post_json('/rpc/add_node', {
'nodenet_uid': test_nodenet,
'type': 'Double',
'position': [200, 200, 0],
'nodespace': None,
'name': 'Double'})
assert_success(result)
flow_uid = result.json_body['data']
source = netapi.create_node("Neuron", None, "Source")
source.activation = 1
netapi.link(source, 'gen', source, 'gen')
netapi.link(source, 'gen', netapi.get_node(flow_uid), 'sub')
outward = {
'nodenet_uid': test_nodenet,
'source_uid': flow_uid,
'source_output': 'outputs',
'target_uid': 'worldadapter',
'target_input': 'bar'
}
result = app.post_json('/rpc/flow', outward)
assert_success(result)
inward = {
'nodenet_uid': test_nodenet,
'source_uid': 'worldadapter',
'source_output': 'foo',
'target_uid': flow_uid,
'target_input': 'inputs',
}
result = app.post_json('/rpc/flow', inward)
assert_success(result)
sources = np.array(np.random.randn(2, 3))
worldadapter.flow_datasources['foo'][:] = sources
runtime.step_nodenet(test_nodenet)
assert np.all(worldadapter.get_flow_datatarget_feedback('bar') == sources * 2)
response = app.post_json('/rpc/get_calculation_state', params={'nodenet_uid': test_nodenet, 'nodenet': {'nodespaces': [None]}, 'monitors': True})
data = response.json_body['data']
assert data['nodenet']['nodes'][flow_uid]
assert data['nodenet']['nodes'][flow_uid]['activation'] == 1.0
assert data['nodenet']['nodes'][datasource_uid]['activation'] == 1.0
assert data['nodenet']['nodes'][datatarget_uid]['activation'] == 1.0
# disconnect first flow_module from datatargets, create a second one, and chain them
result = app.post_json('/rpc/unflow', outward)
assert_success(result)
double2 = netapi.create_node("Double", None, "double2")
netapi.link(source, 'gen', double2, 'sub')
netapi.flow(double2, 'outputs', 'worldadapter', 'bar')
result = app.post_json('/rpc/flow', {
'nodenet_uid': test_nodenet,
'source_uid': flow_uid,
'source_output': 'outputs',
'target_uid': double2.uid,
'target_input': 'inputs'
})
assert_success(result)
sources[:] = worldadapter.flow_datasources['foo']
runtime.step_nodenet(test_nodenet)
assert np.all(worldadapter.get_flow_datatarget_feedback('bar') == sources * 4)
# disconnect the two flow_modules
result = app.post_json('/rpc/unflow', {
'nodenet_uid': test_nodenet,
'source_uid': flow_uid,
'source_output': 'outputs',
'target_uid': double2.uid,
'target_input': 'inputs'
})
runtime.step_nodenet(test_nodenet)
assert np.all(worldadapter.get_flow_datatarget_feedback('bar') == np.zeros(worldadapter.flow_datatargets['bar'].shape))
def test_start_behavior(app, default_nodenet):
result = app.post_json('/rpc/start_behavior', {'nodenet_uid': default_nodenet, 'condition': {'steps': 3}})
assert_success(result)
token = result.json_body['data']['token']
result = app.get_json('/rpc/get_behavior_state?token=%s' % token)
assert_success(result)
assert result.json_body['data']
import time
time.sleep(1)
result = app.get_json('/rpc/get_behavior_state?token=%s' % token)
assert_success(result)
from micropsi_core import runtime
assert not result.json_body['data']
assert runtime.nodenets[default_nodenet].current_step == 3
assert not runtime.nodenets[default_nodenet].is_active
def test_abort_behavior(app, default_nodenet):
result = app.post_json('/rpc/start_behavior', {'nodenet_uid': default_nodenet, 'condition': {'steps': 500}})
assert_success(result)
token = result.json_body['data']['token']
result = app.post_json('/rpc/abort_behavior', params={"token": token})
assert_success(result)
result = app.get_json('/rpc/get_behavior_state?token=%s' % token)
assert_success(result)
assert not result.json_body['data']
from micropsi_core import runtime
assert runtime.nodenets[default_nodenet].current_step < 500
assert not runtime.nodenets[default_nodenet].is_active
def test_gate_activation_is_persisted(app, runtime, test_nodenet, resourcepath):
import os
with open(os.path.join(resourcepath, 'nodetypes', 'foobar.py'), 'w') as fp:
fp.write("""nodetype_definition = {
"name": "foobar",
"nodefunction_name": "foobar",
"slottypes": ["gen", "foo", "bar"],
"gatetypes": ["gen", "foo", "bar"]
}
def foobar(node, netapi, **_):
node.get_gate('gen').gate_function(0.1)
node.get_gate('foo').gate_function(0.3)
node.get_gate('bar').gate_function(0.5)
""")
res, err = runtime.reload_code()
netapi = runtime.nodenets[test_nodenet].netapi
source = netapi.create_node("Neuron")
target = netapi.create_node("Neuron")
netapi.link(source, 'gen', target, 'gen')
source.activation = 0.73
foobar = netapi.create_node("foobar")
ns_uid = netapi.get_nodespace(None).uid
runtime.step_nodenet(test_nodenet)
runtime.save_nodenet(test_nodenet)
runtime.revert_nodenet(test_nodenet)
result = app.post_json('/rpc/get_nodes', {
'nodenet_uid': test_nodenet,
'nodespaces': [ns_uid],
'include_links': True
})
data = result.json_body['data']['nodes']
assert round(data[target.uid]['gate_activations']['gen'], 2) == 0.73
assert round(data[source.uid]['gate_activations']['gen'], 2) == 0
assert round(data[foobar.uid]['gate_activations']['gen'], 2) == 0.1
assert round(data[foobar.uid]['gate_activations']['foo'], 2) == 0.3
assert round(data[foobar.uid]['gate_activations']['bar'], 2) == 0.5
| en | 0.373152 | # create a native module: nodetype_definition = { "name": "Testnode", "slottypes": ["gen", "foo", "bar"], "nodefunction_name": "testnodefunc", "gatetypes": ["gen", "foo", "bar"], "symbol": "t"} def testnodefunc(netapi, node=None, **prams): return 17 nodetype_definition = { "name": "Testnode", "slottypes": ["gen", "foo", "bar"], "nodefunction_name": "testnodefunc", "gatetypes": ["gen", "foo", "bar"], "symbol": "t"} def testnodefunc(netapi, node=None, **prams):\r\n return 17 # now delete the nodenet, to get default state back. ################################################### ## ## ## WORLD ## ## ################################################### # TODO: get_nodenet_properties is missing. # create agent. # delete the world, to get the default state back ################################################### ## ## ## MONITORS ## ## ################################################### ################################################### ## ## ## NODENET ## ## ################################################### # TODO: Why does autoalign only move a node if it has no links? 
# add activator # setting a non-value leads to using the default # set worldadapter # create a native module: nodetype_definition = { "name": "Testnode", "slottypes": ["gen", "foo", "bar"], "nodefunction_name": "testnodefunc", "gatetypes": ["gen", "foo", "bar"], "symbol": "t"} def testnodefunc(netapi, node=None, **prams):\r\n return 17 # create a native module: nodetype_definition = { "name": "Testnode", "slottypes": ["gen", "foo", "bar"], "gatetypes": ["gen", "foo", "bar"], "nodefunction_name": "testnodefunc", "user_prompts": { "promptident": { "callback": "user_prompt_callback", "parameters": [ {"name": "foo", "description": "value for foo", "default": 23}, {"name": "bar", "description": "value for bar", "default": 42} ] } } } def testnodefunc(netapi, node=None, **prams): if not hasattr(node, 'foo'): node.foo = 0 node.bar = 1 netapi.show_user_prompt(node, "promptident") node.get_gate("foo").gate_function(node.foo) node.get_gate("bar").gate_function(node.bar) def user_prompt_callback(netapi, node, user_prompt_params): \"\"\"Elaborate explanation as to what this user prompt is for\"\"\" node.foo = int(user_prompt_params['foo']) node.bar = int(user_prompt_params['bar']) def foobar(netapi, quatsch=23): return {'quatsch': quatsch} def foobar(netapi, quatsch=23): return {'quatsch': quatsch} nodetype_definition = { "name": "Testnode", "slottypes": ["gen", "foo", "bar"], "nodefunction_name": "testnodefunc", "gatetypes": ["gen", "foo", "bar"], "symbol": "t"} def testnodefunc(netapi, node=None, **prams):\r\n return 17 # Monitors # Nodes # gates # Links # Nodespaces # assert data['nodenet']['nodespaces'][nodespace_uid]['index'] == 3 # assert data['nodenet']['nodespaces'][nodespace_uid]['parent_nodespace'] == 'Root' # Nodetypes # Native Modules # Nodenet # TODO: # current_step && step? 
{"worlds":["flowworld.py"],"worldadapters":["flowworld.py"]} import numpy as np from micropsi_core.world.world import World from micropsi_core.world.worldadapter import ArrayWorldAdapter class FlowWorld(World): supported_worldadapters = ["SimpleArrayWA"] class SimpleArrayWA(ArrayWorldAdapter): def __init__(self, world, **kwargs): super().__init__(world, **kwargs) self.add_flow_datasource("foo", shape=(2,3)) self.add_flow_datatarget("bar", shape=(2,3)) self.update_data_sources_and_targets() def update_data_sources_and_targets(self): for key in self.flow_datatargets: self.flow_datatarget_feedbacks[key] = np.copy(self.flow_datatargets[key]) for key in self.flow_datasources: self.flow_datasources[key][:] = np.random.rand(*self.flow_datasources[key].shape) nodetype_definition = { "flow_module": True, "implementation": "theano", "name": "Double", "build_function_name" : "double", "inputs": ["inputs"], "outputs": ["outputs"], "inputdims": [2] } def double(inputs, netapi, node, parameters): return inputs * 2 # create one flow_module, wire to sources & targets # disconnect first flow_module from datatargets, create a second one, and chain them # disconnect the two flow_modules nodetype_definition = { "name": "foobar", "nodefunction_name": "foobar", "slottypes": ["gen", "foo", "bar"], "gatetypes": ["gen", "foo", "bar"] } def foobar(node, netapi, **_): node.get_gate('gen').gate_function(0.1) node.get_gate('foo').gate_function(0.3) node.get_gate('bar').gate_function(0.5) | 2.059968 | 2 |
exercise/migrations/0019_auto_20191107_1613.py | Arpit8081/Phishtray_Edited_Version | 2 | 6622706 | # Generated by Django 2.2.5 on 2019-11-07 16:13
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the per-email profile image URL columns."""

    dependencies = [
        ('exercise', '0018_auto_20191104_1647'),
    ]
    operations = [
        # Profile image URLs are removed from ExerciseEmail; presumably the
        # image now lives on a related profile model -- TODO confirm.
        migrations.RemoveField(
            model_name='exerciseemail',
            name='from_profile_img_url',
        ),
        migrations.RemoveField(
            model_name='exerciseemail',
            name='to_profile_img_url',
        ),
    ]
| # Generated by Django 2.2.5 on 2019-11-07 16:13
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('exercise', '0018_auto_20191104_1647'),
]
operations = [
migrations.RemoveField(
model_name='exerciseemail',
name='from_profile_img_url',
),
migrations.RemoveField(
model_name='exerciseemail',
name='to_profile_img_url',
),
]
| en | 0.793059 | # Generated by Django 2.2.5 on 2019-11-07 16:13 | 1.360637 | 1 |
scripts/subpro/parent.py | MadhuNimmo/jalangi2 | 0 | 6622707 | import subprocess
# Run the child script in a separate Python process, capturing its
# stdout/stderr so the CompletedProcess holds both streams.
# NOTE(review): interpreter name and script path are hard-coded -- confirm
# they match the deployment environment.
p = subprocess.run(["python", "/home/anon/jalangi2/scripts/subpro/child.py"], capture_output=True)
print(p)
p = subprocess.run(["python", "/home/anon/jalangi2/scripts/subpro/child.py"], capture_output=True)
print(p) | none | 1 | 1.974646 | 2 | |
main.py | phtaedrus/salad | 0 | 6622708 | import csv
import pandas as pd
import numpy as np
import sqlalchemy
from sqlalchemy import Column, Integer, String, schema
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.types import Date, Text, Enum
pd.set_option('display.max_rows', 500)
FILE = "challenge_dataset.csv"
# sqlAlchemy declarative config
Base = declarative_base()
engine = create_engine('sqlite:///sweetgreen_db', echo=False)
def get_df(file) -> pd.DataFrame:
    """Read a CSV file (path or file-like object) into a DataFrame.

    Bug fix: the original opened the module-level FILE constant and never
    used the handle, so the function crashed when FILE was absent even
    though the `file` argument was valid.  pandas opens and closes the
    file itself, so no explicit handle is needed.

    Args:
        file: path or file-like object accepted by ``pd.read_csv``.

    Returns:
        The parsed DataFrame.
    """
    return pd.read_csv(file)
def transform_df(df):
    """Parse the date columns in place and derive ``age_at_creation``.

    Age is the span between birth_date and created_at, expressed in
    365-day years and rounded to one decimal place.
    """
    # TODO format all of the columns
    for col in ('created_at', 'birth_date'):
        df[col] = pd.to_datetime(df[col])
    lifespan_days = (df['created_at'] - df['birth_date']).dt.days
    df['age_at_creation'] = round(lifespan_days / 365, 1)
    return df
def validate_data(df):
    """Flag rows that pass validation in a new boolean ``Passed`` column.

    A row passes when status is 'active', first_name is present, and
    age_at_creation is at least 18.  Rows containing any NaN are printed
    for inspection.

    Bug fix: the original used ``df['first_name'] is not np.NaN``, an
    identity test on the whole Series object that is always True, so rows
    with a missing first name were never rejected.  Per-element
    ``notna()`` is required for that check.
    """
    rows_with_nan = df[df.isna().any(axis=1)]
    df['Passed'] = (
        (df['status'] == 'active')
        & df['first_name'].notna()
        & (df['age_at_creation'] >= 18)
    )
    print(df)
    print(rows_with_nan)
    print(df)
    return df
# TODO create logic to check birthdate against when it was created.
def bday_verify(df):
    """Placeholder for birth-date vs. creation-date verification.

    Currently only echoes the input frame; see the TODO above the
    definition for the intended check.
    """
    print(df)
class PostMetricsAndComments(Base):
    """ORM model for the ``users_new`` table.

    NOTE(review): the class name looks like a leftover from another
    project -- it maps user records, not post metrics; confirm and rename.
    """
    __tablename__ = 'users_new'
    id = Column(Integer, primary_key=True)
    first_name = Column(String(255), nullable=False)
    last_name = Column(String(255), nullable=False)
    # NOTE(review): Integer phone column with a dangling comma looks
    # unfinished; phone numbers normally need a String to keep leading
    # zeros and formatting -- confirm intent.
    phone = Column(Integer, )
    status = Column(Enum("active", "cancelled"), nullable=False)
    birth_date = Column(Date, nullable=False)
    created_at = Column(Date, nullable=False)
# Create all declared tables if they do not already exist.
try:
    Base.metadata.create_all(engine)
    print('Database Schema Instantiated')
# Bug fix: the original caught `sqlalchemy.except_.DatabaseError`, but
# `sqlalchemy.except_` is the SQL EXCEPT query helper, not the exceptions
# module -- evaluating it in the except clause would itself raise
# AttributeError.  Database errors live in `sqlalchemy.exc`.
except sqlalchemy.exc.DatabaseError as db_error:
    print(db_error)
def load_to_sql(df):
    """Log that *df* has been loaded to the SQL database.

    Args:
        df: the (already persisted) DataFrame or table identifier to report on.

    Returns:
        None -- the function only emits a log message.
    """
    message = f"Table {df} successfully loaded to SQL DB"
    print(message)
# Run the read -> transform -> validate pipeline and persist the result.
df = validate_data(transform_df(get_df(FILE)))
# Bug fix: DataFrame.to_sql requires a connection/engine as its second
# argument; without it the call raises TypeError before writing anything.
df.to_sql('users_new', engine, if_exists='replace')
| import csv
import pandas as pd
import numpy as np
import sqlalchemy
from sqlalchemy import Column, Integer, String, schema
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.types import Date, Text, Enum
pd.set_option('display.max_rows', 500)
FILE = "challenge_dataset.csv"
# sqlAlchemy declarative config
Base = declarative_base()
engine = create_engine('sqlite:///sweetgreen_db', echo=False)
def get_df(file: csv) -> pd.DataFrame():
with open(FILE) as f:
df = pd.read_csv(file)
return df
def transform_df(df):
# TODO format all of the columns
df['created_at'] = pd.to_datetime(df.created_at)
df['birth_date'] = pd.to_datetime(df.birth_date)
df['age_at_creation'] = round((df['created_at'] - df['birth_date']).dt.days / 365, 1)
return df
def validate_data(df):
is_NaN = df[df.isna().any(axis=1)]
# df['Passed2'] = df['status'].map(lambda x: x.is_integer())
# TODO with a better rested brain check the logic here, implement and submit.
df['Passed'] = (df['status'] == 'active') & (df['first_name'] is not np.NaN) & (df['age_at_creation'] >= 18)
print(df)
print(is_NaN)
print(df)
return df
# TODO create logic to check birthdate against when it was created.
def bday_verify(df):
df_test = df
print(df_test)
class PostMetricsAndComments(Base):
__tablename__ = 'users_new'
id = Column(Integer, primary_key=True)
first_name = Column(String(255), nullable=False)
last_name = Column(String(255), nullable=False)
phone = Column(Integer, )
status = Column(Enum("active", "cancelled"), nullable=False)
birth_date = Column(Date, nullable=False)
created_at = Column(Date, nullable=False)
try:
Base.metadata.create_all(engine)
print('Database Schema Instantiated')
except sqlalchemy.except_.DatabaseError as db_error:
print(db_error)
def load_to_sql(df):
"""
Loads modified pd.Dataframe() objects to sql database.
args:
-self, dfs: [pd.Dataframe]
output:
Log message
"""
print(f"Table {df} successfully loaded to SQL DB")
return
df = validate_data(transform_df(get_df(FILE)))
df.to_sql('users_new', if_exists='replace')
| en | 0.61698 | # sqlAlchemy declarative config # TODO format all of the columns # df['Passed2'] = df['status'].map(lambda x: x.is_integer()) # TODO with a better rested brain check the logic here, implement and submit. # TODO create logic to check birthdate against when it was created. Loads modified pd.Dataframe() objects to sql database. args: -self, dfs: [pd.Dataframe] output: Log message | 3.541858 | 4 |
Test/getSystemVisitCode.py | sulantha2006/Processing_Pipeline | 1 | 6622709 | <reponame>sulantha2006/Processing_Pipeline<filename>Test/getSystemVisitCode.py
__author__ = 'sulantha'
import datetime
from Utils.DbUtils import DbUtils
# CSV of (RID, scan date) pairs to look up; path is machine-specific.
csvFile = '/data/data03/sulantha/Downloads/av45_list.csv'
# Client for the ADNI study metadata database (PET_META_LIST lives here).
MatchDBClient = DbUtils(database='Study_Data.ADNI')
# Client for the default processing database holding the Conversion table.
DBClient = DbUtils()
with open(csvFile, 'r') as csv:
    # Skip the header row, then process one (RID, date) record per line.
    next(csv)
    for line in csv:
        row = line.split(',')
        rid = row[0].strip()
        date = row[1].strip()
        # Input dates are US-style; normalise to ISO for the SQL query.
        dateT = datetime.datetime.strptime(date, '%m/%d/%Y')
        #dateT = datetime.datetime.strptime(date, '%Y-%m-%d')
        dateS = dateT.strftime('%Y-%m-%d')
        # NOTE(review): both queries interpolate values directly into SQL
        # (injection-prone); parameterised queries would be safer if
        # DbUtils supports them -- confirm.
        sql = "SELECT DISTINCT subject, visit, seriesid, imageid FROM PET_META_LIST WHERE subject like '%_%_{0}' and scandate = '{1}' and origproc = 'Original'".format(rid, dateS)
        result = MatchDBClient.executeAllResults(sql)
        # Assumes the first metadata row is the relevant scan -- TODO confirm.
        checkDBSQL = "SELECT * FROM Conversion WHERE RID = '{0}' AND S_IDENTIFIER = '{1}' AND I_IDENTIFIER = '{2}'".format(rid, 'S{0}'.format(result[0][2]), 'I{0}'.format(result[0][3]))
        #print(checkDBSQL)
        resultN = DBClient.executeAllResults(checkDBSQL)
        # Report scans present in the metadata DB but missing from Conversion.
        if len(resultN) == 0:
            print('########################### Not in DB - {0} - {1}'.format(rid, date))
        else:
            pass
| __author__ = 'sulantha'
import datetime
from Utils.DbUtils import DbUtils
csvFile = '/data/data03/sulantha/Downloads/av45_list.csv'
MatchDBClient = DbUtils(database='Study_Data.ADNI')
DBClient = DbUtils()
with open(csvFile, 'r') as csv:
next(csv)
for line in csv:
row = line.split(',')
rid = row[0].strip()
date = row[1].strip()
dateT = datetime.datetime.strptime(date, '%m/%d/%Y')
#dateT = datetime.datetime.strptime(date, '%Y-%m-%d')
dateS = dateT.strftime('%Y-%m-%d')
sql = "SELECT DISTINCT subject, visit, seriesid, imageid FROM PET_META_LIST WHERE subject like '%_%_{0}' and scandate = '{1}' and origproc = 'Original'".format(rid, dateS)
result = MatchDBClient.executeAllResults(sql)
checkDBSQL = "SELECT * FROM Conversion WHERE RID = '{0}' AND S_IDENTIFIER = '{1}' AND I_IDENTIFIER = '{2}'".format(rid, 'S{0}'.format(result[0][2]), 'I{0}'.format(result[0][3]))
#print(checkDBSQL)
resultN = DBClient.executeAllResults(checkDBSQL)
if len(resultN) == 0:
print('########################### Not in DB - {0} - {1}'.format(rid, date))
else:
pass | en | 0.212838 | #dateT = datetime.datetime.strptime(date, '%Y-%m-%d') #print(checkDBSQL) ########################## Not in DB - {0} - {1}'.format(rid, date)) | 2.725999 | 3 |
backend/tests/unit/controller/auth/test_login_controller.py | willrp/willbuyer | 4 | 6622710 | import pytest
import re
import responses
import json
from unittest.mock import MagicMock
from flask_dance.consumer.storage import MemoryStorage
from flask_login import current_user
from oauthlib.oauth2 import InvalidGrantError, MissingCodeError, MismatchingStateError
from sqlalchemy.exc import DatabaseError
from backend.service import UserService
from backend.controller.auth import login_manager, refresh_user_logged_in
from backend.controller.auth.login import bplogin, user_logged_in
from backend.util.response.error import ErrorSchema
@pytest.fixture(scope="function", autouse=True)
def controller_mocker(mocker):
    """Stub UserService.__init__ for every test so controllers never build a real service."""
    mocker.patch.object(UserService, "__init__", return_value=None)
@pytest.fixture(scope="function")
def memory_blueprint(monkeypatch):
    """Return the login blueprint backed by an in-memory token store holding "fake-token"."""
    storage = MemoryStorage({"access_token": "fake-token"})
    monkeypatch.setattr(bplogin, "storage", storage)
    return bplogin
def test_login_controller_authorized(mocker, flask_app, memory_blueprint):
    """Happy path: with a valid token and a 200 profile response, the handler returns False.

    NOTE(review): returning False presumably tells Flask-Dance not to store
    the OAuth token itself because user_logged_in already handled it --
    confirm against the Flask-Dance signal contract.
    """
    mocker.patch.object(UserService, "get_create_oauth", return_value=MagicMock(autospec=True))
    # Intercept the Google profile request so no network traffic occurs.
    with responses.RequestsMock() as rsps:
        rsps.add(responses.GET, re.compile(".+google.+"),
            status=200,
            json={
                "id": "fake-id"
            }
        )
        with flask_app.test_request_context("/auth/google/authorized"):
            returned = user_logged_in(memory_blueprint, {"access_token": "fake-token"})
            assert returned is False
def test_login_controller_authorized_no_token(flask_app):
with flask_app.test_request_context("/auth/google/authorized"):
assert current_user.is_authenticated is False
returned = user_logged_in(bplogin, None)
assert current_user.is_authenticated is False
assert returned is not False
assert re.search(r"error=token$", returned.headers["Location"]) is not None
assert returned.status_code == 302
def test_login_controller_authorized_not_ok(flask_app, memory_blueprint):
with responses.RequestsMock() as rsps:
rsps.add(responses.GET, re.compile(".+google.+"),
status=400
)
with flask_app.test_request_context("/auth/google/authorized"):
assert current_user.is_authenticated is False
returned = user_logged_in(memory_blueprint, {"access_token": "<PASSWORD>-token"})
assert current_user.is_authenticated is False
assert returned is not False
assert re.search(r"error=error$", returned.headers["Location"]) is not None
assert returned.status_code == 302
def test_login_controller_authorized_error(mocker, flask_app, memory_blueprint):
mocker.patch.object(UserService, "get_create_oauth", side_effect=DatabaseError("statement", "params", "orig"))
with responses.RequestsMock() as rsps:
rsps.add(responses.GET, re.compile(".+google.+"),
status=200
)
with flask_app.test_request_context("/auth/google/authorized"):
assert current_user.is_authenticated is False
returned = user_logged_in(memory_blueprint, {"access_token": "fake-token"})
assert current_user.is_authenticated is False
assert returned is not False
assert re.search(r"error=error$", returned.headers["Location"]) is not None
assert returned.status_code == 302
def test_login_controller_login(flask_app):
with flask_app.test_client() as client:
with client.session_transaction() as sess:
assert sess.get("next_url") is None
response = client.get(
"auth/login"
)
with client.session_transaction() as sess:
assert sess.get("next_url") is None
assert re.search(r"google_login", response.headers["Location"]) is not None
assert response.status_code == 302
def test_login_controller_login_next(flask_app):
with flask_app.test_client() as client:
with client.session_transaction() as sess:
assert sess.get("next_url") is None
response = client.get(
"auth/login?next=willroger"
)
with client.session_transaction() as sess:
assert sess.get("next_url") == "willroger"
assert re.search(r"google_login", response.headers["Location"]) is not None
assert response.status_code == 302
def test_login_controller_login_logged_in(flask_app, auth_user):
with flask_app.test_client(user=auth_user) as client:
response = client.get(
"auth/login"
)
assert re.search(r"google_login", response.headers["Location"]) is None
assert response.status_code == 302
def test_login_controller_user_logged_in(flask_app):
with flask_app.test_request_context("/auth/login"):
assert login_manager.refresh_view is None
refresh_user_logged_in()
assert login_manager.refresh_view == "google.login"
login_manager.refresh_view = None
@pytest.mark.parametrize(
"error",
[
(MissingCodeError()),
(MismatchingStateError())
]
)
def test_login_controller_missing_mismatching(mocker, flask_app, error):
mocker.patch("flask_dance.consumer.oauth2.redirect", side_effect=error)
with flask_app.test_client() as client:
response = client.get(
"auth/google/authorized"
)
assert re.search(r"google_login", response.headers["Location"]) is not None
assert response.status_code == 302
def test_login_controller_invalid_grant(monkeypatch, mocker, flask_app, test_vcr):
storage = MemoryStorage({"access_token": "fake-token"})
monkeypatch.setattr(bplogin, "storage", storage)
with test_vcr.use_cassette("auth_google_token_revoke_fake.yml"):
mocker.patch("flask_dance.consumer.oauth2.redirect", side_effect=InvalidGrantError())
with flask_app.test_client() as client:
response = client.get(
"auth/google/authorized"
)
assert re.search(r"google_login", response.headers["Location"]) is not None
assert response.status_code == 302
@pytest.mark.parametrize(
"test_url, error, status_code",
[
("/auth/login", Exception(), 500)
]
)
def test_login_controller_error(mocker, flask_app, test_url, error, status_code):
mocker.patch("flask_login.utils._get_user", side_effect=error)
with flask_app.test_client() as client:
response = client.get(
test_url
)
data = json.loads(response.data)
ErrorSchema().load(data)
assert response.status_code == status_code
| import pytest
import re
import responses
import json
from unittest.mock import MagicMock
from flask_dance.consumer.storage import MemoryStorage
from flask_login import current_user
from oauthlib.oauth2 import InvalidGrantError, MissingCodeError, MismatchingStateError
from sqlalchemy.exc import DatabaseError
from backend.service import UserService
from backend.controller.auth import login_manager, refresh_user_logged_in
from backend.controller.auth.login import bplogin, user_logged_in
from backend.util.response.error import ErrorSchema
@pytest.fixture(scope="function", autouse=True)
def controller_mocker(mocker):
mocker.patch.object(UserService, "__init__", return_value=None)
@pytest.fixture(scope="function")
def memory_blueprint(monkeypatch):
storage = MemoryStorage({"access_token": "fake-token"})
monkeypatch.setattr(bplogin, "storage", storage)
return bplogin
def test_login_controller_authorized(mocker, flask_app, memory_blueprint):
mocker.patch.object(UserService, "get_create_oauth", return_value=MagicMock(autospec=True))
with responses.RequestsMock() as rsps:
rsps.add(responses.GET, re.compile(".+google.+"),
status=200,
json={
"id": "fake-id"
}
)
with flask_app.test_request_context("/auth/google/authorized"):
returned = user_logged_in(memory_blueprint, {"access_token": "fake-token"})
assert returned is False
def test_login_controller_authorized_no_token(flask_app):
with flask_app.test_request_context("/auth/google/authorized"):
assert current_user.is_authenticated is False
returned = user_logged_in(bplogin, None)
assert current_user.is_authenticated is False
assert returned is not False
assert re.search(r"error=token$", returned.headers["Location"]) is not None
assert returned.status_code == 302
def test_login_controller_authorized_not_ok(flask_app, memory_blueprint):
with responses.RequestsMock() as rsps:
rsps.add(responses.GET, re.compile(".+google.+"),
status=400
)
with flask_app.test_request_context("/auth/google/authorized"):
assert current_user.is_authenticated is False
returned = user_logged_in(memory_blueprint, {"access_token": "<PASSWORD>-token"})
assert current_user.is_authenticated is False
assert returned is not False
assert re.search(r"error=error$", returned.headers["Location"]) is not None
assert returned.status_code == 302
def test_login_controller_authorized_error(mocker, flask_app, memory_blueprint):
mocker.patch.object(UserService, "get_create_oauth", side_effect=DatabaseError("statement", "params", "orig"))
with responses.RequestsMock() as rsps:
rsps.add(responses.GET, re.compile(".+google.+"),
status=200
)
with flask_app.test_request_context("/auth/google/authorized"):
assert current_user.is_authenticated is False
returned = user_logged_in(memory_blueprint, {"access_token": "fake-token"})
assert current_user.is_authenticated is False
assert returned is not False
assert re.search(r"error=error$", returned.headers["Location"]) is not None
assert returned.status_code == 302
def test_login_controller_login(flask_app):
with flask_app.test_client() as client:
with client.session_transaction() as sess:
assert sess.get("next_url") is None
response = client.get(
"auth/login"
)
with client.session_transaction() as sess:
assert sess.get("next_url") is None
assert re.search(r"google_login", response.headers["Location"]) is not None
assert response.status_code == 302
def test_login_controller_login_next(flask_app):
with flask_app.test_client() as client:
with client.session_transaction() as sess:
assert sess.get("next_url") is None
response = client.get(
"auth/login?next=willroger"
)
with client.session_transaction() as sess:
assert sess.get("next_url") == "willroger"
assert re.search(r"google_login", response.headers["Location"]) is not None
assert response.status_code == 302
def test_login_controller_login_logged_in(flask_app, auth_user):
with flask_app.test_client(user=auth_user) as client:
response = client.get(
"auth/login"
)
assert re.search(r"google_login", response.headers["Location"]) is None
assert response.status_code == 302
def test_login_controller_user_logged_in(flask_app):
with flask_app.test_request_context("/auth/login"):
assert login_manager.refresh_view is None
refresh_user_logged_in()
assert login_manager.refresh_view == "google.login"
login_manager.refresh_view = None
@pytest.mark.parametrize(
"error",
[
(MissingCodeError()),
(MismatchingStateError())
]
)
def test_login_controller_missing_mismatching(mocker, flask_app, error):
mocker.patch("flask_dance.consumer.oauth2.redirect", side_effect=error)
with flask_app.test_client() as client:
response = client.get(
"auth/google/authorized"
)
assert re.search(r"google_login", response.headers["Location"]) is not None
assert response.status_code == 302
def test_login_controller_invalid_grant(monkeypatch, mocker, flask_app, test_vcr):
storage = MemoryStorage({"access_token": "fake-token"})
monkeypatch.setattr(bplogin, "storage", storage)
with test_vcr.use_cassette("auth_google_token_revoke_fake.yml"):
mocker.patch("flask_dance.consumer.oauth2.redirect", side_effect=InvalidGrantError())
with flask_app.test_client() as client:
response = client.get(
"auth/google/authorized"
)
assert re.search(r"google_login", response.headers["Location"]) is not None
assert response.status_code == 302
@pytest.mark.parametrize(
"test_url, error, status_code",
[
("/auth/login", Exception(), 500)
]
)
def test_login_controller_error(mocker, flask_app, test_url, error, status_code):
mocker.patch("flask_login.utils._get_user", side_effect=error)
with flask_app.test_client() as client:
response = client.get(
test_url
)
data = json.loads(response.data)
ErrorSchema().load(data)
assert response.status_code == status_code
| none | 1 | 2.368243 | 2 | |
ChordalPy/Chord.py | P-bibs/PyChord | 2 | 6622711 | import functools
from ChordalPy import Tables
class Chord:
    """A class representing a chord.

    Attributes:
        root (string): The root note of the chord.
        intervals (list[(int, int), ...]): Intervals above the root, each a
            (scale-degree, half-step) pair.  (The previous docstring's
            "imaginary part of complex number" text was a copy-paste error.)
        bass (string): The bass note of the chord.
    """
    def __init__(self, root, intervals, bass):
        """Construct a chord given a root, intervals, and a bass."""
        # PitchName object
        self.root = root
        # tuple of Interval object
        self.intervals = intervals
        # PitchName object
        self.bass = bass
        # Cached result of get_spelling(); computed lazily on first access.
        self.spelling = []
    def __repr__(self):
        """returns each characteristic of chord concatenated (ie: root + quality + size)."""
        return self.root + ":" + str(self.intervals) + "/" + self.bass
    def get_spelling(self):
        """Computes chord spelling, or returns cached version if already computed."""
        if self.spelling == []:
            notes = [self.root]
            # Starts at index 1: intervals[0] is assumed to be the unison,
            # which is already covered by the root -- TODO confirm.
            for i in range(1, len(self.intervals)):
                notes.append(self.note_from_interval(self.intervals[i]))
            self.spelling = notes
        return self.spelling
    def get_note_array(self):
        """Returns a 12 item, binary list that represents the chord.
        For example: C:maj is [1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0].
        """
        noteArray = [0 for c in range(12)]
        # Pitch class (0-11) of the root, looked up from the shared table.
        rootNumeral = Tables.notes["naturalToHalfStep"][self.root]
        noteArray[rootNumeral % 12] = 1
        for i in range(len(self.intervals)):
            # Second element of each interval is its half-step distance.
            halfStep = self.intervals[i][1]
            noteArray[(rootNumeral+halfStep) % 12] = 1
        return noteArray
    def get_pseudo_hash(self):
        """Returns a four character string that encodes all interval information
        Hash contains only letters a-h.
        """
        noteArray = self.get_note_array()
        pseudoHash = ''
        # Pack each group of 3 bits into one character: 0b000..0b111 -> 'a'..'h'.
        for i in range(0, 12, 3):
            seg = functools.reduce((lambda a,b : str(a)+str(b)), noteArray[i:i+3])
            pseudoHash += str(chr(int(seg, 2) + 97))
        return pseudoHash
    def note_from_interval(self, interval):
        """Given an interval above the root,
        returns the letter name of the corresponding note."""
        # -1 is added because an interval of a first corresponds to the root pitch
        rootNumeral = Tables.notes["naturalToStep"][self.root[0]]-1
        natural = Tables.notes["stepToNatural"][str(((rootNumeral + interval[0]) % 7))]
        naturalHalfSteps = Tables.notes["naturalToHalfStep"][natural]
        rootHalfSteps = Tables.notes["naturalToHalfStep"][self.root]
        # This is necessary for it all to work. Don't ask why
        if self.root=="Cb":
            naturalHalfSteps+=12
        # Difference between the requested half-steps and the natural note's
        # half-steps determines the accidental (sharps or flats).
        if (naturalHalfSteps - rootHalfSteps)<0:
            halfStepOffset = interval[1]%12 - (naturalHalfSteps+12 - rootHalfSteps)
        else:
            halfStepOffset = interval[1]%12 - (naturalHalfSteps - rootHalfSteps)
        if halfStepOffset == 0:
            accidental = ""
        elif halfStepOffset > 0:
            accidental = "#" * halfStepOffset
        elif halfStepOffset < 0:
            accidental = "b" * (-1*halfStepOffset)
        return natural + accidental
| import functools
from ChordalPy import Tables
class Chord:
"""A class representing a chord.
Attributes:
root (string): The root note of the chord.
intervals (list[(int, int), ...]): The imaginary part of complex number.
bass (string): The bass note of the chord.
"""
def __init__(self, root, intervals, bass):
"""Construct a chord given a root, intervals, and a bass."""
# PitchName object
self.root = root
# tuple of Interval object
self.intervals = intervals
# PitchName object
self.bass = bass
self.spelling = []
def __repr__(self):
"""returns each characteristic of chord concatenated (ie: root + quality + size)."""
return self.root + ":" + str(self.intervals) + "/" + self.bass
def get_spelling(self):
"""Computes chord spelling, or returns cached version if already computed."""
if self.spelling == []:
notes = [self.root]
for i in range(1, len(self.intervals)):
notes.append(self.note_from_interval(self.intervals[i]))
self.spelling = notes
return self.spelling
def get_note_array(self):
"""Returns a 12 item, binary list that represents the chord.
For example: C:maj is [1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0].
"""
noteArray = [0 for c in range(12)]
rootNumeral = Tables.notes["naturalToHalfStep"][self.root]
noteArray[rootNumeral % 12] = 1
for i in range(len(self.intervals)):
halfStep = self.intervals[i][1]
noteArray[(rootNumeral+halfStep) % 12] = 1
return noteArray
def get_pseudo_hash(self):
"""Returns a four character string that encodes all interval information
Hash contains only letters a-h.
"""
noteArray = self.get_note_array()
pseudoHash = ''
for i in range(0, 12, 3):
seg = functools.reduce((lambda a,b : str(a)+str(b)), noteArray[i:i+3])
pseudoHash += str(chr(int(seg, 2) + 97))
return pseudoHash
def note_from_interval(self, interval):
"""Given an interval above the root,
returns the letter name of the corresponding note."""
# -1 is added because an interval of a first corresponds to the root pitch
rootNumeral = Tables.notes["naturalToStep"][self.root[0]]-1
natural = Tables.notes["stepToNatural"][str(((rootNumeral + interval[0]) % 7))]
naturalHalfSteps = Tables.notes["naturalToHalfStep"][natural]
rootHalfSteps = Tables.notes["naturalToHalfStep"][self.root]
# This is necessary for it all to work. Don't ask why
if self.root=="Cb":
naturalHalfSteps+=12
if (naturalHalfSteps - rootHalfSteps)<0:
halfStepOffset = interval[1]%12 - (naturalHalfSteps+12 - rootHalfSteps)
else:
halfStepOffset = interval[1]%12 - (naturalHalfSteps - rootHalfSteps)
if halfStepOffset == 0:
accidental = ""
elif halfStepOffset > 0:
accidental = "#" * halfStepOffset
elif halfStepOffset < 0:
accidental = "b" * (-1*halfStepOffset)
return natural + accidental
| en | 0.732239 | A class representing a chord. Attributes: root (string): The root note of the chord. intervals (list[(int, int), ...]): The imaginary part of complex number. bass (string): The bass note of the chord. Construct a chord given a root, intervals, and a bass. # PitchName object # tuple of Interval object # PitchName object returns each characteristic of chord concatenated (ie: root + quality + size). Computes chord spelling, or returns cached version if already computed. Returns a 12 item, binary list that represents the chord. For example: C:maj is [1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0]. Returns a four character string that encodes all interval information Hash contains only letters a-h. Given an interval above the root, returns the letter name of the corresponding note. # -1 is added because an interval of a first corresponds to the root pitch # This is necessary for it all to work. Don't ask why | 3.608048 | 4 |
src/main.py | megan-levy/python-life | 1 | 6622712 | <reponame>megan-levy/python-life
# This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
from array import *
ALIVE = 1
DEAD = 0
# Neighbour offsets; kept for backward compatibility with any external users,
# though evolve() now clamps ranges directly.
OFFSETS = [-1, 0, 1]


def makeUniverse(rows, columns):
    """Return a rows x columns grid with every cell DEAD."""
    return [[DEAD] * columns for _ in range(rows)]


def evolve(universe: list[list[int]]):
    """Return the next Game of Life generation of *universe*.

    Cells outside the grid are treated as dead.  The original quadruple
    bounds check is replaced with clamped neighbour ranges, which is both
    simpler and skips out-of-range indices up front.
    """
    rowCount = len(universe)
    colCount = len(universe[0])
    newUniverse: list[list[int]] = makeUniverse(rowCount, colCount)
    for r in range(rowCount):
        for c in range(colCount):
            totalAlive: int = 0
            # Clamp the 3x3 neighbourhood to the grid edges.
            for rr in range(max(0, r - 1), min(rowCount, r + 2)):
                for cc in range(max(0, c - 1), min(colCount, c + 2)):
                    if (rr, cc) != (r, c):
                        totalAlive += universe[rr][cc]
            newUniverse[r][c] = evolveCell(universe[r][c], totalAlive)
    return newUniverse


def evolveCell(cellState: int, total_alive: int):
    """Apply Conway's rules to a single cell.

    A cell is alive next generation iff it has exactly 3 live neighbours,
    or it is currently alive with exactly 2.
    """
    if total_alive == 3:
        return ALIVE
    if cellState == ALIVE and total_alive == 2:
        return ALIVE
    return DEAD
def printUniverse(universe: list[list[int]]):
    """Print the grid, one row per line: '*' for live cells, '_' for dead,
    separated by single spaces."""
    for row in universe:
        cells = ["*" if cell == 1 else "_" for cell in row]
        print(" ".join(cells))
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Seed a 10x10 universe with a glider and run 100 generations,
    # printing each one.
    u = makeUniverse(10,10)
    u[1][2] = 1
    u[2][3] = 1
    u[3][1] = 1
    u[3][2] = 1
    u[3][3] = 1
    # u[1][2] = 1
    printUniverse(u)
    print("\n")
    for _ in range(100):
        u = evolve(u)
        printUniverse(u)
        print("\n")
| # This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
from array import *
ALIVE = 1
DEAD = 0
OFFSETS = [-1, 0, 1]
def makeUniverse(rows, columns):
return [[0] * columns for i in range(rows)]
def evolve(universe: list[list[int]]):
# 1100 0000
# 0X00 => 0000
# 0000 0000
# 0011 0000
rowCount = len(universe)
colCount = len(universe[0])
newUniverse: list[list[int]] = makeUniverse(rowCount, colCount)
for r in range(rowCount):
for c in range(colCount):
totalAlive: int = 0
for rIndex in OFFSETS:
for cIndex in OFFSETS:
if not (rIndex == 0 and cIndex == 0):
if (r + rIndex) >= 0 and (c + cIndex) >= 0:
if (r + rIndex) < rowCount and (c + cIndex) < colCount:
totalAlive += universe[r+rIndex][c+cIndex]
newUniverse[r][c] = evolveCell(universe[r][c], totalAlive)
return newUniverse
def evolveCell(cellState: int, total_alive: int):
if cellState == ALIVE and total_alive < 2:
return DEAD
elif cellState == ALIVE and total_alive > 3:
return DEAD
elif cellState == DEAD and total_alive == 3:
return ALIVE
else:
return cellState
def printUniverse(universe: list[list[int]]):
converter = lambda cell: "*" if cell == 1 else "_"
for row in universe:
print(" ".join(map(converter, row)))
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
u = makeUniverse(10,10)
u[1][2] = 1
u[2][3] = 1
u[3][1] = 1
u[3][2] = 1
u[3][3] = 1
# u[1][2] = 1
printUniverse(u)
print("\n")
for _ in range(100):
u = evolve(u)
printUniverse(u)
print("\n") | en | 0.720151 | # This is a sample Python script. # Press Shift+F10 to execute it or replace it with your code. # Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings. # 1100 0000 # 0X00 => 0000 # 0000 0000 # 0011 0000 # Press the green button in the gutter to run the script. # u[1][2] = 1 | 3.22633 | 3 |
def anagram(lst):
    """Group words that are anagrams of each other.

    Prints and returns a list of groups, one per anagram class, in order
    of first appearance.

    Fixes over the original: iteration over a ``set`` of sorted keys made
    the output order nondeterministic, and the rescan per key was
    O(groups * words); a defaultdict keyed by the sorted letters groups
    everything in one deterministic pass.  Returning the groups (instead
    of None) is backward compatible for callers that ignored the result.
    """
    from collections import defaultdict

    groups = defaultdict(list)
    for word in lst:
        # Words are anagrams iff their sorted letters are equal.
        key = "".join(sorted(word))
        groups[key].append(word)
    tot = list(groups.values())
    print(tot)
    return tot
if __name__=='__main__':
    lst = ['tea','ate','bat','tab','nab','eat']
    # Expected grouping (order-insensitive); kept for reference only, never asserted.
    output = [['nab'],['bat','tab'],['tea','eat','ate']]
    anagram(lst)
ana = []
tot = []
dict_sort = {'word':[],'lst_sort':[]}
for i in lst:
val = sorted(i)
val = "".join(val)
dict_sort['word'].append(i)
dict_sort['lst_sort'].append(val)
words = list(dict_sort.values())[0]
lst_sorts = list(dict_sort.values())[1]
set_sorts = list(set(lst_sorts))
for k in set_sorts:
ana = []
for j in range(len(lst_sorts)):
v1 = lst_sorts[j]
if v1==k:
ana.append(words[j])
tot.append(ana)
print(tot)
if __name__=='__main__':
lst = ['tea','ate','bat','tab','nab','eat']
output = [['nab'],['bat','tab'],['tea','eat','ate']]
anagram(lst) | none | 1 | 3.688244 | 4 | |
IdioMaticPython/dictionaries.py | shaunryan/PythonReference | 0 | 6622714 | <reponame>shaunryan/PythonReference
# https://www.youtube.com/watch?v=OSGv2VnC0go
from pprint import pprint
d = {
    'matthew': 'blue',
    'rachel': 'green',
    'raymond': 'red'
}

# method 1: iterating a dict yields its keys
for k in d:
    print(k)

# method 2 - snapshot the matching keys first when mutating the dictionary
delete = [k for k in d if k.startswith("r")]

# looping keys and values: use .items()
# Bug fix: the original used `for k, v in enumerate(d)`, which yields
# (index, key) pairs -- it printed the keys again, not the values.
for k, v in d.items():
    print(k, v)
# construct dictionary using lists
names = ['Shaun', 'Sarah', 'Finley']
colors = ['red', 'green', 'yellow']
d = dict(zip(names, colors))
pprint(d)
# counting with dictionaries
colors = ['red', 'green', 'yellow', 'red', 'black']
d = {}
for c in colors:
d[c] = d.get(c, 0) + 1
pprint(d)
# better
# https://realpython.com/python-defaultdict/
# default the dict using int(0)
from collections import defaultdict
d = defaultdict(int)
for c in colors:
d[c] += 1
# group with dictionaries
d = {}
for c in colors:
key = len(c)
if key not in d:
d[key] = []
d[key].append(c)
pprint(d)
# better
d = {}
for c in colors:
key = len(c)
# if no key exist will insert key value pair
d.setdefault(key, []).append(c)
pprint(d)
# better still
d = defaultdict(list)
for c in colors:
key = len(c)
d[key].append(c)
pprint(dict(d))
# popitem
# this is thread safe atomic
d = {
"matthew": "blue",
"rachel": "green",
"raymond": "red"
}
while d:
key, value = d.popitem()
print(key, '-->', value)
# linking dictionaries: ChainMap searches each mapping in order,
# returning the first hit -- command line beats defaults beats environment.
import argparse, os
from collections import ChainMap
from pprint import pprint
# Bug fix: the original literal was {"USER": "red", "USER": "guest"} --
# a duplicate key, so the intended color default was silently discarded.
defaults = {"COLOR": "red", "USER": "guest"}
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--user")
parser.add_argument("-c", "--color")
namespace = parser.parse_args([])
# Keep only the arguments that were actually supplied (truthy values).
command_line_args = {k:v for k, v in
                     vars(namespace).items() if v}
d = ChainMap(command_line_args, defaults, os.environ)
pprint(d["USER"])
# https://www.youtube.com/watch?v=OSGv2VnC0go
# Idiomatic-dictionary examples (from Raymond Hettinger's talk).
from pprint import pprint

d = {
    'matthew': 'blue',
    'rachel': 'green',
    'raymond': 'red'
}

# method 1: iterating a dict yields its keys
for k in d:
    print(k)

# method 2 - when mutating the dictionary: snapshot the matching keys
# first (never add/remove keys while iterating the dict itself); the
# actual mutation would then be:  for k in delete: del d[k]
delete = [k for k in d if k.startswith("r")]

# looping keys and values -- fixed: enumerate(d) yields (index, key)
# pairs, so the original printed only the keys; .items() is the idiom.
for k, v in d.items():
    print(k, v)

# construct dictionary using lists
names = ['Shaun', 'Sarah', 'Finley']
colors = ['red', 'green', 'yellow']
d = dict(zip(names, colors))
pprint(d)

# counting with dictionaries
colors = ['red', 'green', 'yellow', 'red', 'black']
d = {}
for c in colors:
    d[c] = d.get(c, 0) + 1
pprint(d)

# better
# https://realpython.com/python-defaultdict/
# missing keys default to int() == 0, so no .get() dance is needed
from collections import defaultdict
d = defaultdict(int)
for c in colors:
    d[c] += 1

# group with dictionaries (here: group color names by their length)
d = {}
for c in colors:
    key = len(c)
    if key not in d:
        d[key] = []
    d[key].append(c)
pprint(d)

# better
d = {}
for c in colors:
    key = len(c)
    # if no key exists, setdefault inserts key -> [] before appending
    d.setdefault(key, []).append(c)
pprint(d)

# better still
d = defaultdict(list)
for c in colors:
    key = len(c)
    d[key].append(c)
pprint(dict(d))

# popitem
# this is thread safe atomic
d = {
    "matthew": "blue",
    "rachel": "green",
    "raymond": "red"
}
while d:
    key, value = d.popitem()
    print(key, '-->', value)

# linking dictionaries
import argparse, os
from collections import ChainMap
# fixed: the original literal repeated the "USER" key, so the "red"
# entry was silently dropped; a COLOR default was almost certainly meant.
defaults = {"COLOR": "red", "USER": "guest"}
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--user")
parser.add_argument("-c", "--color")
namespace = parser.parse_args([])
command_line_args = {k: v for k, v in vars(namespace).items() if v}
# lookups try the first mapping first: CLI args, then defaults, then env.
# NOTE(review): Hettinger's original put os.environ *before* defaults so
# environment variables could win -- confirm this ordering is intended.
d = ChainMap(command_line_args, defaults, os.environ)
pprint(d["USER"])
retopoflow/rfwidget.py | senjacob/retopoflow | 0 | 6622715 | '''
Copyright (C) 2021 CG Cookie
http://cgcookie.com
<EMAIL>
Created by <NAME>, <NAME>, and <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from ..addon_common.common.debug import debugger
from ..addon_common.common.fsm import FSM
from ..addon_common.common.drawing import DrawCallbacks
from ..addon_common.common.utils import find_fns
class RFWidget:
    '''
    Assumes that direct subclass will have singleton instance (shared FSM among all instances of that subclass and any subclasses)
    '''
    # registry[i] is the *most recent* class object for the i-th direct
    # subclass ever defined; re-definitions replace the earlier entry.
    registry = []
    @classmethod
    def __init_subclass__(cls, *args, **kwargs):
        """Auto-register every subclass and give it its own FSM + draw callbacks.

        The first definition of a subclass allocates a registry slot and
        fresh FSM/DrawCallbacks objects; a later re-definition only swaps
        the class object into its existing slot, keeping the original FSM.
        """
        # print('rfwidget subclass', cls, super(cls))
        if not hasattr(cls, '_rfwidget_index'):
            # add cls to registry (might get updated later) and add FSM,Draw
            cls._rfwidget_index = len(RFWidget.registry)
            RFWidget.registry.append(cls)
            cls._fsm = FSM()
            cls._draw = DrawCallbacks()
            # convenience functions
            cls.FSM_State = cls._fsm.wrapper
            cls.FSM_OnlyInState = cls._fsm.onlyinstate_wrapper
            cls.Draw = cls._draw.wrapper
        else:
            # update registry, but do not add new FSM
            RFWidget.registry[cls._rfwidget_index] = cls
        super().__init_subclass__(*args, **kwargs)
    # The @staticmethod decorators below tag functions as event handlers;
    # _init_callbacks() later discovers the tags via find_fns().
    @staticmethod
    def on_init(fn):
        """Mark *fn* as a widget 'init' handler (run once at RF startup)."""
        fn._widget_on_init = True
        return fn
    @staticmethod
    def on_reset(fn):
        """Mark *fn* as a widget 'reset' handler."""
        fn._widget_on_reset = True
        return fn
    @staticmethod
    def on_timer(fn):
        """Mark *fn* as a widget 'timer' handler."""
        fn._widget_on_timer = True
        return fn
    @staticmethod
    def on_target_change(fn):
        """Mark *fn* as a widget 'target change' handler."""
        fn._widget_on_target_change = True
        return fn
    @staticmethod
    def on_view_change(fn):
        """Mark *fn* as a widget 'view change' handler."""
        fn._widget_on_view_change = True
        return fn
    @staticmethod
    def on_action(fn):
        """Mark *fn* as a tool-level 'action' handler."""
        fn._widget_on_action = True
        return fn
    @staticmethod
    def on_actioning(fn):
        """Mark *fn* as a tool-level 'actioning' (action in progress) handler."""
        fn._widget_on_actioning = True
        return fn
    def __init__(self, rftool, **kwargs):
        """Bind the widget to *rftool*, wire callbacks, start the FSM at 'main'.

        Extra keyword args are forwarded to the 'init' handlers.
        """
        self.rftool = rftool
        self.rfcontext = rftool.rfcontext
        self.actions = rftool.rfcontext.actions
        self.redraw_on_mouse = False
        self._init_callbacks()
        self._callback_widget('init', **kwargs)
        self._fsm.init(self, start='main')
        self._draw.init(self)
        # self._init_action_callback()
        self._reset()
    def _callback_widget(self, event, *args, **kwargs):
        """Invoke every widget-level handler registered for *event*."""
        if event != 'timer':
            # disabled debug trace, kept for easy re-enabling
            #print('callback', self, event, self._widget_callbacks.get(event, []))
            pass
        if event not in self._widget_callbacks: return
        for fn in self._widget_callbacks[event]:
            fn(self, *args, **kwargs)
    def _callback_tool(self, event, *args, **kwargs):
        """Invoke every tool-level handler for *event*, passing the rftool."""
        if event != 'timer':
            #print('callback', self, event, self._tool_callbacks.get(event, []))
            pass
        if event not in self._tool_callbacks: return
        for fn in self._tool_callbacks[event]:
            fn(self.rftool, *args, **kwargs)
    def _init_callbacks(self):
        """Collect handlers tagged by the on_* decorators into lookup tables."""
        def fw(key):
            # widget-level handlers live on this widget instance
            return [fn for (_,fn) in find_fns(self, '_widget_on_%s' % key)]
        def ft(key):
            # tool-level handlers live on the owning rftool
            return [fn for (_,fn) in find_fns(self.rftool, '_widget_on_%s' % key)]
        self._widget_callbacks = {
            'init': fw('init'), # called when RF starts up
            'reset': fw('reset'), # called when RF switches into tool or undo/redo
            'timer': fw('timer'), # called every timer interval
            'target change': fw('target_change'), # called whenever rftarget has changed (selection or edited)
            'view change': fw('view_change'), # called whenever view has changed
        }
        self._tool_callbacks = {
            'action': ft('action'), # called when user performs widget action, per instance!
            'actioning': ft('actioning'), # called when user is performing widget action, per instance!
        }
    def callback_actions(self, *args, **kwargs):
        """Fire the tool-level 'action' handlers."""
        self._callback_tool('action', *args, **kwargs)
    def callback_actioning(self, *args, **kwargs):
        """Fire the tool-level 'actioning' handlers."""
        self._callback_tool('actioning', *args, **kwargs)
    def _reset(self):
        """Force the FSM back to 'main', fire 'reset' handlers, refresh state."""
        self._fsm.force_set_state('main')
        self._callback_widget('reset')
        self._update_all()
    def _fsm_update(self):
        """Advance the FSM one step; returns whatever FSM.update() returns."""
        return self._fsm.update()
    def _update_all(self):
        """Fire the periodic/refresh handlers as if everything had changed."""
        self._callback_widget('timer')
        self._callback_widget('target change')
        self._callback_widget('view change')
    @staticmethod
    def dirty_when_done(fn):
        """Decorator: after *fn* returns, mark the RF context dirty (redraw).

        NOTE(review): RFWidget.rfcontext is only ever assigned as an
        *instance* attribute in __init__, never on the class, so this
        class-level lookup looks like it would raise AttributeError when
        the wrapper runs; args[0].rfcontext may have been intended -- confirm.
        """
        def wrapper(*args, **kwargs):
            ret = fn(*args, **kwargs)
            RFWidget.rfcontext.dirty()
            return ret
        return wrapper
    def inactive_passthrough(self): pass
    # thin passthroughs to the per-class DrawCallbacks stages
    def _draw_pre3d(self): self._draw.pre3d()
    def _draw_post3d(self): self._draw.post3d()
    def _draw_post2d(self): self._draw.post2d()
| '''
Copyright (C) 2021 CG Cookie
http://cgcookie.com
<EMAIL>
Created by <NAME>, <NAME>, and <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from ..addon_common.common.debug import debugger
from ..addon_common.common.fsm import FSM
from ..addon_common.common.drawing import DrawCallbacks
from ..addon_common.common.utils import find_fns
class RFWidget:
    '''
    Assumes that direct subclass will have singleton instance (shared FSM among all instances of that subclass and any subclasses)
    '''
    # registry[i] is the *most recent* class object for the i-th direct
    # subclass ever defined; re-definitions replace the earlier entry.
    registry = []
    @classmethod
    def __init_subclass__(cls, *args, **kwargs):
        """Auto-register every subclass and give it its own FSM + draw callbacks.

        The first definition of a subclass allocates a registry slot and
        fresh FSM/DrawCallbacks objects; a later re-definition only swaps
        the class object into its existing slot, keeping the original FSM.
        """
        # print('rfwidget subclass', cls, super(cls))
        if not hasattr(cls, '_rfwidget_index'):
            # add cls to registry (might get updated later) and add FSM,Draw
            cls._rfwidget_index = len(RFWidget.registry)
            RFWidget.registry.append(cls)
            cls._fsm = FSM()
            cls._draw = DrawCallbacks()
            # convenience functions
            cls.FSM_State = cls._fsm.wrapper
            cls.FSM_OnlyInState = cls._fsm.onlyinstate_wrapper
            cls.Draw = cls._draw.wrapper
        else:
            # update registry, but do not add new FSM
            RFWidget.registry[cls._rfwidget_index] = cls
        super().__init_subclass__(*args, **kwargs)
    # The @staticmethod decorators below tag functions as event handlers;
    # _init_callbacks() later discovers the tags via find_fns().
    @staticmethod
    def on_init(fn):
        """Mark *fn* as a widget 'init' handler (run once at RF startup)."""
        fn._widget_on_init = True
        return fn
    @staticmethod
    def on_reset(fn):
        """Mark *fn* as a widget 'reset' handler."""
        fn._widget_on_reset = True
        return fn
    @staticmethod
    def on_timer(fn):
        """Mark *fn* as a widget 'timer' handler."""
        fn._widget_on_timer = True
        return fn
    @staticmethod
    def on_target_change(fn):
        """Mark *fn* as a widget 'target change' handler."""
        fn._widget_on_target_change = True
        return fn
    @staticmethod
    def on_view_change(fn):
        """Mark *fn* as a widget 'view change' handler."""
        fn._widget_on_view_change = True
        return fn
    @staticmethod
    def on_action(fn):
        """Mark *fn* as a tool-level 'action' handler."""
        fn._widget_on_action = True
        return fn
    @staticmethod
    def on_actioning(fn):
        """Mark *fn* as a tool-level 'actioning' (action in progress) handler."""
        fn._widget_on_actioning = True
        return fn
    def __init__(self, rftool, **kwargs):
        """Bind the widget to *rftool*, wire callbacks, start the FSM at 'main'.

        Extra keyword args are forwarded to the 'init' handlers.
        """
        self.rftool = rftool
        self.rfcontext = rftool.rfcontext
        self.actions = rftool.rfcontext.actions
        self.redraw_on_mouse = False
        self._init_callbacks()
        self._callback_widget('init', **kwargs)
        self._fsm.init(self, start='main')
        self._draw.init(self)
        # self._init_action_callback()
        self._reset()
    def _callback_widget(self, event, *args, **kwargs):
        """Invoke every widget-level handler registered for *event*."""
        if event != 'timer':
            # disabled debug trace, kept for easy re-enabling
            #print('callback', self, event, self._widget_callbacks.get(event, []))
            pass
        if event not in self._widget_callbacks: return
        for fn in self._widget_callbacks[event]:
            fn(self, *args, **kwargs)
    def _callback_tool(self, event, *args, **kwargs):
        """Invoke every tool-level handler for *event*, passing the rftool."""
        if event != 'timer':
            #print('callback', self, event, self._tool_callbacks.get(event, []))
            pass
        if event not in self._tool_callbacks: return
        for fn in self._tool_callbacks[event]:
            fn(self.rftool, *args, **kwargs)
    def _init_callbacks(self):
        """Collect handlers tagged by the on_* decorators into lookup tables."""
        def fw(key):
            # widget-level handlers live on this widget instance
            return [fn for (_,fn) in find_fns(self, '_widget_on_%s' % key)]
        def ft(key):
            # tool-level handlers live on the owning rftool
            return [fn for (_,fn) in find_fns(self.rftool, '_widget_on_%s' % key)]
        self._widget_callbacks = {
            'init': fw('init'), # called when RF starts up
            'reset': fw('reset'), # called when RF switches into tool or undo/redo
            'timer': fw('timer'), # called every timer interval
            'target change': fw('target_change'), # called whenever rftarget has changed (selection or edited)
            'view change': fw('view_change'), # called whenever view has changed
        }
        self._tool_callbacks = {
            'action': ft('action'), # called when user performs widget action, per instance!
            'actioning': ft('actioning'), # called when user is performing widget action, per instance!
        }
    def callback_actions(self, *args, **kwargs):
        """Fire the tool-level 'action' handlers."""
        self._callback_tool('action', *args, **kwargs)
    def callback_actioning(self, *args, **kwargs):
        """Fire the tool-level 'actioning' handlers."""
        self._callback_tool('actioning', *args, **kwargs)
    def _reset(self):
        """Force the FSM back to 'main', fire 'reset' handlers, refresh state."""
        self._fsm.force_set_state('main')
        self._callback_widget('reset')
        self._update_all()
    def _fsm_update(self):
        """Advance the FSM one step; returns whatever FSM.update() returns."""
        return self._fsm.update()
    def _update_all(self):
        """Fire the periodic/refresh handlers as if everything had changed."""
        self._callback_widget('timer')
        self._callback_widget('target change')
        self._callback_widget('view change')
    @staticmethod
    def dirty_when_done(fn):
        """Decorator: after *fn* returns, mark the RF context dirty (redraw).

        NOTE(review): RFWidget.rfcontext is only ever assigned as an
        *instance* attribute in __init__, never on the class, so this
        class-level lookup looks like it would raise AttributeError when
        the wrapper runs; args[0].rfcontext may have been intended -- confirm.
        """
        def wrapper(*args, **kwargs):
            ret = fn(*args, **kwargs)
            RFWidget.rfcontext.dirty()
            return ret
        return wrapper
    def inactive_passthrough(self): pass
    # thin passthroughs to the per-class DrawCallbacks stages
    def _draw_pre3d(self): self._draw.pre3d()
    def _draw_post3d(self): self._draw.post3d()
    def _draw_post2d(self): self._draw.post2d()
| en | 0.877714 | Copyright (C) 2021 CG Cookie http://cgcookie.com <EMAIL> Created by <NAME>, <NAME>, and <NAME> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. Assumes that direct subclass will have singleton instance (shared FSM among all instances of that subclass and any subclasses) # print('rfwidget subclass', cls, super(cls)) # add cls to registry (might get updated later) and add FSM,Draw # convenience functions # update registry, but do not add new FSM # self._init_action_callback() #print('callback', self, event, self._widget_callbacks.get(event, [])) #print('callback', self, event, self._tool_callbacks.get(event, [])) # called when RF starts up # called when RF switches into tool or undo/redo # called every timer interval # called whenever rftarget has changed (selection or edited) # called whenever view has changed # called when user performs widget action, per instance! # called when user is performing widget action, per instance! | 1.703797 | 2 |
# Exercise 004: read anything from the keyboard and report everything
# Python can tell about the resulting value.
n = input('digite algo: ')  # input() always returns a value of type str
print('o tipo primitivo desse valor é {}'.format(type(n)))
print('Só tem espaço? {}'.format(n.isspace()))
print('É um número? {}'.format(n.isnumeric()))
print('É um alfábetico? {}'.format(n.isalpha()))
print('É alfanumérico? {}'.format(n.isalnum()))
print('Está em maíusculo? {}'.format(n.isupper()))
print('esta em minusculo? {}'.format(n.islower()))
print('Está captalizada? {}'.format(n.istitle()))
# Exercise 004: read anything from the keyboard and report everything
# Python can tell about the resulting value.
n = input('digite algo: ')  # input() always returns a value of type str
print('o tipo primitivo desse valor é {}'.format(type(n)))
print('Só tem espaço? {}'.format(n.isspace()))
print('É um número? {}'.format(n.isnumeric()))
print('É um alfábetico? {}'.format(n.isalpha()))
print('É alfanumérico? {}'.format(n.isalnum()))
print('Está em maíusculo? {}'.format(n.isupper()))
print('esta em minusculo? {}'.format(n.islower()))
print('Está captalizada? {}'.format(n.istitle()))
tests/test_qqplot.py | piccolbo/altair_recipes | 89 | 6622717 | <reponame>piccolbo/altair_recipes<gh_stars>10-100
import altair_recipes as ar
from altair_recipes.common import viz_reg_test
from altair_recipes.display_pweave import show_test
import numpy as np
import pandas as pd
#' <h2>Qqplot</h2>
@viz_reg_test
def test_qqplot():
    """Visual regression test for `ar.qqplot` on a normal vs. a uniform sample.

    `viz_reg_test` (project decorator) compares the returned chart against
    a stored baseline; `show_test` renders it for the Pweave-built docs.
    """
    df = pd.DataFrame(
        {
            "Trial A": np.random.normal(0, 0.8, 1000),
            "Trial B": np.random.normal(-2, 1, 1000),
            # NOTE(review): uniform(3, 2) passes low > high; numpy accepts
            # it (samples land between 2 and 3) but (2, 3) was likely meant.
            "Trial C": np.random.uniform(3, 2, 1000),
        }
    )
    return ar.qqplot(df, x="Trial A", y="Trial C")
show_test(test_qqplot)
import altair_recipes as ar
from altair_recipes.common import viz_reg_test
from altair_recipes.display_pweave import show_test
import numpy as np
import pandas as pd

#' <h2>Qqplot</h2>
@viz_reg_test
def test_qqplot():
    """Visual regression test for `ar.qqplot` on a normal vs. a uniform sample.

    `viz_reg_test` (project decorator) compares the returned chart against
    a stored baseline; `show_test` renders it for the Pweave-built docs.
    """
    df = pd.DataFrame(
        {
            "Trial A": np.random.normal(0, 0.8, 1000),
            "Trial B": np.random.normal(-2, 1, 1000),
            # NOTE(review): uniform(3, 2) passes low > high; numpy accepts
            # it (samples land between 2 and 3) but (2, 3) was likely meant.
            "Trial C": np.random.uniform(3, 2, 1000),
        }
    )
    return ar.qqplot(df, x="Trial A", y="Trial C")
show_test(test_qqplot)
Protheus_WebApp/Modules/SIGATEC/TECA640TESTCASE.py | 98llm/tir-script-samples | 17 | 6622718 | from tir import Webapp
import unittest
class TECA640(unittest.TestCase):
    """TIR UI-automation test for Protheus routine TECA640 (SIGATEC module).

    Drives a real browser session through the TIR Webapp helper; there are
    no local assertions -- the helper raises on any UI failure.
    """
    @classmethod
    def setUpClass(inst):
        # Log into SIGATEC (Setup args follow TIR's module/base-date/group/
        # branch convention -- see the TIR docs) and open routine TECA640.
        inst.oHelper = Webapp()
        inst.oHelper.Setup("SIGATEC","11/12/2020","T1","D MG 01","28")
        inst.oHelper.Program("TECA640")
    def test_TECA640_001(self):
        # Fill the filter dialog with catch-all ranges, run the report
        # preview ("Visualizar"), then close it again.
        self.oHelper.SetValue("Cliente de ?","")
        self.oHelper.SetValue("Cliente ate ?","zzzzzz")
        self.oHelper.SetValue("Produto de ?","")
        self.oHelper.SetValue("Produto ate ?","ZZZZZZ")
        self.oHelper.SetButton("OK")
        self.oHelper.SetButton("Visualizar")
        self.oHelper.SetButton("Cancelar")
    @classmethod
    def tearDownClass(inst):
        # Close the browser session and report TIR's collected results.
        inst.oHelper.TearDown()
if __name__ == '__main__':
    unittest.main()

from tir import Webapp
import unittest
class TECA640(unittest.TestCase):
    """TIR UI-automation test for Protheus routine TECA640 (SIGATEC module).

    Drives a real browser session through the TIR Webapp helper; there are
    no local assertions -- the helper raises on any UI failure.
    """
    @classmethod
    def setUpClass(inst):
        # Log into SIGATEC (Setup args follow TIR's module/base-date/group/
        # branch convention -- see the TIR docs) and open routine TECA640.
        inst.oHelper = Webapp()
        inst.oHelper.Setup("SIGATEC","11/12/2020","T1","D MG 01","28")
        inst.oHelper.Program("TECA640")
    def test_TECA640_001(self):
        # Fill the filter dialog with catch-all ranges, run the report
        # preview ("Visualizar"), then close it again.
        self.oHelper.SetValue("Cliente de ?","")
        self.oHelper.SetValue("Cliente ate ?","zzzzzz")
        self.oHelper.SetValue("Produto de ?","")
        self.oHelper.SetValue("Produto ate ?","ZZZZZZ")
        self.oHelper.SetButton("OK")
        self.oHelper.SetButton("Visualizar")
        self.oHelper.SetButton("Cancelar")
    @classmethod
    def tearDownClass(inst):
        # Close the browser session and report TIR's collected results.
        inst.oHelper.TearDown()
if __name__ == '__main__':
    unittest.main()
hackerrank/coin_change.py | anouard24/problem-solving | 0 | 6622719 | <reponame>anouard24/problem-solving
# https://www.hackerrank.com/challenges/coin-change/problem
def getWays(n, c):
    """Return the number of ways to make change for `n` units from coins `c`.

    Standard unbounded coin-change DP: `ways[j]` counts the combinations
    that sum to `j` using only the coins processed so far; iterating the
    coins in the outer loop counts combinations, not permutations.

    Improvements over the original: O(n) space instead of a full
    (len(c)+1) x (n+1) table, no pointless pre-sort of the coins, and
    getWays(0, c) correctly returns 1 (the empty selection) instead of 0.
    """
    ways = [1] + [0] * n  # ways[0] = 1: exactly one way to make 0 (no coins)
    for coin in c:
        for amount in range(coin, n + 1):
            ways[amount] += ways[amount - coin]
    return ways[n]
if __name__ == "__main__":
    # First stdin line: "n m" -- target amount and coin count (m is unused).
    n, m = map(int, input().rstrip().split())
    # Second stdin line: the coin values themselves.
    c = list(map(int, input().rstrip().split()))
    # Print the number of ways of making change for 'n' units
    # using coins having the values given by 'c'
    print(getWays(n, c))
| # https://www.hackerrank.com/challenges/coin-change/problem
def getWays(n, c):
c.sort()
coins = len(c) + 1
mem = [0] * coins
for i in range(coins):
mem[i] = [0] * (n + 1)
for i in range(1, coins):
for j in range(n + 1):
if j < c[i - 1]:
mem[i][j] = mem[i - 1][j]
elif j == c[i - 1]:
mem[i][j] = mem[i - 1][j] + 1
else:
mem[i][j] = mem[i - 1][j] + mem[i][j - c[i - 1]]
return mem[-1][-1]
if __name__ == "__main__":
    # First stdin line: "n m" -- target amount and coin count (m is unused).
    n, m = map(int, input().rstrip().split())
    # Second stdin line: the coin values themselves.
    c = list(map(int, input().rstrip().split()))
    # Print the number of ways of making change for 'n' units
    # using coins having the values given by 'c'
    print(getWays(n, c))
from enum import Enum


class Make(Enum):
    """Manufacturer filter values (path-segment style, e.g. "/bmw").

    ALL ("") applies no manufacturer filter.  Presumably appended to the
    search URL -- confirm against the crawler's URL builder.  The
    commented-out members are makes not exposed by the crawler yet.
    """
    ALL = ""
    BMW = "/bmw"
    AUDI = "/audi"
    FORD = "/ford"
    MERCEDES_BENZ = "/mercedes-benz"
    OPEL = "/opel"
    VOLKSWAGEN = "/volkswagen"
    RENAULT = "/renault"
    TOYOTA = "/toyota"
    PEUGEOT = "/peugeot"
    NISSAN = "/nissan"
    MINI = "/mini"
    KIA = "/kia"
    JAGUAR = "/jaguar"
    HYNDAI = "/hyundai"
    HONDA = "/honda"
    FIAT = "/fiat"
    DAIMLER = "/daimler"
    CITROEN = "/citroen"
    CHRYSLER = "/chrysler"
    CHEVROLET = "/chevrolet"
    # NINEFF = "/9ff"
    # ABARTH = "/abarth"
    # AC = "/ac"
    # ACM = "/acm"
    # ACURA = "/acura"
    # AIXAM = "/aixam"
    # ALFA_ROMEO = "/alfa-romeo"
    # ALPINA = "/alpina"
    # ALPINE = "/alpine"
    # AMPHICAR = "/amphicar"
    # ARIEL_MOTOR = "/ariel-motor"
    # ARTEGA = "/artega"
    # ASPID = "/aspid"
    # ASTON_MARTIN = "/aston-martin"
    # AUSTIN = "/austin"
    # AUTOBIANCHI = "/autobianch"
    # AUVERLAND = "/auverland"
    # BAIC = "/baic"
    # BEDFORD = "/bedford"
    # BELLIER = "/bellier"
    # BENTLEY = "/bentley"
    # BOLLORE = "/bolloré"
    # BORGWARD = "/borgward"
    # BRILLIANCE = "/brilliance"
    # BUGATTI = "/bugatti"
    # BUICK = "/buick"
    # BYD = "/byd"
    # CADILLAC = "/cadillac"
    # CARAVANS_WOHNM = "/caravans-wohnm"
    # CASALINI = "/casalini"
    # TESLA = "/tesla"


class Model(Enum):
    """Placeholder: per-make models are not enumerated yet."""
    pass


class Variant(Enum):
    """Placeholder: model variants are not enumerated yet."""
    pass


class FuelType(Enum):
    """Fuel-type filter codes; ALL ("") applies no filter."""
    ALL = ""
    GASOLINE = "B"
    DIESEL = "D"
    ETHANOL = "M"
    ELECTRIC = "E"
    HYDROGEN = "H"
    LPG = "L"
    CNG = "C"
    ELECTRIC_GASOLINE = "2"
    OTHER = "O"
    ELECTRIC_DIESEL = "3"


class BodyType(Enum):
    """Body-style filter codes; ALL ("") applies no filter."""
    ALL = ""
    COMPACT = "1"
    CONVERTIBLE = "2"
    COUPE = "3"
    OFFROAD = "4"
    SEDANS = "5"
    STATIONWAGON = "6"
    TRANSPORTER = "7"
    VAN = "8"
    OTHER = "9"


class Gear(Enum):
    """Transmission filter codes.

    NOTE(review): unlike the other filters there is no ALL = "" member;
    confirm whether an unfiltered gearbox search should be supported.
    """
    AUTOMATIC = "A"
    MANUAL = "M"
    SEMI_AUTOMATIC = "S"


class VehicleCondition(Enum):
    """Vehicle-condition filter codes; ALL ("") applies no filter."""
    ALL = ""
    NEW = "N"
    USED = "U"
    EMPLOYEE_CAR = "J"
    CLASSIC = "O"
    DEMONSTRATION = "D"
    PRE_REGISTERED = "S"


class Seller(Enum):
    """Seller-kind filter codes; ALL ("") applies no filter."""
    ALL = ""
    DEALER = "D"
    PRIVATE = "P"


class SortCriteria(Enum):
    """Result-ordering keys."""
    AGE = "age"
    PRICE = "price"
    MILEAGE = "mileage"
    POWER = "power"
    YEAR = "year"


class SortDirection(Enum):
    """Sort-direction query-string fragments."""
    DESCENDING = "desc=1"
    ASCENDING = "desc=0"
from enum import Enum


class Make(Enum):
    """Manufacturer filter values (path-segment style, e.g. "/bmw").

    ALL ("") applies no manufacturer filter.  Presumably appended to the
    search URL -- confirm against the crawler's URL builder.  The
    commented-out members are makes not exposed by the crawler yet.
    """
    ALL = ""
    BMW = "/bmw"
    AUDI = "/audi"
    FORD = "/ford"
    MERCEDES_BENZ = "/mercedes-benz"
    OPEL = "/opel"
    VOLKSWAGEN = "/volkswagen"
    RENAULT = "/renault"
    TOYOTA = "/toyota"
    PEUGEOT = "/peugeot"
    NISSAN = "/nissan"
    MINI = "/mini"
    KIA = "/kia"
    JAGUAR = "/jaguar"
    HYNDAI = "/hyundai"
    HONDA = "/honda"
    FIAT = "/fiat"
    DAIMLER = "/daimler"
    CITROEN = "/citroen"
    CHRYSLER = "/chrysler"
    CHEVROLET = "/chevrolet"
    # NINEFF = "/9ff"
    # ABARTH = "/abarth"
    # AC = "/ac"
    # ACM = "/acm"
    # ACURA = "/acura"
    # AIXAM = "/aixam"
    # ALFA_ROMEO = "/alfa-romeo"
    # ALPINA = "/alpina"
    # ALPINE = "/alpine"
    # AMPHICAR = "/amphicar"
    # ARIEL_MOTOR = "/ariel-motor"
    # ARTEGA = "/artega"
    # ASPID = "/aspid"
    # ASTON_MARTIN = "/aston-martin"
    # AUSTIN = "/austin"
    # AUTOBIANCHI = "/autobianch"
    # AUVERLAND = "/auverland"
    # BAIC = "/baic"
    # BEDFORD = "/bedford"
    # BELLIER = "/bellier"
    # BENTLEY = "/bentley"
    # BOLLORE = "/bolloré"
    # BORGWARD = "/borgward"
    # BRILLIANCE = "/brilliance"
    # BUGATTI = "/bugatti"
    # BUICK = "/buick"
    # BYD = "/byd"
    # CADILLAC = "/cadillac"
    # CARAVANS_WOHNM = "/caravans-wohnm"
    # CASALINI = "/casalini"
    # TESLA = "/tesla"


class Model(Enum):
    """Placeholder: per-make models are not enumerated yet."""
    pass


class Variant(Enum):
    """Placeholder: model variants are not enumerated yet."""
    pass


class FuelType(Enum):
    """Fuel-type filter codes; ALL ("") applies no filter."""
    ALL = ""
    GASOLINE = "B"
    DIESEL = "D"
    ETHANOL = "M"
    ELECTRIC = "E"
    HYDROGEN = "H"
    LPG = "L"
    CNG = "C"
    ELECTRIC_GASOLINE = "2"
    OTHER = "O"
    ELECTRIC_DIESEL = "3"


class BodyType(Enum):
    """Body-style filter codes; ALL ("") applies no filter."""
    ALL = ""
    COMPACT = "1"
    CONVERTIBLE = "2"
    COUPE = "3"
    OFFROAD = "4"
    SEDANS = "5"
    STATIONWAGON = "6"
    TRANSPORTER = "7"
    VAN = "8"
    OTHER = "9"


class Gear(Enum):
    """Transmission filter codes.

    NOTE(review): unlike the other filters there is no ALL = "" member;
    confirm whether an unfiltered gearbox search should be supported.
    """
    AUTOMATIC = "A"
    MANUAL = "M"
    SEMI_AUTOMATIC = "S"


class VehicleCondition(Enum):
    """Vehicle-condition filter codes; ALL ("") applies no filter."""
    ALL = ""
    NEW = "N"
    USED = "U"
    EMPLOYEE_CAR = "J"
    CLASSIC = "O"
    DEMONSTRATION = "D"
    PRE_REGISTERED = "S"


class Seller(Enum):
    """Seller-kind filter codes; ALL ("") applies no filter."""
    ALL = ""
    DEALER = "D"
    PRIVATE = "P"


class SortCriteria(Enum):
    """Result-ordering keys."""
    AGE = "age"
    PRICE = "price"
    MILEAGE = "mileage"
    POWER = "power"
    YEAR = "year"


class SortDirection(Enum):
    """Sort-direction query-string fragments."""
    DESCENDING = "desc=1"
    ASCENDING = "desc=0"
src/clients/aws_athena_async_client.py | jezd-axyl/platsec-aws-scanner | 0 | 6622721 | from logging import getLogger
from string import Template
from time import sleep
from typing import Any, Dict, List, Type
from botocore.client import BaseClient
from botocore.exceptions import BotoCoreError, ClientError
from src.data import aws_scanner_exceptions as exceptions
from src.clients import aws_athena_system_queries as queries
from src.data.aws_athena_data_partition import AwsAthenaDataPartition
from src.clients.aws_athena_query_states import COMPLETED_STATES, SUCCESS_STATES
from src.data.aws_organizations_types import Account
from src.aws_scanner_config import AwsScannerConfig as Config
class AwsAthenaAsyncClient:
    """Fire-and-forget wrapper around the boto3 Athena client.

    Queries are only *started* here (each DDL helper returns the Athena
    query-execution id); completion is polled separately through
    has_query_completed / has_query_succeeded, and rows are then read
    with get_query_results.
    """
    def __init__(self, boto_athena: BaseClient):
        self._logger = getLogger(self.__class__.__name__)
        self._boto_athena = boto_athena
        # Athena's built-in data catalog name
        self._catalog = "AwsDataCatalog"
        self._config = Config()
    def create_database(self, database_name: str) -> str:
        """Start a CREATE DATABASE query; return its execution id."""
        self._logger.info(f"creating database {database_name}")
        return self.run_query(
            query=Template(queries.CREATE_DATABASE).substitute(database_name=database_name),
            raise_on_failure=exceptions.CreateDatabaseException,
        )
    def drop_database(self, database_name: str) -> str:
        """Start a DROP DATABASE query; return its execution id."""
        self._logger.info(f"dropping database {database_name}")
        return self.run_query(
            query=Template(queries.DROP_DATABASE).substitute(database_name=database_name),
            raise_on_failure=exceptions.DropDatabaseException,
        )
    def create_table(self, database: str, account: Account) -> str:
        """Start creating the CloudTrail-logs table named after *account*."""
        self._logger.info(f"creating table {account.identifier} in database {database}")
        return self.run_query(
            query=Template(queries.CREATE_TABLE).substitute(
                account=account.identifier, cloudtrail_logs_bucket=self._config.cloudtrail_logs_bucket()
            ),
            database=database,
            raise_on_failure=exceptions.CreateTableException,
        )
    def drop_table(self, database: str, table: str) -> str:
        """Start a DROP TABLE query; return its execution id."""
        self._logger.info(f"dropping table {table} in database {database}")
        return self.run_query(
            query=Template(queries.DROP_TABLE).substitute(table=table),
            database=database,
            raise_on_failure=exceptions.DropTableException,
        )
    def add_partition(self, database: str, account: Account, partition: AwsAthenaDataPartition) -> str:
        """Start loading one region/year/month partition into *account*'s table."""
        self._logger.info(f"loading {partition} for table {account.identifier} in database {database}")
        return self.run_query(
            query=Template(queries.ADD_PARTITION_YEAR_MONTH).substitute(
                account=account.identifier,
                cloudtrail_logs_bucket=self._config.cloudtrail_logs_bucket(),
                region=partition.region,
                year=partition.year,
                month=partition.month,
            ),
            database=database,
            raise_on_failure=exceptions.AddPartitionException,
        )
    def has_query_completed(self, query_id: str) -> bool:
        """Return True once the query reached a terminal state (success or failure)."""
        return self._is_query_state_in(query_id, COMPLETED_STATES)
    def has_query_succeeded(self, query_id: str) -> bool:
        """Return True only when the query reached a success state."""
        return self._is_query_state_in(query_id, SUCCESS_STATES)
    def get_query_results(self, query_id: str) -> List[Any]:
        """Return the rows of a finished query, skipping the column-header row.

        NOTE(review): a single get_query_results call pages (1000 rows by
        default), so larger result sets would be truncated here -- confirm
        callers never expect more.
        """
        self._logger.debug(f"fetching results for query {query_id}")
        try:
            query_result_response = self._boto_athena.get_query_results(QueryExecutionId=query_id)
            # the first row holds the column headers, hence the [1:] slice
            return list(query_result_response["ResultSet"]["Rows"][1:])
        except (BotoCoreError, ClientError) as error:
            raise exceptions.GetQueryResultsException(f"query {query_id} results unknown: {error}") from None
    def get_query_error(self, query_id: str) -> str:
        """Return Athena's StateChangeReason (failure message) for *query_id*."""
        return str(self._get_query_execution(query_id)["QueryExecution"]["Status"]["StateChangeReason"])
    def run_query(
        self,
        query: str,
        database: str = "",
        raise_on_failure: Type[Exception] = exceptions.RunQueryException,
    ) -> str:
        """Submit *query* and return its execution id without waiting for it.

        Sleeps first (configured throttle) so rapid submissions stay under
        Athena's rate limits; raises *raise_on_failure* if the submission
        itself is rejected.
        """
        sleep(self._config.athena_query_throttling_seconds())
        self._logger.debug(f"running query {query}")
        try:
            query_execution_response = self._boto_athena.start_query_execution(
                QueryString=query,
                QueryExecutionContext=self._build_exec_context(database),
                ResultConfiguration={"OutputLocation": f"s3://{self._config.athena_query_results_bucket()}"},
            )
            return str(query_execution_response["QueryExecutionId"])
        except (BotoCoreError, ClientError) as error:
            raise raise_on_failure(f"query execution failure: {error}") from None
    def list_tables(self, database: str) -> List[str]:
        """Return the names of all tables in *database* (synchronous call)."""
        self._logger.info(f"listing tables in database {database}")
        try:
            response = self._boto_athena.list_table_metadata(CatalogName=self._catalog, DatabaseName=database)
            return [table["Name"] for table in response["TableMetadataList"]]
        except (BotoCoreError, ClientError) as error:
            raise exceptions.ListTablesException(error) from None
    def list_databases(self) -> List[str]:
        """Return the names of all databases in the catalog (synchronous call)."""
        self._logger.info("listing databases")
        try:
            response = self._boto_athena.list_databases(CatalogName=self._catalog)
            return [db["Name"] for db in response["DatabaseList"]]
        except (BotoCoreError, ClientError) as error:
            # NOTE(review): reuses ListTablesException for a *databases*
            # failure -- looks like copy-paste; confirm whether a dedicated
            # exception type exists.
            raise exceptions.ListTablesException(error) from None
    def _is_query_state_in(self, query_id: str, expected_states: List[str]) -> bool:
        """Return True when the query's current state is in *expected_states*."""
        return self._get_query_execution(query_id)["QueryExecution"]["Status"]["State"] in expected_states
    def _build_exec_context(self, database: str) -> Dict[str, str]:
        """Build the QueryExecutionContext, omitting Database when unset."""
        return {"Catalog": self._catalog, "Database": database} if database else {"Catalog": self._catalog}
    def _get_query_execution(self, query_id: str) -> Dict[Any, Any]:
        """Fetch the full execution record for *query_id* from Athena."""
        self._logger.debug(f"polling execution state for query {query_id}")
        try:
            return dict(self._boto_athena.get_query_execution(QueryExecutionId=query_id))
        except (BotoCoreError, ClientError) as error:
            raise exceptions.UnknownQueryStateException(f"query {query_id} state unknown: {error}") from None
| from logging import getLogger
from string import Template
from time import sleep
from typing import Any, Dict, List, Type
from botocore.client import BaseClient
from botocore.exceptions import BotoCoreError, ClientError
from src.data import aws_scanner_exceptions as exceptions
from src.clients import aws_athena_system_queries as queries
from src.data.aws_athena_data_partition import AwsAthenaDataPartition
from src.clients.aws_athena_query_states import COMPLETED_STATES, SUCCESS_STATES
from src.data.aws_organizations_types import Account
from src.aws_scanner_config import AwsScannerConfig as Config
class AwsAthenaAsyncClient:
    """Fire-and-forget wrapper around the boto3 Athena client.

    Queries are only *started* here (each DDL helper returns the Athena
    query-execution id); completion is polled separately through
    has_query_completed / has_query_succeeded, and rows are then read
    with get_query_results.
    """
    def __init__(self, boto_athena: BaseClient):
        self._logger = getLogger(self.__class__.__name__)
        self._boto_athena = boto_athena
        # Athena's built-in data catalog name
        self._catalog = "AwsDataCatalog"
        self._config = Config()
    def create_database(self, database_name: str) -> str:
        """Start a CREATE DATABASE query; return its execution id."""
        self._logger.info(f"creating database {database_name}")
        return self.run_query(
            query=Template(queries.CREATE_DATABASE).substitute(database_name=database_name),
            raise_on_failure=exceptions.CreateDatabaseException,
        )
    def drop_database(self, database_name: str) -> str:
        """Start a DROP DATABASE query; return its execution id."""
        self._logger.info(f"dropping database {database_name}")
        return self.run_query(
            query=Template(queries.DROP_DATABASE).substitute(database_name=database_name),
            raise_on_failure=exceptions.DropDatabaseException,
        )
    def create_table(self, database: str, account: Account) -> str:
        """Start creating the CloudTrail-logs table named after *account*."""
        self._logger.info(f"creating table {account.identifier} in database {database}")
        return self.run_query(
            query=Template(queries.CREATE_TABLE).substitute(
                account=account.identifier, cloudtrail_logs_bucket=self._config.cloudtrail_logs_bucket()
            ),
            database=database,
            raise_on_failure=exceptions.CreateTableException,
        )
    def drop_table(self, database: str, table: str) -> str:
        """Start a DROP TABLE query; return its execution id."""
        self._logger.info(f"dropping table {table} in database {database}")
        return self.run_query(
            query=Template(queries.DROP_TABLE).substitute(table=table),
            database=database,
            raise_on_failure=exceptions.DropTableException,
        )
    def add_partition(self, database: str, account: Account, partition: AwsAthenaDataPartition) -> str:
        """Start loading one region/year/month partition into *account*'s table."""
        self._logger.info(f"loading {partition} for table {account.identifier} in database {database}")
        return self.run_query(
            query=Template(queries.ADD_PARTITION_YEAR_MONTH).substitute(
                account=account.identifier,
                cloudtrail_logs_bucket=self._config.cloudtrail_logs_bucket(),
                region=partition.region,
                year=partition.year,
                month=partition.month,
            ),
            database=database,
            raise_on_failure=exceptions.AddPartitionException,
        )
    def has_query_completed(self, query_id: str) -> bool:
        """Return True once the query reached a terminal state (success or failure)."""
        return self._is_query_state_in(query_id, COMPLETED_STATES)
    def has_query_succeeded(self, query_id: str) -> bool:
        """Return True only when the query reached a success state."""
        return self._is_query_state_in(query_id, SUCCESS_STATES)
    def get_query_results(self, query_id: str) -> List[Any]:
        """Return the rows of a finished query, skipping the column-header row.

        NOTE(review): a single get_query_results call pages (1000 rows by
        default), so larger result sets would be truncated here -- confirm
        callers never expect more.
        """
        self._logger.debug(f"fetching results for query {query_id}")
        try:
            query_result_response = self._boto_athena.get_query_results(QueryExecutionId=query_id)
            # the first row holds the column headers, hence the [1:] slice
            return list(query_result_response["ResultSet"]["Rows"][1:])
        except (BotoCoreError, ClientError) as error:
            raise exceptions.GetQueryResultsException(f"query {query_id} results unknown: {error}") from None
    def get_query_error(self, query_id: str) -> str:
        """Return Athena's StateChangeReason (failure message) for *query_id*."""
        return str(self._get_query_execution(query_id)["QueryExecution"]["Status"]["StateChangeReason"])
    def run_query(
        self,
        query: str,
        database: str = "",
        raise_on_failure: Type[Exception] = exceptions.RunQueryException,
    ) -> str:
        """Submit *query* and return its execution id without waiting for it.

        Sleeps first (configured throttle) so rapid submissions stay under
        Athena's rate limits; raises *raise_on_failure* if the submission
        itself is rejected.
        """
        sleep(self._config.athena_query_throttling_seconds())
        self._logger.debug(f"running query {query}")
        try:
            query_execution_response = self._boto_athena.start_query_execution(
                QueryString=query,
                QueryExecutionContext=self._build_exec_context(database),
                ResultConfiguration={"OutputLocation": f"s3://{self._config.athena_query_results_bucket()}"},
            )
            return str(query_execution_response["QueryExecutionId"])
        except (BotoCoreError, ClientError) as error:
            raise raise_on_failure(f"query execution failure: {error}") from None
    def list_tables(self, database: str) -> List[str]:
        """Return the names of all tables in *database* (synchronous call)."""
        self._logger.info(f"listing tables in database {database}")
        try:
            response = self._boto_athena.list_table_metadata(CatalogName=self._catalog, DatabaseName=database)
            return [table["Name"] for table in response["TableMetadataList"]]
        except (BotoCoreError, ClientError) as error:
            raise exceptions.ListTablesException(error) from None
    def list_databases(self) -> List[str]:
        """Return the names of all databases in the catalog (synchronous call)."""
        self._logger.info("listing databases")
        try:
            response = self._boto_athena.list_databases(CatalogName=self._catalog)
            return [db["Name"] for db in response["DatabaseList"]]
        except (BotoCoreError, ClientError) as error:
            # NOTE(review): reuses ListTablesException for a *databases*
            # failure -- looks like copy-paste; confirm whether a dedicated
            # exception type exists.
            raise exceptions.ListTablesException(error) from None
    def _is_query_state_in(self, query_id: str, expected_states: List[str]) -> bool:
        """Return True when the query's current state is in *expected_states*."""
        return self._get_query_execution(query_id)["QueryExecution"]["Status"]["State"] in expected_states
    def _build_exec_context(self, database: str) -> Dict[str, str]:
        """Build the QueryExecutionContext, omitting Database when unset."""
        return {"Catalog": self._catalog, "Database": database} if database else {"Catalog": self._catalog}
    def _get_query_execution(self, query_id: str) -> Dict[Any, Any]:
        """Fetch the full execution record for *query_id* from Athena."""
        self._logger.debug(f"polling execution state for query {query_id}")
        try:
            return dict(self._boto_athena.get_query_execution(QueryExecutionId=query_id))
        except (BotoCoreError, ClientError) as error:
            raise exceptions.UnknownQueryStateException(f"query {query_id} state unknown: {error}") from None
| none | 1 | 2.035024 | 2 | |
tgtypes/models/shipping_address.py | autogram/tgtypes | 0 | 6622722 | <gh_stars>0
from __future__ import annotations
from ._base import TelegramObject
class ShippingAddress(TelegramObject):
"""
This object represents a shipping address.
Source: https://core.telegram.org/bots/api#shippingaddress
"""
country_code: str
"""ISO 3166-1 alpha-2 country code"""
state: str
"""State, if applicable"""
city: str
"""City"""
street_line1: str
"""First line for the address"""
street_line2: str
"""Second line for the address"""
post_code: str
"""Address post code"""
| from __future__ import annotations
from ._base import TelegramObject
class ShippingAddress(TelegramObject):
"""
This object represents a shipping address.
Source: https://core.telegram.org/bots/api#shippingaddress
"""
country_code: str
"""ISO 3166-1 alpha-2 country code"""
state: str
"""State, if applicable"""
city: str
"""City"""
street_line1: str
"""First line for the address"""
street_line2: str
"""Second line for the address"""
post_code: str
"""Address post code""" | en | 0.843465 | This object represents a shipping address. Source: https://core.telegram.org/bots/api#shippingaddress ISO 3166-1 alpha-2 country code State, if applicable City First line for the address Second line for the address Address post code | 2.926208 | 3 |
addons/io_sketchfab_plugin/blender/com/gltf2_blender_material_helpers.py | gorenje/blender-plugin | 2 | 6622723 | <filename>addons/io_sketchfab_plugin/blender/com/gltf2_blender_material_helpers.py
"""
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Contributor(s): <NAME>.
*
* ***** END GPL LICENSE BLOCK *****
"""
def get_pbr_node(node_tree):
pass
def get_output_node(node_tree):
output = [node for node in node_tree.nodes if node.type == 'OUTPUT_MATERIAL'][0]
return output
def get_output_surface_input(node_tree):
output_node = get_output_node(node_tree)
return output_node.inputs['Surface']
def get_diffuse_texture(node_tree):
for node in node_tree.nodes:
print(node.name)
if node.label == 'BASE COLOR':
return node
return None
def get_preoutput_node(node_tree):
output_node = get_output_node(node_tree)
return output_node.inputs['Surface'].links[0].from_node
def get_preoutput_node_output(node_tree):
output_node = get_output_node(node_tree)
preoutput_node = output_node.inputs['Surface'].links[0].from_node
# Pre output node is Principled BSDF or any BSDF => BSDF
if 'BSDF' in preoutput_node.type:
return preoutput_node.outputs['BSDF']
elif 'SHADER' in preoutput_node.type:
return preoutput_node.outputs['Shader']
else:
print(preoutput_node.type)
def get_base_color_node(node_tree):
""" returns the last node of the diffuse block """
for node in node_tree.nodes:
if node.label == 'BASE COLOR':
return node
return None
| <filename>addons/io_sketchfab_plugin/blender/com/gltf2_blender_material_helpers.py
"""
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Contributor(s): <NAME>.
*
* ***** END GPL LICENSE BLOCK *****
"""
def get_pbr_node(node_tree):
pass
def get_output_node(node_tree):
output = [node for node in node_tree.nodes if node.type == 'OUTPUT_MATERIAL'][0]
return output
def get_output_surface_input(node_tree):
output_node = get_output_node(node_tree)
return output_node.inputs['Surface']
def get_diffuse_texture(node_tree):
for node in node_tree.nodes:
print(node.name)
if node.label == 'BASE COLOR':
return node
return None
def get_preoutput_node(node_tree):
output_node = get_output_node(node_tree)
return output_node.inputs['Surface'].links[0].from_node
def get_preoutput_node_output(node_tree):
output_node = get_output_node(node_tree)
preoutput_node = output_node.inputs['Surface'].links[0].from_node
# Pre output node is Principled BSDF or any BSDF => BSDF
if 'BSDF' in preoutput_node.type:
return preoutput_node.outputs['BSDF']
elif 'SHADER' in preoutput_node.type:
return preoutput_node.outputs['Shader']
else:
print(preoutput_node.type)
def get_base_color_node(node_tree):
""" returns the last node of the diffuse block """
for node in node_tree.nodes:
if node.label == 'BASE COLOR':
return node
return None
| en | 0.83216 | * ***** BEGIN GPL LICENSE BLOCK ***** * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * Contributor(s): <NAME>. * * ***** END GPL LICENSE BLOCK ***** # Pre output node is Principled BSDF or any BSDF => BSDF returns the last node of the diffuse block | 1.847404 | 2 |
utils/ASSO/utils.py | mrazekv/BLASYS | 11 | 6622724 | import numpy as np
def get_matrix(file_path):
with open(file_path) as f:
lines = f.readlines()
mat = [list(i.strip().replace(' ', '')) for i in lines]
return np.array(mat, dtype=np.uint8)
def write_matrix(mat, file_path):
with open(file_path, 'w') as f:
for row in mat:
for ele in row:
f.write('{} '.format(ele))
f.write('\n')
def HD(org, app):
assert org.shape == app.shape
return np.sum(org != app)
def weighted_HD(org, app):
assert org.shape == app.shape
row, col = org.shape
weight = np.array([2**e for e in range(col-1, -1, -1)])
HD = (org != app)
return np.sum(HD * weight)
# Compute association matrix
def calculate_association(matrix, threshold=0.5):
row, col = matrix.shape
ASSO = np.zeros((col, col))
for i in range(col):
idx = (matrix[:, i] == 1)
for j in range(col):
ASSO[i, j] = sum(matrix[:, j][idx])
if ASSO[i, i] != 0:
ASSO[i, :] = ASSO[i, :] / ASSO[i, i]
ASSO[ASSO >= threshold] = 1
ASSO[ASSO < threshold] = 0
return ASSO.astype(np.uint8)
def solve_basis(matrix, k, asso, bonus, penalty, binary=False):
row, col = matrix.shape
# Mark whether an entry is already covered
covered = np.zeros((row, col)).astype(np.uint8)
# Coefficient matrix for bonux or penalty
coef = np.zeros(matrix.shape)
coef[matrix == 0] = penalty
coef[matrix == 1] = bonus
# If in binary mode, make coef exponential
if binary:
coef *= np.array([2**e for e in range(col-1, -1, -1)])
for i in range(k):
best_basis = np.zeros((1, col)).astype(np.uint8)
best_solver = np.zeros((row, 1)).astype(np.uint8)
best_score = 0
for b in range(col):
# Candidate pair of basis and solver
basis = asso[b, :]
solver = np.zeros((row, 1)).astype(np.uint8)
# Compute score for each row
not_covered = 1 - covered
score_matrix = coef * not_covered * basis
score_per_row = np.sum(score_matrix, axis=1)
# Compute solver
solver[score_per_row > 0] = 1
# Compute accumulate point
score = np.sum(score_per_row[score_per_row > 0])
if score > best_score:
best_basis = basis
best_solver = solver
best_score = score
# Stack matrix B and S
if i == 0:
B = best_basis.reshape((1, -1))
S = best_solver.copy()
else:
B = np.vstack((B, best_basis))
S = np.hstack((S, best_solver))
# Update covered matrix
covered = np.matmul(S, B)
covered[covered > 1] = 1
return S, B, covered
| import numpy as np
def get_matrix(file_path):
with open(file_path) as f:
lines = f.readlines()
mat = [list(i.strip().replace(' ', '')) for i in lines]
return np.array(mat, dtype=np.uint8)
def write_matrix(mat, file_path):
with open(file_path, 'w') as f:
for row in mat:
for ele in row:
f.write('{} '.format(ele))
f.write('\n')
def HD(org, app):
assert org.shape == app.shape
return np.sum(org != app)
def weighted_HD(org, app):
assert org.shape == app.shape
row, col = org.shape
weight = np.array([2**e for e in range(col-1, -1, -1)])
HD = (org != app)
return np.sum(HD * weight)
# Compute association matrix
def calculate_association(matrix, threshold=0.5):
row, col = matrix.shape
ASSO = np.zeros((col, col))
for i in range(col):
idx = (matrix[:, i] == 1)
for j in range(col):
ASSO[i, j] = sum(matrix[:, j][idx])
if ASSO[i, i] != 0:
ASSO[i, :] = ASSO[i, :] / ASSO[i, i]
ASSO[ASSO >= threshold] = 1
ASSO[ASSO < threshold] = 0
return ASSO.astype(np.uint8)
def solve_basis(matrix, k, asso, bonus, penalty, binary=False):
row, col = matrix.shape
# Mark whether an entry is already covered
covered = np.zeros((row, col)).astype(np.uint8)
# Coefficient matrix for bonux or penalty
coef = np.zeros(matrix.shape)
coef[matrix == 0] = penalty
coef[matrix == 1] = bonus
# If in binary mode, make coef exponential
if binary:
coef *= np.array([2**e for e in range(col-1, -1, -1)])
for i in range(k):
best_basis = np.zeros((1, col)).astype(np.uint8)
best_solver = np.zeros((row, 1)).astype(np.uint8)
best_score = 0
for b in range(col):
# Candidate pair of basis and solver
basis = asso[b, :]
solver = np.zeros((row, 1)).astype(np.uint8)
# Compute score for each row
not_covered = 1 - covered
score_matrix = coef * not_covered * basis
score_per_row = np.sum(score_matrix, axis=1)
# Compute solver
solver[score_per_row > 0] = 1
# Compute accumulate point
score = np.sum(score_per_row[score_per_row > 0])
if score > best_score:
best_basis = basis
best_solver = solver
best_score = score
# Stack matrix B and S
if i == 0:
B = best_basis.reshape((1, -1))
S = best_solver.copy()
else:
B = np.vstack((B, best_basis))
S = np.hstack((S, best_solver))
# Update covered matrix
covered = np.matmul(S, B)
covered[covered > 1] = 1
return S, B, covered
| en | 0.827401 | # Compute association matrix # Mark whether an entry is already covered # Coefficient matrix for bonux or penalty # If in binary mode, make coef exponential # Candidate pair of basis and solver # Compute score for each row # Compute solver # Compute accumulate point # Stack matrix B and S # Update covered matrix | 2.582603 | 3 |
Codewars/5kyu/moveZero.py | Ry4nW/python-wars | 1 | 6622725 | <reponame>Ry4nW/python-wars
def move_zeros(array):
numArr = []
zeroCount = 0
for i in array:
if i != 0:
numArr.append(i)
else:
zeroCount += 1
for i in range(zeroCount):
numArr.append(0)
return numArr
print(move_zeros([1, 0]))
# One-liner:
def move_zeros2(array):
return sorted(array, key=lambda x: x==0 and type(x) is not bool)
| def move_zeros(array):
numArr = []
zeroCount = 0
for i in array:
if i != 0:
numArr.append(i)
else:
zeroCount += 1
for i in range(zeroCount):
numArr.append(0)
return numArr
print(move_zeros([1, 0]))
# One-liner:
def move_zeros2(array):
return sorted(array, key=lambda x: x==0 and type(x) is not bool) | en | 0.582608 | # One-liner: | 3.767092 | 4 |
mi9_dj/handlers/websocket_handler.py | nhardy/mi9-dj | 0 | 6622726 | import tornado.websocket, tornado.gen
import json
import asyncio
from ..app import App
class WebSocketHandler(tornado.websocket.WebSocketHandler):
_APP = App()
_sockets = set()
def _write_all(self, message):
for ws in self._sockets:
ws.write_message(message)
def _send_state(self):
state = {'cmd': 'state'}
state.update(self._APP.get_state())
print('state:', state)
self._write_all(json.dumps(state))
def initialize(self):
self._APP.add_callback(self._send_state)
def open(self):
self._sockets.add(self)
self._send_state()
@tornado.gen.coroutine
def on_message(self, content):
print(content)
message = json.loads(content)
cmd = message['cmd']
if 'value' in message:
value = message['value']
if cmd == 'add_video':
_ = yield self._APP.add_video(value)
# TODO: Send message to sender that video was added successfully
self._send_state()
print(self._APP.get_state())
elif cmd == 'toggle_playback':
self._APP.toggle_playback()
elif cmd == 'volume':
pass
def on_close(self):
self._sockets.remove(self)
| import tornado.websocket, tornado.gen
import json
import asyncio
from ..app import App
class WebSocketHandler(tornado.websocket.WebSocketHandler):
_APP = App()
_sockets = set()
def _write_all(self, message):
for ws in self._sockets:
ws.write_message(message)
def _send_state(self):
state = {'cmd': 'state'}
state.update(self._APP.get_state())
print('state:', state)
self._write_all(json.dumps(state))
def initialize(self):
self._APP.add_callback(self._send_state)
def open(self):
self._sockets.add(self)
self._send_state()
@tornado.gen.coroutine
def on_message(self, content):
print(content)
message = json.loads(content)
cmd = message['cmd']
if 'value' in message:
value = message['value']
if cmd == 'add_video':
_ = yield self._APP.add_video(value)
# TODO: Send message to sender that video was added successfully
self._send_state()
print(self._APP.get_state())
elif cmd == 'toggle_playback':
self._APP.toggle_playback()
elif cmd == 'volume':
pass
def on_close(self):
self._sockets.remove(self)
| en | 0.971809 | # TODO: Send message to sender that video was added successfully | 2.485285 | 2 |
ibis/impala/compiler.py | nubank/ibis | 3 | 6622727 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six import StringIO
import datetime
import ibis
import ibis.expr.analysis as L
import ibis.expr.datatypes as dt
import ibis.expr.types as ir
import ibis.expr.operations as ops
import ibis.expr.temporal as tempo
import ibis.sql.compiler as comp
import ibis.sql.transforms as transforms
import ibis.impala.identifiers as identifiers
import ibis.common as com
import ibis.util as util
def build_ast(expr, context=None):
builder = ImpalaQueryBuilder(expr, context=context)
return builder.get_result()
def _get_query(expr, context):
ast = build_ast(expr, context)
query = ast.queries[0]
return query
def to_sql(expr, context=None):
query = _get_query(expr, context)
return query.compile()
# ----------------------------------------------------------------------
# Select compilation
class ImpalaSelectBuilder(comp.SelectBuilder):
@property
def _select_class(self):
return ImpalaSelect
class ImpalaQueryBuilder(comp.QueryBuilder):
select_builder = ImpalaSelectBuilder
@property
def _make_context(self):
return ImpalaContext
@property
def _union_class(self):
return ImpalaUnion
class ImpalaContext(comp.QueryContext):
def _to_sql(self, expr, ctx):
return to_sql(expr, context=ctx)
class ImpalaSelect(comp.Select):
"""
A SELECT statement which, after execution, might yield back to the user a
table, array/list, or scalar value, depending on the expression that
generated it
"""
def compile(self):
"""
This method isn't yet idempotent; calling multiple times may yield
unexpected results
"""
# Can't tell if this is a hack or not. Revisit later
self.context.set_query(self)
# If any subqueries, translate them and add to beginning of query as
# part of the WITH section
with_frag = self.format_subqueries()
# SELECT
select_frag = self.format_select_set()
# FROM, JOIN, UNION
from_frag = self.format_table_set()
# WHERE
where_frag = self.format_where()
# GROUP BY and HAVING
groupby_frag = self.format_group_by()
# ORDER BY and LIMIT
order_frag = self.format_postamble()
# Glue together the query fragments and return
query = _join_not_none('\n', [with_frag, select_frag, from_frag,
where_frag, groupby_frag, order_frag])
return query
def format_subqueries(self):
if len(self.subqueries) == 0:
return
context = self.context
buf = StringIO()
buf.write('WITH ')
for i, expr in enumerate(self.subqueries):
if i > 0:
buf.write(',\n')
formatted = util.indent(context.get_compiled_expr(expr), 2)
alias = context.get_ref(expr)
buf.write('{0} AS (\n{1}\n)'.format(alias, formatted))
return buf.getvalue()
def format_select_set(self):
# TODO:
context = self.context
formatted = []
for expr in self.select_set:
if isinstance(expr, ir.ValueExpr):
expr_str = self._translate(expr, named=True)
elif isinstance(expr, ir.TableExpr):
# A * selection, possibly prefixed
if context.need_aliases():
alias = context.get_ref(expr)
# materialized join will not have an alias. see #491
expr_str = '{0}.*'.format(alias) if alias else '*'
else:
expr_str = '*'
formatted.append(expr_str)
buf = StringIO()
line_length = 0
max_length = 70
tokens = 0
for i, val in enumerate(formatted):
# always line-break for multi-line expressions
if val.count('\n'):
if i:
buf.write(',')
buf.write('\n')
indented = util.indent(val, self.indent)
buf.write(indented)
# set length of last line
line_length = len(indented.split('\n')[-1])
tokens = 1
elif (tokens > 0 and line_length and
len(val) + line_length > max_length):
# There is an expr, and adding this new one will make the line
# too long
buf.write(',\n ') if i else buf.write('\n')
buf.write(val)
line_length = len(val) + 7
tokens = 1
else:
if i:
buf.write(',')
buf.write(' ')
buf.write(val)
tokens += 1
line_length += len(val) + 2
if self.distinct:
select_key = 'SELECT DISTINCT'
else:
select_key = 'SELECT'
return '{0}{1}'.format(select_key, buf.getvalue())
def format_table_set(self):
if self.table_set is None:
return None
fragment = 'FROM '
helper = _TableSetFormatter(self, self.table_set)
fragment += helper.get_result()
return fragment
def format_group_by(self):
if not len(self.group_by):
# There is no aggregation, nothing to see here
return None
lines = []
if len(self.group_by) > 0:
clause = 'GROUP BY {0}'.format(', '.join([
str(x + 1) for x in self.group_by]))
lines.append(clause)
if len(self.having) > 0:
trans_exprs = []
for expr in self.having:
translated = self._translate(expr)
trans_exprs.append(translated)
lines.append('HAVING {0}'.format(' AND '.join(trans_exprs)))
return '\n'.join(lines)
def format_where(self):
if len(self.where) == 0:
return None
buf = StringIO()
buf.write('WHERE ')
fmt_preds = []
for pred in self.where:
new_pred = self._translate(pred, permit_subquery=True)
if isinstance(pred.op(), ops.Or):
# parens for OR exprs because it binds looser than AND
new_pred = _parenthesize(new_pred)
fmt_preds.append(new_pred)
conj = ' AND\n{0}'.format(' ' * 6)
buf.write(conj.join(fmt_preds))
return buf.getvalue()
def format_postamble(self):
buf = StringIO()
lines = 0
if len(self.order_by) > 0:
buf.write('ORDER BY ')
formatted = []
for expr in self.order_by:
key = expr.op()
translated = self._translate(key.expr)
if not key.ascending:
translated += ' DESC'
formatted.append(translated)
buf.write(', '.join(formatted))
lines += 1
if self.limit is not None:
if lines:
buf.write('\n')
n, offset = self.limit['n'], self.limit['offset']
buf.write('LIMIT {0}'.format(n))
if offset is not None and offset != 0:
buf.write(' OFFSET {0}'.format(offset))
lines += 1
if not lines:
return None
return buf.getvalue()
@property
def translator(self):
return ImpalaExprTranslator
def _join_not_none(sep, pieces):
pieces = [x for x in pieces if x is not None]
return sep.join(pieces)
class _TableSetFormatter(comp.TableSetFormatter):
def get_result(self):
# Got to unravel the join stack; the nesting order could be
# arbitrary, so we do a depth first search and push the join tokens
# and predicates onto a flat list, then format them
op = self.expr.op()
if isinstance(op, ops.Join):
self._walk_join_tree(op)
else:
self.join_tables.append(self._format_table(self.expr))
# TODO: Now actually format the things
buf = StringIO()
buf.write(self.join_tables[0])
for jtype, table, preds in zip(self.join_types, self.join_tables[1:],
self.join_predicates):
buf.write('\n')
buf.write(util.indent('{0} {1}'.format(jtype, table), self.indent))
if len(preds):
buf.write('\n')
fmt_preds = [self._translate(pred) for pred in preds]
conj = ' AND\n{0}'.format(' ' * 3)
fmt_preds = util.indent('ON ' + conj.join(fmt_preds),
self.indent * 2)
buf.write(fmt_preds)
return buf.getvalue()
_join_names = {
ops.InnerJoin: 'INNER JOIN',
ops.LeftJoin: 'LEFT OUTER JOIN',
ops.RightJoin: 'RIGHT OUTER JOIN',
ops.OuterJoin: 'FULL OUTER JOIN',
ops.LeftAntiJoin: 'LEFT ANTI JOIN',
ops.LeftSemiJoin: 'LEFT SEMI JOIN',
ops.CrossJoin: 'CROSS JOIN'
}
def _get_join_type(self, op):
jname = self._join_names[type(op)]
# Impala requires this
if len(op.predicates) == 0:
jname = self._join_names[ops.CrossJoin]
return jname
def _format_table(self, expr):
# TODO: This could probably go in a class and be significantly nicer
ctx = self.context
ref_expr = expr
op = ref_op = expr.op()
if isinstance(op, ops.SelfReference):
ref_expr = op.table
ref_op = ref_expr.op()
if isinstance(ref_op, ops.PhysicalTable):
name = ref_op.name
if name is None:
raise com.RelationError('Table did not have a name: {0!r}'
.format(expr))
result = quote_identifier(name)
is_subquery = False
else:
# A subquery
if ctx.is_extracted(ref_expr):
# Was put elsewhere, e.g. WITH block, we just need to grab its
# alias
alias = ctx.get_ref(expr)
# HACK: self-references have to be treated more carefully here
if isinstance(op, ops.SelfReference):
return '{0} {1}'.format(ctx.get_ref(ref_expr), alias)
else:
return alias
subquery = ctx.get_compiled_expr(expr)
result = '(\n{0}\n)'.format(util.indent(subquery, self.indent))
is_subquery = True
if is_subquery or ctx.need_aliases():
result += ' {0}'.format(ctx.get_ref(expr))
return result
class ImpalaUnion(comp.Union):
def compile(self):
context = self.context
if self.distinct:
union_keyword = 'UNION'
else:
union_keyword = 'UNION ALL'
left_set = context.get_compiled_expr(self.left, isolated=True)
right_set = context.get_compiled_expr(self.right, isolated=True)
# XXX: hack of all trades - our right relation has a CTE
# TODO: factor out common subqueries in the union
if right_set.startswith('WITH'):
format_string = '({0})\n{1}\n({2})'
else:
format_string = '{0}\n{1}\n{2}'
return format_string.format(left_set, union_keyword, right_set)
# ---------------------------------------------------------------------
# Scalar and array expression formatting
_sql_type_names = {
'int8': 'tinyint',
'int16': 'smallint',
'int32': 'int',
'int64': 'bigint',
'float': 'float',
'double': 'double',
'string': 'string',
'boolean': 'boolean',
'timestamp': 'timestamp',
'decimal': 'decimal',
}
def _cast(translator, expr):
op = expr.op()
arg, target_type = op.args
arg_formatted = translator.translate(arg)
if isinstance(arg, ir.CategoryValue) and target_type == 'int32':
return arg_formatted
else:
sql_type = _type_to_sql_string(target_type)
return 'CAST({0!s} AS {1!s})'.format(arg_formatted, sql_type)
def _type_to_sql_string(tval):
if isinstance(tval, dt.Decimal):
return 'decimal({0},{1})'.format(tval.precision, tval.scale)
else:
return _sql_type_names[tval.name()]
def _between(translator, expr):
op = expr.op()
comp, lower, upper = [translator.translate(x) for x in op.args]
return '{0!s} BETWEEN {1!s} AND {2!s}'.format(comp, lower, upper)
def _is_null(translator, expr):
formatted_arg = translator.translate(expr.op().args[0])
return '{0!s} IS NULL'.format(formatted_arg)
def _not_null(translator, expr):
formatted_arg = translator.translate(expr.op().args[0])
return '{0!s} IS NOT NULL'.format(formatted_arg)
_cumulative_to_reduction = {
ops.CumulativeSum: ops.Sum,
ops.CumulativeMin: ops.Min,
ops.CumulativeMax: ops.Max,
ops.CumulativeMean: ops.Mean,
ops.CumulativeAny: ops.Any,
ops.CumulativeAll: ops.All,
}
def _cumulative_to_window(translator, expr, window):
win = ibis.cumulative_window()
win = (win.group_by(window._group_by)
.order_by(window._order_by))
op = expr.op()
klass = _cumulative_to_reduction[type(op)]
new_op = klass(*op.args)
new_expr = expr._factory(new_op, name=expr._name)
if type(new_op) in translator._rewrites:
new_expr = translator._rewrites[type(new_op)](new_expr)
new_expr = L.windowize_function(new_expr, win)
return new_expr
def _window(translator, expr):
op = expr.op()
arg, window = op.args
window_op = arg.op()
_require_order_by = (ops.Lag,
ops.Lead,
ops.DenseRank,
ops.MinRank,
ops.FirstValue,
ops.LastValue)
_unsupported_reductions = (
ops.CMSMedian,
ops.GroupConcat,
ops.HLLCardinality,
)
if isinstance(window_op, _unsupported_reductions):
raise com.TranslationError('{0!s} is not supported in '
'window functions'
.format(type(window_op)))
if isinstance(window_op, ops.CumulativeOp):
arg = _cumulative_to_window(translator, arg, window)
return translator.translate(arg)
# Some analytic functions need to have the expression of interest in
# the ORDER BY part of the window clause
if (isinstance(window_op, _require_order_by) and
len(window._order_by) == 0):
window = window.order_by(window_op.args[0])
window_formatted = _format_window(translator, window)
arg_formatted = translator.translate(arg)
result = '{0} {1}'.format(arg_formatted, window_formatted)
if type(window_op) in _expr_transforms:
return _expr_transforms[type(window_op)](result)
else:
return result
def _format_window(translator, window):
components = []
if len(window._group_by) > 0:
partition_args = [translator.translate(x)
for x in window._group_by]
components.append('PARTITION BY {0}'.format(', '.join(partition_args)))
if len(window._order_by) > 0:
order_args = []
for expr in window._order_by:
key = expr.op()
translated = translator.translate(key.expr)
if not key.ascending:
translated += ' DESC'
order_args.append(translated)
components.append('ORDER BY {0}'.format(', '.join(order_args)))
p, f = window.preceding, window.following
def _prec(p):
return '{0} PRECEDING'.format(p) if p > 0 else 'CURRENT ROW'
def _foll(f):
return '{0} FOLLOWING'.format(f) if f > 0 else 'CURRENT ROW'
if p is not None and f is not None:
frame = ('ROWS BETWEEN {0} AND {1}'
.format(_prec(p), _foll(f)))
elif p is not None:
if isinstance(p, tuple):
start, end = p
frame = ('ROWS BETWEEN {0} AND {1}'
.format(_prec(start), _prec(end)))
else:
kind = 'ROWS' if p > 0 else 'RANGE'
frame = ('{0} BETWEEN {1} AND UNBOUNDED FOLLOWING'
.format(kind, _prec(p)))
elif f is not None:
if isinstance(f, tuple):
start, end = f
frame = ('ROWS BETWEEN {0} AND {1}'
.format(_foll(start), _foll(end)))
else:
kind = 'ROWS' if f > 0 else 'RANGE'
frame = ('{0} BETWEEN UNBOUNDED PRECEDING AND {1}'
.format(kind, _foll(f)))
else:
# no-op, default is full sample
frame = None
if frame is not None:
components.append(frame)
return 'OVER ({0})'.format(' '.join(components))
def _shift_like(name):
def formatter(translator, expr):
op = expr.op()
arg, offset, default = op.args
arg_formatted = translator.translate(arg)
if default is not None:
if offset is None:
offset_formatted = '1'
else:
offset_formatted = translator.translate(offset)
default_formatted = translator.translate(default)
return '{0}({1}, {2}, {3})'.format(name, arg_formatted,
offset_formatted,
default_formatted)
elif offset is not None:
offset_formatted = translator.translate(offset)
return '{0}({1}, {2})'.format(name, arg_formatted,
offset_formatted)
else:
return '{0}({1})'.format(name, arg_formatted)
return formatter
def _nth_value(translator, expr):
op = expr.op()
arg, rank = op.args
arg_formatted = translator.translate(arg)
rank_formatted = translator.translate(rank - 1)
return 'first_value(lag({0}, {1}))'.format(arg_formatted,
rank_formatted)
def _negate(translator, expr):
arg = expr.op().args[0]
formatted_arg = translator.translate(arg)
if isinstance(expr, ir.BooleanValue):
return 'NOT {0!s}'.format(formatted_arg)
else:
if _needs_parens(arg):
formatted_arg = _parenthesize(formatted_arg)
return '-{0!s}'.format(formatted_arg)
def _parenthesize(what):
return '({0!s})'.format(what)
def unary(func_name):
return fixed_arity(func_name, 1)
def _reduction_format(translator, func_name, arg, where):
if where is not None:
case = where.ifelse(arg, ibis.NA)
arg = translator.translate(case)
else:
arg = translator.translate(arg)
return '{0!s}({1!s})'.format(func_name, arg)
def _reduction(func_name):
def formatter(translator, expr):
op = expr.op()
# HACK: support trailing arguments
arg, where = op.args[:2]
return _reduction_format(translator, func_name, arg, where)
return formatter
def _variance_like(func_name):
func_names = {
'sample': func_name,
'pop': '{0}_pop'.format(func_name)
}
def formatter(translator, expr):
arg, where, how = expr.op().args
return _reduction_format(translator, func_names[how], arg, where)
return formatter
def fixed_arity(func_name, arity):
def formatter(translator, expr):
op = expr.op()
if arity != len(op.args):
raise com.IbisError('incorrect number of args')
return _format_call(translator, func_name, *op.args)
return formatter
def _ifnull_workaround(translator, expr):
op = expr.op()
a, b = op.args
# work around per #345, #360
if (isinstance(a, ir.DecimalValue) and
isinstance(b, ir.IntegerValue)):
b = b.cast(a.type())
return _format_call(translator, 'isnull', a, b)
def _format_call(translator, func, *args):
formatted_args = []
for arg in args:
fmt_arg = translator.translate(arg)
formatted_args.append(fmt_arg)
return '{0!s}({1!s})'.format(func, ', '.join(formatted_args))
def _binary_infix_op(infix_sym):
    """Translator factory for infix operators such as ``+`` or ``AND``."""
    def formatter(translator, expr):
        lhs, rhs = expr.op().args
        lhs_fmt = translator.translate(lhs)
        rhs_fmt = translator.translate(rhs)
        # wrap operands that are themselves infix/negate expressions so
        # the textual SQL preserves the original grouping
        if _needs_parens(lhs):
            lhs_fmt = _parenthesize(lhs_fmt)
        if _needs_parens(rhs):
            rhs_fmt = _parenthesize(rhs_fmt)
        return '{0} {1} {2}'.format(lhs_fmt, infix_sym, rhs_fmt)
    return formatter
def _xor(translator, expr):
    # Impala has no XOR operator; expand to (a OR b) AND NOT (a AND b).
    op = expr.op()
    left_arg = translator.translate(op.left)
    right_arg = translator.translate(op.right)
    if _needs_parens(op.left):
        left_arg = _parenthesize(left_arg)
    if _needs_parens(op.right):
        right_arg = _parenthesize(right_arg)
    return ('{0} AND NOT {1}'
            .format('({0} {1} {2})'.format(left_arg, 'OR', right_arg),
                    '({0} {1} {2})'.format(left_arg, 'AND', right_arg)))


def _name_expr(formatted_expr, quoted_name):
    # SELECT-list aliasing: "<expr> AS `name`".
    return '{0!s} AS {1!s}'.format(formatted_expr, quoted_name)


def _needs_parens(op):
    # Operands that are infix operators or negation must be parenthesized
    # to preserve evaluation order in the emitted SQL.
    if isinstance(op, ir.Expr):
        op = op.op()
    op_klass = type(op)
    # function calls don't need parens
    return (op_klass in _binary_infix_ops or
            op_klass in [ops.Negate])


def _need_parenthesize_args(op):
    # NOTE(review): identical to _needs_parens and apparently unused in
    # this file; candidate for removal if no external callers exist.
    if isinstance(op, ir.Expr):
        op = op.op()
    op_klass = type(op)
    return (op_klass in _binary_infix_ops or
            op_klass in [ops.Negate])
def _boolean_literal_format(expr):
    # SQL boolean literal keywords.
    value = expr.op().value
    return 'TRUE' if value else 'FALSE'


def _number_literal_format(expr):
    # repr() keeps full precision for ints and floats.
    value = expr.op().value
    return repr(value)
def _string_literal_format(expr):
value = expr.op().value
return "'{0!s}'".format(value.replace("'", "\\'"))
def _timestamp_literal_format(expr):
value = expr.op().value
if isinstance(value, datetime.datetime):
if value.microsecond != 0:
raise ValueError(value)
value = value.strftime('%Y-%m-%d %H:%M:%S')
return "'{0!s}'".format(value)
def quote_identifier(name, quotechar='`', force=False):
    """Quote *name* when forced, containing spaces, or a reserved word."""
    # short-circuit order matters: only consult the reserved-word list
    # when neither forced nor space-containing
    must_quote = (force or name.count(' ') or
                  name in identifiers.impala_identifiers)
    if not must_quote:
        return name
    return '{0}{1}{0}'.format(quotechar, name)
class CaseFormatter(object):
    """Renders CASE ... WHEN ... THEN ... [ELSE ...] END expressions.

    ``base`` is None for searched CASE expressions and is the comparison
    expression for simple CASE expressions.
    """

    def __init__(self, translator, base, cases, results, default):
        self.translator = translator
        self.base = base
        self.cases = cases
        self.results = results
        self.default = default

        # HACK
        self.indent = 2
        # single-branch CASEs are emitted on one line
        self.multiline = len(cases) > 1
        self.buf = StringIO()

    def _trans(self, expr):
        # shorthand for translating a sub-expression
        return self.translator.translate(expr)

    def get_result(self):
        # NOTE(review): seek(0) without truncate() means calling this twice
        # on one instance could leave stale residue; instances appear to be
        # single-use -- verify before reusing.
        self.buf.seek(0)
        self.buf.write('CASE')
        if self.base is not None:
            base_str = self._trans(self.base)
            self.buf.write(' {0}'.format(base_str))
        for case, result in zip(self.cases, self.results):
            self._next_case()
            case_str = self._trans(case)
            result_str = self._trans(result)
            self.buf.write('WHEN {0} THEN {1}'.format(case_str, result_str))
        if self.default is not None:
            self._next_case()
            default_str = self._trans(self.default)
            self.buf.write('ELSE {0}'.format(default_str))
        if self.multiline:
            self.buf.write('\nEND')
        else:
            self.buf.write(' END')
        return self.buf.getvalue()

    def _next_case(self):
        # newline + indent between branches when multiline, a space otherwise
        if self.multiline:
            self.buf.write('\n{0}'.format(' ' * self.indent))
        else:
            self.buf.write(' ')
def _simple_case(translator, expr):
    # CASE <base> WHEN ... THEN ... form.
    op = expr.op()
    formatter = CaseFormatter(translator, op.base, op.cases, op.results,
                              op.default)
    return formatter.get_result()


def _searched_case(translator, expr):
    # CASE WHEN <predicate> THEN ... form (no base expression).
    op = expr.op()
    formatter = CaseFormatter(translator, None, op.cases, op.results,
                              op.default)
    return formatter.get_result()


def _table_array_view(translator, expr):
    # Render a table expression as an inline parenthesized subquery.
    ctx = translator.context
    table = expr.op().table
    query = ctx.get_compiled_expr(table)
    return '(\n{0}\n)'.format(util.indent(query, ctx.indent))
# ---------------------------------------------------------------------
# Timestamp arithmetic and other functions
def _timestamp_delta(translator, expr):
    # Timestamp plus fixed offset; dispatched on the offset's unit type.
    op = expr.op()
    arg, offset = op.args
    formatted_arg = translator.translate(arg)
    return _timestamp_format_offset(offset, formatted_arg)


# offset unit class -> Impala interval-arithmetic function
_impala_delta_functions = {
    tempo.Year: 'years_add',
    tempo.Month: 'months_add',
    tempo.Week: 'weeks_add',
    tempo.Day: 'days_add',
    tempo.Hour: 'hours_add',
    tempo.Minute: 'minutes_add',
    tempo.Second: 'seconds_add',
    tempo.Millisecond: 'milliseconds_add',
    tempo.Microsecond: 'microseconds_add',
    tempo.Nanosecond: 'nanoseconds_add'
}


def _timestamp_format_offset(offset, arg):
    # offset.n carries the unit count passed straight to <unit>s_add
    f = _impala_delta_functions[type(offset)]
    return '{0}({1}, {2})'.format(f, arg, offset.n)
# ---------------------------------------------------------------------
# Semi/anti-join supports
def _exists_subquery(translator, expr):
    # Render EXISTS / NOT EXISTS with a constant-projection subquery.
    op = expr.op()
    ctx = translator.context

    # the projected value is irrelevant; a literal 1 keeps the subquery small
    dummy = ir.literal(1).name(ir.unnamed)

    filtered = op.foreign_table.filter(op.predicates)
    expr = filtered.projection([dummy])

    subquery = ctx.get_compiled_expr(expr)

    if isinstance(op, transforms.ExistsSubquery):
        key = 'EXISTS'
    elif isinstance(op, transforms.NotExistsSubquery):
        key = 'NOT EXISTS'
    else:
        raise NotImplementedError

    return '{0} (\n{1}\n)'.format(key, util.indent(subquery, ctx.indent))
def _table_column(translator, expr):
    # Render a column reference, qualified with its table alias when the
    # surrounding query uses aliases.
    op = expr.op()
    field_name = op.name
    quoted_name = quote_identifier(field_name, force=True)

    table = op.table
    ctx = translator.context

    # If the column does not originate from the table set in the current SELECT
    # context, we should format as a subquery
    if translator.permit_subquery and ctx.is_foreign_expr(table):
        proj_expr = table.projection([field_name]).to_array()
        return _table_array_view(translator, proj_expr)

    if ctx.need_aliases():
        alias = ctx.get_ref(table)
        if alias is not None:
            quoted_name = '{0}.{1}'.format(alias, quoted_name)

    return quoted_name
def _extract_field(sql_attr):
def extract_field_formatter(translator, expr):
op = expr.op()
arg = translator.translate(op.args[0])
# This is pre-2.0 Impala-style, which did not used to support the
# SQL-99 format extract($FIELD from expr)
return "extract({0!s}, '{1!s}')".format(arg, sql_attr)
return extract_field_formatter
def _truncate(translator, expr):
op = expr.op()
arg = translator.translate(op.args[0])
_impala_unit_names = {
'M': 'MONTH',
'D': 'J',
'J': 'D',
'H': 'HH'
}
unit = op.args[1]
unit = _impala_unit_names.get(unit, unit)
return "trunc({0!s}, '{1!s}')".format(arg, unit)
def _timestamp_from_unix(translator, expr):
    # Normalize epoch values in ms/us down to seconds, then convert.
    op = expr.op()
    val, unit = op.args
    if unit == 'ms':
        val = (val / 1000).cast('int32')
    elif unit == 'us':
        val = (val / 1000000).cast('int32')
    arg = _from_unixtime(translator, val)
    return 'CAST({0} AS timestamp)'.format(arg)
def _from_unixtime(translator, expr):
arg = translator.translate(expr)
return 'from_unixtime({0}, "yyyy-MM-dd HH:mm:ss")'.format(arg)
def varargs(func_name):
    # Translator factory for functions taking any number of arguments.
    def varargs_formatter(translator, expr):
        op = expr.op()
        return _format_call(translator, func_name, *op.args)
    return varargs_formatter
def _substring(translator, expr):
op = expr.op()
arg, start, length = op.args
arg_formatted = translator.translate(arg)
start_formatted = translator.translate(start)
# Impala is 1-indexed
if length is None or isinstance(length.op(), ir.Literal):
lvalue = length.op().value if length is not None else None
if lvalue:
return 'substr({0}, {1} + 1, {2})'.format(arg_formatted,
start_formatted,
lvalue)
else:
return 'substr({0}, {1} + 1)'.format(arg_formatted,
start_formatted)
else:
length_formatted = translator.translate(length)
return 'substr({0}, {1} + 1, {2})'.format(arg_formatted,
start_formatted,
length_formatted)
def _string_find(translator, expr):
op = expr.op()
arg, substr, start, _ = op.args
arg_formatted = translator.translate(arg)
substr_formatted = translator.translate(substr)
if start is not None and not isinstance(start.op(), ir.Literal):
start_fmt = translator.translate(start)
return 'locate({0}, {1}, {2} + 1) - 1'.format(substr_formatted,
arg_formatted,
start_fmt)
elif start is not None and start.op().value:
sval = start.op().value
return 'locate({0}, {1}, {2}) - 1'.format(substr_formatted,
arg_formatted,
sval + 1)
else:
return 'locate({0}, {1}) - 1'.format(substr_formatted, arg_formatted)
def _string_join(translator, expr):
    # join maps to concat_ws(separator, s1, s2, ...).
    op = expr.op()
    arg, strings = op.args
    return _format_call(translator, 'concat_ws', arg, *strings)
def _parse_url(translator, expr):
op = expr.op()
arg, extract, key = op.args
arg_formatted = translator.translate(arg)
if key is None:
return "parse_url({0}, '{1}')".format(arg_formatted, extract)
else:
key_fmt = translator.translate(key)
return "parse_url({0}, '{1}', {2})".format(arg_formatted,
extract, key_fmt)
def _find_in_set(translator, expr):
op = expr.op()
arg, str_list = op.args
arg_formatted = translator.translate(arg)
str_formatted = ','.join([x._arg.value for x in str_list])
return "find_in_set({0}, '{1}') - 1".format(arg_formatted, str_formatted)
def _round(translator, expr):
op = expr.op()
arg, digits = op.args
arg_formatted = translator.translate(arg)
if digits is not None:
digits_formatted = translator.translate(digits)
return 'round({0}, {1})'.format(arg_formatted,
digits_formatted)
else:
return 'round({0})'.format(arg_formatted)
def _hash(translator, expr):
op = expr.op()
arg, how = op.args
arg_formatted = translator.translate(arg)
if how == 'fnv':
return 'fnv_hash({0})'.format(arg_formatted)
else:
raise NotImplementedError(how)
def _log(translator, expr):
op = expr.op()
arg, base = op.args
arg_formatted = translator.translate(arg)
if base is None:
return 'ln({0})'.format(arg_formatted)
else:
return 'log({0}, {1})'.format(arg_formatted,
translator.translate(base))
def _count_distinct(translator, expr):
op = expr.op()
arg_formatted = translator.translate(op.args[0])
return 'COUNT(DISTINCT {0})'.format(arg_formatted)
def _literal(translator, expr):
    # Dispatch literal rendering on the expression's value family.
    if isinstance(expr, ir.BooleanValue):
        typeclass = 'boolean'
    elif isinstance(expr, ir.StringValue):
        typeclass = 'string'
    elif isinstance(expr, ir.NumericValue):
        typeclass = 'number'
    elif isinstance(expr, ir.TimestampValue):
        typeclass = 'timestamp'
    else:
        raise NotImplementedError
    return _literal_formatters[typeclass](expr)


def _null_literal(translator, expr):
    # SQL NULL keyword.
    return 'NULL'


# literal renderers keyed by the family names used in _literal
_literal_formatters = {
    'boolean': _boolean_literal_format,
    'number': _number_literal_format,
    'string': _string_literal_format,
    'timestamp': _timestamp_literal_format
}
def _value_list(translator, expr):
op = expr.op()
formatted = [translator.translate(x) for x in op.values]
return '({0})'.format(', '.join(formatted))
# Ibis rank/row_number are 0-based while SQL's are 1-based, so wrap the
# rendered window function in "(... - 1)".
_subtract_one = '({0} - 1)'.format


# post-processing applied to the formatted output of these operations
_expr_transforms = {
    ops.RowNumber: _subtract_one,
    ops.DenseRank: _subtract_one,
    ops.MinRank: _subtract_one,
}


# operators rendered in infix position; also merged into the main
# operation registry
_binary_infix_ops = {
    # Binary operations
    ops.Add: _binary_infix_op('+'),
    ops.Subtract: _binary_infix_op('-'),
    ops.Multiply: _binary_infix_op('*'),
    ops.Divide: _binary_infix_op('/'),
    ops.Power: fixed_arity('pow', 2),
    ops.Modulus: _binary_infix_op('%'),

    # Comparisons
    ops.Equals: _binary_infix_op('='),
    ops.NotEquals: _binary_infix_op('!='),
    ops.GreaterEqual: _binary_infix_op('>='),
    ops.Greater: _binary_infix_op('>'),
    ops.LessEqual: _binary_infix_op('<='),
    ops.Less: _binary_infix_op('<'),

    # Boolean comparisons
    ops.And: _binary_infix_op('AND'),
    ops.Or: _binary_infix_op('OR'),
    ops.Xor: _xor,
}
# Master dispatch table: maps Ibis operation classes to their Impala SQL
# formatter functions.
_operation_registry = {
    # Unary operations
    ops.NotNull: _not_null,
    ops.IsNull: _is_null,
    ops.Negate: _negate,
    ops.IfNull: _ifnull_workaround,
    ops.NullIf: fixed_arity('nullif', 2),
    ops.ZeroIfNull: unary('zeroifnull'),
    ops.NullIfZero: unary('nullifzero'),
    ops.Abs: unary('abs'),
    ops.BaseConvert: fixed_arity('conv', 3),
    ops.Ceil: unary('ceil'),
    ops.Floor: unary('floor'),
    ops.Exp: unary('exp'),
    ops.Round: _round,
    ops.Sign: unary('sign'),
    ops.Sqrt: unary('sqrt'),
    ops.Hash: _hash,
    ops.Log: _log,
    ops.Ln: unary('ln'),
    ops.Log2: unary('log2'),
    ops.Log10: unary('log10'),
    ops.DecimalPrecision: unary('precision'),
    ops.DecimalScale: unary('scale'),

    # Unary aggregates
    ops.CMSMedian: _reduction('appx_median'),
    ops.HLLCardinality: _reduction('ndv'),
    ops.Mean: _reduction('avg'),
    ops.Sum: _reduction('sum'),
    ops.Max: _reduction('max'),
    ops.Min: _reduction('min'),
    ops.StandardDev: _variance_like('stddev'),
    ops.Variance: _variance_like('variance'),
    ops.GroupConcat: fixed_arity('group_concat', 2),
    ops.Count: _reduction('count'),
    ops.CountDistinct: _count_distinct,

    # string operations
    ops.StringLength: unary('length'),
    ops.StringAscii: unary('ascii'),
    ops.Lowercase: unary('lower'),
    ops.Uppercase: unary('upper'),
    ops.Reverse: unary('reverse'),
    ops.Strip: unary('trim'),
    ops.LStrip: unary('ltrim'),
    ops.RStrip: unary('rtrim'),
    ops.Capitalize: unary('initcap'),
    ops.Substring: _substring,
    ops.StrRight: fixed_arity('strright', 2),
    ops.Repeat: fixed_arity('repeat', 2),
    ops.StringFind: _string_find,
    ops.Translate: fixed_arity('translate', 3),
    ops.FindInSet: _find_in_set,
    ops.LPad: fixed_arity('lpad', 3),
    ops.RPad: fixed_arity('rpad', 3),
    ops.StringJoin: _string_join,
    ops.StringSQLLike: _binary_infix_op('LIKE'),
    ops.RegexSearch: _binary_infix_op('RLIKE'),
    ops.RegexExtract: fixed_arity('regexp_extract', 3),
    ops.RegexReplace: fixed_arity('regexp_replace', 3),
    ops.ParseURL: _parse_url,

    # Timestamp operations
    ops.TimestampNow: lambda *args: 'now()',
    ops.ExtractYear: _extract_field('year'),
    ops.ExtractMonth: _extract_field('month'),
    ops.ExtractDay: _extract_field('day'),
    ops.ExtractHour: _extract_field('hour'),
    ops.ExtractMinute: _extract_field('minute'),
    ops.ExtractSecond: _extract_field('second'),
    ops.ExtractMillisecond: _extract_field('millisecond'),
    ops.Truncate: _truncate,

    # Other operations
    ops.E: lambda *args: 'e()',
    ir.Literal: _literal,
    ir.NullLiteral: _null_literal,
    ir.ValueList: _value_list,
    ops.Cast: _cast,
    ops.Coalesce: varargs('coalesce'),
    ops.Greatest: varargs('greatest'),
    ops.Least: varargs('least'),
    ops.Where: fixed_arity('if', 3),
    ops.Between: _between,
    ops.Contains: _binary_infix_op('IN'),
    ops.NotContains: _binary_infix_op('NOT IN'),
    ops.SimpleCase: _simple_case,
    ops.SearchedCase: _searched_case,
    ops.TableColumn: _table_column,
    ops.TableArrayView: _table_array_view,
    ops.TimestampDelta: _timestamp_delta,
    ops.TimestampFromUNIX: _timestamp_from_unix,
    transforms.ExistsSubquery: _exists_subquery,
    transforms.NotExistsSubquery: _exists_subquery,

    # RowNumber, and rank functions starts with 0 in Ibis-land
    ops.RowNumber: lambda *args: 'row_number()',
    ops.DenseRank: lambda *args: 'dense_rank()',
    ops.MinRank: lambda *args: 'rank()',
    ops.FirstValue: unary('first_value'),
    ops.LastValue: unary('last_value'),
    ops.NthValue: _nth_value,
    ops.Lag: _shift_like('lag'),
    ops.Lead: _shift_like('lead'),
    ops.WindowOp: _window
}

# infix operators share the registry with function-style operations
_operation_registry.update(_binary_infix_ops)
class ImpalaExprTranslator(comp.ExprTranslator):
    # Impala-specific expression translator: wires the operation registry
    # and context class into the generic compiler machinery.
    _registry = _operation_registry
    _context_class = ImpalaContext

    def name(self, translated, name, force=True):
        # Aliased SELECT-list expression; identifiers are quoted defensively.
        return _name_expr(translated,
                          quote_identifier(name, force=force))


# convenience decorators for registering custom compilations/rewrites
compiles = ImpalaExprTranslator.compiles
rewrites = ImpalaExprTranslator.rewrites
@rewrites(ops.FloorDivide)
def _floor_divide(expr):
    # No floor-division operator in Impala; rewrite as divide then floor.
    left, right = expr.op().args
    return left.div(right).floor()
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six import StringIO
import datetime
import ibis
import ibis.expr.analysis as L
import ibis.expr.datatypes as dt
import ibis.expr.types as ir
import ibis.expr.operations as ops
import ibis.expr.temporal as tempo
import ibis.sql.compiler as comp
import ibis.sql.transforms as transforms
import ibis.impala.identifiers as identifiers
import ibis.common as com
import ibis.util as util
def build_ast(expr, context=None):
    """Compile *expr* into an Impala query AST."""
    builder = ImpalaQueryBuilder(expr, context=context)
    return builder.get_result()


def _get_query(expr, context):
    # only the first query of the AST is needed here
    ast = build_ast(expr, context)
    query = ast.queries[0]
    return query


def to_sql(expr, context=None):
    """Render *expr* as an Impala SQL string."""
    query = _get_query(expr, context)
    return query.compile()
# ----------------------------------------------------------------------
# Select compilation
class ImpalaSelectBuilder(comp.SelectBuilder):
    # Binds the generic select-building machinery to ImpalaSelect.

    @property
    def _select_class(self):
        return ImpalaSelect


class ImpalaQueryBuilder(comp.QueryBuilder):
    # Impala-flavored query builder: wires in the select builder,
    # compilation context, and union classes.

    select_builder = ImpalaSelectBuilder

    @property
    def _make_context(self):
        return ImpalaContext

    @property
    def _union_class(self):
        return ImpalaUnion


class ImpalaContext(comp.QueryContext):
    # Compilation context that recursively compiles via to_sql.

    def _to_sql(self, expr, ctx):
        return to_sql(expr, context=ctx)
class ImpalaSelect(comp.Select):

    """
    A SELECT statement which, after execution, might yield back to the user a
    table, array/list, or scalar value, depending on the expression that
    generated it
    """

    def compile(self):
        """
        This method isn't yet idempotent; calling multiple times may yield
        unexpected results
        """
        # Can't tell if this is a hack or not. Revisit later
        self.context.set_query(self)

        # If any subqueries, translate them and add to beginning of query as
        # part of the WITH section
        with_frag = self.format_subqueries()

        # SELECT
        select_frag = self.format_select_set()

        # FROM, JOIN, UNION
        from_frag = self.format_table_set()

        # WHERE
        where_frag = self.format_where()

        # GROUP BY and HAVING
        groupby_frag = self.format_group_by()

        # ORDER BY and LIMIT
        order_frag = self.format_postamble()

        # Glue together the query fragments and return
        query = _join_not_none('\n', [with_frag, select_frag, from_frag,
                                      where_frag, groupby_frag, order_frag])

        return query

    def format_subqueries(self):
        """Render extracted subqueries as a WITH block.

        Implicitly returns None when there are no subqueries, so
        _join_not_none drops the fragment entirely.
        """
        if len(self.subqueries) == 0:
            return

        context = self.context

        buf = StringIO()
        buf.write('WITH ')

        for i, expr in enumerate(self.subqueries):
            if i > 0:
                buf.write(',\n')
            formatted = util.indent(context.get_compiled_expr(expr), 2)
            alias = context.get_ref(expr)
            buf.write('{0} AS (\n{1}\n)'.format(alias, formatted))

        return buf.getvalue()

    def format_select_set(self):
        """Render the SELECT list, wrapping lines around 70 columns."""
        # TODO:
        context = self.context
        formatted = []
        for expr in self.select_set:
            if isinstance(expr, ir.ValueExpr):
                expr_str = self._translate(expr, named=True)
            elif isinstance(expr, ir.TableExpr):
                # A * selection, possibly prefixed
                if context.need_aliases():
                    alias = context.get_ref(expr)
                    # materialized join will not have an alias. see #491
                    expr_str = '{0}.*'.format(alias) if alias else '*'
                else:
                    expr_str = '*'
            formatted.append(expr_str)

        buf = StringIO()
        line_length = 0
        max_length = 70
        tokens = 0
        for i, val in enumerate(formatted):
            # always line-break for multi-line expressions
            if val.count('\n'):
                if i:
                    buf.write(',')
                buf.write('\n')
                indented = util.indent(val, self.indent)
                buf.write(indented)

                # set length of last line
                line_length = len(indented.split('\n')[-1])
                tokens = 1
            elif (tokens > 0 and line_length and
                  len(val) + line_length > max_length):
                # There is an expr, and adding this new one will make the line
                # too long
                # NOTE(review): the continuation indent below presumably
                # aligned with 'SELECT ' (7 chars, matching the +7 below);
                # verify the literal's whitespace was not mangled
                buf.write(',\n ') if i else buf.write('\n')
                buf.write(val)
                line_length = len(val) + 7
                tokens = 1
            else:
                if i:
                    buf.write(',')
                buf.write(' ')
                buf.write(val)
                tokens += 1
                line_length += len(val) + 2

        if self.distinct:
            select_key = 'SELECT DISTINCT'
        else:
            select_key = 'SELECT'

        return '{0}{1}'.format(select_key, buf.getvalue())

    def format_table_set(self):
        """Render the FROM clause, or None when there is no table set."""
        if self.table_set is None:
            return None

        fragment = 'FROM '

        helper = _TableSetFormatter(self, self.table_set)
        fragment += helper.get_result()

        return fragment

    def format_group_by(self):
        """Render GROUP BY (by 1-based ordinal) and HAVING clauses."""
        if not len(self.group_by):
            # There is no aggregation, nothing to see here
            return None

        lines = []
        if len(self.group_by) > 0:
            clause = 'GROUP BY {0}'.format(', '.join([
                str(x + 1) for x in self.group_by]))
            lines.append(clause)

        if len(self.having) > 0:
            trans_exprs = []
            for expr in self.having:
                translated = self._translate(expr)
                trans_exprs.append(translated)
            lines.append('HAVING {0}'.format(' AND '.join(trans_exprs)))

        return '\n'.join(lines)

    def format_where(self):
        """Render the WHERE clause, AND-joining the predicates."""
        if len(self.where) == 0:
            return None

        buf = StringIO()
        buf.write('WHERE ')
        fmt_preds = []
        for pred in self.where:
            new_pred = self._translate(pred, permit_subquery=True)
            if isinstance(pred.op(), ops.Or):
                # parens for OR exprs because it binds looser than AND
                new_pred = _parenthesize(new_pred)
            fmt_preds.append(new_pred)

        conj = ' AND\n{0}'.format(' ' * 6)
        buf.write(conj.join(fmt_preds))
        return buf.getvalue()

    def format_postamble(self):
        """Render ORDER BY / LIMIT / OFFSET; None when none apply."""
        buf = StringIO()
        lines = 0

        if len(self.order_by) > 0:
            buf.write('ORDER BY ')
            formatted = []
            for expr in self.order_by:
                key = expr.op()
                translated = self._translate(key.expr)
                if not key.ascending:
                    translated += ' DESC'
                formatted.append(translated)
            buf.write(', '.join(formatted))
            lines += 1

        if self.limit is not None:
            if lines:
                buf.write('\n')
            n, offset = self.limit['n'], self.limit['offset']
            buf.write('LIMIT {0}'.format(n))
            if offset is not None and offset != 0:
                buf.write(' OFFSET {0}'.format(offset))
            lines += 1

        if not lines:
            return None

        return buf.getvalue()

    @property
    def translator(self):
        # expression translator class used by _translate
        return ImpalaExprTranslator
def _join_not_none(sep, pieces):
pieces = [x for x in pieces if x is not None]
return sep.join(pieces)
class _TableSetFormatter(comp.TableSetFormatter):
    # Formats the FROM clause: base table plus any JOINs and ON predicates.

    def get_result(self):
        # Got to unravel the join stack; the nesting order could be
        # arbitrary, so we do a depth first search and push the join tokens
        # and predicates onto a flat list, then format them
        op = self.expr.op()

        if isinstance(op, ops.Join):
            self._walk_join_tree(op)
        else:
            self.join_tables.append(self._format_table(self.expr))

        # TODO: Now actually format the things
        buf = StringIO()
        buf.write(self.join_tables[0])
        for jtype, table, preds in zip(self.join_types, self.join_tables[1:],
                                       self.join_predicates):
            buf.write('\n')
            buf.write(util.indent('{0} {1}'.format(jtype, table), self.indent))

            if len(preds):
                buf.write('\n')
                fmt_preds = [self._translate(pred) for pred in preds]
                conj = ' AND\n{0}'.format(' ' * 3)
                fmt_preds = util.indent('ON ' + conj.join(fmt_preds),
                                        self.indent * 2)
                buf.write(fmt_preds)

        return buf.getvalue()

    # join operation class -> SQL join keyword
    _join_names = {
        ops.InnerJoin: 'INNER JOIN',
        ops.LeftJoin: 'LEFT OUTER JOIN',
        ops.RightJoin: 'RIGHT OUTER JOIN',
        ops.OuterJoin: 'FULL OUTER JOIN',
        ops.LeftAntiJoin: 'LEFT ANTI JOIN',
        ops.LeftSemiJoin: 'LEFT SEMI JOIN',
        ops.CrossJoin: 'CROSS JOIN'
    }

    def _get_join_type(self, op):
        jname = self._join_names[type(op)]

        # Impala requires this
        if len(op.predicates) == 0:
            jname = self._join_names[ops.CrossJoin]

        return jname

    def _format_table(self, expr):
        # TODO: This could probably go in a class and be significantly nicer
        ctx = self.context

        ref_expr = expr
        op = ref_op = expr.op()
        if isinstance(op, ops.SelfReference):
            ref_expr = op.table
            ref_op = ref_expr.op()

        if isinstance(ref_op, ops.PhysicalTable):
            name = ref_op.name
            if name is None:
                raise com.RelationError('Table did not have a name: {0!r}'
                                        .format(expr))
            result = quote_identifier(name)
            is_subquery = False
        else:
            # A subquery
            if ctx.is_extracted(ref_expr):
                # Was put elsewhere, e.g. WITH block, we just need to grab its
                # alias
                alias = ctx.get_ref(expr)

                # HACK: self-references have to be treated more carefully here
                if isinstance(op, ops.SelfReference):
                    return '{0} {1}'.format(ctx.get_ref(ref_expr), alias)
                else:
                    return alias

            subquery = ctx.get_compiled_expr(expr)
            result = '(\n{0}\n)'.format(util.indent(subquery, self.indent))
            is_subquery = True

        if is_subquery or ctx.need_aliases():
            result += ' {0}'.format(ctx.get_ref(expr))

        return result
class ImpalaUnion(comp.Union):
    # UNION / UNION ALL compilation for two relations.

    def compile(self):
        context = self.context

        if self.distinct:
            union_keyword = 'UNION'
        else:
            union_keyword = 'UNION ALL'

        left_set = context.get_compiled_expr(self.left, isolated=True)
        right_set = context.get_compiled_expr(self.right, isolated=True)

        # XXX: hack of all trades - our right relation has a CTE
        # TODO: factor out common subqueries in the union
        if right_set.startswith('WITH'):
            format_string = '({0})\n{1}\n({2})'
        else:
            format_string = '{0}\n{1}\n{2}'

        return format_string.format(left_set, union_keyword, right_set)
# ---------------------------------------------------------------------
# Scalar and array expression formatting
# Ibis type name -> Impala SQL type name (used when rendering CASTs)
_sql_type_names = {
    'int8': 'tinyint',
    'int16': 'smallint',
    'int32': 'int',
    'int64': 'bigint',
    'float': 'float',
    'double': 'double',
    'string': 'string',
    'boolean': 'boolean',
    'timestamp': 'timestamp',
    'decimal': 'decimal',
}
def _cast(translator, expr):
    # Render CAST(x AS type). A category cast to int32 is emitted as a
    # no-op -- presumably category values are already integer-coded;
    # confirm against ir.CategoryValue.
    op = expr.op()
    arg, target_type = op.args
    arg_formatted = translator.translate(arg)

    if isinstance(arg, ir.CategoryValue) and target_type == 'int32':
        return arg_formatted
    else:
        sql_type = _type_to_sql_string(target_type)
        return 'CAST({0!s} AS {1!s})'.format(arg_formatted, sql_type)


def _type_to_sql_string(tval):
    # decimals carry precision/scale; everything else is a name lookup
    if isinstance(tval, dt.Decimal):
        return 'decimal({0},{1})'.format(tval.precision, tval.scale)
    else:
        return _sql_type_names[tval.name()]
def _between(translator, expr):
op = expr.op()
comp, lower, upper = [translator.translate(x) for x in op.args]
return '{0!s} BETWEEN {1!s} AND {2!s}'.format(comp, lower, upper)
def _is_null(translator, expr):
formatted_arg = translator.translate(expr.op().args[0])
return '{0!s} IS NULL'.format(formatted_arg)
def _not_null(translator, expr):
formatted_arg = translator.translate(expr.op().args[0])
return '{0!s} IS NOT NULL'.format(formatted_arg)
# cumulative window op -> its plain aggregate counterpart
_cumulative_to_reduction = {
    ops.CumulativeSum: ops.Sum,
    ops.CumulativeMin: ops.Min,
    ops.CumulativeMax: ops.Max,
    ops.CumulativeMean: ops.Mean,
    ops.CumulativeAny: ops.Any,
    ops.CumulativeAll: ops.All,
}


def _cumulative_to_window(translator, expr, window):
    # Rewrite a cumulative op as its plain aggregate over a cumulative
    # window frame, re-applying any registered rewrites for the new op.
    win = ibis.cumulative_window()
    win = (win.group_by(window._group_by)
           .order_by(window._order_by))

    op = expr.op()

    klass = _cumulative_to_reduction[type(op)]
    new_op = klass(*op.args)
    new_expr = expr._factory(new_op, name=expr._name)

    if type(new_op) in translator._rewrites:
        new_expr = translator._rewrites[type(new_op)](new_expr)

    new_expr = L.windowize_function(new_expr, win)
    return new_expr
def _window(translator, expr):
    # Render an analytic function together with its OVER (...) clause.
    op = expr.op()

    arg, window = op.args
    window_op = arg.op()

    # ops given a default ORDER BY (on their own argument) when the
    # window specifies none
    _require_order_by = (ops.Lag,
                         ops.Lead,
                         ops.DenseRank,
                         ops.MinRank,
                         ops.FirstValue,
                         ops.LastValue)

    # aggregates with no Impala analytic-function equivalent here
    _unsupported_reductions = (
        ops.CMSMedian,
        ops.GroupConcat,
        ops.HLLCardinality,
    )

    if isinstance(window_op, _unsupported_reductions):
        raise com.TranslationError('{0!s} is not supported in '
                                   'window functions'
                                   .format(type(window_op)))

    if isinstance(window_op, ops.CumulativeOp):
        # rewrite cumulative ops, then translate the rewritten expression
        arg = _cumulative_to_window(translator, arg, window)
        return translator.translate(arg)

    # Some analytic functions need to have the expression of interest in
    # the ORDER BY part of the window clause
    if (isinstance(window_op, _require_order_by) and
            len(window._order_by) == 0):
        window = window.order_by(window_op.args[0])

    window_formatted = _format_window(translator, window)

    arg_formatted = translator.translate(arg)
    result = '{0} {1}'.format(arg_formatted, window_formatted)

    if type(window_op) in _expr_transforms:
        # e.g. rank functions get "(... - 1)" to become 0-based
        return _expr_transforms[type(window_op)](result)
    else:
        return result
def _format_window(translator, window):
    # Build the OVER (...) clause: PARTITION BY, ORDER BY, then the frame.
    components = []

    if len(window._group_by) > 0:
        partition_args = [translator.translate(x)
                          for x in window._group_by]
        components.append('PARTITION BY {0}'.format(', '.join(partition_args)))

    if len(window._order_by) > 0:
        order_args = []
        for expr in window._order_by:
            key = expr.op()
            translated = translator.translate(key.expr)
            if not key.ascending:
                translated += ' DESC'
            order_args.append(translated)

        components.append('ORDER BY {0}'.format(', '.join(order_args)))

    p, f = window.preceding, window.following

    def _prec(p):
        # a zero/negative count collapses to CURRENT ROW
        return '{0} PRECEDING'.format(p) if p > 0 else 'CURRENT ROW'

    def _foll(f):
        return '{0} FOLLOWING'.format(f) if f > 0 else 'CURRENT ROW'

    if p is not None and f is not None:
        frame = ('ROWS BETWEEN {0} AND {1}'
                 .format(_prec(p), _foll(f)))
    elif p is not None:
        if isinstance(p, tuple):
            # explicit (start, end) bounds on the preceding side
            start, end = p
            frame = ('ROWS BETWEEN {0} AND {1}'
                     .format(_prec(start), _prec(end)))
        else:
            kind = 'ROWS' if p > 0 else 'RANGE'
            frame = ('{0} BETWEEN {1} AND UNBOUNDED FOLLOWING'
                     .format(kind, _prec(p)))
    elif f is not None:
        if isinstance(f, tuple):
            start, end = f
            frame = ('ROWS BETWEEN {0} AND {1}'
                     .format(_foll(start), _foll(end)))
        else:
            kind = 'ROWS' if f > 0 else 'RANGE'
            frame = ('{0} BETWEEN UNBOUNDED PRECEDING AND {1}'
                     .format(kind, _foll(f)))
    else:
        # no-op, default is full sample
        frame = None

    if frame is not None:
        components.append(frame)

    return 'OVER ({0})'.format(' '.join(components))
def _shift_like(name):
def formatter(translator, expr):
op = expr.op()
arg, offset, default = op.args
arg_formatted = translator.translate(arg)
if default is not None:
if offset is None:
offset_formatted = '1'
else:
offset_formatted = translator.translate(offset)
default_formatted = translator.translate(default)
return '{0}({1}, {2}, {3})'.format(name, arg_formatted,
offset_formatted,
default_formatted)
elif offset is not None:
offset_formatted = translator.translate(offset)
return '{0}({1}, {2})'.format(name, arg_formatted,
offset_formatted)
else:
return '{0}({1})'.format(name, arg_formatted)
return formatter
def _nth_value(translator, expr):
    # nth value emulated via first_value over a lagged column
    op = expr.op()
    arg, rank = op.args
    arg_formatted = translator.translate(arg)
    # ranks are 0-based in Ibis; shift before translating
    rank_formatted = translator.translate(rank - 1)
    return 'first_value(lag({0}, {1}))'.format(arg_formatted,
                                               rank_formatted)


def _negate(translator, expr):
    # NOT for booleans, unary minus for numerics
    arg = expr.op().args[0]
    formatted_arg = translator.translate(arg)
    if isinstance(expr, ir.BooleanValue):
        return 'NOT {0!s}'.format(formatted_arg)
    else:
        if _needs_parens(arg):
            formatted_arg = _parenthesize(formatted_arg)
        return '-{0!s}'.format(formatted_arg)


def _parenthesize(what):
    # wrap a formatted SQL fragment in parentheses
    return '({0!s})'.format(what)


def unary(func_name):
    # one-argument SQL function translator
    return fixed_arity(func_name, 1)


def _reduction_format(translator, func_name, arg, where):
    # optional filter predicate folded in as CASE WHEN via ifelse
    if where is not None:
        case = where.ifelse(arg, ibis.NA)
        arg = translator.translate(case)
    else:
        arg = translator.translate(arg)
    return '{0!s}({1!s})'.format(func_name, arg)


def _reduction(func_name):
    # translator factory for simple aggregates
    def formatter(translator, expr):
        op = expr.op()
        # HACK: support trailing arguments
        arg, where = op.args[:2]
        return _reduction_format(translator, func_name, arg, where)
    return formatter
def _variance_like(func_name):
    # sample vs population flavors of stddev/variance
    func_names = {
        'sample': func_name,
        'pop': '{0}_pop'.format(func_name)
    }

    def formatter(translator, expr):
        arg, where, how = expr.op().args
        return _reduction_format(translator, func_names[how], arg, where)
    return formatter


def fixed_arity(func_name, arity):
    # translator factory enforcing an exact argument count
    def formatter(translator, expr):
        op = expr.op()
        if arity != len(op.args):
            raise com.IbisError('incorrect number of args')
        return _format_call(translator, func_name, *op.args)
    return formatter


def _ifnull_workaround(translator, expr):
    op = expr.op()
    a, b = op.args
    # work around per #345, #360 -- cast an integer fallback up to the
    # decimal type of the first argument
    if (isinstance(a, ir.DecimalValue) and
            isinstance(b, ir.IntegerValue)):
        b = b.cast(a.type())
    return _format_call(translator, 'isnull', a, b)


def _format_call(translator, func, *args):
    # render func(a1, a2, ...) with each argument translated
    formatted_args = []
    for arg in args:
        fmt_arg = translator.translate(arg)
        formatted_args.append(fmt_arg)
    return '{0!s}({1!s})'.format(func, ', '.join(formatted_args))


def _binary_infix_op(infix_sym):
    # translator factory for infix operators; operands parenthesized as
    # needed to preserve grouping
    def formatter(translator, expr):
        op = expr.op()
        left, right = op.args
        left_arg = translator.translate(left)
        right_arg = translator.translate(right)
        if _needs_parens(left):
            left_arg = _parenthesize(left_arg)
        if _needs_parens(right):
            right_arg = _parenthesize(right_arg)
        return '{0!s} {1!s} {2!s}'.format(left_arg, infix_sym, right_arg)
    return formatter


def _xor(translator, expr):
    # XOR expanded to (a OR b) AND NOT (a AND b)
    op = expr.op()
    left_arg = translator.translate(op.left)
    right_arg = translator.translate(op.right)
    if _needs_parens(op.left):
        left_arg = _parenthesize(left_arg)
    if _needs_parens(op.right):
        right_arg = _parenthesize(right_arg)
    return ('{0} AND NOT {1}'
            .format('({0} {1} {2})'.format(left_arg, 'OR', right_arg),
                    '({0} {1} {2})'.format(left_arg, 'AND', right_arg)))
def _name_expr(formatted_expr, quoted_name):
    # SELECT-list aliasing: "<expr> AS `name`"
    return '{0!s} AS {1!s}'.format(formatted_expr, quoted_name)


def _needs_parens(op):
    # infix and negate operands require parens to keep their grouping
    if isinstance(op, ir.Expr):
        op = op.op()
    op_klass = type(op)
    # function calls don't need parens
    return (op_klass in _binary_infix_ops or
            op_klass in [ops.Negate])


def _need_parenthesize_args(op):
    # NOTE(review): duplicate of _needs_parens; appears unused
    if isinstance(op, ir.Expr):
        op = op.op()
    op_klass = type(op)
    return (op_klass in _binary_infix_ops or
            op_klass in [ops.Negate])


def _boolean_literal_format(expr):
    # SQL boolean keywords
    value = expr.op().value
    return 'TRUE' if value else 'FALSE'


def _number_literal_format(expr):
    # repr keeps full numeric precision
    value = expr.op().value
    return repr(value)


def _string_literal_format(expr):
    # single-quoted, with embedded quotes backslash-escaped
    value = expr.op().value
    return "'{0!s}'".format(value.replace("'", "\\'"))


def _timestamp_literal_format(expr):
    # second precision only; sub-second datetimes are rejected
    value = expr.op().value
    if isinstance(value, datetime.datetime):
        if value.microsecond != 0:
            raise ValueError(value)
        value = value.strftime('%Y-%m-%d %H:%M:%S')
    return "'{0!s}'".format(value)


def quote_identifier(name, quotechar='`', force=False):
    # quote when forced, containing spaces, or a reserved Impala word
    if force or name.count(' ') or name in identifiers.impala_identifiers:
        return '{0}{1}{0}'.format(quotechar, name)
    else:
        return name
class CaseFormatter(object):
def __init__(self, translator, base, cases, results, default):
self.translator = translator
self.base = base
self.cases = cases
self.results = results
self.default = default
# HACK
self.indent = 2
self.multiline = len(cases) > 1
self.buf = StringIO()
def _trans(self, expr):
return self.translator.translate(expr)
def get_result(self):
self.buf.seek(0)
self.buf.write('CASE')
if self.base is not None:
base_str = self._trans(self.base)
self.buf.write(' {0}'.format(base_str))
for case, result in zip(self.cases, self.results):
self._next_case()
case_str = self._trans(case)
result_str = self._trans(result)
self.buf.write('WHEN {0} THEN {1}'.format(case_str, result_str))
if self.default is not None:
self._next_case()
default_str = self._trans(self.default)
self.buf.write('ELSE {0}'.format(default_str))
if self.multiline:
self.buf.write('\nEND')
else:
self.buf.write(' END')
return self.buf.getvalue()
def _next_case(self):
if self.multiline:
self.buf.write('\n{0}'.format(' ' * self.indent))
else:
self.buf.write(' ')
def _simple_case(translator, expr):
op = expr.op()
formatter = CaseFormatter(translator, op.base, op.cases, op.results,
op.default)
return formatter.get_result()
def _searched_case(translator, expr):
op = expr.op()
formatter = CaseFormatter(translator, None, op.cases, op.results,
op.default)
return formatter.get_result()
def _table_array_view(translator, expr):
ctx = translator.context
table = expr.op().table
query = ctx.get_compiled_expr(table)
return '(\n{0}\n)'.format(util.indent(query, ctx.indent))
# ---------------------------------------------------------------------
# Timestamp arithmetic and other functions
def _timestamp_delta(translator, expr):
op = expr.op()
arg, offset = op.args
formatted_arg = translator.translate(arg)
return _timestamp_format_offset(offset, formatted_arg)
_impala_delta_functions = {
tempo.Year: 'years_add',
tempo.Month: 'months_add',
tempo.Week: 'weeks_add',
tempo.Day: 'days_add',
tempo.Hour: 'hours_add',
tempo.Minute: 'minutes_add',
tempo.Second: 'seconds_add',
tempo.Millisecond: 'milliseconds_add',
tempo.Microsecond: 'microseconds_add',
tempo.Nanosecond: 'nanoseconds_add'
}
def _timestamp_format_offset(offset, arg):
f = _impala_delta_functions[type(offset)]
return '{0}({1}, {2})'.format(f, arg, offset.n)
# ---------------------------------------------------------------------
# Semi/anti-join supports
def _exists_subquery(translator, expr):
op = expr.op()
ctx = translator.context
dummy = ir.literal(1).name(ir.unnamed)
filtered = op.foreign_table.filter(op.predicates)
expr = filtered.projection([dummy])
subquery = ctx.get_compiled_expr(expr)
if isinstance(op, transforms.ExistsSubquery):
key = 'EXISTS'
elif isinstance(op, transforms.NotExistsSubquery):
key = 'NOT EXISTS'
else:
raise NotImplementedError
return '{0} (\n{1}\n)'.format(key, util.indent(subquery, ctx.indent))
def _table_column(translator, expr):
op = expr.op()
field_name = op.name
quoted_name = quote_identifier(field_name, force=True)
table = op.table
ctx = translator.context
# If the column does not originate from the table set in the current SELECT
# context, we should format as a subquery
if translator.permit_subquery and ctx.is_foreign_expr(table):
proj_expr = table.projection([field_name]).to_array()
return _table_array_view(translator, proj_expr)
if ctx.need_aliases():
alias = ctx.get_ref(table)
if alias is not None:
quoted_name = '{0}.{1}'.format(alias, quoted_name)
return quoted_name
def _extract_field(sql_attr):
def extract_field_formatter(translator, expr):
op = expr.op()
arg = translator.translate(op.args[0])
# This is pre-2.0 Impala-style, which did not used to support the
# SQL-99 format extract($FIELD from expr)
return "extract({0!s}, '{1!s}')".format(arg, sql_attr)
return extract_field_formatter
def _truncate(translator, expr):
op = expr.op()
arg = translator.translate(op.args[0])
_impala_unit_names = {
'M': 'MONTH',
'D': 'J',
'J': 'D',
'H': 'HH'
}
unit = op.args[1]
unit = _impala_unit_names.get(unit, unit)
return "trunc({0!s}, '{1!s}')".format(arg, unit)
def _timestamp_from_unix(translator, expr):
op = expr.op()
val, unit = op.args
if unit == 'ms':
val = (val / 1000).cast('int32')
elif unit == 'us':
val = (val / 1000000).cast('int32')
arg = _from_unixtime(translator, val)
return 'CAST({0} AS timestamp)'.format(arg)
def _from_unixtime(translator, expr):
arg = translator.translate(expr)
return 'from_unixtime({0}, "yyyy-MM-dd HH:mm:ss")'.format(arg)
def varargs(func_name):
def varargs_formatter(translator, expr):
op = expr.op()
return _format_call(translator, func_name, *op.args)
return varargs_formatter
def _substring(translator, expr):
op = expr.op()
arg, start, length = op.args
arg_formatted = translator.translate(arg)
start_formatted = translator.translate(start)
# Impala is 1-indexed
if length is None or isinstance(length.op(), ir.Literal):
lvalue = length.op().value if length is not None else None
if lvalue:
return 'substr({0}, {1} + 1, {2})'.format(arg_formatted,
start_formatted,
lvalue)
else:
return 'substr({0}, {1} + 1)'.format(arg_formatted,
start_formatted)
else:
length_formatted = translator.translate(length)
return 'substr({0}, {1} + 1, {2})'.format(arg_formatted,
start_formatted,
length_formatted)
def _string_find(translator, expr):
op = expr.op()
arg, substr, start, _ = op.args
arg_formatted = translator.translate(arg)
substr_formatted = translator.translate(substr)
if start is not None and not isinstance(start.op(), ir.Literal):
start_fmt = translator.translate(start)
return 'locate({0}, {1}, {2} + 1) - 1'.format(substr_formatted,
arg_formatted,
start_fmt)
elif start is not None and start.op().value:
sval = start.op().value
return 'locate({0}, {1}, {2}) - 1'.format(substr_formatted,
arg_formatted,
sval + 1)
else:
return 'locate({0}, {1}) - 1'.format(substr_formatted, arg_formatted)
def _string_join(translator, expr):
op = expr.op()
arg, strings = op.args
return _format_call(translator, 'concat_ws', arg, *strings)
def _parse_url(translator, expr):
op = expr.op()
arg, extract, key = op.args
arg_formatted = translator.translate(arg)
if key is None:
return "parse_url({0}, '{1}')".format(arg_formatted, extract)
else:
key_fmt = translator.translate(key)
return "parse_url({0}, '{1}', {2})".format(arg_formatted,
extract, key_fmt)
def _find_in_set(translator, expr):
op = expr.op()
arg, str_list = op.args
arg_formatted = translator.translate(arg)
str_formatted = ','.join([x._arg.value for x in str_list])
return "find_in_set({0}, '{1}') - 1".format(arg_formatted, str_formatted)
def _round(translator, expr):
op = expr.op()
arg, digits = op.args
arg_formatted = translator.translate(arg)
if digits is not None:
digits_formatted = translator.translate(digits)
return 'round({0}, {1})'.format(arg_formatted,
digits_formatted)
else:
return 'round({0})'.format(arg_formatted)
def _hash(translator, expr):
op = expr.op()
arg, how = op.args
arg_formatted = translator.translate(arg)
if how == 'fnv':
return 'fnv_hash({0})'.format(arg_formatted)
else:
raise NotImplementedError(how)
def _log(translator, expr):
op = expr.op()
arg, base = op.args
arg_formatted = translator.translate(arg)
if base is None:
return 'ln({0})'.format(arg_formatted)
else:
return 'log({0}, {1})'.format(arg_formatted,
translator.translate(base))
def _count_distinct(translator, expr):
op = expr.op()
arg_formatted = translator.translate(op.args[0])
return 'COUNT(DISTINCT {0})'.format(arg_formatted)
def _literal(translator, expr):
if isinstance(expr, ir.BooleanValue):
typeclass = 'boolean'
elif isinstance(expr, ir.StringValue):
typeclass = 'string'
elif isinstance(expr, ir.NumericValue):
typeclass = 'number'
elif isinstance(expr, ir.TimestampValue):
typeclass = 'timestamp'
else:
raise NotImplementedError
return _literal_formatters[typeclass](expr)
def _null_literal(translator, expr):
return 'NULL'
_literal_formatters = {
'boolean': _boolean_literal_format,
'number': _number_literal_format,
'string': _string_literal_format,
'timestamp': _timestamp_literal_format
}
def _value_list(translator, expr):
op = expr.op()
formatted = [translator.translate(x) for x in op.values]
return '({0})'.format(', '.join(formatted))
_subtract_one = '({0} - 1)'.format
_expr_transforms = {
ops.RowNumber: _subtract_one,
ops.DenseRank: _subtract_one,
ops.MinRank: _subtract_one,
}
_binary_infix_ops = {
# Binary operations
ops.Add: _binary_infix_op('+'),
ops.Subtract: _binary_infix_op('-'),
ops.Multiply: _binary_infix_op('*'),
ops.Divide: _binary_infix_op('/'),
ops.Power: fixed_arity('pow', 2),
ops.Modulus: _binary_infix_op('%'),
# Comparisons
ops.Equals: _binary_infix_op('='),
ops.NotEquals: _binary_infix_op('!='),
ops.GreaterEqual: _binary_infix_op('>='),
ops.Greater: _binary_infix_op('>'),
ops.LessEqual: _binary_infix_op('<='),
ops.Less: _binary_infix_op('<'),
# Boolean comparisons
ops.And: _binary_infix_op('AND'),
ops.Or: _binary_infix_op('OR'),
ops.Xor: _xor,
}
_operation_registry = {
# Unary operations
ops.NotNull: _not_null,
ops.IsNull: _is_null,
ops.Negate: _negate,
ops.IfNull: _ifnull_workaround,
ops.NullIf: fixed_arity('nullif', 2),
ops.ZeroIfNull: unary('zeroifnull'),
ops.NullIfZero: unary('nullifzero'),
ops.Abs: unary('abs'),
ops.BaseConvert: fixed_arity('conv', 3),
ops.Ceil: unary('ceil'),
ops.Floor: unary('floor'),
ops.Exp: unary('exp'),
ops.Round: _round,
ops.Sign: unary('sign'),
ops.Sqrt: unary('sqrt'),
ops.Hash: _hash,
ops.Log: _log,
ops.Ln: unary('ln'),
ops.Log2: unary('log2'),
ops.Log10: unary('log10'),
ops.DecimalPrecision: unary('precision'),
ops.DecimalScale: unary('scale'),
# Unary aggregates
ops.CMSMedian: _reduction('appx_median'),
ops.HLLCardinality: _reduction('ndv'),
ops.Mean: _reduction('avg'),
ops.Sum: _reduction('sum'),
ops.Max: _reduction('max'),
ops.Min: _reduction('min'),
ops.StandardDev: _variance_like('stddev'),
ops.Variance: _variance_like('variance'),
ops.GroupConcat: fixed_arity('group_concat', 2),
ops.Count: _reduction('count'),
ops.CountDistinct: _count_distinct,
# string operations
ops.StringLength: unary('length'),
ops.StringAscii: unary('ascii'),
ops.Lowercase: unary('lower'),
ops.Uppercase: unary('upper'),
ops.Reverse: unary('reverse'),
ops.Strip: unary('trim'),
ops.LStrip: unary('ltrim'),
ops.RStrip: unary('rtrim'),
ops.Capitalize: unary('initcap'),
ops.Substring: _substring,
ops.StrRight: fixed_arity('strright', 2),
ops.Repeat: fixed_arity('repeat', 2),
ops.StringFind: _string_find,
ops.Translate: fixed_arity('translate', 3),
ops.FindInSet: _find_in_set,
ops.LPad: fixed_arity('lpad', 3),
ops.RPad: fixed_arity('rpad', 3),
ops.StringJoin: _string_join,
ops.StringSQLLike: _binary_infix_op('LIKE'),
ops.RegexSearch: _binary_infix_op('RLIKE'),
ops.RegexExtract: fixed_arity('regexp_extract', 3),
ops.RegexReplace: fixed_arity('regexp_replace', 3),
ops.ParseURL: _parse_url,
# Timestamp operations
ops.TimestampNow: lambda *args: 'now()',
ops.ExtractYear: _extract_field('year'),
ops.ExtractMonth: _extract_field('month'),
ops.ExtractDay: _extract_field('day'),
ops.ExtractHour: _extract_field('hour'),
ops.ExtractMinute: _extract_field('minute'),
ops.ExtractSecond: _extract_field('second'),
ops.ExtractMillisecond: _extract_field('millisecond'),
ops.Truncate: _truncate,
# Other operations
ops.E: lambda *args: 'e()',
ir.Literal: _literal,
ir.NullLiteral: _null_literal,
ir.ValueList: _value_list,
ops.Cast: _cast,
ops.Coalesce: varargs('coalesce'),
ops.Greatest: varargs('greatest'),
ops.Least: varargs('least'),
ops.Where: fixed_arity('if', 3),
ops.Between: _between,
ops.Contains: _binary_infix_op('IN'),
ops.NotContains: _binary_infix_op('NOT IN'),
ops.SimpleCase: _simple_case,
ops.SearchedCase: _searched_case,
ops.TableColumn: _table_column,
ops.TableArrayView: _table_array_view,
ops.TimestampDelta: _timestamp_delta,
ops.TimestampFromUNIX: _timestamp_from_unix,
transforms.ExistsSubquery: _exists_subquery,
transforms.NotExistsSubquery: _exists_subquery,
# RowNumber, and rank functions starts with 0 in Ibis-land
ops.RowNumber: lambda *args: 'row_number()',
ops.DenseRank: lambda *args: 'dense_rank()',
ops.MinRank: lambda *args: 'rank()',
ops.FirstValue: unary('first_value'),
ops.LastValue: unary('last_value'),
ops.NthValue: _nth_value,
ops.Lag: _shift_like('lag'),
ops.Lead: _shift_like('lead'),
ops.WindowOp: _window
}
_operation_registry.update(_binary_infix_ops)
class ImpalaExprTranslator(comp.ExprTranslator):
_registry = _operation_registry
_context_class = ImpalaContext
def name(self, translated, name, force=True):
return _name_expr(translated,
quote_identifier(name, force=force))
compiles = ImpalaExprTranslator.compiles
rewrites = ImpalaExprTranslator.rewrites
@rewrites(ops.FloorDivide)
def _floor_divide(expr):
left, right = expr.op().args
return left.div(right).floor()
| en | 0.808955 | # Copyright 2014 Cloudera Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ---------------------------------------------------------------------- # Select compilation A SELECT statement which, after execution, might yield back to the user a table, array/list, or scalar value, depending on the expression that generated it This method isn't yet idempotent; calling multiple times may yield unexpected results # Can't tell if this is a hack or not. Revisit later # If any subqueries, translate them and add to beginning of query as # part of the WITH section # SELECT # FROM, JOIN, UNION # WHERE # GROUP BY and HAVING # ORDER BY and LIMIT # Glue together the query fragments and return # TODO: # A * selection, possibly prefixed # materialized join will not have an alias. see #491 # always line-break for multi-line expressions # set length of last line # There is an expr, and adding this new one will make the line # too long # There is no aggregation, nothing to see here # parens for OR exprs because it binds looser than AND # Got to unravel the join stack; the nesting order could be # arbitrary, so we do a depth first search and push the join tokens # and predicates onto a flat list, then format them # TODO: Now actually format the things # Impala requires this # TODO: This could probably go in a class and be significantly nicer # A subquery # Was put elsewhere, e.g. 
WITH block, we just need to grab its # alias # HACK: self-references have to be treated more carefully here # XXX: hack of all trades - our right relation has a CTE # TODO: factor out common subqueries in the union # --------------------------------------------------------------------- # Scalar and array expression formatting # Some analytic functions need to have the expression of interest in # the ORDER BY part of the window clause # no-op, default is full sample # HACK: support trailing arguments # work around per #345, #360 # function calls don't need parens # HACK # --------------------------------------------------------------------- # Timestamp arithmetic and other functions # --------------------------------------------------------------------- # Semi/anti-join supports # If the column does not originate from the table set in the current SELECT # context, we should format as a subquery # This is pre-2.0 Impala-style, which did not used to support the # SQL-99 format extract($FIELD from expr) # Impala is 1-indexed # Binary operations # Comparisons # Boolean comparisons # Unary operations # Unary aggregates # string operations # Timestamp operations # Other operations # RowNumber, and rank functions starts with 0 in Ibis-land | 1.800585 | 2 |
ROAR/perception_module/depth_to_pointcloud_detector.py | listar2000/ROAR | 0 | 6622728 | <gh_stars>0
from ROAR.agent_module.agent import Agent
from ROAR.perception_module.detector import Detector
import numpy as np
from typing import Optional
import time
from ROAR.utilities_module.utilities import img_to_world
import cv2
from numpy.matlib import repmat
class DepthToPointCloudDetector(Detector):
    """Detector that converts the agent's front depth-camera frame into a
    point cloud, either in the camera frame or (optionally) in world
    coordinates via ``img_to_world``.
    """

    def __init__(self,
                 agent: Agent,
                 should_compute_global_pointcloud: bool = False,
                 should_sample_points: bool = False,
                 should_filter_by_distance: bool = False,
                 max_detectable_distance: float = 1,
                 scale_factor: int = 1000,
                 max_points_to_convert: int = 10000, **kwargs):
        """
        Args:
            agent: owning agent; provides the front depth camera and vehicle pose.
            should_compute_global_pointcloud: if True, transform the points into
                the world frame; otherwise return camera-frame coordinates.
            should_sample_points: flag stored but not used in this block
                (kept for API compatibility).
            should_filter_by_distance: flag stored but not used in this block.
                NOTE: was previously mis-annotated as ``float``; it is a bool flag.
            max_detectable_distance: stored but not used in this block.
            scale_factor: multiplier applied to raw depth values before
                back-projection (e.g. unit conversion) — TODO confirm units.
            max_points_to_convert: stored but not used in this block.
        """
        super().__init__(agent, **kwargs)
        self.should_compute_global_pointcloud = should_compute_global_pointcloud
        self.should_sample_points = should_sample_points
        self.should_filter_by_distance = should_filter_by_distance
        self.max_detectable_distance = max_detectable_distance
        self.max_points_to_convert = max_points_to_convert
        self.scale_factor = scale_factor

    def run_in_threaded(self, **kwargs):
        # Continuously refresh the shared point cloud; intended to run in a
        # dedicated worker thread (the loop never terminates on its own).
        while True:
            self.agent.kwargs["point_cloud"] = self.run_in_series()

    def run_in_series(self) -> Optional[np.ndarray]:
        """Convert the current depth frame into a point cloud.

        Returns:
            An N x 3 array of camera-frame points when
            ``should_compute_global_pointcloud`` is False; otherwise the
            world-frame result of ``img_to_world`` (shape determined by that
            helper). None when no depth frame is available yet.
            (The previous docstring said "3 x N"; the local-frame branch
            actually returns N x 3.)
        """
        if self.agent.front_depth_camera.data is None:
            return None
        depth_img = self.agent.front_depth_camera.data.copy()
        # This condition is true for every pixel, so np.where yields the
        # coordinate pair of every pixel in (row, col) order.
        coords = np.where(depth_img <= np.amax(depth_img))
        depths = depth_img[coords][:, np.newaxis] * self.scale_factor
        # Scale each pixel-index pair by its depth and append the depth
        # itself, producing homogeneous image coordinates as a 3 x N matrix.
        result = np.multiply(np.array(coords).T, depths)
        S_uv1 = np.hstack((result, depths)).T
        if self.should_compute_global_pointcloud:
            return img_to_world(scaled_depth_image=S_uv1,
                                intrinsics_matrix=self.agent.front_depth_camera.intrinsics_matrix,
                                veh_world_matrix=self.agent.vehicle.transform.get_matrix(),
                                cam_veh_matrix=self.agent.front_depth_camera.transform.get_matrix())
        # Back-project through the inverse camera intrinsics: (3 x N) -> N x 3.
        K_inv = np.linalg.inv(self.agent.front_depth_camera.intrinsics_matrix)
        return (K_inv @ S_uv1).T

    @staticmethod
    def find_fps(t1, t2):
        """Return the frame rate implied by two consecutive timestamps (seconds)."""
        return 1 / (t2 - t1)
from ROAR.agent_module.agent import Agent
from ROAR.perception_module.detector import Detector
import numpy as np
from typing import Optional
import time
from ROAR.utilities_module.utilities import img_to_world
import cv2
from numpy.matlib import repmat
class DepthToPointCloudDetector(Detector):
    """Detector that converts the agent's front depth-camera frame into a
    point cloud, either in the camera frame or (optionally) in world
    coordinates via ``img_to_world``.
    """

    def __init__(self,
                 agent: Agent,
                 should_compute_global_pointcloud: bool = False,
                 should_sample_points: bool = False,
                 should_filter_by_distance: bool = False,
                 max_detectable_distance: float = 1,
                 scale_factor: int = 1000,
                 max_points_to_convert: int = 10000, **kwargs):
        """
        Args:
            agent: owning agent; provides the front depth camera and vehicle pose.
            should_compute_global_pointcloud: if True, transform the points into
                the world frame; otherwise return camera-frame coordinates.
            should_sample_points: flag stored but not used in this block
                (kept for API compatibility).
            should_filter_by_distance: flag stored but not used in this block.
                NOTE: was previously mis-annotated as ``float``; it is a bool flag.
            max_detectable_distance: stored but not used in this block.
            scale_factor: multiplier applied to raw depth values before
                back-projection (e.g. unit conversion) — TODO confirm units.
            max_points_to_convert: stored but not used in this block.
        """
        super().__init__(agent, **kwargs)
        self.should_compute_global_pointcloud = should_compute_global_pointcloud
        self.should_sample_points = should_sample_points
        self.should_filter_by_distance = should_filter_by_distance
        self.max_detectable_distance = max_detectable_distance
        self.max_points_to_convert = max_points_to_convert
        self.scale_factor = scale_factor

    def run_in_threaded(self, **kwargs):
        # Continuously refresh the shared point cloud; intended to run in a
        # dedicated worker thread (the loop never terminates on its own).
        while True:
            self.agent.kwargs["point_cloud"] = self.run_in_series()

    def run_in_series(self) -> Optional[np.ndarray]:
        """Convert the current depth frame into a point cloud.

        Returns:
            An N x 3 array of camera-frame points when
            ``should_compute_global_pointcloud`` is False; otherwise the
            world-frame result of ``img_to_world``. None when no depth frame
            is available yet.
        """
        if self.agent.front_depth_camera.data is None:
            return None
        depth_img = self.agent.front_depth_camera.data.copy()
        # This condition is true for every pixel, so np.where yields the
        # coordinate pair of every pixel in (row, col) order.
        coords = np.where(depth_img <= np.amax(depth_img))
        depths = depth_img[coords][:, np.newaxis] * self.scale_factor
        # Scale each pixel-index pair by its depth and append the depth
        # itself, producing homogeneous image coordinates as a 3 x N matrix.
        result = np.multiply(np.array(coords).T, depths)
        S_uv1 = np.hstack((result, depths)).T
        if self.should_compute_global_pointcloud:
            return img_to_world(scaled_depth_image=S_uv1,
                                intrinsics_matrix=self.agent.front_depth_camera.intrinsics_matrix,
                                veh_world_matrix=self.agent.vehicle.transform.get_matrix(),
                                cam_veh_matrix=self.agent.front_depth_camera.transform.get_matrix())
        # Back-project through the inverse camera intrinsics: (3 x N) -> N x 3.
        K_inv = np.linalg.inv(self.agent.front_depth_camera.intrinsics_matrix)
        return (K_inv @ S_uv1).T

    @staticmethod
    def find_fps(t1, t2):
        """Return the frame rate implied by two consecutive timestamps (seconds)."""
        # Restored: the original final line was corrupted by fused dataset
        # metadata ("| en | 0.3588 ... | 2.346402 | 2") appended to the return.
        return 1 / (t2 - t1)
tools/imgrephaseDC.py | fragrussu/MRItools | 2 | 6622729 | # Code released under BSD Two-Clause license
#
# Copyright (c) 2020 University College London.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
import nibabel as nib
import numpy as np
import sys, argparse
from scipy import ndimage
from scipy import signal
def rephaseDC(img_real,img_imag,out_base,kernel):
''' Rephase complex MR data in image space
INTERFACE
rephaseDC(img_real,img_imag,out_root,kernel)
PARAMETERS
img_real: path of a NIFTI file storing a 3D or 4D image (real channel)
img_imag: path of a NIFTI file storing a 3D or 4D image (imaginary channel)
out_base: base name of output files (the output files contain the real and imaginary channels
after noise decorrelation and rephasing; these will end in *_RealReph.nii
(real channel rephased), *_RealRephThresh.nii (real channel rephased with outlier
detection), *_ImagReph.nii (imaginary channel rephased), *_ImagRephThresh.nii
(imaginary channel rephased with outlier detection), *_OutlierDetected.nii
(flagging with 1 outliers), *_PhaseOriginal.nii (storing the original phase),
*_PhaseBackground.nii (storing the estimated background phase), *_PhaseRephased.nii
(storing the phase after rephasing using the background phase), *_PhaseRephasedOutliers.nii
(storing the original phase after rephasing where outliers are set to zero phase).
Note that the imaginary channel after rephasing should contain mostly noise and
negligible true signal information.
kernel: string of the 2D kernel to use for decorrenation (choose among "B3", "B5", "G3F1", "G5F2",
"G3F1H", "G5F2H", "Opt3", "Opt5"; see Sprenger T et al, MRM 2017, 77:559–570 for more
information about the kernels.)
DESCRIPTION
The function implements noise decorrelation and rephasing algorithm presented in Sprenger T et al,
MRM 2017, 77:559-570. The function works with 3D and 4D NIFTI files (in the latter case,
each volume of the 4D NIFTI is treated independently).
References: "Real valued diffusion-weighted imaging using decorrelated
phase filtering", Sprenger T et al, Magnetic Resonance
in Medicine (2017), 77:559-570
Author: <NAME>, University College London
<<EMAIL>> <<EMAIL>>
Code released under BSD Two-Clause license.
Copyright (c) 2020 University College London. All rights reserved.'''
# Load real MRI
try:
imgR_obj = nib.load(img_real)
except:
print('')
print('ERROR: the file storing the real channel {} does not exist or is not in NIFTI format. Exiting with 1.'.format(img_real))
print('')
sys.exit(1)
imgR_data = imgR_obj.get_fdata()
imgR_size = imgR_data.shape
imgR_size = np.array(imgR_size)
imgR_ndim = imgR_size.size
imgR_data = np.array(imgR_data,'float64')
imgR_header = imgR_obj.header
imgR_affine = imgR_header.get_best_affine()
# Load imaginary MRI
try:
imgI_obj = nib.load(img_imag)
except:
print('')
print('ERROR: the file storing the imaginary channel {} does not exist or is not in NIFTI format. Exiting with 1.'.format(img_imag))
print('')
sys.exit(1)
imgI_data = imgI_obj.get_fdata()
imgI_size = imgI_data.shape
imgI_size = np.array(imgI_size)
imgI_ndim = imgI_size.size
imgI_data = np.array(imgI_data,'float64')
imgI_header = imgI_obj.header
imgI_affine = imgI_header.get_best_affine()
# Check consistency of real and imaginay MRIs
if ((imgR_ndim>4) or (imgR_ndim<2) or (imgI_ndim>4) or (imgI_ndim<2)):
print('')
print('ERROR: the input files {} and {} cannot have more than 4 dimensions and less than 2. Exiting with 1.'.format(img_real,img_imag))
print('')
sys.exit(1)
if imgR_ndim!=imgI_ndim:
print('')
print('ERROR: the input files {} is {}D while the input file {} is {}D. Exiting with 1.'.format(img_real,imgR_ndim,img_imag,imgI_ndim))
print('')
sys.exit(1)
if imgR_ndim==4:
if imgR_size[3]!=imgI_size[3]:
print('')
print('ERROR: the input files {} and {} store a different number of measurements along the 4th dimension. Exiting with 1.'.format(img_real,img_imag))
print('')
sys.exit(1)
if ( (np.sum(imgI_affine==imgR_affine)!=16) or (imgI_size[0]!=imgR_size[0]) or (imgI_size[1]!=imgR_size[1]) ):
print('')
print('ERROR: the geometry of the input files {} and {} do not match. Exiting with 1.'.format(img_real,img_imag))
print('')
sys.exit(1)
if imgR_ndim>2:
if imgI_size[2]!=imgR_size[2]:
print('')
print('ERROR: the geometry of the input files {} and {} do not match. Exiting with 1.'.format(img_real,img_imag))
print('')
sys.exit(1)
# Load kernel
if kernel=='B3':
# Boxcar 3x3
kernel_weights = np.array([[1.0/9.0, 1.0/9.0, 1.0/9.0],
[1.0/9.0, 1.0/9.0, 1.0/9.0],
[1.0/9.0, 1.0/9.0, 1.0/9.0]],'float64')
elif kernel=='B5':
# Boxcar 5x5
kernel_weights = np.array([[1.0/25.0, 1.0/25.0, 1.0/25.0, 1.0/25.0, 1.0/25.0],
[1.0/25.0, 1.0/25.0, 1.0/25.0, 1.0/25.0, 1.0/25.0],
[1.0/25.0, 1.0/25.0, 1.0/25.0, 1.0/25.0, 1.0/25.0],
[1.0/25.0, 1.0/25.0, 1.0/25.0, 1.0/25.0, 1.0/25.0],
[1.0/25.0, 1.0/25.0, 1.0/25.0, 1.0/25.0, 1.0/25.0]],'float64')
elif kernel=='G3F1':
# Gaussian 3x3 with sigma = 1 voxel
kernel_weights = np.array([[0.075113607954111, 0.123841403152974, 0.075113607954111],
[0.123841403152974, 0.204179955571658, 0.123841403152974],
[0.075113607954111, 0.123841403152974, 0.075113607954111]],'float64')
elif kernel=='G5F2':
# Gaussian 5x5 with sigma = 2 voxels
kernel_weights = np.array([[0.023246839878294, 0.033823952439922, 0.038327559383904, 0.033823952439922, 0.023246839878294],
[0.033823952439922, 0.049213560408541, 0.055766269846849, 0.049213560408541, 0.033823952439922],
[0.038327559383904, 0.055766269846849, 0.063191462410265, 0.055766269846849, 0.038327559383904],
[0.033823952439922, 0.049213560408541, 0.055766269846849, 0.049213560408541, 0.033823952439922],
[0.023246839878294, 0.033823952439922, 0.038327559383904, 0.033823952439922, 0.023246839878294]],'float64')
elif kernel=='G3F1H':
# Gaussian 3x3 with sigma = 1 voxel and center coefficient equal to 0
kernel_weights = np.array([[0.075113607954111, 0.123841403152974, 0.075113607954111],
[0.123841403152974, 0.0, 0.123841403152974],
[0.075113607954111, 0.123841403152974, 0.075113607954111]],'float64')
elif kernel=='G5F2H':
# Gaussian 5x5 with sigma = 2 voxels and center coefficient equal to 0
kernel_weights = np.array([[0.023246839878294, 0.033823952439922, 0.038327559383904, 0.033823952439922, 0.023246839878294],
[0.033823952439922, 0.049213560408541, 0.055766269846849, 0.049213560408541, 0.033823952439922],
[0.038327559383904, 0.055766269846849, 0.0, 0.055766269846849, 0.038327559383904],
[0.033823952439922, 0.049213560408541, 0.055766269846849, 0.049213560408541, 0.033823952439922],
[0.023246839878294, 0.033823952439922, 0.038327559383904, 0.033823952439922, 0.023246839878294]],'float64')
elif kernel=='Opt3':
# Optimised kernel 3x3
kernel_weights = np.array([[0.107235538162453, 0.142764461837547, 0.107235538162453],
[0.142764461837547, 0.0, 0.142764461837547],
[0.107235538162453, 0.142764461837547, 0.107235538162453]],'float64')
elif kernel=='Opt5':
# Optimised kernel 5x5
kernel_weights = np.array([[0.025441320175391, 0.037016902431746, 0.041945645727859, 0.037016902431746, 0.025441320175391],
[0.037016902431746, 0.053859275233950, 0.054719953999307, 0.053859275233950, 0.037016902431746],
[0.041945645727859, 0.054719953999307, 0.0, 0.054719953999307, 0.041945645727859],
[0.037016902431746, 0.053859275233950, 0.054719953999307, 0.053859275233950, 0.037016902431746],
[0.025441320175391, 0.037016902431746, 0.041945645727859, 0.037016902431746, 0.025441320175391]],'float64')
else:
print('')
print('ERROR: the kernel {} is not supported. Exiting with 1.'.format(kernel))
print('')
sys.exit(1)
### Filter data with the specified kernel
if imgR_ndim==2:
# Filter real and imaginary channels independently
imgR_data_filt = ndimage.convolve(imgR_data, kernel_weights, mode='constant', cval=0.0)
imgI_data_filt = ndimage.convolve(imgI_data, kernel_weights, mode='constant', cval=0.0)
elif imgR_ndim==3:
# Filter real and imaginary channels independently
imgI_data_filt = np.zeros(imgR_size,'float64')
imgR_data_filt = np.zeros(imgR_size,'float64')
for zz in range(0, imgR_size[2]):
imgI_data_filt[:,:,zz] = ndimage.convolve(imgI_data[:,:,zz], kernel_weights, mode='constant', cval=0.0)
imgR_data_filt[:,:,zz] = ndimage.convolve(imgR_data[:,:,zz], kernel_weights, mode='constant', cval=0.0)
elif imgR_ndim==4:
# Filter real and imaginary channels independently
imgI_data_filt = np.zeros(imgR_size,'float64')
imgR_data_filt = np.zeros(imgR_size,'float64')
for vv in range(0, imgR_size[3]):
for zz in range(0, imgR_size[2]):
imgI_data_filt[:,:,zz,vv] = ndimage.convolve(imgI_data[:,:,zz,vv], kernel_weights, mode='constant', cval=0.0)
imgR_data_filt[:,:,zz,vv] = ndimage.convolve(imgR_data[:,:,zz,vv], kernel_weights, mode='constant', cval=0.0)
### Get phase of complex data after filtering
phase_data_orig = np.angle(imgR_data + imgI_data*1j)
phase_data_filt = np.angle(imgR_data_filt + imgI_data_filt*1j)
### Rephase measured signals so that the true information is in the real channel only; for the rephasing, use the phase of the signal after filtering
rephased_data_complex = (imgR_data + 1j*imgI_data)*(np.exp(-1j*phase_data_filt)) # Rephase signals
rephased_data_R = np.real(rephased_data_complex) # Get real channel of rephased signals (this should theoretically contain only true information)
rephased_data_I = np.imag(rephased_data_complex) # Get imaginary channel of rephased signals (this should theoretically contain only Gaussian noise)
rephased_data_M = np.sqrt(rephased_data_R*rephased_data_R + rephased_data_I*rephased_data_I) # Get magnitude of rephased signals (when this differs too much from rephased_data_R, then the rephasing has probably gone wrong)
rephased_data_deltaMR = np.abs(rephased_data_M - rephased_data_R) # Difference between magnitude and real channel
phase_data_new = np.angle(rephased_data_R + rephased_data_I*1j) # Phase after rephasing
### Clear some memory
del imgI_data, imgR_data
### Calculate noise level and remove outliers (look at MAD within a window the same size as the kernels)
rephased_data_R_thresh = rephased_data_M
rephased_data_I_thresh = np.zeros(imgR_size,'float64')
outliers_flag = np.ones(imgR_size,'float64')
if imgR_ndim==2:
absdev = np.abs(rephased_data_I - signal.medfilt(rephased_data_I,kernel_weights.shape)) # Absolute deviation of imaginary channel within kernel window
medabsdev = signal.medfilt(absdev,kernel_weights.shape) # Median absolute deviation of imaginary channel within kernel window
thresh = 2.5000*1.4826*medabsdev # Local threhsold
rephased_data_R_thresh[rephased_data_deltaMR<thresh] = rephased_data_R[rephased_data_deltaMR<thresh]
rephased_data_I_thresh[rephased_data_deltaMR<thresh] = rephased_data_I[rephased_data_deltaMR<thresh]
outliers_flag[rephased_data_deltaMR<thresh] = 0.0
elif imgR_ndim==3:
thresh = np.zeros(imgR_size,'float64')
for zz in range(0, imgR_size[2]):
absdev_slice = np.abs(rephased_data_I[:,:,zz] - signal.medfilt(rephased_data_I[:,:,zz],kernel_weights.shape)) # Absolute deviation of imaginary channel within kernel window
medabsdev_slice = signal.medfilt(absdev_slice,kernel_weights.shape) # Median absolute deviation of imaginary channel within kernel window
thresh_slice = 2.5000*1.4826*medabsdev_slice # Local threhsold
thresh[:,:,zz] = thresh_slice
rephased_data_R_thresh[rephased_data_deltaMR<thresh] = rephased_data_R[rephased_data_deltaMR<thresh]
rephased_data_I_thresh[rephased_data_deltaMR<thresh] = rephased_data_I[rephased_data_deltaMR<thresh]
outliers_flag[rephased_data_deltaMR<thresh] = 0.0
elif imgR_ndim==4:
thresh = np.zeros(imgR_size,'float64')
for vv in range(0, imgR_size[3]):
for zz in range(0, imgR_size[2]):
absdev_vol_slice = np.abs(rephased_data_I[:,:,zz,vv] - signal.medfilt(rephased_data_I[:,:,zz,vv],kernel_weights.shape)) # Absolute deviation of imaginary channel within kernel window
medabsdev_vol_slice = signal.medfilt(absdev_vol_slice,kernel_weights.shape) # Median absolute deviation of imaginary channel within kernel window
thresh_vol_slice = 2.5000*1.4826*medabsdev_vol_slice # Local threhsold
thresh[:,:,zz,vv] = thresh_vol_slice
rephased_data_R_thresh[rephased_data_deltaMR<thresh] = rephased_data_R[rephased_data_deltaMR<thresh]
rephased_data_I_thresh[rephased_data_deltaMR<thresh] = rephased_data_I[rephased_data_deltaMR<thresh]
outliers_flag[rephased_data_deltaMR<thresh] = 0.0
phase_data_new_thresh = np.angle(rephased_data_R_thresh + rephased_data_I_thresh*1j)
### Save as real and imaginary channels after rephasing and after rephasing + outlier detection as NIFTI
# Create file names
buffer_string=''
seq_string = (out_base,'_RealReph.nii')
rephased_R_outfile = buffer_string.join(seq_string)
buffer_string=''
seq_string = (out_base,'_RealRephThresh.nii')
rephased_R_thresh_outfile = buffer_string.join(seq_string)
buffer_string=''
seq_string = (out_base,'_ImagReph.nii')
rephased_I_outfile = buffer_string.join(seq_string)
buffer_string=''
seq_string = (out_base,'_ImagRephThresh.nii')
rephased_I_thresh_outfile = buffer_string.join(seq_string)
buffer_string=''
seq_string = (out_base,'_OutlierDetected.nii')
flag_outfile = buffer_string.join(seq_string)
buffer_string=''
seq_string = (out_base,'_PhaseOriginal.nii')
phaseorig_outfile = buffer_string.join(seq_string)
buffer_string=''
seq_string = (out_base,'_PhaseRephased.nii')
phasenew_outfile = buffer_string.join(seq_string)
buffer_string=''
seq_string = (out_base,'_PhaseBackground.nii')
phaseest_outfile = buffer_string.join(seq_string)
buffer_string=''
seq_string = (out_base,'_PhaseRephasedOutliers.nii')
phasenewthresh_outfile = buffer_string.join(seq_string)
# Create header
buffer_header = imgR_header
buffer_header.set_data_dtype('float64') # Make sure we save output data as float64, even if input header indicates a different data type
# Save files
rephased_obj_R = nib.Nifti1Image(rephased_data_R,imgR_obj.affine,buffer_header)
nib.save(rephased_obj_R, rephased_R_outfile)
rephased_obj_R_thresh = nib.Nifti1Image(rephased_data_R_thresh,imgR_obj.affine,buffer_header)
nib.save(rephased_obj_R_thresh, rephased_R_thresh_outfile)
rephased_obj_I = nib.Nifti1Image(rephased_data_I,imgR_obj.affine,buffer_header)
nib.save(rephased_obj_I, rephased_I_outfile)
rephased_obj_I_thresh = nib.Nifti1Image(rephased_data_I_thresh,imgR_obj.affine,buffer_header)
nib.save(rephased_obj_I_thresh, rephased_I_thresh_outfile)
flag_outobj = nib.Nifti1Image(outliers_flag,imgR_obj.affine,buffer_header)
nib.save(flag_outobj, flag_outfile)
phaseorig_obj = nib.Nifti1Image(phase_data_orig,imgR_obj.affine,buffer_header)
nib.save(phaseorig_obj, phaseorig_outfile)
phasenew_obj = nib.Nifti1Image(phase_data_new,imgR_obj.affine,buffer_header)
nib.save(phasenew_obj, phasenew_outfile)
phasenewthresh_obj = nib.Nifti1Image(phase_data_new_thresh,imgR_obj.affine,buffer_header)
nib.save(phasenewthresh_obj, phasenewthresh_outfile)
phaseest_obj = nib.Nifti1Image(phase_data_filt,imgR_obj.affine,buffer_header)
nib.save(phaseest_obj, phaseest_outfile)
# Run the module as a script when required
if __name__ == "__main__":
    ### Parse arguments or print help
    # Four required positional arguments: real-channel NIFTI, imaginary-channel
    # NIFTI, output base name and the decorrelation kernel identifier.
    parser = argparse.ArgumentParser(description='Rephasing of complex MR images with noise decorrelation according to Sprenger T et al, MRM 2017, 77:559-570. Author: <NAME>, University College London. Email: <<EMAIL>> <<EMAIL>>. Code released under BSD Two-Clause license. Copyright (c) 2020 University College London. All rights reserved.')
    parser.add_argument('img_real', help='3D or 4D Nifti file storing the real channel in image space')
    parser.add_argument('img_imag', help='3D or 4D Nifti file storing the imaginary channel in image space')
    parser.add_argument('out_base', help='base name of the output files (output files are: *_RealReph.nii, storing the real channel rephased; *_RealRephThresh.nii, storing the real channel rephased with outlier detection; *_ImagReph.nii, storing the imaginary channel rephased; *_ImagRephThresh.nii, storing the real channel rephased with outlier detection; *_OutlierDetected.nii, flagging with 1 outliers; *_PhaseOriginal.nii, storing the original phase; *_PhaseBackground.nii, storing the estimated background phase; *_PhaseRephased.nii, storing the phase after rephasing using the background phase; *_PhaseRephasedOutliers.nii, storing the original phase after rephasing where outliers are set to zero phase)')
    parser.add_argument('kernel', help='kernel for decorrelation filers (choose among B3, B5, G3F1, G5F2, G3F1H, G5F2H, Opt3 and Opt5; see Sprenger T et al, MRM 2017, 77:559-570)')
    args = parser.parse_args()
    ### Rephase the data
    # Delegate all work (loading, filtering, rephasing, saving) to rephaseDC.
    rephaseDC(args.img_real,args.img_imag,args.out_base,args.kernel)
| # Code released under BSD Two-Clause license
#
# Copyright (c) 2020 University College London.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
import nibabel as nib
import numpy as np
import sys, argparse
from scipy import ndimage
from scipy import signal
def _load_channel(path, channel_name):
    """Load one NIFTI channel as float64; print an error and exit with 1 on failure.

    Returns (image object, float64 data array, size array, number of
    dimensions, header, best affine)."""
    try:
        obj = nib.load(path)
    except Exception:
        print('')
        print('ERROR: the file storing the {} channel {} does not exist or is not in NIFTI format. Exiting with 1.'.format(channel_name, path))
        print('')
        sys.exit(1)
    data = np.array(obj.get_fdata(), 'float64')
    size = np.array(data.shape)
    header = obj.header
    return obj, data, size, size.size, header, header.get_best_affine()


def _pick_kernel(kernel):
    """Return the requested 2D decorrelation kernel as a float64 array.

    Kernels follow Sprenger T et al, MRM 2017, 77:559-570; the 'H' variants
    are the same Gaussians with the centre coefficient set to zero.
    Prints an error and exits with 1 if the name is not recognised."""
    if kernel == 'B3':
        # Boxcar 3x3
        return np.full((3, 3), 1.0 / 9.0, 'float64')
    if kernel == 'B5':
        # Boxcar 5x5
        return np.full((5, 5), 1.0 / 25.0, 'float64')
    if kernel in ('G3F1', 'G3F1H'):
        # Gaussian 3x3 with sigma = 1 voxel
        w = np.array([[0.075113607954111, 0.123841403152974, 0.075113607954111],
                      [0.123841403152974, 0.204179955571658, 0.123841403152974],
                      [0.075113607954111, 0.123841403152974, 0.075113607954111]], 'float64')
        if kernel == 'G3F1H':
            w[1, 1] = 0.0  # hollow variant: centre coefficient equal to 0
        return w
    if kernel in ('G5F2', 'G5F2H'):
        # Gaussian 5x5 with sigma = 2 voxels
        w = np.array([[0.023246839878294, 0.033823952439922, 0.038327559383904, 0.033823952439922, 0.023246839878294],
                      [0.033823952439922, 0.049213560408541, 0.055766269846849, 0.049213560408541, 0.033823952439922],
                      [0.038327559383904, 0.055766269846849, 0.063191462410265, 0.055766269846849, 0.038327559383904],
                      [0.033823952439922, 0.049213560408541, 0.055766269846849, 0.049213560408541, 0.033823952439922],
                      [0.023246839878294, 0.033823952439922, 0.038327559383904, 0.033823952439922, 0.023246839878294]], 'float64')
        if kernel == 'G5F2H':
            w[2, 2] = 0.0  # hollow variant: centre coefficient equal to 0
        return w
    if kernel == 'Opt3':
        # Optimised kernel 3x3
        return np.array([[0.107235538162453, 0.142764461837547, 0.107235538162453],
                         [0.142764461837547, 0.0, 0.142764461837547],
                         [0.107235538162453, 0.142764461837547, 0.107235538162453]], 'float64')
    if kernel == 'Opt5':
        # Optimised kernel 5x5
        return np.array([[0.025441320175391, 0.037016902431746, 0.041945645727859, 0.037016902431746, 0.025441320175391],
                         [0.037016902431746, 0.053859275233950, 0.054719953999307, 0.053859275233950, 0.037016902431746],
                         [0.041945645727859, 0.054719953999307, 0.0, 0.054719953999307, 0.041945645727859],
                         [0.037016902431746, 0.053859275233950, 0.054719953999307, 0.053859275233950, 0.037016902431746],
                         [0.025441320175391, 0.037016902431746, 0.041945645727859, 0.037016902431746, 0.025441320175391]], 'float64')
    print('')
    print('ERROR: the kernel {} is not supported. Exiting with 1.'.format(kernel))
    print('')
    sys.exit(1)


def _filter_stack(data, weights):
    """Convolve each 2D slice of a 2D/3D/4D array with `weights`.

    Borders are zero-padded (mode='constant', cval=0.0); slices/volumes are
    filtered independently, matching the original per-slice processing."""
    if data.ndim == 2:
        return ndimage.convolve(data, weights, mode='constant', cval=0.0)
    out = np.zeros(data.shape, 'float64')
    if data.ndim == 3:
        for zz in range(data.shape[2]):
            out[:, :, zz] = ndimage.convolve(data[:, :, zz], weights, mode='constant', cval=0.0)
    else:
        for vv in range(data.shape[3]):
            for zz in range(data.shape[2]):
                out[:, :, zz, vv] = ndimage.convolve(data[:, :, zz, vv], weights, mode='constant', cval=0.0)
    return out


def _mad_threshold(imag, kernel_shape):
    """Local outlier threshold from the rephased imaginary channel.

    For each voxel: 2.5 * 1.4826 * (median absolute deviation of the
    imaginary channel within a median-filter window of `kernel_shape`),
    computed slice-by-slice for 3D/4D inputs."""
    def _thresh2d(sl):
        # Absolute deviation of imaginary channel within kernel window
        absdev = np.abs(sl - signal.medfilt(sl, kernel_shape))
        # Median absolute deviation within kernel window -> local threshold
        return 2.5000 * 1.4826 * signal.medfilt(absdev, kernel_shape)
    if imag.ndim == 2:
        return _thresh2d(imag)
    thresh = np.zeros(imag.shape, 'float64')
    if imag.ndim == 3:
        for zz in range(imag.shape[2]):
            thresh[:, :, zz] = _thresh2d(imag[:, :, zz])
    else:
        for vv in range(imag.shape[3]):
            for zz in range(imag.shape[2]):
                thresh[:, :, zz, vv] = _thresh2d(imag[:, :, zz, vv])
    return thresh


def rephaseDC(img_real, img_imag, out_base, kernel):
    '''Rephase complex MR data in image space with noise decorrelation.

    INTERFACE
    rephaseDC(img_real, img_imag, out_base, kernel)

    PARAMETERS
    img_real: path of a NIFTI file storing a 2D, 3D or 4D image (real channel)
    img_imag: path of a NIFTI file storing a 2D, 3D or 4D image (imaginary channel)
    out_base: base name of output files; outputs end in *_RealReph.nii (real
              channel rephased), *_RealRephThresh.nii (with outlier detection),
              *_ImagReph.nii, *_ImagRephThresh.nii, *_OutlierDetected.nii
              (flagging outliers with 1), *_PhaseOriginal.nii,
              *_PhaseBackground.nii (estimated background phase),
              *_PhaseRephased.nii and *_PhaseRephasedOutliers.nii. After
              rephasing, the imaginary channel should contain mostly noise and
              negligible true signal information.
    kernel:   2D decorrelation kernel ("B3", "B5", "G3F1", "G5F2", "G3F1H",
              "G5F2H", "Opt3" or "Opt5"; see Sprenger T et al, MRM 2017,
              77:559-570)

    DESCRIPTION
    Implements the noise decorrelation and rephasing algorithm of
    Sprenger T et al, "Real valued diffusion-weighted imaging using
    decorrelated phase filtering", Magnetic Resonance in Medicine (2017),
    77:559-570. Each volume of a 4D NIFTI is treated independently.

    Code released under BSD Two-Clause license.
    Copyright (c) 2020 University College London. All rights reserved.'''
    # Load real and imaginary MRI channels (exits with 1 on failure)
    imgR_obj, imgR_data, imgR_size, imgR_ndim, imgR_header, imgR_affine = _load_channel(img_real, 'real')
    imgI_obj, imgI_data, imgI_size, imgI_ndim, imgI_header, imgI_affine = _load_channel(img_imag, 'imaginary')
    # Check consistency of real and imaginary MRIs
    if ((imgR_ndim > 4) or (imgR_ndim < 2) or (imgI_ndim > 4) or (imgI_ndim < 2)):
        print('')
        print('ERROR: the input files {} and {} cannot have more than 4 dimensions and less than 2. Exiting with 1.'.format(img_real, img_imag))
        print('')
        sys.exit(1)
    if imgR_ndim != imgI_ndim:
        print('')
        print('ERROR: the input files {} is {}D while the input file {} is {}D. Exiting with 1.'.format(img_real, imgR_ndim, img_imag, imgI_ndim))
        print('')
        sys.exit(1)
    if imgR_ndim == 4:
        if imgR_size[3] != imgI_size[3]:
            print('')
            print('ERROR: the input files {} and {} store a different number of measurements along the 4th dimension. Exiting with 1.'.format(img_real, img_imag))
            print('')
            sys.exit(1)
    # Affine must match element-wise (4x4 -> 16 equal entries) and so must the in-plane matrix size
    if ((np.sum(imgI_affine == imgR_affine) != 16) or (imgI_size[0] != imgR_size[0]) or (imgI_size[1] != imgR_size[1])):
        print('')
        print('ERROR: the geometry of the input files {} and {} do not match. Exiting with 1.'.format(img_real, img_imag))
        print('')
        sys.exit(1)
    if imgR_ndim > 2:
        if imgI_size[2] != imgR_size[2]:
            print('')
            print('ERROR: the geometry of the input files {} and {} do not match. Exiting with 1.'.format(img_real, img_imag))
            print('')
            sys.exit(1)
    # Load kernel (exits with 1 if the kernel name is unknown)
    kernel_weights = _pick_kernel(kernel)
    ### Filter real and imaginary channels independently, slice by slice
    imgR_data_filt = _filter_stack(imgR_data, kernel_weights)
    imgI_data_filt = _filter_stack(imgI_data, kernel_weights)
    ### Get phase of complex data before and after filtering
    phase_data_orig = np.angle(imgR_data + imgI_data * 1j)
    phase_data_filt = np.angle(imgR_data_filt + imgI_data_filt * 1j)
    ### Rephase measured signals so that the true information is in the real
    ### channel only; for the rephasing, use the phase of the filtered signal
    rephased_data_complex = (imgR_data + 1j * imgI_data) * (np.exp(-1j * phase_data_filt))
    rephased_data_R = np.real(rephased_data_complex)  # should contain only true information
    rephased_data_I = np.imag(rephased_data_complex)  # should contain only Gaussian noise
    # Magnitude of rephased signals: when this differs too much from
    # rephased_data_R, the rephasing has probably gone wrong
    rephased_data_M = np.sqrt(rephased_data_R * rephased_data_R + rephased_data_I * rephased_data_I)
    rephased_data_deltaMR = np.abs(rephased_data_M - rephased_data_R)  # magnitude-vs-real discrepancy
    phase_data_new = np.angle(rephased_data_R + rephased_data_I * 1j)  # phase after rephasing
    ### Clear some memory
    del imgI_data, imgR_data
    ### Calculate noise level and remove outliers (MAD within a window the
    ### same size as the kernel); outlier voxels keep the magnitude in the
    ### real channel and zero in the imaginary channel
    thresh = _mad_threshold(rephased_data_I, kernel_weights.shape)
    keep = rephased_data_deltaMR < thresh  # voxels where rephasing is trusted
    # BUGFIX: copy instead of aliasing rephased_data_M (the original mutated
    # the magnitude array in place through the masked assignment below)
    rephased_data_R_thresh = np.copy(rephased_data_M)
    rephased_data_I_thresh = np.zeros(imgR_size, 'float64')
    outliers_flag = np.ones(imgR_size, 'float64')
    rephased_data_R_thresh[keep] = rephased_data_R[keep]
    rephased_data_I_thresh[keep] = rephased_data_I[keep]
    outliers_flag[keep] = 0.0
    phase_data_new_thresh = np.angle(rephased_data_R_thresh + rephased_data_I_thresh * 1j)
    ### Save rephased channels, outlier map and phase maps as NIFTI
    # Output file names: base name plus a fixed suffix per output
    rephased_R_outfile = out_base + '_RealReph.nii'
    rephased_R_thresh_outfile = out_base + '_RealRephThresh.nii'
    rephased_I_outfile = out_base + '_ImagReph.nii'
    rephased_I_thresh_outfile = out_base + '_ImagRephThresh.nii'
    flag_outfile = out_base + '_OutlierDetected.nii'
    phaseorig_outfile = out_base + '_PhaseOriginal.nii'
    phasenew_outfile = out_base + '_PhaseRephased.nii'
    phaseest_outfile = out_base + '_PhaseBackground.nii'
    phasenewthresh_outfile = out_base + '_PhaseRephasedOutliers.nii'
    # Create header: save as float64 even if the input header differs
    buffer_header = imgR_header
    buffer_header.set_data_dtype('float64')
    # Save files (same order as the original implementation)
    outputs = ((rephased_data_R, rephased_R_outfile),
               (rephased_data_R_thresh, rephased_R_thresh_outfile),
               (rephased_data_I, rephased_I_outfile),
               (rephased_data_I_thresh, rephased_I_thresh_outfile),
               (outliers_flag, flag_outfile),
               (phase_data_orig, phaseorig_outfile),
               (phase_data_new, phasenew_outfile),
               (phase_data_new_thresh, phasenewthresh_outfile),
               (phase_data_filt, phaseest_outfile))
    for out_data, out_file in outputs:
        nib.save(nib.Nifti1Image(out_data, imgR_obj.affine, buffer_header), out_file)
# Run the module as a script when required
if __name__ == "__main__":
    ### Parse arguments or print help
    # Four required positional arguments: real-channel NIFTI, imaginary-channel
    # NIFTI, output base name and the decorrelation kernel identifier.
    parser = argparse.ArgumentParser(description='Rephasing of complex MR images with noise decorrelation according to Sprenger T et al, MRM 2017, 77:559-570. Author: <NAME>, University College London. Email: <<EMAIL>> <<EMAIL>>. Code released under BSD Two-Clause license. Copyright (c) 2020 University College London. All rights reserved.')
    parser.add_argument('img_real', help='3D or 4D Nifti file storing the real channel in image space')
    parser.add_argument('img_imag', help='3D or 4D Nifti file storing the imaginary channel in image space')
    parser.add_argument('out_base', help='base name of the output files (output files are: *_RealReph.nii, storing the real channel rephased; *_RealRephThresh.nii, storing the real channel rephased with outlier detection; *_ImagReph.nii, storing the imaginary channel rephased; *_ImagRephThresh.nii, storing the real channel rephased with outlier detection; *_OutlierDetected.nii, flagging with 1 outliers; *_PhaseOriginal.nii, storing the original phase; *_PhaseBackground.nii, storing the estimated background phase; *_PhaseRephased.nii, storing the phase after rephasing using the background phase; *_PhaseRephasedOutliers.nii, storing the original phase after rephasing where outliers are set to zero phase)')
    parser.add_argument('kernel', help='kernel for decorrelation filers (choose among B3, B5, G3F1, G5F2, G3F1H, G5F2H, Opt3 and Opt5; see Sprenger T et al, MRM 2017, 77:559-570)')
    args = parser.parse_args()
    ### Rephase the data
    # Delegate all work (loading, filtering, rephasing, saving) to rephaseDC.
    rephaseDC(args.img_real,args.img_imag,args.out_base,args.kernel)
| en | 0.785732 | # Code released under BSD Two-Clause license # # Copyright (c) 2020 University College London. # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and documentation are those # of the authors and should not be interpreted as representing official policies, # either expressed or implied, of the FreeBSD Project. 
Rephase complex MR data in image space INTERFACE rephaseDC(img_real,img_imag,out_root,kernel) PARAMETERS img_real: path of a NIFTI file storing a 3D or 4D image (real channel) img_imag: path of a NIFTI file storing a 3D or 4D image (imaginary channel) out_base: base name of output files (the output files contain the real and imaginary channels after noise decorrelation and rephasing; these will end in *_RealReph.nii (real channel rephased), *_RealRephThresh.nii (real channel rephased with outlier detection), *_ImagReph.nii (imaginary channel rephased), *_ImagRephThresh.nii (imaginary channel rephased with outlier detection), *_OutlierDetected.nii (flagging with 1 outliers), *_PhaseOriginal.nii (storing the original phase), *_PhaseBackground.nii (storing the estimated background phase), *_PhaseRephased.nii (storing the phase after rephasing using the background phase), *_PhaseRephasedOutliers.nii (storing the original phase after rephasing where outliers are set to zero phase). Note that the imaginary channel after rephasing should contain mostly noise and negligible true signal information. kernel: string of the 2D kernel to use for decorrenation (choose among "B3", "B5", "G3F1", "G5F2", "G3F1H", "G5F2H", "Opt3", "Opt5"; see Sprenger T et al, MRM 2017, 77:559–570 for more information about the kernels.) DESCRIPTION The function implements noise decorrelation and rephasing algorithm presented in Sprenger T et al, MRM 2017, 77:559-570. The function works with 3D and 4D NIFTI files (in the latter case, each volume of the 4D NIFTI is treated independently). References: "Real valued diffusion-weighted imaging using decorrelated phase filtering", Sprenger T et al, Magnetic Resonance in Medicine (2017), 77:559-570 Author: <NAME>, University College London <<EMAIL>> <<EMAIL>> Code released under BSD Two-Clause license. Copyright (c) 2020 University College London. All rights reserved. 
# Load real MRI # Load imaginary MRI # Check consistency of real and imaginay MRIs # Load kernel # Boxcar 3x3 # Boxcar 5x5 # Gaussian 3x3 with sigma = 1 voxel # Gaussian 5x5 with sigma = 2 voxels # Gaussian 3x3 with sigma = 1 voxel and center coefficient equal to 0 # Gaussian 5x5 with sigma = 2 voxels and center coefficient equal to 0 # Optimised kernel 3x3 # Optimised kernel 5x5 ### Filter data with the specified kernel # Filter real and imaginary channels independently # Filter real and imaginary channels independently # Filter real and imaginary channels independently ### Get phase of complex data after filtering ### Rephase measured signals so that the true information is in the real channel only; for the rephasing, use the phase of the signal after filtering # Rephase signals # Get real channel of rephased signals (this should theoretically contain only true information) # Get imaginary channel of rephased signals (this should theoretically contain only Gaussian noise) # Get magnitude of rephased signals (when this differs too much from rephased_data_R, then the rephasing has probably gone wrong) # Difference between magnitude and real channel # Phase after rephasing ### Clear some memory ### Calculate noise level and remove outliers (look at MAD within a window the same size as the kernels) # Absolute deviation of imaginary channel within kernel window # Median absolute deviation of imaginary channel within kernel window # Local threhsold # Absolute deviation of imaginary channel within kernel window # Median absolute deviation of imaginary channel within kernel window # Local threhsold # Absolute deviation of imaginary channel within kernel window # Median absolute deviation of imaginary channel within kernel window # Local threhsold ### Save as real and imaginary channels after rephasing and after rephasing + outlier detection as NIFTI # Create file names # Create header # Make sure we save output data as float64, even if input header indicates a different 
data type # Save files # Run the module as a script when required ### Parse arguments or print help ### Rephase the data | 1.223334 | 1 |
reward/runner/base_runner.py | lgvaz/torchrl | 5 | 6622730 | <filename>reward/runner/base_runner.py
import reward.utils as U
import numpy as np
from abc import ABC, abstractmethod
from tqdm.autonotebook import tqdm
from boltons.cacheutils import cachedproperty
class BaseRunner(ABC):
    """Abstract base class for environment runners.

    Keeps per-episode statistics (rewards and lengths), tracks whether the
    most recently logged mean reward is the best seen so far, and writes
    summaries to a logger.  Concrete subclasses implement the actual
    environment interaction (`reset`, `act`, `sample_random_ac`) and the
    descriptive properties.
    """

    def __init__(self, env, ep_maxlen=None):
        """
        Parameters:
            env: environment (or batch of environments) to interact with.
            ep_maxlen: maximum number of steps per episode; None means unlimited.
        """
        self.env = env
        # NOTE(review): `or` also maps ep_maxlen=0 to infinity - presumably
        # intended, since a zero-length episode makes no sense; confirm.
        self.ep_maxlen = ep_maxlen or float("inf")
        self.clean()

    @property
    @abstractmethod
    def env_name(self):
        """Human-readable name of the wrapped environment."""
        pass

    @property
    @abstractmethod
    def num_envs(self):
        """Number of parallel environments driven by this runner."""
        pass

    @cachedproperty
    @abstractmethod
    def s_space(self):
        """State (observation) space of the environment; computed once and cached."""
        pass

    @cachedproperty
    @abstractmethod
    def ac_space(self):
        """Action space of the environment; computed once and cached."""
        pass

    @abstractmethod
    def reset(self):
        """Reset the environment(s) and return the initial state."""
        pass

    @abstractmethod
    def act(self, ac):
        """Step the environment(s) with action `ac`."""
        pass

    @abstractmethod
    def sample_random_ac(self):
        """Sample a random action from the action space."""
        pass

    def _wrap_name(self, s):
        """Prefix a log key with the concrete runner class name (e.g. 'MyRunner/Reward')."""
        return "/".join([self.__class__.__name__, s])

    @property
    def num_episodes(self):
        """Number of completed episodes recorded so far."""
        return len(self.rs)

    @property
    def is_best(self):
        """Whether the last logged mean reward was the best seen so far."""
        return self._is_best

    def clean(self):
        """Reset all bookkeeping (episode rewards, lengths and counters)."""
        self.rs = []               # reward of each completed episode
        self.num_steps = 0         # total environment steps taken
        self.ep_lens = []          # length of each completed episode
        self.new_ep = 0            # episodes finished since last write_logs
        self._is_best = False
        self._last_logged_ep = 0
        # BUGFIX: start at -inf instead of 0 so `is_best` can become True even
        # when all episode rewards are negative (common in many RL tasks).
        self._best_rew = -float("inf")

    def close(self):
        """Release any resources held by the runner (optional subclass override)."""
        raise NotImplementedError

    def write_logs(self, logger):
        """Log mean reward and episode length over episodes finished since the
        last call, and update the best-reward bookkeeping.

        NOTE(review): if no new episode finished, the previous `self.new_ep`
        window is re-logged; with no episodes at all np.mean([]) yields NaN.
        """
        new_ep = abs(self._last_logged_ep - self.num_episodes)
        if new_ep != 0:
            self.new_ep = new_ep
        self._last_logged_ep = self.num_episodes
        rew = np.mean(self.rs[-self.new_ep :])
        self._is_best = rew >= self._best_rew
        self._best_rew = max(self._best_rew, rew)
        logger.add_log(self._wrap_name("Reward"), rew)
        logger.add_log(self._wrap_name("Length"), np.mean(self.ep_lens[-self.new_ep :]))
| <filename>reward/runner/base_runner.py
import reward.utils as U
import numpy as np
from abc import ABC, abstractmethod
from tqdm.autonotebook import tqdm
from boltons.cacheutils import cachedproperty
class BaseRunner(ABC):
    """Abstract environment runner that accumulates per-episode statistics.

    Subclasses implement the environment-specific hooks (``reset``, ``act``,
    ``sample_random_ac``, ...); this base tracks episode rewards, lengths and
    best-reward status for logging via :meth:`write_logs`.
    """
    def __init__(self, env, ep_maxlen=None):
        self.env = env
        # None means episodes are unbounded in length.
        self.ep_maxlen = ep_maxlen or float("inf")
        self.clean()
    @property
    @abstractmethod
    def env_name(self):
        """Identifier of the wrapped environment."""
        pass
    @property
    @abstractmethod
    def num_envs(self):
        """Number of parallel environments driven by this runner."""
        pass
    @cachedproperty
    @abstractmethod
    def s_space(self):
        """State (observation) space; computed once and cached."""
        pass
    @cachedproperty
    @abstractmethod
    def ac_space(self):
        """Action space; computed once and cached."""
        pass
    @abstractmethod
    def reset(self):
        """Reset the environment and return the initial state."""
        pass
    @abstractmethod
    def act(self, ac):
        """Step the environment with action ``ac``."""
        pass
    @abstractmethod
    def sample_random_ac(self):
        """Sample a random action from the action space."""
        pass
    def _wrap_name(self, s):
        # Prefix log keys with the concrete runner class name, e.g. "GymRunner/Reward".
        return "/".join([self.__class__.__name__, s])
    @property
    def num_episodes(self):
        # One reward entry is appended per finished episode.
        return len(self.rs)
    @property
    def is_best(self):
        # True when the last logged mean reward matched or beat the best seen so far.
        return self._is_best
    def clean(self):
        """Reset all accumulated episode statistics."""
        self.rs = []
        self.num_steps = 0
        self.ep_lens = []
        self.new_ep = 0
        self._is_best = False
        self._last_logged_ep = 0
        self._best_rew = 0
    def close(self):
        raise NotImplementedError
    def write_logs(self, logger):
        """Log mean reward/length over episodes finished since the last call.

        NOTE(review): when no new episode finished, the previous window size
        ``self.new_ep`` is reused, so the same trailing episodes are averaged
        again -- presumably intended; confirm.
        """
        new_ep = abs(self._last_logged_ep - self.num_episodes)
        if new_ep != 0:
            self.new_ep = new_ep
        self._last_logged_ep = self.num_episodes
        rew = np.mean(self.rs[-self.new_ep :])
        # Best-so-far bookkeeping drives the ``is_best`` property.
        self._is_best = rew >= self._best_rew
        self._best_rew = max(self._best_rew, rew)
        logger.add_log(self._wrap_name("Reward"), rew)
        logger.add_log(self._wrap_name("Length"), np.mean(self.ep_lens[-self.new_ep :]))
| none | 1 | 2.427478 | 2 | |
2020/1B/1.py | AmauryLiet/CodeJam | 0 | 6622731 | N = int(input())
# NOTE(review): this dict is never read; `N` above is the test-case count,
# so the entry is almost certainly leftover debris.
directions = {
    N: 'N'
}
for case_id in range(1, N + 1):
    x, y = map(int, input().split())
    moves = ""
    # Greedily peel off one base-2 "digit" per iteration: every move is
    # +-2^i on one axis, so after choosing a unit move the remaining
    # target can be halved.
    while x != 0 or y != 0:
        # if len(moves) > 0:
        #     print('Did', moves[-1])
        # print("now at", x, y)
        if x % 2 == y % 2:
            # Both coordinates share parity: no single horizontal or vertical
            # unit move can fix this digit, so the target is unreachable.
            break
        elif x % 2 == 1:
            # the move to do is horizontal (W-E)
            if y == 0 and abs(x) == 1:
                # Last move: one step finishes the path.
                moves += 'E' if x == 1 else 'W'
                x, y = 0, 0
            else:
                # Choose E or W so the halved remainder keeps the
                # horizontal/vertical parity alternation solvable.
                next_move_will_be_vertical = (y // 2) % 2 == 1
                going_west_would_force_next_move_horizontal = ((x + 1) // 2) % 2 == 1
                if next_move_will_be_vertical == going_west_would_force_next_move_horizontal:
                    moves += 'E'
                    x, y = ((x - 1) // 2, y // 2)
                else:
                    moves += 'W'
                    x, y = ((x + 1) // 2, y // 2)
        else:
            # the move to do is vertical (N-S)
            if x == 0 and abs(y) == 1:
                moves += 'N' if y == 1 else 'S'
                x, y = 0, 0
            else:
                next_move_will_be_horizontal = (x // 2) % 2 == 1 # false
                going_south_would_force_next_move_vertical = ((y + 1) // 2) % 2 == 1 # true
                if next_move_will_be_horizontal == going_south_would_force_next_move_vertical:
                    moves += 'N'
                    x, y = (x // 2, (y - 1) // 2)
                else:
                    moves += 'S'
                    x, y = (x // 2, (y + 1) // 2)
    else:
        # while-else: the loop finished without `break`, i.e. target reached.
        print('Case #{}: {}'.format(case_id, moves))
        continue
    print('Case #{}: {}'.format(case_id, 'IMPOSSIBLE'))
| N = int(input())
# NOTE(review): this dict is never read; `N` above is the test-case count,
# so the entry is almost certainly leftover debris.
directions = {
    N: 'N'
}
for case_id in range(1, N + 1):
    x, y = map(int, input().split())
    moves = ""
    # Greedily peel off one base-2 "digit" per iteration: every move is
    # +-2^i on one axis, so after choosing a unit move the remaining
    # target can be halved.
    while x != 0 or y != 0:
        # if len(moves) > 0:
        #     print('Did', moves[-1])
        # print("now at", x, y)
        if x % 2 == y % 2:
            # Both coordinates share parity: no single horizontal or vertical
            # unit move can fix this digit, so the target is unreachable.
            break
        elif x % 2 == 1:
            # the move to do is horizontal (W-E)
            if y == 0 and abs(x) == 1:
                # Last move: one step finishes the path.
                moves += 'E' if x == 1 else 'W'
                x, y = 0, 0
            else:
                # Choose E or W so the halved remainder keeps the
                # horizontal/vertical parity alternation solvable.
                next_move_will_be_vertical = (y // 2) % 2 == 1
                going_west_would_force_next_move_horizontal = ((x + 1) // 2) % 2 == 1
                if next_move_will_be_vertical == going_west_would_force_next_move_horizontal:
                    moves += 'E'
                    x, y = ((x - 1) // 2, y // 2)
                else:
                    moves += 'W'
                    x, y = ((x + 1) // 2, y // 2)
        else:
            # the move to do is vertical (N-S)
            if x == 0 and abs(y) == 1:
                moves += 'N' if y == 1 else 'S'
                x, y = 0, 0
            else:
                next_move_will_be_horizontal = (x // 2) % 2 == 1 # false
                going_south_would_force_next_move_vertical = ((y + 1) // 2) % 2 == 1 # true
                if next_move_will_be_horizontal == going_south_would_force_next_move_vertical:
                    moves += 'N'
                    x, y = (x // 2, (y - 1) // 2)
                else:
                    moves += 'S'
                    x, y = (x // 2, (y + 1) // 2)
    else:
        # while-else: the loop finished without `break`, i.e. target reached.
        print('Case #{}: {}'.format(case_id, moves))
        continue
    print('Case #{}: {}'.format(case_id, 'IMPOSSIBLE'))
| en | 0.322147 | # if len(moves) > 0: # print('Did', moves[-1]) # print("now at", x, y) # the move to do is horizontal (W-E) # the move to do is vertical (N-S) # false # true #{}: {}'.format(case_id, moves)) #{}: {}'.format(case_id, 'IMPOSSIBLE')) | 3.399727 | 3 |
world challenges 2/df68.py | T-Terra/Exercises-of-Python | 0 | 6622732 | from random import randint
v = 0  # running count of the player's wins
print('=' * 30)
print('Vamos jogar PAR OU ÍMPAR...')
print('=' * 30)
# Play "odd or even" rounds against the computer until the player loses once.
while True:
    jogador = int(input('Digite um valor: '))
    pc = randint(0, 11)
    total = jogador + pc
    tipo = ' '
    # Keep asking until the player types P (even) or I (odd).
    while tipo not in 'PI':
        tipo = str(input('Par ou Ímpar? [P/I] ')).strip().upper()[0]
    print(f'Você jogou {jogador} e o PC {pc}. e o total é {total}', end=' ')
    print('DEU PAR' if total % 2 == 0 else 'DEU ÍMPAR')
    if tipo == 'P':
        # Player bet on even: win on an even total, otherwise game over.
        if total % 2 == 0:
            print('Você venceu!')
            v += 1
        else:
            print('Você perdeu!!!')
            break
    elif tipo == 'I':
        # Player bet on odd: win on an odd total, otherwise game over.
        if total % 2 == 1:
            print('Você venceu!!!')
            v += 1
        else:
            print('Você perdeu!!!')
            break
    print('Vamos jogar novamente...')
print(f'GAME OVER! você venceu {v} vezes.')
| from random import randint
v = 0  # running count of the player's wins
print('=' * 30)
print('Vamos jogar PAR OU ÍMPAR...')
print('=' * 30)
# Play "odd or even" rounds against the computer until the player loses once.
while True:
    jogador = int(input('Digite um valor: '))
    pc = randint(0, 11)
    total = jogador + pc
    tipo = ' '
    # Keep asking until the player types P (even) or I (odd).
    while tipo not in 'PI':
        tipo = str(input('Par ou Ímpar? [P/I] ')).strip().upper()[0]
    print(f'Você jogou {jogador} e o PC {pc}. e o total é {total}', end=' ')
    print('DEU PAR' if total % 2 == 0 else 'DEU ÍMPAR')
    if tipo == 'P':
        # Player bet on even: win on an even total, otherwise game over.
        if total % 2 == 0:
            print('Você venceu!')
            v += 1
        else:
            print('Você perdeu!!!')
            break
    elif tipo == 'I':
        # Player bet on odd: win on an odd total, otherwise game over.
        if total % 2 == 1:
            print('Você venceu!!!')
            v += 1
        else:
            print('Você perdeu!!!')
            break
    print('Vamos jogar novamente...')
print(f'GAME OVER! você venceu {v} vezes.')
| none | 1 | 3.601382 | 4 | |
__init__.py | diehlpk/blender-vtk-plugin | 9 | 6622733 | <gh_stars>1-10
"""
Blender plugin to import vtk unstrucutred grids
author:diehlpk
date: 13.10.2014
"""
bl_info = {"name": "VTK-Importer",
"author": "diehlpk",
"blender": (2, 6, 9),
"version": (0, 0, 1),
"location": "File > Import-Export",
"description": "Import VTK unstructured grid",
"category": "Import-Export"
}
# Imports for readinf vtk
from xml.etree import ElementTree
import re
# Import blender addon api
import bpy
from bpy.types import Operator
from bpy_extras.io_utils import ImportHelper, ExportHelper
from bpy.props import (StringProperty,
BoolProperty,
EnumProperty,
IntProperty,
FloatProperty)
def add_particle(x_pos, y_pos, z_pos, radius):
    """Create a low-poly UV sphere centred at the given coordinates.

    @param x_pos X coordinate of the center of the sphere
    @param y_pos Y coordinate of the center of the sphere
    @param z_pos Z coordinate of the center of the sphere
    @param radius Radius of the sphere
    """
    # Only the first of Blender's 20 scene layers is enabled.
    layer_flags = tuple(layer_index == 0 for layer_index in range(20))
    bpy.ops.mesh.primitive_uv_sphere_add(
        segments=6,
        ring_count=6,
        size=radius,
        location=(x_pos, y_pos, z_pos),
        rotation=(0, 0, 0),
        layers=layer_flags,
    )
def value_to_rgb(minimum, maximum, value):
    """Map *value* in [minimum, maximum] onto a blue->green->red colour ramp.

    The minimum maps to pure blue, the midpoint to pure green and the
    maximum to pure red.

    @param minimum The minimum of all values
    @param maximum The maximum of all values
    @param value The value
    @return (r, g, b) tuple of ints, each in [0, 255]
    """
    minimum, maximum = float(minimum), float(maximum)
    # ratio runs from 0 (value == minimum) to 2 (value == maximum).
    ratio = 2 * (value - minimum) / (maximum - minimum)
    b_color = int(max(0, 255 * (1 - ratio)))
    # Bug fix: red must rise towards the maximum, i.e. (ratio - 1); the
    # original used (1 - ratio), making red identical to blue and pushing
    # green (255 - b - r) above 255 near the maximum.
    r_color = int(max(0, 255 * (ratio - 1)))
    g_color = 255 - b_color - r_color
    return r_color, g_color, b_color
def values_to_rgb(ranges, values):
    """Normalise a three-component value into RGB floats in [0, 1].

    @param ranges Per-component [minimum, maximum] pairs
    @param values The three-component value to transform
    @return (r, g, b) tuple of floats
    """
    def _norm(component, bounds):
        # Bug fix: normalise by the span (max - min), not by the maximum,
        # so a component at its minimum maps to 0 and at its maximum to 1.
        lo, hi = float(bounds[0]), float(bounds[1])
        span = hi - lo
        if span == 0:
            # Degenerate range: avoid division by zero.
            return 0.0
        return (float(component) - lo) / span
    r_color = _norm(values[0], ranges[0])
    g_color = _norm(values[1], ranges[1])
    b_color = _norm(values[2], ranges[2])
    return r_color, g_color, b_color
class ImportVTK(Operator, ImportHelper):
    """Blender operator that imports a VTK unstructured grid.

    Each grid point becomes a copy of a small template sphere; an optional
    point-data array (selected by the local ``name`` in ``execute``) is
    mapped onto per-sphere diffuse-colour materials.
    """
    bl_idname = "import.vtk"
    bl_label = "VTK importer"
    # Path chosen in the file-select dialog opened by ``invoke``.
    filepath = bpy.props.StringProperty(subtype="FILE_PATH")
    def __init__(self):
        pass
    @classmethod
    def poll(cls, context):
        """Enable the operator only when some object is active."""
        return context.object is not None
    def execute(self, context):
        """Parse the VTK XML file and instantiate one sphere per point."""
        radius = 0.05
        # Name of the point-data array to colour by; "" disables colouring.
        name = ""
        bpy.ops.object.select_all(action='DESELECT')
        # Template sphere: copied once per point, deleted at the end.
        add_particle(0, 0, 0, radius)
        self.sphere = bpy.context.object
        self.sphere.name = 'vtk_import_root_object'
        points = []
        material = []
        with open(self.filepath, 'r') as input_file:
            tree = ElementTree.parse(input_file)
            for node in tree.getiterator('DataArray'):
                if (node.attrib.get('Name') == 'Points' or
                        node.attrib.get('Name') == 'coordinates'):
                    dim = int(node.attrib.get('NumberOfComponents'))
                    # Collapse all whitespace to single spaces, then group
                    # the flat coordinate stream into dim-sized tuples.
                    text = re.sub("\n", "", node.text)
                    text = re.sub("\t", "", text)
                    text = re.sub(" +", " ", text)
                    text = text.lstrip(' ').rstrip(' ')
                    splitted = text.split(' ')
                    pos = []
                    for element in splitted:
                        pos.append(element)
                        if len(pos) == dim:
                            points.append(pos)
                            pos = []
                if node.attrib.get('Name') == name and len(name) > 0:
                    dim = int(node.attrib.get('NumberOfComponents'))
                    colors = []
                    if dim == 1:
                        # Scalar point data: one value per line.
                        text = re.sub(" +", "", node.text)
                        text = re.sub(" ", "", text)
                        text = re.sub("\t", "", text)
                        text = text.lstrip().rstrip()
                        splitted = text.split("\n")
                        for element in splitted:
                            colors.append(float(element))
                        minimum = min(colors)
                        maximum = max(colors)
                        for i in range(len(colors)):
                            material_object = bpy.data.materials.new(str(i))
                            material_object.diffuse_color = value_to_rgb(
                                minimum, maximum, colors[i])
                            material.append(material_object)
                    if dim == 3:
                        # Vector point data: flat stream grouped into triples.
                        text = re.sub("\n", "", node.text)
                        text = re.sub("\t", "", text)
                        text = re.sub(" +", " ", text)
                        text = text.lstrip(' ').rstrip(' ')
                        splitted = text.split(' ')
                        color = []
                        for element in splitted:
                            color.append(element)
                            if len(color) == dim:
                                colors.append(color)
                                color = []
                        # NOTE(review): all six bounds below are computed from
                        # colors[0] (the first triple), not from per-channel
                        # columns, and min()/max() over zip(range(...), ...)
                        # mixes indices with values. Looks buggy -- verify the
                        # intended per-channel range computation.
                        min_r = min(zip(range(len(colors)), colors[0]))[1]
                        max_r = max(zip(range(len(colors)), colors[0]))[1]
                        min_g = min(zip(range(len(colors)), colors[0]))[1]
                        max_g = max(zip(range(len(colors)), colors[0]))[1]
                        min_b = min(zip(range(len(colors)), colors[0]))[1]
                        max_b = max(zip(range(len(colors)), colors[0]))[1]
                        ranges = [[min_r, max_r], [min_g, max_g], [min_b, max_b]]
                        for i in range(len(colors)):
                            material_object = bpy.data.materials.new(str(i))
                            material_object.diffuse_color = values_to_rgb(
                                ranges, colors[i])
                            material.append(material_object)
        # Instantiate one sphere per parsed point.
        index = 0
        len_points = len(points)
        len_material = len(material)
        for i in range(len_points):
            actual_object = self.sphere.copy()
            actual_object.name = 'particle_' + str(index)
            actual_object.data = self.sphere.data.copy()
            pos = points[i]
            actual_object.location = (
                float(pos[0]), float(pos[1]), float(pos[2]))
            bpy.context.scene.objects.link(actual_object)
            # Materials are applied only when exactly one was built per point.
            if len_points == len_material:
                actual_object.data.materials.append(material[i])
            index = index + 1
        # Remove the template sphere.
        bpy.ops.object.select_all(action='DESELECT')
        bpy.ops.object.select_pattern(
            pattern='vtk_import_root_object',
            case_sensitive=False, extend=False)
        bpy.ops.object.delete()
        bpy.context.scene.update()
        return {'FINISHED'}
    def invoke(self, context, event):
        """Open the file-select dialog; ``execute`` runs on confirmation."""
        context.window_manager.fileselect_add(self)
        return {'RUNNING_MODAL'}
def menu_func(self, context):
    """Append the importer entry to Blender's File > Import menu."""
    self.layout.operator(ImportVTK.bl_idname, text="VTK Importer")
def register():
    """Register the add-on's classes and hook the import-menu entry."""
    bpy.utils.register_module(__name__)
    bpy.types.INFO_MT_file_import.append(menu_func)
def unregister():
    """Undo :func:`register`: remove the classes and the menu entry."""
    bpy.utils.unregister_module(__name__)
    bpy.types.INFO_MT_file_import.remove(menu_func)
if __name__ == "__main__":
    # Allow running the add-on file directly (e.g. from Blender's text editor).
    register()
| """
Blender plugin to import vtk unstrucutred grids
author:diehlpk
date: 13.10.2014
"""
bl_info = {"name": "VTK-Importer",
"author": "diehlpk",
"blender": (2, 6, 9),
"version": (0, 0, 1),
"location": "File > Import-Export",
"description": "Import VTK unstructured grid",
"category": "Import-Export"
}
# Imports for readinf vtk
from xml.etree import ElementTree
import re
# Import blender addon api
import bpy
from bpy.types import Operator
from bpy_extras.io_utils import ImportHelper, ExportHelper
from bpy.props import (StringProperty,
BoolProperty,
EnumProperty,
IntProperty,
FloatProperty)
def add_particle(x_pos, y_pos, z_pos, radius):
    """Create a low-poly UV sphere centred at the given coordinates.

    @param x_pos X coordinate of the center of the sphere
    @param y_pos Y coordinate of the center of the sphere
    @param z_pos Z coordinate of the center of the sphere
    @param radius Radius of the sphere
    """
    # Only the first of Blender's 20 scene layers is enabled.
    layer_flags = tuple(layer_index == 0 for layer_index in range(20))
    bpy.ops.mesh.primitive_uv_sphere_add(
        segments=6,
        ring_count=6,
        size=radius,
        location=(x_pos, y_pos, z_pos),
        rotation=(0, 0, 0),
        layers=layer_flags,
    )
def value_to_rgb(minimum, maximum, value):
    """Map *value* in [minimum, maximum] onto a blue->green->red colour ramp.

    The minimum maps to pure blue, the midpoint to pure green and the
    maximum to pure red.

    @param minimum The minimum of all values
    @param maximum The maximum of all values
    @param value The value
    @return (r, g, b) tuple of ints, each in [0, 255]
    """
    minimum, maximum = float(minimum), float(maximum)
    # ratio runs from 0 (value == minimum) to 2 (value == maximum).
    ratio = 2 * (value - minimum) / (maximum - minimum)
    b_color = int(max(0, 255 * (1 - ratio)))
    # Bug fix: red must rise towards the maximum, i.e. (ratio - 1); the
    # original used (1 - ratio), making red identical to blue and pushing
    # green (255 - b - r) above 255 near the maximum.
    r_color = int(max(0, 255 * (ratio - 1)))
    g_color = 255 - b_color - r_color
    return r_color, g_color, b_color
def values_to_rgb(ranges, values):
    """Normalise a three-component value into RGB floats in [0, 1].

    @param ranges Per-component [minimum, maximum] pairs
    @param values The three-component value to transform
    @return (r, g, b) tuple of floats
    """
    def _norm(component, bounds):
        # Bug fix: normalise by the span (max - min), not by the maximum,
        # so a component at its minimum maps to 0 and at its maximum to 1.
        lo, hi = float(bounds[0]), float(bounds[1])
        span = hi - lo
        if span == 0:
            # Degenerate range: avoid division by zero.
            return 0.0
        return (float(component) - lo) / span
    r_color = _norm(values[0], ranges[0])
    g_color = _norm(values[1], ranges[1])
    b_color = _norm(values[2], ranges[2])
    return r_color, g_color, b_color
class ImportVTK(Operator, ImportHelper):
    """Blender operator that imports a VTK unstructured grid.

    Each grid point becomes a copy of a small template sphere; an optional
    point-data array (selected by the local ``name`` in ``execute``) is
    mapped onto per-sphere diffuse-colour materials.
    """
    bl_idname = "import.vtk"
    bl_label = "VTK importer"
    # Path chosen in the file-select dialog opened by ``invoke``.
    filepath = bpy.props.StringProperty(subtype="FILE_PATH")
    def __init__(self):
        pass
    @classmethod
    def poll(cls, context):
        """Enable the operator only when some object is active."""
        return context.object is not None
    def execute(self, context):
        """Parse the VTK XML file and instantiate one sphere per point."""
        radius = 0.05
        # Name of the point-data array to colour by; "" disables colouring.
        name = ""
        bpy.ops.object.select_all(action='DESELECT')
        # Template sphere: copied once per point, deleted at the end.
        add_particle(0, 0, 0, radius)
        self.sphere = bpy.context.object
        self.sphere.name = 'vtk_import_root_object'
        points = []
        material = []
        with open(self.filepath, 'r') as input_file:
            tree = ElementTree.parse(input_file)
            for node in tree.getiterator('DataArray'):
                if (node.attrib.get('Name') == 'Points' or
                        node.attrib.get('Name') == 'coordinates'):
                    dim = int(node.attrib.get('NumberOfComponents'))
                    # Collapse all whitespace to single spaces, then group
                    # the flat coordinate stream into dim-sized tuples.
                    text = re.sub("\n", "", node.text)
                    text = re.sub("\t", "", text)
                    text = re.sub(" +", " ", text)
                    text = text.lstrip(' ').rstrip(' ')
                    splitted = text.split(' ')
                    pos = []
                    for element in splitted:
                        pos.append(element)
                        if len(pos) == dim:
                            points.append(pos)
                            pos = []
                if node.attrib.get('Name') == name and len(name) > 0:
                    dim = int(node.attrib.get('NumberOfComponents'))
                    colors = []
                    if dim == 1:
                        # Scalar point data: one value per line.
                        text = re.sub(" +", "", node.text)
                        text = re.sub(" ", "", text)
                        text = re.sub("\t", "", text)
                        text = text.lstrip().rstrip()
                        splitted = text.split("\n")
                        for element in splitted:
                            colors.append(float(element))
                        minimum = min(colors)
                        maximum = max(colors)
                        for i in range(len(colors)):
                            material_object = bpy.data.materials.new(str(i))
                            material_object.diffuse_color = value_to_rgb(
                                minimum, maximum, colors[i])
                            material.append(material_object)
                    if dim == 3:
                        # Vector point data: flat stream grouped into triples.
                        text = re.sub("\n", "", node.text)
                        text = re.sub("\t", "", text)
                        text = re.sub(" +", " ", text)
                        text = text.lstrip(' ').rstrip(' ')
                        splitted = text.split(' ')
                        color = []
                        for element in splitted:
                            color.append(element)
                            if len(color) == dim:
                                colors.append(color)
                                color = []
                        # NOTE(review): all six bounds below are computed from
                        # colors[0] (the first triple), not from per-channel
                        # columns, and min()/max() over zip(range(...), ...)
                        # mixes indices with values. Looks buggy -- verify the
                        # intended per-channel range computation.
                        min_r = min(zip(range(len(colors)), colors[0]))[1]
                        max_r = max(zip(range(len(colors)), colors[0]))[1]
                        min_g = min(zip(range(len(colors)), colors[0]))[1]
                        max_g = max(zip(range(len(colors)), colors[0]))[1]
                        min_b = min(zip(range(len(colors)), colors[0]))[1]
                        max_b = max(zip(range(len(colors)), colors[0]))[1]
                        ranges = [[min_r, max_r], [min_g, max_g], [min_b, max_b]]
                        for i in range(len(colors)):
                            material_object = bpy.data.materials.new(str(i))
                            material_object.diffuse_color = values_to_rgb(
                                ranges, colors[i])
                            material.append(material_object)
        # Instantiate one sphere per parsed point.
        index = 0
        len_points = len(points)
        len_material = len(material)
        for i in range(len_points):
            actual_object = self.sphere.copy()
            actual_object.name = 'particle_' + str(index)
            actual_object.data = self.sphere.data.copy()
            pos = points[i]
            actual_object.location = (
                float(pos[0]), float(pos[1]), float(pos[2]))
            bpy.context.scene.objects.link(actual_object)
            # Materials are applied only when exactly one was built per point.
            if len_points == len_material:
                actual_object.data.materials.append(material[i])
            index = index + 1
        # Remove the template sphere.
        bpy.ops.object.select_all(action='DESELECT')
        bpy.ops.object.select_pattern(
            pattern='vtk_import_root_object',
            case_sensitive=False, extend=False)
        bpy.ops.object.delete()
        bpy.context.scene.update()
        return {'FINISHED'}
    def invoke(self, context, event):
        """Open the file-select dialog; ``execute`` runs on confirmation."""
        context.window_manager.fileselect_add(self)
        return {'RUNNING_MODAL'}
def menu_func(self, context):
    """Append the importer entry to Blender's File > Import menu."""
    self.layout.operator(ImportVTK.bl_idname, text="VTK Importer")
def register():
    """Register the add-on's classes and hook the import-menu entry."""
    bpy.utils.register_module(__name__)
    bpy.types.INFO_MT_file_import.append(menu_func)
def unregister():
    """Undo :func:`register`: remove the classes and the menu entry."""
    bpy.utils.unregister_module(__name__)
    bpy.types.INFO_MT_file_import.remove(menu_func)
if __name__ == "__main__":
    # Allow running the add-on file directly (e.g. from Blender's text editor).
    register()
proposal/utils.py | coagulant/pyvideo.ru | 9 | 6622734 | <gh_stars>1-10
# coding: utf-8
import pathlib
def force_path(path):
    """
    Coerce *path* to a :class:`pathlib.Path` instance.

    :param path: A file path as a string or an existing ``Path``
        ('/tmp/foo', Path(/tmp/foo), etc); ``Path`` inputs are returned
        unchanged.
    """
    if isinstance(path, pathlib.Path):
        return path
    return pathlib.Path(path)
| # coding: utf-8
import pathlib
def force_path(path):
    """
    Coerce *path* to a :class:`pathlib.Path` instance.

    :param path: A file path as a string or an existing ``Path``
        ('/tmp/foo', Path(/tmp/foo), etc); ``Path`` inputs are returned
        unchanged.
    """
    if isinstance(path, pathlib.Path):
        return path
    return pathlib.Path(path)
tests/unit/test_list_of_lists.py | csurfer/ezread | 3 | 6622735 | <reponame>csurfer/ezread
import pytest
from ezread import EzReader
from json.decoder import JSONDecodeError
@pytest.mark.parametrize(
    "template, expected_rows",
    [
        ("""[0, 1, 2]""", [[1, 2, 3], [2, 4, 6], [3, 6, 9]]),
        ("""[0, 9]""", [[1, 10], [2, 20], [3, 30]]),
    ],
)
def test_read(list_of_lists_json, template, expected_rows):
    """Index templates select the listed positions from every inner list."""
    er = EzReader(template)
    assert expected_rows == er.read(list_of_lists_json)
@pytest.mark.parametrize(
    "template",
    [
        """[0, 1, 20]""", # 20 being the missing index.
        """[0, 11]""", # 11 being the missing index.
    ],
)
def test_missingkeys_strict_read(list_of_lists_json, template):
    """Default (strict) mode raises IndexError for out-of-range indices."""
    er = EzReader(template)
    with pytest.raises(IndexError):
        er.read(list_of_lists_json)
@pytest.mark.parametrize(
    "template, expected_rows",
    [
        ("""[0, 1, 20]""", [[1, 2, None], [2, 4, None], [3, 6, None]]),
        ("""[0, 11]""", [[1, None], [2, None], [3, None]]),
    ],
)
def test_missingkeys_nonstrict_read(list_of_lists_json, template, expected_rows):
    """With strict=False, out-of-range indices are filled with None."""
    er = EzReader(template, strict=False)
    assert expected_rows == er.read(list_of_lists_json)
| import pytest
from ezread import EzReader
from json.decoder import JSONDecodeError
@pytest.mark.parametrize(
    "template, expected_rows",
    [
        ("""[0, 1, 2]""", [[1, 2, 3], [2, 4, 6], [3, 6, 9]]),
        ("""[0, 9]""", [[1, 10], [2, 20], [3, 30]]),
    ],
)
def test_read(list_of_lists_json, template, expected_rows):
    """Index templates select the listed positions from every inner list."""
    er = EzReader(template)
    assert expected_rows == er.read(list_of_lists_json)
@pytest.mark.parametrize(
    "template",
    [
        """[0, 1, 20]""", # 20 being the missing index.
        """[0, 11]""", # 11 being the missing index.
    ],
)
def test_missingkeys_strict_read(list_of_lists_json, template):
    """Default (strict) mode raises IndexError for out-of-range indices."""
    er = EzReader(template)
    with pytest.raises(IndexError):
        er.read(list_of_lists_json)
@pytest.mark.parametrize(
    "template, expected_rows",
    [
        ("""[0, 1, 20]""", [[1, 2, None], [2, 4, None], [3, 6, None]]),
        ("""[0, 11]""", [[1, None], [2, None], [3, None]]),
    ],
)
def test_missingkeys_nonstrict_read(list_of_lists_json, template, expected_rows):
    """With strict=False, out-of-range indices are filled with None."""
    er = EzReader(template, strict=False)
    assert expected_rows == er.read(list_of_lists_json)
fastface/dataset/__init__.py | mdornseif/fastface | 72 | 6622736 | from .base import BaseDataset
from .fddb import FDDBDataset
from .widerface import WiderFaceDataset
# Public dataset API re-exported at package level.
__all__ = [
    "BaseDataset",
    "FDDBDataset",
    "WiderFaceDataset",
]
| from .base import BaseDataset
from .fddb import FDDBDataset
from .widerface import WiderFaceDataset
# Public dataset API re-exported at package level.
__all__ = [
    "BaseDataset",
    "FDDBDataset",
    "WiderFaceDataset",
]
| none | 1 | 1.074352 | 1 | |
RaspPi Setup/setup.py | felmoreno1726/quitIT | 2 | 6622737 | <gh_stars>1-10
import subprocess
import time
#bashCommand1 = 'raspistill -o test.jpeg'
# Shell helpers: setup2.sh captures/classifies an image into out.txt;
# setup.sh and setup3.sh run follow-up actions.
bashCommand2 = 'bash setup2.sh'
bashCommand3 = 'bash setup.sh'
bashCommand4 = 'rm out.txt'
bashCommand5 = 'sleep 10m'
bashCommand6 = 'bash setup3.sh'
# 30 polling rounds; each round ends with a 10-minute sleep (bashCommand5).
for i in range(30):
    #process1 = subprocess.Popen(bashCommand1.split(), stdout=subprocess.PIPE)
    #output1, error1 = process1.communicate()
    process2 = subprocess.Popen(bashCommand2.split(), stdout=subprocess.PIPE)
    output2, error2 = process2.communicate()
    time.sleep(10)
    print("Done IBM")
    with open('out.txt') as myfile:
        content = myfile.read()
    # NOTE(review): assumes the 18th double-quoted token of the classifier
    # output is the label -- fragile; confirm against the real out.txt format.
    print(content.split('"')[17])
    if(content.split('"')[17] == 'smoking'):
        print("Smoking!")
        process4 = subprocess.Popen(bashCommand6.split(), stdout=subprocess.PIPE)
        output4, error4 = process4.communicate()
    process3 = subprocess.Popen(bashCommand3.split(), stdout=subprocess.PIPE)
    output3, error3 = process3.communicate()
    time.sleep(10)
    process4 = subprocess.Popen(bashCommand4.split(), stdout=subprocess.PIPE)
    output4, error4 = process4.communicate()
    process5 = subprocess.Popen(bashCommand5.split(), stdout=subprocess.PIPE)
output5, error5 = process5.communicate() | import subprocess
import time
#bashCommand1 = 'raspistill -o test.jpeg'
# Shell helpers: setup2.sh captures/classifies an image into out.txt;
# setup.sh and setup3.sh run follow-up actions.
bashCommand2 = 'bash setup2.sh'
bashCommand3 = 'bash setup.sh'
bashCommand4 = 'rm out.txt'
bashCommand5 = 'sleep 10m'
bashCommand6 = 'bash setup3.sh'
# 30 polling rounds; each round ends with a 10-minute sleep (bashCommand5).
for i in range(30):
    #process1 = subprocess.Popen(bashCommand1.split(), stdout=subprocess.PIPE)
    #output1, error1 = process1.communicate()
    process2 = subprocess.Popen(bashCommand2.split(), stdout=subprocess.PIPE)
    output2, error2 = process2.communicate()
    time.sleep(10)
    print("Done IBM")
    with open('out.txt') as myfile:
        content = myfile.read()
    # NOTE(review): assumes the 18th double-quoted token of the classifier
    # output is the label -- fragile; confirm against the real out.txt format.
    print(content.split('"')[17])
    if(content.split('"')[17] == 'smoking'):
        print("Smoking!")
        process4 = subprocess.Popen(bashCommand6.split(), stdout=subprocess.PIPE)
        output4, error4 = process4.communicate()
    process3 = subprocess.Popen(bashCommand3.split(), stdout=subprocess.PIPE)
    output3, error3 = process3.communicate()
    time.sleep(10)
    process4 = subprocess.Popen(bashCommand4.split(), stdout=subprocess.PIPE)
    output4, error4 = process4.communicate()
    process5 = subprocess.Popen(bashCommand5.split(), stdout=subprocess.PIPE)
output5, error5 = process5.communicate() | en | 0.247009 | #bashCommand1 = 'raspistill -o test.jpeg' #process1 = subprocess.Popen(bashCommand1.split(), stdout=subprocess.PIPE) #output1, error1 = process1.communicate() | 2.335463 | 2 |
test/bolt_src/data_preparation.py | Jitesh17/classification | 1 | 6622738 |
# In[]:
import pyjeasy.file_utils as f
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
from sys import exit as x
import torch
import torch.nn as nn
import cv2
import matplotlib.pyplot as plt
import torchvision
from torch.utils.data import Dataset, DataLoader, ConcatDataset
from torchvision import transforms
import copy
from tqdm import tqdm
from PIL import Image
import glob
# get_ipython().run_line_magic('matplotlib', 'inline')
# In[]:
# Global notebook configuration.
IMG_SIZE = 512  # square side used by the torchvision transforms
batch_size = 8
DATA_TYPES = ["train", "val", "test"]  # split directory names
# dataset_path = "/home/jitesh/3d/data/UE_training_results/bolt2/bolt_cropped"
# b_type_list = [dir for dir in os.listdir(dataset_path) if dir not in DATA_TYPES]
# print(b_type_list)
# # Required once start
# In[]:
def convert_4ch_to_3ch(dataset_path, split_ratio=(0.8, 0.1, 0.1)):
    """Split per-category PNGs under *dataset_path* into train/val/test lists.

    Every sub-directory of ``dataset_path`` that is not already one of the
    split directories (module-level ``DATA_TYPES``) is treated as a category;
    its PNGs are shuffled and partitioned according to *split_ratio*.

    Args:
        dataset_path: Root directory with one sub-directory per bolt category.
        split_ratio: (train, val, test) fractions; must sum to 1.

    Returns:
        dict mapping each entry of ``DATA_TYPES`` to its list of image paths.
        (The original built this dict but never returned it.)

    Raises:
        ValueError: if the ratios do not sum to 1.
    """
    import random
    import math
    # Avoid shadowing the builtin `dir` (original used it as the loop name).
    b_type_list = [entry for entry in os.listdir(dataset_path) if entry not in DATA_TYPES]
    img_path_list = {data_type: [] for data_type in DATA_TYPES}
    # Bug fix: the original asserted `1 == 0.8 + 0.1 + 0.1`, which is an exact
    # float comparison and is not guaranteed to hold; compare with a tolerance.
    if not math.isclose(sum(split_ratio), 1.0, rel_tol=0.0, abs_tol=1e-9):
        raise ValueError(f"split_ratio must sum to 1, got {split_ratio}")
    s1 = split_ratio[0]
    s2 = split_ratio[0] + split_ratio[1]
    for b_type in b_type_list:
        data = glob.glob(f'{dataset_path}/{b_type}/*.png')
        # Shuffle so each split gets a random sample of the category.
        random.shuffle(data)
        img_path_list["train"] += data[:int(len(data)*s1)]
        img_path_list["val"] += data[int(len(data)*s1):int(len(data)*s2)]
        img_path_list["test"] += data[int(len(data)*s2):]
    print(f'len(train_data): {len(img_path_list["train"])}')
    print(f'len(val_data): {len(img_path_list["val"])}')
    print(f'len(test_data): {len(img_path_list["test"])}')
    return img_path_list
# In[ ]:
import pyjeasy.file_utils as f
import cv2
# Copy the split images into train/val/test directories, converting each
# 4-channel PNG to 3-channel BGR on the way.
# NOTE(review): depends on `dataset_path` and `img_path_list` from earlier
# notebook cells (`dataset_path` is commented out above), so this cell does
# not run standalone.
filename_list = dict()
for data_type in DATA_TYPES:
    dirname_new = os.path.join(dataset_path, data_type)
    f.make_dir_if_not_exists(dirname_new)
    for file_path in tqdm(img_path_list[data_type]):
        # Build "<category>_<original name>" so files from different
        # category folders do not collide in the shared split directory.
        file_path_split = file_path.split("/")
        dirname_old = file_path_split[-2].split("_")[0]
        filename_old = file_path_split[-1]
        # filename_new = dirname_old.replace("b", "") + "_" + filename_old
        filename_new = dirname_old + "_" + filename_old
        output_img_path = os.path.join(dirname_new, filename_new)
        f.delete_file_if_exists(output_img_path)
        # Converting 4 channel to 3 channel and then writing in different folder.
        # NOTE(review): cv2.imread without IMREAD_UNCHANGED already yields a
        # 3-channel image, so cvtColor(..., COLOR_BGRA2BGR) presumably needs a
        # 4-channel read to work as intended -- verify.
        img = cv2.imread(file_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
        cv2.imwrite(output_img_path, img)
        # f.copy_file(src_path=file_path, dst_path=output_img_path, verbose=False)
    filename_list[data_type] = os.listdir(dirname_new)
# train_files = os.listdir(TRAIN_IMG_DIR_PATH)
# test_files = os.listdir(TEST_IMG_DIR_PATH)
# # Required once ends
# In[6]:
# filename_list = dict()
# for data_type in DATA_TYPES:
# dirname_new = os.path.join(dataset_path, data_type)
# filename_list[data_type] = os.listdir(dirname_new)
# In[6]:
class BoltDataset(Dataset):
    """Image dataset over a flat list of bolt-crop filenames in one directory.

    NOTE(review): in 'train' mode a single label for the WHOLE dataset is
    derived from the first filename (0 when it contains 'b00', else 1);
    ``get_dataset`` builds one BoltDataset per category and concatenates
    them, which is presumably why this works -- confirm.
    """
    def __init__(self, file_list, dir, mode='train', transform = None):
        self.file_list = file_list
        self.dir = dir
        self.mode= mode
        self.transform = transform
        # print(self.file_list[0])
        if self.mode == 'train':
            # if 'b00' in self.file_list[0]:
            # print(self.file_list[0])
            if 'b00' in self.file_list[0]:
                self.label = 0
            else:
                self.label = 1
    def __len__(self):
        return len(self.file_list)
    def __getitem__(self, idx):
        # Returns (image, label) in 'train' mode, (image, filename) otherwise.
        img = Image.open(os.path.join(self.dir, self.file_list[idx]))
        if self.transform:
            img = self.transform(img)
        if self.mode == 'train':
            img = img.numpy()
            return img.astype('float32'), self.label
        else:
            img = img.numpy()
            return img.astype('float32'), self.file_list[idx]
# data_transform = transforms.Compose([
# transforms.Resize(256),
# transforms.ColorJitter(),
# transforms.RandomCrop(224),
# transforms.RandomHorizontalFlip(),
# transforms.Resize(128),
# transforms.ToTensor()
# ])
def get_dataset(dataset_path: str, b_type_list: list, img_size: int) -> dict:
    """Build train/val/test datasets from the split directories.

    One ``BoltDataset`` is created per bolt category (so each derives its own
    label from its first filename) and the per-category datasets are
    concatenated. Training data gets augmentation (colour jitter, random
    flip); val/test only get resize/crop.
    """
    dataset = dict()
    filename_list = dict()
    # Collect the filenames present in each split directory.
    for data_type in DATA_TYPES:
        dirname_new = os.path.join(dataset_path, data_type)
        filename_list[data_type] = os.listdir(dirname_new)
    for data_type in DATA_TYPES:
        dir_path = os.path.join(dataset_path, data_type)
        if data_type =="train":
            data_transform = transforms.Compose([
                transforms.Resize(img_size),
                transforms.ColorJitter(),
                transforms.RandomCrop(int(img_size*1)),
                transforms.RandomHorizontalFlip(),
                transforms.Resize(img_size),
                transforms.ToTensor()
            ])
        else:
            # No augmentation for validation/test data.
            data_transform = transforms.Compose([
                transforms.Resize((img_size)),
                transforms.RandomCrop(int(img_size*1)),
                # transforms.RandomHorizontalFlip(),
                transforms.Resize(img_size),
                transforms.ToTensor()
            ])
        # One BoltDataset per category; filenames are prefixed with the
        # category name (see the conversion cell above), hence the substring
        # membership test.
        catagory_data = dict()
        for b_type in b_type_list:
            cat_files = [tf for tf in filename_list[data_type] if b_type in tf]
            catagory_data[b_type] = BoltDataset(cat_files, dir_path, mode=data_type, transform = data_transform)
        dataset[data_type] = ConcatDataset([c for c in catagory_data.values()])
        print(f'len({data_type}_data): {len(dataset[data_type])}')
    return dataset
# In[10]:
def mmmm():
    """Ad-hoc visualisation cell: show a grid of training samples.

    NOTE(review): relies on module-level ``dataset`` and ``batch_size``
    globals being defined by earlier notebook cells.
    """
    dataloader = DataLoader(dataset["train"], batch_size=batch_size, shuffle=True, num_workers=1)
    print("len dataloader", len(dataloader))
    show_n_images = 40
    # Bug fix: `iter(...).next()` is Python 2 syntax and raises
    # AttributeError on Python 3 (this file uses f-strings, so it is
    # Python 3) -- use the builtin next().
    samples, labels = next(iter(dataloader))
    plt.figure(figsize=(16*2, 24))
    grid_imgs = torchvision.utils.make_grid(samples[:show_n_images])
    np_grid_imgs = grid_imgs.numpy()
    # Tensors are (channels, height, width); matplotlib expects
    # (height, width, channels), hence the transpose.
    plt.imshow(np.transpose(np_grid_imgs, (1, 2, 0)))
# In[]:
# In[]:
# In[]:
# dataiter = iter(trainloader)
# images, labels = dataiter.next()
# imshow(torchvision.utils.make_grid(images))
# In[]:
# Run the 4->3 channel conversion and train/val/test split for the bolt3 set.
convert_4ch_to_3ch("/home/jitesh/3d/data/UE_training_results/bolt3/bolt_cropped")
# %%
|
# In[]:
import pyjeasy.file_utils as f
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
from sys import exit as x
import torch
import torch.nn as nn
import cv2
import matplotlib.pyplot as plt
import torchvision
from torch.utils.data import Dataset, DataLoader, ConcatDataset
from torchvision import transforms
import copy
from tqdm import tqdm
from PIL import Image
import glob
# get_ipython().run_line_magic('matplotlib', 'inline')
# In[]:
# Global notebook configuration.
IMG_SIZE = 512  # square side used by the torchvision transforms
batch_size = 8
DATA_TYPES = ["train", "val", "test"]  # split directory names
# dataset_path = "/home/jitesh/3d/data/UE_training_results/bolt2/bolt_cropped"
# b_type_list = [dir for dir in os.listdir(dataset_path) if dir not in DATA_TYPES]
# print(b_type_list)
# # Required once start
# In[]:
def convert_4ch_to_3ch(dataset_path, split_ratio=(0.8, 0.1, 0.1)):
    """Split per-category PNGs under *dataset_path* into train/val/test lists.

    Every sub-directory of ``dataset_path`` that is not already one of the
    split directories (module-level ``DATA_TYPES``) is treated as a category;
    its PNGs are shuffled and partitioned according to *split_ratio*.

    Args:
        dataset_path: Root directory with one sub-directory per bolt category.
        split_ratio: (train, val, test) fractions; must sum to 1.

    Returns:
        dict mapping each entry of ``DATA_TYPES`` to its list of image paths.
        (The original built this dict but never returned it.)

    Raises:
        ValueError: if the ratios do not sum to 1.
    """
    import random
    import math
    # Avoid shadowing the builtin `dir` (original used it as the loop name).
    b_type_list = [entry for entry in os.listdir(dataset_path) if entry not in DATA_TYPES]
    img_path_list = {data_type: [] for data_type in DATA_TYPES}
    # Bug fix: the original asserted `1 == 0.8 + 0.1 + 0.1`, which is an exact
    # float comparison and is not guaranteed to hold; compare with a tolerance.
    if not math.isclose(sum(split_ratio), 1.0, rel_tol=0.0, abs_tol=1e-9):
        raise ValueError(f"split_ratio must sum to 1, got {split_ratio}")
    s1 = split_ratio[0]
    s2 = split_ratio[0] + split_ratio[1]
    for b_type in b_type_list:
        data = glob.glob(f'{dataset_path}/{b_type}/*.png')
        # Shuffle so each split gets a random sample of the category.
        random.shuffle(data)
        img_path_list["train"] += data[:int(len(data)*s1)]
        img_path_list["val"] += data[int(len(data)*s1):int(len(data)*s2)]
        img_path_list["test"] += data[int(len(data)*s2):]
    print(f'len(train_data): {len(img_path_list["train"])}')
    print(f'len(val_data): {len(img_path_list["val"])}')
    print(f'len(test_data): {len(img_path_list["test"])}')
    return img_path_list
# In[ ]:
import pyjeasy.file_utils as f
import cv2
# Copy the split images into train/val/test directories, converting each
# 4-channel PNG to 3-channel BGR on the way.
# NOTE(review): depends on `dataset_path` and `img_path_list` from earlier
# notebook cells (`dataset_path` is commented out above), so this cell does
# not run standalone.
filename_list = dict()
for data_type in DATA_TYPES:
    dirname_new = os.path.join(dataset_path, data_type)
    f.make_dir_if_not_exists(dirname_new)
    for file_path in tqdm(img_path_list[data_type]):
        # Build "<category>_<original name>" so files from different
        # category folders do not collide in the shared split directory.
        file_path_split = file_path.split("/")
        dirname_old = file_path_split[-2].split("_")[0]
        filename_old = file_path_split[-1]
        # filename_new = dirname_old.replace("b", "") + "_" + filename_old
        filename_new = dirname_old + "_" + filename_old
        output_img_path = os.path.join(dirname_new, filename_new)
        f.delete_file_if_exists(output_img_path)
        # Converting 4 channel to 3 channel and then writing in different folder.
        # NOTE(review): cv2.imread without IMREAD_UNCHANGED already yields a
        # 3-channel image, so cvtColor(..., COLOR_BGRA2BGR) presumably needs a
        # 4-channel read to work as intended -- verify.
        img = cv2.imread(file_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
        cv2.imwrite(output_img_path, img)
        # f.copy_file(src_path=file_path, dst_path=output_img_path, verbose=False)
    filename_list[data_type] = os.listdir(dirname_new)
# train_files = os.listdir(TRAIN_IMG_DIR_PATH)
# test_files = os.listdir(TEST_IMG_DIR_PATH)
# # Required once ends
# In[6]:
# filename_list = dict()
# for data_type in DATA_TYPES:
# dirname_new = os.path.join(dataset_path, data_type)
# filename_list[data_type] = os.listdir(dirname_new)
# In[6]:
class BoltDataset(Dataset):
    """Torch dataset over one category folder of bolt images.

    In ``mode='train'`` every item shares a single class label derived from
    the FIRST file name ('b00' -> 0, anything else -> 1); callers build one
    BoltDataset per category and concatenate them. In any other mode the
    file name is returned in place of a label.
    """
    def __init__(self, file_list, dir, mode='train', transform = None):
        # file_list: image file names relative to *dir*
        # dir: directory containing the files
        # mode: 'train' yields (image, label); otherwise (image, file name)
        # transform: torchvision transform applied to the PIL image
        self.file_list = file_list
        self.dir = dir
        self.mode= mode
        self.transform = transform
        # print(self.file_list[0])
        if self.mode == 'train':
            # if 'b00' in self.file_list[0]:
            # print(self.file_list[0])
            # Whole-dataset label from the first file only — assumes all
            # files in file_list belong to the same category.
            if 'b00' in self.file_list[0]:
                self.label = 0
            else:
                self.label = 1
    def __len__(self):
        return len(self.file_list)
    def __getitem__(self, idx):
        img = Image.open(os.path.join(self.dir, self.file_list[idx]))
        if self.transform:
            img = self.transform(img)
        # NOTE(review): img.numpy() assumes the transform ends with
        # ToTensor(); without a transform this would fail on a PIL image.
        if self.mode == 'train':
            img = img.numpy()
            return img.astype('float32'), self.label
        else:
            img = img.numpy()
            return img.astype('float32'), self.file_list[idx]
# data_transform = transforms.Compose([
# transforms.Resize(256),
# transforms.ColorJitter(),
# transforms.RandomCrop(224),
# transforms.RandomHorizontalFlip(),
# transforms.Resize(128),
# transforms.ToTensor()
# ])
def get_dataset(dataset_path: str, b_type_list: list, img_size: int):
    """Build train/val/test datasets from the split folders under *dataset_path*.

    For each split, files are grouped by the category tokens in *b_type_list*
    (matched as substrings of the file name), wrapped in one BoltDataset per
    category, and concatenated.

    :param dataset_path: dataset root containing train/val/test folders
    :param b_type_list: category name tokens, e.g. ['b00', 'b01']
    :param img_size: target square image size for the transforms
    :return: dict mapping each entry of DATA_TYPES to a ConcatDataset
    """
    dataset = dict()
    filename_list = dict()
    for data_type in DATA_TYPES:
        dirname_new = os.path.join(dataset_path, data_type)
        filename_list[data_type] = os.listdir(dirname_new)
    for data_type in DATA_TYPES:
        dir_path = os.path.join(dataset_path, data_type)
        if data_type =="train":
            # Training pipeline includes augmentation (jitter, random crop/flip).
            data_transform = transforms.Compose([
                transforms.Resize(img_size),
                transforms.ColorJitter(),
                transforms.RandomCrop(int(img_size*1)),
                transforms.RandomHorizontalFlip(),
                transforms.Resize(img_size),
                transforms.ToTensor()
            ])
            # catagory_data = dict()
            # for b_type in ['b00', 'b01']: # b_type_list:
            # # print(filename_list)
            # cat_files = [tf for tf in filename_list[data_type] if b_type in tf]
            # catagory_data[b_type] = BoltDataset(cat_files, dir_path, mode=data_type, transform = data_transform)
            # dataset[data_type] = ConcatDataset([c for c in catagory_data.values()])
        else:
            # Eval pipeline: deterministic except for RandomCrop.
            # NOTE(review): RandomCrop in val/test makes evaluation
            # non-deterministic — confirm this is intended.
            data_transform = transforms.Compose([
                transforms.Resize((img_size)),
                transforms.RandomCrop(int(img_size*1)),
                # transforms.RandomHorizontalFlip(),
                transforms.Resize(img_size),
                transforms.ToTensor()
            ])
        # dataset[data_type] = BoltDataset(filename_list[data_type], dir_path, mode=data_type, transform = data_transform)
        catagory_data = dict()
        # for b_type in ['b00', 'b01']: # b_type_list:
        # for b_type in ['b10', 'b11']: # b_type_list:
        for b_type in b_type_list:
            # print(filename_list)
            cat_files = [tf for tf in filename_list[data_type] if b_type in tf]
            catagory_data[b_type] = BoltDataset(cat_files, dir_path, mode=data_type, transform = data_transform)
        dataset[data_type] = ConcatDataset([c for c in catagory_data.values()])
        print(f'len({data_type}_data): {len(dataset[data_type])}')
    return dataset
# In[10]:
def mmmm():
    """Visualise one training batch as an image grid.

    Builds a DataLoader over the module-level ``dataset['train']`` and shows
    the first 40 samples with matplotlib. Relies on the globals ``dataset``
    and ``batch_size`` being defined before the call.
    """
    # batch_size = 2
    dataloader = DataLoader(dataset["train"], batch_size = batch_size, shuffle=True, num_workers=1)
    # dataloader = DataLoader(catdogs, batch_size = 32, shuffle=True, num_workers=4)
    print("len dataloader", len(dataloader))
    show_n_images = 40
    # ``iterator.next()`` is Python 2 only (removed in Python 3); use the
    # builtin next() instead.
    samples, labels = next(iter(dataloader))
    plt.figure(figsize=(16*2,24))
    grid_imgs = torchvision.utils.make_grid(samples[:show_n_images])
    np_grid_imgs = grid_imgs.numpy()
    # Tensors are (channels, height, width); matplotlib expects
    # (height, width, channels).
    plt.imshow(np.transpose(np_grid_imgs, (1,2,0)))
# In[]:
# In[]:
# In[]:
# dataiter = iter(trainloader)
# images, labels = dataiter.next()
# imshow(torchvision.utils.make_grid(images))
# In[]:
# Script entry point: split and convert the bolt3 dataset in place.
convert_4ch_to_3ch("/home/jitesh/3d/data/UE_training_results/bolt3/bolt_cropped")
# %%
| en | 0.404859 | # In[]: # linear algebra # data processing, CSV file I/O (e.g. pd.read_csv) # get_ipython().run_line_magic('matplotlib', 'inline') # In[]: # dataset_path = "/home/jitesh/3d/data/UE_training_results/bolt2/bolt_cropped" # b_type_list = [dir for dir in os.listdir(dataset_path) if dir not in DATA_TYPES] # print(b_type_list) # # Required once start # In[]: # train_data.append(data[:int(len(data)*0.8)]) # In[ ]: # filename_new = dirname_old.replace("b", "") + "_" + filename_old # Converting 4 channel to 3 channel and then writing in different folder # f.copy_file(src_path=file_path, dst_path=output_img_path, verbose=False) # train_files = os.listdir(TRAIN_IMG_DIR_PATH) # test_files = os.listdir(TEST_IMG_DIR_PATH) # # Required once ends # In[6]: # filename_list = dict() # for data_type in DATA_TYPES: # dirname_new = os.path.join(dataset_path, data_type) # filename_list[data_type] = os.listdir(dirname_new) # In[6]: # print(self.file_list[0]) # if 'b00' in self.file_list[0]: # print(self.file_list[0]) # data_transform = transforms.Compose([ # transforms.Resize(256), # transforms.ColorJitter(), # transforms.RandomCrop(224), # transforms.RandomHorizontalFlip(), # transforms.Resize(128), # transforms.ToTensor() # ]) # catagory_data = dict() # for b_type in ['b00', 'b01']: # b_type_list: # # print(filename_list) # cat_files = [tf for tf in filename_list[data_type] if b_type in tf] # catagory_data[b_type] = BoltDataset(cat_files, dir_path, mode=data_type, transform = data_transform) # dataset[data_type] = ConcatDataset([c for c in catagory_data.values()]) # transforms.RandomHorizontalFlip(), # dataset[data_type] = BoltDataset(filename_list[data_type], dir_path, mode=data_type, transform = data_transform) # for b_type in ['b00', 'b01']: # b_type_list: # for b_type in ['b10', 'b11']: # b_type_list: # print(filename_list) # In[10]: # batch_size = 2 # dataloader = DataLoader(catdogs, batch_size = 32, shuffle=True, num_workers=4) # In[30]: # in tensor, image is 
(batch, width, height), so you have to transpose it to (width, height, batch) in numpy to show it. # In[]: # In[]: # In[]: # dataiter = iter(trainloader) # images, labels = dataiter.next() # imshow(torchvision.utils.make_grid(images)) # In[]: # %% | 2.193609 | 2 |
build/lib/affiliations/templates/utils.py | salimm/django-affiliations | 3 | 6622739 | <gh_stars>1-10
from django.core.mail import send_mail
def createEmailBody(request,aff):
    """Build the plain-text confirmation email body.

    :param request: current request; ``request.user`` supplies the display name
    :param aff: affiliation record providing ``org`` and ``token``
    :return: the message body as a single string
    """
    # NOTE(review): the URL spells "affiliaitons" — likely a typo in the
    # route name; confirm against urls.py before changing it.
    tmp = """Dear """+request.user.get_full_name()+"""
Your confirmation code to confirm your affiliation at """ +aff.org+""" is:
"""+aff.token+"""
Use this code to confirm your affiliation at
http://pittreview/affiliaitons
Sincerely,
Admin
"""
    return tmp
def createEmailBodyHtml(request,aff):
    """Build the HTML confirmation email body (same content as the
    plain-text variant, with simple markup).

    :param request: current request; ``request.user`` supplies the display name
    :param aff: affiliation record providing ``org`` and ``token``
    :return: the HTML message body as a single string
    """
    # NOTE(review): same "affiliaitons" spelling as the plain-text body —
    # keep the two in sync if the route name is ever corrected.
    tmp = """Dear """+request.user.get_full_name()+"""<br/>
Your confirmation code to confirm your affiliation at <b>""" +aff.org+"""</b> is:
<br/>
<b>"""+aff.token+"""</b><br/>
<br/>
Use this code to confirm your affiliation at
<br/><br/>
<a href="http://pittreview/affiliaitons">http://pittreview/affiliaitons</a>
<br/><br/>
Sincerely,<br/>
Admin<br/>
"""
    return tmp
def sendEmail(request,aff):
    """Email the affiliation confirmation code to the user's address.

    Builds a multipart (plain + HTML) message and sends it through the
    Mandrill SMTP relay, using credentials taken from environment variables.

    :param request: current request; forwarded to the body builders
    :param aff: affiliation record providing ``account``, ``org`` and ``token``
    :raises KeyError: if the credential environment variables are not set
    :raises smtplib.SMTPException: on login/delivery failure
    """
    # Local imports: the original referenced smtplib/MIMEText/os/MIMEMultipart
    # without importing them anywhere in this module.
    import os
    import smtplib
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText

    body = createEmailBody(request,aff)
    htmlbody = createEmailBodyHtml(request,aff)
    email = aff.account+"@"+aff.org
    subject="Confirmation Code for Pittreview"
    fromemail ="PittReview <<EMAIL>>"
    # Bug fix: ``msg`` was assigned-to before it was ever created (NameError).
    # Build the multipart container first, then fill in the headers.
    msg = MIMEMultipart('alternative')
    msg['Subject'] = subject
    msg['From'] = "PittReview <<EMAIL>>"
    msg['To'] = email
    part1 = MIMEText(body, 'plain')
    part2 = MIMEText(htmlbody, 'html')
    msg.attach(part1)
    msg.attach(part2)
    username = os.environ['<EMAIL>']
    password = os.environ['<PASSWORD>']
    s = smtplib.SMTP('smtp.mandrillapp.com', 587)
    try:
        s.login(username, password)
        s.sendmail(msg['From'], msg['To'], msg.as_string())
    finally:
        # Close the connection even if login/sending fails.
        s.quit()
    # send_mail(subject, body, fromemail,[email], fail_silently=False)
| from django.core.mail import send_mail
def createEmailBody(request,aff):
tmp = """Dear """+request.user.get_full_name()+"""
Your confirmation code to confirm your affiliation at """ +aff.org+""" is:
"""+aff.token+"""
Use this code to confirm your affiliation at
http://pittreview/affiliaitons
Sincerely,
Admin
"""
return tmp
def createEmailBodyHtml(request,aff):
tmp = """Dear """+request.user.get_full_name()+"""<br/>
Your confirmation code to confirm your affiliation at <b>""" +aff.org+"""</b> is:
<br/>
<b>"""+aff.token+"""</b><br/>
<br/>
Use this code to confirm your affiliation at
<br/><br/>
<a href="http://pittreview/affiliaitons">http://pittreview/affiliaitons</a>
<br/><br/>
Sincerely,<br/>
Admin<br/>
"""
return tmp
def sendEmail(request,aff):
body = createEmailBody(request,aff)
htmlbody = createEmailBodyHtml(request,aff)
email = aff.account+"@"+aff.org
subject="Confirmation Code for Pittreview"
fromemail ="PittReview <<EMAIL>>"
msg['Subject'] = subject
msg['From'] = "PittReview <<EMAIL>>"
msg['To'] = email
text = body
part1 = MIMEText(text, 'plain')
html = htmlbody
part2 = MIMEText(html, 'html')
username = os.environ['<EMAIL>']
password = os.environ['<PASSWORD>']
msg.attach(part1)
msg.attach(part2)
s = smtplib.SMTP('smtp.mandrillapp.com', 587)
s.login(username, password)
s.sendmail(msg['From'], msg['To'], msg.as_string())
s.quit()
# send_mail(subject, body, fromemail,[email], fail_silently=False) | en | 0.541823 | Dear Your confirmation code to confirm your affiliation at is: Use this code to confirm your affiliation at http://pittreview/affiliaitons Sincerely, Admin Dear <br/> Your confirmation code to confirm your affiliation at <b> </b> is: <br/> <b> </b><br/> <br/> Use this code to confirm your affiliation at <br/><br/> <a href="http://pittreview/affiliaitons">http://pittreview/affiliaitons</a> <br/><br/> Sincerely,<br/> Admin<br/> # send_mail(subject, body, fromemail,[email], fail_silently=False) | 2.638166 | 3 |
pepys_import/core/validators/enhanced_validator.py | debrief/pepys-import | 4 | 6622740 | from pepys_import.core.formats import unit_registry
from pepys_import.utils.unit_utils import (
acceptable_bearing_error,
bearing_between_two_points,
distance_between_two_points_haversine,
)
class EnhancedValidator:
    """Enhanced validator serve to verify the lat/long, in addition to the course/speed/heading"""

    def __init__(self):
        self.name = "Enhanced Validator"

    def validate(self, current_object, errors, parser_name, prev_object=None):
        """Cross-check the measurement's heading/course/speed against what its
        location history implies; append any discrepancies to *errors*.

        :return: True if no new error was recorded, False otherwise
        """
        orig_errors_length = len(errors)
        error_type = (
            f"{parser_name} - {self.name} Error on Timestamp:"
            f"{str(current_object.time)}, Sensor:"
            f"{current_object.sensor_name}, Platform:{current_object.platform_name}"
        )
        # Each field is optional on the measurement object; treat a missing
        # attribute as "not measured".
        try:
            heading = current_object.heading
        except AttributeError:
            heading = None
        try:
            course = current_object.course
        except AttributeError:
            course = None
        try:
            speed = current_object.speed
        except AttributeError:
            speed = None
        try:
            location = current_object.location
        except AttributeError:
            location = None
        if (
            speed == 0.0 * (unit_registry.metre / unit_registry.second)
            and course == 0.0 * unit_registry.radian
        ):
            print("Both course and speed are exactly zero. Skipping the enhanced validator...")
            return True
        # Doesn't need a try-catch as time is a compulsory field, created
        # when a state is initialised
        time = current_object.time
        if prev_object:
            try:
                prev_location = prev_object.location
            except AttributeError:
                prev_location = None
            # Doesn't need a try-catch as time is a compulsory field, created
            # when a state is initialised
            prev_time = prev_object.time
            if location and prev_location:
                # Only calculate bearing from the locations and compare to heading
                # if the vessel has actually moved between the prev_location and the new location
                if location != prev_location:
                    self.course_heading_loose_match_with_location(
                        location, prev_location, heading, course, errors, error_type
                    )
                calculated_time = self.calculate_time(time, prev_time)
                if calculated_time != 0:
                    self.speed_loose_match_with_location(
                        location,
                        prev_location,
                        speed,
                        calculated_time,
                        errors,
                        error_type,
                    )
        if len(errors) > orig_errors_length:
            return False
        else:
            return True

    @staticmethod
    def course_heading_loose_match_with_location(
        curr_location, prev_location, heading, course, errors, error_type
    ):
        """Loosely matches the course and heading values with the bearing between two location
        points.

        :param curr_location: Point of the current location of the object
        :type curr_location: Location
        :param prev_location: Point o the previous location of the object
        :type prev_location: Location
        :param heading: Heading of the object (In degrees)
        :type heading: Quantity
        :param course: Course of the object (In degrees)
        :type course: Quantity
        :param errors: Error List to save value error if it raises
        :type errors: List
        :param error_type: Type of error
        :type error_type: String
        :return: True if there is no error, False otherwise
        :rtype: bool
        """
        number_of_errors = len(errors)
        bearing = bearing_between_two_points(prev_location, curr_location)
        delta = 90
        if heading:
            heading_in_degrees = heading.to(unit_registry.degree)
            if not acceptable_bearing_error(heading_in_degrees, bearing, delta):
                errors.append(
                    {
                        error_type: f"Difference between Bearing ({bearing:.3f}) and "
                        f"Heading ({heading_in_degrees:.3f}) is more than {delta} degrees!"
                    }
                )
        if course:
            course_in_degrees = course.to(unit_registry.degree)
            if not acceptable_bearing_error(course_in_degrees, bearing, delta):
                errors.append(
                    {
                        error_type: f"Difference between Bearing ({bearing:.3f}) and "
                        f"Course ({course_in_degrees:.3f}) is more than {delta} degrees!"
                    }
                )
        # if not an error appended to the list, its length will be the same
        if number_of_errors == len(errors):
            return True
        return False

    @staticmethod
    def calculate_time(curr_time, prev_time):
        """Finds the difference between two Datetime objects, converts it to Quantity seconds

        :param curr_time: Timestamp of the current measurement object
        :type curr_time: Datetime
        :param prev_time: Timestamp of the previous measurement object
        :type prev_time: Datetime
        :return: Time difference (In seconds)
        :rtype: Quantity
        """
        diff = curr_time - prev_time
        # Bug fix: ``timedelta.seconds`` discards the ``.days`` component, so
        # gaps of a day or more (or negative gaps) reported a wildly wrong
        # elapsed time; total_seconds() accounts for the full difference.
        return diff.total_seconds() * unit_registry.seconds

    @staticmethod
    def speed_loose_match_with_location(
        curr_location, prev_location, speed, time, errors, error_type
    ):
        """Loosely matches the recorded speed with the calculated speed.

        :param curr_location: Point of the current location of the object
        :type curr_location: Location
        :param prev_location: Point of the previous location of the object
        :type prev_location: Location
        :param speed: Speed the object
        :type speed: Quantity
        :param time: Timestamp of the object
        :type time: Datetime
        :param errors: Error List to save value error if it raises
        :type errors: List
        :param error_type: Type of error
        :type error_type: String
        :return: True if there is no error, False otherwise
        :rtype: bool
        """
        distance = distance_between_two_points_haversine(prev_location, curr_location)
        calculated_speed = distance / time
        if speed is None:
            return True
        elif (speed / 10) <= calculated_speed <= (speed * 10):
            return True
        elif speed == 0.0 * (
            unit_registry.metre / unit_registry.second
        ) and calculated_speed <= 1.0 * (unit_registry.metre / unit_registry.second):
            return True
        errors.append(
            {
                error_type: f"Calculated speed ({calculated_speed:.3f}) is more than "
                f"the measured speed * 10 ({speed * 10:.3f})!"
            }
        )
        return False
| from pepys_import.core.formats import unit_registry
from pepys_import.utils.unit_utils import (
acceptable_bearing_error,
bearing_between_two_points,
distance_between_two_points_haversine,
)
class EnhancedValidator:
"""Enhanced validator serve to verify the lat/long, in addition to the course/speed/heading"""
def __init__(self):
self.name = "Enhanced Validator"
def validate(self, current_object, errors, parser_name, prev_object=None):
orig_errors_length = len(errors)
error_type = (
f"{parser_name} - {self.name} Error on Timestamp:"
f"{str(current_object.time)}, Sensor:"
f"{current_object.sensor_name}, Platform:{current_object.platform_name}"
)
try:
heading = current_object.heading
except AttributeError:
heading = None
try:
course = current_object.course
except AttributeError:
course = None
try:
speed = current_object.speed
except AttributeError:
speed = None
try:
location = current_object.location
except AttributeError:
location = None
if (
speed == 0.0 * (unit_registry.metre / unit_registry.second)
and course == 0.0 * unit_registry.radian
):
print("Both course and speed are exactly zero. Skipping the enhanced validator...")
return True
# Doesn't need a try-catch as time is a compulsory field, created
# when a state is initialised
time = current_object.time
if prev_object:
try:
prev_location = prev_object.location
except AttributeError:
prev_location = None
# Doesn't need a try-catch as time is a compulsory field, created
# when a state is initialised
prev_time = prev_object.time
if location and prev_location:
# Only calculate bearing from the locations and compare to heading
# if the vessel has actually moved between the prev_location and the new location
if location != prev_location:
self.course_heading_loose_match_with_location(
location, prev_location, heading, course, errors, error_type
)
calculated_time = self.calculate_time(time, prev_time)
if calculated_time != 0:
self.speed_loose_match_with_location(
location,
prev_location,
speed,
calculated_time,
errors,
error_type,
)
if len(errors) > orig_errors_length:
return False
else:
return True
@staticmethod
def course_heading_loose_match_with_location(
curr_location, prev_location, heading, course, errors, error_type
):
"""Loosely matches the course and heading values with the bearing between two location
points.
:param curr_location: Point of the current location of the object
:type curr_location: Location
:param prev_location: Point o the previous location of the object
:type prev_location: Location
:param heading: Heading of the object (In degrees)
:type heading: Quantity
:param course: Course of the object (In degrees)
:type course: Quantity
:param errors: Error List to save value error if it raises
:type errors: List
:param error_type: Type of error
:type error_type: String
:return: True if there is no error, False otherwise
:rtype: bool
"""
number_of_errors = len(errors)
bearing = bearing_between_two_points(prev_location, curr_location)
delta = 90
if heading:
heading_in_degrees = heading.to(unit_registry.degree)
if not acceptable_bearing_error(heading_in_degrees, bearing, delta):
errors.append(
{
error_type: f"Difference between Bearing ({bearing:.3f}) and "
f"Heading ({heading_in_degrees:.3f}) is more than {delta} degrees!"
}
)
if course:
course_in_degrees = course.to(unit_registry.degree)
if not acceptable_bearing_error(course_in_degrees, bearing, delta):
errors.append(
{
error_type: f"Difference between Bearing ({bearing:.3f}) and "
f"Course ({course_in_degrees:.3f}) is more than {delta} degrees!"
}
)
# if not an error appended to the list, its length will be the same
if number_of_errors == len(errors):
return True
return False
@staticmethod
def calculate_time(curr_time, prev_time):
"""Finds the difference between two Datetime objects, converts it to Quantity seconds
:param curr_time: Timestamp of the current measurement object
:type curr_time: Datetime
:param prev_time: Timestamp of the previous measurement object
:type prev_time: Datetime
:return: Time difference (In seconds)
:rtype: Quantity
"""
diff = curr_time - prev_time
return diff.seconds * unit_registry.seconds
@staticmethod
def speed_loose_match_with_location(
curr_location, prev_location, speed, time, errors, error_type
):
"""Loosely matches the recorded speed with the calculated speed.
:param curr_location: Point of the current location of the object
:type curr_location: Location
:param prev_location: Point of the previous location of the object
:type prev_location: Location
:param speed: Speed the object
:type speed: Quantity
:param time: Timestamp of the object
:type time: Datetime
:param errors: Error List to save value error if it raises
:type errors: List
:param error_type: Type of error
:type error_type: String
:return: True if there is no error, False otherwise
:rtype: bool
"""
distance = distance_between_two_points_haversine(prev_location, curr_location)
calculated_speed = distance / time
if speed is None:
return True
elif (speed / 10) <= calculated_speed <= (speed * 10):
return True
elif speed == 0.0 * (
unit_registry.metre / unit_registry.second
) and calculated_speed <= 1.0 * (unit_registry.metre / unit_registry.second):
return True
errors.append(
{
error_type: f"Calculated speed ({calculated_speed:.3f}) is more than "
f"the measured speed * 10 ({speed * 10:.3f})!"
}
)
return False
| en | 0.847283 | Enhanced validator serve to verify the lat/long, in addition to the course/speed/heading # Doesn't need a try-catch as time is a compulsory field, created # when a state is initialised # Doesn't need a try-catch as time is a compulsory field, created # when a state is initialised # Only calculate bearing from the locations and compare to heading # if the vessel has actually moved between the prev_location and the new location Loosely matches the course and heading values with the bearing between two location points. :param curr_location: Point of the current location of the object :type curr_location: Location :param prev_location: Point o the previous location of the object :type prev_location: Location :param heading: Heading of the object (In degrees) :type heading: Quantity :param course: Course of the object (In degrees) :type course: Quantity :param errors: Error List to save value error if it raises :type errors: List :param error_type: Type of error :type error_type: String :return: True if there is no error, False otherwise :rtype: bool # if not an error appended to the list, its length will be the same Finds the difference between two Datetime objects, converts it to Quantity seconds :param curr_time: Timestamp of the current measurement object :type curr_time: Datetime :param prev_time: Timestamp of the previous measurement object :type prev_time: Datetime :return: Time difference (In seconds) :rtype: Quantity Loosely matches the recorded speed with the calculated speed. 
:param curr_location: Point of the current location of the object :type curr_location: Location :param prev_location: Point of the previous location of the object :type prev_location: Location :param speed: Speed the object :type speed: Quantity :param time: Timestamp of the object :type time: Datetime :param errors: Error List to save value error if it raises :type errors: List :param error_type: Type of error :type error_type: String :return: True if there is no error, False otherwise :rtype: bool | 2.99622 | 3 |
benchmarks/bench_speed_faiss.py | luccaportes/DESlib | 310 | 6622741 | import gzip
import os
import shutil
import time
import urllib.request
import numpy as np
import pandas as pd
from sklearn.ensemble import BaggingClassifier
from sklearn.model_selection import train_test_split
from deslib.des.knora_e import KNORAE
def run_knorae(pool_classifiers, X_DSEL, y_DSEL, X_test, y_test, knn_type):
    """Fit a KNORA-E dynamic selector on the DSEL set and time its scoring.

    :param pool_classifiers: fitted pool of base classifiers
    :param X_DSEL: dynamic-selection (DSEL) features used to fit KNORA-E
    :param y_DSEL: DSEL labels
    :param X_test: evaluation features
    :param y_test: evaluation labels
    :param knn_type: region-of-competence backend, 'knn' or 'faiss'
    :return: (accuracy score, elapsed scoring time in seconds)
    """
    knorae = KNORAE(pool_classifiers=pool_classifiers,
                    knn_classifier=knn_type)
    knorae.fit(X_DSEL, y_DSEL)
    # Bug fix: time.clock() was removed in Python 3.8; perf_counter() is the
    # recommended monotonic replacement for elapsed-time measurement.
    start = time.perf_counter()
    score = knorae.score(X_test, y_test)
    end = time.perf_counter() - start
    return score, end
def fetch_HIGGS():
    """Download (if needed), extract and load the UCI HIGGS dataset.

    The gzipped CSV is cached as ../../HIGGS.gz and ../../HIGGS.csv so
    repeated runs skip the multi-gigabyte download and extraction.

    :return: (X, y) where X holds the feature columns and y the 0/1 label
    """
    url = "https://archive.ics.uci.edu/ml/machine-learning-databases/" \
          "00280/HIGGS.csv.gz"
    if not os.path.exists("../../HIGGS.csv"):
        print("Downloading HIGGS dataset from {}".format(url))
        if not os.path.exists("../../HIGGS.gz"):
            # Stream straight to disk: the archive is several GB, so reading
            # it fully into memory first (as before) is wasteful, and the
            # URL handle was previously never closed.
            with urllib.request.urlopen(url) as filedata, \
                    open('../../HIGGS.gz', 'wb') as f:
                shutil.copyfileobj(filedata, f)
            print("Finished downloading")
        print("Extracting HIGGS.gz")
        with gzip.open('../../HIGGS.gz', 'rb') as f:
            with open('../../HIGGS.csv', 'wb') as csv_out:
                shutil.copyfileobj(f, csv_out)
        print("Extracted csv")
    print('Reading CSV file')
    df = pd.read_csv('../../HIGGS.csv', header=None)
    data = df.values
    # Column 0 is the label; the remaining columns are features.
    X = data[:, 1:]
    y = data[:, 0]
    return X, y
if __name__ == "__main__":
rng = np.random.RandomState(123456)
print('Preparing dataset')
X, y = fetch_HIGGS()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,
random_state=rng)
X_DSEL, X_train, y_DSEL, y_train = train_test_split(X_train, y_train,
test_size=0.50,
random_state=rng)
pool_classifiers = BaggingClassifier(n_estimators=100,
random_state=rng,
n_jobs=-1)
print('Fitting base classifiers...')
pool_classifiers.fit(X_train, y_train)
n_samples = 1000000
num_of_test_inputs = [100, 1000, 10000]
for n_t in num_of_test_inputs:
print("running experiment: num_of_DSEL_samples: {}, "
"num_of_tests: {}".format(y_DSEL.size, n_t))
score_sklearn, time_sklearn = run_knorae(pool_classifiers,
X_DSEL[:n_samples],
y_DSEL[:n_samples],
X_test[:n_t],
y_test[:n_t],
knn_type='knn')
print("sklearn_knorae score = {}, time = {}".format(score_sklearn,
time_sklearn))
score_faiss, time_faiss = run_knorae(pool_classifiers,
X_DSEL[:n_samples],
y_DSEL[:n_samples],
X_test[:n_t],
y_test[:n_t],
knn_type='faiss')
print("faiss_knorae score = {}, time = {}".format(score_faiss,
time_faiss))
| import gzip
import os
import shutil
import time
import urllib.request
import numpy as np
import pandas as pd
from sklearn.ensemble import BaggingClassifier
from sklearn.model_selection import train_test_split
from deslib.des.knora_e import KNORAE
def run_knorae(pool_classifiers, X_DSEL, y_DSEL, X_test, y_test, knn_type):
knorae = KNORAE(pool_classifiers=pool_classifiers,
knn_classifier=knn_type)
knorae.fit(X_DSEL, y_DSEL)
start = time.clock()
score = knorae.score(X_test, y_test)
end = time.clock() - start
return score, end
def fetch_HIGGS():
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/" \
"00280/HIGGS.csv.gz"
if not os.path.exists("../../HIGGS.csv"):
print("Downloading HIGGS dataset from {}".format(url))
if not os.path.exists("../../HIGGS.gz"):
filedata = urllib.request.urlopen(url)
data2write = filedata.read()
with open('../../HIGGS.gz', 'wb') as f:
f.write(data2write)
print("Finished downloading")
print("Extracting HIGGS.gz")
if not os.path.exists("../../HIGGS.csv"):
with gzip.open('../../HIGGS.gz', 'rb') as f:
with open('../../HIGGS.csv', 'wb') as csv_out:
shutil.copyfileobj(f, csv_out)
print("Extracted csv")
print('Reading CSV file')
df = pd.read_csv('../../HIGGS.csv', header=None)
data = df.values
X = data[:, 1:]
y = data[:, 0]
return X, y
if __name__ == "__main__":
rng = np.random.RandomState(123456)
print('Preparing dataset')
X, y = fetch_HIGGS()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,
random_state=rng)
X_DSEL, X_train, y_DSEL, y_train = train_test_split(X_train, y_train,
test_size=0.50,
random_state=rng)
pool_classifiers = BaggingClassifier(n_estimators=100,
random_state=rng,
n_jobs=-1)
print('Fitting base classifiers...')
pool_classifiers.fit(X_train, y_train)
n_samples = 1000000
num_of_test_inputs = [100, 1000, 10000]
for n_t in num_of_test_inputs:
print("running experiment: num_of_DSEL_samples: {}, "
"num_of_tests: {}".format(y_DSEL.size, n_t))
score_sklearn, time_sklearn = run_knorae(pool_classifiers,
X_DSEL[:n_samples],
y_DSEL[:n_samples],
X_test[:n_t],
y_test[:n_t],
knn_type='knn')
print("sklearn_knorae score = {}, time = {}".format(score_sklearn,
time_sklearn))
score_faiss, time_faiss = run_knorae(pool_classifiers,
X_DSEL[:n_samples],
y_DSEL[:n_samples],
X_test[:n_t],
y_test[:n_t],
knn_type='faiss')
print("faiss_knorae score = {}, time = {}".format(score_faiss,
time_faiss))
| none | 1 | 2.713564 | 3 | |
op_bridge/__init__.py | xichennn/op_bridge | 3 | 6622742 | from __future__ absolute import
import sys
__future_module__ = True
if sys.version_info[0] < 3:
from Queue import *
else:
raise ImportError('This package shoud not be accessible on Python 3.')
| from __future__ absolute import
import sys
__future_module__ = True
if sys.version_info[0] < 3:
from Queue import *
else:
raise ImportError('This package shoud not be accessible on Python 3.')
| none | 1 | 1.644926 | 2 | |
dataset/regular.py | raminnakhli/Patch-to-Cell | 1 | 6622743 | import os
import numpy as np
from PIL import Image
class Regular:
    """Dataset layout description for the "regular" dataset variant.

    Holds the directory names the loader expects, plus small helpers for
    turning a file path into an instance name and for reading the instance /
    type channels of a saved ``.npy`` mask.
    """

    def __init__(self):
        # Sub-directory names inside the dataset root.
        self.input_image_dir_name = "Images"
        self.input_label_dir_name = "Labels"
        self.input_ihc_dir_name = "IHC"
        # No labels are skipped for this dataset variant.
        self.skip_labels = None
        self.labeling_type = 'ihc'
        # First instance id considered valid (presumably 0 marks
        # background — confirm against the loader).
        self.first_valid_instance = 1

    @staticmethod
    def get_instance_name_from_file_name(file_name):
        """Return the base name of *file_name* without its extension."""
        base_name = os.path.split(file_name)[1]
        return os.path.splitext(base_name)[0]

    @staticmethod
    def read_instance_mask(file_path):
        """Load channel 0 of the ``.npy`` mask at *file_path* as ints."""
        return np.load(file_path)[:, :, 0].astype(int)

    @staticmethod
    def read_type_mask(file_path):
        """Load channel 1 of the ``.npy`` mask at *file_path* as ints."""
        return np.load(file_path)[:, :, 1].astype(int)
| import os
import numpy as np
from PIL import Image
class Regular:
def __init__(self):
self.input_image_dir_name = "Images"
self.input_label_dir_name = "Labels"
self.input_ihc_dir_name = "IHC"
self.skip_labels = None
self.labeling_type = 'ihc'
self.first_valid_instance = 1
@staticmethod
def get_instance_name_from_file_name(file_name):
instance_name, _ = os.path.splitext(os.path.split(file_name)[1])
return instance_name
@staticmethod
def read_instance_mask(file_path):
mask = np.load(file_path)
return mask[:, :, 0].astype(int)
@staticmethod
def read_type_mask(file_path):
mask = np.load(file_path)
return mask[:, :, 1].astype(int)
| none | 1 | 2.819511 | 3 | |
libs/shutdown.py | wombatrace/phpweb | 36 | 6622744 | command = "/usr/bin/sudo /sbin/shutdown -h now"
import subprocess
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
output = process.communicate()[0]
print output | command = "/usr/bin/sudo /sbin/shutdown -h now"
import subprocess
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
output = process.communicate()[0]
print output | none | 1 | 1.995118 | 2 | |
byCourseLectureID/24_Dict-Exer/2.A_Miner_Task.py | shapeshifter789/Fund_PyFu | 0 | 6622745 | <reponame>shapeshifter789/Fund_PyFu
# Read mined-resource entries from stdin until the sentinel 'stop',
# then aggregate the quantities per resource and print the totals.
imp = ''
imp_list = list()
imp_dict = dict()
while imp != 'stop':
    imp = input()
    if imp != 'stop':
        imp_list.append(imp)
# Inputs alternate: resource name on even indices, integer amount on odd.
for i in range(0, len(imp_list), 2):
    key = imp_list[i]
    value = int(imp_list[i + 1])
    if key in imp_dict.keys():
        imp_dict[key] += value
    else:
        imp_dict[key] = value
# Dicts preserve insertion order (Python 3.7+), so totals print in the
# order each resource was first seen.
for key, value in imp_dict.items():
    print(key, '->', value)
| imp = ''
imp_list = list()
imp_dict = dict()
while imp != 'stop':
imp = input()
if imp != 'stop':
imp_list.append(imp)
for i in range(0, len(imp_list), 2):
key = imp_list[i]
value = int(imp_list[i + 1])
if key in imp_dict.keys():
imp_dict[key] += value
else:
imp_dict[key] = value
for key, value in imp_dict.items():
print(key, '->', value) | none | 1 | 3.457718 | 3 | |
old/UG/tests/blackbox/syndicate-httpd/PUT.py | jcnelson/syndicate | 16 | 6622746 | <reponame>jcnelson/syndicate
#!/usr/bin/python
# Blackbox test helper: issue a raw HTTP/1.0 PUT against a syndicate-httpd
# instance, optionally streaming a local file as the request body.
# This is a Python 2 script (print statements, urllib2).
#
# Usage: PUT.py <host> <port> <remote-path> [local-file]
import socket
import time
import sys
import urllib2
import os
import base64

# HTTP Basic credentials (anonymised placeholder).
auth = "<PASSWORD>:<PASSWORD>"

hostname = sys.argv[1]
port = int(sys.argv[2] )
filename = sys.argv[3]

data_fd = None
data_path = None
if len(sys.argv) > 4:
    data_path = sys.argv[4]
    data_fd = open( data_path, "r" )

# Remote paths ending in '/' are treated as directories (mode 0755);
# everything else gets a plain-file mode (0644).
mode = '0644'
if filename[-1] == '/':
    mode = '0755'

size = 0
if data_fd != None:
    size = os.stat(data_path).st_size

s = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
s.connect( (hostname, port) )

boundary = "AaBbCcDdEe"

# The request header is built by hand so the test controls the exact bytes
# sent on the wire (no client-library normalisation).
http_header = ""
http_header += "PUT %s HTTP/1.0\r\n" % filename
http_header += "Host: t510\r\n"
http_header += "Content-Type: application/octet-stream\r\n"
http_header += "Content-Length: %s\r\n" % size
http_header += "Authorization: Basic %s\r\n" % base64.b64encode(auth)
http_header += "X-POSIX-mode: %s\r\n" % mode
http_header += "\r\n"

print "<<<<<<<<<<<<<<<<<<<<<<<<<"
print http_header
print "<<<<<<<<<<<<<<<<<<<<<<<<<\n"

s.send( http_header )

# Stream the body in 32 KB chunks until EOF.
while data_fd != None:
    buf = data_fd.read(32768)
    if len(buf) == 0:
        break
    s.send( buf )

# Read (up to 16 KB of) the server's response and echo it.
ret = s.recv(16384)

print ">>>>>>>>>>>>>>>>>>>>>>>>>"
print ret
print ">>>>>>>>>>>>>>>>>>>>>>>>>\n"

s.close()
| #!/usr/bin/python
# Raw HTTP/1.0 PUT test client for syndicate-httpd (Python 2 source).
import socket
import time
import sys
import urllib2
import os
import base64
# Basic-auth credentials placeholder; base64-encoded below.
auth = "<PASSWORD>:<PASSWORD>"
# CLI: host, port, remote filename, optional local file to upload.
hostname = sys.argv[1]
port = int(sys.argv[2] )
filename = sys.argv[3]
data_fd = None
data_path = None
if len(sys.argv) > 4:
    data_path = sys.argv[4]
    data_fd = open( data_path, "r" )
# Trailing '/' marks a directory; request mode 0755 instead of 0644.
mode = '0644'
if filename[-1] == '/':
    mode = '0755'
size = 0
if data_fd != None:
    size = os.stat(data_path).st_size
s = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
s.connect( (hostname, port) )
boundary = "AaBbCcDdEe"  # NOTE(review): unused
http_header = ""
http_header += "PUT %s HTTP/1.0\r\n" % filename
http_header += "Host: t510\r\n"
http_header += "Content-Type: application/octet-stream\r\n"
http_header += "Content-Length: %s\r\n" % size
http_header += "Authorization: Basic %s\r\n" % base64.b64encode(auth)
http_header += "X-POSIX-mode: %s\r\n" % mode
http_header += "\r\n"
# Echo the request, send it, then stream the body in 32 KiB chunks.
print "<<<<<<<<<<<<<<<<<<<<<<<<<"
print http_header
print "<<<<<<<<<<<<<<<<<<<<<<<<<\n"
s.send( http_header )
while data_fd != None:
    buf = data_fd.read(32768)
    if len(buf) == 0:
        break
    s.send( buf )
# Read and echo up to 16 KiB of the response.
ret = s.recv(16384)
print ">>>>>>>>>>>>>>>>>>>>>>>>>"
print ret
print ">>>>>>>>>>>>>>>>>>>>>>>>>\n"
s.close()
crosshair/typed_inspect.py | ckeeter/CrossHair | 1 | 6622747 | import os.path
import inspect
import importlib
from typing import *
from crosshair.util import debug, walk_qualname
def _has_annotations(sig: inspect.Signature):
    """Report whether *sig* declares a return annotation or any parameter annotation."""
    if sig.return_annotation != inspect.Signature.empty:
        return True
    return any(
        param.annotation != inspect.Parameter.empty
        for param in sig.parameters.values()
    )
def get_resolved_sig(fn: Callable, env=None) -> inspect.Signature:
    """Return *fn*'s signature with annotations resolved via ``get_type_hints``.

    When the first parameter is an unannotated ``self``, additionally try to
    annotate it with the defining class, located by walking *fn*'s qualname
    inside its module.
    """
    raw_sig = inspect.signature(fn)
    hints = get_type_hints(fn, env, env.copy() if env else None)
    param_seq = list(raw_sig.parameters.values())
    missing_self_hint = (
        param_seq
        and param_seq[0].name == 'self'
        and 'self' not in hints
    )
    if missing_self_hint:
        module = inspect.getmodule(fn)
        if module:
            owner = walk_qualname(module, fn.__qualname__.rsplit('.', 1)[0])
            if inspect.isclass(owner):
                hints['self'] = owner
    # Rebuild the signature with every hinted annotation substituted in.
    resolved = [
        p.replace(annotation=hints[p.name]) if p.name in hints else p
        for p in param_seq
    ]
    return inspect.Signature(
        resolved, return_annotation=hints.get('return', raw_sig.return_annotation)
    )
def signature(fn: Callable, _stub_path: Optional[List[str]] = None) -> inspect.Signature:
    """Return ``fn``'s signature, falling back to a ``.pyi`` stub for annotations.

    If the runtime signature already carries annotations it is resolved and
    returned as-is.  Otherwise, look for ``<source>.pyi`` next to ``fn``'s
    source file, or a same-named stub on ``_stub_path`` (defaulting to the
    PYTHONPATH entries), load it as a module, and take the signature of the
    matching definition inside it.  Any failure falls back to the unannotated
    runtime signature.
    """
    sig = get_resolved_sig(fn)
    debug('START ', fn.__name__, sig)
    if _has_annotations(sig):
        debug('has annotations already')
        return sig
    # Default the stub search path to PYTHONPATH entries.
    if _stub_path is None and os.environ.get('PYTHONPATH'):
        _stub_path = os.environ['PYTHONPATH'].split(':')
    if _stub_path is None:
        _stub_path = []
    try:
        src_file = inspect.getsourcefile(fn)
    except TypeError: # raises this for builtins
        return sig
    # NOTE(review): getsourcefile can also return None (e.g. some C
    # extensions); .endswith would then raise -- confirm callers only pass
    # pure-Python callables.
    if not src_file.endswith('.py'):
        debug(src_file, ' not ending in py')
        return sig
    # First candidate: the stub right next to the source file (<file>.pyi).
    pyi_file = src_file + 'i'
    if not os.path.exists(pyi_file):
        #debug('no pyi at ', pyi_file)
        filename = os.path.split(pyi_file)[1]
        for path in _stub_path:
            candidate_file = os.path.join(path, filename)
            if os.path.exists(candidate_file):
                pyi_file = candidate_file
                break
            #debug('no pyi at ', candidate_file)
    if not os.path.exists(pyi_file):
        debug('no pyi found on PYTHONPATH')
        return sig
    #debug('pyi found at ', pyi_file)
    # Load the stub as a throwaway module under fn's module name so names
    # inside the stub resolve the same way.
    # NOTE(review): relies on `import importlib` exposing the machinery/util
    # submodules; explicit `import importlib.util` would be safer -- confirm.
    loader = importlib.machinery.SourceFileLoader(fn.__module__, pyi_file)
    spec = importlib.util.spec_from_loader(loader.name, loader)
    #debug('signature spec ', spec)
    ptr: Any = importlib.util.module_from_spec(spec)
    import sys
    old_module = sys.modules[spec.name]
    try:
        loader.exec_module(ptr)
    except BaseException as e:
        debug('Failed to load ' + pyi_file + '(' + str(e) + '); ignoring')
        return sig
    # Executing the stub must not have replaced the real module in sys.modules.
    if old_module is not sys.modules[spec.name]:
        raise Exception('sys modules changed')
    # Walk to the matching definition inside the stub and resolve its
    # annotations against fn's module globals.
    ptr = walk_qualname(ptr, fn.__qualname__)
    return get_resolved_sig(ptr, inspect.getmodule(fn).__dict__)
def _assert_signature_works():
    '''
    post: 'return_annotation' in return
    post: 'self' in return
    post: 'parameters' in return
    post: return['return_annotation'].annotation == Any
    post: return['parameters'].annotation == Optional[Sequence[inspect.Parameter]]
    '''
    # CrossHair self-test: the `post:` clauses above are machine-checked
    # contracts verifying that stub lookup annotates inspect.Signature.__init__.
    return signature(inspect.Signature.__init__).parameters
| import os.path
import inspect
import importlib
from typing import *
from crosshair.util import debug, walk_qualname
def _has_annotations(sig: inspect.Signature):
    """Return True if *sig* declares a return or any parameter annotation."""
    if sig.return_annotation != inspect.Signature.empty:
        return True
    for p in sig.parameters.values():
        if p.annotation != inspect.Parameter.empty:
            return True
    return False
def get_resolved_sig(fn: Callable, env=None) -> inspect.Signature:
    """Return fn's signature with annotations resolved via get_type_hints.

    When the first parameter is an unannotated ``self``, try to annotate it
    with the defining class found by walking fn's qualname in its module.
    """
    sig = inspect.signature(fn)
    #debug('get_resolved_seg input:', sig, next(iter(sig.parameters.keys())), inspect.ismethod(fn))
    type_hints = get_type_hints(fn, env, env.copy() if env else None)
    params = sig.parameters.values()
    if len(params) > 0 and next(iter(params)).name == 'self' and 'self' not in type_hints:
        fn_module = inspect.getmodule(fn)
        if fn_module:
            defining_thing = walk_qualname(
                fn_module, fn.__qualname__.rsplit('.', 1)[0])
            if inspect.isclass(defining_thing):
                type_hints['self'] = defining_thing
    #debug('TO HINTS ', type_hints)
    # Rebuild the signature with the hinted annotations substituted in.
    newparams = []
    for name, param in sig.parameters.items():
        if name in type_hints:
            param = param.replace(annotation=type_hints[name])
        newparams.append(param)
    newreturn = type_hints.get('return', sig.return_annotation)
    sig = inspect.Signature(newparams, return_annotation=newreturn)
    #debug('get_resolved_sig output: ', sig)
    return sig
def signature(fn: Callable, _stub_path: Optional[List[str]] = None) -> inspect.Signature:
    """Return fn's signature, consulting a ``.pyi`` stub when unannotated.

    Falls back to the unannotated runtime signature on any failure.
    """
    sig = get_resolved_sig(fn)
    debug('START ', fn.__name__, sig)
    if _has_annotations(sig):
        debug('has annotations already')
        return sig
    # Stub search path defaults to the PYTHONPATH entries.
    if _stub_path is None and os.environ.get('PYTHONPATH'):
        _stub_path = os.environ['PYTHONPATH'].split(':')
    if _stub_path is None:
        _stub_path = []
    try:
        src_file = inspect.getsourcefile(fn)
    except TypeError: # raises this for builtins
        return sig
    # NOTE(review): getsourcefile can also return None; .endswith would then
    # raise -- confirm callers only pass pure-Python callables.
    if not src_file.endswith('.py'):
        debug(src_file, ' not ending in py')
        return sig
    # Prefer the stub next to the source file, then search _stub_path.
    pyi_file = src_file + 'i'
    if not os.path.exists(pyi_file):
        #debug('no pyi at ', pyi_file)
        filename = os.path.split(pyi_file)[1]
        for path in _stub_path:
            candidate_file = os.path.join(path, filename)
            if os.path.exists(candidate_file):
                pyi_file = candidate_file
                break
            #debug('no pyi at ', candidate_file)
    if not os.path.exists(pyi_file):
        debug('no pyi found on PYTHONPATH')
        return sig
    #debug('pyi found at ', pyi_file)
    # Load the stub as a throwaway module under fn's module name.
    loader = importlib.machinery.SourceFileLoader(fn.__module__, pyi_file)
    spec = importlib.util.spec_from_loader(loader.name, loader)
    #debug('signature spec ', spec)
    ptr: Any = importlib.util.module_from_spec(spec)
    import sys
    old_module = sys.modules[spec.name]
    try:
        loader.exec_module(ptr)
    except BaseException as e:
        debug('Failed to load ' + pyi_file + '(' + str(e) + '); ignoring')
        return sig
    # Executing the stub must not replace the real module in sys.modules.
    if old_module is not sys.modules[spec.name]:
        raise Exception('sys modules changed')
    ptr = walk_qualname(ptr, fn.__qualname__)
    return get_resolved_sig(ptr, inspect.getmodule(fn).__dict__)
def _assert_signature_works():
    '''
    post: 'return_annotation' in return
    post: 'self' in return
    post: 'parameters' in return
    post: return['return_annotation'].annotation == Any
    post: return['parameters'].annotation == Optional[Sequence[inspect.Parameter]]
    '''
    # CrossHair self-test: the `post:` docstring clauses are analyzed contracts.
    return signature(inspect.Signature.__init__).parameters
| en | 0.302682 | #debug('get_resolved_seg input:', sig, next(iter(sig.parameters.keys())), inspect.ismethod(fn)) #debug('TO HINTS ', type_hints) #debug('get_resolved_sig output: ', sig) # raises this for builtins #debug('no pyi at ', pyi_file) #debug('no pyi at ', candidate_file) #debug('pyi found at ', pyi_file) #debug('signature spec ', spec) post: 'return_annotation' in return post: 'self' in return post: 'parameters' in return post: return['return_annotation'].annotation == Any post: return['parameters'].annotation == Optional[Sequence[inspect.Parameter]] | 2.167172 | 2 |
codeforces/cdf324_2d.py | knuu/competitive-programming | 1 | 6622748 | def miller_rabin(n):
""" primality Test
if n < 3,825,123,056,546,413,051, it is enough to test
a = 2, 3, 5, 7, 11, 13, 17, 19, and 23.
Complexity: O(log^3 n)
"""
if n == 2:
return True
if n <= 1 or not n & 1:
return False
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23]
d = n - 1
s = 0
while not d & 1:
d >>= 1
s += 1
for prime in primes:
if prime >= n:
continue
x = pow(prime, d, n)
if x == 1:
continue
for r in range(s):
if x == n - 1:
break
if r + 1 == s:
return False
x = x * x % n
return True
# Print a decomposition of N into at most three primes: first the count,
# then the summands, preserving the original branch order exactly.
N = int(input())
if miller_rabin(N):
    print(1)
    print(N)
elif miller_rabin(N - 2):
    print(2)
    print(2, N - 2)
elif miller_rabin(N - 4):
    print(3)
    print(2, 2, N - 4)
else:
    # Search odd candidates i; try N = i + p + p, then N = i + i + (N - 2i).
    odd = 3
    while True:
        if miller_rabin(odd):
            half = (N - odd) // 2
            if miller_rabin(half):
                print(3)
                print(odd, half, half)
                break
            if miller_rabin(N - 2 * odd):
                print(3)
                print(odd, odd, N - 2 * odd)
                break
        odd += 2
| def miller_rabin(n):
""" primality Test
if n < 3,825,123,056,546,413,051, it is enough to test
a = 2, 3, 5, 7, 11, 13, 17, 19, and 23.
Complexity: O(log^3 n)
"""
if n == 2:
return True
if n <= 1 or not n & 1:
return False
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23]
d = n - 1
s = 0
while not d & 1:
d >>= 1
s += 1
for prime in primes:
if prime >= n:
continue
x = pow(prime, d, n)
if x == 1:
continue
for r in range(s):
if x == n - 1:
break
if r + 1 == s:
return False
x = x * x % n
return True
# Print a decomposition of N into at most three primes: the count, then the
# summands.
N = int(input())
if miller_rabin(N):
    print(1)
    print(N)
elif miller_rabin(N-2):
    print(2)
    print(2, N-2)
elif miller_rabin(N-4):
    print(3)
    print(2, 2, N-4)
else:
    # Search odd primes i such that the remainder N - i splits into two
    # further primes (either (N-i)/2 twice, or i and N - 2i).
    i = 3
    while True:
        if miller_rabin(i):
            if miller_rabin((N-i)//2):
                print(3)
                print(i, (N-i)//2, (N-i)//2)
                break
            elif miller_rabin(N-2*i):
                print(3)
                print(i, i, N-2*i)
                break
        i += 2
| en | 0.875265 | primality Test if n < 3,825,123,056,546,413,051, it is enough to test a = 2, 3, 5, 7, 11, 13, 17, 19, and 23. Complexity: O(log^3 n) | 3.710883 | 4 |
gnd-sys/app/cfsinterface/cfeconstants.py | OpenSatKit/cfsat | 12 | 6622749 | """
Copyright 2022 Open STEMware Foundation
All Rights Reserved.
This program is free software; you can modify and/or redistribute it under
the terms of the GNU Affero General Public License as published by the Free
Software Foundation; version 3 with attribution addendums as found in the
LICENSE.txt
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
details.
This program may also be used under the terms of a commercial or enterprise
edition license of cFSAT if purchased from the copyright holder.
Purpose:
Define cFE constants
todo: These definitions should come from EDS
"""
###############################################################################
class Cfe():
    """Namespace of cFE constants used by the ground-system interface.

    TODO (per the module header): these definitions should come from EDS
    rather than being hard-coded here.
    """

    # cFE TIME flywheel on/off event-message IDs.
    CFE_TIME_FLY_ON_EID = 20
    CFE_TIME_FLY_OFF_EID = 21

    # cFE EVS event-filter values.
    CFE_EVS_NO_FILTER = 0x0000
    CFE_EVS_FIRST_ONE_STOP = 0xFFFF

    # Bit masks selecting EVS event-message severities.
    EVS_DEBUG_MASK = 0b0001
    EVS_INFO_MASK = 0b0010
    EVS_ERROR_MASK = 0b0100
    EVS_CRITICAL_MASK = 0b1000

    # File-transfer data segment length in bytes.
    FILE_XFER_DATA_SEG_LEN = 512
| """
Copyright 2022 Open STEMware Foundation
All Rights Reserved.
This program is free software; you can modify and/or redistribute it under
the terms of the GNU Affero General Public License as published by the Free
Software Foundation; version 3 with attribution addendums as found in the
LICENSE.txt
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
details.
This program may also be used under the terms of a commercial or enterprise
edition license of cFSAT if purchased from the copyright holder.
Purpose:
Define cFE constants
todo: These definitions should come from EDS
"""
###############################################################################
class Cfe():
    """cFE constants (TODO per module header: should come from EDS)."""
    # cFE TIME flywheel on/off event-message IDs.
    CFE_TIME_FLY_ON_EID = 20
    CFE_TIME_FLY_OFF_EID = 21
    # EVS event-filter values.
    CFE_EVS_NO_FILTER = 0x0000
    CFE_EVS_FIRST_ONE_STOP = 0xFFFF
    # EVS severity bit masks.
    EVS_DEBUG_MASK = 0b0001
    EVS_INFO_MASK = 0b0010
    EVS_ERROR_MASK = 0b0100
    EVS_CRITICAL_MASK = 0b1000
    # File-transfer data segment size in bytes.
    FILE_XFER_DATA_SEG_LEN = 512
| en | 0.790173 | Copyright 2022 Open STEMware Foundation All Rights Reserved. This program is free software; you can modify and/or redistribute it under the terms of the GNU Affero General Public License as published by the Free Software Foundation; version 3 with attribution addendums as found in the LICENSE.txt This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. This program may also be used under the terms of a commercial or enterprise edition license of cFSAT if purchased from the copyright holder. Purpose: Define cFE constants todo: These definitions should come from EDS ############################################################################### | 1.517423 | 2 |
contents/tts/content/TensorflowTTS/tensorflow_tts/processor/ljspeech.py | PIN-devel/inside-kids | 0 | 6622750 | # -*- coding: utf-8 -*-
# This code is copy and modify from https://github.com/keithito/tacotron.
"""Perform preprocessing and raw feature extraction."""
import re
import os
import numpy as np
import soundfile as sf
from tensorflow_tts.utils import cleaners
# Unicode Hangul jamo code points: choseong U+1100-1112, jungseong
# U+1161-1175, jongseong U+11A8-11C2.
_korean_jaso_code = (
    list(range(0x1100, 0x1113))
    + list(range(0x1161, 0x1176))
    + list(range(0x11a8, 0x11c3))
)
_korean_jaso = [chr(code) for code in _korean_jaso_code]
_pad = "_"
_eos = "~"
_punctuation = " .!?"
# Export all symbols; the pad comes first so it maps to id 0.
symbols = [_pad, *_punctuation, *_korean_jaso, _eos]
# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {symbol: index for index, symbol in enumerate(symbols)}
_id_to_symbol = dict(enumerate(symbols))
class LJSpeechProcessor(object):
    """Dataset processor for an LJSpeech-layout corpus (Korean-jaso symbols).

    Reads ``<root>/metadata.csv`` (pipe-separated ``id|text`` rows) and
    exposes (text, wav path, speaker) items plus per-utterance extraction.
    A ``root_path`` of None yields an empty item list.
    """

    def __init__(self, root_path, cleaner_names):
        self.root_path = root_path
        self.cleaner_names = cleaner_names
        self.speaker_name = "ljspeech"
        entries = []
        if root_path is not None:
            metadata_path = os.path.join(root_path, "metadata.csv")
            with open(metadata_path, encoding="utf-8") as meta_file:
                for row in meta_file:
                    columns = row.strip().split("|")
                    wav = os.path.join(root_path, "wavs", "%s.wav" % columns[0])
                    entries.append([columns[1], wav, self.speaker_name])
        self.items = entries

    def get_one_sample(self, idx):
        """Return one utterance: raw text, symbol ids, float32 audio, utt id, rate."""
        text, wav_file, speaker_name = self.items[idx]
        # soundfile already returns a signal normalized to [-1, 1].
        audio, rate = sf.read(wav_file)
        audio = audio.astype(np.float32)
        # Map the transcript to symbol ids.
        text_ids = np.asarray(self.text_to_sequence(text), np.int32)
        return {
            "raw_text": text,
            "text_ids": text_ids,
            "audio": audio,
            "utt_id": self.items[idx][1].split("/")[-1].split(".")[0],
            "speaker_name": speaker_name,
            "rate": rate,
        }

    def text_to_sequence(self, text):
        """Clean *text* and map each symbol to its id; empty text gives []."""
        global _symbol_to_id
        if not len(text):
            return []
        cleaned = _clean_text(text, [self.cleaner_names])
        return list(_symbols_to_sequence(cleaned))
def _clean_text(text, cleaner_names):
    """Pipe *text* through each named cleaner from tensorflow_tts cleaners."""
    for cleaner_name in cleaner_names:
        # NOTE(review): getattr with no default raises AttributeError for an
        # unknown name before the falsy check below can fire -- confirm intent.
        cleaner_fn = getattr(cleaners, cleaner_name)
        if not cleaner_fn:
            raise Exception("Unknown cleaner: %s" % cleaner_name)
        text = cleaner_fn(text)
    return text
def _symbols_to_sequence(symbols):
    """Convert symbols to ids, skipping pad/eos and unknown characters."""
    return [_symbol_to_id[symbol] for symbol in symbols if _should_keep_symbol(symbol)]
def _should_keep_symbol(s):
    """Keep only symbols present in the table, other than pad '_' and eos '~'."""
    return s != "_" and s != "~" and s in _symbol_to_id
| # -*- coding: utf-8 -*-
# This code is copy and modify from https://github.com/keithito/tacotron.
"""Perform preprocessing and raw feature extraction."""
import re
import os
import numpy as np
import soundfile as sf
from tensorflow_tts.utils import cleaners
# Unicode Hangul jamo code points: choseong (U+1100-1112), jungseong
# (U+1161-1175) and jongseong (U+11A8-11C2).
_korean_jaso_code = list(range(0x1100, 0x1113)) + list(range(0x1161, 0x1176)) + list(range(0x11a8, 0x11c3))
_korean_jaso = list(chr(c) for c in _korean_jaso_code)
_pad = "_"
_eos = "~"
_punctuation = " .!?"
# Export all symbols (the pad comes first so it maps to id 0):
symbols = (
    [_pad] + list(_punctuation) + list(_korean_jaso) + [_eos]
)
# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
class LJSpeechProcessor(object):
    """LJSpeech-layout dataset processor (Korean-jaso symbol set).

    Reads ``<root>/metadata.csv`` rows of the form ``id|text`` and exposes
    (text, wav path, speaker) items plus per-utterance extraction.
    """
    def __init__(self, root_path, cleaner_names):
        # root_path may be None, yielding an empty processor (no metadata read).
        self.root_path = root_path
        self.cleaner_names = cleaner_names
        items = []
        self.speaker_name = "ljspeech"
        if root_path is not None:
            with open(os.path.join(root_path, "metadata.csv"), encoding="utf-8") as ttf:
                for line in ttf:
                    parts = line.strip().split("|")
                    wav_path = os.path.join(root_path, "wavs", "%s.wav" % parts[0])
                    text = parts[1]
                    items.append([text, wav_path, self.speaker_name])
        self.items = items
    def get_one_sample(self, idx):
        """Return one utterance: raw text, symbol ids, float32 audio, utt id, rate."""
        text, wav_file, speaker_name = self.items[idx]
        # normalize audio signal to be [-1, 1], soundfile already norm.
        audio, rate = sf.read(wav_file)
        audio = audio.astype(np.float32)
        # convert text to ids
        text_ids = np.asarray(self.text_to_sequence(text), np.int32)
        sample = {
            "raw_text": text,
            "text_ids": text_ids,
            "audio": audio,
            "utt_id": self.items[idx][1].split("/")[-1].split(".")[0],
            "speaker_name": speaker_name,
            "rate": rate,
        }
        return sample
    def text_to_sequence(self, text):
        """Clean *text* and map each symbol to its id; empty text gives []."""
        global _symbol_to_id
        sequence = []
        # Non-empty text only: run the configured cleaner, then map symbols.
        if len(text):
            sequence += _symbols_to_sequence(
                _clean_text(text, [self.cleaner_names])
            )
        return sequence
def _clean_text(text, cleaner_names):
    """Run *text* through each named cleaner from tensorflow_tts cleaners."""
    for name in cleaner_names:
        # NOTE(review): getattr with no default raises AttributeError for an
        # unknown name before the falsy check below can fire -- confirm intent.
        cleaner = getattr(cleaners, name)
        if not cleaner:
            raise Exception("Unknown cleaner: %s" % name)
        text = cleaner(text)
    return text
def _symbols_to_sequence(symbols):
    """Map symbols to ids, dropping pad/eos and anything outside the table."""
    return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
def _should_keep_symbol(s):
    """Keep known symbols except the pad '_' and eos '~' markers."""
    return s in _symbol_to_id and s != "_" and s != "~"
| en | 0.790346 | # -*- coding: utf-8 -*- # This code is copy and modify from https://github.com/keithito/tacotron. Perform preprocessing and raw feature extraction. # Export all symbols: # Mappings from symbol to numeric ID and vice versa: LJSpeech processor. # normalize audio signal to be [-1, 1], soundfile already norm. # convert text to ids # Check for curly braces and treat their contents as ARPAbet: | 2.607715 | 3 |