max_stars_repo_path
stringlengths
4
286
max_stars_repo_name
stringlengths
5
119
max_stars_count
int64
0
191k
id
stringlengths
1
7
content
stringlengths
6
1.03M
content_cleaned
stringlengths
6
1.03M
language
stringclasses
111 values
language_score
float64
0.03
1
comments
stringlengths
0
556k
edu_score
float64
0.32
5.03
edu_int_score
int64
0
5
exemplo/executar_sintatico.py
AlanTaranti/CompiladorLALG
1
6622951
<filename>exemplo/executar_sintatico.py<gh_stars>1-10 # Exemplo de Execução do Analisador Sintático # # Desenvolvedor: <NAME> # Main if __name__ == "__main__": # # Importar Analisador # import sys, os sys.path.append("..") from analisador import Sintatico dir = os.path.dirname(os.path.abspath(__file__)) dirpath = '/'.join((dir).split('/')[:-1])+'/analisador' sys.path.append(dirpath) # # Ler o Arquivo # filename = 'file.lalg' with open(dir+'/'+filename) as f: alg = f.read() # # Inicializar o Analisador # sin = Sintatico(alg) # # Executar # print('\nAnálise Lexica e Sintática:\n') resultado = sin.start() print('\n' + resultado['mensagem'])
<filename>exemplo/executar_sintatico.py<gh_stars>1-10 # Exemplo de Execução do Analisador Sintático # # Desenvolvedor: <NAME> # Main if __name__ == "__main__": # # Importar Analisador # import sys, os sys.path.append("..") from analisador import Sintatico dir = os.path.dirname(os.path.abspath(__file__)) dirpath = '/'.join((dir).split('/')[:-1])+'/analisador' sys.path.append(dirpath) # # Ler o Arquivo # filename = 'file.lalg' with open(dir+'/'+filename) as f: alg = f.read() # # Inicializar o Analisador # sin = Sintatico(alg) # # Executar # print('\nAnálise Lexica e Sintática:\n') resultado = sin.start() print('\n' + resultado['mensagem'])
pt
0.765322
# Exemplo de Execução do Analisador Sintático # # Desenvolvedor: <NAME> # Main # # Importar Analisador # # # Ler o Arquivo # # # Inicializar o Analisador # # # Executar #
3.218764
3
jp.atcoder/abc109/abc109_c/9351215.py
kagemeka/atcoder-submissions
1
6622952
<gh_stars>1-10 import sys from functools import reduce def gcd(a, b): while b: a, b = b, a % b return abs(a) n, X, *x = map(int, sys.stdin.read().split()) def main(): x.append(X) for i in range(n): x[i] -= x[i+1] x[-1] -= X return reduce(gcd, x) if __name__ == '__main__': ans = main() print(ans)
import sys from functools import reduce def gcd(a, b): while b: a, b = b, a % b return abs(a) n, X, *x = map(int, sys.stdin.read().split()) def main(): x.append(X) for i in range(n): x[i] -= x[i+1] x[-1] -= X return reduce(gcd, x) if __name__ == '__main__': ans = main() print(ans)
none
1
3.164203
3
RenameAllFileinUEFile.py
youdrew/All-Kind-of-Language-Study
0
6622953
<reponame>youdrew/All-Kind-of-Language-Study import os import pypinyin #这是一个将文件夹下所有的子文件/文件夹改成拼音的小程序 #撰写日期:20220124 Engene def is_chinese(string): """ 检查整个字符串是否包含中文 :param string: 需要检查的字符串 :return: bool """ for ch in string: if u'\u4e00' <= ch <= u'\u9fff': return True return False def ADDFileName(parent, dirnames, filenames): for filename in filenames: # print("parent is: " + parent) # print("filename is: " + filename) # print("dirnames is: " + dirnames) # print(os.path.join(parent, filename)) # 输出rootdir路径下所有文件(包含子文件)信息 #files_list.append([os.path.join(parent, filename)]) print([os.path.join(parent, filename)]) #如果找到了文件立刻改名 for filename in filenames: if is_chinese(filename): EN= pypinyin.lazy_pinyin(filename) strEN="".join(EN) if os.path.exists(parent + "/" + filename): os.rename(parent + "/" + filename, parent + "/" + strEN) #如果找到的文件以**为尾缀,打开来遍历 def get_files_list(dir): for parent, dirnames, filenames in os.walk(dir): ADDFileName(parent, dirnames, filenames) #如果找到了文件立刻改名 for filename in filenames: if is_chinese(filename): EN= pypinyin.lazy_pinyin(filename) strEN="".join(EN) if os.path.exists(parent + "/" + filename): os.rename(parent + "/" + filename, parent + "/" + strEN) # 判断字符串里是否包含中文 for m in dirnames: #如果包含中文 if is_chinese(m): EN= pypinyin.lazy_pinyin(m) strEN="".join(EN) if os.path.exists(dir + "/" + m): os.rename(dir + "/" + m, dir + "/" + strEN) #往下遍历 if dirnames!=None: m = 0 while m < len(dirnames): subdir = dir + "/" + dirnames[m] m=1+m get_files_list(subdir) if __name__ == '__main__': dir = input("请把你想要修改文件名的文件夹整个扔进来🚮 \n 请注意,路径后面不要有空格符。") print("读取到的路径是: ", dir) get_files_list(dir)
import os import pypinyin #这是一个将文件夹下所有的子文件/文件夹改成拼音的小程序 #撰写日期:20220124 Engene def is_chinese(string): """ 检查整个字符串是否包含中文 :param string: 需要检查的字符串 :return: bool """ for ch in string: if u'\u4e00' <= ch <= u'\u9fff': return True return False def ADDFileName(parent, dirnames, filenames): for filename in filenames: # print("parent is: " + parent) # print("filename is: " + filename) # print("dirnames is: " + dirnames) # print(os.path.join(parent, filename)) # 输出rootdir路径下所有文件(包含子文件)信息 #files_list.append([os.path.join(parent, filename)]) print([os.path.join(parent, filename)]) #如果找到了文件立刻改名 for filename in filenames: if is_chinese(filename): EN= pypinyin.lazy_pinyin(filename) strEN="".join(EN) if os.path.exists(parent + "/" + filename): os.rename(parent + "/" + filename, parent + "/" + strEN) #如果找到的文件以**为尾缀,打开来遍历 def get_files_list(dir): for parent, dirnames, filenames in os.walk(dir): ADDFileName(parent, dirnames, filenames) #如果找到了文件立刻改名 for filename in filenames: if is_chinese(filename): EN= pypinyin.lazy_pinyin(filename) strEN="".join(EN) if os.path.exists(parent + "/" + filename): os.rename(parent + "/" + filename, parent + "/" + strEN) # 判断字符串里是否包含中文 for m in dirnames: #如果包含中文 if is_chinese(m): EN= pypinyin.lazy_pinyin(m) strEN="".join(EN) if os.path.exists(dir + "/" + m): os.rename(dir + "/" + m, dir + "/" + strEN) #往下遍历 if dirnames!=None: m = 0 while m < len(dirnames): subdir = dir + "/" + dirnames[m] m=1+m get_files_list(subdir) if __name__ == '__main__': dir = input("请把你想要修改文件名的文件夹整个扔进来🚮 \n 请注意,路径后面不要有空格符。") print("读取到的路径是: ", dir) get_files_list(dir)
zh
0.794543
#这是一个将文件夹下所有的子文件/文件夹改成拼音的小程序 #撰写日期:20220124 Engene 检查整个字符串是否包含中文 :param string: 需要检查的字符串 :return: bool # print("parent is: " + parent) # print("filename is: " + filename) # print("dirnames is: " + dirnames) # print(os.path.join(parent, filename)) # 输出rootdir路径下所有文件(包含子文件)信息 #files_list.append([os.path.join(parent, filename)]) #如果找到了文件立刻改名 #如果找到的文件以**为尾缀,打开来遍历 #如果找到了文件立刻改名 # 判断字符串里是否包含中文 #如果包含中文 #往下遍历
2.948985
3
electrum_gui/common/basic/exceptions.py
BixinKey/electrum
12
6622954
class OneKeyException(Exception): key = "msg__unknown_error" other_info = "" def __init__(self, other_info=None): if other_info is not None: self.other_info = other_info class UnavailablePrivateKey(OneKeyException): key = "msg__incorrect_private_key" class InvalidKeystoreFormat(OneKeyException): key = "msg__incorrect_keystore_format" class InvalidMnemonicFormat(OneKeyException): key = "msg__incorrect_recovery_phrase_format" class UnavailableBtcAddr(OneKeyException): key = "msg__incorrect_bitcoin_address" class InvalidPassword(OneKeyException): key = "msg__incorrect_password" class UnavailablePublicKey(OneKeyException): key = "msg__incorrect_public_key" class UnavailableEthAddr(OneKeyException): key = "msg__incorrect_eth_address" class IncorrectAddress(OneKeyException): key = "msg__incorrect_address" class IncorrectTokenAddress(OneKeyException): key = "msg__incorrect_token_address" class InactiveAddress(OneKeyException): key = "msg__the_address_has_not_been_activated_please_enter_receipt_identifier" class UnsupportedCurrencyCoin(OneKeyException): key = "msg__unsupported_coin_types" class NotEnoughFunds(OneKeyException): key = "msg__insufficient_funds" class InvalidBip39Seed(OneKeyException): key = "msg__Incorrect_bip39_recovery_phrase_format" class UserCancel(OneKeyException): key = "msg__user_cancel" class DerivedWalletLimit(OneKeyException): key = "msg__derived_wallet_limit" class NotChosenWallet(OneKeyException): key = "msg__you_have_not_chosen_a_wallet_yet" class DustTransaction(OneKeyException): key = "msg__dust_transaction" class AddressNotInCurrentWallet(OneKeyException): key = "msg__the_address_is_not_in_the_current_wallet" class ThisIsWatchOnlyWallet(OneKeyException): key = "msg__this_is_a_watching_only_wallet" class CurWalletNotSuppSigMesg(OneKeyException): key = "msg__current_wallet_does_not_support_signature_message" class ReplaceWatchOnlyWallet(OneKeyException): key = "msg__replace_watch_only_wallet" class NotSupportExportSeed(OneKeyException): key = 
"msg__current_wallet_does_not_support_exporting_mnemonic" class FileAlreadyExist(OneKeyException): key = "msg__file_already_exists" class FailedGetTx(OneKeyException): key = "msg__failed_to_get_transactions" class BroadcastFailedDueToNetExcept(OneKeyException): key = "msg__cannot_broadcast_transaction_due_to_network_connected_exceptions" class TxFormatError(OneKeyException): key = "msg__transaction_formatter_error" class TxBroadcastError(OneKeyException): key = "msg__transaction_broadcast_error" class PythonLibNotStart(OneKeyException): key = "msg__python_lib_not_start_please_restart_app" class KeyStoreFormatError(OneKeyException): key = "msg__incorrect_keystore_format" class PrivateKeyNotSupportedFormat(OneKeyException): key = "msg__private_key_format_not_supported" class KeypairMismatchedError(OneKeyException): key = "msg__keypair_mismatched_error" class KeyStoreIncorrectPassword(OneKeyException): key = "msg__incorrect_keystore_password" class InvalidExtendSecret(OneKeyException): key = "msg__invalid_extend_secret" ##################################### # hardware exceptions # ##################################### class HardwareInvalidPIN(OneKeyException): key = "msg__incorrect_pin_please_try_again" class HardwareUpdateFailed(OneKeyException): key = "msg__update_failed_please_try_again"
class OneKeyException(Exception): key = "msg__unknown_error" other_info = "" def __init__(self, other_info=None): if other_info is not None: self.other_info = other_info class UnavailablePrivateKey(OneKeyException): key = "msg__incorrect_private_key" class InvalidKeystoreFormat(OneKeyException): key = "msg__incorrect_keystore_format" class InvalidMnemonicFormat(OneKeyException): key = "msg__incorrect_recovery_phrase_format" class UnavailableBtcAddr(OneKeyException): key = "msg__incorrect_bitcoin_address" class InvalidPassword(OneKeyException): key = "msg__incorrect_password" class UnavailablePublicKey(OneKeyException): key = "msg__incorrect_public_key" class UnavailableEthAddr(OneKeyException): key = "msg__incorrect_eth_address" class IncorrectAddress(OneKeyException): key = "msg__incorrect_address" class IncorrectTokenAddress(OneKeyException): key = "msg__incorrect_token_address" class InactiveAddress(OneKeyException): key = "msg__the_address_has_not_been_activated_please_enter_receipt_identifier" class UnsupportedCurrencyCoin(OneKeyException): key = "msg__unsupported_coin_types" class NotEnoughFunds(OneKeyException): key = "msg__insufficient_funds" class InvalidBip39Seed(OneKeyException): key = "msg__Incorrect_bip39_recovery_phrase_format" class UserCancel(OneKeyException): key = "msg__user_cancel" class DerivedWalletLimit(OneKeyException): key = "msg__derived_wallet_limit" class NotChosenWallet(OneKeyException): key = "msg__you_have_not_chosen_a_wallet_yet" class DustTransaction(OneKeyException): key = "msg__dust_transaction" class AddressNotInCurrentWallet(OneKeyException): key = "msg__the_address_is_not_in_the_current_wallet" class ThisIsWatchOnlyWallet(OneKeyException): key = "msg__this_is_a_watching_only_wallet" class CurWalletNotSuppSigMesg(OneKeyException): key = "msg__current_wallet_does_not_support_signature_message" class ReplaceWatchOnlyWallet(OneKeyException): key = "msg__replace_watch_only_wallet" class NotSupportExportSeed(OneKeyException): key = 
"msg__current_wallet_does_not_support_exporting_mnemonic" class FileAlreadyExist(OneKeyException): key = "msg__file_already_exists" class FailedGetTx(OneKeyException): key = "msg__failed_to_get_transactions" class BroadcastFailedDueToNetExcept(OneKeyException): key = "msg__cannot_broadcast_transaction_due_to_network_connected_exceptions" class TxFormatError(OneKeyException): key = "msg__transaction_formatter_error" class TxBroadcastError(OneKeyException): key = "msg__transaction_broadcast_error" class PythonLibNotStart(OneKeyException): key = "msg__python_lib_not_start_please_restart_app" class KeyStoreFormatError(OneKeyException): key = "msg__incorrect_keystore_format" class PrivateKeyNotSupportedFormat(OneKeyException): key = "msg__private_key_format_not_supported" class KeypairMismatchedError(OneKeyException): key = "msg__keypair_mismatched_error" class KeyStoreIncorrectPassword(OneKeyException): key = "msg__incorrect_keystore_password" class InvalidExtendSecret(OneKeyException): key = "msg__invalid_extend_secret" ##################################### # hardware exceptions # ##################################### class HardwareInvalidPIN(OneKeyException): key = "msg__incorrect_pin_please_try_again" class HardwareUpdateFailed(OneKeyException): key = "msg__update_failed_please_try_again"
de
0.77502
##################################### # hardware exceptions # #####################################
2.368125
2
python3_exercicios_feitos/Desafio043.py
LouiMaxine/python3-exercicios-cursoemvideo
0
6622955
<gh_stars>0 '''Desenvolva uma lógica que leia o peso e a altra de uma pessoa, calcule seu IMC(índice de massa corpórea) e mostre seu status, de acordo com a tabela abaixo: - Abaixo de 18.5: Abaixo do Peso - Entre 18.5 e 25: Peso ideal - 25 até 30: Sobrepeso - 30 até 40: Obesidade - Acima de 40: Obesidade Mórbida''' #from math import pow peso = float(input('Peso: ')) alt = float(input('Altura: ')) #imc = peso/pow(alt,2) imc = peso/(alt**2) #esqueci que poderia usar dois asteriscos, obgda Guanabara if imc<18.5: print(f'Seu IMC é: {imc:.2f} e você está ABAIXO DO PESO') elif 18.5<=imc<25: print(f'Seu IMC é: {imc:.2f} e você está no PESO IDEAL') elif 25<=imc<30: print(f'Seu IMC é: {imc:.2f} e você está com SOBREPESO') elif 30<=imc<=40: print(f'Seu IMC é: {imc:.2f} e você está com OBESIDADE') else: print(f'Seu IMC é: {imc:.2f} e você está com OBESIDADE MÓRBIDA')
'''Desenvolva uma lógica que leia o peso e a altra de uma pessoa, calcule seu IMC(índice de massa corpórea) e mostre seu status, de acordo com a tabela abaixo: - Abaixo de 18.5: Abaixo do Peso - Entre 18.5 e 25: Peso ideal - 25 até 30: Sobrepeso - 30 até 40: Obesidade - Acima de 40: Obesidade Mórbida''' #from math import pow peso = float(input('Peso: ')) alt = float(input('Altura: ')) #imc = peso/pow(alt,2) imc = peso/(alt**2) #esqueci que poderia usar dois asteriscos, obgda Guanabara if imc<18.5: print(f'Seu IMC é: {imc:.2f} e você está ABAIXO DO PESO') elif 18.5<=imc<25: print(f'Seu IMC é: {imc:.2f} e você está no PESO IDEAL') elif 25<=imc<30: print(f'Seu IMC é: {imc:.2f} e você está com SOBREPESO') elif 30<=imc<=40: print(f'Seu IMC é: {imc:.2f} e você está com OBESIDADE') else: print(f'Seu IMC é: {imc:.2f} e você está com OBESIDADE MÓRBIDA')
pt
0.93646
Desenvolva uma lógica que leia o peso e a altra de uma pessoa, calcule seu IMC(índice de massa corpórea) e mostre seu status, de acordo com a tabela abaixo: - Abaixo de 18.5: Abaixo do Peso - Entre 18.5 e 25: Peso ideal - 25 até 30: Sobrepeso - 30 até 40: Obesidade - Acima de 40: Obesidade Mórbida #from math import pow #imc = peso/pow(alt,2) #esqueci que poderia usar dois asteriscos, obgda Guanabara
3.930295
4
pgex/gaming/animated_sprite.py
IvanFoke/pgex
0
6622956
from pgex.gaming.animation import AnimationIterator from .base_sprite import BaseSprite class AnimatedSprite(BaseSprite): def __init__(self, coordinates, speed_x, speed_y, stay_images, left_images=None, right_images=None, up_images=None, down_images=None, jump_images=None, transparent_color=None, frames_per_image=1): super().__init__(coordinates, speed_x, speed_y, stay_images[0], transparent_color) self.stay_animation = AnimationIterator(stay_images, frames_per_image) self.left_animation = AnimationIterator(left_images, frames_per_image) if left_images else self.stay_animation self.right_animation = AnimationIterator(right_images, frames_per_image) if right_images else self.stay_animation self.up_animation = AnimationIterator(up_images, frames_per_image) if up_images else self.stay_animation self.down_animation = AnimationIterator(down_images, frames_per_image) if down_images else self.stay_animation self.jump_animation = AnimationIterator(jump_images, frames_per_image) if jump_images else self.stay_animation def stay(self): self.surf.blit(next(self.stay_animation), (0, 0)) def move_left(self): super().move_left() self.surf.blit(next(self.left_animation), (0, 0)) def move_right(self): super().move_right() self.surf.blit(next(self.right_animation), (0, 0)) def move_up(self): super().move_up() self.surf.blit(next(self.up_animation), (0, 0)) def move_down(self): super().move_down() self.surf.blit(next(self.down_animation), (0, 0)) def move(self, keys=None, left=True, right=True, up=True, down=True): self.stay() super().move(keys, left, right, up, down) def jump(self): super().jump() self.surf.blit(next(self.jump_animation), (0, 0))
from pgex.gaming.animation import AnimationIterator from .base_sprite import BaseSprite class AnimatedSprite(BaseSprite): def __init__(self, coordinates, speed_x, speed_y, stay_images, left_images=None, right_images=None, up_images=None, down_images=None, jump_images=None, transparent_color=None, frames_per_image=1): super().__init__(coordinates, speed_x, speed_y, stay_images[0], transparent_color) self.stay_animation = AnimationIterator(stay_images, frames_per_image) self.left_animation = AnimationIterator(left_images, frames_per_image) if left_images else self.stay_animation self.right_animation = AnimationIterator(right_images, frames_per_image) if right_images else self.stay_animation self.up_animation = AnimationIterator(up_images, frames_per_image) if up_images else self.stay_animation self.down_animation = AnimationIterator(down_images, frames_per_image) if down_images else self.stay_animation self.jump_animation = AnimationIterator(jump_images, frames_per_image) if jump_images else self.stay_animation def stay(self): self.surf.blit(next(self.stay_animation), (0, 0)) def move_left(self): super().move_left() self.surf.blit(next(self.left_animation), (0, 0)) def move_right(self): super().move_right() self.surf.blit(next(self.right_animation), (0, 0)) def move_up(self): super().move_up() self.surf.blit(next(self.up_animation), (0, 0)) def move_down(self): super().move_down() self.surf.blit(next(self.down_animation), (0, 0)) def move(self, keys=None, left=True, right=True, up=True, down=True): self.stay() super().move(keys, left, right, up, down) def jump(self): super().jump() self.surf.blit(next(self.jump_animation), (0, 0))
none
1
2.65287
3
ColDocDjango/ColDocApp/migrations/0001_initial.py
mennucc/ColDoc_project
0
6622957
# Generated by Django 3.0.5 on 2020-04-30 11:12 import ColDocApp.models import ColDocDjango.users from django.conf import settings import django.contrib.auth.models import django.contrib.auth.validators from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0011_update_proxy_permissions'), ] operations = [ migrations.CreateModel( name='ColDocUser', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('password', models.CharField(max_length=128, verbose_name='password')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')), ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')), ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')), ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')), ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')), ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')), ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. 
Unselect this instead of deleting accounts.', verbose_name='active')), ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')), ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')), ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')), ], bases=(models.Model, ColDocDjango.users.BaseColDocUser), managers=[ ('objects', django.contrib.auth.models.UserManager()), ], ), migrations.CreateModel( name='DColDoc', fields=[ ('nickname', models.SlugField(help_text="short string to identify this ColDoc in URLs (alphanumeric only, use '_' or '-' for other chars)", max_length=10, primary_key=True, serialize=False, validators=[ColDocApp.models.validate_coldoc_nickname], verbose_name='short string to identify')), ('title', models.CharField(blank=True, max_length=2000)), ('abstract', models.TextField(blank=True, max_length=10000)), ('publication_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='time first published')), ('blob_modification_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='time of last modification of any blob in this coldoc')), ('latex_time', models.DateTimeField(default=None, null=True, verbose_name='time of last run of latex')), ('latex_return_codes', models.CharField(blank=True, max_length=2000)), ('anonymous_can_view', models.BooleanField(default=True)), ('latex_engine', models.CharField(choices=[('pdflatex', 'LaTeX'), ('xelatex', 'XeLaTeX'), ('lualatex', 'LuaLaTeX')], default='pdflatex', max_length=15, verbose_name='latex-type command used to compile')), ('root_uuid', ColDocApp.models.UUID_Field(default=1)), 
('editor', models.ManyToManyField(to=settings.AUTH_USER_MODEL)), ], options={ 'verbose_name': 'ColDoc', 'permissions': [('add_blob', 'can add_blob on any coldoc'), ('delete_blob', 'can delete_blob on any coldoc'), ('commit', 'can commit on any coldoc')], }, ), ]
# Generated by Django 3.0.5 on 2020-04-30 11:12 import ColDocApp.models import ColDocDjango.users from django.conf import settings import django.contrib.auth.models import django.contrib.auth.validators from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0011_update_proxy_permissions'), ] operations = [ migrations.CreateModel( name='ColDocUser', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('password', models.CharField(max_length=128, verbose_name='password')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')), ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')), ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')), ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')), ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')), ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')), ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. 
Unselect this instead of deleting accounts.', verbose_name='active')), ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')), ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')), ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')), ], bases=(models.Model, ColDocDjango.users.BaseColDocUser), managers=[ ('objects', django.contrib.auth.models.UserManager()), ], ), migrations.CreateModel( name='DColDoc', fields=[ ('nickname', models.SlugField(help_text="short string to identify this ColDoc in URLs (alphanumeric only, use '_' or '-' for other chars)", max_length=10, primary_key=True, serialize=False, validators=[ColDocApp.models.validate_coldoc_nickname], verbose_name='short string to identify')), ('title', models.CharField(blank=True, max_length=2000)), ('abstract', models.TextField(blank=True, max_length=10000)), ('publication_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='time first published')), ('blob_modification_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='time of last modification of any blob in this coldoc')), ('latex_time', models.DateTimeField(default=None, null=True, verbose_name='time of last run of latex')), ('latex_return_codes', models.CharField(blank=True, max_length=2000)), ('anonymous_can_view', models.BooleanField(default=True)), ('latex_engine', models.CharField(choices=[('pdflatex', 'LaTeX'), ('xelatex', 'XeLaTeX'), ('lualatex', 'LuaLaTeX')], default='pdflatex', max_length=15, verbose_name='latex-type command used to compile')), ('root_uuid', ColDocApp.models.UUID_Field(default=1)), 
('editor', models.ManyToManyField(to=settings.AUTH_USER_MODEL)), ], options={ 'verbose_name': 'ColDoc', 'permissions': [('add_blob', 'can add_blob on any coldoc'), ('delete_blob', 'can delete_blob on any coldoc'), ('commit', 'can commit on any coldoc')], }, ), ]
en
0.795519
# Generated by Django 3.0.5 on 2020-04-30 11:12
2.033889
2
scripts/portal/gPark_Portal.py
G00dBye/YYMS
54
6622958
<reponame>G00dBye/YYMS<gh_stars>10-100 if sm.getFieldID() == 956100000: map = 224000000 portal = 32 else: map = 956100000 portal = 3 sm.warp(map, portal) sm.dispose()
if sm.getFieldID() == 956100000: map = 224000000 portal = 32 else: map = 956100000 portal = 3 sm.warp(map, portal) sm.dispose()
none
1
1.166197
1
answers/MridulMohanta/Day8/question1.py
arc03/30-DaysOfCode-March-2021
22
6622959
arr = input("Enter") nums = arr.split() nums = [int(i) for i in nums] unique = [] for x in nums: if nums.count(x) > 1: while x in nums: nums.remove(x) total = 0 for i in range(0 , len(nums)): total = total + nums[i] print(total)
arr = input("Enter") nums = arr.split() nums = [int(i) for i in nums] unique = [] for x in nums: if nums.count(x) > 1: while x in nums: nums.remove(x) total = 0 for i in range(0 , len(nums)): total = total + nums[i] print(total)
none
1
3.46082
3
scripts/sources/S_FactorReplicationTest.py
dpopadic/arpmRes
6
6622960
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.4' # jupytext_version: 1.1.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # S_FactorReplicationTest [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_FactorReplicationTest&codeLang=Python) # For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-cross-sec-reg-num-test). # ## Prepare the environment # + import os import os.path as path import sys sys.path.append(path.abspath('../../functions-legacy')) import numpy as np from numpy import ones, zeros, cov, eye, r_ from numpy.linalg import solve, pinv from numpy.random import randn import matplotlib.pyplot as plt from matplotlib.pyplot import figure, plot, legend, xlim, scatter, ylabel, \ xlabel, title plt.style.use('seaborn') from ARPM_utils import save_plot from NormalScenarios import NormalScenarios from MultivRsquare import MultivRsquare # input parameters n_ = 100 # max dimension of target X nstep = range(10,n_+1) # target dimension steps j_ = 1000 # number of simulations k_ = 5 # dimension of factors Z sigma2_Z = eye(k_) # factor covariance sig2_U = 0.8 stepsize = len(nstep) R2_Reg = zeros((stepsize, 1)) R2_CS = zeros((stepsize, 1)) R2_XReg = zeros((stepsize, 1)) R2_XCS = zeros((stepsize, 1)) for n in range(stepsize): # ## Generate a sample from the joint distribution of the factors and residuals mu_ZU = zeros((k_ + nstep[n], 1)) # expectation sig2_ZU = zeros((k_, nstep[n])) # systematic condition d = sig2_U*ones((nstep[n], 1)) sigma2_U = np.diagflat(d * d) # idiosyncratic condition sigma2_ZU = r_[r_['-1',sigma2_Z, sig2_ZU], r_['-1',sig2_ZU.T, sigma2_U]] # covariance Z_U,_ = NormalScenarios(mu_ZU, sigma2_ZU, j_) # joint sample # Z_U = Z_U.T # ensure Z_U is (k_ + n_) x nsim # ## Generate target sample 
according to systematic-idiosyncratic LFM Z = Z_U[:k_,:] # observable factors sample U = Z_U[k_:,:] # observable residuals sample beta_XZ = randn(nstep[n], k_) # observable loadings i_n = eye(nstep[n]) X = r_['-1',beta_XZ, i_n]@Z_U # target sample sigma2_X = beta_XZ@sigma2_Z@beta_XZ.T + sigma2_U # (low-rank diagonal) covariance sigma2_XZ = beta_XZ@sigma2_Z # covariance of target and factors invres2 = np.diagflat(1 / (d * d)) # inverse residuals covariance inv_sig2 = invres2-(invres2@beta_XZ).dot(pinv(beta_XZ.T@invres2@beta_XZ + solve(sigma2_Z,eye(sigma2_Z.shape[0]))))@beta_XZ.T@invres2 # inverse residuals covariance # ## Recovered regression factors beta_Reg = (sigma2_XZ.T)@inv_sig2 # regression loadings of Z over X Z_Reg = beta_Reg@X # regression recovered factor sample # ## Recovered cross-sectional factors beta_fa = beta_XZ invres2_fa = invres2 beta_CS = solve(beta_fa.T@invres2_fa@beta_fa,beta_fa.T@invres2_fa) # pseudo inverse Z_CS = beta_CS@X # cross-sectional extracted factor sample # ## Recover X via regression of X over Z beta_XZReg = sigma2_XZ@solve(sigma2_Z,eye(sigma2_Z.shape[0])) # regression loadings of X over Z X_Reg = beta_XZReg@Z # regression recovered target # ## Compute X via cross-sectional on Z gamma = solve(beta_XZ.T@invres2@beta_XZ + solve(sigma2_Z,eye(sigma2_Z.shape[0])),beta_XZ.T)@invres2 X_CS = beta_XZ@gamma@X # ## Compute the r-square at dimension nstep[n] R2_Reg[n] = MultivRsquare(cov(Z_Reg-Z), sigma2_Z, eye(k_)) R2_CS[n] = MultivRsquare(cov(Z_CS-Z), sigma2_Z, eye(k_)) R2_XReg[n] = MultivRsquare(cov(X_Reg-X), sigma2_X, sigma2_X) R2_XCS[n] = MultivRsquare(cov(X_CS-X), sigma2_X, sigma2_X) # - # ## Scatter plot Z vs factor replications figure() scatter(Z_Reg[0], Z[0], marker='*') scatter(Z_CS[0], Z[0], marker='o',facecolors='none', color=[1, 0.5, 0]) xlabel('Recovered Factors') ylabel('Z') title('Scatter plot for n = %d and k = %d' % (n_,k_)) legend(['Regression Z', 'Cross-Sec Z']); plt.tight_layout() # save_plot(ax=plt.gca(), extension='png', 
scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1]) # ## Plot the r-squares for each target dimension figure() plot(nstep, R2_Reg, 'r', linewidth=1.2) plot(nstep, R2_CS, 'g', linewidth=1.2) plot(nstep, ones(stepsize), 'b', linewidth=2, ) xlabel(r'$n_{1}$') ylabel(r'$R^{2}$') xlim([min(nstep),max(nstep)]) legend(['Regression $R^2$', 'Cross-Sec $R^2$']); plt.tight_layout() # save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1]) # ## Scatter plot X vs factor replications figure() scatter(X_Reg[0], X[0], marker='*') scatter(X_CS[0], X[0], marker='o', facecolors='none', color=[1, 0.5, 0]) xlabel('Recovered Target') ylabel('X') title('Scatter plot for n = %d and k = %d' % (n_,k_)) legend(['Regression X', 'Cross-Sec X']); plt.tight_layout() # save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1]) # ## Plot the r-squares for each market dimension figure() plot(nstep, R2_XReg, 'r', linewidth=1.2) plot(nstep, R2_XCS, 'g', linewidth=1.2) plot(nstep, ones(stepsize), 'b', lw=2) xlabel('n') ylabel(r'$R^{2}$') xlim([min(nstep),max(nstep)]) legend(['Regression $R^2$', 'Cross-Sec $R^2$']); plt.tight_layout() # save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.4' # jupytext_version: 1.1.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # S_FactorReplicationTest [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_FactorReplicationTest&codeLang=Python) # For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-cross-sec-reg-num-test). # ## Prepare the environment # + import os import os.path as path import sys sys.path.append(path.abspath('../../functions-legacy')) import numpy as np from numpy import ones, zeros, cov, eye, r_ from numpy.linalg import solve, pinv from numpy.random import randn import matplotlib.pyplot as plt from matplotlib.pyplot import figure, plot, legend, xlim, scatter, ylabel, \ xlabel, title plt.style.use('seaborn') from ARPM_utils import save_plot from NormalScenarios import NormalScenarios from MultivRsquare import MultivRsquare # input parameters n_ = 100 # max dimension of target X nstep = range(10,n_+1) # target dimension steps j_ = 1000 # number of simulations k_ = 5 # dimension of factors Z sigma2_Z = eye(k_) # factor covariance sig2_U = 0.8 stepsize = len(nstep) R2_Reg = zeros((stepsize, 1)) R2_CS = zeros((stepsize, 1)) R2_XReg = zeros((stepsize, 1)) R2_XCS = zeros((stepsize, 1)) for n in range(stepsize): # ## Generate a sample from the joint distribution of the factors and residuals mu_ZU = zeros((k_ + nstep[n], 1)) # expectation sig2_ZU = zeros((k_, nstep[n])) # systematic condition d = sig2_U*ones((nstep[n], 1)) sigma2_U = np.diagflat(d * d) # idiosyncratic condition sigma2_ZU = r_[r_['-1',sigma2_Z, sig2_ZU], r_['-1',sig2_ZU.T, sigma2_U]] # covariance Z_U,_ = NormalScenarios(mu_ZU, sigma2_ZU, j_) # joint sample # Z_U = Z_U.T # ensure Z_U is (k_ + n_) x nsim # ## Generate target sample 
according to systematic-idiosyncratic LFM Z = Z_U[:k_,:] # observable factors sample U = Z_U[k_:,:] # observable residuals sample beta_XZ = randn(nstep[n], k_) # observable loadings i_n = eye(nstep[n]) X = r_['-1',beta_XZ, i_n]@Z_U # target sample sigma2_X = beta_XZ@sigma2_Z@beta_XZ.T + sigma2_U # (low-rank diagonal) covariance sigma2_XZ = beta_XZ@sigma2_Z # covariance of target and factors invres2 = np.diagflat(1 / (d * d)) # inverse residuals covariance inv_sig2 = invres2-(invres2@beta_XZ).dot(pinv(beta_XZ.T@invres2@beta_XZ + solve(sigma2_Z,eye(sigma2_Z.shape[0]))))@beta_XZ.T@invres2 # inverse residuals covariance # ## Recovered regression factors beta_Reg = (sigma2_XZ.T)@inv_sig2 # regression loadings of Z over X Z_Reg = beta_Reg@X # regression recovered factor sample # ## Recovered cross-sectional factors beta_fa = beta_XZ invres2_fa = invres2 beta_CS = solve(beta_fa.T@invres2_fa@beta_fa,beta_fa.T@invres2_fa) # pseudo inverse Z_CS = beta_CS@X # cross-sectional extracted factor sample # ## Recover X via regression of X over Z beta_XZReg = sigma2_XZ@solve(sigma2_Z,eye(sigma2_Z.shape[0])) # regression loadings of X over Z X_Reg = beta_XZReg@Z # regression recovered target # ## Compute X via cross-sectional on Z gamma = solve(beta_XZ.T@invres2@beta_XZ + solve(sigma2_Z,eye(sigma2_Z.shape[0])),beta_XZ.T)@invres2 X_CS = beta_XZ@gamma@X # ## Compute the r-square at dimension nstep[n] R2_Reg[n] = MultivRsquare(cov(Z_Reg-Z), sigma2_Z, eye(k_)) R2_CS[n] = MultivRsquare(cov(Z_CS-Z), sigma2_Z, eye(k_)) R2_XReg[n] = MultivRsquare(cov(X_Reg-X), sigma2_X, sigma2_X) R2_XCS[n] = MultivRsquare(cov(X_CS-X), sigma2_X, sigma2_X) # - # ## Scatter plot Z vs factor replications figure() scatter(Z_Reg[0], Z[0], marker='*') scatter(Z_CS[0], Z[0], marker='o',facecolors='none', color=[1, 0.5, 0]) xlabel('Recovered Factors') ylabel('Z') title('Scatter plot for n = %d and k = %d' % (n_,k_)) legend(['Regression Z', 'Cross-Sec Z']); plt.tight_layout() # save_plot(ax=plt.gca(), extension='png', 
scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1]) # ## Plot the r-squares for each target dimension figure() plot(nstep, R2_Reg, 'r', linewidth=1.2) plot(nstep, R2_CS, 'g', linewidth=1.2) plot(nstep, ones(stepsize), 'b', linewidth=2, ) xlabel(r'$n_{1}$') ylabel(r'$R^{2}$') xlim([min(nstep),max(nstep)]) legend(['Regression $R^2$', 'Cross-Sec $R^2$']); plt.tight_layout() # save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1]) # ## Scatter plot X vs factor replications figure() scatter(X_Reg[0], X[0], marker='*') scatter(X_CS[0], X[0], marker='o', facecolors='none', color=[1, 0.5, 0]) xlabel('Recovered Target') ylabel('X') title('Scatter plot for n = %d and k = %d' % (n_,k_)) legend(['Regression X', 'Cross-Sec X']); plt.tight_layout() # save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1]) # ## Plot the r-squares for each market dimension figure() plot(nstep, R2_XReg, 'r', linewidth=1.2) plot(nstep, R2_XCS, 'g', linewidth=1.2) plot(nstep, ones(stepsize), 'b', lw=2) xlabel('n') ylabel(r'$R^{2}$') xlim([min(nstep),max(nstep)]) legend(['Regression $R^2$', 'Cross-Sec $R^2$']); plt.tight_layout() # save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
en
0.554366
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.4' # jupytext_version: 1.1.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # S_FactorReplicationTest [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_FactorReplicationTest&codeLang=Python) # For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-cross-sec-reg-num-test). # ## Prepare the environment # + # input parameters # max dimension of target X # target dimension steps # number of simulations # dimension of factors Z # factor covariance # ## Generate a sample from the joint distribution of the factors and residuals # expectation # systematic condition # idiosyncratic condition # covariance # joint sample # Z_U = Z_U.T # ensure Z_U is (k_ + n_) x nsim # ## Generate target sample according to systematic-idiosyncratic LFM # observable factors sample # observable residuals sample # observable loadings # target sample # (low-rank diagonal) covariance # covariance of target and factors # inverse residuals covariance # inverse residuals covariance # ## Recovered regression factors # regression loadings of Z over X # regression recovered factor sample # ## Recovered cross-sectional factors # pseudo inverse # cross-sectional extracted factor sample # ## Recover X via regression of X over Z # regression loadings of X over Z # regression recovered target # ## Compute X via cross-sectional on Z # ## Compute the r-square at dimension nstep[n] # - # ## Scatter plot Z vs factor replications # save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1]) # ## Plot the r-squares for each target dimension # save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1]) # ## 
Scatter plot X vs factor replications # save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1]) # ## Plot the r-squares for each market dimension # save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
2.28809
2
lqg/plotter.py
brain-research/mirage-rl
15
6622961
#!python import os import numpy as np import matplotlib.pyplot as plt plt.ion() from matplotlib import rc rc('text', usetex=True) import seaborn as sns import matplotlib.patches as mpatches color_list = sns.color_palette("muted", 10) sns.palplot(color_list) def plot_trajs(s, filename, title, ylim=(-2,6), xlim=(-2,6)): print(filename, title) if os.path.exists(filename + '.npz'): s = np.load(filename + '.npz')['s'] else: np.savez(filename, s=s) assert s.shape[2] == 4 plt.figure( figsize=(6,6)) for i in range(s.shape[1]): plt.plot(s[:,i,0], s[:,i,1], zorder=1) plt.scatter(s[:,i,0], s[:,i,1], zorder=2) plt.title(title) plt.xlim(xlim) plt.ylim(ylim) plt.grid(alpha=0.5) plt.savefig(filename + '.png') plt.close() def plot_multiple_trajs(filenames, outfile, titles, ylim=(-2,6), xlim=(-2, 6)): n = len(filenames) dats = [] for i in range(n): dats.append(np.load(filenames[i]+'.npz')['s']) m = dats[0].shape[1] fig, axes = plt.subplots(1, n, figsize=(20, 3)) for i in range(n): ax = axes[i] for j in range(m): ax.plot(dats[i][:,j,0], dats[i][:,j,1],color=color_list[j], alpha=0.5, zorder=1, linewidth=0.5) ax.scatter(dats[i][:,j,0],dats[i][:,j,1], color=color_list[j], alpha=0.5, zorder=2, linewidth=0.5) ax.tick_params(axis='both', which='major', labelsize=11) ax.tick_params(axis='both', which='minor', labelsize=11) ax.set_title(titles[i]) ax.set_xlim(xlim) ax.set_ylim(ylim) ax.grid(alpha=0.5) fig.savefig('%s.pdf' % outfile , bbox_inches='tight', dpi=200, format='pdf') def plot_multiple_variances(filenames, outfile, titles, ylim=(-10,28), xlim=(0, 100)): n = len(filenames) dats = [] for i in range(n): d = np.load(filenames[i]+'.npz')['dat'] dict_ = {k: d.item().get(k) for k in list(d.item().keys())} dats.append(dict_) keys = sorted(dats[i].keys()) fig, axes = plt.subplots(1, n, figsize=(20, 3)) for i in range(n): print(i, n, keys, len(color_list)) ax = axes[i] for j, key in enumerate(keys): ax.plot(dats[i][key], label=key, color=color_list[j]) ax.tick_params(axis='both', 
which='major', labelsize=11) ax.tick_params(axis='both', which='minor', labelsize=11) ax.set_xlabel('time horizon (t)') ax.set_title(titles[i]) if i == 0: ax.set_ylabel(r'$\log |\Sigma|$') ax.set_xlim(xlim) ax.set_ylim(ylim) ax.grid(alpha=0.5) hs = [] for j, key in enumerate(keys): hs.append(mpatches.Patch(color=color_list[j], label=key)) leg = fig.legend(handles=hs, loc='lower center', ncol=6, prop={'size': 14}) bb = leg.get_bbox_to_anchor().inverse_transformed(ax.transAxes) bb.y0 += -0.30 leg.set_bbox_to_anchor(bb, transform = ax.transAxes) fig.savefig('%s.pdf' % outfile , bbox_inches='tight', dpi=200, format='pdf') plt.close() def plot_variances(dat, filename, title, ylim=(-10,28), xlim=(0,100)): print(filename, title) if dat == None and os.path.exists(filename + '.npz'): dat_ = np.load(filename + '.npz')['dat'] dat = dict() for k in sorted(list(dat_.item().keys())): dat[k] = dat_.item().get(k) else: np.savez(filename, dat=dat) sorted_keys = sorted(dat.keys()) plt.figure(figsize=(6,6)) for i, key in enumerate(sorted_keys): plt.plot(dat[key], label=key, linewidth=2.0, color=color_list[i]) plt.legend(loc='lower left') plt.title(title) plt.ylabel(r'$\log |\Sigma|$') plt.xlabel('time horizon (t)') if ylim is not None: plt.ylim(ylim) if xlim is not None: plt.xlim(xlim) plt.grid(alpha=0.5) plt.savefig('%s.png' % filename, bbox_inches='tight') plt.savefig('%s.pdf' % filename, bbox_inches='tight', dpi=200, format='pdf') plt.close() if __name__ == '__main__': #fileids = [0, 150, 300, 600] #fileids = list(range(0, 700, 150)) # plot first up to first 1000 updates fileids = list(range(0, 1000, 10)) # plot first up to first 1000 updates trial = 't1' titles = ['\#updates=%04d' % i for i in fileids] filenames = ['%s_variances_%04d' % (trial, i) for i in fileids] # variance plots #filenames = ['%s_traj_%04d' % (trial, i) for i in fileids] # traj plots outfile = 'out_%s' % trial # variance plots #outfile = 'out_%s_traj' % trial # traj plots #plot_multiple_variances(filenames, 
outfile, titles) # variance plots #plot_multiple_trajs(filenames, outfile, titles) # traj plots for f, t in zip(filenames, titles): plot_variances(dat=None, filename=f, title=t) # variance plots #plot_trajs(s=None, filename=f, title=t) # traj plots
#!python import os import numpy as np import matplotlib.pyplot as plt plt.ion() from matplotlib import rc rc('text', usetex=True) import seaborn as sns import matplotlib.patches as mpatches color_list = sns.color_palette("muted", 10) sns.palplot(color_list) def plot_trajs(s, filename, title, ylim=(-2,6), xlim=(-2,6)): print(filename, title) if os.path.exists(filename + '.npz'): s = np.load(filename + '.npz')['s'] else: np.savez(filename, s=s) assert s.shape[2] == 4 plt.figure( figsize=(6,6)) for i in range(s.shape[1]): plt.plot(s[:,i,0], s[:,i,1], zorder=1) plt.scatter(s[:,i,0], s[:,i,1], zorder=2) plt.title(title) plt.xlim(xlim) plt.ylim(ylim) plt.grid(alpha=0.5) plt.savefig(filename + '.png') plt.close() def plot_multiple_trajs(filenames, outfile, titles, ylim=(-2,6), xlim=(-2, 6)): n = len(filenames) dats = [] for i in range(n): dats.append(np.load(filenames[i]+'.npz')['s']) m = dats[0].shape[1] fig, axes = plt.subplots(1, n, figsize=(20, 3)) for i in range(n): ax = axes[i] for j in range(m): ax.plot(dats[i][:,j,0], dats[i][:,j,1],color=color_list[j], alpha=0.5, zorder=1, linewidth=0.5) ax.scatter(dats[i][:,j,0],dats[i][:,j,1], color=color_list[j], alpha=0.5, zorder=2, linewidth=0.5) ax.tick_params(axis='both', which='major', labelsize=11) ax.tick_params(axis='both', which='minor', labelsize=11) ax.set_title(titles[i]) ax.set_xlim(xlim) ax.set_ylim(ylim) ax.grid(alpha=0.5) fig.savefig('%s.pdf' % outfile , bbox_inches='tight', dpi=200, format='pdf') def plot_multiple_variances(filenames, outfile, titles, ylim=(-10,28), xlim=(0, 100)): n = len(filenames) dats = [] for i in range(n): d = np.load(filenames[i]+'.npz')['dat'] dict_ = {k: d.item().get(k) for k in list(d.item().keys())} dats.append(dict_) keys = sorted(dats[i].keys()) fig, axes = plt.subplots(1, n, figsize=(20, 3)) for i in range(n): print(i, n, keys, len(color_list)) ax = axes[i] for j, key in enumerate(keys): ax.plot(dats[i][key], label=key, color=color_list[j]) ax.tick_params(axis='both', 
which='major', labelsize=11) ax.tick_params(axis='both', which='minor', labelsize=11) ax.set_xlabel('time horizon (t)') ax.set_title(titles[i]) if i == 0: ax.set_ylabel(r'$\log |\Sigma|$') ax.set_xlim(xlim) ax.set_ylim(ylim) ax.grid(alpha=0.5) hs = [] for j, key in enumerate(keys): hs.append(mpatches.Patch(color=color_list[j], label=key)) leg = fig.legend(handles=hs, loc='lower center', ncol=6, prop={'size': 14}) bb = leg.get_bbox_to_anchor().inverse_transformed(ax.transAxes) bb.y0 += -0.30 leg.set_bbox_to_anchor(bb, transform = ax.transAxes) fig.savefig('%s.pdf' % outfile , bbox_inches='tight', dpi=200, format='pdf') plt.close() def plot_variances(dat, filename, title, ylim=(-10,28), xlim=(0,100)): print(filename, title) if dat == None and os.path.exists(filename + '.npz'): dat_ = np.load(filename + '.npz')['dat'] dat = dict() for k in sorted(list(dat_.item().keys())): dat[k] = dat_.item().get(k) else: np.savez(filename, dat=dat) sorted_keys = sorted(dat.keys()) plt.figure(figsize=(6,6)) for i, key in enumerate(sorted_keys): plt.plot(dat[key], label=key, linewidth=2.0, color=color_list[i]) plt.legend(loc='lower left') plt.title(title) plt.ylabel(r'$\log |\Sigma|$') plt.xlabel('time horizon (t)') if ylim is not None: plt.ylim(ylim) if xlim is not None: plt.xlim(xlim) plt.grid(alpha=0.5) plt.savefig('%s.png' % filename, bbox_inches='tight') plt.savefig('%s.pdf' % filename, bbox_inches='tight', dpi=200, format='pdf') plt.close() if __name__ == '__main__': #fileids = [0, 150, 300, 600] #fileids = list(range(0, 700, 150)) # plot first up to first 1000 updates fileids = list(range(0, 1000, 10)) # plot first up to first 1000 updates trial = 't1' titles = ['\#updates=%04d' % i for i in fileids] filenames = ['%s_variances_%04d' % (trial, i) for i in fileids] # variance plots #filenames = ['%s_traj_%04d' % (trial, i) for i in fileids] # traj plots outfile = 'out_%s' % trial # variance plots #outfile = 'out_%s_traj' % trial # traj plots #plot_multiple_variances(filenames, 
outfile, titles) # variance plots #plot_multiple_trajs(filenames, outfile, titles) # traj plots for f, t in zip(filenames, titles): plot_variances(dat=None, filename=f, title=t) # variance plots #plot_trajs(s=None, filename=f, title=t) # traj plots
en
0.38265
#!python #fileids = [0, 150, 300, 600] #fileids = list(range(0, 700, 150)) # plot first up to first 1000 updates # plot first up to first 1000 updates #updates=%04d' % i for i in fileids] # variance plots #filenames = ['%s_traj_%04d' % (trial, i) for i in fileids] # traj plots # variance plots #outfile = 'out_%s_traj' % trial # traj plots #plot_multiple_variances(filenames, outfile, titles) # variance plots #plot_multiple_trajs(filenames, outfile, titles) # traj plots # variance plots #plot_trajs(s=None, filename=f, title=t) # traj plots
2.6264
3
pisat/sensor/sam_m8q.py
jjj999/pisat
1
6622962
<gh_stars>1-10 from typing import Optional, Tuple, Union from pisat.handler.i2c_handler_base import I2CHandlerBase from pisat.handler.serial_handler_base import SerialHandlerBase from pisat.model.datamodel import DataModelBase, loggable from pisat.sensor.sensor_base import HandlerMismatchError, SensorBase from pisat.sensor.serial_gps import SerialGPS class UARTSamM8Q(SerialGPS): pass # TODO I2C ver. class I2CSamM8Q(SensorBase): pass # TODO I2C ver. class SamM8Q(SensorBase): class DataModel(DataModelBase): def setup(self, time_utc: Optional[Tuple[Union[int, float]]] = None, latitude: Optional[float] = None, longitude: Optional[float] = None, altitude: Optional[float] = None): self._time_utc = time_utc self._latitude = latitude self._longitude = longitude self._altitude = altitude @loggable def time_utc(self): return self._time_utc @time_utc.formatter def time_utc(self): name = self.get_tag("time_utc") value = None if self._time_utc is not None: value = f"{self._time_utc[0]}:{self._time_utc[1]}:{self._time_utc[2]}" return {name: value} @loggable def latitude(self): return self._latitude @loggable def longitude(self): return self._longitude @loggable def altitude(self): return self._altitude def __init__(self, handler: Union[I2CHandlerBase, SerialHandlerBase], name: Optional[str] = None) -> None: super().__init__(name=name) if isinstance(handler, SerialHandlerBase): self._base = UARTSamM8Q(handler=handler) elif isinstance(handler, I2CHandlerBase): self._base = I2CSamM8Q(handler=handler) else: raise HandlerMismatchError( "'handler' must be for UART or I2C." ) self._handler = handler def read(self): return self._base.read()
from typing import Optional, Tuple, Union from pisat.handler.i2c_handler_base import I2CHandlerBase from pisat.handler.serial_handler_base import SerialHandlerBase from pisat.model.datamodel import DataModelBase, loggable from pisat.sensor.sensor_base import HandlerMismatchError, SensorBase from pisat.sensor.serial_gps import SerialGPS class UARTSamM8Q(SerialGPS): pass # TODO I2C ver. class I2CSamM8Q(SensorBase): pass # TODO I2C ver. class SamM8Q(SensorBase): class DataModel(DataModelBase): def setup(self, time_utc: Optional[Tuple[Union[int, float]]] = None, latitude: Optional[float] = None, longitude: Optional[float] = None, altitude: Optional[float] = None): self._time_utc = time_utc self._latitude = latitude self._longitude = longitude self._altitude = altitude @loggable def time_utc(self): return self._time_utc @time_utc.formatter def time_utc(self): name = self.get_tag("time_utc") value = None if self._time_utc is not None: value = f"{self._time_utc[0]}:{self._time_utc[1]}:{self._time_utc[2]}" return {name: value} @loggable def latitude(self): return self._latitude @loggable def longitude(self): return self._longitude @loggable def altitude(self): return self._altitude def __init__(self, handler: Union[I2CHandlerBase, SerialHandlerBase], name: Optional[str] = None) -> None: super().__init__(name=name) if isinstance(handler, SerialHandlerBase): self._base = UARTSamM8Q(handler=handler) elif isinstance(handler, I2CHandlerBase): self._base = I2CSamM8Q(handler=handler) else: raise HandlerMismatchError( "'handler' must be for UART or I2C." ) self._handler = handler def read(self): return self._base.read()
tr
0.268247
# TODO I2C ver. # TODO I2C ver.
2.543579
3
tests/test_utils.py
gfalcone/mlserve
26
6622963
import unittest import pandas as pd from serveml.inputs import BasicFeedbackInput from serveml.utils import ( dict_to_pandas, pandas_to_dict, pydantic_model_to_pandas, ) class TestUtils(unittest.TestCase): def test_parsing_dict_to_pandas(self): item = {"item_id": 0, "item_name": "coconut"} df = dict_to_pandas(item) self.assertIsInstance(df, pd.DataFrame) self.assertEqual(len(df), 1) def test_parsing_pandas_to_dict(self): item = {"item_id": 0, "item_name": "coconut"} df = dict_to_pandas(item) self.assertEqual(pandas_to_dict(df), [item]) def test_pydantic_model_to_pandas(self): feedback = BasicFeedbackInput(status=True, request_id="coconut") result = pydantic_model_to_pandas(feedback) item = { "request_id": "coconut", "status": True, "expected_result": None, } self.assertTrue(result.equals(dict_to_pandas(item)))
import unittest import pandas as pd from serveml.inputs import BasicFeedbackInput from serveml.utils import ( dict_to_pandas, pandas_to_dict, pydantic_model_to_pandas, ) class TestUtils(unittest.TestCase): def test_parsing_dict_to_pandas(self): item = {"item_id": 0, "item_name": "coconut"} df = dict_to_pandas(item) self.assertIsInstance(df, pd.DataFrame) self.assertEqual(len(df), 1) def test_parsing_pandas_to_dict(self): item = {"item_id": 0, "item_name": "coconut"} df = dict_to_pandas(item) self.assertEqual(pandas_to_dict(df), [item]) def test_pydantic_model_to_pandas(self): feedback = BasicFeedbackInput(status=True, request_id="coconut") result = pydantic_model_to_pandas(feedback) item = { "request_id": "coconut", "status": True, "expected_result": None, } self.assertTrue(result.equals(dict_to_pandas(item)))
none
1
3.110647
3
features/steps/exporting.py
eblade/images4
1
6622964
<filename>features/steps/exporting.py import logging from behave import * from hamcrest import * from hamcrest.library.collection.issequence_containinginanyorder import contains_inanyorder from images import ExportJob, EXPORTABLE from images.export_job import ExportJobDescriptor, create_export_job, get_export_jobs_by_entry_id from images.location import get_location_by_name, get_locations_by_type from images.user import get_user_by_id from images.entry import EntryDescriptor, get_entry_by_source @when('the user creates a specific set of export jobs') def step_impl(context): for row in context.table: data = {key: row[key] for key in context.table.headings} data['location'] = get_location_by_name(data.pop('location')) data['entry'] = EntryDescriptor(id=data.pop('entry_id')) ejd = ExportJobDescriptor(**data) md = ExportJob.DefaultExportJobMetadata(**data) ejd.metadata = md create_export_job(ejd) @then('the entry with id {entry_id:d} should have the following export jobs') def step_impl(context, entry_id): expected_jobs = [ {key: int(row[key]) if key.endswith('_id') else row[key] for key in context.table.headings} for row in context.table ] actual_jobs = [ dict( entry_id = job.entry.id if job.entry is not None else None, location = job.location.name if job.location is not None else None, path = job.metadata.path if job.metadata is not None else None, user = get_user_by_id(job.user_id).name, ) for job in get_export_jobs_by_entry_id(entry_id).entries ] print(get_export_jobs_by_entry_id(entry_id).to_json()) assert_that(actual_jobs, contains_inanyorder(*expected_jobs)) @then('possible export destinations should be as follows') def step_impl(context): expected_locations = [row['name'] for row in context.table] feed = get_locations_by_type(*EXPORTABLE) actual_locations = [location.name for location in feed.entries] assert_that(actual_locations, contains_inanyorder(*expected_locations))
<filename>features/steps/exporting.py import logging from behave import * from hamcrest import * from hamcrest.library.collection.issequence_containinginanyorder import contains_inanyorder from images import ExportJob, EXPORTABLE from images.export_job import ExportJobDescriptor, create_export_job, get_export_jobs_by_entry_id from images.location import get_location_by_name, get_locations_by_type from images.user import get_user_by_id from images.entry import EntryDescriptor, get_entry_by_source @when('the user creates a specific set of export jobs') def step_impl(context): for row in context.table: data = {key: row[key] for key in context.table.headings} data['location'] = get_location_by_name(data.pop('location')) data['entry'] = EntryDescriptor(id=data.pop('entry_id')) ejd = ExportJobDescriptor(**data) md = ExportJob.DefaultExportJobMetadata(**data) ejd.metadata = md create_export_job(ejd) @then('the entry with id {entry_id:d} should have the following export jobs') def step_impl(context, entry_id): expected_jobs = [ {key: int(row[key]) if key.endswith('_id') else row[key] for key in context.table.headings} for row in context.table ] actual_jobs = [ dict( entry_id = job.entry.id if job.entry is not None else None, location = job.location.name if job.location is not None else None, path = job.metadata.path if job.metadata is not None else None, user = get_user_by_id(job.user_id).name, ) for job in get_export_jobs_by_entry_id(entry_id).entries ] print(get_export_jobs_by_entry_id(entry_id).to_json()) assert_that(actual_jobs, contains_inanyorder(*expected_jobs)) @then('possible export destinations should be as follows') def step_impl(context): expected_locations = [row['name'] for row in context.table] feed = get_locations_by_type(*EXPORTABLE) actual_locations = [location.name for location in feed.entries] assert_that(actual_locations, contains_inanyorder(*expected_locations))
none
1
1.975347
2
apocalypse/utils/backgroundJob.py
dhoomakethu/terminator
6
6622965
import threading from apocalypse.utils.logger import get_logger logger = get_logger() class BackgroundJob(threading.Thread): def __init__(self, name, interval, function, *funcargs, **funckwargs): threading.Thread.__init__(self) self._name = name self.interval = interval self.task = function self.stop_timer = threading.Event() self.funcargs = funcargs self.funckwargs = funckwargs def run(self): logger.debug("Start %s thread" % self._name) while not self.stop_timer.is_set(): if not self.stop_timer.is_set(): self.task(*self.funcargs, **self.funckwargs) self.stop_timer.wait(self.interval) logger.debug("Stop %s thread" % self._name) def cancel(self): self.stop_timer.set()
import threading from apocalypse.utils.logger import get_logger logger = get_logger() class BackgroundJob(threading.Thread): def __init__(self, name, interval, function, *funcargs, **funckwargs): threading.Thread.__init__(self) self._name = name self.interval = interval self.task = function self.stop_timer = threading.Event() self.funcargs = funcargs self.funckwargs = funckwargs def run(self): logger.debug("Start %s thread" % self._name) while not self.stop_timer.is_set(): if not self.stop_timer.is_set(): self.task(*self.funcargs, **self.funckwargs) self.stop_timer.wait(self.interval) logger.debug("Stop %s thread" % self._name) def cancel(self): self.stop_timer.set()
none
1
2.694097
3
Pendulum/simple_pendulum.py
umairkarel/Beginner-Projects
0
6622966
<reponame>umairkarel/Beginner-Projects import pygame import math SCREEN_WIDTH = 300 SCREEN_HEIGHT = 300 WHITE = (255, 255, 255) BLACK = (0,0,0) FPS = 60 pygame.init() screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT)) clock = pygame.time.Clock() dragging = False length = 95 origin = [int(SCREEN_WIDTH/2), 0] bob = [int(SCREEN_WIDTH/2),length] angle = math.pi/4 aVel = 0 aAcc = 0 r = 20 gravity = 0.8 damping = 0.995 def update(): global angle global aVel global aAcc if not dragging: aAcc = (-gravity/length)*math.sin(angle) aVel += aAcc aVel *= damping angle += aVel bob[0] = math.floor(origin[0] + length * math.sin(angle)) bob[1] = math.floor(origin[1] + length * math.cos(angle)) def draw(): global circle pygame.draw.line(screen, (0,0,0), (origin), (bob)) circle = pygame.draw.circle(screen, BLACK, (bob), r, 0) running = True while running: update() for event in pygame.event.get(): if event.type == pygame.QUIT: running = False elif event.type == pygame.MOUSEBUTTONDOWN: if event.button == 1: if circle.collidepoint(event.pos): dragging = True mouse_x, mouse_y = event.pos aVel = 0 elif event.type == pygame.MOUSEBUTTONUP: if event.button == 1: dragging = False elif event.type == pygame.MOUSEMOTION: if dragging: mouse_x, mouse_y = event.pos try: angle = (math.atan((mouse_x-origin[0])/mouse_y)) except: angle = math.pi/2 if mouse_x>200 else -math.pi/2 bob[0] = math.floor(origin[0] + length * math.sin(angle)) bob[1] = math.floor(origin[1] + length * math.cos(angle)) screen.fill(WHITE) draw() pygame.display.flip() clock.tick(FPS) pygame.quit()
import pygame import math SCREEN_WIDTH = 300 SCREEN_HEIGHT = 300 WHITE = (255, 255, 255) BLACK = (0,0,0) FPS = 60 pygame.init() screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT)) clock = pygame.time.Clock() dragging = False length = 95 origin = [int(SCREEN_WIDTH/2), 0] bob = [int(SCREEN_WIDTH/2),length] angle = math.pi/4 aVel = 0 aAcc = 0 r = 20 gravity = 0.8 damping = 0.995 def update(): global angle global aVel global aAcc if not dragging: aAcc = (-gravity/length)*math.sin(angle) aVel += aAcc aVel *= damping angle += aVel bob[0] = math.floor(origin[0] + length * math.sin(angle)) bob[1] = math.floor(origin[1] + length * math.cos(angle)) def draw(): global circle pygame.draw.line(screen, (0,0,0), (origin), (bob)) circle = pygame.draw.circle(screen, BLACK, (bob), r, 0) running = True while running: update() for event in pygame.event.get(): if event.type == pygame.QUIT: running = False elif event.type == pygame.MOUSEBUTTONDOWN: if event.button == 1: if circle.collidepoint(event.pos): dragging = True mouse_x, mouse_y = event.pos aVel = 0 elif event.type == pygame.MOUSEBUTTONUP: if event.button == 1: dragging = False elif event.type == pygame.MOUSEMOTION: if dragging: mouse_x, mouse_y = event.pos try: angle = (math.atan((mouse_x-origin[0])/mouse_y)) except: angle = math.pi/2 if mouse_x>200 else -math.pi/2 bob[0] = math.floor(origin[0] + length * math.sin(angle)) bob[1] = math.floor(origin[1] + length * math.cos(angle)) screen.fill(WHITE) draw() pygame.display.flip() clock.tick(FPS) pygame.quit()
none
1
3.512776
4
src/indra_cogex/client/subnetwork.py
kkaris/indra_cogex
0
6622967
"""Queries that generate statement subnetworks.""" from typing import Iterable, List, Tuple from indra.statements import Statement from .neo4j_client import Neo4jClient, autoclient from .queries import get_genes_for_go_term, get_genes_in_tissue from ..representation import Relation, indra_stmts_from_relations, norm_id __all__ = [ "indra_subnetwork", "indra_subnetwork_relations", "indra_mediated_subnetwork", "indra_subnetwork_tissue", "indra_subnetwork_go", ] @autoclient() def indra_subnetwork_relations( nodes: Iterable[Tuple[str, str]], *, client: Neo4jClient ) -> List[Relation]: """Return the subnetwork induced by the given nodes as a set of Relations. Parameters ---------- client : The Neo4j client. nodes : The nodes to query. Returns ------- : The subnetwork induced by the given nodes represented as Relation objects. """ nodes_str = ", ".join(["'%s'" % norm_id(*node) for node in nodes]) query = """MATCH p=(n1:BioEntity)-[r:indra_rel]->(n2:BioEntity) WHERE n1.id IN [%s] AND n2.id IN [%s] AND n1.id <> n2.id RETURN p""" % ( nodes_str, nodes_str, ) return client.query_relations(query) @autoclient() def indra_subnetwork( nodes: Iterable[Tuple[str, str]], *, client: Neo4jClient ) -> List[Statement]: """Return the INDRA Statement subnetwork induced by the given nodes. Parameters ---------- client : The Neo4j client. nodes : The nodes to query. Returns ------- : The subnetwork induced by the given nodes. """ rels = indra_subnetwork_relations(nodes=nodes, client=client) stmts = indra_stmts_from_relations(rels) return stmts @autoclient() def indra_mediated_subnetwork( nodes: Iterable[Tuple[str, str]], *, client: Neo4jClient, ) -> List[Statement]: """Return the INDRA Statement subnetwork induced pairs of statements between the given nodes. For example, if gene A and gene B are given as the query, find statements mediated by X such that A -> X -> B. Parameters ---------- client : The Neo4j client. nodes : The nodes to query. 
Returns ------- : The subnetwork induced by the given nodes. """ return get_two_step_subnetwork( client=client, nodes=nodes, first_forward=True, second_forward=True ) @autoclient() def indra_shared_downstream_subnetwork( nodes: Iterable[Tuple[str, str]], *, client: Neo4jClient, ) -> List[Statement]: """Return the INDRA Statement subnetwork induced by shared downstream targets of nodes in the query. For example, if gene A and gene B are given as the query, find statements to shared downstream entity X such that A -> X <- B. Parameters ---------- client : The Neo4j client. nodes : The nodes to query. Returns ------- : The subnetwork induced by the given nodes. """ return get_two_step_subnetwork( client=client, nodes=nodes, first_forward=True, second_forward=False ) @autoclient() def indra_shared_upstream_subnetwork( nodes: Iterable[Tuple[str, str]], *, client: Neo4jClient, ) -> List[Statement]: """Return the INDRA Statement subnetwork induced by shared upstream controllers of nodes in the query. For example, if gene A and gene B are given as the query, find statements to shared upstream entity X such that A <- X -> B. Parameters ---------- client : The Neo4j client. nodes : The nodes to query. Returns ------- : The subnetwork induced by the given nodes. """ return get_two_step_subnetwork( client=client, nodes=nodes, first_forward=False, second_forward=True ) def get_two_step_subnetwork( *, nodes: Iterable[Tuple[str, str]], client: Neo4jClient, first_forward: bool = True, second_forward: bool = True, ) -> List[Statement]: """Return the INDRA Statement subnetwork induced by paths of length two between nodes A and B in a query with intermediate nodes X such that paths look like A-X-B. Parameters ---------- nodes : The nodes to query (A and B are one of these nodes in the following examples). client : The Neo4j client. 
first_forward: If true, query A->X otherwise query A<-X second_forward: If true, query X->B otherwise query X<-B Returns ------- : The INDRA statement subnetwork induced by the query """ nodes_str = ", ".join(["'%s'" % norm_id(*node) for node in nodes]) f1, f2 = ("-", "->") if first_forward else ("<-", "-") s1, s2 = ("-", "->") if second_forward else ("<-", "-") query = f"""\ MATCH p=(n1:BioEntity){f1}[r1:indra_rel]{f2}(n3:BioEntity){s1}[r2:indra_rel]{s2}(n2:BioEntity) WHERE n1.id IN [{nodes_str}] AND n2.id IN [{nodes_str}] AND n1.id <> n2.id AND NOT n3 IN [{nodes_str}] RETURN p """ return _paths_to_stmts(client=client, query=query) def _paths_to_stmts(*, client: Neo4jClient, query: str) -> List[Statement]: """Generate INDRA statements from a query that returns paths of length > 1.""" return indra_stmts_from_relations( relation for path in client.query_tx(query) for relation in client.neo4j_to_relations(path[0]) ) @autoclient() def indra_subnetwork_tissue( nodes: List[Tuple[str, str]], tissue: Tuple[str, str], *, client: Neo4jClient, ) -> List[Statement]: """Return the INDRA Statement subnetwork induced by the given nodes and expressed in the given tissue. Parameters ---------- client : The Neo4j client. nodes : The nodes to query. tissue : The tissue to query. Returns ------- : The subnetwork induced by the given nodes and expressed in the given tissue. """ genes = get_genes_in_tissue(client=client, tissue=tissue) relevant_genes = {g.grounding() for g in genes} & set(nodes) return indra_subnetwork(client, relevant_genes) @autoclient() def indra_subnetwork_go( go_term: Tuple[str, str], *, client: Neo4jClient, include_indirect: bool = False, mediated: bool = False, upstream_controllers: bool = False, downstream_targets: bool = False, ) -> List[Statement]: """Return the INDRA Statement subnetwork induced by the given GO term. Parameters ---------- go_term : The GO term to query. Example: ``("GO", "GO:0006915")`` client : The Neo4j client. 
include_indirect : Should ontological children of the given GO term be queried as well? Defaults to False. mediated: Should relations A->X->B be included for X not associated to the given GO term? Defaults to False. upstream_controllers: Should relations A<-X->B be included for upstream controller X not associated to the given GO term? Defaults to False. downstream_targets: Should relations A->X<-B be included for downstream target X not associated to the given GO term? Defaults to False. Returns ------- : The INDRA statement subnetwork induced by GO term. """ genes = get_genes_for_go_term( client=client, go_term=go_term, include_indirect=include_indirect ) nodes = {g.grounding() for g in genes} rv = indra_subnetwork(client=client, nodes=nodes) if mediated: rv.extend(indra_mediated_subnetwork(client=client, nodes=nodes)) if upstream_controllers: rv.extend(indra_shared_upstream_subnetwork(client=client, nodes=nodes)) if downstream_targets: rv.extend(indra_shared_downstream_subnetwork(client=client, nodes=nodes)) # No deduplication of statements based on the union of # the queries should be necessary since each are disjoint return rv
"""Queries that generate statement subnetworks.""" from typing import Iterable, List, Tuple from indra.statements import Statement from .neo4j_client import Neo4jClient, autoclient from .queries import get_genes_for_go_term, get_genes_in_tissue from ..representation import Relation, indra_stmts_from_relations, norm_id __all__ = [ "indra_subnetwork", "indra_subnetwork_relations", "indra_mediated_subnetwork", "indra_subnetwork_tissue", "indra_subnetwork_go", ] @autoclient() def indra_subnetwork_relations( nodes: Iterable[Tuple[str, str]], *, client: Neo4jClient ) -> List[Relation]: """Return the subnetwork induced by the given nodes as a set of Relations. Parameters ---------- client : The Neo4j client. nodes : The nodes to query. Returns ------- : The subnetwork induced by the given nodes represented as Relation objects. """ nodes_str = ", ".join(["'%s'" % norm_id(*node) for node in nodes]) query = """MATCH p=(n1:BioEntity)-[r:indra_rel]->(n2:BioEntity) WHERE n1.id IN [%s] AND n2.id IN [%s] AND n1.id <> n2.id RETURN p""" % ( nodes_str, nodes_str, ) return client.query_relations(query) @autoclient() def indra_subnetwork( nodes: Iterable[Tuple[str, str]], *, client: Neo4jClient ) -> List[Statement]: """Return the INDRA Statement subnetwork induced by the given nodes. Parameters ---------- client : The Neo4j client. nodes : The nodes to query. Returns ------- : The subnetwork induced by the given nodes. """ rels = indra_subnetwork_relations(nodes=nodes, client=client) stmts = indra_stmts_from_relations(rels) return stmts @autoclient() def indra_mediated_subnetwork( nodes: Iterable[Tuple[str, str]], *, client: Neo4jClient, ) -> List[Statement]: """Return the INDRA Statement subnetwork induced pairs of statements between the given nodes. For example, if gene A and gene B are given as the query, find statements mediated by X such that A -> X -> B. Parameters ---------- client : The Neo4j client. nodes : The nodes to query. 
Returns ------- : The subnetwork induced by the given nodes. """ return get_two_step_subnetwork( client=client, nodes=nodes, first_forward=True, second_forward=True ) @autoclient() def indra_shared_downstream_subnetwork( nodes: Iterable[Tuple[str, str]], *, client: Neo4jClient, ) -> List[Statement]: """Return the INDRA Statement subnetwork induced by shared downstream targets of nodes in the query. For example, if gene A and gene B are given as the query, find statements to shared downstream entity X such that A -> X <- B. Parameters ---------- client : The Neo4j client. nodes : The nodes to query. Returns ------- : The subnetwork induced by the given nodes. """ return get_two_step_subnetwork( client=client, nodes=nodes, first_forward=True, second_forward=False ) @autoclient() def indra_shared_upstream_subnetwork( nodes: Iterable[Tuple[str, str]], *, client: Neo4jClient, ) -> List[Statement]: """Return the INDRA Statement subnetwork induced by shared upstream controllers of nodes in the query. For example, if gene A and gene B are given as the query, find statements to shared upstream entity X such that A <- X -> B. Parameters ---------- client : The Neo4j client. nodes : The nodes to query. Returns ------- : The subnetwork induced by the given nodes. """ return get_two_step_subnetwork( client=client, nodes=nodes, first_forward=False, second_forward=True ) def get_two_step_subnetwork( *, nodes: Iterable[Tuple[str, str]], client: Neo4jClient, first_forward: bool = True, second_forward: bool = True, ) -> List[Statement]: """Return the INDRA Statement subnetwork induced by paths of length two between nodes A and B in a query with intermediate nodes X such that paths look like A-X-B. Parameters ---------- nodes : The nodes to query (A and B are one of these nodes in the following examples). client : The Neo4j client. 
first_forward: If true, query A->X otherwise query A<-X second_forward: If true, query X->B otherwise query X<-B Returns ------- : The INDRA statement subnetwork induced by the query """ nodes_str = ", ".join(["'%s'" % norm_id(*node) for node in nodes]) f1, f2 = ("-", "->") if first_forward else ("<-", "-") s1, s2 = ("-", "->") if second_forward else ("<-", "-") query = f"""\ MATCH p=(n1:BioEntity){f1}[r1:indra_rel]{f2}(n3:BioEntity){s1}[r2:indra_rel]{s2}(n2:BioEntity) WHERE n1.id IN [{nodes_str}] AND n2.id IN [{nodes_str}] AND n1.id <> n2.id AND NOT n3 IN [{nodes_str}] RETURN p """ return _paths_to_stmts(client=client, query=query) def _paths_to_stmts(*, client: Neo4jClient, query: str) -> List[Statement]: """Generate INDRA statements from a query that returns paths of length > 1.""" return indra_stmts_from_relations( relation for path in client.query_tx(query) for relation in client.neo4j_to_relations(path[0]) ) @autoclient() def indra_subnetwork_tissue( nodes: List[Tuple[str, str]], tissue: Tuple[str, str], *, client: Neo4jClient, ) -> List[Statement]: """Return the INDRA Statement subnetwork induced by the given nodes and expressed in the given tissue. Parameters ---------- client : The Neo4j client. nodes : The nodes to query. tissue : The tissue to query. Returns ------- : The subnetwork induced by the given nodes and expressed in the given tissue. """ genes = get_genes_in_tissue(client=client, tissue=tissue) relevant_genes = {g.grounding() for g in genes} & set(nodes) return indra_subnetwork(client, relevant_genes) @autoclient() def indra_subnetwork_go( go_term: Tuple[str, str], *, client: Neo4jClient, include_indirect: bool = False, mediated: bool = False, upstream_controllers: bool = False, downstream_targets: bool = False, ) -> List[Statement]: """Return the INDRA Statement subnetwork induced by the given GO term. Parameters ---------- go_term : The GO term to query. Example: ``("GO", "GO:0006915")`` client : The Neo4j client. 
include_indirect : Should ontological children of the given GO term be queried as well? Defaults to False. mediated: Should relations A->X->B be included for X not associated to the given GO term? Defaults to False. upstream_controllers: Should relations A<-X->B be included for upstream controller X not associated to the given GO term? Defaults to False. downstream_targets: Should relations A->X<-B be included for downstream target X not associated to the given GO term? Defaults to False. Returns ------- : The INDRA statement subnetwork induced by GO term. """ genes = get_genes_for_go_term( client=client, go_term=go_term, include_indirect=include_indirect ) nodes = {g.grounding() for g in genes} rv = indra_subnetwork(client=client, nodes=nodes) if mediated: rv.extend(indra_mediated_subnetwork(client=client, nodes=nodes)) if upstream_controllers: rv.extend(indra_shared_upstream_subnetwork(client=client, nodes=nodes)) if downstream_targets: rv.extend(indra_shared_downstream_subnetwork(client=client, nodes=nodes)) # No deduplication of statements based on the union of # the queries should be necessary since each are disjoint return rv
en
0.842855
Queries that generate statement subnetworks. Return the subnetwork induced by the given nodes as a set of Relations. Parameters ---------- client : The Neo4j client. nodes : The nodes to query. Returns ------- : The subnetwork induced by the given nodes represented as Relation objects. MATCH p=(n1:BioEntity)-[r:indra_rel]->(n2:BioEntity) WHERE n1.id IN [%s] AND n2.id IN [%s] AND n1.id <> n2.id RETURN p Return the INDRA Statement subnetwork induced by the given nodes. Parameters ---------- client : The Neo4j client. nodes : The nodes to query. Returns ------- : The subnetwork induced by the given nodes. Return the INDRA Statement subnetwork induced pairs of statements between the given nodes. For example, if gene A and gene B are given as the query, find statements mediated by X such that A -> X -> B. Parameters ---------- client : The Neo4j client. nodes : The nodes to query. Returns ------- : The subnetwork induced by the given nodes. Return the INDRA Statement subnetwork induced by shared downstream targets of nodes in the query. For example, if gene A and gene B are given as the query, find statements to shared downstream entity X such that A -> X <- B. Parameters ---------- client : The Neo4j client. nodes : The nodes to query. Returns ------- : The subnetwork induced by the given nodes. Return the INDRA Statement subnetwork induced by shared upstream controllers of nodes in the query. For example, if gene A and gene B are given as the query, find statements to shared upstream entity X such that A <- X -> B. Parameters ---------- client : The Neo4j client. nodes : The nodes to query. Returns ------- : The subnetwork induced by the given nodes. Return the INDRA Statement subnetwork induced by paths of length two between nodes A and B in a query with intermediate nodes X such that paths look like A-X-B. Parameters ---------- nodes : The nodes to query (A and B are one of these nodes in the following examples). client : The Neo4j client. 
first_forward: If true, query A->X otherwise query A<-X second_forward: If true, query X->B otherwise query X<-B Returns ------- : The INDRA statement subnetwork induced by the query \ MATCH p=(n1:BioEntity){f1}[r1:indra_rel]{f2}(n3:BioEntity){s1}[r2:indra_rel]{s2}(n2:BioEntity) WHERE n1.id IN [{nodes_str}] AND n2.id IN [{nodes_str}] AND n1.id <> n2.id AND NOT n3 IN [{nodes_str}] RETURN p Generate INDRA statements from a query that returns paths of length > 1. Return the INDRA Statement subnetwork induced by the given nodes and expressed in the given tissue. Parameters ---------- client : The Neo4j client. nodes : The nodes to query. tissue : The tissue to query. Returns ------- : The subnetwork induced by the given nodes and expressed in the given tissue. Return the INDRA Statement subnetwork induced by the given GO term. Parameters ---------- go_term : The GO term to query. Example: ``("GO", "GO:0006915")`` client : The Neo4j client. include_indirect : Should ontological children of the given GO term be queried as well? Defaults to False. mediated: Should relations A->X->B be included for X not associated to the given GO term? Defaults to False. upstream_controllers: Should relations A<-X->B be included for upstream controller X not associated to the given GO term? Defaults to False. downstream_targets: Should relations A->X<-B be included for downstream target X not associated to the given GO term? Defaults to False. Returns ------- : The INDRA statement subnetwork induced by GO term. # No deduplication of statements based on the union of # the queries should be necessary since each are disjoint
2.593687
3
lib/utils/AllKeywordsLibrary.py
johnxthekid/AutomationFramework
0
6622968
<gh_stars>0 import sys import os sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..')) from robot.api import logger from lib.browsers.drivermanagers.BrowserManager import BrowserManager from lib.frontend.apphelpers.SampleNotepadHelper import SampleNotepadHelper from lib.browsers.pageobjectmodels.DemoMainPage import DemoMainPage from lib.browsers.pageobjectmodels.sofi.sofi_login import SofiLoginPage class AllKeywordsLibrary(DemoMainPage, SampleNotepadHelper, SofiLoginPage): ROBOT_LIBRARY_SCOPE = 'GLOBAL' _browser_manager = None _browser = None _browser_id = None _driver = None def __init__(self, run_location): logger.info(f"Running keyword library: {run_location}") self._browser_manager = BrowserManager() SampleNotepadHelper.__init__(self) def open_browser(self, browser_type, driver_location=None, options=None): self._browser_id, self.driver = BrowserManager.open_browser(browser_type, driver_location, options) self._browser = BrowserManager.get_browser_instance(self._browser_id) DemoMainPage.__init__(self, self.driver) SofiLoginPage.__init__(self, self.driver) def open_page(self, url): self.driver.get(url) def get_page_title(self): return self._browser_manager.get_page_title() def close_browser(self): self._browser_manager.close_browser(self._browser_id)
import sys import os sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..')) from robot.api import logger from lib.browsers.drivermanagers.BrowserManager import BrowserManager from lib.frontend.apphelpers.SampleNotepadHelper import SampleNotepadHelper from lib.browsers.pageobjectmodels.DemoMainPage import DemoMainPage from lib.browsers.pageobjectmodels.sofi.sofi_login import SofiLoginPage class AllKeywordsLibrary(DemoMainPage, SampleNotepadHelper, SofiLoginPage): ROBOT_LIBRARY_SCOPE = 'GLOBAL' _browser_manager = None _browser = None _browser_id = None _driver = None def __init__(self, run_location): logger.info(f"Running keyword library: {run_location}") self._browser_manager = BrowserManager() SampleNotepadHelper.__init__(self) def open_browser(self, browser_type, driver_location=None, options=None): self._browser_id, self.driver = BrowserManager.open_browser(browser_type, driver_location, options) self._browser = BrowserManager.get_browser_instance(self._browser_id) DemoMainPage.__init__(self, self.driver) SofiLoginPage.__init__(self, self.driver) def open_page(self, url): self.driver.get(url) def get_page_title(self): return self._browser_manager.get_page_title() def close_browser(self): self._browser_manager.close_browser(self._browser_id)
none
1
2.332956
2
blueprint_spins.py
SamCHogg/Ark-Bot
0
6622969
import argparse import random def random_quality(): rand = random.randint(1, 101) # 8% if rand <= 8: quality = 1 # 36% elif 8 < rand <= 43: quality = 2 # 39% elif 43 < rand <= 82: quality = 3 # 15% elif 82 < rand <= 97: quality = 5 # 2% else: quality = 45 return quality def do_spin(items): quality = random_quality() item = random.choice(items) return "{item} 5 {quality} 1".format(item=item, quality=quality) def check_positive(value): ivalue = int(value) if ivalue <= 0: raise argparse.ArgumentTypeError( "{} is an invalid positive int value".format(value) ) return ivalue def do_multiple_spins(value): ivalue = check_positive(value) with open('items.txt') as f: items = f.read().splitlines() output = [] for i in range(ivalue): output.append(do_spin(items)) return ' | \n\n'.join(output) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "spins", type=int, help="The number of spins" ) args = parser.parse_args() print(do_multiple_spins(args.spins))
import argparse import random def random_quality(): rand = random.randint(1, 101) # 8% if rand <= 8: quality = 1 # 36% elif 8 < rand <= 43: quality = 2 # 39% elif 43 < rand <= 82: quality = 3 # 15% elif 82 < rand <= 97: quality = 5 # 2% else: quality = 45 return quality def do_spin(items): quality = random_quality() item = random.choice(items) return "{item} 5 {quality} 1".format(item=item, quality=quality) def check_positive(value): ivalue = int(value) if ivalue <= 0: raise argparse.ArgumentTypeError( "{} is an invalid positive int value".format(value) ) return ivalue def do_multiple_spins(value): ivalue = check_positive(value) with open('items.txt') as f: items = f.read().splitlines() output = [] for i in range(ivalue): output.append(do_spin(items)) return ' | \n\n'.join(output) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "spins", type=int, help="The number of spins" ) args = parser.parse_args() print(do_multiple_spins(args.spins))
en
0.63467
# 8% # 36% # 39% # 15% # 2%
3.607432
4
dual_crispr/run_scoring.py
ucsd-ccbb/mali-dual-crispr-pipeline
5
6622970
<reponame>ucsd-ccbb/mali-dual-crispr-pipeline<gh_stars>1-10 # standard libraries import argparse import distutils.util import os import warnings import ccbb_pyutils.config_loader as ns_config import ccbb_pyutils.notebook_pipeliner as ns_pipeliner from dual_crispr import dual_crispr_pipeliner as ns_dcpipe __author__ = '<NAME>' __maintainer__ = "<NAME>" __email__ = "<EMAIL>" __status__ = "prototype" def _parse_cmd_line_args(): # examples: # human_readable_name = 20160627HeLaA549 # library_name = CV4 # day_timepoints_str = 3,14,20,28 # --test parser = argparse.ArgumentParser() parser.add_argument("dataset_name", help="short, alphanumeric human-readable name for the dataset to be analyzed") parser.add_argument("library_name", help="name of the construct library for the dataset to be analyzed") parser.add_argument("count_fps_or_dirs", help="a comma-separated list of absolute paths to the file of counts to be" " scored, or to the directories in which the *_combined_counts.txt " "file to be scored resides") parser.add_argument("day_timepoints_str", help="a comma-separated list containing the time points (in order) at " "which data were collected") parser.add_argument("output_dir_path", help="absolute path to folder in which new output directory should be created") parser.add_argument("--test", help="run with set seed and only two iterations, suitable for testing ONLY", action="store_true") parser.add_argument("--config", help="path to config file; if not specified, config file in default location will " "be used") args = parser.parse_args() return args.dataset_name, args.library_name, args.count_fps_or_dirs, args.day_timepoints_str, args.output_dir_path,\ args.test, args.config def _set_params(count_fps_or_dirs, day_timepoints_str, outputs_dir_path, is_test, config_fp): test_config_section_key = "test" count_fps_or_dirs_key = "count_fps_or_dirs" min_count_limit_key = "min_count_limit" max_fraction_acceptable_spline_density_diff_key = 
"max_fraction_acceptable_spline_density_diff" max_fraction_counts_excluded_key = "max_fraction_counts_excluded" day_timepoints_str_key = "day_timepoints_str" use_seed_key = "use_seed" num_iterations_key = "num_iterations" # load the config file config_fp = ns_dcpipe.get_config_fp_or_default(config_fp) config_parser = ns_config.load_config_parser_from_fp(config_fp) score_params = ns_config.load_config_section_dict(config_parser, "score_pipeline") result = score_params.copy() if is_test: result[use_seed_key] = config_parser.get(test_config_section_key, use_seed_key) result[num_iterations_key] = config_parser.get(test_config_section_key, num_iterations_key) result[count_fps_or_dirs_key] = count_fps_or_dirs # Note: the time_prefixes_str and day_timepoints_str comma-delimited string params are not being converted to lists # here--that is done in the notebook, because if users run the notebooks directly, they will have to put in # comma-delimited strings there, so the notebooks needs to know how to deal with it. result[day_timepoints_str_key] = day_timepoints_str result[ns_dcpipe.DirectoryKeys.PROCESSED_DATA.value] = os.path.abspath(outputs_dir_path) # the below values DO need to be converted because users have the ability to input int, float, and boolean values # directly into the notebooks, so the notebooks don't need to know how to convert those result[min_count_limit_key] = int(result[min_count_limit_key]) result[max_fraction_acceptable_spline_density_diff_key] = float( result[max_fraction_acceptable_spline_density_diff_key]) result[max_fraction_counts_excluded_key] = float( result[max_fraction_counts_excluded_key]) result[use_seed_key] = bool(distutils.util.strtobool(result[use_seed_key])) result[num_iterations_key] = int(result[num_iterations_key]) # if test parameters are detected, remind user! 
Results should not be used for real analysis if result[use_seed_key] or is_test: warnings.warn('Scoring is running in TEST MODE; do not use results for data analysis!') return result def main(): dataset_name, library_name, counts_fps_or_dirs, day_timepoints_str, output_dir_path, is_test, config_fp = \ _parse_cmd_line_args() score_params = _set_params(counts_fps_or_dirs, day_timepoints_str, output_dir_path, is_test, config_fp) full_params = ns_dcpipe.generate_notebook_params(dataset_name, library_name, score_params, config_fp) # Note: second argument is the *function*, not results of calling the function ns_pipeliner.execute_run_from_full_params(full_params, ns_dcpipe.rename_param_names_as_global_vars) if __name__ == '__main__': main()
# standard libraries import argparse import distutils.util import os import warnings import ccbb_pyutils.config_loader as ns_config import ccbb_pyutils.notebook_pipeliner as ns_pipeliner from dual_crispr import dual_crispr_pipeliner as ns_dcpipe __author__ = '<NAME>' __maintainer__ = "<NAME>" __email__ = "<EMAIL>" __status__ = "prototype" def _parse_cmd_line_args(): # examples: # human_readable_name = 20160627HeLaA549 # library_name = CV4 # day_timepoints_str = 3,14,20,28 # --test parser = argparse.ArgumentParser() parser.add_argument("dataset_name", help="short, alphanumeric human-readable name for the dataset to be analyzed") parser.add_argument("library_name", help="name of the construct library for the dataset to be analyzed") parser.add_argument("count_fps_or_dirs", help="a comma-separated list of absolute paths to the file of counts to be" " scored, or to the directories in which the *_combined_counts.txt " "file to be scored resides") parser.add_argument("day_timepoints_str", help="a comma-separated list containing the time points (in order) at " "which data were collected") parser.add_argument("output_dir_path", help="absolute path to folder in which new output directory should be created") parser.add_argument("--test", help="run with set seed and only two iterations, suitable for testing ONLY", action="store_true") parser.add_argument("--config", help="path to config file; if not specified, config file in default location will " "be used") args = parser.parse_args() return args.dataset_name, args.library_name, args.count_fps_or_dirs, args.day_timepoints_str, args.output_dir_path,\ args.test, args.config def _set_params(count_fps_or_dirs, day_timepoints_str, outputs_dir_path, is_test, config_fp): test_config_section_key = "test" count_fps_or_dirs_key = "count_fps_or_dirs" min_count_limit_key = "min_count_limit" max_fraction_acceptable_spline_density_diff_key = "max_fraction_acceptable_spline_density_diff" max_fraction_counts_excluded_key = 
"max_fraction_counts_excluded" day_timepoints_str_key = "day_timepoints_str" use_seed_key = "use_seed" num_iterations_key = "num_iterations" # load the config file config_fp = ns_dcpipe.get_config_fp_or_default(config_fp) config_parser = ns_config.load_config_parser_from_fp(config_fp) score_params = ns_config.load_config_section_dict(config_parser, "score_pipeline") result = score_params.copy() if is_test: result[use_seed_key] = config_parser.get(test_config_section_key, use_seed_key) result[num_iterations_key] = config_parser.get(test_config_section_key, num_iterations_key) result[count_fps_or_dirs_key] = count_fps_or_dirs # Note: the time_prefixes_str and day_timepoints_str comma-delimited string params are not being converted to lists # here--that is done in the notebook, because if users run the notebooks directly, they will have to put in # comma-delimited strings there, so the notebooks needs to know how to deal with it. result[day_timepoints_str_key] = day_timepoints_str result[ns_dcpipe.DirectoryKeys.PROCESSED_DATA.value] = os.path.abspath(outputs_dir_path) # the below values DO need to be converted because users have the ability to input int, float, and boolean values # directly into the notebooks, so the notebooks don't need to know how to convert those result[min_count_limit_key] = int(result[min_count_limit_key]) result[max_fraction_acceptable_spline_density_diff_key] = float( result[max_fraction_acceptable_spline_density_diff_key]) result[max_fraction_counts_excluded_key] = float( result[max_fraction_counts_excluded_key]) result[use_seed_key] = bool(distutils.util.strtobool(result[use_seed_key])) result[num_iterations_key] = int(result[num_iterations_key]) # if test parameters are detected, remind user! 
Results should not be used for real analysis if result[use_seed_key] or is_test: warnings.warn('Scoring is running in TEST MODE; do not use results for data analysis!') return result def main(): dataset_name, library_name, counts_fps_or_dirs, day_timepoints_str, output_dir_path, is_test, config_fp = \ _parse_cmd_line_args() score_params = _set_params(counts_fps_or_dirs, day_timepoints_str, output_dir_path, is_test, config_fp) full_params = ns_dcpipe.generate_notebook_params(dataset_name, library_name, score_params, config_fp) # Note: second argument is the *function*, not results of calling the function ns_pipeliner.execute_run_from_full_params(full_params, ns_dcpipe.rename_param_names_as_global_vars) if __name__ == '__main__': main()
en
0.812984
# standard libraries # examples: # human_readable_name = 20160627HeLaA549 # library_name = CV4 # day_timepoints_str = 3,14,20,28 # --test # load the config file # Note: the time_prefixes_str and day_timepoints_str comma-delimited string params are not being converted to lists # here--that is done in the notebook, because if users run the notebooks directly, they will have to put in # comma-delimited strings there, so the notebooks needs to know how to deal with it. # the below values DO need to be converted because users have the ability to input int, float, and boolean values # directly into the notebooks, so the notebooks don't need to know how to convert those # if test parameters are detected, remind user! Results should not be used for real analysis # Note: second argument is the *function*, not results of calling the function
2.273649
2
recipes/Python/511423_8_Queens_Problem/recipe-511423.py
tdiprima/code
2,023
6622971
<filename>recipes/Python/511423_8_Queens_Problem/recipe-511423.py def Permute(queens, row): for i in range(8): queens[row] = i if Fine(queens, row): if row == 7: print(queens) globals()["solutions"] = globals()["solutions"] + 1 else: Permute(queens, row+1) def Fine(queens, row): c = 0 derga = True for i in range(row): c, cur, oth = c+1, queens[row], queens[row-i-1] if (cur == oth) or (cur-c == oth) or (cur+c == oth): derga = False break return(derga) globals()["solutions"] = 0 queens = [20, 20, 20, 20, 20, 20, 20, 20] for i in range(8): queens[0] = i Permute(queens, 1) print(solutions)
<filename>recipes/Python/511423_8_Queens_Problem/recipe-511423.py def Permute(queens, row): for i in range(8): queens[row] = i if Fine(queens, row): if row == 7: print(queens) globals()["solutions"] = globals()["solutions"] + 1 else: Permute(queens, row+1) def Fine(queens, row): c = 0 derga = True for i in range(row): c, cur, oth = c+1, queens[row], queens[row-i-1] if (cur == oth) or (cur-c == oth) or (cur+c == oth): derga = False break return(derga) globals()["solutions"] = 0 queens = [20, 20, 20, 20, 20, 20, 20, 20] for i in range(8): queens[0] = i Permute(queens, 1) print(solutions)
none
1
3.421999
3
src/data_generator/input_file_creator.py
HeartSaVioR/structured_streaming_experiments
2
6622972
# -*- coding: utf-8 -*- import sys import os from time import sleep from tempfile import mkdtemp from uuid import uuid1 from datetime import datetime from random import randint TEST_DATA_FILE_PATH = "resources/test_data.txt" def create_file(input_dir_path, dir_pattern, temp_dir_path, num_of_lines, data_lines): file_name = str(uuid1()) + ".txt" temp_file_path = temp_dir_path + "/" + file_name with open(temp_file_path, "w") as fw: for idx in range(num_of_lines): fw.write(data_lines[randint(0, len(data_lines) - 1)] + "\n") cur_datetime = datetime.utcnow() target_dir = input_dir_path + "/" + cur_datetime.strftime(dir_pattern) if not os.access(target_dir, os.R_OK): print("Input directory [%s] doesn't exist - creating one..." % target_dir) os.makedirs(target_dir) target_file_path = target_dir + "/" + file_name os.rename(temp_file_path, target_file_path) def main(): if len(sys.argv) < 5: print("USAGE: %s [input_dir_path][dir_pattern (follow the format of datetime)]" "[seconds per file][lines per file]" % sys.argv[0]) sys.exit(1) input_dir_path = sys.argv[1] dir_pattern = sys.argv[2] seconds_per_file = float(sys.argv[3]) lines_per_file = int(sys.argv[4]) temp_dir_path = mkdtemp() print("=" * 40) print("Input directory path: %s" % input_dir_path) print("Seconds per file: %s" % seconds_per_file) print("Number of lines per file: %s" % lines_per_file) print("=" * 40) with open(TEST_DATA_FILE_PATH, "r") as fr: data_lines = [x.strip() for x in fr.readlines()] while True: create_file(input_dir_path, dir_pattern, temp_dir_path, lines_per_file, data_lines) sleep(seconds_per_file) if __name__ == "__main__": main()
# -*- coding: utf-8 -*- import sys import os from time import sleep from tempfile import mkdtemp from uuid import uuid1 from datetime import datetime from random import randint TEST_DATA_FILE_PATH = "resources/test_data.txt" def create_file(input_dir_path, dir_pattern, temp_dir_path, num_of_lines, data_lines): file_name = str(uuid1()) + ".txt" temp_file_path = temp_dir_path + "/" + file_name with open(temp_file_path, "w") as fw: for idx in range(num_of_lines): fw.write(data_lines[randint(0, len(data_lines) - 1)] + "\n") cur_datetime = datetime.utcnow() target_dir = input_dir_path + "/" + cur_datetime.strftime(dir_pattern) if not os.access(target_dir, os.R_OK): print("Input directory [%s] doesn't exist - creating one..." % target_dir) os.makedirs(target_dir) target_file_path = target_dir + "/" + file_name os.rename(temp_file_path, target_file_path) def main(): if len(sys.argv) < 5: print("USAGE: %s [input_dir_path][dir_pattern (follow the format of datetime)]" "[seconds per file][lines per file]" % sys.argv[0]) sys.exit(1) input_dir_path = sys.argv[1] dir_pattern = sys.argv[2] seconds_per_file = float(sys.argv[3]) lines_per_file = int(sys.argv[4]) temp_dir_path = mkdtemp() print("=" * 40) print("Input directory path: %s" % input_dir_path) print("Seconds per file: %s" % seconds_per_file) print("Number of lines per file: %s" % lines_per_file) print("=" * 40) with open(TEST_DATA_FILE_PATH, "r") as fr: data_lines = [x.strip() for x in fr.readlines()] while True: create_file(input_dir_path, dir_pattern, temp_dir_path, lines_per_file, data_lines) sleep(seconds_per_file) if __name__ == "__main__": main()
en
0.769321
# -*- coding: utf-8 -*-
2.814717
3
app/models/oauth/client.py
tch1bo/viaduct
11
6622973
<reponame>tch1bo/viaduct<filename>app/models/oauth/client.py from authlib.flask.oauth2.sqla import OAuth2ClientMixin from sqlalchemy.ext.hybrid import hybrid_property from app import db class OAuthClient(db.Model, OAuth2ClientMixin): __tablename__ = 'oauth_client' # Overwrite the mixin client_id, since we want it to be the primary key. client_id = db.Column(db.String(48), primary_key=True) # Creator of the client user = db.relationship("User") user_id = db.Column(db.ForeignKey("user.id")) auto_approve = db.Column(db.Boolean(), default=False, nullable=False) @hybrid_property def scopes(self): if self.scope: return self.scope.splitlines() return []
from authlib.flask.oauth2.sqla import OAuth2ClientMixin from sqlalchemy.ext.hybrid import hybrid_property from app import db class OAuthClient(db.Model, OAuth2ClientMixin): __tablename__ = 'oauth_client' # Overwrite the mixin client_id, since we want it to be the primary key. client_id = db.Column(db.String(48), primary_key=True) # Creator of the client user = db.relationship("User") user_id = db.Column(db.ForeignKey("user.id")) auto_approve = db.Column(db.Boolean(), default=False, nullable=False) @hybrid_property def scopes(self): if self.scope: return self.scope.splitlines() return []
en
0.932295
# Overwrite the mixin client_id, since we want it to be the primary key. # Creator of the client
2.28545
2
main.py
TimO96/RE-CEM
0
6622974
# main.py -- main file with arguments # <NAME> (11318422) # <NAME> (11331720) # <NAME> (11248815) # <NAME> (11147598) # (C) 2020 UvA FACT AI import random import argparse import cem from cem.train import search, train_model if __name__ == "__main__": parser = argparse.ArgumentParser(description='Contrastive Explanations\ Method (CEM)') parser.add_argument('--dataset', type=str, default="MNIST", help='Used\ dataset - either MNIST or FMNIST') parser.add_argument('--seed', type=int, default=121, help='Seed for\ reproducablitity') parser.add_argument('--id', type=int, default=0, help='Id of the used\ image') parser.add_argument('--mode', type=str, default="PN", help='Type of\ pertubation') parser.add_argument('--max_iter', type=str, default=1000, help='Type of\ pertubation') parser.add_argument('--gamma', type=int, default=100, help='Hyperparameter\ for the effect of the autoencoder') parser.add_argument('--kappa', type=int, default=10, help='Hyperparameter\ for the desired confidence') parser.add_argument('--quant_eval', type=bool, default=False, help='Run\ the quantative evaluation') parser.add_argument('--n_samples', type=int, default=400, help='Number of\ samples for quantative evaluation') parser.add_argument("-s", "--search", type=int, default=0, help='Search\ for best training hyperparameters') parser.add_argument("-u", "--unsupervised", type=bool, default=None, help='True trains an autoencoder firstly, False trains\ an NN model firstly.') args = parser.parse_args() # Train optionally if args.unsupervised is not None: if args.search: search(args.dataset, args.unsupervised) else: train_model(args.dataset, args.unsupervised, stats=1000) # Perform explanation or quantative evaluation m = cem.Main(seed=args.seed, type=args.dataset) if args.quant_eval: print(m.quant_eval(ids=random.sample(range(10000), args.n_samples))) else: m.explain(args.id, mode=args.mode, max_iter=args.max_iter, gamma=args.gamma, kappa=args.kappa)
# main.py -- main file with arguments # <NAME> (11318422) # <NAME> (11331720) # <NAME> (11248815) # <NAME> (11147598) # (C) 2020 UvA FACT AI import random import argparse import cem from cem.train import search, train_model if __name__ == "__main__": parser = argparse.ArgumentParser(description='Contrastive Explanations\ Method (CEM)') parser.add_argument('--dataset', type=str, default="MNIST", help='Used\ dataset - either MNIST or FMNIST') parser.add_argument('--seed', type=int, default=121, help='Seed for\ reproducablitity') parser.add_argument('--id', type=int, default=0, help='Id of the used\ image') parser.add_argument('--mode', type=str, default="PN", help='Type of\ pertubation') parser.add_argument('--max_iter', type=str, default=1000, help='Type of\ pertubation') parser.add_argument('--gamma', type=int, default=100, help='Hyperparameter\ for the effect of the autoencoder') parser.add_argument('--kappa', type=int, default=10, help='Hyperparameter\ for the desired confidence') parser.add_argument('--quant_eval', type=bool, default=False, help='Run\ the quantative evaluation') parser.add_argument('--n_samples', type=int, default=400, help='Number of\ samples for quantative evaluation') parser.add_argument("-s", "--search", type=int, default=0, help='Search\ for best training hyperparameters') parser.add_argument("-u", "--unsupervised", type=bool, default=None, help='True trains an autoencoder firstly, False trains\ an NN model firstly.') args = parser.parse_args() # Train optionally if args.unsupervised is not None: if args.search: search(args.dataset, args.unsupervised) else: train_model(args.dataset, args.unsupervised, stats=1000) # Perform explanation or quantative evaluation m = cem.Main(seed=args.seed, type=args.dataset) if args.quant_eval: print(m.quant_eval(ids=random.sample(range(10000), args.n_samples))) else: m.explain(args.id, mode=args.mode, max_iter=args.max_iter, gamma=args.gamma, kappa=args.kappa)
en
0.417639
# main.py -- main file with arguments # <NAME> (11318422) # <NAME> (11331720) # <NAME> (11248815) # <NAME> (11147598) # (C) 2020 UvA FACT AI # Train optionally # Perform explanation or quantative evaluation
2.478385
2
companyAPI/apps.py
colombia-immap/unicef-school-mapping-back
0
6622975
<reponame>colombia-immap/unicef-school-mapping-back from django.apps import AppConfig class CompanyapiConfig(AppConfig): name = 'companyAPI'
from django.apps import AppConfig class CompanyapiConfig(AppConfig): name = 'companyAPI'
none
1
1.14176
1
tests/modules/test_pulseaudio.py
kunalshetye/bumblebee-status
5
6622976
<gh_stars>1-10 # pylint: disable=C0103,C0111 import mock import unittest import tests.mocks as mocks from bumblebee.input import LEFT_MOUSE, RIGHT_MOUSE, WHEEL_UP, WHEEL_DOWN from bumblebee.modules.pulseaudio import Module class TestPulseAudioModule(unittest.TestCase): def setUp(self): mocks.setup_test(self, Module) def tearDown(self): mocks.teardown_test(self) def test_leftclick(self): mocks.mouseEvent(stdin=self.stdin, button=LEFT_MOUSE, inp=self.input, module=self.module) self.popen.assert_call("pactl set-source-mute @DEFAULT_SOURCE@ toggle") def test_rightclick(self): mocks.mouseEvent(stdin=self.stdin, button=RIGHT_MOUSE, inp=self.input, module=self.module) self.popen.assert_call("pavucontrol") def test_wheelup(self): mocks.mouseEvent(stdin=self.stdin, button=WHEEL_UP, inp=self.input, module=self.module) self.popen.assert_call("pactl set-source-volume @DEFAULT_SOURCE@ +2%") def test_wheeldown(self): mocks.mouseEvent(stdin=self.stdin, button=WHEEL_DOWN, inp=self.input, module=self.module) self.popen.assert_call("pactl set-source-volume @DEFAULT_SOURCE@ -2%") # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
# pylint: disable=C0103,C0111 import mock import unittest import tests.mocks as mocks from bumblebee.input import LEFT_MOUSE, RIGHT_MOUSE, WHEEL_UP, WHEEL_DOWN from bumblebee.modules.pulseaudio import Module class TestPulseAudioModule(unittest.TestCase): def setUp(self): mocks.setup_test(self, Module) def tearDown(self): mocks.teardown_test(self) def test_leftclick(self): mocks.mouseEvent(stdin=self.stdin, button=LEFT_MOUSE, inp=self.input, module=self.module) self.popen.assert_call("pactl set-source-mute @DEFAULT_SOURCE@ toggle") def test_rightclick(self): mocks.mouseEvent(stdin=self.stdin, button=RIGHT_MOUSE, inp=self.input, module=self.module) self.popen.assert_call("pavucontrol") def test_wheelup(self): mocks.mouseEvent(stdin=self.stdin, button=WHEEL_UP, inp=self.input, module=self.module) self.popen.assert_call("pactl set-source-volume @DEFAULT_SOURCE@ +2%") def test_wheeldown(self): mocks.mouseEvent(stdin=self.stdin, button=WHEEL_DOWN, inp=self.input, module=self.module) self.popen.assert_call("pactl set-source-volume @DEFAULT_SOURCE@ -2%") # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
de
0.176571
# pylint: disable=C0103,C0111 # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
2.409423
2
pyspi/io/plotting/spi_display.py
grburgess/pyspi
0
6622977
<filename>pyspi/io/plotting/spi_display.py import matplotlib.pyplot as plt import numpy as np import pandas as pd from matplotlib.patches import RegularPolygon from pyspi.utils.livedets import get_live_dets NUM_REAL_DETS = 19 NUM_PSEUDO_DOUBLE_DETS = 42 NUM_PSEUDO_TRIPLE_DETS = 42 NUM_TOTAL_DETS = NUM_REAL_DETS + NUM_PSEUDO_DOUBLE_DETS + \ NUM_PSEUDO_TRIPLE_DETS # the origins of the detectors # the underscore keeps these variable from being exposed # to the user _detector_origins = ((0, 0), (6, 0), (3, 5.196), (-3, 5.196), (-6, 0), (-3, -5.196), (3, -5.196), (9, -5.196), (12, 0), (9, 5.196), (6, 10.392), (0, 10.392), (-6, 10.392), (-9, 5.196), (-12, 0), (-9, -5.196), (-6, -10.392), (0, -10.392), (6, -10.392)) def _calc_double_origin(det1, det2): x = (_detector_origins[det1][0] + _detector_origins[det2][0]) * 0.5 y = (_detector_origins[det1][1] + _detector_origins[det2][1]) * 0.5 return x, y def _construct_double_events_table(): """ Helper function to generate double event detector list :return: """ # the list of double pairs # for details see: # https://heasarc.gsfc.nasa.gov/docs/integral/spi/pages/detectors.html double_event_pairs = ( (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (1, 2), (1, 6), (1, 7), (1, 8), (1, 9), (2, 3), (2, 9), (2, 10), (2, 11), (3, 4), (3, 11), (3, 12), (3, 13), (4, 5), (4, 13), (4, 14), (4, 15), (5, 6), (5, 15), (5, 16), (5, 17), (6, 7), (6, 17), (6, 18), (7, 8), (7, 18), (8, 9), (9, 10), (10, 11), (11, 12), (12, 13), (13, 14), (14, 15), (15, 16), (16, 17), (17, 18)) # calculate the origins origins = np.array([_calc_double_origin(*pair) for pair in double_event_pairs]) # build a dictionary for out put out = dict(detector_number=range(19, 19 + 42), x=origins[:, 0], y=origins[:, 1], detector1=np.array(double_event_pairs)[:, 0], detector2=np.array(double_event_pairs)[:, 1] ) # return the as a pandas data frame return pd.DataFrame(out) # build a list that can be exposed to the user spi_pseudo_double_detectors = 
_construct_double_events_table() class SPIDetector(object): def __init__(self, detector_number, origin, is_pseudo_detector=False): """ A SPI detector is defined by its number, origin and type :param detector_number: the detector number :param origin: the detector origin :param is_pseudo_detector: if this is a real detector or not """ self._detector_number = detector_number self._origin = origin self._is_pseudo_detector = is_pseudo_detector self._bad = False @property def bad(self): return self._bad def set_bad(self, flag): """ Set the flag if this is a bad detector :param flag: Bad detector? :return: """ self._bad = flag @property def origin(self): return self._origin @property def is_pseudo_detector(self): return self._is_pseudo_detector @property def detector_number(self): return self._detector_number class DoubleEventDetector(SPIDetector): def __init__(self, detector_number, origin, detector1, detector2): """ :param detector_number: :param origin: :param detector1: :param detector2: """ super(DoubleEventDetector, self).__init__(detector_number, origin, is_pseudo_detector=True) self._detector1 = detector1 self._detector2 = detector2 # Fill out this class class TripleEventDetector(SPIDetector): pass class DetectorContents(object): def __init__(self, detector_array): assert len(detector_array) == NUM_TOTAL_DETS self._contents = np.array(detector_array) self._real_contents = np.array(detector_array[:NUM_REAL_DETS]) @classmethod def from_spi_data(cls, spi_data): pass @classmethod def from_total_effective_area(cls, spi_response, azimuth, zenith): effective_area = spi_response.effective_area_per_detector(azimuth, zenith).sum(axix=0) return cls(effective_area) class SPI(object): def __init__(self, bad_detectors=[], time=None): self._bad_detectors = bad_detectors if time: good_detectors = get_live_dets(time) self._bad_detectors = [i for i in range(19) if i not in good_detectors] self._construct_detectors() def _construct_detectors(self): """ :return: """ # the real 
detector origins self._detector_origins = _detector_origins # go through an build the list of detectors for SPI self._detectors = [] n = 0 # keeps track of the detector number for origin in self._detector_origins: # first we build the real detectors self._detectors.append(SPIDetector(detector_number=n, origin=origin, is_pseudo_detector=False)) n += 1 # now we build the double event detectors for detector in spi_pseudo_double_detectors.iterrows(): detector = detector[1] origin = (detector['x'], detector['y']) self._detectors.append(DoubleEventDetector(detector_number=n, origin=origin, detector1=detector['detector1'], detector2=detector['detector2'])) n += 1 # TODO: Add the triple event detectors for det in self._detectors: if det.detector_number in self._bad_detectors: det.set_bad(True) def plot_spi_working_dets(self, with_pseudo_detectors=True, show_detector_number=True): """ Plot the SPI Detectors and mark the detectors that are not working red :param with_pseudo_detectors: Plot pseudo detectors? :param show_detector_number: Show the det numbers in the plot? 
:return: """ fig, ax = plt.subplots(subplot_kw={'aspect': 'equal'}, figsize=(15,15)) d=1 radius = d*2* 1.732 # first get the colors based of the contents #colors, pseudo_colors = self._get_colors_from_contents(cmap, # pseudo_cmap) # color iterators n = 0 pseudo_n = 0 # now we loop over all the detectors and if they have contents # we will plot them for detector in self._detectors: # first the real detectors if not detector.is_pseudo_detector: if detector.bad: color = "red" else: color = "green" # create a ploygon and color it based of the contents p = RegularPolygon(xy=tuple(d*i for i in detector.origin), numVertices=6, radius=radius, facecolor=color, ec='k', lw=3) ax.add_patch(p) # show the detector number if show_detector_number: ax.text(d*detector.origin[0], d*detector.origin[1], detector.detector_number, ha="center", va="center", color='k', size=30) ax.set_xlim(d*-16, d*16) ax.set_ylim(d*-16, d*16) ax.set_yticks([]) ax.set_xticks([]) return fig def _loadtxt2d(self, intext): try: return np.loadtxt(intext, ndmin=2) except: return np.loadtxt(intext)
<filename>pyspi/io/plotting/spi_display.py import matplotlib.pyplot as plt import numpy as np import pandas as pd from matplotlib.patches import RegularPolygon from pyspi.utils.livedets import get_live_dets NUM_REAL_DETS = 19 NUM_PSEUDO_DOUBLE_DETS = 42 NUM_PSEUDO_TRIPLE_DETS = 42 NUM_TOTAL_DETS = NUM_REAL_DETS + NUM_PSEUDO_DOUBLE_DETS + \ NUM_PSEUDO_TRIPLE_DETS # the origins of the detectors # the underscore keeps these variable from being exposed # to the user _detector_origins = ((0, 0), (6, 0), (3, 5.196), (-3, 5.196), (-6, 0), (-3, -5.196), (3, -5.196), (9, -5.196), (12, 0), (9, 5.196), (6, 10.392), (0, 10.392), (-6, 10.392), (-9, 5.196), (-12, 0), (-9, -5.196), (-6, -10.392), (0, -10.392), (6, -10.392)) def _calc_double_origin(det1, det2): x = (_detector_origins[det1][0] + _detector_origins[det2][0]) * 0.5 y = (_detector_origins[det1][1] + _detector_origins[det2][1]) * 0.5 return x, y def _construct_double_events_table(): """ Helper function to generate double event detector list :return: """ # the list of double pairs # for details see: # https://heasarc.gsfc.nasa.gov/docs/integral/spi/pages/detectors.html double_event_pairs = ( (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (1, 2), (1, 6), (1, 7), (1, 8), (1, 9), (2, 3), (2, 9), (2, 10), (2, 11), (3, 4), (3, 11), (3, 12), (3, 13), (4, 5), (4, 13), (4, 14), (4, 15), (5, 6), (5, 15), (5, 16), (5, 17), (6, 7), (6, 17), (6, 18), (7, 8), (7, 18), (8, 9), (9, 10), (10, 11), (11, 12), (12, 13), (13, 14), (14, 15), (15, 16), (16, 17), (17, 18)) # calculate the origins origins = np.array([_calc_double_origin(*pair) for pair in double_event_pairs]) # build a dictionary for out put out = dict(detector_number=range(19, 19 + 42), x=origins[:, 0], y=origins[:, 1], detector1=np.array(double_event_pairs)[:, 0], detector2=np.array(double_event_pairs)[:, 1] ) # return the as a pandas data frame return pd.DataFrame(out) # build a list that can be exposed to the user spi_pseudo_double_detectors = 
_construct_double_events_table() class SPIDetector(object): def __init__(self, detector_number, origin, is_pseudo_detector=False): """ A SPI detector is defined by its number, origin and type :param detector_number: the detector number :param origin: the detector origin :param is_pseudo_detector: if this is a real detector or not """ self._detector_number = detector_number self._origin = origin self._is_pseudo_detector = is_pseudo_detector self._bad = False @property def bad(self): return self._bad def set_bad(self, flag): """ Set the flag if this is a bad detector :param flag: Bad detector? :return: """ self._bad = flag @property def origin(self): return self._origin @property def is_pseudo_detector(self): return self._is_pseudo_detector @property def detector_number(self): return self._detector_number class DoubleEventDetector(SPIDetector): def __init__(self, detector_number, origin, detector1, detector2): """ :param detector_number: :param origin: :param detector1: :param detector2: """ super(DoubleEventDetector, self).__init__(detector_number, origin, is_pseudo_detector=True) self._detector1 = detector1 self._detector2 = detector2 # Fill out this class class TripleEventDetector(SPIDetector): pass class DetectorContents(object): def __init__(self, detector_array): assert len(detector_array) == NUM_TOTAL_DETS self._contents = np.array(detector_array) self._real_contents = np.array(detector_array[:NUM_REAL_DETS]) @classmethod def from_spi_data(cls, spi_data): pass @classmethod def from_total_effective_area(cls, spi_response, azimuth, zenith): effective_area = spi_response.effective_area_per_detector(azimuth, zenith).sum(axix=0) return cls(effective_area) class SPI(object): def __init__(self, bad_detectors=[], time=None): self._bad_detectors = bad_detectors if time: good_detectors = get_live_dets(time) self._bad_detectors = [i for i in range(19) if i not in good_detectors] self._construct_detectors() def _construct_detectors(self): """ :return: """ # the real 
detector origins self._detector_origins = _detector_origins # go through an build the list of detectors for SPI self._detectors = [] n = 0 # keeps track of the detector number for origin in self._detector_origins: # first we build the real detectors self._detectors.append(SPIDetector(detector_number=n, origin=origin, is_pseudo_detector=False)) n += 1 # now we build the double event detectors for detector in spi_pseudo_double_detectors.iterrows(): detector = detector[1] origin = (detector['x'], detector['y']) self._detectors.append(DoubleEventDetector(detector_number=n, origin=origin, detector1=detector['detector1'], detector2=detector['detector2'])) n += 1 # TODO: Add the triple event detectors for det in self._detectors: if det.detector_number in self._bad_detectors: det.set_bad(True) def plot_spi_working_dets(self, with_pseudo_detectors=True, show_detector_number=True): """ Plot the SPI Detectors and mark the detectors that are not working red :param with_pseudo_detectors: Plot pseudo detectors? :param show_detector_number: Show the det numbers in the plot? 
:return: """ fig, ax = plt.subplots(subplot_kw={'aspect': 'equal'}, figsize=(15,15)) d=1 radius = d*2* 1.732 # first get the colors based of the contents #colors, pseudo_colors = self._get_colors_from_contents(cmap, # pseudo_cmap) # color iterators n = 0 pseudo_n = 0 # now we loop over all the detectors and if they have contents # we will plot them for detector in self._detectors: # first the real detectors if not detector.is_pseudo_detector: if detector.bad: color = "red" else: color = "green" # create a ploygon and color it based of the contents p = RegularPolygon(xy=tuple(d*i for i in detector.origin), numVertices=6, radius=radius, facecolor=color, ec='k', lw=3) ax.add_patch(p) # show the detector number if show_detector_number: ax.text(d*detector.origin[0], d*detector.origin[1], detector.detector_number, ha="center", va="center", color='k', size=30) ax.set_xlim(d*-16, d*16) ax.set_ylim(d*-16, d*16) ax.set_yticks([]) ax.set_xticks([]) return fig def _loadtxt2d(self, intext): try: return np.loadtxt(intext, ndmin=2) except: return np.loadtxt(intext)
en
0.734818
# the origins of the detectors # the underscore keeps these variable from being exposed # to the user Helper function to generate double event detector list :return: # the list of double pairs # for details see: # https://heasarc.gsfc.nasa.gov/docs/integral/spi/pages/detectors.html # calculate the origins # build a dictionary for out put # return the as a pandas data frame # build a list that can be exposed to the user A SPI detector is defined by its number, origin and type :param detector_number: the detector number :param origin: the detector origin :param is_pseudo_detector: if this is a real detector or not Set the flag if this is a bad detector :param flag: Bad detector? :return: :param detector_number: :param origin: :param detector1: :param detector2: # Fill out this class :return: # the real detector origins # go through an build the list of detectors for SPI # keeps track of the detector number # first we build the real detectors # now we build the double event detectors # TODO: Add the triple event detectors Plot the SPI Detectors and mark the detectors that are not working red :param with_pseudo_detectors: Plot pseudo detectors? :param show_detector_number: Show the det numbers in the plot? :return: # first get the colors based of the contents #colors, pseudo_colors = self._get_colors_from_contents(cmap, # pseudo_cmap) # color iterators # now we loop over all the detectors and if they have contents # we will plot them # first the real detectors # create a ploygon and color it based of the contents # show the detector number
2.383432
2
code/cartesian-product-func.py
tumuum/prog-book
0
6622978
<filename>code/cartesian-product-func.py def product(A,B): """Takes two sets A and B, and returns their cartesian product as a set of 2-tuples.""" product = set() for x in A: for y in B: product.add((x,y)) """Now it is time to return the result""" return product
<filename>code/cartesian-product-func.py def product(A,B): """Takes two sets A and B, and returns their cartesian product as a set of 2-tuples.""" product = set() for x in A: for y in B: product.add((x,y)) """Now it is time to return the result""" return product
en
0.95597
Takes two sets A and B, and returns their cartesian product as a set of 2-tuples. Now it is time to return the result
3.954961
4
regex_tester.py
lparolin/python_regex_tester
0
6622979
<gh_stars>0 import re def make_all_single_lines(in_string): pattern = "\\\W*(?:\r\n|\n|\r)" repl = "" out_string = re.sub(pattern, repl, in_string) return out_string def get_import_lines(in_string): pattern = "(?<=import\W).+" return re.findall(pattern, in_string) def get_import_items(in_list): all_items = [] pattern = "\w+" for i_string in in_list: all_items.extend([i_match for i_match in re.findall(pattern, i_string)]) return list(set(all_items)).sort() def extract_python_lib(in_string): return get_import_items(get_import_lines(make_all_single_lines(in_string)))
import re def make_all_single_lines(in_string): pattern = "\\\W*(?:\r\n|\n|\r)" repl = "" out_string = re.sub(pattern, repl, in_string) return out_string def get_import_lines(in_string): pattern = "(?<=import\W).+" return re.findall(pattern, in_string) def get_import_items(in_list): all_items = [] pattern = "\w+" for i_string in in_list: all_items.extend([i_match for i_match in re.findall(pattern, i_string)]) return list(set(all_items)).sort() def extract_python_lib(in_string): return get_import_items(get_import_lines(make_all_single_lines(in_string)))
none
1
2.937442
3
library/py/tde/cli.py
eblot/tde-base
0
6622980
"""Simple command line wrapper""" import os from io import TextIOWrapper from subprocess import Popen, DEVNULL, PIPE from sys import stderr from time import sleep, time as now class Command: """Context manager for a shell command. Run the command specified as a list of parameters. The Command constructor accepts modifiers as keyword arguments: * ``nosignal`` (expects a boolean value) to prevent Python from forwarding signals received in the Python VM to the subprocess * ``nostderr`` (expects a boolean value) to prevent the command from sending stderr to the default stderr (stderr messages are discarded) * ``debug`` to report errors * ``cwd`` to specify a working directory """ GRACE_DELAY = 0.5 POLL_PERIOD = 0.0025 def __init__(self, command, *args, **kwargs): self._cmd = None # be sure to use the untranslated output strings environment = dict(os.environ) environment['LC_ALL'] = 'C' preexec = Command.preexec if kwargs.get('nosignal', False) else None self._stderr = not bool(kwargs.get('nostderr', False)) cwd = kwargs.get('cwd', None) or os.getcwd() self._dbg = kwargs.get('debug', False) self._args = [command] + list(args) try: self._cmd = Popen(self._args, stdout=PIPE, stderr=PIPE if self._stderr else DEVNULL, env=environment, cwd=cwd, preexec_fn=preexec) except OSError as exc: raise OSError("Cannot launch command: %s" % str(exc)) def __enter__(self): return TextIOWrapper(self._cmd.stdout, encoding='utf8') def __exit__(self, type, value, tbl): # process not started or process exited with code 0 if not self._cmd: return # give the process a delay to exit properly polltime = now() + self.GRACE_DELAY while now() < polltime: if self._check_status() is not None: return # give the process a delay to exit properly killtime = now() + self.GRACE_DELAY while now() < killtime: # check periodically if it exited by itself rc = self._check_status() if rc is not None: return # process still alive self._cmd.terminate() sleep(self.POLL_PERIOD) # kill it self._cmd.kill() 
self._cmd.wait() if self._dbg: raise OSError('Command process had to be killed ' 'as it looked stuck', file=stderr) def _check_status(self): rc = self._cmd.poll() if rc is not None: if rc != 0: if self._stderr: error = [] while True: char = self._cmd.stderr.read(1) if char in b'\r\n': break error.append(char) stderr.write(b''.join(error).decode()) raise OSError(rc) return rc @staticmethod def preexec(): # Don't forward signals. os.setpgrp()
"""Simple command line wrapper""" import os from io import TextIOWrapper from subprocess import Popen, DEVNULL, PIPE from sys import stderr from time import sleep, time as now class Command: """Context manager for a shell command. Run the command specified as a list of parameters. The Command constructor accepts modifiers as keyword arguments: * ``nosignal`` (expects a boolean value) to prevent Python from forwarding signals received in the Python VM to the subprocess * ``nostderr`` (expects a boolean value) to prevent the command from sending stderr to the default stderr (stderr messages are discarded) * ``debug`` to report errors * ``cwd`` to specify a working directory """ GRACE_DELAY = 0.5 POLL_PERIOD = 0.0025 def __init__(self, command, *args, **kwargs): self._cmd = None # be sure to use the untranslated output strings environment = dict(os.environ) environment['LC_ALL'] = 'C' preexec = Command.preexec if kwargs.get('nosignal', False) else None self._stderr = not bool(kwargs.get('nostderr', False)) cwd = kwargs.get('cwd', None) or os.getcwd() self._dbg = kwargs.get('debug', False) self._args = [command] + list(args) try: self._cmd = Popen(self._args, stdout=PIPE, stderr=PIPE if self._stderr else DEVNULL, env=environment, cwd=cwd, preexec_fn=preexec) except OSError as exc: raise OSError("Cannot launch command: %s" % str(exc)) def __enter__(self): return TextIOWrapper(self._cmd.stdout, encoding='utf8') def __exit__(self, type, value, tbl): # process not started or process exited with code 0 if not self._cmd: return # give the process a delay to exit properly polltime = now() + self.GRACE_DELAY while now() < polltime: if self._check_status() is not None: return # give the process a delay to exit properly killtime = now() + self.GRACE_DELAY while now() < killtime: # check periodically if it exited by itself rc = self._check_status() if rc is not None: return # process still alive self._cmd.terminate() sleep(self.POLL_PERIOD) # kill it self._cmd.kill() 
self._cmd.wait() if self._dbg: raise OSError('Command process had to be killed ' 'as it looked stuck', file=stderr) def _check_status(self): rc = self._cmd.poll() if rc is not None: if rc != 0: if self._stderr: error = [] while True: char = self._cmd.stderr.read(1) if char in b'\r\n': break error.append(char) stderr.write(b''.join(error).decode()) raise OSError(rc) return rc @staticmethod def preexec(): # Don't forward signals. os.setpgrp()
en
0.81853
Simple command line wrapper Context manager for a shell command. Run the command specified as a list of parameters. The Command constructor accepts modifiers as keyword arguments: * ``nosignal`` (expects a boolean value) to prevent Python from forwarding signals received in the Python VM to the subprocess * ``nostderr`` (expects a boolean value) to prevent the command from sending stderr to the default stderr (stderr messages are discarded) * ``debug`` to report errors * ``cwd`` to specify a working directory # be sure to use the untranslated output strings # process not started or process exited with code 0 # give the process a delay to exit properly # give the process a delay to exit properly # check periodically if it exited by itself # process still alive # kill it # Don't forward signals.
2.961236
3
Python Fundamentals/9. RegEx/Exercise/03. Find Occurrences of Word in Sentence.py
a-shiro/SoftUni-Courses
0
6622981
<filename>Python Fundamentals/9. RegEx/Exercise/03. Find Occurrences of Word in Sentence.py import re text = input().lower() word = input().lower() pattern = rf"\b{word}\b" valid_matches = len(re.findall(pattern, text)) print(valid_matches)
<filename>Python Fundamentals/9. RegEx/Exercise/03. Find Occurrences of Word in Sentence.py import re text = input().lower() word = input().lower() pattern = rf"\b{word}\b" valid_matches = len(re.findall(pattern, text)) print(valid_matches)
none
1
4.296508
4
OpenCV/01_Canny_Edge_Detection.py
danieldfgro2000/Recycle
0
6622982
import cv2 import numpy as np import matplotlib matplotlib.use('TkAgg') from matplotlib import pyplot as plt img = cv2.imread('../elephant_gray.jpeg', 0) edges = cv2.Canny(img, 100, 200) plt.subplot(121), plt.imshow(img, cmap='gray') plt.title('Original Image'), plt.xticks([]), plt.yticks([]) plt.subplot(122), plt.imshow(edges, cmap='gray') plt.title('Edge Image'), plt.xticks([]), plt.yticks([]) plt.show()
import cv2 import numpy as np import matplotlib matplotlib.use('TkAgg') from matplotlib import pyplot as plt img = cv2.imread('../elephant_gray.jpeg', 0) edges = cv2.Canny(img, 100, 200) plt.subplot(121), plt.imshow(img, cmap='gray') plt.title('Original Image'), plt.xticks([]), plt.yticks([]) plt.subplot(122), plt.imshow(edges, cmap='gray') plt.title('Edge Image'), plt.xticks([]), plt.yticks([]) plt.show()
none
1
2.918459
3
experiments/rrn/models.py
nilamm/openprotein
73
6622983
<gh_stars>10-100 """ This file is part of the OpenProtein project. For license information, please see the LICENSE file in the root directory. """ import time import torch import torch.nn as nn import openprotein from util import initial_pos_from_aa_string, \ pass_messages, write_out, calc_avg_drmsd_over_minibatch class RrnModel(openprotein.BaseModel): def __init__(self, embedding_size, use_gpu): super(RrnModel, self).__init__(use_gpu, embedding_size) self.recurrent_steps = 2 self.hidden_size = 50 self.msg_output_size = 50 self.output_size = 9 # 3 dimensions * 3 coordinates for each aa self.f_to_hid = nn.Linear((embedding_size * 2 + 9), self.hidden_size, bias=True) self.hid_to_pos = nn.Linear(self.hidden_size, self.msg_output_size, bias=True) # (last state + orginal state) self.linear_transform = nn.Linear(embedding_size + 9 + self.msg_output_size, 9, bias=True) self.use_gpu = use_gpu def apply_message_function(self, aa_features): # aa_features: msg_count * 2 * feature_count aa_features_transformed = torch.cat( ( aa_features[:, 0, 0:21], aa_features[:, 1, 0:21], aa_features[:, 0, 21:30] - aa_features[:, 1, 21:30] ), dim=1) return self.hid_to_pos(self.f_to_hid(aa_features_transformed)) # msg_count * outputsize def _get_network_emissions(self, original_aa_string): backbone_atoms_padded, batch_sizes_backbone = \ initial_pos_from_aa_string(original_aa_string, self.use_gpu) embedding_padded = self.embed(original_aa_string) if self.use_gpu: backbone_atoms_padded = backbone_atoms_padded.cuda() for _ in range(self.recurrent_steps): combined_features = torch.cat( (embedding_padded, backbone_atoms_padded), dim=2 ).transpose(0, 1) features_transformed = [] for aa_features in combined_features.split(1, dim=0): msg = pass_messages(aa_features.squeeze(0), self.apply_message_function, self.use_gpu) # aa_count * output size features_transformed.append(self.linear_transform( torch.cat((aa_features.squeeze(0), msg), dim=1))) backbone_atoms_padded_clone = 
torch.stack(features_transformed).transpose(0, 1) backbone_atoms_padded = backbone_atoms_padded_clone return [], backbone_atoms_padded, batch_sizes_backbone def compute_loss(self, minibatch): (original_aa_string, actual_coords_list, _) = minibatch _, backbone_atoms_padded, batch_sizes = \ self._get_network_emissions(original_aa_string) actual_coords_list_padded = torch.nn.utils.rnn.pad_sequence(actual_coords_list) if self.use_gpu: actual_coords_list_padded = actual_coords_list_padded.cuda() start = time.time() if isinstance(batch_sizes[0], int): batch_sizes = torch.tensor(batch_sizes) drmsd_avg = calc_avg_drmsd_over_minibatch(backbone_atoms_padded, actual_coords_list_padded, batch_sizes) write_out("drmsd calculation time:", time.time() - start) if self.use_gpu: drmsd_avg = drmsd_avg.cuda() return drmsd_avg
""" This file is part of the OpenProtein project. For license information, please see the LICENSE file in the root directory. """ import time import torch import torch.nn as nn import openprotein from util import initial_pos_from_aa_string, \ pass_messages, write_out, calc_avg_drmsd_over_minibatch class RrnModel(openprotein.BaseModel): def __init__(self, embedding_size, use_gpu): super(RrnModel, self).__init__(use_gpu, embedding_size) self.recurrent_steps = 2 self.hidden_size = 50 self.msg_output_size = 50 self.output_size = 9 # 3 dimensions * 3 coordinates for each aa self.f_to_hid = nn.Linear((embedding_size * 2 + 9), self.hidden_size, bias=True) self.hid_to_pos = nn.Linear(self.hidden_size, self.msg_output_size, bias=True) # (last state + orginal state) self.linear_transform = nn.Linear(embedding_size + 9 + self.msg_output_size, 9, bias=True) self.use_gpu = use_gpu def apply_message_function(self, aa_features): # aa_features: msg_count * 2 * feature_count aa_features_transformed = torch.cat( ( aa_features[:, 0, 0:21], aa_features[:, 1, 0:21], aa_features[:, 0, 21:30] - aa_features[:, 1, 21:30] ), dim=1) return self.hid_to_pos(self.f_to_hid(aa_features_transformed)) # msg_count * outputsize def _get_network_emissions(self, original_aa_string): backbone_atoms_padded, batch_sizes_backbone = \ initial_pos_from_aa_string(original_aa_string, self.use_gpu) embedding_padded = self.embed(original_aa_string) if self.use_gpu: backbone_atoms_padded = backbone_atoms_padded.cuda() for _ in range(self.recurrent_steps): combined_features = torch.cat( (embedding_padded, backbone_atoms_padded), dim=2 ).transpose(0, 1) features_transformed = [] for aa_features in combined_features.split(1, dim=0): msg = pass_messages(aa_features.squeeze(0), self.apply_message_function, self.use_gpu) # aa_count * output size features_transformed.append(self.linear_transform( torch.cat((aa_features.squeeze(0), msg), dim=1))) backbone_atoms_padded_clone = 
torch.stack(features_transformed).transpose(0, 1) backbone_atoms_padded = backbone_atoms_padded_clone return [], backbone_atoms_padded, batch_sizes_backbone def compute_loss(self, minibatch): (original_aa_string, actual_coords_list, _) = minibatch _, backbone_atoms_padded, batch_sizes = \ self._get_network_emissions(original_aa_string) actual_coords_list_padded = torch.nn.utils.rnn.pad_sequence(actual_coords_list) if self.use_gpu: actual_coords_list_padded = actual_coords_list_padded.cuda() start = time.time() if isinstance(batch_sizes[0], int): batch_sizes = torch.tensor(batch_sizes) drmsd_avg = calc_avg_drmsd_over_minibatch(backbone_atoms_padded, actual_coords_list_padded, batch_sizes) write_out("drmsd calculation time:", time.time() - start) if self.use_gpu: drmsd_avg = drmsd_avg.cuda() return drmsd_avg
en
0.758843
This file is part of the OpenProtein project. For license information, please see the LICENSE file in the root directory. # 3 dimensions * 3 coordinates for each aa # (last state + orginal state) # aa_features: msg_count * 2 * feature_count # msg_count * outputsize # aa_count * output size
1.814609
2
netests/converters/vlan/cumulus/ssh.py
Netests/netests
14
6622984
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import json from netests.protocols.ipv4 import IPV4, IPV4Interface from netests.protocols.ipv6 import IPV6, IPV6Interface from netests.protocols.vlan import VLAN, ListVLAN from netests.constants import NOT_SET def _cumulus_vlan_ssh_converter( hostname: str, cmd_output, options={} ) -> VLAN: vlan_lst = ListVLAN( vlan_lst=list() ) if cmd_output is not None: if not isinstance(cmd_output, dict): cmd_output = json.loads(cmd_output) for key, value in cmd_output.items(): if 'vlan' in key: ipv4_addresses = IPV4Interface(ipv4_addresses=list()) ipv6_addresses = IPV6Interface(ipv6_addresses=list()) if len( value.get('iface_obj').get('ip_address').get('allentries') ) > 0: for ip in value.get('iface_obj') \ .get('ip_address') \ .get('allentries'): if ':' in ip: # Is an IPv6 (light I know :) ipv6_addresses.ipv6_addresses.append( IPV6( ip_address=ip.split('/')[0], netmask=ip.split('/')[1] ) ) else: ipv4_addresses.ipv4_addresses.append( IPV4( ip_address=ip.split('/')[0], netmask=ip.split('/')[1] ) ) vlan_lst.vlan_lst.append( VLAN( id=key[4:], name=value.get('iface_obj') .get('description', NOT_SET), vrf_name=NOT_SET, ipv4_addresses=ipv4_addresses, ipv6_addresses=ipv6_addresses, assigned_port=[], options=options ) ) return vlan_lst
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import json from netests.protocols.ipv4 import IPV4, IPV4Interface from netests.protocols.ipv6 import IPV6, IPV6Interface from netests.protocols.vlan import VLAN, ListVLAN from netests.constants import NOT_SET def _cumulus_vlan_ssh_converter( hostname: str, cmd_output, options={} ) -> VLAN: vlan_lst = ListVLAN( vlan_lst=list() ) if cmd_output is not None: if not isinstance(cmd_output, dict): cmd_output = json.loads(cmd_output) for key, value in cmd_output.items(): if 'vlan' in key: ipv4_addresses = IPV4Interface(ipv4_addresses=list()) ipv6_addresses = IPV6Interface(ipv6_addresses=list()) if len( value.get('iface_obj').get('ip_address').get('allentries') ) > 0: for ip in value.get('iface_obj') \ .get('ip_address') \ .get('allentries'): if ':' in ip: # Is an IPv6 (light I know :) ipv6_addresses.ipv6_addresses.append( IPV6( ip_address=ip.split('/')[0], netmask=ip.split('/')[1] ) ) else: ipv4_addresses.ipv4_addresses.append( IPV4( ip_address=ip.split('/')[0], netmask=ip.split('/')[1] ) ) vlan_lst.vlan_lst.append( VLAN( id=key[4:], name=value.get('iface_obj') .get('description', NOT_SET), vrf_name=NOT_SET, ipv4_addresses=ipv4_addresses, ipv6_addresses=ipv6_addresses, assigned_port=[], options=options ) ) return vlan_lst
en
0.526116
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Is an IPv6 (light I know :)
2.723303
3
apps/portalbase/macros/page/accordion/3_accordion.py
Jumpscale/jumpscale_portal8
0
6622985
<filename>apps/portalbase/macros/page/accordion/3_accordion.py def main(j, args, params, tags, tasklet): page = args.page macrostr = args.macrostr.strip() content = "\n".join(macrostr.split("\n")[1:-1]) panels = j.data.serializer.yaml.loads(content) if not isinstance(panels, list): panels = [panels] page.addJS('/jslib/codemirror/autorefresh.js', header=False) page.addMessage('<div class="panel-group" id="accordion" role="tablist" aria-multiselectable="true">') for panel_data in panels: # hack to be able to pass yaml into the macro # the content is json serializer passed to the macro then deserialize here if panel_data is None: continue try: panel_data['content'] = j.data.serializer.json.loads(panel_data['content']) except: pass for item in ['header_id', 'section_id', 'label_id']: if item not in panel_data: panel_data[item] = j.data.idgenerator.generateXCharID(10) page.addMessage(""" <div class="panel panel-default"> <div class="panel-heading" role="tab" id="%(header_id)s"> <h4 class="panel-title"> <a data-toggle="collapse" data-parent="#accordion" href="#%(section_id)s" aria-expanded="true" aria-controls="%(section_id)s">%(title)s</a> """ % panel_data) if 'label_content' in panel_data: page.addMessage(""" <a id=%(label_id)s class="label-archive label label-%(label_color)s glyphicon glyphicon glyphicon-%(label_icon)s pull-right">%(label_content)s</a> """ % panel_data) page.addMessage(""" </h4> </div> <div id="%(section_id)s" class="panel-collapse collapse" role="tabpanel" aria-labelledby="%(header_id)s"> <div class="panel-body"> """ % panel_data) if panel_data.get('code', False): page.addCodeBlock(panel_data['content'], edit=False, exitpage=True, spacename='', pagename='', linenr=True, autorefresh=True) else: page.addMessage(panel_data['content']) page.addMessage(""" </div> <!-- panel body--> </div> <!-- panel collapse--> </div> <!-- panel default-->""") page.addMessage('</div>') # close panel-group params.result = page return params def match(j, args, params, tags, 
tasklet): return True
<filename>apps/portalbase/macros/page/accordion/3_accordion.py def main(j, args, params, tags, tasklet): page = args.page macrostr = args.macrostr.strip() content = "\n".join(macrostr.split("\n")[1:-1]) panels = j.data.serializer.yaml.loads(content) if not isinstance(panels, list): panels = [panels] page.addJS('/jslib/codemirror/autorefresh.js', header=False) page.addMessage('<div class="panel-group" id="accordion" role="tablist" aria-multiselectable="true">') for panel_data in panels: # hack to be able to pass yaml into the macro # the content is json serializer passed to the macro then deserialize here if panel_data is None: continue try: panel_data['content'] = j.data.serializer.json.loads(panel_data['content']) except: pass for item in ['header_id', 'section_id', 'label_id']: if item not in panel_data: panel_data[item] = j.data.idgenerator.generateXCharID(10) page.addMessage(""" <div class="panel panel-default"> <div class="panel-heading" role="tab" id="%(header_id)s"> <h4 class="panel-title"> <a data-toggle="collapse" data-parent="#accordion" href="#%(section_id)s" aria-expanded="true" aria-controls="%(section_id)s">%(title)s</a> """ % panel_data) if 'label_content' in panel_data: page.addMessage(""" <a id=%(label_id)s class="label-archive label label-%(label_color)s glyphicon glyphicon glyphicon-%(label_icon)s pull-right">%(label_content)s</a> """ % panel_data) page.addMessage(""" </h4> </div> <div id="%(section_id)s" class="panel-collapse collapse" role="tabpanel" aria-labelledby="%(header_id)s"> <div class="panel-body"> """ % panel_data) if panel_data.get('code', False): page.addCodeBlock(panel_data['content'], edit=False, exitpage=True, spacename='', pagename='', linenr=True, autorefresh=True) else: page.addMessage(panel_data['content']) page.addMessage(""" </div> <!-- panel body--> </div> <!-- panel collapse--> </div> <!-- panel default-->""") page.addMessage('</div>') # close panel-group params.result = page return params def match(j, args, params, tags, 
tasklet): return True
en
0.3611
# hack to be able to pass yaml into the macro # the content is json serializer passed to the macro then deserialize here <div class="panel panel-default"> <div class="panel-heading" role="tab" id="%(header_id)s"> <h4 class="panel-title"> <a data-toggle="collapse" data-parent="#accordion" href="#%(section_id)s" aria-expanded="true" aria-controls="%(section_id)s">%(title)s</a> <a id=%(label_id)s class="label-archive label label-%(label_color)s glyphicon glyphicon glyphicon-%(label_icon)s pull-right">%(label_content)s</a> </h4> </div> <div id="%(section_id)s" class="panel-collapse collapse" role="tabpanel" aria-labelledby="%(header_id)s"> <div class="panel-body"> </div> <!-- panel body--> </div> <!-- panel collapse--> </div> <!-- panel default--> # close panel-group
1.858016
2
inquire/interactions/__init__.py
HARPLab/inquire
0
6622986
<gh_stars>0 from inquire.interactions.feedback import Query, Trajectory, Choice, Modality from inquire.interactions.modalities import Demonstration, Correction, Preference, BinaryFeedback __all__ = ["Modality", "Query", "Trajectory", "Choice", "Demonstration", "Correction", "Preference", "BinaryFeedback", "Feedback"]
from inquire.interactions.feedback import Query, Trajectory, Choice, Modality from inquire.interactions.modalities import Demonstration, Correction, Preference, BinaryFeedback __all__ = ["Modality", "Query", "Trajectory", "Choice", "Demonstration", "Correction", "Preference", "BinaryFeedback", "Feedback"]
none
1
1.081975
1
leetcode/0405_convert_a_number_to_hexadecimal.py
chaosWsF/Python-Practice
0
6622987
<filename>leetcode/0405_convert_a_number_to_hexadecimal.py """ Given an integer, write an algorithm to convert it to hexadecimal. For negative integer, two’s complement method is used. Note: All letters in hexadecimal (a-f) must be in lowercase. The hexadecimal string must not contain extra leading 0s. If the number is zero, it is represented by a single zero character '0'; otherwise, the first character in the hexadecimal string will not be the zero character. The given number is guaranteed to fit within the range of a 32-bit signed integer. You must not use any method provided by the library which converts/formats the number to hex directly. Example 1: Input: 26 Output: "1a" Example 2: Input: -1 Output: "ffffffff" """ class Solution: def toHex1(self, num): if num == 0: return '0' elif num < 0: num += 2 ** 32 hex_digits = { 0: '0', 1: '1', 2: '2', 3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: 'a', 11: 'b', 12: 'c', 13: 'd', 14: 'e', 15: 'f' } hex_num = [] while num > 0: hex_num.append(hex_digits[num % 16]) num >>= 4 return ''.join(hex_num[::-1]) # def toHex2(self, num): # return '{:x}'.format(2**32 + num) if num < 0 else '{:x}'.format(num) # def toHex3(self, num): # return hex(2**32 + num)[2:] if num < 0 else hex(num)[2:]
<filename>leetcode/0405_convert_a_number_to_hexadecimal.py """ Given an integer, write an algorithm to convert it to hexadecimal. For negative integer, two’s complement method is used. Note: All letters in hexadecimal (a-f) must be in lowercase. The hexadecimal string must not contain extra leading 0s. If the number is zero, it is represented by a single zero character '0'; otherwise, the first character in the hexadecimal string will not be the zero character. The given number is guaranteed to fit within the range of a 32-bit signed integer. You must not use any method provided by the library which converts/formats the number to hex directly. Example 1: Input: 26 Output: "1a" Example 2: Input: -1 Output: "ffffffff" """ class Solution: def toHex1(self, num): if num == 0: return '0' elif num < 0: num += 2 ** 32 hex_digits = { 0: '0', 1: '1', 2: '2', 3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: 'a', 11: 'b', 12: 'c', 13: 'd', 14: 'e', 15: 'f' } hex_num = [] while num > 0: hex_num.append(hex_digits[num % 16]) num >>= 4 return ''.join(hex_num[::-1]) # def toHex2(self, num): # return '{:x}'.format(2**32 + num) if num < 0 else '{:x}'.format(num) # def toHex3(self, num): # return hex(2**32 + num)[2:] if num < 0 else hex(num)[2:]
en
0.703183
Given an integer, write an algorithm to convert it to hexadecimal. For negative integer, two’s complement method is used. Note: All letters in hexadecimal (a-f) must be in lowercase. The hexadecimal string must not contain extra leading 0s. If the number is zero, it is represented by a single zero character '0'; otherwise, the first character in the hexadecimal string will not be the zero character. The given number is guaranteed to fit within the range of a 32-bit signed integer. You must not use any method provided by the library which converts/formats the number to hex directly. Example 1: Input: 26 Output: "1a" Example 2: Input: -1 Output: "ffffffff" # def toHex2(self, num): # return '{:x}'.format(2**32 + num) if num < 0 else '{:x}'.format(num) # def toHex3(self, num): # return hex(2**32 + num)[2:] if num < 0 else hex(num)[2:]
4.344935
4
laceworksdk/api/cloud_activities.py
kiddinn/python-sdk
10
6622988
# -*- coding: utf-8 -*- """ Lacework CloudActivities API wrapper. """ import logging logger = logging.getLogger(__name__) class CloudActivitiesAPI(object): """ Lacework CloudActivities API. """ def __init__(self, session): """ Initializes the CloudActivitiesAPI object. :param session: An instance of the HttpSession class :return CloudActivitiesAPI object. """ super(CloudActivitiesAPI, self).__init__() self._session = session def get(self, start_time=None, end_time=None, org=False): """ A method to get CloudActivities details. :param start_time: A "%Y-%m-%dT%H:%M:%SZ" structured timestamp to begin from. :param end_time: A "%Y-%m-%dT%H:%M:%S%Z" structured timestamp to end at. :param org: A boolean representing whether the request should be performed at the Organization level :return response json """ logger.info("Getting CloudActivities details from Lacework...") # Build the CloudActivities request URI api_uri = "/api/v2/CloudActivities" if start_time and end_time: api_uri += f"?startTime={start_time}&endTime={end_time}" response = self._session.get(api_uri, org=org) return response.json() def search(self, query_data=None, org=False): """ A method to search CloudActivities details. :param query_data: A dictionary containing the necessary search parameters (timeFilter, filters, returns) :param org: A boolean representing whether the request should be performed at the Organization level :return response json """ logger.info("Searching CloudActivities details from Lacework...") # Build the CloudActivities request URI api_uri = "/api/v2/CloudActivities/search" response = self._session.post(api_uri, data=query_data, org=org) return response.json()
# -*- coding: utf-8 -*- """ Lacework CloudActivities API wrapper. """ import logging logger = logging.getLogger(__name__) class CloudActivitiesAPI(object): """ Lacework CloudActivities API. """ def __init__(self, session): """ Initializes the CloudActivitiesAPI object. :param session: An instance of the HttpSession class :return CloudActivitiesAPI object. """ super(CloudActivitiesAPI, self).__init__() self._session = session def get(self, start_time=None, end_time=None, org=False): """ A method to get CloudActivities details. :param start_time: A "%Y-%m-%dT%H:%M:%SZ" structured timestamp to begin from. :param end_time: A "%Y-%m-%dT%H:%M:%S%Z" structured timestamp to end at. :param org: A boolean representing whether the request should be performed at the Organization level :return response json """ logger.info("Getting CloudActivities details from Lacework...") # Build the CloudActivities request URI api_uri = "/api/v2/CloudActivities" if start_time and end_time: api_uri += f"?startTime={start_time}&endTime={end_time}" response = self._session.get(api_uri, org=org) return response.json() def search(self, query_data=None, org=False): """ A method to search CloudActivities details. :param query_data: A dictionary containing the necessary search parameters (timeFilter, filters, returns) :param org: A boolean representing whether the request should be performed at the Organization level :return response json """ logger.info("Searching CloudActivities details from Lacework...") # Build the CloudActivities request URI api_uri = "/api/v2/CloudActivities/search" response = self._session.post(api_uri, data=query_data, org=org) return response.json()
en
0.699712
# -*- coding: utf-8 -*- Lacework CloudActivities API wrapper. Lacework CloudActivities API. Initializes the CloudActivitiesAPI object. :param session: An instance of the HttpSession class :return CloudActivitiesAPI object. A method to get CloudActivities details. :param start_time: A "%Y-%m-%dT%H:%M:%SZ" structured timestamp to begin from. :param end_time: A "%Y-%m-%dT%H:%M:%S%Z" structured timestamp to end at. :param org: A boolean representing whether the request should be performed at the Organization level :return response json # Build the CloudActivities request URI A method to search CloudActivities details. :param query_data: A dictionary containing the necessary search parameters (timeFilter, filters, returns) :param org: A boolean representing whether the request should be performed at the Organization level :return response json # Build the CloudActivities request URI
2.514838
3
tests/hub_test_fork.py
lbonn/eventlet
1
6622989
<filename>tests/hub_test_fork.py # no standard tests in this file, ignore __test__ = False if __name__ == '__main__': import os import eventlet server = eventlet.listen(('localhost', 12345)) t = eventlet.Timeout(0.01) try: new_sock, address = server.accept() except eventlet.Timeout as t: pass pid = os.fork() if not pid: t = eventlet.Timeout(0.1) try: new_sock, address = server.accept() except eventlet.Timeout as t: print("accept blocked") else: kpid, status = os.wait() assert kpid == pid assert status == 0 print("child died ok")
<filename>tests/hub_test_fork.py # no standard tests in this file, ignore __test__ = False if __name__ == '__main__': import os import eventlet server = eventlet.listen(('localhost', 12345)) t = eventlet.Timeout(0.01) try: new_sock, address = server.accept() except eventlet.Timeout as t: pass pid = os.fork() if not pid: t = eventlet.Timeout(0.1) try: new_sock, address = server.accept() except eventlet.Timeout as t: print("accept blocked") else: kpid, status = os.wait() assert kpid == pid assert status == 0 print("child died ok")
en
0.626225
# no standard tests in this file, ignore
2.239597
2
FD_EPR_TraitComposition.py
ldykman/FD_EPR
0
6622990
<filename>FD_EPR_TraitComposition.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ FUNCTIONAL TRAIT COMPOSITION Created on Mon Jun 11 14:23:09 2018 @author: laurendykman Requires three input data tables: Abundance = species as rows, sites as columns. Contains a row of temperatures and a row of months associated with each sample. Temperature row must be named "Temperature". Months row must be named "Months". Names are not case-sensitive. Temperature and months must be type int. Traits = species as rows, traits as columns. Cells contain modalities within a given trait. All modalities must be true modalities in data frame Modality. Case sensitive, space sensitive. Modality = table with traits in one column, corresponding modalities in another. Mdality names must exactly match the values in the Traits data frame. This script will run diagnostics to make sure all traits are present. """ # IMPORT FUNCTIONS AND PACKAGES import numpy as np import pandas as pd import matplotlib.pyplot as plt import os # IMPORT FD_EPR MODULE import FD_EPR_TraitAnalysis as fd # WORKING DIRECTORY path = '/Users/laurendykman/Desktop/github/FD_EPR' # Enter your working directory path_out = path + '/output' os.chdir(path) # IMPORT DATA abund_raw = fd.Format("DataS1.csv") trait = fd.Format("DataS2_BCO-DMO_Dykman.csv") modality = fd.Format("FD_EPR_Modality.csv") predicted = fd.Format(path_out + "/" + "FD_EPR_Predicted.csv") # FORMATTING ABUNDANCE DATA abund = abund_raw.drop(["Unnamed: 1"], axis=1) abund = abund.drop(["Temperature"], axis=0) index = abund.loc["Months Post-Eruption"] == "Pre-Eruption" abund[abund.columns[index]] = abund[abund.columns[index]].apply(pd.to_numeric, errors='coerce', axis=1) abund.replace(np.nan, -10, inplace=True) abund = abund.groupby(abund.loc["Months Post-Eruption"], axis=1).sum() months = list(abund.columns) abund = abund.drop(["Months Post-Eruption"], axis=0) # FORMATTING TRAIT DATA index = trait.columns.str.contains('CITATION', case=False) trait = 
trait.drop(trait.columns[index], axis=1) # Removing citation columns trait = trait.drop("AphiaID", axis=1) # Removing Taxon Aphia ID column trait = trait.drop("scientificNameID", axis=1) # CHECK SPECIES LISTS FOR TRAIT AND ABUNDANCE TABLES ARE IDENTICAL fd.CheckDataMatch(abund, trait) fd.CheckModalities(trait, modality) # PERFORMING MATRIX OPERATIONS TO GET RELATIVE ABUNDANCE OF EACH TRAIT MODALITY trait_dict = fd.GetTraitDict(modality) # Using the modality table, generates a dictionary of modalities per trait. trait_binary = fd.GetBinaryTraits(trait, modality) # Creates a binary table with "1" if an organism expresses a modality, and "0" otherwise. trait_abund = fd.GetTraitAbundance(trait_binary, abund, modality) # Combines binary modality data with species abundance to calculate modality abundace. trait_abund.to_csv(path_out + '/' + 'FD_EPR_TraitAbund.csv') # Uncomment the following line to save trait abundance as a csv file. samples_to_plot = list(trait_abund.index) # Creating the list of all samples to plot for observed values. samples_to_plot_fits = list(predicted.columns) # Creating a list of samples to plot for predicted values. # CREATE A SEPARATE CWM PLOT FOR EACH TRAIT # Change last command to True to save figures for key in trait_dict: plt.figure(figsize=(10,5), dpi=100) fd.PlotTraitComposition(trait_abund, samples=None, modalities=trait_dict[key], fit=predicted, title=key.upper(), xlabel="Month", ylabel='Relative Abundance', filename = "FD_EPR_TRAIT_COMP", output_path = path_out, save = True, addline = True)
<filename>FD_EPR_TraitComposition.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ FUNCTIONAL TRAIT COMPOSITION Created on Mon Jun 11 14:23:09 2018 @author: laurendykman Requires three input data tables: Abundance = species as rows, sites as columns. Contains a row of temperatures and a row of months associated with each sample. Temperature row must be named "Temperature". Months row must be named "Months". Names are not case-sensitive. Temperature and months must be type int. Traits = species as rows, traits as columns. Cells contain modalities within a given trait. All modalities must be true modalities in data frame Modality. Case sensitive, space sensitive. Modality = table with traits in one column, corresponding modalities in another. Mdality names must exactly match the values in the Traits data frame. This script will run diagnostics to make sure all traits are present. """ # IMPORT FUNCTIONS AND PACKAGES import numpy as np import pandas as pd import matplotlib.pyplot as plt import os # IMPORT FD_EPR MODULE import FD_EPR_TraitAnalysis as fd # WORKING DIRECTORY path = '/Users/laurendykman/Desktop/github/FD_EPR' # Enter your working directory path_out = path + '/output' os.chdir(path) # IMPORT DATA abund_raw = fd.Format("DataS1.csv") trait = fd.Format("DataS2_BCO-DMO_Dykman.csv") modality = fd.Format("FD_EPR_Modality.csv") predicted = fd.Format(path_out + "/" + "FD_EPR_Predicted.csv") # FORMATTING ABUNDANCE DATA abund = abund_raw.drop(["Unnamed: 1"], axis=1) abund = abund.drop(["Temperature"], axis=0) index = abund.loc["Months Post-Eruption"] == "Pre-Eruption" abund[abund.columns[index]] = abund[abund.columns[index]].apply(pd.to_numeric, errors='coerce', axis=1) abund.replace(np.nan, -10, inplace=True) abund = abund.groupby(abund.loc["Months Post-Eruption"], axis=1).sum() months = list(abund.columns) abund = abund.drop(["Months Post-Eruption"], axis=0) # FORMATTING TRAIT DATA index = trait.columns.str.contains('CITATION', case=False) trait = 
trait.drop(trait.columns[index], axis=1) # Removing citation columns trait = trait.drop("AphiaID", axis=1) # Removing Taxon Aphia ID column trait = trait.drop("scientificNameID", axis=1) # CHECK SPECIES LISTS FOR TRAIT AND ABUNDANCE TABLES ARE IDENTICAL fd.CheckDataMatch(abund, trait) fd.CheckModalities(trait, modality) # PERFORMING MATRIX OPERATIONS TO GET RELATIVE ABUNDANCE OF EACH TRAIT MODALITY trait_dict = fd.GetTraitDict(modality) # Using the modality table, generates a dictionary of modalities per trait. trait_binary = fd.GetBinaryTraits(trait, modality) # Creates a binary table with "1" if an organism expresses a modality, and "0" otherwise. trait_abund = fd.GetTraitAbundance(trait_binary, abund, modality) # Combines binary modality data with species abundance to calculate modality abundace. trait_abund.to_csv(path_out + '/' + 'FD_EPR_TraitAbund.csv') # Uncomment the following line to save trait abundance as a csv file. samples_to_plot = list(trait_abund.index) # Creating the list of all samples to plot for observed values. samples_to_plot_fits = list(predicted.columns) # Creating a list of samples to plot for predicted values. # CREATE A SEPARATE CWM PLOT FOR EACH TRAIT # Change last command to True to save figures for key in trait_dict: plt.figure(figsize=(10,5), dpi=100) fd.PlotTraitComposition(trait_abund, samples=None, modalities=trait_dict[key], fit=predicted, title=key.upper(), xlabel="Month", ylabel='Relative Abundance', filename = "FD_EPR_TRAIT_COMP", output_path = path_out, save = True, addline = True)
en
0.756813
#!/usr/bin/env python3 # -*- coding: utf-8 -*- FUNCTIONAL TRAIT COMPOSITION Created on Mon Jun 11 14:23:09 2018 @author: laurendykman Requires three input data tables: Abundance = species as rows, sites as columns. Contains a row of temperatures and a row of months associated with each sample. Temperature row must be named "Temperature". Months row must be named "Months". Names are not case-sensitive. Temperature and months must be type int. Traits = species as rows, traits as columns. Cells contain modalities within a given trait. All modalities must be true modalities in data frame Modality. Case sensitive, space sensitive. Modality = table with traits in one column, corresponding modalities in another. Mdality names must exactly match the values in the Traits data frame. This script will run diagnostics to make sure all traits are present. # IMPORT FUNCTIONS AND PACKAGES # IMPORT FD_EPR MODULE # WORKING DIRECTORY # Enter your working directory # IMPORT DATA # FORMATTING ABUNDANCE DATA # FORMATTING TRAIT DATA # Removing citation columns # Removing Taxon Aphia ID column # CHECK SPECIES LISTS FOR TRAIT AND ABUNDANCE TABLES ARE IDENTICAL # PERFORMING MATRIX OPERATIONS TO GET RELATIVE ABUNDANCE OF EACH TRAIT MODALITY # Using the modality table, generates a dictionary of modalities per trait. # Creates a binary table with "1" if an organism expresses a modality, and "0" otherwise. # Combines binary modality data with species abundance to calculate modality abundace. # Uncomment the following line to save trait abundance as a csv file. # Creating the list of all samples to plot for observed values. # Creating a list of samples to plot for predicted values. # CREATE A SEPARATE CWM PLOT FOR EACH TRAIT # Change last command to True to save figures
2.482775
2
service/image.py
khromiumos/chromiumos-chromite
0
6622991
<reponame>khromiumos/chromiumos-chromite<gh_stars>0 # -*- coding: utf-8 -*- # Copyright 2018 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """The Image API is the entry point for image functionality.""" from __future__ import print_function import os from chromite.lib import constants from chromite.lib import cros_build_lib from chromite.lib import image_lib from chromite.lib import osutils from chromite.lib import path_util from chromite.lib.parser import package_info PARALLEL_EMERGE_STATUS_FILE_NAME = 'status_file' class Error(Exception): """Base module error.""" class InvalidArgumentError(Error): """Invalid argument values.""" class ImageToVmError(Error): """Error converting the image to a vm.""" class BuildConfig(object): """Value object to hold the build configuration options.""" def __init__(self, builder_path=None, disk_layout=None, enable_rootfs_verification=True, replace=False, version=None, build_attempt=None, symlink=None): """Build config initialization. Args: builder_path (str): The value to which the builder path lsb key should be set, the build_name installed on DUT during hwtest. disk_layout (str): The disk layout type. enable_rootfs_verification (bool): Whether the rootfs verification is enabled. replace (bool): Whether to replace existing output if any exists. version (str): The version string to use for the image. build_attempt (int): The build_attempt number to pass to build_image. symlink (str): Symlink string. 
""" self.builder_path = builder_path self.disk_layout = disk_layout self.enable_rootfs_verification = enable_rootfs_verification self.replace = replace self.version = version self.build_attempt = build_attempt self.symlink = symlink def GetArguments(self): """Get the build_image arguments for the configuration.""" args = [] if self.builder_path: args.extend(['--builder_path', self.builder_path]) if self.disk_layout: args.extend(['--disk_layout', self.disk_layout]) if not self.enable_rootfs_verification: args.append('--noenable_rootfs_verification') if self.replace: args.append('--replace') if self.version: args.extend(['--version', self.version]) if self.build_attempt: args.extend(['--build_attempt', self.build_attempt]) if self.symlink: args.extend(['--symlink', self.symlink]) return args class BuildResult(object): """Value object to report build image results.""" def __init__(self, return_code, failed_packages): """Init method. Args: return_code (int): The build return code. failed_packages (list[str]): A list of failed packages as strings. """ self.failed_packages = [] for package in failed_packages or []: self.failed_packages.append(package_info.SplitCPV(package, strict=False)) # The return code should always be non-zero if there's any failed packages, # but it's cheap insurance, so check it. self.success = return_code == 0 and not self.failed_packages def Build(board=None, images=None, config=None, extra_env=None): """Build an image. Args: board (str): The board name. images (list): The image types to build. config (BuildConfig): The build configuration options. extra_env (dict): Environment variables to set for build_image. 
Returns: BuildResult """ board = board or cros_build_lib.GetDefaultBoard() if not board: raise InvalidArgumentError('board is required.') images = images or [constants.IMAGE_TYPE_BASE] config = config or BuildConfig() if cros_build_lib.IsInsideChroot(): cmd = [os.path.join(constants.CROSUTILS_DIR, 'build_image')] else: cmd = ['./build_image'] cmd.extend(['--board', board]) cmd.extend(config.GetArguments()) cmd.extend(images) extra_env_local = extra_env.copy() if extra_env else {} with osutils.TempDir() as tempdir: status_file = os.path.join(tempdir, PARALLEL_EMERGE_STATUS_FILE_NAME) extra_env_local[constants.PARALLEL_EMERGE_STATUS_FILE_ENVVAR] = status_file result = cros_build_lib.run(cmd, enter_chroot=True, check=False, extra_env=extra_env_local) try: content = osutils.ReadFile(status_file).strip() except IOError: # No file means no packages. failed = None else: failed = content.split() if content else None return BuildResult(result.returncode, failed) def CreateVm(board, disk_layout=None, is_test=False, chroot=None): """Create a VM from an image. Args: board (str): The board for which the VM is being created. disk_layout (str): The disk layout type. is_test (bool): Whether it is a test image. chroot (chroot_lib.Chroot): The chroot where the image lives. Returns: str: Path to the created VM .bin file. """ assert board cmd = ['./image_to_vm.sh', '--board', board] if is_test: cmd.append('--test_image') if disk_layout: cmd.extend(['--disk_layout', disk_layout]) chroot_args = None if chroot and cros_build_lib.IsOutsideChroot(): chroot_args = chroot.get_enter_args() result = cros_build_lib.run(cmd, check=False, enter_chroot=True, chroot_args=chroot_args) if result.returncode: # Error running the command. Unfortunately we can't be much more helpful # than this right now. raise ImageToVmError('Unable to convert the image to a VM. 
' 'Consult the logs to determine the problem.') vm_path = os.path.join(image_lib.GetLatestImageLink(board), constants.VM_IMAGE_BIN) return os.path.realpath(vm_path) def CreateGuestVm(board, is_test=False, chroot=None): """Convert an existing image into a guest VM image. Args: board (str): The name of the board to convert. is_test (bool): Flag to create a test guest VM image. chroot (chroot_lib.Chroot): The chroot where the cros image lives. Returns: str: Path to the created guest VM folder. """ assert board cmd = [os.path.join(constants.TERMINA_TOOLS_DIR, 'termina_build_image.py')] image_file = constants.TEST_IMAGE_BIN if is_test else constants.BASE_IMAGE_BIN image_path = os.path.join(image_lib.GetLatestImageLink(board), image_file) output_dir = (constants.TEST_GUEST_VM_DIR if is_test else constants.BASE_GUEST_VM_DIR) output_path = os.path.join(image_lib.GetLatestImageLink(board), output_dir) cmd.append(image_path) cmd.append(output_path) chroot_args = None if chroot and cros_build_lib.IsOutsideChroot(): chroot_args = chroot.get_enter_args() result = cros_build_lib.sudo_run(cmd, check=False, enter_chroot=True, chroot_args=chroot_args) if result.returncode: # Error running the command. Unfortunately we can't be much more helpful # than this right now. raise ImageToVmError('Unable to convert the image to a Guest VM using' 'termina_build_image.py.' 'Consult the logs to determine the problem.') return os.path.realpath(output_path) def Test(board, result_directory, image_dir=None): """Run tests on an already built image. Currently this is just running test_image. Args: board (str): The board name. result_directory (str): Root directory where the results should be stored relative to the chroot. image_dir (str): The path to the image. Uses the board's default image build path when not provided. Returns: bool - True if all tests passed, False otherwise. 
""" if not board: raise InvalidArgumentError('Board is required.') if not result_directory: raise InvalidArgumentError('Result directory required.') if not image_dir: # We can build the path to the latest image directory. image_dir = image_lib.GetLatestImageLink(board, force_chroot=True) elif not cros_build_lib.IsInsideChroot() and os.path.exists(image_dir): # Outside chroot with outside chroot path--we need to convert it. image_dir = path_util.ToChrootPath(image_dir) cmd = [ os.path.join(constants.CHROOT_SOURCE_ROOT, constants.CHROMITE_BIN_SUBDIR, 'test_image'), '--board', board, '--test_results_root', result_directory, image_dir, ] result = cros_build_lib.sudo_run(cmd, enter_chroot=True, check=False) return result.returncode == 0
# -*- coding: utf-8 -*- # Copyright 2018 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """The Image API is the entry point for image functionality.""" from __future__ import print_function import os from chromite.lib import constants from chromite.lib import cros_build_lib from chromite.lib import image_lib from chromite.lib import osutils from chromite.lib import path_util from chromite.lib.parser import package_info PARALLEL_EMERGE_STATUS_FILE_NAME = 'status_file' class Error(Exception): """Base module error.""" class InvalidArgumentError(Error): """Invalid argument values.""" class ImageToVmError(Error): """Error converting the image to a vm.""" class BuildConfig(object): """Value object to hold the build configuration options.""" def __init__(self, builder_path=None, disk_layout=None, enable_rootfs_verification=True, replace=False, version=None, build_attempt=None, symlink=None): """Build config initialization. Args: builder_path (str): The value to which the builder path lsb key should be set, the build_name installed on DUT during hwtest. disk_layout (str): The disk layout type. enable_rootfs_verification (bool): Whether the rootfs verification is enabled. replace (bool): Whether to replace existing output if any exists. version (str): The version string to use for the image. build_attempt (int): The build_attempt number to pass to build_image. symlink (str): Symlink string. 
""" self.builder_path = builder_path self.disk_layout = disk_layout self.enable_rootfs_verification = enable_rootfs_verification self.replace = replace self.version = version self.build_attempt = build_attempt self.symlink = symlink def GetArguments(self): """Get the build_image arguments for the configuration.""" args = [] if self.builder_path: args.extend(['--builder_path', self.builder_path]) if self.disk_layout: args.extend(['--disk_layout', self.disk_layout]) if not self.enable_rootfs_verification: args.append('--noenable_rootfs_verification') if self.replace: args.append('--replace') if self.version: args.extend(['--version', self.version]) if self.build_attempt: args.extend(['--build_attempt', self.build_attempt]) if self.symlink: args.extend(['--symlink', self.symlink]) return args class BuildResult(object): """Value object to report build image results.""" def __init__(self, return_code, failed_packages): """Init method. Args: return_code (int): The build return code. failed_packages (list[str]): A list of failed packages as strings. """ self.failed_packages = [] for package in failed_packages or []: self.failed_packages.append(package_info.SplitCPV(package, strict=False)) # The return code should always be non-zero if there's any failed packages, # but it's cheap insurance, so check it. self.success = return_code == 0 and not self.failed_packages def Build(board=None, images=None, config=None, extra_env=None): """Build an image. Args: board (str): The board name. images (list): The image types to build. config (BuildConfig): The build configuration options. extra_env (dict): Environment variables to set for build_image. 
Returns: BuildResult """ board = board or cros_build_lib.GetDefaultBoard() if not board: raise InvalidArgumentError('board is required.') images = images or [constants.IMAGE_TYPE_BASE] config = config or BuildConfig() if cros_build_lib.IsInsideChroot(): cmd = [os.path.join(constants.CROSUTILS_DIR, 'build_image')] else: cmd = ['./build_image'] cmd.extend(['--board', board]) cmd.extend(config.GetArguments()) cmd.extend(images) extra_env_local = extra_env.copy() if extra_env else {} with osutils.TempDir() as tempdir: status_file = os.path.join(tempdir, PARALLEL_EMERGE_STATUS_FILE_NAME) extra_env_local[constants.PARALLEL_EMERGE_STATUS_FILE_ENVVAR] = status_file result = cros_build_lib.run(cmd, enter_chroot=True, check=False, extra_env=extra_env_local) try: content = osutils.ReadFile(status_file).strip() except IOError: # No file means no packages. failed = None else: failed = content.split() if content else None return BuildResult(result.returncode, failed) def CreateVm(board, disk_layout=None, is_test=False, chroot=None): """Create a VM from an image. Args: board (str): The board for which the VM is being created. disk_layout (str): The disk layout type. is_test (bool): Whether it is a test image. chroot (chroot_lib.Chroot): The chroot where the image lives. Returns: str: Path to the created VM .bin file. """ assert board cmd = ['./image_to_vm.sh', '--board', board] if is_test: cmd.append('--test_image') if disk_layout: cmd.extend(['--disk_layout', disk_layout]) chroot_args = None if chroot and cros_build_lib.IsOutsideChroot(): chroot_args = chroot.get_enter_args() result = cros_build_lib.run(cmd, check=False, enter_chroot=True, chroot_args=chroot_args) if result.returncode: # Error running the command. Unfortunately we can't be much more helpful # than this right now. raise ImageToVmError('Unable to convert the image to a VM. 
' 'Consult the logs to determine the problem.') vm_path = os.path.join(image_lib.GetLatestImageLink(board), constants.VM_IMAGE_BIN) return os.path.realpath(vm_path) def CreateGuestVm(board, is_test=False, chroot=None): """Convert an existing image into a guest VM image. Args: board (str): The name of the board to convert. is_test (bool): Flag to create a test guest VM image. chroot (chroot_lib.Chroot): The chroot where the cros image lives. Returns: str: Path to the created guest VM folder. """ assert board cmd = [os.path.join(constants.TERMINA_TOOLS_DIR, 'termina_build_image.py')] image_file = constants.TEST_IMAGE_BIN if is_test else constants.BASE_IMAGE_BIN image_path = os.path.join(image_lib.GetLatestImageLink(board), image_file) output_dir = (constants.TEST_GUEST_VM_DIR if is_test else constants.BASE_GUEST_VM_DIR) output_path = os.path.join(image_lib.GetLatestImageLink(board), output_dir) cmd.append(image_path) cmd.append(output_path) chroot_args = None if chroot and cros_build_lib.IsOutsideChroot(): chroot_args = chroot.get_enter_args() result = cros_build_lib.sudo_run(cmd, check=False, enter_chroot=True, chroot_args=chroot_args) if result.returncode: # Error running the command. Unfortunately we can't be much more helpful # than this right now. raise ImageToVmError('Unable to convert the image to a Guest VM using' 'termina_build_image.py.' 'Consult the logs to determine the problem.') return os.path.realpath(output_path) def Test(board, result_directory, image_dir=None): """Run tests on an already built image. Currently this is just running test_image. Args: board (str): The board name. result_directory (str): Root directory where the results should be stored relative to the chroot. image_dir (str): The path to the image. Uses the board's default image build path when not provided. Returns: bool - True if all tests passed, False otherwise. 
""" if not board: raise InvalidArgumentError('Board is required.') if not result_directory: raise InvalidArgumentError('Result directory required.') if not image_dir: # We can build the path to the latest image directory. image_dir = image_lib.GetLatestImageLink(board, force_chroot=True) elif not cros_build_lib.IsInsideChroot() and os.path.exists(image_dir): # Outside chroot with outside chroot path--we need to convert it. image_dir = path_util.ToChrootPath(image_dir) cmd = [ os.path.join(constants.CHROOT_SOURCE_ROOT, constants.CHROMITE_BIN_SUBDIR, 'test_image'), '--board', board, '--test_results_root', result_directory, image_dir, ] result = cros_build_lib.sudo_run(cmd, enter_chroot=True, check=False) return result.returncode == 0
en
0.786598
# -*- coding: utf-8 -*- # Copyright 2018 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. The Image API is the entry point for image functionality. Base module error. Invalid argument values. Error converting the image to a vm. Value object to hold the build configuration options. Build config initialization. Args: builder_path (str): The value to which the builder path lsb key should be set, the build_name installed on DUT during hwtest. disk_layout (str): The disk layout type. enable_rootfs_verification (bool): Whether the rootfs verification is enabled. replace (bool): Whether to replace existing output if any exists. version (str): The version string to use for the image. build_attempt (int): The build_attempt number to pass to build_image. symlink (str): Symlink string. Get the build_image arguments for the configuration. Value object to report build image results. Init method. Args: return_code (int): The build return code. failed_packages (list[str]): A list of failed packages as strings. # The return code should always be non-zero if there's any failed packages, # but it's cheap insurance, so check it. Build an image. Args: board (str): The board name. images (list): The image types to build. config (BuildConfig): The build configuration options. extra_env (dict): Environment variables to set for build_image. Returns: BuildResult # No file means no packages. Create a VM from an image. Args: board (str): The board for which the VM is being created. disk_layout (str): The disk layout type. is_test (bool): Whether it is a test image. chroot (chroot_lib.Chroot): The chroot where the image lives. Returns: str: Path to the created VM .bin file. # Error running the command. Unfortunately we can't be much more helpful # than this right now. Convert an existing image into a guest VM image. Args: board (str): The name of the board to convert. 
is_test (bool): Flag to create a test guest VM image. chroot (chroot_lib.Chroot): The chroot where the cros image lives. Returns: str: Path to the created guest VM folder. # Error running the command. Unfortunately we can't be much more helpful # than this right now. Run tests on an already built image. Currently this is just running test_image. Args: board (str): The board name. result_directory (str): Root directory where the results should be stored relative to the chroot. image_dir (str): The path to the image. Uses the board's default image build path when not provided. Returns: bool - True if all tests passed, False otherwise. # We can build the path to the latest image directory. # Outside chroot with outside chroot path--we need to convert it.
2.248806
2
test_project/gallery/models.py
c0ntribut0r/django-mptt-urls
24
6622992
<gh_stars>10-100 from django.db import models from django.urls import reverse from mptt.models import MPTTModel, TreeForeignKey class Category(MPTTModel): name = models.CharField('category name', max_length=32) # ... some other fields parent = TreeForeignKey('self', null=True, blank=True, on_delete=models.CASCADE, verbose_name='parent category', related_name='categories') slug = models.SlugField(unique=True) views = models.PositiveIntegerField('number of page views', default=0) def __unicode__(self): return self.name def get_absolute_url(self): return reverse('gallery', kwargs={'path': self.get_path()}) class Photo(models.Model): name = models.CharField('photo name', max_length=32) # ... some other fields parent = TreeForeignKey(Category, on_delete=models.CASCADE, verbose_name='parent category', related_name='photos') slug = models.SlugField() class Meta: unique_together = ('slug', 'parent') def __unicode__(self): return self.name
from django.db import models from django.urls import reverse from mptt.models import MPTTModel, TreeForeignKey class Category(MPTTModel): name = models.CharField('category name', max_length=32) # ... some other fields parent = TreeForeignKey('self', null=True, blank=True, on_delete=models.CASCADE, verbose_name='parent category', related_name='categories') slug = models.SlugField(unique=True) views = models.PositiveIntegerField('number of page views', default=0) def __unicode__(self): return self.name def get_absolute_url(self): return reverse('gallery', kwargs={'path': self.get_path()}) class Photo(models.Model): name = models.CharField('photo name', max_length=32) # ... some other fields parent = TreeForeignKey(Category, on_delete=models.CASCADE, verbose_name='parent category', related_name='photos') slug = models.SlugField() class Meta: unique_together = ('slug', 'parent') def __unicode__(self): return self.name
en
0.75312
# ... some other fields # ... some other fields
2.227717
2
acessando_api.py
Renatoelho/consumindo-api-enderecos-python
0
6622993
#!/usr/bin/python3 import requests lista_ceps: list = ['01153000', '20050000', '70714020'] lista_enderecos: list = [] for cep in lista_ceps: url: str = 'https://viacep.com.br/ws/{}/json/'.format(cep) try: req = requests.get(url, timeout=3) if req.status_code == 200: # API acessada com sucesso! endereco = req.json() lista_enderecos.append( [ endereco['cep'], endereco['logradouro'], endereco['complemento'], endereco['bairro'], endereco['localidade'], endereco['uf'] ] ) else: erro = req.raise_for_status() print(f'Ocorreu o seguinte erro no acesso da API: {erro}') except Exception as erro: print(f'Ocorreu o seguinte erro na execução do código: {erro}') for item in lista_enderecos: print(item)
#!/usr/bin/python3 import requests lista_ceps: list = ['01153000', '20050000', '70714020'] lista_enderecos: list = [] for cep in lista_ceps: url: str = 'https://viacep.com.br/ws/{}/json/'.format(cep) try: req = requests.get(url, timeout=3) if req.status_code == 200: # API acessada com sucesso! endereco = req.json() lista_enderecos.append( [ endereco['cep'], endereco['logradouro'], endereco['complemento'], endereco['bairro'], endereco['localidade'], endereco['uf'] ] ) else: erro = req.raise_for_status() print(f'Ocorreu o seguinte erro no acesso da API: {erro}') except Exception as erro: print(f'Ocorreu o seguinte erro na execução do código: {erro}') for item in lista_enderecos: print(item)
pt
0.752557
#!/usr/bin/python3 # API acessada com sucesso!
2.957082
3
NoiseMapGenerators_14.py
defzzd/TerrainGenerators
1
6622994
''' Fractal noise map generator library. Also includes additional non-"noise" map generators for dungeon generation purposes. The reason for putting them in is because I want the games I make to be able to use very similar code for all the different types of maps I need. Ensuring a high level of cross-compatibility at the generator level may enhance creativity later on. Each generator is an object built by a class specific to that type of generator. All generators SHOULD have enough defaults to require only a handful of arguments (tailored to your display needs) before they're popping out noiseclouds. The most useful generators are the Perlin generator and the Mk II dungeon map generator, but they all have their own unique capabilities. Generators currently include: PlasmaFractalGenerator() PerlinNoiseGenerator() SimplexNoiseGenerator() DungeonMapGenerator() RoomFilledMapGenerator() MarkIIDungeonMapGenerator() Of these, the Simplex generator is the most technically complex but is theoretically faster at creating a noise map than the Plasma and Perlin generators. It's not clear whether my implementation is even close to optimized for speed, though. I don't yet know enough about python/C integration to try speeding it up. The Perlin generator return the best-looking terrain maps, possibly tied with the Simplex generator. They both require some fiddling with generator input parameters to get better-looking results. The plasma generator has some gridwards bias, but it too produces decent noise clouds, as long as you don't look too closely or get too unlucky. It was the first noise generator I made, before I realized I wanted to make all the parameters of the various generators more similar to each other. I might go back and change it to that at some point, but I have no especial reason to given its technical inferiority to the simplex and Perlin generators. 
TerrainMapGenerators contains noise generators and "dungeon map generators," which are more like signal than noise, as they return maps full of rooms and corridors illustrated using two Z values (0 and 1). The DungeonMapGenerator produces randomly placed rectangular rooms that all connect to each other using L-shaped corridors daisy chained from one room's centerpoint to the next, in the order of room placement. This algorithm was inspired by/copied from the libtcod roguelike tutorial at < http://www.roguebasin.com/index.php?title=Complete_Roguelike_Tutorial,_using_python%2Blibtcod,_part_1 >. The RoomFilledMapGenerator creates maps packed full of rectangular rooms. Has significant bias and no connecting corridors. I didn't really like the direction it was going in, but it can probably be turned into something decent with some upgrading and tweaking. The MarkIIDungeonMapGenerator is my favorite one so far. It produces maps wherein the rooms are connected in a branching pattern such that dungeons have "wings" which can be quite lengthy and significantly subdivided. Note that the dependencies do NOT include pygame, even though the display program I created for demonstrations does. ''' import random import sys import math #### Classes #### class PlasmaFractalGenerator: ''' Create a fractal generator that returns a list of ((word for things that come in parentheses)) consisting of three floating point values: x, y and z coordinates for constructing a plasma fractal for use as a noise map. ''' def __init__(self, array_root=2, corners_min=0, corners_max=255, displacement_min=(-35), displacement_max=35, minimum_separation_distance=1, uleft_corner=None, uright_corner=None, lleft_corner=None, lright_corner=None): ## The root of the array (it's square root, or side measurement): self.array_root = array_root ## Save the width and height of the map as state. We'll be using this to construct a new map to hold the plasma fractal in a method designed for this purpose. 
self.array_width = (self.array_root * 2) self.array_height = (self.array_root * 2) ## Init the plasma fractal's handler, the noise array, as None: self.saved_noise_array = None ## Min and max values for randomly generated corner Z values: self.corners_min = corners_min self.corners_max = corners_max ## The range of randomness that can be applied to each midpoint displacement. ## Usual supplied values have a negative min and a positive max. self.displacement_min = displacement_min self.displacement_max = displacement_max ## The distance at which the fractal stops subdividing itself and returns a value for the next least coordinate point ( 1.004 --> 1, 1.000 --> 1, 0.996 --> 0 etc if min_sep_dist is 1). self.minimum_separation_distance = minimum_separation_distance ## Corners' initial zee values, can be set manually in __init__() parameters: self.uleft_corner = uleft_corner self.uright_corner = uright_corner self.lleft_corner = lleft_corner self.lright_corner = lright_corner ## Someone might want the corners to be preset values, so check if they didn't at the time of initialization. ## ... 
## This section may be a candidate for refactorization in the future, with the addition of parameters to reinitialize_corners() if self.uleft_corner is None: self.uleft_corner = random.randint(self.corners_min, self.corners_max) if self.uright_corner is None: self.uright_corner = random.randint(self.corners_min, self.corners_max) if self.lleft_corner is None: self.lleft_corner = random.randint(self.corners_min, self.corners_max) if self.lright_corner is None: self.lright_corner = random.randint(self.corners_min, self.corners_max) def reinitialize_corners(self, uleft_corner=None, uright_corner=None, lleft_corner=None, lright_corner=None): if uleft_corner == None: self.uleft_corner = random.randint(self.corners_min, self.corners_max) else: self.uleft_corner = uleft_corner if uright_corner == None: self.uright_corner = random.randint(self.corners_min, self.corners_max) else: self.uright_corner = uright_corner if lleft_corner == None: self.lleft_corner = random.randint(self.corners_min, self.corners_max) else: self.lleft_corner = lleft_corner if lright_corner == None: self.lright_corner = random.randint(self.corners_min, self.corners_max) else: self.lright_corner = lright_corner def generate_noise(self, x=None, y=None, supplied_width=None, supplied_height=None, uleft_corner=None, uright_corner=None, lleft_corner=None, lright_corner=None): ''' This function is the gateway function to generate_plasma(). ''' del self.saved_noise_array self.saved_noise_array = [] ## This section necessitated by the combination of my desire to make generate_noise() callable with arbitrary arguments and Python's refusal to accept self.foo as parameters for a method. 
if x == None: x = 0 if y == None: y = 0 if supplied_width == None: supplied_width = self.array_width if supplied_height == None: supplied_height = self.array_height if uleft_corner == None: uleft_corner = self.uleft_corner if uright_corner == None: uright_corner = self.uright_corner if lleft_corner == None: lleft_corner = self.lleft_corner if lright_corner == None: lright_corner = self.lright_corner ## Remember, no call to self in the parameters when a method is calling another method. The definition of the second method will invoke its own self, don't worry. :p self.plasma_recursion(x=x, y=y, supplied_width=supplied_width, supplied_height=supplied_height, uleft_corner=uleft_corner, uright_corner=uright_corner, lleft_corner=lleft_corner, lright_corner=lright_corner) ####print(" Debug: self.saved_noise_array == ") #for each in self.saved_noise_array: # ###print(" " + str(each)) ## Now convert that giant list into a tuple with the same ordering as the PerlinNoiseGenerator's results. array_to_return = [] for each_array_height_index in range(0, supplied_height): # y ## Fill the array_to_return with rows full of -1s so we only have to iterate through it once in the next step! new_row = [] for each_array_width_index in range(0, supplied_width): # x new_row.append(-1) array_to_return.append(new_row) ####print(" Debug: array_to_return == " + str(array_to_return) + "\n") for each_cell in self.saved_noise_array: ## Round down x and y since the values are probably all floats. ## This will ALMOST CERTAINLY give me bad results and I'm gonna have to change something, maybe cleverer rounding?? ## I may have to round up and down more precisely than int() depending on exactly what ends up happening with the results. :S ''' ## EDIT: The following is probably not the best way to do this. I added the -1 overwrite procedure instead. ## ... ## Complicated syntax is actually very shallow conceptually. 
## array[a].insert([b], [c]) ## a == the rounded down y value of the cell ## b == the rounded down x value of the cell ## c == the floating point z value of the cell ## Rounding is currently being done by int() calls, this may very well be a bad idea. See above note. ## All index variables are referenced by their index number in each_cell; hence the square brackets. array_to_return[int(each_cell[1])].insert(int(each_cell[0]), each_cell[2]) ''' ####print(" Debug: each_cell == " + str(each_cell)) ####print(" each_cell[0] == " + str(each_cell[0])) ####print(" each_cell[1] == " + str(each_cell[1])) ####print(" each_cell[2] == " + str(each_cell[2])) ## The syntax is now: ## array[y][x] = z ## where y, x and z are extracted from their respective indices in each_cell. ## Rounding is once again involved at this step. See above notes in this method. ## DEBUG: Testing -1 to see if it always rounds one way or does a split at 0.5 array_to_return[int(each_cell[1])][int(each_cell[0])] = each_cell[2] ## If this line is left out the generator will use the same corner values and make a whole new map between them. ## Remember, self.reinitialize_corners() can be called in the main program. #self.reinitialize_corners() return array_to_return def plasma_recursion(self, x, y, supplied_width, supplied_height, uleft_corner, uright_corner, lleft_corner, lright_corner): ## This method is intended to be called by self.generate_noise() ## The results of calling this separately from self.generate_noise() will be a long list of [x, y, z] values rather than a tuple with the form ( array[y][x] == (z) ). ''' Recursively supply [x, y, z]-formatted plasma fractal elements to self.saved_noise_array, as called by self.generate_noise() ''' new_width = (supplied_width / 2) new_height = (supplied_height / 2) if ( (supplied_width > self.minimum_separation_distance) or (supplied_height > self.minimum_separation_distance) ): ## This step must happen during this part of the conditional tree. 
Not after the else! random_midpoint_displacement = random.randint(self.displacement_min, self.displacement_max) ## Create midpoint's zee by averaging corners' zees and mixing in the random_midpoint_displacement: mid_z = ( ( (uleft_corner + uright_corner + lleft_corner + lright_corner) / 4 ) + random_midpoint_displacement ) ## Deduce sides' zees: top_z = ( (uleft_corner + uright_corner) / 2 ) bottom_z = ( (lleft_corner + lright_corner) / 2 ) left_z = ( (uleft_corner + lleft_corner) / 2 ) right_z = ( (uright_corner + lright_corner) / 2 ) ## Recursion. Note this happens inside the earlier if statement. The alternative is not recurring at this call, and instead returning a value. uleft_quadrant = self.plasma_recursion(x=x, y=y, supplied_width=new_width, supplied_height=new_height, uleft_corner=uleft_corner, uright_corner=top_z, lleft_corner=left_z, lright_corner=mid_z ) uright_quadrant = self.plasma_recursion(x=(x+new_width), y=y, supplied_width=new_width, supplied_height=new_height, uleft_corner=top_z, uright_corner=uright_corner, lleft_corner=mid_z, lright_corner=right_z ) lleft_quadrant = self.plasma_recursion(x=x, y=(y+new_height), supplied_width=new_width, supplied_height=new_height, uleft_corner=left_z, uright_corner=mid_z, lleft_corner=lleft_corner, lright_corner=bottom_z ) lright_quadrant = self.plasma_recursion(x=(x+new_width), y=(y+new_height), supplied_width=new_width, supplied_height=new_height, uleft_corner=mid_z, uright_corner=right_z, lleft_corner=bottom_z, lright_corner=lright_corner ) else: ## When the distance between the corners drops below the minimum separation distance, create an [x, y, z] cell and return it up the chain: new_z_value = ( (uleft_corner + uright_corner + lleft_corner + lright_corner) / 4 ) new_coordinate = [x, y, new_z_value] self.saved_noise_array.append(new_coordinate) class PerlinNoiseGenerator: def __init__(self): ## The generator saves its noise-map state: self.noise_array = [] self.noise_width = 0 noise_height = 0 def 
generate_noise(self, width, height, frequency, octaves): ''' Returns a tuple of [parameter 2] lists each containing [parameter 1] randomly generated integer numbers between $FIX_ME_MINIMUM and $FIX_ME_MAXIMUM, fractally smoothed as Perlin noise using a frequency of [parameter 3] and an octave count of [parameter 4]. ''' ## Octaves? ## It's used for calling turbulence(), which considers that parameter to be "size". ## The original function declared that changing octaves changes how far in or out from the noise you are zoomed. ## Which seems like a decent interpretation of the results. ## Raising the frequency makes it spikier (which is reminiscent of zooming out). ## Raising the octaves make it smoother (which is reminiscent of zooming in). ## Note that keeping the ratios of frequency to octaves the same will keep the results looking similar! ## For this reason I recommend using small octave values, since that governs the recursor runtime. ## First, clear the currently saved noise map: ## ... ## actually self.noise_array is used internally to the generator's function and does not save the actual noise map. ## Interesting, that. del self.noise_array[:] ## Now assign this NoiseGenerator's current noise_width and noise_height to the values supplied by the function call parameters: ## Note that the NoiseGenerator saves these as state because they need to be referenced in the sub-functions below. self.noise_width = width self.noise_height = height ## Initializing the noise_array with random numbers. ## This for loop provides the raw random data smeuthanized into a pretty, pretty vapor cloud further in the program. ## Create a bunch of rows, equal in number to self.noise_height... for each_row in range(0, self.noise_height): noise_row_handler = [] ## ... 
and fill them with randint()s equal in number to self.noise_width: for each_column in range(0, self.noise_width): noise_value = ( random.randint(0, 1000) / 1000.0 ) noise_row_handler.append(noise_value) ## Attach each row to the noise_array. self.noise_array.append(noise_row_handler) ## The generator's noise_array should now be full of rows which are full of integers. ## The noise_array isn't the finished product. It's used to create it, in the below functions. result = [] ## Turbulating the noise array ## for each_y in range(0, self.noise_height): turbulated_noise_row_handler = [] for each_x in range(0, self.noise_width): ## Note: Frequency is rolled into the parameters here! turbulated_noise_value = int(self.totally_justified_turbulence_function((each_x * frequency), (each_y * frequency), octaves)) turbulated_noise_row_handler.append(turbulated_noise_value) result.append(turbulated_noise_row_handler) ## NOTE that the NoiseGenerator does NOT save the result as state. ## It hands it off to whatever called its generate_noise() function. ## This is where this generator's entire function chain ends: return result def totally_justified_turbulence_function(self, x, y, size): ## noise_value is "built up" by smooth_noise(): noise_value = 0.0 ## Floats it: size *= 1.0 initial_size = size ## This is kind of like fractally splitting a grid, except it just sort of "resonates" itself in half and applies noise smoothening or something. Octaves. while (size >= 1): the_smooth_noise = self.smooth_noise((x / size), (y / size)) the_smooth_noise *= size ## Add it to the noise_value pile: noise_value += the_smooth_noise ## Paring down the size... iterating downwards... size /= 2.0 ## Order of Operations suggests division before multiplication, so: noise_value /= initial_size ## ??? ## Experiment to figure out what it does! o_o ## ... 
## Biases the resulting z values to average out at this number: noise_value *= 128.0 return noise_value def smooth_noise(self, x, y): ''' Return the average value of the 4 neighbors of the point (x, y) from self.noise_array. ''' ## NOTE! self.noise_array is a cranny full of state used for THIS FUNCTION ONLY. ## The following is necessary because of modulo calls further down that would ignore it, but it needs to be saved. ## Get the trailing part of the floats of x and y: fractional_element_of_x = ( x - int(x) ) fractional_element_of_y = ( y - int(y) ) x1 = ( (int(x) + self.noise_width) % self.noise_width ) y1 = ( (int(y) + self.noise_height) % self.noise_height ) ## I think the -1 is to compensate for the fractional_element_of_foo being extracted earlier. ## Remember, that fractional_element is added back in below. ## Apart from that, this is exactly the same as \ ## figuring out the length of a line between \ ## (x1, y1) and (x2, y2) in a noise plane. ## Or something like that. Surely. x2 = ( (x1 + self.noise_width - 1) % self.noise_width ) y2 = ( (y1 + self.noise_height - 1) % self.noise_height ) ## Take NOTE of the use of self.noise_array below... ## It's the place it really matters in this ridiculous three-function chain, \ ## even though it's stored at the object level. ## Begin the cooking process by taking out a bowl. 
value = 0.0 ## Place inside the bowl the fractional element of X times the fractional element of Y times the noise value at location (y1, x1) value += ( fractional_element_of_x * fractional_element_of_y * self.noise_array[y1][x1] ) ## Next, stir in the fractional element of X times (one minus the fractional element of y) times the noise value at location (y2, x1) value += ( fractional_element_of_x * (1 - fractional_element_of_y) * self.noise_array[y2][x1] ) ## Sprinkle liberal amounts of (one minus the fractional element of X) times the fractional element of Y times the noise value at location (y1, x2) value += ( (1 - fractional_element_of_x) * fractional_element_of_y * self.noise_array[y1][x2] ) ## Line baking pan with a mixture of (one minus the fractional element of X) times (one minus the fractional element of Y) times the noise value at location (y2, x2) value += ( (1 - fractional_element_of_x) * (1 - fractional_element_of_y) * self.noise_array[y2][x2] ) ## I'm not yet sure how adding four things and then not dividing by four returns the AVERAGE value of the four neighbors of point (x, y) in the noise array. (Maybe it's already taken into account?) ## But slap that pan in the oven and let it burn for 0.002 ms. return value class SimplexNoiseGenerator: ## These things are true for every instance of this class and does not require re-initting. ## I don't really know what's going on here. <NAME>. ## ... ## The way it's referenced suggests that grad3 is an ordered list of simplex vertices. ## gi0/gi1/gi2 gives numbers that somehow map to indices of this list via a quite arcane mathemagical cantrip with no justification given. See below in the noise generator. ## I'm just gonna interpret all those Grad objects as simple boxes for vertex coordinates. grad3 = [ [1, 1, 0], [-1, 1, 0], [1, -1, 0], [-1, -1, 0], \ [1, 0, 1], [-1, 0, 1], [1, 0, -1], [-1, 0, -1], \ [0, 1, 1], [0, -1, 1], [0, 1, -1], [0, -1, -1] ] ## ... 
## Wow I think they actually decided not to include a grad2 table because the mapping for grad3 technically works for grad2 too. ## Wow. ## I'm gonna go ahead and make a grad2 table based on my interpretation of what is going on here. grad2 = [ [1, 1], [-1, 1], [1, -1], [-1, -1], \ [1, 0], [-1, 0], [1, 0], [-1, 0], \ [0, 1], [0, -1], [0, 1], [0, -1] ] ## Nooope does not make more sense now. ## I'm going to put all of my trust in the implicit knowledge of the Java coder here. ## Just going to assume using the first two columns of the grad3 table works. ## It probably should, given that in grad2, there are precisely 4 instances of each specific value across the table, in varying combinations. ## So even though there are repeats I guess it still works somehow?! ## Maybe the fact there's some modulus going on ensures the repeated indices get skipped or something? ## The next section initializes the skewing and unskewing factors. ## I looked in the Java code and these are just constants. ## They should probably be called in preprocessing somehow, maybe at the top of this module... ## But I want the generators to be able to whip out new worlds at high speeds... ## So it's either top of the module, presolved here, or it takes too long. Choose one. #F2 = 0.3660254037844386 # 0.5*(Math.sqrt(3.0)-1.0); <-- IDLE gives me 0.3660254037844386 instead of what I had -- the lower-precision 0.36602540378 ... I clearly made a mistake while putting the formula into Google as an impromptu calculator substitute. Whatever, I hadn't considered putting the stuff in the base class at that time. #G2 = 0.21132486540518713 # (3.0-Math.sqrt(3.0))/6.0; apparently I copied it incorrectly. I had 2.71132486541 before I changed it to 0.21132486540518713 ## Trying out the math module for debugging purposes and it sort of makes it better anyways? F2 = ( 0.5 * (math.sqrt(3.0) - 1.0) ) G2 = ( ( 3.0 - math.sqrt(3.0) ) / 6.0 ) ## There's a fastfloor algorithm in the Java code. 
## Whether or not any algorithm modifications like this in Python might help is currently beyond me and beyond my needs to implement this generator. ## I'm skipping that. def __init__(self, supplied_hash=255): ## The following section initializes self.noise_array. ## NOTE: The Java example just runs through the same list twice -- in Python this approach makes index errors with all the easy ways to do that behavior, so I'm using a separate Python implementation's technique of repeating the list twice, instead. ## The list contains every integer from 0 to 255. self.noise_array_seed = [151,160,137,91,90,15, \ 131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23, \ 190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33, \ 88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166, \ 77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244, \ 102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196, \ 135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123, \ 5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42, \ 223,183,170,213,119,248,152, 2,44,154,163, 70,221,153,101,155,167, 43,172,9, \ 129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228, \ 251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107, \ 49,192,214, 31,181,199,106,157,184, 84,204,176,115,121,50,45,127, 4,150,254, \ 138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180] ## Prep the noise_array variable for subsequent randomization. Remember, this is the __init__() for the generator. Things have to be initialized somewhere. self.noise_array = [] ## Hash number is a variable because someone might think to make the seed some other number than 255 and would want to change the hash to match. 
self.hash_number = supplied_hash ## in Java I think you need to explicitly set the size of the array; not so in Python self.permutations_table = [] #self.permutations_table_Mod12 = [] ## Randomize the seed distribution (CURRENTLY DEBUGGING): self.randomize_the_noise_array_seed() ## This may only be called after self.hash_number has been established. #self.generate_permutations_table() def generate_permutations_table(self): if self.hash_number == None: ## Note that this presumes generate_permutations_table() will never be called when self.noise_array is zeroed out for regeneration or only half its normal size, which ought to always be the case. self.hash_number = ((len(self.noise_array) // 2) - 1) ## The reasoning behind this is somewhat complex. It has to do with there being 512 numbers in the noise array... ## which is constructed by taking the 256 numbers in the initial noise array and putting them in again on the end in the same order. ## The noise array is that doubled size because of some sort of wraparound thing the simplex grid needs, I think. ## But for the hash number, it needs to be 255 if there are 256 distinct values. Why, I forget, but it's somewhere in the logic behind needing a permutations_table. del self.permutations_table self.permutations_table = [] ## I changed 512 to 256 because it was giving me "list index out of range" ## This was probably not a good idea but I'll figure out why once it gives me more meaningful results with its errors ## ... ## Comparing the java and python versions convinced me it should be the other way. Their tables are both supposed to be 512. ## ... ## Made hash number changeable. ## Note that the permutations_table must be the size of the noise_array, since it is the table of permutations of that noise array's values, with a 1:1 correspondence (bijection??) for each_number in range(0, len(self.noise_array)): ## 255 was what the Java code said, but 256 produces non-errored results. 
Why would it be 255 any not 256 anyways? Very strange! Is python's method of storing data really that different from Java's? Can a short in Java only be positive? Can a list in python only be positive?! Argh... ## ... ## Just use 255. The Python implementation doesn't have a Mod12 table... ## I don't even know if it'll be faster, since I have to rehash everything every time I regenerate the array, anyways. ## It's entirely possible the second table for modulus results is actually wasteful rather than helpful. Idk. ## ... ## In fact I think it raises bad new problems in Python, given my perhaps mistaken instinct to use self.noise_array in building the mod table rather than permutations_table which I think is what's supposed to be used in the Java program... ## I'm going to drop the mod table and leave it here as evidence of my thought processes, for at least this version. self.permutations_table.append(self.noise_array[(each_number & self.hash_number)]) def randomize_the_noise_array_seed(self, random_number_seed=None): ## Does not currently support random seeds. >_> ## The parameter is there to inspire you to write it in, of course! ## I bet handlers are un-Pythonic for some convoluted reason nobody bothered to explain to me. #noise_array_seed_handler = self.noise_array_seed ## ^--- This didn't work because the assignment operation here made changes to noise_array_seed_handler propagate to self.noise_array_seed... ## Which is undesirable for repeated randomizations. ## Sooooo, instead, copy more explicitly: noise_array_seed_handler = [] for each_number in self.noise_array_seed: noise_array_seed_handler.append(each_number) ##print("DEBUG: noise_array_seed_handler == %s" % (str(noise_array_seed_handler))) new_noise_array = [] ## This whole function is about shuffling order of the noise array's contents while keeping the individual values of its contents the same. while len(noise_array_seed_handler) > 1: ## + 1 because it needs to include the final index. ## ... 
## Except random.randint DOES NOT work like range(x, y) -- it includes the zeroeth index and the maximum index, I think. Whyyyyy did they make it inconsistent! ## ... ## It's even worse -- it gave me out-of-range errors when it was simply nothing added or subtracted, too. Have to do - 1 to make it randomize properly. ## This is something that should really be investigated when this routine is next improved. which_number_to_pick = random.randint(0, (len(noise_array_seed_handler) - 1)) ## DEBUGGING ##print("len(noise_array_seed_handler) == %d\n which_number_to_pick == %d" % (len(noise_array_seed_handler), which_number_to_pick)) ## Put the number at that index into the new_noise_array. new_noise_array.append(noise_array_seed_handler[which_number_to_pick]) ## Remove the number at that index from the list so this doesn't go on forever. noise_array_seed_handler.pop(which_number_to_pick) ## The last one doesn't need and doesn't want to be randinted into existence. new_noise_array.append(noise_array_seed_handler[0]) ## Out with the old... del self.noise_array ## ... and in with the new: self.noise_array = new_noise_array ## DEBUG ##print(" Debug: self.noise_array == %s" % (str(self.noise_array))) ## /DEBUG ## The randomization call should be callable on its own, so include this to make it the proper length: self.double_the_noise_array() ## This part is required because the permutations table draws from the noise array and is critical to making a new noise map. Forgot about that after taking a few days' break. ## Always call generate_permutations_table() when the noise array is full and doubled. self.generate_permutations_table() ## Clean the list references: del noise_array_seed_handler del new_noise_array ## ... ## I think this step is likely to be unnecessary, but it rules out one problem I thought my current issue could have been. def double_the_noise_array(self): ## Uses the supplied argument to construct a more Python-friendly way of handling the simplex noise seed. 
## This function supports the creation (and re-creation) of the noise array. Called in the noise generator's __init__() and reseed_noise() methods. noise_array_handler = [] ## I got an out-of-memory error when trying to call this on itself. ## It just kept reading the array after it added all the numbers and looped endlessly. Oops. ## Time to break out the handlers! ## ... ## After changing the range number to 3 and 1, it seems to not actually care about being doubled. o_o ## Some day I'll know how simplex noise works. Eventually. ## Untill then, we move onwards with the cargo cult programming boilerplate. for number_of_times_to_double_itself in range(0, 2): for each_number in self.noise_array: noise_array_handler.append(each_number) del self.noise_array self.noise_array = noise_array_handler def twodee_dot_product(self, supplied_gradient, x, y): ## here I think I need to figure out what the grad class does ## ... ## I think it's just an object with a list of coords in it, like a MapTile. ## Maaaaaybe. return ( (supplied_gradient[0] * x) + (supplied_gradient[1] * y) ) def generate_octaved_noise(self, supplied_x, supplied_y, scale, octaves, persistence): ''' From << http://code.google.com/p/battlestar-tux/source/browse/procedural/simplexnoise.py >> " 2D Multi-Octave Simplex noise. For each octave, a higher frequency/lower amplitude function will be added to the original. The higher the persistence [0-1], the more of each succeeding octave will be added. " ''' total_noise_for_this_cell = 0.0 frequency = scale # -_- amplitude = 1.0 # " We have to keep track of the largest possible amplitude, # because each octave adds more, ad we need a value in [-1, 1]. 
" max_amplitude = 0.0 for each_octave in range(octaves): new_x = supplied_x * frequency new_y = supplied_y * frequency total_noise_for_this_cell += ( self.generate_raw_unoctaved_noise(new_x, new_y) * amplitude ) frequency *= 2.0 max_amplitude += amplitude ## max_amplitude is also what the total is divided by at the end. ## This implies amplitude is some sort of average over all the iterations. amplitude *= persistence ###print(" (total_noise_for_this_cell / max_amplitude) == " + str((total_noise_for_this_cell / max_amplitude))) return (total_noise_for_this_cell / max_amplitude) def generate_noise(self, supplied_x, supplied_y, scale, octaves, persistence, randseed=None): ''' The gateway function for generate_octaved_noise(), this function makes sure the noise values are formatted according to the (array[y][x] == z) format used by my MapTile constructor. ''' ## IMPORTANT! I think there should be a reinitialize_noise_array() function called here. ## That function would reshuffle or maybe change the hash value on the noise_array (the permutations table, per other sources). ## ... ## Ooooor maybe that should be optional, because we might want to generate a map from a specific hash. ## I know: The hash should be changeable as a function outside this one that is invoked by the main program, like "rerandomized generator". ## This function should also apply to the other generators too... ## Perlin will be similar, plasma will be more of a hack involving saving state and giving that out unless a reset is requested, maybe? ## Or perhaps plasma will be just the same and I'm forgetting something about the RNG calls there. ## Check that. ## ... ## randomize_the_noise_array_seed() now handles randomization for this generator. ## It may be supplied with a random seed... but only if some moxie-filled programmer supplies it with the ability to do that, first! ## DEBUG ##print("\n Generating new array of simplex noise . . 
.\n") ## /DEBUG self.randomize_the_noise_array_seed(random_number_seed=randseed) array_to_be_returned = [] for each_y in range(0, supplied_y): new_row = [] for each_x in range(0, supplied_x): new_z_value = self.generate_octaved_noise(each_x, each_y, scale, octaves, persistence) ###print(" new_z_value == " + str(new_z_value)) new_row.append(new_z_value) array_to_be_returned.append(new_row) ##print("\n New array of simplex noise has been generated.\n") ## DEBUG ##print(" array_to_be_returned == %s" % (str(array_to_be_returned))) ## /DEBUG return array_to_be_returned def generate_raw_unoctaved_noise(self, supplied_x, supplied_y): ## After some review... ## The "skewing" is just multiplying the coord numbers by a constant so that everything we want to do on an x,y Cartesian board gets translated onto a simplex board. ## i and j are the "coordinates" when translated into simplexese. ## t uses G2 because it can't just do subtraction from the already-worked s value baked into i and j. ## t is, I think, the Cartesian midpoint coordinate. ## So essentially all the s, i, j, t, x0, y0 defining-lines are about getting simplex-to-Cartesian and vice versa translations. ## "Skew the input space to determine which simplex cell we're in" s = (supplied_x + supplied_y) * self.F2 # they also said something about "hairy skew factor" ... wat. i = int((supplied_x + s)) # how is this supposed to work?! j = int((supplied_y + s)) ## I THINK the values of x and y are always 0 or 1... (?) ## Which would be how all of these things can just add and subtract from eachother sensibly. ## Maybe?!? This IS what I'm trying to find out by translating it from Java... ## It isn't magic programming if I'm actually trying to understand how it works! t = (float(i + j) * self.G2) ## "Unskew the cell origin back to (x, y) space" <--- "(x, y) space" means the whole Cartesian square coordinate thing, rather than... simplex-adjusted coordinates. 
unskewed_x_zero = (i - t) unskewed_y_zero = (j - t) ## "The x,y distances from the cell origin" <--- by x,y they mean Cartesian rather than simplex-ian x0 = (supplied_x - unskewed_x_zero) y0 = (supplied_y - unskewed_y_zero) ## "For the twodee case, the simplex shape is an equilateral triangle." ## "Determine which simplex we are in." ## i1, j1 are "offsets for second (middle) corner of simplex in (i, j) coords" ## It's basically going top, right, bottom along the triangle, if I understand correctly. if x0 > y0: ## "lower triangle, XY order: (0, 0) --> (1, 0) --> (1, 1)" i1 = 1 j1 = 0 else: ## "upper triangle, YX order: (0, 0) --> (0, 1) --> (1, 1)" i1 = 0 j1 = 1 ## " A step of (1,0) in (i,j) means a step of (1-c,-c) in (x,y), and ## a step of (0,1) in (i,j) means a step of (-c,1-c) in (x,y), where ## c = (3-sqrt(3))/6 " ((c == G2)) ## "Offsets for second (middle) corner of simplex in (x,y) unskewed coords" x1 = (x0 - i1 + self.G2) y1 = (y0 - j1 + self.G2) ## "Offsets for last corner in (x,y) unskewed coords" x2 = x0 - 1.0 + 2.0 * self.G2 # Why do people think not using parens on math is a good idea? y2 = y0 - 1.0 + 2.0 * self.G2 # I don't care about OoP. It's just sensible to give punctuation to that sort of thing. Someone COULD easily make a mistake, but with punctuation you trade the reader's interpretation time for safety, which is far better, imo. ## "Work out the hashed gradient indices of the three simplex corners" ## I think th ## ... ## I don't know why they would bother hashing it with 255. ## Why does that even matter? Why not just do the operations on the base numbers? ## It was 255 in the Java. ## But I have no idea how that was supposed to work. Isn't it supposed to be 256 anyways? ## ... the Python code I saw also uses 255 and had the 512 permutations buffer thing fixed by copying the array onto itself, which is what I'm gonna use, so I'll try the 255 thing again too. 
ii = int(i) & self.hash_number jj = int(j) & self.hash_number ''' ## NOTE: All of the following in this commented block is tainted by my mistaken mod table. ## It was probably the reason I went through such trouble to debug it this way. Blah. ####print(" Gradient DEBUG:\n index of self.permutations_table[jj] == " + str(jj)) ####print(" Gradient DEBUG:\n index of self.permutations_table_Mod12[(ii + self.permutations_table[jj])] == " + str((ii + self.permutations_table[jj]))) ####print(" Gradient DEBUG:\n gradient_i_zero == " + str(self.permutations_table_Mod12[(ii + self.permutations_table[jj])]) + "\n") #gradient_i_zero = self.permutations_table_Mod12[ii + self.permutations_table[jj]] ####print(" Gradient DEBUG:\n index of self.permutations_table[(jj+j1)] == " + str((jj+j1))) ####print(" Gradient DEBUG:\n index of self.permutations_table_Mod12[(ii + i1 + self.permutations_table[(jj+j1)])] == " + str((ii + i1 + self.permutations_table[(jj+j1)]))) ####print(" Gradient DEBUG:\n gradient_i_one == " + str(self.permutations_table_Mod12[(ii + i1 + self.permutations_table[(jj+j1)])]) + "\n") #gradient_i_one = self.permutations_table_Mod12[ii + i1 + self.permutations_table[jj+j1]] ####print(" Gradient DEBUG:\n index of self.permutations_table[(jj+j1)] == " + str((jj+j1))) ####print(" Gradient DEBUG:\n index of self.permutations_table_Mod12[(ii + 1 + self.permutations_table[(jj+1)])] == " + str((ii + 1 + self.permutations_table[(jj+1)]))) ####print(" Gradient DEBUG:\n gradient_i_two == " + str(self.permutations_table_Mod12[(ii + 1 + self.permutations_table[(jj+1)])]) + "\n") #gradient_i_two = self.permutations_table_Mod12[ii + 1 + self.permutations_table[jj+1]] ''' ## Note that the 1 constants are balanced with omitted 0 constants in the lines with "missing" elements. 
gradient_i_zero = self.permutations_table[ii + self.permutations_table[jj ]] % 12 gradient_i_one = self.permutations_table[ii + i1 + self.permutations_table[jj + j1]] % 12 gradient_i_two = self.permutations_table[ii + 1 + self.permutations_table[jj + 1]] % 12 ## "Calculate the contribution from the three corners" t0 = 0.5 - x0*x0 - y0*y0 # I really wish people would use parens in all multi-operator statements. if t0 < 0: n0 = 0.0 else: t0 *= t0 ## " (x,y) of grad3 used for twodee gradient " ###print("\n DEBUG:\n t0 == " + str(t0) + "\n twodee_dot_product == " + str(self.twodee_dot_product(self.grad3[gradient_i_zero], x0, y0))) n0 = t0 * t0 * self.twodee_dot_product(self.grad3[gradient_i_zero], x0, y0) t1 = 0.5 - x1*x1 - y1*y1 if t1 < 0: n1 = 0.0 else: t1 *= t1 n1 = t1 * t1 * self.twodee_dot_product(self.grad3[gradient_i_one], x1, y1) ###print("\nDEBUGGING x0 == " + str(x0)) ###print("DEBUGGING x1 == " + str(x1)) ###print("DEBUGGING x2 == " + str(x2)) ###print("\nDEBUGGING y0 == " + str(y0)) ###print("DEBUGGING y1 == " + str(y1)) ###print("DEBUGGING y2 == " + str(y2)) ###print("\nDEBUGGING (x2 * x2) == " + str((x2 * x2))) ###print("DEBUGGING (y2 * y2) == " + str((y2 * y2))) ###print("DEBUGGING ((x2 * x2) - (y2 * y2)) == " + str((x2 * x2) - (y2 * y2))) ###print("DEBUGGING (0.5 - ((x2 * x2) - (y2 * y2))) == " + str((0.5 - ((x2 * x2) - (y2 * y2))))) ## Apparently some clown thought it would be funny to allow order of operations to work all screwy in Java, or maybe someone sabatoged the code I was looking at. ## I really couldn't guess why, but this was the original code, written in Java: ## double t2 = 0.5 - x2*x2-y2*y2; ## There were no parentheses anywhere there. t2 = 0.5 - x2*x2 - y2*y2 ###print("DEBUGGING t0 == " + str(t0)) ###print("DEBUGGING t1 == " + str(t1)) ###print("DEBUGGING t2 == " + str(t2)) ## I think I understand it now! the t's are ticking down like octaves in the perlin generator, or something? 
## hrm it's multiplying, not dividing, so it couldn't get below zero that way unless it already was negative. >< ## Nevermind. Still don't understand it yet. if t2 < 0: n2 = 0.0 else: t2 *= t2 n2 = t2 * t2 * self.twodee_dot_product(self.grad3[gradient_i_two], x2, y2) ## "Add contributions from each corner to get the final noise value." ## "The result is scaled to return values in the interval [-1, 1]." ###print("\nDEBUGGING n0 == " + str(n0)) ###print("\nDEBUGGING n1 == " + str(n1)) ###print("\nDEBUGGING n2 == " + str(n2)) ####print("\nDEBUGGING return " + str(70.0 * (n0 + n1 + n2))) #return 70.0 * (n0 + n1 + n2) number_to_return = ( 70.0 * (n0 + n1 + n2) ) ## My program would work better with a result scaled to 0-255. Therefore... number_to_return += 1 number_to_return *= 128.0 ## blah, getting NoneTypes after the octaves were added. Hrm... ## debug: ###print("\n number_to_return == " + str(number_to_return)) return number_to_return class DungeonMapGenerator: ''' Generators for the creation of corridor-linked dungeon rooms for indoors maps. Output format uses z values to stand for different room types, eg: 0 = blocked 1 = unblocked 2 = corridor etc. ''' ## NOTE TO SELF!! ## How to do FoV algorithm: ## - Calculate a circle with a set radius (sight range) from the player's current position ## - Find all MapTiles in that radius ## - For each MapTile, draw a line from that MapTile to the player ## - For each properly-rounded coordinate along that line (aligns to MapTile coords; partial cover? think on it...), check MapTiles with those coordinates for opacity ## - If a MapTile with opacity is found, stop checking this line and set the MapTile whose line we're checking to "UNSEEN" def __init__(self, supplied_map_width=40, supplied_map_height=40, room_max_size=10, room_min_size=4, room_max_count=30, room_min_count=5): ## Using the DungeonMapGenerator should always involve supplying some or all of these constants. 
## Defaults are being used here to make it simple for me to test and demonstrate. self.map_width = supplied_map_width self.map_height = supplied_map_height ## -= 1 because doing it during room generation would be mildly wasteful -- the bottom and right edges must always be uncarved. ## Doing it here, during the inits, guarantees that for all rooms and every map. self.map_width -= 1 self.map_height -= 1 self.room_max_size = room_max_size self.room_min_size = room_min_size self.room_max_count = room_max_count self.room_min_count = room_min_count def check_these_two_rectangles_for_intersection(self, rectangle_alpha, rectangle_beta): ''' Check two rectangles, both formatted [x, y, w, h] for intersection; return True if they intersect and False if they do not intersect. ''' new_x = rectangle_alpha[0] new_x2 = (rectangle_alpha[0] + rectangle_alpha[2]) old_x = rectangle_beta[0] old_x2 = (rectangle_beta[0] + rectangle_beta[2]) new_y = rectangle_alpha[1] new_y2 = (rectangle_alpha[1] + rectangle_alpha[3]) old_y = rectangle_beta[1] old_y2 = (rectangle_beta[1] + rectangle_beta[3]) do_they_intersect = False if ( (new_x >= old_x) and (new_x <= old_x2) ) or ( (new_x2 >= old_x) and (new_x2 <= old_x2) ): if ( (new_y >= old_y) and (new_y <= old_y2) ) or ( (new_y2 >= old_y) and (new_y2 <= old_y2) ): do_they_intersect = True if ( (old_x >= old_x) and (old_x <= new_x2) ) or ( (old_x2 >= new_x) and (old_x2 <= new_x2) ): if ( (old_y >= new_y) and (old_y <= new_y2) ) or ( (old_y2 >= new_y) and (old_y2 <= new_y2) ): do_they_intersect = True ## This if tree checks to see whether or not any rooms are forming crosses. if ((new_x >= old_x) and (new_x2 <= old_x2)) and ((new_y <= old_y) and (new_y2 >= old_y2)): do_they_intersect = True ## ... 
and the same check in the other direction, for if the old room was the vertical bar of the cross rather than the new room, as is assumed in the preceding if tree: if ((old_x > new_x) and (old_x2 < new_x2)) and ((old_y < new_y) and (old_y2 > new_y2)): do_they_intersect = True return do_they_intersect def define_corridor(self, which_orientation, x, y, x2, y2): ''' Create a corridor (actually a one-tile-by-n-tiles rectangular room) connecting point (x, y) and point ((x + w), (y + h)), using the rectangular room definition format. ''' w = x2 - x h = y2 - y if which_orientation == 'horizontal': if w < 0: ## ((This fix worked perfectly! Hooray)) ## If it's negative, flip it and deduct it from the index. ## DO NOT put this before the orientation check, it doesn't need to care about which direction it isn't doing, and since it gets that info anyways it would just mess it up to flip and deduct in a direction it isn't going in (because that direction is the constant 1, see below). w *= -1 x -= w ## (x, y, width, height) new_corridor = [x, y, w + 1, 1] ## Yes, it could be handled in less verbose ways. ## This way makes it blindingly obvious what the code is supposed to do, which I prefer. ## Code ought to be easy to maintain. if which_orientation == 'vertical': if h < 0: ## If it's negative, flip it and deduct it from the index. ## DO NOT put this before the orientation check, it doesn't need to care about which direction it isn't doing, and since it gets that info anyways it would just mess it up to flip and deduct in a direction it isn't going in (because that direction is the constant 1, see below). 
h *= -1 y -= h ## (x, y, width, height) new_corridor = [x, y, 1, h + 1] return new_corridor def return_the_center_of_this_rectangle(self, upperleft_x, upperleft_y, width, height): centerpoint_x = ( upperleft_x + (width // 2) ) centerpoint_y = ( upperleft_y + (height // 2) ) return centerpoint_x, centerpoint_y def generate_noise(self, supplied_map_width=None, supplied_map_height=None, room_max_size=None, room_min_size=None, room_max_count=None, room_min_count=None): ''' It's noise that looks like a dungeon map. If R2-D2 sneezed, this would be the random pattern left on the tissue. ''' #### Arranging the generation parameters #### ## All the generators save the state of the last map made. ## The generate_noise() method of each generator accepts new parameters every time it's called, but if none are given, it goes back to the last parameters the generator worked with. ## This makes it easy to implement deterministic map regeneration from randseeds. ## -= 1 for the same reasoning as in the inits. if supplied_map_width != None: supplied_map_width -= 1 self.map_width = supplied_map_width if supplied_map_height != None: supplied_map_height -= 1 self.map_height = supplied_map_height if room_max_size != None: self.room_max_size = room_max_size if room_min_size != None: self.room_min_size = room_min_size if room_max_count != None: self.room_max_count = room_max_count if room_min_count != None: self.room_min_count = room_min_count #### Generating the map #### ## First, make a map full of zeroes. The rooms will be carved out of it. ## Remember, every NoiseMapGenerator returns results formatted: map[y][x] == z new_dungeon_map = [] for each_row in range(0, self.map_height): new_row = [] for each_column in range(0, self.map_width): new_row.append(0) new_dungeon_map.append(new_row) ## List comprehension method: ## new_dungeon_map = [[ 0 for y in range(0, self.supplied_map_height)] for x in range(0, self.supplied_map_width)] ## Try this and see how it goes. 
#### Generating room coordinates #### ## DEBUG #number_of_corridors_at_map_finish = 0 ## \DEBUG ## There must be at least room_min_count rooms in the end product. are_there_enough_rooms_yet = False while are_there_enough_rooms_yet == False: list_of_rooms = [] for each_room_attempt_number in range(0, self.room_max_count): ## DEBUG: Since walls are uncarved space, should the x and y randints begin at 1 or 0? ## Watching the output process will solve this issue quickly. ## ... ## This issue needs to be straightened out early on due to how intersection tests have to work. ## Only two edges need to have uncarved space in them, and every room will have those two edges uncarved. ## I decree those two edges to be the lower and right edges. ## The map will have upper and left edges uncarved so that any rooms at the edge of the map are properly walled. ## Thus the randints will begin at 1 (the upper and left edges)... ## and end at map_width and map_height, instead of (m_w - 1) and (m_h - 1). ## By letting rooms gen to the edges with their width and height values, they can sit on an edge with their two designated built-in edge walls and everything will be fine. new_room_width = random.randint(self.room_min_size, self.room_max_size) new_room_height = random.randint(self.room_min_size, self.room_max_size) new_room_upperleft_x = random.randint(1, self.map_width - new_room_width) new_room_upperleft_y = random.randint(1, self.map_height - new_room_height) ## [x, y, w, h] new_room = [new_room_upperleft_x, new_room_upperleft_y, new_room_width, new_room_height] ## The checks for validity favor x,y modification first -- and always pushing it to the lower right -- ## and w,h modification second -- and always pushing it to the upper left -- ## because this should lead to a mild tendency for rooms to cluster, and towards the center, at that. ## Which I think will look nice. ## ... ## or that's what I'd like to do, but not on the first implementation. 
## Checking to see if the rooms intersect: failed_intersection_test = False for each_other_room in list_of_rooms: if self.check_these_two_rectangles_for_intersection(new_room, each_other_room) == True: failed_intersection_test = True if failed_intersection_test == False: list_of_rooms.append(new_room) if len(list_of_rooms) >= self.room_min_count: are_there_enough_rooms_yet = True else: del list_of_rooms #### Carving successful room coordinates #### ## Someone told me using range(foo, len(list)) is un-Pythonic, so I'm using an iterator to step through the list in parallel for the purposes of creating corridors to connect rooms. room_creation_iterator = -1 for each_completed_room in list_of_rooms: for each_x_coordinate in range(each_completed_room[0], (each_completed_room[0] + each_completed_room[2])): for each_y_coordinate in range(each_completed_room[1], (each_completed_room[1] + each_completed_room[3])): ## This conditional seems a bit hackish. if new_dungeon_map[each_y_coordinate][each_x_coordinate] == 0: ## This is so simple it's bound to fail miserably. ## ... ## And yet it works. new_dungeon_map[each_y_coordinate][each_x_coordinate] += 1 ## Connect every room with corridors. (Note that there may be dungeons where this trait is not desirable for some reason; other behavior may be added as desired.) ## Generate a random direction for the corridors to point in: which_direction_first = random.randint(0, 1) # remember, random.randint() includes min and max values, unlike range() #define_corridor(which_orientation, x, y, x2, y2) ## Note: Corridors are created from the current room to the next room even though the next room hasn't actually be written in yet. ## It works because the rooms already exist as rectangle coordinates. ## This is likely to cause debugging confusion if you try to change this code without taking that into account. Be advised. ## Find the centerpoints of both rooms and pack them as tuples. 
## Syntax is [ ( (list_of_rooms[n][w] // 2) + list_of_rooms[n][x] ), ( (list_of_rooms[n][h] // 2) + list_of_rooms[n][y] ) ] ## Values resulting from this look like x, y and are just the centerpoints of the two rooms. ## Another representation: [(width // 2 + x offset), (height // 2 + y offset)] ## ... ## If desired, it's possible to change this to use floor divide + 1 instead of just floor divide. ## That would make it so that rooms with a thickness of 1 do not have projections off their sides. ## Corridors would slice into the center of the room rather than the rounded-down center. #room_alpha_center = [ ( (list_of_rooms[room_creation_iterator][2] // 2) + list_of_rooms[room_creation_iterator][0] ), ( (list_of_rooms[room_creation_iterator][3] // 2) + list_of_rooms[room_creation_iterator][1] ) ] #room_beta_center = [ ( (list_of_rooms[room_creation_iterator + 1][2] // 2) + list_of_rooms[room_creation_iterator + 1][0] ), ( (list_of_rooms[room_creation_iterator + 1][3] // 2) + list_of_rooms[room_creation_iterator + 1][1] ) ] ## Redoing this to make my dungeon generator cooler. ## Now, rooms will connect to the nearest two rooms, by centerpoint value! ## Or one or zero rooms, as in the case for the second and first rooms created. ## This should make tunnel connections a whole lot more friendly-looking. ## The way we're going to do this is: ## For each room in the rooms list: ## Use my new return_the_center_of_this_rectangle() method on every room in the rooms list and compare their centers to the room currently being considered ## The nearest two rooms that do not have centerpoints equal to the room being considered will be used as anchors for the define_corridor() method. 
the_centerpoint_of_this_room = self.return_the_center_of_this_rectangle() ## DEBUG #print("\n room_alpha_center == %s\n room_beta_center == %s" % (str(room_alpha_center), str(room_beta_center))) ## \DEBUG if which_direction_first == 0: ## DEBUG #number_of_corridors_at_map_finish += 1 ## \DEBUG ## It needs to take room alpha center and drag it out to room beta center in only the horizontal direction. ## That's why vertical is as easy as swapping reference order to the rooms. ## define_corridor() still needs a direction because I chose not to make it implicit by unpacking the centerpoint tuple here. I think it's more readable this way. ## ... ## Something is totally wrong here. This only works if alpha centerpoint > beta centerpoint because otherwise you get negative widths or something and that can't be drawn in can it? ## Maybe it can? Let's try it and see what fails. new_horizontal_corridor = self.define_corridor('horizontal', room_alpha_center[0], room_alpha_center[1], room_beta_center[0], room_beta_center[1]) new_vertical_corridor = self.define_corridor('vertical', room_beta_center[0], room_beta_center[1], room_alpha_center[0], room_alpha_center[1]) elif which_direction_first == 1: ## DEBUG #number_of_corridors_at_map_finish += 1 ## \DEBUG new_horizontal_corridor = self.define_corridor('horizontal', room_beta_center[0], room_beta_center[1], room_alpha_center[0], room_alpha_center[1]) new_vertical_corridor = self.define_corridor('vertical', room_alpha_center[0], room_alpha_center[1], room_beta_center[0], room_beta_center[1]) #print("\n new_horizontal_corridor == %s\n new_vertical_corridor == %s" % (str(new_horizontal_corridor), str(new_vertical_corridor))) ## When the next-to-last room is connected to the last room, reset the iterator to 0 so that the last room may be connected to the first room. ## NOTE! Linear dungeons should stop corridor creation when the next-to-last room is connected to the last room. 
## DEBUG #print("\n room_creation_iterator == %d\n len(list_of_rooms == %d" % (room_creation_iterator, len(list_of_rooms))) ## \DEBUG if ( room_creation_iterator < (len(list_of_rooms) - 2) ): # plus equals to, NOT set equals to (incrementing, not rolling over) room_creation_iterator += 1 else: # set equals to, NOT minus equals to (rolling over, not incrementing) room_creation_iterator = -1 ## This should probably be turned into a create_room() method. ## First horizontal: ## DEBUG #print("\nnew_horizontal_corridor[0] == %d\nnew_horizontal_corridor[2] == %d\nnew_horizontal_corridor[0] + [2] == %d" % (new_horizontal_corridor[0] ,new_horizontal_corridor[2], (new_horizontal_corridor[0] + new_horizontal_corridor[2]))) #print("\nnew_horizontal_corridor[1] == %d\nnew_horizontal_corridor[3] == %d\nnew_horizontal_corridor[1] + [3] == %d" % (new_horizontal_corridor[1], new_horizontal_corridor[3], (new_horizontal_corridor[1] + new_horizontal_corridor[3]))) ## \DEBUG for each_horizontal_corridor_x_coordinate in range(new_horizontal_corridor[0], (new_horizontal_corridor[0] + new_horizontal_corridor[2])): for each_horizontal_corridor_y_coordinate in range(new_horizontal_corridor[1], (new_horizontal_corridor[1] + new_horizontal_corridor[3])): ## If it's already walkable, don't turn it debug mauve. 
if new_dungeon_map[each_horizontal_corridor_y_coordinate][each_horizontal_corridor_x_coordinate] == 0: new_dungeon_map[each_horizontal_corridor_y_coordinate][each_horizontal_corridor_x_coordinate] += 1 ## Second vertical: ## DEBUG #print("\nnew_vertical_corridor[0] == %d\nnew_vertical_corridor[2] == %d\nnew_vertical_corridor[0] + [2] == %d" % (new_vertical_corridor[0], new_vertical_corridor[2], (new_vertical_corridor[0] + new_vertical_corridor[2]))) #print("\nnew_vertical_corridor[1] == %d\nnew_vertical_corridor[3] == %d\nnew_vertical_corridor[1] + [3] == %d" % (new_vertical_corridor[1], new_vertical_corridor[3], (new_vertical_corridor[1] + new_vertical_corridor[3]))) ## \DEBUG for each_vertical_corridor_x_coordinate in range(new_vertical_corridor[0], (new_vertical_corridor[0] + new_vertical_corridor[2])): for each_vertical_corridor_y_coordinate in range(new_vertical_corridor[1], (new_vertical_corridor[1] + new_vertical_corridor[3])): ## If it's already walkable, don't turn it debug mauve. if new_dungeon_map[each_vertical_corridor_y_coordinate][each_vertical_corridor_x_coordinate] == 0: new_dungeon_map[each_vertical_corridor_y_coordinate][each_vertical_corridor_x_coordinate] += 1 ## DEBUG #print("\n number_of_corridors_at_map_finish == %d\n len(list_of_rooms) == %d" % (number_of_corridors_at_map_finish, len(list_of_rooms))) ## \DEBUG return new_dungeon_map class RoomFilledMapGenerator: ## I don't like this generator. It is not worth the effort right now. Keeping it for legacy/future inspiration purposes. ## To start, this code will be somewhat copypasted from DungeonMapGenerator. Mostly just the inits and some grid work. def __init__(self, supplied_map_width=40, supplied_map_height=40, room_max_size=10, room_min_size=4): ## Using the RoomFilledMapGenerator should always involve supplying some or all of these constants. ## Defaults are being used here to make it simple for me to test and demonstrate. 
## DEBUG ## Let's see if storing new_dungeon_map as state magically solves it. Woooo ## Nope, not in the least. And yes I did put in self.* tags on every reference in this class's generate_noise() method. #self.new_dungeon_map = [] ## \DEBUG self.map_width = supplied_map_width self.map_height = supplied_map_height ## -= 1 because doing it during room generation would be mildly wasteful -- the bottom and right edges must always be uncarved. ## Doing it here, during the inits, guarantees that for all rooms and every map. ## DEBUG COMMENTED ## The following adjustment is unnecessary with the way I've structured my code now. Good. #self.map_width -= 1 #self.map_height -= 1 ## \DEBUG COMMENTED ## This generator does not need min/max room count settings, but it wouldn't be all that difficult to add them as some sort of conditional'd loop. def generate_noise(self, supplied_map_width=None, supplied_map_height=None, room_max_size=None, room_min_size=None): ''' It's sorta like noise. Except blocky and in all these clean straight lines and right angles. ''' #### Arranging the generation parameters #### ## All the generators save the state of the last map made. ## The generate_noise() method of each generator accepts new parameters every time it's called, but if none are given, it goes back to the last parameters the generator worked with. ## This makes it easy to implement deterministic map regeneration from randseeds. if supplied_map_width != None: ## -= 1 for the same reasoning as in the inits. ## Is unnecessary with the way I've structured my code now. ## DEBUG COMMENTED #supplied_map_width -= 1 ## \DEBUG COMMENTED self.map_width = supplied_map_width if supplied_map_height != None: ## Is unnecessary with the way I've structured my code now. 
## DEBUG COMMENTED #supplied_map_height -= 1 ## \DEBUG COMMENTED self.map_height = supplied_map_height if room_max_size != None: self.room_max_size = room_max_size if room_min_size != None: self.room_min_size = room_min_size ## Room count will be determined by the other parameters since the map will be filled with rooms. #### Generating the map #### ## First, make a map full of zeroes. The rooms will be carved out of it. ## Remember, every NoiseMapGenerator returns results formatted: map[y][x] == z ## Refactoring this might involve making a generate_blank_map() method. ## It would also be useful for DungeonMapGenerators. ## Maybe DungeonMapGenerator should be a base class and these room-based map generators would all draw from it. new_dungeon_map = [] for each_row in range(0, self.map_height): new_row = [] for each_column in range(0, self.map_width): new_row.append(0) new_dungeon_map.append(new_row) ## List comprehension version: ## new_dungeon_map = [[ 0 for y in range(0, self.supplied_map_height)] for x in range(0, self.supplied_map_width)] ## Try this and see how it goes. #### Filling the blank map with rooms #### ## I seriously don't understand why you wouldn't want to do for loops with index numbers. It makes dealing with the data SO much easier! for each_row_index in range(1, (len(new_dungeon_map) - 1)): for each_column_index in range(1, (len(new_dungeon_map[each_row_index]) - 1)): ## IMPORTANT! ## The syntax is the same for all of these map generators: ## ## new_dungeon_map[y][x] == z ## ## If there is some confusion about row/column stuff or sublist ordering, remember to compare it to this fact. 
#### ATTEMPT NUMBER TWO #### ## I'm just beating my hands on the keyboard and code is coming out ## Initialize the validator variable: should_we_start_a_room_here = 1 ## Whip up a potential room: new_room_width = random.randint(self.room_min_size, self.room_max_size)#(4, 10) # <-- does not help at all new_room_height = random.randint(self.room_min_size, self.room_max_size)#(4, 10) # :( ## Now check if the width or height go out of bounds and adjust to fit if possible; if not possible, set the validator toggle to False: if (each_column_index + new_room_width) >= (len(new_dungeon_map[each_row_index]) - 1): # len(map[row]) because we're checking this particular row's width, and -1 because of uncarved side difference_between_maximum_room_width_and_attempted_room_width = ( each_column_index + new_room_width - (len(new_dungeon_map[each_row_index]) - 1) ) new_room_width -= difference_between_maximum_room_width_and_attempted_room_width if new_room_width < self.room_min_size: should_we_start_a_room_here = 0 if (each_row_index + new_room_height) >= (len(new_dungeon_map) - 1): # len(map) because we're checking the height of columns in this map, and -1 because of uncarved side difference_between_maximum_room_height_and_attempted_room_height = ( each_row_index + new_room_height - (len(new_dungeon_map) - 1) ) new_room_height -= difference_between_maximum_room_height_and_attempted_room_height if new_room_height < self.room_min_size: should_we_start_a_room_here = 0 ## Determine if this is a good starting tile for a room: for each_nearby_tile_y in range(-1, 2): for each_nearby_tile_x in range(-1, 2): ## Check every tile adjacent to the current tile (and the current tile, too) for carved space and Falsitivize the validator if any is found: if new_dungeon_map[(each_row_index + each_nearby_tile_y)][(each_column_index + each_nearby_tile_x)] != 0: should_we_start_a_room_here = 0 ## Next, check and see if this room will slice into another room at any point along its prospective length: 
## Init/reset the width decrementor: room_max_width = 0 continue_incrementing_room_width = True ## range(-1, 2) should step through (-1, 0, 1), ie (up, same, down) or (left, same, right) for each_next_unit_of_width in range(0, new_room_width): should_we_increment_room_max_width_by_one_tile = False for each_nearby_tile_y in range(-1, 2): for each_nearby_tile_x in range(-1, 2): ## Check every tile adjacent to the current tile (and the current tile, too) for carved space and Falsitivize the validator if any is found: if continue_incrementing_room_width == True and new_dungeon_map[(each_row_index + each_nearby_tile_y)][(each_column_index + each_nearby_tile_x) + each_next_unit_of_width] == 0: #print(" INCREMENTING decrementor. checked tile == %d" % (new_dungeon_map[(each_row_index + each_nearby_tile_y)][(each_column_index + each_nearby_tile_x) + each_next_unit_of_width])) should_we_increment_room_max_width_by_one_tile = True else: continue_incrementing_room_width = False #print(" NOT incrementing decrementor. checked tile == %d" % (new_dungeon_map[(each_row_index + each_nearby_tile_y)][(each_column_index + each_nearby_tile_x) + each_next_unit_of_width])) if should_we_increment_room_max_width_by_one_tile == True and continue_incrementing_room_width == True: room_max_width += 1 ## Apply the decrementor and check if the room is too small: #print("\nnew_room_width == %d\n room_width_decrementor == %d" % (new_room_width, room_width_decrementor)) new_room_width = room_max_width #print(" new_room_width == %d" % (new_room_width)) #print(" self.min_room_size == %d" % (self.room_min_size)) if new_room_width < self.room_min_size: ## Then the smallest possible room had to become too small to fit here and this tile should be skipped. 
should_we_start_a_room_here = 0 #print(" should_we_start_a_room_here == %r" % (should_we_start_a_room_here)) #else: #print("\nnew_room_width == %d\n room_width_decrementor == %d" % (new_room_width, room_width_decrementor)) #print(" self.min_room_size == %d" % (self.room_min_size)) #print(" (t) should_we_start_a_room_here == %r" % (should_we_start_a_room_here)) ## Duplicating the width decrementor code even though it should never be necessary... o_o ## ... ## It runs but it didn't fix the problem. Still bizarre. ## Init/reset the height decrementor: room_height_decrementor = 0 ## range(-1, 2) should step through (-1, 0, 1), ie (up, same, down) or (left, same, right) for each_next_unit_of_height in range(0, new_room_height): should_we_decrement_room_height_by_one_tile = False for each_nearby_tile_y in range(-1, 2): for each_nearby_tile_x in range(-1, 2): ## Check every tile adjacent to the current tile (and the current tile, too) for carved space and Falsitivize the validator if any is found: if new_dungeon_map[(each_row_index + each_nearby_tile_y) + each_next_unit_of_height][(each_column_index + each_nearby_tile_x)] != 0: #print(" INCREMENTING decrementor. checked tile == %d" % (new_dungeon_map[(each_row_index + each_nearby_tile_y)][(each_column_index + each_nearby_tile_x) + each_next_unit_of_width])) should_we_decrement_room_height_by_one_tile = True #else: #print(" NOT incrementing decrementor. checked tile == %d" % (new_dungeon_map[(each_row_index + each_nearby_tile_y)][(each_column_index + each_nearby_tile_x) + each_next_unit_of_width])) if should_we_decrement_room_height_by_one_tile == True: room_height_decrementor += 1 new_room_height -= room_height_decrementor if new_room_height < self.room_min_size: ## Then the smallest possible room had to become too small to fit here and this tile should be skipped. 
should_we_start_a_room_here = 0 #if should_we_start_a_room_here == 0: # new_room_height = 0 # new_room_width = 0 ## Now that all checks have been passed, write the room to the map. if should_we_start_a_room_here == 1: for each_new_room_height_index in range(0, new_room_height): for each_new_room_width_index in range(0, new_room_width): new_dungeon_map[each_row_index + each_new_room_height_index][each_column_index + each_new_room_width_index] += 1 else: pass return new_dungeon_map ''' #### ATTEMPT NUMBER ONE #### ## Failed due to indexing or insufficient/incorrect tile validation. ## IMPORTANT! This code all assumes tile validation begins iterating one tile right and one tile down and fills a map of zeroes of exactly the right size. ## Let's switch to serial checks with a toggle rather than nested checks with a base case. ## This will make it very easy to add and remove conditionals to alter how rooms are validated. should_we_start_a_room_on_this_tile = True if new_dungeon_map[each_row_index][each_column_index] != 0: ## Then the current tile is carved and unusable. should_we_start_a_room_on_this_tile = False if new_dungeon_map[each_row_index][each_column_index + 1] != 0: ## The next tile to the right is carved. should_we_start_a_room_on_this_tile = False if new_dungeon_map[each_row_index - 1][each_column_index] != 0: ## The tile above the current tile is carved. should_we_start_a_room_on_this_tile = False if new_dungeon_map[each_row_index][each_column_index - 1] != 0: ## The tile to the left of the current tile is carved. should_we_start_a_room_on_this_tile = False if new_dungeon_map[each_row_index - 1][each_column_index - 1] != 0: ## The tile above and to the left of the current tile is carved. should_we_start_a_room_on_this_tile = False if new_dungeon_map[each_row_index - 1][each_column_index + 1] != 0: ## The tile above and to the right of the current tile is carved. should_we_start_a_room_on_this_tile = False ## The absurdity gallery. 
Should be logically impossible to get any hits here whatsoever. if new_dungeon_map[each_row_index + 1][each_column_index] != 0: should_we_start_a_room_on_this_time = False if new_dungeon_map[each_row_index + 1][each_column_index + 1] != 0: should_we_start_a_room_on_this_time = False if new_dungeon_map[each_row_index + 1][each_column_index - 1] != 0: should_we_start_a_room_on_this_time = False ## There has to be a check to see if the tile is fewer than self.room_min_size tiles away from the right and bottom edges of the map. ## We will take this opportunity to define these handy and descriptive variables: ## ## DEBUG - 1 on the end of this? Yes or no? To handle uncarved space on the bottom+right sides... this should be what makes it uncarved, if included here. ## distance_from_left_of_room_to_right_of_map = (self.map_width - each_column_index) distance_from_top_of_room_to_bottom_of_map = (self.map_height - each_row_index) ## Check to see if the room's min size is too large for its seed location: if self.room_min_size >= distance_from_left_of_room_to_right_of_map: ## This tile is too close to the right side of the map to be placed here. should_we_start_a_room_on_this_tile = False if self.room_min_size >= distance_from_top_of_room_to_bottom_of_map: ## This tile is too close to the bottom of the map to be placed here. should_we_start_a_room_on_this_tile = False ## Get how wide and tall the room wants to be, so we can check it against its neighbors and the map edges: random_room_width = random.randint(self.room_min_size, self.room_max_size) random_room_height = random.randint(self.room_min_size, self.room_max_size) #random_room_height = 6 ## If the tile is too close to the edge to fulfill its randomly generated width, decrement it untill it just fits. ## Note: This step happens before the next validation step because if it didn't the room would generate an index-out-of-range error there. 
if random_room_width >= distance_from_left_of_room_to_right_of_map: random_room_width -= (random_room_width - distance_from_left_of_room_to_right_of_map) if random_room_height >= distance_from_top_of_room_to_bottom_of_map: random_room_height -= (random_room_height - distance_from_top_of_room_to_bottom_of_map) ## Something is ridiculously wrong in the below code, but I have no idea what. ## It's still generating overlaps even though I've put in all the conditionals that are supposed to make it not do that. ## To prevent the decrementer in the following for loop from clipping its own loop too short. I think. random_room_width_adjustment_handler = 0 ## random_room_width + 1 is the upperleft coordinate, the span of the room, and the wall on the right. for each_next_tile_index in range(0, (random_room_width + 2)): ## The following line of code (plus one or two index offsets) took quite some time to figure out was needed. if ((each_next_tile_index + each_column_index) <= (self.map_width - 1)): if (new_dungeon_map[each_row_index][(each_column_index + each_next_tile_index)] != 0) or (new_dungeon_map[(each_row_index - 1)][(each_column_index + each_next_tile_index)] != 0) or (new_dungeon_map[(each_row_index - 1)][(each_column_index + each_next_tile_index)] != 0): ## Then something's in the way. Decrement the room's actual size. ## ... ## This used to be decrementing random_room_width, which I think made it end the for loop too early. Changing to a handler to disconnect those parts. ## ... ## I don't think that solved it. It SHOULDN'T solve it regardless. Leaving it in just to be safe. Refactor it out later. random_room_width_adjustment_handler += 1 ## DEBUG #if random_room_width_adjustment_handler > 0: # random_room_width_adjustment_handler += 0 ## \DEBUG ## Apply the accrued adjustment. 
random_room_width -= random_room_width_adjustment_handler ## One final check to ensure the previous validation step did not make the room too small: if random_room_width <= self.room_min_size: should_we_start_a_room_on_this_tile = False ## DEBUG ## Direct checking for intersection after the room is defined. ## Even though it solves all the debug mauve, it doesn't fix the inappropriate abuttment issue, and it also doesn't fill the map as cleanly as resizing rooms if they're too big. #for each_new_tile_y_offset in range(0, random_room_height): # for each_new_tile_x_offset in range(0, random_room_width): # if new_dungeon_map[(each_row_index + each_new_tile_y_offset)][(each_column_index + each_new_tile_x_offset)] != 0: # should_we_start_a_room_on_this_tile = False ## \DEBUG ## Another absurdity gallery -- the following should never turn up any hits due to logic. random_room_height_adjustment_handler = 0 for each_next_tile_index in range(0, (random_room_height + 2)): if ((each_next_tile_index + each_row_index) <= (self.map_height - 1)): if (new_dungeon_map[each_row_index + each_next_tile_index][(each_column_index)] != 0) or (new_dungeon_map[(each_row_index + each_next_tile_index)][(each_column_index + 1)] != 0) or (new_dungeon_map[(each_row_index + each_next_tile_index)][(each_column_index - 1)] != 0): random_room_height_adjustment_handler += 1 random_room_height -= random_room_height_adjustment_handler if random_room_height <= self.room_min_size: should_we_start_a_room_on_this_tile = False ## If it passes all the checks, write the room to the map. 
if should_we_start_a_room_on_this_tile == True: for each_new_tile_y_offset in range(0, random_room_height): for each_new_tile_x_offset in range(0, random_room_width): if new_dungeon_map[(each_row_index + each_new_tile_y_offset)][(each_column_index + each_new_tile_x_offset)] >= 1: ## DEBUG print("Error: Mauve for random_room_width %d, random_room_height %d\n min_size %d max_size %d" % (random_room_width, random_room_height, self.room_min_size, self.room_max_size)) ## \DEBUG new_dungeon_map[(each_row_index + each_new_tile_y_offset)][(each_column_index + each_new_tile_x_offset)] += 1 return new_dungeon_map ''' ''' #### ATTEMPT NUMBER ZERO #### ## Ugh, I don't know why none of this works. ## The indices seem perfect on paper but changing one index makes the results better or worse in ways that don't make any obvious kind of sense. ## I'm going to assume I made some error in figuring out what checking was needed. ## Step 1 if new_dungeon_map[each_row_index][each_column_index] == 0: ## Theory section... ## Imagine the algorithm makes a tall room, a wide room and another tall room on the first line. ## When it passes through the second line it would need to: ## 1. Detect uncarved space at (x, y) ## 2. Check ((x + 1), y) for uncarved space ## If False, then it's about to break into a room to the right; if True... ## 3. Check ((x + 1), y - 1) for uncarved space ## 3a If False, then it's a wall beneath a room; if True... ## 4. Check ((x + 2), y) for uncarved space ## 4a If False, then it's the end of a wall beneath a room and also abutting another room to the right. ## 4b If True then this is a good spot to place a room as it is not going to be carving out a wall from any adjacent rooms. ## ... ## There needs to be a check-ahead to make the room actually fill all the space infront of it, x-ly speaking. ## Or at least to make it easy to put an upper bound on the room width in this location. 
## A similar procedure may need to happen at the bottom of the map for height of the room. ## ... ## 5. For each in range(0, room_max_size): Check ((x + each), y) for uncarved space, return sum_of_this_loop ## ... stuff. ## Step 2 if new_dungeon_map[each_row_index][(each_column_index + 1)] == 0: ## Step 3 if new_dungeon_map[(each_row_index - 1)][(each_column_index + 1)] == 0: ## Step 4 ## DEBUG Let's throw more conditionals onto the last uniform step, here, to see if something is the right one: if (new_dungeon_map[each_row_index][(each_column_index + 2)] == 0) and (new_dungeon_map[each_row_index][each_column_index - 1] == 0) and (new_dungeon_map[each_row_index - 1][each_column_index - 1] == 0): ## A good spot to place a room has been found. ## Determine the limiting condition for the width and height randint ranges based on distance between room edge and map edge: distance_from_left_of_room_to_right_of_map = (self.map_width - each_column_index) distance_from_top_of_room_to_bottom_of_map = (self.map_height - each_row_index) random_room_width = random.randint(self.room_min_size, self.room_max_size) random_room_height = random.randint(self.room_min_size, self.room_max_size) ## Forbidding the rooms to be larger than the map: if random_room_width >= distance_from_left_of_room_to_right_of_map: random_room_width = (distance_from_left_of_room_to_right_of_map - 1) if random_room_height >= distance_from_top_of_room_to_bottom_of_map: random_room_height = (distance_from_top_of_room_to_bottom_of_map - 1) ## Note: Step 5 comes after a tentative room width has been generated so that it doesn't have to check any further than it needs to. 
how_wide_to_actually_make_the_room = 0 ## Step 5 for each_next_tile_index in range(0, random_room_width): if new_dungeon_map[each_row_index][(each_column_index + each_next_tile_index + 1)] == 0: how_wide_to_actually_make_the_room += 1 else: how_wide_to_actually_make_the_room += 0 ## Now we know how wide to make the room and, implicitly, how tall to make it, since rooms are rectilinear and will never be placed to undercut other rooms, only to block their horizontal propagation. ## The maximum width is how_wide_to_actually_make_the_room, since it uses random_room_width (already bounded for map edge purposes) in its randrange. ## The maximum height is simply random_room_height, now that it's been bounded by distance_from_top_of_room_to_bottom_of_map. ## ... ## It occurred to me it might be simpler to make the map a large carved room one or two tiles wider than the end result is supposed to be, and simply "uncarve" the map inside it before starting any of this. ## Then is-carved checking would implicitly incorporate the distance to the edge in it too. ## Ah well, that's for some adventurous refactoring spree! new_room_rectangle = [each_row_index, each_column_index, how_wide_to_actually_make_the_room, random_room_height] ## Now write the room to the map so it can continue properly. for each_new_tile_y in range(1, random_room_height + 1): for each_new_tile_x in range(1, (how_wide_to_actually_make_the_room + 1)): new_dungeon_map[(each_row_index + each_new_tile_y)][(each_column_index + each_new_tile_x)] += 1 return new_dungeon_map ''' class MarkIIDungeonMapGenerator: ## Thinking of renaming this WingedDungeonGenerator, because it loves to make floorplans split into several "wings" each of which may be impressively lengthy at larger sizes. ## The effect is pretty cool, actually. Code could use some efficiency polish, though. 
''' Idea remembered from the old WanderingLineGenerator: " Decided I didn't quite like the "wandering line" idea and I'm going to try something inspired by http://donjon.bin.sh/d20/dungeon/index.cgi instead. It's going to use the DungeonMapGenerator algorithm to place rooms and a new algorithm for tunnel connection. Specifically it will use the WanderingLineMapGenerator idea of keeping an in-object list of rooms and using that to do things like is-connected-yet checks and intersection testing. " ''' def __init__(self, supplied_map_width=40, supplied_map_height=40, room_max_size=10, room_min_size=4, room_max_count=30, room_min_count=5): self.map_width = supplied_map_width self.map_height = supplied_map_height ## The bottom and right edges must always be uncarved. ## Doing it here, during the inits, guarantees that for all rooms and every map. ## ... ## I probably have no idea what I'm doing since testing it is easier than figuring out whether Python feels like pretending 0 is an ordinal or not this time. self.map_width -= 1 self.map_height -= 1 self.room_max_size = room_max_size self.room_min_size = room_min_size self.room_max_count = room_max_count self.room_min_count = room_min_count ## Saving it as state for brain friendliness purposes. Can be changed later. self.list_of_created_rooms = [] def define_corridor(self, which_orientation, x, y, x2, y2): ''' Create a corridor (actually a one-tile-by-n-tiles rectangular room) connecting point (x, y) and point ((x + width), (y + height)), using the rectangular room definition format. ''' width = x2 - x height = y2 - y if which_orientation == 'horizontal': if width < 0: width *= -1 x -= width new_corridor = [x, y, width + 1, 1] if which_orientation == 'vertical': if height < 0: height *= -1 y -= height new_corridor = [x, y, 1, height + 1] return new_corridor def return_the_center_of_this_rectangle(self, upperleft_x, upperleft_y, width, height): ## This is for placing corridors. 
centerpoint_x = ( upperleft_x + (width // 2) ) centerpoint_y = ( upperleft_y + (height // 2) ) return [centerpoint_x, centerpoint_y] def check_these_two_rectangles_for_intersection(self, rectangle_alpha, rectangle_beta): ''' Check two rectangles, both formatted [x, y, w, h] for intersection; return True if they intersect and False if they do not intersect. ''' new_x = rectangle_alpha[0] new_x2 = (rectangle_alpha[0] + rectangle_alpha[2]) old_x = rectangle_beta[0] old_x2 = (rectangle_beta[0] + rectangle_beta[2]) new_y = rectangle_alpha[1] new_y2 = (rectangle_alpha[1] + rectangle_alpha[3]) old_y = rectangle_beta[1] old_y2 = (rectangle_beta[1] + rectangle_beta[3]) do_they_intersect = False if ( (new_x >= old_x) and (new_x <= old_x2) ) or ( (new_x2 >= old_x) and (new_x2 <= old_x2) ): if ( (new_y >= old_y) and (new_y <= old_y2) ) or ( (new_y2 >= old_y) and (new_y2 <= old_y2) ): do_they_intersect = True if ( (old_x >= old_x) and (old_x <= new_x2) ) or ( (old_x2 >= new_x) and (old_x2 <= new_x2) ): if ( (old_y >= new_y) and (old_y <= new_y2) ) or ( (old_y2 >= new_y) and (old_y2 <= new_y2) ): do_they_intersect = True ## This if tree checks to see whether or not any rooms are forming crosses. if ((new_x >= old_x) and (new_x2 <= old_x2)) and ((new_y <= old_y) and (new_y2 >= old_y2)): do_they_intersect = True ## ... and the same check in the other direction, for if the old room was the vertical bar of the cross rather than the new room, as is assumed in the preceding if tree: if ((old_x > new_x) and (old_x2 < new_x2)) and ((old_y < new_y) and (old_y2 > new_y2)): do_they_intersect = True ## DEBUG #print("Successfully checked for intersection") ## \DEBUG return do_they_intersect def generate_noise(self, supplied_map_width=None, supplied_map_height=None, room_max_size=None, room_min_size=None, room_max_count=None, room_min_count=None): ## I have this sinking feeling it's un-Pythonic to have this kind of optional state for my MapGenerator objects. 
if supplied_map_width != None: self.map_width = supplied_map_width self.map_width -= 1 if supplied_map_height != None: self.map_height = supplied_map_height self.map_height -= 1 if room_max_size != None: self.room_max_size = room_max_size if room_min_size != None: self.room_min_size = room_min_size if room_max_count != None: self.room_max_count = room_max_count if room_min_count != None: self.room_min_count = room_min_count list_of_candidate_rooms = [] while (len(list_of_candidate_rooms) < self.room_min_count): for each_new_room_attempt in range(0, self.room_max_count): #print("each_new_room_attempt == %d" % (each_new_room_attempt)) ## Width and height are defined BEFORE x/y position. ## Doing it this way makes it unnecessary to check if the room extends off the map. new_room_width = random.randint(self.room_min_size, self.room_max_size) new_room_height = random.randint(self.room_min_size, self.room_max_size) new_room_x = random.randint(1, ((self.map_width - 1) - new_room_width)) new_room_y = random.randint(1, ((self.map_height - 1) - new_room_height)) new_room_candidate = [new_room_x, new_room_y, new_room_width, new_room_height] should_we_append_this_room = True for each_other_room in list_of_candidate_rooms: if (self.check_these_two_rectangles_for_intersection(new_room_candidate, each_other_room) == True): should_we_append_this_room = False #print("Failed a room intersect test") if should_we_append_this_room == True: list_of_candidate_rooms.append(new_room_candidate) #print("Appended a room") if (len(list_of_candidate_rooms) < self.room_min_count): del list_of_candidate_rooms[:] #print("Gotta use a while loop eventually") # Do we? Doesn't seem like it now. ## Now create corridors linking rooms. ## The list_of_all_centerpoints is not the same as the list_of_candidate_rooms or list_of_new_corridors, but I guess it technically could be merged with a small redesign. Keeping them separate for now to preserve the conceptual history of the things. 
## Note the reason this is done after the list_of_candidate_rooms is filled is because that list gets wiped during generation if the genned number is lower than the minimum. ## Corridor generation doesn't do that, so it can append corridors as they're created. list_of_all_centerpoints = [] ## "Colors" are an abstraction used to represent the fact that each room has a connected-to-these-other-rooms quality, which is common to all of them. ## Thinking of this quality as a color makes for an easily relatable analogy. ## The list_of_room_connection_colors is going to be a list of lists which is constructed as corridors are added (eg, as rooms pass their first and only classical connection check). list_of_room_connection_colors = [] list_of_new_corridors = [] for each_room in list_of_candidate_rooms: ## Appends [centerpoint_x, centerpoint_y] to the list, so it's a 2-ple: [ [],[],[],[],[]... ] list_of_all_centerpoints.append(self.return_the_center_of_this_rectangle(upperleft_x=each_room[0], upperleft_y=each_room[1], width=each_room[2], height=each_room[3])) ## In the following for loop, all created rooms are connected to the closest other room or corridor (technically the closest centerpoint, which stores both). ## The connecton of a room involves creating precisely one vertical and one horizontal corridor attaching it to another room. ## It also involves appending the centerpoints of the two corridors and the two rooms they connect to the list_of_room_connection_colors in their proper color. ## It does NOT involve connecting colors to each other. That comes after this "first pass" of room connection. for each_room in list_of_candidate_rooms: ## The first step is to find which other room's centerpoint is the closest to the current room's centerpoint. 
alpha_room_centerpoint_x, alpha_room_centerpoint_y = self.return_the_center_of_this_rectangle(upperleft_x=each_room[0], upperleft_y=each_room[1], width=each_room[2], height=each_room[3]) the_shortest_hypotenuse_found_for_this_room = None which_centerpoint_is_closest = None for each_centerpoint in list_of_all_centerpoints: ## DEBUG #print(" each_centerpoint == %s" % (str(each_centerpoint))) ## \DEBUG beta_room_centerpoint_x, beta_room_centerpoint_y = each_centerpoint[0], each_centerpoint[1] if (alpha_room_centerpoint_x == beta_room_centerpoint_x) and (alpha_room_centerpoint_y == beta_room_centerpoint_y): ## Then they're the same centerpoint and should be skipped for this step. pass else: ## Then these centerpoints should be checked to see if they're the closest to each other as of this iteration. x_distance_between_the_two = abs(beta_room_centerpoint_x - alpha_room_centerpoint_x) y_distance_between_the_two = abs(beta_room_centerpoint_y - alpha_room_centerpoint_y) hypotenuse_distance_between_the_two = math.sqrt((x_distance_between_the_two ** 2) + (y_distance_between_the_two ** 2)) if (the_shortest_hypotenuse_found_for_this_room == None) or (hypotenuse_distance_between_the_two < the_shortest_hypotenuse_found_for_this_room): ## Then these centerpoints are in fact the closest to each other as of this iteration. the_shortest_hypotenuse_found_for_this_room = hypotenuse_distance_between_the_two which_centerpoint_is_closest = each_centerpoint ## Now that the closest room rectangle has been found, draw a corridor between it's and the current room's centerpoints: which_direction_first = random.randint(0, 1) # remember, random.randint() includes min and max values, unlike range() ## NOTE! It might be a good idea to check for intersection here and, if detected, invert which_direction_first via: which_direction_first = abs(which_direction_first - 1) ## That would make it slightly less favorable to crossed tunnels, though it should already have rather few of those. I think. 
## Redefining these terms so we can use them to create corridors. This may not be maximally Pythonic... It would be a decent candidate for refactoring. alpha_room_centerpoint_x, alpha_room_centerpoint_y = self.return_the_center_of_this_rectangle(upperleft_x=each_room[0], upperleft_y=each_room[1], width=each_room[2], height=each_room[3]) beta_room_centerpoint_x, beta_room_centerpoint_y = which_centerpoint_is_closest[0], which_centerpoint_is_closest[1] if which_direction_first == 0: new_horizontal_corridor = self.define_corridor('horizontal', alpha_room_centerpoint_x, alpha_room_centerpoint_y, beta_room_centerpoint_x, beta_room_centerpoint_y) new_vertical_corridor = self.define_corridor('vertical', beta_room_centerpoint_x, beta_room_centerpoint_y, alpha_room_centerpoint_x, alpha_room_centerpoint_y) elif which_direction_first == 1: new_horizontal_corridor = self.define_corridor('horizontal', beta_room_centerpoint_x, beta_room_centerpoint_y, alpha_room_centerpoint_x, alpha_room_centerpoint_y) new_vertical_corridor = self.define_corridor('vertical', alpha_room_centerpoint_x, alpha_room_centerpoint_y, beta_room_centerpoint_x, beta_room_centerpoint_y) ## Save the corridors: list_of_new_corridors.append(new_horizontal_corridor) list_of_new_corridors.append(new_vertical_corridor) ## Also save the corridors' centerpoints: horizontal_corridor_centerpoint = self.return_the_center_of_this_rectangle(upperleft_x=new_horizontal_corridor[0], upperleft_y=new_horizontal_corridor[1], width=new_horizontal_corridor[2], height=new_horizontal_corridor[3]) vertical_corridor_centerpoint = self.return_the_center_of_this_rectangle(upperleft_x=new_vertical_corridor[0], upperleft_y=new_vertical_corridor[1], width=new_vertical_corridor[2], height=new_vertical_corridor[3]) list_of_all_centerpoints.append(horizontal_corridor_centerpoint) list_of_all_centerpoints.append(vertical_corridor_centerpoint) ## We're going to absolutely have to ensure they're all connected. ## 1. 
I think the way to do this is to have corridors, upon creation, append their centerpoints along with their associated rooms' to a list which will later be crosschecked with the list_of_all_centerpoints. ## 2. The first room connected this way will have a color associated with it that colors the centerpoints of itself, the corridor, and the room it's connected to. ## 3. When a new corridor is created, it will check if start or end have colors associated with them and adopt it as its own color if so; if not, a new color will be created which follows this pattern. ## 4. When the map is finished creating corridors via the classical closest-centerpoints method, the color lists will be cross-checked and if any centerpoint appears in at least two color lists simultaneously, those colors are considered connected. ## 5. If this process completes and certain colors remain unconnected, the closest centerpoints in each of them will be discerned and connected to each other. ## 6. Steps 4 and 5 will iterate untill no colors remain unconnected. ## Since this is where every room has a corridor added on to it, we'll begin here. ## list_of_room_connection_colors will be a three-ple: ## [ [ [x, y], [x, y], [x, y] ], [ [x, y], [x, y], [x, y] ], ... ] ## If there are no colors yet... if len(list_of_room_connection_colors) == 0: ## Make the current room the source of the first color. new_color = [] new_color.append([alpha_room_centerpoint_x, alpha_room_centerpoint_y]) new_color.append([beta_room_centerpoint_x, beta_room_centerpoint_y]) new_color.append(horizontal_corridor_centerpoint) new_color.append(vertical_corridor_centerpoint) list_of_room_connection_colors.append(new_color) #print("\n\nlist_of_room_connection_colors == \n%s\n\n" % (str(list_of_room_connection_colors))) ## That part worked correctly the very first time! 
## Otherwise, check the list of colors for cross-results with all four centerpoints currently being considered: else: does_this_room_fit_in_any_color = False for each_color in list_of_room_connection_colors: do_these_centerpoints_connect_to_this_color = False we_can_append_alpha_room_centerpoint = True we_can_append_beta_room_centerpoint = True we_can_append_horizontal_corridor_centerpoint = True we_can_append_vertical_corridor_centerpoint = True for each_centerpoint in each_color: ## Notice the split between do_these_centerpoints_connect_to_this_color and we_can_append_foo_centerpoint ## This is because the former gates the adding of all centerpoints, and the latter gates the adding of specific centerpoints. ## Without the latter it would add too many centerpoints, creating duplicates. ## Important: This part of the function does NOT pare down colors, it can only build them up. It does try to build them up only when previous colors are insufficient, however. if ((each_centerpoint[0] == alpha_room_centerpoint_x) and (each_centerpoint[1] == alpha_room_centerpoint_y)): do_these_centerpoints_connect_to_this_color = True we_can_append_alpha_room_centerpoint = False if ((each_centerpoint[0] == beta_room_centerpoint_x) and (each_centerpoint[1] == beta_room_centerpoint_y)): do_these_centerpoints_connect_to_this_color = True we_can_append_beta_room_centerpoint = False if ((each_centerpoint[0] == horizontal_corridor_centerpoint[0]) and (each_centerpoint[1] == horizontal_corridor_centerpoint[1])): do_these_centerpoints_connect_to_this_color = True we_can_append_horizontal_corridor_centerpoint = False if ((each_centerpoint[0] == vertical_corridor_centerpoint[0]) and (each_centerpoint[1] == vertical_corridor_centerpoint[1])): do_these_centerpoints_connect_to_this_color = True we_can_append_vertical_corridor_centerpoint = False if do_these_centerpoints_connect_to_this_color == True: if we_can_append_alpha_room_centerpoint == True: each_color.append([alpha_room_centerpoint_x, 
alpha_room_centerpoint_y]) if we_can_append_beta_room_centerpoint == True: each_color.append([beta_room_centerpoint_x, beta_room_centerpoint_y]) if we_can_append_horizontal_corridor_centerpoint == True: each_color.append(horizontal_corridor_centerpoint) if we_can_append_vertical_corridor_centerpoint == True: each_color.append(vertical_corridor_centerpoint) does_this_room_fit_in_any_color = True if does_this_room_fit_in_any_color == False: ## I thiiiiiink there's going to be a slight problem with needing more than one pass to connect colors, after these steps are done. ## Limited by the number of colors, but still, would be nice to narrow that down to get minimum runtime and maximum cleanness. the_newly_required_color = [] ## This absolutely cannot be the best way to do this kind of checking, but the tutorials didn't tell me any better way, and this way certainly works. ## It's also very flat and readable. ## ... ## It also saved me a whole lot of processing time, from the looks of my debug statements. ## ... ## I think this was mistakenly placed. Newly-required colors should not have to check for duplicates again, since this happened in the preceding part of this conditional tree -- see just above here. ## Commenting it all for rumination and debugging purposes. 
#we_can_append_alpha_room_centerpoint = True #we_can_append_beta_room_centerpoint = True #we_can_append_horizontal_corridor_centerpoint = True #we_can_append_vertical_corridor_centerpoint = True #for each_other_color in list_of_room_connection_colors: # for each_other_centerpoint in each_other_color: # if each_other_centerpoint == [alpha_room_centerpoint_x, alpha_room_centerpoint_y]: # we_can_append_alpha_room_centerpoint = False # if each_other_centerpoint == [beta_room_centerpoint_x, beta_room_centerpoint_y]: # we_can_append_beta_room_centerpoint = False # if each_other_centerpoint == horizontal_corridor_centerpoint: # we_can_append_horizontal_corridor_centerpoint = False # if each_other_centerpoint == vertical_corridor_centerpoint: # we_can_append_vertical_corridor_centerpoint = False #if we_can_append_alpha_room_centerpoint == True: the_newly_required_color.append([alpha_room_centerpoint_x, alpha_room_centerpoint_y]) #if we_can_append_beta_room_centerpoint == True: the_newly_required_color.append([beta_room_centerpoint_x, beta_room_centerpoint_y]) #if we_can_append_horizontal_corridor_centerpoint == True: the_newly_required_color.append(horizontal_corridor_centerpoint) #if we_can_append_vertical_corridor_centerpoint == True: the_newly_required_color.append(vertical_corridor_centerpoint) #if len(the_newly_required_color) != 0: list_of_room_connection_colors.append(the_newly_required_color) #print("pre-step: list_of_room_connection_colors == ") #for each_color in list_of_room_connection_colors: # print(" " + str(each_color)) # print("|||||||||||||||||||||||") ## The rooms are placed, connected with classical, first-pass corridors, and the initial color lists have been established. ## Next we must winnow down the color lists to the bare minimum, since there will be some colors that are connected which were not recognized as such in the establishment pass. 
''' print("PRE-STEP: list_of_room_connection_colors == ") for each_color in list_of_room_connection_colors: print(" " + str(each_color)) print("|||||||||||||||||||||||") ''' ## The next step is to connect the disconnected colors. ## ## This will be accomplished by: ## 1. Taking the color in the list_of_room_connection_colors at index 0 (hereafter "color alpha"), finding its centerpoint and comparing it to all other colors' centerpoints, saving: ## a. the centerpoint of color alpha ## b. the centerpoint of the closest color to color alpha (hereafter "color beta") ## c. a list containing all the centerpoints inside the color beta (eg, identical to color beta at the time of its discovery) ## 2. Finding the room inside color alpha which is closest to 1.b. ## 3. Finding the room inside color beta which is closest to 1.a. ## 4. Connecting 2. and 3. with an L-corridor and adding its two components' centerpoints to color gamma ## 5. Appending every room centerpoint in color alpha and color beta to color gamma ## 6. Appending color gamma to the next pass's new color list ## 7. If any colors remain, appending every color not in alpha, beta, or gamma to the next pass's color list ## 8. If the next pass's color list length is greater than 1, repeat this process, starting at step 1. ## Number of passes is just the length of the color list, since this algorithm is guaranteed to step through each and every color, connecting them individually. for each_remaining_color in range(0, len(list_of_room_connection_colors)): ## Giant conditional. 
if (len(list_of_room_connection_colors) > 1): color_alpha = list_of_room_connection_colors[0] ## Figure out the average centerpoint of color alpha: color_alpha_average_x_stack = 0 color_alpha_average_y_stack = 0 for each_color_alpha_centerpoint in color_alpha: color_alpha_average_x_stack += each_color_alpha_centerpoint[0] color_alpha_average_y_stack += each_color_alpha_centerpoint[1] color_alpha_average_x = (color_alpha_average_x_stack // len(color_alpha)) color_alpha_average_y = (color_alpha_average_y_stack // len(color_alpha)) the_shortest_hypotenuse_found_for_this_color = None #the_closest_beta_average_x = None #the_closest_beta_average_y = None ## Yes, we have to use the index value for this, since equality checking lists doesn't seem to work, based on previous experiments. for each_beta_color_index in range(1, len(list_of_room_connection_colors)): color_beta = list_of_room_connection_colors[each_beta_color_index] color_beta_average_x_stack = 0 color_beta_average_y_stack = 0 for each_beta_centerpoint in color_beta: color_beta_average_x_stack += each_beta_centerpoint[0] color_beta_average_y_stack += each_beta_centerpoint[1] color_beta_average_x = (color_beta_average_x_stack // len(color_beta)) color_beta_average_y = (color_beta_average_y_stack // len(color_beta)) x_distance_between_the_two_colors = abs(color_beta_average_x - color_alpha_average_x) y_distance_between_the_two_colors = abs(color_beta_average_y - color_alpha_average_y) hypotenuse_distance_between_the_two_colors = math.sqrt((x_distance_between_the_two_colors ** 2) + (y_distance_between_the_two_colors ** 2)) if (the_shortest_hypotenuse_found_for_this_color == None) or (hypotenuse_distance_between_the_two_colors < the_shortest_hypotenuse_found_for_this_color): ## Then these centerpoints are in fact the closest to each other as of this iteration. 
the_shortest_hypotenuse_found_for_this_color = hypotenuse_distance_between_the_two_colors which_color_is_closest = color_beta which_beta_color_index_is_closest = each_beta_color_index ## Do I need to make these globals? Is python telling me to refactor my function into a zillion impossible to track tiny functions? =/ the_closest_beta_average_x = color_beta_average_x the_closest_beta_average_y = color_beta_average_y ## Now that we've found the beta color with an average centerpoint closest to the alpha color's average centerpoint, we need to find the alpha room centerpoint closest to the beta average centerpoint: the_shortest_hypotenuse_found_between_alpha_room_and_beta_average = None for each_color_alpha_centerpoint in color_alpha: x_distance_between_this_alpha_room_and_the_beta_average = abs(the_closest_beta_average_x - each_color_alpha_centerpoint[0]) y_distance_between_this_alpha_room_and_the_beta_average = abs(the_closest_beta_average_y - each_color_alpha_centerpoint[1]) hypotenuse_distance_between_alpha_room_and_beta_average = math.sqrt((x_distance_between_this_alpha_room_and_the_beta_average ** 2) + (y_distance_between_this_alpha_room_and_the_beta_average ** 2)) if (the_shortest_hypotenuse_found_between_alpha_room_and_beta_average == None) or (hypotenuse_distance_between_alpha_room_and_beta_average < the_shortest_hypotenuse_found_between_alpha_room_and_beta_average): the_shortest_hypotenuse_found_between_alpha_room_and_beta_average = hypotenuse_distance_between_alpha_room_and_beta_average which_alpha_centerpoint_is_closest_to_beta_average = each_color_alpha_centerpoint ## Mirror the above process to find the beta room with a centerpoint closest to the alpha color's average centerpoint: the_shortest_hypotenuse_found_between_beta_room_and_alpha_average = None for each_color_beta_centerpoint in which_color_is_closest: x_distance_between_this_beta_room_and_the_alpha_average = abs(color_alpha_average_x - each_color_beta_centerpoint[0]) 
y_distance_between_this_beta_room_and_the_alpha_average = abs(color_alpha_average_y - each_color_beta_centerpoint[1]) hypotenuse_distance_between_beta_room_and_alpha_average = math.sqrt((x_distance_between_this_beta_room_and_the_alpha_average ** 2) + (y_distance_between_this_beta_room_and_the_alpha_average ** 2)) if (the_shortest_hypotenuse_found_between_beta_room_and_alpha_average == None) or (hypotenuse_distance_between_beta_room_and_alpha_average < the_shortest_hypotenuse_found_between_beta_room_and_alpha_average): the_shortest_hypotenuse_found_between_beta_room_and_alpha_average = hypotenuse_distance_between_beta_room_and_alpha_average which_beta_centerpoint_is_closest_to_alpha_average = each_color_beta_centerpoint ## Now that we have the alpha and beta room centerpoints closest to each other, connect them with a corridor. which_direction_first = random.randint(0, 1) # remember, random.randint() includes min and max values, unlike range() if which_direction_first == 0: new_horizontal_corridor = self.define_corridor('horizontal', which_alpha_centerpoint_is_closest_to_beta_average[0], which_alpha_centerpoint_is_closest_to_beta_average[1], which_beta_centerpoint_is_closest_to_alpha_average[0], which_beta_centerpoint_is_closest_to_alpha_average[1]) new_vertical_corridor = self.define_corridor('vertical', which_beta_centerpoint_is_closest_to_alpha_average[0], which_beta_centerpoint_is_closest_to_alpha_average[1], which_alpha_centerpoint_is_closest_to_beta_average[0], which_alpha_centerpoint_is_closest_to_beta_average[1]) elif which_direction_first == 1: new_horizontal_corridor = self.define_corridor('horizontal', which_beta_centerpoint_is_closest_to_alpha_average[0], which_beta_centerpoint_is_closest_to_alpha_average[1], which_alpha_centerpoint_is_closest_to_beta_average[0], which_alpha_centerpoint_is_closest_to_beta_average[1]) new_vertical_corridor = self.define_corridor('vertical', which_alpha_centerpoint_is_closest_to_beta_average[0], 
which_alpha_centerpoint_is_closest_to_beta_average[1], which_beta_centerpoint_is_closest_to_alpha_average[0], which_beta_centerpoint_is_closest_to_alpha_average[1]) ## Save the corridors: list_of_new_corridors.append(new_horizontal_corridor) list_of_new_corridors.append(new_vertical_corridor) ## And now merge alpha and beta colors and delete beta color: ## DEBUG #print("which_color_is_closest == %s" % (str(which_color_is_closest))) ## \DEBUG for each_beta_centerpoint in which_color_is_closest: color_alpha.append(each_beta_centerpoint) ## The following line wasn't sufficient, since it only deleted this call-by-value variable rather than the call-by-reference pointer I was pretending it would be. Obvious in hindsight. #del which_color_is_closest ## Fortunately the index was already available! See above. del list_of_room_connection_colors[which_beta_color_index_is_closest] ## And now everything works as perfectly as I knew it would. ''' print(" POST-STEP: list_of_room_connection_colors == ") for each_color in list_of_room_connection_colors: print(" " + str(each_color)) print("|||||||||||||||||||||||") ''' ## Having generated enough rooms and corridors, create the map: the_dungeon_map = [] for each_row in range(0, self.map_height): new_row = [] for each_column in range(0, self.map_width): new_row.append(0) the_dungeon_map.append(new_row) ## Write the rooms... for each_successful_room_candidate in list_of_candidate_rooms: for each_room_height_unit in range(0, each_successful_room_candidate[3]): for each_room_width_unit in range(0, each_successful_room_candidate[2]): the_dungeon_map[(each_successful_room_candidate[1] + each_room_height_unit)][(each_successful_room_candidate[0] + each_room_width_unit)] += 1 ## and write the corridors: for each_corridor in list_of_new_corridors: #print("Attempting to write a corridor...") for each_corridor_height_unit in range(0, each_corridor[3]): for each_corridor_width_unit in range(0, each_corridor[2]): ## Check for uncarved space. 
When defining corridors this is the simplest way to go about it since the debug mauve is only really important for debugging room generation. if the_dungeon_map[(each_corridor[1] + each_corridor_height_unit)][(each_corridor[0] + each_corridor_width_unit)] == 0: the_dungeon_map[(each_corridor[1] + each_corridor_height_unit)][(each_corridor[0] + each_corridor_width_unit)] += 1 return the_dungeon_map
'''
Fractal noise map generator library.

Also includes additional non-"noise" map generators for dungeon generation
purposes, so that games can use very similar code for all the different
types of maps they need.  Each generator is an object built by a class
specific to that type of generator.  All generators SHOULD have enough
defaults to require only a handful of arguments (tailored to your display
needs) before they're popping out noiseclouds.

Generators currently include:

    PlasmaFractalGenerator()
    PerlinNoiseGenerator()
    SimplexNoiseGenerator()
    DungeonMapGenerator()
    RoomFilledMapGenerator()
    MarkIIDungeonMapGenerator()

The most useful generators are the Perlin generator and the Mk II dungeon
map generator, but they all have their own unique capabilities.  The
Simplex generator is the most technically complex but is theoretically
faster at creating a noise map than the Plasma and Perlin generators (it
is not clear whether this implementation is anywhere near optimized for
speed).  The Perlin generator returns the best-looking terrain maps,
possibly tied with the Simplex generator; both require some fiddling with
generator input parameters to get better-looking results.  The plasma
generator has some gridwards bias, but it too produces decent noise
clouds, as long as you don't look too closely or get too unlucky.

The "dungeon map generators" are more like signal than noise, as they
return maps full of rooms and corridors illustrated using two Z values
(0 and 1).  The DungeonMapGenerator produces randomly placed rectangular
rooms that all connect to each other using L-shaped corridors daisy
chained from one room's centerpoint to the next, in the order of room
placement; the algorithm was inspired by the libtcod roguelike tutorial at
< http://www.roguebasin.com/index.php?title=Complete_Roguelike_Tutorial,_using_python%2Blibtcod,_part_1 >.
The RoomFilledMapGenerator creates maps packed full of rectangular rooms,
with significant bias and no connecting corridors.  The
MarkIIDungeonMapGenerator produces maps wherein the rooms are connected in
a branching pattern such that dungeons have "wings" which can be quite
lengthy and significantly subdivided.

Note that the dependencies do NOT include pygame, even though the display
program created for demonstrations does.
'''

import random
import sys
import math


#### Classes ####

class PlasmaFractalGenerator:
    '''
    Diamond-square ("plasma fractal") noise generator.

    generate_noise() returns a 2D array -- a list of rows, indexed as
    array[y][x] == z -- built by recursively subdividing the rectangle
    spanned by four corner Z values, applying a random midpoint
    displacement at every subdivision.
    '''

    def __init__(self, array_root=2, corners_min=0, corners_max=255,
                 displacement_min=(-35), displacement_max=35,
                 minimum_separation_distance=1,
                 uleft_corner=None, uright_corner=None,
                 lleft_corner=None, lright_corner=None):
        ## The root of the array (its square root, or side measurement):
        self.array_root = array_root
        ## Width and height of the generated map, used when constructing
        ## the array that receives the plasma fractal in generate_noise().
        self.array_width = (self.array_root * 2)
        self.array_height = (self.array_root * 2)
        ## Scratch list of the fractal's intermediate [x, y, z] cells,
        ## filled by plasma_recursion().
        self.saved_noise_array = None
        ## Min and max values for randomly generated corner Z values:
        self.corners_min = corners_min
        self.corners_max = corners_max
        ## The range of randomness that can be applied to each midpoint
        ## displacement.  Usual supplied values have a negative min and a
        ## positive max.
        self.displacement_min = displacement_min
        self.displacement_max = displacement_max
        ## The distance at which the fractal stops subdividing itself and
        ## emits a value for the next least coordinate point.
        self.minimum_separation_distance = minimum_separation_distance
        ## Corners' initial Z values; any corner left as None is
        ## randomized within [corners_min, corners_max].
        self.uleft_corner = uleft_corner
        self.uright_corner = uright_corner
        self.lleft_corner = lleft_corner
        self.lright_corner = lright_corner
        if self.uleft_corner is None:
            self.uleft_corner = random.randint(self.corners_min, self.corners_max)
        if self.uright_corner is None:
            self.uright_corner = random.randint(self.corners_min, self.corners_max)
        if self.lleft_corner is None:
            self.lleft_corner = random.randint(self.corners_min, self.corners_max)
        if self.lright_corner is None:
            self.lright_corner = random.randint(self.corners_min, self.corners_max)

    def reinitialize_corners(self, uleft_corner=None, uright_corner=None,
                             lleft_corner=None, lright_corner=None):
        '''
        Reset the four corner Z values.  Any corner passed as None is
        re-randomized within [corners_min, corners_max]; explicit values
        are stored as given.
        '''
        if uleft_corner is None:
            self.uleft_corner = random.randint(self.corners_min, self.corners_max)
        else:
            self.uleft_corner = uleft_corner
        if uright_corner is None:
            self.uright_corner = random.randint(self.corners_min, self.corners_max)
        else:
            self.uright_corner = uright_corner
        if lleft_corner is None:
            self.lleft_corner = random.randint(self.corners_min, self.corners_max)
        else:
            self.lleft_corner = lleft_corner
        if lright_corner is None:
            self.lright_corner = random.randint(self.corners_min, self.corners_max)
        else:
            self.lright_corner = lright_corner

    def generate_noise(self, x=None, y=None, supplied_width=None, supplied_height=None,
                       uleft_corner=None, uright_corner=None,
                       lleft_corner=None, lright_corner=None):
        '''
        Gateway function to plasma_recursion().

        Returns a 2D array indexed as array[y][x] == z.  Any parameter
        left as None falls back to this generator's saved state; the
        fallbacks are filled in here because Python will not accept
        self.foo as a default parameter value.
        '''
        self.saved_noise_array = []
        if x is None:
            x = 0
        if y is None:
            y = 0
        if supplied_width is None:
            supplied_width = self.array_width
        if supplied_height is None:
            supplied_height = self.array_height
        if uleft_corner is None:
            uleft_corner = self.uleft_corner
        if uright_corner is None:
            uright_corner = self.uright_corner
        if lleft_corner is None:
            lleft_corner = self.lleft_corner
        if lright_corner is None:
            lright_corner = self.lright_corner
        ## Populate self.saved_noise_array with [x, y, z] cells:
        self.plasma_recursion(x=x, y=y,
                              supplied_width=supplied_width,
                              supplied_height=supplied_height,
                              uleft_corner=uleft_corner, uright_corner=uright_corner,
                              lleft_corner=lleft_corner, lright_corner=lright_corner)
        ## Convert the flat cell list into a row-major 2D array with the
        ## same ordering as the PerlinNoiseGenerator's results.  The array
        ## is pre-filled with -1 placeholders so the cells can be written
        ## in a single pass; the recursion produces float coordinates, so
        ## x and y are rounded down with int() when indexing.
        array_to_return = []
        for each_array_height_index in range(0, supplied_height):
            new_row = []
            for each_array_width_index in range(0, supplied_width):
                new_row.append(-1)
            array_to_return.append(new_row)
        for each_cell in self.saved_noise_array:
            ## each_cell == [x, y, z]; write it as array[y][x] = z.
            array_to_return[int(each_cell[1])][int(each_cell[0])] = each_cell[2]
        ## NOTE: the corner values are deliberately NOT reinitialized here,
        ## so repeated calls make new maps between the same corners; call
        ## reinitialize_corners() for fresh corners.
        return array_to_return

    def plasma_recursion(self, x, y, supplied_width, supplied_height,
                         uleft_corner, uright_corner, lleft_corner, lright_corner):
        '''
        Recursively append [x, y, z]-formatted plasma fractal cells to
        self.saved_noise_array, as called by generate_noise().

        Called directly, the result is only that flat list of [x, y, z]
        values, not the 2D array generate_noise() builds from it.
        '''
        new_width = (supplied_width / 2)
        new_height = (supplied_height / 2)
        if ((supplied_width > self.minimum_separation_distance)
                or (supplied_height > self.minimum_separation_distance)):
            ## Midpoint Z is the average of the corner Zs plus a random
            ## displacement; side Zs are plain two-corner averages.
            random_midpoint_displacement = random.randint(self.displacement_min, self.displacement_max)
            mid_z = (((uleft_corner + uright_corner + lleft_corner + lright_corner) / 4)
                     + random_midpoint_displacement)
            top_z = ((uleft_corner + uright_corner) / 2)
            bottom_z = ((lleft_corner + lright_corner) / 2)
            left_z = ((uleft_corner + lleft_corner) / 2)
            right_z = ((uright_corner + lright_corner) / 2)
            ## Recurse into the four quadrants.  The calls return nothing;
            ## they accumulate cells into self.saved_noise_array.
            self.plasma_recursion(x=x, y=y,
                                  supplied_width=new_width, supplied_height=new_height,
                                  uleft_corner=uleft_corner, uright_corner=top_z,
                                  lleft_corner=left_z, lright_corner=mid_z)
            self.plasma_recursion(x=(x + new_width), y=y,
                                  supplied_width=new_width, supplied_height=new_height,
                                  uleft_corner=top_z, uright_corner=uright_corner,
                                  lleft_corner=mid_z, lright_corner=right_z)
            self.plasma_recursion(x=x, y=(y + new_height),
                                  supplied_width=new_width, supplied_height=new_height,
                                  uleft_corner=left_z, uright_corner=mid_z,
                                  lleft_corner=lleft_corner, lright_corner=bottom_z)
            self.plasma_recursion(x=(x + new_width), y=(y + new_height),
                                  supplied_width=new_width, supplied_height=new_height,
                                  uleft_corner=mid_z, uright_corner=right_z,
                                  lleft_corner=bottom_z, lright_corner=lright_corner)
        else:
            ## When the span between the corners drops to the minimum
            ## separation distance, emit one [x, y, z] cell whose z is the
            ## plain average of the four corners.
            new_z_value = ((uleft_corner + uright_corner + lleft_corner + lright_corner) / 4)
            self.saved_noise_array.append([x, y, new_z_value])


class PerlinNoiseGenerator:
    '''
    Layered value-noise ("turbulence") generator.

    generate_noise() returns a 2D array indexed as array[y][x] == z, where
    each z is an int produced by blending smoothed random noise across
    several octave sizes.
    '''

    def __init__(self):
        ## The generator saves its noise-map state.  noise_array holds the
        ## raw random values that the turbulence functions smooth; it is
        ## internal scratch space, not the finished product.
        self.noise_array = []
        self.noise_width = 0
        ## BUGFIX: this line previously read "noise_height = 0" (missing
        ## "self."), so the attribute did not exist until generate_noise()
        ## assigned it.
        self.noise_height = 0

    def generate_noise(self, width, height, frequency, octaves):
        '''
        Return a list of `height` rows, each containing `width` ints,
        fractally smoothed as turbulence noise.

        Raising the frequency makes the result spikier (reminiscent of
        zooming out); raising the octaves makes it smoother (reminiscent
        of zooming in).  Keeping the frequency:octaves ratio the same
        keeps results looking similar, so small octave values are
        recommended -- octaves govern the turbulence loop's runtime.
        '''
        ## Reset the scratch noise map and remember the dimensions as
        ## state, because the sub-functions below need them.
        del self.noise_array[:]
        self.noise_width = width
        self.noise_height = height
        ## Fill the scratch array with raw random floats in [0.0, 1.0];
        ## this is the data smoothed into the finished cloud below.
        for each_row in range(0, self.noise_height):
            noise_row_handler = []
            for each_column in range(0, self.noise_width):
                noise_value = (random.randint(0, 1000) / 1000.0)
                noise_row_handler.append(noise_value)
            self.noise_array.append(noise_row_handler)
        ## Turbulate the raw noise into the result.  NOTE that the result
        ## is handed back to the caller, not saved as generator state.
        result = []
        for each_y in range(0, self.noise_height):
            turbulated_noise_row_handler = []
            for each_x in range(0, self.noise_width):
                turbulated_noise_value = int(self.totally_justified_turbulence_function((each_x * frequency), (each_y * frequency), octaves))
                turbulated_noise_row_handler.append(turbulated_noise_value)
            result.append(turbulated_noise_row_handler)
        return result

    def totally_justified_turbulence_function(self, x, y, size):
        '''
        Sum smoothed noise sampled at successively halved octave sizes,
        normalize by the initial size, and scale so the z values average
        out around 128.
        '''
        noise_value = 0.0
        size *= 1.0  # force float so the halving loop below behaves
        initial_size = size
        while (size >= 1):
            the_smooth_noise = self.smooth_noise((x / size), (y / size))
            the_smooth_noise *= size
            noise_value += the_smooth_noise
            size /= 2.0
        noise_value /= initial_size
        ## Biases the resulting z values to average out around this number:
        noise_value *= 128.0
        return noise_value

    def smooth_noise(self, x, y):
        '''
        Return a weighted (bilinear) blend of four neighboring cells of
        self.noise_array around the fractional point (x, y), wrapping
        around the array edges via modulo.
        '''
        ## Split (x, y) into integer cell indices and fractional weights.
        ## The fractional parts must be extracted before the modulo calls
        ## below, which would otherwise discard them.
        fractional_element_of_x = (x - int(x))
        fractional_element_of_y = (y - int(y))
        x1 = ((int(x) + self.noise_width) % self.noise_width)
        y1 = ((int(y) + self.noise_height) % self.noise_height)
        ## The neighboring cells, offset by -1 with wraparound:
        x2 = ((x1 + self.noise_width - 1) % self.noise_width)
        y2 = ((y1 + self.noise_height - 1) % self.noise_height)
        ## Blend the four cells, weighted by the fractional parts:
        value = 0.0
        value += (fractional_element_of_x * fractional_element_of_y * self.noise_array[y1][x1])
        value += (fractional_element_of_x * (1 - fractional_element_of_y) * self.noise_array[y2][x1])
        value += ((1 - fractional_element_of_x) * fractional_element_of_y * self.noise_array[y1][x2])
        value += ((1 - fractional_element_of_x) * (1 - fractional_element_of_y) * self.noise_array[y2][x2])
        return value


class SimplexNoiseGenerator:
    ## These tables hold for every instance of this class and do not
    ## require re-initting.  The way grad3 is referenced suggests it is an
    ## ordered list of simplex vertices; gi0/gi1/gi2 give numbers that map
    ## to indices of this list via a quite arcane mathemagical cantrip
    ## with no justification given -- see below in the noise generator.
    ## The Grad objects of the original Java code are interpreted here as
    ## simple boxes for vertex coordinates.
    grad3 = [ [1, 1, 0], [-1, 1, 0], [1, -1, 0], [-1, -1, 0], \
              [1, 0, 1], [-1, 0, 1], [1, 0, -1], [-1, 0, -1], \
              [0, 1, 1], [0, -1, 1], [0, 1, -1], [0, -1, -1] ]
    ## ...
## Wow I think they actually decided not to include a grad2 table because the mapping for grad3 technically works for grad2 too. ## Wow. ## I'm gonna go ahead and make a grad2 table based on my interpretation of what is going on here. grad2 = [ [1, 1], [-1, 1], [1, -1], [-1, -1], \ [1, 0], [-1, 0], [1, 0], [-1, 0], \ [0, 1], [0, -1], [0, 1], [0, -1] ] ## Nooope does not make more sense now. ## I'm going to put all of my trust in the implicit knowledge of the Java coder here. ## Just going to assume using the first two columns of the grad3 table works. ## It probably should, given that in grad2, there are precisely 4 instances of each specific value across the table, in varying combinations. ## So even though there are repeats I guess it still works somehow?! ## Maybe the fact there's some modulus going on ensures the repeated indices get skipped or something? ## The next section initializes the skewing and unskewing factors. ## I looked in the Java code and these are just constants. ## They should probably be called in preprocessing somehow, maybe at the top of this module... ## But I want the generators to be able to whip out new worlds at high speeds... ## So it's either top of the module, presolved here, or it takes too long. Choose one. #F2 = 0.3660254037844386 # 0.5*(Math.sqrt(3.0)-1.0); <-- IDLE gives me 0.3660254037844386 instead of what I had -- the lower-precision 0.36602540378 ... I clearly made a mistake while putting the formula into Google as an impromptu calculator substitute. Whatever, I hadn't considered putting the stuff in the base class at that time. #G2 = 0.21132486540518713 # (3.0-Math.sqrt(3.0))/6.0; apparently I copied it incorrectly. I had 2.71132486541 before I changed it to 0.21132486540518713 ## Trying out the math module for debugging purposes and it sort of makes it better anyways? F2 = ( 0.5 * (math.sqrt(3.0) - 1.0) ) G2 = ( ( 3.0 - math.sqrt(3.0) ) / 6.0 ) ## There's a fastfloor algorithm in the Java code. 
## Whether or not any algorithm modifications like this in Python might help is currently beyond me and beyond my needs to implement this generator. ## I'm skipping that. def __init__(self, supplied_hash=255): ## The following section initializes self.noise_array. ## NOTE: The Java example just runs through the same list twice -- in Python this approach makes index errors with all the easy ways to do that behavior, so I'm using a separate Python implementation's technique of repeating the list twice, instead. ## The list contains every integer from 0 to 255. self.noise_array_seed = [151,160,137,91,90,15, \ 131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23, \ 190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33, \ 88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166, \ 77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244, \ 102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196, \ 135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123, \ 5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42, \ 223,183,170,213,119,248,152, 2,44,154,163, 70,221,153,101,155,167, 43,172,9, \ 129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228, \ 251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107, \ 49,192,214, 31,181,199,106,157,184, 84,204,176,115,121,50,45,127, 4,150,254, \ 138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180] ## Prep the noise_array variable for subsequent randomization. Remember, this is the __init__() for the generator. Things have to be initialized somewhere. self.noise_array = [] ## Hash number is a variable because someone might think to make the seed some other number than 255 and would want to change the hash to match. 
self.hash_number = supplied_hash ## in Java I think you need to explicitly set the size of the array; not so in Python self.permutations_table = [] #self.permutations_table_Mod12 = [] ## Randomize the seed distribution (CURRENTLY DEBUGGING): self.randomize_the_noise_array_seed() ## This may only be called after self.hash_number has been established. #self.generate_permutations_table() def generate_permutations_table(self): if self.hash_number == None: ## Note that this presumes generate_permutations_table() will never be called when self.noise_array is zeroed out for regeneration or only half its normal size, which ought to always be the case. self.hash_number = ((len(self.noise_array) // 2) - 1) ## The reasoning behind this is somewhat complex. It has to do with there being 512 numbers in the noise array... ## which is constructed by taking the 256 numbers in the initial noise array and putting them in again on the end in the same order. ## The noise array is that doubled size because of some sort of wraparound thing the simplex grid needs, I think. ## But for the hash number, it needs to be 255 if there are 256 distinct values. Why, I forget, but it's somewhere in the logic behind needing a permutations_table. del self.permutations_table self.permutations_table = [] ## I changed 512 to 256 because it was giving me "list index out of range" ## This was probably not a good idea but I'll figure out why once it gives me more meaningful results with its errors ## ... ## Comparing the java and python versions convinced me it should be the other way. Their tables are both supposed to be 512. ## ... ## Made hash number changeable. ## Note that the permutations_table must be the size of the noise_array, since it is the table of permutations of that noise array's values, with a 1:1 correspondence (bijection??) for each_number in range(0, len(self.noise_array)): ## 255 was what the Java code said, but 256 produces non-errored results. 
Why would it be 255 any not 256 anyways? Very strange! Is python's method of storing data really that different from Java's? Can a short in Java only be positive? Can a list in python only be positive?! Argh... ## ... ## Just use 255. The Python implementation doesn't have a Mod12 table... ## I don't even know if it'll be faster, since I have to rehash everything every time I regenerate the array, anyways. ## It's entirely possible the second table for modulus results is actually wasteful rather than helpful. Idk. ## ... ## In fact I think it raises bad new problems in Python, given my perhaps mistaken instinct to use self.noise_array in building the mod table rather than permutations_table which I think is what's supposed to be used in the Java program... ## I'm going to drop the mod table and leave it here as evidence of my thought processes, for at least this version. self.permutations_table.append(self.noise_array[(each_number & self.hash_number)]) def randomize_the_noise_array_seed(self, random_number_seed=None): ## Does not currently support random seeds. >_> ## The parameter is there to inspire you to write it in, of course! ## I bet handlers are un-Pythonic for some convoluted reason nobody bothered to explain to me. #noise_array_seed_handler = self.noise_array_seed ## ^--- This didn't work because the assignment operation here made changes to noise_array_seed_handler propagate to self.noise_array_seed... ## Which is undesirable for repeated randomizations. ## Sooooo, instead, copy more explicitly: noise_array_seed_handler = [] for each_number in self.noise_array_seed: noise_array_seed_handler.append(each_number) ##print("DEBUG: noise_array_seed_handler == %s" % (str(noise_array_seed_handler))) new_noise_array = [] ## This whole function is about shuffling order of the noise array's contents while keeping the individual values of its contents the same. while len(noise_array_seed_handler) > 1: ## + 1 because it needs to include the final index. ## ... 
## Except random.randint DOES NOT work like range(x, y) -- it includes the zeroeth index and the maximum index, I think. Whyyyyy did they make it inconsistent! ## ... ## It's even worse -- it gave me out-of-range errors when it was simply nothing added or subtracted, too. Have to do - 1 to make it randomize properly. ## This is something that should really be investigated when this routine is next improved. which_number_to_pick = random.randint(0, (len(noise_array_seed_handler) - 1)) ## DEBUGGING ##print("len(noise_array_seed_handler) == %d\n which_number_to_pick == %d" % (len(noise_array_seed_handler), which_number_to_pick)) ## Put the number at that index into the new_noise_array. new_noise_array.append(noise_array_seed_handler[which_number_to_pick]) ## Remove the number at that index from the list so this doesn't go on forever. noise_array_seed_handler.pop(which_number_to_pick) ## The last one doesn't need and doesn't want to be randinted into existence. new_noise_array.append(noise_array_seed_handler[0]) ## Out with the old... del self.noise_array ## ... and in with the new: self.noise_array = new_noise_array ## DEBUG ##print(" Debug: self.noise_array == %s" % (str(self.noise_array))) ## /DEBUG ## The randomization call should be callable on its own, so include this to make it the proper length: self.double_the_noise_array() ## This part is required because the permutations table draws from the noise array and is critical to making a new noise map. Forgot about that after taking a few days' break. ## Always call generate_permutations_table() when the noise array is full and doubled. self.generate_permutations_table() ## Clean the list references: del noise_array_seed_handler del new_noise_array ## ... ## I think this step is likely to be unnecessary, but it rules out one problem I thought my current issue could have been. def double_the_noise_array(self): ## Uses the supplied argument to construct a more Python-friendly way of handling the simplex noise seed. 
## This function supports the creation (and re-creation) of the noise array. Called in the noise generator's __init__() and reseed_noise() methods. noise_array_handler = [] ## I got an out-of-memory error when trying to call this on itself. ## It just kept reading the array after it added all the numbers and looped endlessly. Oops. ## Time to break out the handlers! ## ... ## After changing the range number to 3 and 1, it seems to not actually care about being doubled. o_o ## Some day I'll know how simplex noise works. Eventually. ## Untill then, we move onwards with the cargo cult programming boilerplate. for number_of_times_to_double_itself in range(0, 2): for each_number in self.noise_array: noise_array_handler.append(each_number) del self.noise_array self.noise_array = noise_array_handler def twodee_dot_product(self, supplied_gradient, x, y): ## here I think I need to figure out what the grad class does ## ... ## I think it's just an object with a list of coords in it, like a MapTile. ## Maaaaaybe. return ( (supplied_gradient[0] * x) + (supplied_gradient[1] * y) ) def generate_octaved_noise(self, supplied_x, supplied_y, scale, octaves, persistence): ''' From << http://code.google.com/p/battlestar-tux/source/browse/procedural/simplexnoise.py >> " 2D Multi-Octave Simplex noise. For each octave, a higher frequency/lower amplitude function will be added to the original. The higher the persistence [0-1], the more of each succeeding octave will be added. " ''' total_noise_for_this_cell = 0.0 frequency = scale # -_- amplitude = 1.0 # " We have to keep track of the largest possible amplitude, # because each octave adds more, ad we need a value in [-1, 1]. 
" max_amplitude = 0.0 for each_octave in range(octaves): new_x = supplied_x * frequency new_y = supplied_y * frequency total_noise_for_this_cell += ( self.generate_raw_unoctaved_noise(new_x, new_y) * amplitude ) frequency *= 2.0 max_amplitude += amplitude ## max_amplitude is also what the total is divided by at the end. ## This implies amplitude is some sort of average over all the iterations. amplitude *= persistence ###print(" (total_noise_for_this_cell / max_amplitude) == " + str((total_noise_for_this_cell / max_amplitude))) return (total_noise_for_this_cell / max_amplitude) def generate_noise(self, supplied_x, supplied_y, scale, octaves, persistence, randseed=None): ''' The gateway function for generate_octaved_noise(), this function makes sure the noise values are formatted according to the (array[y][x] == z) format used by my MapTile constructor. ''' ## IMPORTANT! I think there should be a reinitialize_noise_array() function called here. ## That function would reshuffle or maybe change the hash value on the noise_array (the permutations table, per other sources). ## ... ## Ooooor maybe that should be optional, because we might want to generate a map from a specific hash. ## I know: The hash should be changeable as a function outside this one that is invoked by the main program, like "rerandomized generator". ## This function should also apply to the other generators too... ## Perlin will be similar, plasma will be more of a hack involving saving state and giving that out unless a reset is requested, maybe? ## Or perhaps plasma will be just the same and I'm forgetting something about the RNG calls there. ## Check that. ## ... ## randomize_the_noise_array_seed() now handles randomization for this generator. ## It may be supplied with a random seed... but only if some moxie-filled programmer supplies it with the ability to do that, first! ## DEBUG ##print("\n Generating new array of simplex noise . . 
.\n") ## /DEBUG self.randomize_the_noise_array_seed(random_number_seed=randseed) array_to_be_returned = [] for each_y in range(0, supplied_y): new_row = [] for each_x in range(0, supplied_x): new_z_value = self.generate_octaved_noise(each_x, each_y, scale, octaves, persistence) ###print(" new_z_value == " + str(new_z_value)) new_row.append(new_z_value) array_to_be_returned.append(new_row) ##print("\n New array of simplex noise has been generated.\n") ## DEBUG ##print(" array_to_be_returned == %s" % (str(array_to_be_returned))) ## /DEBUG return array_to_be_returned def generate_raw_unoctaved_noise(self, supplied_x, supplied_y): ## After some review... ## The "skewing" is just multiplying the coord numbers by a constant so that everything we want to do on an x,y Cartesian board gets translated onto a simplex board. ## i and j are the "coordinates" when translated into simplexese. ## t uses G2 because it can't just do subtraction from the already-worked s value baked into i and j. ## t is, I think, the Cartesian midpoint coordinate. ## So essentially all the s, i, j, t, x0, y0 defining-lines are about getting simplex-to-Cartesian and vice versa translations. ## "Skew the input space to determine which simplex cell we're in" s = (supplied_x + supplied_y) * self.F2 # they also said something about "hairy skew factor" ... wat. i = int((supplied_x + s)) # how is this supposed to work?! j = int((supplied_y + s)) ## I THINK the values of x and y are always 0 or 1... (?) ## Which would be how all of these things can just add and subtract from eachother sensibly. ## Maybe?!? This IS what I'm trying to find out by translating it from Java... ## It isn't magic programming if I'm actually trying to understand how it works! t = (float(i + j) * self.G2) ## "Unskew the cell origin back to (x, y) space" <--- "(x, y) space" means the whole Cartesian square coordinate thing, rather than... simplex-adjusted coordinates. 
unskewed_x_zero = (i - t) unskewed_y_zero = (j - t) ## "The x,y distances from the cell origin" <--- by x,y they mean Cartesian rather than simplex-ian x0 = (supplied_x - unskewed_x_zero) y0 = (supplied_y - unskewed_y_zero) ## "For the twodee case, the simplex shape is an equilateral triangle." ## "Determine which simplex we are in." ## i1, j1 are "offsets for second (middle) corner of simplex in (i, j) coords" ## It's basically going top, right, bottom along the triangle, if I understand correctly. if x0 > y0: ## "lower triangle, XY order: (0, 0) --> (1, 0) --> (1, 1)" i1 = 1 j1 = 0 else: ## "upper triangle, YX order: (0, 0) --> (0, 1) --> (1, 1)" i1 = 0 j1 = 1 ## " A step of (1,0) in (i,j) means a step of (1-c,-c) in (x,y), and ## a step of (0,1) in (i,j) means a step of (-c,1-c) in (x,y), where ## c = (3-sqrt(3))/6 " ((c == G2)) ## "Offsets for second (middle) corner of simplex in (x,y) unskewed coords" x1 = (x0 - i1 + self.G2) y1 = (y0 - j1 + self.G2) ## "Offsets for last corner in (x,y) unskewed coords" x2 = x0 - 1.0 + 2.0 * self.G2 # Why do people think not using parens on math is a good idea? y2 = y0 - 1.0 + 2.0 * self.G2 # I don't care about OoP. It's just sensible to give punctuation to that sort of thing. Someone COULD easily make a mistake, but with punctuation you trade the reader's interpretation time for safety, which is far better, imo. ## "Work out the hashed gradient indices of the three simplex corners" ## I think th ## ... ## I don't know why they would bother hashing it with 255. ## Why does that even matter? Why not just do the operations on the base numbers? ## It was 255 in the Java. ## But I have no idea how that was supposed to work. Isn't it supposed to be 256 anyways? ## ... the Python code I saw also uses 255 and had the 512 permutations buffer thing fixed by copying the array onto itself, which is what I'm gonna use, so I'll try the 255 thing again too. 
ii = int(i) & self.hash_number jj = int(j) & self.hash_number ''' ## NOTE: All of the following in this commented block is tainted by my mistaken mod table. ## It was probably the reason I went through such trouble to debug it this way. Blah. ####print(" Gradient DEBUG:\n index of self.permutations_table[jj] == " + str(jj)) ####print(" Gradient DEBUG:\n index of self.permutations_table_Mod12[(ii + self.permutations_table[jj])] == " + str((ii + self.permutations_table[jj]))) ####print(" Gradient DEBUG:\n gradient_i_zero == " + str(self.permutations_table_Mod12[(ii + self.permutations_table[jj])]) + "\n") #gradient_i_zero = self.permutations_table_Mod12[ii + self.permutations_table[jj]] ####print(" Gradient DEBUG:\n index of self.permutations_table[(jj+j1)] == " + str((jj+j1))) ####print(" Gradient DEBUG:\n index of self.permutations_table_Mod12[(ii + i1 + self.permutations_table[(jj+j1)])] == " + str((ii + i1 + self.permutations_table[(jj+j1)]))) ####print(" Gradient DEBUG:\n gradient_i_one == " + str(self.permutations_table_Mod12[(ii + i1 + self.permutations_table[(jj+j1)])]) + "\n") #gradient_i_one = self.permutations_table_Mod12[ii + i1 + self.permutations_table[jj+j1]] ####print(" Gradient DEBUG:\n index of self.permutations_table[(jj+j1)] == " + str((jj+j1))) ####print(" Gradient DEBUG:\n index of self.permutations_table_Mod12[(ii + 1 + self.permutations_table[(jj+1)])] == " + str((ii + 1 + self.permutations_table[(jj+1)]))) ####print(" Gradient DEBUG:\n gradient_i_two == " + str(self.permutations_table_Mod12[(ii + 1 + self.permutations_table[(jj+1)])]) + "\n") #gradient_i_two = self.permutations_table_Mod12[ii + 1 + self.permutations_table[jj+1]] ''' ## Note that the 1 constants are balanced with omitted 0 constants in the lines with "missing" elements. 
gradient_i_zero = self.permutations_table[ii + self.permutations_table[jj ]] % 12 gradient_i_one = self.permutations_table[ii + i1 + self.permutations_table[jj + j1]] % 12 gradient_i_two = self.permutations_table[ii + 1 + self.permutations_table[jj + 1]] % 12 ## "Calculate the contribution from the three corners" t0 = 0.5 - x0*x0 - y0*y0 # I really wish people would use parens in all multi-operator statements. if t0 < 0: n0 = 0.0 else: t0 *= t0 ## " (x,y) of grad3 used for twodee gradient " ###print("\n DEBUG:\n t0 == " + str(t0) + "\n twodee_dot_product == " + str(self.twodee_dot_product(self.grad3[gradient_i_zero], x0, y0))) n0 = t0 * t0 * self.twodee_dot_product(self.grad3[gradient_i_zero], x0, y0) t1 = 0.5 - x1*x1 - y1*y1 if t1 < 0: n1 = 0.0 else: t1 *= t1 n1 = t1 * t1 * self.twodee_dot_product(self.grad3[gradient_i_one], x1, y1) ###print("\nDEBUGGING x0 == " + str(x0)) ###print("DEBUGGING x1 == " + str(x1)) ###print("DEBUGGING x2 == " + str(x2)) ###print("\nDEBUGGING y0 == " + str(y0)) ###print("DEBUGGING y1 == " + str(y1)) ###print("DEBUGGING y2 == " + str(y2)) ###print("\nDEBUGGING (x2 * x2) == " + str((x2 * x2))) ###print("DEBUGGING (y2 * y2) == " + str((y2 * y2))) ###print("DEBUGGING ((x2 * x2) - (y2 * y2)) == " + str((x2 * x2) - (y2 * y2))) ###print("DEBUGGING (0.5 - ((x2 * x2) - (y2 * y2))) == " + str((0.5 - ((x2 * x2) - (y2 * y2))))) ## Apparently some clown thought it would be funny to allow order of operations to work all screwy in Java, or maybe someone sabatoged the code I was looking at. ## I really couldn't guess why, but this was the original code, written in Java: ## double t2 = 0.5 - x2*x2-y2*y2; ## There were no parentheses anywhere there. t2 = 0.5 - x2*x2 - y2*y2 ###print("DEBUGGING t0 == " + str(t0)) ###print("DEBUGGING t1 == " + str(t1)) ###print("DEBUGGING t2 == " + str(t2)) ## I think I understand it now! the t's are ticking down like octaves in the perlin generator, or something? 
## hrm it's multiplying, not dividing, so it couldn't get below zero that way unless it already was negative. >< ## Nevermind. Still don't understand it yet. if t2 < 0: n2 = 0.0 else: t2 *= t2 n2 = t2 * t2 * self.twodee_dot_product(self.grad3[gradient_i_two], x2, y2) ## "Add contributions from each corner to get the final noise value." ## "The result is scaled to return values in the interval [-1, 1]." ###print("\nDEBUGGING n0 == " + str(n0)) ###print("\nDEBUGGING n1 == " + str(n1)) ###print("\nDEBUGGING n2 == " + str(n2)) ####print("\nDEBUGGING return " + str(70.0 * (n0 + n1 + n2))) #return 70.0 * (n0 + n1 + n2) number_to_return = ( 70.0 * (n0 + n1 + n2) ) ## My program would work better with a result scaled to 0-255. Therefore... number_to_return += 1 number_to_return *= 128.0 ## blah, getting NoneTypes after the octaves were added. Hrm... ## debug: ###print("\n number_to_return == " + str(number_to_return)) return number_to_return class DungeonMapGenerator: ''' Generators for the creation of corridor-linked dungeon rooms for indoors maps. Output format uses z values to stand for different room types, eg: 0 = blocked 1 = unblocked 2 = corridor etc. ''' ## NOTE TO SELF!! ## How to do FoV algorithm: ## - Calculate a circle with a set radius (sight range) from the player's current position ## - Find all MapTiles in that radius ## - For each MapTile, draw a line from that MapTile to the player ## - For each properly-rounded coordinate along that line (aligns to MapTile coords; partial cover? think on it...), check MapTiles with those coordinates for opacity ## - If a MapTile with opacity is found, stop checking this line and set the MapTile whose line we're checking to "UNSEEN" def __init__(self, supplied_map_width=40, supplied_map_height=40, room_max_size=10, room_min_size=4, room_max_count=30, room_min_count=5): ## Using the DungeonMapGenerator should always involve supplying some or all of these constants. 
## Defaults are being used here to make it simple for me to test and demonstrate. self.map_width = supplied_map_width self.map_height = supplied_map_height ## -= 1 because doing it during room generation would be mildly wasteful -- the bottom and right edges must always be uncarved. ## Doing it here, during the inits, guarantees that for all rooms and every map. self.map_width -= 1 self.map_height -= 1 self.room_max_size = room_max_size self.room_min_size = room_min_size self.room_max_count = room_max_count self.room_min_count = room_min_count def check_these_two_rectangles_for_intersection(self, rectangle_alpha, rectangle_beta): ''' Check two rectangles, both formatted [x, y, w, h] for intersection; return True if they intersect and False if they do not intersect. ''' new_x = rectangle_alpha[0] new_x2 = (rectangle_alpha[0] + rectangle_alpha[2]) old_x = rectangle_beta[0] old_x2 = (rectangle_beta[0] + rectangle_beta[2]) new_y = rectangle_alpha[1] new_y2 = (rectangle_alpha[1] + rectangle_alpha[3]) old_y = rectangle_beta[1] old_y2 = (rectangle_beta[1] + rectangle_beta[3]) do_they_intersect = False if ( (new_x >= old_x) and (new_x <= old_x2) ) or ( (new_x2 >= old_x) and (new_x2 <= old_x2) ): if ( (new_y >= old_y) and (new_y <= old_y2) ) or ( (new_y2 >= old_y) and (new_y2 <= old_y2) ): do_they_intersect = True if ( (old_x >= old_x) and (old_x <= new_x2) ) or ( (old_x2 >= new_x) and (old_x2 <= new_x2) ): if ( (old_y >= new_y) and (old_y <= new_y2) ) or ( (old_y2 >= new_y) and (old_y2 <= new_y2) ): do_they_intersect = True ## This if tree checks to see whether or not any rooms are forming crosses. if ((new_x >= old_x) and (new_x2 <= old_x2)) and ((new_y <= old_y) and (new_y2 >= old_y2)): do_they_intersect = True ## ... 
and the same check in the other direction, for if the old room was the vertical bar of the cross rather than the new room, as is assumed in the preceding if tree: if ((old_x > new_x) and (old_x2 < new_x2)) and ((old_y < new_y) and (old_y2 > new_y2)): do_they_intersect = True return do_they_intersect def define_corridor(self, which_orientation, x, y, x2, y2): ''' Create a corridor (actually a one-tile-by-n-tiles rectangular room) connecting point (x, y) and point ((x + w), (y + h)), using the rectangular room definition format. ''' w = x2 - x h = y2 - y if which_orientation == 'horizontal': if w < 0: ## ((This fix worked perfectly! Hooray)) ## If it's negative, flip it and deduct it from the index. ## DO NOT put this before the orientation check, it doesn't need to care about which direction it isn't doing, and since it gets that info anyways it would just mess it up to flip and deduct in a direction it isn't going in (because that direction is the constant 1, see below). w *= -1 x -= w ## (x, y, width, height) new_corridor = [x, y, w + 1, 1] ## Yes, it could be handled in less verbose ways. ## This way makes it blindingly obvious what the code is supposed to do, which I prefer. ## Code ought to be easy to maintain. if which_orientation == 'vertical': if h < 0: ## If it's negative, flip it and deduct it from the index. ## DO NOT put this before the orientation check, it doesn't need to care about which direction it isn't doing, and since it gets that info anyways it would just mess it up to flip and deduct in a direction it isn't going in (because that direction is the constant 1, see below). 
h *= -1 y -= h ## (x, y, width, height) new_corridor = [x, y, 1, h + 1] return new_corridor def return_the_center_of_this_rectangle(self, upperleft_x, upperleft_y, width, height): centerpoint_x = ( upperleft_x + (width // 2) ) centerpoint_y = ( upperleft_y + (height // 2) ) return centerpoint_x, centerpoint_y def generate_noise(self, supplied_map_width=None, supplied_map_height=None, room_max_size=None, room_min_size=None, room_max_count=None, room_min_count=None): ''' It's noise that looks like a dungeon map. If R2-D2 sneezed, this would be the random pattern left on the tissue. ''' #### Arranging the generation parameters #### ## All the generators save the state of the last map made. ## The generate_noise() method of each generator accepts new parameters every time it's called, but if none are given, it goes back to the last parameters the generator worked with. ## This makes it easy to implement deterministic map regeneration from randseeds. ## -= 1 for the same reasoning as in the inits. if supplied_map_width != None: supplied_map_width -= 1 self.map_width = supplied_map_width if supplied_map_height != None: supplied_map_height -= 1 self.map_height = supplied_map_height if room_max_size != None: self.room_max_size = room_max_size if room_min_size != None: self.room_min_size = room_min_size if room_max_count != None: self.room_max_count = room_max_count if room_min_count != None: self.room_min_count = room_min_count #### Generating the map #### ## First, make a map full of zeroes. The rooms will be carved out of it. ## Remember, every NoiseMapGenerator returns results formatted: map[y][x] == z new_dungeon_map = [] for each_row in range(0, self.map_height): new_row = [] for each_column in range(0, self.map_width): new_row.append(0) new_dungeon_map.append(new_row) ## List comprehension method: ## new_dungeon_map = [[ 0 for y in range(0, self.supplied_map_height)] for x in range(0, self.supplied_map_width)] ## Try this and see how it goes. 
#### Generating room coordinates #### ## DEBUG #number_of_corridors_at_map_finish = 0 ## \DEBUG ## There must be at least room_min_count rooms in the end product. are_there_enough_rooms_yet = False while are_there_enough_rooms_yet == False: list_of_rooms = [] for each_room_attempt_number in range(0, self.room_max_count): ## DEBUG: Since walls are uncarved space, should the x and y randints begin at 1 or 0? ## Watching the output process will solve this issue quickly. ## ... ## This issue needs to be straightened out early on due to how intersection tests have to work. ## Only two edges need to have uncarved space in them, and every room will have those two edges uncarved. ## I decree those two edges to be the lower and right edges. ## The map will have upper and left edges uncarved so that any rooms at the edge of the map are properly walled. ## Thus the randints will begin at 1 (the upper and left edges)... ## and end at map_width and map_height, instead of (m_w - 1) and (m_h - 1). ## By letting rooms gen to the edges with their width and height values, they can sit on an edge with their two designated built-in edge walls and everything will be fine. new_room_width = random.randint(self.room_min_size, self.room_max_size) new_room_height = random.randint(self.room_min_size, self.room_max_size) new_room_upperleft_x = random.randint(1, self.map_width - new_room_width) new_room_upperleft_y = random.randint(1, self.map_height - new_room_height) ## [x, y, w, h] new_room = [new_room_upperleft_x, new_room_upperleft_y, new_room_width, new_room_height] ## The checks for validity favor x,y modification first -- and always pushing it to the lower right -- ## and w,h modification second -- and always pushing it to the upper left -- ## because this should lead to a mild tendency for rooms to cluster, and towards the center, at that. ## Which I think will look nice. ## ... ## or that's what I'd like to do, but not on the first implementation. 
## Checking to see if the rooms intersect: failed_intersection_test = False for each_other_room in list_of_rooms: if self.check_these_two_rectangles_for_intersection(new_room, each_other_room) == True: failed_intersection_test = True if failed_intersection_test == False: list_of_rooms.append(new_room) if len(list_of_rooms) >= self.room_min_count: are_there_enough_rooms_yet = True else: del list_of_rooms #### Carving successful room coordinates #### ## Someone told me using range(foo, len(list)) is un-Pythonic, so I'm using an iterator to step through the list in parallel for the purposes of creating corridors to connect rooms. room_creation_iterator = -1 for each_completed_room in list_of_rooms: for each_x_coordinate in range(each_completed_room[0], (each_completed_room[0] + each_completed_room[2])): for each_y_coordinate in range(each_completed_room[1], (each_completed_room[1] + each_completed_room[3])): ## This conditional seems a bit hackish. if new_dungeon_map[each_y_coordinate][each_x_coordinate] == 0: ## This is so simple it's bound to fail miserably. ## ... ## And yet it works. new_dungeon_map[each_y_coordinate][each_x_coordinate] += 1 ## Connect every room with corridors. (Note that there may be dungeons where this trait is not desirable for some reason; other behavior may be added as desired.) ## Generate a random direction for the corridors to point in: which_direction_first = random.randint(0, 1) # remember, random.randint() includes min and max values, unlike range() #define_corridor(which_orientation, x, y, x2, y2) ## Note: Corridors are created from the current room to the next room even though the next room hasn't actually be written in yet. ## It works because the rooms already exist as rectangle coordinates. ## This is likely to cause debugging confusion if you try to change this code without taking that into account. Be advised. ## Find the centerpoints of both rooms and pack them as tuples. 
## Syntax is [ ( (list_of_rooms[n][w] // 2) + list_of_rooms[n][x] ), ( (list_of_rooms[n][h] // 2) + list_of_rooms[n][y] ) ] ## Values resulting from this look like x, y and are just the centerpoints of the two rooms. ## Another representation: [(width // 2 + x offset), (height // 2 + y offset)] ## ... ## If desired, it's possible to change this to use floor divide + 1 instead of just floor divide. ## That would make it so that rooms with a thickness of 1 do not have projections off their sides. ## Corridors would slice into the center of the room rather than the rounded-down center. #room_alpha_center = [ ( (list_of_rooms[room_creation_iterator][2] // 2) + list_of_rooms[room_creation_iterator][0] ), ( (list_of_rooms[room_creation_iterator][3] // 2) + list_of_rooms[room_creation_iterator][1] ) ] #room_beta_center = [ ( (list_of_rooms[room_creation_iterator + 1][2] // 2) + list_of_rooms[room_creation_iterator + 1][0] ), ( (list_of_rooms[room_creation_iterator + 1][3] // 2) + list_of_rooms[room_creation_iterator + 1][1] ) ] ## Redoing this to make my dungeon generator cooler. ## Now, rooms will connect to the nearest two rooms, by centerpoint value! ## Or one or zero rooms, as in the case for the second and first rooms created. ## This should make tunnel connections a whole lot more friendly-looking. ## The way we're going to do this is: ## For each room in the rooms list: ## Use my new return_the_center_of_this_rectangle() method on every room in the rooms list and compare their centers to the room currently being considered ## The nearest two rooms that do not have centerpoints equal to the room being considered will be used as anchors for the define_corridor() method. 
the_centerpoint_of_this_room = self.return_the_center_of_this_rectangle() ## DEBUG #print("\n room_alpha_center == %s\n room_beta_center == %s" % (str(room_alpha_center), str(room_beta_center))) ## \DEBUG if which_direction_first == 0: ## DEBUG #number_of_corridors_at_map_finish += 1 ## \DEBUG ## It needs to take room alpha center and drag it out to room beta center in only the horizontal direction. ## That's why vertical is as easy as swapping reference order to the rooms. ## define_corridor() still needs a direction because I chose not to make it implicit by unpacking the centerpoint tuple here. I think it's more readable this way. ## ... ## Something is totally wrong here. This only works if alpha centerpoint > beta centerpoint because otherwise you get negative widths or something and that can't be drawn in can it? ## Maybe it can? Let's try it and see what fails. new_horizontal_corridor = self.define_corridor('horizontal', room_alpha_center[0], room_alpha_center[1], room_beta_center[0], room_beta_center[1]) new_vertical_corridor = self.define_corridor('vertical', room_beta_center[0], room_beta_center[1], room_alpha_center[0], room_alpha_center[1]) elif which_direction_first == 1: ## DEBUG #number_of_corridors_at_map_finish += 1 ## \DEBUG new_horizontal_corridor = self.define_corridor('horizontal', room_beta_center[0], room_beta_center[1], room_alpha_center[0], room_alpha_center[1]) new_vertical_corridor = self.define_corridor('vertical', room_alpha_center[0], room_alpha_center[1], room_beta_center[0], room_beta_center[1]) #print("\n new_horizontal_corridor == %s\n new_vertical_corridor == %s" % (str(new_horizontal_corridor), str(new_vertical_corridor))) ## When the next-to-last room is connected to the last room, reset the iterator to 0 so that the last room may be connected to the first room. ## NOTE! Linear dungeons should stop corridor creation when the next-to-last room is connected to the last room. 
## DEBUG #print("\n room_creation_iterator == %d\n len(list_of_rooms == %d" % (room_creation_iterator, len(list_of_rooms))) ## \DEBUG if ( room_creation_iterator < (len(list_of_rooms) - 2) ): # plus equals to, NOT set equals to (incrementing, not rolling over) room_creation_iterator += 1 else: # set equals to, NOT minus equals to (rolling over, not incrementing) room_creation_iterator = -1 ## This should probably be turned into a create_room() method. ## First horizontal: ## DEBUG #print("\nnew_horizontal_corridor[0] == %d\nnew_horizontal_corridor[2] == %d\nnew_horizontal_corridor[0] + [2] == %d" % (new_horizontal_corridor[0] ,new_horizontal_corridor[2], (new_horizontal_corridor[0] + new_horizontal_corridor[2]))) #print("\nnew_horizontal_corridor[1] == %d\nnew_horizontal_corridor[3] == %d\nnew_horizontal_corridor[1] + [3] == %d" % (new_horizontal_corridor[1], new_horizontal_corridor[3], (new_horizontal_corridor[1] + new_horizontal_corridor[3]))) ## \DEBUG for each_horizontal_corridor_x_coordinate in range(new_horizontal_corridor[0], (new_horizontal_corridor[0] + new_horizontal_corridor[2])): for each_horizontal_corridor_y_coordinate in range(new_horizontal_corridor[1], (new_horizontal_corridor[1] + new_horizontal_corridor[3])): ## If it's already walkable, don't turn it debug mauve. 
if new_dungeon_map[each_horizontal_corridor_y_coordinate][each_horizontal_corridor_x_coordinate] == 0: new_dungeon_map[each_horizontal_corridor_y_coordinate][each_horizontal_corridor_x_coordinate] += 1 ## Second vertical: ## DEBUG #print("\nnew_vertical_corridor[0] == %d\nnew_vertical_corridor[2] == %d\nnew_vertical_corridor[0] + [2] == %d" % (new_vertical_corridor[0], new_vertical_corridor[2], (new_vertical_corridor[0] + new_vertical_corridor[2]))) #print("\nnew_vertical_corridor[1] == %d\nnew_vertical_corridor[3] == %d\nnew_vertical_corridor[1] + [3] == %d" % (new_vertical_corridor[1], new_vertical_corridor[3], (new_vertical_corridor[1] + new_vertical_corridor[3]))) ## \DEBUG for each_vertical_corridor_x_coordinate in range(new_vertical_corridor[0], (new_vertical_corridor[0] + new_vertical_corridor[2])): for each_vertical_corridor_y_coordinate in range(new_vertical_corridor[1], (new_vertical_corridor[1] + new_vertical_corridor[3])): ## If it's already walkable, don't turn it debug mauve. if new_dungeon_map[each_vertical_corridor_y_coordinate][each_vertical_corridor_x_coordinate] == 0: new_dungeon_map[each_vertical_corridor_y_coordinate][each_vertical_corridor_x_coordinate] += 1 ## DEBUG #print("\n number_of_corridors_at_map_finish == %d\n len(list_of_rooms) == %d" % (number_of_corridors_at_map_finish, len(list_of_rooms))) ## \DEBUG return new_dungeon_map class RoomFilledMapGenerator: ## I don't like this generator. It is not worth the effort right now. Keeping it for legacy/future inspiration purposes. ## To start, this code will be somewhat copypasted from DungeonMapGenerator. Mostly just the inits and some grid work. def __init__(self, supplied_map_width=40, supplied_map_height=40, room_max_size=10, room_min_size=4): ## Using the RoomFilledMapGenerator should always involve supplying some or all of these constants. ## Defaults are being used here to make it simple for me to test and demonstrate. 
## DEBUG ## Let's see if storing new_dungeon_map as state magically solves it. Woooo ## Nope, not in the least. And yes I did put in self.* tags on every reference in this class's generate_noise() method. #self.new_dungeon_map = [] ## \DEBUG self.map_width = supplied_map_width self.map_height = supplied_map_height ## -= 1 because doing it during room generation would be mildly wasteful -- the bottom and right edges must always be uncarved. ## Doing it here, during the inits, guarantees that for all rooms and every map. ## DEBUG COMMENTED ## The following adjustment is unnecessary with the way I've structured my code now. Good. #self.map_width -= 1 #self.map_height -= 1 ## \DEBUG COMMENTED ## This generator does not need min/max room count settings, but it wouldn't be all that difficult to add them as some sort of conditional'd loop. def generate_noise(self, supplied_map_width=None, supplied_map_height=None, room_max_size=None, room_min_size=None): ''' It's sorta like noise. Except blocky and in all these clean straight lines and right angles. ''' #### Arranging the generation parameters #### ## All the generators save the state of the last map made. ## The generate_noise() method of each generator accepts new parameters every time it's called, but if none are given, it goes back to the last parameters the generator worked with. ## This makes it easy to implement deterministic map regeneration from randseeds. if supplied_map_width != None: ## -= 1 for the same reasoning as in the inits. ## Is unnecessary with the way I've structured my code now. ## DEBUG COMMENTED #supplied_map_width -= 1 ## \DEBUG COMMENTED self.map_width = supplied_map_width if supplied_map_height != None: ## Is unnecessary with the way I've structured my code now. 
## DEBUG COMMENTED #supplied_map_height -= 1 ## \DEBUG COMMENTED self.map_height = supplied_map_height if room_max_size != None: self.room_max_size = room_max_size if room_min_size != None: self.room_min_size = room_min_size ## Room count will be determined by the other parameters since the map will be filled with rooms. #### Generating the map #### ## First, make a map full of zeroes. The rooms will be carved out of it. ## Remember, every NoiseMapGenerator returns results formatted: map[y][x] == z ## Refactoring this might involve making a generate_blank_map() method. ## It would also be useful for DungeonMapGenerators. ## Maybe DungeonMapGenerator should be a base class and these room-based map generators would all draw from it. new_dungeon_map = [] for each_row in range(0, self.map_height): new_row = [] for each_column in range(0, self.map_width): new_row.append(0) new_dungeon_map.append(new_row) ## List comprehension version: ## new_dungeon_map = [[ 0 for y in range(0, self.supplied_map_height)] for x in range(0, self.supplied_map_width)] ## Try this and see how it goes. #### Filling the blank map with rooms #### ## I seriously don't understand why you wouldn't want to do for loops with index numbers. It makes dealing with the data SO much easier! for each_row_index in range(1, (len(new_dungeon_map) - 1)): for each_column_index in range(1, (len(new_dungeon_map[each_row_index]) - 1)): ## IMPORTANT! ## The syntax is the same for all of these map generators: ## ## new_dungeon_map[y][x] == z ## ## If there is some confusion about row/column stuff or sublist ordering, remember to compare it to this fact. 
#### ATTEMPT NUMBER TWO #### ## I'm just beating my hands on the keyboard and code is coming out ## Initialize the validator variable: should_we_start_a_room_here = 1 ## Whip up a potential room: new_room_width = random.randint(self.room_min_size, self.room_max_size)#(4, 10) # <-- does not help at all new_room_height = random.randint(self.room_min_size, self.room_max_size)#(4, 10) # :( ## Now check if the width or height go out of bounds and adjust to fit if possible; if not possible, set the validator toggle to False: if (each_column_index + new_room_width) >= (len(new_dungeon_map[each_row_index]) - 1): # len(map[row]) because we're checking this particular row's width, and -1 because of uncarved side difference_between_maximum_room_width_and_attempted_room_width = ( each_column_index + new_room_width - (len(new_dungeon_map[each_row_index]) - 1) ) new_room_width -= difference_between_maximum_room_width_and_attempted_room_width if new_room_width < self.room_min_size: should_we_start_a_room_here = 0 if (each_row_index + new_room_height) >= (len(new_dungeon_map) - 1): # len(map) because we're checking the height of columns in this map, and -1 because of uncarved side difference_between_maximum_room_height_and_attempted_room_height = ( each_row_index + new_room_height - (len(new_dungeon_map) - 1) ) new_room_height -= difference_between_maximum_room_height_and_attempted_room_height if new_room_height < self.room_min_size: should_we_start_a_room_here = 0 ## Determine if this is a good starting tile for a room: for each_nearby_tile_y in range(-1, 2): for each_nearby_tile_x in range(-1, 2): ## Check every tile adjacent to the current tile (and the current tile, too) for carved space and Falsitivize the validator if any is found: if new_dungeon_map[(each_row_index + each_nearby_tile_y)][(each_column_index + each_nearby_tile_x)] != 0: should_we_start_a_room_here = 0 ## Next, check and see if this room will slice into another room at any point along its prospective length: 
## Init/reset the width decrementor: room_max_width = 0 continue_incrementing_room_width = True ## range(-1, 2) should step through (-1, 0, 1), ie (up, same, down) or (left, same, right) for each_next_unit_of_width in range(0, new_room_width): should_we_increment_room_max_width_by_one_tile = False for each_nearby_tile_y in range(-1, 2): for each_nearby_tile_x in range(-1, 2): ## Check every tile adjacent to the current tile (and the current tile, too) for carved space and Falsitivize the validator if any is found: if continue_incrementing_room_width == True and new_dungeon_map[(each_row_index + each_nearby_tile_y)][(each_column_index + each_nearby_tile_x) + each_next_unit_of_width] == 0: #print(" INCREMENTING decrementor. checked tile == %d" % (new_dungeon_map[(each_row_index + each_nearby_tile_y)][(each_column_index + each_nearby_tile_x) + each_next_unit_of_width])) should_we_increment_room_max_width_by_one_tile = True else: continue_incrementing_room_width = False #print(" NOT incrementing decrementor. checked tile == %d" % (new_dungeon_map[(each_row_index + each_nearby_tile_y)][(each_column_index + each_nearby_tile_x) + each_next_unit_of_width])) if should_we_increment_room_max_width_by_one_tile == True and continue_incrementing_room_width == True: room_max_width += 1 ## Apply the decrementor and check if the room is too small: #print("\nnew_room_width == %d\n room_width_decrementor == %d" % (new_room_width, room_width_decrementor)) new_room_width = room_max_width #print(" new_room_width == %d" % (new_room_width)) #print(" self.min_room_size == %d" % (self.room_min_size)) if new_room_width < self.room_min_size: ## Then the smallest possible room had to become too small to fit here and this tile should be skipped. 
should_we_start_a_room_here = 0 #print(" should_we_start_a_room_here == %r" % (should_we_start_a_room_here)) #else: #print("\nnew_room_width == %d\n room_width_decrementor == %d" % (new_room_width, room_width_decrementor)) #print(" self.min_room_size == %d" % (self.room_min_size)) #print(" (t) should_we_start_a_room_here == %r" % (should_we_start_a_room_here)) ## Duplicating the width decrementor code even though it should never be necessary... o_o ## ... ## It runs but it didn't fix the problem. Still bizarre. ## Init/reset the height decrementor: room_height_decrementor = 0 ## range(-1, 2) should step through (-1, 0, 1), ie (up, same, down) or (left, same, right) for each_next_unit_of_height in range(0, new_room_height): should_we_decrement_room_height_by_one_tile = False for each_nearby_tile_y in range(-1, 2): for each_nearby_tile_x in range(-1, 2): ## Check every tile adjacent to the current tile (and the current tile, too) for carved space and Falsitivize the validator if any is found: if new_dungeon_map[(each_row_index + each_nearby_tile_y) + each_next_unit_of_height][(each_column_index + each_nearby_tile_x)] != 0: #print(" INCREMENTING decrementor. checked tile == %d" % (new_dungeon_map[(each_row_index + each_nearby_tile_y)][(each_column_index + each_nearby_tile_x) + each_next_unit_of_width])) should_we_decrement_room_height_by_one_tile = True #else: #print(" NOT incrementing decrementor. checked tile == %d" % (new_dungeon_map[(each_row_index + each_nearby_tile_y)][(each_column_index + each_nearby_tile_x) + each_next_unit_of_width])) if should_we_decrement_room_height_by_one_tile == True: room_height_decrementor += 1 new_room_height -= room_height_decrementor if new_room_height < self.room_min_size: ## Then the smallest possible room had to become too small to fit here and this tile should be skipped. 
should_we_start_a_room_here = 0 #if should_we_start_a_room_here == 0: # new_room_height = 0 # new_room_width = 0 ## Now that all checks have been passed, write the room to the map. if should_we_start_a_room_here == 1: for each_new_room_height_index in range(0, new_room_height): for each_new_room_width_index in range(0, new_room_width): new_dungeon_map[each_row_index + each_new_room_height_index][each_column_index + each_new_room_width_index] += 1 else: pass return new_dungeon_map ''' #### ATTEMPT NUMBER ONE #### ## Failed due to indexing or insufficient/incorrect tile validation. ## IMPORTANT! This code all assumes tile validation begins iterating one tile right and one tile down and fills a map of zeroes of exactly the right size. ## Let's switch to serial checks with a toggle rather than nested checks with a base case. ## This will make it very easy to add and remove conditionals to alter how rooms are validated. should_we_start_a_room_on_this_tile = True if new_dungeon_map[each_row_index][each_column_index] != 0: ## Then the current tile is carved and unusable. should_we_start_a_room_on_this_tile = False if new_dungeon_map[each_row_index][each_column_index + 1] != 0: ## The next tile to the right is carved. should_we_start_a_room_on_this_tile = False if new_dungeon_map[each_row_index - 1][each_column_index] != 0: ## The tile above the current tile is carved. should_we_start_a_room_on_this_tile = False if new_dungeon_map[each_row_index][each_column_index - 1] != 0: ## The tile to the left of the current tile is carved. should_we_start_a_room_on_this_tile = False if new_dungeon_map[each_row_index - 1][each_column_index - 1] != 0: ## The tile above and to the left of the current tile is carved. should_we_start_a_room_on_this_tile = False if new_dungeon_map[each_row_index - 1][each_column_index + 1] != 0: ## The tile above and to the right of the current tile is carved. should_we_start_a_room_on_this_tile = False ## The absurdity gallery. 
Should be logically impossible to get any hits here whatsoever. if new_dungeon_map[each_row_index + 1][each_column_index] != 0: should_we_start_a_room_on_this_time = False if new_dungeon_map[each_row_index + 1][each_column_index + 1] != 0: should_we_start_a_room_on_this_time = False if new_dungeon_map[each_row_index + 1][each_column_index - 1] != 0: should_we_start_a_room_on_this_time = False ## There has to be a check to see if the tile is fewer than self.room_min_size tiles away from the right and bottom edges of the map. ## We will take this opportunity to define these handy and descriptive variables: ## ## DEBUG - 1 on the end of this? Yes or no? To handle uncarved space on the bottom+right sides... this should be what makes it uncarved, if included here. ## distance_from_left_of_room_to_right_of_map = (self.map_width - each_column_index) distance_from_top_of_room_to_bottom_of_map = (self.map_height - each_row_index) ## Check to see if the room's min size is too large for its seed location: if self.room_min_size >= distance_from_left_of_room_to_right_of_map: ## This tile is too close to the right side of the map to be placed here. should_we_start_a_room_on_this_tile = False if self.room_min_size >= distance_from_top_of_room_to_bottom_of_map: ## This tile is too close to the bottom of the map to be placed here. should_we_start_a_room_on_this_tile = False ## Get how wide and tall the room wants to be, so we can check it against its neighbors and the map edges: random_room_width = random.randint(self.room_min_size, self.room_max_size) random_room_height = random.randint(self.room_min_size, self.room_max_size) #random_room_height = 6 ## If the tile is too close to the edge to fulfill its randomly generated width, decrement it untill it just fits. ## Note: This step happens before the next validation step because if it didn't the room would generate an index-out-of-range error there. 
if random_room_width >= distance_from_left_of_room_to_right_of_map: random_room_width -= (random_room_width - distance_from_left_of_room_to_right_of_map) if random_room_height >= distance_from_top_of_room_to_bottom_of_map: random_room_height -= (random_room_height - distance_from_top_of_room_to_bottom_of_map) ## Something is ridiculously wrong in the below code, but I have no idea what. ## It's still generating overlaps even though I've put in all the conditionals that are supposed to make it not do that. ## To prevent the decrementer in the following for loop from clipping its own loop too short. I think. random_room_width_adjustment_handler = 0 ## random_room_width + 1 is the upperleft coordinate, the span of the room, and the wall on the right. for each_next_tile_index in range(0, (random_room_width + 2)): ## The following line of code (plus one or two index offsets) took quite some time to figure out was needed. if ((each_next_tile_index + each_column_index) <= (self.map_width - 1)): if (new_dungeon_map[each_row_index][(each_column_index + each_next_tile_index)] != 0) or (new_dungeon_map[(each_row_index - 1)][(each_column_index + each_next_tile_index)] != 0) or (new_dungeon_map[(each_row_index - 1)][(each_column_index + each_next_tile_index)] != 0): ## Then something's in the way. Decrement the room's actual size. ## ... ## This used to be decrementing random_room_width, which I think made it end the for loop too early. Changing to a handler to disconnect those parts. ## ... ## I don't think that solved it. It SHOULDN'T solve it regardless. Leaving it in just to be safe. Refactor it out later. random_room_width_adjustment_handler += 1 ## DEBUG #if random_room_width_adjustment_handler > 0: # random_room_width_adjustment_handler += 0 ## \DEBUG ## Apply the accrued adjustment. 
random_room_width -= random_room_width_adjustment_handler ## One final check to ensure the previous validation step did not make the room too small: if random_room_width <= self.room_min_size: should_we_start_a_room_on_this_tile = False ## DEBUG ## Direct checking for intersection after the room is defined. ## Even though it solves all the debug mauve, it doesn't fix the inappropriate abuttment issue, and it also doesn't fill the map as cleanly as resizing rooms if they're too big. #for each_new_tile_y_offset in range(0, random_room_height): # for each_new_tile_x_offset in range(0, random_room_width): # if new_dungeon_map[(each_row_index + each_new_tile_y_offset)][(each_column_index + each_new_tile_x_offset)] != 0: # should_we_start_a_room_on_this_tile = False ## \DEBUG ## Another absurdity gallery -- the following should never turn up any hits due to logic. random_room_height_adjustment_handler = 0 for each_next_tile_index in range(0, (random_room_height + 2)): if ((each_next_tile_index + each_row_index) <= (self.map_height - 1)): if (new_dungeon_map[each_row_index + each_next_tile_index][(each_column_index)] != 0) or (new_dungeon_map[(each_row_index + each_next_tile_index)][(each_column_index + 1)] != 0) or (new_dungeon_map[(each_row_index + each_next_tile_index)][(each_column_index - 1)] != 0): random_room_height_adjustment_handler += 1 random_room_height -= random_room_height_adjustment_handler if random_room_height <= self.room_min_size: should_we_start_a_room_on_this_tile = False ## If it passes all the checks, write the room to the map. 
if should_we_start_a_room_on_this_tile == True: for each_new_tile_y_offset in range(0, random_room_height): for each_new_tile_x_offset in range(0, random_room_width): if new_dungeon_map[(each_row_index + each_new_tile_y_offset)][(each_column_index + each_new_tile_x_offset)] >= 1: ## DEBUG print("Error: Mauve for random_room_width %d, random_room_height %d\n min_size %d max_size %d" % (random_room_width, random_room_height, self.room_min_size, self.room_max_size)) ## \DEBUG new_dungeon_map[(each_row_index + each_new_tile_y_offset)][(each_column_index + each_new_tile_x_offset)] += 1 return new_dungeon_map ''' ''' #### ATTEMPT NUMBER ZERO #### ## Ugh, I don't know why none of this works. ## The indices seem perfect on paper but changing one index makes the results better or worse in ways that don't make any obvious kind of sense. ## I'm going to assume I made some error in figuring out what checking was needed. ## Step 1 if new_dungeon_map[each_row_index][each_column_index] == 0: ## Theory section... ## Imagine the algorithm makes a tall room, a wide room and another tall room on the first line. ## When it passes through the second line it would need to: ## 1. Detect uncarved space at (x, y) ## 2. Check ((x + 1), y) for uncarved space ## If False, then it's about to break into a room to the right; if True... ## 3. Check ((x + 1), y - 1) for uncarved space ## 3a If False, then it's a wall beneath a room; if True... ## 4. Check ((x + 2), y) for uncarved space ## 4a If False, then it's the end of a wall beneath a room and also abutting another room to the right. ## 4b If True then this is a good spot to place a room as it is not going to be carving out a wall from any adjacent rooms. ## ... ## There needs to be a check-ahead to make the room actually fill all the space infront of it, x-ly speaking. ## Or at least to make it easy to put an upper bound on the room width in this location. 
## A similar procedure may need to happen at the bottom of the map for height of the room. ## ... ## 5. For each in range(0, room_max_size): Check ((x + each), y) for uncarved space, return sum_of_this_loop ## ... stuff. ## Step 2 if new_dungeon_map[each_row_index][(each_column_index + 1)] == 0: ## Step 3 if new_dungeon_map[(each_row_index - 1)][(each_column_index + 1)] == 0: ## Step 4 ## DEBUG Let's throw more conditionals onto the last uniform step, here, to see if something is the right one: if (new_dungeon_map[each_row_index][(each_column_index + 2)] == 0) and (new_dungeon_map[each_row_index][each_column_index - 1] == 0) and (new_dungeon_map[each_row_index - 1][each_column_index - 1] == 0): ## A good spot to place a room has been found. ## Determine the limiting condition for the width and height randint ranges based on distance between room edge and map edge: distance_from_left_of_room_to_right_of_map = (self.map_width - each_column_index) distance_from_top_of_room_to_bottom_of_map = (self.map_height - each_row_index) random_room_width = random.randint(self.room_min_size, self.room_max_size) random_room_height = random.randint(self.room_min_size, self.room_max_size) ## Forbidding the rooms to be larger than the map: if random_room_width >= distance_from_left_of_room_to_right_of_map: random_room_width = (distance_from_left_of_room_to_right_of_map - 1) if random_room_height >= distance_from_top_of_room_to_bottom_of_map: random_room_height = (distance_from_top_of_room_to_bottom_of_map - 1) ## Note: Step 5 comes after a tentative room width has been generated so that it doesn't have to check any further than it needs to. 
how_wide_to_actually_make_the_room = 0 ## Step 5 for each_next_tile_index in range(0, random_room_width): if new_dungeon_map[each_row_index][(each_column_index + each_next_tile_index + 1)] == 0: how_wide_to_actually_make_the_room += 1 else: how_wide_to_actually_make_the_room += 0 ## Now we know how wide to make the room and, implicitly, how tall to make it, since rooms are rectilinear and will never be placed to undercut other rooms, only to block their horizontal propagation. ## The maximum width is how_wide_to_actually_make_the_room, since it uses random_room_width (already bounded for map edge purposes) in its randrange. ## The maximum height is simply random_room_height, now that it's been bounded by distance_from_top_of_room_to_bottom_of_map. ## ... ## It occurred to me it might be simpler to make the map a large carved room one or two tiles wider than the end result is supposed to be, and simply "uncarve" the map inside it before starting any of this. ## Then is-carved checking would implicitly incorporate the distance to the edge in it too. ## Ah well, that's for some adventurous refactoring spree! new_room_rectangle = [each_row_index, each_column_index, how_wide_to_actually_make_the_room, random_room_height] ## Now write the room to the map so it can continue properly. for each_new_tile_y in range(1, random_room_height + 1): for each_new_tile_x in range(1, (how_wide_to_actually_make_the_room + 1)): new_dungeon_map[(each_row_index + each_new_tile_y)][(each_column_index + each_new_tile_x)] += 1 return new_dungeon_map ''' class MarkIIDungeonMapGenerator: ## Thinking of renaming this WingedDungeonGenerator, because it loves to make floorplans split into several "wings" each of which may be impressively lengthy at larger sizes. ## The effect is pretty cool, actually. Code could use some efficiency polish, though. 
    ''' Idea remembered from the old WanderingLineGenerator: " Decided I didn't quite like the "wandering line" idea and I'm going to try something inspired by http://donjon.bin.sh/d20/dungeon/index.cgi instead. It's going to use the DungeonMapGenerator algorithm to place rooms and a new algorithm for tunnel connection. Specifically it will use the WanderingLineMapGenerator idea of keeping an in-object list of rooms and using that to do things like is-connected-yet checks and intersection testing. " '''
    def __init__(self, supplied_map_width=40, supplied_map_height=40, room_max_size=10, room_min_size=4, room_max_count=30, room_min_count=5):
        ## Map dimensions and room size/count bounds are stored as generator state so that a later
        ## generate_noise() call with no arguments can fall back to the last parameters supplied here.
        self.map_width = supplied_map_width
        self.map_height = supplied_map_height
        ## The bottom and right edges must always be uncarved.
        ## Doing it here, during the inits, guarantees that for all rooms and every map.
        ## ...
        ## I probably have no idea what I'm doing since testing it is easier than figuring out whether Python feels like pretending 0 is an ordinal or not this time.
        self.map_width -= 1
        self.map_height -= 1
        self.room_max_size = room_max_size
        self.room_min_size = room_min_size
        self.room_max_count = room_max_count
        self.room_min_count = room_min_count
        ## Saving it as state for brain friendliness purposes. Can be changed later.
        self.list_of_created_rooms = []

    def define_corridor(self, which_orientation, x, y, x2, y2):
        ''' Create a corridor (actually a one-tile-by-n-tiles rectangular room) connecting
        point (x, y) and point (x2, y2), returned in the rectangular room definition
        format [x, y, width, height].

        which_orientation selects which axis the corridor runs along: 'horizontal' or
        'vertical'.

        NOTE(review): any other which_orientation value leaves new_corridor unbound and the
        final return raises NameError — presumably callers only ever pass the two valid
        strings; confirm before relying on this elsewhere. '''
        width = x2 - x
        height = y2 - y
        if which_orientation == 'horizontal':
            ## Normalize so the corridor is always described from its leftmost tile with a positive width.
            if width < 0:
                width *= -1
                x -= width
            ## width + 1 so that both endpoint tiles are included in the corridor's span.
            new_corridor = [x, y, width + 1, 1]
        if which_orientation == 'vertical':
            ## Normalize so the corridor is always described from its topmost tile with a positive height.
            if height < 0:
                height *= -1
                y -= height
            ## height + 1 so that both endpoint tiles are included in the corridor's span.
            new_corridor = [x, y, 1, height + 1]
        return new_corridor

    def return_the_center_of_this_rectangle(self, upperleft_x, upperleft_y, width, height):
        ## This is for placing corridors.
centerpoint_x = ( upperleft_x + (width // 2) ) centerpoint_y = ( upperleft_y + (height // 2) ) return [centerpoint_x, centerpoint_y] def check_these_two_rectangles_for_intersection(self, rectangle_alpha, rectangle_beta): ''' Check two rectangles, both formatted [x, y, w, h] for intersection; return True if they intersect and False if they do not intersect. ''' new_x = rectangle_alpha[0] new_x2 = (rectangle_alpha[0] + rectangle_alpha[2]) old_x = rectangle_beta[0] old_x2 = (rectangle_beta[0] + rectangle_beta[2]) new_y = rectangle_alpha[1] new_y2 = (rectangle_alpha[1] + rectangle_alpha[3]) old_y = rectangle_beta[1] old_y2 = (rectangle_beta[1] + rectangle_beta[3]) do_they_intersect = False if ( (new_x >= old_x) and (new_x <= old_x2) ) or ( (new_x2 >= old_x) and (new_x2 <= old_x2) ): if ( (new_y >= old_y) and (new_y <= old_y2) ) or ( (new_y2 >= old_y) and (new_y2 <= old_y2) ): do_they_intersect = True if ( (old_x >= old_x) and (old_x <= new_x2) ) or ( (old_x2 >= new_x) and (old_x2 <= new_x2) ): if ( (old_y >= new_y) and (old_y <= new_y2) ) or ( (old_y2 >= new_y) and (old_y2 <= new_y2) ): do_they_intersect = True ## This if tree checks to see whether or not any rooms are forming crosses. if ((new_x >= old_x) and (new_x2 <= old_x2)) and ((new_y <= old_y) and (new_y2 >= old_y2)): do_they_intersect = True ## ... and the same check in the other direction, for if the old room was the vertical bar of the cross rather than the new room, as is assumed in the preceding if tree: if ((old_x > new_x) and (old_x2 < new_x2)) and ((old_y < new_y) and (old_y2 > new_y2)): do_they_intersect = True ## DEBUG #print("Successfully checked for intersection") ## \DEBUG return do_they_intersect def generate_noise(self, supplied_map_width=None, supplied_map_height=None, room_max_size=None, room_min_size=None, room_max_count=None, room_min_count=None): ## I have this sinking feeling it's un-Pythonic to have this kind of optional state for my MapGenerator objects. 
if supplied_map_width != None: self.map_width = supplied_map_width self.map_width -= 1 if supplied_map_height != None: self.map_height = supplied_map_height self.map_height -= 1 if room_max_size != None: self.room_max_size = room_max_size if room_min_size != None: self.room_min_size = room_min_size if room_max_count != None: self.room_max_count = room_max_count if room_min_count != None: self.room_min_count = room_min_count list_of_candidate_rooms = [] while (len(list_of_candidate_rooms) < self.room_min_count): for each_new_room_attempt in range(0, self.room_max_count): #print("each_new_room_attempt == %d" % (each_new_room_attempt)) ## Width and height are defined BEFORE x/y position. ## Doing it this way makes it unnecessary to check if the room extends off the map. new_room_width = random.randint(self.room_min_size, self.room_max_size) new_room_height = random.randint(self.room_min_size, self.room_max_size) new_room_x = random.randint(1, ((self.map_width - 1) - new_room_width)) new_room_y = random.randint(1, ((self.map_height - 1) - new_room_height)) new_room_candidate = [new_room_x, new_room_y, new_room_width, new_room_height] should_we_append_this_room = True for each_other_room in list_of_candidate_rooms: if (self.check_these_two_rectangles_for_intersection(new_room_candidate, each_other_room) == True): should_we_append_this_room = False #print("Failed a room intersect test") if should_we_append_this_room == True: list_of_candidate_rooms.append(new_room_candidate) #print("Appended a room") if (len(list_of_candidate_rooms) < self.room_min_count): del list_of_candidate_rooms[:] #print("Gotta use a while loop eventually") # Do we? Doesn't seem like it now. ## Now create corridors linking rooms. ## The list_of_all_centerpoints is not the same as the list_of_candidate_rooms or list_of_new_corridors, but I guess it technically could be merged with a small redesign. Keeping them separate for now to preserve the conceptual history of the things. 
## Note the reason this is done after the list_of_candidate_rooms is filled is because that list gets wiped during generation if the genned number is lower than the minimum. ## Corridor generation doesn't do that, so it can append corridors as they're created. list_of_all_centerpoints = [] ## "Colors" are an abstraction used to represent the fact that each room has a connected-to-these-other-rooms quality, which is common to all of them. ## Thinking of this quality as a color makes for an easily relatable analogy. ## The list_of_room_connection_colors is going to be a list of lists which is constructed as corridors are added (eg, as rooms pass their first and only classical connection check). list_of_room_connection_colors = [] list_of_new_corridors = [] for each_room in list_of_candidate_rooms: ## Appends [centerpoint_x, centerpoint_y] to the list, so it's a 2-ple: [ [],[],[],[],[]... ] list_of_all_centerpoints.append(self.return_the_center_of_this_rectangle(upperleft_x=each_room[0], upperleft_y=each_room[1], width=each_room[2], height=each_room[3])) ## In the following for loop, all created rooms are connected to the closest other room or corridor (technically the closest centerpoint, which stores both). ## The connecton of a room involves creating precisely one vertical and one horizontal corridor attaching it to another room. ## It also involves appending the centerpoints of the two corridors and the two rooms they connect to the list_of_room_connection_colors in their proper color. ## It does NOT involve connecting colors to each other. That comes after this "first pass" of room connection. for each_room in list_of_candidate_rooms: ## The first step is to find which other room's centerpoint is the closest to the current room's centerpoint. 
alpha_room_centerpoint_x, alpha_room_centerpoint_y = self.return_the_center_of_this_rectangle(upperleft_x=each_room[0], upperleft_y=each_room[1], width=each_room[2], height=each_room[3]) the_shortest_hypotenuse_found_for_this_room = None which_centerpoint_is_closest = None for each_centerpoint in list_of_all_centerpoints: ## DEBUG #print(" each_centerpoint == %s" % (str(each_centerpoint))) ## \DEBUG beta_room_centerpoint_x, beta_room_centerpoint_y = each_centerpoint[0], each_centerpoint[1] if (alpha_room_centerpoint_x == beta_room_centerpoint_x) and (alpha_room_centerpoint_y == beta_room_centerpoint_y): ## Then they're the same centerpoint and should be skipped for this step. pass else: ## Then these centerpoints should be checked to see if they're the closest to each other as of this iteration. x_distance_between_the_two = abs(beta_room_centerpoint_x - alpha_room_centerpoint_x) y_distance_between_the_two = abs(beta_room_centerpoint_y - alpha_room_centerpoint_y) hypotenuse_distance_between_the_two = math.sqrt((x_distance_between_the_two ** 2) + (y_distance_between_the_two ** 2)) if (the_shortest_hypotenuse_found_for_this_room == None) or (hypotenuse_distance_between_the_two < the_shortest_hypotenuse_found_for_this_room): ## Then these centerpoints are in fact the closest to each other as of this iteration. the_shortest_hypotenuse_found_for_this_room = hypotenuse_distance_between_the_two which_centerpoint_is_closest = each_centerpoint ## Now that the closest room rectangle has been found, draw a corridor between it's and the current room's centerpoints: which_direction_first = random.randint(0, 1) # remember, random.randint() includes min and max values, unlike range() ## NOTE! It might be a good idea to check for intersection here and, if detected, invert which_direction_first via: which_direction_first = abs(which_direction_first - 1) ## That would make it slightly less favorable to crossed tunnels, though it should already have rather few of those. I think. 
## Redefining these terms so we can use them to create corridors. This may not be maximally Pythonic... It would be a decent candidate for refactoring. alpha_room_centerpoint_x, alpha_room_centerpoint_y = self.return_the_center_of_this_rectangle(upperleft_x=each_room[0], upperleft_y=each_room[1], width=each_room[2], height=each_room[3]) beta_room_centerpoint_x, beta_room_centerpoint_y = which_centerpoint_is_closest[0], which_centerpoint_is_closest[1] if which_direction_first == 0: new_horizontal_corridor = self.define_corridor('horizontal', alpha_room_centerpoint_x, alpha_room_centerpoint_y, beta_room_centerpoint_x, beta_room_centerpoint_y) new_vertical_corridor = self.define_corridor('vertical', beta_room_centerpoint_x, beta_room_centerpoint_y, alpha_room_centerpoint_x, alpha_room_centerpoint_y) elif which_direction_first == 1: new_horizontal_corridor = self.define_corridor('horizontal', beta_room_centerpoint_x, beta_room_centerpoint_y, alpha_room_centerpoint_x, alpha_room_centerpoint_y) new_vertical_corridor = self.define_corridor('vertical', alpha_room_centerpoint_x, alpha_room_centerpoint_y, beta_room_centerpoint_x, beta_room_centerpoint_y) ## Save the corridors: list_of_new_corridors.append(new_horizontal_corridor) list_of_new_corridors.append(new_vertical_corridor) ## Also save the corridors' centerpoints: horizontal_corridor_centerpoint = self.return_the_center_of_this_rectangle(upperleft_x=new_horizontal_corridor[0], upperleft_y=new_horizontal_corridor[1], width=new_horizontal_corridor[2], height=new_horizontal_corridor[3]) vertical_corridor_centerpoint = self.return_the_center_of_this_rectangle(upperleft_x=new_vertical_corridor[0], upperleft_y=new_vertical_corridor[1], width=new_vertical_corridor[2], height=new_vertical_corridor[3]) list_of_all_centerpoints.append(horizontal_corridor_centerpoint) list_of_all_centerpoints.append(vertical_corridor_centerpoint) ## We're going to absolutely have to ensure they're all connected. ## 1. 
I think the way to do this is to have corridors, upon creation, append their centerpoints along with their associated rooms' to a list which will later be crosschecked with the list_of_all_centerpoints. ## 2. The first room connected this way will have a color associated with it that colors the centerpoints of itself, the corridor, and the room it's connected to. ## 3. When a new corridor is created, it will check if start or end have colors associated with them and adopt it as its own color if so; if not, a new color will be created which follows this pattern. ## 4. When the map is finished creating corridors via the classical closest-centerpoints method, the color lists will be cross-checked and if any centerpoint appears in at least two color lists simultaneously, those colors are considered connected. ## 5. If this process completes and certain colors remain unconnected, the closest centerpoints in each of them will be discerned and connected to each other. ## 6. Steps 4 and 5 will iterate untill no colors remain unconnected. ## Since this is where every room has a corridor added on to it, we'll begin here. ## list_of_room_connection_colors will be a three-ple: ## [ [ [x, y], [x, y], [x, y] ], [ [x, y], [x, y], [x, y] ], ... ] ## If there are no colors yet... if len(list_of_room_connection_colors) == 0: ## Make the current room the source of the first color. new_color = [] new_color.append([alpha_room_centerpoint_x, alpha_room_centerpoint_y]) new_color.append([beta_room_centerpoint_x, beta_room_centerpoint_y]) new_color.append(horizontal_corridor_centerpoint) new_color.append(vertical_corridor_centerpoint) list_of_room_connection_colors.append(new_color) #print("\n\nlist_of_room_connection_colors == \n%s\n\n" % (str(list_of_room_connection_colors))) ## That part worked correctly the very first time! 
## Otherwise, check the list of colors for cross-results with all four centerpoints currently being considered: else: does_this_room_fit_in_any_color = False for each_color in list_of_room_connection_colors: do_these_centerpoints_connect_to_this_color = False we_can_append_alpha_room_centerpoint = True we_can_append_beta_room_centerpoint = True we_can_append_horizontal_corridor_centerpoint = True we_can_append_vertical_corridor_centerpoint = True for each_centerpoint in each_color: ## Notice the split between do_these_centerpoints_connect_to_this_color and we_can_append_foo_centerpoint ## This is because the former gates the adding of all centerpoints, and the latter gates the adding of specific centerpoints. ## Without the latter it would add too many centerpoints, creating duplicates. ## Important: This part of the function does NOT pare down colors, it can only build them up. It does try to build them up only when previous colors are insufficient, however. if ((each_centerpoint[0] == alpha_room_centerpoint_x) and (each_centerpoint[1] == alpha_room_centerpoint_y)): do_these_centerpoints_connect_to_this_color = True we_can_append_alpha_room_centerpoint = False if ((each_centerpoint[0] == beta_room_centerpoint_x) and (each_centerpoint[1] == beta_room_centerpoint_y)): do_these_centerpoints_connect_to_this_color = True we_can_append_beta_room_centerpoint = False if ((each_centerpoint[0] == horizontal_corridor_centerpoint[0]) and (each_centerpoint[1] == horizontal_corridor_centerpoint[1])): do_these_centerpoints_connect_to_this_color = True we_can_append_horizontal_corridor_centerpoint = False if ((each_centerpoint[0] == vertical_corridor_centerpoint[0]) and (each_centerpoint[1] == vertical_corridor_centerpoint[1])): do_these_centerpoints_connect_to_this_color = True we_can_append_vertical_corridor_centerpoint = False if do_these_centerpoints_connect_to_this_color == True: if we_can_append_alpha_room_centerpoint == True: each_color.append([alpha_room_centerpoint_x, 
alpha_room_centerpoint_y]) if we_can_append_beta_room_centerpoint == True: each_color.append([beta_room_centerpoint_x, beta_room_centerpoint_y]) if we_can_append_horizontal_corridor_centerpoint == True: each_color.append(horizontal_corridor_centerpoint) if we_can_append_vertical_corridor_centerpoint == True: each_color.append(vertical_corridor_centerpoint) does_this_room_fit_in_any_color = True if does_this_room_fit_in_any_color == False: ## I thiiiiiink there's going to be a slight problem with needing more than one pass to connect colors, after these steps are done. ## Limited by the number of colors, but still, would be nice to narrow that down to get minimum runtime and maximum cleanness. the_newly_required_color = [] ## This absolutely cannot be the best way to do this kind of checking, but the tutorials didn't tell me any better way, and this way certainly works. ## It's also very flat and readable. ## ... ## It also saved me a whole lot of processing time, from the looks of my debug statements. ## ... ## I think this was mistakenly placed. Newly-required colors should not have to check for duplicates again, since this happened in the preceding part of this conditional tree -- see just above here. ## Commenting it all for rumination and debugging purposes. 
#we_can_append_alpha_room_centerpoint = True #we_can_append_beta_room_centerpoint = True #we_can_append_horizontal_corridor_centerpoint = True #we_can_append_vertical_corridor_centerpoint = True #for each_other_color in list_of_room_connection_colors: # for each_other_centerpoint in each_other_color: # if each_other_centerpoint == [alpha_room_centerpoint_x, alpha_room_centerpoint_y]: # we_can_append_alpha_room_centerpoint = False # if each_other_centerpoint == [beta_room_centerpoint_x, beta_room_centerpoint_y]: # we_can_append_beta_room_centerpoint = False # if each_other_centerpoint == horizontal_corridor_centerpoint: # we_can_append_horizontal_corridor_centerpoint = False # if each_other_centerpoint == vertical_corridor_centerpoint: # we_can_append_vertical_corridor_centerpoint = False #if we_can_append_alpha_room_centerpoint == True: the_newly_required_color.append([alpha_room_centerpoint_x, alpha_room_centerpoint_y]) #if we_can_append_beta_room_centerpoint == True: the_newly_required_color.append([beta_room_centerpoint_x, beta_room_centerpoint_y]) #if we_can_append_horizontal_corridor_centerpoint == True: the_newly_required_color.append(horizontal_corridor_centerpoint) #if we_can_append_vertical_corridor_centerpoint == True: the_newly_required_color.append(vertical_corridor_centerpoint) #if len(the_newly_required_color) != 0: list_of_room_connection_colors.append(the_newly_required_color) #print("pre-step: list_of_room_connection_colors == ") #for each_color in list_of_room_connection_colors: # print(" " + str(each_color)) # print("|||||||||||||||||||||||") ## The rooms are placed, connected with classical, first-pass corridors, and the initial color lists have been established. ## Next we must winnow down the color lists to the bare minimum, since there will be some colors that are connected which were not recognized as such in the establishment pass. 
''' print("PRE-STEP: list_of_room_connection_colors == ") for each_color in list_of_room_connection_colors: print(" " + str(each_color)) print("|||||||||||||||||||||||") ''' ## The next step is to connect the disconnected colors. ## ## This will be accomplished by: ## 1. Taking the color in the list_of_room_connection_colors at index 0 (hereafter "color alpha"), finding its centerpoint and comparing it to all other colors' centerpoints, saving: ## a. the centerpoint of color alpha ## b. the centerpoint of the closest color to color alpha (hereafter "color beta") ## c. a list containing all the centerpoints inside the color beta (eg, identical to color beta at the time of its discovery) ## 2. Finding the room inside color alpha which is closest to 1.b. ## 3. Finding the room inside color beta which is closest to 1.a. ## 4. Connecting 2. and 3. with an L-corridor and adding its two components' centerpoints to color gamma ## 5. Appending every room centerpoint in color alpha and color beta to color gamma ## 6. Appending color gamma to the next pass's new color list ## 7. If any colors remain, appending every color not in alpha, beta, or gamma to the next pass's color list ## 8. If the next pass's color list length is greater than 1, repeat this process, starting at step 1. ## Number of passes is just the length of the color list, since this algorithm is guaranteed to step through each and every color, connecting them individually. for each_remaining_color in range(0, len(list_of_room_connection_colors)): ## Giant conditional. 
if (len(list_of_room_connection_colors) > 1): color_alpha = list_of_room_connection_colors[0] ## Figure out the average centerpoint of color alpha: color_alpha_average_x_stack = 0 color_alpha_average_y_stack = 0 for each_color_alpha_centerpoint in color_alpha: color_alpha_average_x_stack += each_color_alpha_centerpoint[0] color_alpha_average_y_stack += each_color_alpha_centerpoint[1] color_alpha_average_x = (color_alpha_average_x_stack // len(color_alpha)) color_alpha_average_y = (color_alpha_average_y_stack // len(color_alpha)) the_shortest_hypotenuse_found_for_this_color = None #the_closest_beta_average_x = None #the_closest_beta_average_y = None ## Yes, we have to use the index value for this, since equality checking lists doesn't seem to work, based on previous experiments. for each_beta_color_index in range(1, len(list_of_room_connection_colors)): color_beta = list_of_room_connection_colors[each_beta_color_index] color_beta_average_x_stack = 0 color_beta_average_y_stack = 0 for each_beta_centerpoint in color_beta: color_beta_average_x_stack += each_beta_centerpoint[0] color_beta_average_y_stack += each_beta_centerpoint[1] color_beta_average_x = (color_beta_average_x_stack // len(color_beta)) color_beta_average_y = (color_beta_average_y_stack // len(color_beta)) x_distance_between_the_two_colors = abs(color_beta_average_x - color_alpha_average_x) y_distance_between_the_two_colors = abs(color_beta_average_y - color_alpha_average_y) hypotenuse_distance_between_the_two_colors = math.sqrt((x_distance_between_the_two_colors ** 2) + (y_distance_between_the_two_colors ** 2)) if (the_shortest_hypotenuse_found_for_this_color == None) or (hypotenuse_distance_between_the_two_colors < the_shortest_hypotenuse_found_for_this_color): ## Then these centerpoints are in fact the closest to each other as of this iteration. 
the_shortest_hypotenuse_found_for_this_color = hypotenuse_distance_between_the_two_colors which_color_is_closest = color_beta which_beta_color_index_is_closest = each_beta_color_index ## Do I need to make these globals? Is python telling me to refactor my function into a zillion impossible to track tiny functions? =/ the_closest_beta_average_x = color_beta_average_x the_closest_beta_average_y = color_beta_average_y ## Now that we've found the beta color with an average centerpoint closest to the alpha color's average centerpoint, we need to find the alpha room centerpoint closest to the beta average centerpoint: the_shortest_hypotenuse_found_between_alpha_room_and_beta_average = None for each_color_alpha_centerpoint in color_alpha: x_distance_between_this_alpha_room_and_the_beta_average = abs(the_closest_beta_average_x - each_color_alpha_centerpoint[0]) y_distance_between_this_alpha_room_and_the_beta_average = abs(the_closest_beta_average_y - each_color_alpha_centerpoint[1]) hypotenuse_distance_between_alpha_room_and_beta_average = math.sqrt((x_distance_between_this_alpha_room_and_the_beta_average ** 2) + (y_distance_between_this_alpha_room_and_the_beta_average ** 2)) if (the_shortest_hypotenuse_found_between_alpha_room_and_beta_average == None) or (hypotenuse_distance_between_alpha_room_and_beta_average < the_shortest_hypotenuse_found_between_alpha_room_and_beta_average): the_shortest_hypotenuse_found_between_alpha_room_and_beta_average = hypotenuse_distance_between_alpha_room_and_beta_average which_alpha_centerpoint_is_closest_to_beta_average = each_color_alpha_centerpoint ## Mirror the above process to find the beta room with a centerpoint closest to the alpha color's average centerpoint: the_shortest_hypotenuse_found_between_beta_room_and_alpha_average = None for each_color_beta_centerpoint in which_color_is_closest: x_distance_between_this_beta_room_and_the_alpha_average = abs(color_alpha_average_x - each_color_beta_centerpoint[0]) 
y_distance_between_this_beta_room_and_the_alpha_average = abs(color_alpha_average_y - each_color_beta_centerpoint[1]) hypotenuse_distance_between_beta_room_and_alpha_average = math.sqrt((x_distance_between_this_beta_room_and_the_alpha_average ** 2) + (y_distance_between_this_beta_room_and_the_alpha_average ** 2)) if (the_shortest_hypotenuse_found_between_beta_room_and_alpha_average == None) or (hypotenuse_distance_between_beta_room_and_alpha_average < the_shortest_hypotenuse_found_between_beta_room_and_alpha_average): the_shortest_hypotenuse_found_between_beta_room_and_alpha_average = hypotenuse_distance_between_beta_room_and_alpha_average which_beta_centerpoint_is_closest_to_alpha_average = each_color_beta_centerpoint ## Now that we have the alpha and beta room centerpoints closest to each other, connect them with a corridor. which_direction_first = random.randint(0, 1) # remember, random.randint() includes min and max values, unlike range() if which_direction_first == 0: new_horizontal_corridor = self.define_corridor('horizontal', which_alpha_centerpoint_is_closest_to_beta_average[0], which_alpha_centerpoint_is_closest_to_beta_average[1], which_beta_centerpoint_is_closest_to_alpha_average[0], which_beta_centerpoint_is_closest_to_alpha_average[1]) new_vertical_corridor = self.define_corridor('vertical', which_beta_centerpoint_is_closest_to_alpha_average[0], which_beta_centerpoint_is_closest_to_alpha_average[1], which_alpha_centerpoint_is_closest_to_beta_average[0], which_alpha_centerpoint_is_closest_to_beta_average[1]) elif which_direction_first == 1: new_horizontal_corridor = self.define_corridor('horizontal', which_beta_centerpoint_is_closest_to_alpha_average[0], which_beta_centerpoint_is_closest_to_alpha_average[1], which_alpha_centerpoint_is_closest_to_beta_average[0], which_alpha_centerpoint_is_closest_to_beta_average[1]) new_vertical_corridor = self.define_corridor('vertical', which_alpha_centerpoint_is_closest_to_beta_average[0], 
which_alpha_centerpoint_is_closest_to_beta_average[1], which_beta_centerpoint_is_closest_to_alpha_average[0], which_beta_centerpoint_is_closest_to_alpha_average[1]) ## Save the corridors: list_of_new_corridors.append(new_horizontal_corridor) list_of_new_corridors.append(new_vertical_corridor) ## And now merge alpha and beta colors and delete beta color: ## DEBUG #print("which_color_is_closest == %s" % (str(which_color_is_closest))) ## \DEBUG for each_beta_centerpoint in which_color_is_closest: color_alpha.append(each_beta_centerpoint) ## The following line wasn't sufficient, since it only deleted this call-by-value variable rather than the call-by-reference pointer I was pretending it would be. Obvious in hindsight. #del which_color_is_closest ## Fortunately the index was already available! See above. del list_of_room_connection_colors[which_beta_color_index_is_closest] ## And now everything works as perfectly as I knew it would. ''' print(" POST-STEP: list_of_room_connection_colors == ") for each_color in list_of_room_connection_colors: print(" " + str(each_color)) print("|||||||||||||||||||||||") ''' ## Having generated enough rooms and corridors, create the map: the_dungeon_map = [] for each_row in range(0, self.map_height): new_row = [] for each_column in range(0, self.map_width): new_row.append(0) the_dungeon_map.append(new_row) ## Write the rooms... for each_successful_room_candidate in list_of_candidate_rooms: for each_room_height_unit in range(0, each_successful_room_candidate[3]): for each_room_width_unit in range(0, each_successful_room_candidate[2]): the_dungeon_map[(each_successful_room_candidate[1] + each_room_height_unit)][(each_successful_room_candidate[0] + each_room_width_unit)] += 1 ## and write the corridors: for each_corridor in list_of_new_corridors: #print("Attempting to write a corridor...") for each_corridor_height_unit in range(0, each_corridor[3]): for each_corridor_width_unit in range(0, each_corridor[2]): ## Check for uncarved space. 
When defining corridors this is the simplest way to go about it since the debug mauve is only really important for debugging room generation. if the_dungeon_map[(each_corridor[1] + each_corridor_height_unit)][(each_corridor[0] + each_corridor_width_unit)] == 0: the_dungeon_map[(each_corridor[1] + each_corridor_height_unit)][(each_corridor[0] + each_corridor_width_unit)] += 1 return the_dungeon_map
en
0.851581
Fractal noise map generator library. Also includes additional non-"noise" map generators for dungeon generation purposes. The reason for putting them in is that I want the games I make to be able to use very similar code for all the different types of maps I need. Ensuring a high level of cross-compatibility at the generator level may enhance creativity later on. Each generator is an object built by a class specific to that type of generator. All generators SHOULD have enough defaults to require only a handful of arguments (tailored to your display needs) before they're popping out noiseclouds. The most useful generators are the Perlin generator and the Mk II dungeon map generator, but they all have their own unique capabilities. Generators currently include: PlasmaFractalGenerator() PerlinNoiseGenerator() SimplexNoiseGenerator() DungeonMapGenerator() RoomFilledMapGenerator() MarkIIDungeonMapGenerator() Of these, the Simplex generator is the most technically complex but is theoretically faster at creating a noise map than the Plasma and Perlin generators. It's not clear whether my implementation is even close to optimized for speed, though. I don't yet know enough about python/C integration to try speeding it up. The Perlin generator returns the best-looking terrain maps, possibly tied with the Simplex generator. They both require some fiddling with generator input parameters to get better-looking results. The plasma generator has some gridwards bias, but it too produces decent noise clouds, as long as you don't look too closely or get too unlucky. It was the first noise generator I made, before I realized I wanted to make all the parameters of the various generators more similar to each other. I might go back and change it to that at some point, but I have no especial reason to given its technical inferiority to the simplex and Perlin generators. 
TerrainMapGenerators contains noise generators and "dungeon map generators," which are more like signal than noise, as they return maps full of rooms and corridors illustrated using two Z values (0 and 1). The DungeonMapGenerator produces randomly placed rectangular rooms that all connect to each other using L-shaped corridors daisy chained from one room's centerpoint to the next, in the order of room placement. This algorithm was inspired by/copied from the libtcod roguelike tutorial at < http://www.roguebasin.com/index.php?title=Complete_Roguelike_Tutorial,_using_python%2Blibtcod,_part_1 >. The RoomFilledMapGenerator creates maps packed full of rectangular rooms. Has significant bias and no connecting corridors. I didn't really like the direction it was going in, but it can probably be turned into something decent with some upgrading and tweaking. The MarkIIDungeonMapGenerator is my favorite one so far. It produces maps wherein the rooms are connected in a branching pattern such that dungeons have "wings" which can be quite lengthy and significantly subdivided. Note that the dependencies do NOT include pygame, even though the display program I created for demonstrations does. #### Classes #### Create a fractal generator that returns a list of [x, y, z] elements, each consisting of three floating point values: x, y and z coordinates for constructing a plasma fractal for use as a noise map. ## The root of the array (its square root, or side measurement): ## Save the width and height of the map as state. We'll be using this to construct a new map to hold the plasma fractal in a method designed for this purpose. ## Init the plasma fractal's handler, the noise array, as None: ## Min and max values for randomly generated corner Z values: ## The range of randomness that can be applied to each midpoint displacement. ## Usual supplied values have a negative min and a positive max. 
## The distance at which the fractal stops subdividing itself and returns a value for the next least coordinate point ( 1.004 --> 1, 1.000 --> 1, 0.996 --> 0 etc if min_sep_dist is 1). ## Corners' initial zee values, can be set manually in __init__() parameters: ## Someone might want the corners to be preset values, so check if they didn't at the time of initialization. ## ... ## This section may be a candidate for refactorization in the future, with the addition of parameters to reinitialize_corners() This function is the gateway function to generate_plasma(). ## This section necessitated by the combination of my desire to make generate_noise() callable with arbitrary arguments and Python's refusal to accept self.foo as parameters for a method. ## Remember, no call to self in the parameters when a method is calling another method. The definition of the second method will invoke its own self, don't worry. :p ####print(" Debug: self.saved_noise_array == ") #for each in self.saved_noise_array: # ###print(" " + str(each)) ## Now convert that giant list into a tuple with the same ordering as the PerlinNoiseGenerator's results. # y ## Fill the array_to_return with rows full of -1s so we only have to iterate through it once in the next step! # x ####print(" Debug: array_to_return == " + str(array_to_return) + "\n") ## Round down x and y since the values are probably all floats. ## This will ALMOST CERTAINLY give me bad results and I'm gonna have to change something, maybe cleverer rounding?? ## I may have to round up and down more precisely than int() depending on exactly what ends up happening with the results. :S ## EDIT: The following is probably not the best way to do this. I added the -1 overwrite procedure instead. ## ... ## Complicated syntax is actually very shallow conceptually. 
## array[a].insert([b], [c]) ## a == the rounded down y value of the cell ## b == the rounded down x value of the cell ## c == the floating point z value of the cell ## Rounding is currently being done by int() calls, this may very well be a bad idea. See above note. ## All index variables are referenced by their index number in each_cell; hence the square brackets. array_to_return[int(each_cell[1])].insert(int(each_cell[0]), each_cell[2]) ####print(" Debug: each_cell == " + str(each_cell)) ####print(" each_cell[0] == " + str(each_cell[0])) ####print(" each_cell[1] == " + str(each_cell[1])) ####print(" each_cell[2] == " + str(each_cell[2])) ## The syntax is now: ## array[y][x] = z ## where y, x and z are extracted from their respective indices in each_cell. ## Rounding is once again involved at this step. See above notes in this method. ## DEBUG: Testing -1 to see if it always rounds one way or does a split at 0.5 ## If this line is left out the generator will use the same corner values and make a whole new map between them. ## Remember, self.reinitialize_corners() can be called in the main program. #self.reinitialize_corners() ## This method is intended to be called by self.generate_noise() ## The results of calling this separately from self.generate_noise() will be a long list of [x, y, z] values rather than a tuple with the form ( array[y][x] == (z) ). Recursively supply [x, y, z]-formatted plasma fractal elements to self.saved_noise_array, as called by self.generate_noise() ## This step must happen during this part of the conditional tree. Not after the else! ## Create midpoint's zee by averaging corners' zees and mixing in the random_midpoint_displacement: ## Deduce sides' zees: ## Recursion. Note this happens inside the earlier if statement. The alternative is not recurring at this call, and instead returning a value. 
## When the distance between the corners drops below the minimum separation distance, create an [x, y, z] cell and return it up the chain: ## The generator saves its noise-map state: Returns a tuple of [parameter 2] lists each containing [parameter 1] randomly generated integer numbers between the generator's configured minimum and maximum values, fractally smoothed as Perlin noise using a frequency of [parameter 3] and an octave count of [parameter 4]. ## Octaves? ## It's used for calling turbulence(), which considers that parameter to be "size". ## The original function declared that changing octaves changes how far in or out from the noise you are zoomed. ## Which seems like a decent interpretation of the results. ## Raising the frequency makes it spikier (which is reminiscent of zooming out). ## Raising the octaves makes it smoother (which is reminiscent of zooming in). ## Note that keeping the ratios of frequency to octaves the same will keep the results looking similar! ## For this reason I recommend using small octave values, since that governs the recursor runtime. ## First, clear the currently saved noise map: ## ... ## actually self.noise_array is used internally to the generator's function and does not save the actual noise map. ## Interesting, that. ## Now assign this NoiseGenerator's current noise_width and noise_height to the values supplied by the function call parameters: ## Note that the NoiseGenerator saves these as state because they need to be referenced in the sub-functions below. ## Initializing the noise_array with random numbers. ## This for loop provides the raw random data smeuthanized into a pretty, pretty vapor cloud further in the program. ## Create a bunch of rows, equal in number to self.noise_height... ## ... and fill them with randint()s equal in number to self.noise_width: ## Attach each row to the noise_array. ## The generator's noise_array should now be full of rows which are full of integers. ## The noise_array isn't the finished product. 
It's used to create it, in the below functions. ## Turbulating the noise array ## ## Note: Frequency is rolled into the parameters here! ## NOTE that the NoiseGenerator does NOT save the result as state. ## It hands it off to whatever called its generate_noise() function. ## This is where this generator's entire function chain ends: ## noise_value is "built up" by smooth_noise(): ## Floats it: ## This is kind of like fractally splitting a grid, except it just sort of "resonates" itself in half and applies noise smoothening or something. Octaves. ## Add it to the noise_value pile: ## Paring down the size... iterating downwards... ## Order of Operations suggests division before multiplication, so: ## ??? ## Experiment to figure out what it does! o_o ## ... ## Biases the resulting z values to average out at this number: Return the average value of the 4 neighbors of the point (x, y) from self.noise_array. ## NOTE! self.noise_array is a cranny full of state used for THIS FUNCTION ONLY. ## The following is necessary because of modulo calls further down that would ignore it, but it needs to be saved. ## Get the trailing part of the floats of x and y: ## I think the -1 is to compensate for the fractional_element_of_foo being extracted earlier. ## Remember, that fractional_element is added back in below. ## Apart from that, this is exactly the same as \ ## figuring out the length of a line between \ ## (x1, y1) and (x2, y2) in a noise plane. ## Or something like that. Surely. ## Take NOTE of the use of self.noise_array below... ## It's the place it really matters in this ridiculous three-function chain, \ ## even though it's stored at the object level. ## Begin the cooking process by taking out a bowl. 
## Place inside the bowl the fractional element of X times the fractional element of Y times the noise value at location (y1, x1) ## Next, stir in the fractional element of X times (one minus the fractional element of y) times the noise value at location (y2, x1) ## Sprinkle liberal amounts of (one minus the fractional element of X) times the fractional element of Y times the noise value at location (y1, x2) ## Line baking pan with a mixture of (one minus the fractional element of X) times (one minus the fractional element of Y) times the noise value at location (y2, x2) ## I'm not yet sure how adding four things and then not dividing by four returns the AVERAGE value of the four neighbors of point (x, y) in the noise array. (Maybe it's already taken into account?) ## But slap that pan in the oven and let it burn for 0.002 ms. ## These things are true for every instance of this class and does not require re-initting. ## I don't really know what's going on here. <NAME>. ## ... ## The way it's referenced suggests that grad3 is an ordered list of simplex vertices. ## gi0/gi1/gi2 gives numbers that somehow map to indices of this list via a quite arcane mathemagical cantrip with no justification given. See below in the noise generator. ## I'm just gonna interpret all those Grad objects as simple boxes for vertex coordinates. ## ... ## Wow I think they actually decided not to include a grad2 table because the mapping for grad3 technically works for grad2 too. ## Wow. ## I'm gonna go ahead and make a grad2 table based on my interpretation of what is going on here. ## Nooope does not make more sense now. ## I'm going to put all of my trust in the implicit knowledge of the Java coder here. ## Just going to assume using the first two columns of the grad3 table works. ## It probably should, given that in grad2, there are precisely 4 instances of each specific value across the table, in varying combinations. ## So even though there are repeats I guess it still works somehow?! 
## Maybe the fact there's some modulus going on ensures the repeated indices get skipped or something? ## The next section initializes the skewing and unskewing factors. ## I looked in the Java code and these are just constants. ## They should probably be called in preprocessing somehow, maybe at the top of this module... ## But I want the generators to be able to whip out new worlds at high speeds... ## So it's either top of the module, presolved here, or it takes too long. Choose one. #F2 = 0.3660254037844386 # 0.5*(Math.sqrt(3.0)-1.0); <-- IDLE gives me 0.3660254037844386 instead of what I had -- the lower-precision 0.36602540378 ... I clearly made a mistake while putting the formula into Google as an impromptu calculator substitute. Whatever, I hadn't considered putting the stuff in the base class at that time. #G2 = 0.21132486540518713 # (3.0-Math.sqrt(3.0))/6.0; apparently I copied it incorrectly. I had 2.71132486541 before I changed it to 0.21132486540518713 ## Trying out the math module for debugging purposes and it sort of makes it better anyways? ## There's a fastfloor algorithm in the Java code. ## Whether or not any algorithm modifications like this in Python might help is currently beyond me and beyond my needs to implement this generator. ## I'm skipping that. ## The following section initializes self.noise_array. ## NOTE: The Java example just runs through the same list twice -- in Python this approach makes index errors with all the easy ways to do that behavior, so I'm using a separate Python implementation's technique of repeating the list twice, instead. ## The list contains every integer from 0 to 255. ## Prep the noise_array variable for subsequent randomization. Remember, this is the __init__() for the generator. Things have to be initialized somewhere. ## Hash number is a variable because someone might think to make the seed some other number than 255 and would want to change the hash to match. 
## in Java I think you need to explicitly set the size of the array; not so in Python #self.permutations_table_Mod12 = [] ## Randomize the seed distribution (CURRENTLY DEBUGGING): ## This may only be called after self.hash_number has been established. #self.generate_permutations_table() ## Note that this presumes generate_permutations_table() will never be called when self.noise_array is zeroed out for regeneration or only half its normal size, which ought to always be the case. ## The reasoning behind this is somewhat complex. It has to do with there being 512 numbers in the noise array... ## which is constructed by taking the 256 numbers in the initial noise array and putting them in again on the end in the same order. ## The noise array is that doubled size because of some sort of wraparound thing the simplex grid needs, I think. ## But for the hash number, it needs to be 255 if there are 256 distinct values. Why, I forget, but it's somewhere in the logic behind needing a permutations_table. ## I changed 512 to 256 because it was giving me "list index out of range" ## This was probably not a good idea but I'll figure out why once it gives me more meaningful results with its errors ## ... ## Comparing the java and python versions convinced me it should be the other way. Their tables are both supposed to be 512. ## ... ## Made hash number changeable. ## Note that the permutations_table must be the size of the noise_array, since it is the table of permutations of that noise array's values, with a 1:1 correspondence (bijection??) ## 255 was what the Java code said, but 256 produces non-errored results. Why would it be 255 any not 256 anyways? Very strange! Is python's method of storing data really that different from Java's? Can a short in Java only be positive? Can a list in python only be positive?! Argh... ## ... ## Just use 255. The Python implementation doesn't have a Mod12 table... 
## I don't even know if it'll be faster, since I have to rehash everything every time I regenerate the array, anyways. ## It's entirely possible the second table for modulus results is actually wasteful rather than helpful. Idk. ## ... ## In fact I think it raises bad new problems in Python, given my perhaps mistaken instinct to use self.noise_array in building the mod table rather than permutations_table which I think is what's supposed to be used in the Java program... ## I'm going to drop the mod table and leave it here as evidence of my thought processes, for at least this version. ## Does not currently support random seeds. >_> ## The parameter is there to inspire you to write it in, of course! ## I bet handlers are un-Pythonic for some convoluted reason nobody bothered to explain to me. #noise_array_seed_handler = self.noise_array_seed ## ^--- This didn't work because the assignment operation here made changes to noise_array_seed_handler propagate to self.noise_array_seed... ## Which is undesirable for repeated randomizations. ## Sooooo, instead, copy more explicitly: ##print("DEBUG: noise_array_seed_handler == %s" % (str(noise_array_seed_handler))) ## This whole function is about shuffling order of the noise array's contents while keeping the individual values of its contents the same. ## + 1 because it needs to include the final index. ## ... ## Except random.randint DOES NOT work like range(x, y) -- it includes the zeroeth index and the maximum index, I think. Whyyyyy did they make it inconsistent! ## ... ## It's even worse -- it gave me out-of-range errors when it was simply nothing added or subtracted, too. Have to do - 1 to make it randomize properly. ## This is something that should really be investigated when this routine is next improved. ## DEBUGGING ##print("len(noise_array_seed_handler) == %d\n which_number_to_pick == %d" % (len(noise_array_seed_handler), which_number_to_pick)) ## Put the number at that index into the new_noise_array. 
## Remove the number at that index from the list so this doesn't go on forever. ## The last one doesn't need and doesn't want to be randinted into existence. ## Out with the old... ## ... and in with the new: ## DEBUG ##print(" Debug: self.noise_array == %s" % (str(self.noise_array))) ## /DEBUG ## The randomization call should be callable on its own, so include this to make it the proper length: ## This part is required because the permutations table draws from the noise array and is critical to making a new noise map. Forgot about that after taking a few days' break. ## Always call generate_permutations_table() when the noise array is full and doubled. ## Clean the list references: ## ... ## I think this step is likely to be unnecessary, but it rules out one problem I thought my current issue could have been. ## Uses the supplied argument to construct a more Python-friendly way of handling the simplex noise seed. ## This function supports the creation (and re-creation) of the noise array. Called in the noise generator's __init__() and reseed_noise() methods. ## I got an out-of-memory error when trying to call this on itself. ## It just kept reading the array after it added all the numbers and looped endlessly. Oops. ## Time to break out the handlers! ## ... ## After changing the range number to 3 and 1, it seems to not actually care about being doubled. o_o ## Some day I'll know how simplex noise works. Eventually. ## Untill then, we move onwards with the cargo cult programming boilerplate. ## here I think I need to figure out what the grad class does ## ... ## I think it's just an object with a list of coords in it, like a MapTile. ## Maaaaaybe. From << http://code.google.com/p/battlestar-tux/source/browse/procedural/simplexnoise.py >> " 2D Multi-Octave Simplex noise. For each octave, a higher frequency/lower amplitude function will be added to the original. The higher the persistence [0-1], the more of each succeeding octave will be added. 
" # -_- # " We have to keep track of the largest possible amplitude, # because each octave adds more, ad we need a value in [-1, 1]. " ## max_amplitude is also what the total is divided by at the end. ## This implies amplitude is some sort of average over all the iterations. ###print(" (total_noise_for_this_cell / max_amplitude) == " + str((total_noise_for_this_cell / max_amplitude))) The gateway function for generate_octaved_noise(), this function makes sure the noise values are formatted according to the (array[y][x] == z) format used by my MapTile constructor. ## IMPORTANT! I think there should be a reinitialize_noise_array() function called here. ## That function would reshuffle or maybe change the hash value on the noise_array (the permutations table, per other sources). ## ... ## Ooooor maybe that should be optional, because we might want to generate a map from a specific hash. ## I know: The hash should be changeable as a function outside this one that is invoked by the main program, like "rerandomized generator". ## This function should also apply to the other generators too... ## Perlin will be similar, plasma will be more of a hack involving saving state and giving that out unless a reset is requested, maybe? ## Or perhaps plasma will be just the same and I'm forgetting something about the RNG calls there. ## Check that. ## ... ## randomize_the_noise_array_seed() now handles randomization for this generator. ## It may be supplied with a random seed... but only if some moxie-filled programmer supplies it with the ability to do that, first! ## DEBUG ##print("\n Generating new array of simplex noise . . .\n") ## /DEBUG ###print(" new_z_value == " + str(new_z_value)) ##print("\n New array of simplex noise has been generated.\n") ## DEBUG ##print(" array_to_be_returned == %s" % (str(array_to_be_returned))) ## /DEBUG ## After some review... 
## The "skewing" is just multiplying the coord numbers by a constant so that everything we want to do on an x,y Cartesian board gets translated onto a simplex board. ## i and j are the "coordinates" when translated into simplexese. ## t uses G2 because it can't just do subtraction from the already-worked s value baked into i and j. ## t is, I think, the Cartesian midpoint coordinate. ## So essentially all the s, i, j, t, x0, y0 defining-lines are about getting simplex-to-Cartesian and vice versa translations. ## "Skew the input space to determine which simplex cell we're in" # they also said something about "hairy skew factor" ... wat. # how is this supposed to work?! ## I THINK the values of x and y are always 0 or 1... (?) ## Which would be how all of these things can just add and subtract from eachother sensibly. ## Maybe?!? This IS what I'm trying to find out by translating it from Java... ## It isn't magic programming if I'm actually trying to understand how it works! ## "Unskew the cell origin back to (x, y) space" <--- "(x, y) space" means the whole Cartesian square coordinate thing, rather than... simplex-adjusted coordinates. ## "The x,y distances from the cell origin" <--- by x,y they mean Cartesian rather than simplex-ian ## "For the twodee case, the simplex shape is an equilateral triangle." ## "Determine which simplex we are in." ## i1, j1 are "offsets for second (middle) corner of simplex in (i, j) coords" ## It's basically going top, right, bottom along the triangle, if I understand correctly. 
## "lower triangle, XY order: (0, 0) --> (1, 0) --> (1, 1)" ## "upper triangle, YX order: (0, 0) --> (0, 1) --> (1, 1)" ## " A step of (1,0) in (i,j) means a step of (1-c,-c) in (x,y), and ## a step of (0,1) in (i,j) means a step of (-c,1-c) in (x,y), where ## c = (3-sqrt(3))/6 " ((c == G2)) ## "Offsets for second (middle) corner of simplex in (x,y) unskewed coords" ## "Offsets for last corner in (x,y) unskewed coords" # Why do people think not using parens on math is a good idea? # I don't care about OoP. It's just sensible to give punctuation to that sort of thing. Someone COULD easily make a mistake, but with punctuation you trade the reader's interpretation time for safety, which is far better, imo. ## "Work out the hashed gradient indices of the three simplex corners" ## I think th ## ... ## I don't know why they would bother hashing it with 255. ## Why does that even matter? Why not just do the operations on the base numbers? ## It was 255 in the Java. ## But I have no idea how that was supposed to work. Isn't it supposed to be 256 anyways? ## ... the Python code I saw also uses 255 and had the 512 permutations buffer thing fixed by copying the array onto itself, which is what I'm gonna use, so I'll try the 255 thing again too. ## NOTE: All of the following in this commented block is tainted by my mistaken mod table. ## It was probably the reason I went through such trouble to debug it this way. Blah. 
####print(" Gradient DEBUG:\n index of self.permutations_table[jj] == " + str(jj)) ####print(" Gradient DEBUG:\n index of self.permutations_table_Mod12[(ii + self.permutations_table[jj])] == " + str((ii + self.permutations_table[jj]))) ####print(" Gradient DEBUG:\n gradient_i_zero == " + str(self.permutations_table_Mod12[(ii + self.permutations_table[jj])]) + "\n") #gradient_i_zero = self.permutations_table_Mod12[ii + self.permutations_table[jj]] ####print(" Gradient DEBUG:\n index of self.permutations_table[(jj+j1)] == " + str((jj+j1))) ####print(" Gradient DEBUG:\n index of self.permutations_table_Mod12[(ii + i1 + self.permutations_table[(jj+j1)])] == " + str((ii + i1 + self.permutations_table[(jj+j1)]))) ####print(" Gradient DEBUG:\n gradient_i_one == " + str(self.permutations_table_Mod12[(ii + i1 + self.permutations_table[(jj+j1)])]) + "\n") #gradient_i_one = self.permutations_table_Mod12[ii + i1 + self.permutations_table[jj+j1]] ####print(" Gradient DEBUG:\n index of self.permutations_table[(jj+j1)] == " + str((jj+j1))) ####print(" Gradient DEBUG:\n index of self.permutations_table_Mod12[(ii + 1 + self.permutations_table[(jj+1)])] == " + str((ii + 1 + self.permutations_table[(jj+1)]))) ####print(" Gradient DEBUG:\n gradient_i_two == " + str(self.permutations_table_Mod12[(ii + 1 + self.permutations_table[(jj+1)])]) + "\n") #gradient_i_two = self.permutations_table_Mod12[ii + 1 + self.permutations_table[jj+1]] ## Note that the 1 constants are balanced with omitted 0 constants in the lines with "missing" elements. ## "Calculate the contribution from the three corners" # I really wish people would use parens in all multi-operator statements. 
## " (x,y) of grad3 used for twodee gradient " ###print("\n DEBUG:\n t0 == " + str(t0) + "\n twodee_dot_product == " + str(self.twodee_dot_product(self.grad3[gradient_i_zero], x0, y0))) ###print("\nDEBUGGING x0 == " + str(x0)) ###print("DEBUGGING x1 == " + str(x1)) ###print("DEBUGGING x2 == " + str(x2)) ###print("\nDEBUGGING y0 == " + str(y0)) ###print("DEBUGGING y1 == " + str(y1)) ###print("DEBUGGING y2 == " + str(y2)) ###print("\nDEBUGGING (x2 * x2) == " + str((x2 * x2))) ###print("DEBUGGING (y2 * y2) == " + str((y2 * y2))) ###print("DEBUGGING ((x2 * x2) - (y2 * y2)) == " + str((x2 * x2) - (y2 * y2))) ###print("DEBUGGING (0.5 - ((x2 * x2) - (y2 * y2))) == " + str((0.5 - ((x2 * x2) - (y2 * y2))))) ## Apparently some clown thought it would be funny to allow order of operations to work all screwy in Java, or maybe someone sabatoged the code I was looking at. ## I really couldn't guess why, but this was the original code, written in Java: ## double t2 = 0.5 - x2*x2-y2*y2; ## There were no parentheses anywhere there. ###print("DEBUGGING t0 == " + str(t0)) ###print("DEBUGGING t1 == " + str(t1)) ###print("DEBUGGING t2 == " + str(t2)) ## I think I understand it now! the t's are ticking down like octaves in the perlin generator, or something? ## hrm it's multiplying, not dividing, so it couldn't get below zero that way unless it already was negative. >< ## Nevermind. Still don't understand it yet. ## "Add contributions from each corner to get the final noise value." ## "The result is scaled to return values in the interval [-1, 1]." ###print("\nDEBUGGING n0 == " + str(n0)) ###print("\nDEBUGGING n1 == " + str(n1)) ###print("\nDEBUGGING n2 == " + str(n2)) ####print("\nDEBUGGING return " + str(70.0 * (n0 + n1 + n2))) #return 70.0 * (n0 + n1 + n2) ## My program would work better with a result scaled to 0-255. Therefore... ## blah, getting NoneTypes after the octaves were added. Hrm... 
## debug: ###print("\n number_to_return == " + str(number_to_return)) Generators for the creation of corridor-linked dungeon rooms for indoors maps. Output format uses z values to stand for different room types, eg: 0 = blocked 1 = unblocked 2 = corridor etc. ## NOTE TO SELF!! ## How to do FoV algorithm: ## - Calculate a circle with a set radius (sight range) from the player's current position ## - Find all MapTiles in that radius ## - For each MapTile, draw a line from that MapTile to the player ## - For each properly-rounded coordinate along that line (aligns to MapTile coords; partial cover? think on it...), check MapTiles with those coordinates for opacity ## - If a MapTile with opacity is found, stop checking this line and set the MapTile whose line we're checking to "UNSEEN" ## Using the DungeonMapGenerator should always involve supplying some or all of these constants. ## Defaults are being used here to make it simple for me to test and demonstrate. ## -= 1 because doing it during room generation would be mildly wasteful -- the bottom and right edges must always be uncarved. ## Doing it here, during the inits, guarantees that for all rooms and every map. Check two rectangles, both formatted [x, y, w, h] for intersection; return True if they intersect and False if they do not intersect. ## This if tree checks to see whether or not any rooms are forming crosses. ## ... and the same check in the other direction, for if the old room was the vertical bar of the cross rather than the new room, as is assumed in the preceding if tree: Create a corridor (actually a one-tile-by-n-tiles rectangular room) connecting point (x, y) and point ((x + w), (y + h)), using the rectangular room definition format. ## ((This fix worked perfectly! Hooray)) ## If it's negative, flip it and deduct it from the index. 
## DO NOT put this before the orientation check, it doesn't need to care about which direction it isn't doing, and since it gets that info anyways it would just mess it up to flip and deduct in a direction it isn't going in (because that direction is the constant 1, see below). ## (x, y, width, height) ## Yes, it could be handled in less verbose ways. ## This way makes it blindingly obvious what the code is supposed to do, which I prefer. ## Code ought to be easy to maintain. ## If it's negative, flip it and deduct it from the index. ## DO NOT put this before the orientation check, it doesn't need to care about which direction it isn't doing, and since it gets that info anyways it would just mess it up to flip and deduct in a direction it isn't going in (because that direction is the constant 1, see below). ## (x, y, width, height) It's noise that looks like a dungeon map. If R2-D2 sneezed, this would be the random pattern left on the tissue. #### Arranging the generation parameters #### ## All the generators save the state of the last map made. ## The generate_noise() method of each generator accepts new parameters every time it's called, but if none are given, it goes back to the last parameters the generator worked with. ## This makes it easy to implement deterministic map regeneration from randseeds. ## -= 1 for the same reasoning as in the inits. #### Generating the map #### ## First, make a map full of zeroes. The rooms will be carved out of it. ## Remember, every NoiseMapGenerator returns results formatted: map[y][x] == z ## List comprehension method: ## new_dungeon_map = [[ 0 for y in range(0, self.supplied_map_height)] for x in range(0, self.supplied_map_width)] ## Try this and see how it goes. #### Generating room coordinates #### ## DEBUG #number_of_corridors_at_map_finish = 0 ## \DEBUG ## There must be at least room_min_count rooms in the end product. ## DEBUG: Since walls are uncarved space, should the x and y randints begin at 1 or 0? 
## Watching the output process will solve this issue quickly. ## ... ## This issue needs to be straightened out early on due to how intersection tests have to work. ## Only two edges need to have uncarved space in them, and every room will have those two edges uncarved. ## I decree those two edges to be the lower and right edges. ## The map will have upper and left edges uncarved so that any rooms at the edge of the map are properly walled. ## Thus the randints will begin at 1 (the upper and left edges)... ## and end at map_width and map_height, instead of (m_w - 1) and (m_h - 1). ## By letting rooms gen to the edges with their width and height values, they can sit on an edge with their two designated built-in edge walls and everything will be fine. ## [x, y, w, h] ## The checks for validity favor x,y modification first -- and always pushing it to the lower right -- ## and w,h modification second -- and always pushing it to the upper left -- ## because this should lead to a mild tendency for rooms to cluster, and towards the center, at that. ## Which I think will look nice. ## ... ## or that's what I'd like to do, but not on the first implementation. ## Checking to see if the rooms intersect: #### Carving successful room coordinates #### ## Someone told me using range(foo, len(list)) is un-Pythonic, so I'm using an iterator to step through the list in parallel for the purposes of creating corridors to connect rooms. ## This conditional seems a bit hackish. ## This is so simple it's bound to fail miserably. ## ... ## And yet it works. ## Connect every room with corridors. (Note that there may be dungeons where this trait is not desirable for some reason; other behavior may be added as desired.) 
## Generate a random direction for the corridors to point in: # remember, random.randint() includes min and max values, unlike range() #define_corridor(which_orientation, x, y, x2, y2) ## Note: Corridors are created from the current room to the next room even though the next room hasn't actually be written in yet. ## It works because the rooms already exist as rectangle coordinates. ## This is likely to cause debugging confusion if you try to change this code without taking that into account. Be advised. ## Find the centerpoints of both rooms and pack them as tuples. ## Syntax is [ ( (list_of_rooms[n][w] // 2) + list_of_rooms[n][x] ), ( (list_of_rooms[n][h] // 2) + list_of_rooms[n][y] ) ] ## Values resulting from this look like x, y and are just the centerpoints of the two rooms. ## Another representation: [(width // 2 + x offset), (height // 2 + y offset)] ## ... ## If desired, it's possible to change this to use floor divide + 1 instead of just floor divide. ## That would make it so that rooms with a thickness of 1 do not have projections off their sides. ## Corridors would slice into the center of the room rather than the rounded-down center. #room_alpha_center = [ ( (list_of_rooms[room_creation_iterator][2] // 2) + list_of_rooms[room_creation_iterator][0] ), ( (list_of_rooms[room_creation_iterator][3] // 2) + list_of_rooms[room_creation_iterator][1] ) ] #room_beta_center = [ ( (list_of_rooms[room_creation_iterator + 1][2] // 2) + list_of_rooms[room_creation_iterator + 1][0] ), ( (list_of_rooms[room_creation_iterator + 1][3] // 2) + list_of_rooms[room_creation_iterator + 1][1] ) ] ## Redoing this to make my dungeon generator cooler. ## Now, rooms will connect to the nearest two rooms, by centerpoint value! ## Or one or zero rooms, as in the case for the second and first rooms created. ## This should make tunnel connections a whole lot more friendly-looking. 
## The way we're going to do this is: ## For each room in the rooms list: ## Use my new return_the_center_of_this_rectangle() method on every room in the rooms list and compare their centers to the room currently being considered ## The nearest two rooms that do not have centerpoints equal to the room being considered will be used as anchors for the define_corridor() method. ## DEBUG #print("\n room_alpha_center == %s\n room_beta_center == %s" % (str(room_alpha_center), str(room_beta_center))) ## \DEBUG ## DEBUG #number_of_corridors_at_map_finish += 1 ## \DEBUG ## It needs to take room alpha center and drag it out to room beta center in only the horizontal direction. ## That's why vertical is as easy as swapping reference order to the rooms. ## define_corridor() still needs a direction because I chose not to make it implicit by unpacking the centerpoint tuple here. I think it's more readable this way. ## ... ## Something is totally wrong here. This only works if alpha centerpoint > beta centerpoint because otherwise you get negative widths or something and that can't be drawn in can it? ## Maybe it can? Let's try it and see what fails. ## DEBUG #number_of_corridors_at_map_finish += 1 ## \DEBUG #print("\n new_horizontal_corridor == %s\n new_vertical_corridor == %s" % (str(new_horizontal_corridor), str(new_vertical_corridor))) ## When the next-to-last room is connected to the last room, reset the iterator to 0 so that the last room may be connected to the first room. ## NOTE! Linear dungeons should stop corridor creation when the next-to-last room is connected to the last room. ## DEBUG #print("\n room_creation_iterator == %d\n len(list_of_rooms == %d" % (room_creation_iterator, len(list_of_rooms))) ## \DEBUG # plus equals to, NOT set equals to (incrementing, not rolling over) # set equals to, NOT minus equals to (rolling over, not incrementing) ## This should probably be turned into a create_room() method. 
## First horizontal: ## DEBUG #print("\nnew_horizontal_corridor[0] == %d\nnew_horizontal_corridor[2] == %d\nnew_horizontal_corridor[0] + [2] == %d" % (new_horizontal_corridor[0] ,new_horizontal_corridor[2], (new_horizontal_corridor[0] + new_horizontal_corridor[2]))) #print("\nnew_horizontal_corridor[1] == %d\nnew_horizontal_corridor[3] == %d\nnew_horizontal_corridor[1] + [3] == %d" % (new_horizontal_corridor[1], new_horizontal_corridor[3], (new_horizontal_corridor[1] + new_horizontal_corridor[3]))) ## \DEBUG ## If it's already walkable, don't turn it debug mauve. ## Second vertical: ## DEBUG #print("\nnew_vertical_corridor[0] == %d\nnew_vertical_corridor[2] == %d\nnew_vertical_corridor[0] + [2] == %d" % (new_vertical_corridor[0], new_vertical_corridor[2], (new_vertical_corridor[0] + new_vertical_corridor[2]))) #print("\nnew_vertical_corridor[1] == %d\nnew_vertical_corridor[3] == %d\nnew_vertical_corridor[1] + [3] == %d" % (new_vertical_corridor[1], new_vertical_corridor[3], (new_vertical_corridor[1] + new_vertical_corridor[3]))) ## \DEBUG ## If it's already walkable, don't turn it debug mauve. ## DEBUG #print("\n number_of_corridors_at_map_finish == %d\n len(list_of_rooms) == %d" % (number_of_corridors_at_map_finish, len(list_of_rooms))) ## \DEBUG ## I don't like this generator. It is not worth the effort right now. Keeping it for legacy/future inspiration purposes. ## To start, this code will be somewhat copypasted from DungeonMapGenerator. Mostly just the inits and some grid work. ## Using the RoomFilledMapGenerator should always involve supplying some or all of these constants. ## Defaults are being used here to make it simple for me to test and demonstrate. ## DEBUG ## Let's see if storing new_dungeon_map as state magically solves it. Woooo ## Nope, not in the least. And yes I did put in self.* tags on every reference in this class's generate_noise() method. 
#self.new_dungeon_map = [] ## \DEBUG ## -= 1 because doing it during room generation would be mildly wasteful -- the bottom and right edges must always be uncarved. ## Doing it here, during the inits, guarantees that for all rooms and every map. ## DEBUG COMMENTED ## The following adjustment is unnecessary with the way I've structured my code now. Good. #self.map_width -= 1 #self.map_height -= 1 ## \DEBUG COMMENTED ## This generator does not need min/max room count settings, but it wouldn't be all that difficult to add them as some sort of conditional'd loop. It's sorta like noise. Except blocky and in all these clean straight lines and right angles. #### Arranging the generation parameters #### ## All the generators save the state of the last map made. ## The generate_noise() method of each generator accepts new parameters every time it's called, but if none are given, it goes back to the last parameters the generator worked with. ## This makes it easy to implement deterministic map regeneration from randseeds. ## -= 1 for the same reasoning as in the inits. ## Is unnecessary with the way I've structured my code now. ## DEBUG COMMENTED #supplied_map_width -= 1 ## \DEBUG COMMENTED ## Is unnecessary with the way I've structured my code now. ## DEBUG COMMENTED #supplied_map_height -= 1 ## \DEBUG COMMENTED ## Room count will be determined by the other parameters since the map will be filled with rooms. #### Generating the map #### ## First, make a map full of zeroes. The rooms will be carved out of it. ## Remember, every NoiseMapGenerator returns results formatted: map[y][x] == z ## Refactoring this might involve making a generate_blank_map() method. ## It would also be useful for DungeonMapGenerators. ## Maybe DungeonMapGenerator should be a base class and these room-based map generators would all draw from it. 
## List comprehension version: ## new_dungeon_map = [[ 0 for y in range(0, self.supplied_map_height)] for x in range(0, self.supplied_map_width)] ## Try this and see how it goes. #### Filling the blank map with rooms #### ## I seriously don't understand why you wouldn't want to do for loops with index numbers. It makes dealing with the data SO much easier! ## IMPORTANT! ## The syntax is the same for all of these map generators: ## ## new_dungeon_map[y][x] == z ## ## If there is some confusion about row/column stuff or sublist ordering, remember to compare it to this fact. #### ATTEMPT NUMBER TWO #### ## I'm just beating my hands on the keyboard and code is coming out ## Initialize the validator variable: ## Whip up a potential room: #(4, 10) # <-- does not help at all #(4, 10) # :( ## Now check if the width or height go out of bounds and adjust to fit if possible; if not possible, set the validator toggle to False: # len(map[row]) because we're checking this particular row's width, and -1 because of uncarved side # len(map) because we're checking the height of columns in this map, and -1 because of uncarved side ## Determine if this is a good starting tile for a room: ## Check every tile adjacent to the current tile (and the current tile, too) for carved space and Falsitivize the validator if any is found: ## Next, check and see if this room will slice into another room at any point along its prospective length: ## Init/reset the width decrementor: ## range(-1, 2) should step through (-1, 0, 1), ie (up, same, down) or (left, same, right) ## Check every tile adjacent to the current tile (and the current tile, too) for carved space and Falsitivize the validator if any is found: #print(" INCREMENTING decrementor. checked tile == %d" % (new_dungeon_map[(each_row_index + each_nearby_tile_y)][(each_column_index + each_nearby_tile_x) + each_next_unit_of_width])) #print(" NOT incrementing decrementor. 
checked tile == %d" % (new_dungeon_map[(each_row_index + each_nearby_tile_y)][(each_column_index + each_nearby_tile_x) + each_next_unit_of_width])) ## Apply the decrementor and check if the room is too small: #print("\nnew_room_width == %d\n room_width_decrementor == %d" % (new_room_width, room_width_decrementor)) #print(" new_room_width == %d" % (new_room_width)) #print(" self.min_room_size == %d" % (self.room_min_size)) ## Then the smallest possible room had to become too small to fit here and this tile should be skipped. #print(" should_we_start_a_room_here == %r" % (should_we_start_a_room_here)) #else: #print("\nnew_room_width == %d\n room_width_decrementor == %d" % (new_room_width, room_width_decrementor)) #print(" self.min_room_size == %d" % (self.room_min_size)) #print(" (t) should_we_start_a_room_here == %r" % (should_we_start_a_room_here)) ## Duplicating the width decrementor code even though it should never be necessary... o_o ## ... ## It runs but it didn't fix the problem. Still bizarre. ## Init/reset the height decrementor: ## range(-1, 2) should step through (-1, 0, 1), ie (up, same, down) or (left, same, right) ## Check every tile adjacent to the current tile (and the current tile, too) for carved space and Falsitivize the validator if any is found: #print(" INCREMENTING decrementor. checked tile == %d" % (new_dungeon_map[(each_row_index + each_nearby_tile_y)][(each_column_index + each_nearby_tile_x) + each_next_unit_of_width])) #else: #print(" NOT incrementing decrementor. checked tile == %d" % (new_dungeon_map[(each_row_index + each_nearby_tile_y)][(each_column_index + each_nearby_tile_x) + each_next_unit_of_width])) ## Then the smallest possible room had to become too small to fit here and this tile should be skipped. #if should_we_start_a_room_here == 0: # new_room_height = 0 # new_room_width = 0 ## Now that all checks have been passed, write the room to the map. 
#### ATTEMPT NUMBER ONE ####
## Failed due to indexing or insufficient/incorrect tile validation.
## IMPORTANT! This code all assumes tile validation begins iterating one tile right and one tile down and fills a map of zeroes of exactly the right size.

## Serial checks with a toggle rather than nested checks with a base case.
## This makes it very easy to add and remove conditionals that alter how rooms are validated.
## The toggle starts True; any failed check flips it to False and the candidate seed tile is rejected.
should_we_start_a_room_on_this_tile = True

if new_dungeon_map[each_row_index][each_column_index] != 0:
    ## The current tile is already carved and unusable.
    should_we_start_a_room_on_this_tile = False

if new_dungeon_map[each_row_index][each_column_index + 1] != 0:
    ## The next tile to the right is carved.
    should_we_start_a_room_on_this_tile = False

if new_dungeon_map[each_row_index - 1][each_column_index] != 0:
    ## The tile above the current tile is carved.
    should_we_start_a_room_on_this_tile = False

if new_dungeon_map[each_row_index][each_column_index - 1] != 0:
    ## The tile to the left of the current tile is carved.
    should_we_start_a_room_on_this_tile = False

if new_dungeon_map[each_row_index - 1][each_column_index - 1] != 0:
    ## The tile above and to the left of the current tile is carved.
    should_we_start_a_room_on_this_tile = False

if new_dungeon_map[each_row_index - 1][each_column_index + 1] != 0:
    ## The tile above and to the right of the current tile is carved.
    should_we_start_a_room_on_this_tile = False

## The absurdity gallery. Should be logically impossible to get any hits here whatsoever.
## BUGFIX(review): these three checks used to assign to a misspelled name,
## should_we_start_a_room_on_this_time, so even when they hit they never rejected the tile.
## NOTE(review): (each_row_index + 1) was already indexed unguarded here in the original; on the
## map's bottom row this would raise IndexError -- presumably iteration never reaches that row. TODO confirm.
if new_dungeon_map[each_row_index + 1][each_column_index] != 0:
    should_we_start_a_room_on_this_tile = False

if new_dungeon_map[each_row_index + 1][each_column_index + 1] != 0:
    should_we_start_a_room_on_this_tile = False

if new_dungeon_map[each_row_index + 1][each_column_index - 1] != 0:
    should_we_start_a_room_on_this_tile = False

## There has to be a check to see if the tile is fewer than self.room_min_size tiles away from
## the right and bottom edges of the map. We will take this opportunity to define these handy
## and descriptive variables:
distance_from_left_of_room_to_right_of_map = (self.map_width - each_column_index)
distance_from_top_of_room_to_bottom_of_map = (self.map_height - each_row_index)

## Check to see if the room's min size is too large for its seed location:
if self.room_min_size >= distance_from_left_of_room_to_right_of_map:
    ## This tile is too close to the right side of the map for a room to be placed here.
    should_we_start_a_room_on_this_tile = False

if self.room_min_size >= distance_from_top_of_room_to_bottom_of_map:
    ## This tile is too close to the bottom of the map for a room to be placed here.
    should_we_start_a_room_on_this_tile = False

## Get how wide and tall the room wants to be, so we can check it against its neighbors and the map edges:
## (random.randint includes both endpoints, so min and max sizes are both possible.)
random_room_width = random.randint(self.room_min_size, self.room_max_size)
random_room_height = random.randint(self.room_min_size, self.room_max_size)

## If the tile is too close to the edge to fulfill its randomly generated width, decrement it until it just fits.
## Note: This step happens before the next validation step because if it didn't the room would
## generate an index-out-of-range error there.
## Forbid the room from extending past the right or bottom map edges by shrinking it to fit:
if random_room_width >= distance_from_left_of_room_to_right_of_map:
    random_room_width -= (random_room_width - distance_from_left_of_room_to_right_of_map)
if random_room_height >= distance_from_top_of_room_to_bottom_of_map:
    random_room_height -= (random_room_height - distance_from_top_of_room_to_bottom_of_map)

## Scan along the room's prospective width and count how many columns have to be trimmed off
## because something carved sits on, above, or below that column.
## The handler accrues the trim instead of decrementing random_room_width directly, so the
## shrinking cannot clip its own loop range short.
random_room_width_adjustment_handler = 0
## BUGFIX(review): the third disjunct below used to duplicate the (each_row_index - 1) probe
## instead of looking at the row BELOW the seed row, so overlaps from beneath were never
## detected -- the likely cause of the "still generating overlaps" mystery noted in the history.
## The row-below probe is bounds-guarded because, unlike (each_row_index - 1), it cannot fall
## back on Python's negative-index wraparound; the guard is loop-invariant, so hoist it.
row_below_is_inside_the_map = ((each_row_index + 1) <= (self.map_height - 1))
## random_room_width + 2 covers the upperleft coordinate, the span of the room, and the wall on the right.
for each_next_tile_index in range(0, (random_room_width + 2)):
    ## Stay inside the map on the right.
    if ((each_next_tile_index + each_column_index) <= (self.map_width - 1)):
        if (new_dungeon_map[each_row_index][(each_column_index + each_next_tile_index)] != 0) or (new_dungeon_map[(each_row_index - 1)][(each_column_index + each_next_tile_index)] != 0) or (row_below_is_inside_the_map and (new_dungeon_map[(each_row_index + 1)][(each_column_index + each_next_tile_index)] != 0)):
            ## Then something's in the way. Accrue one column of trim.
            random_room_width_adjustment_handler += 1

## Apply the accrued adjustment.
random_room_width -= random_room_width_adjustment_handler

## One final check to ensure the previous validation step did not make the room too small:
if random_room_width <= self.room_min_size:
    should_we_start_a_room_on_this_tile = False

## Mirror of the width scan for the room's prospective height -- count rows that have to be
## trimmed because something carved sits on, left of, or right of that row.
random_room_height_adjustment_handler = 0
for each_next_tile_index in range(0, (random_room_height + 2)):
    ## Stay inside the map at the bottom.
    if ((each_next_tile_index + each_row_index) <= (self.map_height - 1)):
        ## NOTE(review): (each_column_index + 1) is probed unguarded, as in the original; on the
        ## rightmost column this would raise IndexError -- presumably the earlier edge-distance
        ## checks keep seeds off that column. TODO confirm.
        if (new_dungeon_map[each_row_index + each_next_tile_index][(each_column_index)] != 0) or (new_dungeon_map[(each_row_index + each_next_tile_index)][(each_column_index + 1)] != 0) or (new_dungeon_map[(each_row_index + each_next_tile_index)][(each_column_index - 1)] != 0):
            random_room_height_adjustment_handler += 1

random_room_height -= random_room_height_adjustment_handler
if random_room_height <= self.room_min_size:
    should_we_start_a_room_on_this_tile = False

## If it passes all the checks, write the room to the map.
if should_we_start_a_room_on_this_tile == True: for each_new_tile_y_offset in range(0, random_room_height): for each_new_tile_x_offset in range(0, random_room_width): if new_dungeon_map[(each_row_index + each_new_tile_y_offset)][(each_column_index + each_new_tile_x_offset)] >= 1: ## DEBUG print("Error: Mauve for random_room_width %d, random_room_height %d\n min_size %d max_size %d" % (random_room_width, random_room_height, self.room_min_size, self.room_max_size)) ## \DEBUG new_dungeon_map[(each_row_index + each_new_tile_y_offset)][(each_column_index + each_new_tile_x_offset)] += 1 return new_dungeon_map #### ATTEMPT NUMBER ZERO #### ## Ugh, I don't know why none of this works. ## The indices seem perfect on paper but changing one index makes the results better or worse in ways that don't make any obvious kind of sense. ## I'm going to assume I made some error in figuring out what checking was needed. ## Step 1 if new_dungeon_map[each_row_index][each_column_index] == 0: ## Theory section... ## Imagine the algorithm makes a tall room, a wide room and another tall room on the first line. ## When it passes through the second line it would need to: ## 1. Detect uncarved space at (x, y) ## 2. Check ((x + 1), y) for uncarved space ## If False, then it's about to break into a room to the right; if True... ## 3. Check ((x + 1), y - 1) for uncarved space ## 3a If False, then it's a wall beneath a room; if True... ## 4. Check ((x + 2), y) for uncarved space ## 4a If False, then it's the end of a wall beneath a room and also abutting another room to the right. ## 4b If True then this is a good spot to place a room as it is not going to be carving out a wall from any adjacent rooms. ## ... ## There needs to be a check-ahead to make the room actually fill all the space infront of it, x-ly speaking. ## Or at least to make it easy to put an upper bound on the room width in this location. ## A similar procedure may need to happen at the bottom of the map for height of the room. 
## ... ## 5. For each in range(0, room_max_size): Check ((x + each), y) for uncarved space, return sum_of_this_loop ## ... stuff. ## Step 2 if new_dungeon_map[each_row_index][(each_column_index + 1)] == 0: ## Step 3 if new_dungeon_map[(each_row_index - 1)][(each_column_index + 1)] == 0: ## Step 4 ## DEBUG Let's throw more conditionals onto the last uniform step, here, to see if something is the right one: if (new_dungeon_map[each_row_index][(each_column_index + 2)] == 0) and (new_dungeon_map[each_row_index][each_column_index - 1] == 0) and (new_dungeon_map[each_row_index - 1][each_column_index - 1] == 0): ## A good spot to place a room has been found. ## Determine the limiting condition for the width and height randint ranges based on distance between room edge and map edge: distance_from_left_of_room_to_right_of_map = (self.map_width - each_column_index) distance_from_top_of_room_to_bottom_of_map = (self.map_height - each_row_index) random_room_width = random.randint(self.room_min_size, self.room_max_size) random_room_height = random.randint(self.room_min_size, self.room_max_size) ## Forbidding the rooms to be larger than the map: if random_room_width >= distance_from_left_of_room_to_right_of_map: random_room_width = (distance_from_left_of_room_to_right_of_map - 1) if random_room_height >= distance_from_top_of_room_to_bottom_of_map: random_room_height = (distance_from_top_of_room_to_bottom_of_map - 1) ## Note: Step 5 comes after a tentative room width has been generated so that it doesn't have to check any further than it needs to. 
how_wide_to_actually_make_the_room = 0 ## Step 5 for each_next_tile_index in range(0, random_room_width): if new_dungeon_map[each_row_index][(each_column_index + each_next_tile_index + 1)] == 0: how_wide_to_actually_make_the_room += 1 else: how_wide_to_actually_make_the_room += 0 ## Now we know how wide to make the room and, implicitly, how tall to make it, since rooms are rectilinear and will never be placed to undercut other rooms, only to block their horizontal propagation. ## The maximum width is how_wide_to_actually_make_the_room, since it uses random_room_width (already bounded for map edge purposes) in its randrange. ## The maximum height is simply random_room_height, now that it's been bounded by distance_from_top_of_room_to_bottom_of_map. ## ... ## It occurred to me it might be simpler to make the map a large carved room one or two tiles wider than the end result is supposed to be, and simply "uncarve" the map inside it before starting any of this. ## Then is-carved checking would implicitly incorporate the distance to the edge in it too. ## Ah well, that's for some adventurous refactoring spree! new_room_rectangle = [each_row_index, each_column_index, how_wide_to_actually_make_the_room, random_room_height] ## Now write the room to the map so it can continue properly. for each_new_tile_y in range(1, random_room_height + 1): for each_new_tile_x in range(1, (how_wide_to_actually_make_the_room + 1)): new_dungeon_map[(each_row_index + each_new_tile_y)][(each_column_index + each_new_tile_x)] += 1 return new_dungeon_map ## Thinking of renaming this WingedDungeonGenerator, because it loves to make floorplans split into several "wings" each of which may be impressively lengthy at larger sizes. ## The effect is pretty cool, actually. Code could use some efficiency polish, though. 
Idea remembered from the old WanderingLineGenerator: " Decided I didn't quite like the "wandering line" idea and I'm going to try something inspired by http://donjon.bin.sh/d20/dungeon/index.cgi instead. It's going to use the DungeonMapGenerator algorithm to place rooms and a new algorithm for tunnel connection. Specifically it will use the WanderingLineMapGenerator idea of keeping an in-object list of rooms and using that to do things like is-connected-yet checks and intersection testing. " ## The bottom and right edges must always be uncarved. ## Doing it here, during the inits, guarantees that for all rooms and every map. ## ... ## I probably have no idea what I'm doing since testing it is easier than figuring out whether Python feels like pretending 0 is an ordinal or not this time. ## Saving it as state for brain friendliness purposes. Can be changed later. Create a corridor (actually a one-tile-by-n-tiles rectangular room) connecting point (x, y) and point ((x + width), (y + height)), using the rectangular room definition format. ## This is for placing corridors. Check two rectangles, both formatted [x, y, w, h] for intersection; return True if they intersect and False if they do not intersect. ## This if tree checks to see whether or not any rooms are forming crosses. ## ... and the same check in the other direction, for if the old room was the vertical bar of the cross rather than the new room, as is assumed in the preceding if tree: ## DEBUG #print("Successfully checked for intersection") ## \DEBUG ## I have this sinking feeling it's un-Pythonic to have this kind of optional state for my MapGenerator objects. #print("each_new_room_attempt == %d" % (each_new_room_attempt)) ## Width and height are defined BEFORE x/y position. ## Doing it this way makes it unnecessary to check if the room extends off the map. #print("Failed a room intersect test") #print("Appended a room") #print("Gotta use a while loop eventually") # Do we? Doesn't seem like it now. 
## Now create corridors linking rooms. ## The list_of_all_centerpoints is not the same as the list_of_candidate_rooms or list_of_new_corridors, but I guess it technically could be merged with a small redesign. Keeping them separate for now to preserve the conceptual history of the things. ## Note the reason this is done after the list_of_candidate_rooms is filled is because that list gets wiped during generation if the genned number is lower than the minimum. ## Corridor generation doesn't do that, so it can append corridors as they're created. ## "Colors" are an abstraction used to represent the fact that each room has a connected-to-these-other-rooms quality, which is common to all of them. ## Thinking of this quality as a color makes for an easily relatable analogy. ## The list_of_room_connection_colors is going to be a list of lists which is constructed as corridors are added (eg, as rooms pass their first and only classical connection check). ## Appends [centerpoint_x, centerpoint_y] to the list, so it's a 2-ple: [ [],[],[],[],[]... ] ## In the following for loop, all created rooms are connected to the closest other room or corridor (technically the closest centerpoint, which stores both). ## The connecton of a room involves creating precisely one vertical and one horizontal corridor attaching it to another room. ## It also involves appending the centerpoints of the two corridors and the two rooms they connect to the list_of_room_connection_colors in their proper color. ## It does NOT involve connecting colors to each other. That comes after this "first pass" of room connection. ## The first step is to find which other room's centerpoint is the closest to the current room's centerpoint. ## DEBUG #print(" each_centerpoint == %s" % (str(each_centerpoint))) ## \DEBUG ## Then they're the same centerpoint and should be skipped for this step. ## Then these centerpoints should be checked to see if they're the closest to each other as of this iteration. 
## Then these centerpoints are in fact the closest to each other as of this iteration. ## Now that the closest room rectangle has been found, draw a corridor between it's and the current room's centerpoints: # remember, random.randint() includes min and max values, unlike range() ## NOTE! It might be a good idea to check for intersection here and, if detected, invert which_direction_first via: which_direction_first = abs(which_direction_first - 1) ## That would make it slightly less favorable to crossed tunnels, though it should already have rather few of those. I think. ## Redefining these terms so we can use them to create corridors. This may not be maximally Pythonic... It would be a decent candidate for refactoring. ## Save the corridors: ## Also save the corridors' centerpoints: ## We're going to absolutely have to ensure they're all connected. ## 1. I think the way to do this is to have corridors, upon creation, append their centerpoints along with their associated rooms' to a list which will later be crosschecked with the list_of_all_centerpoints. ## 2. The first room connected this way will have a color associated with it that colors the centerpoints of itself, the corridor, and the room it's connected to. ## 3. When a new corridor is created, it will check if start or end have colors associated with them and adopt it as its own color if so; if not, a new color will be created which follows this pattern. ## 4. When the map is finished creating corridors via the classical closest-centerpoints method, the color lists will be cross-checked and if any centerpoint appears in at least two color lists simultaneously, those colors are considered connected. ## 5. If this process completes and certain colors remain unconnected, the closest centerpoints in each of them will be discerned and connected to each other. ## 6. Steps 4 and 5 will iterate untill no colors remain unconnected. ## Since this is where every room has a corridor added on to it, we'll begin here. 
## list_of_room_connection_colors will be a three-ple: ## [ [ [x, y], [x, y], [x, y] ], [ [x, y], [x, y], [x, y] ], ... ] ## If there are no colors yet... ## Make the current room the source of the first color. #print("\n\nlist_of_room_connection_colors == \n%s\n\n" % (str(list_of_room_connection_colors))) ## That part worked correctly the very first time! ## Otherwise, check the list of colors for cross-results with all four centerpoints currently being considered: ## Notice the split between do_these_centerpoints_connect_to_this_color and we_can_append_foo_centerpoint ## This is because the former gates the adding of all centerpoints, and the latter gates the adding of specific centerpoints. ## Without the latter it would add too many centerpoints, creating duplicates. ## Important: This part of the function does NOT pare down colors, it can only build them up. It does try to build them up only when previous colors are insufficient, however. ## I thiiiiiink there's going to be a slight problem with needing more than one pass to connect colors, after these steps are done. ## Limited by the number of colors, but still, would be nice to narrow that down to get minimum runtime and maximum cleanness. ## This absolutely cannot be the best way to do this kind of checking, but the tutorials didn't tell me any better way, and this way certainly works. ## It's also very flat and readable. ## ... ## It also saved me a whole lot of processing time, from the looks of my debug statements. ## ... ## I think this was mistakenly placed. Newly-required colors should not have to check for duplicates again, since this happened in the preceding part of this conditional tree -- see just above here. ## Commenting it all for rumination and debugging purposes. 
#we_can_append_alpha_room_centerpoint = True #we_can_append_beta_room_centerpoint = True #we_can_append_horizontal_corridor_centerpoint = True #we_can_append_vertical_corridor_centerpoint = True #for each_other_color in list_of_room_connection_colors: # for each_other_centerpoint in each_other_color: # if each_other_centerpoint == [alpha_room_centerpoint_x, alpha_room_centerpoint_y]: # we_can_append_alpha_room_centerpoint = False # if each_other_centerpoint == [beta_room_centerpoint_x, beta_room_centerpoint_y]: # we_can_append_beta_room_centerpoint = False # if each_other_centerpoint == horizontal_corridor_centerpoint: # we_can_append_horizontal_corridor_centerpoint = False # if each_other_centerpoint == vertical_corridor_centerpoint: # we_can_append_vertical_corridor_centerpoint = False #if we_can_append_alpha_room_centerpoint == True: #if we_can_append_beta_room_centerpoint == True: #if we_can_append_horizontal_corridor_centerpoint == True: #if we_can_append_vertical_corridor_centerpoint == True: #if len(the_newly_required_color) != 0: #print("pre-step: list_of_room_connection_colors == ") #for each_color in list_of_room_connection_colors: # print(" " + str(each_color)) # print("|||||||||||||||||||||||") ## The rooms are placed, connected with classical, first-pass corridors, and the initial color lists have been established. ## Next we must winnow down the color lists to the bare minimum, since there will be some colors that are connected which were not recognized as such in the establishment pass. print("PRE-STEP: list_of_room_connection_colors == ") for each_color in list_of_room_connection_colors: print(" " + str(each_color)) print("|||||||||||||||||||||||") ## The next step is to connect the disconnected colors. ## ## This will be accomplished by: ## 1. Taking the color in the list_of_room_connection_colors at index 0 (hereafter "color alpha"), finding its centerpoint and comparing it to all other colors' centerpoints, saving: ## a. 
the centerpoint of color alpha ## b. the centerpoint of the closest color to color alpha (hereafter "color beta") ## c. a list containing all the centerpoints inside the color beta (eg, identical to color beta at the time of its discovery) ## 2. Finding the room inside color alpha which is closest to 1.b. ## 3. Finding the room inside color beta which is closest to 1.a. ## 4. Connecting 2. and 3. with an L-corridor and adding its two components' centerpoints to color gamma ## 5. Appending every room centerpoint in color alpha and color beta to color gamma ## 6. Appending color gamma to the next pass's new color list ## 7. If any colors remain, appending every color not in alpha, beta, or gamma to the next pass's color list ## 8. If the next pass's color list length is greater than 1, repeat this process, starting at step 1. ## Number of passes is just the length of the color list, since this algorithm is guaranteed to step through each and every color, connecting them individually. ## Giant conditional. ## Figure out the average centerpoint of color alpha: #the_closest_beta_average_x = None #the_closest_beta_average_y = None ## Yes, we have to use the index value for this, since equality checking lists doesn't seem to work, based on previous experiments. ## Then these centerpoints are in fact the closest to each other as of this iteration. ## Do I need to make these globals? Is python telling me to refactor my function into a zillion impossible to track tiny functions? =/ ## Now that we've found the beta color with an average centerpoint closest to the alpha color's average centerpoint, we need to find the alpha room centerpoint closest to the beta average centerpoint: ## Mirror the above process to find the beta room with a centerpoint closest to the alpha color's average centerpoint: ## Now that we have the alpha and beta room centerpoints closest to each other, connect them with a corridor. 
# remember, random.randint() includes min and max values, unlike range() ## Save the corridors: ## And now merge alpha and beta colors and delete beta color: ## DEBUG #print("which_color_is_closest == %s" % (str(which_color_is_closest))) ## \DEBUG ## The following line wasn't sufficient, since it only deleted this call-by-value variable rather than the call-by-reference pointer I was pretending it would be. Obvious in hindsight. #del which_color_is_closest ## Fortunately the index was already available! See above. ## And now everything works as perfectly as I knew it would. print(" POST-STEP: list_of_room_connection_colors == ") for each_color in list_of_room_connection_colors: print(" " + str(each_color)) print("|||||||||||||||||||||||") ## Having generated enough rooms and corridors, create the map: ## Write the rooms... ## and write the corridors: #print("Attempting to write a corridor...") ## Check for uncarved space. When defining corridors this is the simplest way to go about it since the debug mauve is only really important for debugging room generation.
2.305477
2
project/mahjong/tests/tests_points_calculation.py
huangenyan/Lattish
9
6622995
<reponame>huangenyan/Lattish # -*- coding: utf-8 -*- import unittest from mahjong.hand import FinishedHand class PointsCalculationTestCase(unittest.TestCase): def test_calculate_scores_and_ron(self): hand = FinishedHand() result = hand.calculate_scores(han=1, fu=30, is_tsumo=False, is_dealer=False) self.assertEqual(result['main'], 1000) result = hand.calculate_scores(han=1, fu=110, is_tsumo=False, is_dealer=False) self.assertEqual(result['main'], 3600) result = hand.calculate_scores(han=2, fu=30, is_tsumo=False, is_dealer=False) self.assertEqual(result['main'], 2000) result = hand.calculate_scores(han=3, fu=30, is_tsumo=False, is_dealer=False) self.assertEqual(result['main'], 3900) result = hand.calculate_scores(han=4, fu=30, is_tsumo=False, is_dealer=False) self.assertEqual(result['main'], 7700) result = hand.calculate_scores(han=4, fu=40, is_tsumo=False, is_dealer=False) self.assertEqual(result['main'], 8000) result = hand.calculate_scores(han=4, fu=40, is_tsumo=False, is_dealer=True) self.assertEqual(result['main'], 12000) result = hand.calculate_scores(han=5, fu=0, is_tsumo=False, is_dealer=False) self.assertEqual(result['main'], 8000) result = hand.calculate_scores(han=6, fu=0, is_tsumo=False, is_dealer=False) self.assertEqual(result['main'], 12000) result = hand.calculate_scores(han=8, fu=0, is_tsumo=False, is_dealer=False) self.assertEqual(result['main'], 16000) result = hand.calculate_scores(han=11, fu=0, is_tsumo=False, is_dealer=False) self.assertEqual(result['main'], 24000) result = hand.calculate_scores(han=13, fu=0, is_tsumo=False, is_dealer=False) self.assertEqual(result['main'], 32000) result = hand.calculate_scores(han=26, fu=0, is_tsumo=False, is_dealer=False) self.assertEqual(result['main'], 64000) def test_calculate_scores_and_ron_by_dealer(self): hand = FinishedHand() result = hand.calculate_scores(han=1, fu=30, is_tsumo=False, is_dealer=True) self.assertEqual(result['main'], 1500) result = hand.calculate_scores(han=2, fu=30, is_tsumo=False, 
is_dealer=True) self.assertEqual(result['main'], 2900) result = hand.calculate_scores(han=3, fu=30, is_tsumo=False, is_dealer=True) self.assertEqual(result['main'], 5800) result = hand.calculate_scores(han=4, fu=30, is_tsumo=False, is_dealer=True) self.assertEqual(result['main'], 11600) result = hand.calculate_scores(han=5, fu=0, is_tsumo=False, is_dealer=True) self.assertEqual(result['main'], 12000) result = hand.calculate_scores(han=6, fu=0, is_tsumo=False, is_dealer=True) self.assertEqual(result['main'], 18000) result = hand.calculate_scores(han=8, fu=0, is_tsumo=False, is_dealer=True) self.assertEqual(result['main'], 24000) result = hand.calculate_scores(han=11, fu=0, is_tsumo=False, is_dealer=True) self.assertEqual(result['main'], 36000) result = hand.calculate_scores(han=13, fu=0, is_tsumo=False, is_dealer=True) self.assertEqual(result['main'], 48000) result = hand.calculate_scores(han=26, fu=0, is_tsumo=False, is_dealer=True) self.assertEqual(result['main'], 96000) def test_calculate_scores_and_tsumo(self): hand = FinishedHand() # result = hand.calculate_scores(han=1, fu=30, is_tsumo=True, is_dealer=False) # self.assertEqual(result['main'], 500) # self.assertEqual(result['additional'], 300) # # result = hand.calculate_scores(han=3, fu=30, is_tsumo=True, is_dealer=False) # self.assertEqual(result['main'], 2000) # self.assertEqual(result['additional'], 1000) result = hand.calculate_scores(han=3, fu=60, is_tsumo=True, is_dealer=False) self.assertEqual(result['main'], 3900) self.assertEqual(result['additional'], 2000) result = hand.calculate_scores(han=4, fu=30, is_tsumo=True, is_dealer=False) self.assertEqual(result['main'], 3900) self.assertEqual(result['additional'], 2000) result = hand.calculate_scores(han=5, fu=0, is_tsumo=True, is_dealer=False) self.assertEqual(result['main'], 4000) self.assertEqual(result['additional'], 2000) result = hand.calculate_scores(han=6, fu=0, is_tsumo=True, is_dealer=False) self.assertEqual(result['main'], 6000) 
self.assertEqual(result['additional'], 3000) result = hand.calculate_scores(han=8, fu=0, is_tsumo=True, is_dealer=False) self.assertEqual(result['main'], 8000) self.assertEqual(result['additional'], 4000) result = hand.calculate_scores(han=11, fu=0, is_tsumo=True, is_dealer=False) self.assertEqual(result['main'], 12000) self.assertEqual(result['additional'], 6000) result = hand.calculate_scores(han=13, fu=0, is_tsumo=True, is_dealer=False) self.assertEqual(result['main'], 16000) self.assertEqual(result['additional'], 8000) result = hand.calculate_scores(han=26, fu=0, is_tsumo=True, is_dealer=False) self.assertEqual(result['main'], 32000) self.assertEqual(result['additional'], 16000) def test_calculate_scores_and_tsumo_by_dealer(self): hand = FinishedHand() result = hand.calculate_scores(han=1, fu=30, is_tsumo=True, is_dealer=True) self.assertEqual(result['main'], 500) self.assertEqual(result['additional'], 500) result = hand.calculate_scores(han=3, fu=30, is_tsumo=True, is_dealer=True) self.assertEqual(result['main'], 2000) self.assertEqual(result['additional'], 2000) result = hand.calculate_scores(han=4, fu=30, is_tsumo=True, is_dealer=True) self.assertEqual(result['main'], 3900) self.assertEqual(result['additional'], 3900) result = hand.calculate_scores(han=5, fu=0, is_tsumo=True, is_dealer=True) self.assertEqual(result['main'], 4000) self.assertEqual(result['additional'], 4000) result = hand.calculate_scores(han=6, fu=0, is_tsumo=True, is_dealer=True) self.assertEqual(result['main'], 6000) self.assertEqual(result['additional'], 6000) result = hand.calculate_scores(han=8, fu=0, is_tsumo=True, is_dealer=True) self.assertEqual(result['main'], 8000) self.assertEqual(result['additional'], 8000) result = hand.calculate_scores(han=11, fu=0, is_tsumo=True, is_dealer=True) self.assertEqual(result['main'], 12000) self.assertEqual(result['additional'], 12000) result = hand.calculate_scores(han=13, fu=0, is_tsumo=True, is_dealer=True) self.assertEqual(result['main'], 16000) 
self.assertEqual(result['additional'], 16000) result = hand.calculate_scores(han=26, fu=0, is_tsumo=True, is_dealer=True) self.assertEqual(result['main'], 32000) self.assertEqual(result['additional'], 32000)
# -*- coding: utf-8 -*- import unittest from mahjong.hand import FinishedHand class PointsCalculationTestCase(unittest.TestCase): def test_calculate_scores_and_ron(self): hand = FinishedHand() result = hand.calculate_scores(han=1, fu=30, is_tsumo=False, is_dealer=False) self.assertEqual(result['main'], 1000) result = hand.calculate_scores(han=1, fu=110, is_tsumo=False, is_dealer=False) self.assertEqual(result['main'], 3600) result = hand.calculate_scores(han=2, fu=30, is_tsumo=False, is_dealer=False) self.assertEqual(result['main'], 2000) result = hand.calculate_scores(han=3, fu=30, is_tsumo=False, is_dealer=False) self.assertEqual(result['main'], 3900) result = hand.calculate_scores(han=4, fu=30, is_tsumo=False, is_dealer=False) self.assertEqual(result['main'], 7700) result = hand.calculate_scores(han=4, fu=40, is_tsumo=False, is_dealer=False) self.assertEqual(result['main'], 8000) result = hand.calculate_scores(han=4, fu=40, is_tsumo=False, is_dealer=True) self.assertEqual(result['main'], 12000) result = hand.calculate_scores(han=5, fu=0, is_tsumo=False, is_dealer=False) self.assertEqual(result['main'], 8000) result = hand.calculate_scores(han=6, fu=0, is_tsumo=False, is_dealer=False) self.assertEqual(result['main'], 12000) result = hand.calculate_scores(han=8, fu=0, is_tsumo=False, is_dealer=False) self.assertEqual(result['main'], 16000) result = hand.calculate_scores(han=11, fu=0, is_tsumo=False, is_dealer=False) self.assertEqual(result['main'], 24000) result = hand.calculate_scores(han=13, fu=0, is_tsumo=False, is_dealer=False) self.assertEqual(result['main'], 32000) result = hand.calculate_scores(han=26, fu=0, is_tsumo=False, is_dealer=False) self.assertEqual(result['main'], 64000) def test_calculate_scores_and_ron_by_dealer(self): hand = FinishedHand() result = hand.calculate_scores(han=1, fu=30, is_tsumo=False, is_dealer=True) self.assertEqual(result['main'], 1500) result = hand.calculate_scores(han=2, fu=30, is_tsumo=False, is_dealer=True) 
self.assertEqual(result['main'], 2900) result = hand.calculate_scores(han=3, fu=30, is_tsumo=False, is_dealer=True) self.assertEqual(result['main'], 5800) result = hand.calculate_scores(han=4, fu=30, is_tsumo=False, is_dealer=True) self.assertEqual(result['main'], 11600) result = hand.calculate_scores(han=5, fu=0, is_tsumo=False, is_dealer=True) self.assertEqual(result['main'], 12000) result = hand.calculate_scores(han=6, fu=0, is_tsumo=False, is_dealer=True) self.assertEqual(result['main'], 18000) result = hand.calculate_scores(han=8, fu=0, is_tsumo=False, is_dealer=True) self.assertEqual(result['main'], 24000) result = hand.calculate_scores(han=11, fu=0, is_tsumo=False, is_dealer=True) self.assertEqual(result['main'], 36000) result = hand.calculate_scores(han=13, fu=0, is_tsumo=False, is_dealer=True) self.assertEqual(result['main'], 48000) result = hand.calculate_scores(han=26, fu=0, is_tsumo=False, is_dealer=True) self.assertEqual(result['main'], 96000) def test_calculate_scores_and_tsumo(self): hand = FinishedHand() # result = hand.calculate_scores(han=1, fu=30, is_tsumo=True, is_dealer=False) # self.assertEqual(result['main'], 500) # self.assertEqual(result['additional'], 300) # # result = hand.calculate_scores(han=3, fu=30, is_tsumo=True, is_dealer=False) # self.assertEqual(result['main'], 2000) # self.assertEqual(result['additional'], 1000) result = hand.calculate_scores(han=3, fu=60, is_tsumo=True, is_dealer=False) self.assertEqual(result['main'], 3900) self.assertEqual(result['additional'], 2000) result = hand.calculate_scores(han=4, fu=30, is_tsumo=True, is_dealer=False) self.assertEqual(result['main'], 3900) self.assertEqual(result['additional'], 2000) result = hand.calculate_scores(han=5, fu=0, is_tsumo=True, is_dealer=False) self.assertEqual(result['main'], 4000) self.assertEqual(result['additional'], 2000) result = hand.calculate_scores(han=6, fu=0, is_tsumo=True, is_dealer=False) self.assertEqual(result['main'], 6000) 
self.assertEqual(result['additional'], 3000) result = hand.calculate_scores(han=8, fu=0, is_tsumo=True, is_dealer=False) self.assertEqual(result['main'], 8000) self.assertEqual(result['additional'], 4000) result = hand.calculate_scores(han=11, fu=0, is_tsumo=True, is_dealer=False) self.assertEqual(result['main'], 12000) self.assertEqual(result['additional'], 6000) result = hand.calculate_scores(han=13, fu=0, is_tsumo=True, is_dealer=False) self.assertEqual(result['main'], 16000) self.assertEqual(result['additional'], 8000) result = hand.calculate_scores(han=26, fu=0, is_tsumo=True, is_dealer=False) self.assertEqual(result['main'], 32000) self.assertEqual(result['additional'], 16000) def test_calculate_scores_and_tsumo_by_dealer(self): hand = FinishedHand() result = hand.calculate_scores(han=1, fu=30, is_tsumo=True, is_dealer=True) self.assertEqual(result['main'], 500) self.assertEqual(result['additional'], 500) result = hand.calculate_scores(han=3, fu=30, is_tsumo=True, is_dealer=True) self.assertEqual(result['main'], 2000) self.assertEqual(result['additional'], 2000) result = hand.calculate_scores(han=4, fu=30, is_tsumo=True, is_dealer=True) self.assertEqual(result['main'], 3900) self.assertEqual(result['additional'], 3900) result = hand.calculate_scores(han=5, fu=0, is_tsumo=True, is_dealer=True) self.assertEqual(result['main'], 4000) self.assertEqual(result['additional'], 4000) result = hand.calculate_scores(han=6, fu=0, is_tsumo=True, is_dealer=True) self.assertEqual(result['main'], 6000) self.assertEqual(result['additional'], 6000) result = hand.calculate_scores(han=8, fu=0, is_tsumo=True, is_dealer=True) self.assertEqual(result['main'], 8000) self.assertEqual(result['additional'], 8000) result = hand.calculate_scores(han=11, fu=0, is_tsumo=True, is_dealer=True) self.assertEqual(result['main'], 12000) self.assertEqual(result['additional'], 12000) result = hand.calculate_scores(han=13, fu=0, is_tsumo=True, is_dealer=True) self.assertEqual(result['main'], 16000) 
self.assertEqual(result['additional'], 16000) result = hand.calculate_scores(han=26, fu=0, is_tsumo=True, is_dealer=True) self.assertEqual(result['main'], 32000) self.assertEqual(result['additional'], 32000)
en
0.232497
# -*- coding: utf-8 -*- # result = hand.calculate_scores(han=1, fu=30, is_tsumo=True, is_dealer=False) # self.assertEqual(result['main'], 500) # self.assertEqual(result['additional'], 300) # # result = hand.calculate_scores(han=3, fu=30, is_tsumo=True, is_dealer=False) # self.assertEqual(result['main'], 2000) # self.assertEqual(result['additional'], 1000)
3.047774
3
Leetcode/198. House Robber.py
qinyang39/daily-leetcode
1
6622996
class Solution: def rob(self, nums: List[int]) -> int: if not nums: return 0 if len(nums)==1: return nums[0] dp = [0] * len(nums) dp[0] = nums[0] dp[1] = max(nums[0], nums[1]) for i in range(2, len(nums)): dp[i] = max(dp[i-2]+nums[i], dp[i-1]) return max(dp)
class Solution: def rob(self, nums: List[int]) -> int: if not nums: return 0 if len(nums)==1: return nums[0] dp = [0] * len(nums) dp[0] = nums[0] dp[1] = max(nums[0], nums[1]) for i in range(2, len(nums)): dp[i] = max(dp[i-2]+nums[i], dp[i-1]) return max(dp)
none
1
2.982303
3
Projects/turtle.py
eshaananand/HACKTOBERFEST_2021
0
6622997
import turtle turtle.bgcolor("black") turtle.pensize(2) def curve(): for i in range(200): turtle.right(1) turtle.forward(1) turtle.speed(0) turtle.color("pink","red") turtle.begin_fill() turtle.left(140) turtle.forward(111.65) curve() turtle.left(120) curve() turtle.forward(111.65) turtle.end_fill() turtle.hideturtle()
import turtle turtle.bgcolor("black") turtle.pensize(2) def curve(): for i in range(200): turtle.right(1) turtle.forward(1) turtle.speed(0) turtle.color("pink","red") turtle.begin_fill() turtle.left(140) turtle.forward(111.65) curve() turtle.left(120) curve() turtle.forward(111.65) turtle.end_fill() turtle.hideturtle()
none
1
3.720345
4
src/covid19sim/inference/server_utils.py
mila-iqia/COVI-AgentSim
13
6622998
""" Contains utility classes for remote inference inside the simulation. """ import datetime # import h5py import zarr import numcodecs import json import multiprocessing import multiprocessing.managers import numpy as np import os import pickle import platform import subprocess import sys import time import typing import xdelta3 import zmq from pathlib import Path from ctt.inference.infer import InferenceEngine import covid19sim.inference.clustering.base import covid19sim.inference.message_utils import covid19sim.inference.helper import covid19sim.inference.oracle import covid19sim.utils.utils expected_raw_packet_param_names = [ "start", "current_day", "human", "time_slot", "conf" ] expected_processed_packet_param_names = [ "current_day", "observed", "unobserved" ] default_poll_delay_ms = 500 default_data_buffer_size = ((10 * 1024) * 1024) # 10MB if os.environ.get("RAVEN_DIR", None) is not None: # if on MPI-IS cluster (htcondor + raven) backend_path = frontend_path = os.environ.get("RAVEN_DIR") elif os.environ.get("COVID19SIM_IPC_PATH", None) is not None: # if custom ipc path provided backend_path = frontend_path = os.environ.get("COVID19SIM_IPC_PATH") elif os.path.isdir("/Tmp"): # if on slurm frontend_path = Path("/Tmp/slurm.{}.0".format(os.environ.get("SLURM_JOB_ID"))) backend_path = Path("/Tmp/slurm.{}.0".format(os.environ.get("SLURM_JOB_ID"))) else: frontend_path = "/tmp" backend_path = "/tmp" default_inference_frontend_address = "ipc://" + os.path.join(frontend_path, "covid19sim-inference-frontend.ipc") default_inference_backend_address = "ipc://" + os.path.join(backend_path, "covid19sim-inference-backend.ipc") default_datacollect_frontend_address = "ipc://" + os.path.join(frontend_path, "covid19sim-datacollect-frontend.ipc") default_datacollect_backend_address = "ipc://" + os.path.join(backend_path, "covid19sim-datacollect-backend.ipc") class BaseWorker(multiprocessing.Process): """Spawns a single worker instance. These workers are managed by a broker class. 
They communicate with the broker using a backend connection. """ def __init__( self, backend_address: typing.AnyStr, identifier: typing.Any = "worker", ): super().__init__() self.backend_address = backend_address self.identifier = identifier self.stop_flag = multiprocessing.Event() self.reset_flag = multiprocessing.Event() self.running_flag = multiprocessing.Value("i", 0) self.packet_counter = multiprocessing.Value("i", 0) self.time_counter = multiprocessing.Value("f", 0.0) self.time_init = multiprocessing.Value("f", 0.0) def run(self): """Main loop of the worker process. Will receive brokered requests from the frontend, process them, and respond with the result through the broker. """ raise NotImplementedError def get_processed_count(self): """Returns the total number of processed requests by this worker.""" return int(self.packet_counter.value) def get_total_delay(self): """Returns the total time spent processing requests by this worker.""" return float(self.time_counter.value) def get_uptime(self): """Returns the total uptime of this worker.""" return time.time() - float(self.time_init.value) def is_running(self): """Returns whether this worker is running or not.""" return bool(self.running_flag.value) def get_averge_processing_delay(self): """Returns the average sample processing time between reception & response (in seconds).""" tot_delay, tot_packet_count = self.get_total_delay(), self.get_processed_count() if not tot_packet_count: return float("nan") return tot_delay / tot_packet_count def get_processing_uptime(self): """Returns the fraction of total uptime that the server spends processing requests.""" tot_process_time, tot_time = self.get_total_delay(), self.get_uptime() return tot_process_time / tot_time def stop_gracefully(self): """Stops the infinite data reception loop, allowing a clean shutdown.""" self.stop_flag.set() class BaseBroker: """Manages workers through a backend connection for load balancing.""" def __init__( self, workers: int, 
frontend_address: typing.AnyStr, backend_address: typing.AnyStr, verbose: bool = False, verbose_print_delay: float = 5., ): """ Initializes the broker's attributes (counters, condvars, ...). Args: workers: the number of independent workers to spawn to process requests. frontend_address: address through which to exchange requests with clients. backend_address: address through which to exchange requests with workers. verbose: toggles whether to print extra debug information while running. verbose_print_delay: specifies how often the extra debug info should be printed. """ self.workers = workers self.frontend_address = frontend_address self.backend_address = backend_address assert frontend_address != backend_address self.stop_flag = multiprocessing.Event() self.verbose = verbose self.verbose_print_delay = verbose_print_delay def run(self): """Main loop of the broker process. Will received requests from clients and dispatch them to available workers. """ raise NotImplementedError def stop_gracefully(self): """ Stops the infinite data reception loop, allowing a clean shutdown. """ self.stop_flag.set() class InferenceWorker(BaseWorker): """ Spawns a single inference worker instance. These workers are managed by the InferenceBroker class. They communicate with the broker using a backend connection. """ def __init__( self, experiment_directory: typing.AnyStr, backend_address: typing.AnyStr, identifier: typing.Any, cluster_mgr_map: typing.Dict, weights_path: typing.Optional[typing.AnyStr] = None, ): """ Initializes the inference worker's attributes (counters, condvars, ...). Args: experiment_directory: the path to the experiment directory to pass to the inference engine. backend_address: address through which to exchange inference requests with the broker. identifier: identifier for this worker (name, used for debug purposes only). cluster_mgr_map: map of human-to-cluster-managers to use for clustering. weights_path: the path to the specific weight file to use. 
If not, will use the 'best checkpoint weights' inside the experiment directory. """ super().__init__(backend_address=backend_address, identifier=identifier) self.experiment_directory = experiment_directory self.weights_path = weights_path self.cluster_mgr_map = cluster_mgr_map def run(self): """Main loop of the inference worker process. Will receive brokered requests from the frontend, process them, and respond with the result through the broker. """ engine = InferenceEngineWrapper(self.experiment_directory, self.weights_path) context = zmq.Context() socket = context.socket(zmq.REQ) socket.identity = self.identifier.encode() print(f"{self.identifier} contacting broker via: {self.backend_address}", flush=True) socket.connect(self.backend_address) socket.send(b"READY") # tell broker we're ready poller = zmq.Poller() poller.register(socket, zmq.POLLIN) self.time_init.value = time.time() self.time_counter.value = 0.0 self.packet_counter.value = 0 self.running_flag.value = 1 while not self.stop_flag.is_set(): if self.reset_flag.is_set(): self.time_counter.value = 0.0 self.packet_counter.value = 0 self.time_init.value = 0.0 self.reset_flag.clear() evts = dict(poller.poll(default_poll_delay_ms)) if socket in evts and evts[socket] == zmq.POLLIN: proc_start_time = time.time() address, empty, buffer = socket.recv_multipart() sample = pickle.loads(buffer) response = proc_human_batch( sample=sample, engine=engine, cluster_mgr_map=self.cluster_mgr_map, ) response = pickle.dumps(response) socket.send_multipart([address, b"", response]) with self.time_counter.get_lock(): self.time_counter.value += time.time() - proc_start_time with self.packet_counter.get_lock(): self.packet_counter.value += 1 self.running_flag.value = 0 socket.close() class InferenceBroker(BaseBroker): """Manages inference workers through a backend connection for load balancing.""" def __init__( self, model_exp_path: typing.AnyStr, workers: int, frontend_address: typing.AnyStr = 
default_inference_frontend_address, backend_address: typing.AnyStr = default_inference_backend_address, verbose: bool = False, verbose_print_delay: float = 5., weights_path: typing.Optional[typing.AnyStr] = None, ): """ Initializes the inference broker's attributes (counters, condvars, ...). Args: model_exp_path: the path to the experiment directory to pass to the inference engine. workers: the number of independent inference workers to spawn to process requests. frontend_address: address through which to exchange inference requests with clients. backend_address: address through which to exchange inference requests with workers. verbose: toggles whether to print extra debug information while running. verbose_print_delay: specifies how often the extra debug info should be printed. weights_path: the path to the specific weight file to use. If not, will use the 'best checkpoint weights' inside the experiment directory. """ super().__init__( workers=workers, frontend_address=frontend_address, backend_address=backend_address, verbose=verbose, verbose_print_delay=verbose_print_delay, ) self.model_exp_path = model_exp_path self.weights_path = weights_path def run(self): """Main loop of the inference broker process. Will received requests from clients and dispatch them to available workers. 
""" print(f"Initializing {self.workers} worker(s) from experiment: {self.model_exp_path}", flush=True) if self.weights_path is not None: print(f"\t will use weights directly from: {self.weights_path}", flush=True) context = zmq.Context() frontend = context.socket(zmq.ROUTER) print(f"Will listen for inference requests at: {self.frontend_address}", flush=True) frontend.bind(self.frontend_address) backend = context.socket(zmq.ROUTER) print(f"Will dispatch inference work at: {self.backend_address}", flush=True) backend.bind(self.backend_address) worker_backend_address = self.backend_address.replace("*", "localhost") worker_poller = zmq.Poller() worker_poller.register(backend, zmq.POLLIN) worker_poller.register(frontend, zmq.POLLIN) with multiprocessing.Manager() as mem_manager: worker_map = {} cluster_mgr_map = mem_manager.dict() available_worker_ids = [] for worker_idx in range(self.workers): worker_id = f"worker:{worker_idx}" print(f"Launching {worker_id}...", flush=True) worker = InferenceWorker( experiment_directory=self.model_exp_path, backend_address=worker_backend_address, identifier=worker_id, cluster_mgr_map=cluster_mgr_map, weights_path=self.weights_path, ) worker_map[worker_id] = worker worker.start() request = backend.recv_multipart() worker_id, empty, response = request[:3] assert worker_id == worker.identifier.encode() and response == b"READY" available_worker_ids.append(worker.identifier.encode()) last_update_timestamp = time.time() print("Entering dispatch loop...", flush=True) while not self.stop_flag.is_set(): evts = dict(worker_poller.poll(default_poll_delay_ms)) if backend in evts and evts[backend] == zmq.POLLIN: request = backend.recv_multipart() worker_id, empty, client = request[:3] assert worker_id not in available_worker_ids, \ f"got unexpected stuff from {worker_id}: {request}" available_worker_ids.append(worker_id) empty, reply = request[3:] frontend.send_multipart([client, b"", reply]) if available_worker_ids and frontend in evts and 
evts[frontend] == zmq.POLLIN: client, empty, request = frontend.recv_multipart() if request == b"RESET": print("got reset request, will clear all clusters", flush=True) assert len(available_worker_ids) == self.workers for k in list(cluster_mgr_map.keys()): del cluster_mgr_map[k] for worker in worker_map.values(): worker.reset_flag.set() frontend.send_multipart([client, b"", b"READY"]) else: worker_id = available_worker_ids.pop(0) backend.send_multipart([worker_id, b"", client, b"", request]) if self.verbose and time.time() - last_update_timestamp > self.verbose_print_delay: print(f" {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')} stats:") for worker_id, worker in worker_map.items(): packets = worker.get_processed_count() delay = worker.get_averge_processing_delay() uptime = worker.get_processing_uptime() print( f" {worker_id}:" f" running={worker.is_running()}" f" packets={packets}" f" avg_delay={delay:.6f}sec" f" proc_time_ratio={uptime:.1%}" f" nb_clusters={len(worker.cluster_mgr_map)}" ) sys.stdout.flush() last_update_timestamp = time.time() for w in worker_map.values(): w.stop_gracefully() w.join() class InferenceClient: """ Creates a client through which data samples can be sent for inference. This object will automatically be able to pick a proper remote inference engine. This object should be fairly lightweight and low-cost, so creating it once per day, per human *should* not create a significant overhead. """ def __init__( self, server_address: typing.Optional[typing.AnyStr] = default_inference_frontend_address, context: typing.Optional[zmq.Context] = None, ): """ Initializes the client's attributes (socket, context). Args: server_address: address of the inference server frontend to send requests to. context: zmq context to create i/o objects from. 
        """
        # lazily create a zmq context if the caller did not supply a shared one
        if context is None:
            context = zmq.Context()
        self.context = context
        # REQ socket: strict send/recv lockstep with the broker frontend
        self.socket = self.context.socket(zmq.REQ)
        if server_address is None:
            server_address = default_inference_frontend_address
        self.socket.connect(server_address)

    def infer(self, sample):
        """Forwards a data sample for the inference engine using pickle."""
        self.socket.send_pyobj(sample)
        return self.socket.recv_pyobj()

    def request_reset(self):
        """Asks the broker to clear all cluster state; blocks until it replies READY."""
        self.socket.send(b"RESET")
        response = self.socket.recv()
        assert response == b"READY"


class InferenceServer(InferenceBroker, multiprocessing.Process):
    """Wrapper object used to initialize a broker inside a separate process."""

    def __init__(self, **kwargs):
        # both bases are initialized explicitly; Process first so the broker
        # attributes do not get clobbered by Process.__init__
        multiprocessing.Process.__init__(self)
        InferenceBroker.__init__(self, **kwargs)


class InferenceEngineWrapper(InferenceEngine):
    """Inference engine wrapper used to download & extract experiment data, if necessary."""

    def __init__(self, experiment_directory, *args, **kwargs):
        # remote experiment archives (http/https URLs) are fetched into /tmp
        # before the underlying engine is constructed from the extracted dir
        if experiment_directory.startswith("http"):
            assert os.path.isdir("/tmp"), "don't know where to download data to..."
            experiment_root_directory = \
                covid19sim.utils.utils.download_exp_data_if_not_exist(experiment_directory, "/tmp")
            experiment_subdirectories = \
                [os.path.join(experiment_root_directory, p)
                 for p in os.listdir(experiment_root_directory)
                 if os.path.isdir(os.path.join(experiment_root_directory, p))]
            assert len(experiment_subdirectories) == 1, "should only have one dir per experiment zip"
            experiment_directory = experiment_subdirectories[0]
        super().__init__(experiment_directory, *args, **kwargs)


class DataCollectionWorker(BaseWorker):
    """
    Spawns a data collection worker instance.

    This worker is managed by the DataCollectionBroker class. It communicates with the broker
    using a backend connection.
""" def __init__( self, data_output_path: typing.AnyStr, backend_address: typing.AnyStr, human_count: int, simulation_days: int, compression: typing.Optional[typing.AnyStr] = "lzf", compression_opts: typing.Optional[typing.Any] = None, config_backup: typing.Optional[typing.Dict] = None, ): """ Initializes the data collection worker's attributes (counters, condvars, ...). Args: data_output_path: the path where the collected data should be saved. backend_address: address through which to exchange data collection requests with the broker. """ super().__init__(backend_address=backend_address, identifier="data-collector") self.data_output_path = data_output_path self.human_count = human_count self.simulation_days = simulation_days self.config_backup = config_backup self.chunk_size = 1 # These are not used anymore! # It's because zarr uses a meta-compressor (Blosc) to figure out which # compressor to use, and it appears to work well. self.compression = compression self.compression_opts = compression_opts def run(self): """Main loop of the data collection worker process. Will receive brokered requests from the frontend, process them, and respond with the result through the broker. 
        """
        context = zmq.Context()
        socket = context.socket(zmq.REP)
        # time out recv() periodically so the stop/reset flags can be polled
        socket.setsockopt(zmq.RCVTIMEO, default_poll_delay_ms)
        socket.connect(self.backend_address)
        self.time_init.value = time.time()
        self.time_counter.value = 0.0
        self.packet_counter.value = 0
        self.running_flag.value = 1
        print(f"creating zarr collection file at: {self.data_output_path}, "
              f"but ignoring compression flag {self.compression}", flush=True)
        fd = zarr.open(self.data_output_path, "w")
        # record provenance metadata on the output archive
        try:
            fd.attrs["git_hash"] = covid19sim.utils.utils.get_git_revision_hash()
        except subprocess.CalledProcessError:
            fd.attrs["git_hash"] = "NO_GIT"
        fd.attrs["creation_date"] = datetime.datetime.now().isoformat()
        fd.attrs["creator"] = str(platform.node())
        config_backup = json.dumps(covid19sim.utils.utils.dumps_conf(self.config_backup)) \
            if self.config_backup else None
        fd.attrs["config"] = config_backup
        dataset = fd.create_dataset(
            "dataset",
            shape=(self.simulation_days, 24, self.human_count,),
            chunks=(1, None, None),  # one chunk per day, i.e. 1 x 24 x human_count
            dtype=object,
            object_codec=numcodecs.Pickle(),
        )
        is_filled = fd.create_dataset(
            "is_filled",
            shape=(self.simulation_days, 24, self.human_count,),
            dtype=bool,
            fillvalue=False
        )
        total_dataset_bytes = 0
        sample_idx = 0
        current_day = 0
        # per-day in-memory caches; flushed to the zarr arrays once the day advances
        dataset_cache_factory = lambda: np.zeros(shape=(24, self.human_count), dtype=object)
        is_filled_cache_factory = lambda: np.zeros(shape=(24, self.human_count), dtype=bool)
        dataset_cache = dataset_cache_factory()
        is_filled_cache = is_filled_cache_factory()
        while not self.stop_flag.is_set():
            if self.reset_flag.is_set():
                self.time_counter.value = 0.0
                self.packet_counter.value = 0
                self.time_init.value = 0.0
                self.reset_flag.clear()
            try:
                buffer = socket.recv()
            except zmq.error.Again:
                # recv timed out; loop back to re-check the stop/reset flags
                continue
            proc_start_time = time.time()
            day_idx, hour_idx, human_idx, buffer = pickle.loads(buffer)
            total_dataset_bytes += len(buffer)
            if day_idx == (current_day + 1):
                # It's a new day
                # Dump the cache
                dataset[current_day, :, :] = dataset_cache
                is_filled[current_day, :, :] = is_filled_cache
                # Make a new cache
                dataset_cache = dataset_cache_factory()
                is_filled_cache = is_filled_cache_factory()
                # Update the current_day counter
                current_day += 1
            elif day_idx == current_day:
                pass
            else:
                # packets are expected in non-decreasing day order, one day at a time
                raise RuntimeError(f"The worker was at day {current_day}, but got a "
                                   f"message from day {day_idx}. Bonk!")
            # Write the pickle and is_filled to cache
            dataset_cache[hour_idx, human_idx] = pickle.loads(buffer)
            is_filled_cache[hour_idx, human_idx] = True
            # Note to future self: this is what it used to be:
            # dataset[day_idx, hour_idx, human_idx] = pickle.loads(buffer)
            # is_filled[day_idx, hour_idx, human_idx] = True
            socket.send(str(sample_idx).encode())
            sample_idx += 1
            with self.time_counter.get_lock():
                self.time_counter.value += time.time() - proc_start_time
            with self.packet_counter.get_lock():
                self.packet_counter.value += 1
        # NOTE(review): the cache for the final day is never flushed to `dataset`
        # after the loop exits -- verify that last-day samples are not lost.
        self.running_flag.value = 0
        socket.close()
        dataset.attrs["total_samples"] = sample_idx
        dataset.attrs["total_bytes"] = total_dataset_bytes


class DataCollectionBroker(BaseBroker):
    """Manages exchanges with the data collection worker by buffering client requests."""

    def __init__(
            self,
            data_output_path: typing.AnyStr,
            human_count: int,
            simulation_days: int,
            data_buffer_size: int = default_data_buffer_size,  # NOTE: in bytes!
            frontend_address: typing.AnyStr = default_datacollect_frontend_address,
            backend_address: typing.AnyStr = default_datacollect_backend_address,
            compression: typing.Optional[typing.AnyStr] = "lzf",
            compression_opts: typing.Optional[typing.Any] = None,
            verbose: bool = False,
            verbose_print_delay: float = 5.,
            config_backup: typing.Optional[typing.Dict] = None,
    ):
        """
        Initializes the data collection broker's attributes (counters, condvars, ...).

        Args:
            data_output_path: the path where the collected data should be saved.
            data_buffer_size: the amount of data that can be buffered by the broker (in bytes).
            frontend_address: address through which to exchange data logging requests with clients.
backend_address: address through which to exchange data logging requests with the worker. verbose: toggles whether to print extra debug information while running. verbose_print_delay: specifies how often the extra debug info should be printed. """ super().__init__( workers=1, # cannot safely write more than one sample at a time with this impl frontend_address=frontend_address, backend_address=backend_address, verbose=verbose, verbose_print_delay=verbose_print_delay, ) self.data_output_path = data_output_path self.human_count = human_count self.simulation_days = simulation_days self.data_buffer_size = data_buffer_size self.compression = compression self.compression_opts = compression_opts self.config_backup = config_backup def run(self): """Main loop of the data collection broker process. Will received requests from clients and dispatch them to the data collection worker. """ context = zmq.Context() frontend = context.socket(zmq.ROUTER) print(f"Will listen for data collection requests at: {self.frontend_address}", flush=True) frontend.bind(self.frontend_address) backend = context.socket(zmq.REQ) print(f"Will dispatch data collection work at: {self.backend_address}", flush=True) backend.bind(self.backend_address) worker_backend_address = self.backend_address.replace("*", "localhost") worker_poller = zmq.Poller() worker_poller.register(frontend, zmq.POLLIN) print(f"Launching worker...", flush=True) worker = DataCollectionWorker( data_output_path=self.data_output_path, backend_address=worker_backend_address, human_count=self.human_count, simulation_days=self.simulation_days, compression=self.compression, compression_opts=self.compression_opts, config_backup=self.config_backup, ) worker.start() backend.setsockopt(zmq.SNDTIMEO, 5) last_update_timestamp = time.time() curr_queue_size = 0 expected_sample_idx = 0 request_queue = [] print("Entering dispatch loop...", flush=True) while not self.stop_flag.is_set(): evts = dict(worker_poller.poll(default_poll_delay_ms if not 
request_queue else 1)) if curr_queue_size < self.data_buffer_size and \ frontend in evts and evts[frontend] == zmq.POLLIN: client, empty, request = frontend.recv_multipart() if request == b"RESET": worker.reset_flag.set() frontend.send_multipart([client, b"", b"READY"]) else: request_queue.append(request) curr_queue_size += len(request) frontend.send_multipart([client, b"", b"GOTCHA"]) if request_queue: next_packet = request_queue[0] assert curr_queue_size >= len(next_packet) try: backend.send(next_packet) written_sample_idx_str = backend.recv() assert expected_sample_idx == int(written_sample_idx_str.decode()) expected_sample_idx = expected_sample_idx + 1 curr_queue_size -= len(next_packet) request_queue.pop(0) except zmq.error.Again: pass if self.verbose and time.time() - last_update_timestamp > self.verbose_print_delay: print(f" {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')} stats:") packets = worker.get_processed_count() delay = worker.get_averge_processing_delay() uptime = worker.get_processing_uptime() print( f" running={worker.is_running()} packets={packets}" f" avg_delay={delay:.6f}sec proc_time_ratio={uptime:.1%}", flush=True, ) last_update_timestamp = time.time() backend.setsockopt(zmq.SNDTIMEO, -1) while request_queue: next_packet = request_queue.pop(0) backend.send_multipart(next_packet) curr_queue_size -= len(next_packet[-1]) worker.stop_gracefully() worker.join() class DataCollectionClient: """ Creates a client through which data samples can be sent for collection. This object should be fairly lightweight and low-cost, so creating it once per day, per human *should* not create a significant overhead. """ def __init__( self, server_address: typing.Optional[typing.AnyStr] = default_datacollect_frontend_address, context: typing.Optional[zmq.Context] = None, ): """ Initializes the client's attributes (socket, context). Args: server_address: address of the data collection server frontend to send requests to. 
        context: zmq context to create i/o objects from.
        """
        # lazily create a zmq context if the caller did not supply a shared one
        if context is None:
            context = zmq.Context()
        self.context = context
        # REQ socket: strict send/recv lockstep with the data collection broker
        self.socket = self.context.socket(zmq.REQ)
        if server_address is None:
            server_address = default_datacollect_frontend_address
        self.socket.connect(server_address)

    def write(self, day_idx, hour_idx, human_idx, sample):
        """Forwards a data sample for the data writer using pickle.

        The sample is pre-pickled here so the broker/worker can track its byte size
        without deserializing it; blocks until the broker acknowledges receipt.
        """
        self.socket.send_pyobj((day_idx, hour_idx, human_idx, pickle.dumps(sample)))
        response = self.socket.recv()
        assert response == b"GOTCHA"

    def request_reset(self):
        """Asks the broker to reset the worker's counters; blocks until it replies READY."""
        self.socket.send(b"RESET")
        response = self.socket.recv()
        assert response == b"READY"


class DataCollectionServer(DataCollectionBroker, multiprocessing.Process):
    """Wrapper object used to initialize a broker inside a separate process."""

    def __init__(self, **kwargs):
        # both bases are initialized explicitly; Process first so the broker
        # attributes do not get clobbered by Process.__init__
        multiprocessing.Process.__init__(self)
        DataCollectionBroker.__init__(self, **kwargs)


def proc_human_batch(
        sample,
        engine,
        cluster_mgr_map,
        clusters_dump_path: typing.Optional[typing.AnyStr] = None,
):
    """
    Processes a chunk of human data, clustering messages and computing new risk levels.

    Args:
        sample: a list of per-human parameter dictionaries necessary for clustering+inference.
        engine: the inference engine, pre-instantiated with the right experiment config.
        cluster_mgr_map: map of human-to-cluster-managers to use for clustering.
        clusters_dump_path: defines where to dump clusters (if required).

    Returns:
        The clustering + risk level update results.
""" assert isinstance(sample, list) and all([isinstance(p, dict) for p in sample]) ref_timestamp = None for params in sample: human_name = params["human"].name timestamp = params["start"] + datetime.timedelta(days=params["current_day"], hours=params["time_slot"]) if ref_timestamp is None: ref_timestamp = timestamp else: assert ref_timestamp == timestamp, "how can we possibly have different timestamps here" cluster_mgr_hash = str(params["city_hash"]) + ":" + human_name params["cluster_mgr_hash"] = cluster_mgr_hash if cluster_mgr_hash not in cluster_mgr_map: cluster_algo_type = covid19sim.inference.clustering.base.get_cluster_manager_type( params["conf"].get("CLUSTER_ALGO_TYPE", "blind"), ) cluster_mgr = cluster_algo_type( max_history_offset=datetime.timedelta(days=params["conf"].get("TRACING_N_DAYS_HISTORY")), add_orphan_updates_as_clusters=True, generate_embeddings_by_timestamp=True, generate_backw_compat_embeddings=True, ) else: cluster_mgr = cluster_mgr_map[cluster_mgr_hash] assert not cluster_mgr._is_being_used, "two processes should never try to access the same human" cluster_mgr._is_being_used = True params["cluster_mgr"] = cluster_mgr results = [_proc_human(params, engine) for params in sample] for params in sample: cluster_mgr = params["cluster_mgr"] assert cluster_mgr._is_being_used cluster_mgr._is_being_used = False cluster_mgr_map[params["cluster_mgr_hash"]] = cluster_mgr if clusters_dump_path and ref_timestamp: os.makedirs(clusters_dump_path, exist_ok=True) curr_date_str = ref_timestamp.strftime("%Y%m%d-%H%M%S") curr_dump_path = os.path.join(clusters_dump_path, curr_date_str + ".pkl") to_dump = {params["human"].name: params["cluster_mgr"] for params in sample} with open(curr_dump_path, "wb") as fd: pickle.dump(to_dump, fd) return results def _proc_human(params, inference_engine): """Internal implementation of the `proc_human_batch` function.""" assert isinstance(params, dict) and \ all([p in params for p in expected_raw_packet_param_names]), \ 
"unexpected/broken _proc_human input format between simulator and inference service" conf = params["conf"] todays_date = params["start"] + datetime.timedelta(days=params["current_day"], hours=params["time_slot"]) human, cluster_mgr = params["human"], params["cluster_mgr"] # set the current day as the refresh timestamp to auto-purge outdated messages in advance cluster_mgr.set_current_timestamp(todays_date) update_messages = covid19sim.inference.message_utils.batch_messages(human.update_messages) cluster_mgr.add_messages(messages=update_messages, current_timestamp=todays_date) # Format for supervised learning / transformer inference is_exposed, exposure_day = covid19sim.inference.helper.exposure_array(human.infection_timestamp, todays_date, conf) is_recovered, recovery_day = covid19sim.inference.helper.recovered_array(human.recovered_timestamp, todays_date, conf) candidate_encounters, exposure_encounter = covid19sim.inference.helper.candidate_exposures(cluster_mgr) reported_symptoms = human.rolling_all_reported_symptoms true_symptoms = human.rolling_all_symptoms # FIXME: DIRTY DIRTY HACK; Nasim's DataLoader expects that the embeddings contain an absolute # day index instead of a relative offset (i.e. the exact simulation day instead of [0,14])... if len(candidate_encounters): candidate_encounters[:, 3] = params["current_day"] - candidate_encounters[:, 3] # Nasim also does some masking with a hard-coded 14-day history length, let's do the same... 
        # drop encounters older than the (hard-coded) 14-day history window
        valid_encounter_mask = candidate_encounters[:, 3] > (params["current_day"] - 14)
        candidate_encounters = candidate_encounters[valid_encounter_mask]
        exposure_encounter = exposure_encounter[valid_encounter_mask]

    # split features into what the app could actually observe vs. simulator-only ground truth
    daily_output = {
        "current_day": params["current_day"],
        "observed": {
            "reported_symptoms": reported_symptoms,
            "candidate_encounters": candidate_encounters,
            "test_results": human.test_results,
            "preexisting_conditions": human.obs_preexisting_conditions,
            "age": human.obs_age,
            "sex": human.obs_sex,
            "risk_mapping": conf.get("RISK_MAPPING"),
        },
        "unobserved": {
            "human_id": human.name,
            "incubation_days": human.incubation_days,
            "recovery_days": human.recovery_days,
            "true_symptoms": true_symptoms,
            "is_exposed": is_exposed,
            "exposure_encounter": exposure_encounter,
            "exposure_day": exposure_day,
            "is_recovered": is_recovered,
            "recovery_day": recovery_day,
            "infectiousness": np.array(human.infectiousnesses),
            "true_preexisting_conditions": human.preexisting_conditions,
            "true_age": human.age,
            "true_sex": human.sex,
            "viral_load_to_infectiousness_multiplier": human.viral_load_to_infectiousness_multiplier,
            "infection_timestamp": human.infection_timestamp,
            "recovered_timestamp": human.recovered_timestamp,
        }
    }

    if conf.get("COLLECT_TRAINING_DATA"):
        # a fresh client per call; the class is documented as cheap to construct
        data_collect_client = DataCollectionClient(
            server_address=conf.get("data_collection_server_address",
                                    default_datacollect_frontend_address),
        )
        human_id = int(human.name.split(":")[-1])
        data_collect_client.write(params["current_day"], params["time_slot"], human_id, daily_output)

    inference_result, risk_history = None, None
    if conf.get("USE_ORACLE"):
        # oracle mode bypasses the model and derives risk from ground truth
        risk_history = covid19sim.inference.oracle.oracle(human, conf)
    elif conf.get("RISK_MODEL") == "transformer":
        # NOTE(review): a previous comment claimed inference is skipped when the
        # cluster count is zero, but no such check exists here -- the engine is
        # always queried; confirm whether a short-circuit is intended.
        inference_result = inference_engine.infer(daily_output)

    if inference_result is not None:
        risk_history = inference_result['infectiousness']

    return human.name, risk_history
""" Contains utility classes for remote inference inside the simulation. """ import datetime # import h5py import zarr import numcodecs import json import multiprocessing import multiprocessing.managers import numpy as np import os import pickle import platform import subprocess import sys import time import typing import xdelta3 import zmq from pathlib import Path from ctt.inference.infer import InferenceEngine import covid19sim.inference.clustering.base import covid19sim.inference.message_utils import covid19sim.inference.helper import covid19sim.inference.oracle import covid19sim.utils.utils expected_raw_packet_param_names = [ "start", "current_day", "human", "time_slot", "conf" ] expected_processed_packet_param_names = [ "current_day", "observed", "unobserved" ] default_poll_delay_ms = 500 default_data_buffer_size = ((10 * 1024) * 1024) # 10MB if os.environ.get("RAVEN_DIR", None) is not None: # if on MPI-IS cluster (htcondor + raven) backend_path = frontend_path = os.environ.get("RAVEN_DIR") elif os.environ.get("COVID19SIM_IPC_PATH", None) is not None: # if custom ipc path provided backend_path = frontend_path = os.environ.get("COVID19SIM_IPC_PATH") elif os.path.isdir("/Tmp"): # if on slurm frontend_path = Path("/Tmp/slurm.{}.0".format(os.environ.get("SLURM_JOB_ID"))) backend_path = Path("/Tmp/slurm.{}.0".format(os.environ.get("SLURM_JOB_ID"))) else: frontend_path = "/tmp" backend_path = "/tmp" default_inference_frontend_address = "ipc://" + os.path.join(frontend_path, "covid19sim-inference-frontend.ipc") default_inference_backend_address = "ipc://" + os.path.join(backend_path, "covid19sim-inference-backend.ipc") default_datacollect_frontend_address = "ipc://" + os.path.join(frontend_path, "covid19sim-datacollect-frontend.ipc") default_datacollect_backend_address = "ipc://" + os.path.join(backend_path, "covid19sim-datacollect-backend.ipc") class BaseWorker(multiprocessing.Process): """Spawns a single worker instance. These workers are managed by a broker class. 
    They communicate with the broker using a backend connection.
    """

    def __init__(
            self,
            backend_address: typing.AnyStr,
            identifier: typing.Any = "worker",
    ):
        super().__init__()
        self.backend_address = backend_address
        self.identifier = identifier
        # inter-process control flags & stat counters, shared with the parent
        # (broker) process; counters are updated under their own locks in run()
        self.stop_flag = multiprocessing.Event()
        self.reset_flag = multiprocessing.Event()
        self.running_flag = multiprocessing.Value("i", 0)
        self.packet_counter = multiprocessing.Value("i", 0)
        self.time_counter = multiprocessing.Value("f", 0.0)
        self.time_init = multiprocessing.Value("f", 0.0)

    def run(self):
        """Main loop of the worker process.

        Will receive brokered requests from the frontend, process them, and respond with the
        result through the broker.
        """
        raise NotImplementedError

    def get_processed_count(self):
        """Returns the total number of processed requests by this worker."""
        return int(self.packet_counter.value)

    def get_total_delay(self):
        """Returns the total time spent processing requests by this worker."""
        return float(self.time_counter.value)

    def get_uptime(self):
        """Returns the total uptime of this worker."""
        return time.time() - float(self.time_init.value)

    def is_running(self):
        """Returns whether this worker is running or not."""
        return bool(self.running_flag.value)

    def get_averge_processing_delay(self):
        """Returns the average sample processing time between reception & response (in seconds)."""
        # NOTE(review): the typo in this method name ("averge") is kept intact
        # because callers elsewhere in the file already use it.
        tot_delay, tot_packet_count = self.get_total_delay(), self.get_processed_count()
        if not tot_packet_count:
            # nothing processed yet; avoid a zero division
            return float("nan")
        return tot_delay / tot_packet_count

    def get_processing_uptime(self):
        """Returns the fraction of total uptime that the server spends processing requests."""
        tot_process_time, tot_time = self.get_total_delay(), self.get_uptime()
        return tot_process_time / tot_time

    def stop_gracefully(self):
        """Stops the infinite data reception loop, allowing a clean shutdown."""
        self.stop_flag.set()


class BaseBroker:
    """Manages workers through a backend connection for load balancing."""

    def __init__(
            self,
            workers: int,
frontend_address: typing.AnyStr, backend_address: typing.AnyStr, verbose: bool = False, verbose_print_delay: float = 5., ): """ Initializes the broker's attributes (counters, condvars, ...). Args: workers: the number of independent workers to spawn to process requests. frontend_address: address through which to exchange requests with clients. backend_address: address through which to exchange requests with workers. verbose: toggles whether to print extra debug information while running. verbose_print_delay: specifies how often the extra debug info should be printed. """ self.workers = workers self.frontend_address = frontend_address self.backend_address = backend_address assert frontend_address != backend_address self.stop_flag = multiprocessing.Event() self.verbose = verbose self.verbose_print_delay = verbose_print_delay def run(self): """Main loop of the broker process. Will received requests from clients and dispatch them to available workers. """ raise NotImplementedError def stop_gracefully(self): """ Stops the infinite data reception loop, allowing a clean shutdown. """ self.stop_flag.set() class InferenceWorker(BaseWorker): """ Spawns a single inference worker instance. These workers are managed by the InferenceBroker class. They communicate with the broker using a backend connection. """ def __init__( self, experiment_directory: typing.AnyStr, backend_address: typing.AnyStr, identifier: typing.Any, cluster_mgr_map: typing.Dict, weights_path: typing.Optional[typing.AnyStr] = None, ): """ Initializes the inference worker's attributes (counters, condvars, ...). Args: experiment_directory: the path to the experiment directory to pass to the inference engine. backend_address: address through which to exchange inference requests with the broker. identifier: identifier for this worker (name, used for debug purposes only). cluster_mgr_map: map of human-to-cluster-managers to use for clustering. weights_path: the path to the specific weight file to use. 
If not, will use the 'best checkpoint weights' inside the experiment directory. """ super().__init__(backend_address=backend_address, identifier=identifier) self.experiment_directory = experiment_directory self.weights_path = weights_path self.cluster_mgr_map = cluster_mgr_map def run(self): """Main loop of the inference worker process. Will receive brokered requests from the frontend, process them, and respond with the result through the broker. """ engine = InferenceEngineWrapper(self.experiment_directory, self.weights_path) context = zmq.Context() socket = context.socket(zmq.REQ) socket.identity = self.identifier.encode() print(f"{self.identifier} contacting broker via: {self.backend_address}", flush=True) socket.connect(self.backend_address) socket.send(b"READY") # tell broker we're ready poller = zmq.Poller() poller.register(socket, zmq.POLLIN) self.time_init.value = time.time() self.time_counter.value = 0.0 self.packet_counter.value = 0 self.running_flag.value = 1 while not self.stop_flag.is_set(): if self.reset_flag.is_set(): self.time_counter.value = 0.0 self.packet_counter.value = 0 self.time_init.value = 0.0 self.reset_flag.clear() evts = dict(poller.poll(default_poll_delay_ms)) if socket in evts and evts[socket] == zmq.POLLIN: proc_start_time = time.time() address, empty, buffer = socket.recv_multipart() sample = pickle.loads(buffer) response = proc_human_batch( sample=sample, engine=engine, cluster_mgr_map=self.cluster_mgr_map, ) response = pickle.dumps(response) socket.send_multipart([address, b"", response]) with self.time_counter.get_lock(): self.time_counter.value += time.time() - proc_start_time with self.packet_counter.get_lock(): self.packet_counter.value += 1 self.running_flag.value = 0 socket.close() class InferenceBroker(BaseBroker): """Manages inference workers through a backend connection for load balancing.""" def __init__( self, model_exp_path: typing.AnyStr, workers: int, frontend_address: typing.AnyStr = 
default_inference_frontend_address, backend_address: typing.AnyStr = default_inference_backend_address, verbose: bool = False, verbose_print_delay: float = 5., weights_path: typing.Optional[typing.AnyStr] = None, ): """ Initializes the inference broker's attributes (counters, condvars, ...). Args: model_exp_path: the path to the experiment directory to pass to the inference engine. workers: the number of independent inference workers to spawn to process requests. frontend_address: address through which to exchange inference requests with clients. backend_address: address through which to exchange inference requests with workers. verbose: toggles whether to print extra debug information while running. verbose_print_delay: specifies how often the extra debug info should be printed. weights_path: the path to the specific weight file to use. If not, will use the 'best checkpoint weights' inside the experiment directory. """ super().__init__( workers=workers, frontend_address=frontend_address, backend_address=backend_address, verbose=verbose, verbose_print_delay=verbose_print_delay, ) self.model_exp_path = model_exp_path self.weights_path = weights_path def run(self): """Main loop of the inference broker process. Will received requests from clients and dispatch them to available workers. 
""" print(f"Initializing {self.workers} worker(s) from experiment: {self.model_exp_path}", flush=True) if self.weights_path is not None: print(f"\t will use weights directly from: {self.weights_path}", flush=True) context = zmq.Context() frontend = context.socket(zmq.ROUTER) print(f"Will listen for inference requests at: {self.frontend_address}", flush=True) frontend.bind(self.frontend_address) backend = context.socket(zmq.ROUTER) print(f"Will dispatch inference work at: {self.backend_address}", flush=True) backend.bind(self.backend_address) worker_backend_address = self.backend_address.replace("*", "localhost") worker_poller = zmq.Poller() worker_poller.register(backend, zmq.POLLIN) worker_poller.register(frontend, zmq.POLLIN) with multiprocessing.Manager() as mem_manager: worker_map = {} cluster_mgr_map = mem_manager.dict() available_worker_ids = [] for worker_idx in range(self.workers): worker_id = f"worker:{worker_idx}" print(f"Launching {worker_id}...", flush=True) worker = InferenceWorker( experiment_directory=self.model_exp_path, backend_address=worker_backend_address, identifier=worker_id, cluster_mgr_map=cluster_mgr_map, weights_path=self.weights_path, ) worker_map[worker_id] = worker worker.start() request = backend.recv_multipart() worker_id, empty, response = request[:3] assert worker_id == worker.identifier.encode() and response == b"READY" available_worker_ids.append(worker.identifier.encode()) last_update_timestamp = time.time() print("Entering dispatch loop...", flush=True) while not self.stop_flag.is_set(): evts = dict(worker_poller.poll(default_poll_delay_ms)) if backend in evts and evts[backend] == zmq.POLLIN: request = backend.recv_multipart() worker_id, empty, client = request[:3] assert worker_id not in available_worker_ids, \ f"got unexpected stuff from {worker_id}: {request}" available_worker_ids.append(worker_id) empty, reply = request[3:] frontend.send_multipart([client, b"", reply]) if available_worker_ids and frontend in evts and 
evts[frontend] == zmq.POLLIN: client, empty, request = frontend.recv_multipart() if request == b"RESET": print("got reset request, will clear all clusters", flush=True) assert len(available_worker_ids) == self.workers for k in list(cluster_mgr_map.keys()): del cluster_mgr_map[k] for worker in worker_map.values(): worker.reset_flag.set() frontend.send_multipart([client, b"", b"READY"]) else: worker_id = available_worker_ids.pop(0) backend.send_multipart([worker_id, b"", client, b"", request]) if self.verbose and time.time() - last_update_timestamp > self.verbose_print_delay: print(f" {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')} stats:") for worker_id, worker in worker_map.items(): packets = worker.get_processed_count() delay = worker.get_averge_processing_delay() uptime = worker.get_processing_uptime() print( f" {worker_id}:" f" running={worker.is_running()}" f" packets={packets}" f" avg_delay={delay:.6f}sec" f" proc_time_ratio={uptime:.1%}" f" nb_clusters={len(worker.cluster_mgr_map)}" ) sys.stdout.flush() last_update_timestamp = time.time() for w in worker_map.values(): w.stop_gracefully() w.join() class InferenceClient: """ Creates a client through which data samples can be sent for inference. This object will automatically be able to pick a proper remote inference engine. This object should be fairly lightweight and low-cost, so creating it once per day, per human *should* not create a significant overhead. """ def __init__( self, server_address: typing.Optional[typing.AnyStr] = default_inference_frontend_address, context: typing.Optional[zmq.Context] = None, ): """ Initializes the client's attributes (socket, context). Args: server_address: address of the inference server frontend to send requests to. context: zmq context to create i/o objects from. 
""" if context is None: context = zmq.Context() self.context = context self.socket = self.context.socket(zmq.REQ) if server_address is None: server_address = default_inference_frontend_address self.socket.connect(server_address) def infer(self, sample): """Forwards a data sample for the inference engine using pickle.""" self.socket.send_pyobj(sample) return self.socket.recv_pyobj() def request_reset(self): self.socket.send(b"RESET") response = self.socket.recv() assert response == b"READY" class InferenceServer(InferenceBroker, multiprocessing.Process): """Wrapper object used to initialize a broker inside a separate process.""" def __init__(self, **kwargs): multiprocessing.Process.__init__(self) InferenceBroker.__init__(self, **kwargs) class InferenceEngineWrapper(InferenceEngine): """Inference engine wrapper used to download & extract experiment data, if necessary.""" def __init__(self, experiment_directory, *args, **kwargs): if experiment_directory.startswith("http"): assert os.path.isdir("/tmp"), "don't know where to download data to..." experiment_root_directory = \ covid19sim.utils.utils.download_exp_data_if_not_exist(experiment_directory, "/tmp") experiment_subdirectories = \ [os.path.join(experiment_root_directory, p) for p in os.listdir(experiment_root_directory) if os.path.isdir(os.path.join(experiment_root_directory, p))] assert len(experiment_subdirectories) == 1, "should only have one dir per experiment zip" experiment_directory = experiment_subdirectories[0] super().__init__(experiment_directory, *args, **kwargs) class DataCollectionWorker(BaseWorker): """ Spawns a data collection worker instance. This workers is managed by the DataCollectionBroker class. It communicates with the broker using a backend connection. 
""" def __init__( self, data_output_path: typing.AnyStr, backend_address: typing.AnyStr, human_count: int, simulation_days: int, compression: typing.Optional[typing.AnyStr] = "lzf", compression_opts: typing.Optional[typing.Any] = None, config_backup: typing.Optional[typing.Dict] = None, ): """ Initializes the data collection worker's attributes (counters, condvars, ...). Args: data_output_path: the path where the collected data should be saved. backend_address: address through which to exchange data collection requests with the broker. """ super().__init__(backend_address=backend_address, identifier="data-collector") self.data_output_path = data_output_path self.human_count = human_count self.simulation_days = simulation_days self.config_backup = config_backup self.chunk_size = 1 # These are not used anymore! # It's because zarr uses a meta-compressor (Blosc) to figure out which # compressor to use, and it appears to work well. self.compression = compression self.compression_opts = compression_opts def run(self): """Main loop of the data collection worker process. Will receive brokered requests from the frontend, process them, and respond with the result through the broker. 
""" context = zmq.Context() socket = context.socket(zmq.REP) socket.setsockopt(zmq.RCVTIMEO, default_poll_delay_ms) socket.connect(self.backend_address) self.time_init.value = time.time() self.time_counter.value = 0.0 self.packet_counter.value = 0 self.running_flag.value = 1 print(f"creating zarr collection file at: {self.data_output_path}, " f"but ignoring compression flag {self.compression}", flush=True) fd = zarr.open(self.data_output_path, "w") try: fd.attrs["git_hash"] = covid19sim.utils.utils.get_git_revision_hash() except subprocess.CalledProcessError: fd.attrs["git_hash"] = "NO_GIT" fd.attrs["creation_date"] = datetime.datetime.now().isoformat() fd.attrs["creator"] = str(platform.node()) config_backup = json.dumps(covid19sim.utils.utils.dumps_conf(self.config_backup)) \ if self.config_backup else None fd.attrs["config"] = config_backup dataset = fd.create_dataset( "dataset", shape=(self.simulation_days, 24, self.human_count,), chunks=(1, None, None), # 1 x 6 x human_count dtype=object, object_codec=numcodecs.Pickle(), ) is_filled = fd.create_dataset( "is_filled", shape=(self.simulation_days, 24, self.human_count,), dtype=bool, fillvalue=False ) total_dataset_bytes = 0 sample_idx = 0 current_day = 0 dataset_cache_factory = lambda: np.zeros(shape=(24, self.human_count), dtype=object) is_filled_cache_factory = lambda: np.zeros(shape=(24, self.human_count), dtype=bool) dataset_cache = dataset_cache_factory() is_filled_cache = is_filled_cache_factory() while not self.stop_flag.is_set(): if self.reset_flag.is_set(): self.time_counter.value = 0.0 self.packet_counter.value = 0 self.time_init.value = 0.0 self.reset_flag.clear() try: buffer = socket.recv() except zmq.error.Again: continue proc_start_time = time.time() day_idx, hour_idx, human_idx, buffer = pickle.loads(buffer) total_dataset_bytes += len(buffer) if day_idx == (current_day + 1): # It's a new day # Dump the cache dataset[current_day, :, :] = dataset_cache is_filled[current_day, :, :] = is_filled_cache # 
Make a new cache dataset_cache = dataset_cache_factory() is_filled_cache = is_filled_cache_factory() # Update the current_day counter current_day += 1 elif day_idx == current_day: pass else: raise RuntimeError(f"The worker was at day {current_day}, but got a " f"message from day {day_idx}. Bonk!") # Write the pickle and is_filled to cache dataset_cache[hour_idx, human_idx] = pickle.loads(buffer) is_filled_cache[hour_idx, human_idx] = True # Note to future self: this is what it used to be: # dataset[day_idx, hour_idx, human_idx] = pickle.loads(buffer) # is_filled[day_idx, hour_idx, human_idx] = True socket.send(str(sample_idx).encode()) sample_idx += 1 with self.time_counter.get_lock(): self.time_counter.value += time.time() - proc_start_time with self.packet_counter.get_lock(): self.packet_counter.value += 1 self.running_flag.value = 0 socket.close() dataset.attrs["total_samples"] = sample_idx dataset.attrs["total_bytes"] = total_dataset_bytes class DataCollectionBroker(BaseBroker): """Manages exchanges with the data collection worker by buffering client requests.""" def __init__( self, data_output_path: typing.AnyStr, human_count: int, simulation_days: int, data_buffer_size: int = default_data_buffer_size, # NOTE: in bytes! frontend_address: typing.AnyStr = default_datacollect_frontend_address, backend_address: typing.AnyStr = default_datacollect_backend_address, compression: typing.Optional[typing.AnyStr] = "lzf", compression_opts: typing.Optional[typing.Any] = None, verbose: bool = False, verbose_print_delay: float = 5., config_backup: typing.Optional[typing.Dict] = None, ): """ Initializes the data collection broker's attributes (counters, condvars, ...). Args: data_output_path: the path where the collected data should be saved. data_buffer_size: the amount of data that can be buffered by the broker (in bytes). frontend_address: address through which to exchange data logging requests with clients. 
backend_address: address through which to exchange data logging requests with the worker. verbose: toggles whether to print extra debug information while running. verbose_print_delay: specifies how often the extra debug info should be printed. """ super().__init__( workers=1, # cannot safely write more than one sample at a time with this impl frontend_address=frontend_address, backend_address=backend_address, verbose=verbose, verbose_print_delay=verbose_print_delay, ) self.data_output_path = data_output_path self.human_count = human_count self.simulation_days = simulation_days self.data_buffer_size = data_buffer_size self.compression = compression self.compression_opts = compression_opts self.config_backup = config_backup def run(self): """Main loop of the data collection broker process. Will received requests from clients and dispatch them to the data collection worker. """ context = zmq.Context() frontend = context.socket(zmq.ROUTER) print(f"Will listen for data collection requests at: {self.frontend_address}", flush=True) frontend.bind(self.frontend_address) backend = context.socket(zmq.REQ) print(f"Will dispatch data collection work at: {self.backend_address}", flush=True) backend.bind(self.backend_address) worker_backend_address = self.backend_address.replace("*", "localhost") worker_poller = zmq.Poller() worker_poller.register(frontend, zmq.POLLIN) print(f"Launching worker...", flush=True) worker = DataCollectionWorker( data_output_path=self.data_output_path, backend_address=worker_backend_address, human_count=self.human_count, simulation_days=self.simulation_days, compression=self.compression, compression_opts=self.compression_opts, config_backup=self.config_backup, ) worker.start() backend.setsockopt(zmq.SNDTIMEO, 5) last_update_timestamp = time.time() curr_queue_size = 0 expected_sample_idx = 0 request_queue = [] print("Entering dispatch loop...", flush=True) while not self.stop_flag.is_set(): evts = dict(worker_poller.poll(default_poll_delay_ms if not 
request_queue else 1)) if curr_queue_size < self.data_buffer_size and \ frontend in evts and evts[frontend] == zmq.POLLIN: client, empty, request = frontend.recv_multipart() if request == b"RESET": worker.reset_flag.set() frontend.send_multipart([client, b"", b"READY"]) else: request_queue.append(request) curr_queue_size += len(request) frontend.send_multipart([client, b"", b"GOTCHA"]) if request_queue: next_packet = request_queue[0] assert curr_queue_size >= len(next_packet) try: backend.send(next_packet) written_sample_idx_str = backend.recv() assert expected_sample_idx == int(written_sample_idx_str.decode()) expected_sample_idx = expected_sample_idx + 1 curr_queue_size -= len(next_packet) request_queue.pop(0) except zmq.error.Again: pass if self.verbose and time.time() - last_update_timestamp > self.verbose_print_delay: print(f" {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')} stats:") packets = worker.get_processed_count() delay = worker.get_averge_processing_delay() uptime = worker.get_processing_uptime() print( f" running={worker.is_running()} packets={packets}" f" avg_delay={delay:.6f}sec proc_time_ratio={uptime:.1%}", flush=True, ) last_update_timestamp = time.time() backend.setsockopt(zmq.SNDTIMEO, -1) while request_queue: next_packet = request_queue.pop(0) backend.send_multipart(next_packet) curr_queue_size -= len(next_packet[-1]) worker.stop_gracefully() worker.join() class DataCollectionClient: """ Creates a client through which data samples can be sent for collection. This object should be fairly lightweight and low-cost, so creating it once per day, per human *should* not create a significant overhead. """ def __init__( self, server_address: typing.Optional[typing.AnyStr] = default_datacollect_frontend_address, context: typing.Optional[zmq.Context] = None, ): """ Initializes the client's attributes (socket, context). Args: server_address: address of the data collection server frontend to send requests to. 
context: zmq context to create i/o objects from. """ if context is None: context = zmq.Context() self.context = context self.socket = self.context.socket(zmq.REQ) if server_address is None: server_address = default_datacollect_frontend_address self.socket.connect(server_address) def write(self, day_idx, hour_idx, human_idx, sample): """Forwards a data sample for the data writer using pickle.""" self.socket.send_pyobj((day_idx, hour_idx, human_idx, pickle.dumps(sample))) response = self.socket.recv() assert response == b"GOTCHA" def request_reset(self): self.socket.send(b"RESET") response = self.socket.recv() assert response == b"READY" class DataCollectionServer(DataCollectionBroker, multiprocessing.Process): """Wrapper object used to initialize a broker inside a separate process.""" def __init__(self, **kwargs): multiprocessing.Process.__init__(self) DataCollectionBroker.__init__(self, **kwargs) def proc_human_batch( sample, engine, cluster_mgr_map, clusters_dump_path: typing.Optional[typing.AnyStr] = None, ): """ Processes a chunk of human data, clustering messages and computing new risk levels. Args: sample: a dictionary of data necessary for clustering+inference. engine: the inference engine, pre-instantiated with the right experiment config. cluster_mgr_map: map of human-to-cluster-managers to use for clustering. n_parallel_procs: internal joblib parallel process count for clustering+inference. clusters_dump_path: defines where to dump clusters (if required). Returns: The clustering + risk level update results. 
""" assert isinstance(sample, list) and all([isinstance(p, dict) for p in sample]) ref_timestamp = None for params in sample: human_name = params["human"].name timestamp = params["start"] + datetime.timedelta(days=params["current_day"], hours=params["time_slot"]) if ref_timestamp is None: ref_timestamp = timestamp else: assert ref_timestamp == timestamp, "how can we possibly have different timestamps here" cluster_mgr_hash = str(params["city_hash"]) + ":" + human_name params["cluster_mgr_hash"] = cluster_mgr_hash if cluster_mgr_hash not in cluster_mgr_map: cluster_algo_type = covid19sim.inference.clustering.base.get_cluster_manager_type( params["conf"].get("CLUSTER_ALGO_TYPE", "blind"), ) cluster_mgr = cluster_algo_type( max_history_offset=datetime.timedelta(days=params["conf"].get("TRACING_N_DAYS_HISTORY")), add_orphan_updates_as_clusters=True, generate_embeddings_by_timestamp=True, generate_backw_compat_embeddings=True, ) else: cluster_mgr = cluster_mgr_map[cluster_mgr_hash] assert not cluster_mgr._is_being_used, "two processes should never try to access the same human" cluster_mgr._is_being_used = True params["cluster_mgr"] = cluster_mgr results = [_proc_human(params, engine) for params in sample] for params in sample: cluster_mgr = params["cluster_mgr"] assert cluster_mgr._is_being_used cluster_mgr._is_being_used = False cluster_mgr_map[params["cluster_mgr_hash"]] = cluster_mgr if clusters_dump_path and ref_timestamp: os.makedirs(clusters_dump_path, exist_ok=True) curr_date_str = ref_timestamp.strftime("%Y%m%d-%H%M%S") curr_dump_path = os.path.join(clusters_dump_path, curr_date_str + ".pkl") to_dump = {params["human"].name: params["cluster_mgr"] for params in sample} with open(curr_dump_path, "wb") as fd: pickle.dump(to_dump, fd) return results def _proc_human(params, inference_engine): """Internal implementation of the `proc_human_batch` function.""" assert isinstance(params, dict) and \ all([p in params for p in expected_raw_packet_param_names]), \ 
"unexpected/broken _proc_human input format between simulator and inference service" conf = params["conf"] todays_date = params["start"] + datetime.timedelta(days=params["current_day"], hours=params["time_slot"]) human, cluster_mgr = params["human"], params["cluster_mgr"] # set the current day as the refresh timestamp to auto-purge outdated messages in advance cluster_mgr.set_current_timestamp(todays_date) update_messages = covid19sim.inference.message_utils.batch_messages(human.update_messages) cluster_mgr.add_messages(messages=update_messages, current_timestamp=todays_date) # Format for supervised learning / transformer inference is_exposed, exposure_day = covid19sim.inference.helper.exposure_array(human.infection_timestamp, todays_date, conf) is_recovered, recovery_day = covid19sim.inference.helper.recovered_array(human.recovered_timestamp, todays_date, conf) candidate_encounters, exposure_encounter = covid19sim.inference.helper.candidate_exposures(cluster_mgr) reported_symptoms = human.rolling_all_reported_symptoms true_symptoms = human.rolling_all_symptoms # FIXME: DIRTY DIRTY HACK; Nasim's DataLoader expects that the embeddings contain an absolute # day index instead of a relative offset (i.e. the exact simulation day instead of [0,14])... if len(candidate_encounters): candidate_encounters[:, 3] = params["current_day"] - candidate_encounters[:, 3] # Nasim also does some masking with a hard-coded 14-day history length, let's do the same... 
valid_encounter_mask = candidate_encounters[:, 3] > (params["current_day"] - 14) candidate_encounters = candidate_encounters[valid_encounter_mask] exposure_encounter = exposure_encounter[valid_encounter_mask] daily_output = { "current_day": params["current_day"], "observed": { "reported_symptoms": reported_symptoms, "candidate_encounters": candidate_encounters, "test_results": human.test_results, "preexisting_conditions": human.obs_preexisting_conditions, "age": human.obs_age, "sex": human.obs_sex, "risk_mapping": conf.get("RISK_MAPPING"), }, "unobserved": { "human_id": human.name, "incubation_days": human.incubation_days, "recovery_days": human.recovery_days, "true_symptoms": true_symptoms, "is_exposed": is_exposed, "exposure_encounter": exposure_encounter, "exposure_day": exposure_day, "is_recovered": is_recovered, "recovery_day": recovery_day, "infectiousness": np.array(human.infectiousnesses), "true_preexisting_conditions": human.preexisting_conditions, "true_age": human.age, "true_sex": human.sex, "viral_load_to_infectiousness_multiplier": human.viral_load_to_infectiousness_multiplier, "infection_timestamp": human.infection_timestamp, "recovered_timestamp": human.recovered_timestamp, } } if conf.get("COLLECT_TRAINING_DATA"): data_collect_client = DataCollectionClient( server_address=conf.get("data_collection_server_address", default_datacollect_frontend_address), ) human_id = int(human.name.split(":")[-1]) data_collect_client.write(params["current_day"], params["time_slot"], human_id, daily_output) inference_result, risk_history = None, None if conf.get("USE_ORACLE"): risk_history = covid19sim.inference.oracle.oracle(human, conf) elif conf.get("RISK_MODEL") == "transformer": # no need to do actual inference if the cluster count is zero inference_result = inference_engine.infer(daily_output) if inference_result is not None: risk_history = inference_result['infectiousness'] return human.name, risk_history
en
0.869266
Contains utility classes for remote inference inside the simulation. # import h5py # 10MB # if on MPI-IS cluster (htcondor + raven) # if custom ipc path provided # if on slurm Spawns a single worker instance. These workers are managed by a broker class. They communicate with the broker using a backend connection. Main loop of the worker process. Will receive brokered requests from the frontend, process them, and respond with the result through the broker. Returns the total number of processed requests by this worker. Returns the total time spent processing requests by this worker. Returns the total uptime of this worker. Returns whether this worker is running or not. Returns the average sample processing time between reception & response (in seconds). Returns the fraction of total uptime that the server spends processing requests. Stops the infinite data reception loop, allowing a clean shutdown. Manages workers through a backend connection for load balancing. Initializes the broker's attributes (counters, condvars, ...). Args: workers: the number of independent workers to spawn to process requests. frontend_address: address through which to exchange requests with clients. backend_address: address through which to exchange requests with workers. verbose: toggles whether to print extra debug information while running. verbose_print_delay: specifies how often the extra debug info should be printed. Main loop of the broker process. Will received requests from clients and dispatch them to available workers. Stops the infinite data reception loop, allowing a clean shutdown. Spawns a single inference worker instance. These workers are managed by the InferenceBroker class. They communicate with the broker using a backend connection. Initializes the inference worker's attributes (counters, condvars, ...). Args: experiment_directory: the path to the experiment directory to pass to the inference engine. 
backend_address: address through which to exchange inference requests with the broker. identifier: identifier for this worker (name, used for debug purposes only). cluster_mgr_map: map of human-to-cluster-managers to use for clustering. weights_path: the path to the specific weight file to use. If not, will use the 'best checkpoint weights' inside the experiment directory. Main loop of the inference worker process. Will receive brokered requests from the frontend, process them, and respond with the result through the broker. # tell broker we're ready Manages inference workers through a backend connection for load balancing. Initializes the inference broker's attributes (counters, condvars, ...). Args: model_exp_path: the path to the experiment directory to pass to the inference engine. workers: the number of independent inference workers to spawn to process requests. frontend_address: address through which to exchange inference requests with clients. backend_address: address through which to exchange inference requests with workers. verbose: toggles whether to print extra debug information while running. verbose_print_delay: specifies how often the extra debug info should be printed. weights_path: the path to the specific weight file to use. If not, will use the 'best checkpoint weights' inside the experiment directory. Main loop of the inference broker process. Will received requests from clients and dispatch them to available workers. Creates a client through which data samples can be sent for inference. This object will automatically be able to pick a proper remote inference engine. This object should be fairly lightweight and low-cost, so creating it once per day, per human *should* not create a significant overhead. Initializes the client's attributes (socket, context). Args: server_address: address of the inference server frontend to send requests to. context: zmq context to create i/o objects from. 
Forwards a data sample for the inference engine using pickle. Wrapper object used to initialize a broker inside a separate process. Inference engine wrapper used to download & extract experiment data, if necessary. Spawns a data collection worker instance. This workers is managed by the DataCollectionBroker class. It communicates with the broker using a backend connection. Initializes the data collection worker's attributes (counters, condvars, ...). Args: data_output_path: the path where the collected data should be saved. backend_address: address through which to exchange data collection requests with the broker. # These are not used anymore! # It's because zarr uses a meta-compressor (Blosc) to figure out which # compressor to use, and it appears to work well. Main loop of the data collection worker process. Will receive brokered requests from the frontend, process them, and respond with the result through the broker. # 1 x 6 x human_count # It's a new day # Dump the cache # Make a new cache # Update the current_day counter # Write the pickle and is_filled to cache # Note to future self: this is what it used to be: # dataset[day_idx, hour_idx, human_idx] = pickle.loads(buffer) # is_filled[day_idx, hour_idx, human_idx] = True Manages exchanges with the data collection worker by buffering client requests. # NOTE: in bytes! Initializes the data collection broker's attributes (counters, condvars, ...). Args: data_output_path: the path where the collected data should be saved. data_buffer_size: the amount of data that can be buffered by the broker (in bytes). frontend_address: address through which to exchange data logging requests with clients. backend_address: address through which to exchange data logging requests with the worker. verbose: toggles whether to print extra debug information while running. verbose_print_delay: specifies how often the extra debug info should be printed. 
# cannot safely write more than one sample at a time with this impl Main loop of the data collection broker process. Will received requests from clients and dispatch them to the data collection worker. Creates a client through which data samples can be sent for collection. This object should be fairly lightweight and low-cost, so creating it once per day, per human *should* not create a significant overhead. Initializes the client's attributes (socket, context). Args: server_address: address of the data collection server frontend to send requests to. context: zmq context to create i/o objects from. Forwards a data sample for the data writer using pickle. Wrapper object used to initialize a broker inside a separate process. Processes a chunk of human data, clustering messages and computing new risk levels. Args: sample: a dictionary of data necessary for clustering+inference. engine: the inference engine, pre-instantiated with the right experiment config. cluster_mgr_map: map of human-to-cluster-managers to use for clustering. n_parallel_procs: internal joblib parallel process count for clustering+inference. clusters_dump_path: defines where to dump clusters (if required). Returns: The clustering + risk level update results. Internal implementation of the `proc_human_batch` function. # set the current day as the refresh timestamp to auto-purge outdated messages in advance # Format for supervised learning / transformer inference # FIXME: DIRTY DIRTY HACK; Nasim's DataLoader expects that the embeddings contain an absolute # day index instead of a relative offset (i.e. the exact simulation day instead of [0,14])... # Nasim also does some masking with a hard-coded 14-day history length, let's do the same... # no need to do actual inference if the cluster count is zero
1.899094
2
mempw/__init__.py
mdvthu/mempw
0
6622999
from .core import new_password # noqa: F401
from .core import new_password # noqa: F401
uz
0.465103
# noqa: F401
0.91197
1
6-collections/list.py
elbeg/introduction-to-python
5
6623000
squared = [] for n in range(11): squared.append(n**2) print(squared) squared = [ n**2 for n in range(11) if n % 2 is 0 ] print(squared)
squared = [] for n in range(11): squared.append(n**2) print(squared) squared = [ n**2 for n in range(11) if n % 2 is 0 ] print(squared)
none
1
3.860917
4
src/url_storer.py
Taekyoon/PyCrawler
0
6623001
from db_accessor import MultiThreadDBAccessor class UrlStorer: conn = None def __init__(self, file_path): UrlStorer.conn = MultiThreadDBAccessor(file_path) @staticmethod def create_table(table_name): UrlStorer.conn.execute("create table if not exists %s\ (URL text primary key not null,\ LEVEL int not null)" % (table_name)) @staticmethod def get(table_name, limit=10): return UrlStorer.conn.select("select * from %s limit %s" % (table_name, str(limit))) @staticmethod def put(table_name, url, level): UrlStorer.conn.execute("insert into %s (URL, LEVEL) VALUES\ (%s, %s)" % (table_name, "'" + url + "'", level)) @staticmethod def exist(table_name, key): cursor = UrlStorer.conn.execute("select exist(select URL from %s\ where KEY = %s)" % (table_name, "'" + key + "'")) return True if [x for x in cursor][0][0] == 1 else False @staticmethod def delete(table_name, key): UrlStorer.conn.execute("delete from %s where URL = %s" % (table_name, "'" + key + "'")) return True @staticmethod def drop_table(table_name): UrlStorer.conn.execute("drop table if exists %s" % (table_name)) @staticmethod def close(): UrlStorer.conn.close()
from db_accessor import MultiThreadDBAccessor class UrlStorer: conn = None def __init__(self, file_path): UrlStorer.conn = MultiThreadDBAccessor(file_path) @staticmethod def create_table(table_name): UrlStorer.conn.execute("create table if not exists %s\ (URL text primary key not null,\ LEVEL int not null)" % (table_name)) @staticmethod def get(table_name, limit=10): return UrlStorer.conn.select("select * from %s limit %s" % (table_name, str(limit))) @staticmethod def put(table_name, url, level): UrlStorer.conn.execute("insert into %s (URL, LEVEL) VALUES\ (%s, %s)" % (table_name, "'" + url + "'", level)) @staticmethod def exist(table_name, key): cursor = UrlStorer.conn.execute("select exist(select URL from %s\ where KEY = %s)" % (table_name, "'" + key + "'")) return True if [x for x in cursor][0][0] == 1 else False @staticmethod def delete(table_name, key): UrlStorer.conn.execute("delete from %s where URL = %s" % (table_name, "'" + key + "'")) return True @staticmethod def drop_table(table_name): UrlStorer.conn.execute("drop table if exists %s" % (table_name)) @staticmethod def close(): UrlStorer.conn.close()
none
1
2.880289
3
config/backup_config.py
coolexplorer/vault-script
0
6623002
import datetime class BackUpConfig(object): def __init__(self, option): self.file_path = option.file_path self.dir_path = option.dir_path self.compress_option = option.compress_option self.remote_address = option.remote_address self.username = option.username self.password = option.password self.local_path = option.local_path self.remote_path = option.remote_path self.copy_mode = "remote" def validate_options(self): if self.file_path is None: raise Exception("No value") else: self.add_file_name_postfix() if self.dir_path is None: raise Exception("No value") if self.compress_option is None: self.compress_option = True if self.remote_address is None: self.copy_mode = "local" self.remote_address = "apseo-qe-test2" if self.username is None: self.username = "eass-build" if self.password is None: self.password = '<PASSWORD>!!' if self.local_path is None: self.local_path = self.file_path if self.remote_path is None: self.remote_path = "/home/eass-build" def add_file_name_postfix(self): name = self.file_path.replace(".tar.gz", "") now = datetime.datetime.now() formatted_date = now.strftime("%Y%m%d_%H%M%S") print(formatted_date) self.file_path = name + "_" + formatted_date + ".tar.gz"
import datetime class BackUpConfig(object): def __init__(self, option): self.file_path = option.file_path self.dir_path = option.dir_path self.compress_option = option.compress_option self.remote_address = option.remote_address self.username = option.username self.password = option.password self.local_path = option.local_path self.remote_path = option.remote_path self.copy_mode = "remote" def validate_options(self): if self.file_path is None: raise Exception("No value") else: self.add_file_name_postfix() if self.dir_path is None: raise Exception("No value") if self.compress_option is None: self.compress_option = True if self.remote_address is None: self.copy_mode = "local" self.remote_address = "apseo-qe-test2" if self.username is None: self.username = "eass-build" if self.password is None: self.password = '<PASSWORD>!!' if self.local_path is None: self.local_path = self.file_path if self.remote_path is None: self.remote_path = "/home/eass-build" def add_file_name_postfix(self): name = self.file_path.replace(".tar.gz", "") now = datetime.datetime.now() formatted_date = now.strftime("%Y%m%d_%H%M%S") print(formatted_date) self.file_path = name + "_" + formatted_date + ".tar.gz"
none
1
2.792209
3
src/server/front_srv.py
jhchen3121/wechat_shop
0
6623003
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals from __future__ import absolute_import import logging import logging.config import settings import os logger_conf = os.path.join(settings.PROJ_DIR, 'etc', 'frontend_logger.conf') logging.config.fileConfig(logger_conf) from core_backend import server as Server from core_backend import websocket from core_backend.module import load_modules_hook import core_backend.http import tornado.web logger = logging.getLogger(__name__) def _start_frontend(): handlers = [] hooks = load_modules_hook() for hook, pkg in hooks: handlers_hook = getattr(hook, 'handlers') if handlers_hook: handlers_list = handlers_hook() for hdl in handlers_list: logger.info("add url handler %s by [%s]", hdl[0], pkg) handlers.append(hdl) handlers.extend([ (r'/service/(.*)', Server.ServiceHandler), (r'/mp_service/(.*)', Server.MpServiceHandler), (r'/attachment', Server.AttachmentHandler), (r'/static_source/(.*)', Server.StaticSourceHandler), (r'/file_export', Server.FileExportHandler), (r'/bms', websocket.MessageHandler), # admin静态资源文件 (r"/(.*)", core_backend.http.StaticFileHandler, {"path": "../web/dist/web", "default_filename": "index.html"}) ]) pika_client = Server.PikaClient() pika_consumer = websocket.PikaConsumer() # 上传至静态资源文件夹 upload_path = settings.STATIC_SOURCE_DIR application = tornado.web.Application(handlers, pika_client = pika_client, pika_consumer = pika_consumer, upload_path = upload_path, ) port = settings.FRONT_SRV_PORT print "Tornado is serving on port {0}.".format(port) sockets = tornado.netutil.bind_sockets(port) server = tornado.httpserver.HTTPServer(application) server.add_sockets(sockets) ioloop = tornado.ioloop.IOLoop.instance() ioloop.spawn_callback(pika_client.connect) ioloop.spawn_callback(pika_consumer.connect) try: ioloop.start() except KeyboardInterrupt: print "Front Server Exit!" if __name__ == "__main__": _start_frontend()
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals from __future__ import absolute_import import logging import logging.config import settings import os logger_conf = os.path.join(settings.PROJ_DIR, 'etc', 'frontend_logger.conf') logging.config.fileConfig(logger_conf) from core_backend import server as Server from core_backend import websocket from core_backend.module import load_modules_hook import core_backend.http import tornado.web logger = logging.getLogger(__name__) def _start_frontend(): handlers = [] hooks = load_modules_hook() for hook, pkg in hooks: handlers_hook = getattr(hook, 'handlers') if handlers_hook: handlers_list = handlers_hook() for hdl in handlers_list: logger.info("add url handler %s by [%s]", hdl[0], pkg) handlers.append(hdl) handlers.extend([ (r'/service/(.*)', Server.ServiceHandler), (r'/mp_service/(.*)', Server.MpServiceHandler), (r'/attachment', Server.AttachmentHandler), (r'/static_source/(.*)', Server.StaticSourceHandler), (r'/file_export', Server.FileExportHandler), (r'/bms', websocket.MessageHandler), # admin静态资源文件 (r"/(.*)", core_backend.http.StaticFileHandler, {"path": "../web/dist/web", "default_filename": "index.html"}) ]) pika_client = Server.PikaClient() pika_consumer = websocket.PikaConsumer() # 上传至静态资源文件夹 upload_path = settings.STATIC_SOURCE_DIR application = tornado.web.Application(handlers, pika_client = pika_client, pika_consumer = pika_consumer, upload_path = upload_path, ) port = settings.FRONT_SRV_PORT print "Tornado is serving on port {0}.".format(port) sockets = tornado.netutil.bind_sockets(port) server = tornado.httpserver.HTTPServer(application) server.add_sockets(sockets) ioloop = tornado.ioloop.IOLoop.instance() ioloop.spawn_callback(pika_client.connect) ioloop.spawn_callback(pika_consumer.connect) try: ioloop.start() except KeyboardInterrupt: print "Front Server Exit!" if __name__ == "__main__": _start_frontend()
zh
0.622751
#!/usr/bin/env python # -*- coding: utf-8 -*- # admin静态资源文件 # 上传至静态资源文件夹
1.793684
2
models/asset_transactions.py
IanMcLaughlin19/fitest
0
6623004
<reponame>IanMcLaughlin19/fitest<gh_stars>0 @dataclass class Multisig: subsignature: List[Dict] = field(default_factory=list) threshold: int = field(default_factory=int) version: int = field(default_factory=int) @dataclass class MultisigSignature: subsignature: List[Dict] = field(default_factory=list) threshold: int = field(default_factory=int) version: int = field(default_factory=int) @dataclass class Logicsig: args: List[str] = field(default_factory=list) logic: str = field(default_factory=str) multisig_signature: MultisigSignature signature: str = field(default_factory=str) @dataclass class Signature: logicsig: Logicsig multisig: Multisig sig: str = field(default_factory=str) @dataclass class PaymentTransaction: amount: int = field(default_factory=int) close_amount: int = field(default_factory=int) close_remainder_to: str = field(default_factory=str) receiver: str = field(default_factory=str) @dataclass class KeyregTransaction: non_participation: bool = field(default_factory=bool) selection_participation_key: str = field(default_factory=str) vote_first_valid: int = field(default_factory=int) vote_key_dilution: int = field(default_factory=int) vote_last_valid: int = field(default_factory=int) vote_participation_key: str = field(default_factory=str) @dataclass class AssetTransferTransaction: amount: int = field(default_factory=int) asset_id: int = field(default_factory=int) close_amount: int = field(default_factory=int) close_to: str = field(default_factory=str) receiver: str = field(default_factory=str) sender: str = field(default_factory=str) @dataclass class AssetFreezeTransaction: address: str = field(default_factory=str) asset_id: int = field(default_factory=int) new_freeze_status: bool = field(default_factory=bool) @dataclass class Params: clawback: str = field(default_factory=str) creator: str = field(default_factory=str) decimals: int = field(default_factory=int) default_frozen: bool = field(default_factory=bool) freeze: str = field(default_factory=str) 
manager: str = field(default_factory=str) metadata_hash: str = field(default_factory=str) name: str = field(default_factory=str) name_b64: str = field(default_factory=str) reserve: str = field(default_factory=str) total: int = field(default_factory=int) unit_name: str = field(default_factory=str) unit_name_b64: str = field(default_factory=str) url: str = field(default_factory=str) url_b64: str = field(default_factory=str) @dataclass class AssetConfigTransaction: asset_id: int = field(default_factory=int) params: Params @dataclass class GlobalStateSchema: num_uint: int = field(default_factory=int) num_byte_slice: int = field(default_factory=int) @dataclass class LocalStateSchema: num_uint: int = field(default_factory=int) num_byte_slice: int = field(default_factory=int) @dataclass class ApplicationTransaction: application_id: int = field(default_factory=int) on_completion: str = field(default_factory=str) application_args: List[str] = field(default_factory=list) accounts: List[str] = field(default_factory=list) foreign_apps: List[int] = field(default_factory=list) foreign_assets: List[int] = field(default_factory=list) local_state_schema: LocalStateSchema global_state_schema: GlobalStateSchema approval_program: str = field(default_factory=str) clear_state_program: str = field(default_factory=str) extra_program_pages: int = field(default_factory=int) @dataclass class Transactions: application_transaction: ApplicationTransaction asset_config_transaction: AssetConfigTransaction asset_freeze_transaction: AssetFreezeTransaction asset_transfer_transaction: AssetTransferTransaction auth_addr: str = field(default_factory=str) close_rewards: int = field(default_factory=int) closing_amount: int = field(default_factory=int) confirmed_round: int = field(default_factory=int) created_application_index: int = field(default_factory=int) created_asset_index: int = field(default_factory=int) fee: int = field(default_factory=int) first_valid: int = field(default_factory=int) 
genesis_hash: str = field(default_factory=str) genesis_id: str = field(default_factory=str) group: str = field(default_factory=str) id: str = field(default_factory=str) intra_round_offset: int = field(default_factory=int) keyreg_transaction: KeyregTransaction last_valid: int = field(default_factory=int) lease: str = field(default_factory=str) note: str = field(default_factory=str) payment_transaction: PaymentTransaction receiver_rewards: int = field(default_factory=int) rekey_to: str = field(default_factory=str) round_time: int = field(default_factory=int) sender: str = field(default_factory=str) sender_rewards: int = field(default_factory=int) signature: Signature tx_type: str = field(default_factory=str) local_state_delta: List[Dict] = field(default_factory=list) global_state_delta: List[Dict] = field(default_factory=list) logs: List[str] = field(default_factory=list) inner_txns: List[str] = field(default_factory=list) @dataclass class AccountBalanceReq: current_round: int = field(default_factory=int) next_token: str = field(default_factory=str) transactions: List[Transactions]
@dataclass class Multisig: subsignature: List[Dict] = field(default_factory=list) threshold: int = field(default_factory=int) version: int = field(default_factory=int) @dataclass class MultisigSignature: subsignature: List[Dict] = field(default_factory=list) threshold: int = field(default_factory=int) version: int = field(default_factory=int) @dataclass class Logicsig: args: List[str] = field(default_factory=list) logic: str = field(default_factory=str) multisig_signature: MultisigSignature signature: str = field(default_factory=str) @dataclass class Signature: logicsig: Logicsig multisig: Multisig sig: str = field(default_factory=str) @dataclass class PaymentTransaction: amount: int = field(default_factory=int) close_amount: int = field(default_factory=int) close_remainder_to: str = field(default_factory=str) receiver: str = field(default_factory=str) @dataclass class KeyregTransaction: non_participation: bool = field(default_factory=bool) selection_participation_key: str = field(default_factory=str) vote_first_valid: int = field(default_factory=int) vote_key_dilution: int = field(default_factory=int) vote_last_valid: int = field(default_factory=int) vote_participation_key: str = field(default_factory=str) @dataclass class AssetTransferTransaction: amount: int = field(default_factory=int) asset_id: int = field(default_factory=int) close_amount: int = field(default_factory=int) close_to: str = field(default_factory=str) receiver: str = field(default_factory=str) sender: str = field(default_factory=str) @dataclass class AssetFreezeTransaction: address: str = field(default_factory=str) asset_id: int = field(default_factory=int) new_freeze_status: bool = field(default_factory=bool) @dataclass class Params: clawback: str = field(default_factory=str) creator: str = field(default_factory=str) decimals: int = field(default_factory=int) default_frozen: bool = field(default_factory=bool) freeze: str = field(default_factory=str) manager: str = field(default_factory=str) 
metadata_hash: str = field(default_factory=str) name: str = field(default_factory=str) name_b64: str = field(default_factory=str) reserve: str = field(default_factory=str) total: int = field(default_factory=int) unit_name: str = field(default_factory=str) unit_name_b64: str = field(default_factory=str) url: str = field(default_factory=str) url_b64: str = field(default_factory=str) @dataclass class AssetConfigTransaction: asset_id: int = field(default_factory=int) params: Params @dataclass class GlobalStateSchema: num_uint: int = field(default_factory=int) num_byte_slice: int = field(default_factory=int) @dataclass class LocalStateSchema: num_uint: int = field(default_factory=int) num_byte_slice: int = field(default_factory=int) @dataclass class ApplicationTransaction: application_id: int = field(default_factory=int) on_completion: str = field(default_factory=str) application_args: List[str] = field(default_factory=list) accounts: List[str] = field(default_factory=list) foreign_apps: List[int] = field(default_factory=list) foreign_assets: List[int] = field(default_factory=list) local_state_schema: LocalStateSchema global_state_schema: GlobalStateSchema approval_program: str = field(default_factory=str) clear_state_program: str = field(default_factory=str) extra_program_pages: int = field(default_factory=int) @dataclass class Transactions: application_transaction: ApplicationTransaction asset_config_transaction: AssetConfigTransaction asset_freeze_transaction: AssetFreezeTransaction asset_transfer_transaction: AssetTransferTransaction auth_addr: str = field(default_factory=str) close_rewards: int = field(default_factory=int) closing_amount: int = field(default_factory=int) confirmed_round: int = field(default_factory=int) created_application_index: int = field(default_factory=int) created_asset_index: int = field(default_factory=int) fee: int = field(default_factory=int) first_valid: int = field(default_factory=int) genesis_hash: str = field(default_factory=str) 
genesis_id: str = field(default_factory=str) group: str = field(default_factory=str) id: str = field(default_factory=str) intra_round_offset: int = field(default_factory=int) keyreg_transaction: KeyregTransaction last_valid: int = field(default_factory=int) lease: str = field(default_factory=str) note: str = field(default_factory=str) payment_transaction: PaymentTransaction receiver_rewards: int = field(default_factory=int) rekey_to: str = field(default_factory=str) round_time: int = field(default_factory=int) sender: str = field(default_factory=str) sender_rewards: int = field(default_factory=int) signature: Signature tx_type: str = field(default_factory=str) local_state_delta: List[Dict] = field(default_factory=list) global_state_delta: List[Dict] = field(default_factory=list) logs: List[str] = field(default_factory=list) inner_txns: List[str] = field(default_factory=list) @dataclass class AccountBalanceReq: current_round: int = field(default_factory=int) next_token: str = field(default_factory=str) transactions: List[Transactions]
none
1
2.302862
2
statarb/src/config/sources/compustat_splits.py
mikimaus78/ml_monorepo
51
6623005
<reponame>mikimaus78/ml_monorepo { "method": "ftp", "host": "ftp.standardandpoors.com", "user": "limegrp", "pass": "<PASSWORD>", "remote_dir": "/outbound/", "local_dir": "compustat/splits", "regex": "future_splits.txt", "prefix": "%Y%m%d_", "format": "compustat_splits", "new_data_frequency": 24L*60L*60L*1000L, }
{ "method": "ftp", "host": "ftp.standardandpoors.com", "user": "limegrp", "pass": "<PASSWORD>", "remote_dir": "/outbound/", "local_dir": "compustat/splits", "regex": "future_splits.txt", "prefix": "%Y%m%d_", "format": "compustat_splits", "new_data_frequency": 24L*60L*60L*1000L, }
none
1
0.925946
1
Code/server_web/tk/session.py
JacksonWuxs/Traditional-Archery-Event-Management-System
0
6623006
from flask_login import UserMixin from random import choice from .db import database import uuid HomeDB = database('db/home.db') def get_info(email=None, id=None): if id: return HomeDB.select('ID = %s' % id) return HomeDB.select('Email = "%s"' % email) class User(UserMixin): def __init__(self, email=None, **kwar): self._email = email if not kwar: self.get_info(email) else: self._id = kwar['id'] self._password = kwar['password'] self._username = kwar['username'] self._date = kwar['date'] self._money = kwar['money'] self._mode = 'menber' def __repr__(self): return '-'.join(map(str, [self._id, self._username, self._password, self._email, self._money, self._mode])) @property def password(self): raise AttributeError('password is not a readable attribute') @password.setter def password(self, password): raise AttributeError('password does not support resetting in this version') def verify_password(self, password): if password != self._password: return False return True def get_id(self): return self._id def get_info(self, email=None, id=None): menber = get_info(email, id) if menber != []: self._id = menber[0][0] self._username = menber[0][1].encode('utf-8') self._password = men<PASSWORD>[0][2] self._email = menber[0][3] self._date = menber[0][4] self._money = menber[0][5] self._mode = 'member' else: self._id = uuid.uuid4().int self._username = choice(['Jack', 'Henry', 'Iris', 'Julie']) self._password = None self._email = None self._date = 20180101 self._money = 0 self._mode = 'guest' @staticmethod def get(user_id): if not user_id: return None menber = get_info(None, user_id) if menber == []: return None return User(email=menber[0][3], id=menber[0][0], username=menber[0][1], password=<PASSWORD>[0][2], date=menber[0][4], money=menber[0][5])
from flask_login import UserMixin from random import choice from .db import database import uuid HomeDB = database('db/home.db') def get_info(email=None, id=None): if id: return HomeDB.select('ID = %s' % id) return HomeDB.select('Email = "%s"' % email) class User(UserMixin): def __init__(self, email=None, **kwar): self._email = email if not kwar: self.get_info(email) else: self._id = kwar['id'] self._password = kwar['password'] self._username = kwar['username'] self._date = kwar['date'] self._money = kwar['money'] self._mode = 'menber' def __repr__(self): return '-'.join(map(str, [self._id, self._username, self._password, self._email, self._money, self._mode])) @property def password(self): raise AttributeError('password is not a readable attribute') @password.setter def password(self, password): raise AttributeError('password does not support resetting in this version') def verify_password(self, password): if password != self._password: return False return True def get_id(self): return self._id def get_info(self, email=None, id=None): menber = get_info(email, id) if menber != []: self._id = menber[0][0] self._username = menber[0][1].encode('utf-8') self._password = men<PASSWORD>[0][2] self._email = menber[0][3] self._date = menber[0][4] self._money = menber[0][5] self._mode = 'member' else: self._id = uuid.uuid4().int self._username = choice(['Jack', 'Henry', 'Iris', 'Julie']) self._password = None self._email = None self._date = 20180101 self._money = 0 self._mode = 'guest' @staticmethod def get(user_id): if not user_id: return None menber = get_info(None, user_id) if menber == []: return None return User(email=menber[0][3], id=menber[0][0], username=menber[0][1], password=<PASSWORD>[0][2], date=menber[0][4], money=menber[0][5])
none
1
2.854653
3
p36.py
AI-Rabbit/Python-problems
0
6623007
<gh_stars>0 # p36.py str1 = input().split() str2 = input().split() str3 = input().split() str1 = list(map(int, str1)) str2 = list(map(int, str2)) str3 = list(map(int, str3)) n = str1[0] k = str1[1] for i in range(1, k + 1): if i % 2 != 0: for j in range(0, n): if str2[2*j] >= str3[i-1]: print(2*j+1) break else: for j in range(0, n): if str2[2*n-1-2*j] >= str3[i-1]: print(2*n-2*j) break
# p36.py str1 = input().split() str2 = input().split() str3 = input().split() str1 = list(map(int, str1)) str2 = list(map(int, str2)) str3 = list(map(int, str3)) n = str1[0] k = str1[1] for i in range(1, k + 1): if i % 2 != 0: for j in range(0, n): if str2[2*j] >= str3[i-1]: print(2*j+1) break else: for j in range(0, n): if str2[2*n-1-2*j] >= str3[i-1]: print(2*n-2*j) break
none
1
2.981081
3
SACWebApp/mainPage/resources.py
feng-jj/SAC-Project1
1
6623008
<reponame>feng-jj/SAC-Project1 from import_export import resources from .models import Clinical, Clinical_VOCA, Advocacy, MAP, OV, SAFE_Clinic, Crisis_Line, Prevention, Training, Development class ClinicalResource(resources.ModelResource): class Meta: model = Clinical class ClinicalVOCAResource(resources.ModelResource): class Meta: model = Clinical_VOCA class AdvocacyResource(resources.ModelResource): class Meta: model = Advocacy class MAPResource(resources.ModelResource): class Meta: model = MAP class OVResource(resources.ModelResource): class Meta: model = OV class SAFE_ClinicResource(resources.ModelResource): class Meta: model = SAFE_Clinic class Crisis_LineResource(resources.ModelResource): class Meta: model = Crisis_Line class PreventionResource(resources.ModelResource): class Meta: model = Prevention class TrainingResource(resources.ModelResource): class Meta: model = Training class DevelopmentResource(resources.ModelResource): class Meta: model = Development
from import_export import resources from .models import Clinical, Clinical_VOCA, Advocacy, MAP, OV, SAFE_Clinic, Crisis_Line, Prevention, Training, Development class ClinicalResource(resources.ModelResource): class Meta: model = Clinical class ClinicalVOCAResource(resources.ModelResource): class Meta: model = Clinical_VOCA class AdvocacyResource(resources.ModelResource): class Meta: model = Advocacy class MAPResource(resources.ModelResource): class Meta: model = MAP class OVResource(resources.ModelResource): class Meta: model = OV class SAFE_ClinicResource(resources.ModelResource): class Meta: model = SAFE_Clinic class Crisis_LineResource(resources.ModelResource): class Meta: model = Crisis_Line class PreventionResource(resources.ModelResource): class Meta: model = Prevention class TrainingResource(resources.ModelResource): class Meta: model = Training class DevelopmentResource(resources.ModelResource): class Meta: model = Development
none
1
2.073041
2
Blog/apps/accounts/forms.py
xinghalok/first-django-blog
0
6623009
<filename>Blog/apps/accounts/forms.py from django import forms from django.contrib.auth import password_validation from django.utils.translation import gettext, gettext_lazy as _ from django.contrib.auth.forms import UserCreationForm from django.contrib.auth.forms import AuthenticationForm as BaseAuthenticationForm from django.contrib.auth.forms import PasswordResetForm as BasePasswordResetForm from django.contrib.auth.forms import SetPasswordForm as BaseSetPasswordForm from django.contrib.auth.forms import UsernameField from django.contrib.auth.models import User class SignUpForm(UserCreationForm): first_name = forms.CharField(max_length=30, required=False, help_text='Optional.') last_name = forms.CharField(max_length=30, required=False, help_text='Optional.') email = forms.EmailField(max_length=254, help_text='Required. Inform a valid email address.') class Meta: model = User fields = ('username', 'first_name', 'last_name', 'email', '<PASSWORD>', '<PASSWORD>', ) def __init__(self, *args, **kwargs): super(SignUpForm, self).__init__(*args, **kwargs) for fieldname in ['username', 'first_name', 'last_name', 'email', '<PASSWORD>', '<PASSWORD>']: self.fields[fieldname].help_text = None class AuthenticationForm(BaseAuthenticationForm): username = UsernameField(widget=forms.TextInput(attrs={ 'autofocus': True, 'class': 'form-control', 'placeholder': _('Username') })) password = forms.CharField(label=_("Password"), strip=False, widget=forms.PasswordInput(attrs={ 'autocomplete': 'current-password', 'class': 'form-control', 'placeholder': _('Password') })) class PasswordResetForm(BasePasswordResetForm): email = forms.EmailField( label=_("Email"), max_length=254, widget=forms.EmailInput(attrs={'autocomplete': 'email', 'class': 'form-control', 'autofocus': True, 'placeholder': _('Email')}) ) class SetPasswordForm(BaseSetPasswordForm): new_password1 = forms.CharField( label=_("New password"), widget=forms.PasswordInput(attrs={'autocomplete': 'off', 'class': 'form-control', 
'autofocus': True, 'placeholder': _('New Password')}), strip=False, help_text=password_validation.password_validators_help_text_html(), ) new_password2 = forms.CharField( label=_("New password confirmation"), strip=False, widget=forms.PasswordInput(attrs={'autocomplete': 'off', 'class': 'form-control', 'placeholder': _('Confirm New Password')}) ) class PasswordChangeForm(SetPasswordForm): old_password = forms.CharField( label=_("Old password"), strip=False, widget=forms.PasswordInput(attrs={'autocomplete': 'current-password', 'autofocus': True, 'class': 'form-control', 'placeholder': _('Old Password')}), )
<filename>Blog/apps/accounts/forms.py from django import forms from django.contrib.auth import password_validation from django.utils.translation import gettext, gettext_lazy as _ from django.contrib.auth.forms import UserCreationForm from django.contrib.auth.forms import AuthenticationForm as BaseAuthenticationForm from django.contrib.auth.forms import PasswordResetForm as BasePasswordResetForm from django.contrib.auth.forms import SetPasswordForm as BaseSetPasswordForm from django.contrib.auth.forms import UsernameField from django.contrib.auth.models import User class SignUpForm(UserCreationForm): first_name = forms.CharField(max_length=30, required=False, help_text='Optional.') last_name = forms.CharField(max_length=30, required=False, help_text='Optional.') email = forms.EmailField(max_length=254, help_text='Required. Inform a valid email address.') class Meta: model = User fields = ('username', 'first_name', 'last_name', 'email', '<PASSWORD>', '<PASSWORD>', ) def __init__(self, *args, **kwargs): super(SignUpForm, self).__init__(*args, **kwargs) for fieldname in ['username', 'first_name', 'last_name', 'email', '<PASSWORD>', '<PASSWORD>']: self.fields[fieldname].help_text = None class AuthenticationForm(BaseAuthenticationForm): username = UsernameField(widget=forms.TextInput(attrs={ 'autofocus': True, 'class': 'form-control', 'placeholder': _('Username') })) password = forms.CharField(label=_("Password"), strip=False, widget=forms.PasswordInput(attrs={ 'autocomplete': 'current-password', 'class': 'form-control', 'placeholder': _('Password') })) class PasswordResetForm(BasePasswordResetForm): email = forms.EmailField( label=_("Email"), max_length=254, widget=forms.EmailInput(attrs={'autocomplete': 'email', 'class': 'form-control', 'autofocus': True, 'placeholder': _('Email')}) ) class SetPasswordForm(BaseSetPasswordForm): new_password1 = forms.CharField( label=_("New password"), widget=forms.PasswordInput(attrs={'autocomplete': 'off', 'class': 'form-control', 
'autofocus': True, 'placeholder': _('New Password')}), strip=False, help_text=password_validation.password_validators_help_text_html(), ) new_password2 = forms.CharField( label=_("New password confirmation"), strip=False, widget=forms.PasswordInput(attrs={'autocomplete': 'off', 'class': 'form-control', 'placeholder': _('Confirm New Password')}) ) class PasswordChangeForm(SetPasswordForm): old_password = forms.CharField( label=_("Old password"), strip=False, widget=forms.PasswordInput(attrs={'autocomplete': 'current-password', 'autofocus': True, 'class': 'form-control', 'placeholder': _('Old Password')}), )
none
1
2.30109
2
main.py
cs-aware/WP3-data-collection-twitter
0
6623010
<reponame>cs-aware/WP3-data-collection-twitter # -*- coding: utf-8 -*- """ The script monitors a set of Twitter accounts and collects the latest posts. The new posts are consolidated in a CSV file and stored within AWS S3 storage. Currently, for the CS-AWARE project, we started monitoring the accounts listed in users.json and executed this scrip every 8 hours. This solution uses tweepy, an easy-to-use Python library for accessing the Twitter API, that requires a credential set sd in credential.json. Finally, the code is written for Python3, anyhow it could be easily adapted for Python2. @author: <NAME> """ import sys import tweepy import csv import json from tweepy import OAuthHandler import os import glob import pandas as pd import datetime from datetime import date, datetime, timedelta import boto3 from stix2 import Bundle, ObservedData, IPv4Address, UserAccount, Bundle from stix2 import CustomObservable, properties from emojis import remove_emoji from unidecode import unidecode @CustomObservable('x-csaware-social', [ ('source', properties.StringProperty()), ('title', properties.StringProperty()), ('text', properties.StringProperty()), ('subject', properties.StringProperty()), ]) class CSAwareSocial(): pass BUCKET_NAME = "cs-aware-data-collection" CREDENTIALS = './credential.json' USERS = './users.json' PERIOD = 1 # Number of hours POST_LIMIT = 200 now = datetime.now() today_str = now.strftime("%Y%m%d_%H%M") date_from = now - timedelta(hours=PERIOD) def load_customer_conf(): """Loads user credentials""" with open(CREDENTIALS) as f: return json.load(f) def load_screen_names(): """Loads username list""" with open(USERS) as f: return json.load(f) def to_aws(local_filename): # Generate remote path remote_path = "%d/%02d/%02d/TWITTER/%s" % (now.year, now.month, now.day, local_filename) print("Uploading", remote_path) # Upload to AWS with open(local_filename, "rb") as f: s3 = boto3.resource('s3') s3.Object(BUCKET_NAME, remote_path).upload_fileobj(f) # Delete local copy 
os.remove(local_filename) def main(): #reload(sys) #sys.setdefaultencoding("ISO-8859-1") observed_data_list = [] credential = load_customer_conf() user_to_follow = load_screen_names()['user_to_follow'] df = pd.DataFrame() user = list(credential)[0] consumer_key = credential[user]['consumer_key'] consumer_secret = credential[user]['consumer_secret'] access_token = credential[user]['access_token'] access_secret = credential[user]['access_secret'] auth = OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_secret) api = tweepy.API(auth) for screen_name in user_to_follow: print(screen_name) try: statuses = api.user_timeline(screen_name=screen_name, count=POST_LIMIT) except: print("ERROR: user {} not found".format(screen_name)) continue col0 = [statuses[i].user.name for i in range(len(statuses))] col1 = [statuses[i].created_at for i in range(len(statuses))] col2 = [statuses[i].text for i in range(len(statuses))] col3 = [statuses[i]._json for i in range(len(statuses))] #print(col2) df = df.append(pd.DataFrame({"username": screen_name.replace("@", ""), "name":col0, "date":col1, "text":col2, "json":col3 })) df = df[df['date'] >= date_from] try: old_df = pd.read_csv("output_{}.csv".format(today_str)) except: print("initialize new file of the day") old_df = pd.DataFrame() new_df = old_df.append(df) new_df.drop_duplicates(['username', 'date'], inplace=True) new_df.to_csv("output_{}.csv".format(today_str), index=False, quoting = csv.QUOTE_ALL) file_to_write = "output_{}.csv".format(today_str) print(file_to_write) for index, row in new_df.iterrows(): txt = remove_emoji(row['text'], remove_components=True) """ Replace unicode special characters with the nearest ASCII look-alike. See alexis answer (https://stackoverflow.com/a/40692852). 
""" txt = unidecode(txt) args = { 'source': 'twitter', 'title': '', 'text': txt, 'subject': '', } observed_user = UserAccount(type='user-account', user_id=row['username'], display_name=row['name']) observed_object = CSAwareSocial(**args, allow_custom=True) objects = {"0": observed_user, "1": observed_object} observed_data = ObservedData(first_observed=row['date'], last_observed=row['date'], number_observed=1, objects=objects, allow_custom=True) observed_data_list.append(observed_data) bundle = Bundle(observed_data_list) stix_filename = file_to_write.replace('.csv', '.json') stix_output = open(stix_filename, 'w') stix_output.write(bundle.serialize(indent=4)) stix_output.close() # Upload to AWS to_aws(file_to_write) to_aws(stix_filename) if __name__ == "__main__": main()
# -*- coding: utf-8 -*- """ The script monitors a set of Twitter accounts and collects the latest posts. The new posts are consolidated in a CSV file and stored within AWS S3 storage. Currently, for the CS-AWARE project, we started monitoring the accounts listed in users.json and executed this scrip every 8 hours. This solution uses tweepy, an easy-to-use Python library for accessing the Twitter API, that requires a credential set sd in credential.json. Finally, the code is written for Python3, anyhow it could be easily adapted for Python2. @author: <NAME> """ import sys import tweepy import csv import json from tweepy import OAuthHandler import os import glob import pandas as pd import datetime from datetime import date, datetime, timedelta import boto3 from stix2 import Bundle, ObservedData, IPv4Address, UserAccount, Bundle from stix2 import CustomObservable, properties from emojis import remove_emoji from unidecode import unidecode @CustomObservable('x-csaware-social', [ ('source', properties.StringProperty()), ('title', properties.StringProperty()), ('text', properties.StringProperty()), ('subject', properties.StringProperty()), ]) class CSAwareSocial(): pass BUCKET_NAME = "cs-aware-data-collection" CREDENTIALS = './credential.json' USERS = './users.json' PERIOD = 1 # Number of hours POST_LIMIT = 200 now = datetime.now() today_str = now.strftime("%Y%m%d_%H%M") date_from = now - timedelta(hours=PERIOD) def load_customer_conf(): """Loads user credentials""" with open(CREDENTIALS) as f: return json.load(f) def load_screen_names(): """Loads username list""" with open(USERS) as f: return json.load(f) def to_aws(local_filename): # Generate remote path remote_path = "%d/%02d/%02d/TWITTER/%s" % (now.year, now.month, now.day, local_filename) print("Uploading", remote_path) # Upload to AWS with open(local_filename, "rb") as f: s3 = boto3.resource('s3') s3.Object(BUCKET_NAME, remote_path).upload_fileobj(f) # Delete local copy os.remove(local_filename) def main(): 
#reload(sys) #sys.setdefaultencoding("ISO-8859-1") observed_data_list = [] credential = load_customer_conf() user_to_follow = load_screen_names()['user_to_follow'] df = pd.DataFrame() user = list(credential)[0] consumer_key = credential[user]['consumer_key'] consumer_secret = credential[user]['consumer_secret'] access_token = credential[user]['access_token'] access_secret = credential[user]['access_secret'] auth = OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_secret) api = tweepy.API(auth) for screen_name in user_to_follow: print(screen_name) try: statuses = api.user_timeline(screen_name=screen_name, count=POST_LIMIT) except: print("ERROR: user {} not found".format(screen_name)) continue col0 = [statuses[i].user.name for i in range(len(statuses))] col1 = [statuses[i].created_at for i in range(len(statuses))] col2 = [statuses[i].text for i in range(len(statuses))] col3 = [statuses[i]._json for i in range(len(statuses))] #print(col2) df = df.append(pd.DataFrame({"username": screen_name.replace("@", ""), "name":col0, "date":col1, "text":col2, "json":col3 })) df = df[df['date'] >= date_from] try: old_df = pd.read_csv("output_{}.csv".format(today_str)) except: print("initialize new file of the day") old_df = pd.DataFrame() new_df = old_df.append(df) new_df.drop_duplicates(['username', 'date'], inplace=True) new_df.to_csv("output_{}.csv".format(today_str), index=False, quoting = csv.QUOTE_ALL) file_to_write = "output_{}.csv".format(today_str) print(file_to_write) for index, row in new_df.iterrows(): txt = remove_emoji(row['text'], remove_components=True) """ Replace unicode special characters with the nearest ASCII look-alike. See alexis answer (https://stackoverflow.com/a/40692852). 
""" txt = unidecode(txt) args = { 'source': 'twitter', 'title': '', 'text': txt, 'subject': '', } observed_user = UserAccount(type='user-account', user_id=row['username'], display_name=row['name']) observed_object = CSAwareSocial(**args, allow_custom=True) objects = {"0": observed_user, "1": observed_object} observed_data = ObservedData(first_observed=row['date'], last_observed=row['date'], number_observed=1, objects=objects, allow_custom=True) observed_data_list.append(observed_data) bundle = Bundle(observed_data_list) stix_filename = file_to_write.replace('.csv', '.json') stix_output = open(stix_filename, 'w') stix_output.write(bundle.serialize(indent=4)) stix_output.close() # Upload to AWS to_aws(file_to_write) to_aws(stix_filename) if __name__ == "__main__": main()
en
0.81629
# -*- coding: utf-8 -*- The script monitors a set of Twitter accounts and collects the latest posts. The new posts are consolidated in a CSV file and stored within AWS S3 storage. Currently, for the CS-AWARE project, we started monitoring the accounts listed in users.json and executed this scrip every 8 hours. This solution uses tweepy, an easy-to-use Python library for accessing the Twitter API, that requires a credential set sd in credential.json. Finally, the code is written for Python3, anyhow it could be easily adapted for Python2. @author: <NAME> # Number of hours Loads user credentials Loads username list # Generate remote path # Upload to AWS # Delete local copy #reload(sys) #sys.setdefaultencoding("ISO-8859-1") #print(col2) Replace unicode special characters with the nearest ASCII look-alike. See alexis answer (https://stackoverflow.com/a/40692852). # Upload to AWS
2.690945
3
tests/python/integration/test_int_jobs.py
music2score/music2score
0
6623011
<reponame>music2score/music2score # from python.jobs import * # from python.constants import * # import unittest # from unittest import TestCase # import mysql.connector as conn # class TestFetchJob(TestCase): # def test_fetch_job(self): # try: # mydbobj = conn.connect(db) # mycursor = mydbobj.cursor() # fetchFlag, myresult = fetch_job(mycursor) # mycursor.close() # #self.assertEqual(fetchFlag,True) # if fetchFlag: # if myresult == None: # self.assertTrue(True) # elif not myresult: # self.assertTrue(False) # else: # self.assertTrue(False) # except Exception as ex: # print("Fetch Job Error:\n" + str(ex)) # self.assertTrue(False)
# from python.jobs import * # from python.constants import * # import unittest # from unittest import TestCase # import mysql.connector as conn # class TestFetchJob(TestCase): # def test_fetch_job(self): # try: # mydbobj = conn.connect(db) # mycursor = mydbobj.cursor() # fetchFlag, myresult = fetch_job(mycursor) # mycursor.close() # #self.assertEqual(fetchFlag,True) # if fetchFlag: # if myresult == None: # self.assertTrue(True) # elif not myresult: # self.assertTrue(False) # else: # self.assertTrue(False) # except Exception as ex: # print("Fetch Job Error:\n" + str(ex)) # self.assertTrue(False)
en
0.346035
# from python.jobs import * # from python.constants import * # import unittest # from unittest import TestCase # import mysql.connector as conn # class TestFetchJob(TestCase): # def test_fetch_job(self): # try: # mydbobj = conn.connect(db) # mycursor = mydbobj.cursor() # fetchFlag, myresult = fetch_job(mycursor) # mycursor.close() # #self.assertEqual(fetchFlag,True) # if fetchFlag: # if myresult == None: # self.assertTrue(True) # elif not myresult: # self.assertTrue(False) # else: # self.assertTrue(False) # except Exception as ex: # print("Fetch Job Error:\n" + str(ex)) # self.assertTrue(False)
2.749506
3
src/kgm/training/__init__.py
mberr/ea-sota-comparison
12
6623012
# coding=utf-8 """Training Loops."""
# coding=utf-8 """Training Loops."""
en
0.872304
# coding=utf-8 Training Loops.
0.99114
1
app/code/image_crop.py
LuQ232/Chessboard-Importer
6
6623013
import math import functools import cv2 import numpy as np na = np.array def image_scale(pts, scale): """scale to original image size""" def __loop(x, y): return [x[0] * y, x[1] * y] return list(map(functools.partial(__loop, y=1/scale), pts)) def image_resize(img, height=500): """resize image to square area (height^2)""" pixels = height * height shape = list(np.shape(img)) scale = math.sqrt(float(pixels)/float(shape[0]*shape[1])) shape[0] *= scale shape[1] *= scale img = cv2.resize(img, (int(shape[1]), int(shape[0]))) img_shape = np.shape(img) return img, img_shape, scale def image_transform(img, points, square_length=150): """crop original image using perspective warp""" board_length = square_length * 8 def __dis(a, b): return np.linalg.norm(na(a)-na(b)) def __shi(seq, n=0): return seq[-(n % len(seq)):] + seq[:-(n % len(seq))] best_idx, best_val = 0, 10**6 for idx, val in enumerate(points): val = __dis(val, [0, 0]) if val < best_val: best_idx, best_val = idx, val pts1 = np.float32(__shi(points, 4 - best_idx)) pts2 = np.float32([[0, 0], [board_length, 0], \ [board_length, board_length], [0, board_length]]) M = cv2.getPerspectiveTransform(pts1, pts2) W = cv2.warpPerspective(img, M, (board_length, board_length)) return W def crop(pts,img): """cropping original image to output""" img_crop = image_transform(img,pts) return img_crop
import math import functools import cv2 import numpy as np na = np.array def image_scale(pts, scale): """scale to original image size""" def __loop(x, y): return [x[0] * y, x[1] * y] return list(map(functools.partial(__loop, y=1/scale), pts)) def image_resize(img, height=500): """resize image to square area (height^2)""" pixels = height * height shape = list(np.shape(img)) scale = math.sqrt(float(pixels)/float(shape[0]*shape[1])) shape[0] *= scale shape[1] *= scale img = cv2.resize(img, (int(shape[1]), int(shape[0]))) img_shape = np.shape(img) return img, img_shape, scale def image_transform(img, points, square_length=150): """crop original image using perspective warp""" board_length = square_length * 8 def __dis(a, b): return np.linalg.norm(na(a)-na(b)) def __shi(seq, n=0): return seq[-(n % len(seq)):] + seq[:-(n % len(seq))] best_idx, best_val = 0, 10**6 for idx, val in enumerate(points): val = __dis(val, [0, 0]) if val < best_val: best_idx, best_val = idx, val pts1 = np.float32(__shi(points, 4 - best_idx)) pts2 = np.float32([[0, 0], [board_length, 0], \ [board_length, board_length], [0, board_length]]) M = cv2.getPerspectiveTransform(pts1, pts2) W = cv2.warpPerspective(img, M, (board_length, board_length)) return W def crop(pts,img): """cropping original image to output""" img_crop = image_transform(img,pts) return img_crop
en
0.742253
scale to original image size resize image to square area (height^2) crop original image using perspective warp cropping original image to output
2.844913
3
libs/parse_audio.py
prateekKrOraon/song-identification
0
6623014
import os import io import numpy as np from pydub import AudioSegment from hashlib import sha1 from pydub.utils import audioop def parse_bytes(bytes_data, format="mp3", offline=False): """Processes the bytes received to extract audio information. Extracts audio channels, frame rate and SHA-1 hash digest. Args: bytes_data: A bytes stream. format: Audio coding format. offline: If the file is coming from local storage. Returns: A dict of Audio information containing channels, frame rate and hash value. Raises: audioop.error: An error occurred while processing audio file """ global channels, frame_rate, hash_val try: if not offline: file = io.BytesIO(bytes_data) else: file = bytes_data audio_file = AudioSegment.from_file(file, format=format) data = np.fromstring(audio_file._data, np.int16) channels = [] for channel in range(audio_file.channels): channels.append(data[channel::audio_file.channels]) frame_rate = audio_file.frame_rate hash_val = parse_file_hash(file, offline) except audioop.error: print('audioop.error') song = { 'channels': channels, 'frame_rate': frame_rate, 'hash_value': hash_val } return song def parse_file_hash(bytes, offline, blocksize=2 * 20): """Generates SHA-1 hash of file Args: bytes: File object. offline: File coming from local storage or not. blocksize: Size of block to be processed in each iteration while generating hash. Returns: str : Hex digest of the file in upper case. """ global file s = sha1() if offline: file = open(bytes, "rb") else: file = bytes while True: buf = file.read(blocksize) if not buf: break s.update(buf) if offline: file.close() return s.hexdigest().upper()
import os import io import numpy as np from pydub import AudioSegment from hashlib import sha1 from pydub.utils import audioop def parse_bytes(bytes_data, format="mp3", offline=False): """Processes the bytes received to extract audio information. Extracts audio channels, frame rate and SHA-1 hash digest. Args: bytes_data: A bytes stream. format: Audio coding format. offline: If the file is coming from local storage. Returns: A dict of Audio information containing channels, frame rate and hash value. Raises: audioop.error: An error occurred while processing audio file """ global channels, frame_rate, hash_val try: if not offline: file = io.BytesIO(bytes_data) else: file = bytes_data audio_file = AudioSegment.from_file(file, format=format) data = np.fromstring(audio_file._data, np.int16) channels = [] for channel in range(audio_file.channels): channels.append(data[channel::audio_file.channels]) frame_rate = audio_file.frame_rate hash_val = parse_file_hash(file, offline) except audioop.error: print('audioop.error') song = { 'channels': channels, 'frame_rate': frame_rate, 'hash_value': hash_val } return song def parse_file_hash(bytes, offline, blocksize=2 * 20): """Generates SHA-1 hash of file Args: bytes: File object. offline: File coming from local storage or not. blocksize: Size of block to be processed in each iteration while generating hash. Returns: str : Hex digest of the file in upper case. """ global file s = sha1() if offline: file = open(bytes, "rb") else: file = bytes while True: buf = file.read(blocksize) if not buf: break s.update(buf) if offline: file.close() return s.hexdigest().upper()
en
0.736144
Processes the bytes received to extract audio information. Extracts audio channels, frame rate and SHA-1 hash digest. Args: bytes_data: A bytes stream. format: Audio coding format. offline: If the file is coming from local storage. Returns: A dict of Audio information containing channels, frame rate and hash value. Raises: audioop.error: An error occurred while processing audio file Generates SHA-1 hash of file Args: bytes: File object. offline: File coming from local storage or not. blocksize: Size of block to be processed in each iteration while generating hash. Returns: str : Hex digest of the file in upper case.
3.296856
3
pracnbastats/format.py
practicallypredictable/pracnbstats
0
6623015
<filename>pracnbastats/format.py import numpy as np import pandas as pd from . import params def season_id(df): """Extract season and season type from a box score season ID.""" df['season'] = df['SEASON_ID'].apply( lambda id: params.Season.season_from_id(id).start_year ) df['season_type'] = df['SEASON_ID'].apply( lambda id: params.SeasonType.season_type_from_id(id).attr_abbr ) df = df.drop(columns=['SEASON_ID']) return df def matchup(df): """Add more useful columns based upon matchup information.""" df['home_road'] = np.where(df['MATCHUP'].str.contains('@'), 'R', 'H') df['opp_team_abbr'] = df['MATCHUP'].str.split(' ').str.get(-1) df = df.drop(columns=['MATCHUP']) return df def order_columns(df, *, first_cols, last_cols=None): """Reorder DataFrame columns by first/middle/last grouping.""" if last_cols: middle_cols = [col for col in df if col not in set().union(first_cols, last_cols)] else: middle_cols = [] last_cols = [col for col in df if col not in first_cols] return df[(first_cols + middle_cols + last_cols)]
<filename>pracnbastats/format.py import numpy as np import pandas as pd from . import params def season_id(df): """Extract season and season type from a box score season ID.""" df['season'] = df['SEASON_ID'].apply( lambda id: params.Season.season_from_id(id).start_year ) df['season_type'] = df['SEASON_ID'].apply( lambda id: params.SeasonType.season_type_from_id(id).attr_abbr ) df = df.drop(columns=['SEASON_ID']) return df def matchup(df): """Add more useful columns based upon matchup information.""" df['home_road'] = np.where(df['MATCHUP'].str.contains('@'), 'R', 'H') df['opp_team_abbr'] = df['MATCHUP'].str.split(' ').str.get(-1) df = df.drop(columns=['MATCHUP']) return df def order_columns(df, *, first_cols, last_cols=None): """Reorder DataFrame columns by first/middle/last grouping.""" if last_cols: middle_cols = [col for col in df if col not in set().union(first_cols, last_cols)] else: middle_cols = [] last_cols = [col for col in df if col not in first_cols] return df[(first_cols + middle_cols + last_cols)]
en
0.808239
Extract season and season type from a box score season ID. Add more useful columns based upon matchup information. Reorder DataFrame columns by first/middle/last grouping.
3.21152
3
nsl/ast/__init__.py
Anteru/nsl
0
6623016
import collections import collections.abc from nsl import op, types, Visitor from enum import Enum import bisect from typing import List class SourceMapping: def __init__(self, source, sourceName = '<unknown>'): self.__sourceName = sourceName self.__lineOffsets = [] currentOffset = 0 for line in source.split ('\n'): self.__lineOffsets.append(currentOffset) currentOffset += len (line) + 1 # trailing \n def GetLineFromOffset(self, offset): return bisect.bisect_right(self.__lineOffsets, offset) - 1 def GetLineStartOffset(self, line): return self.__lineOffsets[line] def GetSourceName(self): return self.__sourceName class Location: def __init__(self, span, sourceMapping = None): assert span[1] >= span [0] self.__span = span self.__sourceMapping = sourceMapping @classmethod def Merge (cls, *args): assert len(args) > 0 result = args[0].__span mapping = args[0].__sourceMapping for arg in args [1:]: assert isinstance (arg, Location) end = max (result [1], arg.GetEnd ()) start = min (result [0], arg.GetBegin ()) result = (start, end, ) return cls(result, mapping) def GetBegin(self): return self.__span [0] def GetEnd(self): return self.__span [1] @property def IsUnknown(self): return self.__span == (-1, -1) def __str__(self): if self.IsUnknown: return '<unknown>' if self.__sourceMapping: # Lines are 0 based (as are columns), and we need to offset # with +1 for display startLine = self.__sourceMapping.GetLineFromOffset (self.GetBegin ()) endLine = self.__sourceMapping.GetLineFromOffset (self.GetEnd ()) if startLine == endLine: startOffset = self.__sourceMapping.GetLineStartOffset(startLine) return '{}:{}-{}'.format ( startLine + 1, self.GetBegin () - startOffset + 1, self.GetEnd () - startOffset + 1) else: startOffset = self.__sourceMapping.GetLineStartOffset (startLine) endOffset = self.__sourceMapping.GetLineStartOffset(endLine) return '{}:{}-{}:{}'.format ( startLine + 1, self.GetBegin () - startOffset + 1, endLine + 1, self.GetEnd () - endOffset + 1) else: return 
'[{},{})'.format (self.GetBegin (), self.GetEnd ()) def __repr__(self): return 'Location({})'.format (repr(self.__span)) class Node(Visitor.Node): def __init__(self): self.__location = Location((-1, -1)) def Clone(self): import copy return copy.deepcopy(self) def SetLocation(self, location): assert isinstance(location, Location) self.__location = location def GetLocation(self): return self.__location class Module (Node): '''A single translation module. A module consists of types, variables and functions.''' def __init__(self): super().__init__() self.__variables = list () self.__functions = list () # Types may depend on types which are previously defined # Ensure ordering by using an ordered dict self.__types = collections.OrderedDict () self.__imports = set() def _Traverse (self, function): self.__types = function(self.__types) self.__variables = function(self.__variables) self.__functions = function(self.__functions) def AddDeclaration (self, variable): self.__variables.append (variable) def AddFunction(self, func): self.__functions.append (func) def AddType (self, decl): self.__types [decl.GetName ()] = decl def AddImport(self, name): self.__imports.add((name)) def GetDeclarations (self): return self.__variables def GetTypes(self): return self.__types.values () def GetFunctions(self): return self.__functions def GetImports(self): return self.__imports def __str__(self): return '''Module ({0} variable(s), {1} function(s), {2} type(s))'''.format( len(self.__variables), len(self.__functions), len (self.__types)) class Expression(Node): def __init__(self, children=[]): super().__init__() self.children = children self.__type = None def GetType(self): return self.__type def SetType(self, nslType): '''The type of this expression. 
This depends on the specific expression type, for instance, for a call expression this will be a function type, while for an unary expression it will be a primitive or structure type.''' assert not isinstance(nslType, types.UnresolvedType) self.__type = nslType def _Traverse(self, function): self.children = function(self.children) def __iter__(self): return self.children.__iter__() class UnaryExpression(Expression): pass class EmptyExpression(Expression): def __init__(self): super().__init__() class CastExpression(UnaryExpression): def __init__(self, expr, targetType, implicit = False): super().__init__([expr]) assert isinstance (targetType, types.PrimitiveType) self.SetType (targetType) self.__implicit = implicit def IsImplicit(self): return self.__implicit def GetArgument(self): return self.children[0] def __str__(self): return '{} ({})'.format (self.GetType(), self.GetArgument()) def __repr__(self): return 'CastExpression ({}, {}, {})'.format ( repr(self.GetArgument()), repr (self.GetType ()), self.IsImplicit ()) class ConstructPrimitiveExpression(UnaryExpression): '''Expression of the type primitive_type (expr, ...).''' def __init__(self, targetType, expressions): super().__init__(expressions) assert isinstance (targetType, types.PrimitiveType) self.SetType (targetType) def __str__(self): return '{} ({})'.format (self.GetType ().GetName (), ', '.join ([str(expr) for expr in self.children])) def GetArguments(self): return self.children def SetArguments(self, args): self.children = args class CallExpression(UnaryExpression): """A function call of the form ID ([expr], ...). 
ID references an unresolved function type at first.""" def __init__(self, function: types.Type, expressions: List[Expression]): super().__init__(expressions) self.function = function def __str__(self): r = self.function.GetName () + ' (' r += ', '.join(['{0}'.format(str(expr)) for expr in self.children]) return r + ')' def GetArguments(self): return self.children def SetArguments(self, arguments: List[Expression]): self.children = arguments def GetFunction(self) -> types.Type: return self.function def ResolveType(self, scope): self.function = types.ResolveFunction(self.function, scope, [expr.GetType() for expr in self.GetArguments ()]) assert isinstance(self.function, types.Function) class VariableAccessExpression(UnaryExpression): pass class ArrayExpression(VariableAccessExpression): '''Expression of the form 'id[expr]', where id can be a nested access expression itself.''' def __init__(self, identifier, expression): super().__init__() self.id = identifier self._expression = expression def GetParent(self): return self.id def GetExpression(self): return self._expression def SetExpression(self, expr): self._expression = expr def _Traverse(self, function): self.id = function(self.id) self._expression = function(self._expression) def __str__(self): return str(self.id) + ' [' + str(self._expression) + ']' class MemberAccessExpression(VariableAccessExpression): '''Expression of the form 'id.member', where id can be a access nested expression itself. A member access expression can be a swizzle. If so, ``isSwizzle`` should be set to ``True``.''' def __init__(self, identifier, member): super().__init__() self.id = identifier self.member = member self.isSwizzle = False def GetMember(self): return self.member def GetParent(self): return self.id def _Traverse(self, function): self.id = function(self.id) self.member = function(self.member) def SetSwizzle(self, isSwizzle: bool) -> None: self.isSwizzle = isSwizzle def __str__(self): return str(self.id) + '.' 
+ str(self.member) class BinaryExpression(Expression): def __init__(self, op, left, right): super().__init__([left, right]) self.op = op self._operator = None def GetLeft(self): return self.children [0] def GetRight(self): return self.children [1] def SetLeft(self, left): self.children [0] = left def SetRight(self, right): self.children [1] = right def GetOperation(self): '''The the operation.''' return self.op def GetOperator(self): '''Get the used operator. This is an instance of ExpressionType.''' return self._operator def ResolveType (self, left, right): self._operator = types.ResolveBinaryExpressionType (self.op, left, right) def __str__(self): r = '' if (isinstance (self.GetLeft (), BinaryExpression)): r += '(' + str (self.GetLeft ()) + ')' else: r += str (self.GetLeft ()) r += ' ' + op.OpToStr(self.op) + ' ' if (isinstance (self.GetRight (), BinaryExpression)): r += '(' + str (self.GetRight ()) + ')' else: r += str (self.GetRight ()) return r class AssignmentExpression(BinaryExpression): def __init__(self, left, right, *, operation=op.Operation.ASSIGN): super().__init__(operation, left, right) def ResolveType(self, left, right): self._operator = types.ExpressionType (self.GetLeft().GetType (), [self.GetLeft ().GetType(), self.GetRight ().GetType ()]) class Affix: PRE = 1 POST = 2 class AffixExpression(UnaryExpression): def __init__(self, op, expr, affix): super().__init__([expr]) self.op = op self.affix = affix def IsPostfix (self): return self.affix == Affix.POST def IsPrefix (self): return self.affix == Affix.PRE def GetOperation (self): return self.op def GetExpression(self): return self.children[0] def __str__(self): if self.affix == Affix.PRE: if self.op == op.Operation.ADD: return f'++{self.children[0]}' elif self.op == op.Operation.SUB: return f'--{self.children[0]}' elif self.affix == Affix.POST: if self.op == op.Operation.ADD: return f'{self.children[0]}++' elif self.op == op.Operation.SUB: return f'{self.children[0]}--' class 
LiteralExpression(UnaryExpression): def __init__(self, value, literalType): super().__init__() self.value = value self.SetType (literalType) def GetValue(self): return self.value def __str__(self): return str (self.value) class PrimaryExpression(UnaryExpression): def __init__(self, identifier): super().__init__() self.identifier = identifier def GetName(self): return self.identifier def __str__(self): return self.identifier class InvalidStructureDefinitionException(Exception): def __init__(self, structName: str, memberName: str): self.structName = structName self.memberName = memberName class StructureDefinition(Node): def __init__(self, name, fields = list()): super().__init__() self.__name = name self.__fields = fields self.__type = types.UnresolvedType (name) # Check that all element names are unique fieldNames = set() for field in fields: if field.GetName () in fieldNames: raise InvalidStructureDefinitionException(name, field.GetName()) fieldNames.add (field.GetName ()) self.__annotations = [] def _Traverse(self, function): self.__fields = function(self.__fields) def AddAnnotation (self, annotation): assert isinstance(annotation, Annotation) self.__annotations.append (annotation) def GetAnnotations(self): return self.__annotations def GetName(self): return self.__name def __str__(self): return 'struct {0} ({1} field(s))'.format (self.GetName (), len (self.GetFields())) def GetFields (self): return self.__fields def SetType(self, structType): assert isinstance (structType, types.StructType) self.__type = structType def GetType(self): return self.__type class InterfaceDefinition(Node): def __init__(self, name, methods = list ()): super().__init__() self.__name = name self.__methods = methods self.__type = types.UnresolvedType (name) def _Traverse(self, function): self.__methods = function(self.__methods) def GetMethods (self): return self.__methods def GetName (self): return self.__name def SetType(self, interfaceType): assert isinstance (interfaceType, 
types.ClassType) self.__type = interfaceType def GetType(self): return self.__type class VariableDeclaration(Node): def __init__(self, variableType, symbol, initExpression = None): super().__init__() self.__symbol = symbol self.__initializer = initExpression self.__type = variableType def ResolveType(self, scope): self.__type = types.ResolveType(self.__type, scope) return self.__type def _Traverse(self, function): self.__initializer = function(self.__initializer) def __str__(self): if not self.__type.NeedsResolve (): if self.__type.IsArray (): result = str(self.__type.GetComponentType ()) + ' ' + str(self.GetName ()) + '[' + ', '.join(map(str, self.__type.GetSize())) + ']' else: result = str(self.__type) + ' ' + str(self.GetName ()) else: result = self.GetName () if (self.HasInitializerExpression ()): result += '= ' + str(self.__initializer) return result def GetType(self): return self.__type def GetName(self): return self.__symbol def HasInitializerExpression(self): return self.__initializer is not None def GetInitializerExpression(self): return self.__initializer class ArgumentModifier(Enum): Optional = 1 class Argument(Node): '''Function argument. 
Captures the type (potentially a Type or UnresolvedType) and the name of the argument.''' def __init__(self, argumentType, name = None, modifiers = set()): super().__init__() self.__type = argumentType self.__name = name self.__modifiers = modifiers def ResolveType(self, scope): self.__type = types.ResolveType(self.__type, scope) return self.__type def GetType(self): return self.__type def GetName(self): return self.__name def HasName (self): return self.__name is not None def GetModifiers(self): return self.__modifiers def IsOptional(self): return ArgumentModifier.Optional in self.__modifiers def __str__(self): if self.__name is not None: return '{} {}'.format (self.__type.GetName (), self.__name) else: return '{} <unnamed>'.format (self.__type.GetName ()) class Function(Node): def __init__(self, name, arguments = list (), returnType = types.Void (), body = None, *, isForwardDeclaration = False, isExported = False): super().__init__() self.name = name self.__body = body self.__type = types.Function (name, returnType, arguments, isExported) self.arguments = arguments self.isForwardDeclaration = isForwardDeclaration self.isExported = isExported def ResolveType(self, scope): for arg in self.arguments: arg.ResolveType (scope) def _Traverse(self, function): self.arguments = function(self.arguments) if not self.isForwardDeclaration: self.__body = function(self.__body) def GetName(self): return self.name def GetType(self): return self.__type def GetArguments(self): return self.arguments def GetBody(self): return self.__body def __str__(self): return '{} ({} argument(s))'.format (self.GetName (), len (self.GetArguments())) class Statement(Node): pass class BranchControl(Enum): Default = 0 Branch = 1 class FlowStatement(Statement): pass class EmptyStatement(Statement): pass class ExpressionStatement(Statement): def __init__(self, expr): super().__init__() self.__expression = expr def _Traverse(self, function): self.__expression = function(self.__expression) def 
GetExpression(self): return self.__expression def __str__(self): return 'Expression' class CompoundStatement(Statement): '''Compound statement consisting of zero or more statements. Compound statements also create a new visibility block.''' def __init__(self, stmts): super().__init__() self.__statements = stmts def GetStatements(self): return self.__statements def SetStatements(self, statements): self.__statements = statements def _Traverse(self, function): self.__statements = function(self.__statements) def __len__(self): return len(self.__statements) def __iter__(self): '''Iterate over the statements.''' return self.__statements.__iter__() def __str__(self): return '{0} statement(s)'.format (len(self)) class ReturnStatement(FlowStatement): def __init__(self, expression = None): super().__init__() self.__expression = expression def _Traverse(self, function): if self.__expression: self.__expression = function(self.__expression) def GetExpression(self): return self.__expression def __str__(self): if self.__expression: return 'return ' + str(self.__expression) else: return 'return' class DeclarationStatement(Statement): def __init__(self, variableDeclarations): super().__init__() self.declarations = variableDeclarations def GetDeclarations(self): return self.declarations def _Traverse(self, function): self.declarations = function(self.declarations) def __str__(self): return '{0} declaration(s)'.format(len(self.declarations)) class IfStatement(FlowStatement): def __init__(self, cond, true_path, else_path=None, *, branch_control=BranchControl.Default): super().__init__() self.__condition = cond self.__trueBlock = true_path self.__elseBlock = else_path self.__branchControl = branch_control def _Traverse(self, function): self.__condition = function(self.__condition) self.__trueBlock = function(self.__trueBlock) self.__elseBlock = function(self.__elseBlock) def GetCondition(self): return self.__condition def GetTruePath(self): return self.__trueBlock def 
GetElsePath(self): return self.__elseBlock def HasElsePath(self): return self.__elseBlock is not None def __str__(self): return str(self.__condition) class ContinueStatement(FlowStatement): def __init__(self): super().__init__() class BreakStatement(FlowStatement): def __init__(self): super().__init__() class ForStatement(FlowStatement): def __init__(self, init, cond, increment, body): super().__init__() self.__initializer = init self.__condition = cond self.__next = increment self.__body = body def GetBody (self): return self.__body def GetInitialization (self): return self.__initializer def GetCondition(self): return self.__condition def GetNext (self): return self.__next def _Traverse(self, function): self.__initializer = function(self.__initializer) self.__condition = function(self.__condition) self.__next = function(self.__next) self.__body = function(self.__body) def __str__(self): return 'ForStatement' class DoStatement(FlowStatement): def __init__(self, cond, body): super().__init__() self.__condition = cond self.__body = body def _Traverse(self, function): self.__body = function(self.__body) self.__condition = function(self.__condition) def GetCondition(self): return self.__condition def GetBody (self): return self.__body class WhileStatement(FlowStatement): def __init__(self, cond, body): super().__init__() self.__condition = cond self.__body = body def _Traverse(self, function): self.__body = function(self.__body) self.__condition = function(self.__condition) def GetCondition(self): return self.__condition def GetBody (self): return self.__body class Annotation(Node): def __init__(self, value): super().__init__() self.__value = value def GetValue(self): return self.__value def __str__(self): return '[{}]'.format (self.__value) def __repr__(self): return 'Annotation({})'.format (repr(self.__value))
import collections import collections.abc from nsl import op, types, Visitor from enum import Enum import bisect from typing import List class SourceMapping: def __init__(self, source, sourceName = '<unknown>'): self.__sourceName = sourceName self.__lineOffsets = [] currentOffset = 0 for line in source.split ('\n'): self.__lineOffsets.append(currentOffset) currentOffset += len (line) + 1 # trailing \n def GetLineFromOffset(self, offset): return bisect.bisect_right(self.__lineOffsets, offset) - 1 def GetLineStartOffset(self, line): return self.__lineOffsets[line] def GetSourceName(self): return self.__sourceName class Location: def __init__(self, span, sourceMapping = None): assert span[1] >= span [0] self.__span = span self.__sourceMapping = sourceMapping @classmethod def Merge (cls, *args): assert len(args) > 0 result = args[0].__span mapping = args[0].__sourceMapping for arg in args [1:]: assert isinstance (arg, Location) end = max (result [1], arg.GetEnd ()) start = min (result [0], arg.GetBegin ()) result = (start, end, ) return cls(result, mapping) def GetBegin(self): return self.__span [0] def GetEnd(self): return self.__span [1] @property def IsUnknown(self): return self.__span == (-1, -1) def __str__(self): if self.IsUnknown: return '<unknown>' if self.__sourceMapping: # Lines are 0 based (as are columns), and we need to offset # with +1 for display startLine = self.__sourceMapping.GetLineFromOffset (self.GetBegin ()) endLine = self.__sourceMapping.GetLineFromOffset (self.GetEnd ()) if startLine == endLine: startOffset = self.__sourceMapping.GetLineStartOffset(startLine) return '{}:{}-{}'.format ( startLine + 1, self.GetBegin () - startOffset + 1, self.GetEnd () - startOffset + 1) else: startOffset = self.__sourceMapping.GetLineStartOffset (startLine) endOffset = self.__sourceMapping.GetLineStartOffset(endLine) return '{}:{}-{}:{}'.format ( startLine + 1, self.GetBegin () - startOffset + 1, endLine + 1, self.GetEnd () - endOffset + 1) else: return 
'[{},{})'.format (self.GetBegin (), self.GetEnd ()) def __repr__(self): return 'Location({})'.format (repr(self.__span)) class Node(Visitor.Node): def __init__(self): self.__location = Location((-1, -1)) def Clone(self): import copy return copy.deepcopy(self) def SetLocation(self, location): assert isinstance(location, Location) self.__location = location def GetLocation(self): return self.__location class Module (Node): '''A single translation module. A module consists of types, variables and functions.''' def __init__(self): super().__init__() self.__variables = list () self.__functions = list () # Types may depend on types which are previously defined # Ensure ordering by using an ordered dict self.__types = collections.OrderedDict () self.__imports = set() def _Traverse (self, function): self.__types = function(self.__types) self.__variables = function(self.__variables) self.__functions = function(self.__functions) def AddDeclaration (self, variable): self.__variables.append (variable) def AddFunction(self, func): self.__functions.append (func) def AddType (self, decl): self.__types [decl.GetName ()] = decl def AddImport(self, name): self.__imports.add((name)) def GetDeclarations (self): return self.__variables def GetTypes(self): return self.__types.values () def GetFunctions(self): return self.__functions def GetImports(self): return self.__imports def __str__(self): return '''Module ({0} variable(s), {1} function(s), {2} type(s))'''.format( len(self.__variables), len(self.__functions), len (self.__types)) class Expression(Node): def __init__(self, children=[]): super().__init__() self.children = children self.__type = None def GetType(self): return self.__type def SetType(self, nslType): '''The type of this expression. 
This depends on the specific expression type, for instance, for a call expression this will be a function type, while for an unary expression it will be a primitive or structure type.''' assert not isinstance(nslType, types.UnresolvedType) self.__type = nslType def _Traverse(self, function): self.children = function(self.children) def __iter__(self): return self.children.__iter__() class UnaryExpression(Expression): pass class EmptyExpression(Expression): def __init__(self): super().__init__() class CastExpression(UnaryExpression): def __init__(self, expr, targetType, implicit = False): super().__init__([expr]) assert isinstance (targetType, types.PrimitiveType) self.SetType (targetType) self.__implicit = implicit def IsImplicit(self): return self.__implicit def GetArgument(self): return self.children[0] def __str__(self): return '{} ({})'.format (self.GetType(), self.GetArgument()) def __repr__(self): return 'CastExpression ({}, {}, {})'.format ( repr(self.GetArgument()), repr (self.GetType ()), self.IsImplicit ()) class ConstructPrimitiveExpression(UnaryExpression): '''Expression of the type primitive_type (expr, ...).''' def __init__(self, targetType, expressions): super().__init__(expressions) assert isinstance (targetType, types.PrimitiveType) self.SetType (targetType) def __str__(self): return '{} ({})'.format (self.GetType ().GetName (), ', '.join ([str(expr) for expr in self.children])) def GetArguments(self): return self.children def SetArguments(self, args): self.children = args class CallExpression(UnaryExpression): """A function call of the form ID ([expr], ...). 
ID references an unresolved function type at first.""" def __init__(self, function: types.Type, expressions: List[Expression]): super().__init__(expressions) self.function = function def __str__(self): r = self.function.GetName () + ' (' r += ', '.join(['{0}'.format(str(expr)) for expr in self.children]) return r + ')' def GetArguments(self): return self.children def SetArguments(self, arguments: List[Expression]): self.children = arguments def GetFunction(self) -> types.Type: return self.function def ResolveType(self, scope): self.function = types.ResolveFunction(self.function, scope, [expr.GetType() for expr in self.GetArguments ()]) assert isinstance(self.function, types.Function) class VariableAccessExpression(UnaryExpression): pass class ArrayExpression(VariableAccessExpression): '''Expression of the form 'id[expr]', where id can be a nested access expression itself.''' def __init__(self, identifier, expression): super().__init__() self.id = identifier self._expression = expression def GetParent(self): return self.id def GetExpression(self): return self._expression def SetExpression(self, expr): self._expression = expr def _Traverse(self, function): self.id = function(self.id) self._expression = function(self._expression) def __str__(self): return str(self.id) + ' [' + str(self._expression) + ']' class MemberAccessExpression(VariableAccessExpression): '''Expression of the form 'id.member', where id can be a access nested expression itself. A member access expression can be a swizzle. If so, ``isSwizzle`` should be set to ``True``.''' def __init__(self, identifier, member): super().__init__() self.id = identifier self.member = member self.isSwizzle = False def GetMember(self): return self.member def GetParent(self): return self.id def _Traverse(self, function): self.id = function(self.id) self.member = function(self.member) def SetSwizzle(self, isSwizzle: bool) -> None: self.isSwizzle = isSwizzle def __str__(self): return str(self.id) + '.' 
+ str(self.member) class BinaryExpression(Expression): def __init__(self, op, left, right): super().__init__([left, right]) self.op = op self._operator = None def GetLeft(self): return self.children [0] def GetRight(self): return self.children [1] def SetLeft(self, left): self.children [0] = left def SetRight(self, right): self.children [1] = right def GetOperation(self): '''The the operation.''' return self.op def GetOperator(self): '''Get the used operator. This is an instance of ExpressionType.''' return self._operator def ResolveType (self, left, right): self._operator = types.ResolveBinaryExpressionType (self.op, left, right) def __str__(self): r = '' if (isinstance (self.GetLeft (), BinaryExpression)): r += '(' + str (self.GetLeft ()) + ')' else: r += str (self.GetLeft ()) r += ' ' + op.OpToStr(self.op) + ' ' if (isinstance (self.GetRight (), BinaryExpression)): r += '(' + str (self.GetRight ()) + ')' else: r += str (self.GetRight ()) return r class AssignmentExpression(BinaryExpression): def __init__(self, left, right, *, operation=op.Operation.ASSIGN): super().__init__(operation, left, right) def ResolveType(self, left, right): self._operator = types.ExpressionType (self.GetLeft().GetType (), [self.GetLeft ().GetType(), self.GetRight ().GetType ()]) class Affix: PRE = 1 POST = 2 class AffixExpression(UnaryExpression): def __init__(self, op, expr, affix): super().__init__([expr]) self.op = op self.affix = affix def IsPostfix (self): return self.affix == Affix.POST def IsPrefix (self): return self.affix == Affix.PRE def GetOperation (self): return self.op def GetExpression(self): return self.children[0] def __str__(self): if self.affix == Affix.PRE: if self.op == op.Operation.ADD: return f'++{self.children[0]}' elif self.op == op.Operation.SUB: return f'--{self.children[0]}' elif self.affix == Affix.POST: if self.op == op.Operation.ADD: return f'{self.children[0]}++' elif self.op == op.Operation.SUB: return f'{self.children[0]}--' class 
LiteralExpression(UnaryExpression): def __init__(self, value, literalType): super().__init__() self.value = value self.SetType (literalType) def GetValue(self): return self.value def __str__(self): return str (self.value) class PrimaryExpression(UnaryExpression): def __init__(self, identifier): super().__init__() self.identifier = identifier def GetName(self): return self.identifier def __str__(self): return self.identifier class InvalidStructureDefinitionException(Exception): def __init__(self, structName: str, memberName: str): self.structName = structName self.memberName = memberName class StructureDefinition(Node): def __init__(self, name, fields = list()): super().__init__() self.__name = name self.__fields = fields self.__type = types.UnresolvedType (name) # Check that all element names are unique fieldNames = set() for field in fields: if field.GetName () in fieldNames: raise InvalidStructureDefinitionException(name, field.GetName()) fieldNames.add (field.GetName ()) self.__annotations = [] def _Traverse(self, function): self.__fields = function(self.__fields) def AddAnnotation (self, annotation): assert isinstance(annotation, Annotation) self.__annotations.append (annotation) def GetAnnotations(self): return self.__annotations def GetName(self): return self.__name def __str__(self): return 'struct {0} ({1} field(s))'.format (self.GetName (), len (self.GetFields())) def GetFields (self): return self.__fields def SetType(self, structType): assert isinstance (structType, types.StructType) self.__type = structType def GetType(self): return self.__type class InterfaceDefinition(Node): def __init__(self, name, methods = list ()): super().__init__() self.__name = name self.__methods = methods self.__type = types.UnresolvedType (name) def _Traverse(self, function): self.__methods = function(self.__methods) def GetMethods (self): return self.__methods def GetName (self): return self.__name def SetType(self, interfaceType): assert isinstance (interfaceType, 
types.ClassType) self.__type = interfaceType def GetType(self): return self.__type class VariableDeclaration(Node): def __init__(self, variableType, symbol, initExpression = None): super().__init__() self.__symbol = symbol self.__initializer = initExpression self.__type = variableType def ResolveType(self, scope): self.__type = types.ResolveType(self.__type, scope) return self.__type def _Traverse(self, function): self.__initializer = function(self.__initializer) def __str__(self): if not self.__type.NeedsResolve (): if self.__type.IsArray (): result = str(self.__type.GetComponentType ()) + ' ' + str(self.GetName ()) + '[' + ', '.join(map(str, self.__type.GetSize())) + ']' else: result = str(self.__type) + ' ' + str(self.GetName ()) else: result = self.GetName () if (self.HasInitializerExpression ()): result += '= ' + str(self.__initializer) return result def GetType(self): return self.__type def GetName(self): return self.__symbol def HasInitializerExpression(self): return self.__initializer is not None def GetInitializerExpression(self): return self.__initializer class ArgumentModifier(Enum): Optional = 1 class Argument(Node): '''Function argument. 
Captures the type (potentially a Type or UnresolvedType) and the name of the argument.''' def __init__(self, argumentType, name = None, modifiers = set()): super().__init__() self.__type = argumentType self.__name = name self.__modifiers = modifiers def ResolveType(self, scope): self.__type = types.ResolveType(self.__type, scope) return self.__type def GetType(self): return self.__type def GetName(self): return self.__name def HasName (self): return self.__name is not None def GetModifiers(self): return self.__modifiers def IsOptional(self): return ArgumentModifier.Optional in self.__modifiers def __str__(self): if self.__name is not None: return '{} {}'.format (self.__type.GetName (), self.__name) else: return '{} <unnamed>'.format (self.__type.GetName ()) class Function(Node): def __init__(self, name, arguments = list (), returnType = types.Void (), body = None, *, isForwardDeclaration = False, isExported = False): super().__init__() self.name = name self.__body = body self.__type = types.Function (name, returnType, arguments, isExported) self.arguments = arguments self.isForwardDeclaration = isForwardDeclaration self.isExported = isExported def ResolveType(self, scope): for arg in self.arguments: arg.ResolveType (scope) def _Traverse(self, function): self.arguments = function(self.arguments) if not self.isForwardDeclaration: self.__body = function(self.__body) def GetName(self): return self.name def GetType(self): return self.__type def GetArguments(self): return self.arguments def GetBody(self): return self.__body def __str__(self): return '{} ({} argument(s))'.format (self.GetName (), len (self.GetArguments())) class Statement(Node): pass class BranchControl(Enum): Default = 0 Branch = 1 class FlowStatement(Statement): pass class EmptyStatement(Statement): pass class ExpressionStatement(Statement): def __init__(self, expr): super().__init__() self.__expression = expr def _Traverse(self, function): self.__expression = function(self.__expression) def 
GetExpression(self): return self.__expression def __str__(self): return 'Expression' class CompoundStatement(Statement): '''Compound statement consisting of zero or more statements. Compound statements also create a new visibility block.''' def __init__(self, stmts): super().__init__() self.__statements = stmts def GetStatements(self): return self.__statements def SetStatements(self, statements): self.__statements = statements def _Traverse(self, function): self.__statements = function(self.__statements) def __len__(self): return len(self.__statements) def __iter__(self): '''Iterate over the statements.''' return self.__statements.__iter__() def __str__(self): return '{0} statement(s)'.format (len(self)) class ReturnStatement(FlowStatement): def __init__(self, expression = None): super().__init__() self.__expression = expression def _Traverse(self, function): if self.__expression: self.__expression = function(self.__expression) def GetExpression(self): return self.__expression def __str__(self): if self.__expression: return 'return ' + str(self.__expression) else: return 'return' class DeclarationStatement(Statement): def __init__(self, variableDeclarations): super().__init__() self.declarations = variableDeclarations def GetDeclarations(self): return self.declarations def _Traverse(self, function): self.declarations = function(self.declarations) def __str__(self): return '{0} declaration(s)'.format(len(self.declarations)) class IfStatement(FlowStatement): def __init__(self, cond, true_path, else_path=None, *, branch_control=BranchControl.Default): super().__init__() self.__condition = cond self.__trueBlock = true_path self.__elseBlock = else_path self.__branchControl = branch_control def _Traverse(self, function): self.__condition = function(self.__condition) self.__trueBlock = function(self.__trueBlock) self.__elseBlock = function(self.__elseBlock) def GetCondition(self): return self.__condition def GetTruePath(self): return self.__trueBlock def 
GetElsePath(self): return self.__elseBlock def HasElsePath(self): return self.__elseBlock is not None def __str__(self): return str(self.__condition) class ContinueStatement(FlowStatement): def __init__(self): super().__init__() class BreakStatement(FlowStatement): def __init__(self): super().__init__() class ForStatement(FlowStatement): def __init__(self, init, cond, increment, body): super().__init__() self.__initializer = init self.__condition = cond self.__next = increment self.__body = body def GetBody (self): return self.__body def GetInitialization (self): return self.__initializer def GetCondition(self): return self.__condition def GetNext (self): return self.__next def _Traverse(self, function): self.__initializer = function(self.__initializer) self.__condition = function(self.__condition) self.__next = function(self.__next) self.__body = function(self.__body) def __str__(self): return 'ForStatement' class DoStatement(FlowStatement): def __init__(self, cond, body): super().__init__() self.__condition = cond self.__body = body def _Traverse(self, function): self.__body = function(self.__body) self.__condition = function(self.__condition) def GetCondition(self): return self.__condition def GetBody (self): return self.__body class WhileStatement(FlowStatement): def __init__(self, cond, body): super().__init__() self.__condition = cond self.__body = body def _Traverse(self, function): self.__body = function(self.__body) self.__condition = function(self.__condition) def GetCondition(self): return self.__condition def GetBody (self): return self.__body class Annotation(Node): def __init__(self, value): super().__init__() self.__value = value def GetValue(self): return self.__value def __str__(self): return '[{}]'.format (self.__value) def __repr__(self): return 'Annotation({})'.format (repr(self.__value))
en
0.817938
# trailing \n # Lines are 0 based (as are columns), and we need to offset # with +1 for display A single translation module. A module consists of types, variables and functions. # Types may depend on types which are previously defined # Ensure ordering by using an ordered dict Module ({0} variable(s), {1} function(s), {2} type(s)) The type of this expression. This depends on the specific expression type, for instance, for a call expression this will be a function type, while for an unary expression it will be a primitive or structure type. Expression of the type primitive_type (expr, ...). A function call of the form ID ([expr], ...). ID references an unresolved function type at first. Expression of the form 'id[expr]', where id can be a nested access expression itself. Expression of the form 'id.member', where id can be a access nested expression itself. A member access expression can be a swizzle. If so, ``isSwizzle`` should be set to ``True``. The the operation. Get the used operator. This is an instance of ExpressionType. # Check that all element names are unique Function argument. Captures the type (potentially a Type or UnresolvedType) and the name of the argument. Compound statement consisting of zero or more statements. Compound statements also create a new visibility block. Iterate over the statements.
2.708107
3
machine_learning/cloud_functions/collect_from_datastore.py
cyberj0g/verification-classifier
8
6623017
""" Module for collecting metrics values from GCE datastore generated with the cloud functions located in feature_engineering USAGE: $python3 collect_from_datastore.py This will create a .csv file in the current folder containing the values of all the metrics available in the database for later use in the jupyter notebooks of this section """ import numpy as np import pandas as pd from tqdm import tqdm # [START datastore_build_service] from google.cloud import datastore # Function to create datastore client def create_client(project_id): return datastore.Client(project_id) # [END datastore_build_service] # Function to gather all properties for a kind in a pandas dataframe def get_jobs_df(kind, namespace): query = client.query(kind=kind, namespace=namespace) query_iter = query.fetch() i = 0 number_of_pages = 300 jobs_df = pd.DataFrame() for page in tqdm(query_iter.pages): i += 1 tasks = list(page) page_df = pd.DataFrame(data=tasks) print(i * number_of_pages, ' videos retrieved so far') jobs_df = pd.concat([jobs_df, page_df], axis=0, sort=True) print('Data retrieval completed {} videos retrieved, {} features extracted'.format(jobs_df.shape[0],jobs_df.shape[1])) return jobs_df def initialize(): global client print('Initializing...') pd.set_option('display.max_colwidth', -1) namespace = 'livepeer-verifier-brisque' client = create_client('epiclabs') query = client.query(kind='__kind__',namespace=namespace) query.keys_only() jobs_dict = {} inputs_df = pd.DataFrame() print('Getting inputs...') input_kinds = [entity.key.name for entity in query.fetch() if 'features_input_brisque' in entity.key.name] print('Retrieving data from Datastore...') for kind in input_kinds: kind_df = get_jobs_df(kind, namespace) kind_df['kind'] = kind inputs_df = pd.concat([inputs_df, kind_df],axis=0,sort=True, ignore_index=True) # jobs_dict[kind] = inputs_df['title'][inputs_df['kind']==kind] inputs_df.to_csv('data-brisque-large.csv') initialize()
""" Module for collecting metrics values from GCE datastore generated with the cloud functions located in feature_engineering USAGE: $python3 collect_from_datastore.py This will create a .csv file in the current folder containing the values of all the metrics available in the database for later use in the jupyter notebooks of this section """ import numpy as np import pandas as pd from tqdm import tqdm # [START datastore_build_service] from google.cloud import datastore # Function to create datastore client def create_client(project_id): return datastore.Client(project_id) # [END datastore_build_service] # Function to gather all properties for a kind in a pandas dataframe def get_jobs_df(kind, namespace): query = client.query(kind=kind, namespace=namespace) query_iter = query.fetch() i = 0 number_of_pages = 300 jobs_df = pd.DataFrame() for page in tqdm(query_iter.pages): i += 1 tasks = list(page) page_df = pd.DataFrame(data=tasks) print(i * number_of_pages, ' videos retrieved so far') jobs_df = pd.concat([jobs_df, page_df], axis=0, sort=True) print('Data retrieval completed {} videos retrieved, {} features extracted'.format(jobs_df.shape[0],jobs_df.shape[1])) return jobs_df def initialize(): global client print('Initializing...') pd.set_option('display.max_colwidth', -1) namespace = 'livepeer-verifier-brisque' client = create_client('epiclabs') query = client.query(kind='__kind__',namespace=namespace) query.keys_only() jobs_dict = {} inputs_df = pd.DataFrame() print('Getting inputs...') input_kinds = [entity.key.name for entity in query.fetch() if 'features_input_brisque' in entity.key.name] print('Retrieving data from Datastore...') for kind in input_kinds: kind_df = get_jobs_df(kind, namespace) kind_df['kind'] = kind inputs_df = pd.concat([inputs_df, kind_df],axis=0,sort=True, ignore_index=True) # jobs_dict[kind] = inputs_df['title'][inputs_df['kind']==kind] inputs_df.to_csv('data-brisque-large.csv') initialize()
en
0.68491
Module for collecting metrics values from GCE datastore generated with the cloud functions located in feature_engineering USAGE: $python3 collect_from_datastore.py This will create a .csv file in the current folder containing the values of all the metrics available in the database for later use in the jupyter notebooks of this section # [START datastore_build_service] # Function to create datastore client # [END datastore_build_service] # Function to gather all properties for a kind in a pandas dataframe # jobs_dict[kind] = inputs_df['title'][inputs_df['kind']==kind]
2.672406
3
executors/display_functions.py
thevickypedia/jarvis
0
6623018
import os import random from threading import Thread from modules.audio import speaker from modules.conditions import conversation from modules.models import models from modules.utils import support env = models.env def brightness(phrase: str): """Pre-process to check the phrase received and call the appropriate brightness function as necessary. Args: phrase: Takes the phrase spoken as an argument. """ if not env.macos: support.missing_windows_features() return phrase = phrase.lower() speaker.speak(text=random.choice(conversation.acknowledgement)) if 'set' in phrase: level = support.extract_nos(input_=phrase, method=int) if level is None: level = 50 Thread(target=set_brightness, args=[level]).start() elif 'decrease' in phrase or 'reduce' in phrase or 'lower' in phrase or \ 'dark' in phrase or 'dim' in phrase: Thread(target=decrease_brightness).start() elif 'increase' in phrase or 'bright' in phrase or 'max' in phrase or \ 'brighten' in phrase or 'light up' in phrase: Thread(target=increase_brightness).start() def increase_brightness() -> None: """Increases the brightness to maximum in macOS.""" for _ in range(32): os.system("""osascript -e 'tell application "System Events"' -e 'key code 144' -e ' end tell'""") def decrease_brightness() -> None: """Decreases the brightness to bare minimum in macOS.""" for _ in range(32): os.system("""osascript -e 'tell application "System Events"' -e 'key code 145' -e ' end tell'""") def set_brightness(level: int) -> None: """Set brightness to a custom level. - | Since Jarvis uses in-built apple script, the only way to achieve this is to set the brightness to absolute | minimum/maximum and increase/decrease the required % from there. Args: level: Percentage of brightness to be set. 
""" level = round((32 * int(level)) / 100) for _ in range(32): os.system("""osascript -e 'tell application "System Events"' -e 'key code 145' -e ' end tell'""") for _ in range(level): os.system("""osascript -e 'tell application "System Events"' -e 'key code 144' -e ' end tell'""")
import os import random from threading import Thread from modules.audio import speaker from modules.conditions import conversation from modules.models import models from modules.utils import support env = models.env def brightness(phrase: str): """Pre-process to check the phrase received and call the appropriate brightness function as necessary. Args: phrase: Takes the phrase spoken as an argument. """ if not env.macos: support.missing_windows_features() return phrase = phrase.lower() speaker.speak(text=random.choice(conversation.acknowledgement)) if 'set' in phrase: level = support.extract_nos(input_=phrase, method=int) if level is None: level = 50 Thread(target=set_brightness, args=[level]).start() elif 'decrease' in phrase or 'reduce' in phrase or 'lower' in phrase or \ 'dark' in phrase or 'dim' in phrase: Thread(target=decrease_brightness).start() elif 'increase' in phrase or 'bright' in phrase or 'max' in phrase or \ 'brighten' in phrase or 'light up' in phrase: Thread(target=increase_brightness).start() def increase_brightness() -> None: """Increases the brightness to maximum in macOS.""" for _ in range(32): os.system("""osascript -e 'tell application "System Events"' -e 'key code 144' -e ' end tell'""") def decrease_brightness() -> None: """Decreases the brightness to bare minimum in macOS.""" for _ in range(32): os.system("""osascript -e 'tell application "System Events"' -e 'key code 145' -e ' end tell'""") def set_brightness(level: int) -> None: """Set brightness to a custom level. - | Since Jarvis uses in-built apple script, the only way to achieve this is to set the brightness to absolute | minimum/maximum and increase/decrease the required % from there. Args: level: Percentage of brightness to be set. 
""" level = round((32 * int(level)) / 100) for _ in range(32): os.system("""osascript -e 'tell application "System Events"' -e 'key code 145' -e ' end tell'""") for _ in range(level): os.system("""osascript -e 'tell application "System Events"' -e 'key code 144' -e ' end tell'""")
en
0.778181
Pre-process to check the phrase received and call the appropriate brightness function as necessary. Args: phrase: Takes the phrase spoken as an argument. Increases the brightness to maximum in macOS. osascript -e 'tell application "System Events"' -e 'key code 144' -e ' end tell' Decreases the brightness to bare minimum in macOS. osascript -e 'tell application "System Events"' -e 'key code 145' -e ' end tell' Set brightness to a custom level. - | Since Jarvis uses in-built apple script, the only way to achieve this is to set the brightness to absolute | minimum/maximum and increase/decrease the required % from there. Args: level: Percentage of brightness to be set. osascript -e 'tell application "System Events"' -e 'key code 145' -e ' end tell' osascript -e 'tell application "System Events"' -e 'key code 144' -e ' end tell'
3.022237
3
tests/test_advance_compiler.py
chuanhao01/Python_Expression_Evaluator
0
6623019
import pytest from src.advance.expression_evaluator import Evaluator class TestAdvancedCompiler: @pytest.mark.parametrize('text, eval', [ ('1 + 1', 2), ('1', 1), ('2 - 3', -1), ("'aa' + 'bb'", 'aabb'), ('5 // 2', 2), ('5 / 2', 2.5), ('5 - 2 * 3', -1), ('(5 - 2) * 3', 9), ('5 - 7%4', 2), ('+-+-+-3', -3), ('3 - -+-++--2 * 2', -1), ('4**5%7', 2), ('perm(5, 2)', 20), ]) def test_success(self, text, eval): evaluator = Evaluator() test_evaluation = evaluator.evaluate(text) assert test_evaluation == eval
import pytest from src.advance.expression_evaluator import Evaluator class TestAdvancedCompiler: @pytest.mark.parametrize('text, eval', [ ('1 + 1', 2), ('1', 1), ('2 - 3', -1), ("'aa' + 'bb'", 'aabb'), ('5 // 2', 2), ('5 / 2', 2.5), ('5 - 2 * 3', -1), ('(5 - 2) * 3', 9), ('5 - 7%4', 2), ('+-+-+-3', -3), ('3 - -+-++--2 * 2', -1), ('4**5%7', 2), ('perm(5, 2)', 20), ]) def test_success(self, text, eval): evaluator = Evaluator() test_evaluation = evaluator.evaluate(text) assert test_evaluation == eval
none
1
2.842417
3
src/old_scripts/old_prob.py
s-zhang/DnDnProbabilities
0
6623020
from functools import * from itertools import * from math import sqrt # from pprint import pprint import operator # from multipledispatch import dispatch class Pmf(object): """Probability mass function""" def __init__(self): super(Pmf, self).__init__() def prob(self, outcome): raise NotImplementedError def num_outcomes(self): raise NotImplementedError def entries(self): raise NotImplementedError def map(self, f): table = {} for outcome, prob in self: mapped_outcome = f(outcome) # print(outcome, prob, mapped_outcome) table[mapped_outcome] = table.get(mapped_outcome, 0) + prob return TablePmf(table) """ @classmethod def reduce(cls, op, pmfs, unit = None): def pmf_op(pmf1, pmf2): return cls.multimap(op, (pmf1, pmf2)) if unit == None: return reduce(pmf_op, pmfs) else: return reduce(pmf_op, pmfs, unit) """ @classmethod def multimap(cls, f, pmfs): return JointPmf(pmfs).map(lambda p: f(*p)) @classmethod def multimap_any(cls, f, objs): return cls.multimap(f, tuple(map(lambda obj: cls.any_to_value_pmf(obj), objs))) @classmethod def ifthenelse(cls, condition_pmf, then_pmf, else_pmf): return cls.multimap_any(lambda oc, ot, oe: ot if oc else oe, (condition_pmf, then_pmf, else_pmf)) @classmethod def maketuple(cls, pmfs): return cls.multimap_any(lambda *os: os, pmfs) def to_dict(self): return {outcome: prob for outcome, prob in iter(self)} def at_least(self): at_least_probs = {} probs = self.to_dict() accum = 0 for outcome in reversed(sorted(probs)): at_least_probs[outcome] = probs[outcome] + accum accum += probs[outcome] return at_least_probs @staticmethod def almost_equal(n1, n2, tolerance=0.0001): return abs((n1 - n2) / n2) < tolerance @staticmethod def dict_almost_equal(dict1, dict2, tolerance=0.0001): if len(dict1) != len(dict2): return False for key, value in dict1.items(): if not Pmf.almost_equal(value, dict2[key], tolerance): print(key, value, dict2[key]) return False return True def is_equiv(self, other, tolerance=0.0001): # print(self, other) if len(self) != 
len(other): return False for outcome, prob in iter(self): if not Pmf.almost_equal(prob, other[outcome], tolerance): print(outcome, prob, other[outcome]) return False return True def to_value_pmf(self): return self @classmethod def any_to_value_pmf(cls, obj): if isinstance(obj, Pmf): return obj.to_value_pmf() else: return ConstantPmf(obj) def pmf_op(self, other, op): # return self.multimap(op, (self.to_value_pmf(), self.any_to_value_pmf(other))) return self.multimap_any(op, (self, other)) def avg(self): return reduce(operator.add, map(lambda i: i[0] * i[1], iter(self.to_value_pmf()))) def sd(self): return sqrt(self.to_value_pmf().map(lambda n: n ** 2).avg() - (self.avg()) ** 2) def print_stats(self): print(self.avg(), self.sd(), self.at_least()) def __getitem__(self, key): return self.prob(key) def __len__(self): return self.num_outcomes() def __iter__(self): return self.entries() def __radd__(self, other): return self.pmf_op(other, operator.add) def __add__(self, other): return self.pmf_op(other, operator.add) def __rsub__(self, other): return self.pmf_op(other, operator.sub) def __sub__(self, other): return self.pmf_op(other, operator.sub) def __neg__(self): return self.map(operator.neg) def __rmul__(self, other): return self.pmf_op(other, operator.mul) def __mul__(self, other): return self.pmf_op(other, operator.mul) def __rtruediv__(self, other): return self.pmf_op(other, operator.truediv) def __truediv__(self, other): return self.pmf_op(other, operator.truediv) def __rfloordiv__(self, other): return self.pmf_op(other, operator.floordiv) def __floordiv__(self, other): return self.pmf_op(other, operator.floordiv) def __lt__(self, other): return self.pmf_op(other, operator.lt) def __le__(self, other): return self.pmf_op(other, operator.le) def __eq__(self, other): return self.is_equiv(other) def __ge__(self, other): return self.pmf_op(other, operator.ge) def __gt__(self, other): return self.pmf_op(other, operator.gt) def __not__(self): return 
self.map(operator.not_) def __and__(self, other): return self.pmf_op(other, operator.and_) def __or__(self, other): return self.pmf_op(other, operator.or_) def __str__(self): return str(self.to_dict()) # @dispatch(Pmf, Pmf, object) # def pmf_op(pmf1, pmf2, op): # return Pmf.multimap(op, (pmf1, pmf2))#JointPmf((pmf1, pmf2)).map(lambda p: op(*p)) """ @dispatch(Pmf, int, object) def pmf_op(pmf, c, op): return self.pmf_op(ConstantPmf(c), op) """ class TablePmf(Pmf): """Table based Pmf""" def __init__(self, table): super(TablePmf, self).__init__() self.table = table def prob(self, outcome): return self.table.get(outcome, 0) def num_outcomes(self): return len(self.table) def entries(self): return iter(self.table.items()) class ConstantPmf(TablePmf): def __init__(self, n): super(ConstantPmf, self).__init__({n: 1}) class D(Pmf): def __init__(self, faces): super(D, self).__init__() self.outcomes = range(1, faces + 1) self.p = 1 / faces def prob(self, outcome): if outcome in self.outcomes: return self.p else: return 0 def num_outcomes(self): return len(self.outcomes) def entries(self): for outcome in self.outcomes: yield (outcome, self.p) class JointPmf(Pmf): """Joint Pmf""" def __init__(self, pmfs): super(JointPmf, self).__init__() # print(1) # print(pmfs) # for p in pmfs: print(p) # print(2) self.pmfs = pmfs # ((pmf if isinstance(pmf, Pmf) else ConstantPmf(pmf)) for pmf in pmfs) def prob(self, outcome): return reduce(operator.mul, imap(lambda pmf, o: pmf[o], self.pmfs, outcome)) def num_outcomes(self): return reduce(operator.mul, map(lambda pmf: pmf.num_outcomes(), self.pmfs)) def entries(self): return self._entries_helper(iter(self.pmfs)) def _entries_helper(self, pmfs_iter): try: pmf = next(pmfs_iter) for suffix_outcome, suffix_prob in self._entries_helper(pmfs_iter): for outcome, prob in pmf: yield ((outcome,) + suffix_outcome, prob * suffix_prob) except StopIteration: yield ((), 1) def sum(self): return self.map(sum) def reduce(self, op, unit=None): def pmf_op(pmf1, 
pmf2): return Pmf.multimap(op, (pmf1, pmf2)) # print("r", self.pmfs) if unit == None: return reduce(pmf_op, self.pmfs) else: return reduce(pmf_op, self.pmfs, unit) def to_value_pmf(self): return self.sum() def largest_n(self, n): # print("l", self.pmfs) return JointPmf( tuple(map(lambda pmf: pmf.map(lambda n: (n,) + tuple((float('-inf') for _ in range(n - 1)))), self.pmfs))).reduce(lambda t1, t2: tuple(sorted(t1 + t2)[-n:])) def nth_largest(self, n): return self.largest_n(n).map(lambda t: t[-n]) def max(self): return self.reduce(max) """def __radd__(self, other): return self.sum().__radd__(other) def __add__(self, other): return self.sum().__add__(other) def __rsub__(self, other): return self.sum().__rsub__(other) def __sub__(self, other): return self.sum().__sub__(other) def __neg__(self): return self.sum().__neg__() def __rmul__(self, other): return self.sum().__rmul__(other) def __mul__(self, other): return self.sum().__mul__(other) def __rtruediv__(self, other): return self.sum().__rtruediv__(other) def __truediv__(self, other): return self.sum().__truediv__(other) def __rfloordiv__(self, other): return self.sum().__rfloordiv__(other) def __floordiv__(self, other): return self.sum().__floordiv__(other) def __lt__(self, other): return other > self.sum() def __le__(self, other): return other >= self.sum() def __ge__(self, other): return other <= self.sum() def __gt__(self, other): return other < self.sum() #return self.sum().__gt__(other) """ def __not__(self): raise TypeError def __and__(self, other): raise TypeError def __or__(self, other): raise TypeError class NPmf(JointPmf): """N many Pmfs""" def __init__(self, n, pmf): super(NPmf, self).__init__(tuple(pmf for _ in range(n))) class ND(NPmf): """N dice""" def __init__(self, n, faces): super(ND, self).__init__(n, D(faces)) # print(ND(3, 4) > D(15)) # print(ND(3, 4) > 8) # print(D(4) + ND(2, 4)) assert D(4) + ND(2, 4) == ND(3, 4).sum() # exit() assert D(2) + D(2) == TablePmf({2: 0.25, 3: 0.5, 4: 0.25}) assert 
D(2) + 1 == TablePmf({2: 0.5, 3: 0.5}) assert 1 + D(2) == TablePmf({2: 0.5, 3: 0.5}) assert ND(2, 2) + 1 == TablePmf({3: 0.25, 4: 0.5, 5: 0.25}) """ roll = D(20) if roll > 15: x = roll condition roll > 15 hit = true else: condition roll <= 15 hit = false merge branch hit """ stat_roll = ND(4, 6).largest_n(3).map(sum) assert stat_roll == TablePmf( {3: 0.0007716049382716049, 4: 0.0030864197530864196, 5: 0.007716049382716049, 6: 0.016203703703703703, 7: 0.029320987654320986, 8: 0.047839506172839504, 9: 0.07021604938271606, 10: 0.09413580246913579, 11: 0.11419753086419754, 12: 0.12885802469135801, 13: 0.13271604938271603, 14: 0.12345679012345678, 15: 0.10108024691358025, 16: 0.07253086419753085, 17: 0.04166666666666666, 18: 0.016203703703703703}) assert Pmf.dict_almost_equal(NPmf(6, stat_roll).nth_largest(2).at_least(), {18: 0.0037712978997010613, 17: 0.04297174396189502, 16: 0.1785025147554727, 15: 0.42163254105952985, 14: 0.6901003676248133, 13: 0.8786176420382443, 12: 0.9661397862008139, 11: 0.9934099842270956, 10: 0.9991552411369676, 9: 0.9999303248135635, 8: 0.9999965317348086, 7: 0.9999999030679377, 6: 0.999999998765834, 5: 0.9999999999948876, 4: 0.9999999999999979, 3: 0.9999999999999996}) assert Pmf.dict_almost_equal(NPmf(6, stat_roll).nth_largest(1).at_least(), {18: 0.09336788352756517, 17: 0.30069928149459824, 16: 0.5675723186031841, 15: 0.7939721069010339, 14: 0.9279543680017638, 13: 0.9819125032121617, 12: 0.9968194053566061, 11: 0.9996186510521065, 10: 0.9999711247911593, 9: 0.9999986646243858, 8: 0.999999965345446, 7: 0.9999999995406059, 6: 0.9999999999975957, 5: 0.9999999999999963, 4: 0.9999999999999997, 3: 0.9999999999999997}) assert Pmf.dict_almost_equal(NPmf(6, stat_roll).largest_n(2).map(sum).at_least(), {36: 0.0037712978997010613, 35: 0.021204173018408268, 34: 0.06678950251364398, 33: 0.1503173497095011, 32: 0.27328695484648013, 31: 0.4195516640008528, 30: 0.5731467072301601, 29: 0.7086608028822727, 28: 0.819832313566671, 27: 0.8961149664818756, 26: 
0.9467231694762607, 25: 0.9741534736268778, 24: 0.9891492615027956, 23: 0.9955834539859781, 22: 0.9985102911733038, 21: 0.9994953600444297, 20: 0.9998688100006322, 19: 0.9999635085211747, 18: 0.9999929491919841, 17: 0.999998399446, 16: 0.9999997840507013, 15: 0.9999999605103931, 14: 0.9999999965711357, 13: 0.9999999995038211, 12: 0.9999999999772524, 11: 0.9999999999975125, 10: 0.9999999999999567, 9: 0.9999999999999963, 8: 0.9999999999999997, 7: 0.9999999999999997, 6: 0.9999999999999997}) assert ND(2, 10).max() == TablePmf( {1: 0.010000000000000002, 2: 0.030000000000000006, 3: 0.05000000000000001, 4: 0.07, 5: 0.09000000000000002, 6: 0.11000000000000004, 7: 0.13000000000000006, 8: 0.15000000000000008, 9: 0.1700000000000001, 10: 0.1900000000000001}) assert D(6).avg() == 3.5 assert ND(2, 6).avg() == 7 assert Pmf.almost_equal(D(6).sd(), 1.707825127659933) assert Pmf.almost_equal(ND(2, 6).sd(), 2.4152294576982403) normal = ND(2, 6).sum() savage = NPmf(2, normal).max() normal.print_stats() savage.print_stats() ATTACK_HIT_CRIT = 2 ATTACK_HIT_NON_CRIT = 1 ATTACK_MISS = 0 """def attack_basic(attack_base, attack_bonus, ac, damage_roll, damage_bonus, critical_threshold = 20): #print("a1", damage_roll * 2 + damage_bonus) return Pmf.ifthenelse(attack_base >= critical_threshold, Pmf.maketuple((damage_roll * 2 + damage_bonus, ATTACK_HIT_CRIT)), Pmf.ifthenelse(attack_base > 1 and attack_base + attack_bonus >= ac, Pmf.maketuple((damage_roll + damage_bonus, ATTACK_HIT_NON_CRIT)), Pmf.maketuple((0, ATTACK_MISS)) ) )""" # attack_base > 1 and """def attack_basic(attack_base, attack_bonus, ac, damage_roll, damage_bonus, critical_threshold = 20): #print("a1", damage_roll * 2 + damage_bonus) return Pmf.ifthenelse(attack_base >= critical_threshold, ATTACK_HIT_CRIT, Pmf.ifthenelse(attack_base < critical_threshold and attack_base + attack_bonus >= ac, ATTACK_HIT_NON_CRIT, ATTACK_MISS ) ) """ # print(D(20) <= 8) # print(attack_basic(D(20), 9, 18, ND(2, 6), 5)) def d(t): return {o: 1 / t for o 
in range(1, t + 1)} def npmf(n, pmf): return tuple((pmf for _ in range(n))) # @infix def nd(n, t): return npmf(n, d(t)) pmf_empty = {(): 1} def n(x): return {x: 1} def pmf_map(f, pmf): npmf = {} for o, p in pmf.items(): no = f(o) # print(no) npmf[no] = npmf.get(no, 0) + p # print(npmf) return npmf def join(*pmfs): jpmf = pmf_empty for pmf in pmfs: jpmf = {jo + (o,): jp * p for jo, jp in jpmf.items() for o, p in pmf.items()} return jpmf def pmf_reduce(op, pmfs, u=None): def pmf_op(pmf1, pmf2): # print(pmf1) # print(pmf2) pmf = join(pmf1, pmf2) # print(pmf) # return pmf_map(lambda o: op(*o), pmf) return pmf_map(lambda o: op(*o), pmf) if u == None: return reduce(pmf_op, pmfs) else: return reduce(pmf_op, pmfs, u) def pmf_func(f, *pmfs): return pmf_map(lambda o: f(*o), join(*pmfs)) def pmf_if(c, pmf_c, pmf_t, pmf_f): return pmf_func(lambda oc, ot, of: ot if c(oc) else of, pmf_c, pmf_t, pmf_f) def pmf_sum(*pmfs): return pmf_reduce(lambda o1, o2: o1 + o2, pmfs) def pmf_subtract(pmf1, pmf2): return pmf_func(lambda o1, o2: o1 - o2, pmf1, pmf2) def pmf_max(*pmfs): return pmf_reduce(max, pmfs) def pmf_min(*pmfs): return pmf_reduce(min, pmfs) def npmf_sum(n, pmf): return pmf_sum(*npmf(n, pmf)) def at_least(pmf): d = {} a = 0 for o in reversed(sorted(pmf)): d[o] = pmf[o] + a a += pmf[o] return d def top_nth(n, *pmfs): return pmf_reduce( lambda o1, o2: tuple(sorted(o1 + o2)[-n:]), (pmf_map(lambda o: (o,) + tuple((float('-inf') for _ in range(n - 1))), pmf) for pmf in pmfs) ) def nth_largest(n, *pmfs): return pmf_map( lambda o: o[-n], top_nth(n, *pmfs), ) def attack_basic(attack_base, attack_bonus, ac, damage_roll, damage_bonus, critical_threshold=20): # print(attack_base, attack_bonus) if attack_base >= critical_threshold: return (damage_roll * 2 + damage_bonus, ATTACK_HIT_CRIT) elif attack_base > 1 and attack_base + attack_bonus >= ac: return (damage_roll + damage_bonus, ATTACK_HIT_NON_CRIT) else: return (0, ATTACK_MISS) PROFICIENCY_BONUS = 4 ABILITY_MODIFIER = 5 
CHARISMA_MODIFIER = 3 AC = n(18) def attack( damage_base, proficiency_bonus=PROFICIENCY_BONUS, ability_modifier=ABILITY_MODIFIER, add_ability_modifier_to_damange=True, is_attack_adv=False, is_archery=False, is_great_weapon_fighting=False, is_blessed=False, is_sacred_weapon=False, critical_threshold=20): attack_bonus = n(proficiency_bonus + ability_modifier) damage_bonus = 0 if is_attack_adv: attack_base = pmf_max(*nd(2, 20)) else: attack_base = d(20) if add_ability_modifier_to_damange: damage_bonus += ability_modifier if is_great_weapon_fighting: attack_bonus = pmf_subtract(attack_bonus, n(5)) damage_bonus += 10 if is_blessed: attack_bonus = pmf_sum(attack_bonus, d(4)) if is_sacred_weapon: attack_bonus += CHARISMA_MODIFIER return pmf_func(lambda aba, abo, ac, db: attack_basic( attack_base=aba, attack_bonus=abo, ac=ac, damage_roll=db, damage_bonus=damage_bonus, critical_threshold=critical_threshold), attack_base, attack_bonus, AC, damage_base) """print(pmf_func(lambda aba, abo, ac, db: attack_basic( attack_base=aba, attack_bonus=abo, ac=ac, damage_roll=db, damage_bonus=5), d(20), n(9), n(18), pmf_sum(*nd(2, 6)))) """ def multi_attack_damage(*attack_pmfs, extra_damage_roll=n(0), extra_damange_bonus=0): # print(attack_pmfs[0]) all_attacks_pmf = pmf_reduce(lambda a1, a2: (a1[0] + a2[0], max(a1[1], a2[1])), attack_pmfs) # print(all_attacks_pmf) return pmf_func(lambda aa, ed: aa[0] + aa[1] * (ed + extra_damange_bonus), all_attacks_pmf, extra_damage_roll) def damage_against_save(save_dc, save_bonus, damage, is_save_disadvantage=False, is_save_half=False): if is_save_disadvantage: save_roll = pmf_min(*nd(2, 20)) else: save_roll = d(20) if is_save_half: save_success_damage = pmf_map(lambda o: o // 2, damage) else: save_success_damage = n(0) return pmf_if(lambda s: s + save_bonus < save_dc, save_roll, damage, save_success_damage) def pmf_equal(pmf1, pmf2, tolerance=0.0001): if len(pmf1) != len(pmf2): return False for o, p in pmf1.items(): if o not in pmf2 or abs(pmf2[o] - p) 
/ pmf2[o] > tolerance: print(o, pmf2[o], p) return False return True # print(at_least(damage_against_save(save_dc=17, save_bonus=7, damage=pmf_sum(n(0), *nd(3, 12)), is_save_half=True))) assert at_least(multi_attack_damage(*npmf(6, attack(pmf_sum(d(10), d(6)))))) == {222: 3.3489797668038423e-19, 220: 4.3536736968449956e-18, 218: 3.0475715877914974e-17, 216: 1.5237857938957488e-16, 214: 6.095143175582995e-16, 212: 2.0723486796982186e-15, 210: 6.215036651234575e-15, 208: 1.684871720679014e-14, 206: 4.202634709362144e-14, 205: 4.207055362654325e-14, 204: 9.787594307270242e-14, 203: 9.840642146776415e-14, 202: 2.1577476637517166e-13, 201: 2.1922287594307288e-13, 200: 4.564170471107685e-13, 199: 4.724640185613857e-13, 198: 9.381815173825455e-13, 197: 9.979929564257554e-13, 196: 1.894035266096538e-12, 195: 2.0836370857981843e-12, 194: 3.782116381065675e-12, 193: 4.312373743462794e-12, 192: 7.490725670331794e-12, 191: 8.831111955054018e-12, 190: 1.469895351080248e-11, 189: 1.7816221386316884e-11, 188: 2.8485785590277794e-11, 187: 3.525517618312759e-11, 186: 5.43450946153764e-11, 185: 6.826147996774266e-11, 184: 1.0190801658682703e-10, 183: 1.293492741421254e-10, 182: 1.8796479284550764e-10, 181: 2.404885196973595e-10, 180: 3.4189264108581974e-10, 179: 4.4041658093278483e-10, 178: 6.153987295310359e-10, 177: 7.976379069680216e-10, 176: 1.0995802395458254e-09, 175: 1.4327500689889838e-09, 174: 1.953581400637111e-09, 173: 2.5550641922394777e-09, 172: 3.4505440521449563e-09, 171: 4.520119764539933e-09, 170: 6.048663293909147e-09, 169: 7.917773062749169e-09, 168: 1.0499883226099006e-08, 167: 1.370771379853342e-08, 166: 1.801930551905168e-08, 165: 2.3434061276617696e-08, 164: 3.0558302945695635e-08, 163: 3.957628425035099e-08, 162: 5.125581097621423e-08, 161: 6.613064980321398e-08, 160: 8.518360590009864e-08, 159: 1.0955997715326007e-07, 158: 1.405441421049142e-07, 157: 1.8028761197615264e-07, 156: 2.30509771015183e-07, 155: 2.94932555347584e-07, 154: 3.759062169325088e-07, 
153: 4.794983995644319e-07, 152: 6.089779759292081e-07, 151: 7.738079701341335e-07, 150: 9.78627203739538e-07, 149: 1.2377360821514788e-06, 148: 1.5579160736276556e-06, 147: 1.960349573899593e-06, 146: 2.4553414604947656e-06, 145: 3.0739267436898533e-06, 144: 3.832354066045901e-06, 143: 4.775816119953758e-06, 142: 5.930493200215743e-06, 141: 7.361758400603958e-06, 140: 9.111773711737575e-06, 139: 1.1274013331472979e-05, 138: 1.3915037547681439e-05, 137: 1.7166086645201052e-05, 136: 2.1129024563820847e-05, 135: 2.598395582261527e-05, 134: 3.1881775105847536e-05, 133: 3.906413077198373e-05, 132: 4.7748903990092734e-05, 131: 5.825511076692209e-05, 130: 7.089317049781984e-05, 129: 8.608211897103202e-05, 128: 0.00010426368322232004, 127: 0.00012599198892655422, 126: 0.00015189877711226186, 125: 0.00018272625240665325, 124: 0.0002193795566208962, 123: 0.00026286147044929717, 122: 0.00031445682042577225, 121: 0.0003755142978165058, 120: 0.0004478158434089597, 119: 0.0005331385730093929, 118: 0.0006338710810288649, 117: 0.0007522808602403247, 116: 0.0008914505200766179, 115: 0.0010541761653392891, 114: 0.0012443078959162314, 113: 0.0014652119846524472, 112: 0.0017216077772940568, 111: 0.002017539041216797, 110: 0.002358791848553119, 109: 0.002750336522782816, 108: 0.0031993763767807397, 107: 0.0037122415110301286, 106: 0.004298044960182538, 105: 0.004965037380168065, 104: 0.005724736764713471, 103: 0.006587807620014312, 102: 0.007568398785385817, 101: 0.00867971945387806, 100: 0.009938197909649778, 99: 0.01135912744384013, 98: 0.012960168069877435, 97: 0.014757702669013209, 96: 0.01676913244746984, 95: 0.019010898812807514, 94: 0.021498876381575473, 93: 0.024249267701011077, 92: 0.027276167706109473, 91: 0.030596665421207488, 90: 0.03422432411331296, 89: 0.038179713540655295, 88: 0.0424782126956378, 87: 0.047146945699880594, 86: 0.05220507655449236, 85: 0.057687824241469156, 84: 0.06361753574352523, 83: 0.07003537356099147, 82: 0.07696220277604915, 81: 0.08443865488460411, 
80: 0.09247599181630142, 79: 0.10110574281767015, 78: 0.11032188071791486, 77: 0.12014079812267253, 76: 0.13053662146814138, 75: 0.14151150383807942, 74: 0.15302620743663828, 73: 0.16507751746572974, 72: 0.17762717928100047, 71: 0.19067988082293333, 70: 0.20421502361061133, 69: 0.21825480312528056, 68: 0.23280495624969533, 67: 0.24790114745639263, 66: 0.2635684968163977, 65: 0.2798341501304599, 64: 0.29671934822880924, 63: 0.31420861788025733, 62: 0.3322937546566702, 61: 0.3508914971250305, 60: 0.3699549402124897, 59: 0.3893325016432111, 58: 0.4089647883711039, 57: 0.42866353434923893, 56: 0.4484001950710131, 55: 0.4679944953379022, 54: 0.48749538158475125, 53: 0.5067712708426519, 52: 0.5259638545070847, 51: 0.5449920070408331, 50: 0.5640658966755194, 49: 0.5831167055354866, 48: 0.6023500899743012, 47: 0.6216381879941681, 46: 0.6411023599755987, 45: 0.6605103369385857, 44: 0.6798613333655006, 43: 0.6988171472939357, 42: 0.7173121816674168, 41: 0.7350280002123484, 40: 0.7519377448973812, 39: 0.7678241411760898, 38: 0.7827733266043884, 37: 0.7968050725795138, 36: 0.8100143093064718, 35: 0.8226125220914361, 34: 0.8346429584849546, 33: 0.8464910173368065, 32: 0.8580443519525474, 31: 0.8696677468414362, 30: 0.8809902445266214, 29: 0.8923598312025474, 28: 0.9031346881481492, 27: 0.91360192888889, 26: 0.9229606266666678, 25: 0.9316321044444456, 24: 0.9388921251851863, 23: 0.945251231851853, 22: 0.9501086140740752, 21: 0.9547112000000011, 20: 0.9585666666666678, 19: 0.9624173333333345, 18: 0.9658290666666678, 17: 0.9695581333333345, 16: 0.9732658666666678, 15: 0.9769789333333345, 14: 0.9803904000000012, 13: 0.9839232000000012, 12: 0.9873024000000012, 11: 0.9902208000000012, 10: 0.9924736000000013, 9: 0.9942144000000013, 8: 0.9953408000000012, 7: 0.9959040000000012, 0: 1.0000000000000013} ss = pmf_map(sum, top_nth(3, *nd(4, 6))) assert pmf_if(lambda c: c > 15, d(20), n(1), n(0)) == {0: 0.7500000000000001, 1: 0.25}
from functools import * from itertools import * from math import sqrt # from pprint import pprint import operator # from multipledispatch import dispatch class Pmf(object): """Probability mass function""" def __init__(self): super(Pmf, self).__init__() def prob(self, outcome): raise NotImplementedError def num_outcomes(self): raise NotImplementedError def entries(self): raise NotImplementedError def map(self, f): table = {} for outcome, prob in self: mapped_outcome = f(outcome) # print(outcome, prob, mapped_outcome) table[mapped_outcome] = table.get(mapped_outcome, 0) + prob return TablePmf(table) """ @classmethod def reduce(cls, op, pmfs, unit = None): def pmf_op(pmf1, pmf2): return cls.multimap(op, (pmf1, pmf2)) if unit == None: return reduce(pmf_op, pmfs) else: return reduce(pmf_op, pmfs, unit) """ @classmethod def multimap(cls, f, pmfs): return JointPmf(pmfs).map(lambda p: f(*p)) @classmethod def multimap_any(cls, f, objs): return cls.multimap(f, tuple(map(lambda obj: cls.any_to_value_pmf(obj), objs))) @classmethod def ifthenelse(cls, condition_pmf, then_pmf, else_pmf): return cls.multimap_any(lambda oc, ot, oe: ot if oc else oe, (condition_pmf, then_pmf, else_pmf)) @classmethod def maketuple(cls, pmfs): return cls.multimap_any(lambda *os: os, pmfs) def to_dict(self): return {outcome: prob for outcome, prob in iter(self)} def at_least(self): at_least_probs = {} probs = self.to_dict() accum = 0 for outcome in reversed(sorted(probs)): at_least_probs[outcome] = probs[outcome] + accum accum += probs[outcome] return at_least_probs @staticmethod def almost_equal(n1, n2, tolerance=0.0001): return abs((n1 - n2) / n2) < tolerance @staticmethod def dict_almost_equal(dict1, dict2, tolerance=0.0001): if len(dict1) != len(dict2): return False for key, value in dict1.items(): if not Pmf.almost_equal(value, dict2[key], tolerance): print(key, value, dict2[key]) return False return True def is_equiv(self, other, tolerance=0.0001): # print(self, other) if len(self) != 
len(other): return False for outcome, prob in iter(self): if not Pmf.almost_equal(prob, other[outcome], tolerance): print(outcome, prob, other[outcome]) return False return True def to_value_pmf(self): return self @classmethod def any_to_value_pmf(cls, obj): if isinstance(obj, Pmf): return obj.to_value_pmf() else: return ConstantPmf(obj) def pmf_op(self, other, op): # return self.multimap(op, (self.to_value_pmf(), self.any_to_value_pmf(other))) return self.multimap_any(op, (self, other)) def avg(self): return reduce(operator.add, map(lambda i: i[0] * i[1], iter(self.to_value_pmf()))) def sd(self): return sqrt(self.to_value_pmf().map(lambda n: n ** 2).avg() - (self.avg()) ** 2) def print_stats(self): print(self.avg(), self.sd(), self.at_least()) def __getitem__(self, key): return self.prob(key) def __len__(self): return self.num_outcomes() def __iter__(self): return self.entries() def __radd__(self, other): return self.pmf_op(other, operator.add) def __add__(self, other): return self.pmf_op(other, operator.add) def __rsub__(self, other): return self.pmf_op(other, operator.sub) def __sub__(self, other): return self.pmf_op(other, operator.sub) def __neg__(self): return self.map(operator.neg) def __rmul__(self, other): return self.pmf_op(other, operator.mul) def __mul__(self, other): return self.pmf_op(other, operator.mul) def __rtruediv__(self, other): return self.pmf_op(other, operator.truediv) def __truediv__(self, other): return self.pmf_op(other, operator.truediv) def __rfloordiv__(self, other): return self.pmf_op(other, operator.floordiv) def __floordiv__(self, other): return self.pmf_op(other, operator.floordiv) def __lt__(self, other): return self.pmf_op(other, operator.lt) def __le__(self, other): return self.pmf_op(other, operator.le) def __eq__(self, other): return self.is_equiv(other) def __ge__(self, other): return self.pmf_op(other, operator.ge) def __gt__(self, other): return self.pmf_op(other, operator.gt) def __not__(self): return 
self.map(operator.not_) def __and__(self, other): return self.pmf_op(other, operator.and_) def __or__(self, other): return self.pmf_op(other, operator.or_) def __str__(self): return str(self.to_dict()) # @dispatch(Pmf, Pmf, object) # def pmf_op(pmf1, pmf2, op): # return Pmf.multimap(op, (pmf1, pmf2))#JointPmf((pmf1, pmf2)).map(lambda p: op(*p)) """ @dispatch(Pmf, int, object) def pmf_op(pmf, c, op): return self.pmf_op(ConstantPmf(c), op) """ class TablePmf(Pmf): """Table based Pmf""" def __init__(self, table): super(TablePmf, self).__init__() self.table = table def prob(self, outcome): return self.table.get(outcome, 0) def num_outcomes(self): return len(self.table) def entries(self): return iter(self.table.items()) class ConstantPmf(TablePmf): def __init__(self, n): super(ConstantPmf, self).__init__({n: 1}) class D(Pmf): def __init__(self, faces): super(D, self).__init__() self.outcomes = range(1, faces + 1) self.p = 1 / faces def prob(self, outcome): if outcome in self.outcomes: return self.p else: return 0 def num_outcomes(self): return len(self.outcomes) def entries(self): for outcome in self.outcomes: yield (outcome, self.p) class JointPmf(Pmf): """Joint Pmf""" def __init__(self, pmfs): super(JointPmf, self).__init__() # print(1) # print(pmfs) # for p in pmfs: print(p) # print(2) self.pmfs = pmfs # ((pmf if isinstance(pmf, Pmf) else ConstantPmf(pmf)) for pmf in pmfs) def prob(self, outcome): return reduce(operator.mul, imap(lambda pmf, o: pmf[o], self.pmfs, outcome)) def num_outcomes(self): return reduce(operator.mul, map(lambda pmf: pmf.num_outcomes(), self.pmfs)) def entries(self): return self._entries_helper(iter(self.pmfs)) def _entries_helper(self, pmfs_iter): try: pmf = next(pmfs_iter) for suffix_outcome, suffix_prob in self._entries_helper(pmfs_iter): for outcome, prob in pmf: yield ((outcome,) + suffix_outcome, prob * suffix_prob) except StopIteration: yield ((), 1) def sum(self): return self.map(sum) def reduce(self, op, unit=None): def pmf_op(pmf1, 
pmf2): return Pmf.multimap(op, (pmf1, pmf2)) # print("r", self.pmfs) if unit == None: return reduce(pmf_op, self.pmfs) else: return reduce(pmf_op, self.pmfs, unit) def to_value_pmf(self): return self.sum() def largest_n(self, n): # print("l", self.pmfs) return JointPmf( tuple(map(lambda pmf: pmf.map(lambda n: (n,) + tuple((float('-inf') for _ in range(n - 1)))), self.pmfs))).reduce(lambda t1, t2: tuple(sorted(t1 + t2)[-n:])) def nth_largest(self, n): return self.largest_n(n).map(lambda t: t[-n]) def max(self): return self.reduce(max) """def __radd__(self, other): return self.sum().__radd__(other) def __add__(self, other): return self.sum().__add__(other) def __rsub__(self, other): return self.sum().__rsub__(other) def __sub__(self, other): return self.sum().__sub__(other) def __neg__(self): return self.sum().__neg__() def __rmul__(self, other): return self.sum().__rmul__(other) def __mul__(self, other): return self.sum().__mul__(other) def __rtruediv__(self, other): return self.sum().__rtruediv__(other) def __truediv__(self, other): return self.sum().__truediv__(other) def __rfloordiv__(self, other): return self.sum().__rfloordiv__(other) def __floordiv__(self, other): return self.sum().__floordiv__(other) def __lt__(self, other): return other > self.sum() def __le__(self, other): return other >= self.sum() def __ge__(self, other): return other <= self.sum() def __gt__(self, other): return other < self.sum() #return self.sum().__gt__(other) """ def __not__(self): raise TypeError def __and__(self, other): raise TypeError def __or__(self, other): raise TypeError class NPmf(JointPmf): """N many Pmfs""" def __init__(self, n, pmf): super(NPmf, self).__init__(tuple(pmf for _ in range(n))) class ND(NPmf): """N dice""" def __init__(self, n, faces): super(ND, self).__init__(n, D(faces)) # print(ND(3, 4) > D(15)) # print(ND(3, 4) > 8) # print(D(4) + ND(2, 4)) assert D(4) + ND(2, 4) == ND(3, 4).sum() # exit() assert D(2) + D(2) == TablePmf({2: 0.25, 3: 0.5, 4: 0.25}) assert 
D(2) + 1 == TablePmf({2: 0.5, 3: 0.5}) assert 1 + D(2) == TablePmf({2: 0.5, 3: 0.5}) assert ND(2, 2) + 1 == TablePmf({3: 0.25, 4: 0.5, 5: 0.25}) """ roll = D(20) if roll > 15: x = roll condition roll > 15 hit = true else: condition roll <= 15 hit = false merge branch hit """ stat_roll = ND(4, 6).largest_n(3).map(sum) assert stat_roll == TablePmf( {3: 0.0007716049382716049, 4: 0.0030864197530864196, 5: 0.007716049382716049, 6: 0.016203703703703703, 7: 0.029320987654320986, 8: 0.047839506172839504, 9: 0.07021604938271606, 10: 0.09413580246913579, 11: 0.11419753086419754, 12: 0.12885802469135801, 13: 0.13271604938271603, 14: 0.12345679012345678, 15: 0.10108024691358025, 16: 0.07253086419753085, 17: 0.04166666666666666, 18: 0.016203703703703703}) assert Pmf.dict_almost_equal(NPmf(6, stat_roll).nth_largest(2).at_least(), {18: 0.0037712978997010613, 17: 0.04297174396189502, 16: 0.1785025147554727, 15: 0.42163254105952985, 14: 0.6901003676248133, 13: 0.8786176420382443, 12: 0.9661397862008139, 11: 0.9934099842270956, 10: 0.9991552411369676, 9: 0.9999303248135635, 8: 0.9999965317348086, 7: 0.9999999030679377, 6: 0.999999998765834, 5: 0.9999999999948876, 4: 0.9999999999999979, 3: 0.9999999999999996}) assert Pmf.dict_almost_equal(NPmf(6, stat_roll).nth_largest(1).at_least(), {18: 0.09336788352756517, 17: 0.30069928149459824, 16: 0.5675723186031841, 15: 0.7939721069010339, 14: 0.9279543680017638, 13: 0.9819125032121617, 12: 0.9968194053566061, 11: 0.9996186510521065, 10: 0.9999711247911593, 9: 0.9999986646243858, 8: 0.999999965345446, 7: 0.9999999995406059, 6: 0.9999999999975957, 5: 0.9999999999999963, 4: 0.9999999999999997, 3: 0.9999999999999997}) assert Pmf.dict_almost_equal(NPmf(6, stat_roll).largest_n(2).map(sum).at_least(), {36: 0.0037712978997010613, 35: 0.021204173018408268, 34: 0.06678950251364398, 33: 0.1503173497095011, 32: 0.27328695484648013, 31: 0.4195516640008528, 30: 0.5731467072301601, 29: 0.7086608028822727, 28: 0.819832313566671, 27: 0.8961149664818756, 26: 
0.9467231694762607, 25: 0.9741534736268778, 24: 0.9891492615027956, 23: 0.9955834539859781, 22: 0.9985102911733038, 21: 0.9994953600444297, 20: 0.9998688100006322, 19: 0.9999635085211747, 18: 0.9999929491919841, 17: 0.999998399446, 16: 0.9999997840507013, 15: 0.9999999605103931, 14: 0.9999999965711357, 13: 0.9999999995038211, 12: 0.9999999999772524, 11: 0.9999999999975125, 10: 0.9999999999999567, 9: 0.9999999999999963, 8: 0.9999999999999997, 7: 0.9999999999999997, 6: 0.9999999999999997}) assert ND(2, 10).max() == TablePmf( {1: 0.010000000000000002, 2: 0.030000000000000006, 3: 0.05000000000000001, 4: 0.07, 5: 0.09000000000000002, 6: 0.11000000000000004, 7: 0.13000000000000006, 8: 0.15000000000000008, 9: 0.1700000000000001, 10: 0.1900000000000001}) assert D(6).avg() == 3.5 assert ND(2, 6).avg() == 7 assert Pmf.almost_equal(D(6).sd(), 1.707825127659933) assert Pmf.almost_equal(ND(2, 6).sd(), 2.4152294576982403) normal = ND(2, 6).sum() savage = NPmf(2, normal).max() normal.print_stats() savage.print_stats() ATTACK_HIT_CRIT = 2 ATTACK_HIT_NON_CRIT = 1 ATTACK_MISS = 0 """def attack_basic(attack_base, attack_bonus, ac, damage_roll, damage_bonus, critical_threshold = 20): #print("a1", damage_roll * 2 + damage_bonus) return Pmf.ifthenelse(attack_base >= critical_threshold, Pmf.maketuple((damage_roll * 2 + damage_bonus, ATTACK_HIT_CRIT)), Pmf.ifthenelse(attack_base > 1 and attack_base + attack_bonus >= ac, Pmf.maketuple((damage_roll + damage_bonus, ATTACK_HIT_NON_CRIT)), Pmf.maketuple((0, ATTACK_MISS)) ) )""" # attack_base > 1 and """def attack_basic(attack_base, attack_bonus, ac, damage_roll, damage_bonus, critical_threshold = 20): #print("a1", damage_roll * 2 + damage_bonus) return Pmf.ifthenelse(attack_base >= critical_threshold, ATTACK_HIT_CRIT, Pmf.ifthenelse(attack_base < critical_threshold and attack_base + attack_bonus >= ac, ATTACK_HIT_NON_CRIT, ATTACK_MISS ) ) """ # print(D(20) <= 8) # print(attack_basic(D(20), 9, 18, ND(2, 6), 5)) def d(t): return {o: 1 / t for o 
in range(1, t + 1)} def npmf(n, pmf): return tuple((pmf for _ in range(n))) # @infix def nd(n, t): return npmf(n, d(t)) pmf_empty = {(): 1} def n(x): return {x: 1} def pmf_map(f, pmf): npmf = {} for o, p in pmf.items(): no = f(o) # print(no) npmf[no] = npmf.get(no, 0) + p # print(npmf) return npmf def join(*pmfs): jpmf = pmf_empty for pmf in pmfs: jpmf = {jo + (o,): jp * p for jo, jp in jpmf.items() for o, p in pmf.items()} return jpmf def pmf_reduce(op, pmfs, u=None): def pmf_op(pmf1, pmf2): # print(pmf1) # print(pmf2) pmf = join(pmf1, pmf2) # print(pmf) # return pmf_map(lambda o: op(*o), pmf) return pmf_map(lambda o: op(*o), pmf) if u == None: return reduce(pmf_op, pmfs) else: return reduce(pmf_op, pmfs, u) def pmf_func(f, *pmfs): return pmf_map(lambda o: f(*o), join(*pmfs)) def pmf_if(c, pmf_c, pmf_t, pmf_f): return pmf_func(lambda oc, ot, of: ot if c(oc) else of, pmf_c, pmf_t, pmf_f) def pmf_sum(*pmfs): return pmf_reduce(lambda o1, o2: o1 + o2, pmfs) def pmf_subtract(pmf1, pmf2): return pmf_func(lambda o1, o2: o1 - o2, pmf1, pmf2) def pmf_max(*pmfs): return pmf_reduce(max, pmfs) def pmf_min(*pmfs): return pmf_reduce(min, pmfs) def npmf_sum(n, pmf): return pmf_sum(*npmf(n, pmf)) def at_least(pmf): d = {} a = 0 for o in reversed(sorted(pmf)): d[o] = pmf[o] + a a += pmf[o] return d def top_nth(n, *pmfs): return pmf_reduce( lambda o1, o2: tuple(sorted(o1 + o2)[-n:]), (pmf_map(lambda o: (o,) + tuple((float('-inf') for _ in range(n - 1))), pmf) for pmf in pmfs) ) def nth_largest(n, *pmfs): return pmf_map( lambda o: o[-n], top_nth(n, *pmfs), ) def attack_basic(attack_base, attack_bonus, ac, damage_roll, damage_bonus, critical_threshold=20): # print(attack_base, attack_bonus) if attack_base >= critical_threshold: return (damage_roll * 2 + damage_bonus, ATTACK_HIT_CRIT) elif attack_base > 1 and attack_base + attack_bonus >= ac: return (damage_roll + damage_bonus, ATTACK_HIT_NON_CRIT) else: return (0, ATTACK_MISS) PROFICIENCY_BONUS = 4 ABILITY_MODIFIER = 5 
CHARISMA_MODIFIER = 3 AC = n(18) def attack( damage_base, proficiency_bonus=PROFICIENCY_BONUS, ability_modifier=ABILITY_MODIFIER, add_ability_modifier_to_damange=True, is_attack_adv=False, is_archery=False, is_great_weapon_fighting=False, is_blessed=False, is_sacred_weapon=False, critical_threshold=20): attack_bonus = n(proficiency_bonus + ability_modifier) damage_bonus = 0 if is_attack_adv: attack_base = pmf_max(*nd(2, 20)) else: attack_base = d(20) if add_ability_modifier_to_damange: damage_bonus += ability_modifier if is_great_weapon_fighting: attack_bonus = pmf_subtract(attack_bonus, n(5)) damage_bonus += 10 if is_blessed: attack_bonus = pmf_sum(attack_bonus, d(4)) if is_sacred_weapon: attack_bonus += CHARISMA_MODIFIER return pmf_func(lambda aba, abo, ac, db: attack_basic( attack_base=aba, attack_bonus=abo, ac=ac, damage_roll=db, damage_bonus=damage_bonus, critical_threshold=critical_threshold), attack_base, attack_bonus, AC, damage_base) """print(pmf_func(lambda aba, abo, ac, db: attack_basic( attack_base=aba, attack_bonus=abo, ac=ac, damage_roll=db, damage_bonus=5), d(20), n(9), n(18), pmf_sum(*nd(2, 6)))) """ def multi_attack_damage(*attack_pmfs, extra_damage_roll=n(0), extra_damange_bonus=0): # print(attack_pmfs[0]) all_attacks_pmf = pmf_reduce(lambda a1, a2: (a1[0] + a2[0], max(a1[1], a2[1])), attack_pmfs) # print(all_attacks_pmf) return pmf_func(lambda aa, ed: aa[0] + aa[1] * (ed + extra_damange_bonus), all_attacks_pmf, extra_damage_roll) def damage_against_save(save_dc, save_bonus, damage, is_save_disadvantage=False, is_save_half=False): if is_save_disadvantage: save_roll = pmf_min(*nd(2, 20)) else: save_roll = d(20) if is_save_half: save_success_damage = pmf_map(lambda o: o // 2, damage) else: save_success_damage = n(0) return pmf_if(lambda s: s + save_bonus < save_dc, save_roll, damage, save_success_damage) def pmf_equal(pmf1, pmf2, tolerance=0.0001): if len(pmf1) != len(pmf2): return False for o, p in pmf1.items(): if o not in pmf2 or abs(pmf2[o] - p) 
/ pmf2[o] > tolerance: print(o, pmf2[o], p) return False return True # print(at_least(damage_against_save(save_dc=17, save_bonus=7, damage=pmf_sum(n(0), *nd(3, 12)), is_save_half=True))) assert at_least(multi_attack_damage(*npmf(6, attack(pmf_sum(d(10), d(6)))))) == {222: 3.3489797668038423e-19, 220: 4.3536736968449956e-18, 218: 3.0475715877914974e-17, 216: 1.5237857938957488e-16, 214: 6.095143175582995e-16, 212: 2.0723486796982186e-15, 210: 6.215036651234575e-15, 208: 1.684871720679014e-14, 206: 4.202634709362144e-14, 205: 4.207055362654325e-14, 204: 9.787594307270242e-14, 203: 9.840642146776415e-14, 202: 2.1577476637517166e-13, 201: 2.1922287594307288e-13, 200: 4.564170471107685e-13, 199: 4.724640185613857e-13, 198: 9.381815173825455e-13, 197: 9.979929564257554e-13, 196: 1.894035266096538e-12, 195: 2.0836370857981843e-12, 194: 3.782116381065675e-12, 193: 4.312373743462794e-12, 192: 7.490725670331794e-12, 191: 8.831111955054018e-12, 190: 1.469895351080248e-11, 189: 1.7816221386316884e-11, 188: 2.8485785590277794e-11, 187: 3.525517618312759e-11, 186: 5.43450946153764e-11, 185: 6.826147996774266e-11, 184: 1.0190801658682703e-10, 183: 1.293492741421254e-10, 182: 1.8796479284550764e-10, 181: 2.404885196973595e-10, 180: 3.4189264108581974e-10, 179: 4.4041658093278483e-10, 178: 6.153987295310359e-10, 177: 7.976379069680216e-10, 176: 1.0995802395458254e-09, 175: 1.4327500689889838e-09, 174: 1.953581400637111e-09, 173: 2.5550641922394777e-09, 172: 3.4505440521449563e-09, 171: 4.520119764539933e-09, 170: 6.048663293909147e-09, 169: 7.917773062749169e-09, 168: 1.0499883226099006e-08, 167: 1.370771379853342e-08, 166: 1.801930551905168e-08, 165: 2.3434061276617696e-08, 164: 3.0558302945695635e-08, 163: 3.957628425035099e-08, 162: 5.125581097621423e-08, 161: 6.613064980321398e-08, 160: 8.518360590009864e-08, 159: 1.0955997715326007e-07, 158: 1.405441421049142e-07, 157: 1.8028761197615264e-07, 156: 2.30509771015183e-07, 155: 2.94932555347584e-07, 154: 3.759062169325088e-07, 
153: 4.794983995644319e-07, 152: 6.089779759292081e-07, 151: 7.738079701341335e-07, 150: 9.78627203739538e-07, 149: 1.2377360821514788e-06, 148: 1.5579160736276556e-06, 147: 1.960349573899593e-06, 146: 2.4553414604947656e-06, 145: 3.0739267436898533e-06, 144: 3.832354066045901e-06, 143: 4.775816119953758e-06, 142: 5.930493200215743e-06, 141: 7.361758400603958e-06, 140: 9.111773711737575e-06, 139: 1.1274013331472979e-05, 138: 1.3915037547681439e-05, 137: 1.7166086645201052e-05, 136: 2.1129024563820847e-05, 135: 2.598395582261527e-05, 134: 3.1881775105847536e-05, 133: 3.906413077198373e-05, 132: 4.7748903990092734e-05, 131: 5.825511076692209e-05, 130: 7.089317049781984e-05, 129: 8.608211897103202e-05, 128: 0.00010426368322232004, 127: 0.00012599198892655422, 126: 0.00015189877711226186, 125: 0.00018272625240665325, 124: 0.0002193795566208962, 123: 0.00026286147044929717, 122: 0.00031445682042577225, 121: 0.0003755142978165058, 120: 0.0004478158434089597, 119: 0.0005331385730093929, 118: 0.0006338710810288649, 117: 0.0007522808602403247, 116: 0.0008914505200766179, 115: 0.0010541761653392891, 114: 0.0012443078959162314, 113: 0.0014652119846524472, 112: 0.0017216077772940568, 111: 0.002017539041216797, 110: 0.002358791848553119, 109: 0.002750336522782816, 108: 0.0031993763767807397, 107: 0.0037122415110301286, 106: 0.004298044960182538, 105: 0.004965037380168065, 104: 0.005724736764713471, 103: 0.006587807620014312, 102: 0.007568398785385817, 101: 0.00867971945387806, 100: 0.009938197909649778, 99: 0.01135912744384013, 98: 0.012960168069877435, 97: 0.014757702669013209, 96: 0.01676913244746984, 95: 0.019010898812807514, 94: 0.021498876381575473, 93: 0.024249267701011077, 92: 0.027276167706109473, 91: 0.030596665421207488, 90: 0.03422432411331296, 89: 0.038179713540655295, 88: 0.0424782126956378, 87: 0.047146945699880594, 86: 0.05220507655449236, 85: 0.057687824241469156, 84: 0.06361753574352523, 83: 0.07003537356099147, 82: 0.07696220277604915, 81: 0.08443865488460411, 
80: 0.09247599181630142, 79: 0.10110574281767015, 78: 0.11032188071791486, 77: 0.12014079812267253, 76: 0.13053662146814138, 75: 0.14151150383807942, 74: 0.15302620743663828, 73: 0.16507751746572974, 72: 0.17762717928100047, 71: 0.19067988082293333, 70: 0.20421502361061133, 69: 0.21825480312528056, 68: 0.23280495624969533, 67: 0.24790114745639263, 66: 0.2635684968163977, 65: 0.2798341501304599, 64: 0.29671934822880924, 63: 0.31420861788025733, 62: 0.3322937546566702, 61: 0.3508914971250305, 60: 0.3699549402124897, 59: 0.3893325016432111, 58: 0.4089647883711039, 57: 0.42866353434923893, 56: 0.4484001950710131, 55: 0.4679944953379022, 54: 0.48749538158475125, 53: 0.5067712708426519, 52: 0.5259638545070847, 51: 0.5449920070408331, 50: 0.5640658966755194, 49: 0.5831167055354866, 48: 0.6023500899743012, 47: 0.6216381879941681, 46: 0.6411023599755987, 45: 0.6605103369385857, 44: 0.6798613333655006, 43: 0.6988171472939357, 42: 0.7173121816674168, 41: 0.7350280002123484, 40: 0.7519377448973812, 39: 0.7678241411760898, 38: 0.7827733266043884, 37: 0.7968050725795138, 36: 0.8100143093064718, 35: 0.8226125220914361, 34: 0.8346429584849546, 33: 0.8464910173368065, 32: 0.8580443519525474, 31: 0.8696677468414362, 30: 0.8809902445266214, 29: 0.8923598312025474, 28: 0.9031346881481492, 27: 0.91360192888889, 26: 0.9229606266666678, 25: 0.9316321044444456, 24: 0.9388921251851863, 23: 0.945251231851853, 22: 0.9501086140740752, 21: 0.9547112000000011, 20: 0.9585666666666678, 19: 0.9624173333333345, 18: 0.9658290666666678, 17: 0.9695581333333345, 16: 0.9732658666666678, 15: 0.9769789333333345, 14: 0.9803904000000012, 13: 0.9839232000000012, 12: 0.9873024000000012, 11: 0.9902208000000012, 10: 0.9924736000000013, 9: 0.9942144000000013, 8: 0.9953408000000012, 7: 0.9959040000000012, 0: 1.0000000000000013} ss = pmf_map(sum, top_nth(3, *nd(4, 6))) assert pmf_if(lambda c: c > 15, d(20), n(1), n(0)) == {0: 0.7500000000000001, 1: 0.25}
en
0.430247
# from pprint import pprint # from multipledispatch import dispatch Probability mass function # print(outcome, prob, mapped_outcome) @classmethod def reduce(cls, op, pmfs, unit = None): def pmf_op(pmf1, pmf2): return cls.multimap(op, (pmf1, pmf2)) if unit == None: return reduce(pmf_op, pmfs) else: return reduce(pmf_op, pmfs, unit) # print(self, other) # return self.multimap(op, (self.to_value_pmf(), self.any_to_value_pmf(other))) # @dispatch(Pmf, Pmf, object) # def pmf_op(pmf1, pmf2, op): # return Pmf.multimap(op, (pmf1, pmf2))#JointPmf((pmf1, pmf2)).map(lambda p: op(*p)) @dispatch(Pmf, int, object) def pmf_op(pmf, c, op): return self.pmf_op(ConstantPmf(c), op) Table based Pmf Joint Pmf # print(1) # print(pmfs) # for p in pmfs: print(p) # print(2) # ((pmf if isinstance(pmf, Pmf) else ConstantPmf(pmf)) for pmf in pmfs) # print("r", self.pmfs) # print("l", self.pmfs) def __radd__(self, other): return self.sum().__radd__(other) def __add__(self, other): return self.sum().__add__(other) def __rsub__(self, other): return self.sum().__rsub__(other) def __sub__(self, other): return self.sum().__sub__(other) def __neg__(self): return self.sum().__neg__() def __rmul__(self, other): return self.sum().__rmul__(other) def __mul__(self, other): return self.sum().__mul__(other) def __rtruediv__(self, other): return self.sum().__rtruediv__(other) def __truediv__(self, other): return self.sum().__truediv__(other) def __rfloordiv__(self, other): return self.sum().__rfloordiv__(other) def __floordiv__(self, other): return self.sum().__floordiv__(other) def __lt__(self, other): return other > self.sum() def __le__(self, other): return other >= self.sum() def __ge__(self, other): return other <= self.sum() def __gt__(self, other): return other < self.sum() #return self.sum().__gt__(other) N many Pmfs N dice # print(ND(3, 4) > D(15)) # print(ND(3, 4) > 8) # print(D(4) + ND(2, 4)) # exit() roll = D(20) if roll > 15: x = roll condition roll > 15 hit = true else: condition roll <= 15 hit 
= false merge branch hit def attack_basic(attack_base, attack_bonus, ac, damage_roll, damage_bonus, critical_threshold = 20): #print("a1", damage_roll * 2 + damage_bonus) return Pmf.ifthenelse(attack_base >= critical_threshold, Pmf.maketuple((damage_roll * 2 + damage_bonus, ATTACK_HIT_CRIT)), Pmf.ifthenelse(attack_base > 1 and attack_base + attack_bonus >= ac, Pmf.maketuple((damage_roll + damage_bonus, ATTACK_HIT_NON_CRIT)), Pmf.maketuple((0, ATTACK_MISS)) ) ) # attack_base > 1 and def attack_basic(attack_base, attack_bonus, ac, damage_roll, damage_bonus, critical_threshold = 20): #print("a1", damage_roll * 2 + damage_bonus) return Pmf.ifthenelse(attack_base >= critical_threshold, ATTACK_HIT_CRIT, Pmf.ifthenelse(attack_base < critical_threshold and attack_base + attack_bonus >= ac, ATTACK_HIT_NON_CRIT, ATTACK_MISS ) ) # print(D(20) <= 8) # print(attack_basic(D(20), 9, 18, ND(2, 6), 5)) # @infix # print(no) # print(npmf) # print(pmf1) # print(pmf2) # print(pmf) # return pmf_map(lambda o: op(*o), pmf) # print(attack_base, attack_bonus) print(pmf_func(lambda aba, abo, ac, db: attack_basic( attack_base=aba, attack_bonus=abo, ac=ac, damage_roll=db, damage_bonus=5), d(20), n(9), n(18), pmf_sum(*nd(2, 6)))) # print(attack_pmfs[0]) # print(all_attacks_pmf) # print(at_least(damage_against_save(save_dc=17, save_bonus=7, damage=pmf_sum(n(0), *nd(3, 12)), is_save_half=True)))
2.912823
3
src/UQpy/SampleMethods/IS.py
marrov/UQpy
132
6623021
from UQpy.Distributions import Distribution import numpy as np ######################################################################################################################## ######################################################################################################################## # Generating random samples inside a Simplex ######################################################################################################################## class IS: """ Sample from a user-defined target density using importance sampling. **Inputs:** * **nsamples** (`int`): Number of samples to generate - see ``run`` method. If not `None`, the `run` method is called when the object is created. Default is None. * **pdf_target** (callable): Callable that evaluates the pdf of the target distribution. Either log_pdf_target or pdf_target must be specified (the former is preferred). * **log_pdf_target** (callable) Callable that evaluates the log-pdf of the target distribution. Either log_pdf_target or pdf_target must be specified (the former is preferred). * **args_target** (`tuple`): Positional arguments of the target log_pdf / pdf callable. * **proposal** (``Distribution`` object): Proposal to sample from. This ``UQpy.Distributions`` object must have an rvs method and a log_pdf (or pdf) method. * **verbose** (`boolean`) Set ``verbose = True`` to print status messages to the terminal during execution. * **random_state** (None or `int` or ``numpy.random.RandomState`` object): Random seed used to initialize the pseudo-random number generator. Default is None. If an integer is provided, this sets the seed for an object of ``numpy.random.RandomState``. Otherwise, the object itself can be passed directly. 
**Attributes:** * **samples** (`ndarray`): Set of samples, `ndarray` of shape (nsamples, dim) * **unnormalized_log_weights** (`ndarray`) Unnormalized log weights, i.e., log_w(x) = log_target(x) - log_proposal(x), `ndarray` of shape (nsamples, ) * **weights** (`ndarray`): Importance weights, weighted so that they sum up to 1, `ndarray` of shape (nsamples, ) * **unweighted_samples** (`ndarray`): Set of un-weighted samples (useful for instance for plotting), computed by calling the `resample` method **Methods:** """ # Last Modified: 10/05/2020 by <NAME> def __init__(self, nsamples=None, pdf_target=None, log_pdf_target=None, args_target=None, proposal=None, verbose=False, random_state=None): # Initialize proposal: it should have an rvs and log pdf or pdf method self.proposal = proposal if not isinstance(self.proposal, Distribution): raise TypeError('UQpy: The proposal should be of type Distribution.') if not hasattr(self.proposal, 'rvs'): raise AttributeError('UQpy: The proposal should have an rvs method') if not hasattr(self.proposal, 'log_pdf'): if not hasattr(self.proposal, 'pdf'): raise AttributeError('UQpy: The proposal should have a log_pdf or pdf method') self.proposal.log_pdf = lambda x: np.log(np.maximum(self.proposal.pdf(x), 10 ** (-320) * np.ones((x.shape[0],)))) # Initialize target self.evaluate_log_target = self._preprocess_target(log_pdf_=log_pdf_target, pdf_=pdf_target, args=args_target) self.verbose = verbose self.random_state = random_state if isinstance(self.random_state, int): self.random_state = np.random.RandomState(self.random_state) elif not isinstance(self.random_state, (type(None), np.random.RandomState)): raise TypeError('UQpy: random_state must be None, an int or an np.random.RandomState object.') # Initialize the samples and weights self.samples = None self.unnormalized_log_weights = None self.weights = None self.unweighted_samples = None # Run IS if nsamples is provided if nsamples is not None and nsamples != 0: self.run(nsamples) def 
run(self, nsamples): """ Generate and weight samples. This function samples from the proposal and appends samples to existing ones (if any). It then weights the samples as log_w_unnormalized) = log(target)-log(proposal). **Inputs:** * **nsamples** (`int`) Number of weighted samples to generate. * **Output/Returns:** This function has no returns, but it updates the output attributes `samples`, `unnormalized_log_weights` and `weights` of the ``IS`` object. """ if self.verbose: print('UQpy: Running Importance Sampling...') # Sample from proposal new_samples = self.proposal.rvs(nsamples=nsamples, random_state=self.random_state) # Compute un-scaled weights of new samples new_log_weights = self.evaluate_log_target(x=new_samples) - self.proposal.log_pdf(x=new_samples) # Save samples and weights (append to existing if necessary) if self.samples is None: self.samples = new_samples self.unnormalized_log_weights = new_log_weights else: self.samples = np.concatenate([self.samples, new_samples], axis=0) self.unnormalized_log_weights = np.concatenate([self.unnormalized_log_weights, new_log_weights], axis=0) # Take the exponential and normalize the weights weights = np.exp(self.unnormalized_log_weights - max(self.unnormalized_log_weights)) # note: scaling with max avoids having NaN of Inf when taking the exp sum_w = np.sum(weights, axis=0) self.weights = weights / sum_w if self.verbose: print('UQpy: Importance Sampling performed successfully') # If a set of unweighted samples exist, delete them as they are not representative of the distribution anymore if self.unweighted_samples is not None: if self.verbose: print('UQpy: unweighted samples are being deleted, call the resample method to regenerate them') self.unweighted_samples = None # def resample(self, method='multinomial', nsamples=None): # """ # Resample to get a set of un-weighted samples that represent the target pdf. # # Utility function that creates a set of un-weighted samples from a set of weighted samples. 
Can be useful for # plotting for instance. # # **Inputs:** # # * **method** (`str`) # Resampling method, as of V3 only multinomial resampling is supported. Default: 'multinomial'. # * **nsamples** (`int`) # Number of un-weighted samples to generate. Default: None (same number of samples is generated as number of # existing samples). # # **Output/Returns:** # # * (`ndarray`) # Un-weighted samples that represent the target pdf, `ndarray` of shape (nsamples, dimension) # # """ # from .Utilities import resample # return resample(self.samples, self.weights, method=method, size=nsamples) def resample(self, method='multinomial', nsamples=None): """ Resample to get a set of un-weighted samples that represent the target pdf. Utility function that creates a set of un-weighted samples from a set of weighted samples. Can be useful for plotting for instance. The ``resample`` method is not called automatically when instantiating the ``IS`` class or when invoking its ``run`` method. **Inputs:** * **method** (`str`) Resampling method, as of V3 only multinomial resampling is supported. Default: 'multinomial'. * **nsamples** (`int`) Number of un-weighted samples to generate. Default: None (sets `nsamples` equal to the number of existing weighted samples). **Output/Returns:** The method has no returns, but it computes the following attribute of the ``IS`` object. 
* **unweighted_samples** (`ndarray`) Un-weighted samples that represent the target pdf, `ndarray` of shape (nsamples, dimension) """ if nsamples is None: nsamples = self.samples.shape[0] if method == 'multinomial': multinomial_run = np.random.multinomial(nsamples, self.weights, size=1)[0] idx = list() for j in range(self.samples.shape[0]): if multinomial_run[j] > 0: idx.extend([j for _ in range(multinomial_run[j])]) self.unweighted_samples = self.samples[idx, :] else: raise ValueError('Exit code: Current available method: multinomial') @staticmethod def _preprocess_target(log_pdf_, pdf_, args): """ Preprocess the target pdf inputs. Utility function (static method), that transforms the log_pdf, pdf, args inputs into a function that evaluates log_pdf_target(x) for a given x. **Inputs:** * log_pdf_ ((list of) callables): Log of the target density function from which to draw random samples. Either pdf_target or log_pdf_target must be provided * pdf_ ((list of) callables): Target density function from which to draw random samples. * args (tuple): Positional arguments of the pdf target **Output/Returns:** * evaluate_log_pdf (callable): Callable that computes the log of the target density function """ # log_pdf is provided if log_pdf_ is not None: if callable(log_pdf_): if args is None: args = () evaluate_log_pdf = (lambda x: log_pdf_(x, *args)) else: raise TypeError('UQpy: log_pdf_target must be a callable') # pdf is provided elif pdf_ is not None: if callable(pdf_): if args is None: args = () evaluate_log_pdf = (lambda x: np.log(np.maximum(pdf_(x, *args), 10 ** (-320) * np.ones((x.shape[0],))))) else: raise TypeError('UQpy: pdf_target must be a callable') else: raise ValueError('UQpy: log_pdf_target or pdf_target should be provided.') return evaluate_log_pdf
from UQpy.Distributions import Distribution import numpy as np ######################################################################################################################## ######################################################################################################################## # Generating random samples inside a Simplex ######################################################################################################################## class IS: """ Sample from a user-defined target density using importance sampling. **Inputs:** * **nsamples** (`int`): Number of samples to generate - see ``run`` method. If not `None`, the `run` method is called when the object is created. Default is None. * **pdf_target** (callable): Callable that evaluates the pdf of the target distribution. Either log_pdf_target or pdf_target must be specified (the former is preferred). * **log_pdf_target** (callable) Callable that evaluates the log-pdf of the target distribution. Either log_pdf_target or pdf_target must be specified (the former is preferred). * **args_target** (`tuple`): Positional arguments of the target log_pdf / pdf callable. * **proposal** (``Distribution`` object): Proposal to sample from. This ``UQpy.Distributions`` object must have an rvs method and a log_pdf (or pdf) method. * **verbose** (`boolean`) Set ``verbose = True`` to print status messages to the terminal during execution. * **random_state** (None or `int` or ``numpy.random.RandomState`` object): Random seed used to initialize the pseudo-random number generator. Default is None. If an integer is provided, this sets the seed for an object of ``numpy.random.RandomState``. Otherwise, the object itself can be passed directly. 
**Attributes:** * **samples** (`ndarray`): Set of samples, `ndarray` of shape (nsamples, dim) * **unnormalized_log_weights** (`ndarray`) Unnormalized log weights, i.e., log_w(x) = log_target(x) - log_proposal(x), `ndarray` of shape (nsamples, ) * **weights** (`ndarray`): Importance weights, weighted so that they sum up to 1, `ndarray` of shape (nsamples, ) * **unweighted_samples** (`ndarray`): Set of un-weighted samples (useful for instance for plotting), computed by calling the `resample` method **Methods:** """ # Last Modified: 10/05/2020 by <NAME> def __init__(self, nsamples=None, pdf_target=None, log_pdf_target=None, args_target=None, proposal=None, verbose=False, random_state=None): # Initialize proposal: it should have an rvs and log pdf or pdf method self.proposal = proposal if not isinstance(self.proposal, Distribution): raise TypeError('UQpy: The proposal should be of type Distribution.') if not hasattr(self.proposal, 'rvs'): raise AttributeError('UQpy: The proposal should have an rvs method') if not hasattr(self.proposal, 'log_pdf'): if not hasattr(self.proposal, 'pdf'): raise AttributeError('UQpy: The proposal should have a log_pdf or pdf method') self.proposal.log_pdf = lambda x: np.log(np.maximum(self.proposal.pdf(x), 10 ** (-320) * np.ones((x.shape[0],)))) # Initialize target self.evaluate_log_target = self._preprocess_target(log_pdf_=log_pdf_target, pdf_=pdf_target, args=args_target) self.verbose = verbose self.random_state = random_state if isinstance(self.random_state, int): self.random_state = np.random.RandomState(self.random_state) elif not isinstance(self.random_state, (type(None), np.random.RandomState)): raise TypeError('UQpy: random_state must be None, an int or an np.random.RandomState object.') # Initialize the samples and weights self.samples = None self.unnormalized_log_weights = None self.weights = None self.unweighted_samples = None # Run IS if nsamples is provided if nsamples is not None and nsamples != 0: self.run(nsamples) def 
run(self, nsamples): """ Generate and weight samples. This function samples from the proposal and appends samples to existing ones (if any). It then weights the samples as log_w_unnormalized) = log(target)-log(proposal). **Inputs:** * **nsamples** (`int`) Number of weighted samples to generate. * **Output/Returns:** This function has no returns, but it updates the output attributes `samples`, `unnormalized_log_weights` and `weights` of the ``IS`` object. """ if self.verbose: print('UQpy: Running Importance Sampling...') # Sample from proposal new_samples = self.proposal.rvs(nsamples=nsamples, random_state=self.random_state) # Compute un-scaled weights of new samples new_log_weights = self.evaluate_log_target(x=new_samples) - self.proposal.log_pdf(x=new_samples) # Save samples and weights (append to existing if necessary) if self.samples is None: self.samples = new_samples self.unnormalized_log_weights = new_log_weights else: self.samples = np.concatenate([self.samples, new_samples], axis=0) self.unnormalized_log_weights = np.concatenate([self.unnormalized_log_weights, new_log_weights], axis=0) # Take the exponential and normalize the weights weights = np.exp(self.unnormalized_log_weights - max(self.unnormalized_log_weights)) # note: scaling with max avoids having NaN of Inf when taking the exp sum_w = np.sum(weights, axis=0) self.weights = weights / sum_w if self.verbose: print('UQpy: Importance Sampling performed successfully') # If a set of unweighted samples exist, delete them as they are not representative of the distribution anymore if self.unweighted_samples is not None: if self.verbose: print('UQpy: unweighted samples are being deleted, call the resample method to regenerate them') self.unweighted_samples = None # def resample(self, method='multinomial', nsamples=None): # """ # Resample to get a set of un-weighted samples that represent the target pdf. # # Utility function that creates a set of un-weighted samples from a set of weighted samples. 
Can be useful for # plotting for instance. # # **Inputs:** # # * **method** (`str`) # Resampling method, as of V3 only multinomial resampling is supported. Default: 'multinomial'. # * **nsamples** (`int`) # Number of un-weighted samples to generate. Default: None (same number of samples is generated as number of # existing samples). # # **Output/Returns:** # # * (`ndarray`) # Un-weighted samples that represent the target pdf, `ndarray` of shape (nsamples, dimension) # # """ # from .Utilities import resample # return resample(self.samples, self.weights, method=method, size=nsamples) def resample(self, method='multinomial', nsamples=None): """ Resample to get a set of un-weighted samples that represent the target pdf. Utility function that creates a set of un-weighted samples from a set of weighted samples. Can be useful for plotting for instance. The ``resample`` method is not called automatically when instantiating the ``IS`` class or when invoking its ``run`` method. **Inputs:** * **method** (`str`) Resampling method, as of V3 only multinomial resampling is supported. Default: 'multinomial'. * **nsamples** (`int`) Number of un-weighted samples to generate. Default: None (sets `nsamples` equal to the number of existing weighted samples). **Output/Returns:** The method has no returns, but it computes the following attribute of the ``IS`` object. 
* **unweighted_samples** (`ndarray`) Un-weighted samples that represent the target pdf, `ndarray` of shape (nsamples, dimension) """ if nsamples is None: nsamples = self.samples.shape[0] if method == 'multinomial': multinomial_run = np.random.multinomial(nsamples, self.weights, size=1)[0] idx = list() for j in range(self.samples.shape[0]): if multinomial_run[j] > 0: idx.extend([j for _ in range(multinomial_run[j])]) self.unweighted_samples = self.samples[idx, :] else: raise ValueError('Exit code: Current available method: multinomial') @staticmethod def _preprocess_target(log_pdf_, pdf_, args): """ Preprocess the target pdf inputs. Utility function (static method), that transforms the log_pdf, pdf, args inputs into a function that evaluates log_pdf_target(x) for a given x. **Inputs:** * log_pdf_ ((list of) callables): Log of the target density function from which to draw random samples. Either pdf_target or log_pdf_target must be provided * pdf_ ((list of) callables): Target density function from which to draw random samples. * args (tuple): Positional arguments of the pdf target **Output/Returns:** * evaluate_log_pdf (callable): Callable that computes the log of the target density function """ # log_pdf is provided if log_pdf_ is not None: if callable(log_pdf_): if args is None: args = () evaluate_log_pdf = (lambda x: log_pdf_(x, *args)) else: raise TypeError('UQpy: log_pdf_target must be a callable') # pdf is provided elif pdf_ is not None: if callable(pdf_): if args is None: args = () evaluate_log_pdf = (lambda x: np.log(np.maximum(pdf_(x, *args), 10 ** (-320) * np.ones((x.shape[0],))))) else: raise TypeError('UQpy: pdf_target must be a callable') else: raise ValueError('UQpy: log_pdf_target or pdf_target should be provided.') return evaluate_log_pdf
en
0.706602
######################################################################################################################## ######################################################################################################################## # Generating random samples inside a Simplex ######################################################################################################################## Sample from a user-defined target density using importance sampling. **Inputs:** * **nsamples** (`int`): Number of samples to generate - see ``run`` method. If not `None`, the `run` method is called when the object is created. Default is None. * **pdf_target** (callable): Callable that evaluates the pdf of the target distribution. Either log_pdf_target or pdf_target must be specified (the former is preferred). * **log_pdf_target** (callable) Callable that evaluates the log-pdf of the target distribution. Either log_pdf_target or pdf_target must be specified (the former is preferred). * **args_target** (`tuple`): Positional arguments of the target log_pdf / pdf callable. * **proposal** (``Distribution`` object): Proposal to sample from. This ``UQpy.Distributions`` object must have an rvs method and a log_pdf (or pdf) method. * **verbose** (`boolean`) Set ``verbose = True`` to print status messages to the terminal during execution. * **random_state** (None or `int` or ``numpy.random.RandomState`` object): Random seed used to initialize the pseudo-random number generator. Default is None. If an integer is provided, this sets the seed for an object of ``numpy.random.RandomState``. Otherwise, the object itself can be passed directly. 
**Attributes:** * **samples** (`ndarray`): Set of samples, `ndarray` of shape (nsamples, dim) * **unnormalized_log_weights** (`ndarray`) Unnormalized log weights, i.e., log_w(x) = log_target(x) - log_proposal(x), `ndarray` of shape (nsamples, ) * **weights** (`ndarray`): Importance weights, weighted so that they sum up to 1, `ndarray` of shape (nsamples, ) * **unweighted_samples** (`ndarray`): Set of un-weighted samples (useful for instance for plotting), computed by calling the `resample` method **Methods:** # Last Modified: 10/05/2020 by <NAME> # Initialize proposal: it should have an rvs and log pdf or pdf method # Initialize target # Initialize the samples and weights # Run IS if nsamples is provided Generate and weight samples. This function samples from the proposal and appends samples to existing ones (if any). It then weights the samples as log_w_unnormalized) = log(target)-log(proposal). **Inputs:** * **nsamples** (`int`) Number of weighted samples to generate. * **Output/Returns:** This function has no returns, but it updates the output attributes `samples`, `unnormalized_log_weights` and `weights` of the ``IS`` object. # Sample from proposal # Compute un-scaled weights of new samples # Save samples and weights (append to existing if necessary) # Take the exponential and normalize the weights # note: scaling with max avoids having NaN of Inf when taking the exp # If a set of unweighted samples exist, delete them as they are not representative of the distribution anymore # def resample(self, method='multinomial', nsamples=None): # """ # Resample to get a set of un-weighted samples that represent the target pdf. # # Utility function that creates a set of un-weighted samples from a set of weighted samples. Can be useful for # plotting for instance. # # **Inputs:** # # * **method** (`str`) # Resampling method, as of V3 only multinomial resampling is supported. Default: 'multinomial'. # * **nsamples** (`int`) # Number of un-weighted samples to generate. 
Default: None (same number of samples is generated as number of # existing samples). # # **Output/Returns:** # # * (`ndarray`) # Un-weighted samples that represent the target pdf, `ndarray` of shape (nsamples, dimension) # # """ # from .Utilities import resample # return resample(self.samples, self.weights, method=method, size=nsamples) Resample to get a set of un-weighted samples that represent the target pdf. Utility function that creates a set of un-weighted samples from a set of weighted samples. Can be useful for plotting for instance. The ``resample`` method is not called automatically when instantiating the ``IS`` class or when invoking its ``run`` method. **Inputs:** * **method** (`str`) Resampling method, as of V3 only multinomial resampling is supported. Default: 'multinomial'. * **nsamples** (`int`) Number of un-weighted samples to generate. Default: None (sets `nsamples` equal to the number of existing weighted samples). **Output/Returns:** The method has no returns, but it computes the following attribute of the ``IS`` object. * **unweighted_samples** (`ndarray`) Un-weighted samples that represent the target pdf, `ndarray` of shape (nsamples, dimension) Preprocess the target pdf inputs. Utility function (static method), that transforms the log_pdf, pdf, args inputs into a function that evaluates log_pdf_target(x) for a given x. **Inputs:** * log_pdf_ ((list of) callables): Log of the target density function from which to draw random samples. Either pdf_target or log_pdf_target must be provided * pdf_ ((list of) callables): Target density function from which to draw random samples. * args (tuple): Positional arguments of the pdf target **Output/Returns:** * evaluate_log_pdf (callable): Callable that computes the log of the target density function # log_pdf is provided # pdf is provided
2.266816
2
src/anomaly_toolbox/datasets/__init__.py
zurutech/anomaly-toolbox
73
6623022
<reponame>zurutech/anomaly-toolbox<filename>src/anomaly_toolbox/datasets/__init__.py # Copyright 2021 Zuru Tech HK Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Datasets for anomaly detection module.""" from .corrupted_mnist import CorruptedMNIST from .dataset import AnomalyDetectionDataset from .mnist import MNIST from .mvtecad import MVTecAD from .surface_cracks import SurfaceCracks __all__ = [ "MNIST", "CorruptedMNIST", "SurfaceCracks", "AnomalyDetectionDataset", "MVTecAD", ] __datasets__ = ["MNIST", "CorruptedMNIST", "SurfaceCracks", "MVTecAD"]
# Copyright 2021 Zuru Tech HK Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Datasets for anomaly detection module.""" from .corrupted_mnist import CorruptedMNIST from .dataset import AnomalyDetectionDataset from .mnist import MNIST from .mvtecad import MVTecAD from .surface_cracks import SurfaceCracks __all__ = [ "MNIST", "CorruptedMNIST", "SurfaceCracks", "AnomalyDetectionDataset", "MVTecAD", ] __datasets__ = ["MNIST", "CorruptedMNIST", "SurfaceCracks", "MVTecAD"]
en
0.830674
# Copyright 2021 Zuru Tech HK Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Datasets for anomaly detection module.
1.465679
1
spectator/events/migrations/0043_rename_ticket_to_thumbnail.py
philgyford/django-spectator
36
6623023
<gh_stars>10-100 # Generated by Django 3.0.5 on 2020-04-07 10:49 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ("spectator_events", "0042_auto_20200407_1039"), ] operations = [ migrations.RenameField( model_name="event", old_name="ticket", new_name="thumbnail", ), ]
# Generated by Django 3.0.5 on 2020-04-07 10:49 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ("spectator_events", "0042_auto_20200407_1039"), ] operations = [ migrations.RenameField( model_name="event", old_name="ticket", new_name="thumbnail", ), ]
en
0.833996
# Generated by Django 3.0.5 on 2020-04-07 10:49
1.693672
2
engine/run_preprocess.py
fhidalgor/erdos2021-project
1
6623024
<reponame>fhidalgor/erdos2021-project """ Module to run the preprocessing pipeline """ import pandas as pd from engine.preprocess.extract_pubmed import ExtractPubmedAbstracts from engine.preprocess.extract_mimic import ExtractMimicNotes from engine.preprocess.tokenize_text import Tokenize from engine.preprocess.identify_longforms import IdentifyLongForms from engine.preprocess.replace_longforms import ReplaceLongForms from engine.preprocess.sample_dataset import SampleDataset #obj = Tokenize('m<PASSWORD>iii') # Load short forms dataframe SUBSET_DF: pd.DataFrame = pd.read_csv("datasets/adam/train_2500_sort_AB_Exp.csv") #obj = IdentifyLongForms('mimiciii', SUBSET_DF) obj = ReplaceLongForms('mimiciii', SUBSET_DF) obj() #print(SUBSET_DF.columns)
""" Module to run the preprocessing pipeline """ import pandas as pd from engine.preprocess.extract_pubmed import ExtractPubmedAbstracts from engine.preprocess.extract_mimic import ExtractMimicNotes from engine.preprocess.tokenize_text import Tokenize from engine.preprocess.identify_longforms import IdentifyLongForms from engine.preprocess.replace_longforms import ReplaceLongForms from engine.preprocess.sample_dataset import SampleDataset #obj = Tokenize('m<PASSWORD>iii') # Load short forms dataframe SUBSET_DF: pd.DataFrame = pd.read_csv("datasets/adam/train_2500_sort_AB_Exp.csv") #obj = IdentifyLongForms('mimiciii', SUBSET_DF) obj = ReplaceLongForms('mimiciii', SUBSET_DF) obj() #print(SUBSET_DF.columns)
en
0.49282
Module to run the preprocessing pipeline #obj = Tokenize('m<PASSWORD>iii') # Load short forms dataframe #obj = IdentifyLongForms('mimiciii', SUBSET_DF) #print(SUBSET_DF.columns)
2.550356
3
src/config/device-manager/device_manager/plugins/ansible/overlay/overlay_conf.py
kaweue/contrail-controller
0
6623025
# # Copyright (c) 2014 Juniper Networks, Inc. All rights reserved. # """ This file contains implementation of abstract config generation for leafs """ from ansible_role_common import AnsibleRoleCommon from abstract_device_api.abstract_device_xsd import * class OverlayConf(AnsibleRoleCommon): _roles = ['leaf', 'spine'] def __init__(self, logger, params={}): super(OverlayConf, self).__init__(logger, params) # end __init__ @classmethod def register(cls): qconf = { "roles": cls._roles, "class": cls } return super(OverlayConf, cls).register(qconf) # end register def push_conf(self, is_delete=False): if not self.physical_router: return 0 if is_delete: return self.send_conf(is_delete=True) self.set_common_config() return self.send_conf() # end push_conf # end LeafConf
# # Copyright (c) 2014 Juniper Networks, Inc. All rights reserved. # """ This file contains implementation of abstract config generation for leafs """ from ansible_role_common import AnsibleRoleCommon from abstract_device_api.abstract_device_xsd import * class OverlayConf(AnsibleRoleCommon): _roles = ['leaf', 'spine'] def __init__(self, logger, params={}): super(OverlayConf, self).__init__(logger, params) # end __init__ @classmethod def register(cls): qconf = { "roles": cls._roles, "class": cls } return super(OverlayConf, cls).register(qconf) # end register def push_conf(self, is_delete=False): if not self.physical_router: return 0 if is_delete: return self.send_conf(is_delete=True) self.set_common_config() return self.send_conf() # end push_conf # end LeafConf
en
0.727189
# # Copyright (c) 2014 Juniper Networks, Inc. All rights reserved. # This file contains implementation of abstract config generation for leafs # end __init__ # end register # end push_conf # end LeafConf
2.105291
2
platform_monitoring/base.py
neuro-inc/platform-monitoring
0
6623026
<gh_stars>0 import time from abc import ABC, abstractmethod from collections.abc import AsyncIterator from dataclasses import dataclass, field from datetime import datetime from typing import Any, Optional class LogReader(ABC): last_time: Optional[datetime] = None def __init__(self, container_runtime: str, timestamps: bool = False) -> None: super().__init__() self._container_runtime = container_runtime self._timestamps = timestamps def encode_log(self, time: str, log: str) -> bytes: result = log if self._timestamps: if self._container_runtime == "docker": result = f"{time} {log}" else: result = f"{time} {log}\n" else: if self._container_runtime != "docker": result = f"{log}\n" return result.encode() @abstractmethod async def __aenter__(self) -> AsyncIterator[bytes]: pass async def __aexit__(self, *args: Any) -> None: pass @dataclass(frozen=True) class JobStats: cpu: float memory: float gpu_utilization: Optional[int] = None gpu_memory_used_mb: Optional[int] = None timestamp: float = field(default_factory=time.time) class Telemetry(ABC): async def __aenter__(self) -> "Telemetry": return self async def __aexit__(self, *args: Any) -> None: pass @abstractmethod async def get_latest_stats(self) -> Optional[JobStats]: pass
import time from abc import ABC, abstractmethod from collections.abc import AsyncIterator from dataclasses import dataclass, field from datetime import datetime from typing import Any, Optional class LogReader(ABC): last_time: Optional[datetime] = None def __init__(self, container_runtime: str, timestamps: bool = False) -> None: super().__init__() self._container_runtime = container_runtime self._timestamps = timestamps def encode_log(self, time: str, log: str) -> bytes: result = log if self._timestamps: if self._container_runtime == "docker": result = f"{time} {log}" else: result = f"{time} {log}\n" else: if self._container_runtime != "docker": result = f"{log}\n" return result.encode() @abstractmethod async def __aenter__(self) -> AsyncIterator[bytes]: pass async def __aexit__(self, *args: Any) -> None: pass @dataclass(frozen=True) class JobStats: cpu: float memory: float gpu_utilization: Optional[int] = None gpu_memory_used_mb: Optional[int] = None timestamp: float = field(default_factory=time.time) class Telemetry(ABC): async def __aenter__(self) -> "Telemetry": return self async def __aexit__(self, *args: Any) -> None: pass @abstractmethod async def get_latest_stats(self) -> Optional[JobStats]: pass
none
1
2.752501
3
Scripts/003_hackerrank/30 Days of Code/d016.py
OrangePeelFX/Python-Tutorial
0
6623027
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """Day 16: Exceptions - String to Integer Source : https://www.hackerrank.com/challenges/30-exceptions-string-to-integer/problem """ try: print(int(input().strip())) except: print("Bad String")
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """Day 16: Exceptions - String to Integer Source : https://www.hackerrank.com/challenges/30-exceptions-string-to-integer/problem """ try: print(int(input().strip())) except: print("Bad String")
en
0.488733
#!/usr/bin/env python3 # -*- coding: utf-8 -*- Day 16: Exceptions - String to Integer Source : https://www.hackerrank.com/challenges/30-exceptions-string-to-integer/problem
3.437869
3
controllers/artist.py
Silve1ra/fyyur
1
6623028
<gh_stars>1-10 import sys from flask import render_template, redirect, url_for, request, flash from models.models import Artist, Venue, Show from forms import ArtistForm, datetime from flask_sqlalchemy import SQLAlchemy db = SQLAlchemy() def artists(): data = Artist.query.all() return render_template('pages/artists.html', artists=data) def search_artists(): search_term = request.form.get('search_term', '') response = Artist.query.filter(Artist.name.ilike('%' + search_term + '%')) results = { 'count': response.count(), 'data': response } return render_template('pages/search_artists.html', results=results, search_term=search_term) def get_shows(shows): data = [] for show in shows: venue = Venue.query.get(show.artist_id) data.append({ "venue_id": venue.id, "venue_name": venue.name, "venue_image_link": venue.image_link, "start_time": show.start_time.strftime('%m/%d/%Y') }) return data def show_artist(artist_id): artist = Artist.query.get(artist_id) current_time = datetime.now().strftime('%m/%d/%Y') past_shows= Show.query.filter_by(artist_id=artist_id).filter(Show.start_time <= current_time) past_shows_data = get_shows(past_shows) upcoming_shows= Show.query.filter_by(artist_id=artist_id).filter(Show.start_time > current_time) upcoming_shows_data = get_shows(upcoming_shows) data = { 'id': artist.id, 'name': artist.name, 'genres': artist.genres, 'city': artist.city, 'state': artist.state, 'phone': artist.phone, 'website': artist.website, 'facebook_link': artist.facebook_link, 'image_link': artist.image_link, 'seeking_venue': artist.seeking_venue, 'seeking_description': artist.seeking_description, 'past_shows': past_shows_data, 'upcoming_shows': upcoming_shows_data, 'past_shows_count': past_shows.count(), 'upcoming_shows_count': upcoming_shows.count(), } return render_template('pages/show_artist.html', artist=data) def create_artist_form(): form = ArtistForm() return render_template('forms/new_artist.html', form=form) def create_artist_submission(): if 
request.form.get('seeking_venue') == 'y': seeking_venue = True else: seeking_venue = False artist = Artist( name=request.form.get('name'), city=request.form.get('city'), state=request.form.get('state'), phone=request.form.get('phone'), genres=request.form.getlist('genres'), facebook_link=request.form.get('facebook_link'), image_link=request.form.get('image_link'), website=request.form.get('website'), seeking_venue=seeking_venue, seeking_description=request.form.get('seeking_description'), ) try: db.session.add(artist) db.session.commit() flash('Artist ' + request.form['name'] + ' was successfully added!') except: db.session.rollback() print(sys.exc_info()) flash('An error occurred. Artist ' + request.form['name'] + ' could not be added.') finally: db.session.close() return render_template('pages/home.html') def edit_artist(artist_id): artist = Artist.query.get(artist_id) form = ArtistForm() form.name.default = artist.name form.city.default = artist.city form.state.default = artist.state form.phone.default = artist.phone form.genres.default = artist.genres form.facebook_link.default = artist.facebook_link form.image_link.default = artist.image_link form.website_link.default = artist.website form.seeking_venue.default = artist.seeking_venue form.seeking_description.default = artist.seeking_description form.process() return render_template('forms/edit_artist.html', form=form, artist=artist) def edit_artist_submission(artist_id): if request.form.get('seeking_venue') == 'y': seeking_venue = True else: seeking_venue = False artist = Artist.query.get(artist_id) try: artist.name = request.form.get('name') artist.city = request.form.get('city') artist.state = request.form.get('state') artist.phone = request.form.get('phone') artist.genres = request.form.getlist('genres') artist.facebook_link = request.form.get('facebook_link') artist.image_link = request.form.get('image_link') artist.website_link = request.form.get('website_link') artist.seeking_venue = seeking_venue 
artist.seeking_description = request.form.get('seeking_description') db.session.merge(artist) db.session.commit() flash('Artist ' + request.form['name'] + ' was successfully updated!') except: db.session.rollback() print(sys.exc_info()) flash('An error occurred. Artist ' + request.form['name'] + ' could not be updated.') finally: db.session.close() return redirect(url_for('artist_bp.show_artist', artist_id=artist_id)) def delete_artist(artist_id): try: artist = Artist.query.filter_by(id=artist_id).first_or_404() current_session = db.object_session(artist) current_session.delete(artist) current_session.commit() db.session.delete(artist) db.session.commit() flash('This artist was successfully deleted!') return render_template('pages/home.html') except: db.session.rollback() print(sys.exc_info()) flash('An error occurred. This artist could not be deleted.') finally: db.session.close() return redirect(url_for('artist_bp.artists'))
import sys from flask import render_template, redirect, url_for, request, flash from models.models import Artist, Venue, Show from forms import ArtistForm, datetime from flask_sqlalchemy import SQLAlchemy db = SQLAlchemy() def artists(): data = Artist.query.all() return render_template('pages/artists.html', artists=data) def search_artists(): search_term = request.form.get('search_term', '') response = Artist.query.filter(Artist.name.ilike('%' + search_term + '%')) results = { 'count': response.count(), 'data': response } return render_template('pages/search_artists.html', results=results, search_term=search_term) def get_shows(shows): data = [] for show in shows: venue = Venue.query.get(show.artist_id) data.append({ "venue_id": venue.id, "venue_name": venue.name, "venue_image_link": venue.image_link, "start_time": show.start_time.strftime('%m/%d/%Y') }) return data def show_artist(artist_id): artist = Artist.query.get(artist_id) current_time = datetime.now().strftime('%m/%d/%Y') past_shows= Show.query.filter_by(artist_id=artist_id).filter(Show.start_time <= current_time) past_shows_data = get_shows(past_shows) upcoming_shows= Show.query.filter_by(artist_id=artist_id).filter(Show.start_time > current_time) upcoming_shows_data = get_shows(upcoming_shows) data = { 'id': artist.id, 'name': artist.name, 'genres': artist.genres, 'city': artist.city, 'state': artist.state, 'phone': artist.phone, 'website': artist.website, 'facebook_link': artist.facebook_link, 'image_link': artist.image_link, 'seeking_venue': artist.seeking_venue, 'seeking_description': artist.seeking_description, 'past_shows': past_shows_data, 'upcoming_shows': upcoming_shows_data, 'past_shows_count': past_shows.count(), 'upcoming_shows_count': upcoming_shows.count(), } return render_template('pages/show_artist.html', artist=data) def create_artist_form(): form = ArtistForm() return render_template('forms/new_artist.html', form=form) def create_artist_submission(): if request.form.get('seeking_venue') 
== 'y': seeking_venue = True else: seeking_venue = False artist = Artist( name=request.form.get('name'), city=request.form.get('city'), state=request.form.get('state'), phone=request.form.get('phone'), genres=request.form.getlist('genres'), facebook_link=request.form.get('facebook_link'), image_link=request.form.get('image_link'), website=request.form.get('website'), seeking_venue=seeking_venue, seeking_description=request.form.get('seeking_description'), ) try: db.session.add(artist) db.session.commit() flash('Artist ' + request.form['name'] + ' was successfully added!') except: db.session.rollback() print(sys.exc_info()) flash('An error occurred. Artist ' + request.form['name'] + ' could not be added.') finally: db.session.close() return render_template('pages/home.html') def edit_artist(artist_id): artist = Artist.query.get(artist_id) form = ArtistForm() form.name.default = artist.name form.city.default = artist.city form.state.default = artist.state form.phone.default = artist.phone form.genres.default = artist.genres form.facebook_link.default = artist.facebook_link form.image_link.default = artist.image_link form.website_link.default = artist.website form.seeking_venue.default = artist.seeking_venue form.seeking_description.default = artist.seeking_description form.process() return render_template('forms/edit_artist.html', form=form, artist=artist) def edit_artist_submission(artist_id): if request.form.get('seeking_venue') == 'y': seeking_venue = True else: seeking_venue = False artist = Artist.query.get(artist_id) try: artist.name = request.form.get('name') artist.city = request.form.get('city') artist.state = request.form.get('state') artist.phone = request.form.get('phone') artist.genres = request.form.getlist('genres') artist.facebook_link = request.form.get('facebook_link') artist.image_link = request.form.get('image_link') artist.website_link = request.form.get('website_link') artist.seeking_venue = seeking_venue artist.seeking_description = 
request.form.get('seeking_description') db.session.merge(artist) db.session.commit() flash('Artist ' + request.form['name'] + ' was successfully updated!') except: db.session.rollback() print(sys.exc_info()) flash('An error occurred. Artist ' + request.form['name'] + ' could not be updated.') finally: db.session.close() return redirect(url_for('artist_bp.show_artist', artist_id=artist_id)) def delete_artist(artist_id): try: artist = Artist.query.filter_by(id=artist_id).first_or_404() current_session = db.object_session(artist) current_session.delete(artist) current_session.commit() db.session.delete(artist) db.session.commit() flash('This artist was successfully deleted!') return render_template('pages/home.html') except: db.session.rollback() print(sys.exc_info()) flash('An error occurred. This artist could not be deleted.') finally: db.session.close() return redirect(url_for('artist_bp.artists'))
none
1
2.351197
2
run.py
Carmo-sousa/livro-flask
0
6623029
""" Responsável por rodar o programa. """ from app import create_app from config import app_active, app_config config = app_config[app_active] config.APP = create_app(app_active) if __name__ == "__main__": config.APP.run(host=config.IP_HOST, port=config.PORT_HOST)
""" Responsável por rodar o programa. """ from app import create_app from config import app_active, app_config config = app_config[app_active] config.APP = create_app(app_active) if __name__ == "__main__": config.APP.run(host=config.IP_HOST, port=config.PORT_HOST)
pt
0.891253
Responsável por rodar o programa.
2.187389
2
nets/GeneralizedWF.py
Andong-Li-speech/TaylorBeamformer
4
6623030
import torch import torch.nn as nn from torch import Tensor from torch.autograd import Variable from torch_complex.tensor import ComplexTensor import torch_complex.functional as F from utils.utils import complex_mul, complex_conj, NormSwitch class GeneralizedMultichannelWienerFiter(nn.Module): def __init__(self, k1: list, k2: list, c: int, M: int, fft_num: int, hid_node: int, kd1: int, cd1: int, d_feat: int, group_num: int, is_gate: bool, dilations: list, is_causal: bool, is_u2: bool, rnn_type: str, norm1d_type: str, norm2d_type: str, intra_connect: str, inter_connect: str, out_type: str, ): super(GeneralizedMultichannelWienerFiter, self).__init__() self.k1 = tuple(k1) self.k2 = tuple(k2) self.c = c self.M = M self.fft_num = fft_num self.hid_node = hid_node self.kd1 = kd1 self.cd1 = cd1 self.d_feat = d_feat self.group_num = group_num self.is_gate = is_gate self.dilations = dilations self.is_causal = is_causal self.is_u2 = is_u2 self.rnn_type = rnn_type self.norm1d_type = norm1d_type self.norm2d_type = norm2d_type self.intra_connect = intra_connect self.inter_connect = inter_connect self.out_type = out_type # Components # inv module self.inv_module = NeuralInvModule(M, hid_node, out_type, rnn_type) if is_u2: self.en = U2Net_Encoder(2*M, self.k1, self.k2, c, intra_connect, norm2d_type) self.de = U2Net_Decoder(c, self.k1, self.k2, fft_num, intra_connect, inter_connect, norm2d_type, out_type) else: self.en = UNet_Encoder(2*M, self.k1, c, norm2d_type) self.de = UNet_Decoder(c, self.k1, fft_num, inter_connect, norm2d_type, out_type) tcn_list = [] for i in range(group_num): tcn_list.append(TCMGroup(kd1, cd1, d_feat, is_gate, dilations, is_causal, norm1d_type)) self.tcns = nn.ModuleList(tcn_list) def forward(self, inpt): """ inpt: (B,T,F,M,2) """ inv_Phi_yy = self.inv_module(inpt) # (B,T,F,M,M,2) b_size, seq_len, freq_num, M, _ = inpt.shape inpt1 = inpt.view(b_size, seq_len, freq_num, -1).permute(0,3,1,2).contiguous() en_x, en_list = self.en(inpt1) en_x = 
en_x.transpose(-2, -1).contiguous().view(b_size, -1, seq_len) acc_x = Variable(torch.zeros_like(en_x), requires_grad=True).to(en_x.device) x = en_x for i in range(len(self.tcns)): x = self.tcns[i](x) acc_x = acc_x + x x = acc_x x = x.view(b_size, 64, 4, seq_len).transpose(-2, -1).contiguous() Vec_Ys = self.de(inpt, x, en_list) # (B,T,F,M,2) # derive wiener filter inpt_complex = ComplexTensor(inpt[...,0], inpt[...,-1]) # (B,T,F,M) inv_Phi_yy_complex = ComplexTensor(inv_Phi_yy[...,0], inv_Phi_yy[...,-1]) Vec_Ys_complex = ComplexTensor(Vec_Ys[...,0], Vec_Ys[...,-1]) mcwf_bf_complex = F.einsum("...mn,...p->...m", [inv_Phi_yy_complex, Vec_Ys_complex]) # (B,T,F,M) bf_x_complex = F.einsum("...m,...n->...", [mcwf_bf_complex.conj(), inpt_complex]) bf_x = torch.stack((bf_x_complex.real, bf_x_complex.imag), dim=-1) # (B,T,F,2) return bf_x class NeuralInvModule(nn.Module): def __init__(self, M: int, hid_node: int, out_type: str, rnn_type: str, ): super(NeuralInvModule, self).__init__() self.M = M self.hid_node = hid_node self.out_type = out_type self.rnn_type = rnn_type # Components inpt_dim = 2*M*M self.norm = nn.LayerNorm([inpt_dim]) self.rnn = getattr(nn, rnn_type)(input_size=inpt_dim, hidden_size=hid_node, num_layers=2) self.w_dnn = nn.Sequential( nn.Linear(hid_node, hid_node), nn.ReLU(True), nn.Linear(hid_node, inpt_dim)) def forward(self, inpt): """ inpt: (B,T,F,M,2) return: (B,T,F,M,M,2) """ b_size, seq_len, freq_num, M, _ = inpt.shape inpt_complex = ComplexTensor(inpt[...,0], inpt[...,-1]) # (B,T,F,M) inpt_cov = F.einsum("...m,...n->...mn", [inpt_complex.conj(), inpt_complex]) # (B,T,F,M,M) inpt_cov = inpt_cov.view(b_size, seq_len, freq_num, -1) inpt_cov = torch.cat((inpt_cov.real, inpt_cov.imag), dim=-1) # (B,T,F,2MM) inpt_cov = self.norm(inpt_cov) inpt_cov = inpt_cov.transpose(1,2).contiguous().view(b_size*freq_num, seq_len, -1) h, _ = self.rnn(inpt_cov) inv_cov = self.w_dnn(h) # (BF,T,2MM) inv_cov = inv_cov.view(b_size, freq_num, seq_len, M, M, 2) return 
inv_cov.transpose(1, 2).contiguous() class UNet_Encoder(nn.Module): def __init__(self, cin: int, k1: tuple, c: int, norm2d_type: str, ): super(UNet_Encoder, self).__init__() self.cin = cin self.k1 = k1 self.c = c self.norm2d_type = norm2d_type kernel_begin = (k1[0], 5) stride = (1, 2) c_final = 64 unet = [] unet.append(nn.Sequential( GateConv2d(cin, c, kernel_begin, stride, padding=(0, 0, k1[0]-1, 0)), NormSwitch(norm2d_type, "2D", c), nn.PReLU(c))) unet.append(nn.Sequential( GateConv2d(c, c, k1, stride, padding=(0, 0, k1[0]-1, 0)), NormSwitch(norm2d_type, "2D", c), nn.PReLU(c))) unet.append(nn.Sequential( GateConv2d(c, c, k1, stride, padding=(0, 0, k1[0]-1, 0)), NormSwitch(norm2d_type, "2D", c), nn.PReLU(c))) unet.append(nn.Sequential( GateConv2d(c, c, k1, stride, padding=(0, 0, k1[0]-1, 0)), NormSwitch(norm2d_type, "2D", c), nn.PReLU(c))) unet.append(nn.Sequential( GateConv2d(c, c_final, k1, (1,2), padding=(0, 0, k1[0]-1, 0)), NormSwitch(norm2d_type, "2D", c_final), nn.PReLU(c_final))) self.unet_list = nn.ModuleList(unet) def forward(self, x: Tensor) -> tuple: en_list = [] for i in range(len(self.unet_list)): x = self.unet_list[i](x) en_list.append(x) return x, en_list class UNet_Decoder(nn.Module): def __init__(self, c: int, k1: tuple, fft_num: int, inter_connect: str, norm2d_type: str, out_type: str, ): super(UNet_Decoder, self).__init__() self.k1 = k1 self.c = c self.fft_num = fft_num self.inter_connect = inter_connect self.norm2d_type = norm2d_type self.out_type = out_type kernel_end = (k1[0], 5) stride = (1, 2) unet = [] if inter_connect == "add": inter_c = c c_begin = 64 elif inter_connect == "cat": inter_c = c*2 c_begin = 64*2 else: raise RuntimeError("Skip connections only support add or concatenate operation") unet.append(nn.Sequential( GateConvTranspose2d(c_begin, c, k1, stride), NormSwitch(norm2d_type, "2D", c), nn.PReLU(c))) unet.append(nn.Sequential( GateConvTranspose2d(inter_c, c, k1, stride), NormSwitch(norm2d_type, "2D", c), nn.PReLU(c))) 
unet.append(nn.Sequential( GateConvTranspose2d(inter_c, c, k1, stride), NormSwitch(norm2d_type, "2D", c), nn.PReLU(c))) unet.append(nn.Sequential( GateConvTranspose2d(inter_c, c, k1, stride), NormSwitch(norm2d_type, "2D", c), nn.PReLU(c))) unet.append(nn.Sequential( GateConvTranspose2d(inter_c, c, kernel_end, stride), NormSwitch(norm2d_type, "2D", c), nn.PReLU(c))) self.unet_list = nn.ModuleList(unet) self.out_r = nn.Sequential( nn.Conv2d(c, 1, (1,1), (1,1)), nn.Linear(fft_num//2+1, fft_num//2+1)) self.out_i = nn.Sequential( nn.Conv2d(c, 1, (1,1), (1,1)), nn.Linear(fft_num//2+1, fft_num//2+1)) def forward(self, inpt: Tensor, x: Tensor, en_list: list): """ inpt: (B,T,F,M,2) return: (B,T,F,M,2) """ b_size, seq_len, freq_num, _, _ = inpt.shape if self.inter_connect == "add": for i in range(len(self.unet_list)): tmp = x + en_list[-(i + 1)] x = self.unet_list[i](tmp) elif self.inter_connect == "cat": for i in range(len(self.unet_list)): tmp = torch.cat((x, en_list[-(i + 1)]), dim=1) x = self.unet_list[i](tmp) else: raise Exception("only add and cat are supported") # output if self.out_type == "mask": gain = torch.stack((self.out_r(x).squeeze(dim=1), self.out_i(x).squeeze(dim=1)), dim=-1) # (B,T,F,2) ref_inpt = inpt[...,0,:] # (B,T,F,2) Yy = complex_mul(inpt, complex_conj(ref_inpt[...,None,:])) # (B,T,F,M,2) out = complex_mul(complex_conj(gain[...,None,:]), Yy) # (B,T,F,M,2) elif self.out_type == "mapping": map = torch.stack((self.out_r(x).squeeze(dim=1), self.out_i(x).squeeze(dim=1)), dim=-1) # (B,T,F,2) out = complex_mul(inpt, complex_conj(map[...,None,:])) # (B,T,F,M,2) else: raise Exception("only mask and mapping are supported") return out class U2Net_Encoder(nn.Module): def __init__(self, cin: int, k1: tuple, k2: tuple, c: int, intra_connect: str, norm2d_type: str, ): super(U2Net_Encoder, self).__init__() self.cin = cin self.k1 = k1 self.k2 = k2 self.c = c self.intra_connect = intra_connect self.norm2d_type = norm2d_type c_last = 64 kernel_begin = (k1[0], 5) stride 
= (1, 2) meta_unet = [] meta_unet.append( En_unet_module(cin, c, kernel_begin, k2, intra_connect, norm2d_type, scale=4, de_flag=False)) meta_unet.append( En_unet_module(c, c, k1, k2, intra_connect, norm2d_type, scale=3, de_flag=False)) meta_unet.append( En_unet_module(c, c, k1, k2, intra_connect, norm2d_type, scale=2, de_flag=False)) meta_unet.append( En_unet_module(c, c, k1, k2, intra_connect, norm2d_type, scale=1, de_flag=False)) self.meta_unet_list = nn.ModuleList(meta_unet) self.last_conv = nn.Sequential( GateConv2d(c, c_last, k1, stride, (0, 0, k1[0]-1, 0)), NormSwitch(norm2d_type, "2D", c_last), nn.PReLU(c_last) ) def forward(self, x: Tensor) -> tuple: en_list = [] for i in range(len(self.meta_unet_list)): x = self.meta_unet_list[i](x) en_list.append(x) x = self.last_conv(x) en_list.append(x) return x, en_list class U2Net_Decoder(nn.Module): def __init__(self, c: int, k1: tuple, k2: tuple, fft_num: int, intra_connect: str, inter_connect: str, norm2d_type: str, out_type: str, ): super(U2Net_Decoder, self).__init__() self.c = c self.k1 = k1 self.k2 = k2 self.fft_num = fft_num self.intra_connect = intra_connect self.inter_connect = inter_connect self.norm2d_type = norm2d_type self.out_type = out_type kernel_end = (k1[0], 5) stride = (1, 2) meta_unet = [] if inter_connect == "add": inter_c = c c_begin = 64 elif inter_connect == "cat": inter_c = c*2 c_begin = 64*2 else: raise Exception("Skip connections only support add or concatenate operation") meta_unet.append( En_unet_module(c_begin, c, k1, k2, intra_connect, norm2d_type, scale=1, de_flag=True)) meta_unet.append( En_unet_module(inter_c, c, k1, k2, intra_connect, norm2d_type, scale=2, de_flag=True)) meta_unet.append( En_unet_module(inter_c, c, k1, k2, intra_connect, norm2d_type, scale=3, de_flag=True)) meta_unet.append( En_unet_module(inter_c, c, k1, k2, intra_connect, norm2d_type, scale=4, de_flag=True)) meta_unet.append(nn.Sequential( GateConvTranspose2d(inter_c, c, kernel_end, stride), 
NormSwitch(norm2d_type, "2D", c), nn.PReLU(c))) self.meta_unet_list = nn.ModuleList(meta_unet) self.out_r = nn.Sequential( nn.Conv2d(c, 1, (1, 1), (1, 1)), nn.Linear(fft_num//2+1, fft_num//2+1)) self.out_i = nn.Sequential( nn.Conv2d(c, 1, (1, 1), (1, 1)), nn.Linear(fft_num//2+1, fft_num//2+1)) def forward(self, inpt: Tensor, x: Tensor, en_list: list): """ inpt: (B,T,F,M,2) return: (B,T,F,M,2) """ b_size, seq_len, freq_num, M, _ = inpt.shape if self.inter_connect == "add": for i in range(len(self.meta_unet_list)): tmp = x + en_list[-(i+1)] x = self.meta_unet_list[i](tmp) elif self.inter_connect == "cat": for i in range(len(self.meta_unet_list)): tmp = torch.cat((x, en_list[-(i+1)]), dim=1) x = self.meta_unet_list[i](tmp) else: raise Exception("only add and cat are supported") # output if self.out_type == "mask": gain = torch.stack((self.out_r(x).squeeze(dim=1), self.out_i(x).squeeze(dim=1)), dim=-1) # (B,T,F,2) ref_inpt = inpt[..., 0, :] # (B,T,F,2) Yy = complex_mul(inpt, complex_conj(ref_inpt[..., None, :])) # (B,T,F,M,2) out = complex_mul(complex_conj(gain[..., None, :]), Yy) # (B,T,F,M,2) elif self.out_type == "mapping": map = torch.stack((self.out_r(x).squeeze(dim=1), self.out_i(x).squeeze(dim=1)), dim=-1) # (B,T,F,2) out = complex_mul(inpt, complex_conj(map[..., None, :])) # (B,T,F,M,2) else: raise Exception("only mask and mapping are supported") return out class En_unet_module(nn.Module): def __init__(self, cin: int, cout: int, k1: tuple, k2: tuple, intra_connect: str, norm2d_type: str, scale: int, de_flag: bool = False, ): super(En_unet_module, self).__init__() self.cin = cin self.cout = cout self.k1 = k1 self.k2 = k2 self.intra_connect = intra_connect self.norm2d_type = norm2d_type self.scale = scale self.de_flag = de_flag in_conv_list = [] if de_flag is False: in_conv_list.append(GateConv2d(cin, cout, k1, (1, 2), (0, 0, k1[0]-1, 0))) else: in_conv_list.append(GateConvTranspose2d(cin, cout, k1, (1, 2))) in_conv_list.append(NormSwitch(norm2d_type, "2D", 
cout)) in_conv_list.append(nn.PReLU(cout)) self.in_conv = nn.Sequential(*in_conv_list) enco_list, deco_list = [], [] for _ in range(scale): enco_list.append(Conv2dunit(k2, cout, norm2d_type)) for i in range(scale): if i == 0: deco_list.append(Deconv2dunit(k2, cout, "add", norm2d_type)) else: deco_list.append(Deconv2dunit(k2, cout, intra_connect, norm2d_type)) self.enco = nn.ModuleList(enco_list) self.deco = nn.ModuleList(deco_list) self.skip_connect = Skip_connect(intra_connect) def forward(self, inputs: Tensor) -> Tensor: x_resi = self.in_conv(inputs) x = x_resi x_list = [] for i in range(len(self.enco)): x = self.enco[i](x) x_list.append(x) for i in range(len(self.deco)): if i == 0: x = self.deco[i](x) else: x_con = self.skip_connect(x, x_list[-(i+1)]) x = self.deco[i](x_con) x_resi = x_resi + x del x_list return x_resi class Conv2dunit(nn.Module): def __init__(self, k: tuple, c: int, norm2d_type: str, ): super(Conv2dunit, self).__init__() self.k, self.c = k, c self.norm2d_type = norm2d_type k_t = k[0] stride = (1, 2) if k_t > 1: self.conv = nn.Sequential( nn.ConstantPad2d((0, 0, k_t-1, 0), value=0.), nn.Conv2d(c, c, k, stride), NormSwitch(norm2d_type, "2D", c), nn.PReLU(c) ) else: self.conv = nn.Sequential( nn.Conv2d(c, c, k, stride), NormSwitch(norm2d_type, "2D", c), nn.PReLU(c) ) def forward(self, inputs: Tensor) -> Tensor: return self.conv(inputs) class Deconv2dunit(nn.Module): def __init__(self, k: tuple, c: int, intra_connect: str, norm2d_type: str, ): super(Deconv2dunit, self).__init__() self.k, self.c = k, c self.intra_connect = intra_connect self.norm2d_type = norm2d_type k_t = k[0] stride = (1, 2) deconv_list = [] if self.intra_connect == "add": if k_t > 1: deconv_list.append(nn.ConvTranspose2d(c, c, k, stride)), deconv_list.append(Chomp_T(k_t-1)) else: deconv_list.append(nn.ConvTranspose2d(c, c, k, stride)) elif self.intra_connect == "cat": if k_t > 1: deconv_list.append(nn.ConvTranspose2d(2*c, c, k, stride)) deconv_list.append(Chomp_T(k_t-1)) else: 
deconv_list.append(nn.ConvTranspose2d(2*c, c, k, stride)) deconv_list.append(NormSwitch(norm2d_type, "2D", c)) deconv_list.append(nn.PReLU(c)) self.deconv = nn.Sequential(*deconv_list) def forward(self, inputs: Tensor) -> Tensor: assert inputs.dim() == 4 return self.deconv(inputs) class GateConv2d(nn.Module): def __init__(self, in_channels: int, out_channels: int, kernel_size: tuple, stride: tuple, padding: tuple, ): super(GateConv2d, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.stride = stride self.padding = padding k_t = kernel_size[0] if k_t > 1: self.conv = nn.Sequential( nn.ConstantPad2d(padding, value=0.), nn.Conv2d(in_channels=in_channels, out_channels=out_channels*2, kernel_size=kernel_size, stride=stride)) else: self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels*2, kernel_size=kernel_size, stride=stride) def forward(self, inputs: Tensor) -> Tensor: if inputs.dim() == 3: inputs = inputs.unsqueeze(dim=1) x = self.conv(inputs) outputs, gate = x.chunk(2, dim=1) return outputs * gate.sigmoid() class GateConvTranspose2d(nn.Module): def __init__(self, in_channels: int, out_channels: int, kernel_size: tuple, stride: tuple, ): super(GateConvTranspose2d, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.stride = stride k_t = kernel_size[0] if k_t > 1: self.conv = nn.Sequential( nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels*2, kernel_size=kernel_size, stride=stride), Chomp_T(k_t-1)) else: self.conv = nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels*2, kernel_size=kernel_size, stride=stride) def forward(self, inputs: Tensor) -> Tensor: assert inputs.dim() == 4 x = self.conv(inputs) outputs, gate = x.chunk(2, dim=1) return outputs * gate.sigmoid() class Skip_connect(nn.Module): def __init__(self, connect): super(Skip_connect, self).__init__() self.connect = connect 
def forward(self, x_main, x_aux): if self.connect == "add": x = x_main + x_aux elif self.connect == "cat": x = torch.cat((x_main, x_aux), dim=1) return x class TCMGroup(nn.Module): def __init__(self, kd1: int, cd1: int, d_feat: int, is_gate: bool, dilations: list, is_causal: bool, norm1d_type: str, ): super(TCMGroup, self).__init__() self.kd1 = kd1 self.cd1 = cd1 self.d_feat = d_feat self.is_gate = is_gate self.dilations = dilations self.is_causal = is_causal self.norm1d_type = norm1d_type tcm_list = [] for i in range(len(dilations)): tcm_list.append(SqueezedTCM(kd1, cd1, dilation=dilations[i], d_feat=d_feat, is_gate=is_gate, is_causal=is_causal, norm1d_type=norm1d_type)) self.tcm_list = nn.ModuleList(tcm_list) def forward(self, inputs: Tensor) -> Tensor: x = inputs for i in range(len(self.dilations)): x = self.tcm_list[i](x) return x class SqueezedTCM(nn.Module): def __init__(self, kd1: int, cd1: int, dilation: int, d_feat: int, is_gate: bool, is_causal: bool, norm1d_type: str, ): super(SqueezedTCM, self).__init__() self.kd1 = kd1 self.cd1 = cd1 self.dilation = dilation self.d_feat = d_feat self.is_gate = is_gate self.is_causal = is_causal self.norm1d_type = norm1d_type self.in_conv = nn.Conv1d(d_feat, cd1, kernel_size=1, bias=False) if is_causal: pad = ((kd1-1)*dilation, 0) else: pad = ((kd1-1)*dilation//2, (kd1-1)*dilation//2) self.left_conv = nn.Sequential( nn.PReLU(cd1), NormSwitch(norm1d_type, "1D", cd1), nn.ConstantPad1d(pad, value=0.), nn.Conv1d(cd1, cd1, kernel_size=kd1, dilation=dilation, bias=False) ) if is_gate: self.right_conv = nn.Sequential( nn.PReLU(cd1), NormSwitch(norm1d_type, "1D", cd1), nn.ConstantPad1d(pad, value=0.), nn.Conv1d(cd1, cd1, kernel_size=kd1, dilation=dilation, bias=False), nn.Sigmoid() ) self.out_conv = nn.Sequential( nn.PReLU(cd1), NormSwitch(norm1d_type, "1D", cd1), nn.Conv1d(cd1, d_feat, kernel_size=1, bias=False) ) def forward(self, inputs: Tensor) -> Tensor: resi = inputs x = self.in_conv(inputs) if self.is_gate: x = 
self.left_conv(x) * self.right_conv(x) else: x = self.left_conv(x) x = self.out_conv(x) x = x + resi return x class Chomp_T(nn.Module): def __init__(self, t: int): super(Chomp_T, self).__init__() self.t = t def forward(self, x): return x[:, :, :-self.t, :] if __name__ == "__main__": net = GeneralizedMultichannelWienerFiter(k1=[2,3], k2=[1,3], c=64, M=6, fft_num=320, hid_node=64, kd1=5, cd1=64, d_feat=256, group_num=2, is_gate=True, dilations=[1,2,5,9], is_causal=True, is_u2=True, rnn_type="LSTM", norm1d_type="BN", norm2d_type="BN", intra_connect="cat", inter_connect="cat", out_type="mask", ).cuda() from utils.utils import numParams print(f"The number of trainable parameters:{numParams(net)}") import ptflops flops, macs = ptflops.get_model_complexity_info(net, (101,161,6,2)) x = torch.rand([2,51,161,6,2]).cuda() y = net(x) print(f"{x.shape}->{y.shape}")
import torch import torch.nn as nn from torch import Tensor from torch.autograd import Variable from torch_complex.tensor import ComplexTensor import torch_complex.functional as F from utils.utils import complex_mul, complex_conj, NormSwitch class GeneralizedMultichannelWienerFiter(nn.Module): def __init__(self, k1: list, k2: list, c: int, M: int, fft_num: int, hid_node: int, kd1: int, cd1: int, d_feat: int, group_num: int, is_gate: bool, dilations: list, is_causal: bool, is_u2: bool, rnn_type: str, norm1d_type: str, norm2d_type: str, intra_connect: str, inter_connect: str, out_type: str, ): super(GeneralizedMultichannelWienerFiter, self).__init__() self.k1 = tuple(k1) self.k2 = tuple(k2) self.c = c self.M = M self.fft_num = fft_num self.hid_node = hid_node self.kd1 = kd1 self.cd1 = cd1 self.d_feat = d_feat self.group_num = group_num self.is_gate = is_gate self.dilations = dilations self.is_causal = is_causal self.is_u2 = is_u2 self.rnn_type = rnn_type self.norm1d_type = norm1d_type self.norm2d_type = norm2d_type self.intra_connect = intra_connect self.inter_connect = inter_connect self.out_type = out_type # Components # inv module self.inv_module = NeuralInvModule(M, hid_node, out_type, rnn_type) if is_u2: self.en = U2Net_Encoder(2*M, self.k1, self.k2, c, intra_connect, norm2d_type) self.de = U2Net_Decoder(c, self.k1, self.k2, fft_num, intra_connect, inter_connect, norm2d_type, out_type) else: self.en = UNet_Encoder(2*M, self.k1, c, norm2d_type) self.de = UNet_Decoder(c, self.k1, fft_num, inter_connect, norm2d_type, out_type) tcn_list = [] for i in range(group_num): tcn_list.append(TCMGroup(kd1, cd1, d_feat, is_gate, dilations, is_causal, norm1d_type)) self.tcns = nn.ModuleList(tcn_list) def forward(self, inpt): """ inpt: (B,T,F,M,2) """ inv_Phi_yy = self.inv_module(inpt) # (B,T,F,M,M,2) b_size, seq_len, freq_num, M, _ = inpt.shape inpt1 = inpt.view(b_size, seq_len, freq_num, -1).permute(0,3,1,2).contiguous() en_x, en_list = self.en(inpt1) en_x = 
en_x.transpose(-2, -1).contiguous().view(b_size, -1, seq_len) acc_x = Variable(torch.zeros_like(en_x), requires_grad=True).to(en_x.device) x = en_x for i in range(len(self.tcns)): x = self.tcns[i](x) acc_x = acc_x + x x = acc_x x = x.view(b_size, 64, 4, seq_len).transpose(-2, -1).contiguous() Vec_Ys = self.de(inpt, x, en_list) # (B,T,F,M,2) # derive wiener filter inpt_complex = ComplexTensor(inpt[...,0], inpt[...,-1]) # (B,T,F,M) inv_Phi_yy_complex = ComplexTensor(inv_Phi_yy[...,0], inv_Phi_yy[...,-1]) Vec_Ys_complex = ComplexTensor(Vec_Ys[...,0], Vec_Ys[...,-1]) mcwf_bf_complex = F.einsum("...mn,...p->...m", [inv_Phi_yy_complex, Vec_Ys_complex]) # (B,T,F,M) bf_x_complex = F.einsum("...m,...n->...", [mcwf_bf_complex.conj(), inpt_complex]) bf_x = torch.stack((bf_x_complex.real, bf_x_complex.imag), dim=-1) # (B,T,F,2) return bf_x class NeuralInvModule(nn.Module): def __init__(self, M: int, hid_node: int, out_type: str, rnn_type: str, ): super(NeuralInvModule, self).__init__() self.M = M self.hid_node = hid_node self.out_type = out_type self.rnn_type = rnn_type # Components inpt_dim = 2*M*M self.norm = nn.LayerNorm([inpt_dim]) self.rnn = getattr(nn, rnn_type)(input_size=inpt_dim, hidden_size=hid_node, num_layers=2) self.w_dnn = nn.Sequential( nn.Linear(hid_node, hid_node), nn.ReLU(True), nn.Linear(hid_node, inpt_dim)) def forward(self, inpt): """ inpt: (B,T,F,M,2) return: (B,T,F,M,M,2) """ b_size, seq_len, freq_num, M, _ = inpt.shape inpt_complex = ComplexTensor(inpt[...,0], inpt[...,-1]) # (B,T,F,M) inpt_cov = F.einsum("...m,...n->...mn", [inpt_complex.conj(), inpt_complex]) # (B,T,F,M,M) inpt_cov = inpt_cov.view(b_size, seq_len, freq_num, -1) inpt_cov = torch.cat((inpt_cov.real, inpt_cov.imag), dim=-1) # (B,T,F,2MM) inpt_cov = self.norm(inpt_cov) inpt_cov = inpt_cov.transpose(1,2).contiguous().view(b_size*freq_num, seq_len, -1) h, _ = self.rnn(inpt_cov) inv_cov = self.w_dnn(h) # (BF,T,2MM) inv_cov = inv_cov.view(b_size, freq_num, seq_len, M, M, 2) return 
inv_cov.transpose(1, 2).contiguous() class UNet_Encoder(nn.Module): def __init__(self, cin: int, k1: tuple, c: int, norm2d_type: str, ): super(UNet_Encoder, self).__init__() self.cin = cin self.k1 = k1 self.c = c self.norm2d_type = norm2d_type kernel_begin = (k1[0], 5) stride = (1, 2) c_final = 64 unet = [] unet.append(nn.Sequential( GateConv2d(cin, c, kernel_begin, stride, padding=(0, 0, k1[0]-1, 0)), NormSwitch(norm2d_type, "2D", c), nn.PReLU(c))) unet.append(nn.Sequential( GateConv2d(c, c, k1, stride, padding=(0, 0, k1[0]-1, 0)), NormSwitch(norm2d_type, "2D", c), nn.PReLU(c))) unet.append(nn.Sequential( GateConv2d(c, c, k1, stride, padding=(0, 0, k1[0]-1, 0)), NormSwitch(norm2d_type, "2D", c), nn.PReLU(c))) unet.append(nn.Sequential( GateConv2d(c, c, k1, stride, padding=(0, 0, k1[0]-1, 0)), NormSwitch(norm2d_type, "2D", c), nn.PReLU(c))) unet.append(nn.Sequential( GateConv2d(c, c_final, k1, (1,2), padding=(0, 0, k1[0]-1, 0)), NormSwitch(norm2d_type, "2D", c_final), nn.PReLU(c_final))) self.unet_list = nn.ModuleList(unet) def forward(self, x: Tensor) -> tuple: en_list = [] for i in range(len(self.unet_list)): x = self.unet_list[i](x) en_list.append(x) return x, en_list class UNet_Decoder(nn.Module): def __init__(self, c: int, k1: tuple, fft_num: int, inter_connect: str, norm2d_type: str, out_type: str, ): super(UNet_Decoder, self).__init__() self.k1 = k1 self.c = c self.fft_num = fft_num self.inter_connect = inter_connect self.norm2d_type = norm2d_type self.out_type = out_type kernel_end = (k1[0], 5) stride = (1, 2) unet = [] if inter_connect == "add": inter_c = c c_begin = 64 elif inter_connect == "cat": inter_c = c*2 c_begin = 64*2 else: raise RuntimeError("Skip connections only support add or concatenate operation") unet.append(nn.Sequential( GateConvTranspose2d(c_begin, c, k1, stride), NormSwitch(norm2d_type, "2D", c), nn.PReLU(c))) unet.append(nn.Sequential( GateConvTranspose2d(inter_c, c, k1, stride), NormSwitch(norm2d_type, "2D", c), nn.PReLU(c))) 
unet.append(nn.Sequential( GateConvTranspose2d(inter_c, c, k1, stride), NormSwitch(norm2d_type, "2D", c), nn.PReLU(c))) unet.append(nn.Sequential( GateConvTranspose2d(inter_c, c, k1, stride), NormSwitch(norm2d_type, "2D", c), nn.PReLU(c))) unet.append(nn.Sequential( GateConvTranspose2d(inter_c, c, kernel_end, stride), NormSwitch(norm2d_type, "2D", c), nn.PReLU(c))) self.unet_list = nn.ModuleList(unet) self.out_r = nn.Sequential( nn.Conv2d(c, 1, (1,1), (1,1)), nn.Linear(fft_num//2+1, fft_num//2+1)) self.out_i = nn.Sequential( nn.Conv2d(c, 1, (1,1), (1,1)), nn.Linear(fft_num//2+1, fft_num//2+1)) def forward(self, inpt: Tensor, x: Tensor, en_list: list): """ inpt: (B,T,F,M,2) return: (B,T,F,M,2) """ b_size, seq_len, freq_num, _, _ = inpt.shape if self.inter_connect == "add": for i in range(len(self.unet_list)): tmp = x + en_list[-(i + 1)] x = self.unet_list[i](tmp) elif self.inter_connect == "cat": for i in range(len(self.unet_list)): tmp = torch.cat((x, en_list[-(i + 1)]), dim=1) x = self.unet_list[i](tmp) else: raise Exception("only add and cat are supported") # output if self.out_type == "mask": gain = torch.stack((self.out_r(x).squeeze(dim=1), self.out_i(x).squeeze(dim=1)), dim=-1) # (B,T,F,2) ref_inpt = inpt[...,0,:] # (B,T,F,2) Yy = complex_mul(inpt, complex_conj(ref_inpt[...,None,:])) # (B,T,F,M,2) out = complex_mul(complex_conj(gain[...,None,:]), Yy) # (B,T,F,M,2) elif self.out_type == "mapping": map = torch.stack((self.out_r(x).squeeze(dim=1), self.out_i(x).squeeze(dim=1)), dim=-1) # (B,T,F,2) out = complex_mul(inpt, complex_conj(map[...,None,:])) # (B,T,F,M,2) else: raise Exception("only mask and mapping are supported") return out class U2Net_Encoder(nn.Module): def __init__(self, cin: int, k1: tuple, k2: tuple, c: int, intra_connect: str, norm2d_type: str, ): super(U2Net_Encoder, self).__init__() self.cin = cin self.k1 = k1 self.k2 = k2 self.c = c self.intra_connect = intra_connect self.norm2d_type = norm2d_type c_last = 64 kernel_begin = (k1[0], 5) stride 
= (1, 2) meta_unet = [] meta_unet.append( En_unet_module(cin, c, kernel_begin, k2, intra_connect, norm2d_type, scale=4, de_flag=False)) meta_unet.append( En_unet_module(c, c, k1, k2, intra_connect, norm2d_type, scale=3, de_flag=False)) meta_unet.append( En_unet_module(c, c, k1, k2, intra_connect, norm2d_type, scale=2, de_flag=False)) meta_unet.append( En_unet_module(c, c, k1, k2, intra_connect, norm2d_type, scale=1, de_flag=False)) self.meta_unet_list = nn.ModuleList(meta_unet) self.last_conv = nn.Sequential( GateConv2d(c, c_last, k1, stride, (0, 0, k1[0]-1, 0)), NormSwitch(norm2d_type, "2D", c_last), nn.PReLU(c_last) ) def forward(self, x: Tensor) -> tuple: en_list = [] for i in range(len(self.meta_unet_list)): x = self.meta_unet_list[i](x) en_list.append(x) x = self.last_conv(x) en_list.append(x) return x, en_list class U2Net_Decoder(nn.Module): def __init__(self, c: int, k1: tuple, k2: tuple, fft_num: int, intra_connect: str, inter_connect: str, norm2d_type: str, out_type: str, ): super(U2Net_Decoder, self).__init__() self.c = c self.k1 = k1 self.k2 = k2 self.fft_num = fft_num self.intra_connect = intra_connect self.inter_connect = inter_connect self.norm2d_type = norm2d_type self.out_type = out_type kernel_end = (k1[0], 5) stride = (1, 2) meta_unet = [] if inter_connect == "add": inter_c = c c_begin = 64 elif inter_connect == "cat": inter_c = c*2 c_begin = 64*2 else: raise Exception("Skip connections only support add or concatenate operation") meta_unet.append( En_unet_module(c_begin, c, k1, k2, intra_connect, norm2d_type, scale=1, de_flag=True)) meta_unet.append( En_unet_module(inter_c, c, k1, k2, intra_connect, norm2d_type, scale=2, de_flag=True)) meta_unet.append( En_unet_module(inter_c, c, k1, k2, intra_connect, norm2d_type, scale=3, de_flag=True)) meta_unet.append( En_unet_module(inter_c, c, k1, k2, intra_connect, norm2d_type, scale=4, de_flag=True)) meta_unet.append(nn.Sequential( GateConvTranspose2d(inter_c, c, kernel_end, stride), 
NormSwitch(norm2d_type, "2D", c), nn.PReLU(c))) self.meta_unet_list = nn.ModuleList(meta_unet) self.out_r = nn.Sequential( nn.Conv2d(c, 1, (1, 1), (1, 1)), nn.Linear(fft_num//2+1, fft_num//2+1)) self.out_i = nn.Sequential( nn.Conv2d(c, 1, (1, 1), (1, 1)), nn.Linear(fft_num//2+1, fft_num//2+1)) def forward(self, inpt: Tensor, x: Tensor, en_list: list): """ inpt: (B,T,F,M,2) return: (B,T,F,M,2) """ b_size, seq_len, freq_num, M, _ = inpt.shape if self.inter_connect == "add": for i in range(len(self.meta_unet_list)): tmp = x + en_list[-(i+1)] x = self.meta_unet_list[i](tmp) elif self.inter_connect == "cat": for i in range(len(self.meta_unet_list)): tmp = torch.cat((x, en_list[-(i+1)]), dim=1) x = self.meta_unet_list[i](tmp) else: raise Exception("only add and cat are supported") # output if self.out_type == "mask": gain = torch.stack((self.out_r(x).squeeze(dim=1), self.out_i(x).squeeze(dim=1)), dim=-1) # (B,T,F,2) ref_inpt = inpt[..., 0, :] # (B,T,F,2) Yy = complex_mul(inpt, complex_conj(ref_inpt[..., None, :])) # (B,T,F,M,2) out = complex_mul(complex_conj(gain[..., None, :]), Yy) # (B,T,F,M,2) elif self.out_type == "mapping": map = torch.stack((self.out_r(x).squeeze(dim=1), self.out_i(x).squeeze(dim=1)), dim=-1) # (B,T,F,2) out = complex_mul(inpt, complex_conj(map[..., None, :])) # (B,T,F,M,2) else: raise Exception("only mask and mapping are supported") return out class En_unet_module(nn.Module): def __init__(self, cin: int, cout: int, k1: tuple, k2: tuple, intra_connect: str, norm2d_type: str, scale: int, de_flag: bool = False, ): super(En_unet_module, self).__init__() self.cin = cin self.cout = cout self.k1 = k1 self.k2 = k2 self.intra_connect = intra_connect self.norm2d_type = norm2d_type self.scale = scale self.de_flag = de_flag in_conv_list = [] if de_flag is False: in_conv_list.append(GateConv2d(cin, cout, k1, (1, 2), (0, 0, k1[0]-1, 0))) else: in_conv_list.append(GateConvTranspose2d(cin, cout, k1, (1, 2))) in_conv_list.append(NormSwitch(norm2d_type, "2D", 
cout)) in_conv_list.append(nn.PReLU(cout)) self.in_conv = nn.Sequential(*in_conv_list) enco_list, deco_list = [], [] for _ in range(scale): enco_list.append(Conv2dunit(k2, cout, norm2d_type)) for i in range(scale): if i == 0: deco_list.append(Deconv2dunit(k2, cout, "add", norm2d_type)) else: deco_list.append(Deconv2dunit(k2, cout, intra_connect, norm2d_type)) self.enco = nn.ModuleList(enco_list) self.deco = nn.ModuleList(deco_list) self.skip_connect = Skip_connect(intra_connect) def forward(self, inputs: Tensor) -> Tensor: x_resi = self.in_conv(inputs) x = x_resi x_list = [] for i in range(len(self.enco)): x = self.enco[i](x) x_list.append(x) for i in range(len(self.deco)): if i == 0: x = self.deco[i](x) else: x_con = self.skip_connect(x, x_list[-(i+1)]) x = self.deco[i](x_con) x_resi = x_resi + x del x_list return x_resi class Conv2dunit(nn.Module): def __init__(self, k: tuple, c: int, norm2d_type: str, ): super(Conv2dunit, self).__init__() self.k, self.c = k, c self.norm2d_type = norm2d_type k_t = k[0] stride = (1, 2) if k_t > 1: self.conv = nn.Sequential( nn.ConstantPad2d((0, 0, k_t-1, 0), value=0.), nn.Conv2d(c, c, k, stride), NormSwitch(norm2d_type, "2D", c), nn.PReLU(c) ) else: self.conv = nn.Sequential( nn.Conv2d(c, c, k, stride), NormSwitch(norm2d_type, "2D", c), nn.PReLU(c) ) def forward(self, inputs: Tensor) -> Tensor: return self.conv(inputs) class Deconv2dunit(nn.Module): def __init__(self, k: tuple, c: int, intra_connect: str, norm2d_type: str, ): super(Deconv2dunit, self).__init__() self.k, self.c = k, c self.intra_connect = intra_connect self.norm2d_type = norm2d_type k_t = k[0] stride = (1, 2) deconv_list = [] if self.intra_connect == "add": if k_t > 1: deconv_list.append(nn.ConvTranspose2d(c, c, k, stride)), deconv_list.append(Chomp_T(k_t-1)) else: deconv_list.append(nn.ConvTranspose2d(c, c, k, stride)) elif self.intra_connect == "cat": if k_t > 1: deconv_list.append(nn.ConvTranspose2d(2*c, c, k, stride)) deconv_list.append(Chomp_T(k_t-1)) else: 
deconv_list.append(nn.ConvTranspose2d(2*c, c, k, stride)) deconv_list.append(NormSwitch(norm2d_type, "2D", c)) deconv_list.append(nn.PReLU(c)) self.deconv = nn.Sequential(*deconv_list) def forward(self, inputs: Tensor) -> Tensor: assert inputs.dim() == 4 return self.deconv(inputs) class GateConv2d(nn.Module): def __init__(self, in_channels: int, out_channels: int, kernel_size: tuple, stride: tuple, padding: tuple, ): super(GateConv2d, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.stride = stride self.padding = padding k_t = kernel_size[0] if k_t > 1: self.conv = nn.Sequential( nn.ConstantPad2d(padding, value=0.), nn.Conv2d(in_channels=in_channels, out_channels=out_channels*2, kernel_size=kernel_size, stride=stride)) else: self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels*2, kernel_size=kernel_size, stride=stride) def forward(self, inputs: Tensor) -> Tensor: if inputs.dim() == 3: inputs = inputs.unsqueeze(dim=1) x = self.conv(inputs) outputs, gate = x.chunk(2, dim=1) return outputs * gate.sigmoid() class GateConvTranspose2d(nn.Module): def __init__(self, in_channels: int, out_channels: int, kernel_size: tuple, stride: tuple, ): super(GateConvTranspose2d, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.stride = stride k_t = kernel_size[0] if k_t > 1: self.conv = nn.Sequential( nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels*2, kernel_size=kernel_size, stride=stride), Chomp_T(k_t-1)) else: self.conv = nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels*2, kernel_size=kernel_size, stride=stride) def forward(self, inputs: Tensor) -> Tensor: assert inputs.dim() == 4 x = self.conv(inputs) outputs, gate = x.chunk(2, dim=1) return outputs * gate.sigmoid() class Skip_connect(nn.Module): def __init__(self, connect): super(Skip_connect, self).__init__() self.connect = connect 
def forward(self, x_main, x_aux): if self.connect == "add": x = x_main + x_aux elif self.connect == "cat": x = torch.cat((x_main, x_aux), dim=1) return x class TCMGroup(nn.Module): def __init__(self, kd1: int, cd1: int, d_feat: int, is_gate: bool, dilations: list, is_causal: bool, norm1d_type: str, ): super(TCMGroup, self).__init__() self.kd1 = kd1 self.cd1 = cd1 self.d_feat = d_feat self.is_gate = is_gate self.dilations = dilations self.is_causal = is_causal self.norm1d_type = norm1d_type tcm_list = [] for i in range(len(dilations)): tcm_list.append(SqueezedTCM(kd1, cd1, dilation=dilations[i], d_feat=d_feat, is_gate=is_gate, is_causal=is_causal, norm1d_type=norm1d_type)) self.tcm_list = nn.ModuleList(tcm_list) def forward(self, inputs: Tensor) -> Tensor: x = inputs for i in range(len(self.dilations)): x = self.tcm_list[i](x) return x class SqueezedTCM(nn.Module): def __init__(self, kd1: int, cd1: int, dilation: int, d_feat: int, is_gate: bool, is_causal: bool, norm1d_type: str, ): super(SqueezedTCM, self).__init__() self.kd1 = kd1 self.cd1 = cd1 self.dilation = dilation self.d_feat = d_feat self.is_gate = is_gate self.is_causal = is_causal self.norm1d_type = norm1d_type self.in_conv = nn.Conv1d(d_feat, cd1, kernel_size=1, bias=False) if is_causal: pad = ((kd1-1)*dilation, 0) else: pad = ((kd1-1)*dilation//2, (kd1-1)*dilation//2) self.left_conv = nn.Sequential( nn.PReLU(cd1), NormSwitch(norm1d_type, "1D", cd1), nn.ConstantPad1d(pad, value=0.), nn.Conv1d(cd1, cd1, kernel_size=kd1, dilation=dilation, bias=False) ) if is_gate: self.right_conv = nn.Sequential( nn.PReLU(cd1), NormSwitch(norm1d_type, "1D", cd1), nn.ConstantPad1d(pad, value=0.), nn.Conv1d(cd1, cd1, kernel_size=kd1, dilation=dilation, bias=False), nn.Sigmoid() ) self.out_conv = nn.Sequential( nn.PReLU(cd1), NormSwitch(norm1d_type, "1D", cd1), nn.Conv1d(cd1, d_feat, kernel_size=1, bias=False) ) def forward(self, inputs: Tensor) -> Tensor: resi = inputs x = self.in_conv(inputs) if self.is_gate: x = 
self.left_conv(x) * self.right_conv(x) else: x = self.left_conv(x) x = self.out_conv(x) x = x + resi return x class Chomp_T(nn.Module): def __init__(self, t: int): super(Chomp_T, self).__init__() self.t = t def forward(self, x): return x[:, :, :-self.t, :] if __name__ == "__main__": net = GeneralizedMultichannelWienerFiter(k1=[2,3], k2=[1,3], c=64, M=6, fft_num=320, hid_node=64, kd1=5, cd1=64, d_feat=256, group_num=2, is_gate=True, dilations=[1,2,5,9], is_causal=True, is_u2=True, rnn_type="LSTM", norm1d_type="BN", norm2d_type="BN", intra_connect="cat", inter_connect="cat", out_type="mask", ).cuda() from utils.utils import numParams print(f"The number of trainable parameters:{numParams(net)}") import ptflops flops, macs = ptflops.get_model_complexity_info(net, (101,161,6,2)) x = torch.rand([2,51,161,6,2]).cuda() y = net(x) print(f"{x.shape}->{y.shape}")
en
0.580407
# Components # inv module inpt: (B,T,F,M,2) # (B,T,F,M,M,2) # (B,T,F,M,2) # derive wiener filter # (B,T,F,M) # (B,T,F,M) # (B,T,F,2) # Components inpt: (B,T,F,M,2) return: (B,T,F,M,M,2) # (B,T,F,M) # (B,T,F,M,M) # (B,T,F,2MM) # (BF,T,2MM) inpt: (B,T,F,M,2) return: (B,T,F,M,2) # output # (B,T,F,2) # (B,T,F,2) # (B,T,F,M,2) # (B,T,F,M,2) # (B,T,F,2) # (B,T,F,M,2) inpt: (B,T,F,M,2) return: (B,T,F,M,2) # output # (B,T,F,2) # (B,T,F,2) # (B,T,F,M,2) # (B,T,F,M,2) # (B,T,F,2) # (B,T,F,M,2)
2.134341
2
atm90e36a/registers.py
aradian/pipdu_host
0
6623031
<reponame>aradian/pipdu_host<filename>atm90e36a/registers.py # Status and Special Register SoftReset = 0x00 SysStatus0 = 0x01 SysStatus1 = 0x02 FuncEn0 = 0x03 FuncEn1 = 0x04 ZXConfig = 0x07 SagTh = 0x08 PhaseLossTh = 0x09 INWarnTh0 = 0x0A INWarnTh1 = 0x0B THDNUTh = 0x0C THDNITh = 0x0D DMACtrl = 0x0E LastSPIData = 0x0F # Low Power Mode Register DetectCtrl = 0x10 DetectTh1 = 0x11 DetectTh2 = 0x12 DetectTh3 = 0x13 PMOffsetA = 0x14 PMOffsetB = 0x15 PMOffsetC = 0x16 PMPGA = 0x17 PMIrmsA = 0x18 PMIrmsB = 0x19 PMIrmsC = 0x1A PMConfig = 0x1B PMAvgSamples = 0x1C PMIrmsLSB = 0x1D # Configuration Registers ConfigStart = 0x30 PLconstH = 0x31 PLconstL = 0x32 MMode0 = 0x33 MMode1 = 0x34 PStartTh = 0x35 QStartTh = 0x36 SStartTh = 0x37 PPhaseTh = 0x38 QPhaseTh = 0x39 SPhaseTh = 0x3A CS0 = 0x3B # Calibration Registers CalStart = 0x40 PoffsetA = 0x41 QoffsetA = 0x42 PoffsetB = 0x43 QoffsetB = 0x44 PoffsetC = 0x45 QoffsetC = 0x46 GainA = 0x47 PhiA = 0x48 GainB = 0x49 PhiB = 0x4A GainC = 0x4B PhiC = 0x4C CS1 = 0x4D # Fundamental/Harmonic Energy Calibration regis ters HarmStart = 0x50 PoffsetAF = 0x51 PoffsetBF = 0x52 PoffsetCF = 0x53 PgainAF = 0x54 PgainBF = 0x55 PgainCF = 0x56 CS2 = 0x57 # Measurement Calibration AdjStart = 0x60 UgainA = 0x61 IgainA = 0x62 UoffsetA = 0x63 IoffsetA = 0x64 UgainB = 0x65 IgainB = 0x66 UoffsetB = 0x67 IoffsetB = 0x68 UgainC = 0x69 IgainC = 0x6A UoffsetC = 0x6B IoffsetC = 0x6C IgainN = 0x6D IoffsetN = 0x6E CS3 = 0x6F # Energy Register APenergyT = 0x80 APenergyA = 0x81 APenergyB = 0x82 APenergyC = 0x83 ANenergyT = 0x84 ANenergyA = 0x85 ANenergyB = 0x86 ANenergyC = 0x87 RPenergyT = 0x88 RPenergyA = 0x89 RPenergyB = 0x8A RPenergyC = 0x8B RNenergyT = 0x8C RNenergyA = 0x8D RNenergyB = 0x8E RNenergyC = 0x8F SAenergyT = 0x90 SenergyA = 0x91 SenergyB = 0x92 SenergyC = 0x93 SVenergyT = 0x94 EnStatus0 = 0x95 EnStatus1 = 0x96 SVmeanT = 0x98 SVmeanTLSB = 0x99 # Fundamental / Harmonic Energy Register APenergyTF = 0xA0 APenergyAF = 0xA1 APenergyBF = 0xA2 APenergyCF = 
0xA3 ANenergyTF = 0xA4 ANenergyAF = 0xA5 ANenergyBF = 0xA6 ANenergyCF = 0xA7 APenergyTH = 0xA8 APenergyAH = 0xA9 APenergyBH = 0xAA APenergyCH = 0xAB ANenergyTH = 0xAC ANenergyAH = 0xAD ANenergyBH = 0xAE ANenergyCH = 0xAF # Power and Power Factor Registers PmeanT = 0xB0 PmeanA = 0xB1 PmeanB = 0xB2 PmeanC = 0xB3 QmeanT = 0xB4 QmeanA = 0xB5 QmeanB = 0xB6 QmeanC = 0xB7 SAmeanT = 0xB8 SmeanA = 0xB9 SmeanB = 0xBA SmeanC = 0xBB PFmeanT = 0xBC PFmeanA = 0xBD PFmeanB = 0xBE PFmeanC = 0xBF PmeanTLSB = 0xC0 PmeanALSB = 0xC1 PmeanBLSB = 0xC2 PmeanCLSB = 0xC3 QmeanTLSB = 0xC4 QmeanALSB = 0xC5 QmeanBLSB = 0xC6 QmeanCLSB = 0xC7 SAmeanTLSB = 0xC8 SmeanALSB = 0xC9 SmeanBLSB = 0xCA SmeanCLSB = 0xCB # Fundamental/ Harmonic Power and Voltage / Current RMS Registers PmeanTF = 0xD0 PmeanAF = 0xD1 PmeanBF = 0xD2 PmeanCF = 0xD3 PmeanTH = 0xD4 PmeanAH = 0xD5 PmeanBH = 0xD6 PmeanCH = 0xD7 IrmsN1 = 0xD8 UrmsA = 0xD9 UrmsB = 0xDA UrmsC = 0xDB IrmsN0 = 0xDC IrmsA = 0xDD IrmsB = 0xDE IrmsC = 0xDF PmeanTFLSB = 0xE0 PmeanAFLSB = 0xE1 PmeanBFLSB = 0xE2 PmeanCFLSB = 0xE3 PmeanTHLSB = 0xE4 PmeanAHLSB = 0xE5 PmeanBHLSB = 0xE6 PmeanCHLSB = 0xE7 UrmsALSB = 0xE9 UrmsBLSB = 0xEA UrmsCLSB = 0xEB IrmsALSB = 0xED IrmsBLSB = 0xEE IrmsCLSB = 0xEF # THD +N, Frequency, Angle and Temperature Regi sters THDNUA = 0xF1 THDNUB = 0xF2 THDNUC = 0xF3 THDNIA = 0xF5 THDNIB = 0xF6 THDNIC = 0xF7 Freq = 0xF8 PAngleA = 0xF9 PAngleB = 0xFA PAngleC = 0xFB Temp = 0xFC UangleA = 0xFD UangleB = 0xFE UangleC = 0xFF HarmRatiosIA = range(0x100, 0x11F) HarmTHDRatioIA = 0x11F HarmRatiosIB = range(0x120, 0x13F) HarmTHDRatioIB = 0x13F HarmRatiosIC = range(0x140, 0x15F) HarmTHDRatioIC = 0x15F HarmRatiosVA = range(0x160, 0x17F) HarmTHDRatioVA = 0x17F HarmRatiosVB = range(0x180, 0x19F) HarmTHDRatioVB = 0x19F HarmRatiosVC = range(0x1A0, 0x1BF) HarmTHDRatioVC = 0x1BF FundCompValIA = 0x1C0 FundCompValVA = 0x1C1 FundCompValIB = 0x1C2 FundCompValVB = 0x1C3 FundCompValIC = 0x1C4 FundCompValVC = 0x1C5 DFTConfig = 0x1D0 DFTCtrl = 0x1D1 
TempSensorConfig1 = 0x2FD TempSensorConfig2 = 0x216 TempSensorConfig3 = 0x219
# Status and Special Register SoftReset = 0x00 SysStatus0 = 0x01 SysStatus1 = 0x02 FuncEn0 = 0x03 FuncEn1 = 0x04 ZXConfig = 0x07 SagTh = 0x08 PhaseLossTh = 0x09 INWarnTh0 = 0x0A INWarnTh1 = 0x0B THDNUTh = 0x0C THDNITh = 0x0D DMACtrl = 0x0E LastSPIData = 0x0F # Low Power Mode Register DetectCtrl = 0x10 DetectTh1 = 0x11 DetectTh2 = 0x12 DetectTh3 = 0x13 PMOffsetA = 0x14 PMOffsetB = 0x15 PMOffsetC = 0x16 PMPGA = 0x17 PMIrmsA = 0x18 PMIrmsB = 0x19 PMIrmsC = 0x1A PMConfig = 0x1B PMAvgSamples = 0x1C PMIrmsLSB = 0x1D # Configuration Registers ConfigStart = 0x30 PLconstH = 0x31 PLconstL = 0x32 MMode0 = 0x33 MMode1 = 0x34 PStartTh = 0x35 QStartTh = 0x36 SStartTh = 0x37 PPhaseTh = 0x38 QPhaseTh = 0x39 SPhaseTh = 0x3A CS0 = 0x3B # Calibration Registers CalStart = 0x40 PoffsetA = 0x41 QoffsetA = 0x42 PoffsetB = 0x43 QoffsetB = 0x44 PoffsetC = 0x45 QoffsetC = 0x46 GainA = 0x47 PhiA = 0x48 GainB = 0x49 PhiB = 0x4A GainC = 0x4B PhiC = 0x4C CS1 = 0x4D # Fundamental/Harmonic Energy Calibration regis ters HarmStart = 0x50 PoffsetAF = 0x51 PoffsetBF = 0x52 PoffsetCF = 0x53 PgainAF = 0x54 PgainBF = 0x55 PgainCF = 0x56 CS2 = 0x57 # Measurement Calibration AdjStart = 0x60 UgainA = 0x61 IgainA = 0x62 UoffsetA = 0x63 IoffsetA = 0x64 UgainB = 0x65 IgainB = 0x66 UoffsetB = 0x67 IoffsetB = 0x68 UgainC = 0x69 IgainC = 0x6A UoffsetC = 0x6B IoffsetC = 0x6C IgainN = 0x6D IoffsetN = 0x6E CS3 = 0x6F # Energy Register APenergyT = 0x80 APenergyA = 0x81 APenergyB = 0x82 APenergyC = 0x83 ANenergyT = 0x84 ANenergyA = 0x85 ANenergyB = 0x86 ANenergyC = 0x87 RPenergyT = 0x88 RPenergyA = 0x89 RPenergyB = 0x8A RPenergyC = 0x8B RNenergyT = 0x8C RNenergyA = 0x8D RNenergyB = 0x8E RNenergyC = 0x8F SAenergyT = 0x90 SenergyA = 0x91 SenergyB = 0x92 SenergyC = 0x93 SVenergyT = 0x94 EnStatus0 = 0x95 EnStatus1 = 0x96 SVmeanT = 0x98 SVmeanTLSB = 0x99 # Fundamental / Harmonic Energy Register APenergyTF = 0xA0 APenergyAF = 0xA1 APenergyBF = 0xA2 APenergyCF = 0xA3 ANenergyTF = 0xA4 ANenergyAF = 0xA5 ANenergyBF = 0xA6 
ANenergyCF = 0xA7 APenergyTH = 0xA8 APenergyAH = 0xA9 APenergyBH = 0xAA APenergyCH = 0xAB ANenergyTH = 0xAC ANenergyAH = 0xAD ANenergyBH = 0xAE ANenergyCH = 0xAF # Power and Power Factor Registers PmeanT = 0xB0 PmeanA = 0xB1 PmeanB = 0xB2 PmeanC = 0xB3 QmeanT = 0xB4 QmeanA = 0xB5 QmeanB = 0xB6 QmeanC = 0xB7 SAmeanT = 0xB8 SmeanA = 0xB9 SmeanB = 0xBA SmeanC = 0xBB PFmeanT = 0xBC PFmeanA = 0xBD PFmeanB = 0xBE PFmeanC = 0xBF PmeanTLSB = 0xC0 PmeanALSB = 0xC1 PmeanBLSB = 0xC2 PmeanCLSB = 0xC3 QmeanTLSB = 0xC4 QmeanALSB = 0xC5 QmeanBLSB = 0xC6 QmeanCLSB = 0xC7 SAmeanTLSB = 0xC8 SmeanALSB = 0xC9 SmeanBLSB = 0xCA SmeanCLSB = 0xCB # Fundamental/ Harmonic Power and Voltage / Current RMS Registers PmeanTF = 0xD0 PmeanAF = 0xD1 PmeanBF = 0xD2 PmeanCF = 0xD3 PmeanTH = 0xD4 PmeanAH = 0xD5 PmeanBH = 0xD6 PmeanCH = 0xD7 IrmsN1 = 0xD8 UrmsA = 0xD9 UrmsB = 0xDA UrmsC = 0xDB IrmsN0 = 0xDC IrmsA = 0xDD IrmsB = 0xDE IrmsC = 0xDF PmeanTFLSB = 0xE0 PmeanAFLSB = 0xE1 PmeanBFLSB = 0xE2 PmeanCFLSB = 0xE3 PmeanTHLSB = 0xE4 PmeanAHLSB = 0xE5 PmeanBHLSB = 0xE6 PmeanCHLSB = 0xE7 UrmsALSB = 0xE9 UrmsBLSB = 0xEA UrmsCLSB = 0xEB IrmsALSB = 0xED IrmsBLSB = 0xEE IrmsCLSB = 0xEF # THD +N, Frequency, Angle and Temperature Regi sters THDNUA = 0xF1 THDNUB = 0xF2 THDNUC = 0xF3 THDNIA = 0xF5 THDNIB = 0xF6 THDNIC = 0xF7 Freq = 0xF8 PAngleA = 0xF9 PAngleB = 0xFA PAngleC = 0xFB Temp = 0xFC UangleA = 0xFD UangleB = 0xFE UangleC = 0xFF HarmRatiosIA = range(0x100, 0x11F) HarmTHDRatioIA = 0x11F HarmRatiosIB = range(0x120, 0x13F) HarmTHDRatioIB = 0x13F HarmRatiosIC = range(0x140, 0x15F) HarmTHDRatioIC = 0x15F HarmRatiosVA = range(0x160, 0x17F) HarmTHDRatioVA = 0x17F HarmRatiosVB = range(0x180, 0x19F) HarmTHDRatioVB = 0x19F HarmRatiosVC = range(0x1A0, 0x1BF) HarmTHDRatioVC = 0x1BF FundCompValIA = 0x1C0 FundCompValVA = 0x1C1 FundCompValIB = 0x1C2 FundCompValVB = 0x1C3 FundCompValIC = 0x1C4 FundCompValVC = 0x1C5 DFTConfig = 0x1D0 DFTCtrl = 0x1D1 TempSensorConfig1 = 0x2FD TempSensorConfig2 = 0x216 TempSensorConfig3 
= 0x219
en
0.651088
# Status and Special Register # Low Power Mode Register # Configuration Registers # Calibration Registers # Fundamental/Harmonic Energy Calibration regis ters # Measurement Calibration # Energy Register # Fundamental / Harmonic Energy Register # Power and Power Factor Registers # Fundamental/ Harmonic Power and Voltage / Current RMS Registers # THD +N, Frequency, Angle and Temperature Regi sters
1.059526
1
app.py
rudyduvnjak/sqlalchemy-challenge
0
6623032
import numpy as np import pandas as pd import sqlalchemy import datetime as dt from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import Session from sqlalchemy import create_engine, func from flask import Flask, jsonify ################################################# # Database Setup ################################################# engine = create_engine("sqlite:///Resources/hawaii.sqlite") # reflect an existing database into a new model Base = automap_base() # reflect the tables Base.prepare(engine, reflect=True) # Save reference to the table Station = Base.classes.station Measurement = Base.classes.measurement session = Session(engine) ################################################# # Flask Setup ################################################# app = Flask(__name__) ################################################# # Flask Routes ################################################# #Create home page with urls @app.route("/") def welcome(): """List all available api routes.""" return ( f"Available Routes:<br/>" f"/api/v1.0/precipitation<br/>" f"/api/v1.0/stations<br/>" f"/api/v1.0/tobs<br/>" f"/api/v1.0/<start><br/>" f"/api/v1.0/<start>/<end>" ) #Convert the query results to a dictionary using `date` as the key and `prcp` as the value. #Return the JSON representation of your dictionary. @app.route("/api/v1.0/precipitation") def precipitation(): session = Session(engine) last_yr = dt.date(2017,8,23) - dt.timedelta(days = 365) last_day = session.query(Measurement.date).order_by(Measurement.date.desc()).first() results = session.query(Measurement.date, Measurement.prcp).\ filter(Measurement.date > last_yr).order_by(Measurement.date).all() #Create precipitation list from queried data precipitation_list = [] for date, prcp in results: data_dict = {} data_dict['date'] = date data_dict['prcp'] = prcp precipitation_list.append(data_dict) return jsonify(precipitation_list) #Return a JSON list of stations from the dataset. 
@app.route("/api/v1.0/stations") def stations(): #Return a JSON list of stations from the dataset. session = Session(engine) station_call = session.query(Station.station).all() station_list = list(np.ravel(station_call)) return jsonify(station_list) session.close() #Query the dates and temperature observations of the most active station for the last year of data. #Return a JSON list of temperature observations (TOBS) for the previous year. @app.route("/api/v1.0/tobs") def tobs(): session = Session(engine) #Obtain last data in the sqlite last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first().date last_date = dt.datetime.strptime(last_date, "%Y-%m-%d") first_date = last_date - dt.timedelta(days = 365) most_active_stations = (session.query(Measurement.station, func.count(Measurement.station)).\ group_by(Measurement.station).order_by(func.count(Measurement.station).desc()).all()) most_active_station = most_active_stations[0][0] #return jsonify(most_active_station) Just used for verification of getting correct station tobs_results = session.query(Measurement.station, Measurement.tobs).\ filter(Measurement.date.between(first_date, last_date)).filter(Measurement.station == most_active_station).all() tobs_list = [] #Create dictionary for stations and temperatures in tobs_dict for station, tobs in tobs_results: tobs_dict = {} tobs_dict["station"] = station tobs_dict["tobs"] = round(float(tobs),2) tobs_list.append(tobs_dict) return jsonify(tobs_list) session.close() #Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range. #When given the start only, calculate `TMIN`, `TAVG`, and `TMAX` for all dates greater than and equal to the start date. #When given the start and the end date, calculate the `TMIN`, `TAVG`, and `TMAX` for dates between the start and end date inclusive. 
@app.route("/api/v1.0/<start>") def start_day(start): """Fetch the minimun, average and maximum temperature for the dates greater or equal to the start date, or a 404 if not.""" start_day_list = [] session = Session(engine) #Get last date in the data set last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first().date last_date = dt.datetime.strptime(last_date, "%Y-%m-%d") #Get first date in the data set initial_date = session.query(Measurement.date).order_by(Measurement.date.asc()).first().date initial_date = dt.datetime.strptime(initial_date, "%Y-%m-%d") #Define sel for temperature query sel=[Measurement.date,func.min(Measurement.tobs),func.max(Measurement.tobs), func.avg(Measurement.tobs)] response_temps = session.query(*sel).filter(Measurement.date >= start).all() session.close() #Set start_date = start start_date = start #Set start_date into date time format start_date = dt.datetime.strptime(start_date,"%Y-%m-%d") min_max_dict = (f"error: Please enter date between:" + str(initial_date) + "and" + str(last_date) + "in the format YYYY-MM-DD"), 404 #Check if correct dates are entered and display data for temps in response_temps: if (start_date >= initial_date and start_date <= last_date): min_max_dict = {} min_max_dict = { "Start Date": start, "Temperature Min:": response_temps[0][1], "Temperature Max": response_temps[0][2], "Temperature Mean:": round(response_temps[0][3],2), "End Date": last_date } # Return JSON List of Min Temp, Avg Temp and Max Temp for a Given Start Range return jsonify(min_max_dict) else: return min_max_dict @app.route("/api/v1.0/<start>/<end>") def start_end_date(start, end): #Get last date in the data set last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first().date last_date = dt.datetime.strptime(last_date, "%Y-%m-%d") #Get first date in the data set initial_date = session.query(Measurement.date).order_by(Measurement.date.asc()).first().date initial_date = 
dt.datetime.strptime(initial_date, "%Y-%m-%d") #Define sel for temperature query sel=[Measurement.date,func.min(Measurement.tobs),func.max(Measurement.tobs), func.avg(Measurement.tobs)] response_temps = session.query(*sel).filter(Measurement.date >= start).filter(Measurement.date <= end).all() session.close() #Set start_date = start start_date = start start_date = dt.datetime.strptime(start_date,"%Y-%m-%d") #Set end_date = date end_date = end end_date = dt.datetime.strptime(end_date,"%Y-%m-%d") min_max_dict = (f"error: Please enter start and end date between:" + str(initial_date) + " and " + str(last_date) + " in the format YYYY-MM-DD end date should be greater than start date"), 404 #Check if correct dates are entered and display data for temps in response_temps: if (start_date >= initial_date and start_date <= last_date and end_date >= start_date): min_max_dict = {} min_max_dict = { "Start Date": start, "Temperature Min:": response_temps[0][1], "Temperature Max": response_temps[0][2], "Temperature Avg:": round(response_temps[0][3],2), "End Date": end } # Return JSON List of Min Temp, Avg Temp and Max Temp for a Given Start and End Range return jsonify(min_max_dict) else: return min_max_dict #Define main behaviour if __name__ == '__main__': app.run(debug=True)
import numpy as np import pandas as pd import sqlalchemy import datetime as dt from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import Session from sqlalchemy import create_engine, func from flask import Flask, jsonify ################################################# # Database Setup ################################################# engine = create_engine("sqlite:///Resources/hawaii.sqlite") # reflect an existing database into a new model Base = automap_base() # reflect the tables Base.prepare(engine, reflect=True) # Save reference to the table Station = Base.classes.station Measurement = Base.classes.measurement session = Session(engine) ################################################# # Flask Setup ################################################# app = Flask(__name__) ################################################# # Flask Routes ################################################# #Create home page with urls @app.route("/") def welcome(): """List all available api routes.""" return ( f"Available Routes:<br/>" f"/api/v1.0/precipitation<br/>" f"/api/v1.0/stations<br/>" f"/api/v1.0/tobs<br/>" f"/api/v1.0/<start><br/>" f"/api/v1.0/<start>/<end>" ) #Convert the query results to a dictionary using `date` as the key and `prcp` as the value. #Return the JSON representation of your dictionary. @app.route("/api/v1.0/precipitation") def precipitation(): session = Session(engine) last_yr = dt.date(2017,8,23) - dt.timedelta(days = 365) last_day = session.query(Measurement.date).order_by(Measurement.date.desc()).first() results = session.query(Measurement.date, Measurement.prcp).\ filter(Measurement.date > last_yr).order_by(Measurement.date).all() #Create precipitation list from queried data precipitation_list = [] for date, prcp in results: data_dict = {} data_dict['date'] = date data_dict['prcp'] = prcp precipitation_list.append(data_dict) return jsonify(precipitation_list) #Return a JSON list of stations from the dataset. 
@app.route("/api/v1.0/stations") def stations(): #Return a JSON list of stations from the dataset. session = Session(engine) station_call = session.query(Station.station).all() station_list = list(np.ravel(station_call)) return jsonify(station_list) session.close() #Query the dates and temperature observations of the most active station for the last year of data. #Return a JSON list of temperature observations (TOBS) for the previous year. @app.route("/api/v1.0/tobs") def tobs(): session = Session(engine) #Obtain last data in the sqlite last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first().date last_date = dt.datetime.strptime(last_date, "%Y-%m-%d") first_date = last_date - dt.timedelta(days = 365) most_active_stations = (session.query(Measurement.station, func.count(Measurement.station)).\ group_by(Measurement.station).order_by(func.count(Measurement.station).desc()).all()) most_active_station = most_active_stations[0][0] #return jsonify(most_active_station) Just used for verification of getting correct station tobs_results = session.query(Measurement.station, Measurement.tobs).\ filter(Measurement.date.between(first_date, last_date)).filter(Measurement.station == most_active_station).all() tobs_list = [] #Create dictionary for stations and temperatures in tobs_dict for station, tobs in tobs_results: tobs_dict = {} tobs_dict["station"] = station tobs_dict["tobs"] = round(float(tobs),2) tobs_list.append(tobs_dict) return jsonify(tobs_list) session.close() #Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range. #When given the start only, calculate `TMIN`, `TAVG`, and `TMAX` for all dates greater than and equal to the start date. #When given the start and the end date, calculate the `TMIN`, `TAVG`, and `TMAX` for dates between the start and end date inclusive. 
@app.route("/api/v1.0/<start>") def start_day(start): """Fetch the minimun, average and maximum temperature for the dates greater or equal to the start date, or a 404 if not.""" start_day_list = [] session = Session(engine) #Get last date in the data set last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first().date last_date = dt.datetime.strptime(last_date, "%Y-%m-%d") #Get first date in the data set initial_date = session.query(Measurement.date).order_by(Measurement.date.asc()).first().date initial_date = dt.datetime.strptime(initial_date, "%Y-%m-%d") #Define sel for temperature query sel=[Measurement.date,func.min(Measurement.tobs),func.max(Measurement.tobs), func.avg(Measurement.tobs)] response_temps = session.query(*sel).filter(Measurement.date >= start).all() session.close() #Set start_date = start start_date = start #Set start_date into date time format start_date = dt.datetime.strptime(start_date,"%Y-%m-%d") min_max_dict = (f"error: Please enter date between:" + str(initial_date) + "and" + str(last_date) + "in the format YYYY-MM-DD"), 404 #Check if correct dates are entered and display data for temps in response_temps: if (start_date >= initial_date and start_date <= last_date): min_max_dict = {} min_max_dict = { "Start Date": start, "Temperature Min:": response_temps[0][1], "Temperature Max": response_temps[0][2], "Temperature Mean:": round(response_temps[0][3],2), "End Date": last_date } # Return JSON List of Min Temp, Avg Temp and Max Temp for a Given Start Range return jsonify(min_max_dict) else: return min_max_dict @app.route("/api/v1.0/<start>/<end>") def start_end_date(start, end): #Get last date in the data set last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first().date last_date = dt.datetime.strptime(last_date, "%Y-%m-%d") #Get first date in the data set initial_date = session.query(Measurement.date).order_by(Measurement.date.asc()).first().date initial_date = 
dt.datetime.strptime(initial_date, "%Y-%m-%d") #Define sel for temperature query sel=[Measurement.date,func.min(Measurement.tobs),func.max(Measurement.tobs), func.avg(Measurement.tobs)] response_temps = session.query(*sel).filter(Measurement.date >= start).filter(Measurement.date <= end).all() session.close() #Set start_date = start start_date = start start_date = dt.datetime.strptime(start_date,"%Y-%m-%d") #Set end_date = date end_date = end end_date = dt.datetime.strptime(end_date,"%Y-%m-%d") min_max_dict = (f"error: Please enter start and end date between:" + str(initial_date) + " and " + str(last_date) + " in the format YYYY-MM-DD end date should be greater than start date"), 404 #Check if correct dates are entered and display data for temps in response_temps: if (start_date >= initial_date and start_date <= last_date and end_date >= start_date): min_max_dict = {} min_max_dict = { "Start Date": start, "Temperature Min:": response_temps[0][1], "Temperature Max": response_temps[0][2], "Temperature Avg:": round(response_temps[0][3],2), "End Date": end } # Return JSON List of Min Temp, Avg Temp and Max Temp for a Given Start and End Range return jsonify(min_max_dict) else: return min_max_dict #Define main behaviour if __name__ == '__main__': app.run(debug=True)
en
0.63515
################################################# # Database Setup ################################################# # reflect an existing database into a new model # reflect the tables # Save reference to the table ################################################# # Flask Setup ################################################# ################################################# # Flask Routes ################################################# #Create home page with urls List all available api routes. #Convert the query results to a dictionary using `date` as the key and `prcp` as the value. #Return the JSON representation of your dictionary. #Create precipitation list from queried data #Return a JSON list of stations from the dataset. #Return a JSON list of stations from the dataset. #Query the dates and temperature observations of the most active station for the last year of data. #Return a JSON list of temperature observations (TOBS) for the previous year. #Obtain last data in the sqlite #return jsonify(most_active_station) Just used for verification of getting correct station #Create dictionary for stations and temperatures in tobs_dict #Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range. #When given the start only, calculate `TMIN`, `TAVG`, and `TMAX` for all dates greater than and equal to the start date. #When given the start and the end date, calculate the `TMIN`, `TAVG`, and `TMAX` for dates between the start and end date inclusive. Fetch the minimun, average and maximum temperature for the dates greater or equal to the start date, or a 404 if not. 
#Get last date in the data set #Get first date in the data set #Define sel for temperature query #Set start_date = start #Set start_date into date time format #Check if correct dates are entered and display data # Return JSON List of Min Temp, Avg Temp and Max Temp for a Given Start Range #Get last date in the data set #Get first date in the data set #Define sel for temperature query #Set start_date = start #Set end_date = date #Check if correct dates are entered and display data # Return JSON List of Min Temp, Avg Temp and Max Temp for a Given Start and End Range #Define main behaviour
2.650475
3
globagrim/boundary_conditions.py
neumannd/globagrim_project
3
6623033
from .variables import global_const, global_array def boundary_conditions(): # # poles # for j in range(0, global_const.NJ + 1): joppos = j + int(global_const.NJ / 2) if joppos > global_const.NJ: joppos = joppos - global_const.NJ global_array.ps[j, 0] = global_array.ps[joppos, 1] global_array.ps[j, global_const.NK + 1] = global_array.ps[ joppos, global_const.NK ] global_array.phis[j, 0] = global_array.phis[joppos, 1] global_array.phis[j, global_const.NK + 1] = global_array.phis[ joppos, global_const.NK ] global_array.u[j, 0, :] = -global_array.u[joppos, 1, :] global_array.u[j, global_const.NK + 1, :] = -global_array.u[ joppos, global_const.NK, : ] global_array.v[j, 0, :] = -global_array.v[joppos, 1, :] global_array.v[j, global_const.NK + 1, :] = -global_array.v[ joppos, global_const.NK, : ] global_array.t[j, 0, :] = global_array.t[joppos, 1, :] global_array.t[j, global_const.NK + 1, :] = global_array.t[ joppos, global_const.NK, : ] # # east/west # for k in range(0, global_const.NK + 2): global_array.ps[0, k] = global_array.ps[global_const.NJ, k] global_array.ps[global_const.NJ + 1, k] = global_array.ps[1, k] global_array.phis[0, k] = global_array.phis[global_const.NJ, k] global_array.phis[global_const.NJ + 1, k] = global_array.phis[1, k] global_array.u[0, k, :] = global_array.u[global_const.NJ, k, :] global_array.u[global_const.NJ + 1, k, :] = global_array.u[1, k, :] global_array.v[0, k, :] = global_array.v[global_const.NJ, k, :] global_array.v[global_const.NJ + 1, k, :] = global_array.v[1, k, :] global_array.t[0, k, :] = global_array.t[global_const.NJ, k, :] global_array.t[global_const.NJ + 1, k, :] = global_array.t[1, k, :] ######################### if __name__ == "__main__": import grid grid.grid() import init init.init_case() boundary_conditions()
from .variables import global_const, global_array def boundary_conditions(): # # poles # for j in range(0, global_const.NJ + 1): joppos = j + int(global_const.NJ / 2) if joppos > global_const.NJ: joppos = joppos - global_const.NJ global_array.ps[j, 0] = global_array.ps[joppos, 1] global_array.ps[j, global_const.NK + 1] = global_array.ps[ joppos, global_const.NK ] global_array.phis[j, 0] = global_array.phis[joppos, 1] global_array.phis[j, global_const.NK + 1] = global_array.phis[ joppos, global_const.NK ] global_array.u[j, 0, :] = -global_array.u[joppos, 1, :] global_array.u[j, global_const.NK + 1, :] = -global_array.u[ joppos, global_const.NK, : ] global_array.v[j, 0, :] = -global_array.v[joppos, 1, :] global_array.v[j, global_const.NK + 1, :] = -global_array.v[ joppos, global_const.NK, : ] global_array.t[j, 0, :] = global_array.t[joppos, 1, :] global_array.t[j, global_const.NK + 1, :] = global_array.t[ joppos, global_const.NK, : ] # # east/west # for k in range(0, global_const.NK + 2): global_array.ps[0, k] = global_array.ps[global_const.NJ, k] global_array.ps[global_const.NJ + 1, k] = global_array.ps[1, k] global_array.phis[0, k] = global_array.phis[global_const.NJ, k] global_array.phis[global_const.NJ + 1, k] = global_array.phis[1, k] global_array.u[0, k, :] = global_array.u[global_const.NJ, k, :] global_array.u[global_const.NJ + 1, k, :] = global_array.u[1, k, :] global_array.v[0, k, :] = global_array.v[global_const.NJ, k, :] global_array.v[global_const.NJ + 1, k, :] = global_array.v[1, k, :] global_array.t[0, k, :] = global_array.t[global_const.NJ, k, :] global_array.t[global_const.NJ + 1, k, :] = global_array.t[1, k, :] ######################### if __name__ == "__main__": import grid grid.grid() import init init.init_case() boundary_conditions()
de
0.553194
# # poles # # # east/west # #########################
2.239837
2
src/main/python/views/ImagePreviewDialog.py
sudoparsa/paperECG
14
6623034
from PyQt5 import QtCore, QtGui, QtWidgets from QtWrapper import * from ImageUtilities import opencvImageToPixmap class ImagePreviewDialog(QtWidgets.QDialog): def __init__(self, image, leadId): super().__init__() self.pixmap = opencvImageToPixmap(image) self.leadId = leadId self.initUI() def initUI(self): self.setWindowTitle("Lead " + str(self.leadId)) self.layout = QVBoxLayout() self.margins = QtCore.QMargins(4, 4, 4, 4) self.pixmapLabel = QLabel() self.pixmapLabel.setContentsMargins(self.margins) self.pixmapLabel.setPixmap(self.pixmap) self.pixmapLabel.setAlignment(QtCore.Qt.AlignCenter) self.pixmapLabel.setMinimumSize(1, 1) self.layout.addWidget(self.pixmapLabel) self.layout.setContentsMargins(0, 0, 0, 0) self.setLayout(self.layout) def resizeEvent(self, event): self.pixmapLabel.resize(self.width()-(self.margins.right()-self.margins.left()), self.height()-(self.margins.top()-self.margins.bottom())) self.pixmapLabel.setPixmap(self.pixmap.scaled(self.pixmapLabel.width(), self.pixmapLabel.height(), QtCore.Qt.KeepAspectRatio))
from PyQt5 import QtCore, QtGui, QtWidgets from QtWrapper import * from ImageUtilities import opencvImageToPixmap class ImagePreviewDialog(QtWidgets.QDialog): def __init__(self, image, leadId): super().__init__() self.pixmap = opencvImageToPixmap(image) self.leadId = leadId self.initUI() def initUI(self): self.setWindowTitle("Lead " + str(self.leadId)) self.layout = QVBoxLayout() self.margins = QtCore.QMargins(4, 4, 4, 4) self.pixmapLabel = QLabel() self.pixmapLabel.setContentsMargins(self.margins) self.pixmapLabel.setPixmap(self.pixmap) self.pixmapLabel.setAlignment(QtCore.Qt.AlignCenter) self.pixmapLabel.setMinimumSize(1, 1) self.layout.addWidget(self.pixmapLabel) self.layout.setContentsMargins(0, 0, 0, 0) self.setLayout(self.layout) def resizeEvent(self, event): self.pixmapLabel.resize(self.width()-(self.margins.right()-self.margins.left()), self.height()-(self.margins.top()-self.margins.bottom())) self.pixmapLabel.setPixmap(self.pixmap.scaled(self.pixmapLabel.width(), self.pixmapLabel.height(), QtCore.Qt.KeepAspectRatio))
none
1
2.340001
2
blackjack/hand.py
rockchalkwushock/pdx-code-labs
0
6623035
from deck import values class Hand: def __init__(self): self.cards = [] self.score = 0 self.aces = 0 def __str__(self): return f'Cards: {self.cards}\nScore: {self.score}\nAces: {self.aces}\n' def draw_card(self, card): self.cards.append(card) self.score += values[card.rank] if card.rank == 'Ace': self.aces += 1 def adjust_for_ace(self): while self.score > 21 and self.aces: self.score -= 10 self.aces -= 1
from deck import values class Hand: def __init__(self): self.cards = [] self.score = 0 self.aces = 0 def __str__(self): return f'Cards: {self.cards}\nScore: {self.score}\nAces: {self.aces}\n' def draw_card(self, card): self.cards.append(card) self.score += values[card.rank] if card.rank == 'Ace': self.aces += 1 def adjust_for_ace(self): while self.score > 21 and self.aces: self.score -= 10 self.aces -= 1
none
1
3.21269
3
app/site/views.py
perna/podigger
5
6623036
<filename>app/site/views.py from flask import Blueprint, render_template, request, flash, Markup from app.repository.episode import EpisodeRepository from app.repository.podcast import PodcastRepository from app.repository.topic_suggestion import TopicSuggestionRepository from app.repository.term import TermRepository from .forms import PodcastForm, PodcastSearchForm, TopicSuggestionForm from app.api.models import Podcast from app import cache site = Blueprint('site', __name__, template_folder='../templates/site') @site.context_processor def counter(): podcast = PodcastRepository() episode = EpisodeRepository() podcast_count = podcast.count_all() episode_count = episode.count_all() counter = {'podcasts': podcast_count, 'episodes': episode_count} return dict(counter=counter) @cache.cached(timeout=60) @site.route("/") def index(): podcast = PodcastRepository() last_podcasts = podcast.get_last_podcasts_thumbs() return render_template("home.html", podcasts=last_podcasts) @cache.cached(timeout=60) @site.route('/search') @site.route('/search/<int:page>') def search(page=1): term = request.args.get('term') if term: new_term = TermRepository() new_term.create_or_update(term) episode = EpisodeRepository() episodes = episode.result_search_paginate(term, page, 20) if episodes.total: flash('{} resultados para {}'.format(episodes.total, term)) else: message = Markup('<span>Nenhum resultado encontrado.</span> <a class="link-add-suggestion" href="/add_topic_suggestion">Gostaria de sugerir o tema?</a>') flash(message) return render_template('search.html', episodes=episodes, page="search") else: return render_template('search.html', page="search") @site.route('/add_podcast', methods=['GET', 'POST']) def add_podcast(): form = PodcastForm(request.form) if request.method == 'POST': if form.validate_on_submit(): podcast = PodcastRepository() podcast.create_or_update(form.name.data, form.feed.data) flash('Podcast cadastrado com sucesso.', 'success') return 
render_template("add_podcast.html", form=form) else: flash('Erro ao cadastrar o podcast. Verifique os dados e tente novamente', 'danger') return render_template("site/add_podcast.html", form=form) else: return render_template("add_podcast.html", form=form, page="add_podcast") @site.route('/podcasts', methods=['GET', 'POST']) @site.route('/podcasts/<int:page>') def list_podcasts(page=1): if request.method == 'POST': form = PodcastSearchForm(request.form) if form.validate_on_submit(): podcast = PodcastRepository() podcasts = podcast.search(form.term.data).paginate(page, per_page=20) if podcasts.items: return render_template("list_podcasts.html", podcasts=podcasts, form=form) else: flash('Podcast não encontrado') return render_template("list_podcasts.html", podcasts=podcasts, form=form) else: form = PodcastSearchForm() podcasts = Podcast.query.with_entities(Podcast.name, Podcast.feed, Podcast.total_episodes).order_by(Podcast.name).paginate(page, per_page=20) return render_template("list_podcasts.html", podcasts=podcasts, form=form) @site.route('/topic_suggestions') def list_topic_suggestion(): topic = TopicSuggestionRepository() topics = topic.list_topics() return render_template("list_topic_suggestions.html", topics=topics) @site.route('/add_topic_suggestion', methods=['GET', 'POST']) def add_topic_suggestion(): form = TopicSuggestionForm(request.form) if form.validate_on_submit(): topic = TopicSuggestionRepository() topic.create(form.title.data, form.description.data) flash('Sugestão adicionada com sucesso.') return render_template("add_topic_suggestion.html", form=form) return render_template("add_topic_suggestion.html", form=form) @site.route('/trends') @cache.cached(timeout=60) def trends(): return render_template("trends.html") @site.route('/about') @cache.cached(timeout=60) def about(): return render_template("about.html", page="about") @site.route('/contact') @cache.cached(timeout=60) def contact(): return render_template("contact.html", page="contact")
<filename>app/site/views.py from flask import Blueprint, render_template, request, flash, Markup from app.repository.episode import EpisodeRepository from app.repository.podcast import PodcastRepository from app.repository.topic_suggestion import TopicSuggestionRepository from app.repository.term import TermRepository from .forms import PodcastForm, PodcastSearchForm, TopicSuggestionForm from app.api.models import Podcast from app import cache site = Blueprint('site', __name__, template_folder='../templates/site') @site.context_processor def counter(): podcast = PodcastRepository() episode = EpisodeRepository() podcast_count = podcast.count_all() episode_count = episode.count_all() counter = {'podcasts': podcast_count, 'episodes': episode_count} return dict(counter=counter) @cache.cached(timeout=60) @site.route("/") def index(): podcast = PodcastRepository() last_podcasts = podcast.get_last_podcasts_thumbs() return render_template("home.html", podcasts=last_podcasts) @cache.cached(timeout=60) @site.route('/search') @site.route('/search/<int:page>') def search(page=1): term = request.args.get('term') if term: new_term = TermRepository() new_term.create_or_update(term) episode = EpisodeRepository() episodes = episode.result_search_paginate(term, page, 20) if episodes.total: flash('{} resultados para {}'.format(episodes.total, term)) else: message = Markup('<span>Nenhum resultado encontrado.</span> <a class="link-add-suggestion" href="/add_topic_suggestion">Gostaria de sugerir o tema?</a>') flash(message) return render_template('search.html', episodes=episodes, page="search") else: return render_template('search.html', page="search") @site.route('/add_podcast', methods=['GET', 'POST']) def add_podcast(): form = PodcastForm(request.form) if request.method == 'POST': if form.validate_on_submit(): podcast = PodcastRepository() podcast.create_or_update(form.name.data, form.feed.data) flash('Podcast cadastrado com sucesso.', 'success') return 
render_template("add_podcast.html", form=form) else: flash('Erro ao cadastrar o podcast. Verifique os dados e tente novamente', 'danger') return render_template("site/add_podcast.html", form=form) else: return render_template("add_podcast.html", form=form, page="add_podcast") @site.route('/podcasts', methods=['GET', 'POST']) @site.route('/podcasts/<int:page>') def list_podcasts(page=1): if request.method == 'POST': form = PodcastSearchForm(request.form) if form.validate_on_submit(): podcast = PodcastRepository() podcasts = podcast.search(form.term.data).paginate(page, per_page=20) if podcasts.items: return render_template("list_podcasts.html", podcasts=podcasts, form=form) else: flash('Podcast não encontrado') return render_template("list_podcasts.html", podcasts=podcasts, form=form) else: form = PodcastSearchForm() podcasts = Podcast.query.with_entities(Podcast.name, Podcast.feed, Podcast.total_episodes).order_by(Podcast.name).paginate(page, per_page=20) return render_template("list_podcasts.html", podcasts=podcasts, form=form) @site.route('/topic_suggestions') def list_topic_suggestion(): topic = TopicSuggestionRepository() topics = topic.list_topics() return render_template("list_topic_suggestions.html", topics=topics) @site.route('/add_topic_suggestion', methods=['GET', 'POST']) def add_topic_suggestion(): form = TopicSuggestionForm(request.form) if form.validate_on_submit(): topic = TopicSuggestionRepository() topic.create(form.title.data, form.description.data) flash('Sugestão adicionada com sucesso.') return render_template("add_topic_suggestion.html", form=form) return render_template("add_topic_suggestion.html", form=form) @site.route('/trends') @cache.cached(timeout=60) def trends(): return render_template("trends.html") @site.route('/about') @cache.cached(timeout=60) def about(): return render_template("about.html", page="about") @site.route('/contact') @cache.cached(timeout=60) def contact(): return render_template("contact.html", page="contact")
none
1
2.138465
2
ms_graph_exporter/celery/__init__.py
undp/MsGraphExporter
1
6623037
# -*- coding: utf-8 -*- """Package contains following modules. * :mod:`ms_graph_exporter.celery.app` - Celery app and worker customizations. * :mod:`ms_graph_exporter.celery.config` - app config objects. * :mod:`ms_graph_exporter.celery.graph_api_base` - base class for data extraction tasks. * :mod:`ms_graph_exporter.celery.tasks` - parallel data retrieval tasks. """
# -*- coding: utf-8 -*- """Package contains following modules. * :mod:`ms_graph_exporter.celery.app` - Celery app and worker customizations. * :mod:`ms_graph_exporter.celery.config` - app config objects. * :mod:`ms_graph_exporter.celery.graph_api_base` - base class for data extraction tasks. * :mod:`ms_graph_exporter.celery.tasks` - parallel data retrieval tasks. """
en
0.56084
# -*- coding: utf-8 -*- Package contains following modules. * :mod:`ms_graph_exporter.celery.app` - Celery app and worker customizations. * :mod:`ms_graph_exporter.celery.config` - app config objects. * :mod:`ms_graph_exporter.celery.graph_api_base` - base class for data extraction tasks. * :mod:`ms_graph_exporter.celery.tasks` - parallel data retrieval tasks.
1.196746
1
tests/web_platform/CSS2/positioning/test_position_relative_nested.py
fletchgraham/colosseum
0
6623038
<reponame>fletchgraham/colosseum<gh_stars>0 from tests.utils import W3CTestCase class TestPositionRelativeNested(W3CTestCase): vars().update(W3CTestCase.find_tests(__file__, 'position-relative-nested-'))
from tests.utils import W3CTestCase class TestPositionRelativeNested(W3CTestCase): vars().update(W3CTestCase.find_tests(__file__, 'position-relative-nested-'))
none
1
1.439668
1
install/core/python/tank/platform/events/event_file_close.py
JoanAzpeitia/lp_sg
0
6623039
<filename>install/core/python/tank/platform/events/event_file_close.py # Copyright (c) 2016 Shotgun Software Inc. # # CONFIDENTIAL AND PROPRIETARY # # This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit # Source Code License included in this distribution package. See LICENSE. # By accessing, using, copying or modifying this work you indicate your # agreement to the Shotgun Pipeline Toolkit Source Code License. All rights # not expressly granted therein are reserved by Shotgun Software Inc. from .event_engine import EngineEvent class FileCloseEvent(EngineEvent): """ An object representation of a file-close event. The event holds a :meth:file_path property, indicating which open file or document the event is referring to. In engine implementations which integrate with MDI applications, the path is required in order to distinguish which document is being closed. In engine implementations where the current file isn't known, well defined or accessible, a None value should be returned to indicate this. Note that the file_path may represent a document that has not yet been saved. In this case, it may not be a full path but instead the name of the document, for example "untitled" or an empty string "". The event information should transparently reflect whatever is returned from the underlying application. """ def __init__(self, file_path): """ Constructor. :param str file_path: The path to the file closed. """ super(FileCloseEvent, self).__init__() self._file_path = file_path @property def file_path(self): """ The string path of the file that was closed. """ return self._file_path def __str__(self): return ("%s: %s" % ("FileCloseEvent", self.file_path))
<filename>install/core/python/tank/platform/events/event_file_close.py # Copyright (c) 2016 Shotgun Software Inc. # # CONFIDENTIAL AND PROPRIETARY # # This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit # Source Code License included in this distribution package. See LICENSE. # By accessing, using, copying or modifying this work you indicate your # agreement to the Shotgun Pipeline Toolkit Source Code License. All rights # not expressly granted therein are reserved by Shotgun Software Inc. from .event_engine import EngineEvent class FileCloseEvent(EngineEvent): """ An object representation of a file-close event. The event holds a :meth:file_path property, indicating which open file or document the event is referring to. In engine implementations which integrate with MDI applications, the path is required in order to distinguish which document is being closed. In engine implementations where the current file isn't known, well defined or accessible, a None value should be returned to indicate this. Note that the file_path may represent a document that has not yet been saved. In this case, it may not be a full path but instead the name of the document, for example "untitled" or an empty string "". The event information should transparently reflect whatever is returned from the underlying application. """ def __init__(self, file_path): """ Constructor. :param str file_path: The path to the file closed. """ super(FileCloseEvent, self).__init__() self._file_path = file_path @property def file_path(self): """ The string path of the file that was closed. """ return self._file_path def __str__(self): return ("%s: %s" % ("FileCloseEvent", self.file_path))
en
0.913481
# Copyright (c) 2016 Shotgun Software Inc. # # CONFIDENTIAL AND PROPRIETARY # # This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit # Source Code License included in this distribution package. See LICENSE. # By accessing, using, copying or modifying this work you indicate your # agreement to the Shotgun Pipeline Toolkit Source Code License. All rights # not expressly granted therein are reserved by Shotgun Software Inc. An object representation of a file-close event. The event holds a :meth:file_path property, indicating which open file or document the event is referring to. In engine implementations which integrate with MDI applications, the path is required in order to distinguish which document is being closed. In engine implementations where the current file isn't known, well defined or accessible, a None value should be returned to indicate this. Note that the file_path may represent a document that has not yet been saved. In this case, it may not be a full path but instead the name of the document, for example "untitled" or an empty string "". The event information should transparently reflect whatever is returned from the underlying application. Constructor. :param str file_path: The path to the file closed. The string path of the file that was closed.
2.710504
3
draugr/metrics/__init__.py
cnHeider/draugr
3
6623040
<filename>draugr/metrics/__init__.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- __author__ = "<NAME>" __doc__ = r""" """ from pathlib import Path with open(Path(__file__).parent / "README.md", "r") as this_init_file: __doc__ += this_init_file.read() from .accumulation import * from .meters import * from .metric_aggregator import * from .metric_collection import * from .metric_summary import *
<filename>draugr/metrics/__init__.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- __author__ = "<NAME>" __doc__ = r""" """ from pathlib import Path with open(Path(__file__).parent / "README.md", "r") as this_init_file: __doc__ += this_init_file.read() from .accumulation import * from .meters import * from .metric_aggregator import * from .metric_collection import * from .metric_summary import *
en
0.308914
#!/usr/bin/env python3 # -*- coding: utf-8 -*-
1.661848
2
shap/maskers/_fixed.py
zduey/shap
1
6623041
from ._masker import Masker class Fixed(Masker): """ This leaves the input unchanged during masking, and is used for things like scoring labels. Sometimes there are inputs to the model that we do not want to explain, but rather we want to consider them fixed. The primary example of this is when we explain the loss of the model using the labels. These "true" labels are inputs to the function we are explaining, but we don't want to attribute credit to them, instead we want to consider them fixed and assign all the credit to the model's input features. This is where the Fixed masker can help, since we can apply it to the label inputs. """ def __init__(self): pass def __call__(self, x, mask): return x def save(self, out_file, *args): super(Fixed, self).save(out_file) @classmethod def load(cls, in_file): masker_type = pickle.load(in_file) if not masker_type == cls: print("Warning: Saved masker type not same as the one that's attempting to be loaded. Saved masker type: ", masker_type) return Fixed._load(in_file) @classmethod def _load(cls, in_file): fixed_masker = Fixed() return fixed_masker
from ._masker import Masker class Fixed(Masker): """ This leaves the input unchanged during masking, and is used for things like scoring labels. Sometimes there are inputs to the model that we do not want to explain, but rather we want to consider them fixed. The primary example of this is when we explain the loss of the model using the labels. These "true" labels are inputs to the function we are explaining, but we don't want to attribute credit to them, instead we want to consider them fixed and assign all the credit to the model's input features. This is where the Fixed masker can help, since we can apply it to the label inputs. """ def __init__(self): pass def __call__(self, x, mask): return x def save(self, out_file, *args): super(Fixed, self).save(out_file) @classmethod def load(cls, in_file): masker_type = pickle.load(in_file) if not masker_type == cls: print("Warning: Saved masker type not same as the one that's attempting to be loaded. Saved masker type: ", masker_type) return Fixed._load(in_file) @classmethod def _load(cls, in_file): fixed_masker = Fixed() return fixed_masker
en
0.951966
This leaves the input unchanged during masking, and is used for things like scoring labels. Sometimes there are inputs to the model that we do not want to explain, but rather we want to consider them fixed. The primary example of this is when we explain the loss of the model using the labels. These "true" labels are inputs to the function we are explaining, but we don't want to attribute credit to them, instead we want to consider them fixed and assign all the credit to the model's input features. This is where the Fixed masker can help, since we can apply it to the label inputs.
3.548579
4
test_scripts/test_xml2yolo.py
wwdok/mask2json
27
6623042
<gh_stars>10-100 ''' lanhuage: python Descripttion: convert xmls to yolo txts. version: beta Author: xiaoshuyui Date: 2020-08-24 09:39:42 LastEditors: xiaoshuyui LastEditTime: 2020-10-20 09:49:55 ''' import sys sys.path.append("..") import os from convertmask.utils.xml2yolo.xml2yolo import x2yConvert BASE_DIR = os.path.abspath(os.path.dirname( os.getcwd())) + os.sep + 'static' + os.sep + 'test_xmls' if __name__ == "__main__": # single test sfile = BASE_DIR + os.sep + '1187_3.xml' x2yConvert(sfile) # multi file test x2yConvert(BASE_DIR + os.sep + 'xmls')
''' lanhuage: python Descripttion: convert xmls to yolo txts. version: beta Author: xiaoshuyui Date: 2020-08-24 09:39:42 LastEditors: xiaoshuyui LastEditTime: 2020-10-20 09:49:55 ''' import sys sys.path.append("..") import os from convertmask.utils.xml2yolo.xml2yolo import x2yConvert BASE_DIR = os.path.abspath(os.path.dirname( os.getcwd())) + os.sep + 'static' + os.sep + 'test_xmls' if __name__ == "__main__": # single test sfile = BASE_DIR + os.sep + '1187_3.xml' x2yConvert(sfile) # multi file test x2yConvert(BASE_DIR + os.sep + 'xmls')
en
0.650931
lanhuage: python Descripttion: convert xmls to yolo txts. version: beta Author: xiaoshuyui Date: 2020-08-24 09:39:42 LastEditors: xiaoshuyui LastEditTime: 2020-10-20 09:49:55 # single test # multi file test
1.929983
2
dagmc_stats/DagmcFile.py
svalinn/dagmc_stats
1
6623043
import pandas as pd import numpy as np from pymoab.rng import Range from pymoab import core, types import warnings class DagmcFile: def __init__(self, filename, populate=False): """Constructor inputs ------ filename : name of the file populate : boolean value that determines whether or not to populate the data outputs ------- none """ # read file self._my_moab_core = core.Core() self._my_moab_core.load_file(filename) self.root_set = self._my_moab_core.get_root_set() self.entity_types = [types.MBVERTEX, types.MBTRI, types.MBENTITYSET] self.entityset_types = {0: 'nodes', 1: 'curves', 2: 'surfaces', 3: 'volumes'} self.native_ranges = {} self.__set_native_ranges() self.dagmc_tags = {} self.__set_dagmc_tags() self.entityset_ranges = {} self.__set_entityset_ranges() self.dim_dict = {} self.__set_dimension_meshset() # if populate is True: # self.__populate_triangle_data(meshset) def __set_native_ranges(self): """Set the class native_ranges variable to a dictionary with MOAB ranges for each of the requested entity types inputs ------ none outputs ------- none """ for entity_type in self.entity_types: self.native_ranges[entity_type] = self._my_moab_core.get_entities_by_type( self.root_set, entity_type) def __set_dagmc_tags(self): """Set the class dagmc_tags variable to a dictionary with the important tags for DAGMC geometries inputs ------ none outputs ------- none """ tag_data_list = {'geom_dim': {'name': 'GEOM_DIMENSION', 'size': 1, 'type': types.MB_TYPE_INTEGER}, 'category': {'name': 'CATEGORY', 'size': 32, 'type': types.MB_TYPE_OPAQUE}, 'global_id': {'name': 'GLOBAL_ID', 'size': 1, 'type': types.MB_TYPE_INTEGER}} for key, tag_data in tag_data_list.items(): self.dagmc_tags[key] = self._my_moab_core.tag_get_handle(tag_data['name'], size=tag_data['size'], tag_type=tag_data['type'], storage_type=types.MB_TAG_SPARSE, create_if_missing=False) def __set_entityset_ranges(self): """Set a dictionary with MOAB Ranges that are specific to the types.MBENTITYSET type inputs ------ 
none outputs ------- none """ for dimension, set_type in self.entityset_types.items(): self.entityset_ranges[set_type] = \ self._my_moab_core.get_entities_by_type_and_tag(self.root_set, types.MBENTITYSET, self.dagmc_tags['geom_dim'], [dimension]) def __set_dimension_meshset(self): """Set the class dim_dict variable to a dictionary with the meshset for each dimension inputs ------ none outputs ------- none """ for set_type, entityset_range in self.entityset_ranges.items(): dim_ms = self._my_moab_core.create_meshset() self._my_moab_core.add_entity(dim_ms, entityset_range) self.dim_dict[set_type] = dim_ms def get_meshset_by_id(self, dim, ids=[]): """Get meshset of the geometry with specified dimension and ids inputs ------ dim : (Integer or String) Dimension of the meshset. 0: 'node(s)', 1: 'curve(s)', 2: 'surface(s)', 3: 'volume(s)' ids : (Integer) Global ID(s) of the meshset outputs ------- meshset : meshset of the geometry with given dimension and ids. First, dim will be checked. If dim is invalid, the root set will be returned. Then, if id is empty, all entities with the given dim will be returned. If is is not in the given dim range), an empty list will be returned. """ plural_names = list(self.entityset_types.values()) sing_names = [name[:-1] for name in plural_names] all_names = plural_names + sing_names if isinstance(dim, int) and dim in self.entityset_types.keys(): dim = self.entityset_types[dim] elif type(dim) == str and dim.lower() in all_names: dim = dim.lower() if dim[-1] != 's': dim = dim + 's' else: # invalid dim warnings.warn('Invalid dim!') return [] # if no id is passed in if len(ids) == 0: return self.entityset_ranges[dim] meshset = [] for id in ids: meshset.extend(self._my_moab_core.get_entities_by_type_and_tag(self.dim_dict[dim], types.MBENTITYSET, self.dagmc_tags['global_id'], [id])) # if id is not in the given dim range if not meshset: warnings.warn( 'ID is not in the given dimension range! 
' + 'Empty list will be returned.') return meshset ''' def __populate_triangle_data(self, meshset): """Populate triangle areas and triangle aspect ratios inputs ------ meshset : set of entities that are used to populate data. By default, the root set will be used and data of the whole geometry will be populated. outputs ------- none """ tris = self.get_tris(meshset) for tri in tris: if tri not in self._tri_data.tri_eh: tri_data_row = {'tri_eh': tri} side_lengths = list(self.get_tri_side_length(tri).values()) s = .5*(sum(side_lengths)) p = np.prod(s - side_lengths) # sqrt(s(s - a)(s - b)(s - c)), where s = (a + b + c)/2 tri_data_row['area'] = np.sqrt(s * p) tri_data_row['aspect_ratio'] = np.prod(side_lengths) / (8 * p) self._tri_data = self._tri_data.append(tri_data_row, ignore_index=True) return def get_tri_side_length(self, tri): """ Get side lengths of triangle inputs ------ tri : triangle entity outputs ------- side_lengths : a dictionary that stores vert : the opposite side length of the vert as key-value pair """ side_lengths = {} s = 0 coord_list = [] verts = list(self._my_moab_core.get_adjacencies(tri, 0)) for vert in verts: coords = self._my_moab_core.get_coords(vert) coord_list.append(coords) for side in range(3): side_lengths.update({verts[side-1]: np.linalg.norm(coord_list[side] - coord_list[side-2])}) # Although it may not be intuitive, the indexing of these lists takes # advantage of python's indexing syntax to rotate through # the `verts` of the triangle while simultaneously referencing the side # opposite each of the `verts` by the coordinates of the vertices that # define that side: # side side-1 index(side-1) side-2 index(side-2) # 0 -1 2 -2 1 # 1 0 0 -1 2 # 2 1 1 0 0 return side_lengths '''
import pandas as pd import numpy as np from pymoab.rng import Range from pymoab import core, types import warnings class DagmcFile: def __init__(self, filename, populate=False): """Constructor inputs ------ filename : name of the file populate : boolean value that determines whether or not to populate the data outputs ------- none """ # read file self._my_moab_core = core.Core() self._my_moab_core.load_file(filename) self.root_set = self._my_moab_core.get_root_set() self.entity_types = [types.MBVERTEX, types.MBTRI, types.MBENTITYSET] self.entityset_types = {0: 'nodes', 1: 'curves', 2: 'surfaces', 3: 'volumes'} self.native_ranges = {} self.__set_native_ranges() self.dagmc_tags = {} self.__set_dagmc_tags() self.entityset_ranges = {} self.__set_entityset_ranges() self.dim_dict = {} self.__set_dimension_meshset() # if populate is True: # self.__populate_triangle_data(meshset) def __set_native_ranges(self): """Set the class native_ranges variable to a dictionary with MOAB ranges for each of the requested entity types inputs ------ none outputs ------- none """ for entity_type in self.entity_types: self.native_ranges[entity_type] = self._my_moab_core.get_entities_by_type( self.root_set, entity_type) def __set_dagmc_tags(self): """Set the class dagmc_tags variable to a dictionary with the important tags for DAGMC geometries inputs ------ none outputs ------- none """ tag_data_list = {'geom_dim': {'name': 'GEOM_DIMENSION', 'size': 1, 'type': types.MB_TYPE_INTEGER}, 'category': {'name': 'CATEGORY', 'size': 32, 'type': types.MB_TYPE_OPAQUE}, 'global_id': {'name': 'GLOBAL_ID', 'size': 1, 'type': types.MB_TYPE_INTEGER}} for key, tag_data in tag_data_list.items(): self.dagmc_tags[key] = self._my_moab_core.tag_get_handle(tag_data['name'], size=tag_data['size'], tag_type=tag_data['type'], storage_type=types.MB_TAG_SPARSE, create_if_missing=False) def __set_entityset_ranges(self): """Set a dictionary with MOAB Ranges that are specific to the types.MBENTITYSET type inputs ------ 
none outputs ------- none """ for dimension, set_type in self.entityset_types.items(): self.entityset_ranges[set_type] = \ self._my_moab_core.get_entities_by_type_and_tag(self.root_set, types.MBENTITYSET, self.dagmc_tags['geom_dim'], [dimension]) def __set_dimension_meshset(self): """Set the class dim_dict variable to a dictionary with the meshset for each dimension inputs ------ none outputs ------- none """ for set_type, entityset_range in self.entityset_ranges.items(): dim_ms = self._my_moab_core.create_meshset() self._my_moab_core.add_entity(dim_ms, entityset_range) self.dim_dict[set_type] = dim_ms def get_meshset_by_id(self, dim, ids=[]): """Get meshset of the geometry with specified dimension and ids inputs ------ dim : (Integer or String) Dimension of the meshset. 0: 'node(s)', 1: 'curve(s)', 2: 'surface(s)', 3: 'volume(s)' ids : (Integer) Global ID(s) of the meshset outputs ------- meshset : meshset of the geometry with given dimension and ids. First, dim will be checked. If dim is invalid, the root set will be returned. Then, if id is empty, all entities with the given dim will be returned. If is is not in the given dim range), an empty list will be returned. """ plural_names = list(self.entityset_types.values()) sing_names = [name[:-1] for name in plural_names] all_names = plural_names + sing_names if isinstance(dim, int) and dim in self.entityset_types.keys(): dim = self.entityset_types[dim] elif type(dim) == str and dim.lower() in all_names: dim = dim.lower() if dim[-1] != 's': dim = dim + 's' else: # invalid dim warnings.warn('Invalid dim!') return [] # if no id is passed in if len(ids) == 0: return self.entityset_ranges[dim] meshset = [] for id in ids: meshset.extend(self._my_moab_core.get_entities_by_type_and_tag(self.dim_dict[dim], types.MBENTITYSET, self.dagmc_tags['global_id'], [id])) # if id is not in the given dim range if not meshset: warnings.warn( 'ID is not in the given dimension range! 
' + 'Empty list will be returned.') return meshset ''' def __populate_triangle_data(self, meshset): """Populate triangle areas and triangle aspect ratios inputs ------ meshset : set of entities that are used to populate data. By default, the root set will be used and data of the whole geometry will be populated. outputs ------- none """ tris = self.get_tris(meshset) for tri in tris: if tri not in self._tri_data.tri_eh: tri_data_row = {'tri_eh': tri} side_lengths = list(self.get_tri_side_length(tri).values()) s = .5*(sum(side_lengths)) p = np.prod(s - side_lengths) # sqrt(s(s - a)(s - b)(s - c)), where s = (a + b + c)/2 tri_data_row['area'] = np.sqrt(s * p) tri_data_row['aspect_ratio'] = np.prod(side_lengths) / (8 * p) self._tri_data = self._tri_data.append(tri_data_row, ignore_index=True) return def get_tri_side_length(self, tri): """ Get side lengths of triangle inputs ------ tri : triangle entity outputs ------- side_lengths : a dictionary that stores vert : the opposite side length of the vert as key-value pair """ side_lengths = {} s = 0 coord_list = [] verts = list(self._my_moab_core.get_adjacencies(tri, 0)) for vert in verts: coords = self._my_moab_core.get_coords(vert) coord_list.append(coords) for side in range(3): side_lengths.update({verts[side-1]: np.linalg.norm(coord_list[side] - coord_list[side-2])}) # Although it may not be intuitive, the indexing of these lists takes # advantage of python's indexing syntax to rotate through # the `verts` of the triangle while simultaneously referencing the side # opposite each of the `verts` by the coordinates of the vertices that # define that side: # side side-1 index(side-1) side-2 index(side-2) # 0 -1 2 -2 1 # 1 0 0 -1 2 # 2 1 1 0 0 return side_lengths '''
en
0.494838
Constructor inputs ------ filename : name of the file populate : boolean value that determines whether or not to populate the data outputs ------- none # read file # if populate is True: # self.__populate_triangle_data(meshset) Set the class native_ranges variable to a dictionary with MOAB ranges for each of the requested entity types inputs ------ none outputs ------- none Set the class dagmc_tags variable to a dictionary with the important tags for DAGMC geometries inputs ------ none outputs ------- none Set a dictionary with MOAB Ranges that are specific to the types.MBENTITYSET type inputs ------ none outputs ------- none Set the class dim_dict variable to a dictionary with the meshset for each dimension inputs ------ none outputs ------- none Get meshset of the geometry with specified dimension and ids inputs ------ dim : (Integer or String) Dimension of the meshset. 0: 'node(s)', 1: 'curve(s)', 2: 'surface(s)', 3: 'volume(s)' ids : (Integer) Global ID(s) of the meshset outputs ------- meshset : meshset of the geometry with given dimension and ids. First, dim will be checked. If dim is invalid, the root set will be returned. Then, if id is empty, all entities with the given dim will be returned. If is is not in the given dim range), an empty list will be returned. # invalid dim # if no id is passed in # if id is not in the given dim range def __populate_triangle_data(self, meshset): """Populate triangle areas and triangle aspect ratios inputs ------ meshset : set of entities that are used to populate data. By default, the root set will be used and data of the whole geometry will be populated. 
outputs ------- none """ tris = self.get_tris(meshset) for tri in tris: if tri not in self._tri_data.tri_eh: tri_data_row = {'tri_eh': tri} side_lengths = list(self.get_tri_side_length(tri).values()) s = .5*(sum(side_lengths)) p = np.prod(s - side_lengths) # sqrt(s(s - a)(s - b)(s - c)), where s = (a + b + c)/2 tri_data_row['area'] = np.sqrt(s * p) tri_data_row['aspect_ratio'] = np.prod(side_lengths) / (8 * p) self._tri_data = self._tri_data.append(tri_data_row, ignore_index=True) return def get_tri_side_length(self, tri): """ Get side lengths of triangle inputs ------ tri : triangle entity outputs ------- side_lengths : a dictionary that stores vert : the opposite side length of the vert as key-value pair """ side_lengths = {} s = 0 coord_list = [] verts = list(self._my_moab_core.get_adjacencies(tri, 0)) for vert in verts: coords = self._my_moab_core.get_coords(vert) coord_list.append(coords) for side in range(3): side_lengths.update({verts[side-1]: np.linalg.norm(coord_list[side] - coord_list[side-2])}) # Although it may not be intuitive, the indexing of these lists takes # advantage of python's indexing syntax to rotate through # the `verts` of the triangle while simultaneously referencing the side # opposite each of the `verts` by the coordinates of the vertices that # define that side: # side side-1 index(side-1) side-2 index(side-2) # 0 -1 2 -2 1 # 1 0 0 -1 2 # 2 1 1 0 0 return side_lengths
2.498678
2
model.py
pskrunner14/face-gan
1
6623044
<filename>model.py """ Deep Convolutional Generative Adversarial Network (DCGAN). Using deep convolutional generative adversarial networks (DCGAN) to generate face images from a noise distribution. References: - Generative Adversarial Nets. Goodfellow et al. arXiv: 1406.2661. - Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks. <NAME>, <NAME>, <NAME>. arXiv: 1511.06434. Links: - [GAN Paper](https://arxiv.org/pdf/1406.2661.pdf). - [DCGAN Paper](https://arxiv.org/abs/1511.06434.pdf). Author: <NAME> Project: https://github.com/pskrunner14/face-DCGAN """ import numpy as np import tensorflow as tf from ops import ( dense_layer, conv_layer, deconv_layer ) def generator(input_noise, train=True): """ Creates convolutional generator model. See https://arxiv.org/abs/1511.06434.pdf. Args: input_noise (tf.placeholder): Input noise distribution tensor. train (bool, optional): Flag for whether to freeze batch-norm layer vars. If unspecified, defaults to `True`. Returns: Tensor containing images generated from the noise distribution. """ dense_1_shape = [8, 8, 10] dense_1_units = np.prod(dense_1_shape) # We need to pass `batch_size` for using in `output_shape` in deconv op. 
# See https://riptutorial.com/tensorflow/example/29767/using-tf-nn-conv2d-transpose-for-arbitary-batch-sizes-and-with-automatic-output-shape-calculation- batch_size = tf.shape(input_noise)[0] with tf.variable_scope('generator', reuse=tf.AUTO_REUSE) as scope: dense_1 = dense_layer(input_noise, train, units=dense_1_units, name='dense_1') dense_1_reshaped = tf.reshape(dense_1, shape=[-1, ] + dense_1_shape, name='dense_1_reshaped') deconv_1 = deconv_layer(dense_1_reshaped, train, kernel_dims=(5, 5), in_channels=dense_1_shape[-1], out_channels=64, batch_size=batch_size, name='deconv_1') deconv_2 = deconv_layer(deconv_1, train, kernel_dims=(5, 5), in_channels=64, out_channels=64, batch_size=batch_size, name='deconv_2') # H, W = deconv_2.get_shape().as_list()[1: 3] # upsampled_deconv_2 = tf.image.resize_nearest_neighbor(deconv_2, (2 * H, 2 * W), name='upsampled_deconv_2') upsampled_deconv_2 = tf.keras.layers.UpSampling2D(size=(2, 2))(deconv_2) deconv_3 = deconv_layer(upsampled_deconv_2, train, kernel_dims=(7, 7), in_channels=64, out_channels=32, batch_size=batch_size, name='deconv_3') logits = conv_layer(deconv_3, train, kernel_dims=(3, 3), in_channels=32, out_channels=3, name='logits', padding='VALID', use_avgpool=False, use_batchnorm=False, activation=None) out = tf.nn.tanh(logits, name=scope.name) return out def discriminator(image_data, train=True): """ Creates convolutional discriminator model. See https://arxiv.org/abs/1511.06434.pdf. Args: image_data (tf.placeholder): Tensor containing real/fake images to classify. train (bool, optional): Flag for whether to freeze batch-norm layer vars. If unspecified, defaults to `True`. Returns: Tensors containing probabilites and logits pertaining to input images being real/fake. 
""" with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE) as scope: conv_1 = conv_layer(image_data, train, kernel_dims=(3, 3), in_channels=3, out_channels=32, name='conv_1') conv_2 = conv_layer(conv_1, train, kernel_dims=(3, 3), in_channels=32, out_channels=32, name='conv_2', strides=(2, 2)) dim = np.prod(conv_2.get_shape().as_list()[1: ]) flattened_1 = tf.reshape(conv_2, [-1, dim]) dense_1 = dense_layer(flattened_1, train, 256, name='dense_1') logits = dense_layer(dense_1, train, 1, name='logits', use_batchnorm=False, activation=None) probs = tf.nn.sigmoid(logits, name=scope.name) return probs, logits
<filename>model.py """ Deep Convolutional Generative Adversarial Network (DCGAN). Using deep convolutional generative adversarial networks (DCGAN) to generate face images from a noise distribution. References: - Generative Adversarial Nets. Goodfellow et al. arXiv: 1406.2661. - Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks. <NAME>, <NAME>, <NAME>. arXiv: 1511.06434. Links: - [GAN Paper](https://arxiv.org/pdf/1406.2661.pdf). - [DCGAN Paper](https://arxiv.org/abs/1511.06434.pdf). Author: <NAME> Project: https://github.com/pskrunner14/face-DCGAN """ import numpy as np import tensorflow as tf from ops import ( dense_layer, conv_layer, deconv_layer ) def generator(input_noise, train=True): """ Creates convolutional generator model. See https://arxiv.org/abs/1511.06434.pdf. Args: input_noise (tf.placeholder): Input noise distribution tensor. train (bool, optional): Flag for whether to freeze batch-norm layer vars. If unspecified, defaults to `True`. Returns: Tensor containing images generated from the noise distribution. """ dense_1_shape = [8, 8, 10] dense_1_units = np.prod(dense_1_shape) # We need to pass `batch_size` for using in `output_shape` in deconv op. 
# See https://riptutorial.com/tensorflow/example/29767/using-tf-nn-conv2d-transpose-for-arbitary-batch-sizes-and-with-automatic-output-shape-calculation- batch_size = tf.shape(input_noise)[0] with tf.variable_scope('generator', reuse=tf.AUTO_REUSE) as scope: dense_1 = dense_layer(input_noise, train, units=dense_1_units, name='dense_1') dense_1_reshaped = tf.reshape(dense_1, shape=[-1, ] + dense_1_shape, name='dense_1_reshaped') deconv_1 = deconv_layer(dense_1_reshaped, train, kernel_dims=(5, 5), in_channels=dense_1_shape[-1], out_channels=64, batch_size=batch_size, name='deconv_1') deconv_2 = deconv_layer(deconv_1, train, kernel_dims=(5, 5), in_channels=64, out_channels=64, batch_size=batch_size, name='deconv_2') # H, W = deconv_2.get_shape().as_list()[1: 3] # upsampled_deconv_2 = tf.image.resize_nearest_neighbor(deconv_2, (2 * H, 2 * W), name='upsampled_deconv_2') upsampled_deconv_2 = tf.keras.layers.UpSampling2D(size=(2, 2))(deconv_2) deconv_3 = deconv_layer(upsampled_deconv_2, train, kernel_dims=(7, 7), in_channels=64, out_channels=32, batch_size=batch_size, name='deconv_3') logits = conv_layer(deconv_3, train, kernel_dims=(3, 3), in_channels=32, out_channels=3, name='logits', padding='VALID', use_avgpool=False, use_batchnorm=False, activation=None) out = tf.nn.tanh(logits, name=scope.name) return out def discriminator(image_data, train=True): """ Creates convolutional discriminator model. See https://arxiv.org/abs/1511.06434.pdf. Args: image_data (tf.placeholder): Tensor containing real/fake images to classify. train (bool, optional): Flag for whether to freeze batch-norm layer vars. If unspecified, defaults to `True`. Returns: Tensors containing probabilites and logits pertaining to input images being real/fake. 
""" with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE) as scope: conv_1 = conv_layer(image_data, train, kernel_dims=(3, 3), in_channels=3, out_channels=32, name='conv_1') conv_2 = conv_layer(conv_1, train, kernel_dims=(3, 3), in_channels=32, out_channels=32, name='conv_2', strides=(2, 2)) dim = np.prod(conv_2.get_shape().as_list()[1: ]) flattened_1 = tf.reshape(conv_2, [-1, dim]) dense_1 = dense_layer(flattened_1, train, 256, name='dense_1') logits = dense_layer(dense_1, train, 1, name='logits', use_batchnorm=False, activation=None) probs = tf.nn.sigmoid(logits, name=scope.name) return probs, logits
en
0.648709
Deep Convolutional Generative Adversarial Network (DCGAN). Using deep convolutional generative adversarial networks (DCGAN) to generate face images from a noise distribution. References: - Generative Adversarial Nets. Goodfellow et al. arXiv: 1406.2661. - Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks. <NAME>, <NAME>, <NAME>. arXiv: 1511.06434. Links: - [GAN Paper](https://arxiv.org/pdf/1406.2661.pdf). - [DCGAN Paper](https://arxiv.org/abs/1511.06434.pdf). Author: <NAME> Project: https://github.com/pskrunner14/face-DCGAN Creates convolutional generator model. See https://arxiv.org/abs/1511.06434.pdf. Args: input_noise (tf.placeholder): Input noise distribution tensor. train (bool, optional): Flag for whether to freeze batch-norm layer vars. If unspecified, defaults to `True`. Returns: Tensor containing images generated from the noise distribution. # We need to pass `batch_size` for using in `output_shape` in deconv op. # See https://riptutorial.com/tensorflow/example/29767/using-tf-nn-conv2d-transpose-for-arbitary-batch-sizes-and-with-automatic-output-shape-calculation- # H, W = deconv_2.get_shape().as_list()[1: 3] # upsampled_deconv_2 = tf.image.resize_nearest_neighbor(deconv_2, (2 * H, 2 * W), name='upsampled_deconv_2') Creates convolutional discriminator model. See https://arxiv.org/abs/1511.06434.pdf. Args: image_data (tf.placeholder): Tensor containing real/fake images to classify. train (bool, optional): Flag for whether to freeze batch-norm layer vars. If unspecified, defaults to `True`. Returns: Tensors containing probabilites and logits pertaining to input images being real/fake.
2.965709
3
module/japanese_jisho.py
liangguo/ankiword
1
6623045
import urllib.request; from urllib.parse import quote from bs4 import BeautifulSoup import subprocess import platform import datetime import json import wget import re from re import compile as _Re _unicode_chr_splitter = _Re( '(?s)((?:[\u2e80-\u9fff])|.)' ).split def LookUp(word, data): result = {} # Eliminate the end of line delimiter word = word.splitlines()[0] wordUrl = urllib.parse.quote(word, safe='') url="http://jisho.org/search/{}".format(wordUrl) content = urllib.request.urlopen(url).read() soup = BeautifulSoup(content, 'lxml') front_word = "" back_word = "" furi = "" furiChild = [] furiList = [] text = "" textChild = [] textList = [] reading = "" cnt = 0 download_dir = "" if "download_dir" in data: download_dir = data['download_dir'] if word == "": return None wrongSpelling = soup.find('div', id='no-matches') if wrongSpelling is not None: return None print(" ") print('<<'+word+'>>') print(" ") for i in soup.find_all('div', class_='exact_block'): firstBlock = i.find('div', class_='concept_light clearfix') partJP = firstBlock.find('div', class_='concept_light-wrapper') partEN = firstBlock.find('div', class_='concept_light-meanings') status = partJP.find('div', class_='concept_light-status') if(status != None): audio = status.find('audio') if audio != None and download_dir != "": source = audio.find('source') wget.download('http:'+source['src'], out=download_dir+"Jp_"+word+".mp3") # Insert the sound media into the card front_word += "[sound:Jp_"+word+".mp3]" front_word += word + "<br>" for j in partJP.find_all('span', class_='furigana'): furiCnt=0 for child in j.children: furiChild.append(child.string) furiCnt = furiCnt + 1 furiList = list(filter(("\n").__ne__, furiChild)) for j in partJP.find_all('span', class_='text'): textCnt = 0 for child in j.children: textChild.append(child.string) textCnt = textCnt + 1 for k in range(0,len(textChild)): for chr in _unicode_chr_splitter( textChild[k] ): if chr != '\n' and chr != ' ' and chr != '': textList.append(chr) 
for j in range(0,len(textList)): if(furiList[j] == None): reading += textList[j] else: reading += " " + textList[j] + "[" + furiList[j] + "]" for j in partEN.find_all('div', class_="meanings-wrapper"): for k in j.find_all('div', class_="meaning-wrapper"): cnt = cnt + 1 back_word += str(cnt) + '. ' for q in k.find_all('span', class_="meaning-meaning"): back_word += q.get_text() + '<br>' result['read_word'] = reading result['front_word'] = front_word result['back_word'] = back_word return result
import urllib.request; from urllib.parse import quote from bs4 import BeautifulSoup import subprocess import platform import datetime import json import wget import re from re import compile as _Re _unicode_chr_splitter = _Re( '(?s)((?:[\u2e80-\u9fff])|.)' ).split def LookUp(word, data): result = {} # Eliminate the end of line delimiter word = word.splitlines()[0] wordUrl = urllib.parse.quote(word, safe='') url="http://jisho.org/search/{}".format(wordUrl) content = urllib.request.urlopen(url).read() soup = BeautifulSoup(content, 'lxml') front_word = "" back_word = "" furi = "" furiChild = [] furiList = [] text = "" textChild = [] textList = [] reading = "" cnt = 0 download_dir = "" if "download_dir" in data: download_dir = data['download_dir'] if word == "": return None wrongSpelling = soup.find('div', id='no-matches') if wrongSpelling is not None: return None print(" ") print('<<'+word+'>>') print(" ") for i in soup.find_all('div', class_='exact_block'): firstBlock = i.find('div', class_='concept_light clearfix') partJP = firstBlock.find('div', class_='concept_light-wrapper') partEN = firstBlock.find('div', class_='concept_light-meanings') status = partJP.find('div', class_='concept_light-status') if(status != None): audio = status.find('audio') if audio != None and download_dir != "": source = audio.find('source') wget.download('http:'+source['src'], out=download_dir+"Jp_"+word+".mp3") # Insert the sound media into the card front_word += "[sound:Jp_"+word+".mp3]" front_word += word + "<br>" for j in partJP.find_all('span', class_='furigana'): furiCnt=0 for child in j.children: furiChild.append(child.string) furiCnt = furiCnt + 1 furiList = list(filter(("\n").__ne__, furiChild)) for j in partJP.find_all('span', class_='text'): textCnt = 0 for child in j.children: textChild.append(child.string) textCnt = textCnt + 1 for k in range(0,len(textChild)): for chr in _unicode_chr_splitter( textChild[k] ): if chr != '\n' and chr != ' ' and chr != '': textList.append(chr) 
for j in range(0,len(textList)): if(furiList[j] == None): reading += textList[j] else: reading += " " + textList[j] + "[" + furiList[j] + "]" for j in partEN.find_all('div', class_="meanings-wrapper"): for k in j.find_all('div', class_="meaning-wrapper"): cnt = cnt + 1 back_word += str(cnt) + '. ' for q in k.find_all('span', class_="meaning-meaning"): back_word += q.get_text() + '<br>' result['read_word'] = reading result['front_word'] = front_word result['back_word'] = back_word return result
en
0.790026
# Eliminate the end of line delimiter # Insert the sound media into the card
2.776248
3
tests/browser/pages/invest/landing.py
mayank-sfdc/directory-tests
4
6623046
# -*- coding: utf-8 -*- """Invest in Great Home Page Object.""" import logging from typing import List from selenium.webdriver.common.by import By from selenium.webdriver.remote.webdriver import WebDriver from directory_tests_shared import URLs from directory_tests_shared.enums import PageType, Service from pages import ElementType, common_selectors from pages.common_actions import ( Selector, check_for_sections, check_url, find_element, go_to_url, ) NAME = "landing" URL = URLs.INVEST_LANDING.absolute SERVICE = Service.INVEST TYPE = PageType.LANDING PAGE_TITLE = "Invest in Great Britain - Home" SELECTORS = { "benefits": { "self": Selector(By.ID, "benefits"), "heading": Selector(By.CSS_SELECTOR, "#benefits h2"), "sub-section headings": Selector(By.CSS_SELECTOR, "#benefits h3"), "text": Selector(By.CSS_SELECTOR, "#benefits p"), "image": Selector(By.CSS_SELECTOR, "#benefits img"), }, "sectors": { "self": Selector(By.ID, "industries"), "heading": Selector(By.CSS_SELECTOR, "#industries h2"), "heading text": Selector(By.CSS_SELECTOR, "#industries h2 ~ div > p"), "first": Selector(By.CSS_SELECTOR, "#industries div:nth-child(1) > div > a"), "second": Selector(By.CSS_SELECTOR, "#industries div:nth-child(2) > div > a"), "third": Selector(By.CSS_SELECTOR, "#industries div:nth-child(3) > div > a"), "see more industries": Selector(By.ID, "see-more-industries"), }, "high-potential opportunities": { "hpo - section": Selector(By.ID, "high-potential-opportunities"), "hpo - headings": Selector(By.CSS_SELECTOR, "#high-potential-opportunities h2"), "hpo - texts": Selector( By.CSS_SELECTOR, "#high-potential-opportunities h2 ~ div > p" ), "aquaculture": Selector(By.PARTIAL_LINK_TEXT, "Aquaculture"), "high productivity food production": Selector( By.PARTIAL_LINK_TEXT, "High productivity food production" ), "high productivity food production (dev)": Selector( By.PARTIAL_LINK_TEXT, "High productivity food production" ), "high productivity food production (staging)": Selector( 
By.PARTIAL_LINK_TEXT, "High productivity food production" ), "lightweight structures": Selector( By.PARTIAL_LINK_TEXT, "Lightweight structures" ), "photonics and microelectronics": Selector( By.PARTIAL_LINK_TEXT, "Photonics and Microelectronics" ), "rail infrastructure": Selector(By.PARTIAL_LINK_TEXT, "Rail infrastructure"), "space": Selector(By.PARTIAL_LINK_TEXT, "Space"), "sustainable packaging": Selector( By.PARTIAL_LINK_TEXT, "Sustainable packaging" ), }, "how we help": { "how we help - section": Selector(By.ID, "how-we-help"), "how we help - icons": Selector(By.CSS_SELECTOR, "#how-we-help ul li img"), "how we help - texts": Selector(By.CSS_SELECTOR, "#how-we-help ul li p"), "find out more": Selector( By.CSS_SELECTOR, "#how-we-help a", type=ElementType.LINK ), }, "contact us": { "self": Selector(By.ID, "get-in-touch"), "heading": Selector(By.CSS_SELECTOR, "#get-in-touch h2"), "text": Selector(By.CSS_SELECTOR, "#get-in-touch p"), "speak to us": Selector( By.CSS_SELECTOR, "#get-in-touch a", type=ElementType.LINK ), }, } SELECTORS.update(common_selectors.INVEST_HEADER) SELECTORS.update(common_selectors.INVEST_HERO) SELECTORS.update(common_selectors.BREADCRUMBS) SELECTORS.update(common_selectors.ERROR_REPORTING) SELECTORS.update(common_selectors.INTERNATIONAL_FOOTER) def visit(driver: WebDriver): go_to_url(driver, URL, NAME) def should_be_here(driver: WebDriver): check_url(driver, URL) logging.debug("All expected elements are visible on '%s' page", PAGE_TITLE) def should_see_sections(driver: WebDriver, names: List[str]): check_for_sections(driver, all_sections=SELECTORS, sought_sections=names) def clean_name(name: str) -> str: return name.split(" - ")[1].strip() def open_industry(driver: WebDriver, industry_name: str): industry_name = clean_name(industry_name) selector = Selector(By.PARTIAL_LINK_TEXT, industry_name) logging.debug("Looking for: {}".format(industry_name)) industry_link = find_element( driver, selector, element_name="Industry card", wait_for_it=False 
) industry_link.click() def open_guide(driver: WebDriver, guide_name: str): guide_name = clean_name(guide_name) selector = Selector(By.PARTIAL_LINK_TEXT, guide_name) logging.debug("Looking for: {}".format(guide_name)) guide = find_element(driver, selector, element_name="Guide card", wait_for_it=False) guide.click()
# -*- coding: utf-8 -*- """Invest in Great Home Page Object.""" import logging from typing import List from selenium.webdriver.common.by import By from selenium.webdriver.remote.webdriver import WebDriver from directory_tests_shared import URLs from directory_tests_shared.enums import PageType, Service from pages import ElementType, common_selectors from pages.common_actions import ( Selector, check_for_sections, check_url, find_element, go_to_url, ) NAME = "landing" URL = URLs.INVEST_LANDING.absolute SERVICE = Service.INVEST TYPE = PageType.LANDING PAGE_TITLE = "Invest in Great Britain - Home" SELECTORS = { "benefits": { "self": Selector(By.ID, "benefits"), "heading": Selector(By.CSS_SELECTOR, "#benefits h2"), "sub-section headings": Selector(By.CSS_SELECTOR, "#benefits h3"), "text": Selector(By.CSS_SELECTOR, "#benefits p"), "image": Selector(By.CSS_SELECTOR, "#benefits img"), }, "sectors": { "self": Selector(By.ID, "industries"), "heading": Selector(By.CSS_SELECTOR, "#industries h2"), "heading text": Selector(By.CSS_SELECTOR, "#industries h2 ~ div > p"), "first": Selector(By.CSS_SELECTOR, "#industries div:nth-child(1) > div > a"), "second": Selector(By.CSS_SELECTOR, "#industries div:nth-child(2) > div > a"), "third": Selector(By.CSS_SELECTOR, "#industries div:nth-child(3) > div > a"), "see more industries": Selector(By.ID, "see-more-industries"), }, "high-potential opportunities": { "hpo - section": Selector(By.ID, "high-potential-opportunities"), "hpo - headings": Selector(By.CSS_SELECTOR, "#high-potential-opportunities h2"), "hpo - texts": Selector( By.CSS_SELECTOR, "#high-potential-opportunities h2 ~ div > p" ), "aquaculture": Selector(By.PARTIAL_LINK_TEXT, "Aquaculture"), "high productivity food production": Selector( By.PARTIAL_LINK_TEXT, "High productivity food production" ), "high productivity food production (dev)": Selector( By.PARTIAL_LINK_TEXT, "High productivity food production" ), "high productivity food production (staging)": Selector( 
By.PARTIAL_LINK_TEXT, "High productivity food production" ), "lightweight structures": Selector( By.PARTIAL_LINK_TEXT, "Lightweight structures" ), "photonics and microelectronics": Selector( By.PARTIAL_LINK_TEXT, "Photonics and Microelectronics" ), "rail infrastructure": Selector(By.PARTIAL_LINK_TEXT, "Rail infrastructure"), "space": Selector(By.PARTIAL_LINK_TEXT, "Space"), "sustainable packaging": Selector( By.PARTIAL_LINK_TEXT, "Sustainable packaging" ), }, "how we help": { "how we help - section": Selector(By.ID, "how-we-help"), "how we help - icons": Selector(By.CSS_SELECTOR, "#how-we-help ul li img"), "how we help - texts": Selector(By.CSS_SELECTOR, "#how-we-help ul li p"), "find out more": Selector( By.CSS_SELECTOR, "#how-we-help a", type=ElementType.LINK ), }, "contact us": { "self": Selector(By.ID, "get-in-touch"), "heading": Selector(By.CSS_SELECTOR, "#get-in-touch h2"), "text": Selector(By.CSS_SELECTOR, "#get-in-touch p"), "speak to us": Selector( By.CSS_SELECTOR, "#get-in-touch a", type=ElementType.LINK ), }, } SELECTORS.update(common_selectors.INVEST_HEADER) SELECTORS.update(common_selectors.INVEST_HERO) SELECTORS.update(common_selectors.BREADCRUMBS) SELECTORS.update(common_selectors.ERROR_REPORTING) SELECTORS.update(common_selectors.INTERNATIONAL_FOOTER) def visit(driver: WebDriver): go_to_url(driver, URL, NAME) def should_be_here(driver: WebDriver): check_url(driver, URL) logging.debug("All expected elements are visible on '%s' page", PAGE_TITLE) def should_see_sections(driver: WebDriver, names: List[str]): check_for_sections(driver, all_sections=SELECTORS, sought_sections=names) def clean_name(name: str) -> str: return name.split(" - ")[1].strip() def open_industry(driver: WebDriver, industry_name: str): industry_name = clean_name(industry_name) selector = Selector(By.PARTIAL_LINK_TEXT, industry_name) logging.debug("Looking for: {}".format(industry_name)) industry_link = find_element( driver, selector, element_name="Industry card", wait_for_it=False 
) industry_link.click() def open_guide(driver: WebDriver, guide_name: str): guide_name = clean_name(guide_name) selector = Selector(By.PARTIAL_LINK_TEXT, guide_name) logging.debug("Looking for: {}".format(guide_name)) guide = find_element(driver, selector, element_name="Guide card", wait_for_it=False) guide.click()
en
0.756747
# -*- coding: utf-8 -*- Invest in Great Home Page Object.
2.446408
2
designer_door_mat.py
nirobio/puzzles
0
6623047
# Hackerrank - Designer Door Mat # repetitions of '.|.' with remaining L/R filled with '-' # top and bottom of central 'WELCOME' belt are symmetrical (hence N//2) N, M = map(int, input().split()) pattern = [('.|.' * (2 * i + 1)).center(M, '-') for i in range(N // 2)] print('\n'.join(pattern + ['WELCOME'.center(M, '-')] + pattern[::-1]))
# Hackerrank - Designer Door Mat # repetitions of '.|.' with remaining L/R filled with '-' # top and bottom of central 'WELCOME' belt are symmetrical (hence N//2) N, M = map(int, input().split()) pattern = [('.|.' * (2 * i + 1)).center(M, '-') for i in range(N // 2)] print('\n'.join(pattern + ['WELCOME'.center(M, '-')] + pattern[::-1]))
en
0.816314
# Hackerrank - Designer Door Mat # repetitions of '.|.' with remaining L/R filled with '-' # top and bottom of central 'WELCOME' belt are symmetrical (hence N//2)
3.10084
3
tests/use_cases/test_git_clone.py
staticdev/github-portfolio
0
6623048
"""Test cases for the git clone use case.""" from typing import Any import pytest from pytest_mock import MockerFixture from tests.conftest import ERROR_MSG from tests.conftest import REPO from tests.conftest import REPO2 from tests.conftest import REPO_NAME import git_portfolio.responses as res from git_portfolio.use_cases import git_clone as gcuc @pytest.fixture def mock_popen(mocker: MockerFixture) -> Any: """Fixture for mocking subprocess.Popen.""" mock = mocker.patch("subprocess.Popen") mock.return_value.returncode = 0 mock.return_value.communicate.return_value = (b"some output", b"") return mock @pytest.fixture def mock_github_service(mocker: MockerFixture) -> MockerFixture: """Fixture for mocking GithubService.""" return mocker.patch("git_portfolio.github_service.GithubService", autospec=True) @pytest.fixture def mock_command_checker(mocker: MockerFixture) -> Any: """Fixture for mocking CommandChecker.check.""" return mocker.patch( "git_portfolio.use_cases.command_checker.CommandChecker.check", return_value="", ) def test_execute_success( mock_github_service: MockerFixture, mock_command_checker: MockerFixture, mock_popen: MockerFixture, ) -> None: """It returns success messages.""" github_service = mock_github_service.return_value responses = gcuc.GitCloneUseCase(github_service).execute([REPO, REPO2]) assert len(responses) == 2 assert isinstance(responses[0], res.ResponseSuccess) assert responses[0].value == f"{REPO_NAME}: clone successful.\n" def test_execute_git_not_installed( mock_github_service: MockerFixture, mock_command_checker: MockerFixture, ) -> None: """It returns failure with git not installed message.""" mock_command_checker.return_value = ERROR_MSG github_service = mock_github_service.return_value responses = gcuc.GitCloneUseCase(github_service).execute([REPO]) mock_command_checker.assert_called_with("git") assert isinstance(responses[0], res.ResponseFailure) assert ERROR_MSG == responses[0].value["message"] @pytest.mark.e2e def 
test_execute_git_not_installed_e2e( mock_github_service: MockerFixture, mock_popen: MockerFixture ) -> None: """It returns failure with git not installed message.""" github_service = mock_github_service.return_value mock_popen.side_effect = FileNotFoundError responses = gcuc.GitCloneUseCase(github_service).execute([REPO]) assert isinstance(responses[0], res.ResponseFailure) assert ( "This command requires git executable installed and on system path." == responses[0].value["message"] ) def test_execute_error_during_execution( mock_github_service: MockerFixture, mock_command_checker: MockerFixture, mock_popen: MockerFixture, ) -> None: """It returns error.""" github_service = mock_github_service.return_value mock_popen.return_value.returncode = 1 mock_popen().communicate.return_value = ( b"", ( f"fatal: destination path '{REPO_NAME}' already exists and is not " "an empty directory.\n" ).encode(), ) responses = gcuc.GitCloneUseCase(github_service).execute([REPO]) assert isinstance(responses[0], res.ResponseFailure) assert responses[0].value["message"] == ( f"{REPO_NAME}: fatal: destination path '{REPO_NAME}' already exists and is not " "an empty directory.\n" )
"""Test cases for the git clone use case.""" from typing import Any import pytest from pytest_mock import MockerFixture from tests.conftest import ERROR_MSG from tests.conftest import REPO from tests.conftest import REPO2 from tests.conftest import REPO_NAME import git_portfolio.responses as res from git_portfolio.use_cases import git_clone as gcuc @pytest.fixture def mock_popen(mocker: MockerFixture) -> Any: """Fixture for mocking subprocess.Popen.""" mock = mocker.patch("subprocess.Popen") mock.return_value.returncode = 0 mock.return_value.communicate.return_value = (b"some output", b"") return mock @pytest.fixture def mock_github_service(mocker: MockerFixture) -> MockerFixture: """Fixture for mocking GithubService.""" return mocker.patch("git_portfolio.github_service.GithubService", autospec=True) @pytest.fixture def mock_command_checker(mocker: MockerFixture) -> Any: """Fixture for mocking CommandChecker.check.""" return mocker.patch( "git_portfolio.use_cases.command_checker.CommandChecker.check", return_value="", ) def test_execute_success( mock_github_service: MockerFixture, mock_command_checker: MockerFixture, mock_popen: MockerFixture, ) -> None: """It returns success messages.""" github_service = mock_github_service.return_value responses = gcuc.GitCloneUseCase(github_service).execute([REPO, REPO2]) assert len(responses) == 2 assert isinstance(responses[0], res.ResponseSuccess) assert responses[0].value == f"{REPO_NAME}: clone successful.\n" def test_execute_git_not_installed( mock_github_service: MockerFixture, mock_command_checker: MockerFixture, ) -> None: """It returns failure with git not installed message.""" mock_command_checker.return_value = ERROR_MSG github_service = mock_github_service.return_value responses = gcuc.GitCloneUseCase(github_service).execute([REPO]) mock_command_checker.assert_called_with("git") assert isinstance(responses[0], res.ResponseFailure) assert ERROR_MSG == responses[0].value["message"] @pytest.mark.e2e def 
test_execute_git_not_installed_e2e( mock_github_service: MockerFixture, mock_popen: MockerFixture ) -> None: """It returns failure with git not installed message.""" github_service = mock_github_service.return_value mock_popen.side_effect = FileNotFoundError responses = gcuc.GitCloneUseCase(github_service).execute([REPO]) assert isinstance(responses[0], res.ResponseFailure) assert ( "This command requires git executable installed and on system path." == responses[0].value["message"] ) def test_execute_error_during_execution( mock_github_service: MockerFixture, mock_command_checker: MockerFixture, mock_popen: MockerFixture, ) -> None: """It returns error.""" github_service = mock_github_service.return_value mock_popen.return_value.returncode = 1 mock_popen().communicate.return_value = ( b"", ( f"fatal: destination path '{REPO_NAME}' already exists and is not " "an empty directory.\n" ).encode(), ) responses = gcuc.GitCloneUseCase(github_service).execute([REPO]) assert isinstance(responses[0], res.ResponseFailure) assert responses[0].value["message"] == ( f"{REPO_NAME}: fatal: destination path '{REPO_NAME}' already exists and is not " "an empty directory.\n" )
en
0.566197
Test cases for the git clone use case. Fixture for mocking subprocess.Popen. Fixture for mocking GithubService. Fixture for mocking CommandChecker.check. It returns success messages. It returns failure with git not installed message. It returns failure with git not installed message. It returns error.
2.438371
2
train.py
peterzheng98/fuzzy-system
0
6623049
import os import time import json import numpy as np from tqdm import tqdm import torch import torch.nn as nn import torch.optim as optim import torch.distributed as dist import torch.multiprocessing as mp from torch.utils.data import DataLoader from torch.nn.parallel import DistributedDataParallel from torch.utils.data.distributed import DistributedSampler from DataProcess import SourceDataset, linear_collate from models.optimizer import RAdam from models.DPCNN import DPCNN import sys global_label_cnt = 2 global_vocab_size = 5414 + 2 global_embed_size = 128 global_hidden_size = 128 global_num_layers = 5 global_batch_size = 128 global_num_workers = 10 global_learning_rate = 1e-3 global_epoch_count = 300 global_t_max = 64 global_eta_min = 1e-9 # Important Parameters def train(dataset, model_file, device, rank=0): torch.cuda.set_device(device) dataset_train, dataset_eval_in, dataset_eval_out = dataset dataset_train = SourceDataset(dataset_train, global_vocab_size, 25) dataset_eval_in = SourceDataset(dataset_eval_in, global_vocab_size, 25) dataset_eval_out = SourceDataset(dataset_eval_out, global_vocab_size, 25) model = DPCNN(global_label_cnt, global_vocab_size, global_embed_size, global_hidden_size, global_num_layers) criterion = nn.CrossEntropyLoss() model, criterion = model.to(device), criterion.to(device) dataloader_train = DataLoader(dataset_train, shuffle=True, pin_memory=True, num_workers=global_num_workers, batch_size=global_batch_size, drop_last=True, collate_fn=linear_collate) dataloader_eval_in = DataLoader(dataset_eval_in, shuffle=False, num_workers=global_num_workers, batch_size=global_batch_size, collate_fn=linear_collate) dataloader_eval_out = DataLoader(dataset_eval_out, shuffle=False, num_workers=global_num_workers, batch_size=global_batch_size, collate_fn=linear_collate) optimizer = RAdam(model.parameters(), lr=global_learning_rate) scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=global_t_max, eta_min=global_eta_min) t1 = 
time.time() best_in_eval = 0.0 best_out_eval = 0.0 for epoch in range(global_epoch_count): total_loss = [] total_loss_eval_in = [] total_loss_eval_out = [] coarse_group_correct = [0, 0] deep_group_correct = [0, 0] total_group = [0, 0] bar = tqdm(desc='Train #{:02d}'.format(epoch), total=len(dataloader_train), leave=False) model.train() for data, label in dataloader_train: data, label = data.to(device), label.to(device) optimizer.zero_grad() output = model(data) loss = criterion(output, label) loss.backward() optimizer.step() total_loss.append(loss.item()) bar.update() bar.close() # bar2 = tqdm(desc='(1/3)Eval In #{:02d}'.format(epoch), total=len(dataset_eval_in), leave=False) # bar3 = tqdm(desc='(2/3)Eval In #{:02d}'.format(epoch), total=len(dataset_eval_out), leave=False) model.eval() with torch.no_grad(): for data, label in dataloader_eval_in: data, label = data.to(device), label.to(device) output_raw = model(data) loss = criterion(output_raw, label) _, output = torch.max(output_raw, 1) total_loss_eval_in.append(loss.item()) assert output.shape == label.shape for i in range(len(output)): if output[i] >= 2 and label[i] >= 2: coarse_group_correct[0] = coarse_group_correct[0] + 1 elif output[i] < 2 and label[i] < 2: coarse_group_correct[0] = coarse_group_correct[0] + 1 total_group[0] = total_group[0] + 1 if output[i] == label[i]: deep_group_correct[0] = deep_group_correct[0] + 1 # bar2.update() # bar2.close() for data, label in dataloader_eval_out: data, label = data.to(device), label.to(device) output_raw = model(data) loss = criterion(output_raw, label) _, output = torch.max(output_raw, 1) total_loss_eval_out.append(loss.item()) assert output.shape == label.shape for i in range(len(output)): if output[i] >= 2 and label[i] >= 2: coarse_group_correct[1] = coarse_group_correct[1] + 1 elif output[i] < 2 and label[i] < 2: coarse_group_correct[1] = coarse_group_correct[1] + 1 total_group[1] = total_group[1] + 1 if output[i] == label[i]: deep_group_correct[1] = 
deep_group_correct[1] + 1 # bar3.update() # bar3.close() epoch_train_loss = np.mean(total_loss) epoch_eval_in_loss = np.mean(total_loss_eval_in) epoch_eval_out_loss = np.mean(total_loss_eval_out) print('Report: Epoch #{:02d}: Training loss: {:.06F}, Eval Loss: (I {:.06F})(O {:.06F}), Eval Accuracy: (I {:.03F}%-{:.03F}%)(O {:.03F}%-{:.03F}%), {:.03F}%, lr: {:.06F}'.format( epoch, epoch_train_loss, epoch_eval_in_loss, epoch_eval_out_loss, 100.0 * coarse_group_correct[0] / total_group[0], 100.0 * deep_group_correct[0] / total_group[0], 100.0 * coarse_group_correct[1] / total_group[1], 100.0 * deep_group_correct[1] / total_group[1], 100.0 * (coarse_group_correct[0] + coarse_group_correct[1]) / (total_group[0] + total_group[1]), scheduler.get_lr()[0] )) state_dict = model.state_dict() torch.save(state_dict, '{}.{:03d}'.format(model_file, epoch)) scheduler.step() torch.cuda.empty_cache() if __name__ == '__main__': dataset = ('datasets/tokenized/in_domain_train.reformed.csv', 'datasets/tokenized/in_domain_dev.reformed.csv', 'datasets/tokenized/out_of_domain_dev.reformed.csv') train(dataset, sys.argv[1], 0)
import os import time import json import numpy as np from tqdm import tqdm import torch import torch.nn as nn import torch.optim as optim import torch.distributed as dist import torch.multiprocessing as mp from torch.utils.data import DataLoader from torch.nn.parallel import DistributedDataParallel from torch.utils.data.distributed import DistributedSampler from DataProcess import SourceDataset, linear_collate from models.optimizer import RAdam from models.DPCNN import DPCNN import sys global_label_cnt = 2 global_vocab_size = 5414 + 2 global_embed_size = 128 global_hidden_size = 128 global_num_layers = 5 global_batch_size = 128 global_num_workers = 10 global_learning_rate = 1e-3 global_epoch_count = 300 global_t_max = 64 global_eta_min = 1e-9 # Important Parameters def train(dataset, model_file, device, rank=0): torch.cuda.set_device(device) dataset_train, dataset_eval_in, dataset_eval_out = dataset dataset_train = SourceDataset(dataset_train, global_vocab_size, 25) dataset_eval_in = SourceDataset(dataset_eval_in, global_vocab_size, 25) dataset_eval_out = SourceDataset(dataset_eval_out, global_vocab_size, 25) model = DPCNN(global_label_cnt, global_vocab_size, global_embed_size, global_hidden_size, global_num_layers) criterion = nn.CrossEntropyLoss() model, criterion = model.to(device), criterion.to(device) dataloader_train = DataLoader(dataset_train, shuffle=True, pin_memory=True, num_workers=global_num_workers, batch_size=global_batch_size, drop_last=True, collate_fn=linear_collate) dataloader_eval_in = DataLoader(dataset_eval_in, shuffle=False, num_workers=global_num_workers, batch_size=global_batch_size, collate_fn=linear_collate) dataloader_eval_out = DataLoader(dataset_eval_out, shuffle=False, num_workers=global_num_workers, batch_size=global_batch_size, collate_fn=linear_collate) optimizer = RAdam(model.parameters(), lr=global_learning_rate) scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=global_t_max, eta_min=global_eta_min) t1 = 
time.time() best_in_eval = 0.0 best_out_eval = 0.0 for epoch in range(global_epoch_count): total_loss = [] total_loss_eval_in = [] total_loss_eval_out = [] coarse_group_correct = [0, 0] deep_group_correct = [0, 0] total_group = [0, 0] bar = tqdm(desc='Train #{:02d}'.format(epoch), total=len(dataloader_train), leave=False) model.train() for data, label in dataloader_train: data, label = data.to(device), label.to(device) optimizer.zero_grad() output = model(data) loss = criterion(output, label) loss.backward() optimizer.step() total_loss.append(loss.item()) bar.update() bar.close() # bar2 = tqdm(desc='(1/3)Eval In #{:02d}'.format(epoch), total=len(dataset_eval_in), leave=False) # bar3 = tqdm(desc='(2/3)Eval In #{:02d}'.format(epoch), total=len(dataset_eval_out), leave=False) model.eval() with torch.no_grad(): for data, label in dataloader_eval_in: data, label = data.to(device), label.to(device) output_raw = model(data) loss = criterion(output_raw, label) _, output = torch.max(output_raw, 1) total_loss_eval_in.append(loss.item()) assert output.shape == label.shape for i in range(len(output)): if output[i] >= 2 and label[i] >= 2: coarse_group_correct[0] = coarse_group_correct[0] + 1 elif output[i] < 2 and label[i] < 2: coarse_group_correct[0] = coarse_group_correct[0] + 1 total_group[0] = total_group[0] + 1 if output[i] == label[i]: deep_group_correct[0] = deep_group_correct[0] + 1 # bar2.update() # bar2.close() for data, label in dataloader_eval_out: data, label = data.to(device), label.to(device) output_raw = model(data) loss = criterion(output_raw, label) _, output = torch.max(output_raw, 1) total_loss_eval_out.append(loss.item()) assert output.shape == label.shape for i in range(len(output)): if output[i] >= 2 and label[i] >= 2: coarse_group_correct[1] = coarse_group_correct[1] + 1 elif output[i] < 2 and label[i] < 2: coarse_group_correct[1] = coarse_group_correct[1] + 1 total_group[1] = total_group[1] + 1 if output[i] == label[i]: deep_group_correct[1] = 
deep_group_correct[1] + 1 # bar3.update() # bar3.close() epoch_train_loss = np.mean(total_loss) epoch_eval_in_loss = np.mean(total_loss_eval_in) epoch_eval_out_loss = np.mean(total_loss_eval_out) print('Report: Epoch #{:02d}: Training loss: {:.06F}, Eval Loss: (I {:.06F})(O {:.06F}), Eval Accuracy: (I {:.03F}%-{:.03F}%)(O {:.03F}%-{:.03F}%), {:.03F}%, lr: {:.06F}'.format( epoch, epoch_train_loss, epoch_eval_in_loss, epoch_eval_out_loss, 100.0 * coarse_group_correct[0] / total_group[0], 100.0 * deep_group_correct[0] / total_group[0], 100.0 * coarse_group_correct[1] / total_group[1], 100.0 * deep_group_correct[1] / total_group[1], 100.0 * (coarse_group_correct[0] + coarse_group_correct[1]) / (total_group[0] + total_group[1]), scheduler.get_lr()[0] )) state_dict = model.state_dict() torch.save(state_dict, '{}.{:03d}'.format(model_file, epoch)) scheduler.step() torch.cuda.empty_cache() if __name__ == '__main__': dataset = ('datasets/tokenized/in_domain_train.reformed.csv', 'datasets/tokenized/in_domain_dev.reformed.csv', 'datasets/tokenized/out_of_domain_dev.reformed.csv') train(dataset, sys.argv[1], 0)
en
0.302482
# Important Parameters #{:02d}'.format(epoch), total=len(dataloader_train), leave=False) # bar2 = tqdm(desc='(1/3)Eval In #{:02d}'.format(epoch), total=len(dataset_eval_in), leave=False) # bar3 = tqdm(desc='(2/3)Eval In #{:02d}'.format(epoch), total=len(dataset_eval_out), leave=False) # bar2.update() # bar2.close() # bar3.update() # bar3.close() #{:02d}: Training loss: {:.06F}, Eval Loss: (I {:.06F})(O {:.06F}), Eval Accuracy: (I {:.03F}%-{:.03F}%)(O {:.03F}%-{:.03F}%), {:.03F}%, lr: {:.06F}'.format(
2.062458
2
ARC/arc001-arc050/arc043/a.py
KATO-Hiro/AtCoder
2
6623050
<reponame>KATO-Hiro/AtCoder<gh_stars>1-10 # -*- coding: utf-8 -*- def main(): n, a, b = map(int, input().split()) s = [int(input()) for _ in range(n)] s_max = max(s) s_min = min(s) if s_max == s_min: print(-1) else: p = b / (s_max - s_min) s_sum = sum(s) q = a - p * s_sum / n print(p, q) if __name__ == '__main__': main()
# -*- coding: utf-8 -*- def main(): n, a, b = map(int, input().split()) s = [int(input()) for _ in range(n)] s_max = max(s) s_min = min(s) if s_max == s_min: print(-1) else: p = b / (s_max - s_min) s_sum = sum(s) q = a - p * s_sum / n print(p, q) if __name__ == '__main__': main()
en
0.769321
# -*- coding: utf-8 -*-
2.875293
3