repo_name
stringclasses 400
values | branch_name
stringclasses 4
values | file_content
stringlengths 16
72.5k
| language
stringclasses 1
value | num_lines
int64 1
1.66k
| avg_line_length
float64 6
85
| max_line_length
int64 9
949
| path
stringlengths 5
103
| alphanum_fraction
float64 0.29
0.89
| alpha_fraction
float64 0.27
0.89
|
|---|---|---|---|---|---|---|---|---|---|
leopesi/pool_budget
|
refs/heads/master
|
# Generated by Django 3.0.3 on 2020-05-04 18:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop the standalone pricing model and move pricing onto DimensaoModel.

    Auto-generated by Django 3.0.3; deletes PrecificacaoModel, adds
    `preco`/`produto` text fields, and relaxes `profundidade_media` to text.
    """

    dependencies = [
        ('dimensoes', '0008_remove_precificacaomodel_custo'),
    ]

    operations = [
        # The separate pricing table is removed entirely.
        migrations.DeleteModel(
            name='PrecificacaoModel',
        ),
        # default=0 backfills existing rows; note the price is stored as text.
        migrations.AddField(
            model_name='dimensaomodel',
            name='preco',
            field=models.CharField(default=0, max_length=25),
        ),
        migrations.AddField(
            model_name='dimensaomodel',
            name='produto',
            field=models.CharField(default=0, max_length=25),
        ),
        # Was a FloatField; now stored as free text (no default needed for alter).
        migrations.AlterField(
            model_name='dimensaomodel',
            name='profundidade_media',
            field=models.CharField(max_length=25),
        ),
    ]
|
Python
| 31
| 26.032259
| 61
|
/projeto/dimensoes/migrations/0009_auto_20200504_1529.py
| 0.570406
| 0.538186
|
leopesi/pool_budget
|
refs/heads/master
|
# Generated by Django 3.0.3 on 2020-06-04 18:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-add DimensaoModel.profundidade_media (dropped in 0013) as a CharField."""

    dependencies = [
        ('dimensoes', '0013_remove_dimensaomodel_profundidade_media'),
    ]

    operations = [
        migrations.AddField(
            model_name='dimensaomodel',
            name='profundidade_media',
            # default=0 backfills rows that existed while the column was gone.
            field=models.CharField(default=0, max_length=25),
        ),
    ]
|
Python
| 18
| 23.277779
| 70
|
/projeto/dimensoes/migrations/0014_dimensaomodel_profundidade_media.py
| 0.622426
| 0.572082
|
leopesi/pool_budget
|
refs/heads/master
|
# Generated by Django 3.0.3 on 2020-05-11 18:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Convert DimensaoModel's dimensional inputs back to numeric FloatFields.

    Reverses part of 0007's text conversion: the geometric inputs become
    floats again, while the two choice fields keep tight max_lengths.
    """

    dependencies = [
        ('dimensoes', '0009_auto_20200504_1529'),
    ]

    operations = [
        migrations.AlterField(
            model_name='dimensaomodel',
            name='comprimento',
            field=models.FloatField(),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='espessura',
            # max_length=3 matches the longest choice key ('0.6'/'0.7'/'0.8').
            field=models.CharField(max_length=3),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='fornecedor',
            # max_length=8 matches the longest supplier key ('sodramar'/'viniplas').
            field=models.CharField(max_length=8),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='largura',
            field=models.FloatField(),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='largura_calcada',
            field=models.FloatField(),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='prof_final',
            field=models.FloatField(),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='prof_inicial',
            field=models.FloatField(),
        ),
    ]
|
Python
| 48
| 26.604166
| 49
|
/projeto/dimensoes/migrations/0010_auto_20200511_1521.py
| 0.535849
| 0.510943
|
leopesi/pool_budget
|
refs/heads/master
|
# Generated by Django 3.0.3 on 2020-06-11 21:59
from django.db import migrations
class Migration(migrations.Migration):
    """Rename ClienteModel.numero_casa to numerocasa (no schema-type change)."""

    dependencies = [
        ('dimensoes', '0016_auto_20200611_1852'),
    ]

    operations = [
        migrations.RenameField(
            model_name='clientemodel',
            old_name='numero_casa',
            new_name='numerocasa',
        ),
    ]
|
Python
| 18
| 20.111111
| 49
|
/projeto/dimensoes/migrations/0017_auto_20200611_1859.py
| 0.584211
| 0.502632
|
leopesi/pool_budget
|
refs/heads/master
|
# Generated by Django 3.0.3 on 2020-06-04 18:33
from django.db import migrations
class Migration(migrations.Migration):
    """Drop DimensaoModel.profundidade_media (re-added later in 0014)."""

    dependencies = [
        ('dimensoes', '0012_auto_20200603_1916'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='dimensaomodel',
            name='profundidade_media',
        ),
    ]
|
Python
| 17
| 19.529411
| 49
|
/projeto/dimensoes/migrations/0013_remove_dimensaomodel_profundidade_media.py
| 0.601719
| 0.512894
|
leopesi/pool_budget
|
refs/heads/master
|
# Generated by Django 3.0.3 on 2020-03-16 18:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: ClienteModel (customer contact data) and DimensaoModel
    (pool dimensions, computed areas/volumes, equipment and labour items).

    NOTE(review): `max_length` is not a valid option for FloatField — Django
    ignores it (system check fields.W122). Kept byte-identical here because
    this is the generated historical migration.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='ClienteModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=30)),
                ('sobrenome', models.CharField(max_length=30)),
                # Address fields are optional (blank=True) at the form level.
                ('cidade', models.CharField(blank=True, max_length=20)),
                ('estado', models.CharField(blank=True, max_length=15)),
                ('rua', models.CharField(blank=True, max_length=100)),
                ('numero_casa', models.CharField(blank=True, max_length=6)),
                ('cep', models.CharField(blank=True, max_length=20)),
                ('telefone', models.CharField(blank=True, max_length=15)),
                ('email', models.EmailField(blank=True, help_text='Ex. clinte@gmail.com', max_length=50)),
            ],
            options={
                'ordering': ['nome', 'sobrenome'],
            },
        ),
        migrations.CreateModel(
            name='DimensaoModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # User-entered pool dimensions (metres).
                ('comprimento', models.FloatField(help_text='Ex. 8.00', max_length=3)),
                ('largura', models.FloatField(help_text='Ex. 4.00', max_length=3)),
                ('prof_inicial', models.FloatField(help_text='Ex. 1.20', max_length=3)),
                ('prof_final', models.FloatField(help_text='Ex. 1.40', max_length=3)),
                ('largura_calcada', models.FloatField(blank=True, default=1, help_text='Ex. 1.00', max_length=3)),
                # Vinyl liner thickness / supplier, constrained by choices.
                ('espessura', models.CharField(choices=[['0.6', '0.6 mm'], ['0.7', '0.7 mm'], ['0.8', '0.8 mm']], help_text='Espessura do vinil', max_length=3)),
                ('fornecedor', models.CharField(choices=[['sodramar', 'Sodramar'], ['viniplas', 'Viniplas']], help_text='Fornecedor do vinil', max_length=8)),
                # Derived geometry (areas in m2, volumes in m3).
                ('profundidade_media', models.FloatField(max_length=5)),
                ('area_calcada', models.FloatField(max_length=5)),
                ('perimetro', models.FloatField(max_length=5)),
                ('m2_facial', models.FloatField(max_length=5)),
                ('m2_parede', models.FloatField(max_length=5)),
                ('m2_total', models.FloatField(max_length=5)),
                ('m3_total', models.FloatField(max_length=5)),
                ('m3_real', models.FloatField(max_length=5)),
                # Equipment selections, stored as text descriptions.
                ('filtro', models.CharField(max_length=30)),
                ('motobomba', models.CharField(max_length=30)),
                ('tampa_casa_maquinas', models.CharField(max_length=30)),
                ('sacos_areia', models.CharField(max_length=30)),
                ('vinil_m2', models.FloatField(max_length=5)),
                ('isomanta_m2', models.FloatField(max_length=5)),
                ('perfil_fixo_m', models.FloatField(max_length=5)),
                # Labour line items.
                ('escavacao', models.CharField(max_length=30)),
                ('construcao', models.CharField(max_length=30)),
                ('contra_piso', models.CharField(max_length=30)),
                ('remocao_terra', models.CharField(max_length=30)),
                ('instalacao_vinil', models.CharField(max_length=30)),
                ('data', models.DateTimeField(auto_now_add=True)),
                ('status', models.CharField(blank=True, choices=[('Em negociação', 'Em negociação'), ('Contrato', 'Contrato'), ('Encerrado', 'Encerrado')], default='Em negociação', help_text='Status do Orçamento', max_length=15)),
            ],
        ),
    ]
|
Python
| 67
| 55.805969
| 230
|
/projeto/dimensoes/migrations/0001_initial.py
| 0.555439
| 0.527588
|
leopesi/pool_budget
|
refs/heads/master
|
# Generated by Django 3.0.3 on 2020-03-17 12:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Give the five labour-cost CharFields a default of 0 so existing rows
    and blank form submissions are backfilled consistently."""

    dependencies = [
        ('dimensoes', '0003_remove_dimensaomodel_data'),
    ]

    operations = [
        migrations.AlterField(
            model_name='dimensaomodel',
            name='construcao',
            field=models.CharField(default=0, max_length=30),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='contra_piso',
            field=models.CharField(default=0, max_length=30),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='escavacao',
            field=models.CharField(default=0, max_length=30),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='instalacao_vinil',
            field=models.CharField(default=0, max_length=30),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='remocao_terra',
            field=models.CharField(default=0, max_length=30),
        ),
    ]
|
Python
| 38
| 28.631578
| 61
|
/projeto/dimensoes/migrations/0004_auto_20200317_0933.py
| 0.568384
| 0.538188
|
leopesi/pool_budget
|
refs/heads/master
|
# Generated by Django 3.0.3 on 2020-05-16 18:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Switch ClienteModel.telefone from CharField to IntegerField.

    Fix: the generated version passed `max_length=15` to IntegerField.
    `max_length` is not an IntegerField option — Django ignores it for the
    schema and flags it with system check fields.W122 — so it is dropped here
    without changing the resulting database column.

    NOTE(review): storing phone numbers as integers loses leading zeros and
    formatting; a CharField is usually safer — confirm intent (a later
    migration, 0016, keeps IntegerField).
    """

    dependencies = [
        ('dimensoes', '0010_auto_20200511_1521'),
    ]

    operations = [
        migrations.AlterField(
            model_name='clientemodel',
            name='telefone',
            field=models.IntegerField(blank=True),
        ),
    ]
|
Python
| 18
| 21.833334
| 65
|
/projeto/dimensoes/migrations/0011_auto_20200516_1518.py
| 0.603406
| 0.523114
|
leopesi/pool_budget
|
refs/heads/master
|
# Generated by Django 3.0.3 on 2020-03-18 21:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add default=0 to profundidade_media so existing rows can be altered.

    NOTE(review): `max_length` is ignored on FloatField (fields.W122); kept
    byte-identical as generated.
    """

    dependencies = [
        ('dimensoes', '0005_dimensaomodel_data'),
    ]

    operations = [
        migrations.AlterField(
            model_name='dimensaomodel',
            name='profundidade_media',
            field=models.FloatField(default=0, max_length=5),
        ),
    ]
|
Python
| 18
| 22.222221
| 61
|
/projeto/dimensoes/migrations/0006_auto_20200318_1831.py
| 0.610048
| 0.559809
|
leopesi/pool_budget
|
refs/heads/master
|
# Generated by Django 3.0.3 on 2020-04-29 20:30
from django.db import migrations
class Migration(migrations.Migration):
    """Drop PrecificacaoModel.custo (the whole model is deleted in 0009)."""

    dependencies = [
        ('dimensoes', '0007_auto_20200408_1540'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='precificacaomodel',
            name='custo',
        ),
    ]
|
Python
| 17
| 19
| 49
|
/projeto/dimensoes/migrations/0008_remove_precificacaomodel_custo.py
| 0.594118
| 0.502941
|
leopesi/pool_budget
|
refs/heads/master
|
# Generated by Django 3.0.3 on 2020-03-16 21:38
from django.db import migrations
class Migration(migrations.Migration):
    """Drop DimensaoModel.data (the auto_now_add timestamp from 0001)."""

    dependencies = [
        ('dimensoes', '0002_auto_20200316_1609'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='dimensaomodel',
            name='data',
        ),
    ]
|
Python
| 17
| 18.705883
| 49
|
/projeto/dimensoes/migrations/0003_remove_dimensaomodel_data.py
| 0.58806
| 0.495522
|
leopesi/pool_budget
|
refs/heads/master
|
# Generated by Django 3.0.3 on 2020-06-11 21:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Remove the budget `status` field and make `telefone` an integer.

    NOTE(review): an integer telefone loses leading zeros/formatting —
    presumably intentional here, but worth confirming against the form layer.
    """

    dependencies = [
        ('dimensoes', '0015_auto_20200604_1710'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='dimensaomodel',
            name='status',
        ),
        migrations.AlterField(
            model_name='clientemodel',
            name='telefone',
            # default=0 backfills existing rows during the type change.
            field=models.IntegerField(blank=True, default=0),
        ),
    ]
|
Python
| 22
| 22.5
| 61
|
/projeto/dimensoes/migrations/0016_auto_20200611_1852.py
| 0.576402
| 0.514507
|
leopesi/pool_budget
|
refs/heads/master
|
# Generated by Django 3.0.3 on 2020-04-08 18:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Introduce PrecificacaoModel (per-item price list) and loosen most of
    DimensaoModel's numeric fields to CharFields.

    All prices are stored as text; `default=0` marks the labour-price fields
    added after the model already had data. NOTE(review): `max_length` on the
    FloatField below is ignored by Django (fields.W122); kept as generated.
    """

    dependencies = [
        ('dimensoes', '0006_auto_20200318_1831'),
    ]

    operations = [
        migrations.CreateModel(
            name='PrecificacaoModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Global pricing inputs.
                ('custo', models.CharField(max_length=30)),
                ('margem', models.CharField(max_length=30)),
                ('preco', models.CharField(max_length=30)),
                # Per-equipment prices.
                ('filtro_preco', models.CharField(max_length=30)),
                ('motobomba_preco', models.CharField(max_length=30)),
                ('tampa_casa_maquinas_preco', models.CharField(max_length=30)),
                ('sacos_areia_preco', models.CharField(max_length=30)),
                ('perfil_rigido_preco', models.CharField(max_length=30)),
                ('ralo_fundo_preco', models.CharField(max_length=30)),
                ('dispositivo_retorno_preco', models.CharField(max_length=30)),
                ('dispositivo_aspiracao_preco', models.CharField(max_length=30)),
                ('dispositivo_nivel_preco', models.CharField(max_length=30)),
                ('borda_preco', models.CharField(max_length=30)),
                ('skimmer_preco', models.CharField(max_length=30)),
                ('dispositivo_hidromassagem_preco', models.CharField(max_length=30)),
                ('escada_preco', models.CharField(max_length=30)),
                ('timer_preco', models.CharField(max_length=30)),
                ('capa_termica_preco', models.CharField(max_length=30)),
                ('capa_protecao_preco', models.CharField(max_length=30)),
                ('peneira_preco', models.CharField(max_length=30)),
                ('mangueira_preco', models.CharField(max_length=30)),
                ('ponteira_preco', models.CharField(max_length=30)),
                ('adaptador_giratorio_preco', models.CharField(max_length=30)),
                ('haste_aluminio_preco', models.CharField(max_length=30)),
                ('rodo_aspirador_preco', models.CharField(max_length=30)),
                ('escova_preco', models.CharField(max_length=30)),
                # Per-material prices.
                ('vinil_preco', models.CharField(max_length=25)),
                ('isomanta_preco', models.CharField(max_length=25)),
                ('perfil_fixo_preco', models.CharField(max_length=25)),
                # Labour prices; default=0 so rows can be created incrementally.
                ('escavacao_preco', models.CharField(default=0, max_length=30)),
                ('construcao_preco', models.CharField(default=0, max_length=30)),
                ('remocao_terra_preco', models.CharField(default=0, max_length=30)),
                ('colocacao_material_preco', models.CharField(default=0, max_length=30)),
                ('contra_piso_preco', models.CharField(default=0, max_length=30)),
                ('instalacao_skimmer_preco', models.CharField(default=0, max_length=30)),
                ('instalacao_borda_preco', models.CharField(default=0, max_length=30)),
                ('instalacao_escada_preco', models.CharField(default=0, max_length=30)),
                ('instalacao_capa_termica_preco', models.CharField(default=0, max_length=30)),
                ('instalacao_capa_protecao_preco', models.CharField(default=0, max_length=30)),
                ('instalacao_tampa_cm_preco', models.CharField(default=0, max_length=30)),
                ('instalacao_vinil_preco', models.CharField(default=0, max_length=30)),
                ('instalacao_filtro_preco', models.CharField(default=0, max_length=30)),
                ('instalacao_motobomba_preco', models.CharField(default=0, max_length=30)),
            ],
        ),
        migrations.AddField(
            model_name='clientemodel',
            name='bairro',
            field=models.CharField(blank=True, max_length=20),
        ),
        # From here on: computed/entered numeric fields converted to text.
        migrations.AlterField(
            model_name='dimensaomodel',
            name='area_calcada',
            field=models.CharField(max_length=25),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='comprimento',
            field=models.CharField(default=0, help_text='Ex. 8.00', max_length=3),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='espessura',
            field=models.CharField(choices=[['0.6', '0.6 mm'], ['0.7', '0.7 mm'], ['0.8', '0.8 mm']], max_length=3),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='fornecedor',
            field=models.CharField(choices=[['sodramar', 'Sodramar'], ['viniplas', 'Viniplas']], max_length=8),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='isomanta_m2',
            field=models.CharField(max_length=25),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='largura',
            field=models.CharField(default=0, help_text='Ex. 4.00', max_length=3),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='largura_calcada',
            field=models.CharField(blank=True, default=1, help_text='Ex. 1.00', max_length=3),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='m2_facial',
            field=models.CharField(max_length=25),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='m2_parede',
            field=models.CharField(max_length=25),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='m2_total',
            field=models.CharField(max_length=25),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='m3_real',
            field=models.CharField(max_length=25),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='m3_total',
            field=models.CharField(max_length=25),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='perfil_fixo_m',
            field=models.CharField(max_length=25),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='perimetro',
            field=models.CharField(max_length=25),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='prof_final',
            field=models.CharField(default=0, help_text='Ex. 1.40', max_length=3),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='prof_inicial',
            field=models.CharField(default=0, help_text='Ex. 1.20', max_length=3),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='profundidade_media',
            field=models.FloatField(default=0, max_length=25),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='vinil_m2',
            field=models.CharField(max_length=25),
        ),
    ]
|
Python
| 157
| 44.923569
| 116
|
/projeto/dimensoes/migrations/0007_auto_20200408_1540.py
| 0.561165
| 0.533148
|
leopesi/pool_budget
|
refs/heads/master
|
# Generated by Django 3.0.3 on 2020-06-04 20:10
from django.db import migrations
class Migration(migrations.Migration):
    """Strip material/labour columns from DimensaoModel (moved elsewhere),
    including the `preco`/`produto` fields added in 0009."""

    dependencies = [
        ('dimensoes', '0014_dimensaomodel_profundidade_media'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='dimensaomodel',
            name='construcao',
        ),
        migrations.RemoveField(
            model_name='dimensaomodel',
            name='contra_piso',
        ),
        migrations.RemoveField(
            model_name='dimensaomodel',
            name='escavacao',
        ),
        migrations.RemoveField(
            model_name='dimensaomodel',
            name='instalacao_vinil',
        ),
        migrations.RemoveField(
            model_name='dimensaomodel',
            name='isomanta_m2',
        ),
        migrations.RemoveField(
            model_name='dimensaomodel',
            name='perfil_fixo_m',
        ),
        migrations.RemoveField(
            model_name='dimensaomodel',
            name='preco',
        ),
        migrations.RemoveField(
            model_name='dimensaomodel',
            name='produto',
        ),
        migrations.RemoveField(
            model_name='dimensaomodel',
            name='remocao_terra',
        ),
        migrations.RemoveField(
            model_name='dimensaomodel',
            name='vinil_m2',
        ),
    ]
|
Python
| 53
| 25.113207
| 63
|
/projeto/dimensoes/migrations/0015_auto_20200604_1710.py
| 0.524566
| 0.509393
|
leopesi/pool_budget
|
refs/heads/master
|
# Generated by Django 3.0.3 on 2020-06-18 18:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Widen ClienteModel.numero_casa to max_length=10.

    NOTE(review): 0017 renamed this field to `numerocasa`; this migration
    altering `numero_casa` implies the intermediate 0018 (not shown here)
    renamed it back — confirm against 0018.
    """

    dependencies = [
        ('dimensoes', '0018_auto_20200611_1905'),
    ]

    operations = [
        migrations.AlterField(
            model_name='clientemodel',
            name='numero_casa',
            field=models.CharField(blank=True, max_length=10),
        ),
    ]
|
Python
| 18
| 21.833334
| 62
|
/projeto/dimensoes/migrations/0019_auto_20200618_1520.py
| 0.600973
| 0.520681
|
huzhaoyangcode/myAllWorkUsefullCode
|
refs/heads/master
|
#!/usr/bin/env python3
"""Convert DETRAC-style XML vehicle annotations into COCO-format JSON.

Reads the sequence directory names from ``dirQueue.txt``, enumerates the
frame images under ``./images/<seq>/`` and the per-sequence annotations in
``./DETRAC-Train-Annotations-XML/<seq>.xml``, then prints the training JSON,
a separator line, and the test JSON to stdout.

Fixes over the original version:
* JSON is produced with ``json.dumps`` on real dicts instead of ``str()``
  plus chained ``.replace()`` hacks, which broke on any filename containing
  a quote/apostrophe and silently corrupted values containing "None"/"True".
* The builtin name ``id`` is no longer shadowed.
"""
import xml.etree.ElementTree as ET
import os
import copy
import json

# Image ids in [TEST_START_ID, TEST_END_ID] (inclusive) go to the test split.
TEST_START_ID = 1000
TEST_END_ID = 6000

CATEGORIES = [
    {"id": 0, "name": "bg", "supercategory": ""},
    {"id": 1, "name": "car", "supercategory": ""},
    {"id": 2, "name": "bus", "supercategory": ""},
    {"id": 3, "name": "van", "supercategory": ""},
    {"id": 4, "name": "others", "supercategory": ""},
]

# XML "vehicle_type" attribute -> COCO category id (unknown types keep the
# default category 1, matching the original behaviour).
VEHICLE_TYPE_TO_CATEGORY = {"car": 1, "bus": 2, "van": 3, "others": 4}


def _read_dir_queue():
    """Return the sequence directory names listed in dirQueue.txt."""
    with open('dirQueue.txt', 'r') as queue_file:
        return [line.strip() for line in queue_file]


def _image_record(image_id, dirname, file_name):
    """Build one COCO image entry (fixed 960x540 BitVehicle frames)."""
    return {
        "dataset": "BitVehicle",
        "height": 540,
        "id": image_id,
        "width": 960,
        "file_name": file_name,
        "coco_url": None,
        "license": None,
        "flickr_url": None,
        "image": os.path.join(dirname, file_name),
        "date_captured": None,
    }


def _collect_images(dir_names):
    """Return (train_images, test_images), assigning sequential global ids."""
    train, test = [], []
    image_id = 0
    for name in dir_names:
        dirname = os.path.join("./images", name)
        for file_name in sorted(os.listdir(dirname)):
            record = _image_record(image_id, dirname, file_name)
            if TEST_START_ID <= image_id <= TEST_END_ID:
                test.append(record)
            else:
                train.append(record)
            image_id += 1
    return train, test


def _collect_annotations(dir_names):
    """Return (train_annotations, test_annotations) parsed from the XML files.

    ``image_base`` tracks the global image id preceding the current sequence:
    frame number ``num`` (1-based) within a sequence maps to global image id
    ``image_base + num``.
    """
    train, test = [], []
    image_base = -1
    annotation_id = -1
    for name in dir_names:
        dirname = os.path.join("./images", name)
        frame_count = len(os.listdir(dirname))
        xml_path = os.path.join("./DETRAC-Train-Annotations-XML", name) + ".xml"
        root = ET.ElementTree(file=xml_path).getroot()
        for frame in root:
            if frame.tag != "frame":
                continue
            # density = number of targets in this frame; num = frame index.
            density = int(frame.attrib["density"])
            num = int(frame.attrib["num"])
            for i in range(density):
                target = frame[0][i]
                annotation_id += 1
                record = {
                    "area": 109512.0,
                    "id": annotation_id,
                    "iscrowd": 0,
                    "category_id": 1,
                    "is_occluded": False,
                    "image_id": image_base + num,
                    "segmentation": None,
                    "bbox": [604.0, 0.0, 324.0, 338.0],
                    "attributes": {},
                }
                for attribute in target:
                    if attribute.tag == "box":
                        box = attribute.attrib
                        record["bbox"] = [
                            float(box["left"]),
                            float(box["top"]),
                            float(box["width"]),
                            float(box["height"]),
                        ]
                        record["area"] = record["bbox"][2] * record["bbox"][3]
                    elif attribute.tag == "attribute":
                        vehicle_type = attribute.attrib["vehicle_type"]
                        record["category_id"] = VEHICLE_TYPE_TO_CATEGORY.get(
                            vehicle_type, record["category_id"])
                    elif attribute.tag == "occlusion":
                        record["is_occluded"] = True
                if TEST_START_ID <= record["image_id"] <= TEST_END_ID:
                    test.append(record)
                else:
                    train.append(record)
        image_base += frame_count
    return train, test


def _dump(images, annotations):
    """Serialize one split as a COCO document string."""
    document = {
        "images": images,
        "annotations": annotations,
        "categories": CATEGORIES,
    }
    return json.dumps(document, sort_keys=True, indent=4,
                      separators=(', ', ': '))


def main():
    dir_names = _read_dir_queue()
    train_images, test_images = _collect_images(dir_names)
    train_annotations, test_annotations = _collect_annotations(dir_names)
    print(_dump(train_images, train_annotations))
    print("########Test########")
    print(_dump(test_images, test_annotations))


if __name__ == '__main__':
    main()
|
Python
| 173
| 37.878613
| 335
|
/xmlToJson/xmlToJson_new_12_24.py
| 0.597532
| 0.586084
|
huzhaoyangcode/myAllWorkUsefullCode
|
refs/heads/master
|
#!/usr/bin/env python
import threading
import time
import os
import sys
import signal
#Write First thread of creating raw file
# Thread 1: enable camera raw dumping and run the GStreamer capture pipeline.
class ThreadCreateFile (threading.Thread):
    """Prepares ./sourcePng, then blocks producing raw camera dumps.

    Side effects: wipes ./sourcePng, creates ./sourcePng/handlePng, and
    changes the *process-wide* working directory to ./sourcePng.
    Coordinates with the other threads via the module globals
    ``startHandleFlag`` (set when capture starts) and ``endHandleFlag``
    (cleared when capture ends).
    """

    def __init__(self, threadID, name, counter):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.counter = counter

    def run(self):
        # Tell the camera stack to dump raw frames to disk.
        os.environ["cameraDump"] = "1"
        # Clear leftovers from any previous run.
        os.system('rm -rf ./sourcePng/*')
        print("[INFO-thread1]:Delete all file in sourcePng")
        # Directory where converted PNGs are collected by thread 2.
        os.system('mkdir ./sourcePng/handlePng')
        print("[INFO-thread1]:Create Dir of ./sourcePng/handlePng")
        # NOTE: chdir affects every thread in the process, not just this one.
        os.chdir("./sourcePng")
        print("[INFO-thread1]: Change Dir to ./sourcePng")
        # Unblock thread 2's startup wait loop.
        global startHandleFlag
        startHandleFlag = 1
        print("[INFO-thread1]: Start Create File")
        # Blocks here until the capture pipeline exits.
        os.system('gst-launch-1.0 icamerasrc device-name=imx185 scene-mode=2 ! fakesink >/dev/null')
        # Signal threads 2 and 4 to leave their processing loops.
        global endHandleFlag
        endHandleFlag = 0
        print("[INFO-thread1]: End!")
        # os.system('gst-launch-1.0 icamerasrc device-name=imx185 scene-mode=2 ! fakesink')
#Write Second thread of handle raw file to png file
# Thread 2: repeatedly convert the newest raw dump into a PNG.
class ThreadHandleRawFileToPng (threading.Thread):
    """Copies the latest .GRBG12V32 dump, converts it (raw2vec + an external
    Python inference script) and moves the resulting PNG to ./handlePng.

    Uses ``copyFlag`` to tell thread 4 not to delete files mid-copy, and sets
    ``thread3StartHandleFlag`` once the first PNG exists.
    """

    def __init__(self, threadID, name, counter):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.counter = counter

    def run(self):
        # Wait until thread 1 has prepared directories and started capturing.
        global startHandleFlag
        while not startHandleFlag:
            print("[INFO-thread2]: Wait for starting")
            time.sleep(1)
        i = 0
        # Give thread 1 a head start so some raw files already exist.
        time.sleep(2)
        global endHandleFlag
        # Process the most recent dump until thread 1 signals the end.
        while endHandleFlag:
            # print(endHandleFlag)
            global copyFlag
            copyFlag = 1
            # Pick the second-newest dump (the newest may still be written).
            p = os.popen('ls *.GRBG12V32 |tail -n 2 | head -n 1')
            filename = p.read()
            filename = filename[:-1]  # strip trailing newline from `ls`
            command = "cp ./" + filename + " ./handlePng/handlePng.raw"
            print("[INFO-thread2]: Get the New file need be handled name:", filename)
            # print(command)
            os.system(command)
            print("[INFO-thread2]: Copy file need be handled to ./handlePng")
            copyFlag = 0
            # Preprocess the raw file with the external raw2vec binary.
            command = "../raw2vec bd 1920 1088 ./handlePng/handlePng.raw ./handlePng/readyHandlePng.raw"
            print("[INFO-thread2]: Converted raw file by raw2vec")
            os.system(command)
            # Convert to PNG with the external inference script.
            print("[INFO-thread2]: Start converting raw file by python script....")
            command = "python ../classification_sample_liz_png.py -i ./handlePng/readyHandlePng.raw -m ../DTTC2019/ispmodel/frozen_graph_DepthToSpace-hwc.xml>/dev/null"
            os.system(command)
            print("[INFO-thread2]: Converted raw file success by python script ")
            # i=i+1
            # command="mv ./created.png ./handlePng/created"+str(i)+".png"
            command = "mv ./created.png ./handlePng/"
            # print(command)
            os.system(command)
            # First PNG is ready: unblock threads 3 and 4.
            global thread3StartHandleFlag
            thread3StartHandleFlag = 1
            print("[INFO-thread2]: Copyed png to handlePng ")
        print("[INFO-thread2]: End! ")
#Write third thread of show png
# Thread 3: display the converted PNGs with an external viewer.
class ThreadShowPng (threading.Thread):
    """Waits for the first PNG, then blocks running the ../a.out viewer."""

    def __init__(self, threadID, name, counter):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.counter = counter

    def run(self):
        # Busy-wait until thread 2 has produced at least one PNG.
        global thread3StartHandleFlag
        while not thread3StartHandleFlag:
            print("[INFO-thread3]: Wait for starting")
            time.sleep(1)
        # Blocks while the external viewer runs.
        os.system("../a.out >>/dev/null")
        print("[INFO-thread3]: End! ")
#Write forth thread of delete raw
# Thread 4: periodically delete consumed raw dumps to bound disk usage.
class ThreadDeletePng (threading.Thread):
    """Every 3 s deletes all .GRBG12V32 dumps, but only while thread 2 is not
    mid-copy (``copyFlag``); stops when ``endHandleFlag`` clears."""

    def __init__(self, threadID, name, counter):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.counter = counter

    def run(self):
        global copyFlag
        # This thread only starts once thread 3 does (pipeline fully live).
        global thread3StartHandleFlag
        while not thread3StartHandleFlag:
            print("[INFO-thread4]: Wait for starting")
            time.sleep(1)
        # Keep cleaning until thread 1 signals the capture has ended.
        # (endHandleFlag is only read here, so no `global` declaration needed.)
        while endHandleFlag:
            print("[INFO-thread4]: CopyFlag= ", copyFlag)
            # Skip deletion while thread 2 is copying a dump.
            if not copyFlag:
                p = os.popen('ls *.GRBG12V32')
                fileNameList = p.read()
                # fileNameList.replace("\n"," ")
                fileNameList = fileNameList.replace('\n', ' ')
                command = "rm -f " + fileNameList
                # print("[INFO-thread2]:",command)
                # Delete every dumped .GRBG12V32 file in a single rm call.
                print("[INFO-thread4]: Deleting all raw file in sourcePng")
                os.system(command)
                print("[INFO-thread4]: Deleted all raw file in sourcePng")
            time.sleep(3)
        print("[INFO-thread4]: End! ")
def quit(signum, frame):
    """Signal handler for SIGINT/SIGTERM: announce and terminate the process.

    Exiting the main thread ends the daemon worker threads with it.
    """
    # global endHandleFlag
    # endHandleFlag = 0
    # print(endHandleFlag)
    print('You choose to stop me')
    sys.exit()
# Module-level coordination flags shared by the four worker threads.
exitFlag = 0                 # unused, kept for compatibility
startHandleFlag = 0          # set by thread 1 when capture starts
thread3StartHandleFlag = 0   # set by thread 2 when the first PNG exists
endHandleFlag = 1            # cleared by thread 1 when capture ends
copyFlag = 0                 # set by thread 2 while copying a raw dump

if __name__ == '__main__':
    # Install handlers so Ctrl-C / SIGTERM stop the whole pipeline cleanly.
    signal.signal(signal.SIGINT, quit)
    signal.signal(signal.SIGTERM, quit)
    thread1 = ThreadCreateFile(1, "Thread-1", 1)
    thread2 = ThreadHandleRawFileToPng(2, "Thread-2", 2)
    thread3 = ThreadShowPng(3, "Thread-3", 3)
    thread4 = ThreadDeletePng(4, "Thread-4", 4)
    # Fix: Thread.setDaemon() is deprecated (removed in Python 3.13);
    # set the `daemon` attribute directly before start().
    for worker in (thread1, thread2, thread3, thread4):
        worker.daemon = True
        worker.start()
    for worker in (thread1, thread2, thread3, thread4):
        worker.join()
    print("[mainThread] Removing all dumped file.....")
    os.system("rm *.GRBG12V32")
    print("[mainThread] exit the main thread!")
|
Python
| 171
| 35.695908
| 166
|
/TwoThread/TwoThread.py
| 0.599841
| 0.580398
|
jettaponB/Practice
|
refs/heads/main
|
import tkinter as tk
def show_output():
    """Render the 1-12 times table for the number typed into number_input.

    Reads the module-level ``number_input`` Entry and writes the result into
    the module-level ``output_label``. Shows the error text for 0 or for
    non-numeric input (previously a bare ``int()`` call crashed the Tk
    callback on non-numeric input).
    """
    try:
        number = int(number_input.get())
    except ValueError:
        # Non-numeric input gets the same error label as 0.
        output_label.configure(text='ผิด')
        return
    if number == 0:
        output_label.configure(text='ผิด')
        return
    output = ''
    for i in range(1, 13):
        output += str(number) + ' * ' + str(i)
        output += ' = ' + str(number * i) + '\n'
    output_label.configure(text=output)
# Build the window: a title label, a number entry, an OK button wired to
# show_output, and a label that receives the generated times table.
window = tk.Tk()
window.title('JustDoIT')
window.minsize(width=400, height=400)
title_label = tk.Label(master=window, text='สูตรคูณแม่')
title_label.pack(pady=20)
# Entry read by show_output().
number_input = tk.Entry(master=window, width=15)
number_input.pack()
ok_button = tk.Button(
    master=window, text='คือ', command=show_output,
    width=6, height=1
)
ok_button.pack()
# Label written by show_output().
output_label = tk.Label(master=window)
output_label.pack(pady=20)
# Blocks until the window is closed.
window.mainloop()
|
Python
| 38
| 19.473684
| 56
|
/Test07.py
| 0.633205
| 0.610039
|
jettaponB/Practice
|
refs/heads/main
|
class Tank:
    """A tank identified by a display name and carrying an ammo count."""

    def __init__(self, name, ammo) -> None:
        # Plain data holder: store both constructor arguments as attributes.
        self.ammo = ammo
        self.name = name
# Demo: create two tanks and echo their names.
first_tank = Tank('Serie1', 3)
second_tank = Tank('Serie2', 5)
for tank in (first_tank, second_tank):
    print(tank.name)
|
Python
| 10
| 20.9
| 43
|
/Test13.py
| 0.614679
| 0.59633
|
jettaponB/Practice
|
refs/heads/main
|
class Tank:
    """Tank whose ammunition count is capped at 10 and never fires below 0."""

    def __init__(self, name, ammo) -> None:
        self.name = name
        self.ammo = ammo

    def add_ammo(self, ammo):
        """Reload; the whole reload is ignored if it would exceed 10 rounds."""
        reloaded = self.ammo + ammo
        if reloaded <= 10:
            self.ammo = reloaded

    def fire_ammo(self):
        """Spend one round; a no-op when no ammunition remains."""
        if self.ammo <= 0:
            return
        self.ammo -= 1
|
Python
| 10
| 26.9
| 43
|
/class_tank.py
| 0.492806
| 0.478417
|
jettaponB/Practice
|
refs/heads/main
|
# message = 'วัชราวลี'
# result = len(message)
# print(result)
# message = 'วัชราวลี'
# result = 'วัช' in message
# print(result)
# message = '0982612325'
# result = message.isdigit()
# print(result)
# message = 'Just Python'
# result = message.replace('Python', 'Rabbit')
# print(result)
# Demonstrate str.split / str.join: break a comma-separated list of animal
# names into a list, then reassemble it with '+' between the names.
message = 'กระต่าย, กระรอก, หมี'
animals = message.split(', ')
new_message = '+'.join(animals)
print(new_message)
print(animals)
|
Python
| 21
| 19.095238
| 46
|
/Test12.py
| 0.656398
| 0.632701
|
jettaponB/Practice
|
refs/heads/main
|
# quests = ['ปลูกต้นมะม่วง', 'ล้างปลา', 'เผาถ่าน']
# if 'ล้างปลา' in quests:
# print('ทำงานเสร็จ')
#----------------------------------------------------
# quests = ['ปลูกต้นมะม่วง', 'ล้างปลา', 'เผาถ่าน']
# max_quests = 5
# if len(quests) < max_quests:
# quests.append('จับปลาดุก')
# print(quests)
#----------------------------------------------------
# quests = ['ปลูกต้นมะม่วง', 'ล้างปลา', 'เผาถ่าน']
# for quest in quests:
# print(quest)
#----------------------------------------------------
quests = ['ปลูกต้นมะม่วง', 'ล้างปลา', 'เผาถ่าน']
# Print a 1-based numbered list of quests. enumerate(..., start=1) replaces
# the index-based `for i in range(len(quests))` loop with identical output.
for position, quest in enumerate(quests, start=1):
    print(str(position) + '. ' + quest)
|
Python
| 19
| 32.210526
| 53
|
/Test10.py
| 0.419304
| 0.416139
|
jettaponB/Practice
|
refs/heads/main
|
def get_circle_area(radius):
    """Return the area of a circle of the given radius.

    NOTE(review): uses the 22/7 approximation of pi, as the original did;
    math.pi would be more accurate but would change returned values.
    """
    squared_radius = radius ** 2
    return 22 / 7 * squared_radius
def get_triangle_area(width, heigth):
    """Return the area of a triangle: half of base times height.

    (Parameter is spelled ``heigth`` in the existing API; kept for
    keyword-argument compatibility.)
    """
    half = 1 / 2
    return half * width * heigth
def get_rectangle_area(width, heigth):
    """Return the area of a width-by-heigth rectangle."""
    area = width * heigth
    return area
|
Python
| 8
| 24.25
| 38
|
/shape.py
| 0.661692
| 0.631841
|
jettaponB/Practice
|
refs/heads/main
|
# Exercise the Tank class from class_tank.py: firing decrements ammo (never
# below 0) and add_ammo only succeeds while the total stays <= 10.
import class_tank as CT

first_tank = CT.Tank('Serie1', 3)
first_tank.fire_ammo()          # 3 -> 2
print(first_tank.ammo)
first_tank.fire_ammo()          # 2 -> 1
first_tank.fire_ammo()          # 1 -> 0
print(first_tank.ammo)
first_tank.add_ammo(4)          # 0 + 4 <= 10, so the reload succeeds
print(first_tank.ammo)
|
Python
| 12
| 17.583334
| 34
|
/Test14.py
| 0.730942
| 0.717489
|
jettaponB/Practice
|
refs/heads/main
|
import tkinter as tk
# Multiplication-table GUI: the user types a base number, the program
# shows its times table from 1 to 12. UI strings are in Thai.
def show_output():
    """Read the entry, build the 1..12 times table, show it in the label."""
    number = int(input_number.get())
    output = ''
    for i in range(1, 13):
        output += str(number) + ' * ' + str(i) + ' = ' + str(number * i) + '\n'
    output_label.configure(text=output)
window = tk.Tk()
window.title('โปรแกรมคำนวนสูตรคูณ')
window.minsize(width=500, height=400)
title_label = tk.Label(master=window, text='กรุณาระบุแม่สูตรคูณ')  # "please enter the table base"
title_label.pack()
input_number = tk.Entry(master=window)
input_number.pack()
cal_button = tk.Button(master=window, text='คำนวน', command=show_output)  # "calculate"
cal_button.pack()
output_label = tk.Label(master=window)
output_label.pack()
window.mainloop()
|
Python
| 28
| 22.428572
| 79
|
/test09.py
| 0.662595
| 0.648855
|
jettaponB/Practice
|
refs/heads/main
|
# Grade banding demo: map a numeric score onto a letter grade.
# (Removed a leftover debug print('dafdaf') from the Grade A branch;
# output is unchanged for score = 55 since that branch never runs.)
score = 55
if score >= 80:
    print('Grade A')
elif score >= 70:
    print('Grade B')
elif score >= 60:
    print('Grade C')
else:
    print('Grade F')
|
Python
| 11
| 14.818182
| 20
|
/Test02.py
| 0.557471
| 0.511494
|
jettaponB/Practice
|
refs/heads/main
|
# Earlier loop experiments (kept for reference):
# number = 1
# double = number * 2
# print(number)
# for i in range(1, 7):
#     double = i * 2
#     print(double)
# for i in range(1, 7):
#     if i % 3 == 0:
#         continue
#     print(i)
# Print 1 and 2, stopping at the first multiple of 3 — the original
# for/break loop rewritten as a while loop (same prints, i ends at 3).
i = 1
while i % 3:
    print(i)
    i += 1
|
Python
| 17
| 14.647058
| 23
|
/Test03.py
| 0.467925
| 0.418868
|
jettaponB/Practice
|
refs/heads/main
|
# Arithmetic-operator scratch pad: each line notes its expected result.
# x = '4.5'
# y = str(12)
# z = x + y
# print(z)
# final_score = 15
#
# age = 25 # whole number (integer)
# weight = 66.6 # decimal number (Float)
# first_name = 'ศักรินทร์' # text (String)
# has_notebook = True # Boolean
x = 5
y = 2
a1 = x + y # 7 (addition)
a2 = x - y # 3 (subtraction)
a3 = x * y # 10 (multiplication)
a4 = x / y # 2.5 (true division always yields a float)
a5 = x % y # 1 (remainder)
a6 = x ** y # 25 (exponentiation)
a7 = x // y # 2 (floor division)
a8 = (x + 1) * (y - 1) # 6
x = x + 3 # x += 3 would be the shorthand form
print(a8)
|
Python
| 27
| 16.629629
| 53
|
/Test01.py
| 0.395789
| 0.32
|
jettaponB/Practice
|
refs/heads/main
|
import tkinter as tk
# Minimal tkinter demo: copy the entry's text into the label on click.
def set_message():
    """Copy the current entry text into the title label."""
    text = text_input.get()
    title_label.configure(text=text)
window = tk.Tk()
window.title('Desktop Application')
window.minsize(width=300, height=400)
title_label = tk.Label(master=window, text='กรุณาระบุข้อความ')  # "please enter a message"
title_label.pack()
text_input = tk.Entry(master=window)
text_input.pack()
ok_button = tk.Button(master=window, text='OK', command=set_message)
ok_button.pack()
window.mainloop()
|
Python
| 20
| 21.15
| 68
|
/Test08.py
| 0.714286
| 0.700893
|
jettaponB/Practice
|
refs/heads/main
|
# def get_box_area(width, length, height):
# box_area = width * length * height
# print(box_area)
#
# get_box_area(4, 4, 2)
# get_box_area(width=1, length=1, height=2)
def get_box_area(width, length, height):
    """Return width * length * height, or 0 if any dimension is negative."""
    if min(width, length, height) < 0:
        return 0
    return width * length * height


box1 = get_box_area(4, -4, 2)                       # negative dim -> 0
box2 = get_box_area(width=1, length=1, height=2)    # 2
print(box1, box2)
|
Python
| 20
| 21.1
| 48
|
/Test04.py
| 0.619048
| 0.573696
|
jettaponB/Practice
|
refs/heads/main
|
# A dict keeps named fields together (vs. separate variables, or a
# positional list where the meaning of each index is easy to forget).
book = {
    'name': 'C++',
    'price': '299',
    'page': '414',
}
# book['place'] = 'MU Salaya'  # adding a key would look like this
del book['price']  # same effect as book.pop('price'); return value unused
print(book)
|
Python
| 18
| 16.222221
| 51
|
/Test11.py
| 0.530744
| 0.472492
|
jettaponB/Practice
|
refs/heads/main
|
import shape as sh

# Demo the shape helpers with a couple of sample figures.
circle_area = sh.get_circle_area(10)
print(circle_area)
triangle_area = sh.get_triangle_area(width=6, heigth=7)
print(triangle_area)
|
Python
| 7
| 18.142857
| 50
|
/Test05.py
| 0.75188
| 0.721804
|
gitclub-data/Alarm_clock
|
refs/heads/master
|
from tkinter import *
import tkinter.filedialog as fd
root = Tk()
def browsefunc():
filename = fd.askopenfilename()
pathlabel.config(text=filename)
browsebutton = Button(root, text="Browse", command=browsefunc)
browsebutton.pack()
pathlabel = Label(root)
pathlabel.pack()
root.mainloop()
|
Python
| 15
| 19
| 62
|
/test.py
| 0.745819
| 0.745819
|
gitclub-data/Alarm_clock
|
refs/heads/master
|
import tkinter as tk
from tkinter import ttk
class Alarm():
    """Tkinter alarm-clock window.

    Builds the whole UI inside __init__ and immediately enters the Tk
    main loop, so constructing Alarm() blocks until the window closes.
    """
    def __init__(self):
        #Setting The Whole Window (fixed 852x552, effectively not resizable)
        self.root=tk.Tk()
        self.root.geometry("852x552+250+80")
        # NOTE(review): minsize/maxsize are given strings; tkinter coerces
        # them, but ints would be the conventional form — confirm intended.
        self.root.minsize("852","552")
        self.root.maxsize("852","552")
        self.root.title("Alarm Clock")
        Icon = tk.PhotoImage(file="Icon/alarmclock.png")  # requires Icon/ next to the script
        self.root.iconphoto(False,Icon)
        # self.root.configure(bg='ivory2')
        #Setting Up Label Inside Of Window (title banner)
        self.set_alarm=tk.Frame(self.root)
        self.set_alarm.pack(anchor="nw",side="top")
        self.set_alarm_label=tk.Label(self.set_alarm,text="Set Alarm",font=("Times",30,"bold","italic"),width=88)
        self.set_alarm_label.pack(side="right",padx=10)
        #Setting Up Time Of the alarm,Label,Ringtone,Remind me After 10 min
        self.Alarm_frame=tk.Frame(self.root,height=250,width=800,bg="white")
        self.Alarm_frame.pack(side="top")
        # Row 1: "Alarm Time" with hour/minute comboboxes and AM/PM picker.
        self.set_alarm_frame=tk.Frame(self.Alarm_frame)
        self.set_alarm_frame.pack(anchor="nw",side="top",pady=7,padx=9)
        self.Alarm_time=tk.Label(self.set_alarm_frame,text="Alarm Time : ",font=("Times",13,"bold"))
        self.Alarm_time.grid(column=0,row=0)
        self.slash_label=tk.Label(self.set_alarm_frame,text=":",font=("Times",16,"bold"))
        clicked_hour=tk.StringVar()
        self.hour=ttk.Combobox(self.set_alarm_frame,width=3,textvariable=clicked_hour)
        self.hour['values']=('00','01','02','03','04','05','06','07','08','09','10','11','12')
        self.hour.grid(column=1,row=0)
        # NOTE(review): Combobox.current() expects an int index; '00'
        # happens to work here — confirm intended.
        self.hour.current('00')
        self.slash_label.grid(column=2,row=0)
        clicked_min=tk.StringVar()
        # self.min is a widget attribute (not the builtin min()).
        self.min=ttk.Combobox(self.set_alarm_frame,width=3,textvariable=clicked_min)
        self.min['values'] = (
            '00', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14', '15', '16', '17',
            '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35',
            '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53',
            '54', '55', '56', '57', '58', '59')
        self.min.current('00')
        self.min.grid(column=3,row=0)
        clicked_div=tk.StringVar()
        self.div=ttk.Combobox(self.set_alarm_frame,width=3,textvariable=clicked_div)
        self.div['values']=('AM','PM')
        self.div.grid(column=4,row=0,padx=5)
        # Row 2: free-text label for the alarm.
        self.set_alarm_label_frame=tk.Frame(self.Alarm_frame)
        self.set_alarm_label_frame.pack(anchor="nw",side="top",padx=9,pady=7)
        self.set_alarm_label_label=tk.Label(self.set_alarm_label_frame,text="Label : ",font=("Times",13,"bold"))
        self.set_alarm_label_label.grid(column=0,row=0)
        null_label=tk.Label(self.set_alarm_label_frame,text="")  # spacer column
        null_label.grid(column=1,row=0,padx=19)
        input_label = tk.StringVar()
        self.label = ttk.Entry(self.set_alarm_label_frame, textvariable = input_label,width=23)
        self.label.focus_force()
        self.label.grid(column=2,row=0)
        # Row 3: ringtone picker (browse box not yet implemented).
        self.set_alarm_ringtone_frame=tk.Frame(self.Alarm_frame)
        self.set_alarm_ringtone_frame.pack(anchor="nw",side="top",padx=9,pady=7)
        self.set_alarm_ringtone_label=tk.Label(self.set_alarm_ringtone_frame,text="Ringtone :",font=("Times",13,"bold"))
        self.set_alarm_ringtone_label.grid(column=0,row=0)
        #will setting up browse ringtone box
        # Row 4: "remind me after 10 min" toggle (not yet implemented).
        self.set_alarm_remind_frame=tk.Frame(self.Alarm_frame)
        self.set_alarm_remind_frame.pack(anchor="nw",side="top",padx=9,pady=7)
        self.set_alarm_remind_label=tk.Label(self.set_alarm_remind_frame,text="Remind me after 10 min :",font=("Times",13,"bold"))
        self.set_alarm_remind_label.grid(column=0,row=0)
        #will setting up on off to use or not use this
        self.root.mainloop()
# Build and run the window immediately on import/execution.
Alarm_clock=Alarm()
|
Python
| 89
| 43.033707
| 130
|
/Alarm.py
| 0.604645
| 0.54441
|
manatbay/IxNetwork
|
refs/heads/master
|
# PLEASE READ DISCLAIMER
#
# This is a sample script for demo and reference purpose only.
# It is subject to change for content updates without warning.
#
# REQUIREMENTS
# - Python2.7 - Python 3.6
# - Python module: requests
#
# DESCRIPTION
# Capturing packets. Make sure traffic is running in continuous mode.
# Enable data plane and/or control plane capturing.
# Saved the .cap files (dataPlane and/or controlPlane) to local filesystem.
# Save packet capturing in wireshark style with header details.
#
# Tested in Windows only.
#
# USAGE
# python <script>.py windows
import sys, traceback, time
sys.path.insert(0, '../Modules')
from IxNetRestApi import *
from IxNetRestApiProtocol import Protocol
from IxNetRestApiTraffic import Traffic
from IxNetRestApiFileMgmt import FileMgmt
from IxNetRestApiPortMgmt import PortMgmt
from IxNetRestApiStatistics import Statistics
from IxNetRestApiPacketCapture import PacketCapture
# Top-level capture flow: connect to the IxNetwork API server, configure
# data-plane capture on one port, capture for 10 seconds, then download
# the .cap file plus a wireshark-style packet dump.
connectToApiServer = 'windows'
try:
    #---------- Preference Settings --------------
    forceTakePortOwnership = True
    releasePortsWhenDone = False
    enableDebugTracing = True
    deleteSessionAfterTest = True ;# For Windows Connection Mgr and Linux API server only
    # Optional: Mainly for connecting to Linux API server.
    licenseServerIp = '192.168.70.3'
    licenseModel = 'subscription'
    licenseTier = 'tier3'
    ixChassisIp = '192.168.70.11'
    # [chassisIp, cardNumber, slotNumber]
    portList = [[ixChassisIp, '1', '1'],
                [ixChassisIp, '2', '1']]
    if connectToApiServer in ['windows', 'windowsConnectionMgr']:
        mainObj = Connect(apiServerIp='192.168.70.3',
                          serverIpPort='11009',
                          serverOs=connectToApiServer,
                          deleteSessionAfterTest=deleteSessionAfterTest)
    #---------- Preference Settings End --------------
    # NOTE: Make sure traffic is running continuously
    # Enable capture on port 2/1, data plane only.
    pktCaptureObj = PacketCapture(mainObj)
    pktCaptureObj.packetCaptureConfigPortMode([ixChassisIp, '2', '1'], enableDataPlane=True, enableControlPlane=False)
    pktCaptureObj.packetCaptureClearTabs()
    pktCaptureObj.packetCaptureStart()
    time.sleep(10)  # capture window
    pktCaptureObj.packetCaptureStop()
    # If there is no folder called c:\\Results, it will be created. c:\\Results is an example. Give any name you like.
    pktCaptureObj.getCapFile(port=[ixChassisIp, '2', '1'], typeOfCapture='data', saveToTempLocation='c:\\Results',
                             localLinuxLocation='.', appendToSavedCapturedFile=None)
    # Optional: Wireshark style details
    pktCaptureObj.packetCaptureGetCurrentPackets(getUpToPacketNumber=5, capturePacketsToFile=True)
    pktCaptureObj.packetCaptureClearTabs()
except (IxNetRestApiException, Exception, KeyboardInterrupt) as errMsg:
    # Best-effort cleanup on any failure or Ctrl-C.
    if enableDebugTracing:
        if not bool(re.search('ConnectionError', traceback.format_exc())):
            print('\n%s' % traceback.format_exc())
    print('\nException Error! %s\n' % errMsg)
    if 'mainObj' in locals() and connectToApiServer in ['windows', 'windowsConnectionMgr']:
        if releasePortsWhenDone and forceTakePortOwnership:
            # NOTE(review): portObj is never defined in this script (only
            # mainObj is); this line would raise NameError if reached.
            portObj.releasePorts(portList)
        if connectToApiServer == 'windowsConnectionMgr':
            if deleteSessionAfterTest:
                mainObj.deleteSession()
|
Python
| 87
| 37.827587
| 119
|
/RestApi/Python/SampleScripts/packetCapture.py
| 0.696952
| 0.682154
|
martkins/images_exif_viewer
|
refs/heads/master
|
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.lang import Builder
from kivy.event import EventDispatcher
class LabelModel(Label):
    """Label subclass used as the model behind exif text rows.

    Bug fix: the original called ``super(Label, self).__init__``, which
    starts the MRO lookup *after* Label and therefore skips Label's own
    initializer. Zero-argument ``super()`` runs ``Label.__init__`` as
    intended.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
Python
| 9
| 26.111111
| 45
|
/labelmodel.py
| 0.712551
| 0.712551
|
martkins/images_exif_viewer
|
refs/heads/master
|
from kivy.uix.image import Image
from kivy.properties import NumericProperty
class ImageModel(Image):
    """Image widget model tracking a rotation angle in degrees.

    Bug fix: the original called ``super(Image, self).__init__``, which
    skips Image's own initializer (the MRO lookup starts after Image);
    zero-argument ``super()`` is the correct form.
    """
    ang = NumericProperty()  # current rotation angle; the .kv layout can bind to it

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def rotate_right(self):
        """Increase the angle by 90 degrees."""
        self.ang += 90

    def rotate_left(self):
        """Decrease the angle by 90 degrees."""
        self.ang -= 90

    def reset_angle(self):
        """Reset the rotation back to 0."""
        self.ang = 0
|
Python
| 19
| 18.263159
| 45
|
/imagemodel.py
| 0.60929
| 0.595628
|
martkins/images_exif_viewer
|
refs/heads/master
|
from kivy.app import App
from kivy.uix.image import Image
from kivy.properties import ObjectProperty
from kivy.uix.listview import ListView, SimpleListAdapter
from kivy.uix.label import Label
from imagemodel import ImageModel
from kivy.uix.button import Button
from kivy.factory import Factory
from buttonmodel import ButtonModel
from labelmodel import LabelModel
from kivy.core.window import Window
class ButtonWithModel(Button):
    """Button widget that carries a reference to its backing model."""
    def __init__(self,model, **kwargs):
        self.model = model
        super().__init__(**kwargs)
class LabelWithModel(Label):
    """Label widget that carries a reference to its backing model."""
    def __init__(self,model, **kwargs):
        self.model = model
        super().__init__(**kwargs)
class ImageWithModel(Image):
    """Image widget that carries a reference to its backing model."""
    def __init__(self,model, **kwargs):
        self.model = model
        super().__init__(**kwargs)
class MainApp(App):
    """Exif viewer app: builds the widget tree and wires keyboard shortcuts."""

    image = ObjectProperty()
    exif = ObjectProperty()

    def build(self):
        # NOTE(review): build() returns None; the root widget presumably
        # comes from the auto-loaded kv file — confirm.
        Window.bind(on_keyboard=self.on_keyboard)
        self.start_app()

    def on_keyboard(self, window, key, scancode, codepoint, modifier):
        """Dispatch Ctrl+<letter> shortcuts to the model actions."""
        if modifier != ['ctrl']:
            return
        if codepoint == 'r':
            self.image.model.rotate_right()
        elif codepoint == 'l':
            self.image.model.rotate_left()
        elif codepoint == 'o':
            self.exif.model.open_image()
        elif codepoint == 'e':
            self.exif.model.get_exif_data()
        elif codepoint == 'n':
            self.exif.model.next_image()
        elif codepoint == 'p':
            self.exif.model.previous_image()
        elif codepoint == 'g':
            self.exif.model.get_location()

    def start_app(self):
        """Create the models, attach every widget, and fill the exif list view."""
        exif_rows = [LabelModel() for _ in range(100)]
        self.image = Factory.MainImage(ImageModel())
        self.root.ids.image_box.add_widget(self.image)
        self.exif = Factory.GetExifData(ButtonModel(image=self.image, labels=exif_rows))
        button_box = self.root.ids.button_box
        button_box.add_widget(self.exif)
        button_box.add_widget(Factory.RotateRight(self.exif.model))
        button_box.add_widget(Factory.RotateLeft(self.exif.model))
        button_box.add_widget(Factory.GetLocation(self.exif.model))
        cycle_box = self.root.ids.cycle_box
        cycle_box.add_widget(Factory.NextImage(self.exif.model))
        cycle_box.add_widget(Factory.PreviousImage(self.exif.model))
        button_box.add_widget(Factory.OpenImage(self.exif.model))
        exif_container = self.root.ids.exif_container
        exif_container.add_widget(Factory.ExifLabel(LabelModel()))
        adapter = SimpleListAdapter(
            data=exif_rows,
            args_converter=lambda index, item: {'model': item,
                                                'size_hint_y': None,
                                                'height': 100},
            cls=Factory.ExifTags)
        exif_container.add_widget(ListView(adapter=adapter))


if __name__ == "__main__":
    MainApp().run()
|
Python
| 94
| 32.936169
| 85
|
/main.py
| 0.614734
| 0.612853
|
martkins/images_exif_viewer
|
refs/heads/master
|
import exifread
from kivy.uix.button import Button
from kivy.lang import Builder
from tkinter.filedialog import askopenfilenames
from kivy.properties import DictProperty, ListProperty, NumericProperty
import webbrowser
from tkinter import Tk
# Hidden Tk root so tkinter file dialogs can be used from this Kivy app
# without an extra empty window appearing.
root = Tk()
root.withdraw()
# Load the shared action-button layout rules.
Builder.load_file('./actionbutton.kv')
def _convert(value):
d = float(str(value[0]))
m = float(str(value[1]))
s1 = (str(value[2])).split('/')
s = float((s1[0])) / float((s1[1]))
return d + (m / 60.0) + (s / 3600.0)
class ButtonModel(Button):
    """Controller behind the exif-viewer buttons.

    Holds the selected image paths, the parsed exif tags, and the label
    widgets used to display them.

    Fixes over the original: zero-argument super() (the old
    super(Button, self) skipped Button.__init__), `with open` instead of a
    leaked file handle, `except Exception` instead of bare `except:`, and
    a stray debug print removed.
    """
    tags = DictProperty()        # exif tag name -> value for the current image
    images = ListProperty()      # file paths chosen in the open dialog
    count = NumericProperty(0)   # index of the image currently shown

    def __init__(self, image='', labels='', **kwargs):
        self.image = image    # image widget being controlled
        self.labels = labels  # LabelModel widgets that display exif rows
        super().__init__(**kwargs)

    def rotate_right(self):
        """Forward to the image model."""
        self.image.model.rotate_right()

    def rotate_left(self):
        """Forward to the image model."""
        self.image.model.rotate_left()

    def open_image(self):
        """Ask the user for image files and show the first one."""
        try:
            self.images = askopenfilenames(initialdir="/", title="Select file",
                                           filetypes=(("jpeg files", "*.jpg"), ("png files", "*png"), ("all files", "*.*")))
            self.reset_labels()
            self.image.source = self.images[0]
            self.image.model.reset_angle()
        except Exception:
            # Deliberately best-effort: a cancelled dialog yields an empty
            # selection (IndexError); keep the current image in that case.
            pass

    def get_exif_data(self):
        """Parse exif tags from the current image into the label widgets."""
        # `with` guarantees the file is closed (the original leaked it).
        with open(self.image.source, 'rb') as f:
            self.tags = exifread.process_file(f)
        i = 0
        for tag in self.tags.keys():
            if tag not in ('EXIF MakerNote', 'User Comment', 'JPEGThumbnail', 'EXIF UserComment'):
                self.labels[i].text = str(tag.split()[1]) + ' : ' + str(self.tags[tag])
                i = i + 1

    def get_location(self):
        """Open Google Maps at the image's GPS position, if the tags carry one."""
        lat = None
        lon = None
        try:
            gps_latitude = self.tags['GPS GPSLatitude'].values
            gps_latitude_ref = self.tags['GPS GPSLatitudeRef'].values
            gps_longitude = self.tags['GPS GPSLongitude'].values
            gps_longitude_ref = self.tags['GPS GPSLongitudeRef'].values
            if gps_latitude and gps_latitude_ref and gps_longitude and gps_longitude_ref:
                lat = _convert(gps_latitude)
                if gps_latitude_ref != 'N':
                    lat = 0 - lat
                lon = _convert(gps_longitude)
                if gps_longitude_ref != 'E':
                    lon = 0 - lon
            webbrowser.open('https://www.google.com/maps/search/?api=1&query=' + str(lat) + ',' + str(lon))
        except Exception:
            # Missing GPS tags (KeyError) -> silently do nothing.
            pass

    def next_image(self):
        """Advance to the next selected image (wraps around)."""
        if len(self.images) > 1:
            self.count = self.count + 1
            if self.count >= len(self.images):
                self.count = 0
            self.image.model.reset_angle()
            self.reset_labels()
            self.image.source = self.images[self.count]

    def previous_image(self):
        """Step back to the previous selected image (wraps around)."""
        if len(self.images) > 1:
            self.count = self.count - 1
            if self.count < 0:
                self.count = len(self.images) - 1
            self.image.model.reset_angle()
            self.reset_labels()
            self.image.source = self.images[self.count]

    def reset_labels(self):
        """Clear parsed tags and blank every exif display label."""
        self.tags.clear()
        for label in self.labels:
            label.text = ''
|
Python
| 103
| 31.019417
| 113
|
/buttonmodel.py
| 0.552624
| 0.542918
|
Peroxidess/Ablation-Time-Prediction-Model
|
refs/heads/main
|
# Copyright (c) 2017 - 2019 Uber Technologies, Inc.
#
# Licensed under the Uber Non-Commercial License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at the root directory of this project.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# Models for MNIST experiments.
#
from __future__ import division, print_function
import numpy as np
import tensorflow as tf
def get_model(inputs,
              labels,
              is_training=True,
              dtype=tf.float32,
              w_dict=None,
              ex_wts=None,
              reuse=None,
              ):
    """Build a 5-layer MLP regressor (in -> 1024 -> 256 -> 64 -> 32 -> 1)
    and its mean-squared-error loss, optionally weighted per example.

    :param inputs: [Tensor] Inputs.
    :param labels: [Tensor] Labels.
    :param is_training: [bool] Whether in training mode, default True.
        (Currently unused; kept for interface compatibility.)
    :param dtype: [dtype] Data type, default tf.float32.
    :param w_dict: [dict] Dictionary of weights, default None.
    :param ex_wts: [Tensor] Example weights placeholder, default None.
    :param reuse: [bool] Whether to reuse variables, default None.
    :return: (w_dict, loss, pred) — captured variables, scalar loss, predictions.

    Fix over the original: removed an unused `mse` tensor that was built in
    the weighted-loss branch but never consumed.
    """
    if w_dict is None:
        w_dict = {}

    def _get_var(name, shape, dtype, initializer):
        # Return the tensor captured in w_dict if present (used by the
        # meta-gradient second pass); otherwise create and record it.
        key = tf.get_variable_scope().name + '/' + name
        if key in w_dict:
            return w_dict[key]
        else:
            var = tf.get_variable(name, shape, dtype, initializer=initializer)
            w_dict[key] = var
            return var

    with tf.variable_scope('Model', reuse=reuse):
        shape_list = np.append(np.array([-1]), np.squeeze(inputs.shape[1:].as_list()))
        # NOTE(review): ex_wts is dereferenced unconditionally here, so the
        # `ex_wts is None` loss branch below is unreachable unless a tensor
        # is always passed — confirm.
        shape_list_wts = np.append(np.array([-1]), np.squeeze(ex_wts.shape[1:].as_list()))
        shape_list_fir = np.append(np.squeeze(inputs.shape[1:].as_list()), np.array([1024]))
        shape_list_sec = np.array([1024, 256])
        shape_list_thr = np.array([256, 64])
        inputs_ = tf.cast(tf.reshape(inputs, shape_list), dtype)
        inputs_w = tf.cast(tf.reshape(ex_wts, shape_list_wts), dtype)
        # inputs_w = tf.matrix_diag(ex_wts)
        labels = tf.cast(tf.reshape(labels, [-1, 1]), dtype)
        w_init = tf.truncated_normal_initializer(stddev=0.1)
        w1 = _get_var('w1', shape_list_fir, dtype, initializer=w_init)
        w2 = _get_var('w2', shape_list_sec, dtype, initializer=w_init)
        w3 = _get_var('w3', shape_list_thr, dtype, initializer=w_init)
        w4 = _get_var('w4', [64, 32], dtype, initializer=w_init)
        w5 = _get_var('w5', [32, 1], dtype, initializer=w_init)
        b_init = tf.constant_initializer(0.0)
        b1 = _get_var('b1', 1, dtype, initializer=b_init)
        b2 = _get_var('b2', 1, dtype, initializer=b_init)
        b3 = _get_var('b3', 64, dtype, initializer=b_init)
        b4 = _get_var('b4', 32, dtype, initializer=b_init)
        b5 = _get_var('b5', 1, dtype, initializer=b_init)
        act = tf.nn.relu
        l0 = tf.identity(inputs_, name='l0')
        z1 = tf.add(tf.matmul(l0, w1), b1, name='z1')
        l1 = act(z1, name='l1')
        # h1 = tf.contrib.layers.batch_norm(l1, center=True, scale=True, is_training=True, scope='bn1')
        z2 = tf.add(tf.matmul(l1, w2), b2, name='z2')
        l2 = act(z2, name='l2')
        # h2 = tf.contrib.layers.batch_norm(l2, center=True, scale=True, is_training=True, scope='bn2')
        z3 = tf.add(tf.matmul(l2, w3), b3, name='z3')
        l3 = act(z3, name='l3')
        # h3 = tf.contrib.layers.batch_norm(l3, center=True, scale=True, is_training=True, scope='bn3')
        z4 = tf.add(tf.matmul(l3, w4), b4, name='z4')
        l4 = act(z4, name='l4')
        # h4 = tf.contrib.layers.batch_norm(l4, center=True, scale=True, is_training=True, scope='bn4')
        z5 = tf.add(tf.matmul(l4, w5), b5, name='z5')
        pred = z5
        if ex_wts is None:
            # Average loss.
            loss = tf.reduce_mean(tf.square(tf.subtract(pred, labels)))
        else:
            # Weighted loss.
            squa = tf.square(tf.subtract(pred, labels)) * inputs_w
            loss = tf.reduce_mean(squa)
    return w_dict, loss, pred
def reweight_random(bsize, eps=0.0):
    """Reweight examples using random numbers.

    Draws one standard-normal weight per example, clips it at `eps`, and
    normalizes the batch to sum to 1.

    :param bsize: [int] Batch size.
    :param eps: [float] Minimum example weights, default 0.0.
    """
    raw = tf.random_normal([bsize], mean=0.0, stddev=1.0)
    clipped = tf.maximum(raw, eps)
    total = tf.reduce_sum(clipped)
    # Guard against division by zero when every weight clipped to zero.
    total += tf.to_float(tf.equal(total, 0.0))
    return clipped / total
def reweight_autodiff(inp_a,
                      label_a,
                      inp_b,
                      label_b,
                      ex_wts_a,
                      ex_wts_b,
                      bsize_a,
                      bsize_b,
                      eps=0,
                      gate_gradients=1):
    """Reweight examples using automatic differentiation.

    Meta-gradient scheme: take one virtual SGD step on the noisy batch,
    evaluate the clean batch with the stepped weights, and use the negated
    gradient of the clean loss w.r.t. the example weights (clipped at
    `eps` and normalized) as the new example weights.

    :param inp_a: [Tensor] Inputs for the noisy pass.
    :param label_a: [Tensor] Labels for the noisy pass.
    :param inp_b: [Tensor] Inputs for the clean pass.
    :param label_b: [Tensor] Labels for the clean pass.
    :param bsize_a: [int] Batch size for the noisy pass.
    :param bsize_b: [int] Batch size for the clean pass.
    :param eps: [float] Minimum example weights, default 0.0.
    :param gate_gradients: [int] Tensorflow gate gradients, reduce concurrency.
    """
    # ex_wts_a = tf.ones([bsize_a], dtype=tf.float32)
    # ex_wts_b = tf.ones([bsize_b], dtype=tf.float32) / float(bsize_b)
    # ex_wts_b = tf.placeholder(tf.float32, [None, 1], name='ex_wts_b')
    # Noisy pass: weighted loss; w_dict captures the model variables.
    w_dict, loss_a, logits_a = get_model(
        inp_a, label_a, ex_wts=ex_wts_a, is_training=True, reuse=True)
    var_names = w_dict.keys()
    var_list = [w_dict[kk] for kk in var_names]
    grads = tf.gradients(loss_a, var_list, gate_gradients=gate_gradients)
    # grads_w = tf.gradients(loss_a, [ex_wts_a], gate_gradients=gate_gradients)
    # One virtual SGD step (unit learning rate) on the noisy loss.
    var_list_new = [vv - gg for gg, vv in zip(grads, var_list)]
    w_dict_new = dict(zip(var_names, var_list_new))
    # Clean pass re-uses the stepped weights via w_dict_new.
    _, loss_b, logits_b = get_model(
        inp_b, label_b, ex_wts=ex_wts_b, is_training=True, reuse=True, w_dict=w_dict_new)
    # d(clean loss)/d(example weights); a weight whose increase lowers the
    # clean loss gets a positive new weight.
    grads_ex_wts = tf.gradients(loss_b, [ex_wts_a], gate_gradients=gate_gradients)[0]
    ex_weight = -grads_ex_wts
    ex_weight_plus = tf.maximum(ex_weight, eps)
    ex_weight_sum = tf.reduce_sum(ex_weight_plus)
    # Avoid division by zero when every weight clipped to zero.
    ex_weight_sum += tf.to_float(tf.equal(ex_weight_sum, 0.0))
    ex_weight_norm = ex_weight_plus / ex_weight_sum
    return ex_weight_norm, var_list, grads, ex_weight_plus
def reweight_hard_mining(inp, label, positive=False):
    """Reweight examples using hard mining.

    :param inp: [Tensor] [N, ...] Inputs.
    :param label: [Tensor] [N] Labels
    :param positive: [bool] Whether perform hard positive mining or hard negative mining.
    :return [Tensor] Examples weights of the same shape as the first dim of inp.
    """
    # NOTE(review): get_model returns a reduced (scalar) loss here, while
    # this function treats `loss` as per-example — confirm a per-example
    # loss was intended.
    _, loss, logits = get_model(inp, label, ex_wts=None, is_training=True, reuse=True)
    # Mine for positive
    if positive:
        loss_mask = loss * label
    else:
        loss_mask = loss * (1 - label)
    # k = number of examples of the *other* class, so the mined hard
    # examples balance the batch; at least 1.
    if positive:
        k = tf.cast(tf.reduce_sum(1 - label), tf.int32)
    else:
        k = tf.cast(tf.reduce_sum(label), tf.int32)
    k = tf.maximum(k, 1)
    loss_sorted, loss_sort_idx = tf.nn.top_k(loss_mask, k)
    # Start from the opposite-class mask, then add the k hardest examples.
    if positive:
        mask = 1 - label
    else:
        mask = label
    updates = tf.ones([tf.shape(loss_sort_idx)[0]], dtype=label.dtype)
    mask_add = tf.scatter_nd(tf.expand_dims(loss_sort_idx, axis=1), updates, [tf.shape(inp)[0]])
    mask = tf.maximum(mask, mask_add)
    # Normalize so the weights sum to 1 (guard against an all-zero mask).
    mask_sum = tf.reduce_sum(mask)
    mask_sum += tf.cast(tf.equal(mask_sum, 0.0), tf.float32)
    mask = mask / mask_sum
    return mask
|
Python
| 197
| 40.659897
| 103
|
/Regression/src/learn_rewieght/reweight.py
| 0.575241
| 0.555136
|
Peroxidess/Ablation-Time-Prediction-Model
|
refs/heads/main
|
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler
import pandas as pd
import numpy as np
from preprocess import plot_tabel
def get_dataset_(nor, train_data, test_data, clean_ratio, test_retio, seed, target='label', val_ratio=0):
    """Split data into a partially label-noised train set, optional val set, and test set.

    A `clean_ratio` fraction of the train split keeps its labels; the rest
    gets Gaussian noise added to `target`. A `sup_label` column marks noisy
    rows (1) vs clean rows (0); it is split off with the target into the
    returned *_label frames.

    :param nor: only referenced by the commented-out plotting call below.
    NOTE(review): when clean_ratio >= 1 no 'sup_label' column is created,
    so the later [[target, 'sup_label']] selections assume the input
    already carries one — confirm.
    """
    if test_retio == 0 or test_data is not None:
        train_set = train_data
        test_set = test_data
    else:
        train_set, test_set = train_test_split(train_data, test_size=test_retio, random_state=seed)
    if clean_ratio < 1:
        # train_set_ receives noisy labels; train_set_clean keeps originals.
        train_set_, train_set_clean = train_test_split(train_set, test_size=clean_ratio, random_state=seed)
        # Noise shaped like the empirical label distribution of the noisy split.
        label_distrib = np.random.normal(loc=train_set_[target].describe().loc['mean'],
                                         scale=train_set_[target].describe().loc['std'], size=train_set_[target].shape)
        alpha = 1  # weight of the unit-Gaussian noise term
        beta = 1   # weight of the label-distribution-shaped noise term
        train_label_ = train_set_[target] + \
                       alpha * np.random.normal(loc=0., scale=1., size=train_set_[target].shape) + beta * label_distrib
        train_set_[target] = train_label_
        train_set_['sup_label'] = 1
        train_set_clean['sup_label'] = 0
        test_set['sup_label'] = 0
    else:
        train_set_ = None
        train_set_clean = train_set
    train_set_mix = pd.concat([train_set_, train_set_clean], axis=0)
    # mix_ratio = train_set[train_set[target] != train_set_mix[target]].index
    # print('real mix ratio is {}'.format(mix_ratio))
    if val_ratio > 0:
        train_set_mix, val_set = train_test_split(train_set_mix, test_size=val_ratio, random_state=seed)
        val_set_label = val_set[[target, 'sup_label']]
        val_set.drop(columns=[target, 'sup_label'], inplace=True)
    else:
        val_set = None
        val_set_label = None
    train_set_mix_label = train_set_mix[[target, 'sup_label']]
    test_set_label = test_set[[target, 'sup_label']]
    # plot_tabel.metric_hist(test_set, nor)
    train_set_mix.drop(columns=[target, 'sup_label'], inplace=True)
    test_set.drop(columns=[target, 'sup_label'], inplace=True)
    return train_set_mix, train_set_mix_label, val_set, val_set_label, test_set, test_set_label
def data_preprocessing(train_data, test_data=None, ca_feat_th=8, ca_co_sel_flag=True, onehot_flag=False, target='label'):
    """Split columns into categorical/continuous, encode and standardize them.

    Columns with more than `ca_feat_th` distinct values are treated as
    continuous and suffixed '_dense'; the rest as categorical ('_sparse').
    Returns (train_data, test_data, co_col, ca_col, std_scaler).

    Bug fix: the original did ``co_col = co_col.append(...)`` — when
    `ca_co_sel_flag` is True, `co_col` is a Python list and list.append
    returns None, clobbering `co_col`. The extension is now built
    explicitly. Unused `xx` scratch locals were also removed.
    """
    if test_data is not None:
        # Tag and concatenate so both sets share one encoding/scaling.
        train_data['tab'] = 1
        test_data['tab'] = 0
    data_raw = pd.concat([train_data, test_data], axis=0)
    print('\ndata_raw', data_raw.shape)
    data = data_raw.dropna(axis=1, how='all')
    data = data.fillna(0)
    if ca_co_sel_flag:
        # Classify every non-label column by its number of distinct values.
        ca_col = []
        co_col = []
        data_columns_label = data.filter(regex=r'label').columns
        data_columns = data.columns.drop(data_columns_label)
        for col in data_columns:
            data_col = data[col]
            col_feat_num = len(set(data_col))
            if col_feat_num > ca_feat_th:
                col_ = col + '_dense'
                co_col.append(col_)
                data.rename(columns={col: col_}, inplace=True)
            elif ca_feat_th >= col_feat_num > 1:
                col_ = col + '_sparse'
                ca_col.append(col_)
                data.rename(columns={col: col_}, inplace=True)
    else:
        # Columns were already suffixed on a previous pass.
        ca_col = data.filter(regex=r'sparse').columns
        co_col = data.filter(regex=r'dense').columns
    data[ca_col] = pd.concat([data[ca_col].apply(lambda ser: pd.factorize(ser)[0])])
    data[ca_col] = data[ca_col].apply(LabelEncoder().fit_transform)
    if onehot_flag:
        data = pd.get_dummies(data, columns=ca_col)
    # The regression target is scaled too, so the sup_label classifier does
    # not collapse on the target's raw range.
    co_col = list(co_col) + list(data.columns[data.columns == target])
    mms = MinMaxScaler(feature_range=(0.1, 1.1))  # kept for the commented-out alternative below
    std = StandardScaler()
    data[co_col] = pd.DataFrame(std.fit_transform(data[co_col]), columns=co_col, index=data.index)
    # data[co_col] = pd.DataFrame(mms.fit_transform(data[co_col]), columns=co_col, index=data.index)
    # data = pd.DataFrame(mms.fit_transform(data), columns=data.columns, index=data.index)
    if test_data is not None:
        train_data = data[data['tab'] == 1].drop(columns=['tab'])
        test_data = data[data['tab'] == 0].drop(columns=['tab'])
    else:
        train_data = data
    ca_col = data.filter(regex=r'sparse').columns
    co_col = data.filter(regex=r'dense').columns
    return train_data, test_data, co_col, ca_col, std
def anomaly_dectection(train_data=None, test_data=None, target='label'):
    """Drop rows whose `target` value lies outside mean ± 3*std (strict bounds).

    Applied independently to the train and test frames. Fix over the
    original: a None or empty frame is now passed through unchanged — the
    original silently skipped it, so the final `clean_data[1]` indexing
    raised IndexError whenever either frame was empty.

    :return: (cleaned_train, cleaned_test)
    """
    clean_data = []
    for data in [train_data, test_data]:
        if data is not None and not data.empty:
            std_ = data[target].std()
            mean_ = data[target].mean()
            data = data[data[target] < mean_ + 3 * std_]
            data = data[data[target] > mean_ - 3 * std_]
        clean_data.append(data)
    return clean_data[0], clean_data[1]
|
Python
| 112
| 44.723213
| 121
|
/Regression/src/preprocess/get_dataset.py
| 0.60738
| 0.601523
|
Peroxidess/Ablation-Time-Prediction-Model
|
refs/heads/main
|
# Copyright (c) 2017 - 2019 Uber Technologies, Inc.
#
# Licensed under the Uber Non-Commercial License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at the root directory of this project.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# Runs MNIST experitment. Default 10 runs for 10 random seeds.
#
# Usage:
# python -m mnist.imblanace_mnist_train_ad.py
#
# Flags:
# --exp [string] Experiment name, `ours`, `hm`, `ratio`, `random` or `baseline`.
# --pos_ratio [float] The ratio for the positive class, choose between 0.9 - 0.995.
# --nrun [int] Total number of runs with different random seeds.
# --ntrain [int] Number of training examples.
# --nval [int] Number of validation examples.
# --ntest [int] Number of test examples.
# --tensorboard Writes TensorBoard logs while training, default True.
# --notensorboard Disable TensorBoard.
# --verbose Print training progress, default False.
# --noverbose Disable printing.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import os
import six
import tensorflow as tf
from collections import namedtuple
from tensorflow.contrib.learn.python.learn.datasets.mnist import DataSet
from tensorflow.examples.tutorials.mnist import input_data
from tqdm import tqdm
from mnist_.reweight import get_model, reweight_random, reweight_autodiff, reweight_hard_mining
from utils.logger import get as get_logger
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'  # silence TensorFlow C++ info logs
tf.logging.set_verbosity(tf.logging.ERROR)
# Command-line flags (descriptions also in the module header above).
flags = tf.flags
flags.DEFINE_float('pos_ratio', 0.995, 'Ratio of positive examples in training')
flags.DEFINE_integer('nrun', 10, 'Number of runs')
flags.DEFINE_integer('ntest', 500, 'Number of testing examples')
flags.DEFINE_integer('ntrain', 5000, 'Number of training examples')
flags.DEFINE_integer('nval', 10, 'Number of validation examples')
flags.DEFINE_bool('verbose', False, 'Whether to print training progress')
flags.DEFINE_bool('tensorboard', True, 'Whether to save training progress')
flags.DEFINE_string('exp', 'baseline', 'Which experiment to run')
FLAGS = tf.flags.FLAGS
log = get_logger()
# Per-experiment hyper-parameter bundle.
Config = namedtuple('Config', [
    'reweight', 'lr', 'num_steps', 'random', 'ratio_weighted', 'nval', 'hard_mining', 'bsize'
])
# Registry mapping experiment name -> config-factory function.
exp_repo = dict()


def RegisterExp(name):
    """Decorator: register the decorated config factory under `name`."""
    def _decorator(f):
        exp_repo[name] = f
        return f
    return _decorator
# Shared training hyper-parameters for all experiment configs.
LR = 0.001
NUM_STEPS = 4000
# Plain training, no reweighting; runs twice as many steps.
@RegisterExp('baseline')
def baseline_config():
    return Config(
        reweight=False,
        num_steps=NUM_STEPS * 2,
        lr=LR,
        random=False,
        ratio_weighted=False,
        hard_mining=False,
        bsize=100,
        nval=0)
# Hard-mining variant (larger batch so mining has examples to pick from).
# NOTE(review): this def reuses the name `baseline_config`; harmless for
# the registry (the function object is registered), but the module-level
# name now points at this 'hm' version — confirm intended.
@RegisterExp('hm')
def baseline_config():
    return Config(
        reweight=False,
        num_steps=NUM_STEPS * 2,
        lr=LR,
        random=False,
        ratio_weighted=False,
        hard_mining=True,
        bsize=500,
        nval=0)
# Weight examples by class ratio.
@RegisterExp('ratio')
def ratio_config():
    return Config(
        reweight=False,
        num_steps=NUM_STEPS * 2,
        lr=LR,
        random=False,
        ratio_weighted=True,
        hard_mining=False,
        bsize=100,
        nval=0)
# Random example weights (control experiment).
@RegisterExp('random')
def dpfish_config():
    return Config(
        reweight=True,
        num_steps=NUM_STEPS * 2,
        lr=LR,
        random=True,
        ratio_weighted=False,
        hard_mining=False,
        bsize=100,
        nval=0)
# The proposed method: learned reweighting with a validation set.
@RegisterExp('ours')
def ours_config():
    return Config(
        reweight=True,
        num_steps=NUM_STEPS,
        lr=LR,
        random=False,
        ratio_weighted=False,
        hard_mining=False,
        bsize=100,
        nval=FLAGS.nval)
def get_imbalance_dataset(mnist,
                          pos_ratio=0.9,
                          ntrain=5000,
                          nval=10,
                          ntest=500,
                          seed=0,
                          class_0=4,
                          class_1=9):
    """Carve an imbalanced binary task (class_0 vs class_1) out of MNIST.

    Training data is `pos_ratio` positive (class_1) vs `1 - pos_ratio`
    negative (class_0); the test set is balanced 50/50; `nval` validation
    examples (half per class) are taken from the training pool before the
    training split. All shuffling is driven by `seed`, so splits are
    reproducible. Images are multiplied by 255.0 — presumably because the TF
    `DataSet` wrapper rescales back to [0, 1] internally; confirm against the
    DataSet implementation.

    Returns:
        (train_set, val_set, test_set, train_pos_set, train_neg_set) as
        `DataSet` objects; the last two hold only positive / only negative
        training examples.
    """
    rnd = np.random.RandomState(seed)
    # In training, we have 10% 4 and 90% 9.
    # In testing, we have 50% 4 and 50% 9.
    ratio = 1 - pos_ratio
    ratio_test = 0.5
    x_train = mnist.train.images
    y_train = mnist.train.labels
    x_test = mnist.test.images
    y_test = mnist.test.labels
    x_train_0 = x_train[y_train == class_0]
    x_test_0 = x_test[y_test == class_0]
    # First shuffle, negative.
    idx = np.arange(x_train_0.shape[0])
    rnd.shuffle(idx)
    x_train_0 = x_train_0[idx]
    nval_small_neg = int(np.floor(nval * ratio_test))
    ntrain_small_neg = int(np.floor(ntrain * ratio)) - nval_small_neg
    x_val_0 = x_train_0[:nval_small_neg]  # 450 4 in validation.
    x_train_0 = x_train_0[nval_small_neg:nval_small_neg + ntrain_small_neg]  # 500 4 in training.
    if FLAGS.verbose:
        print('Number of train negative classes', ntrain_small_neg)
        print('Number of val negative classes', nval_small_neg)
    idx = np.arange(x_test_0.shape[0])
    rnd.shuffle(idx)
    x_test_0 = x_test_0[:int(np.floor(ntest * ratio_test))]  # 450 4 in testing.
    x_train_1 = x_train[y_train == class_1]
    x_test_1 = x_test[y_test == class_1]
    # First shuffle, positive.
    idx = np.arange(x_train_1.shape[0])
    rnd.shuffle(idx)
    x_train_1 = x_train_1[idx]
    nvalsmall_pos = int(np.floor(nval * (1 - ratio_test)))
    ntrainsmall_pos = int(np.floor(ntrain * (1 - ratio))) - nvalsmall_pos
    x_val_1 = x_train_1[:nvalsmall_pos]  # 50 9 in validation.
    x_train_1 = x_train_1[nvalsmall_pos:nvalsmall_pos + ntrainsmall_pos]  # 4500 9 in training.
    idx = np.arange(x_test_1.shape[0])
    rnd.shuffle(idx)
    x_test_1 = x_test_1[idx]
    x_test_1 = x_test_1[:int(np.floor(ntest * (1 - ratio_test)))]  # 500 9 in testing.
    if FLAGS.verbose:
        print('Number of train positive classes', ntrainsmall_pos)
        print('Number of val positive classes', nvalsmall_pos)
    # Labels: 0 for class_0 (negative), 1 for class_1 (positive).
    y_train_subset = np.concatenate([np.zeros([x_train_0.shape[0]]), np.ones([x_train_1.shape[0]])])
    y_val_subset = np.concatenate([np.zeros([x_val_0.shape[0]]), np.ones([x_val_1.shape[0]])])
    y_test_subset = np.concatenate([np.zeros([x_test_0.shape[0]]), np.ones([x_test_1.shape[0]])])
    y_train_pos_subset = np.ones([x_train_1.shape[0]])
    y_train_neg_subset = np.zeros([x_train_0.shape[0]])
    x_train_subset = np.concatenate([x_train_0, x_train_1], axis=0).reshape([-1, 28, 28, 1])
    x_val_subset = np.concatenate([x_val_0, x_val_1], axis=0).reshape([-1, 28, 28, 1])
    x_test_subset = np.concatenate([x_test_0, x_test_1], axis=0).reshape([-1, 28, 28, 1])
    x_train_pos_subset = x_train_1.reshape([-1, 28, 28, 1])
    x_train_neg_subset = x_train_0.reshape([-1, 28, 28, 1])
    # Final shuffle.
    idx = np.arange(x_train_subset.shape[0])
    rnd.shuffle(idx)
    x_train_subset = x_train_subset[idx]
    y_train_subset = y_train_subset[idx]
    idx = np.arange(x_val_subset.shape[0])
    rnd.shuffle(idx)
    x_val_subset = x_val_subset[idx]
    y_val_subset = y_val_subset[idx]
    idx = np.arange(x_test_subset.shape[0])
    rnd.shuffle(idx)
    x_test_subset = x_test_subset[idx]
    y_test_subset = y_test_subset[idx]
    train_set = DataSet(x_train_subset * 255.0, y_train_subset)
    train_pos_set = DataSet(x_train_pos_subset * 255.0, y_train_pos_subset)
    train_neg_set = DataSet(x_train_neg_subset * 255.0, y_train_neg_subset)
    val_set = DataSet(x_val_subset * 255.0, y_val_subset)
    test_set = DataSet(x_test_subset * 255.0, y_test_subset)
    return train_set, val_set, test_set, train_pos_set, train_neg_set
def get_exp_logger(sess, log_folder):
    """Build a minimal TensorBoard scalar logger writing into `log_folder`."""
    with tf.name_scope('Summary'):
        writer = tf.summary.FileWriter(os.path.join(log_folder), sess.graph)

    class ExperimentLogger():
        """Thin wrapper around a tf.summary.FileWriter."""

        def log(self, niter, name, value):
            """Record scalar `value` under tag `name` at step `niter`."""
            summ = tf.Summary()
            summ.value.add(tag=name, simple_value=value)
            writer.add_summary(summ, niter)

        def flush(self):
            """Flushes results to disk."""
            writer.flush()

    return ExperimentLogger()
def evaluate(sess, x_, y_, acc_, train_set, test_set):
    """Mean per-batch accuracy over 5000 training and 500 test examples.

    Both sets are iterated in batches of 100; returns (train_acc, test_acc).
    """
    bsize = 100
    n_train_batches = 5000 // bsize
    n_test_batches = 500 // bsize

    def _mean_acc(dataset, nbatch):
        # Average the accuracy tensor over `nbatch` consecutive batches.
        total = 0.0
        for _ in six.moves.xrange(nbatch):
            xb, yb = dataset.next_batch(bsize)
            total += sess.run(acc_, feed_dict={x_: xb, y_: yb})
        return total / float(nbatch)

    return _mean_acc(train_set, n_train_batches), _mean_acc(test_set, n_test_batches)
def get_acc(logits, y):
    """Binary accuracy tensor: fraction where sigmoid(logits) > 0.5 equals y (0/1 floats)."""
    prediction = tf.cast(tf.sigmoid(logits) > 0.5, tf.float32)
    return tf.reduce_mean(tf.cast(tf.equal(prediction, y), tf.float32))
def run(dataset, exp_name, seed, verbose=True):
    """Train one experiment configuration on the imbalanced MNIST task.

    Builds the model graph plus the example-reweighting graph selected by the
    config registered under `exp_name`, trains for config.num_steps steps,
    and returns (train_acc, test_acc) from a final evaluation.
    """
    pos_ratio = FLAGS.pos_ratio
    ntrain = FLAGS.ntrain
    nval = FLAGS.nval
    ntest = FLAGS.ntest
    # Checkpoint/log folder is keyed by positive ratio, experiment and seed.
    folder = os.path.join('ckpt_mnist_imbalance_cnn_p{:d}'.format(int(FLAGS.pos_ratio * 100.0)),
                          exp_name + '_{:d}'.format(seed))
    if not os.path.exists(folder):
        os.makedirs(folder)
    with tf.Graph().as_default(), tf.Session() as sess:
        config = exp_repo[exp_name]()
        bsize = config.bsize
        train_set, val_set, test_set, train_pos_set, train_neg_set = get_imbalance_dataset(
            dataset, pos_ratio=pos_ratio, ntrain=ntrain, nval=config.nval, ntest=ntest, seed=seed)
        # if config.nval == 0:
        #     val_set = BalancedDataSet(train_pos_set, train_neg_set)
        x_ = tf.placeholder(tf.float32, [None, 784], name='x')
        y_ = tf.placeholder(tf.float32, [None], name='y')
        x_val_ = tf.placeholder(tf.float32, [None, 784], name='x_val')
        y_val_ = tf.placeholder(tf.float32, [None], name='y_val')
        ex_wts_ = tf.placeholder(tf.float32, [None], name='ex_wts')
        lr_ = tf.placeholder(tf.float32, [], name='lr')
        # Build training model.
        with tf.name_scope('Train'):
            _, loss_c, logits_c = get_model(
                x_, y_, is_training=True, dtype=tf.float32, w_dict=None, ex_wts=ex_wts_, reuse=None)
            # NOTE(review): the optimizer is built with the constant config.lr;
            # the lr_ placeholder fed below never reaches it, so the
            # learning-rate halving computed in the loop has no effect.
            train_op = tf.train.MomentumOptimizer(config.lr, 0.9).minimize(loss_c)
        # Build evaluation model.
        with tf.name_scope('Val'):
            _, loss_eval, logits_eval = get_model(
                x_,
                y_,
                is_training=False,
                dtype=tf.float32,
                w_dict=None,
                ex_wts=ex_wts_,
                reuse=True)
            acc_ = get_acc(logits_eval, y_)
        # Build reweighting model.
        if config.reweight:
            if config.random:
                ex_weights_ = reweight_random(bsize)
            else:
                ex_weights_ = reweight_autodiff(
                    x_,
                    y_,
                    x_val_,
                    y_val_,
                    bsize,
                    min(bsize, nval),
                    eps=0.0,
                    gate_gradients=1)
        else:
            if config.hard_mining:
                ex_weights_ = reweight_hard_mining(x_, y_, positive=True)
            else:
                if config.ratio_weighted:
                    # Weighted by the ratio of each class.
                    ex_weights_ = pos_ratio * (1 - y_) + (1 - pos_ratio) * (y_)
                else:
                    # Weighted by uniform.
                    ex_weights_ = tf.ones([bsize], dtype=tf.float32) / float(bsize)
        if FLAGS.tensorboard:
            exp_logger = get_exp_logger(sess, folder)
        else:
            exp_logger = None
        lr = config.lr
        num_steps = config.num_steps
        acc_sum = 0.0
        acc_test_sum = 0.0
        loss_sum = 0.0
        count = 0
        sess.run(tf.global_variables_initializer())
        for step in six.moves.xrange(num_steps):
            x, y = train_set.next_batch(bsize)
            x_val, y_val = val_set.next_batch(min(bsize, nval))
            # Use 50% learning rate for the second half of training.
            if step > num_steps // 2:
                lr = config.lr / 2.0
            else:
                lr = config.lr
            # First compute the example weights, then feed them to the
            # weighted training step.
            ex_weights = sess.run(
                ex_weights_, feed_dict={x_: x,
                                        y_: y,
                                        x_val_: x_val,
                                        y_val_: y_val})
            loss, acc, _ = sess.run(
                [loss_c, acc_, train_op],
                feed_dict={
                    x_: x,
                    y_: y,
                    x_val_: x_val,
                    y_val_: y_val,
                    ex_wts_: ex_weights,
                    lr_: lr
                })
            # Periodic full evaluation every 100 steps.
            if (step + 1) % 100 == 0:
                train_acc, test_acc = evaluate(sess, x_, y_, acc_, train_set, test_set)
                if verbose:
                    print('Step', step + 1, 'Loss', loss, 'Train acc', train_acc, 'Test acc',
                          test_acc)
                if FLAGS.tensorboard:
                    exp_logger.log(step + 1, 'train acc', train_acc)
                    exp_logger.log(step + 1, 'test acc', test_acc)
                    exp_logger.flush()
                acc_sum = 0.0
                loss_sum = 0.0
                acc_test_sum = 0.0
                count = 0
        # Final evaluation.
        train_acc, test_acc = evaluate(sess, x_, y_, acc_, train_set, test_set)
        if verbose:
            print('Final', 'Train acc', train_acc, 'Test acc', test_acc)
        return train_acc, test_acc
def run_many(dataset, exp_name):
    """Repeat `run` FLAGS.nrun times with distinct seeds; print mean/std accuracy."""
    train_accs = []
    test_accs = []
    for trial in tqdm(six.moves.xrange(FLAGS.nrun), desc=exp_name):
        seed = (trial * 123456789) % 100000
        tr_acc, te_acc = run(dataset, exp_name, seed, verbose=FLAGS.verbose)
        train_accs.append(tr_acc)
        test_accs.append(te_acc)
    train_accs = np.array(train_accs)
    test_accs = np.array(test_accs)
    print(exp_name, 'Train acc {:.3f}% ({:.3f}%)'.format(train_accs.mean() * 100.0,
                                                         train_accs.std() * 100.0))
    print(exp_name, 'Test acc {:.3f}% ({:.3f}%)'.format(test_accs.mean() * 100.0,
                                                        test_accs.std() * 100.0))
def main():
    """Load MNIST and run every experiment listed in FLAGS.exp (comma-separated)."""
    mnist = input_data.read_data_sets("data/mnist", one_hot=False)
    exp_names = FLAGS.exp.split(',')
    for name in exp_names:
        run_many(mnist, name)
# Script entry point.
if __name__ == '__main__':
    main()
|
Python
| 441
| 33.253967
| 100
|
/Regression/src/learn_rewieght/mnist_train.py
| 0.557593
| 0.53343
|
Peroxidess/Ablation-Time-Prediction-Model
|
refs/heads/main
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import math
# Global matplotlib defaults shared by every plotting helper in this module.
plt.rc('font', family='Times New Roman')
font_size = 16
def plot_metric_df(history_list, task_name, val_flag='test_'):
    """Plot per-round metrics of several models side by side with one shared legend.

    Fix: `col` was the float `L / row`, which recent matplotlib versions
    reject in `plt.subplot`; it is now an integer via `math.ceil`, giving the
    same grid layout.
    """
    if 'relapse_risk' in task_name:
        metric_list = ['loss', 'f1']
    else:
        metric_list = ['r2', 'mae', 'mse']
    fig = plt.figure(figsize=(20, 4))
    L = len(metric_list)
    # Near-square grid; ceil keeps the column count an integer.
    row = math.floor(math.sqrt(L))
    col = math.ceil(L / row)
    for i, metric in enumerate(metric_list):
        plt.subplot(row, col, i + 1)
        show_metric(history_list, metric, val_flag)
    fig.subplots_adjust(top=0.8)
    legend_labels = ['ours',
                     # 'enh_nonrelapse',
                     'ATT+MLP',
                     # 'vanilla_nonrelapse',
                     'LGB',
                     # 'lightgbm_nonrelapse',
                     'Lasso',
                     # 'lasso_nonrelapse'
                     ]
    plt.legend(labels=legend_labels,
               ncol=len(legend_labels),
               # loc='best',
               loc='upper center',
               fontsize=14,
               bbox_to_anchor=(-1.2, 1, 1, 0.2),
               borderaxespad=0.,
               )
    # plt.title('{} {}'.format(task_name, metric), fontsize=font_size)
def show_metric(history_list, metrics_name, val_flag=''):
    """Plot one metric column from each history DataFrame on the current axes."""
    marker_list = ['*', 'd', 's', 'x', 'o']
    metrics_name_dict = {'r2': 'R-square', 'mae': 'mean absolute error', 'mse': 'mean squared error'}
    for idx, hist_df in enumerate(history_list):
        # Select the column whose name is exactly val_flag + metrics_name.
        series = hist_df.filter(regex=r'\b{}{}\b'.format(val_flag, metrics_name))[:3000]
        plt.plot(series, linestyle=':', marker=marker_list[idx], linewidth=2)
    plt.xticks(range(0, 11), fontsize=font_size)
    plt.yticks(fontsize=font_size)
    plt.ylabel(metrics_name_dict[metrics_name], fontsize=font_size)
    plt.xlabel('Round', fontsize=font_size)
def plot_history_df(history_list, task_name, val_flag=''):
    """Plot training curves (loss + task metric) for several model variants.

    Fix: `col` was the float `L / row`, which recent matplotlib versions
    reject in `plt.subplot`; it is now an integer via `math.ceil`, giving the
    same grid layout.
    """
    if 'relapse_risk' in task_name:
        metric_list = ['loss', 'f1']
    else:
        metric_list = ['loss', 'r2']
    plt.figure(figsize=(12, 4))
    L = len(metric_list)
    # Near-square grid; ceil keeps the column count an integer.
    row = math.floor(math.sqrt(L))
    col = math.ceil(L / row)
    for i, metric in enumerate(metric_list):
        plt.subplot(row, col, i + 1)
        show_history(history_list, metric, val_flag)
    plt.legend(labels=['attention', 'attention+mlp', 'attention+label corrected',
                       'attention+mlp+label corrected(ours)', 'mlp', 'mlp+label corrected'],
               fontsize=14)
    # plt.title('{} {}'.format(metric, task_name), fontsize=font_size)
def show_history(history_list, metrics_name, val_flag=''):
    """Plot the mean training curve per model with a min/max band across matched columns."""
    marker_list = ['^', 'd', 's', '*', 'x', 'o']
    for m, history in enumerate(history_list):
        # Columns whose names start with val_flag + metrics_name (one per run/fold),
        # capped at the first 3000 epochs.
        history_metric = history.filter(regex=r'\b{}{}'.format(val_flag, metrics_name))[:3000]
        history_ = np.mean(history_metric, axis=1)
        len_ = history_.shape[0]
        plt.plot(history_, linewidth=2, marker=marker_list[m], markevery=200)
        # Shade between the per-epoch min and max across the matched columns.
        plt.fill_between(range(len_), np.min(history_metric, axis=1), np.max(history_metric, axis=1), alpha=0.3)
    plt.xticks(fontsize=font_size)
    plt.yticks(fontsize=font_size)
    plt.ylabel(val_flag + metrics_name, fontsize=font_size)
    plt.xlabel('Epoch', fontsize=font_size)
def plot_history(history_list, task_name, val_flag=False):
    """Plot mean Keras training curves; optionally overlay validation curves.

    Bug fix: the subplot row count called an undefined `squrt()` (NameError at
    runtime); it now uses the same floor(sqrt(L)) layout as the sibling
    plotting helpers.
    """
    if task_name == 'relapse_risk':
        metric_list = ['loss', 'f1']
    else:
        metric_list = ['loss', 'r2']
    plt.figure(figsize=(12, 4))
    L = len(metric_list)
    row = math.floor(math.sqrt(L))
    for i, metric in enumerate(metric_list):
        plt.subplot(row, L, i + 1)
        show_train_history(history_list, metric)
        if val_flag:
            show_train_history(history_list, 'val_{}'.format(metric))
            plt.legend(labels=[metric, 'val_{}'.format(metric)], loc='upper left')
        plt.title('{} {}'.format(task_name, metric))
def history_save(history_list, history_name):
    """Stack Keras history dicts row-wise (one row per metric) and dump to CSV.

    NOTE(review): the output filename keeps the original literal prefix
    'hitory_' (typo) because downstream consumers may depend on it.
    """
    frames = [pd.DataFrame.from_dict(h.history, orient='index') for h in history_list]
    combined = pd.concat([pd.DataFrame([])] + frames, axis=0)
    combined.to_csv('./hitory_{}.csv'.format(history_name))
def show_train_history(history_list, metrics_name):
    """Plot the per-epoch mean of `metrics_name` across a list of Keras History objects."""
    metrics_list = None
    for history in history_list:
        # One row per run: that run's per-epoch metric values.
        history_metric = pd.DataFrame(np.array(history.history[metrics_name]).reshape(1, -1))
        if metrics_list is None:
            metrics_list = history_metric
        else:
            metrics_list = pd.concat([metrics_list, history_metric], axis=0)
    # metrics = np.median(metrics_list, axis=0)
    metrics = np.mean(metrics_list, axis=0)
    plt.plot(metrics)
    plt.ylabel(metrics_name)
    plt.xlabel('Epoch')
|
Python
| 126
| 37.317459
| 112
|
/Regression/src/model/history_.py
| 0.583057
| 0.570215
|
Peroxidess/Ablation-Time-Prediction-Model
|
refs/heads/main
|
# Copyright (c) 2017 - 2019 Uber Technologies, Inc.
#
# Licensed under the Uber Non-Commercial License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at the root directory of this project.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# Runs MNIST experiment. Default 10 runs for 10 random seeds.
#
# Usage:
# python -m mnist.imblanace_mnist_train_ad.py
#
# Flags:
# --exp [string] Experiment name, `ours`, `hm`, `ratio`, `random` or `baseline`.
# --pos_ratio [float] The ratio for the positive class, choose between 0.9 - 0.995.
# --nrun [int] Total number of runs with different random seeds.
# --ntrain [int] Number of training examples.
# --nval [int] Number of validation examples.
# --ntest [int] Number of test examples.
# --tensorboard Writes TensorBoard logs while training, default True.
# --notensorboard Disable TensorBoard.
# --verbose Print training progress, default False.
# --noverbose Disable printing.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
import os
import six
import tensorflow as tf
from collections import namedtuple
from tensorflow.contrib.learn.python.learn.datasets.mnist import DataSet
from tensorflow.examples.tutorials.mnist import input_data
from tqdm import tqdm
from learn_rewieght.reweight import get_model, reweight_random, reweight_autodiff, reweight_hard_mining
from preprocess.load_data import load_data_
from preprocess.get_dataset import get_dataset_, data_preprocessing, anomaly_dectection
from model.training_ import training_model, model_training, precision, recall, f1, r2
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
# Quiet TensorFlow's C++ and Python-side logging before anything else runs.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
tf.logging.set_verbosity(tf.logging.ERROR)
# Command-line flags controlling the experiment sweep.
flags = tf.flags
flags.DEFINE_float('pos_ratio', 0.995, 'Ratio of positive examples in training')
flags.DEFINE_integer('nrun', 10, 'Number of runs')
flags.DEFINE_integer('ntest', 500, 'Number of testing examples')
flags.DEFINE_integer('ntrain', 5000, 'Number of training examples')
flags.DEFINE_integer('nval', 10, 'Number of validation examples')
flags.DEFINE_bool('verbose', False, 'Whether to print training progress')
flags.DEFINE_bool('tensorboard', False, 'Whether to save training progress')
flags.DEFINE_string('exp', 'baseline', 'Which experiment to run')
FLAGS = tf.flags.FLAGS
# Per-experiment hyperparameter bundle; instances are produced by the
# config functions registered in `exp_repo` below.
Config = namedtuple('Config', [
    'reweight', 'lr', 'num_steps', 'random', 'ratio_weighted', 'nval', 'hard_mining', 'bsize'
])
# Registry mapping experiment name -> zero-argument config factory.
exp_repo = dict()


def RegisterExp(name):
    """Decorator factory that records a config function under `name`.

    The decorated function is stored in the module-level `exp_repo`
    registry and returned unchanged, so it can still be called directly.
    """

    def _register(func):
        exp_repo[name] = func
        return func

    return _register
# Shared optimizer defaults used by every experiment config below.
LR = 0.001
NUM_STEPS = 4000
@RegisterExp('baseline')
def baseline_config():
    """Plain ERM baseline: uniform weights, no clean validation set."""
    settings = dict(
        reweight=False,
        random=False,
        ratio_weighted=False,
        hard_mining=False,
        lr=LR,
        num_steps=NUM_STEPS * 2,
        bsize=100,
        nval=0,
    )
    return Config(**settings)
@RegisterExp('hm')
def hm_config():
    """Hard-example-mining baseline (larger batch, mined down to the hardest).

    Renamed from `baseline_config`: the original duplicated the name of the
    'baseline' config function above, silently shadowing it at module level.
    Lookup through `exp_repo['hm']` is unchanged.
    """
    return Config(
        reweight=False,
        num_steps=NUM_STEPS * 2,
        lr=LR,
        random=False,
        ratio_weighted=False,
        hard_mining=True,
        bsize=500,
        nval=0)
@RegisterExp('ratio')
def ratio_config():
    """Baseline with fixed class-ratio example weights (no learned reweighting)."""
    return Config(
        reweight=False,
        random=False,
        ratio_weighted=True,
        hard_mining=False,
        lr=LR,
        num_steps=2 * NUM_STEPS,
        bsize=100,
        nval=0)
@RegisterExp('random')
def dpfish_config():
    """Control: random per-example weights instead of learned ones."""
    kwargs = {
        'reweight': True,
        'random': True,
        'ratio_weighted': False,
        'hard_mining': False,
        'num_steps': NUM_STEPS * 2,
        'lr': LR,
        'bsize': 100,
        'nval': 0,
    }
    return Config(**kwargs)
@RegisterExp('ours')
def ours_config():
    """Learning-to-reweight: autodiff weights from FLAGS.nval clean examples."""
    settings = dict(
        reweight=True,
        random=False,
        ratio_weighted=False,
        hard_mining=False,
        lr=LR,
        num_steps=NUM_STEPS,
        bsize=100,
        nval=FLAGS.nval,
    )
    return Config(**settings)
def get_imbalance_dataset(mnist,
                          pos_ratio=0.9,
                          ntrain=5000,
                          nval=10,
                          ntest=500,
                          seed=0,
                          class_0=4,
                          class_1=9):
    """Carve an imbalanced binary task (class_0 vs class_1) out of MNIST.

    Training data is `pos_ratio` positive (class_1) vs `1 - pos_ratio`
    negative (class_0); the test set is balanced 50/50; `nval` validation
    examples (half per class) are taken from the training pool before the
    training split. All shuffling is driven by `seed`, so splits are
    reproducible. Pixels are multiplied by 255.0 — presumably because the TF
    `DataSet` wrapper rescales back to [0, 1] internally; confirm.

    Returns:
        (train_set, val_set, test_set, train_pos_set, train_neg_set) as
        `DataSet` objects; the last two hold only positive / only negative
        training examples.
    """
    rnd = np.random.RandomState(seed)
    # In training, we have 10% 4 and 90% 9.
    # In testing, we have 50% 4 and 50% 9.
    ratio = 1 - pos_ratio
    ratio_test = 0.5
    x_train = mnist.train.images
    y_train = mnist.train.labels
    x_test = mnist.test.images
    y_test = mnist.test.labels
    x_train_0 = x_train[y_train == class_0]
    x_test_0 = x_test[y_test == class_0]
    # First shuffle, negative.
    idx = np.arange(x_train_0.shape[0])
    rnd.shuffle(idx)
    x_train_0 = x_train_0[idx]
    nval_small_neg = int(np.floor(nval * ratio_test))
    ntrain_small_neg = int(np.floor(ntrain * ratio)) - nval_small_neg
    x_val_0 = x_train_0[:nval_small_neg]  # 450 4 in validation.
    x_train_0 = x_train_0[nval_small_neg:nval_small_neg + ntrain_small_neg]  # 500 4 in training.
    if FLAGS.verbose:
        print('Number of train negative classes', ntrain_small_neg)
        print('Number of val negative classes', nval_small_neg)
    idx = np.arange(x_test_0.shape[0])
    rnd.shuffle(idx)
    x_test_0 = x_test_0[:int(np.floor(ntest * ratio_test))]  # 450 4 in testing.
    x_train_1 = x_train[y_train == class_1]
    x_test_1 = x_test[y_test == class_1]
    # First shuffle, positive.
    idx = np.arange(x_train_1.shape[0])
    rnd.shuffle(idx)
    x_train_1 = x_train_1[idx]
    nvalsmall_pos = int(np.floor(nval * (1 - ratio_test)))
    ntrainsmall_pos = int(np.floor(ntrain * (1 - ratio))) - nvalsmall_pos
    x_val_1 = x_train_1[:nvalsmall_pos]  # 50 9 in validation.
    x_train_1 = x_train_1[nvalsmall_pos:nvalsmall_pos + ntrainsmall_pos]  # 4500 9 in training.
    idx = np.arange(x_test_1.shape[0])
    rnd.shuffle(idx)
    x_test_1 = x_test_1[idx]
    x_test_1 = x_test_1[:int(np.floor(ntest * (1 - ratio_test)))]  # 500 9 in testing.
    if FLAGS.verbose:
        print('Number of train positive classes', ntrainsmall_pos)
        print('Number of val positive classes', nvalsmall_pos)
    # Labels: 0 for class_0 (negative), 1 for class_1 (positive).
    y_train_subset = np.concatenate([np.zeros([x_train_0.shape[0]]), np.ones([x_train_1.shape[0]])])
    y_val_subset = np.concatenate([np.zeros([x_val_0.shape[0]]), np.ones([x_val_1.shape[0]])])
    y_test_subset = np.concatenate([np.zeros([x_test_0.shape[0]]), np.ones([x_test_1.shape[0]])])
    y_train_pos_subset = np.ones([x_train_1.shape[0]])
    y_train_neg_subset = np.zeros([x_train_0.shape[0]])
    x_train_subset = np.concatenate([x_train_0, x_train_1], axis=0).reshape([-1, 28, 28, 1])
    x_val_subset = np.concatenate([x_val_0, x_val_1], axis=0).reshape([-1, 28, 28, 1])
    x_test_subset = np.concatenate([x_test_0, x_test_1], axis=0).reshape([-1, 28, 28, 1])
    x_train_pos_subset = x_train_1.reshape([-1, 28, 28, 1])
    x_train_neg_subset = x_train_0.reshape([-1, 28, 28, 1])
    # Final shuffle.
    idx = np.arange(x_train_subset.shape[0])
    rnd.shuffle(idx)
    x_train_subset = x_train_subset[idx]
    y_train_subset = y_train_subset[idx]
    idx = np.arange(x_val_subset.shape[0])
    rnd.shuffle(idx)
    x_val_subset = x_val_subset[idx]
    y_val_subset = y_val_subset[idx]
    idx = np.arange(x_test_subset.shape[0])
    rnd.shuffle(idx)
    x_test_subset = x_test_subset[idx]
    y_test_subset = y_test_subset[idx]
    train_set = DataSet(x_train_subset * 255.0, y_train_subset)
    train_pos_set = DataSet(x_train_pos_subset * 255.0, y_train_pos_subset)
    train_neg_set = DataSet(x_train_neg_subset * 255.0, y_train_neg_subset)
    val_set = DataSet(x_val_subset * 255.0, y_val_subset)
    test_set = DataSet(x_test_subset * 255.0, y_test_subset)
    return train_set, val_set, test_set, train_pos_set, train_neg_set
def get_exp_logger(sess, log_folder):
    """Build a minimal TensorBoard scalar logger writing into `log_folder`."""
    with tf.name_scope('Summary'):
        writer = tf.summary.FileWriter(os.path.join(log_folder), sess.graph)

    class ExperimentLogger():
        """Thin wrapper around a tf.summary.FileWriter."""

        def log(self, niter, name, value):
            """Record scalar `value` under tag `name` at step `niter`."""
            summ = tf.Summary()
            summ.value.add(tag=name, simple_value=value)
            writer.add_summary(summ, niter)

        def flush(self):
            """Flushes results to disk."""
            writer.flush()

    return ExperimentLogger()
def evaluate(sess, x_, y_, acc_, x, y, x_test, y_test):
    """Evaluate the accuracy tensor on the full train and test arrays.

    Returns (train_acc, test_acc).
    """
    accs = [sess.run(acc_, feed_dict={x_: xs, y_: ys})
            for xs, ys in ((x, y), (x_test, y_test))]
    return accs[0], accs[1]
def get_metric(pred, y):
    """Mean-squared-error tensor between predictions and targets.

    Fix: the original also built an R-squared sub-graph
    (total/unexplained error) whose result was never used anywhere —
    only `mse` was returned — so those dead graph nodes are removed.
    """
    mse = tf.reduce_mean(tf.square(pred - y))
    return mse
def run(train_data, test_data, seed, task_name, target='label'):
    """Train the learned-reweighting regressor once and report metrics.

    Preprocesses the data, carves out `clean_data_num` clean validation rows
    (sup_label == 0), builds the model + autodiff reweighting graph, trains
    for 2000 iterations on the full training set, and returns
    (metric_df, empty_df, empty_df). Reads many module-level globals:
    batchsize, lr, clean_data_num, reweight, random, ratio_weighted,
    hard_mining, clean_ratio, test_ratio, val_ratio.
    """
    train_data, test_data, co_col, ca_col = data_preprocessing(train_data, test_data,
                                                               ca_co_sel_flag=False, onehot_flag=True)
    _, test_data = anomaly_dectection(train_data, test_data)
    # train_data, test_data = anomaly_dectection(train_data, test_data)# Outlier detection
    x, y, x_val, y_val, test_set, test_set_label = \
        get_dataset_(train_data, test_data, clean_ratio=clean_ratio,
                     test_retio=test_ratio, seed=seed, val_ratio=val_ratio)  # label confusion according to requirements
    x.reset_index(inplace=True)
    # Drop the patient-ID column so it is not used as a feature.
    x.drop(columns=['基线-患者基本信息-ID_sparse'], inplace=True)
    y.reset_index(inplace=True)
    # Clean validation set: sample `clean_data_num` rows with trusted labels
    # (sup_label == 0) and remove them from the training pool.
    y_val = y.loc[y['sup_label'] == 0].sample(n=clean_data_num, random_state=seed)
    x_val = x.loc[y_val.index]
    x.drop(index=x_val.index, inplace=True)
    y.drop(index=x_val.index, inplace=True)
    ntrain = FLAGS.ntrain
    nval = FLAGS.nval
    ntest = FLAGS.ntest
    folder = os.path.join('ckpt_mnist_imbalance_cnn_p{:d}'.format(int(FLAGS.pos_ratio * 100.0)),
                          task_name + '_{:d}'.format(seed))
    if not os.path.exists(folder):
        os.makedirs(folder)
    with tf.Graph().as_default(), tf.Session() as sess:
        bsize = batchsize
        x_ = tf.placeholder(tf.float32, [None, x.shape[1]], name='x')
        y_ = tf.placeholder(tf.float32, [None], name='y')
        x_val_ = tf.placeholder(tf.float32, [None, x.shape[1]], name='x_val')
        y_val_ = tf.placeholder(tf.float32, [None], name='y_val')
        ex_wts_ = tf.placeholder(tf.float32, [None, 1], name='ex_wts')
        ex_wts_b = tf.placeholder(tf.float32, [None, 1], name='ex_wts_b')
        lr_ = tf.placeholder(tf.float32, [], name='lr')
        # Build training model.
        with tf.name_scope('Train'):
            _, loss_c, logits_c = get_model(
                x_, y_, is_training=True, dtype=tf.float32, w_dict=None, ex_wts=ex_wts_, reuse=None)
            # NOTE(review): the optimizer is built with the module-level
            # constant `lr`; the lr_ placeholder fed below never reaches it.
            train_op = tf.train.RMSPropOptimizer(learning_rate=lr).minimize(loss_c)
            # metric_ = get_metric(logits_c, y_)
        # Build evaluation model.
        with tf.name_scope('Val'):
            _, loss_eval, logits_eval = get_model(
                x_,
                y_,
                is_training=False,
                dtype=tf.float32,
                w_dict=None,
                ex_wts=ex_wts_,
                reuse=True)
            metric_ = get_metric(logits_eval, y_)
        # Build reweighting model.
        if reweight:
            if random:
                ex_weights_ = reweight_random(bsize)
            else:
                ex_weights_, var_list_, grads_, grads_w_ = reweight_autodiff(
                    x_,
                    y_,
                    x_val_,
                    y_val_,
                    ex_wts_,
                    ex_wts_b,
                    bsize,
                    clean_data_num,
                    eps=0.1,
                    gate_gradients=1)
        else:
            if hard_mining:
                ex_weights_ = reweight_hard_mining(x_, y_, positive=True)
            else:
                if ratio_weighted:
                    # Weighted by the ratio of each class.
                    # NOTE(review): `pos_ratio` is not defined at module level
                    # in this file — this branch would raise NameError.
                    ex_weights_ = pos_ratio * (1 - y_) + (1 - pos_ratio) * (y_)
                else:
                    # Weighted by uniform.
                    ex_weights_ = tf.ones([bsize], dtype=tf.float32) / float(bsize)
        if FLAGS.tensorboard:
            exp_logger = get_exp_logger(sess, folder)
        else:
            exp_logger = None
        # NOTE(review): num_steps is assigned but the loop below runs a
        # hard-coded 2000 iterations.
        num_steps = 10
        acc_sum = 0.0
        acc_test_sum = 0.0
        loss_sum = 0.0
        count = 0
        sess.run(tf.global_variables_initializer())
        history = pd.DataFrame([])
        history_loss = []
        history_loss_acc = []
        history_metric_r2 = []
        history_metric_mse = []
        history_metric_mae = []
        for i in range(2000):
            # NOTE(review): kf is created but unused (KFold loop is commented out).
            kf = KFold(n_splits=2, shuffle=False, random_state=2020)
            # for k, (train_index, val_index) in enumerate(kf.split(x)):
            #     x_batch, y_batch = x.iloc[train_index], y[target].iloc[train_index]
            x_batch, y_batch = x, y[target]
            # First compute learned example weights (with unit weights fed in),
            # then run the weighted training step.
            ex_weights, var_list, grads, grads_w = sess.run(
                [ex_weights_, var_list_, grads_, grads_w_], feed_dict={x_: x_batch,
                                                                      y_: y_batch,
                                                                      x_val_: x_val,
                                                                      y_val_: y_val[target],
                                                                      ex_wts_: np.ones((batchsize, 1)),
                                                                      ex_wts_b: np.ones([clean_data_num, 1])})
            # ww = var_list[0]
            # bb = var_list[1]
            # print(x_batch.shape)
            # print(ww.shape)
            # xx = np.matmul(np.array(x_batch), ww)
            # xxx = xx + bb
            # xxxx = xxx - np.array(y_batch).reshape(-1, 1)
            # ss = (xxxx ** 2) / 2
            # sss = np.mean(ss)
            # ww_xx = xxxx.reshape(1, -1).dot(np.array(x_batch))
            # re_xx = np.mean(np.abs(xxxx))
            pred_tra, loss, acc, _ = sess.run(
                [logits_c, loss_c, metric_, train_op],
                feed_dict={
                    x_: x_batch,
                    y_: y_batch,
                    x_val_: x_val,
                    y_val_: y_val[target],
                    ex_wts_: ex_weights,
                    lr_: lr
                })
            print(np.unique(ex_weights))
            # Per-iteration evaluation on the held-out test set.
            pred = sess.run(logits_eval, feed_dict={x_: test_set, y_: test_set_label[target], ex_wts_: ex_weights})
            # NOTE(review): sklearn metrics expect (y_true, y_pred); here the
            # prediction is passed first — R² is not symmetric, confirm intent.
            r2 = r2_score(pred, test_set_label[target])
            mse = mean_squared_error(pred, test_set_label[target])
            mae = mean_absolute_error(pred, test_set_label[target])
            history_loss.append(loss)
            history_loss_acc.append(acc)
            history_metric_r2.append(r2)
            history_metric_mse.append(mse)
            history_metric_mae.append(mae)
        # Final evaluation.
        history['loss'] = history_loss
        history['acc'] = history_loss_acc
        history['r2'] = history_metric_r2
        history['mse'] = history_metric_mse
        history['mae'] = history_metric_mae
        pred_tra = sess.run(logits_eval, feed_dict={x_: x, y_: y[target], ex_wts_: ex_weights})
        train_r2 = r2_score(pred_tra, y[target])
        train_r2_ad = None
        train_mse = mean_squared_error(pred_tra, y[target])
        train_mae = mean_absolute_error(pred_tra, y[target])
        train_mape = None
        val_r2, val_r2_ad, val_mse, val_mae, val_mape, = None, None, None, None, None
        test_r2, test_r2_ad, test_mse, test_mae, test_mape = r2, None, mse, mae, None
        dict_ = dict(zip(['train_r2', 'train_r2_ad', 'train_mse', 'train_mae', 'train_mape',
                          'val_r2', 'val_r2_ad', 'val_mse', 'val_mae', 'val_mape',
                          'test_r2', 'test_r2_ad', 'test_mse', 'test_mae', 'test_mape'],
                         [train_r2, train_r2_ad, train_mse, train_mae, train_mape,
                          val_r2, val_r2_ad, val_mse, val_mae, val_mape,
                          test_r2, test_r2_ad, test_mse, test_mae, test_mape,
                          ]))
    metric_df = pd.DataFrame.from_dict([dict_])
    return metric_df, pd.DataFrame([]), pd.DataFrame([])
def main():
    """Repeat the experiment FLAGS.nrun times; aggregate, print and save metrics."""
    metric_df_all = pd.DataFrame([])
    test_prediction_all = pd.DataFrame([])  # for prediction of test data
    history_df_all = pd.DataFrame([])  # for keras model
    for i, trial in enumerate(tqdm(six.moves.xrange(FLAGS.nrun))):
        print('rnum : {}'.format(i))
        seed = (trial * 2718) % 2020  # a different random seed for each run
        train_data, test_data = load_data_(datasets_name, task_name)
        metric_df, test_prediction, history_df = run(train_data, test_data, seed, task_name)
        metric_df_all = pd.concat([metric_df_all, metric_df], axis=0)
        test_prediction_all = pd.concat([test_prediction_all, test_prediction], axis=1)
        history_df_all = pd.concat([history_df_all, history_df], axis=1)
    # Summary statistics per metric across all runs.
    for col in metric_df_all.columns:
        print('{} {:.4f} ({:.4f}) max: {:.4f} median {:.4f} min: {:.4f}'.format(col, metric_df_all[col].mean(),
                                                                                metric_df_all[col].std(),
                                                                                metric_df_all[col].max(),
                                                                                metric_df_all[col].median(),
                                                                                metric_df_all[col].min()))
    metric_df_all.to_csv('./metric_{}_{}nrun_{}Fold.csv'.format(task_name, nrun, n_splits), index=False)
    history_df_all.to_csv('./history_{}_{}nrun_{}Fold.csv'.format(task_name, nrun, n_splits), index=False)
    # test_prediction_all.columns = ['ab_time', 'ab_time_enh']
    test_prediction_all.to_csv('./prediction{}_{}nrun_{}Fold.csv'.format(task_name, nrun, n_splits))
    plt.show()
    pass
# --- Module-level experiment settings (read as globals by run() and main()) ---
np.random.seed(2020)
datasets_name = 'LiverAblation'
task_name = 'ablation_time_learn_weight'  # ablation_time_enh / ablation_time_vanilla / relapse_risk
nrun = 10  # num of repeated experiments
clean_ratio = 1  # 1 for No label confusion
test_ratio = 0  # test data ratio for label confusion
val_ratio = 0  # val data ratio for label confusion
n_splits = 1  # n_splits > 1 for Kfold cross validation / n_splits==1 for training all data
epoch = 5000  # Kfold cross validation: a large number / training all data: mean epoch
batchsize = 348
lr = 1e-4
clean_data_num = 10  # size of the clean validation set used for reweighting
# Reweighting strategy switches consumed by run().
reweight = True
num_steps = NUM_STEPS
random = False  # NOTE(review): shadows the name of the stdlib `random` module if ever imported
ratio_weighted = False
hard_mining = False
# Script entry point.
if __name__ == '__main__':
    main()
|
Python
| 503
| 37.608349
| 120
|
/Regression/src/learn_weight_main.py
| 0.561586
| 0.542482
|
Peroxidess/Ablation-Time-Prediction-Model
|
refs/heads/main
|
import numpy as np
import pandas as pd
from sklearn.metrics import mean_absolute_error, mean_squared_error, \
confusion_matrix, precision_score, recall_score, f1_score, r2_score, accuracy_score
from sklearn.preprocessing import MinMaxScaler
def evaluate_classification(model, train_sets, train_label, val_sets, val_label, test_sets, test_label):
    """Accuracy and F1 of `model` on train/val/test splits as a one-row DataFrame.

    Predictions are rounded to the nearest class before scoring. Validation
    metrics are None when `val_label` is None. Fix: removed the unused
    confusion-matrix computation (its result was never read).
    """
    relapse_risk_test = model.predict(test_sets)
    relapse_risk_tra = model.predict(train_sets)
    train_acc = accuracy_score(train_label, relapse_risk_tra.round())
    test_acc = accuracy_score(test_label, relapse_risk_test.round())
    train_f1 = f1_score(train_label, relapse_risk_tra.round())
    test_f1 = f1_score(test_label, relapse_risk_test.round())
    val_acc = None
    val_f1 = None
    if val_label is not None:
        relapse_risk_val = model.predict(val_sets)
        val_acc = accuracy_score(val_label, relapse_risk_val.round())
        val_f1 = f1_score(val_label, relapse_risk_val.round())
    dict_ = dict(zip(['train_acc', 'test_acc', 'val_acc', 'val_f1', 'train_f1', 'test_f1'],
                     [train_acc, test_acc, val_acc, val_f1, train_f1, test_f1]))
    return pd.DataFrame([dict_])
def mape(y_true, y_pred):
    """Mean absolute percentage error, in percent. Assumes no zeros in y_true."""
    rel_err = np.abs((y_true - y_pred) / y_true)
    return 100 * np.mean(rel_err)
def wmape(y_true, y_pred):
    """Weighted MAPE: total absolute error over total absolute actuals, in percent."""
    total_err = np.mean(np.abs(y_true - y_pred))
    return 100 * total_err / np.mean(np.abs(y_true))
def smape(y_true, y_pred):
    """Symmetric MAPE (0-200 scale), in percent."""
    denom = np.abs(y_pred) + np.abs(y_true)
    return 100 * 2.0 * np.mean(np.abs(y_pred - y_true) / denom)
def evaluate_regression(model, train_sets, train_label, val_x, val_label, test_sets, test_label):
    """R², adjusted R², MSE, MAE and normalized percentage errors per split.

    Returns a one-row DataFrame; validation metrics are None when `val_x` is
    empty. Bug fix: the adjusted-R² sample/feature counts for the *test*
    split were copy-pasted from `train_sets`; they now come from `test_sets`.
    The unused `err` residual vector was also removed.
    """
    test_target_pred = model.predict(test_sets)
    train_target_pred = model.predict(train_sets)
    num_data_tra = train_sets.shape[0]
    num_feat_tra = train_sets.shape[1]
    # Fixed: use the test split's own dimensions for adjusted R².
    num_data_test = test_sets.shape[0]
    num_feat_test = test_sets.shape[1]
    train_r2 = r2_score(train_label, train_target_pred)
    train_r2_ad = 1 - ((1 - train_r2) * (num_data_tra - 1)) / abs(num_data_tra - num_feat_tra - 1)
    test_r2 = r2_score(test_label, test_target_pred)
    test_r2_ad = 1 - ((1 - test_r2) * (num_data_test - 1)) / abs(num_data_test - num_feat_test - 1)
    train_mse = mean_squared_error(train_label, train_target_pred)
    train_mae = mean_absolute_error(train_label, train_target_pred)
    test_mse = mean_squared_error(test_label, test_target_pred)
    test_mae = mean_absolute_error(test_label, test_target_pred)
    # Rescale labels and predictions to [0.1, 1] before the percentage
    # errors so near-zero targets do not blow up the denominator.
    mms = MinMaxScaler(feature_range=(0.1, 1))
    train_label_mms = mms.fit_transform(np.array(train_label).reshape(-1, 1))
    test_label_mms = mms.fit_transform(np.array(test_label).reshape(-1, 1))
    train_target_pred_mns = mms.fit_transform(train_target_pred.reshape(-1, 1))
    test_target_pred_mns = mms.fit_transform(test_target_pred.reshape(-1, 1))
    # NOTE(review): train/test use wmape while validation below uses smape —
    # confirm the asymmetry is intended.
    train_mape = wmape(train_label_mms, train_target_pred_mns.reshape(-1, ))
    test_mape = wmape(test_label_mms, test_target_pred_mns.reshape(-1, ))
    if not val_x.empty:
        val_target_pred = model.predict(val_x)
        num_data_val = val_x.shape[0]
        num_feat_val = val_x.shape[1]
        val_r2 = r2_score(val_label, val_target_pred)
        val_r2_ad = 1 - ((1 - val_r2) * (num_data_val - 1)) / abs(num_data_val - num_feat_val - 1)
        val_mse = mean_squared_error(val_label, val_target_pred)
        val_mae = mean_absolute_error(val_label, val_target_pred)
        val_label_mms = mms.fit_transform(np.array(val_label).reshape(-1, 1))
        val_target_pred_mns = mms.fit_transform(val_target_pred.reshape(-1, 1))
        val_mape = smape(val_label_mms, val_target_pred_mns.reshape(-1, ))
    else:
        val_r2, val_r2_ad, val_mse, val_mae, val_mape = None, None, None, None, None
    dict_ = dict(zip(['train_r2', 'train_r2_ad', 'train_mse', 'train_mae', 'train_mape',
                      'val_r2', 'val_r2_ad', 'val_mse', 'val_mae', 'val_mape',
                      'test_r2', 'test_r2_ad', 'test_mse', 'test_mae', 'test_mape'],
                     [train_r2, train_r2_ad, train_mse, train_mae, train_mape,
                      val_r2, val_r2_ad, val_mse, val_mae, val_mape,
                      test_r2, test_r2_ad, test_mse, test_mae, test_mape,
                      ]))
    return pd.DataFrame.from_dict([dict_])
|
Python
| 83
| 51.385544
| 104
|
/Regression/src/model/evaluate.py
| 0.633165
| 0.612925
|
Peroxidess/Ablation-Time-Prediction-Model
|
refs/heads/main
|
import pandas as pd
import numpy as np
from tqdm import tqdm
import six
import tensorflow as tf
from keras import losses
from keras import backend as K
from keras import optimizers
from keras.models import Sequential
from keras.layers import Dense
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error, mean_squared_error, \
confusion_matrix, precision_score, recall_score, f1_score, r2_score
from sklearn.linear_model import RidgeClassifierCV, LogisticRegressionCV, RidgeCV, LassoCV, LinearRegression
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
import lightgbm as lgb
import matplotlib.pyplot as plt
# from deepctr.models import DeepFM, xDeepFM, DCN, WDL
# from deepctr.feature_column import SparseFeat, get_feature_names, DenseFeat
from preprocess.load_data import load_data_
from preprocess.get_dataset import get_dataset_, data_preprocessing, anomaly_dectection
# Configure matplotlib so CJK (Chinese) text in plot labels renders correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']
# Keep the minus sign renderable when a CJK font is active.
plt.rcParams['axes.unicode_minus'] = False
def mean_absolute_percentage_error(y_true, y_pred):
    """Return the mean absolute percentage error (in percent).

    Assumes ``y_true`` contains no zeros; a zero target yields inf/nan.
    """
    relative_error = (y_true - y_pred) / y_true
    return 100 * np.mean(np.abs(relative_error))
def ctr_model(linear_feature_columns, dnn_feature_columns):
    """Build and compile a Wide&Deep-style CTR model for regression.

    NOTE(review): `WDL` comes from the `deepctr` import that is commented out
    at the top of this file, so calling this function as-is raises NameError —
    re-enable the deepctr imports before using it.
    """
    adam = tf.keras.optimizers.Adam(lr=0.0001)
    model = WDL(linear_feature_columns, dnn_feature_columns, task='regression')
    # model = xDeepFM(linear_feature_columns, dnn_feature_columns, task='regression')
    model.compile(adam, "huber_loss", metrics=['mae'],)
    return model
def baseline_model(train_set_mix, train_set_mix_label, ca_col, co_col, seed):
    """Return an unfitted LightGBM regressor configured for this task.

    The data/column arguments are accepted for interface compatibility but are
    not used when constructing the estimator; only ``seed`` matters.
    """
    lgbm_params = {
        'max_depth': 3,
        'bagging_fraction': 0.7,
        'feature_fraction': 0.7,
        'reg_alpha': 0.5,
        'reg_lambda': 0.5,
        'min_child_samples': 10,
        'n_estimators': 200,
        'learning_rate': 1e-1,
        'random_state': seed,
    }
    return lgb.LGBMRegressor(**lgbm_params)
def run(train_data, test_data, seed, target='label'):
    """Train one seeded LightGBM baseline and return its metric list.

    Returns: [train_R2, test_R2, train_R2_ad, test_R2_ad, train_mse, test_mse,
    train_mae, test_mae, test_mape]. Relies on module-level config
    (clean_ratio, test_ratio, val_ratio).
    """
    np.random.seed(seed)
    # Preprocess without categorical/continuous column selection or one-hot.
    train_data, test_data, co_col, ca_col = data_preprocessing(train_data, test_data,
                                                               ca_co_sel_flag=False, onehot_flag=False)
    # train_data, _ = anomaly_dectection(train_data, test_data=pd.DataFrame())
    # _, test_data = anomaly_dectection(train_data=pd.DataFrame(), test_data=test_data)
    # train_data, test_data = anomaly_dectection(train_data=train_data, test_data=test_data)
    train_set_mix, train_set_mix_label, val_set, val_set_label, test_set, test_set_label = \
        get_dataset_(train_data, test_data, clean_ratio=clean_ratio,
                     test_retio=test_ratio, val_ratio=val_ratio, seed=seed)
    # fixlen_feature_columns = [SparseFeat(feat, vocabulary_size=pd.concat([train_set_mix, test_set], axis=0)[feat].nunique(), embedding_dim=4)
    #                           for i, feat in enumerate(ca_col)] + [DenseFeat(feat, 1,)
    #                                                                for feat in co_col]
    #
    # dnn_feature_columns = fixlen_feature_columns
    # linear_feature_columns = fixlen_feature_columns
    # feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)
    # train_set_mix = {name: train_set_mix[name].values for name in feature_names}
    # test_set = {name: test_set[name].values for name in feature_names}
    # model = ctr_model(linear_feature_columns, dnn_feature_columns,)
    # history = model.fit(train_set_mix, train_set_mix_label[target].values,
    #                     batch_size=512, epochs=180, verbose=1, validation_split=0.2, )
    # train_set_mix = train_set_mix.loc[train_set_mix_label['sup_label'] == 0]
    # train_set_mix_label = train_set_mix_label.loc[train_set_mix_label['sup_label'] == 0]
    model = baseline_model(train_set_mix, train_set_mix_label, ca_col, co_col, seed)
    model.fit(train_set_mix, train_set_mix_label[target])
    # feat_df = pd.DataFrame({'column': train_set_mix.columns, 'importance': model.feature_importances_.round(5)})
    # feat_df_sort = feat_df.sort_values(by='importance', ascending=False)
    # feat_df_sort_ = feat_df_sort.set_index(['column'])
    # feat_df_sort_[:30].plot.barh(figsize=(15, 15), fontsize=12)
    # plt.title("n61_lgb_feature_importance")
    # plt.show()
    train_target_pred = model.predict(train_set_mix)
    test_target_pred = model.predict(test_set)
    train_R2 = r2_score(train_set_mix_label[target], train_target_pred)
    num_data = train_set_mix.shape[0]
    num_feat = train_set_mix.shape[1]
    # Adjusted R^2; abs() guards the degenerate case num_data <= num_feat + 1.
    train_R2_ad = 1 - ((1 - train_R2) * (num_data - 1)) / abs(num_data - num_feat - 1)
    test_R2 = r2_score(test_set_label[target], test_target_pred)
    num_data = test_set.shape[0]
    num_feat = test_set.shape[1]
    test_R2_ad = 1 - ((1 - test_R2) * (num_data - 1)) / abs(num_data - num_feat - 1)
    train_mse = mean_squared_error(train_set_mix_label[target], train_target_pred)
    train_mae = mean_absolute_error(train_set_mix_label[target], train_target_pred)
    test_mse = mean_squared_error(test_set_label[target], test_target_pred)
    test_mae = mean_absolute_error(test_set_label[target], test_target_pred)
    test_mape = mean_absolute_percentage_error(test_set_label[target], test_target_pred.reshape(-1, ))
    # Residuals; computed but not returned (kept for debugging parity).
    err = test_set_label[target] - np.squeeze(test_target_pred)
    return [train_R2, test_R2, train_R2_ad, test_R2_ad, train_mse, test_mse, train_mae, test_mae, test_mape]
def run_many(train_data, test_data):
    """Repeat `run` nrun times with varied seeds and print summary stats."""
    results = [run(train_data, test_data, (trial * 2718) % 2020)
               for trial in tqdm(six.moves.xrange(nrun))]
    metric_df = pd.DataFrame(np.array(results))
    metric_df.columns = ['train_R2', 'test_R2',
                         'train_R2_ad', 'test_R2_ad',
                         'train_mse', 'test_mse',
                         'train_mae', 'test_mae',
                         'test_mape',]
    template = '{} {:.4f} ({:.4f}) max: {:.4f} min: {:.4f}'
    for col in metric_df.columns:
        series = metric_df[col]
        print(template.format(col, series.mean(), series.std(),
                              series.max(), series.min()))
def main():
    """Load the configured dataset and launch the repeated evaluation runs."""
    run_many(*load_data_(datasets_name))
# Experiment configuration (read by run / run_many at module scope).
datasets_name = 'LiverAblation'  # dataset key understood by load_data_
nrun = 10  # number of repeated runs with different seeds
clean_ratio = 1
test_ratio = 0.2  # fraction of data held out for testing
val_ratio = 0.2  # fraction of data held out for validation
# NOTE(review): the four values below are not referenced in this script's
# visible code paths — presumably kept for parity with sibling scripts.
epoch = 200
batchsize = 1
iter_ = 1
step_ = 0.1
if __name__ == '__main__':
    main()
|
Python
| 157
| 45.305733
| 143
|
/Regression/src/useless/ave_logsit_baseline.py
| 0.605777
| 0.588996
|
Peroxidess/Ablation-Time-Prediction-Model
|
refs/heads/main
|
import copy
import pandas as pd
import matplotlib.pyplot as plt
from model.history_ import plot_history_df, plot_metric_df
import numpy as np
from scipy.stats import ttest_ind, levene
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
def mape(y_true, y_pred):
    """Mean absolute percentage error, expressed in percent."""
    abs_pct_err = np.abs((y_true - y_pred) / y_true)
    return abs_pct_err.mean() * 100
def metric_hist(data, nor=None):
    """Summarise saved experiment CSVs: print per-metric stats, run
    significance tests vs. the first (reference) result file, recompute
    metrics on inverse-transformed predictions, and plot histories/metrics.

    Args:
        data: feature/label DataFrame used to rebuild prediction frames
            for inverse transformation.
        nor: fitted scaler with an `inverse_transform` method — presumably the
            normaliser used during training; TODO confirm against caller.
    """
    root_ = '../report/result/'
    # Result files to compare; the FIRST entry is the reference ("ours").
    file_list = [
        'ablation_time_enh_1nrun_10Fold.csv',# ours
        # 'ablation_time_vanilla_att_only__1nrun_10Fold.csv',# att only
        # 'ablation_time_vanilla_natt_1nrun_10Fold.csv',#mlp only
        # 'ablation_time_enh_att_only__10nrun_1Fold.csv',#
        'ablation_time_enh_natt_1nrun_10Fold.csv',# mlp+lc
        'lr_10nrun_1Fold.csv',# baseline_lasso
        'lr_non_1nrun_10Fold.csv',# nonrelapse
        'gbm_1nrun_10Fold.csv',# gbm
        'gbm_non_1nrun_10Fold.csv',# nonrelapse
        'ablation_time_vanilla_1nrun_10Fold.csv',# ATT+MLP
        'ablation_time_vanilla_non_1nrun_10Fold.csv',# att+mlp+non relapse
        # 'ablation_time_learn_weight_10nrun_1Fold.csv',
        # 'ablation_time_enh_non_10nrun_1Fold.csv', # 0.2297
        # 'ablation_time_vanilla_att_only_10nrun_1Fold.csv',#
        # 'ablation_time_enh_natt__10nrun_1Fold.csv',# 0.5686
        # 'ablation_time_enh_att_only__10nrun_1Fold.csv',# 0.5690
        # 'ablation_time_enh_natt__10nrun_1Fold.csv',# 0.5686
        ]
    metric_file_list = ['metric_' + file for file in file_list]
    history_file_list = ['history_' + file for file in file_list]
    pred_file_list = ['prediction' + file for file in file_list]
    tt_pvalue_list = np.array([])
    lv_pvalue_list = np.array([])
    metric_file_base = metric_file_list[0]
    metric_df_base = pd.read_csv(root_ + metric_file_base)
    # Pass 1: print stats per metric file and t-test test_mae vs. reference.
    for metric_file in metric_file_list:
        metric_df = pd.read_csv(root_ + metric_file)
        mae_col = metric_df.filter(regex=r'mae').columns
        mse_col = metric_df.filter(regex=r'mse').columns
        # metric_df[mae_col] = metric_df.loc[:, mae_col] * 562.062540
        # metric_df[mse_col] = metric_df.loc[:, mse_col] * 562.062540**2
        print('\n', metric_file)
        for col in metric_df.columns:
            print('{} {:.4f} ({:.4f}) max: {:.4f} median {:.4f} min: {:.4f}'.format(col, metric_df[col].mean(),
                                                                                    metric_df[col].std(),
                                                                                    metric_df[col].max(),
                                                                                    metric_df[col].median(),
                                                                                    metric_df[col].min()))
        v1 = metric_df_base['test_mae']
        v2 = metric_df['test_mae']
        # Levene's test decides whether to assume equal variances in the t-test.
        std_ = levene(v1, v2).pvalue
        lv_pvalue_list = np.append(lv_pvalue_list, std_)
        equal_var_ = False
        if std_ > 0.05:
            equal_var_ = True
        res = ttest_ind(v1, v2, equal_var=equal_var_).pvalue
        tt_pvalue_list = np.append(tt_pvalue_list, res)
    tt_pvalue_list = tt_pvalue_list.reshape(-1, 1)
    # Pass 2: recompute metrics on inverse-transformed (de-normalised) labels.
    for pred_file in pred_file_list:
        pred_df = pd.read_csv(root_ + pred_file, index_col=0)
        data_inver_label_df = pd.DataFrame([])
        metric_df = pd.DataFrame([])
        for pred in pred_df:
            data_co = data.filter(regex=r'dense|^label')
            data_ = copy.deepcopy(data_co)
            data_.loc[:, 'label'] = np.array(pred_df[pred])
            data_inver_pred = pd.DataFrame(nor.inverse_transform(data_), columns=data_.columns)
            data_inver = pd.DataFrame(nor.inverse_transform(data_co), columns=data_co.columns)
            data_inver_pred_label = data_inver_pred['label']
            data_inver_label = data_inver['label']
            mae = mean_absolute_error(data_inver_label, data_inver_pred_label)
            mse = mean_squared_error(data_inver_label, data_inver_pred_label)
            mape_ = mape(data_inver_label, data_inver_pred_label)
            r2 = r2_score(data_inver_label, data_inver_pred_label)
            dict_ = dict(zip([
                'test_r2', 'test_mse', 'test_mae', 'test_mape'],
                [
                    r2, mse, mae, mape_,
                ]))
            metric_ = pd.DataFrame.from_dict([dict_])
            metric_df = pd.concat([metric_df, metric_], axis=0)
            data_inver_label_df = pd.concat([data_inver_label_df, data_inver_label], axis=1)
            # data_inver.to_csv(root_ + 'inver' + pred_file)
    # Pass 3: plot training histories for the listed experiments.
    history_df_all_list = []
    for history_file in history_file_list:
        history_df_all = pd.read_csv(root_ + history_file)
        history_df_all_list.append(history_df_all)
    # plot_history_df(history_df_all_list, task_name='ablation_time', val_flag='')
    plot_history_df(history_df_all_list, task_name='of the experimental results of ablation time prediction ', val_flag='val_')
    plt.show()
    # Pass 4: plot test metrics for a fixed subset of experiments.
    metric_df_all_list = []
    metric_file_list = ['metric_ablation_time_enh_10nrun_1Fold.csv',
                        # 'metric_ablation_time_enh_non_10nrun_1Fold.csv',
                        'metric_ablation_time_vanilla_10nrun_1Fold.csv',
                        # 'metric_ablation_time_vanilla_non_10nrun_1Fold.csv',
                        'metric_gbm_10nrun_1Fold.csv',
                        # 'metric_gbm_non_10nrun_1Fold.csv',
                        'metric_lr_10nrun_1Fold.csv',
                        # 'metric_lr_non_10nrun_1Fold.csv',
                        ]
    for history_file in metric_file_list:
        history_df_all = pd.read_csv(root_ + history_file)
        metric_df_all_list.append(history_df_all)
    # plot_history_df(history_df_all_list, task_name='ablation_time', val_flag='')
    plot_metric_df(metric_df_all_list, task_name='ablation_time', val_flag='test_')
    plt.show()
    pass
|
Python
| 126
| 46.674603
| 127
|
/Regression/src/preprocess/plot_tabel.py
| 0.554779
| 0.531136
|
Peroxidess/Ablation-Time-Prediction-Model
|
refs/heads/main
|
#coding=gb18030
import numpy as np
import pandas as pd
def load_data_(datasets, task_name='', seed=2020):
    """Load one of the supported datasets and return (train_data, test_data).

    Args:
        datasets: dataset key — 'winequality_white', 'PPH' or 'LiverAblation';
            anything else returns (None, None).
        task_name: if it contains 'non', LiverAblation training uses only the
            non-relapse (sup_label == 0) subsample.
        seed: random_state for the undersampling draws.

    Returns:
        (train_data, test_data) DataFrames; test_data is None except for
        LiverAblation.
    """
    if datasets == 'winequality_white':
        data_path = '../DataSet/wine/{}.csv'.format(datasets)
        data = pd.read_csv(data_path)
        data.rename(columns={'quality': 'label'}, inplace=True)
        data.dropna(axis=0, subset=['label'], inplace=True)
        train_data = data.fillna(0)
        test_data = None
    elif datasets == 'PPH':
        data_path = '../DataSet/PPH/{}.csv'.format(datasets)
        data_head = pd.read_csv('../DataSet/PPH/PPH_head.csv', encoding='gb18030')
        data = pd.read_csv(data_path, encoding='gb18030', index_col='index')
        # Append the human-readable header text to each raw column name.
        col = []
        for col_ in data.columns:
            col.append(col_ + np.squeeze(data_head[col_].values))
        data.columns = np.array(col)
        # data.to_csv('../DataSet/PPH/data_feat_name_add.csv', index=False, encoding='gb18030')
        data['sup_label'] = 0
        label_col = data.filter(regex=r'n61').columns.values[0]
        data.rename(columns={label_col: 'label'}, inplace=True)
        data.dropna(axis=0, subset=['label'], inplace=True)
        # Derive an elapsed-minutes feature from the (hour, minute) field pairs,
        # wrapping negative differences across midnight/hour boundaries.
        data['hours'] = data.filter(regex=r'field12').values - data.filter(regex=r'field9').values
        data['hours'] = data['hours'].apply(lambda x: 24 + x if x < 0 else x)
        data['minutes'] = data.filter(regex=r'field13').values - data.filter(regex=r'field10').values
        data['minutes'] = data['minutes'].apply(lambda x: 60 + x if x < 0 else x)
        data['minutes'] += data['hours'] * 60
        drop_columns = data.filter(
            regex=r'n421|field11|其他|field28|其他.1|n262|hours|n61|n51|n4417|n4318|field9|field10|field12|field13').columns
        train_data = data.drop(columns=drop_columns)
        # data.fillna(0, inplace=True)
        test_data = None
    elif datasets == 'LiverAblation':
        data_path = '../DataSet/LiverAblation/{}.csv'.format(datasets)
        data = pd.read_csv(data_path, encoding='gb18030', index_col='基线-患者基本信息-ID_sparse')
        # data_path = '../DataSet/LiverAblation/{}_trans.csv'.format(datasets)
        # data = pd.read_csv(data_path, encoding='gb18030', index_col='baseline_info_ID_sparse')
        data.rename(columns={'time_dense': 'label'}, inplace=True)
        data.rename(columns={'relapse_sparse': 'sup_label'}, inplace=True)
        # Drop follow-up/ID/cluster columns not usable as features.
        drop_columns_ = data.filter(regex=r'随|ID|cluster|followupInfomation').columns
        data.drop(columns=drop_columns_, inplace=True)
        # Undersample class 0 to the size of class 1, shuffle, and use the
        # remainder of the data as the held-out test set.
        data_1 = data.loc[data['sup_label'] == 1]
        data_0 = data.loc[data['sup_label'] == 0].sample(n=data_1.shape[0] * 1, random_state=seed)
        data_undersmapling = pd.concat([data_1, data_0]).sample(frac=1, random_state=seed)
        test_data = data.drop(index=data_undersmapling.index)
        if 'non' in task_name:
            train_data = data_0
        else:
            train_data = data_undersmapling
    else:
        train_data = None
        test_data = None
    return train_data, test_data
|
Python
| 61
| 48.360657
| 120
|
/Regression/src/preprocess/load_data.py
| 0.61541
| 0.582863
|
Peroxidess/Ablation-Time-Prediction-Model
|
refs/heads/main
|
import pandas as pd
import numpy as np
from tqdm import tqdm
import six
import tensorflow as tf
from keras import losses
from keras import backend as K
from keras import optimizers
from keras.models import Sequential, Model
from keras.callbacks import EarlyStopping
from keras.layers import Input, Dense, Multiply, Activation, Layer, \
GlobalAveragePooling1D, Reshape, RepeatVector, Flatten, Lambda, Add, Embedding
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error, mean_squared_error, \
confusion_matrix, precision_score, recall_score, f1_score, r2_score
import matplotlib.pyplot as plt
from preprocess.load_data import load_data_
from preprocess.get_dataset import get_dataset_, foo, anomaly_dectection
class Self_Attention(Layer):
    """Single-head self-attention over a 2-D input (features treated as steps).

    The kernel packs the Q/K/V projection weights as one (3, 1, output_dim)
    tensor; `call` expands the 2-D input to 3-D before projecting.
    """
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(Self_Attention, self).__init__(**kwargs)
    def build(self, input_shape):
        # Create one trainable weight for this layer (Q, K, V stacked).
        # inputs.shape = (batch_size, time_steps, seq_len)
        self.kernel = self.add_weight(name='kernel',
                                      shape=(3, 1, self.output_dim),
                                      initializer='uniform',
                                      trainable=True)
        super(Self_Attention, self).build(input_shape)
    def call(self, x):
        # (batch, features) -> (batch, features, 1) so K.dot can project.
        x = K.expand_dims(x, axis=2)
        WQ = K.dot(x, self.kernel[0])
        WK = K.dot(x, self.kernel[1])
        WV = K.dot(x, self.kernel[2])
        print("WQ.shape", WQ.shape)
        print("K.permute_dimensions(WK, [0, 2, 1]).shape", K.permute_dimensions(WK, [0, 2, 1]).shape)
        # Scaled dot-product attention: softmax(QK^T / sqrt(d)) V.
        QK = K.batch_dot(WQ, K.permute_dimensions(WK, [0, 2, 1]))
        QK = QK / (x.shape.as_list()[-1] ** 0.5)
        QK = K.softmax(QK)
        print("QK.shape", QK.shape)
        V = K.batch_dot(QK, WV)
        return V
    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[1], self.output_dim)
def mean_absolute_percentage_error(y_true, y_pred):
    """MAPE in percent; assumes y_true has no zero entries."""
    pct_err = (y_true - y_pred) / y_true
    return np.mean(np.abs(pct_err)) * 100
def get_activations(model, inputs, print_shape_only=False, layer_name=None):
    """Return the activations of every layer (or only `layer_name`) for `inputs`.

    Each activation is printed as it is collected — the full array, or just
    its shape when `print_shape_only` is True.
    """
    model_input = model.input
    if layer_name is None:
        outputs = [layer.output for layer in model.layers]
    else:
        outputs = [layer.output for layer in model.layers if layer.name == layer_name]  # all layer outputs
    # One backend function per output; learning phase fixed to 1 (training).
    funcs = [K.function([model_input] + [K.learning_phase()], [out]) for out in outputs]  # evaluation functions
    collected = [func([inputs, 1.])[0] for func in funcs]
    activations = []
    for layer_acts in collected:
        activations.append(layer_acts)
        print(layer_acts.shape if print_shape_only else layer_acts)
    return activations
def r2(y_true, y_pred):
    """Coefficient of determination computed with Keras backend ops."""
    ss_res = K.sum(K.square(y_pred - y_true))
    ss_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - ss_res / ss_tot
def r_square(y_true, y_pred):
    """Ratio of explained variance to total variance (per-sample means)."""
    mean_true = K.mean(y_true)
    explained = K.mean(K.square(y_pred - mean_true), axis=-1)
    total = K.mean(K.square(y_true - mean_true), axis=-1)
    return explained / total
def Att(att_dim, inputs, name):
    """Feature-attention gate: two Dense layers -> softmax weights -> gate.

    Fix: Keras 2 removed the legacy Keras-1 `bias` keyword on Dense; use
    `use_bias=False`, which matches the falsy `bias=None` the original passed.

    Args:
        att_dim: width of the attention projection (usually the input dim).
        inputs: tensor to be gated.
        name: name given to the softmax activation layer (used to retrieve
            the attention weights later).
    Returns:
        The input tensor multiplied elementwise by the softmax weights.
    """
    V = inputs
    QK = Dense(att_dim, use_bias=False)(inputs)
    QK = Dense(att_dim, use_bias=False)(QK)
    QK = Activation("softmax", name=name)(QK)
    MV = Multiply()([V, QK])
    return(MV)
def bulid_model(train_set_mix, train_set_mix_label, ca_col, co_col):
    """Build an attention-gated MLP regressor (uncompiled).

    Fix: `Model(input=..., output=...)` uses legacy Keras-1 keywords that were
    removed in Keras 2 — the constructor arguments are `inputs`/`outputs`.

    Only `train_set_mix` (for its feature count) is used; the label/column
    arguments are accepted for interface compatibility.
    """
    input_dim = train_set_mix.shape[-1]
    inputs = Input(shape=(input_dim,))
    atts1 = Att(input_dim, inputs, "attention_vec")
    x = Dense(64, activation='relu')(atts1)
    x = Dense(32, activation='relu')(x)
    x = Dense(16, activation='relu')(x)
    # atts2 = Att(4, atts2, "attention_vec1")
    output = Dense(1)(x)
    model = Model(inputs=inputs, outputs=output)
    return model
def Expand_Dim_Layer(tensor):
    """Insert a new axis at position 1 via a Lambda layer applied to `tensor`."""
    return Lambda(lambda t: K.expand_dims(t, axis=1))(tensor)
def bulid_model_atts(train_set_mix, train_set_mix_label, ca_col, co_col):
    """Build a self-attention + MLP regression model (uncompiled).

    Uses the custom `Self_Attention` layer (which internally expands the 2-D
    input to 3-D) followed by global pooling and a 64-32-16 MLP head.
    Only `train_set_mix` is used, for its feature count.
    """
    input_dim = train_set_mix.shape[-1]
    inputs_ = Input(shape=(input_dim,))
    # inputs_emb = Embedding(10000, input_dim)(inputs_)
    atts1 = Self_Attention(input_dim)(inputs_)
    # Collapse the attention output back to (batch, features).
    atts1 = GlobalAveragePooling1D()(atts1)
    x = Dense(64, activation='relu')(atts1)
    x = Dense(32, activation='relu')(x)
    x = Dense(16, activation='relu')(x)
    outputs = Dense(1)(x)
    model = Model(inputs=inputs_, outputs=outputs)
    model.summary()
    return model
def run(train_data, test_data, seed, reg_flag=False, label_enh_flag=False, reg_enh_flag=False, target='label'):
    """Train the self-attention regressor once and return test metrics.

    Returns (test_r2, test_r2_ad, test_mse). The *_flag arguments are accepted
    but not read in this implementation.

    NOTE(review): `test_r2_ad` is adjusted using the TRAINING set's sample and
    feature counts (num_data/num_feat are not recomputed for the test set, unlike
    the sibling baseline script) — confirm whether that is intended.
    """
    train_data, test_data, co_col, ca_col = foo(train_data, test_data, ca_co_sel_flag=False, onehot_flag=True)
    train_set_mix, train_set_mix_label, val_set, val_set_label, test_set, test_set_label = \
        get_dataset_(train_data, test_data, clean_ratio=clean_ratio,
                     test_retio=test_ratio, seed=seed, val_ratio=val_ratio)
    train_curr_label = train_set_mix_label[target]
    test_curr_label = test_set_label[target]
    model = bulid_model_atts(train_set_mix, train_set_mix_label, ca_col, co_col)
    rms = optimizers.RMSprop(lr=1e-4)
    model.compile(optimizer=rms, loss='mean_squared_error', metrics=['mse', 'mae', r2, r_square])
    model.fit(train_set_mix, train_curr_label, epochs=epoch, batch_size=batchsize, validation_split=0.2,
              callbacks=[EarlyStopping(monitor='val_loss', patience=200, min_delta=0.01)])
    train_target_pred = model.predict(train_set_mix)
    test_target_pred = model.predict(test_set)
    num_data = train_set_mix.shape[0]
    num_feat = train_set_mix.shape[1]
    train_r2 = r2_score(train_set_mix_label[target], train_target_pred)
    # Adjusted R^2; abs() guards the degenerate case num_data <= num_feat + 1.
    train_r2_ad = 1 - ((1 - train_r2) * (num_data - 1)) / abs(num_data - num_feat - 1)
    test_r2 = r2_score(test_set_label[target], test_target_pred)
    test_r2_ad = 1 - ((1 - test_r2) * (num_data - 1)) / abs(num_data - num_feat - 1)
    train_mse = mean_squared_error(train_set_mix_label[target], train_target_pred)
    train_mae = mean_absolute_error(train_set_mix_label[target], train_target_pred)
    test_mse = mean_squared_error(test_set_label[target], test_target_pred)
    test_mae = mean_absolute_error(test_set_label[target], test_target_pred)
    test_mape = mean_absolute_percentage_error(test_set_label[target], test_target_pred.reshape(-1, ))
    # Residuals; computed but not returned (kept for debugging parity).
    err_enh = test_set_label[target] - np.squeeze(test_target_pred)
    # attention_vector = get_activations(model, train_set_mix[:1],
    #                                    print_shape_only=True,
    #                                    layer_name='attention_vec')[0].flatten()
    # pd.DataFrame(attention_vector, columns=['attention (%)']).plot(kind='bar',
    #                                                                title='Attention Mechanism as a '
    #                                                                      'function of input dimensions.')
    # plt.show()
    return test_r2, test_r2_ad, test_mse
def run_many(train_data, test_data):
    """Repeat `run` nrun times and print mean/std/max of each metric.

    Bug fix: `run` returns three values (test_r2, test_r2_ad, test_mse), but
    the original assigned FOUR column names, raising a length-mismatch
    ValueError the first time the DataFrame columns were set. The column names
    now match what `run` actually returns.
    """
    metric_list_all = []
    for trial in tqdm(six.moves.xrange(nrun)):
        # train_metric, test_metric, train_metric_enh, test_metric_enh = \
        #     run(train_data, test_data, (trial * 2020) % 1000, reg_flag=True, label_enh_flag=True, reg_enh_flag=True)
        metric_list = run(train_data, test_data, (trial * 2020) % 1000,
                          reg_flag=True, label_enh_flag=True, reg_enh_flag=True)
        metric_list_all.append(metric_list)
    metric_df = pd.DataFrame(np.array(metric_list_all))
    metric_df.columns = ['test_r2', 'test_r2_ad', 'test_mse']
    for col in metric_df.columns:
        print('{} metric {:.3f} ({:.3f}) max: {:.3f}'.format(col, metric_df[col].mean(),
                                                             metric_df[col].std(),
                                                             metric_df[col].max()))
def main():
    """Load the configured dataset and launch the repeated training runs."""
    train_set, test_set = load_data_(datasets_name)
    run_many(train_set, test_set)
# Global experiment configuration (read by run / run_many at module scope).
np.random.seed(2020)  # fix the module-level RNG for reproducibility
datasets_name = 'LiverAblation'  # dataset key understood by load_data_
nrun = 5  # number of repeated runs with different seeds
clean_ratio = 1
test_ratio = 0.2  # fraction of data held out for testing
val_ratio = 0
epoch = 3000  # max training epochs (early stopping may end sooner)
batchsize = 16
# NOTE(review): iter_ / step_ are not referenced in this script's visible
# code paths — presumably kept for parity with the label-correction scripts.
iter_ = 10
step_ = 0.001
if __name__ == '__main__':
    main()
|
Python
| 216
| 37.666668
| 118
|
/Regression/src/useless/keras_att.py
| 0.617816
| 0.601173
|
Peroxidess/Ablation-Time-Prediction-Model
|
refs/heads/main
|
import copy
import pandas as pd
import numpy as np
import lightgbm as lgb
from sklearn.linear_model import RidgeClassifierCV, LogisticRegressionCV, RidgeCV, LassoCV, LinearRegression
from keras.models import load_model
from keras import backend as K
from keras.optimizers import Adam, RMSprop, SGD
from keras.callbacks import EarlyStopping
from model.bulid_model import classifer_, regression_, label_correction
from model.evaluate import evaluate_classification, evaluate_regression
def precision(y_true, y_pred):
    """Precision = TP / predicted positives, with epsilon for stability."""
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    pred_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_pos / (pred_pos + K.epsilon())
def recall(y_true, y_pred):
    """Recall = TP / actual positives, with epsilon for stability."""
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_pos / (actual_pos + K.epsilon())
def r2(y_true, y_pred):
    """R^2 (coefficient of determination) as a Keras backend metric."""
    residual = K.sum(K.square(y_pred - y_true))
    total = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - residual / total
def f1(y_true, y_pred):
    """F1 score: harmonic mean of precision and recall (eps avoids 0/0)."""
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    return 2 * p * r / (p + r + 1e-7)
# model compile and fit
# model compile and fit
def model_training(model, train_sets, train_label, val_data, val_label, lr, task, epoch, batch_size, patience=100):
    """Compile `model` for `task` and fit it with early stopping.

    Args:
        model: uncompiled Keras model.
        train_sets / train_label: training inputs and targets.
        val_data / val_label: validation pair; when `val_label` is None the
            fit runs without validation data.
        lr: RMSprop learning rate.
        task: 'classification' or 'regression'.
        epoch, batch_size, patience: fit / early-stopping settings.

    Returns:
        (history, model) — the Keras History object and the fitted model.

    Raises:
        ValueError: if `task` is neither 'classification' nor 'regression'
            (the original fell through and crashed later with a NameError).
    """
    if task == 'classification':
        metrics = ['acc', f1, precision, recall]
        loss = 'binary_crossentropy'
        val_metric = 'val_f1'
    elif task == 'regression':
        # Only r2 is monitored: the original assigned ['mse', 'mae', r2] and
        # immediately overwrote it with [r2], so the dead assignment is dropped.
        metrics = [r2]
        loss = 'mean_squared_error'
        val_metric = 'val_r2'
    else:
        raise ValueError("task must be 'classification' or 'regression', got {!r}".format(task))
    model.compile(optimizer=RMSprop(lr=lr), loss=loss, metrics=metrics)
    model.summary()
    # Early stopping maximises the validation metric (f1 or r2).
    callbacks = [EarlyStopping(monitor=val_metric, patience=patience, mode='max')]
    if val_label is None:
        history = model.fit(train_sets, train_label,
                            epochs=epoch,
                            batch_size=batch_size,
                            shuffle=True,
                            callbacks=callbacks,
                            # callbacks=[EarlyStopping(monitor='val_loss', patience=200, min_delta=0.01)],
                            verbose=2,
                            )
    else:
        history = model.fit(train_sets, train_label,
                            # validation_split=0.3,
                            validation_data=(val_data, val_label),
                            epochs=epoch,
                            batch_size=batch_size,
                            shuffle=True,
                            callbacks=callbacks,
                            # callbacks=[EarlyStopping(monitor='val_loss', patience=200, min_delta=0.01)],
                            verbose=2,
                            )
    return history, model
# select model
# select model
def training_model(train_set, train_set_label, task_name, train_index, val_index, test_set, test_set_label,
                   epoch, batchsize, iter_=None, step_=None, target='label', seed=2020, label_corr_epoch=2):
    """Dispatch on `task_name` to build/train one model variant and evaluate it.

    Variants (selected by substring match on `task_name`): 'risk' (attention
    classifier), 'vanilla' (attention MLP regressor), 'load' (reload a saved
    model), 'enh' (classifier-guided label correction + regressor), 'lr'
    (LassoCV) and 'gbm' (LightGBM).

    Args:
        train_set / train_set_label: full training features and labels
            (labels include both `target` and a 'sup_label' column).
        train_index / val_index: positional fold indices; when `train_index`
            is None the test set doubles as the validation set.
        test_set / test_set_label: held-out evaluation data.
        epoch, batchsize, iter_, step_: training / label-correction settings.

    Returns:
        (model, history_df, metric, test_pred, len_) where `len_` is the
        number of training epochs actually recorded.
    """
    if train_index is not None:
        train_x, val_x = train_set.iloc[train_index], train_set.iloc[val_index]
        train_y, val_y = train_set_label.iloc[train_index], train_set_label.iloc[val_index]
        val_label = val_y[target]
        val_suplabel = val_y['sup_label']
        # "_time" frames drop post-operative / discharge columns (leakage guard).
        val_x_time = val_x.drop(columns=val_x.filter(regex=r'术后|出院|Post').columns)
    else:
        train_x = train_set
        train_y = train_set_label
        val_x = test_set
        val_x_time = test_set.drop(columns=val_x.filter(regex=r'术后|出院|Post').columns)
        val_label = test_set_label[target]
        val_suplabel = test_set_label['sup_label']
    train_x_time = train_x.drop(columns=train_x.filter(regex=r'术后|出院|Post').columns)
    test_set_time = test_set.drop(columns=test_set.filter(regex=r'术后|出院|Post').columns)
    # train_x_time.to_csv('train_data.csv', encoding='gb18030')
    # NOTE(review): reads a previously exported snapshot to diff column sets;
    # xx/rr are debug-only and unused below.
    train_data_raw = pd.read_csv('train_data.csv', encoding='gb18030')
    xx = set(train_data_raw.columns) - set(train_x_time.columns)
    rr = set(train_x_time.columns) - set(train_data_raw.columns)
    if 'risk' in task_name:
        classifer, att_weight = classifer_(train_x)
        # epoch=130 for training whole data 107
        # lr=8e-5 batchsize=8 patience= 90
        history, model = model_training(classifer,
                                        [train_x, train_y[target]], train_y['sup_label'],
                                        [val_x, val_label], val_suplabel,
                                        8e-5, 'classification', 120, 16, 190)
        metric = evaluate_classification(model,
                                         [train_x, train_y[target]], train_y['sup_label'],
                                         [val_x, val_label], val_suplabel,
                                         [test_set, test_set_label[target]], test_set_label['sup_label'])
        test_pred = model.predict([test_set, test_set_label[target]])
        history_df = pd.DataFrame.from_dict(history.history, orient='columns')
        len_ = history_df.shape[0] # count the number of epoch
    elif 'vanilla' in task_name:
        regression = regression_(train_x_time)
        # epoch=2926 for training whole data 2709 for non-relapse data
        # lr=9e-6 batchsize=256 patience= 350
        history, model = model_training(regression, train_x_time, train_y[target], val_x_time, val_label,
                                        9e-6, 'regression', 15000, batchsize, 2500) #240 2335
        metric = evaluate_regression(model, train_x_time, train_y[target],
                                     val_x_time, val_label,
                                     test_set_time, test_set_label[target],
                                     )
        test_pred = model.predict(test_set_time)
        history_df = pd.DataFrame.from_dict(history.history, orient='columns')
        len_ = len(history.history['loss']) # count the number of epoch
    elif 'load' in task_name:
        # Reload a previously trained model; no new training history.
        model = load_model('ablation_time_enh_10nrun_1Fold.h5', custom_objects={'r2': r2})
        test_pred = model.predict(test_set_time)
        history_df = pd.DataFrame([])
        metric = evaluate_regression(model,
                                     train_x_time, train_y[target],
                                     val_x_time, val_label,
                                     test_set_time, test_set_label[target],
                                     )
        len_ = 0
    elif 'enh' in task_name:
        # Enhanced variant: train a relapse classifier first, then iteratively
        # correct the regression targets with it before fitting the regressor.
        history_df = pd.DataFrame([])
        classifer, att_weight = classifer_(train_x)
        # lr=8e-5 batchsize=16 epoch= 120
        history, classifer = model_training(classifer,
                                            [train_set, train_set_label[target]], train_set_label['sup_label'],
                                            [pd.DataFrame([]), None], None,
                                            8e-5, 'classification', 120, 16, 130)
        label_target = copy.deepcopy(train_set_label[target])
        regression_enh = regression_(train_x_time)
        len_ = 0
        for i in range(label_corr_epoch):
            print('iter {}'.format(i))
            label_target = label_correction(classifer, train_set, label_target, iter_=iter_, step_=step_)
            # label_target = train_y[target]
            if train_index is not None:
                label_target_train = label_target.iloc[train_index]
                val_label = label_target.iloc[val_index]
            else:
                label_target_train = label_target
            # lr=9e-6 batchsize=256 epoch= 600
            history, model = model_training(regression_enh,
                                            train_x_time, label_target_train, val_x_time, val_label,
                                            7e-5, 'regression', 225, batchsize, 220,)
            # 1e-5, 'regression', 1750, batchsize, 2120, )
            metric = evaluate_regression(model,
                                         train_x_time, train_y[target],
                                         val_x_time, val_label,
                                         test_set_time, test_set_label[target],
                                         )
            test_pred = model.predict(test_set_time)
            if history_df.empty:
                history_df = pd.DataFrame.from_dict(history.history, orient='columns')
            else:
                history_df = pd.concat([history_df, pd.DataFrame.from_dict(history.history, orient='columns')], axis=0)
            len_ += history_df.shape[0] # count the number of epoch
        history_df.reset_index(drop=True, inplace=True)
        if train_index is not None:
            # Debug block: measure how much the classifier's predicted risk
            # drops when the original labels are replaced by predictions.
            val_pred = model.predict(val_x_time)
            risk = classifer.predict([val_x, train_set_label[target].iloc[val_index]])
            risk_corr = classifer.predict([val_x, val_pred])
            risk_change = risk - risk_corr
            risk_change_max = risk_change.max()
            risk_change_mean = risk_change.mean()
            x = 1
    elif 'lr' in task_name:
        model = LassoCV(random_state=seed)
        # model = RidgeCV()
        model.fit(train_x_time, train_y[target])
        metric = evaluate_regression(model,
                                     train_x_time, train_y[target],
                                     val_x_time, val_label,
                                     test_set_time, test_set_label[target],
                                     )
        history_df = pd.DataFrame([])
        len_ = 0
        test_pred = model.predict(test_set_time)
    elif 'gbm' in task_name:
        model = lgb.LGBMRegressor(
            max_depth=3,
            bagging_fraction=0.5,
            feature_fraction=0.5,
            reg_alpha=1,
            reg_lambda=1,
            min_child_samples=10,
            n_estimators=200,
            learning_rate=1e-1,
            random_state=seed,
        )
        model.fit(train_x_time, train_y[target])
        metric = evaluate_regression(model,
                                     train_x_time, train_y[target],
                                     val_x_time, val_label,
                                     test_set_time, test_set_label[target],
                                     )
        history_df = pd.DataFrame([])
        len_ = 0
        test_pred = model.predict(test_set_time)
    return model, history_df, metric, test_pred, len_
|
Python
| 213
| 48.685448
| 119
|
/Regression/src/model/training_.py
| 0.534159
| 0.517245
|
Peroxidess/Ablation-Time-Prediction-Model
|
refs/heads/main
|
# Standalone script: load saved per-experiment metric CSVs and plot them.
from model.history_ import plot_metric_df
import pandas as pd
import matplotlib.pyplot as plt
import os
xx = os.getcwd()  # debug: current working directory (unused below)
path_root = '../report/result/'
task_name = 'ablation_time_all'
metric_list = []
# Metric files for the four compared variants (enh, vanilla, gbm, lr).
metric_list_dir = ['metric_ablation_time_enh_10nrun_1Fold.csv',
                   'metric_ablation_time_vanilla_10nrun_1Fold.csv',
                   'metric_gbm_10nrun_1Fold.csv',
                   'metric_lr_10nrun_1Fold.csv',
                   ]
for metric_dir in metric_list_dir:
    dir = path_root + metric_dir
    metric_df = pd.read_csv(dir)
    metric_list.append(metric_df)
plot_metric_df(metric_list, task_name, val_flag='val_')
plt.show()
pass
|
Python
| 22
| 25.681818
| 63
|
/Regression/src/eval.py
| 0.71891
| 0.698467
|
Peroxidess/Ablation-Time-Prediction-Model
|
refs/heads/main
|
import tensorflow as tf
import numpy as np
import pandas as pd
from keras import backend as K
from keras import regularizers, activations
from keras.layers import Dense, Input, Add, Concatenate, Dropout, \
BatchNormalization, Activation, Multiply, Embedding, Layer, GlobalAveragePooling1D
from keras.models import Model
import copy
class Self_Attention(Layer):
    """Single-head self-attention for 3-D inputs (batch, steps, features).

    One (3, in_features, output_dim) kernel stacks the Q/K/V projections.
    Unlike the sibling layer in keras_att.py, this one expects an already
    3-D input and projects along the last axis.
    """
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(Self_Attention, self).__init__(**kwargs)
    def build(self, input_shape):
        # One trainable tensor holding the stacked Q, K and V weights.
        self.kernel = self.add_weight(name='kernel',
                                      shape=(3, input_shape[2], self.output_dim),
                                      initializer='uniform',
                                      trainable=True)
        super(Self_Attention, self).build(input_shape)
    def call(self, x):
        WQ = K.dot(x, self.kernel[0])
        WK = K.dot(x, self.kernel[1])
        WV = K.dot(x, self.kernel[2])
        print("WQ.shape", WQ.shape)
        print("K.permute_dimensions(WK, [0, 2, 1]).shape", K.permute_dimensions(WK, [0, 2, 1]).shape)
        # Scaled dot-product attention: softmax(QK^T / sqrt(steps)) V.
        QK = K.batch_dot(WQ, K.permute_dimensions(WK, [0, 2, 1]))
        QK = QK / (x.shape.as_list()[1] ** 0.5)
        QK = K.softmax(QK)
        print("QK.shape", QK.shape)
        V = K.batch_dot(QK, WV)
        return V
    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[1], self.output_dim)
class FM(Layer):
    """Factorization-machine layer: linear term + pairwise interactions.

    Implements w0 + x.W + 0.5 * sum((xV)^2 - x^2 V^2) with latent factor
    matrix V of rank `latent`.
    """
    def __init__(self, output_dim, latent=32, activation='relu', **kwargs):
        self.latent = latent
        self.output_dim = output_dim
        # NOTE(review): the activation is resolved but never applied in call().
        self.activation = activations.get(activation)
        super(FM, self).__init__(**kwargs)
    def build(self, input_shape):
        # Bias, linear weights and latent interaction factors.
        self.b = self.add_weight(name='W0',
                                 shape=(self.output_dim,),
                                 trainable=True,
                                 initializer='zeros')
        self.w = self.add_weight(name='W',
                                 shape=(input_shape[1], self.output_dim),
                                 trainable=True,
                                 initializer='random_uniform')
        self.v= self.add_weight(name='V',
                                shape=(input_shape[1], self.latent),
                                trainable=True,
                                initializer='random_uniform')
        super(FM, self).build(input_shape)
    def call(self, inputs, **kwargs):
        x = inputs
        x_square = K.square(x)
        # Pairwise interaction term in O(n*k): (xV)^2 - x^2 V^2, halved.
        xv = K.square(K.dot(x, self.v))
        xw = K.dot(x, self.w)
        p = 0.5*K.sum(xv-K.dot(x_square, K.square(self.v)), 1)
        rp = K.repeat_elements(K.reshape(p, (-1, 1)), self.output_dim, axis=-1)
        f = xw + rp + self.b
        output = K.reshape(f, (-1, self.output_dim))
        return output
    def compute_output_shape(self, input_shape):
        assert input_shape and len(input_shape)==2
        return input_shape[0],self.output_dim
def Att(att_dim, inputs, name):
    """Two-layer soft attention gate: score the inputs, softmax, reweight them.

    :param att_dim: width of the attention scores (matches the input width)
    :param inputs: tensor to be gated
    :param name: layer name given to the softmax (used to inspect weights later)
    :return: inputs multiplied element-wise by their attention weights
    """
    scores = Dense(att_dim//4, bias=None, activation='relu')(inputs)
    scores = Dense(att_dim, bias=None, activation='relu')(scores)
    weights = Activation("softmax", name=name)(scores)
    gated = Multiply()([inputs, weights])
    return gated
def regression_(train_x):
    """Build the regression MLP: attention gate -> Dense(64) -> Dense(16) -> Dense(1).

    :param train_x: training features; only train_x.shape[1] is used here
    :return: an (uncompiled) Keras Model mapping features to one scalar
    """
    n_features = train_x.shape[1]
    l1_regul = 0
    l2_regul = 0
    net_input = Input(shape=(n_features,))
    # Soft attention over the raw features before the dense stack.
    hidden = Att(n_features, net_input, "attention_vec10")
    for width in (64, 16):
        hidden = Dense(width, activation='relu',
                       kernel_regularizer=regularizers.l1_l2(l1=l1_regul, l2=l2_regul))(hidden)
    prediction = Dense(1)(hidden)
    regression = Model(input=net_input, output=prediction)
    return regression
def classifer_(train_x):
    """Build the relapse-risk classifier plus an attention-inspection model.

    :param train_x: training features; only .shape[1] is used here
    :return: (classifer, att_weight) -- both take [features, context] inputs;
             att_weight exposes the first attention layer's output
    """
    input_dim = train_x.shape[1]
    input_dim_emb = (input_dim + 1)  # features plus the one-column context input
    input_ = Input(shape=(input_dim,))
    input_c = Input(shape=(1,))
    l1_regul = 0
    l2_regul = 0
    # encoder layers
    inputs = Concatenate()([input_, input_c])
    atts1 = Att(input_dim_emb, inputs, "attention_vec10")
    # atts2 = Att(input_dim + 1, inputs, "attention_vec11")
    # input_fm = FM(input_dim + 1)(atts1)
    encoded_layer = atts1
    # encoded_layer = Concatenate()([atts1, atts2])
    for units_ in [64]:
        # NOTE(review): l2 is set from l1_regul here (likely a typo for
        # l2_regul); harmless while both are 0.
        encoded_layer = Dense(units_, activation='relu',
                              kernel_regularizer=regularizers.l1_l2(l1=l1_regul, l2=l1_regul))(encoded_layer)
        encoded_layer = Dropout(0.5)(encoded_layer)
        encoded_layer = BatchNormalization()(encoded_layer, training=False)
    encoder_output = Concatenate()([encoded_layer, input_c])  # NOTE(review): built but unused
    # decoder layers -- reconstruction path; its output is not wired into any
    # returned model (the autoencoder variant below is commented out)
    decoded_layer = encoded_layer
    for units_ in [16, 128, train_x.shape[1]]:
        decoded_layer = Dense(units_, activation='relu',
                              kernel_regularizer=regularizers.l1_l2(l1=l1_regul, l2=l1_regul))(decoded_layer)
        # decoded_layer = Dropout(0.2)(decoded_layer)
        decoded_layer = BatchNormalization()(decoded_layer, training=False)
    # classifer layers
    classifer_layer = Dense(8, activation='relu', kernel_regularizer=regularizers.l1_l2(l1=l1_regul, l2=l2_regul))(
        encoded_layer)
    classifer_layer = Dense(1, activation='sigmoid', kernel_regularizer=regularizers.l1_l2(l1=l1_regul, l2=l2_regul))(
        classifer_layer)
    # encoder = Model(input=[input_, input_c], output=encoded_layer)
    classifer = Model(input=[input_, input_c], output=classifer_layer)
    # autoencoder = Model(input=[input_, input_c], output=decoded_layer)
    att_weight = Model(input=[input_, input_c], output=atts1)
    # classifer.add_loss(recon_loss(y_true=input_, y_pred=decoded_layer))
    return classifer, att_weight
def eval_loss_and_grads(x, fetch_loss_and_grads):
    """Run the compiled loss/grad function on *x* and unpack its two outputs.

    :param x: inputs passed straight through to the compiled function
    :param fetch_loss_and_grads: callable returning [loss, grads, ...]
    :return: (loss_value, grad_values)
    """
    loss_value, grad_values = fetch_loss_and_grads(x)[:2]
    return loss_value, grad_values
def gradient_ascent(x, fetch_loss_and_grads, iter, step, max_loss=None, min_loss=None):
    """Iteratively move the label entry ``x[1]`` along the loss gradient.

    :param x: [list] ``[inputs, label]``; only ``x[1]`` is updated, in place
    :param fetch_loss_and_grads: [callable] returns ``[loss, grads]`` for ``x``
    :param iter: [int] number of label-modification iterations
    :param step: [float] learning rate for label modification
    :param max_loss: unused (the early-stop branch is disabled)
    :param min_loss: unused
    :return: the same list ``x`` with its label entry updated
    """
    for _ in range(iter):
        outs = fetch_loss_and_grads(x)
        grad_values = outs[1]
        # if max_loss is not None and outs[0] > max_loss:
        #     break
        # Reshape the gradient to a column vector so it lines up with x[1].
        update = np.squeeze(grad_values).reshape(-1, 1)
        x[1] = x[1] - step * update
    return x
def label_correction(model, model_input, label, iter_=1, step_=1e-3):
    """correct label
    :param model: [keras model] Relapse risk prediction model
    :param model_input: [dataframe] Inputs
    :param label: [series] Labels that need to be corrected
    :param iter_: [int] Number of iterations for label modification
    :param step_: [float] Learning rate for label modification
    :return label_target: [dataframe] Corrected label
    """
    loss = K.variable(0.)
    coeff = 1
    # Loss: scaled squared activation of the model's last layer.
    activation = model.get_layer(index=-1).output
    scaling = K.prod(K.cast(K.shape(activation), 'float32'))
    loss = loss + coeff * K.sum(K.square(activation[:, :])) / scaling
    dream = model.input
    # Gradient of that loss w.r.t. the second model input (the label input),
    # L1-normalized for a stable step size.
    grads = K.gradients(loss, dream[1])
    grads /= K.maximum(K.mean(K.abs(grads)), 1e-7)
    outputs = [loss, grads]
    # NOTE(review): K.set_learning_phase(0) is passed positionally where
    # K.function expects `updates` -- confirm this has the intended effect.
    fetch_loss_and_grads = K.function([dream[0], dream[1]], outputs, K.set_learning_phase(0))
    # Deep-copy so the caller's label series is never mutated in place.
    label_target = pd.DataFrame(copy.deepcopy(label))
    label_target = gradient_ascent([model_input, label_target], fetch_loss_and_grads, iter=iter_, step=step_)[1]
    return label_target
def get_model(inputs,
              labels,
              is_training=True,
              dtype=tf.float32,
              w_dict=None,
              ex_wts=None,
              reuse=None):
    """Build a 2-hidden-layer MLP with a sigmoid cross-entropy loss.

    :param inputs: [Tensor] Inputs.
    :param labels: [Tensor] Labels.
    :param is_training: [bool] Whether in training mode, default True.
    :param dtype: [dtype] Data type, default tf.float32.
    :param w_dict: [dict] Dictionary of weights, default None.
    :param ex_wts: [Tensor] Example weights placeholder, default None.
    :param reuse: [bool] Whether to reuse variables, default None.
    :return: (w_dict, loss, logits)
    """
    if w_dict is None:
        w_dict = {}

    def _get_var(name, shape, dtype, initializer):
        # Memoize variables so a re-entrant call (reuse=True, or a virtual-step
        # pass with an updated w_dict) sees consistent tensors.
        # NOTE(review): the live path below uses tf.layers.dense, which does
        # not register its variables here -- confirm callers that read w_dict
        # (e.g. reweight_autodiff) get the variables they expect.
        key = tf.get_variable_scope().name + '/' + name
        if key in w_dict:
            return w_dict[key]
        var = tf.get_variable(name, shape, dtype, initializer=initializer)
        w_dict[key] = var
        return var

    with tf.variable_scope('Model', reuse=reuse):
        # Flatten to [-1, feature_dims...] and cast inputs/labels to dtype.
        shape_list = np.append(np.array([-1]), np.squeeze(inputs.shape[1:].as_list()))
        inputs_ = tf.cast(tf.reshape(inputs, shape_list), dtype)
        labels = tf.cast(tf.reshape(labels, [-1, 1]), dtype)
        dense1 = tf.layers.dense(inputs=inputs_, units=64, activation=tf.nn.relu)
        dense2 = tf.layers.dense(inputs=dense1, units=16, activation=tf.nn.relu)
        # BUG FIX: the final layer must emit raw logits. The previous version
        # used activation=tf.nn.sigmoid and then fed the result into
        # sigmoid_cross_entropy_with_logits, which applies sigmoid internally,
        # double-squashing the outputs and flattening the gradients.
        logits = tf.layers.dense(inputs=dense2, units=1, activation=None)
        if ex_wts is None:
            # Average loss.
            loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
        else:
            # Weighted loss.
            loss = tf.reduce_sum(
                tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels) * ex_wts)
    return w_dict, loss, logits
def reweight_random(bsize, eps=0.0):
    """Reweight examples using random numbers.
    :param bsize: [int] Batch size.
    :param eps: [float] Minimum example weights, default 0.0.
    """
    raw = tf.random_normal([bsize], mean=0.0, stddev=1.0)
    clipped = tf.maximum(raw, eps)
    total = tf.reduce_sum(clipped)
    # Guard against an all-zero draw: add 1 so the division stays defined.
    total += tf.to_float(tf.equal(total, 0.0))
    normalized = clipped / total
    return normalized
def reweight_autodiff(inp_a,
                      label_a,
                      inp_b,
                      label_b,
                      bsize_a,
                      bsize_b,
                      eps=0.0,
                      gate_gradients=1):
    """Reweight examples using automatic differentiation.

    Learning-to-reweight: take one virtual SGD step on the noisy batch with
    free per-example weights, then derive each weight from the (negated)
    gradient of the clean-batch loss w.r.t. that example's weight.
    :param inp_a: [Tensor] Inputs for the noisy pass.
    :param label_a: [Tensor] Labels for the noisy pass.
    :param inp_b: [Tensor] Inputs for the clean pass.
    :param label_b: [Tensor] Labels for the clean pass.
    :param bsize_a: [int] Batch size for the noisy pass.
    :param bsize_b: [int] Batch size for the clean pass.
    :param eps: [float] Minimum example weights, default 0.0.
    :param gate_gradients: [int] Tensorflow gate gradients, reduce concurrency.
    """
    # Zero weights on the noisy pass isolate each example's influence.
    ex_wts_a = tf.zeros([bsize_a], dtype=tf.float32)
    ex_wts_b = tf.ones([bsize_b], dtype=tf.float32) / float(bsize_b)
    w_dict, loss_a, logits_a = get_model(
        inp_a, label_a, ex_wts=ex_wts_a, is_training=True, reuse=True)
    var_names = w_dict.keys()
    var_list = [w_dict[kk] for kk in var_names]
    grads = tf.gradients(loss_a, var_list, gate_gradients=gate_gradients)
    # One virtual SGD step (unit learning rate) expressed as new tensors.
    var_list_new = [vv - gg for gg, vv in zip(grads, var_list)]
    w_dict_new = dict(zip(var_names, var_list_new))
    _, loss_b, logits_b = get_model(
        inp_b, label_b, ex_wts=ex_wts_b, is_training=True, reuse=True, w_dict=w_dict_new)
    # Influence of each noisy example's weight on the clean-batch loss.
    grads_ex_wts = tf.gradients(loss_b, [ex_wts_a], gate_gradients=gate_gradients)[0]
    ex_weight = -grads_ex_wts
    # Clip at eps and normalize to a distribution (guarding an all-zero sum).
    ex_weight_plus = tf.maximum(ex_weight, eps)
    ex_weight_sum = tf.reduce_sum(ex_weight_plus)
    ex_weight_sum += tf.to_float(tf.equal(ex_weight_sum, 0.0))
    ex_weight_norm = ex_weight_plus / ex_weight_sum
    return ex_weight_norm
def reweight_hard_mining(inp, label, positive=False):
    """Reweight examples using hard mining.
    :param inp: [Tensor] [N, ...] Inputs.
    :param label: [Tensor] [N] Labels
    :param positive: [bool] Whether perform hard positive mining or hard negative mining.
    :return [Tensor] Examples weights of the same shape as the first dim of inp.
    """
    # NOTE(review): get_model returns a *reduced* (scalar) loss, so loss_mask
    # below is a uniform vector over the masked class and top_k picks
    # effectively arbitrary indices -- confirm a per-example loss was intended.
    _, loss, logits = get_model(inp, label, ex_wts=None, is_training=True, reuse=True)
    # Mine for positive
    if positive:
        loss_mask = loss * label
    else:
        loss_mask = loss * (1 - label)
    # k = size of the opposite class, floored at 1.
    if positive:
        k = tf.cast(tf.reduce_sum(1 - label), tf.int32)
    else:
        k = tf.cast(tf.reduce_sum(label), tf.int32)
    k = tf.maximum(k, 1)
    loss_sorted, loss_sort_idx = tf.nn.top_k(loss_mask, k)
    # Keep the whole opposite class...
    if positive:
        mask = 1 - label
    else:
        mask = label
    # ...plus the k hardest mined examples (scatter 1s at their indices).
    updates = tf.ones([tf.shape(loss_sort_idx)[0]], dtype=label.dtype)
    mask_add = tf.scatter_nd(tf.expand_dims(loss_sort_idx, axis=1), updates, [tf.shape(inp)[0]])
    mask = tf.maximum(mask, mask_add)
    # Normalize to sum to 1, guarding against an all-zero mask.
    mask_sum = tf.reduce_sum(mask)
    mask_sum += tf.cast(tf.equal(mask_sum, 0.0), tf.float32)
    mask = mask / mask_sum
    return mask
def get_lenet_model(inputs,
                    labels,
                    is_training=True,
                    dtype=tf.float32,
                    w_dict=None,
                    ex_wts=None,
                    reuse=None):
    """Builds a simple LeNet.
    :param inputs: [Tensor] Inputs.
    :param labels: [Tensor] Labels.
    :param is_training: [bool] Whether in training mode, default True.
    :param dtype: [dtype] Data type, default tf.float32.
    :param w_dict: [dict] Dictionary of weights, default None.
    :param ex_wts: [Tensor] Example weights placeholder, default None.
    :param reuse: [bool] Whether to reuse variables, default None.
    :return: (w_dict, loss, logits)
    """
    if w_dict is None:
        w_dict = {}
    def _get_var(name, shape, dtype, initializer):
        # Memoize variables in w_dict so re-entrant calls (reuse=True or a
        # virtual-step pass with updated weights) see consistent tensors.
        key = tf.get_variable_scope().name + '/' + name
        if key in w_dict:
            return w_dict[key]
        else:
            var = tf.get_variable(name, shape, dtype, initializer=initializer)
            w_dict[key] = var
            return var
    with tf.variable_scope('Model', reuse=reuse):
        # MNIST-style input: reshape to [N, 28, 28, 1] and cast.
        inputs_ = tf.cast(tf.reshape(inputs, [-1, 28, 28, 1]), dtype)
        labels = tf.cast(labels, dtype)
        w_init = tf.truncated_normal_initializer(stddev=0.1)
        w1 = _get_var('w1', [5, 5, 1, 16], dtype, initializer=w_init) # [14, 14, 16]
        w2 = _get_var('w2', [5, 5, 16, 32], dtype, initializer=w_init) # [7, 7, 32]
        w3 = _get_var('w3', [5, 5, 32, 64], dtype, initializer=w_init) # [4, 4, 64]
        w4 = _get_var('w4', [1024, 100], dtype, initializer=w_init)
        w5 = _get_var('w5', [100, 1], dtype, initializer=w_init)
        b_init = tf.constant_initializer(0.0)
        b1 = _get_var('b1', [16], dtype, initializer=b_init)
        b2 = _get_var('b2', [32], dtype, initializer=b_init)
        b3 = _get_var('b3', [64], dtype, initializer=b_init)
        b4 = _get_var('b4', [100], dtype, initializer=b_init)
        b5 = _get_var('b5', [1], dtype, initializer=b_init)
        act = tf.nn.relu
        # Conv-1
        l0 = tf.identity(inputs_, name='l0')
        z1 = tf.add(tf.nn.conv2d(inputs_, w1, [1, 1, 1, 1], 'SAME'), b1, name='z1')
        l1 = act(tf.nn.max_pool(z1, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME'), name='l1')
        # Conv-2
        z2 = tf.add(tf.nn.conv2d(l1, w2, [1, 1, 1, 1], 'SAME'), b2, name='z2')
        l2 = act(tf.nn.max_pool(z2, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME'), name='l2')
        # Conv-3
        z3 = tf.add(tf.nn.conv2d(l2, w3, [1, 1, 1, 1], 'SAME'), b3, name='z3')
        l3 = act(tf.nn.max_pool(z3, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME'), name='l3')
        # FC-4
        z4 = tf.add(tf.matmul(tf.reshape(l3, [-1, 1024]), w4), b4, name='z4')
        l4 = act(z4, name='l4')
        # FC-5 -- raw logits; the loss below applies the sigmoid internally.
        z5 = tf.add(tf.matmul(l4, w5), b5, name='z5')
        logits = tf.squeeze(z5)
        out = tf.sigmoid(logits)  # NOTE(review): computed but never returned/used
        if ex_wts is None:
            # Average loss.
            loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
        else:
            # Weighted loss.
            loss = tf.reduce_sum(
                tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels) * ex_wts)
    return w_dict, loss, logits
|
Python
| 474
| 38.405064
| 118
|
/Regression/src/model/bulid_model.py
| 0.567864
| 0.543931
|
Peroxidess/Ablation-Time-Prediction-Model
|
refs/heads/main
|
import numpy as np
import pandas as pd
import six
from tqdm import tqdm
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
from preprocess.load_data import load_data_
from preprocess.get_dataset import get_dataset_, data_preprocessing, anomaly_dectection
from model.training_ import training_model, model_training, precision, recall, f1, r2
from model.history_ import plot_history_df
def run(train_data, test_data, seed, task_name, target='label'):
    """Run one experiment: preprocess, train (optionally K-fold), collect metrics.

    :param train_data: raw training dataframe
    :param test_data: raw test dataframe
    :param seed: random seed for dataset splitting / label confusion
    :param task_name: which task/model variant to train
    :param target: label column name, default 'label'
    :return: (metric_df, test_prediction, history_df) aggregated over folds
    Reads the module-level configuration globals (n_splits, epoch, batchsize,
    iter_, step_, clean_ratio, test_ratio, val_ratio, nrun).
    """
    train_data, test_data, co_col, ca_col, nor = data_preprocessing(train_data, test_data,
                                                                    ca_co_sel_flag=False, onehot_flag=True)
    _, test_data = anomaly_dectection(train_data, test_data)
    # train_data, test_data = anomaly_dectection(train_data, test_data)  # outlier detection on train too
    train_set_mix, train_set_mix_label, val_set, val_set_label, test_set, test_set_label = \
        get_dataset_(nor, train_data, test_data, clean_ratio=clean_ratio,
                     test_retio=test_ratio, seed=seed, val_ratio=val_ratio)  # label confusion per config
    metric_df = pd.DataFrame([])
    test_prediction = pd.DataFrame([])
    history_df = pd.DataFrame([])
    history_list = []
    epoch_len_list = []
    if n_splits > 1:
        # BUG FIX: random_state is only valid together with shuffle=True;
        # scikit-learn >= 0.24 raises ValueError for shuffle=False + a seed.
        kf = KFold(n_splits=n_splits, shuffle=False)
        for k, (train_index, val_index) in enumerate(kf.split(train_set_mix)):
            print('KFlod in : {}'.format(k))
            model_, history_, metric_, test_pred_, epoch_len = training_model(train_set_mix, train_set_mix_label,
                                                                              task_name, train_index, val_index,
                                                                              test_set, test_set_label,
                                                                              epoch, batchsize, iter_, step_, target, seed)
            metric_df = pd.concat([metric_df, metric_], axis=0)
            history_df = pd.concat([history_df, history_], axis=1)
            history_list.append(history_)
            test_prediction = pd.concat([test_prediction, pd.DataFrame(test_pred_)], axis=1)
            epoch_len_list.append(epoch_len)
        plot_history_df(history_list, task_name)
        print('epoch_len_mean', np.mean(epoch_len_list))  # mean epoch across K-fold cross validation
    else:
        model_, history_, metric_, test_pred_, epoch_len = training_model(train_set_mix, train_set_mix_label,
                                                                          task_name, None, None,
                                                                          test_set, test_set_label,
                                                                          epoch, batchsize, iter_, step_, target, seed)
        metric_df = pd.concat([metric_df, metric_], axis=0)
        test_prediction = pd.concat([test_prediction, pd.DataFrame(test_pred_)], axis=1)
        history_df = pd.concat([history_df, history_], axis=1)
        history_list.append(history_)
        plot_history_df(history_list, task_name, val_flag='val_')
    try:
        model_.save('{}_{}nrun_{}Fold.h5'.format(task_name, nrun, n_splits))
    except Exception:
        # Best-effort save: keep the computed results even if serialization fails.
        print('Failed to save model')
    return metric_df, test_prediction, history_df
# --- Experiment configuration (read as module globals by run() and main()) ---
np.random.seed(2020)
datasets_name = 'LiverAblation'
task_name = 'ablation_time_load' # ablation_time_enh / ablation_time_vanilla / relapse_risk
nrun = 10 # num of repeated experiments
clean_ratio = 1 # 1 for No label confusion
test_ratio = 0 # test data ratio for label confusion
val_ratio = 0 # val data ratio for label confusion
n_splits = 1 # n_splits > 1 for Kfold cross validation / n_splits==1 for training all data
epoch = 5000 # Kfold cross validation: a large number / training all data: mean epoch
batchsize = 256
iter_ = 2 # Number of iterations for label modification
step_ = 0.0001 # learning rate for label modification
def main():
    """Repeat the experiment `nrun` times, aggregate metrics, and write CSV reports.

    Uses the module-level configuration globals (datasets_name, task_name,
    nrun, n_splits, ...). Each repetition derives a deterministic seed from
    its trial index, loads a fresh train/test split, and calls run().
    """
    metric_df_all = pd.DataFrame([])
    test_prediction_all = pd.DataFrame([])  # predictions on test data, one column block per run
    history_df_all = pd.DataFrame([])  # keras training histories
    # The previous `for i, trial in enumerate(tqdm(six.moves.xrange(nrun)))`
    # always had i == trial; the enumerate + Py2 xrange shim was redundant.
    for trial in tqdm(range(nrun)):
        print('rnum : {}'.format(trial))
        seed = (trial * 2718) % 2020  # a different random seed for each run
        train_data, test_data = load_data_(datasets_name, task_name, seed)
        metric_df, test_prediction, history_df = run(train_data, test_data, seed, task_name)
        metric_df_all = pd.concat([metric_df_all, metric_df], axis=0)
        test_prediction_all = pd.concat([test_prediction_all, test_prediction], axis=1)
        history_df_all = pd.concat([history_df_all, history_df], axis=1)
    # Per-metric summary statistics across all runs.
    for col in metric_df_all.columns:
        print('{} {:.4f} ({:.4f}) max: {:.4f} median {:.4f} min: {:.4f}'.format(col, metric_df_all[col].mean(),
                                                                                metric_df_all[col].std(),
                                                                                metric_df_all[col].max(),
                                                                                metric_df_all[col].median(),
                                                                                metric_df_all[col].min()))
    metric_df_all.to_csv('./metric_{}_{}nrun_{}Fold.csv'.format(task_name, nrun, n_splits), index=False)
    history_df_all.to_csv('./history_{}_{}nrun_{}Fold.csv'.format(task_name, nrun, n_splits), index=False)
    # test_prediction_all.columns = ['ab_time', 'ab_time_enh']
    test_prediction_all.to_csv('./prediction{}_{}nrun_{}Fold.csv'.format(task_name, nrun, n_splits))
    plt.show()
# Script entry point.
if __name__ == '__main__':
    main()
    pass
|
Python
| 103
| 54.815533
| 123
|
/Regression/src/main.py
| 0.567229
| 0.558184
|
deepikaasharma/string-concat-for-numbers
|
refs/heads/master
|
# Concatenate the three digit strings, then print them as one integer.
first_num = '123'
second_num = '456'
third_num = '789'
final_num = ''.join([first_num, second_num, third_num])
print(int(final_num))
|
Python
| 7
| 21.142857
| 44
|
/main.py
| 0.688312
| 0.62987
|
islamaf/Software-development-exercise
|
refs/heads/main
|
import os
from tkinter import Tk, ttk, filedialog
import pandas as pd
from win32 import win32api
# Main window setup: fixed initial size, centred on screen.
root = Tk()
root.title('Ahram Exam')
root.resizable(True, True)
root.frame_header = ttk.Frame()
root.geometry("350x250")
root.eval('tk::PlaceWindow . center')
ttk.Label(root.frame_header, text='Browse file to open:', style='Header.TLabel', font=("Arial", 15)).grid(row=1, column=1)
# NOTE(review): .grid() returns None, so `filename` is always None (and unused).
filename = ttk.Button(root.frame_header, text="Browse", command=lambda: open_file()).grid(row=4, column=1)
print_result = ttk.Button(root.frame_header, text="Print result", command=lambda: print_file())
print_result.grid(row=12, column=1)
# Disabled until open_file() has produced a result to print.
print_result['state'] = 'disabled'
def open_file():
    """Prompt for an Excel file, show its column means, and persist the winner.

    Reads the chosen file with pandas, opens it in the OS default viewer,
    renders the per-column averages plus the maximum-average column in the
    window, writes that result to maximum_average.txt, and enables the
    "Print result" button.
    """
    file_to_open = filedialog.askopenfilename(initialdir="C:/", title="Select file",
                                              filetypes=(("all files", "*.*"), ("excel files", "*.xls")))
    df = pd.read_excel(file_to_open)
    os.startfile(file_to_open)
    ttk.Label(root.frame_header, text='All averages:', style='Header.TLabel', font=("Arial", 15)).grid(row=6, column=1)
    ttk.Label(root.frame_header, text=df.mean(), style='Header.TLabel', font=("Arial", 15)).grid(row=8, column=1)
    # FIX: compute the winner once (was called twice), and use a context
    # manager so the file handle is closed even if write() raises.
    winner = get_max_mean(df)
    ttk.Label(root.frame_header, text=winner, style='Header.TLabel', font=("Arial", 15)).grid(row=10, column=1)
    with open('maximum_average.txt', 'w') as f:
        f.write(winner)
    root.geometry("350x350")
    print_result['state'] = 'enabled'
def print_file():
    """Send the saved maximum-average file to the default printer (Windows-only API)."""
    file_to_print = "maximum_average.txt"
    if file_to_print:  # always truthy here; guards against an empty path
        win32api.ShellExecute(0, "print", file_to_print, None, ".", 0)
def get_max_mean(l):
    """Return a message naming the column of DataFrame *l* with the highest mean.

    :param l: pandas DataFrame of numeric columns
    :return: '<column> is the maximum', or '' for an empty frame

    FIX: the previous version seeded the running maximum at 0 with a strict
    `>` comparison, so it returned '' whenever every column mean was <= 0.
    Using Series.idxmax handles negative means and keeps the same
    first-occurrence tie-breaking.
    """
    means = l.mean()
    if len(means) == 0:
        return ""
    max_column = means.idxmax()
    return f'{max_column} is the maximum'
# Lay out the header frame and hand control to Tk's event loop.
root.frame_header.pack(pady=10, anchor="center")
root.mainloop()  # blocks until the window is closed
|
Python
| 62
| 30.161291
| 122
|
/gui_main.py
| 0.633868
| 0.610564
|
CENSOREDd/test_fk
|
refs/heads/master
|
#!/usr/bin/python3
from time import sleep
# Runs on import as well as when executed directly.
print("what the fuck???")
if __name__ == "__main__":
    print("here is python code!!!")
    print("Executing code...")
    sleep(2)  # pause so the output stays visible before the script exits
|
Python
| 10
| 16.799999
| 35
|
/fk.py
| 0.578652
| 0.567416
|
CENSOREDd/test_fk
|
refs/heads/master
|
#!/usr/bin/python3
import fk
print("here is test")  # simple smoke check that the import above succeeded
|
Python
| 5
| 9.6
| 21
|
/test.py
| 0.679245
| 0.660377
|
hui98/opencv
|
refs/heads/master
|
import cv2
import numpy as np
import random
from math import *
# import an image
class image:
    """Thin OpenCV wrapper around a single image file (Python 2 code).

    Holds the directory, filename and imread mode, and offers display,
    save and per-pixel access helpers.
    """
    def __init__(self,na):
        # Images are loaded from a fixed pictures directory by default.
        self.dir='/home/hui/Pictures/'
       # self.name=raw_input('please input the picture name')
        self.name=na
        self.mode=cv2.IMREAD_COLOR
        self.im=cv2.imread(self.dir+self.name,self.mode)
    def reconf(self):
        # Re-read the file, picking up any dir/name/mode changes.
        self.im = cv2.imread(self.dir + self.name, self.mode)
    def modechoose(self,modex):
        # Switch the imread mode by keyword and reload the image.
        if modex=='color':
            self.mode=cv2.IMREAD_COLOR
        elif modex == 'gray':
            self.mode=cv2.IMREAD_GRAYSCALE
        elif modex== 'alpha':
            self.mode=cv2.IMREAD_UNCHANGED
        else:
            print('wrong mode')
        self.reconf()
    def routechange(self):
        # Interactively point this object at a different file (Python 2 raw_input).
        self.dir=raw_input('input your new route')
        self.name=raw_input('input your new filename')
        self.reconf()
    def show(self):
        # Display the image; ESC (keycode 27) closes all windows.
        cv2.imshow('huihui',self.im)
        k=cv2.waitKey(0)&0xFF
        if k==27:  # wait for ESC
            self.dele('all')
    def dele(self,modeb):
        # Destroy either every window ('all') or one chosen by name ('name').
        if modeb=='all':
            cv2.destroyAllWindows()
        if modeb=='name':
            cv2.destroyWindow(raw_input("please input your window's name"))
    def saveas(self):
        cv2.imwrite(raw_input('input your new filename'),self.im)
    def getpixel(self,a,b,c): # print channel value at row a, col b, channel c (0/1/2 = B/G/R)
        print self.im.item(a,b,c)
    def setpixel(self,e,f,g,h): # set channel value at row e, col f, channel g to h
        self.im.itemset((e,f,g),h)
look=image('hsj.jpeg')
shino =image('shino.jpeg')
juhua=image('juhua.jpg')
juhua.show()
'''for a in range(0,5000)
x=random.randint(0,280)
y=random.randint(0,449)
for b in range(0,3):
value=random.randint(0,255)
look.setpixel(x,y,b,value)'''
'''look.show()
shino.show()'''
'''test=look.im[50:140,100:200]
cv2.imshow('hui',test)
k = cv2.waitKey(0) & 0xFF
cv2.destroyAllWindows()'''
rows,cols,channel=look.im.shape
row,col,channels=shino.im.shape
pix=[]
sbliye=[]
hezi=[]
R=[]
G=[]
B=[]
n=130
route='/home/hui/'
green='sbliyeG.txt'
blue='sbliyeB.txt'
red='sbliyeR.txt'
gg=open(route+green,'w')
bb=open(route+blue,'w')
rr=open(route+red,'w')
'''M=cv2.getRotationMatrix2D((220,240),0,0.6)
K = cv2.getRotationMatrix2D((300, 300), 0, 0.5)
dst=cv2.warpAffine(look.im,M,(cols,rows))
shino1=cv2.warpAffine(shino.im,K,(col,row))
cv2.imshow('hui',dst)
cv2.imshow('shino',shino1)
for times in range(0,n):
M=cv2.getRotationMatrix2D((215,248),(times)*360.0/n,1)
dsto=cv2.warpAffine(dst,M,(cols,rows))
if times==129:
cv2.imshow('hi',dsto)
look.im=dst
for led in range(1,33):
for i in range(0,3):
pix.append(dsto.item(215,248-5*led,i))
shino1.itemset((300, 300-5*led, i),dsto.item(215,248-5*led,i) )
K = cv2.getRotationMatrix2D((300, 300), 360.0 / n, 1)
shino1 = cv2.warpAffine(shino1, K, (col, row))
cv2.imshow('huihui', shino1)'''
M=cv2.getRotationMatrix2D((220,240),0,0.6)
dst=cv2.warpAffine(juhua.im,M,(cols,rows))
def qm(x,y,nn):
    """Rotate point (x, y) by nn steps of pi/65 rad, then translate to (195, 154).

    :param x: point x coordinate (pixel units, relative to the rotation centre)
    :param y: point y coordinate
    :param nn: number of rotation steps applied
    :return: [x1, y1] -- the rotated, translated coordinates rounded to ints
    """
    centre_x, centre_y = 195, 154
    theta = nn * (pi / 65)
    # Row-vector convention: [x, y] multiplied by the rotation matrix.
    rot = np.matrix([[cos(theta), -sin(theta)], [sin(theta), cos(theta)]])
    rotated = (np.matrix([x, y]) * rot).tolist()[0]
    x1 = int(round(rotated[0])) + centre_x
    y1 = int(round(rotated[1])) + centre_y
    return [x1, y1]
zuobiao=[]
# Sample 32 radial positions for each of the 130 angular steps, copying the
# sampled pixels into the preview image as we go.
for times in range(0,130):
    for nnn in range(0,32):
        aaa=qm(0,4*nnn+1,times)
        zuobiao.append(aaa)
        for i in range(0,3):
            pix.append(dst.item(aaa[0],aaa[1],i))
            shino.im.itemset((aaa[0],aaa[1],i),dst.item(aaa[0],aaa[1],i))
cv2.imshow('hui',dst)
shino.show()
# Threshold every sampled channel value to a single bit.
lenth=n*32*3
for time in range(0,lenth):
    if pix[time]<128:
        sbliye.append('0')
    else :
        sbliye.append('1')
# Regroup the bit stream into per-channel 32-bit words, one per angular step,
# and write them as hex literals formatted as C-style array initializers.
for ttt in range(0,n):
    for ledp in range(0,32):
        B.append(sbliye[(ttt+1)*96-(ledp+1)*3])
        G.append(sbliye[(ttt+1)*96-(ledp+1)*3+1])
        R.append(sbliye[(ttt+1)*96-(ledp+1)*3+2])
    b=''.join(B)
    g=''.join(G)
    r=''.join(R)
    B=[]
    G=[]
    R=[]
    BB=hex(int(b,2))
    GG=hex(int(g,2))
    RR=hex(int(r,2))
    if ttt==n-1:
        # Last word: terminate the line without a trailing comma.
        rr.write(RR+'\n')
        bb.write(BB+'\n')
        gg.write(GG+'\n')
    else :
        if (ttt+1)%4==0 and ttt!=0:
            # Break the line every four words.
            rr.write(RR+',\n')
            bb.write(BB + ',\n')
            gg.write(GG + ',\n')
        else :
            rr.write(RR+' ,')
            bb.write(BB+' ,')
            gg.write(GG+' ,')
rr.close()
bb.close()
gg.close()
k=cv2.waitKey(0)&0xFF
if k==27:
    cv2.destroyAllWindows()
|
Python
| 173
| 25.757225
| 99
|
/opencvtest.py
| 0.568932
| 0.514995
|
rodelrod/pomodoro-report
|
refs/heads/master
|
#!/usr/bin/env python
import unittest
from notebook_parser import *
import os
import errno
from datetime import datetime
def mkdir_p(path):
    """Create *path* and any missing parents, like `mkdir -p`.

    Silently succeeds when the directory already exists.

    FIX: the previous version swallowed EEXIST unconditionally, so it also
    succeeded silently when *path* existed but was a regular file; now we
    only ignore EEXIST if the path really is a directory.
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise
class TestParser(unittest.TestCase):
    """Tests the RedNotebook monthly files parser."""
    def setUp(self):
        # Build a fixture month file (2012-10) with three day entries:
        # day 21 uses the inline {text: ...} layout, days 25 and 27 also
        # carry category maps that the parser must skip over.
        self.nb_path = '/tmp/test_pomodoro_report'
        mkdir_p(self.nb_path)
        f = open(os.path.join(self.nb_path, '2012-10.txt'), 'w')
        f.write(
            "21: {text: '1/1 fix import sewan\n"
            "    2/2 check fidelidade, delete 0836\n"
            "    0/1 org desk'}\n"
            "25:\n"
            "  Cat3: {Some other shit: null}\n"
            "  text: '1/1 fix import sewan\n"
            "    2/2 check fidelidade, delete 0836\n"
            "    0/1 org desk'\n"
            "27:\n"
            "  Cat1: {Some shit: null}\n"
            "  text: '1/1 fix import sewan\n"
            "    2/2 check fidelidade, delete 0836\n"
            "    0/1 report incongruencias sewan pdf/cdr\n"
            "    1/1 fix b''illing db and run\n"
            "    0/2 guide entretien prestataire\n"
            "    0/1 org desk'\n")
        f.close()
        self.p = Parser(self.nb_path)
    def test_get_nb_filename(self):
        self.assertEqual(
            self.p._get_nb_filename(datetime(2012, 10, 14)),
            os.path.join(self.nb_path,'2012-10.txt'))
    def test_parse_day_block(self):
        block = ['', '5', 'some stuff', '26', 'some other stuff']
        expected = {5: 'some stuff', 26: 'some other stuff'}
        self.assertEqual(self.p._parse_day_block(block), expected)
    def test_get_day_with_categories(self):
        """Get day 27."""
        expected = (
            "\n"
            "  Cat1: {Some shit: null}\n"
            "  text: '1/1 fix import sewan\n"
            "    2/2 check fidelidade, delete 0836\n"
            "    0/1 report incongruencias sewan pdf/cdr\n"
            "    1/1 fix b''illing db and run\n"
            "    0/2 guide entretien prestataire\n"
            "    0/1 org desk'\n")
        actual = self.p._get_day(datetime(2012, 10, 27))
        self.assertEqual(actual, expected)
    def test_get_day_without_categories(self):
        """Get day 21."""
        expected = (
            " {text: '1/1 fix import sewan\n"
            "    2/2 check fidelidade, delete 0836\n"
            "    0/1 org desk'}\n")
        actual = self.p._get_day(datetime(2012, 10, 21))
        self.assertEqual(actual, expected)
    def test_get_inexistant_day(self):
        """Get 14/10."""
        with self.assertRaises(EmptyDayException):
            self.p._get_day(datetime(2012, 10, 14))
    def test_get_inexistant_month(self):
        """Get 14/04."""
        with self.assertRaises(EmptyDayException):
            self.p._get_day(datetime(2012, 4, 14))
    def test_get_text_with_categories(self):
        block = (
            "\n"
            "  Cat1: {Some shit: null}\n"
            "  text: '1/1 fix import sewan\n"
            "    2/2 check fidelidade, delete 0836\n"
            "    0/1 report incongruencias sewan pdf/cdr\n"
            "    1/1 fix b''illing db and run\n"
            "    0/2 guide entretien prestataire\n"
            "    0/1 org desk'\n")
        # Note: the doubled quote ('') must be unescaped to a single quote.
        expected = (
            "1/1 fix import sewan\n"
            "    2/2 check fidelidade, delete 0836\n"
            "    0/1 report incongruencias sewan pdf/cdr\n"
            "    1/1 fix b'illing db and run\n"
            "    0/2 guide entretien prestataire\n"
            "    0/1 org desk")
        self.assertEqual(self.p._get_text(block), expected)
    def test_get_text_without_categories(self):
        block = (
            " {text: '1/1 fix import sewan\n"
            "    2/2 check fidelidade, delete 0836\n"
            "    0/1 org desk'}\n")
        expected = (
            "1/1 fix import sewan\n"
            "    2/2 check fidelidade, delete 0836\n"
            "    0/1 org desk")
        self.assertEqual(self.p._get_text(block), expected)
    def test_get_pomodoros(self):
        # TODO
        pass
    def tearDown(self):
        # Remove the fixture file; the fixture directory is left in place.
        os.remove(os.path.join(self.nb_path, '2012-10.txt'))
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
Python
| 130
| 33.607693
| 66
|
/test_notebook_parser.py
| 0.498778
| 0.454545
|
rodelrod/pomodoro-report
|
refs/heads/master
|
#!/usr/bin/env python
import re
import os
NOTEBOOK_PATH = '/home/rrodrigues/.rednotebook/data'


class EmptyDayException(Exception):
    """No info was entered for this date."""


class Parser(object):
    """Parses RedNotebook monthly files.

    This is a very basic parser used to extract Pomodoro references for each
    day. It has the following limitations:
    - Basically assumes there is nothing but the Pomodoro references in the
      day's text.
    - Ignores any Tags.
    - Ignores any Categories.
    - In the fancy cases where the text field ends up surrounded by double
      quotes instead of single quotes, it breaks.
    """

    def __init__(self, nb_path=NOTEBOOK_PATH):
        self.nb_path = nb_path

    def _get_nb_filename(self, date):
        """Return the path of the monthly file that holds *date*."""
        return os.path.join(self.nb_path, date.strftime('%Y-%m.txt'))

    @staticmethod
    def _parse_day_block(day_block_list):
        """Pair day numbers with their content.

        *day_block_list* is the output of re.split on the day-header pattern:
        an alternating [junk, day, content, day, content, ...] list.
        """
        day_blocks = {}
        is_content = False
        for token in day_block_list:
            if token.isdigit() and not is_content:
                day = int(token)
                is_content = True
            elif is_content:
                day_blocks[day] = token
                is_content = False
        return day_blocks

    def _get_day(self, date):
        """Return the raw notebook block for *date*.

        Raises EmptyDayException when either the month file or the day
        entry is missing.
        """
        month_filename = self._get_nb_filename(date)
        if not os.path.isfile(month_filename):
            raise EmptyDayException
        with open(month_filename, 'r') as nb_file:
            file_contents = nb_file.read()
        # FIX: raw string -- '\d' is an invalid escape sequence in modern
        # Python (DeprecationWarning, SyntaxError in the future).
        day_blocks_list = re.split(r'^(\d+):', file_contents, flags=re.MULTILINE)
        day_blocks = self._parse_day_block(day_blocks_list)
        try:
            return day_blocks[date.day]
        except KeyError:
            raise EmptyDayException

    def _get_text(self, block):
        """Extract the single-quoted text field from a day block.

        Handles YAML-style escaping where a literal quote appears as ''.
        """
        # FIX: raw string for the same invalid-escape reason as above.
        after_text = re.split(r'\Wtext:', block)[1]
        quote_set = False
        started_text = False
        text = []
        for token in after_text:
            if token == "'":
                if not started_text:
                    # First quote: the text starts here.
                    started_text = True
                elif quote_set:
                    # Second quote of an escaped pair -> literal quote.
                    text.append("'")
                    quote_set = False
                else:
                    # A lone quote inside the text: either the end, or the
                    # first half of an escape -- decided by the next char.
                    quote_set = True
            else:
                if quote_set:
                    # First char after a lone quote is not a quote, so that
                    # quote closed the text.
                    break
                elif started_text:
                    text.append(token)
        return ''.join(text)

    def get_pomodoros(self):
        # TODO
        pass
|
Python
| 94
| 31.957447
| 80
|
/notebook_parser.py
| 0.525806
| 0.525484
|
shashi/phosphene
|
refs/heads/master
|
#
# This script plays an mp3 file and communicates via serial.Serial
# with devices in the Technites psychedelic room to visualize the
# music on them.
#
# It talks to 4 devices
# WaterFall -- tubes with LEDs and flying stuff fanned to music
# DiscoBall -- 8 60 watt bulbs wrapped in colored paper
# LEDWall -- a 4 channel strip of LED
# this time it was the LED roof instead :p
# LEDCube -- a 10x10x10 LED cube - work on this is still on
#
# the script also has a sloppy pygame visualization of the fft and
# beats data
#
import sys
import time
import scipy
import pygame
from pygame import display
from pygame.draw import *
import pathsetup # this module sets up PYTHONPATH for all this to work
from devices.discoball import DiscoBall
from devices.waterfall import Waterfall
from devices.ledwall import LEDWall
from devices.cube import Cube
import phosphene
from phosphene import audio, signalutil, util
from phosphene.util import *
from phosphene.signal import *
from phosphene.dsp import *
from phosphene.graphs import *
from phosphene.signalutil import *
from cube import cubeProcess
#from phosphene import cube
from threading import Thread
# Setup devices with their corresponding device files
# Devices in the psychedelic room, one per Arduino serial port.
devs = [
    Waterfall("/dev/ttyACM0"),
    DiscoBall("/dev/ttyACM1"),
    LEDWall("/dev/ttyACM2")
]
pygame.init()
surface = display.set_mode((640, 480))
# Require an mp3 path on the command line (Python 2 print statement).
if len(sys.argv) < 2:
    print "Usage: %s file.mp3" % sys.argv[0]
    sys.exit(1)
else:
    fPath = sys.argv[1]
sF, data = audio.read(fPath)
import serial
# Wrap the samples in a Signal; A is the mono mix of the two stereo channels.
signal = Signal(data, sF)
signal.A = lift((data[:,0] + data[:,1]) / 2, True)
for d in devs:
    d.setupSignal(signal)
def devices(s):
    """Redraw every connected device from signal *s*, then repaint the debug graphs."""
    #threads = []
    for d in devs:
        if d.isConnected:
            def f():
                d.redraw(s)
                d.readAck()
            #t = Thread(target=f)
            #threads.append(t)
            #t.start()
            f()  # serial fallback of the disabled threaded dispatch above
    #for t in threads:
    #    t.join(timeout=2)
    #    if t.isAlive():
    #        d.isUnresponsive()
    surface.fill((0, 0, 0))
    # Collect each device's graph renderer (skipping devices with none)
    # and draw them all onto the pygame surface.
    graphsGraphs(filter(
        lambda g: g is not None,
        [d.graphOutput(signal) for d in devs]))(surface, (0, 0, 640, 480))
CubeState = lambda: 0
CubeState.count = 0
#cube = Cube("/dev/ttyACM1", emulator=True)
def cubeUpdate(signal):
    """Advance the LED-cube animation by one frame, threading the frame
    counter through CubeState.

    NOTE(review): the module-level `cube` device is commented out above,
    so calling this as-is would raise NameError -- confirm before adding
    it back to `processes`.
    """
    CubeState.count = cubeProcess(cube, signal, CubeState.count)
def graphsProcess(s):
    """Flip the pygame display; runs once per perceive() frame."""
    display.update()
processes = [graphsProcess, devices] #, cube.emulator]
signal.relthresh = 1.66
soundObj = audio.makeSound(sF, data)
# make a pygame Sound object from the data
# run setup on the signal
signalutil.setup(signal)
soundObj.play() # start playing it. This is non-blocking
perceive(processes, signal, 90) # perceive your signal.
|
Python
| 115
| 23.347826
| 77
|
/src/apps/psychroom.py
| 0.664286
| 0.646786
|
shashi/phosphene
|
refs/heads/master
|
# Functions to help you lift and fold
from .signal import *
from dsp import *
import numpy
import pdb
import math
def setup(signal, horizon=576):
    """Attach the standard derived quantities (fft plus per-band stats)
    to `signal`.  Values are computed lazily by the signal graph.

    horizon -- window size (in samples) used for the short-time fft.
    """
    # Note of awesome: this only sets up dependencies,
    # things absolutely necessary are evaluated.
    # NOTE(review): A[-horizon/2:horizon/2] relies on the lifted signal's
    # time-relative indexing; on a plain sequence this slice would be
    # empty -- confirm against phosphene.signal's lift semantics.
    signal.fft = lift(lambda s: \
            fft(s.A[-horizon/2:horizon/2], False, True, True))
    for i in [1, 3, 4, 5, 6, 8, 12, 16, 32]:
        setup_bands(signal, i)
def setup_bands(signal, bands):
    """Attach per-band derived signals for a `bands`-channel split of
    the fft: chanN (binned fft), avgN (fast running average), longavgN
    (slow running average), peaksN (beat detector), and the two
    longavg-relative variants chanNrel / avgNrel."""
    def get(s, prefix):
        # fetch e.g. s.chan8 when bands == 8
        return getattr(s, prefix + str(bands))
    setattr(signal, 'chan%d' % bands,
            lift(lambda s: group(bands, s.fft)))
    # fast average: reacts quicker (rate 0.2) when the value rises
    setattr(signal, 'avg%d' % bands,
            blend(lambda s: get(s, 'chan'),
                lambda s, v, avg: 0.2 if v > avg else 0.5))
    # slow average: warms up quickly for the first 50 frames, then 0.992
    setattr(signal, 'longavg%d' % bands,
            blend(lambda s: get(s, 'chan'),
                lambda s, v, avg: 0.9 if s.frames < 50 else 0.992))
    # Booya.
    # a band "peaks" when its fast average exceeds thresh x its slow average
    thresh = 1.7
    setattr(signal, 'peaks%d' % bands,
            blend(lambda s: get(s, 'avg') > thresh * get(s, 'longavg'),
                lambda s, v, a: 0.2))
    # ratios relative to the slow average (guarding tiny denominators)
    setattr(signal, 'chan%drel' % bands,
            lift(lambda s: numpymap(
                lambda (x, y): x / y if y > 0.001 else 1,
                zip(get(s, 'chan'), get(s, 'longavg')))))
    setattr(signal, 'avg%drel' % bands,
            lift(lambda s: numpymap(
                lambda (x, y): x / y if y > 0.001 else 1,
                zip(get(s, 'avg'), get(s, 'longavg')))))
## Detecting beats
def normalize(data, signal, divisor=None):
    """Divide each band of `data` by a per-band divisor derived from
    `signal` (by default the matching 'longavg<n>' attribute), clamping
    every divisor to at least 0.01 to avoid blow-ups."""
    if divisor is None:
        divisor = lambda s, count: getattr(s, 'longavg%d' % count)
    divs = divisor(signal, len(data))
    return numpymap(lambda pair: pair[0] / max(0.01, pair[1]),
                    zip(data, divs))
def fallingMax(f, minf=lambda s: 0.5, cutoff=0.95, gravity=lambda s: 0.9):
    """Per-channel 'falling maximum' of f(signal): a channel's recorded
    peak is refreshed whenever the new value comes within `cutoff` of
    it, and otherwise decays linearly with time at rate gravity(signal).
    Returns a foldp'd signal whose value is the (maxVal, maxTime) pair.

    NOTE(review): the previous state lists are mutated in place and then
    returned as the new state -- fine for foldp, but do not alias them.
    """
    def maxer(signal, prev):
        # prev contains:
        thisFrame = f(signal)
        if prev == None:
            # first frame: every channel peaks right now
            init = (thisFrame, [signal.t] * len(thisFrame))
            return (init, init)
        maxVal, maxTime = prev
        mins = minf(signal)
        try:
            s = sum(mins)
        except:
            s = mins  # minf may return a scalar rather than a sequence
        for i in range(0, len(thisFrame)):
            if thisFrame[i] > cutoff * maxVal[i] and s != 0:
                # Update
                maxVal[i] = thisFrame[i]
                maxTime[i] = signal.t
            else:
                # Fall
                maxVal[i] -= gravity(signal) * (signal.t - maxTime[i])
        return ((maxVal, maxTime), (maxVal, maxTime))
    return foldp(maxer, None)
def boopValue(t2, maxes):
    """Exponentially-decaying 'boop' envelope per channel: 1.0 at the
    instant of that channel's last recorded peak, decaying as t2 moves
    past it (time constant 1/9)."""
    _, peak_times = maxes
    decay = lambda stamp: math.exp(-(t2 - stamp) * 9)
    return numpy.array([decay(stamp) for stamp in peak_times])
def blend(f, rate=lambda s, val, avg: 0.3):
    """Exponential running blend of f(signal), element-wise.

    f    -- function of the signal returning a sequence of values.
    rate -- decay rate per frame: either a float or a callable
            (signal, value, current_avg) -> float; it is rescaled for
            the actual fps by adjustRate().
    Returns a foldp'd signal whose value is the blended numpy array.
    """
    def blender(signal, avg):
        vals = f(signal)
        l = len(vals)
        # None is the starting value
        if avg is None: avg = [0] * l
        for i in range(0, l):
            if isinstance(rate, float):
                r = rate
            elif hasattr(rate, '__call__'):
                r = rate(signal, vals[i], avg[i])
            else:
                # BUG FIX: the ValueError was previously constructed but
                # never raised, which then produced a confusing
                # NameError on `r` below.
                raise ValueError("rate of decay must be a float or a lambda")
            r = adjustRate(r, signal) # adjust based on fps
            avg[i] = avg[i] * r + vals[i] * (1-r)
        avg = numpy.array(avg)
        return (avg, avg) # (state, value) pair required by foldp
    return foldp(blender, None)
def adjustRate(r, signal):
    """Rescale a per-frame decay rate for the actual frame rate so the
    perceived decay speed is independent of fps.
    (Technique borrowed from MilkDrop -- thanks!)"""
    per_second = math.pow(r, signal.max_fps)
    return math.pow(per_second, 1.0 / signal.fps)
|
Python
| 111
| 30.972973
| 74
|
/src/phosphene/signalutil.py
| 0.5255
| 0.506622
|
shashi/phosphene
|
refs/heads/master
|
import os
from setuptools import setup
def read(fname):
    """Return the contents of `fname`, resolved relative to this file.

    FIX: uses a context manager so the file handle is closed promptly
    (the original leaked the handle until garbage collection).
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as f:
        return f.read()
# Package metadata -- executed at install time by pip/setuptools.
setup(
    name = "phosphene",
    version = "0.0.1",
    author = "Shashi Gowda",
    author_email = "shashigowda91@gmail.com",
    description = ("A library for music processing and visualization"),
    license = "MIT",
    keywords = "music audio dsp visualization",
    url = "https://github.com/shashi/phosphene",
    packages=["phosphene"],
    long_description=read("../README.md"),
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Topic :: Multimedia :: Sound/Audio :: Analysis",
        "License :: OSI Approved :: MIT License",
    ],
)
|
Python
| 23
| 28.347826
| 71
|
/src/setup.py
| 0.611852
| 0.605926
|
shashi/phosphene
|
refs/heads/master
|
import serial
import numpy
import math
from device import Device
from cubelib import emulator
from cubelib import mywireframe as wireframe
from animations import *
import time
import threading
# A class for the cube
class Cube(Device):
    """LED-cube device: a dimension^3 boolean frame buffer plus
    serialization to the byte protocol the cube's controller expects."""
    def __init__(self, port, dimension=10, emulator=False):
        # Register on the serial port and allocate the boolean frame buffer.
        Device.__init__(self, "Cube", port)
        self.array = numpy.array([[\
            [0]*dimension]*dimension]*dimension, dtype='bool')
        self.dimension = dimension
        self.emulator = emulator  # render via on-screen emulator instead of hardware
        self.name = "Cube"
    def set_led(self, x, y, z, level=1):
        """Set a single voxel (truthy level = on)."""
        self.array[x][y][z] = level
    def get_led(self, x, y, z):
        """Return a single voxel's current level."""
        return self.array[x][y][z]
    def takeSignal(self, signal):
        # The cube derives nothing from the audio signal directly.
        pass
    def toByteStream(self):
        """Pack the frame buffer into the wire format: one run of
        ceil(dimension^2 / 8) bytes per z-layer, bits filled LSB-first,
        with the first `discardBits` bit positions of each layer unused.
        """
        # 104 bits per layer, first 4 bits waste.
        bytesPerLayer = int(math.ceil(self.dimension**2 / 8.0))
        print bytesPerLayer
        discardBits = bytesPerLayer * 8 - self.dimension**2
        print discardBits
        bts = bytearray(bytesPerLayer*self.dimension)
        pos = 0
        mod = 0
        for layer in self.array:
            mod = discardBits  # skip the padding bits at the start of each layer
            for row in layer:
                for bit in row:
                    if bit: bts[pos] |= 1 << mod
                    else: bts[pos] &= ~(1 << mod)
                    mod += 1
                    if mod == 8:
                        mod = 0
                        pos += 1
        return bts
    def redraw(self, wf=None, pv=None):
        """Push the current frame to the emulator (wf = wireframe model,
        pv = projection viewer); hardware streaming happens elsewhere
        (see sendingThread in __main__)."""
        if self.emulator:
            wf.setVisible(emulator.findIndexArray(self.array))
            pv.run()
if __name__ == "__main__":
    # Manual test harness: stream frames to a real cube over serial
    # while an animation mutates the in-memory frame buffer.
    cube = Cube("/dev/ttyACM0")
    #pv = emulator.ProjectionViewer(640,480)
    #wf = wireframe.Wireframe()
    #pv.createCube(wf)
    count = 0
    start = (0, 0, 0)
    point = (0,0)
    #fillCube(cube,0)
    #cube.redraw()
    #time.sleep(100)
    def sendingThread():
        # Continuously push whole frames: an 'S' start marker, then 130
        # bytes, throttled to ~10ms/byte, expecting a '.' ack per byte.
        while True:
            cube.port.write("S")
            bs = cube.toByteStream()
            for i in range(0, 130):
                time.sleep(0.01)
                cube.port.write(chr(bs[i]))
                print "wrote", bs[i]
                assert(cube.port.read() == '.')
    t = threading.Thread(target=sendingThread)
    t.start()
    #fillCube(cube,0)
    #cube.set_led(9,9,9)
    #for x in range(0, 9):
    #    for y in range(0, 9):
    #        for z in range(0, 9):
    #            cube.set_led(x, y, z, 1)
    #            time.sleep(1)
    # Animation loop: exactly one effect is active; the alternatives are
    # kept commented out for quick experimentation.
    while True:
        #wireframeCube(cube,(1,1,1),(9,9,9))
        fillCube(cube, 1)
        #planeBounce(cube,(count/20)%2+1,count%20)
        #planeBounce(cube,1,count)
        #start = wireframeExpandContract(cube,start)
        #rain(cube,count,5,10)
        #time.sleep(.1)
        #point = voxel(cube,count,point)
        #sine_wave(cube,count)
        #pyramids(cube,count)
        #side_waves(cube,count)
        #fireworks(cube,4)
        #technites(cube, count)
        #setPlane(cube,1,(counter/100)%10,1)
        #setPlane(cube,2,0,1)
        #stringPrint(cube,'TECHNITES',count)
        #moveFaces(cube)
        #cube.set_led(0,0,0)
        #cube.set_led(0,0,1)
        cube.redraw()
        count += 1
        time.sleep(0.1)
|
Python
| 113
| 27.212389
| 66
|
/src/apps/devices/cube.py
| 0.530574
| 0.500784
|
shashi/phosphene
|
refs/heads/master
|
import scipy
import numpy
from util import *
def fftIdx(Fs, Hz, n):
    """Return the index of the bin containing frequency `Hz` in an
    n-point FFT of a signal sampled at `Fs`.

    BUG FIX: the bin index is Hz / (bin width) = Hz * n / Fs; the
    original computed Fs / n * Hz (bin width times Hz), which is wrong
    by a factor of (n / Fs) ** 2.
    """
    assert Hz <= Fs / 2  # frequency must be at or below Nyquist
    return round(float(Hz) * n / Fs)
memFftIdx = memoize(fftIdx)
def getNotes():
    """Frequency split-points (Hz) roughly following the equal-tempered
    scale: a 0 anchor, 101 semitone steps starting near C0 (16.35 Hz,
    each offset by +1), then two fixed top bins."""
    semitones = [16.35 * 2 ** (step / 12.0) + 1 for step in range(0, 101)]
    return [0] + semitones + [11050, 22100]
def group(n, fft, grouping=lambda i: i):
    """
    Put fft data into n bins by adding them.
    grouping function defines how things are grouped
        lambda i: i --> linear grouping
        lambda i: 2 ** i --> logarithmic

    n may also be a list/tuple of explicit split points, in which case
    `grouping` is ignored and len(n) - 1 bins are produced.

    NOTE(review): if n is an int and `grouping` is not callable,
    splitPoints is never assigned and the code below raises NameError --
    confirm callers always pass a callable (the default is one).
    """
    if isinstance(n, (list,tuple)):
        splitPoints = numpy.array(n, dtype=float)
        n = len(n) - 1
    elif hasattr(grouping, '__call__'):
        splitPoints = numpy.array([grouping(i) for i in range(0, n + 1)], \
                dtype=float)
    l = len(fft)
    # normalize split points to indices into the fft array
    splitIdx = splitPoints / abs(max(splitPoints)) * l
    splitIdx = [int(i) for i in splitIdx]
    #pdb.set_trace()
    # sum the fft magnitudes between consecutive split indices
    return numpy.array(
            [sum(fft[splitIdx[i-1]:splitIdx[i]]) for i in range(1, n + 1)])
def fft(samples, out_n, env=None, eq=None):
    """
    Returns the short time FFT at i,
    window width will be 1.5 * delta
    1 * delta after i and 0.5 * delta before

    samples -- the windowed sample buffer
    out_n   -- if truthy, group the magnitude spectrum into out_n bins
    env     -- if truthy, window with hamming * raised-sine envelope
    eq      -- if truthy, multiply by the equalize() boost curve
    """
    in_n = len(samples)
    if env:
        spectrum = abs(scipy.fft(samples * scipy.hamming(in_n) * envelope(in_n)))
    else:
        spectrum = abs(scipy.fft(samples))
    # NOTE(review): the float slice bounds (0.9*in_n/2, in_n/2) rely on
    # Python 2 + legacy numpy accepting float indices; modern numpy
    # raises here.  The 0.9 factor trims the top of the half-spectrum.
    if out_n:
        if eq:
            return group(out_n, spectrum[0:0.9*in_n/2]) * equalize(out_n)
        else:
            return group(out_n, spectrum[0:0.9*in_n/2])
    else:
        if eq:
            return spectrum[0:in_n/2] * equalize(in_n/2)
        else:
            return spectrum[0:in_n/2]
def equalize(N, scale=-0.02):
    """Equalization curve of length N: scale * log((N-i)/N), which is 0
    at bin 0 and (for negative `scale`) boosts higher bins
    progressively."""
    f = lambda i: scale * scipy.log((N-i) * 1.0/N)
    return numpymap(f, range(0, N))
equalize=memoize(equalize)  # cache: curve depends only on (N, scale)
def envelope(N, power=1):
    """Raised-sine window of length N (0 at the ends, 1 in the middle),
    optionally raised to `power` for a sharper taper."""
    mult = scipy.pi / N
    f = lambda i: pow(0.5 + 0.5 * scipy.sin(i*mult - scipy.pi / 2), power)
    return numpymap(f, range(0, N))
envelope=memoize(envelope)  # cache: window depends only on (N, power)
|
Python
| 75
| 26.146667
| 81
|
/src/phosphene/dsp.py
| 0.556483
| 0.52554
|
shashi/phosphene
|
refs/heads/master
|
import os
import subprocess
from hashlib import sha1

import pygame.mixer
import scipy.io.wavfile as wav
from pygame.sndarray import make_sound
# Set mixer defaults
pygame.mixer.pre_init(44100, 16, 2, 4096)
__all__ = ["read", "makeSound"]
def digest(string):
    """Return the hex SHA-1 of `string` (used to derive a stable
    temp-file name for each input path)."""
    return sha1(string).hexdigest()
def read(fname):
    """ Reads an audio file into a numpy array.
        returns frequency, samples
    """
    # mp3 decoding is delegated to the external `lame` binary, with the
    # decoded wav cached in /tmp keyed by a hash of the input path.
    # www.snip2code.com/Snippet/1767/Convert-mp3-to-numpy-array--Ugly--but-it
    suffix = digest(fname)[0:6]
    oname = '/tmp/tmp'+ suffix +'.wav'
    if not os.path.exists(oname):
        # Well, if you ctrl-c before conversion, you're going to
        # have to manually delete the file.
        # SECURITY/ROBUSTNESS FIX: pass an argument list instead of a
        # shell string, so filenames containing quotes, spaces or shell
        # metacharacters cannot break (or inject into) the command.
        subprocess.call(['lame', '--decode', fname, oname])
    # now read using scipy.io.wavfile
    data = wav.read(oname)
    # return samplingFrequency, samples
    return data[0], data[1]
def makeSound(samplingFreq, data):
    """ Make a Player object from raw data
        returns a pygame.mixer.Sound object
    """
    # Ugh! impurity
    # (Re)initialize the global mixer at the file's sample rate so the
    # Sound plays back at the correct speed.
    pygame.mixer.init(frequency=samplingFreq)
    return make_sound(data)
|
Python
| 44
| 26.704546
| 77
|
/src/phosphene/audio.py
| 0.654098
| 0.633607
|
shashi/phosphene
|
refs/heads/master
|
import numpy
import random
import time
from cubelib import mywireframe
from cubelib import emulator
# TODO:
# shiftPlane(axis, plane, delta)
# moves the plane along the axis by delta steps, if it exceeds dimensions, just clear it out, don't rotate.
# swapPlanes(axis1, plane1, axis2, plane2)
# rain should set random LEDs on the first plane (not a lot of them)
# and shift the plane along that axis by one step---Fixed
# and shift the plane along that axis by one step
#
# THINK:
# The python code keeps sending a 125 byte string to redraw the
# cube as often as it can, this contains 1000 bit values that the MSP
# handles. Now, in our code we have been using time.sleep() a lot.
# We probably can have a counter that each of these functions uses to
# advance its steps, and then increment / decrement that
# counter according to music
def wireframeCubeCenter(cube,size):
if size % 2 == 1:
size = size+1
half = size/2
start = cube.dimension/2 - half
end = cube.dimension/2 + half - 1
for x in range(0,cube.dimension):
for y in range(0,cube.dimension):
for z in range(0,cube.dimension):
cube.set_led(x,y,z,0)
for x in (start,end):
for y in (start,end):
for z in range(start,end+1):
cube.set_led(x,y,z)
cube.set_led(x,z,y)
cube.set_led(z,x,y)
def wireframeCube(cube,START,END):
x0,y0,z0 = START
x1,y1,z1 = END
print "start:",START,"end:",END
for x in range(0,cube.dimension):
for y in range(0,cube.dimension):
for z in range(0,cube.dimension):
cube.set_led(x,y,z,0)
for x in (x0,x1):
for y in (y0,y1):
if z0<z1:
for z in range(z0,z1+1):
cube.set_led(x,y,z)
print x,y,z, "set-1st condition"
else:
for z in range(z1,z0+1):
cube.set_led(x,y,z)
print x,y,z, "set-2nd condition"
for x in (x0,x1):
for z in (z0,z1):
if y0<y1:
for y in range(y0,y1+1):
cube.set_led(x,y,z)
print x,y,z, "Set - 1st"
else:
for y in range(y1,y0+1):
cube.set_led(x,y,z)
print x,y,z, "Set - 2nd"
for y in (y0,y1):
for z in (z0,z1):
if x0<x1:
for x in range(x0,x1+1):
cube.set_led(x,y,z)
print x,y,z, "SET - 1st"
else:
for x in range(x1,x0+1):
cube.set_led(x,y,z)
print x,y,z, "SET - 2nd"
def solidCubeCenter(cube,size):
if size % 2 == 1:
size = size+1
half = size/2
start = cube.dimension/2 - half
end = cube.dimension/2 + half
for x in range(0,cube.dimension):
for y in range(0,cube.dimension):
for z in range(0,cube.dimension):
cube.set_led(x,y,z,0)
for i in range(start,end):
for j in range(start,end):
for k in range(start,end):
cube.set_led(i,j,k)
def solidCube(cube, START, END):
    """Clear the cube, then light a solid axis-aligned box whose
    opposite corners are START and END (both inclusive)."""
    dim = cube.dimension
    (ax, ay, az), (bx, by, bz) = START, END
    # blank the whole cube first
    for x in range(0, dim):
        for y in range(0, dim):
            for z in range(0, dim):
                cube.set_led(x, y, z, 0)
    # fill the requested box, end-inclusive
    for x in range(ax, bx + 1):
        for y in range(ay, by + 1):
            for z in range(az, bz + 1):
                cube.set_led(x, y, z)
def setPlane(cube, axis, x, level=1):
    """Set one whole plane of the cube.

    axis  -- 1: the plane x = const; 2: y = const; anything else: z = const.
    x     -- index of the plane along that axis.
    level -- either a scalar int (fills the plane uniformly) or a
             dimension x dimension array of per-cell levels.

    GENERALIZATION: the uniform-fill plane was a hard-coded 10x10
    array; it now uses cube.dimension so non-10 cubes work too
    (identical behavior for the standard 10-cube).
    """
    plane = level
    if isinstance(level, int):
        plane = numpy.array([[level] * cube.dimension] * cube.dimension,
                            dtype=bool)
    if axis == 1:
        for i in range(0, cube.dimension):
            for j in range(0, cube.dimension):
                cube.set_led(x, i, j, plane[i][j])
    elif axis == 2:
        for i in range(0, cube.dimension):
            for j in range(0, cube.dimension):
                cube.set_led(i, x, j, plane[i][j])
    else:
        for i in range(0, cube.dimension):
            for j in range(0, cube.dimension):
                cube.set_led(i, j, x, plane[i][j])
def shiftPlane(cube,axis,plane,delta):
if axis == 1:
for i in range(0,cube.dimension):
for j in range(0,cube.dimension):
try:
cube.set_led(plane+delta,i,j,cube.get_led(plane,i,j))
cube.set_led(plane,i,j,0)
except:
cube.set_led(plane,i,j,0)
elif axis == 2:
for i in range(0,cube.dimension):
for j in range(0,cube.dimension):
try:
cube.set_led(i,plane+delta,j,cube.get_led(i,plane,j))
cube.set_led(i,plane,j,0)
except:
cube.set_led(i,plane,j,0)
else:
for i in range(0,cube.dimension):
for j in range(0,cube.dimension):
try:
cube.set_led(i,j,plane+delta,cube.get_led(i,j,plane))
cube.set_led(i,j,plane,0)
except:
cube.set_led(i,j,plane,0)
#def swapPlane(cube,axis,plane1,plane2):
def randPlane(cube,minimum,maximum):
    """Return a dimension x dimension boolean bitmap with between
    `minimum` and `maximum` randomly-placed set cells (collisions can
    make the actual count lower).

    NOTE(review): the loop variable `i` is shadowed by the list
    comprehensions -- harmless, but confusing.
    """
    array = numpy.array([[0]*cube.dimension]*cube.dimension,dtype = 'bool')
    for i in range(minimum,maximum):
        x = random.choice([i for i in range(0,cube.dimension)])
        y = random.choice([i for i in range(0,cube.dimension)])
        array[x][y] = 1
    return array
def wireframeExpandContract(cube,start=(0,0,0)):
(x0, y0, z0) = start
for i in range(0,cube.dimension):
j = cube.dimension - i - 1
if(x0 == 0):
if(y0 == 0 and z0 == 0):
wireframeCube(cube,(x0,y0,z0),(x0+i,y0+i,z0+i))
elif(y0 == 0):
wireframeCube(cube,(x0,y0,z0),(x0+i,y0+i,z0-i))
elif(z0 == 0):
wireframeCube(cube,(x0,y0,z0),(x0+i,y0-i,z0+i))
else:
wireframeCube(cube,(x0,y0,z0),(x0+i,y0-i,z0-i))
else:
if(y0 == 0 and z0 == 0):
wireframeCube(cube,(x0,y0,z0),(x0-i,y0+i,z0+i))
elif(y0 == 0):
wireframeCube(cube,(x0,y0,z0),(x0-i,y0+i,z0-i))
elif(z0 == 0):
wireframeCube(cube,(x0,y0,z0),(x0-i,y0-i,z0+i))
else:
wireframeCube(cube,(x0,y0,z0),(x0-i,y0-i,z0-i))
time.sleep(0.1)
cube.redraw()
max_coord = cube.dimension - 1
corners = [0,max_coord]
x0 = random.choice(corners)
y0 = random.choice(corners)
z0 = random.choice(corners)
for j in range(0,cube.dimension):
i = cube.dimension - j - 1
if(x0 == 0):
if(y0 == 0 and z0 == 0):
wireframeCube(cube,(x0,y0,z0),(x0+i,y0+i,z0+i))
elif(y0 == 0):
wireframeCube(cube,(x0,y0,z0),(x0+i,y0+i,z0-i))
elif(z0 == 0):
wireframeCube(cube,(x0,y0,z0),(x0+i,y0-i,z0+i))
else:
wireframeCube(cube,(x0,y0,z0),(x0+i,y0-i,z0-i))
else:
if(y0 == 0 and z0 == 0):
wireframeCube(cube,(x0,y0,z0),(x0-i,y0+i,z0+i))
elif(y0 == 0):
wireframeCube(cube,(x0,y0,z0),(x0-i,y0+i,z0-i))
elif(z0 == 0):
wireframeCube(cube,(x0,y0,z0),(x0-i,y0-i,z0+i))
else:
wireframeCube(cube,(x0,y0,z0),(x0-i,y0-i,z0-i))
cube.redraw()
time.sleep(0.1)
return (x0, y0, z0) # return the final coordinate
def rain(cube, counter, minimum, maximum, axis=3):
    """One frame of 'rain': shift the cube one step along `axis`, then
    seed the far plane with between `minimum` and `maximum` random
    drops.

    BUG FIX: the shift previously hard-coded axis 3, so the `axis`
    parameter only affected where new drops were placed; it now drives
    the shift too.  The far plane index is also generalized from the
    hard-coded 9 to cube.dimension - 1.
    """
    shiftCube(cube, axis, 1)
    setPlane(cube, axis, cube.dimension - 1,
             randPlane(cube, minimum, maximum))
def planeBounce(cube,axis,counter):
    """Bounce a lit plane back and forth along `axis`; `counter` drives
    the position with a 20-frame period (frames 0-9 forward, 11-19
    back).  The previous frame's plane is cleared first."""
    i = counter%20
    if i:
        if i<10: #to turn off the previous plane
            setPlane(cube,axis,i-1,0)
        elif i>10:
            setPlane(cube,axis,20-i,0)
    if i<10:
        setPlane(cube,axis,i)
    elif i>10:
        setPlane(cube,axis,19-i)
def square(cube, size, translate=(0, 0)):
    """Return a dimension x dimension bitmap holding a size x size
    filled square whose top-left corner is offset by `translate`."""
    dx, dy = translate
    grid = numpy.array([[0] * cube.dimension] * cube.dimension)
    for row in range(dx, dx + size):
        for col in range(dy, dy + size):
            grid[row][col] = 1
    return grid
def distance(point1, point2):
    """Euclidean distance between two 2-D points."""
    dx = point1[0] - point2[0]
    dy = point1[1] - point2[1]
    return numpy.sqrt(dx * dx + dy * dy)
def circle(cube, radius, translate=(0, 0)):
    """Return a dimension x dimension bitmap of a filled circle of the
    given radius; `translate` shifts the circle's bounding box."""
    ox, oy = translate
    grid = numpy.array([[0] * cube.dimension] * cube.dimension)
    for i in range(0, 2 * radius):
        for j in range(0, 2 * radius):
            # on or inside the circle centred at (radius, radius)?
            if numpy.sqrt((i - radius) ** 2 + (j - radius) ** 2) <= radius:
                grid[i + ox][j + oy] = 1
    return grid
def wierdshape(cube,diagonal,translate=(0,0)):
x1,y1 = translate
array = numpy.array([[0]*cube.dimension] * cube.dimension)
if diagonal%2 == 0:
diagonal-=1
for y in range(0,diagonal):
for x in range(0,diagonal):
if(y>=diagonal/2):
if(x<=diagonal/2):
if(x>=y):
array[x][y] = 1
else:
if(x<=y):
array[x][y] = 1
else:
if(x<=diagonal/2):
if(x+y>=diagonal/2):
array[x][y] = 1
else:
if(x+y<=diagonal/2):
array[x][y] = 1
return array
def fillCube(cube, level=1):
    """Set every LED in the cube to `level` (nonzero = lit, 0 = dark)."""
    side = range(0, cube.dimension)
    for x in side:
        for y in side:
            for z in side:
                cube.set_led(x, y, z, level)
def voxel(cube,counter,point):
    """Voxel-bounce effect: seed random voxels on the top/bottom planes,
    then every 9th frame move one randomly-chosen column's voxel one
    step toward the opposite face.

    NOTE(review): the `x, y` unpacked from `point` are immediately
    shadowed by the seeding loops and the random choices below, so the
    incoming `point` is effectively unused except on non-seeding
    frames -- confirm intent.
    """
    x,y = point
    if(counter==0):
        # initial seed: one voxel per column, on either the bottom or
        # top plane at random
        fillCube(cube,0)
        for x in range(0,cube.dimension):
            for y in range(0,cube.dimension):
                cube.set_led(x,y,random.choice([0,cube.dimension-1]))
    if counter%9==0:
        # pick a column and advance its voxel one step
        x = random.choice([i for i in range(0,cube.dimension)])
        y = random.choice([i for i in range(0,cube.dimension)])
        if cube.get_led(x,y,counter%9)==1:
            cube.set_led(x,y,counter%9+1)
            cube.set_led(x,y,counter%9,0)
        else:
            cube.set_led(x,y,8-(counter%9))
            cube.set_led(x,y,9-(counter%9),0)
    return (x,y)
def shiftCube(cube, axis, delta):
    """Shift the cube's contents `delta` steps toward index 0 along
    `axis` (3 = z, 2 = y, 1 = x); vacated cells are cleared.

    BUG FIX: the axis == 2 branch cleared (x, y, z+delta) -- an axis-3
    coordinate -- instead of the cell it had just moved from,
    (x, z+delta, y).  Copy-paste error; axes 1 and 3 were correct.
    Also generalized the hard-coded 10/9 loop bounds to cube.dimension.
    """
    for x in range(0, cube.dimension):
        for y in range(0, cube.dimension):
            for z in range(0, cube.dimension - 1):
                if axis == 3:
                    cube.set_led(x, y, z, cube.get_led(x, y, z + delta))
                    cube.set_led(x, y, z + delta, 0)
                elif axis == 2:
                    cube.set_led(x, z, y, cube.get_led(x, z + delta, y))
                    cube.set_led(x, z + delta, y, 0)
                elif axis == 1:
                    cube.set_led(z, x, y, cube.get_led(z + delta, x, y))
                    cube.set_led(z + delta, x, y, 0)
def pyramids(cube,counter,axis = 3):
    """Scrolling pyramids: paint a centred square on the far plane whose
    size grows for 10 frames and shrinks for the next 10, while the cube
    scrolls along `axis` -- stacked slices form pyramid shapes."""
    if(counter%20 <cube.dimension):
        # growing phase: side length 1..10
        size = counter%10 + 1
        setPlane(cube,axis,cube.dimension-1,square(cube,counter%10 + 1,((cube.dimension-counter%10-1)/2,(cube.dimension-counter%10-1)/2)))
        shiftCube(cube,axis,1)
    else:
        # shrinking phase: side length 9..1, kept centred
        size = 9 - (counter-10)%10
        translate = (cube.dimension - size)/2
        setPlane(cube,axis,cube.dimension-1,square(cube,size,(translate,translate)))
        shiftCube(cube,axis,1)
    time.sleep(0)
    print "counter = ",counter,"size=",size
def sine_wave(cube,counter):
    """Radial sine-wave surface: each column's lit height follows
    sin(distance-from-centre + counter), offset by counter%10.

    NOTE(review): int(counter%10 + sin(...)) can in principle reach
    cube.dimension when counter%10 == 9 and the sine term hits 1.0
    exactly -- confirm set_led bounds-checking before relying on it.
    """
    fillCube(cube,0)
    center = (cube.dimension-1)/2.0
    for x in range(0,cube.dimension):
        for y in range(0,cube.dimension):
            dist = distance((x,y),(center,center))
            cube.set_led(x,y,int(counter%10+numpy.sin(dist+counter)))
def side_waves(cube,counter):
    """Travelling surface waves: the wave origin orbits with `counter`
    (sin/cos), and each column's lit height follows a sine of its
    distance from that moving origin."""
    fillCube(cube,0)
    origin_x=4.5;
    origin_y=4.5;
    for x in range(0,10):
        for y in range(0,10):
            # origin recomputed per cell; value depends only on counter
            origin_x=numpy.sin(counter);
            origin_y=numpy.cos(counter);
            z=int(numpy.sin(numpy.sqrt(((x-origin_x)*(x-origin_x))+((y-origin_y)*(y-origin_y))))+counter%10);
            cube.set_led(x,y,z);
def fireworks(cube,n):
origin_x = 3;
origin_y = 3;
origin_z = 3;
#Particles and their position, x,y,z and their movement,dx, dy, dz
origin_x = random.choice([i for i in range(0,4)])
origin_y = random.choice([i for i in range(0,4)])
origin_z = random.choice([i for i in range(0,4)])
origin_z +=5;
origin_x +=2;
origin_y +=2;
particles = [[None for _ in range(6)] for _ in range(n)]
print particles
#shoot a particle up in the air value was 600+500
for e in range(0,origin_z):
cube.set_led(origin_x,origin_y,e,1);
time.sleep(.05+.02*e);
cube.redraw()
fillCube(cube,0)
for f in range(0,n):
#Position
particles[f][0] = origin_x
particles[f][1] = origin_y
particles[f][2] = origin_z
rand_x = random.choice([i for i in range(0,200)])
rand_y = random.choice([i for i in range(0,200)])
rand_z = random.choice([i for i in range(0,200)])
try:
#Movement
particles[f][3] = 1-rand_x/100.0 #dx
particles[f][4] = 1-rand_y/100.0 #dy
particles[f][5] = 1-rand_z/100.0 #dz
except:
print "f:",f
#explode
for e in range(0,25):
slowrate = 1+numpy.tan((e+0.1)/20)*10
gravity = numpy.tan((e+0.1)/20)/2
for f in range(0,n):
particles[f][0] += particles[f][3]/slowrate
particles[f][1] += particles[f][4]/slowrate
particles[f][2] += particles[f][5]/slowrate;
particles[f][2] -= gravity;
cube.set_led(int(particles[f][0]),int(particles[f][1]),int(particles[f][2]))
time.sleep(1000)
def T():
    """10x10 bitmap of the letter 'T': a full-width bar in columns
    j < 3 plus a 4-row stem covering rows 3..6, columns 3..9."""
    glyph = numpy.array([[0] * 10] * 10)
    glyph[0:10, 0:3] = 1   # top bar
    glyph[3:7, 3:10] = 1   # stem
    return glyph
def E():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,3):
plane[i][j] = 1
for j in range(4,7):
plane[i][j] = 1
for j in range(8,10):
plane[i][j] = 1
for i in range(0,3):
for j in range(0,10):
plane[i][j] = 1
return plane
def B():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,2):
plane[i][j] = 1
for j in range(4,6):
plane[i][j] = 1
for j in range(8,10):
plane[i][j] = 1
for j in range(0,10):
for i in range(0,3):
plane[i][j] = 1
for i in range(7,10):
plane[i][j] = 1
plane[9][0] = 0
plane[9][9] = 0
return plane
def A():
plane = numpy.array([[0]*10] *10)
for i in range(0,10):
for j in range(0,2):
plane[i][j] = 1
for j in range(4,7):
plane[i][j] = 1
for j in range(0,10):
for i in range(0,3):
plane[i][j] = 1
for i in range(7,10):
plane[i][j] = 1
return plane
def C():
plane = numpy.array([[0]*10] *10)
for i in range(0,10):
for j in range(0,3):
plane[i][j] = 1
for j in range(7,10):
plane[i][j] = 1
for i in range(0,3):
for j in range(0,10):
plane[i][j] = 1
return plane
def D():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,2):
plane[i][j] = 1
for j in range(8,10):
plane[i][j] = 1
for j in range(0,10):
for i in range(0,2):
plane[i][j] = 1
for i in range(8,10):
plane[i][j] = 1
plane[9][0] = 0
plane[9][9] = 0
return plane
def F():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,3):
plane[i][j] = 1
for j in range(4,7):
plane[i][j] = 1
for i in range(0,3):
for j in range(0,10):
plane[i][j] = 1
return plane
def H():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(4,7):
plane[i][j] = 1
for i in range(0,3):
for j in range(0,10):
plane[i][j] = 1
for i in range(7,10):
for j in range(0,10):
plane[i][j] = 1
return plane
def G():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,3):
plane[i][j] = 1
for j in range(7,10):
plane[i][j] = 1
for i in range(0,3):
for j in range(0,10):
plane[i][j] = 1
for i in range(7,10):
for j in range(4,10):
plane[i][j] = 1
for i in range(4,10):
for j in range(4,6):
plane[i][j] = 1
return plane
def J():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,3):
plane[i][j] = 1
for i in range(3,7):
for j in range(3,10):
plane[i][j] = 1
for i in range(0,3):
for j in range(7,10):
plane[i][j] = 1
return plane
def K():
plane = numpy.array([[0]*10]*10)
for j in range(0,10):
for i in range(0,2):
plane[i][j] = 1
for i in range(0,10):
for j in range(0,10):
if(i == j):
plane[i][5+j/2] = 1
try:
plane[i-1][4+j/2] = 1
plane[i+1][4+j/2] = 1
except:
print "Blaaah"
if(i+j==9):
plane[i][j/2] = 1
try:
plane[i-1][j/2] = 1
plane[i+1][j/2] = 1
except:
print "Blaaah"
plane[9][5] = 0
plane[9][4] = 0
return plane
def L():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(7,10):
plane[i][j] = 1
for i in range(0,3):
for j in range(0,10):
plane[i][j] = 1
return plane
def M():
plane = numpy.array([[0]*10] * 10)
for i in range(0,2):
for j in range(0,10):
plane[i][j] = 1
for i in range(8,10):
for j in range(0,10):
plane[i][j] = 1
#for i in range(4,7):
#for j in range(0,10):
# plane[i][j] = 1
for i in range(0,10):
for j in range(0,10):
if(i == j):
plane[i/2][j] = 1
try:
plane[i/2][j-1] = 1
plane[i/2][j+1] = 1
except:
print "Blaaah"
if(i+j==9):
plane[5 + i/2][j] = 1
try:
plane[5+i/2][j-1] = 1
plane[5+i/2][j+1] = 1
except:
print "Blaaah"
return plane
def N():
plane = numpy.array([[0]*10] * 10)
for i in range(0,3):
for j in range(0,10):
plane[i][j] = 1
for i in range(7,10):
for j in range(0,10):
plane[i][j] = 1
for i in range(0,10):
for j in range(0,10):
if(i == j):
plane[i][j] = 1
try:
plane[i][j-1] = 1
plane[i][j+1] = 1
except:
print "Blaaah"
return plane
def O():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,3):
plane[i][j] = 1
for j in range(7,10):
plane[i][j] = 1
for j in range(0,10):
for i in range(0,3):
plane[i][j] = 1
for i in range(7,10):
plane[i][j] = 1
return plane
def P():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,2):
plane[i][j] = 1
for j in range(4,7):
plane[i][j] = 1
for i in range(0,3):
for j in range(0,10):
plane[i][j] = 1
for i in range(7,10):
for j in range(0,4):
plane[i][j] = 1
return plane
def Q():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,2):
plane[i][j] = 1
for j in range(8,10):
plane[i][j] = 1
for j in range(0,10):
for i in range(0,2):
plane[i][j] = 1
for i in range(8,10):
plane[i][j] = 1
for i in range(5,10):
for j in range(5,10):
if(i == j):
plane[i][j] = 1
try:
plane[i][j-1] = 1
plane[i][j+1] = 1
except:
print "Blaaah"
return plane
def R():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,3):
plane[i][j] = 1
for j in range(4,6):
plane[i][j] = 1
for i in range(0,3):
for j in range(0,10):
plane[i][j] = 1
for i in range(7,10):
for j in range(0,4):
plane[i][j] = 1
for i in range(0,10):
for j in range(0,10):
if(i == j):
plane[i][5+j/2] = 1
try:
plane[i-1][4+j/2] = 1
plane[i+1][4+j/2] = 1
except:
print "Blaaah"
return plane
def I():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,3):
plane[i][j] = 1
for j in range(7,10):
plane[i][j] = 1
for i in range(3,7):
for j in range(3,10):
plane[i][j] = 1
return plane
def S():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,3):
plane[i][j] = 1
for j in range(4,7):
plane[i][j] = 1
for j in range(8,10):
plane[i][j] = 1
for i in range(0,3):
for j in range(0,7):
plane[i][j] = 1
for i in range(7,10):
for j in range(4,10):
plane[i][j] = 1
return plane
def U():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(7,10):
plane[i][j] = 1
for j in range(0,10):
for i in range(0,3):
plane[i][j] = 1
for i in range(7,10):
plane[i][j] = 1
return plane
def V():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,10):
if(i == j):
plane[i/2][j] = 1
try:
plane[i/2][j-1] = 1
plane[i/2][j+1] = 1
except:
print "Blaaah"
if(i+j==9):
plane[5 + i/2][j] = 1
try:
plane[5+i/2][j-1] = 1
plane[5+i/2][j+1] = 1
except:
print "Blaaah"
plane[0][9] = 0
plane[9][9] = 0
return plane
def W():
plane = numpy.array([[0]*10] * 10)
for i in range(0,2):
for j in range(0,10):
plane[i][j] = 1
for i in range(8,10):
for j in range(0,10):
plane[i][j] = 1
#for i in range(4,7):
#for j in range(0,10):
# plane[i][j] = 1
for i in range(0,10):
for j in range(0,10):
if(i == j):
plane[5+i/2][j] = 1
try:
plane[5+i/2][j+2] = 1
plane[5+i/2][j+1] = 1
except:
print "Blaaah"
if(i+j==9):
plane[i/2][j] = 1
try:
plane[i/2][j+2] = 1
plane[i/2][j+1] = 1
except:
print "Blaaah"
return plane
def X():
    """10x10 bitmap of the letter 'X': both diagonals, thickened by one
    cell on each side.

    NOTE(review): plane[i][j-1] with j == 0 wraps to the last column
    (Python negative indexing) instead of raising, so the try/except
    only catches the j+1 == 10 overflow -- the glyph may have stray
    pixels in column 9; confirm on hardware.
    """
    plane = numpy.array([[0]*10]*10)
    for i in range(0,10):
        for j in range(0,10):
            if(i == j):
                # main diagonal, thickened
                plane[i][j] = 1
                try:
                    plane[i][j-1] = 1
                    plane[i][j+1] = 1
                except:
                    print "Blaaah"
            if(i+j == 9):
                # anti-diagonal, thickened
                plane[i][j] = 1
                try:
                    plane[i][j-1] = 1
                    plane[i][j+1] = 1
                except:
                    print "Blaaah"
    return plane
def Y():
plane = numpy.array([[0]*10]*10)
for i in range(0,10):
for j in range(0,5):
if(i == j):
plane[i][j] = 1
try:
plane[i][j-1] = 1
plane[i][j+1] = 1
except:
print "Blaaah"
if(i+j == 9):
plane[i][j] = 1
try:
plane[i][j-1] = 1
plane[i][j+1] = 1
except:
print "Blaaah"
for i in range(4,6):
for j in range(5,10):
plane[i][j] = 1
plane[0][9] = 0
plane[0][0] = 0
return plane
def Z():
plane = numpy.array([[0]*10]*10)
for i in range(0,10):
for j in range(0,10):
if(i+j == 9):
plane[i][j] = 1
try:
plane[i][j-1] = 1
plane[i][j+1] = 1
except:
print "Blaaah"
for i in range(0,10):
for j in range(0,3):
plane[i][j] = 1
for j in range(7,10):
plane[i][j] = 1
return plane
def stringPrint(cube, string, counter=0, axis=3):
    """Display `string` on the cube one character per 10 frames: at the
    start of each character period the cube is blanked, then the
    character's glyph is painted on the far plane every frame.
    Characters without a glyph scroll the cube instead."""
    if counter % 10 == 0:
        fillCube(cube, 0)
    ch = string[(counter / 10) % len(string)]
    # dispatch table replaces the original 26-way elif chain
    glyphs = {'A': A, 'B': B, 'C': C, 'D': D, 'E': E, 'F': F, 'G': G,
              'H': H, 'I': I, 'J': J, 'K': K, 'L': L, 'M': M, 'N': N,
              'O': O, 'P': P, 'Q': Q, 'R': R, 'S': S, 'T': T, 'U': U,
              'V': V, 'W': W, 'X': X, 'Y': Y, 'Z': Z}
    painter = glyphs.get(ch)
    if painter is not None:
        setPlane(cube, axis, 9, painter())
    else:
        shiftCube(cube, axis, 1)
def stringfly(cube,axis):
    # Scroll the cube's current contents one step along `axis`
    # (used between glyph frames by technites / stringPrint).
    shiftCube(cube,axis,1)
def technites(cube, counter, axis=3):
    """Spell out 'TECHNITES': every 10 frames (within a 90-frame cycle)
    blank the cube and paint the next letter on the far plane; on all
    other frames scroll the cube along `axis`.

    BUG FIX: the glyph functions (T, E, C, ...) take no arguments but
    were called as T(cube) etc., raising TypeError at runtime.  The
    stray argument is removed, matching how stringPrint calls them.
    The unused `alpha` local is also dropped.
    """
    phase = counter % 90
    if phase == 0:
        fillCube(cube, 0)
        setPlane(cube, axis, 9, T())
    elif phase == 10:
        fillCube(cube, 0)
        setPlane(cube, axis, 9, E())
    elif phase == 20:
        fillCube(cube, 0)
        setPlane(cube, axis, 9, C())
    elif phase == 30:
        fillCube(cube, 0)
        setPlane(cube, axis, 9, H())
    elif phase == 40:
        fillCube(cube, 0)
        setPlane(cube, axis, 9, N())
    elif phase == 50:
        fillCube(cube, 0)
        setPlane(cube, axis, 9, I())
    elif phase == 60:
        fillCube(cube, 0)
        setPlane(cube, axis, 9, T())
    elif phase == 70:
        fillCube(cube, 0)
        setPlane(cube, axis, 9, E())
    elif phase == 80:
        fillCube(cube, 0)
        setPlane(cube, axis, 9, S())
    else:
        stringfly(cube, axis)
def moveFaces(cube):
    """Rotate the contents of four faces of the cube one step: each
    face's pixels are captured (shifted by one row, with the edge row
    carried over), the cube is blanked, and the shifted faces are
    written back onto the z=0, x=0, z=9 and x=9 planes.

    NOTE(review): capture uses get_led with mixed coordinate orders --
    confirm the intended rotation direction on hardware.
    """
    Z0 = numpy.array([[0]*cube.dimension]*cube.dimension)
    Z9 = numpy.array([[0]*cube.dimension]*cube.dimension)
    X0 = numpy.array([[0]*cube.dimension]*cube.dimension)
    X9 = numpy.array([[0]*cube.dimension]*cube.dimension)
    # capture z=0 face, shifted one row toward index 0
    for i in range(1,cube.dimension):
        for j in range(0,cube.dimension):
            X0[i-1][j] = cube.get_led(i,j,0)
    for j in range(0,cube.dimension):
        X0[9][j] = cube.get_led(9,j,0)
    # capture x=0 face, shifted one row away from index 0
    for i in range(0,cube.dimension-1):
        for j in range(0,cube.dimension):
            Z0[i+1][j] = cube.get_led(0,j,i)
    for j in range(0,cube.dimension):
        Z0[0][j] = cube.get_led(0,j,0)
    # capture z=9 face, shifted one row away from index 0
    for i in range(0,cube.dimension-1):
        for j in range(0,cube.dimension):
            X9[i+1][j] = cube.get_led(i,j,9)
    for j in range(0,cube.dimension):
        X9[0][j] = cube.get_led(0,j,9)
    # capture x=9 face, shifted one row toward index 0
    for i in range(1,cube.dimension):
        for j in range(0,cube.dimension):
            Z9[i-1][j] = cube.get_led(9,j,i)
    for j in range(0,cube.dimension):
        Z9[9][j] = cube.get_led(9,j,9)
    # repaint: blank everything, then write the shifted faces back
    fillCube(cube,0)
    setPlane(cube,3,0,X0)
    setPlane(cube,1,0,Z0)
    setPlane(cube,3,9,X9)
    setPlane(cube,1,9,Z9)
|
Python
| 992
| 26.765121
| 138
|
/src/apps/devices/animations.py
| 0.493211
| 0.440096
|
shashi/phosphene
|
refs/heads/master
|
import os, sys
dirname = os.path.dirname
here = os.path.abspath(__file__)
parentdir = dirname(dirname(here))
sys.path.append(parentdir)
|
Python
| 6
| 21.833334
| 34
|
/src/apps/pathsetup.py
| 0.737226
| 0.737226
|
shashi/phosphene
|
refs/heads/master
|
import numpy
from threading import Thread # this is for the repl
__all__ = ['memoize', 'memoizeBy', 'numpymap', 'indexable', 'reverse']
# Helper functions
def memoize(f, key=None):
    """Memoize `f` on the string form of its positional arguments.

    FIX: replaced dict.has_key() (removed in Python 3, non-idiomatic in
    Python 2) with the `in` operator; behavior is identical.
    NOTE(review): the `key` parameter is accepted but unused -- kept
    only for interface compatibility.
    """
    mem = {}
    def g(*args):
        k = str(args)
        if k not in mem:
            mem[k] = f(*args)
        return mem[k]
    return g
def memoizeBy(f, x, *args):
    """Call f(*args), keyed on `x` rather than on the real arguments.

    NOTE(review): memoize(...) builds a fresh cache on every call, so
    nothing is actually remembered across calls to memoizeBy -- confirm
    whether the memoized wrapper was meant to be created once.
    """
    # memoize by something else.
    return memoize(lambda k: f(*args))(x)
def numpymap(f, X):
    """Map `f` over `X` and return the result as a numpy array.

    FIX: builds an explicit list instead of passing map's return value
    to numpy.array -- identical on Python 2, and avoids numpy wrapping
    the iterator `map` returns on Python 3 in a 0-d object array.
    """
    return numpy.array([f(x) for x in X])
def indexable(f, offset=0):
    """Wrap a function (or sequence) in a list-like object supporting
    integer indexing and slices; `offset` is added to every index.

    FIXES:
      * slice steps were discarded -- `1 if i.step is None else 0`
        passed range() a step of 0 (ValueError) whenever a step was
        given; the actual step is now used;
      * string `raise` statements (illegal since Python 2.6) are
        replaced with TypeError instances.
    """
    if not hasattr(f, '__call__'):
        # Assume f is a sequence type; probe that it supports indexing.
        try: f[0]
        except:
            raise TypeError("Are you sure what you are trying "
                            "to make indexable is a function or "
                            "a sequence type?")
        g = f
        f = lambda i: g[i]  # read through to the underlying sequence
    class Indexable:
        def getFunction(self):
            return f
        def __getitem__(self, *i):
            if len(i) == 1:
                i = i[0]
            if isinstance(i, int):
                return f(i + offset)
            # Handle range queries; the step defaults to 1.
            elif isinstance(i, slice):
                step = 1 if i.step is None else i.step
                return [f(j + offset) for j in \
                        range(i.start, i.stop, step)]
            else:
                raise TypeError("You will have to implement that crazy indexing.")
        def __len__(self):
            # advertised as empty; callers iterate via explicit indices
            return 0
    return Indexable()
def windowedMap(f, samples, width, overlap):
    """Apply *f* to successive windows of *samples*.

    Each window is ``width`` samples long and consecutive windows share
    ``overlap`` samples, i.e. the window start advances by
    ``width - overlap``.  Returns the list of per-window results.

    NOTE(review): the original body returned an undefined name ``res``
    (a guaranteed NameError); this implementation follows the meaning
    suggested by the signature -- confirm against callers.
    """
    step = width - overlap
    if step <= 0:
        raise ValueError("overlap must be smaller than width")
    return [f(samples[i:i + width])
            for i in range(0, len(samples) - width + 1, step)]
def reverse(l):
    """Return a new list with the elements of *l* in reverse order."""
    return list(reversed(l))
|
Python
| 63
| 26.206348
| 79
|
/src/phosphene/util.py
| 0.497376
| 0.493294
|
shashi/phosphene
|
refs/heads/master
|
from devices.cubelib import emulator
from devices.cubelib import mywireframe as wireframe
from devices.animations import *
pv = emulator.ProjectionViewer(640,480)
wf = wireframe.Wireframe()
def cubeProcess(cube, signal, count):
    """Per-frame driver for the LED cube device.

    Rebuilds the wireframe cube, runs the currently selected animation
    (``technites``; the commented-out calls are alternatives), redraws
    through the emulator, and returns the incremented frame counter.
    Relies on the module-level ``pv`` (projection viewer) and ``wf``
    (wireframe) objects.
    """
    pv.createCube(wf)
    start = (0, 0, 0)
    point = (0,0)
    # Alternative animations -- enable at most one at a time:
    #planeBounce(cube,(count/20)%2+1,count%20)
    #start = wireframeExpandContract(cube,start)
    #rain(cube,count,5,10)
    #time.sleep(.1)
    #point = voxel(cube,count,point)
    #sine_wave(cube,count)
    #pyramids(cube,count)
    #side_waves(cube,count)
    #fireworks(cube,4)
    technites(cube,count)
    cube.redraw(wf, pv)
    return count + 1
|
Python
| 23
| 27.304348
| 52
|
/src/apps/cube.py
| 0.680492
| 0.645161
|
shashi/phosphene
|
refs/heads/master
|
import device
from phosphene.signal import *
from phosphene.signalutil import *
from phosphene.graphs import *
class LEDWall(device.Device):
    """Six-channel LED wall driven by the signal framework.

    Lights n of 6 channels based on the log difference between
    ``s.avg3[0]`` and ``s.longavg3[0]`` (presumably fast vs. slow
    envelopes -- confirm in signalutil), auto-scaled by a falling
    maximum.
    """
    def __init__(self, port):
        device.Device.__init__(self, "LEDWall", port)
    def setupSignal(self, signal):
        # Number of physical LED channels on the wall.
        CHANNELS = 6
        # Log-compressed rise of the fast envelope over the slow one.
        val = lambda s: [max(0, scipy.log(s.avg3[0]+1)) - scipy.log(s.longavg3[0]+1)]
        signal.avg1Falling = fallingMax(val)
        def f(s):
            # Scale by the falling maximum so the wall self-calibrates;
            # guard against a near-zero divisor.
            n = int(min(6, max(0, val(s)[0] * CHANNELS / (s.avg1Falling[0] if s.avg1Falling[0] > 0.01 else 1))))
            return [1 for i in range(0, n)] + [0 for i in range(0, 6-n)]
        signal.ledwall = lift(f)
    def graphOutput(self, signal):
        # No debug graph for this device.
        return None
    def redraw(self, signal):
        # Python 2 print statement (this file predates Python 3).
        print "LEDWall", self.toByteStream(signal.ledwall)
        self.port.write(self.toByteStream(signal.ledwall))
|
Python
| 24
| 34.708332
| 112
|
/src/apps/devices/ledwall.py
| 0.614936
| 0.585764
|
shashi/phosphene
|
refs/heads/master
|
#!/bin/env python
#using the wireframe module downloaded from http://www.petercollingridge.co.uk/
import mywireframe as wireframe
import pygame
from pygame import display
from pygame.draw import *
import time
import numpy
# Keyboard bindings for the emulator window: arrow keys pan, -/= zoom,
# and q/w, a/s, z/x rotate about the X, Y and Z axes respectively.
# Each value is a callable taking the ProjectionViewer instance.
key_to_function = {
 pygame.K_LEFT: (lambda x: x.translateAll('x', -10)),
 pygame.K_RIGHT: (lambda x: x.translateAll('x', 10)),
 pygame.K_DOWN: (lambda x: x.translateAll('y', 10)),
 pygame.K_UP: (lambda x: x.translateAll('y', -10)),
 pygame.K_EQUALS: (lambda x: x.scaleAll(1.25)),
 pygame.K_MINUS: (lambda x: x.scaleAll( 0.8)),
 pygame.K_q: (lambda x: x.rotateAll('X', 0.1)),
 pygame.K_w: (lambda x: x.rotateAll('X', -0.1)),
 pygame.K_a: (lambda x: x.rotateAll('Y', 0.1)),
 pygame.K_s: (lambda x: x.rotateAll('Y', -0.1)),
 pygame.K_z: (lambda x: x.rotateAll('Z', 0.1)),
 pygame.K_x: (lambda x: x.rotateAll('Z', -0.1))}
class ProjectionViewer:
    """ Displays 3D objects on a Pygame screen.

    Holds named wireframes and draws their visible nodes/edges; the
    translate/scale/rotate methods back the key bindings above.
    """
    def __init__(self, width, height):
        self.width = width
        self.height = height
        self.screen = pygame.display.set_mode((width, height))
        pygame.display.set_caption('Wireframe Display')
        self.background = (10,10,50)
        self.wireframes = {}
        self.displayNodes = True
        self.displayEdges = True
        self.nodeColour = (255,255,255)
        self.edgeColour = (200,200,200)
        self.nodeRadius = 3 #Modify to change size of the spheres
    def addWireframe(self, name, wireframe):
        """ Add a named wireframe object. """
        self.wireframes[name] = wireframe
    def run(self):
        # One frame: poll the keyboard, redraw, flip the display.
        # (Despite the name, this does NOT loop; callers call it per frame.)
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                if event.key in key_to_function:
                    key_to_function[event.key](self)
        self.display()
        pygame.display.flip()
    def display(self):
        """ Draw the wireframes on the screen. """
        self.screen.fill(self.background)
        for wireframe in self.wireframes.values():
            if self.displayEdges:
                for edge in wireframe.edges:
                    pygame.draw.aaline(self.screen, self.edgeColour, (edge.start.x, edge.start.y), (edge.stop.x, edge.stop.y), 1)
            if self.displayNodes:
                for node in wireframe.nodes:
                    # NOTE(review): 'visiblity' (sic) must match the
                    # attribute spelling in mywireframe -- confirm.
                    if node.visiblity:
                        pygame.draw.circle(self.screen, self.nodeColour, (int(node.x), int(node.y)), self.nodeRadius, 0)
    def translateAll(self, axis, d):
        """ Translate all wireframes along a given axis by d units. """
        # NOTE(review): dict.itervalues is Python 2 only.
        for wireframe in self.wireframes.itervalues():
            wireframe.translate(axis, d)
    def scaleAll(self, scale):
        """ Scale all wireframes by a given scale, centred on the centre of the screen. """
        centre_x = self.width/2
        centre_y = self.height/2
        for wireframe in self.wireframes.itervalues():
            wireframe.scale((centre_x, centre_y), scale)
    def rotateAll(self, axis, theta):
        """ Rotate all wireframe about their centre, along a given axis by a given angle. """
        rotateFunction = 'rotate' + axis
        for wireframe in self.wireframes.itervalues():
            centre = wireframe.findCentre()
            getattr(wireframe, rotateFunction)(centre, theta)
    def createCube(self,cube,X=[50,140], Y=[50,140], Z=[50,140]):
        """Populate *cube* with 8 frame corners, 12 frame edges, and a
        10x10x10 lattice of LED nodes, then register it as 'cube'.

        The 8 frame nodes come first, which is why findIndex() below
        offsets lattice indices by +8.
        NOTE(review): mutable default arguments are shared across
        calls -- harmless while callers never mutate them.
        """
        cube.addNodes([(x,y,z) for x in X for y in Y for z in Z]) #adding the nodes of the cube framework.
        allnodes = []
        cube.addEdges([(n,n+4) for n in range(0,4)]+[(n,n+1) for n in range(0,8,2)]+[(n,n+2) for n in (0,1,4,5)]) #creating edges of the cube framework.
        for i in range(0,10):
            for j in range(0,10):
                for k in range(0,10):
                    allnodes.append((X[0]+(X[1]-X[0])/9 * i,Y[0]+(Y[1] - Y[0])/9 * j,Z[0] + (Z[1]-Z[0])/9 * k))
        cube.addNodes(allnodes)
        #cube.outputNodes()
        self.addWireframe('cube',cube)
def findIndex(coords):
    """Convert (x, y, z) lattice coordinates to wireframe node indices.

    The first 8 nodes of the wireframe are the cube frame, hence the
    +8 offset into the 10x10x10 LED lattice.
    """
    return [x * 100 + y * 10 + z + 8 for (x, y, z) in coords]
def findIndexArray(array):
    """Convert a 10x10x10 boolean LED array into lit node indices.

    Entries equal to 1 are mapped with the same x*100 + y*10 + z + 8
    scheme used by findIndex().
    """
    return [i * 100 + j * 10 + k + 8
            for i in range(10)
            for j in range(10)
            for k in range(10)
            if array[i][j][k] == 1]
def wireframecube(size):
    """Return lattice coordinates of a hollow (wireframe) cube.

    The cube is centred in the 10x10x10 LED lattice; odd sizes are
    rounded up to the next even size.  Returns (x, y, z) triples along
    the cube's twelve edges (corner points appear multiple times).

    Fix: ``size/2`` is float division on Python 3, which breaks the
    ``range`` calls below; ``//`` is identical on Python 2 ints.
    """
    if size % 2 == 1:
        size = size + 1
    half = size // 2
    start = 5 - half
    end = 5 + half - 1
    # The three comprehensions trace the edges parallel to each axis.
    cubecords = [(x,y,z) for x in (start,end) for y in (start,end) for z in range(start,end+1)]+[(x,z,y) for x in (start,end) for y in (start,end) for z in range(start,end+1)] + [(z,y,x) for x in (start,end) for y in (start,end) for z in range(start,end+1)]
    return cubecords
def cubes(size):
    """Return all lattice coordinates of a solid cube of side *size*.

    Centred in the 10x10x10 LED lattice; odd sizes are rounded up to
    the next even size.

    Fix: ``size/2`` is float division on Python 3, which would yield
    float coordinates; ``//`` is identical on Python 2 ints.
    """
    if size % 2 == 1:
        size = size + 1
    half = size // 2
    cubecords = []
    for i in range(0, size):
        for j in range(0, size):
            for k in range(0, size):
                cubecords.append((5 - half + i, 5 - half + j, 5 - half + k))
    return cubecords
if __name__ == '__main__':
    # Demo: animate an expanding/contracting wireframe cube in the emulator.
    pv = ProjectionViewer(400, 300)
    allnodes =[]
    cube = wireframe.Wireframe() #storing all the nodes in this wireframe object.
    X = [50,140]
    Y = [50,140]
    Z = [50,140]
    pv.createCube(cube,X,Y,Z)
    # NOTE(review): YZface is computed but never used below.
    YZface = findIndex((0,y,z) for y in range(0,10) for z in range(0,10))
    count = 0
    for k in range(1,150000):
        # Every 5000 frames grow the lit wireframe cube by 2, wrapping at 10.
        if k%5000 ==2500:
            count = (count+2)%11
            cube.setVisible(findIndex(wireframecube(count)))
        pv.run()
|
Python
| 164
| 33.298782
| 254
|
/src/apps/devices/cubelib/emulator.py
| 0.594172
| 0.559879
|
shashi/phosphene
|
refs/heads/master
|
__all__ = ["emulator", "mywireframe"]
|
Python
| 1
| 37
| 37
|
/src/apps/devices/cubelib/__init__.py
| 0.578947
| 0.578947
|
shashi/phosphene
|
refs/heads/master
|
__all__ = ["discoball", "cube", "waterfall"]
|
Python
| 1
| 44
| 44
|
/src/apps/devices/__init__.py
| 0.555556
| 0.555556
|
shashi/phosphene
|
refs/heads/master
|
import time
import numpy
from util import indexable
__all__ = [
'Signal',
'lift',
'foldp',
'perceive'
]
class lift:
    """Annotate a value as a lifted (time-varying) signal component.

    *f* is either a function of the signal or a plain iterable
    (list/tuple/numpy array) indexed by the signal's current sample
    position.  *t_indexable*: None or truthy wraps iterables in a
    temporally-indexable view; False means plain element lookup.
    """
    def __init__(self, f, t_indexable=None):
        self.f = f
        if hasattr(f, '__call__'):
            self._type = 'lambda'
        elif isinstance(self.f, (list, tuple, numpy.ndarray)):
            self._type = 'iterable'
        else:
            raise ValueError(
                """You can lift only a function that takes
                the signal as argument, or an iterable"""
            )
        self.indexable = t_indexable
    def _manifest(self, signal):
        # compute the current value of this lifted
        # function given the current value of the signal
        if self._type == "lambda":
            return self.f(signal)
        elif self._type == "iterable":
            if self.indexable is None or self.indexable:
                # Make the array temporally indexable
                return indexable(self.f, signal.x)
            else:
                # BUG FIX: the original tested ``indexable == False``,
                # comparing the module-level function to False -- the
                # branch was unreachable and _manifest returned None
                # whenever t_indexable was False.
                return self.f[signal.x]
def foldp(f, init=None):
    """Fold a value over time.

    *f* receives (signal, previous_state) and returns
    (value, new_state); the state is threaded between frames.
    """
    holder = lambda: 0  # function object abused as a mutable namespace
    holder.store = init
    holder.val = None
    def step(signal):
        value, new_store = f(signal, holder.store)
        holder.store = new_store
        holder.val = value
        return value
    return lift(step)
class _WAIT:
    """Sentinel placed in Signal.cache while a value is being computed."""
    # _WAIT instances are used in the locking
    # mechanism in Signal to avoid recomputation
    # when multiple threads are using a signal
    pass
class Signal:
    """ The Signal abstraction.

    Y is the sample array; attributes assigned a ``lift`` instance are
    intercepted by __setattr__ and evaluated lazily (and cached per
    sample position) on attribute access.
    NOTE: uses dict.has_key, so this file is Python 2 only.
    """
    def __init__(self, Y, sample_rate, max_fps=90):
        self.Y = Y
        self.x = 0
        self.fps = 0
        self.max_fps = max_fps
        self.sample_rate = sample_rate
        self.lifts = {}
        # These two go through __setattr__ and land in self.lifts.
        self.t = lift(lambda s: s.time())
        self.A = lift(Y[:,0], True)
        self.cache = {}
    def time(self, t=time.time):
        # this signal's definition of time
        return t()
    def __getattr__(self, k):
        # Called only for attributes not found normally: resolve
        # lifted values, caching them per sample position x.
        if self.lifts.has_key(k):
            # Lifted values must have the same value
            # for the same x. Cache them.
            # This also helps in performance e.g. when
            # fft is needed a multiple places
            if self.cache.has_key(k):
                if isinstance(self.cache[k], _WAIT):
                    # Locking mechanism to avoid
                    # redundant computations by threads
                    # (busy-wait until the computing thread stores the
                    # (x, value) pair).
                    while isinstance(self.cache[k], _WAIT):
                        pass
                    return self.cache[k][1]
                else:
                    x, val = self.cache[k]
                    if x == self.x:
                        return val
            # Mark as in-progress, compute, then publish.
            self.cache[k] = _WAIT()
            val = self.lifts[k]._manifest(self)
            self.cache[k] = (self.x, val)
            return val
        else:
            return self.__dict__[k]
    def __setattr__(self, k, v):
        # lift instances are routed to the lifts table; everything
        # else is stored as a plain attribute.
        if isinstance(v, lift):
            self.lifts[k] = v
        else:
            self.__dict__[k] = v
    def set_state(self, x, fps, frames):
        # Advance the signal to sample x (called by perceive()).
        self.x = x
        self.fps = fps
        self.frames = frames
def perceive(processes, signal, max_fps):
    """Let processes perceive the signal.

    Simulates real-time reading of the signal: on every frame the
    current sample index is derived from wall-clock time, the signal
    state is advanced, and every function in *processes* is called
    with the signal.  Runs until the samples are exhausted, at most
    *max_fps* frames per second.
    """
    start_time = signal.time()
    call_spacing = 1.0 / max_fps
    sample_count = len(signal.Y)
    prev_x = -1
    x = 0
    frames = 0
    fps = max_fps
    while True:
        tic = signal.time()
        # what should be the current sample?
        x = int((tic - start_time) * signal.sample_rate)
        if x >= sample_count:
            break
        frames += 1
        # approximate current fps (exponential moving average).
        # BUG FIX: if two frames land on the same sample, x == prev_x
        # and the original divided by zero; skip the update instead.
        dx = x - prev_x
        if dx > 0:
            fps = fps * 0.5 + 0.5 * signal.sample_rate / float(dx)
        # Advance state of the signal
        signal.set_state(x, fps, frames)
        for p in processes:
            p(signal) # show processes the signal
        prev_x = x
        toc = signal.time()
        wait = call_spacing - (toc - tic)
        # chill out before looping again
        # FIXME: this assumes that the frame rate varies smoothly
        #        i.e. the next frame approximately takes the
        #        same time as the few frames immediately before it
        if wait > 0:
            time.sleep(wait)
|
Python
| 169
| 26.035503
| 70
|
/src/phosphene/signal.py
| 0.525717
| 0.521777
|
shashi/phosphene
|
refs/heads/master
|
import pdb
import scipy
import numpy
import pygame
from pygame import display
from pygame.draw import *
from pygame import Color
import math
def barGraph(data):
    """Return a drawer rendering *data* as red vertical bars.

    The returned function takes (surface, rectangle) where rectangle
    is (x, y, width, height); each data value scales a bar's height
    relative to the rectangle (values are presumably in [0, 1] --
    confirm with callers).
    """
    def f(surface, rectangle):
        x0, y0, W, H = rectangle
        try:
            l = len(data)
        except:
            # NOTE(review): debug residue -- a bare except that drops
            # into pdb instead of propagating the error.
            pdb.set_trace()
        w = W / l
        try:
            for i in range(0, l):
                h = data[i]
                c = Color(0, 0, 0, 0)
                c.hsva = (0, 100, 100, 0)
                x = x0 + i * w
                y = y0 + H * (1 - h)
                rect(surface, c, \
                     (x, y, 0.9 * w, h * H))
        except:
            # NOTE(review): same debug residue as above.
            pdb.set_trace()
    return f
def boopGraph(data):
    """Return a drawer rendering *data* as centred white squares.

    Each value (presumably in [0, 1] -- confirm with callers) scales a
    square within its own column of the given rectangle.
    """
    def f(surface, rectangle):
        x0, y0, W, H = rectangle
        try:
            l = len(data)
        except:
            # NOTE(review): debug residue -- bare except into pdb.
            pdb.set_trace()
        dx = W / l
        try:
            for i in range(0, l):
                d = data[i]
                a = dx * d
                # Centre the square in its column cell.
                x = (dx - a) / 2 + i * dx + x0
                y = (H - dx) / 2 + (dx - a) / 2 + y0
                c = Color(255, 255, 255, 255)
                rect(surface, c, \
                     (x, y, a, a))
        except:
            # NOTE(review): same debug residue as above.
            pdb.set_trace()
    return f
def circleRays(surface, center, data, transform=lambda y: scipy.log(y + 1)):
    """Draw *data* as hue-coloured rays radiating from *center*.

    Each value is *transform*-compressed; ray length and hue scale
    with the value relative to the transformed maximum.  A filled
    circle of radius 2*m is drawn at the centre.
    """
    x0, y0 = center
    total = math.radians(360)
    l = len(data)
    m = transform(max(data))
    # Angular spacing between consecutive rays.
    part = total/l
    for i in range(0, l):
        if m > 0:
            p = transform(data[i])
            h = p * 5
            hue = p / m
            c = Color(0, 0, 0, 0)
            c.hsva = ((1-hue) * 360, 100, 100, 0)
            x = x0 + (m*2+h)*math.cos(part * i)
            y = y0 + (m*2+h)*math.sin(part*i)
            line(surface, c,
                 (x0,y0),(x,y),1)
    circle(surface,c, center,int(m*2),0)
def graphsGraphs(graphs, direction=0):
    """Stack several graph drawers vertically inside one rectangle.

    Returns a drawer taking (surface, bigRect); each drawer in
    *graphs* gets an equal horizontal band of bigRect, top to bottom.
    """
    def f(surface, bigRect):
        left, top, width, height = bigRect
        band = height / len(graphs)
        for row, draw in enumerate(graphs):
            draw(surface, (left, top + row * band, width, band))
    return f
|
Python
| 85
| 24.6
| 76
|
/src/phosphene/graphs.py
| 0.413603
| 0.377298
|
shashi/phosphene
|
refs/heads/master
|
import device
from phosphene.signal import *
from phosphene.signalutil import *
from phosphene.graphs import *
class DiscoBall(device.Device):
    """Four-channel disco ball: a channel fires when its short-term
    level exceeds its long-term level by a factor of sqrt(2)."""
    def __init__(self, port):
        device.Device.__init__(self, "DiscoBall", port)
    def setupSignal(self, signal):
        # NOTE: ``lambda (a, b):`` is Python 2 tuple-parameter syntax
        # (a SyntaxError on Python 3); 1.414 ~ sqrt(2) threshold.
        signal.discoball = lift(lambda s: numpymap(lambda (a, b): 1 if a > b * 1.414 else 0, zip(s.avg12, s.longavg12)))
    def graphOutput(self, signal):
        # Debug view of the first four channels.
        return boopGraph(signal.discoball[:4])
    def redraw(self, signal):
        # Scale the 0/1 flags to full bytes and send them out.
        data = self.truncate(signal.discoball[:4] * 255)
        print data
        self.port.write(self.toByteStream(data))
|
Python
| 19
| 31.947369
| 120
|
/src/apps/devices/discoball.py
| 0.662939
| 0.638978
|
shashi/phosphene
|
refs/heads/master
|
import device
from phosphene.signal import *
import scipy, numpy
from phosphene.graphs import barGraph
class Waterfall(device.Device):
    """Serial waterfall device: eight light levels plus eight fan levels."""
    def __init__(self, port):
        device.Device.__init__(self, "Waterfall", port)
    def setupSignal(self, signal):
        """Attach the lifted 16-channel output to the signal."""
        def waterfall(s):
            levels = []
            for chan in range(8):
                levels.append(s.avg8[chan] * 150 / max(0.5, s.longavg8[chan]))
            # Fans run at twice the (pre-reversal) light levels.
            fans = [level * 2 for level in levels]
            levels.reverse()
            return levels + fans
        signal.waterfall = lift(waterfall)
    def graphOutput(self, signal):
        """Debug view: bar graph of the normalized channel levels."""
        return barGraph(self.truncate(signal.waterfall) / 255.0)
    def redraw(self, signal):
        """Push the current channel bytes out over the serial port."""
        self.port.write(self.toByteStream(signal.waterfall))
|
Python
| 26
| 28.5
| 65
|
/src/apps/devices/waterfall.py
| 0.601043
| 0.58279
|
shashi/phosphene
|
refs/heads/master
|
import serial
import numpy
from threading import Thread
class Device:
def __init__(self, name, port):
self.array = []
try:
self.port = serial.Serial(port)
self.isConnected = True
print "Connected to", name
except Exception as e:
self.port = None
self.isConnected = False
print "Error connecting to", name, e
def setupSignal(self, signal):
pass
def graphOutput(self, signal):
pass
def truncate(self, array):
return numpy.array([min(int(i), 255) for i in array])
def toByteStream(self, array):
return [chr(i) for i in self.truncate(array)]
def readAck(self):
print self.port.read(size=1) # Read the acknowledgement
def redraw(self):
if self.isConnected:
self.port.write(self.toByteStream())
self.port.read(size=1) #Acknowledgement
else:
#print "Connection to %s lost!" % self.name
pass
def isUnresponsive(self):
print "%s is not responding! Stopping to communicate."
self.isConnected = False
|
Python
| 42
| 26.285715
| 63
|
/src/apps/devices/device.py
| 0.584132
| 0.579773
|
shashi/phosphene
|
refs/heads/master
|
__all__ = ["audio", "dsp", "signal", "graphs", "util"]
|
Python
| 1
| 54
| 54
|
/src/phosphene/__init__.py
| 0.490909
| 0.490909
|
shashi/phosphene
|
refs/heads/master
|
import sys
import pdb
import pygame
from pygame import display
from pygame.draw import *
import scipy
import time
from phosphene import audio, util, signalutil, signal
from phosphene.graphs import barGraph, boopGraph, graphsGraphs
from threading import Thread
# Command line: a single audio file path is required.
if len(sys.argv) < 2:
    print "Usage: %s file.mp3" % sys.argv[0]
    sys.exit(1)
else:
    fPath = sys.argv[1]
# initialize PyGame
SCREEN_DIMENSIONS = (640, 480)
pygame.init()
surface = display.set_mode(SCREEN_DIMENSIONS)
# Load the audio and wrap it in a Signal; A is the lifted mono mix of
# the two stereo channels (temporally indexable).
sF, data = audio.read(fPath)
sig = signal.Signal(data, sF)
sig.A = signal.lift((data[:,0] + data[:,1]) / 2, True)
def beats(s):
    """ Extract beats in the signal in 4 different
    frequency ranges.

    NOTE: ``lambda (x, y):`` is Python 2 tuple-parameter syntax.
    NOTE(review): x comes from ``s.avg4 * threshold``, so the test
    ``x > threshold * y`` reduces to avg4 > longavg4 -- the threshold
    cancels out, which contradicts the comment below; confirm intent.
    """
    # quick note: s.avg4 is a decaying 4 channel fft
    # s.longavg4 decays at a slower rate
    # beat detection huristic:
    # beat occured if s.avg4 * threshold > s.longavg4
    threshold = 1.7
    return util.numpymap(
        lambda (x, y): 1 if x > threshold * y else 0,
        zip(s.avg4 * threshold, s.longavg4))
# Lift the beats
sig.beats = signal.lift(beats)
# not sure if this can be called sustain.
# blend gives a decay effect
sig.sustain = signalutil.blend(beats, 0.7)
def graphsProcess(s):
    """Per-frame renderer: draw the fft differential, the raw beats
    and the sustained (decayed) beats stacked in the pygame window."""
    # clear screen
    surface.fill((0, 0, 0))
    # draw a decaying fft differential and the beats in the full
    # pygame window.
    graphsGraphs([
        barGraph(s.avg12rel / 10),
        boopGraph(s.beats),
        boopGraph(s.sustain)
    ])(surface, (0, 0) + SCREEN_DIMENSIONS)
    # affect the window
    display.update()
def repl():
    """Start a background pdb shell for poking at the running program.

    Spawns a thread that immediately drops into the debugger in the
    current context, leaving the main loop running.
    """
    def drop_into_debugger():
        pdb.set_trace()
    Thread(target=drop_into_debugger).start()
#repl()
# apply utility "lift"s -- this sets up signal.avgN and longavgN variables
signalutil.setup(sig)
soundObj = audio.makeSound(sF, data)
# make a pygame Sound object from the data
soundObj.play() # start playing it. This is non-blocking
# perceive signal at 90 fps (or lesser when not possible)
signal.perceive([graphsProcess], sig, 90)
|
Python
| 81
| 26.234568
| 77
|
/src/demo.py
| 0.662738
| 0.644152
|
TaegamJung/mannam
|
refs/heads/master
|
from django.shortcuts import render
# View에 Model(Post 게시글) 가져오기
from .models import Post
from django.views.generic.base import TemplateView
from django.views.generic.edit import CreateView
from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse_lazy
from django.views.generic.edit import FormView
from .forms import PostSearchForm
from django.db.models import Q
from django.shortcuts import render
class SearchFormView(FormView):
    """Search posts whose title or contents contain the submitted word."""
    form_class = PostSearchForm
    template_name = 'main/post_search.html'
    def form_valid(self, form):
        # The raw search term from the POST body.
        schWord = '%s' % self.request.POST['search_word']
        # Case-insensitive match against title OR contents; distinct()
        # guards against duplicate rows from the OR condition.
        post_list = Post.objects.filter(Q(postname__icontains=schWord) | Q(contents__icontains=schWord)).distinct()
        context = {}
        context['form'] = form
        context['search_term'] = schWord
        context['object_list'] = post_list
        return render(self.request, self.template_name, context)
class UserCreateView(CreateView):
    """Registration form; redirects to the done page on success."""
    template_name = 'registration/register.html'
    form_class = UserCreationForm
    success_url = reverse_lazy('register_done')
class UserCreateDone(TemplateView):
    """Static page shown after a successful registration."""
    template_name = 'registration/register_done.html'
# index view: serves the index.html page
class index(TemplateView):
    """Landing page."""
    template_name = 'main/index.html'
# blog view: serves the blog.html page
def blog(request):
    """List all posts on the blog page."""
    # Fetch every Post into postlist
    postlist = Post.objects.all()
    # Render blog.html with the full postlist in the context
    return render(request, 'main/blog.html', {'postlist': postlist})
# posting view: serves a single blog post
def posting(request, pk):
    """Show one post looked up by primary key.

    NOTE(review): Post.objects.get raises DoesNotExist (a 500) for an
    unknown pk; get_object_or_404 would return a 404 instead.
    """
    # Look up a single Post by its primary key
    post = Post.objects.get(pk=pk)
    # Render posting.html with the retrieved post
    return render(request, 'main/posting.html', {'post': post})
def new_feed(request):
    """Render the new_feed.html template."""
    return render(request, 'new_feed.html')
|
Python
| 63
| 29.619047
| 115
|
/main/views.py
| 0.700828
| 0.700828
|
hjtree0825/stroke_network_ctmc_simulations
|
refs/heads/main
|
from stroke_functions import *
# Initialize
# Monte-Carlo study: run the extended stroke-network simulation for
# transfer rates ph = 0.15, 0.35, 0.55 and summarize the CSC bed
# occupancy distribution over `repl_num` replications of horizon T.
T = 10000
repl_num = 100
# NOTE(review): these shadow queue_ext's defaults but are never passed
# to it below -- confirm whether they were meant to be arguments.
service_rate_h = 1./7
service_rate_i = 1./3
# Per-transfer-rate accumulators (1..6 correspond to ph = 0.15..0.65;
# only 1, 3, 5 are reachable with the arange step of 0.2 below).
Mean1_psc_cap = []
STD1_psc_cap = []
Mean2_psc_cap = []
STD2_psc_cap = []
Mean3_psc_cap = []
STD3_psc_cap = []
Mean4_psc_cap = []
STD4_psc_cap = []
Mean5_psc_cap = []
STD5_psc_cap = []
Mean6_psc_cap = []
STD6_psc_cap = []
cc0 = 17 # number of CSC beds when transfer rate is 15%
cc1 = 17 # number of CSC beds when transfer rate is 35%
cc2 = 17 # number of CSC beds when transfer rate is 55%
for ph in np.arange(0.15, 0.66, 0.2):
    X_outer = []
    # NOTE(review): cc is computed but never used below.
    cc = csc_bed(ph, cc0, cc1, cc2)
    for iteration in np.arange(repl_num):
        # Dist[k] is the time spent with k beds occupied; dividing by T
        # turns it into an occupancy probability vector.
        Dist = queue_ext(ph, c1 = cc0, c2 = cc1, c3 = cc2, T = T)
        X_outer.append(Dist/T)
    # Bucket the replication means/stds by the transfer-rate band.
    if 0.14 <= ph <= 0.16:
        Mean1_psc_cap.append(np.mean(X_outer, axis = 0))
        STD1_psc_cap.append(np.std(X_outer, axis = 0))
    elif 0.24 <= ph <= 0.26:
        Mean2_psc_cap.append(np.mean(X_outer, axis = 0))
        STD2_psc_cap.append(np.std(X_outer, axis = 0))
    elif 0.34 <= ph <= 0.36:
        Mean3_psc_cap.append(np.mean(X_outer, axis = 0))
        STD3_psc_cap.append(np.std(X_outer, axis = 0))
    elif 0.44 <= ph <= 0.46:
        Mean4_psc_cap.append(np.mean(X_outer, axis = 0))
        STD4_psc_cap.append(np.std(X_outer, axis = 0))
    elif 0.54 <= ph <= 0.56:
        Mean5_psc_cap.append(np.mean(X_outer, axis = 0))
        STD5_psc_cap.append(np.std(X_outer, axis = 0))
    elif 0.64 <= ph <= 0.66:
        Mean6_psc_cap.append(np.mean(X_outer, axis = 0))
        STD6_psc_cap.append(np.std(X_outer, axis = 0))
    else:
        print("ERROR")
# Plot the occupancy distributions with 95% CI error bars for the
# three simulated transfer rates.
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.5)
ax1.bar(np.arange(cc0+1), Mean1_psc_cap[0], yerr = 1.96*STD1_psc_cap[0]/np.sqrt(repl_num))
ax2.bar(np.arange(cc1+1), Mean3_psc_cap[0], yerr = 1.96*STD3_psc_cap[0]/np.sqrt(repl_num))
ax3.bar(np.arange(cc2+1), Mean5_psc_cap[0], yerr = 1.96*STD5_psc_cap[0]/np.sqrt(repl_num))
ax1.title.set_text('(a)')
ax2.title.set_text('(b)')
ax3.title.set_text('(c)')
fig.text(0.5, 0.0, 'Bed occupancy', ha='center')
fig.text(0.0, 0.5, 'Occupancy probability', va='center', rotation='vertical')
plt.savefig("5_bed_distribution_add_psc_cap.pdf")
plt.savefig("5_bed_distribution_add_psc_cap.jpg")
# Persist the raw summary statistics for later analysis.
save_list = [Mean1_psc_cap, Mean3_psc_cap, Mean5_psc_cap]
open_file = open("base_psc_cap_mean.pkl", "wb")
pickle.dump(save_list, open_file)
open_file.close()
save_list = [STD1_psc_cap, STD3_psc_cap, STD5_psc_cap]
open_file = open("base_psc_cap_std.pkl", "wb")
pickle.dump(save_list, open_file)
open_file.close()
|
Python
| 76
| 33.039474
| 90
|
/stroke_expanded_add_capacity.py
| 0.596998
| 0.532458
|
hjtree0825/stroke_network_ctmc_simulations
|
refs/heads/main
|
from stroke_source import *
g = r.Random(1234)
def next_arrival(arrival_rate):
    """Draw an exponential inter-arrival time via inverse-CDF sampling."""
    u = g.uniform(0, 1)
    return -1. / arrival_rate * m.log(u)
def next_service(service_rate):
    """Draw an exponential service time via inverse-CDF sampling."""
    u = g.uniform(0, 1)
    return -1. / service_rate * m.log(u)
def redirect(p):
    """Bernoulli trial: return 1 with probability *p*, else 0."""
    u = g.uniform(0, 1)
    return 1 if p >= u else 0
def countX(lst, x):
    """Return how many elements of *lst* compare equal to *x*."""
    return sum(1 for item in lst if item == x)
def queue_base_only(ph, arrival_rate_p_h = 2.0*0.15, arrival_rate_p_i = 2.0*0.85,
arrival_rate_c_h = 3.0*0.15, arrival_rate_c_i = 3.0*0.85,
service_rate_h = 1./7, service_rate_i = 1./3,
c1 = 15, c2 = 15, c3 = 15,
psc1_tr_h = 0.95,
psc2_tr_h = 0.95, psc2_tr_i = 0.15,
psc3_tr_h = 0.95, psc3_tr_i = 0.15,
T = 1000):
    """Discrete-event simulation of CSC bed occupancy (baseline network).

    Patients arrive from three PSCs (hemorrhagic/ischemic streams,
    each transferred with its own probability) and directly at the
    CSC.  Arrivals that find all ``cc`` beds busy join Q; otherwise a
    bed is taken for an exponential length of stay.

    Returns (Dist, total_busy_serv1): Dist[k] is the total time
    exactly k beds were occupied over [0, T); total_busy_serv1 is the
    time-integral of the number of busy beds.

    NOTE(review): the 0.25/0.45/0.65 branches read a module-level
    ``cc0`` that is not defined in this file -- presumably imported
    from stroke_source; confirm.  If *ph* matches no band, ``cc`` is
    never bound and the code below raises NameError.
    """
    # Initialize
    pi = ph
    patid = 0
    red_prop_h1 = psc1_tr_h # ph
    red_prop_i1 = pi
    red_prop_h2 = psc2_tr_h # 0.15
    red_prop_i2 = psc2_tr_i # 0.15
    red_prop_h3 = psc3_tr_h # 0.15
    red_prop_i3 = psc3_tr_i # 0.15
    Q = []
    X = []
    # Select the CSC bed capacity by transfer-rate band.
    if 0.14 <= ph <= 0.16:
        cc = c1
    elif 0.24 <= ph <= 0.26:
        cc = cc0
    elif 0.34 <= ph <= 0.36:
        cc = c2
    elif 0.44 <= ph <= 0.46:
        cc = cc0
    elif 0.54 <= ph <= 0.56:
        cc = c3
    elif 0.64 <= ph <= 0.66:
        cc = cc0
    else:
        print("ERROR", ph)
    sent = 0
    overflown = 0
    #####
    # Degugging
    #####
    CSC = []
    csc_entered = 0
    total_busy_serv1 = 0
    #####
    LenQ = []
    LenX = []
    Time = []
    # Dist[k] accumulates time spent with exactly k beds occupied.
    Dist = np.zeros(cc+1)
    # Initial event times: 8 arrival streams plus the next completion.
    next_arrival_P1_h = next_arrival(arrival_rate_p_h)
    next_arrival_P1_i = next_arrival(arrival_rate_p_i)
    next_arrival_P2_h = next_arrival(arrival_rate_p_h)
    next_arrival_P2_i = next_arrival(arrival_rate_p_i)
    next_arrival_P3_h = next_arrival(arrival_rate_p_h)
    next_arrival_P3_i = next_arrival(arrival_rate_p_i)
    next_arrival_C_h = next_arrival(arrival_rate_c_h)
    next_arrival_C_i = next_arrival(arrival_rate_c_i)
    next_complete = m.inf
    Event = [next_arrival_P1_h, next_arrival_P1_i, next_arrival_P2_h, next_arrival_P2_i, next_arrival_P3_h, next_arrival_P3_i, next_arrival_C_h, next_arrival_C_i, next_complete]
    # Next event
    t = min(Event)
    while t < T:
        Time.append(t)
        LenQ.append(len(Q))
        LenX.append(len(X))
        # Mark the current occupancy level; scaled by the sojourn time below.
        Update_vec = np.zeros(cc + 1)
        Update_vec[len(X)] = 1
        if t == next_arrival_P1_h:
            patid += 1
            if redirect(red_prop_h1) == 1:
                sent += 1
                stype = 1
                if len(X) >= cc:
                    Q.append([patid, stype]) # type == 1: hem; type == 2: isch
                else:
                    LOS = next_service(service_rate_h)
                    X.append([patid, stype, t + LOS])
                    next_complete = min(sublist[2] for sublist in X)
            next_arrival_P1_h = t + next_arrival(arrival_rate_p_h)
        elif t == next_arrival_P1_i:
            patid += 1
            if redirect(red_prop_i1) == 1:
                sent += 1
                stype = 2
                if len(X) >= cc:
                    Q.append([patid, stype])
                else:
                    LOS = next_service(service_rate_i)
                    X.append([patid, stype, t + LOS])
                    next_complete = min(sublist[2] for sublist in X)
            next_arrival_P1_i = t + next_arrival(arrival_rate_p_i)
        elif t == next_arrival_P2_h:
            patid += 1
            if redirect(red_prop_h2) == 1:
                sent += 1
                stype = 1
                if len(X) >= cc:
                    Q.append([patid, stype])
                else:
                    LOS = next_service(service_rate_h)
                    X.append([patid, stype, t + LOS])
                    next_complete = min(sublist[2] for sublist in X)
            next_arrival_P2_h = t + next_arrival(arrival_rate_p_h)
        elif t == next_arrival_P2_i:
            patid += 1
            if redirect(red_prop_i2) == 1:
                sent += 1
                stype = 2
                if len(X) >= cc:
                    Q.append([patid, stype])
                else:
                    LOS = next_service(service_rate_i)
                    X.append([patid, stype, t + LOS])
                    next_complete = min(sublist[2] for sublist in X)
            next_arrival_P2_i = t + next_arrival(arrival_rate_p_i)
        elif t == next_arrival_P3_h:
            patid += 1
            if redirect(red_prop_h3) == 1:
                sent += 1
                stype = 1
                if len(X) >= cc:
                    Q.append([patid, stype])
                else:
                    LOS = next_service(service_rate_h)
                    X.append([patid, stype, t + LOS])
                    next_complete = min(sublist[2] for sublist in X)
            next_arrival_P3_h = t + next_arrival(arrival_rate_p_h)
        elif t == next_arrival_P3_i:
            patid += 1
            if redirect(red_prop_i3) == 1:
                sent += 1
                stype = 2
                if len(X) >= cc:
                    Q.append([patid, stype])
                else:
                    LOS = next_service(service_rate_i)
                    X.append([patid, stype, t + LOS])
                    next_complete = min(sublist[2] for sublist in X)
            next_arrival_P3_i = t + next_arrival(arrival_rate_p_i)
        elif t == next_arrival_C_h:
            # Direct CSC arrivals are always admitted (no redirect draw).
            patid += 1
            csc_entered += 1
            stype = 1
            if len(X) >= cc:
                overflown += 1
                Q.append([patid, stype])
            else:
                LOS = next_service(service_rate_h)
                X.append([patid, stype, t + LOS])
                next_complete = min(sublist[2] for sublist in X)
            next_arrival_C_h = t + next_arrival(arrival_rate_c_h)
        elif t == next_arrival_C_i:
            patid += 1
            csc_entered += 1
            stype = 2
            if len(X) >= cc:
                overflown += 1
                Q.append([patid, stype])
            else:
                LOS = next_service(service_rate_i)
                X.append([patid, stype, t + LOS])
                next_complete = min(sublist[2] for sublist in X)
            next_arrival_C_i = t + next_arrival(arrival_rate_c_i)
        elif t == next_complete:
            # Remove the patient with the smallest completion time.
            compl = min(sublist[2] for sublist in X)
            for i in np.arange(len(X)):
                if X[i][2] == compl:
                    ind = i
            X.pop(ind)
            if len(X) > 0 :
                next_complete = min(sublist[2] for sublist in X)
            else:
                next_complete = m.inf
        # Advance the clock and accumulate time-weighted statistics.
        Event = [next_arrival_P1_h, next_arrival_P1_i, next_arrival_P2_h, next_arrival_P2_i, next_arrival_P3_h, next_arrival_P3_i, next_arrival_C_h, next_arrival_C_i, next_complete]
        tp = t
        t = min(Event)
        total_busy_serv1 = total_busy_serv1 + len(X)*(t-tp)
        Dist = Dist + Update_vec * (t - tp)
        if len(X) >= cc + 1:
            print("ERROR!")
            break
    return(Dist, total_busy_serv1)
def queue(ph, arrival_rate_p_h = 2.0*0.15, arrival_rate_p_i = 2.0*0.85,
arrival_rate_c_h = 3.0*0.15, arrival_rate_c_i = 3.0*0.85,
service_rate_h = 1./7, service_rate_i = 1./3,
c1 = 15, c2 = 15, c3 = 15,
psc1_tr_h = 0.95,
psc2_tr_h = 0.95, psc2_tr_i = 0.15,
psc3_tr_h = 0.95, psc3_tr_i = 0.15,
T = 1000):
    """Discrete-event simulation of CSC bed occupancy.

    Identical to queue_base_only except it returns only Dist, where
    Dist[k] is the total time exactly k beds were occupied over
    [0, T).

    NOTE(review): near-duplicate of queue_base_only -- consider
    sharing the body.  The 0.25/0.45/0.65 branches read a
    module-level ``cc0`` not defined in this file (presumably from
    stroke_source; confirm), and an unmatched *ph* leaves ``cc``
    unbound, raising NameError below.
    """
    # Initialize
    pi = ph
    patid = 0
    red_prop_h1 = psc1_tr_h # ph
    red_prop_i1 = pi
    red_prop_h2 = psc2_tr_h # 0.15
    red_prop_i2 = psc2_tr_i # 0.15
    red_prop_h3 = psc3_tr_h # 0.15
    red_prop_i3 = psc3_tr_i # 0.15
    Q = []
    X = []
    # Select the CSC bed capacity by transfer-rate band.
    if 0.14 <= ph <= 0.16:
        cc = c1
    elif 0.24 <= ph <= 0.26:
        cc = cc0
    elif 0.34 <= ph <= 0.36:
        cc = c2
    elif 0.44 <= ph <= 0.46:
        cc = cc0
    elif 0.54 <= ph <= 0.56:
        cc = c3
    elif 0.64 <= ph <= 0.66:
        cc = cc0
    else:
        print("ERROR", ph)
    sent = 0
    overflown = 0
    #####
    # Degugging
    #####
    CSC = []
    csc_entered = 0
    total_busy_serv1 = 0
    #####
    LenQ = []
    LenX = []
    Time = []
    # Dist[k] accumulates time spent with exactly k beds occupied.
    Dist = np.zeros(cc+1)
    # Initial event times: 8 arrival streams plus the next completion.
    next_arrival_P1_h = next_arrival(arrival_rate_p_h)
    next_arrival_P1_i = next_arrival(arrival_rate_p_i)
    next_arrival_P2_h = next_arrival(arrival_rate_p_h)
    next_arrival_P2_i = next_arrival(arrival_rate_p_i)
    next_arrival_P3_h = next_arrival(arrival_rate_p_h)
    next_arrival_P3_i = next_arrival(arrival_rate_p_i)
    next_arrival_C_h = next_arrival(arrival_rate_c_h)
    next_arrival_C_i = next_arrival(arrival_rate_c_i)
    next_complete = m.inf
    Event = [next_arrival_P1_h, next_arrival_P1_i, next_arrival_P2_h, next_arrival_P2_i, next_arrival_P3_h, next_arrival_P3_i, next_arrival_C_h, next_arrival_C_i, next_complete]
    # Next event
    t = min(Event)
    while t < T:
        Time.append(t)
        LenQ.append(len(Q))
        LenX.append(len(X))
        # Mark the current occupancy level; scaled by the sojourn time below.
        Update_vec = np.zeros(cc + 1)
        Update_vec[len(X)] = 1
        if t == next_arrival_P1_h:
            patid += 1
            if redirect(red_prop_h1) == 1:
                sent += 1
                stype = 1
                if len(X) >= cc:
                    Q.append([patid, stype]) # type == 1: hem; type == 2: isch
                else:
                    LOS = next_service(service_rate_h)
                    X.append([patid, stype, t + LOS])
                    next_complete = min(sublist[2] for sublist in X)
            next_arrival_P1_h = t + next_arrival(arrival_rate_p_h)
        elif t == next_arrival_P1_i:
            patid += 1
            if redirect(red_prop_i1) == 1:
                sent += 1
                stype = 2
                if len(X) >= cc:
                    Q.append([patid, stype])
                else:
                    LOS = next_service(service_rate_i)
                    X.append([patid, stype, t + LOS])
                    next_complete = min(sublist[2] for sublist in X)
            next_arrival_P1_i = t + next_arrival(arrival_rate_p_i)
        elif t == next_arrival_P2_h:
            patid += 1
            if redirect(red_prop_h2) == 1:
                sent += 1
                stype = 1
                if len(X) >= cc:
                    Q.append([patid, stype])
                else:
                    LOS = next_service(service_rate_h)
                    X.append([patid, stype, t + LOS])
                    next_complete = min(sublist[2] for sublist in X)
            next_arrival_P2_h = t + next_arrival(arrival_rate_p_h)
        elif t == next_arrival_P2_i:
            patid += 1
            if redirect(red_prop_i2) == 1:
                sent += 1
                stype = 2
                if len(X) >= cc:
                    Q.append([patid, stype])
                else:
                    LOS = next_service(service_rate_i)
                    X.append([patid, stype, t + LOS])
                    next_complete = min(sublist[2] for sublist in X)
            next_arrival_P2_i = t + next_arrival(arrival_rate_p_i)
        elif t == next_arrival_P3_h:
            patid += 1
            if redirect(red_prop_h3) == 1:
                sent += 1
                stype = 1
                if len(X) >= cc:
                    Q.append([patid, stype])
                else:
                    LOS = next_service(service_rate_h)
                    X.append([patid, stype, t + LOS])
                    next_complete = min(sublist[2] for sublist in X)
            next_arrival_P3_h = t + next_arrival(arrival_rate_p_h)
        elif t == next_arrival_P3_i:
            patid += 1
            if redirect(red_prop_i3) == 1:
                sent += 1
                stype = 2
                if len(X) >= cc:
                    Q.append([patid, stype])
                else:
                    LOS = next_service(service_rate_i)
                    X.append([patid, stype, t + LOS])
                    next_complete = min(sublist[2] for sublist in X)
            next_arrival_P3_i = t + next_arrival(arrival_rate_p_i)
        elif t == next_arrival_C_h:
            # Direct CSC arrivals are always admitted (no redirect draw).
            patid += 1
            csc_entered += 1
            stype = 1
            if len(X) >= cc:
                overflown += 1
                Q.append([patid, stype])
            else:
                LOS = next_service(service_rate_h)
                X.append([patid, stype, t + LOS])
                next_complete = min(sublist[2] for sublist in X)
            next_arrival_C_h = t + next_arrival(arrival_rate_c_h)
        elif t == next_arrival_C_i:
            patid += 1
            csc_entered += 1
            stype = 2
            if len(X) >= cc:
                overflown += 1
                Q.append([patid, stype])
            else:
                LOS = next_service(service_rate_i)
                X.append([patid, stype, t + LOS])
                next_complete = min(sublist[2] for sublist in X)
            next_arrival_C_i = t + next_arrival(arrival_rate_c_i)
        elif t == next_complete:
            # Remove the patient with the smallest completion time.
            compl = min(sublist[2] for sublist in X)
            for i in np.arange(len(X)):
                if X[i][2] == compl:
                    ind = i
            X.pop(ind)
            if len(X) > 0 :
                next_complete = min(sublist[2] for sublist in X)
            else:
                next_complete = m.inf
        # Advance the clock and accumulate time-weighted statistics.
        Event = [next_arrival_P1_h, next_arrival_P1_i, next_arrival_P2_h, next_arrival_P2_i, next_arrival_P3_h, next_arrival_P3_i, next_arrival_C_h, next_arrival_C_i, next_complete]
        tp = t
        t = min(Event)
        total_busy_serv1 = total_busy_serv1 + len(X)*(t-tp)
        Dist = Dist + Update_vec * (t - tp)
        if len(X) >= cc + 1:
            print("ERROR!")
            break
    return(Dist)
def csc_bed(ph, cc0, cc1, cc2):
    """Return the CSC bed count for a given transfer rate *ph*.

    NOTE(review): every branch currently returns ``cc0``; the ``cc1``
    and ``cc2`` parameters are accepted but unused -- confirm whether
    the 0.35 / 0.55 bands were meant to return them.  If *ph* matches
    no band, ``cc`` is never bound and the return raises NameError.
    """
    if 0.14 <= ph <= 0.16:
        cc = cc0
    elif 0.24 <= ph <= 0.26:
        cc = cc0
    elif 0.34 <= ph <= 0.36:
        cc = cc0
    elif 0.44 <= ph <= 0.46:
        cc = cc0
    elif 0.54 <= ph <= 0.56:
        cc = cc0
    elif 0.64 <= ph <= 0.66:
        cc = cc0
    else:
        print("error")
    return(cc)
def queue_ext(ph, arrival_rate_p_h = 2.0*0.15, arrival_rate_p_i = 2.0*0.85,
arrival_rate_c_h = 3.0*0.15, arrival_rate_c_i = 3.0*0.85,
service_rate_h = 1./7, service_rate_i = 1./3,
c1 = 15, c2 = 15, c3 = 15,
psc1_tr_h = 0.95,
psc2_tr_h = 0.95, psc2_tr_i = 0.15,
psc3_tr_h = 0.95, psc3_tr_i = 0.15,
psc4_tr_h = 0.95, psc4_tr_i = 0.15,
T = 1000):
    """Event-driven simulation of the expanded stroke network.

    Four PSCs (P1-P4) each generate hemorrhagic (_h) and ischemic (_i)
    patient streams; each patient is transferred to the CSC with the
    corresponding redirect probability (for P1, the ischemic rate is the
    leading ``ph`` argument).  The CSC additionally receives its own walk-in
    streams (C_h / C_i).  ``X`` holds in-service patients as
    [patid, stype, completion_time]; ``Q`` holds patients who arrived while
    all ``cc`` beds were busy.  ``Dist[k]`` accumulates the total time the
    system spent with exactly k beds occupied, so callers divide by T to get
    the stationary occupancy distribution.

    Relies on ``redirect``, ``next_arrival`` and ``next_service`` defined
    earlier in this module (presumably Bernoulli draws and exponential
    inter-event times -- TODO confirm against their definitions).

    Returns:
        numpy array of length cc+1 with time-in-state totals (not yet
        normalized by T).
    """
    # Initialize
    pi = ph
    patid = 0
    red_prop_h1 = psc1_tr_h # ph
    red_prop_i1 = pi
    red_prop_h2 = psc2_tr_h
    red_prop_i2 = psc2_tr_i
    red_prop_h3 = psc3_tr_h
    red_prop_i3 = psc3_tr_i
    red_prop_h4 = psc4_tr_h
    red_prop_i4 = psc4_tr_i
    Q = []
    X = []
    # Select the CSC bed capacity from the PSC-1 transfer rate; tolerance
    # bands absorb np.arange float error.
    if 0.14 <= ph <= 0.16:
        cc = c1
    elif 0.24 <= ph <= 0.26:
        # NOTE(review): ``cc0`` is not defined in this function (the
        # parameters are c1/c2/c3), so this branch raises NameError if it
        # is ever reached.  It is unreachable for the ph grid used by the
        # scenario scripts (0.15/0.35/0.55) -- confirm intent.
        cc = cc0
    elif 0.34 <= ph <= 0.36:
        cc = c2
    elif 0.44 <= ph <= 0.46:
        # NOTE(review): same undefined ``cc0`` as above.
        cc = cc0
    elif 0.54 <= ph <= 0.56:
        cc = c3
    elif 0.64 <= ph <= 0.66:
        # NOTE(review): same undefined ``cc0`` as above.
        cc = cc0
    else:
        # NOTE(review): ``cc`` stays unbound here, so np.zeros(cc+1) below
        # raises NameError after this message is printed.
        print("ERROR", ph)
    sent = 0
    overflown = 0
    #####
    # Debugging
    #####
    CSC = []
    csc_entered = 0
    total_busy_serv1 = 0
    #####
    LenQ = []
    LenX = []
    Time = []
    Dist = np.zeros(cc+1)
    # Draw the first arrival time for every stream; no service completion is
    # pending yet, so next_complete starts at infinity.
    next_arrival_P1_h = next_arrival(arrival_rate_p_h)
    next_arrival_P1_i = next_arrival(arrival_rate_p_i)
    next_arrival_P2_h = next_arrival(arrival_rate_p_h)
    next_arrival_P2_i = next_arrival(arrival_rate_p_i)
    next_arrival_P3_h = next_arrival(arrival_rate_p_h)
    next_arrival_P3_i = next_arrival(arrival_rate_p_i)
    next_arrival_P4_h = next_arrival(arrival_rate_p_h)
    next_arrival_P4_i = next_arrival(arrival_rate_p_i)
    next_arrival_C_h = next_arrival(arrival_rate_c_h)
    next_arrival_C_i = next_arrival(arrival_rate_c_i)
    next_complete = m.inf
    Event = [
        next_arrival_P1_h, next_arrival_P1_i,
        next_arrival_P2_h, next_arrival_P2_i,
        next_arrival_P3_h, next_arrival_P3_i,
        next_arrival_P4_h, next_arrival_P4_i,
        next_arrival_C_h, next_arrival_C_i,
        next_complete
    ]
    # Next event
    t = min(Event)
    # Main loop: jump from event to event until the horizon T.  Each PSC
    # branch below is identical except for the stream it serves: draw the
    # redirect decision, admit to a bed (or queue when full), then schedule
    # that stream's next arrival.  stype 1 = hemorrhagic, 2 = ischemic.
    while t < T:
        Time.append(t)
        LenQ.append(len(Q))
        LenX.append(len(X))
        # One-hot vector marking the current occupancy level; weighted by
        # the sojourn time (t - tp) at the bottom of the loop.
        Update_vec = np.zeros(cc + 1)
        Update_vec[len(X)] = 1
        if t == next_arrival_P1_h:
            patid += 1
            if redirect(red_prop_h1) == 1:
                sent += 1
                stype = 1
                if len(X) >= cc:
                    Q.append([patid, stype]) # type == 1: hem; type == 2: isch
                else:
                    LOS = next_service(service_rate_h)
                    X.append([patid, stype, t + LOS])
                    next_complete = min(sublist[2] for sublist in X)
            next_arrival_P1_h = t + next_arrival(arrival_rate_p_h)
        elif t == next_arrival_P1_i:
            patid += 1
            if redirect(red_prop_i1) == 1:
                sent += 1
                stype = 2
                if len(X) >= cc:
                    Q.append([patid, stype])
                else:
                    LOS = next_service(service_rate_i)
                    X.append([patid, stype, t + LOS])
                    next_complete = min(sublist[2] for sublist in X)
            next_arrival_P1_i = t + next_arrival(arrival_rate_p_i)
        elif t == next_arrival_P2_h:
            patid += 1
            if redirect(red_prop_h2) == 1:
                sent += 1
                stype = 1
                if len(X) >= cc:
                    Q.append([patid, stype])
                else:
                    LOS = next_service(service_rate_h)
                    X.append([patid, stype, t + LOS])
                    next_complete = min(sublist[2] for sublist in X)
            next_arrival_P2_h = t + next_arrival(arrival_rate_p_h)
        elif t == next_arrival_P2_i:
            patid += 1
            if redirect(red_prop_i2) == 1:
                sent += 1
                stype = 2
                if len(X) >= cc:
                    Q.append([patid, stype])
                else:
                    LOS = next_service(service_rate_i)
                    X.append([patid, stype, t + LOS])
                    next_complete = min(sublist[2] for sublist in X)
            next_arrival_P2_i = t + next_arrival(arrival_rate_p_i)
        elif t == next_arrival_P3_h:
            patid += 1
            if redirect(red_prop_h3) == 1:
                sent += 1
                stype = 1
                if len(X) >= cc:
                    Q.append([patid, stype])
                else:
                    LOS = next_service(service_rate_h)
                    X.append([patid, stype, t + LOS])
                    next_complete = min(sublist[2] for sublist in X)
            next_arrival_P3_h = t + next_arrival(arrival_rate_p_h)
        elif t == next_arrival_P3_i:
            patid += 1
            if redirect(red_prop_i3) == 1:
                sent += 1
                stype = 2
                if len(X) >= cc:
                    Q.append([patid, stype])
                else:
                    LOS = next_service(service_rate_i)
                    X.append([patid, stype, t + LOS])
                    next_complete = min(sublist[2] for sublist in X)
            next_arrival_P3_i = t + next_arrival(arrival_rate_p_i)
        elif t == next_arrival_P4_h:
            patid += 1
            if redirect(red_prop_h4) == 1:
                sent += 1
                stype = 1
                if len(X) >= cc:
                    Q.append([patid, stype])
                else:
                    LOS = next_service(service_rate_h)
                    X.append([patid, stype, t + LOS])
                    next_complete = min(sublist[2] for sublist in X)
            next_arrival_P4_h = t + next_arrival(arrival_rate_p_h)
        elif t == next_arrival_P4_i:
            patid += 1
            if redirect(red_prop_i4) == 1:
                sent += 1
                stype = 2
                if len(X) >= cc:
                    Q.append([patid, stype])
                else:
                    LOS = next_service(service_rate_i)
                    X.append([patid, stype, t + LOS])
                    next_complete = min(sublist[2] for sublist in X)
            next_arrival_P4_i = t + next_arrival(arrival_rate_p_i)
        elif t == next_arrival_C_h:
            # CSC walk-ins are always admitted (no redirect draw); overflow
            # is counted when they find every bed busy.
            patid += 1
            csc_entered += 1
            stype = 1
            if len(X) >= cc:
                overflown += 1
                Q.append([patid, stype])
            else:
                LOS = next_service(service_rate_h)
                X.append([patid, stype, t + LOS])
                next_complete = min(sublist[2] for sublist in X)
            next_arrival_C_h = t + next_arrival(arrival_rate_c_h)
        elif t == next_arrival_C_i:
            patid += 1
            csc_entered += 1
            stype = 2
            if len(X) >= cc:
                overflown += 1
                Q.append([patid, stype])
            else:
                LOS = next_service(service_rate_i)
                X.append([patid, stype, t + LOS])
                next_complete = min(sublist[2] for sublist in X)
            next_arrival_C_i = t + next_arrival(arrival_rate_c_i)
        elif t == next_complete:
            # Service completion: find and discharge the patient whose
            # completion time equals the current event time, then reschedule
            # the next completion (or infinity when no bed is occupied).
            compl = min(sublist[2] for sublist in X)
            for i in np.arange(len(X)):
                if X[i][2] == compl:
                    ind = i
            X.pop(ind)
            if len(X) > 0 :
                next_complete = min(sublist[2] for sublist in X)
            else:
                next_complete = m.inf
        Event = [
            next_arrival_P1_h, next_arrival_P1_i,
            next_arrival_P2_h, next_arrival_P2_i,
            next_arrival_P3_h, next_arrival_P3_i,
            next_arrival_P4_h, next_arrival_P4_i,
            next_arrival_C_h, next_arrival_C_i,
            next_complete
        ]
        tp = t
        t = min(Event)
        # Accumulate time-weighted statistics over the sojourn (tp, t).
        total_busy_serv1 = total_busy_serv1 + len(X)*(t-tp)
        Dist = Dist + Update_vec * (t - tp)
        # Sanity check: occupancy must never exceed the bed capacity.
        if len(X) >= cc + 1:
            print("ERROR!")
            break
    return(Dist)
def queue_customization(
    psc_hemorrhagic, psc_ischemic,
    csc_hemorrhagic, csc_ischemic,
    LOS_hemorrhagic, LOS_ischemic,
    psc1_transfer_rate_hemorrhagic,
    psc1_transfer_rate_ischemic,
    psc2_transfer_rate_hemorrhagic,
    psc2_transfer_rate_ischemic,
    psc3_transfer_rate_hemorrhagic,
    psc3_transfer_rate_ischemic,
    csc_bed_capacity, T, repl_num):
    """Run ``repl_num`` replications of the base-network simulation with
    user-supplied arrival rates, lengths of stay and transfer rates, then
    plot the bed-occupancy distribution and the overflow probability and
    print the overflow estimate with its 95% confidence interval.

    Figures are written to bed_distribution_cust.{pdf,jpg} and
    overflow_probability_cust.{pdf,jpg}.
    """
    # Collect one time-normalized occupancy distribution per replication.
    occupancy_samples = []
    for _ in np.arange(repl_num):
        dist = queue(
            c1 = csc_bed_capacity, c2 = csc_bed_capacity, c3 = csc_bed_capacity,
            arrival_rate_p_h = psc_hemorrhagic, arrival_rate_p_i = psc_ischemic,
            arrival_rate_c_h = csc_hemorrhagic, arrival_rate_c_i = csc_ischemic,
            service_rate_h = 1./LOS_hemorrhagic, service_rate_i = 1./LOS_ischemic,
            psc1_tr_h = psc1_transfer_rate_hemorrhagic, ph = psc1_transfer_rate_ischemic,
            psc2_tr_h = psc2_transfer_rate_hemorrhagic, psc2_tr_i = psc2_transfer_rate_ischemic,
            psc3_tr_h = psc3_transfer_rate_hemorrhagic, psc3_tr_i = psc3_transfer_rate_ischemic,
            T = T)
        occupancy_samples.append(dist / T)
    # Replication mean and 95% CI half-width for every occupancy level.
    mean_dist = np.mean(occupancy_samples, axis=0)
    ci_half = 1.96 * np.std(occupancy_samples, axis=0) / np.sqrt(repl_num)
    # Bed-occupancy distribution bar chart.
    fig, ax = plt.subplots(1, 1)
    fig.subplots_adjust(hspace=0.5)
    ax.bar(np.arange(csc_bed_capacity + 1), mean_dist, yerr=ci_half)
    fig.text(0.5, 0.0, 'Bed occupancy', ha='center')
    fig.text(0.0, 0.5, 'Occupancy probability', va='center', rotation='vertical')
    plt.savefig("bed_distribution_cust.pdf")
    plt.savefig("bed_distribution_cust.jpg")
    # Overflow probability = probability that every bed is occupied,
    # i.e. the last entry of the occupancy distribution.
    plt.figure()
    plt.bar([psc1_transfer_rate_ischemic], [mean_dist[-1]], yerr=[ci_half[-1]])
    plt.xlabel("Transfer rates at PSC 1")
    plt.ylabel("Overflow probability")
    plt.savefig("overflow_probability_cust.pdf")
    plt.savefig("overflow_probability_cust.jpg")
    # Report the overflow estimate as a percentage.
    overflow_pct = mean_dist[-1] * 100
    ci_pct = ci_half[-1] * 100
    print("Overflow probability is {mean:.2f} +/- {CI:.2f}" \
        .format(mean = overflow_pct, CI = ci_pct))
|
Python
| 726
| 33.280991
| 181
|
/stroke_functions.py
| 0.455259
| 0.423831
|
hjtree0825/stroke_network_ctmc_simulations
|
refs/heads/main
|
from stroke_functions import *
repl_num = 100

def _load_results(path):
    """Load a pickled 3-item result list (rates 0.15 / 0.35 / 0.55) from *path*.

    The scenario scripts save each pickle as [rate15, rate35, rate55];
    the ``with`` block guarantees the file handle is closed even when
    unpickling fails (the original left handles open on error).
    """
    with open(path, "rb") as handle:
        return pickle.load(handle)

# Base case
Mean1, Mean2, Mean3 = _load_results("base_mean.pkl")
STD1, STD2, STD3 = _load_results("base_std.pkl")
# Base case + added capacity
Mean1_cap, Mean2_cap, Mean3_cap = _load_results("base_cap_mean.pkl")
STD1_cap, STD2_cap, STD3_cap = _load_results("base_cap_std.pkl")
# Expanded case
Mean1_psc, Mean2_psc, Mean3_psc = _load_results("base_psc_mean.pkl")
STD1_psc, STD2_psc, STD3_psc = _load_results("base_psc_std.pkl")
# Expanded case + added capacity
Mean1_psc_cap, Mean2_psc_cap, Mean3_psc_cap = _load_results("base_psc_cap_mean.pkl")
STD1_psc_cap, STD2_psc_cap, STD3_psc_cap = _load_results("base_psc_cap_std.pkl")
# Expanded case + reduced transfer rates
Mean1_psc_red, Mean2_psc_red, Mean3_psc_red = _load_results("base_psc_red_mean.pkl")
STD1_psc_red, STD2_psc_red, STD3_psc_red = _load_results("base_psc_red_std.pkl")
labels = ["0.15", "0.35", "0.55"]

def _overflow_means(mean_lists):
    """Last-bin (all-beds-busy) probability for each transfer-rate result."""
    return [mean[0][-1] for mean in mean_lists]

def _overflow_cis(std_lists):
    """95% CI half-widths matching _overflow_means, over repl_num replications."""
    return [1.96 * std[0][-1] / np.sqrt(repl_num) for std in std_lists]

M1 = _overflow_means([Mean1, Mean2, Mean3])
M2 = _overflow_means([Mean1_psc, Mean2_psc, Mean3_psc])
M3 = _overflow_means([Mean1_psc_red, Mean2_psc_red, Mean3_psc_red])
M4 = _overflow_means([Mean1_psc_cap, Mean2_psc_cap, Mean3_psc_cap])
x = np.arange(len(labels))  # the label locations
width = 0.125  # the width of the bars
fig, ax = plt.subplots(figsize=(12, 8), dpi=100)
rects1 = ax.bar(x - 4.5*width/3, M1, width, yerr=_overflow_cis([STD1, STD2, STD3]), label='Base case')
rects2 = ax.bar(x - 1.5*width/3, M2, width, yerr=_overflow_cis([STD1_psc, STD2_psc, STD3_psc]), label='Expanded case')
rects3 = ax.bar(x + 1.5*width/3, M3, width, yerr=_overflow_cis([STD1_psc_red, STD2_psc_red, STD3_psc_red]), label='Expanded case, reduced transfer')
rects4 = ax.bar(x + 4.5*width/3, M4, width, yerr=_overflow_cis([STD1_psc_cap, STD2_psc_cap, STD3_psc_cap]), label='Expanded case, additional Neuro-ICU beds')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Overflow probability')
# Bug fix: the original called ax.set_ylabel twice, so the y-axis label was
# silently overwritten; the transfer-rate caption belongs on the x axis
# (consistent with plt.xlabel("Transfer rates at PSC 1") in the other scripts).
ax.set_xlabel('Transfer rates at PSC 1')
ax.set_title('Overflow probability by case')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.set_yticks([0.00, 0.10, 0.20, 0.30, 0.40, 0.50])
ax.legend()
plt.savefig("6_overflow_prob_by_case.pdf")
plt.savefig("6_overflow_prob_by_case.jpg")
|
Python
| 102
| 38.911766
| 294
|
/stroke_overall_comparison.py
| 0.659162
| 0.591377
|
hjtree0825/stroke_network_ctmc_simulations
|
refs/heads/main
|
from stroke_functions import *
# Initialize
T = 10000        # length of each simulated run (days)
repl_num = 10    # NOTE(review): the other scenario scripts use 100 replications
                 # and stroke_overall_comparison divides every CI by sqrt(100);
                 # confirm 10 is intentional here.
service_rate_h = 1./7   # NOTE(review): defined but never passed to queue_ext,
service_rate_i = 1./3   # which falls back to its identical defaults.
# Mean/STD of the occupancy distribution, one pair per transfer-rate band.
Mean1_psc = []
STD1_psc = []
Mean2_psc = []
STD2_psc = []
Mean3_psc = []
STD3_psc = []
Mean4_psc = []
STD4_psc = []
Mean5_psc = []
STD5_psc = []
Mean6_psc = []
STD6_psc = []
cc0 = 15 # number of CSC beds when transfer rate is 15%
cc1 = 15 # number of CSC beds when transfer rate is 35%
cc2 = 15 # number of CSC beds when transfer rate is 55%
# np.arange(0.15, 0.66, 0.2) yields (up to float error) 0.15, 0.35 and 0.55,
# so only the Mean1/3/5 branches below are ever reached.
for ph in np.arange(0.15, 0.66, 0.2):
    X_outer = []
    cc = csc_bed(ph, cc0, cc1, cc2)   # NOTE(review): result unused below
    for iteration in np.arange(repl_num):
        Dist = queue_ext(ph, c1 = cc0, c2 = cc1, c3 = cc2, T = T)
        X_outer.append(Dist/T)   # normalize time-in-state to a distribution
    if 0.14 <= ph <= 0.16:
        Mean1_psc.append(np.mean(X_outer, axis = 0))
        STD1_psc.append(np.std(X_outer, axis = 0))
    elif 0.24 <= ph <= 0.26:
        Mean2_psc.append(np.mean(X_outer, axis = 0))
        STD2_psc.append(np.std(X_outer, axis = 0))
    elif 0.34 <= ph <= 0.36:
        Mean3_psc.append(np.mean(X_outer, axis = 0))
        STD3_psc.append(np.std(X_outer, axis = 0))
    elif 0.44 <= ph <= 0.46:
        Mean4_psc.append(np.mean(X_outer, axis = 0))
        STD4_psc.append(np.std(X_outer, axis = 0))
    elif 0.54 <= ph <= 0.56:
        Mean5_psc.append(np.mean(X_outer, axis = 0))
        STD5_psc.append(np.std(X_outer, axis = 0))
    elif 0.64 <= ph <= 0.66:
        Mean6_psc.append(np.mean(X_outer, axis = 0))
        STD6_psc.append(np.std(X_outer, axis = 0))
    else:
        print("ERROR")
# Bed-occupancy distributions for transfer rates 0.15 / 0.35 / 0.55.
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.5)
ax1.bar(np.arange(cc0+1), Mean1_psc[0], yerr = 1.96*STD1_psc[0]/np.sqrt(repl_num))
ax2.bar(np.arange(cc1+1), Mean3_psc[0], yerr = 1.96*STD3_psc[0]/np.sqrt(repl_num))
ax3.bar(np.arange(cc2+1), Mean5_psc[0], yerr = 1.96*STD5_psc[0]/np.sqrt(repl_num))
ax1.title.set_text('(a)')
ax2.title.set_text('(b)')
ax3.title.set_text('(c)')
fig.text(0.5, 0.0, 'Bed occupancy', ha='center')
fig.text(0.0, 0.5, 'Occupancy probability', va='center', rotation='vertical')
plt.savefig("3_bed_distribution_add_psc.pdf")
plt.savefig("3_bed_distribution_add_psc.jpg")
# Overflow probability = last entry of the occupancy distribution.
plt.figure()
plt.bar(["0.15", "0.35", "0.55"],
        [
        Mean1_psc[0][len(Mean1_psc[0])-1],
        Mean3_psc[0][len(Mean3_psc[0])-1],
        Mean5_psc[0][len(Mean5_psc[0])-1]
        ],
        yerr = [
        1.96*STD1_psc[0][len(STD1_psc[0])-1]/np.sqrt(repl_num),
        1.96*STD3_psc[0][len(STD3_psc[0])-1]/np.sqrt(repl_num),
        1.96*STD5_psc[0][len(STD5_psc[0])-1]/np.sqrt(repl_num)
        ])
plt.xlabel("Transfer rates at PSC 1")
plt.ylabel("Overflow probability")
plt.savefig("3_overflow_probability_add_psc.pdf")
plt.savefig("3_overflow_probability_add_psc.jpg")
# Persist results for stroke_overall_comparison; the with-blocks close the
# handles even if pickling fails (the original used bare open/close).
save_list = [Mean1_psc, Mean3_psc, Mean5_psc]
with open("base_psc_mean.pkl", "wb") as open_file:
    pickle.dump(save_list, open_file)
save_list = [STD1_psc, STD3_psc, STD5_psc]
with open("base_psc_std.pkl", "wb") as open_file:
    pickle.dump(save_list, open_file)
|
Python
| 93
| 31.67742
| 82
|
/stroke_expanded.py
| 0.575893
| 0.505102
|
hjtree0825/stroke_network_ctmc_simulations
|
refs/heads/main
|
from stroke_functions import *
import stroke_base
import stroke_base_add_capacity
import stroke_expanded
import stroke_expanded_reduced_rate
import stroke_expanded_add_capacity
import stroke_overall_comparison
|
Python
| 7
| 29
| 35
|
/stroke_main.py
| 0.817352
| 0.817352
|
hjtree0825/stroke_network_ctmc_simulations
|
refs/heads/main
|
import numpy as np
import random as r
import math as m
import matplotlib.pyplot as plt
import pickle
|
Python
| 5
| 19.200001
| 31
|
/stroke_source.py
| 0.788462
| 0.788462
|
hjtree0825/stroke_network_ctmc_simulations
|
refs/heads/main
|
from stroke_functions import *
# Initialize
T = 10000        # length of each simulated run (days)
repl_num = 100   # replications per transfer rate
service_rate_h = 1./7   # NOTE(review): defined but never passed to queue,
service_rate_i = 1./3   # which falls back to its identical defaults.
# Mean/STD of the occupancy distribution, one pair per transfer-rate band.
Mean1_cap = []
STD1_cap = []
Mean2_cap = []
STD2_cap = []
Mean3_cap = []
STD3_cap = []
Mean4_cap = []
STD4_cap = []
Mean5_cap = []
STD5_cap = []
Mean6_cap = []
STD6_cap = []
cc0 = 15 # number of CSC beds when transfer rate is 15%
cc1 = 16 # number of CSC beds when transfer rate is 35%
cc2 = 17 # number of CSC beds when transfer rate is 55%
# np.arange(0.15, 0.66, 0.2) yields (up to float error) 0.15, 0.35 and 0.55,
# so only the Mean1/3/5 branches below are ever reached.
for ph in np.arange(0.15, 0.66, 0.2):
    X_outer = []
    cc = csc_bed(ph, cc0, cc1, cc2)   # NOTE(review): result unused below
    for iteration in np.arange(repl_num):
        Dist = queue(ph, c1 = cc0, c2 = cc1, c3 = cc2, T = T)
        X_outer.append(Dist/T)   # normalize time-in-state to a distribution
    if 0.14 <= ph <= 0.16:
        Mean1_cap.append(np.mean(X_outer, axis = 0))
        STD1_cap.append(np.std(X_outer, axis = 0))
    elif 0.24 <= ph <= 0.26:
        Mean2_cap.append(np.mean(X_outer, axis = 0))
        STD2_cap.append(np.std(X_outer, axis = 0))
    elif 0.34 <= ph <= 0.36:
        Mean3_cap.append(np.mean(X_outer, axis = 0))
        STD3_cap.append(np.std(X_outer, axis = 0))
    elif 0.44 <= ph <= 0.46:
        Mean4_cap.append(np.mean(X_outer, axis = 0))
        STD4_cap.append(np.std(X_outer, axis = 0))
    elif 0.54 <= ph <= 0.56:
        Mean5_cap.append(np.mean(X_outer, axis = 0))
        STD5_cap.append(np.std(X_outer, axis = 0))
    elif 0.64 <= ph <= 0.66:
        Mean6_cap.append(np.mean(X_outer, axis = 0))
        STD6_cap.append(np.std(X_outer, axis = 0))
    else:
        print("ERROR")
# Bed-occupancy distributions for transfer rates 0.15 / 0.35 / 0.55.
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.5)
ax1.bar(np.arange(cc0+1), Mean1_cap[0], yerr = 1.96*STD1_cap[0]/np.sqrt(repl_num))
ax2.bar(np.arange(cc1+1), Mean3_cap[0], yerr = 1.96*STD3_cap[0]/np.sqrt(repl_num))
ax3.bar(np.arange(cc2+1), Mean5_cap[0], yerr = 1.96*STD5_cap[0]/np.sqrt(repl_num))
ax1.title.set_text('(a)')
ax2.title.set_text('(b)')
ax3.title.set_text('(c)')
fig.text(0.5, 0.0, 'Bed occupancy', ha='center')
fig.text(0.0, 0.5, 'Occupancy probability', va='center', rotation='vertical')
plt.savefig("2_bed_distribution_base_add_cap.pdf")
plt.savefig("2_bed_distribution_base_add_cap.jpg")
# Overflow probability = last entry of the occupancy distribution.
plt.figure()
plt.bar(["0.15", "0.35", "0.55"],
        [
        Mean1_cap[0][len(Mean1_cap[0])-1],
        Mean3_cap[0][len(Mean3_cap[0])-1],
        Mean5_cap[0][len(Mean5_cap[0])-1]
        ],
        yerr = [
        1.96*STD1_cap[0][len(STD1_cap[0])-1]/np.sqrt(repl_num),
        1.96*STD3_cap[0][len(STD3_cap[0])-1]/np.sqrt(repl_num),
        1.96*STD5_cap[0][len(STD5_cap[0])-1]/np.sqrt(repl_num)
        ])
plt.xlabel("Transfer rates at PSC 1")
plt.ylabel("Overflow probability")
plt.savefig("2_overflow_probability_base_add_cap.pdf")
plt.savefig("2_overflow_probability_base_add_cap.jpg")
# Persist results for stroke_overall_comparison; the with-blocks close the
# handles even if pickling fails (the original used bare open/close).
save_list = [Mean1_cap, Mean3_cap, Mean5_cap]
with open("base_cap_mean.pkl", "wb") as open_file:
    pickle.dump(save_list, open_file)
save_list = [STD1_cap, STD3_cap, STD5_cap]
with open("base_cap_std.pkl", "wb") as open_file:
    pickle.dump(save_list, open_file)
|
Python
| 93
| 31.860214
| 82
|
/stroke_base_add_capacity.py
| 0.577594
| 0.506823
|
hjtree0825/stroke_network_ctmc_simulations
|
refs/heads/main
|
from stroke_functions import *
# Initialize
T = 10000        # length of each simulated run (days)
repl_num = 100   # replications per transfer rate
service_rate_h = 1./7   # NOTE(review): defined but never passed to queue_ext,
service_rate_i = 1./3   # which falls back to its identical defaults.
# Mean/STD of the occupancy distribution, one pair per transfer-rate band.
Mean1_psc_red = []
STD1_psc_red = []
Mean2_psc_red = []
STD2_psc_red = []
Mean3_psc_red = []
STD3_psc_red = []
Mean4_psc_red = []
STD4_psc_red = []
Mean5_psc_red = []
STD5_psc_red = []
Mean6_psc_red = []
STD6_psc_red = []
cc0 = 15 # number of CSC beds when transfer rate is 15%
cc1 = 15 # number of CSC beds when transfer rate is 35%
cc2 = 15 # number of CSC beds when transfer rate is 55%
# np.arange(0.15, 0.66, 0.2) yields (up to float error) 0.15, 0.35 and 0.55,
# so only the Mean1/3/5 branches below are ever reached.
for ph in np.arange(0.15, 0.66, 0.2):
    X_outer = []
    cc = csc_bed(ph, cc0, cc1, cc2)   # NOTE(review): result unused below
    for iteration in np.arange(repl_num):
        # Reduced ischemic transfer rates (0.025 instead of 0.15) at the
        # three secondary PSCs of the expanded network.
        Dist = queue_ext(ph, c1 = cc0, c2 = cc1, c3 = cc2,
                         psc2_tr_i = 0.025,
                         psc3_tr_i = 0.025,
                         psc4_tr_i = 0.025,
                         T = T)
        X_outer.append(Dist/T)   # normalize time-in-state to a distribution
    if 0.14 <= ph <= 0.16:
        Mean1_psc_red.append(np.mean(X_outer, axis = 0))
        STD1_psc_red.append(np.std(X_outer, axis = 0))
    elif 0.24 <= ph <= 0.26:
        Mean2_psc_red.append(np.mean(X_outer, axis = 0))
        STD2_psc_red.append(np.std(X_outer, axis = 0))
    elif 0.34 <= ph <= 0.36:
        Mean3_psc_red.append(np.mean(X_outer, axis = 0))
        STD3_psc_red.append(np.std(X_outer, axis = 0))
    elif 0.44 <= ph <= 0.46:
        Mean4_psc_red.append(np.mean(X_outer, axis = 0))
        STD4_psc_red.append(np.std(X_outer, axis = 0))
    elif 0.54 <= ph <= 0.56:
        Mean5_psc_red.append(np.mean(X_outer, axis = 0))
        STD5_psc_red.append(np.std(X_outer, axis = 0))
    elif 0.64 <= ph <= 0.66:
        Mean6_psc_red.append(np.mean(X_outer, axis = 0))
        STD6_psc_red.append(np.std(X_outer, axis = 0))
    else:
        print("ERROR")
# Bed-occupancy distributions for transfer rates 0.15 / 0.35 / 0.55.
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.5)
ax1.bar(np.arange(cc0+1), Mean1_psc_red[0], yerr = 1.96*STD1_psc_red[0]/np.sqrt(repl_num))
ax2.bar(np.arange(cc1+1), Mean3_psc_red[0], yerr = 1.96*STD3_psc_red[0]/np.sqrt(repl_num))
ax3.bar(np.arange(cc2+1), Mean5_psc_red[0], yerr = 1.96*STD5_psc_red[0]/np.sqrt(repl_num))
ax1.title.set_text('(a)')
ax2.title.set_text('(b)')
ax3.title.set_text('(c)')
fig.text(0.5, 0.0, 'Bed occupancy', ha='center')
fig.text(0.0, 0.5, 'Occupancy probability', va='center', rotation='vertical')
plt.savefig("4_bed_distribution_add_psc_red.pdf")
plt.savefig("4_bed_distribution_add_psc_red.jpg")
# Persist results for stroke_overall_comparison; the with-blocks close the
# handles even if pickling fails (the original used bare open/close).
save_list = [Mean1_psc_red, Mean3_psc_red, Mean5_psc_red]
with open("base_psc_red_mean.pkl", "wb") as open_file:
    pickle.dump(save_list, open_file)
save_list = [STD1_psc_red, STD3_psc_red, STD5_psc_red]
with open("base_psc_red_std.pkl", "wb") as open_file:
    pickle.dump(save_list, open_file)
|
Python
| 80
| 33.299999
| 90
|
/stroke_expanded_reduced_rate.py
| 0.574664
| 0.508493
|
hjtree0825/stroke_network_ctmc_simulations
|
refs/heads/main
|
from stroke_functions import *
# Initialize
T = 10000        # length of each simulated run (days)
repl_num = 100   # replications per transfer rate
service_rate_h = 1./7   # NOTE(review): defined but never passed to
service_rate_i = 1./3   # queue_base_only, which falls back to its defaults.
# Mean/STD of the occupancy distribution, one pair per transfer-rate band.
Mean1 = []
STD1 = []
Mean2 = []
STD2 = []
Mean3 = []
STD3 = []
Mean4 = []
STD4 = []
Mean5 = []
STD5 = []
Mean6 = []
STD6 = []
# Mean/STD of the time-average number of occupied beds per band.
MeanBed1 = []
MeanBed2 = []
MeanBed3 = []
MeanBed4 = []
MeanBed5 = []
MeanBed6 = []
StdBed1 = []
StdBed2 = []
StdBed3 = []
StdBed4 = []
StdBed5 = []
StdBed6 = []
cc0 = 15 # number of CSC beds when transfer rate is 15%
cc1 = 15 # number of CSC beds when transfer rate is 35%
cc2 = 15 # number of CSC beds when transfer rate is 55%
# np.arange(0.15, 0.66, 0.2) yields (up to float error) 0.15, 0.35 and 0.55,
# so only the 1/3/5 branches below are ever reached.
for ph in np.arange(0.15, 0.66, 0.2):
    X_outer = []
    Mean_outer = []
    cc = csc_bed(ph, cc0, cc1, cc2)   # NOTE(review): result unused below
    for iteration in np.arange(repl_num):
        # queue_base_only returns (time-in-state vector, total bed-busy time).
        Dist, busy_serv = queue_base_only(ph, c1 = cc0, c2 = cc1, c3 = cc2, T = T)
        X_outer.append(Dist/T)          # occupancy distribution
        Mean_outer.append(busy_serv/T)  # time-average occupied beds
    if 0.14 <= ph <= 0.16:
        Mean1.append(np.mean(X_outer, axis = 0))
        STD1.append(np.std(X_outer, axis = 0))
        MeanBed1.append(np.mean(Mean_outer, axis = 0))
        StdBed1.append(np.std(Mean_outer, axis = 0))
    elif 0.24 <= ph <= 0.26:
        Mean2.append(np.mean(X_outer, axis = 0))
        STD2.append(np.std(X_outer, axis = 0))
        MeanBed2.append(np.mean(Mean_outer, axis = 0))
        StdBed2.append(np.std(Mean_outer, axis = 0))
    elif 0.34 <= ph <= 0.36:
        Mean3.append(np.mean(X_outer, axis = 0))
        STD3.append(np.std(X_outer, axis = 0))
        MeanBed3.append(np.mean(Mean_outer, axis = 0))
        StdBed3.append(np.std(Mean_outer, axis = 0))
    elif 0.44 <= ph <= 0.46:
        Mean4.append(np.mean(X_outer, axis = 0))
        STD4.append(np.std(X_outer, axis = 0))
        MeanBed4.append(np.mean(Mean_outer, axis = 0))
        StdBed4.append(np.std(Mean_outer, axis = 0))
    elif 0.54 <= ph <= 0.56:
        Mean5.append(np.mean(X_outer, axis = 0))
        STD5.append(np.std(X_outer, axis = 0))
        MeanBed5.append(np.mean(Mean_outer, axis = 0))
        StdBed5.append(np.std(Mean_outer, axis = 0))
    elif 0.64 <= ph <= 0.66:
        Mean6.append(np.mean(X_outer, axis = 0))
        STD6.append(np.std(X_outer, axis = 0))
        MeanBed6.append(np.mean(Mean_outer, axis = 0))
        StdBed6.append(np.std(Mean_outer, axis = 0))
    else:
        print("ERROR")
# Bed-occupancy distributions for transfer rates 0.15 / 0.35 / 0.55.
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.5)
ax1.bar(np.arange(cc0+1), Mean1[0], yerr = 1.96*STD1[0]/np.sqrt(repl_num))
ax2.bar(np.arange(cc1+1), Mean3[0], yerr = 1.96*STD3[0]/np.sqrt(repl_num))
ax3.bar(np.arange(cc2+1), Mean5[0], yerr = 1.96*STD5[0]/np.sqrt(repl_num))
ax1.title.set_text('(a)')
ax2.title.set_text('(b)')
ax3.title.set_text('(c)')
fig.text(0.5, 0.0, 'Bed occupancy', ha='center')
fig.text(0.0, 0.5, 'Occupancy probability', va='center', rotation='vertical')
plt.savefig("1_bed_distribution_base.pdf")
plt.savefig("1_bed_distribution_base.jpg")
# Overflow probability = last entry of the occupancy distribution.
plt.figure()
plt.bar(["0.15", "0.35", "0.55"],
        [
        Mean1[0][len(Mean1[0])-1],
        Mean3[0][len(Mean3[0])-1],
        Mean5[0][len(Mean5[0])-1]
        ],
        yerr = [
        1.96*STD1[0][len(STD1[0])-1]/np.sqrt(repl_num),
        1.96*STD3[0][len(STD3[0])-1]/np.sqrt(repl_num),
        1.96*STD5[0][len(STD5[0])-1]/np.sqrt(repl_num)
        ])
plt.xlabel("Transfer rates at PSC 1")
plt.ylabel("Overflow probability")
plt.savefig("1_overflow_probability_base.pdf")
plt.savefig("1_overflow_probability_base.jpg")
# Mean number of occupied beds per transfer rate.
plt.figure()
plt.bar(["0.15", "0.35", "0.55"],
        [
        MeanBed1[0],
        MeanBed3[0],
        MeanBed5[0]
        ],
        yerr = [
        1.96*StdBed1[0]/np.sqrt(repl_num),
        1.96*StdBed3[0]/np.sqrt(repl_num),
        1.96*StdBed5[0]/np.sqrt(repl_num)
        ]
        )
plt.xlabel("Transfer rates at PSC 1")
plt.ylabel("Mean number of beds occupied")
plt.savefig("1_mean_base.pdf")
plt.savefig("1_mean_base.jpg")
# Persist results for stroke_overall_comparison; the with-blocks close the
# handles even if pickling fails (the original used bare open/close).
save_list = [Mean1, Mean3, Mean5]
with open("base_mean.pkl", "wb") as open_file:
    pickle.dump(save_list, open_file)
save_list = [STD1, STD3, STD5]
with open("base_std.pkl", "wb") as open_file:
    pickle.dump(save_list, open_file)
|
Python
| 139
| 29.093525
| 82
|
/stroke_base.py
| 0.558306
| 0.490745
|
hjtree0825/stroke_network_ctmc_simulations
|
refs/heads/main
|
from stroke_functions import *
############################################################################
############################################################################
############################################################################
# Simply change the numbers in this section.
# LOS (in days)
LOS_hemorrhagic = 7
LOS_ischemic = 3
# Number of beds at CSC Neuro-ICU
csc_bed_capacity = 15
# Average daily number of stroke patients examined at PSC
psc_hemorrhagic = 0.3
psc_ischemic = 1.7
# Average daily number of stroke patients examined at CSC
csc_hemorrhagic = 0.45
csc_ischemic = 2.55
# Transfer rates
# (proportion of each PSC's patients transferred to the CSC, by stroke type)
# (i) PSC 1
# hemorrhagic
psc1_transfer_rate_hemorrhagic = 0.95
# ischemic
psc1_transfer_rate_ischemic = 0.15
# (ii) PSC 2
# hemorrhagic
psc2_transfer_rate_hemorrhagic = 0.95
# ischemic
psc2_transfer_rate_ischemic = 0.15
# (iii) PSC 3
# hemorrhagic
psc3_transfer_rate_hemorrhagic = 0.95
# ischemic
psc3_transfer_rate_ischemic = 0.15
############################################################################
############################################################################
############################################################################
# Initialize (no need to change, in general)
# T is the simulated horizon per run (days); repl_num is the number of
# independent replications averaged for the confidence intervals.
T = 10000
repl_num = 100
# Run simulations
# queue_customization (stroke_functions) runs the simulation, saves the
# bed-distribution and overflow-probability figures, and prints the
# overflow estimate with its 95% CI.
queue_customization(
    psc_hemorrhagic = psc_hemorrhagic, psc_ischemic = psc_ischemic,
    csc_hemorrhagic = csc_hemorrhagic, csc_ischemic = csc_ischemic,
    LOS_hemorrhagic = LOS_hemorrhagic, LOS_ischemic = LOS_ischemic,
    psc1_transfer_rate_hemorrhagic = psc1_transfer_rate_hemorrhagic,
    psc1_transfer_rate_ischemic = psc1_transfer_rate_ischemic,
    psc2_transfer_rate_hemorrhagic = psc2_transfer_rate_hemorrhagic,
    psc2_transfer_rate_ischemic = psc2_transfer_rate_ischemic,
    psc3_transfer_rate_hemorrhagic = psc3_transfer_rate_hemorrhagic,
    psc3_transfer_rate_ischemic = psc3_transfer_rate_ischemic,
    csc_bed_capacity = csc_bed_capacity,
    T = T, repl_num = repl_num
)
|
Python
| 65
| 29.169231
| 76
|
/stroke_customization.py
| 0.557692
| 0.527613
|
MayankAgarwal/Word-ladder
|
refs/heads/master
|
''' Implements various search mechanisms '''
from node import Node
import os
class Search(object):
    ''' Contains search methods '''

    def __init__(self, start_state, end_state):
        self.start_state = start_state
        self.end_state = end_state

        # Absolute path to the bundled english dictionary, resolved
        # relative to this module so the script works from any cwd.
        dir_path = os.path.dirname(os.path.abspath(__file__))
        self.dict_path = os.path.join(dir_path, "resources", "wordlist.txt")
        self.dict_path = os.path.normpath(self.dict_path)

        self.dictionary_list = self.load_dict_into_list()

    def load_dict_into_list(self):
        ''' Load the dictionary file into a list of stripped words.

        Returns an empty list when the wordlist cannot be read.  (The
        original crashed in ``finally`` with an UnboundLocalError on ``f``
        when ``open`` itself failed, and returned None on other IOErrors.)
        '''
        try:
            # ``with`` guarantees the handle is closed on every path.
            with open(self.dict_path, 'r') as wordfile:
                return [word.strip() for word in wordfile]
        except IOError:
            return []

    def astar_search(self):
        ''' Implements A-star search.

        Returns the goal Node (its parent chain encodes the ladder), or the
        string "ERROR: No path exists" when the fringe is exhausted.
        '''
        visited_words = set()  # set membership is O(1); the original list was O(n)
        current_node = Node(self.start_state, 0, self.end_state)
        fringe = [current_node]

        while not current_node.is_state_result():
            visited_words.add(current_node.state)
            for node in current_node.get_next_nodes(self.dictionary_list):
                if node.state not in visited_words:
                    fringe.append(node)
            fringe.remove(current_node)
            # Bug fix: the exhaustion test must run *after* removing the
            # expanded node.  The original tested before expansion (where
            # the fringe always still held current_node), so an exhausted
            # fringe crashed with IndexError inside the selector instead of
            # returning the error message.
            if not fringe:
                return "ERROR: No path exists"
            current_node = self.__get_least_cost_astar(fringe)
        return current_node

    @classmethod
    def __get_least_cost_astar(cls, fringe):
        ''' Returns the least costing element from fringe.

        Cost is f(n) = g(n) + h(n): depth so far plus heuristic distance.
        ``min`` returns the first minimal element, matching the stable
        ``sorted(...)[0]`` of the original without sorting the whole list.
        '''
        return min(fringe, key=lambda node: node.depth + node.h_distance)
if __name__ == '__main__':
    # NOTE(review): ``raw_input`` and the ``print`` statement below are
    # Python 2 only; this entry point cannot run under Python 3.
    word1 = raw_input("Enter 1st word: ")
    word2 = raw_input("Enter 2nd word: ")
    temp = Search(word1, word2)
    result = temp.astar_search()
    # Walk the parent links back from the goal node to rebuild the ladder.
    # NOTE(review): astar_search may return the string "ERROR: No path
    # exists", which has no .state attribute -- this loop would then raise
    # AttributeError; confirm whether that case needs handling here.
    path = []
    while result is not None:
        path.insert(0, result.state)
        result = result.parent
    print " -> ".join(path)
|
Python
| 92
| 23.293478
| 79
|
/search/search.py
| 0.561074
| 0.557047
|
MayankAgarwal/Word-ladder
|
refs/heads/master
|
''' Heuristic class holds the heuristic functions used for A* search '''
def levenshtein_distance(word1, word2, i=None, j=None):
    '''
    Returns the levenshtein distance between word1[:i] and word2[:j].

    Args:
        1) word1: 1st word
        2) word2: 2nd word
        3) i: prefix length of word1 to compare (defaults to len(word1))
        4) j: prefix length of word2 to compare (defaults to len(word2))

    Performance fix: the original used naive triple-branch recursion,
    which is exponential in the word lengths; this Wagner-Fischer
    dynamic-programming version computes the same value in O(i*j) time
    and O(j) space.
    '''
    if i is None:
        i = len(word1)
    if j is None:
        j = len(word2)

    # previous_row[c] holds the distance between word1[:r-1] and word2[:c].
    # Row 0 is the distance from the empty prefix: c insertions.
    previous_row = list(range(j + 1))
    for r in range(1, i + 1):
        current_row = [r]  # distance from word1[:r] to the empty prefix
        for c in range(1, j + 1):
            substitution_cost = 0 if word1[r - 1] == word2[c - 1] else 1
            current_row.append(min(
                previous_row[c] + 1,                    # deletion
                current_row[c - 1] + 1,                 # insertion
                previous_row[c - 1] + substitution_cost  # substitution/match
            ))
        previous_row = current_row
    return previous_row[j]
|
Python
| 30
| 22.433332
| 72
|
/search/heuristic.py
| 0.584637
| 0.534851
|
MayankAgarwal/Word-ladder
|
refs/heads/master
|
''' Search specification for Word ladder problem '''
import os
import re
import heuristic
class Node(object):
    ''' Represents a node in the word ladder graph i.e. a word '''

    def __init__(self, state, depth, result_state, parent=None):
        # The word this node holds, how deep it sits in the search tree,
        # the goal word the search is driving toward, and the node that
        # generated it (None for the root).
        self.state = state
        self.depth = depth
        self.result_state = result_state
        self.parent = parent
        # Heuristic estimate of how far this word is from the goal word.
        self.h_distance = heuristic.levenshtein_distance(self.state, self.result_state)

    def is_state_result(self):
        ''' True when this node's word matches the goal word (ignoring
        case and surrounding whitespace). '''
        current = self.state.strip().lower()
        goal = self.result_state.strip().lower()
        return current == goal

    def __generate_adj_words_regex__(self):
        '''
        Build one alternation regex matching every word a single character
        modification away from state: a character prepended, appended, or
        substituted at any interior position.
        '''
        alternatives = [r"^\w" + self.state + r"$", r"^" + self.state + r"\w$"]
        anchored = "^" + self.state + "$"
        for pos in xrange(1, len(anchored) - 1):
            alternatives.append(anchored[0:pos] + r"\w" + anchored[pos+1:])
        return "|".join(alternatives)

    def __get_matching_words__(self, re_exp, wordlist):
        ''' Returns the wordlist entries matching re_exp, excluding the
        node's own word. '''
        pattern = re.compile(re_exp, re.IGNORECASE)
        own_word = self.state.lower()
        return [word.strip() for word in wordlist
                if pattern.search(word) and word.lower() != own_word]

    def get_next_nodes(self, wordlist):
        ''' Returns the next nodes of this node. '''
        adjacency_regex = self.__generate_adj_words_regex__()
        neighbours = self.__get_matching_words__(adjacency_regex, wordlist)
        return [Node(word, self.depth + 1, self.result_state, self)
                for word in neighbours]
|
Python
| 74
| 30.472973
| 87
|
/search/node.py
| 0.596228
| 0.594085
|
covertspatandemos/git_demo_2
|
refs/heads/main
|
#!/usr/bin/env python
# Emit each demo character on its own line.
for symbol in ('a', 'b', 'c', 'w', 'x', '1', '2', '3', '4', '5'):
    print(symbol)
|
Python
| 12
| 10.083333
| 21
|
/demo.py
| 0.56391
| 0.526316
|
lukemadera/ml-learning
|
refs/heads/master
|
import numpy as np
import os
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
# Implementing a function to make sure the models share the same gradient
# def ensure_shared_grads(model, shared_model):
# for param, shared_param in zip(model.parameters(), shared_model.parameters()):
# if shared_param.grad is not None:
# return
# shared_param._grad = param.grad
class ActorCritic(nn.Module):
    """Convolutional actor-critic network with a shared torso and two heads:
    ``actor`` (one logit per action) and ``critic`` (a scalar state value).

    NOTE(review): ``numInputs`` is used as the input-channel count of conv1,
    and ``linear1`` assumes the flattened conv output is exactly 192 features.
    Both depend on the spatial size of the input images — confirm against the
    caller's state shape.
    """
    def __init__(self, numActions, numInputs=84):
        super(ActorCritic, self).__init__()
        # Earlier, larger-kernel architecture kept for reference:
        # self.conv1 = nn.Conv2d(numInputs, 32, kernel_size=8, stride=4)
        # self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        # self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        # Four 3x3 stride-2 convs, each halving the spatial resolution.
        self.conv1 = nn.Conv2d(numInputs, 32, 3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
        self.conv4 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
        # 192 must equal the flattened conv output (channels * height * width).
        self.linear1 = nn.Linear(192, 512)
        self.actor = nn.Linear(512, numActions)
        self.critic = nn.Linear(512, 1)
    # In a PyTorch model, you only have to define the forward pass.
    # PyTorch computes the backwards pass for you!
    def forward(self, x):
        """Shared torso: normalize, conv stack, flatten, linear -> 512 features."""
        # Normalize image pixels (from rgb 0 to 255) to between 0 and 1.
        x = x / 255.
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = F.relu(self.conv4(x))
        # Flatten all conv features per batch element.
        x = x.view(x.size(0), -1)
        x = F.relu(self.linear1(x))
        return x
    # Only the Actor head
    def get_action_probs(self, x):
        """Action probabilities: softmax over the actor head's logits."""
        x = self(x)
        actionProbs = F.softmax(self.actor(x), dim=1)
        # Clamp away exact 0/1 — presumably to keep a later log() finite.
        actionProbs = torch.clamp(actionProbs, 0.0001, 0.9999)
        return actionProbs
    # Only the Critic head
    def getStateValue(self, x):
        """Scalar state-value estimate from the critic head."""
        x = self(x)
        stateValue = self.critic(x)
        return stateValue
    # Both heads
    def evaluate_actions(self, x):
        """Return (action probabilities, state values) from one shared pass."""
        x = self(x)
        actionProbs = F.softmax(self.actor(x), dim=1)
        actionProbs = torch.clamp(actionProbs, 0.0001, 0.9999)
        stateValues = self.critic(x)
        return actionProbs, stateValues
class A2C():
    """Advantage Actor-Critic (A2C) agent.

    Owns an ActorCritic network and an Adam optimizer (optionally shared
    across workers), and provides action selection, checkpointing, and the
    n-step actor-critic update (``learn``).
    """
    def __init__(self, numActions, gamma=None, learningRate=None, maxGradNorm=0.5,
            entropyCoefficient=0.01, valueLossFactor=0.5, sharedModel=None,
            sharedOptimizer=None, device='cpu'):
        """Build model and optimizer.

        gamma / learningRate fall back to 0.99 / 0.0007 when None; a shared
        optimizer, if given, takes precedence over a locally created Adam.
        """
        self.gamma = gamma if gamma is not None else 0.99
        self.learningRate = learningRate if learningRate is not None else 0.0007
        self.maxGradNorm = maxGradNorm
        self.entropyCoefficient = entropyCoefficient
        self.valueLossFactor = valueLossFactor
        self.model = ActorCritic(numActions).to(device=device)
        self.sharedModel = sharedModel
        self.optimizer = sharedOptimizer if sharedOptimizer is not None else \
            optim.Adam(self.model.parameters(), lr=self.learningRate)
        self.device = device
        print ('A2C hyperparameters',
            'learningRate', self.learningRate,
            'gamma', self.gamma,
            'entropyCoefficient', self.entropyCoefficient,
            'valueLossFactor', self.valueLossFactor,
            'maxGradNorm', self.maxGradNorm)

    def save(self, filePath='training-runs/a2c.pth'):
        """Checkpoint model and optimizer state to *filePath*."""
        torch.save({'state_dict': self.model.state_dict(),
                    'optimizer' : self.optimizer.state_dict(),
                    }, filePath)
        print("=> saved checkpoint... ", filePath)

    def load(self, filePath='training-runs/a2c.pth'):
        """Restore model and optimizer state from *filePath*, if it exists."""
        if os.path.isfile(filePath):
            print("=> loading checkpoint... ", filePath)
            checkpoint = torch.load(filePath)
            self.model.load_state_dict(checkpoint['state_dict'])
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            print("done!")
        else:
            print("no checkpoint found...", filePath)

    def getValues(self, state):
        """Return the policy's action probabilities for *state*.

        NOTE(review): despite the name, this returns action probabilities, not
        critic values — confirm callers expect that.
        """
        stateTensor = torch.tensor(state, dtype=torch.float32, device=self.device)
        return self.model.get_action_probs(stateTensor)

    def pickAction(self, bestAction, validActions=None, randomRatio=-1):
        """Epsilon-greedy pick: with probability *randomRatio* take a random
        valid action; otherwise *bestAction* (replaced by a random valid action
        when it is not in *validActions*)."""
        action = bestAction
        if randomRatio >= 0 and validActions is not None:
            randNum = random.uniform(0, 1)
            if randNum < randomRatio:
                action = np.random.choice(validActions)
        if validActions is not None and action not in validActions:
            action = np.random.choice(validActions)
        return action

    def selectActions(self, states, validActions=None, randomRatio=-1):
        """Select one action per state in the batch.

        Returns (actions list, state values as a plain list).
        """
        statesTensor = torch.tensor(states, dtype=torch.float32, device=self.device)
        actionProbs, stateValues = self.model.evaluate_actions(statesTensor)
        actions = []
        for item in actionProbs:
            bestAction = item.max(0)[1].tolist()
            action = self.pickAction(bestAction, validActions, randomRatio)
            actions.append(action)
        return actions, stateValues.tolist()

    def selectAction(self, state, validActions=None, randomRatio=-1):
        """Select an action for a single (already batch-shaped) state."""
        stateTensor = torch.tensor(state, dtype=torch.float32, device=self.device)
        actionProbs, stateValues = self.model.evaluate_actions(stateTensor)
        # Fix: ``actionProbs.max(maxIndex)`` referenced an undefined name
        # (NameError at runtime). Take the max over the action dimension
        # (dim 1), matching selectActions above.
        _, bestAction = actionProbs.max(1)
        bestAction = bestAction[0].tolist()
        action = self.pickAction(bestAction, validActions, randomRatio)
        return action, stateValues

    def calcActualStateValues(self, rewards, dones, statesTensor):
        """Bootstrapped discounted returns ("actual" state values) for a
        rollout, computed backwards from the final state."""
        rewards = rewards.tolist()
        dones = dones.tolist()
        # R accumulates the cumulative (discounted) reward, back to front.
        R = []
        rewards.reverse()
        if dones[-1]:
            # Terminal final state: nothing to bootstrap from.
            nextReturn = 0
        else:
            # Bootstrap from the critic's estimate of the last state.
            stateTensor = statesTensor[-1].unsqueeze(0)
            nextReturn = self.model.getStateValue(stateTensor)[0][0].tolist()
        # Backup from last state to calculate "true" returns for each state.
        R.append(nextReturn)
        dones.reverse()
        for r in range(1, len(rewards)):
            if dones[r]:
                thisReturn = 0
            else:
                thisReturn = rewards[r] + nextReturn * self.gamma
            R.append(thisReturn)
            nextReturn = thisReturn
        R.reverse()
        stateValuesActual = torch.tensor(R, dtype=torch.float32, device=self.device).unsqueeze(1)
        return stateValuesActual

    def learn(self, states, actions, rewards, dones, values=None):
        """One optimization step from a batch of flattened transitions.

        Computes value loss, policy (action) gain and an entropy bonus, then
        backpropagates the combined loss with gradient clipping.
        """
        statesTensor = torch.tensor(states, dtype=torch.float32, device=self.device)
        actionProbs, stateValuesEst = self.model.evaluate_actions(statesTensor)
        actionLogProbs = actionProbs.log()
        # Log-probs of the actions that were actually taken.
        a = torch.tensor(actions, dtype=torch.int64, device=self.device).view(-1,1)
        chosenActionLogProbs = actionLogProbs.gather(1, a)
        versionToUse = 'v1'
        # v1 - original
        if versionToUse == 'v1':
            stateValuesActual = self.calcActualStateValues(rewards, dones, statesTensor)
            # The advantage is also the TD (Temporal Difference) error.
            advantages = stateValuesActual - stateValuesEst
            valueLoss = advantages.pow(2).mean()
            entropy = (actionProbs * actionLogProbs).sum(1).mean()
            actionGain = (chosenActionLogProbs * advantages).mean()
            totalLoss = self.valueLossFactor * valueLoss - \
                actionGain - self.entropyCoefficient * entropy
        # v2 - http://steven-anker.nl/blog/?p=184
        if versionToUse == 'v2':
            R = 0
            if not dones[-1]:
                stateTensor = statesTensor[-1]
                R = self.model.getStateValue(stateTensor)[0][0].tolist()
            n = len(statesTensor)
            VF = stateValuesEst
            RW = np.zeros(n)
            ADV = np.zeros(n)
            A = np.array(actions)
            for i in range(n - 1, -1, -1):
                R = rewards[i] + self.gamma * R
                RW[i] = R
                ADV[i] = R - VF[i]
            # Fix: torch.from_numpy() takes no ``device`` keyword; create the
            # tensor first, then move it to the target device.
            advantages = torch.from_numpy(ADV).to(self.device)
            rewardsTensor = list(map(lambda x: torch.tensor([x], device=self.device), rewards))
            rewardsTensor = torch.cat(rewardsTensor, 0)
            valueLoss = 0.5 * (stateValuesEst - rewardsTensor).pow(2).mean()
            actionOneHot = chosenActionLogProbs  # Is this correct??
            negLogPolicy = -1 * actionLogProbs
            # Only the output related to the taken action is adjusted; the
            # one-hot multiply forces all other outputs to zero.
            policyLoss = ((negLogPolicy * actionOneHot).sum(1) * advantages.float()).mean()
            entropy = (actionProbs * negLogPolicy).sum(1).mean()
            # Value loss is down-weighted vs. policy loss; the entropy bonus
            # penalizes prematurely converging to a near-deterministic policy.
            totalLoss = self.valueLossFactor * valueLoss + policyLoss - self.entropyCoefficient * entropy
        self.optimizer.zero_grad()
        totalLoss.backward()
        nn.utils.clip_grad_norm_(self.model.parameters(), self.maxGradNorm)
        self.optimizer.step()
|
Python
| 273
| 42.153847
| 126
|
/breakout_ai_a2c.py
| 0.614973
| 0.598167
|
lukemadera/ml-learning
|
refs/heads/master
|
# Decimal is causing rounding errors? E.g. 1/3 is 3.333333333334 and 1/3 of 30 is 9.9999999999990
# We want to keep precision at a max, but don't increase precision for numbers that start as less.
# For example, change 33.33333333333334 to 33.33333333 and keep 1 as 1 (not 1.0000000001)
from decimal import *
# decimals = 8
# def set_decimals(decimals1):
# global decimals
# decimals = decimals1
# def precision_string(decimals):
# if decimals == 0:
# return '1'
# precision = '.'
# # -1 because add a '1' at the end as last digit
# for count in range(0, (decimals-1)):
# precision += '0'
# precision += '1'
# return precision
# def number(num, decimals1 = False):
# global decimals
# num_decimals_max = decimals1 or decimals
# num_str = str(num)
# index_dot = num_str.find('.')
# if index_dot < 0:
# num_decimals = 0
# else:
# num_decimals_str = len(num_str) - (index_dot + 1)
# if num_decimals_str < num_decimals_max:
# num_decimals = num_decimals_str
# else:
# num_decimals = num_decimals_max
# precision = precision_string(num_decimals)
# return Decimal(num).quantize(Decimal(precision), rounding=ROUND_HALF_UP)
# decimal type does not store in MongoDB
def number(num):
    """Coerce *num* to a float (decimal.Decimal does not store in MongoDB)."""
    return num if isinstance(num, float) else float(num)
def toFixed(num, precision1='.01'):
    """Round *num* to the given precision and strip trailing zeroes.

    Falls back to ``str(num)`` when stripping leaves a bare trailing dot
    (i.e. the fractional part was all zeroes).
    """
    stripped = removeZeroes(str(precision(num, precision1)))
    if stripped.endswith('.'):
        return str(num)
    return stripped
# '0.010000' will return a precision of 6 decimals, instead of 2! So fix by
# removing any trailing zeroes.
def removeZeroes(str1):
    """Strip trailing '0' characters (e.g. '0.010000' -> '0.01')."""
    return str1.rstrip('0')
def decimalCount(numString):
    """Number of digits after the decimal point, or -1 if there is none."""
    dot = numString.find('.')
    return len(numString) - dot - 1 if dot > -1 else -1
def precision(num, precision1 = '.01', round1='down'):
    """Quantize *num* to the precision given as a string like '.01'.

    Rounds down by default ('up' rounds up).  If rounding down collapses the
    value to 0.0, retries rounding up so a non-zero input stays non-zero.
    """
    target = removeZeroes(precision1)
    # Already at the requested number of decimals: return unchanged.
    if decimalCount(str(num)) == decimalCount(target):
        return num
    mode = ROUND_UP if round1 == 'up' else ROUND_DOWN
    quantized = float(Decimal(num).quantize(Decimal(target), rounding=mode))
    if quantized == 0.0:
        quantized = float(Decimal(num).quantize(Decimal(target), rounding=ROUND_UP))
    return quantized
|
Python
| 80
| 31.225
| 98
|
/number.py
| 0.639255
| 0.592708
|
lukemadera/ml-learning
|
refs/heads/master
|
import gym
import logging
import numpy as np
import torch
import time
import breakout_ai_a2c as ai_a2c
import date_time
import number
from subproc_vec_env import SubprocVecEnv
from atari_wrappers import make_atari, wrap_deepmind, Monitor
def updateState(obs, state, nc):
    # Frame-stacking is done here rather than in a FrameStack wrapper to
    # reduce IPC overhead: shift the channel axis left by nc and write the
    # new observation into the freed tail channels.
    shifted = np.roll(state, shift=-nc, axis=3)
    shifted[:, :, :, -nc:] = obs
    return shifted
def runTrain(gymId='BreakoutNoFrameskip-v4', numEnvs=16, seed=0, filePathBrain='training/breakout-v1.pth',
        numSteps=5, numBatches=20000, outputBatchInterval=1000, joinEnvs=1, epsilon=0.00001):
    """Train an A2C agent on an Atari gym environment using vectorized envs.

    Runs ``numBatches`` batches of ``numSteps`` steps across ``numEnvs``
    subprocess environments, periodically logging progress and saving the
    checkpoint to *filePathBrain* (load it first if present).
    """
    def make_env(rank):
        # Factory for one wrapped, seeded Atari env (run in a subprocess).
        def _thunk():
            env = make_atari(gymId)
            env.seed(seed + rank)
            gym.logger.setLevel(logging.WARN)
            env = wrap_deepmind(env)
            # wrap the env one more time for getting total reward
            env = Monitor(env, rank)
            return env
        return _thunk
    print ('training starting', numBatches, outputBatchInterval,
        'epsilon', epsilon)
    env = SubprocVecEnv([make_env(i) for i in range(numEnvs)])
    numActions = env.action_space.n
    torchDevice = 'cuda' if torch.cuda.is_available() else 'cpu'
    agent = ai_a2c.A2C(numActions, device=torchDevice)
    if filePathBrain:
        agent.load(filePath=filePathBrain)
    timingStart = date_time.now()
    batchCount = 0
    # Frame stacking: the env returns a single frame per step, so we keep the
    # last `nstack` frames stacked along the channel axis.
    nh, nw, nc = env.observation_space.shape
    nstack = 4
    batchStateShape = (numEnvs * numSteps, nh, nw, nc * nstack)
    emptyState = np.zeros((numEnvs, nh, nw, nc * nstack), dtype=np.uint8)
    obs = env.reset()
    lastStates = updateState(obs, emptyState, nc)
    lastDones = [False for _ in range(numEnvs)]
    totalRewards = []
    realTotalRewards = []
    # All actions are always valid.
    validActions = [0,1,2,3]
    while batchCount < numBatches:
        states, actions, rewards, dones, values = [], [], [], [], []
        stepCount = 0
        while stepCount < numSteps:
            actionsStep, valuesStep = agent.selectActions(lastStates, validActions=validActions, randomRatio=epsilon)
            states.append(np.copy(lastStates))
            actions.append(actionsStep)
            values.append(valuesStep)
            if stepCount > 0:
                dones.append(lastDones)
            # Input the action (run a step) for all environments.
            statesStep, rewardsStep, donesStep, infosStep = env.step(actionsStep)
            # Clear the stacked state for any env that just finished.
            for n, done in enumerate(donesStep):
                if done:
                    lastStates[n] = lastStates[n] * 0
            # Fix: stack the NEW observations (statesStep) into the state.
            # Previously the initial reset observation `obs` was re-stacked
            # every step, so the agent never saw fresh frames.
            lastStates = updateState(statesStep, lastStates, nc)
            # Update rewards for logging / tracking.
            for done, info in zip(donesStep, infosStep):
                if done:
                    totalRewards.append(info['reward'])
                    if info['total_reward'] != -1:
                        realTotalRewards.append(info['total_reward'])
            lastDones = donesStep
            rewards.append(rewardsStep)
            stepCount += 1
        # Dones is one off, so add the last one.
        dones.append(lastDones)
        # Join all (combine envs and steps) into flat per-transition arrays.
        states = np.asarray(states, dtype='float32').swapaxes(1, 0).reshape(batchStateShape)
        actions = np.asarray(actions).swapaxes(1, 0).flatten()
        rewards = np.asarray(rewards).swapaxes(1, 0).flatten()
        dones = np.asarray(dones).swapaxes(1, 0).flatten()
        values = np.asarray(values).swapaxes(1, 0).flatten()
        agent.learn(states, actions, rewards, dones, values)
        batchCount += 1
        if batchCount % outputBatchInterval == 0:
            runTime = date_time.diff(date_time.now(), timingStart, 'minutes')
            totalSteps = batchCount * numSteps
            runTimePerStep = runTime / totalSteps
            runTimePerStepUnit = 'minutes'
            if runTimePerStep < 0.02:
                runTimePerStep *= 60
                runTimePerStepUnit = 'seconds'
            print (batchCount, numBatches, '(batch done)',
                number.toFixed(runTime), 'run time minutes,', totalSteps,
                'steps,', number.toFixed(runTimePerStep), runTimePerStepUnit, 'per step')
            r = totalRewards[-100:] # get last 100
            tr = realTotalRewards[-100:]
            if len(r) == 100:
                print("avg reward (last 100):", np.mean(r))
            if len(tr) == 100:
                print("avg total reward (last 100):", np.mean(tr))
                print("max (last 100):", np.max(tr))
            # Only save periodically as well.
            if filePathBrain:
                agent.save(filePathBrain)
    env.close()
    if filePathBrain:
        agent.save(filePathBrain)
    runTime = date_time.diff(date_time.now(), timingStart, 'minutes')
    totalSteps = numBatches * numSteps
    runTimePerStep = runTime / totalSteps
    runTimePerStepUnit = 'minutes'
    if runTimePerStep < 0.02:
        runTimePerStep *= 60
        runTimePerStepUnit = 'seconds'
    print ('training done:', number.toFixed(runTime), 'run time minutes,', totalSteps,
        'steps,', number.toFixed(runTimePerStep), runTimePerStepUnit, 'per step')
    return None
# Kick off training immediately when this script is executed.
runTrain(filePathBrain='training/breakout-v1-2.pth', epsilon=0.0001)
|
Python
| 165
| 35.660606
| 117
|
/breakout_run_train.py
| 0.6082
| 0.592495
|
lukemadera/ml-learning
|
refs/heads/master
|
import datetime
import dateutil.parser
import dateparser
import math
import pytz
def now(tz = 'UTC', microseconds = False):
    """Current time as a timezone-aware datetime (defaults to UTC).

    Microseconds are zeroed unless *microseconds* is truthy.
    """
    current = datetime.datetime.now(pytz.timezone(tz))
    if microseconds:
        return current
    return current.replace(microsecond=0)
def now_string(format = '%Y-%m-%d %H:%M:%S %z', tz = 'UTC'):
    """Current time in *tz*, serialized via string()."""
    current = now(tz)
    return string(current, format)
def arrayString(datetimes, format = '%Y-%m-%d %H:%M:%S %z'):
    """Convert a list of datetimes to their string() representations."""
    return [string(dt, format) for dt in datetimes]
def arrayStringFields(array1, fields=[], format = '%Y-%m-%d %H:%M:%S %z'):
    """Apply dictStringFields() to every dict in *array1*."""
    return [dictStringFields(item, fields, format) for item in array1]
def dictStringFields(object1, fields=[], format = '%Y-%m-%d %H:%M:%S %z'):
    """Shallow-copy *object1*, stringifying the values of the listed
    datetime fields and passing all other values through unchanged."""
    return {key: string(value, format) if key in fields else value
            for key, value in object1.items()}
def string(datetime1, format = '%Y-%m-%d %H:%M:%S %z'):
    """Serialize a datetime as its ISO-8601 string.

    isoformat() is much more performant than strftime, so the *format*
    argument is accepted for interface compatibility but deliberately ignored.
    """
    return datetime1.isoformat()
def stringFormat(datetime1, format = '%Y-%m-%d %H:%M:%S %z'):
    """Format a datetime with strftime.

    Fix: the *format* parameter was previously ignored (the default pattern
    was hard-coded in the strftime call); it is now honored.  Behavior with
    the default argument is unchanged.
    """
    return datetime1.strftime(format)
# def from_string(datetime_string, format = '%Y-%m-%d %H:%M:%S %z'):
# return datetime.strptime(datetime_string, format)
def from_string(dt_string):
    """Parse a datetime string in any common format via dateutil."""
    parsed = dateutil.parser.parse(dt_string)
    return parsed
def remove_seconds(datetime1):
    """Return a copy of *datetime1* with seconds and microseconds zeroed."""
    return datetime1.replace(second=0, microsecond=0)
def remove_microseconds(datetime1):
    """Return a copy of *datetime1* with microseconds zeroed."""
    return datetime1.replace(microsecond=0)
# Sets seconds (and microseconds) to 0.
def remove_seconds_string(datetime_string, format_in = '%Y-%m-%d %H:%M:%S %z', format_out = '%Y-%m-%d %H:%M:%S %z'):
    """Parse a datetime string, zero its seconds/microseconds, re-serialize."""
    return string(remove_seconds(from_string(datetime_string)), format_out)
def diff(datetime1, datetime2, unit='minutes'):
    """Absolute difference between two datetimes in the given unit
    ('seconds', 'minutes', 'hours' or 'days'); None for unknown units."""
    # abs() on the timedelta replaces the manual larger/smaller ordering.
    delta = abs(datetime1 - datetime2)
    # total_seconds() is required: .seconds alone only gives the remainder
    # within a day (e.g. 1h5s would report 5, not 3605).
    total = delta.total_seconds()
    if unit == 'seconds':
        return float(total)
    if unit == 'minutes':
        return float(total / 60)
    if unit == 'hours':
        return float(total / (60 * 60))
    if unit == 'days':
        # .days truncates toward zero; unlike seconds it is not capped at
        # weeks/months, so >7-day spans still work.
        return float(delta.days)
    return None
def to_biggest_unit(value, unit = 'minutes'):
    """Convert a minutes count to the largest whole unit that fits.

    Returns a {'value', 'unit'} dict (minutes, hours or days), or None for
    non-minute inputs or values of roughly a month (28 days) or more.
    """
    if unit != 'minutes':
        return None
    # (upper bound in minutes, divisor, resulting unit name)
    thresholds = (
        (60, 1, 'minutes'),
        (60 * 24, 60, 'hours'),
        (60 * 24 * 28, 60 * 24, 'days'),
    )
    for limit, divisor, unit_name in thresholds:
        if value < limit:
            return {'value': math.floor(value / divisor), 'unit': unit_name}
    return None
def floor_time_interval(datetime1, interval, unit = 'minutes'):
    """Floor *datetime1* down to a whole multiple of *interval* in *unit*,
    zeroing all smaller fields (e.g. 10:51 -> 10:45 for a 15-minute interval).

    Only whole-number intervals no larger than the next bigger unit are
    supported (so 90 minutes, i.e. 1.5 hours, cannot be expressed).  Returns
    None for unknown units.

    NOTE(review): for 'days'/'months'/'years' the floored field can come out
    as 0, which datetime.replace() rejects (day/month minimum is 1) — confirm
    the intended behavior before relying on those units.
    """
    if unit == 'seconds':
        floored = math.floor(datetime1.second / interval) * interval
        return datetime1.replace(second=floored, microsecond=0)
    if unit == 'minutes':
        floored = math.floor(datetime1.minute / interval) * interval
        return datetime1.replace(minute=floored, second=0, microsecond=0)
    if unit == 'hours':
        floored = math.floor(datetime1.hour / interval) * interval
        return datetime1.replace(hour=floored, minute=0, second=0, microsecond=0)
    if unit == 'days':
        floored = math.floor(datetime1.day / interval) * interval
        return datetime1.replace(day=floored, hour=0, minute=0, second=0, microsecond=0)
    if unit == 'months':
        floored = math.floor(datetime1.month / interval) * interval
        return datetime1.replace(month=floored, day=0, hour=0, minute=0, second=0, microsecond=0)
    if unit == 'years':
        floored = math.floor(datetime1.year / interval) * interval
        return datetime1.replace(year=floored, month=0, day=0, hour=0, minute=0, second=0, microsecond=0)
    return None
def nextMonth(datetime1, hour=0, minute=0):
    """First day of the month after *datetime1*, at hour:minute UTC."""
    year, month = datetime1.year, datetime1.month
    if month == 12:
        # December rolls over into January of the next year.
        year, month = year + 1, 1
    else:
        month += 1
    return datetime.datetime(year, month, 1, hour, minute, 0,
        tzinfo=pytz.timezone('UTC'))
def previousMonth(datetime1, hour=0, minute=0):
    """First day of the month before *datetime1*, at hour:minute UTC."""
    year, month = datetime1.year, datetime1.month
    if month == 1:
        # January rolls back into December of the previous year.
        year, month = year - 1, 12
    else:
        month -= 1
    return datetime.datetime(year, month, 1, hour, minute, 0,
        tzinfo=pytz.timezone('UTC'))
def dateToMilliseconds(date_str):
    """Convert UTC date to milliseconds
    If using offset strings add "UTC" to date string e.g. "now UTC", "11 hours ago UTC"
    See dateparse docs for formats http://dateparser.readthedocs.io/en/latest/
    :param date_str: date in readable format, i.e. "January 01, 2018", "11 hours ago UTC", "now UTC"
    :type date_str: str
    """
    # Epoch reference as an aware UTC datetime.
    epoch = datetime.datetime.utcfromtimestamp(0).replace(tzinfo=pytz.utc)
    parsed = dateparser.parse(date_str)
    # If the parsed date is not timezone aware, assume UTC.
    if parsed.tzinfo is None or parsed.tzinfo.utcoffset(parsed) is None:
        parsed = parsed.replace(tzinfo=pytz.utc)
    # Milliseconds since the epoch.
    return int((parsed - epoch).total_seconds() * 1000.0)
|
Python
| 166
| 37.945782
| 117
|
/date_time.py
| 0.633565
| 0.607889
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.