index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
25,300 | b68afb0142f58ccab3733ac800e405e1aaa0b209 | class CpuConstants(object):
OPCODE = 0xA2F0
DECODED_OPCODE = 0xA000
INVALID_OPCODE = 0xFFFF
V_CARRY = 0x0F
PC_BEFORE = 0x200
PC_AFTER = 0x202
PC_AFTER_SKIP = 0x204
# MEM #
MEM_SIZE = 4096
MEM_ADDRESS = 0x0F24
# SCREEN #
SCREEN_W = 64
SCREEN_H = 32
# ANNN #
OPCODE_ANNN = 0xABCD
IR_ANNN = 0x0BCD
# 1NNN #
OPCODE_1NNN = 0x1ABC
PC_1NNN = 0xABC
# 2NNN #
OPCODE_2NNN = 0x2ABC
PC_2NNN = 0xABC
SP_2NNN = 0x01
PC_ON_STACK_2NNN = 0x200
# 3XNN #
OPCODE_3XNN = 0x3456
X_3XNN = 4
VX_3XNN_EQ = 0x56
VX_3XNN_NEQ = 0x57
# 4XNN #
OPCODE_4XNN = 0x4567
X_4XNN = 5
VX_4XNN_EQ = 0x67
VX_4XNN_NEQ = 0x68
# 5XY0 #
OPCODE_5XY0 = 0x5670
X_5XY0 = 6
Y_5XY0 = 7
VX_5XY0_EQ = 0x12
VY_5XY0_EQ = 0x12
VY_5XY0_NEQ = 0x13
# 6XNN #
OPCODE_6XNN = 0x6123
X_6XNN = 0x01
VX_6XNN = 0x23
# 7XNN #
OPCODE_7XNN = 0x7133
OPCODE_7XNN_OVERFLOW = 0x71F0
X_7XNN = 0x01
VX_7XNN = 0x22
VX_7XNN_SUM = 0x55
VX_7XNN_SUM_OVERFLOW = 0x12
# 8XY0 #
OPCODE_8XY0 = 0x8120
X_8XY0 = 1
Y_8XY0 = 2
VX_8XY0_BEFORE = 0x11
VX_8XY0_AFTER = 0x23
VY_8XY0 = 0x23
# 8XY1 #
OPCODE_8XY1 = 0x8121
X_8XY1 = 0x01
Y_8XY1 = 0x02
VX_8XY1_BEFORE = 0x12
VX_8XY1_AFTER = 0x33
VY_8XY1 = 0x23
# 8XY2 #
OPCODE_8XY2 = 0x8342
X_8XY2 = 0x03
Y_8XY2 = 0x04
VX_8XY2_BEFORE = 0x12
VX_8XY2_AFTER = 0x02
VY_8XY2 = 0x23
# 8XY3 #
OPCODE_8XY3 = 0x8673
X_8XY3 = 0x06
Y_8XY3 = 0x07
VX_8XY3_BEFORE = 0x12
VX_8XY3_AFTER = 0x31
VY_8XY3 = 0x23
# 8XY4 #
OPCODE_8XY4 = 0x8124
X_8XY4 = 0x01
Y_8XY4 = 0x02
VX_8XY4_BEFORE = 0xA0
VX_8XY4_AFTER_NO_OVERFLOW = 0xAA
VX_8XY4_AFTER_OVERFLOW = 0x11
VY_8XY4_NORMAL = 0x0A
VY_8XY4_OVERFLOW = 0x71
# 8XY5 #
OPCODE_8XY5 = 0x8235
X_8XY5 = 0x02
Y_8XY5 = 0x03
VX_8XY5_BEFORE = 0x05
VX_8XY5_AFTER_NO_OVERFLOW = 0x01
VX_8XY5_AFTER_OVERFLOW = 0xF0
VY_8XY5_NORMAL = 0x04
VY_8XY5_OVERFLOW = 0x15
# 8XY6 #
OPCODE_8XY6 = 0x8676
X_8XY6 = 0x06
VX_8XY6_LSB0 = 0x20
VX_8XY6_LSB1 = 0x21
VX_8XY6_AFTER = 0x10
# 8XY7 #
OPCODE_8XY7 = 0x8347
X_8XY7 = 0x03
Y_8XY7 = 0x04
VX_8XY7_BEFORE = 0x20
VX_8XY7_AFTER_NO_OVERFLOW = 0x10
VX_8XY7_AFTER_OVERFLOW = 0xF0
VY_8XY7_NORMAL = 0x30
VY_8XY7_OVERFLOW = 0x10
# 8XYE #
OPCODE_8XYE = 0x812E
X_8XYE = 0x01
VX_8XYE_MSB0_BEFORE = 0x02
VX_8XYE_MSB1_BEFORE = 0x80
VX_8XYE_MSB0_AFTER = 0x04
VX_8XYE_MSB1_AFTER = 0x00
# FX15 #
OPCODE_FX15 = 0xF415
DT_FX15 = 0x1234
VX_FX15 = 0x1234
X_FX15 = 4
# FX18 #
OPCODE_FX18 = 0xF615
ST_FX18 = 0x1234
VX_FX18 = 0x1234
X_FX18 = 6
# 00E0 #
OPCODE_00E0 = 0x00E0
|
25,301 | 813648bbe53f16972dea8bb56f1ad7b931387d10 | from .playlist_analyser import app
|
25,302 | 96039ba8146077b215b52d6720226a4e33c69753 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
from django.db import models
from base64 import b64encode
import pyimgur
# Create your models here.
class TimeStamped(models.Model):
    """Abstract base model adding creation/update timestamps."""
    class Meta:
        abstract = True
    created_at = models.DateTimeField(auto_now_add=True)  # set once on INSERT
    # auto_now=True means this is refreshed on every save() — despite the
    # name, it tracks "last modified", not a publication date.
    published_at = models.DateTimeField(auto_now=True)
class Rsvp(TimeStamped):
    """A guest's RSVP: which events they will attend, plus contact details."""
    class Meta:
        verbose_name = "RSVP"
        verbose_name_plural = "RSVP's"
    cerimonia = models.BooleanField()  # attending the ceremony?
    recepcao = models.BooleanField()  # attending the reception?
    nome = models.CharField(max_length=300)
    email = models.CharField(max_length=300)
    mensagem = models.TextField()
    def __str__(self):
        # Added for Python 3 (where __unicode__ is ignored): without it the
        # admin shows "Rsvp object". Mirrors sibling models (Recado, Noivo)
        # that define both __str__ and __unicode__.
        return u'%s' % (self.nome)
    def __unicode__(self):
        # Python 2 display name.
        return u'%s' % (self.nome)
class Recado(TimeStamped):
    """Guestbook message left by a visitor; hidden until approved."""
    class Meta:
        verbose_name = "Recado"
        verbose_name_plural = "Recados"
    nome = models.CharField(max_length=300)
    foto = models.URLField(blank=True, null=True, default='https://placehold.it/300x300')
    texto = models.TextField()
    # Moderation flag: messages start unapproved and must be toggled by an
    # admin before being shown (see verbose text on Pagina_Mural).
    aprovado = models.BooleanField(default=False)
    def __str__(self):
        return u'%s' % (self.nome)
    def __unicode__(self):
        # Python 2 counterpart of __str__.
        return u'%s' % (self.nome)
class Noivo(TimeStamped):
    """Groom profile; an uploaded photo is pushed to Imgur on save().

    The same upload-on-save pattern is repeated verbatim in Noiva,
    Pagina_Inicio, ItemTimeline, etc. — a shared helper would remove the
    duplication.
    """
    class Meta:
        verbose_name = "Noivo"
        verbose_name_plural = "Noivo"
    nome = models.CharField(max_length=255, blank=True, null=True)
    sobrenome = models.CharField(max_length=255, blank=True, null=True)
    descricao = models.TextField(blank=True, null=True)
    facebook = models.URLField(blank=True, null=True)
    instagram = models.URLField(blank=True, null=True)
    email = models.CharField(max_length=255, blank=True, null=True)
    foto_url = models.URLField(blank=True, null=True, default='https://placehold.it/300x300')
    file = models.FileField(blank=True, null=True)
    def save(self, *args, **kwargs):
        # Best effort: upload `file` to Imgur and store the returned link in
        # foto_url. Any failure (no file, network, API error) is swallowed so
        # the model still saves with the previous/default foto_url.
        try:
            # NOTE(review): hardcoded API credential — move to settings/env.
            CLIENT_ID = "cdadf801dc167ab"
            bencode = b64encode(self.file.read())
            client = pyimgur.Imgur(CLIENT_ID)
            # NOTE(review): _send_request is a private pyimgur API and may
            # break on upgrade; prefer client.upload_image().
            r = client._send_request('https://api.imgur.com/3/image', method='POST', params={'image': bencode})
            file = r['link']
            self.foto_url = file
        except (Exception,):
            pass
        # NOTE(review): runs on every save(), re-uploading even when the file
        # is unchanged — confirm this is intended.
        return super(Noivo, self).save(*args, **kwargs)
    def __str__(self):
        return '%s' % self.nome
    def __unicode__(self):
        # Python 2 counterpart of __str__.
        return '%s' % self.nome
class Noiva(TimeStamped):
class Meta:
verbose_name = "Noiva"
verbose_name_plural = "Noiva"
nome = models.CharField(max_length=255, blank=True, null=True)
sobrenome = models.CharField(max_length=255, blank=True, null=True)
descricao = models.TextField(blank=True, null=True)
facebook = models.URLField(blank=True, null=True)
instagram = models.URLField(blank=True, null=True)
email = models.CharField(max_length=255, blank=True, null=True)
foto_url = models.URLField(blank=True, null=True, default='https://placehold.it/300x300')
file = models.FileField(blank=True, null=True, verbose_name='Arquivo de Foto')
def save(self, *args, **kwargs):
try:
CLIENT_ID = "cdadf801dc167ab"
bencode = b64encode(self.file.read())
client = pyimgur.Imgur(CLIENT_ID)
r = client._send_request('https://api.imgur.com/3/image', method='POST', params={'image': bencode})
file = r['link']
self.foto_url = file
except (Exception,):
pass
return super(Noiva, self).save(*args, **kwargs)
def __str__(self):
return '%s' % self.nome
def __unicode__(self):
return '%s' % self.nome
class Endereco(TimeStamped):
class Meta:
verbose_name = "Endereco"
verbose_name_plural = "Enderecos"
rua = models.CharField(max_length=255, blank=True, null=True)
numero = models.CharField(max_length=255, blank=True, null=True)
bairro = models.CharField(max_length=255, blank=True, null=True)
cidade = models.CharField(max_length=255, blank=True, null=True)
estado = models.CharField(max_length=255, blank=True, null=True)
ponto_referencia = models.CharField(max_length=255, blank=True, null=True, verbose_name='Ponto de Referencia')
def __str__(self):
return '%s, %s, %s - %s, %s' % (self.rua, self.numero, self.bairro, self.cidade, self.estado)
def __unicode__(self):
return '%s, %s, %s - %s, %s' % (self.rua, self.numero, self.bairro, self.cidade, self.estado)
class Recepcao(TimeStamped):
class Meta:
verbose_name = "Recepcao"
verbose_name_plural = "Recepcao"
data = models.DateField(blank=True, null=True)
hora = models.TimeField(blank=True, null=True)
local = models.CharField(max_length=255, blank=True, null=True)
endereco = models.ForeignKey(Endereco, blank=True, null=True, on_delete=models.CASCADE)
cor_casamento = models.CharField(max_length=255, blank=True, null=True)
def __str__(self):
return '%s' % (self.local)
def __unicode__(self):
return '%s' % (self.local)
class Pagina_Inicio(TimeStamped):
class Meta:
verbose_name = "Pagina de Inicio"
verbose_name_plural = "Pagina de Inicio"
frase = models.TextField(blank=True, null=True, default='Vamos nos casar!')
foto_background_url = models.URLField(blank=True, null=True, default='https://i.imgur.com/jWXZF4z.jpg')
file = models.FileField(blank=True, null=True, verbose_name='Foto Background')
def save(self, *args, **kwargs):
try:
CLIENT_ID = "cdadf801dc167ab"
bencode = b64encode(self.file.read())
client = pyimgur.Imgur(CLIENT_ID)
r = client._send_request('https://api.imgur.com/3/image', method='POST', params={'image': bencode})
file = r['link']
self.foto_background_url = file
except (Exception,):
pass
return super(Pagina_Inicio, self).save(*args, **kwargs)
class Habilitavel(models.Model):
    """Abstract base for site sections that can be toggled on/off.

    NOTE(review): duplicates TimeStamped's timestamp fields instead of
    inheriting from it — consider `class Habilitavel(TimeStamped)`.
    """
    class Meta:
        abstract = True
    habilitado = models.BooleanField(default=True)  # section enabled/visible
    created_at = models.DateTimeField(auto_now_add=True)
    published_at = models.DateTimeField(auto_now=True)
class Pagina_Noivos(Habilitavel):
class Meta:
verbose_name = "Pagina dos Noivos"
verbose_name_plural = "Pagina dos Noivos"
titulo_menu = models.CharField(max_length=255, blank=True, null=True, default='Os Noivos')
class Pagina_Frase(Habilitavel):
class Meta:
verbose_name = "Pagina de Frase Principal"
verbose_name_plural = "Pagina de Frase Principal"
frase = models.TextField(blank=True, null=True,
default=u'Será uma honra ter você no momento mais incrível das nossas vidas, onde finalmente nos tornaremos um.')
foto_background_url = models.URLField(blank=True, null=True, default='https://imgur.com/QnAo2qj.jpg')
file = models.FileField(blank=True, null=True, verbose_name='Foto Background')
def save(self, *args, **kwargs):
try:
CLIENT_ID = "cdadf801dc167ab"
bencode = b64encode(self.file.read())
client = pyimgur.Imgur(CLIENT_ID)
r = client._send_request('https://api.imgur.com/3/image', method='POST', params={'image': bencode})
file = r['link']
self.foto_background_url = file
except (Exception,):
pass
return super(Pagina_Frase, self).save(*args, **kwargs)
class Pagina_Timeline(Habilitavel):
class Meta:
verbose_name = "Pagina de Timeline"
verbose_name_plural = "Pagina de Timeline"
titulo_menu = models.CharField(max_length=255, blank=True, null=True, default='Nossa História')
texto_historia = models.TextField(blank=True, null=True)
class ItemTimeline(TimeStamped):
class Meta:
verbose_name = "Item Timeline"
verbose_name_plural = "Itens da Timeline"
data = models.DateField(blank=True, null=True)
titulo = models.CharField(max_length=255, blank=True, null=True)
descricao = models.TextField(blank=True, null=True)
timeline = models.ForeignKey(Pagina_Timeline, blank=True, null=True, on_delete=models.CASCADE)
foto_url = models.URLField(blank=True, null=True, default='https://placehold.it/640x480')
file = models.FileField(blank=True, null=True, verbose_name='Foto')
def save(self, *args, **kwargs):
self.timeline = Pagina_Timeline.objects.first()
try:
CLIENT_ID = "cdadf801dc167ab"
bencode = b64encode(self.file.read())
client = pyimgur.Imgur(CLIENT_ID)
r = client._send_request('https://api.imgur.com/3/image', method='POST', params={'image': bencode})
file = r['link']
self.foto_url = file
except (Exception,):
pass
return super(ItemTimeline, self).save(*args, **kwargs)
def __str__(self):
return '%s' % (self.titulo)
def __unicode__(self):
return '%s' % (self.titulo)
class Pagina_Contador(Habilitavel):
class Meta:
verbose_name = "Contador"
verbose_name_plural = "Contador"
foto_background_url = models.URLField(blank=True, null=True, default='https://imgur.com/WvzW9gn.jpg')
file = models.FileField(blank=True, null=True, verbose_name='Foto Background')
def save(self, *args, **kwargs):
try:
CLIENT_ID = "cdadf801dc167ab"
bencode = b64encode(self.file.read())
client = pyimgur.Imgur(CLIENT_ID)
r = client._send_request('https://api.imgur.com/3/image', method='POST', params={'image': bencode})
file = r['link']
self.foto_background_url = file
except (Exception,):
pass
return super(Pagina_Contador, self).save(*args, **kwargs)
class CategoriaGaleria(TimeStamped):
class Meta:
verbose_name = "Categoria de Galeria"
verbose_name_plural = "Categorias de Galeria"
titulo = models.CharField(max_length=255, blank=True, null=True, default=u'Família')
def __str__(self):
return '%s' % (self.titulo)
def __unicode__(self):
return '%s' % (self.titulo)
class Pagina_Galeria(Habilitavel):
class Meta:
verbose_name = "Pagina de Galeria"
verbose_name_plural = "Pagina de Galeria"
titulo_menu = models.CharField(max_length=255, blank=True, null=True, default='Galeria de Fotos')
texto = models.TextField(blank=True, null=True, default=u'Nada melhor do que fotos pra registrar tudo!')
class ItemGaleria(TimeStamped):
class Meta:
verbose_name = "Item da Galeria"
verbose_name_plural = "Itens da Galeria"
categoria = models.ForeignKey(CategoriaGaleria, blank=True, null=True, on_delete=models.CASCADE)
foto_url = models.URLField(blank=True, null=True, default='https://placehold.it/640x480')
file = models.FileField(blank=True, null=True, verbose_name='Foto')
galeria = models.ForeignKey(Pagina_Galeria, blank=True, null=True, on_delete=models.CASCADE, )
def save(self, *args, **kwargs):
self.galeria = Pagina_Galeria.objects.first()
try:
CLIENT_ID = "cdadf801dc167ab"
bencode = b64encode(self.file.read())
client = pyimgur.Imgur(CLIENT_ID)
r = client._send_request('https://api.imgur.com/3/image', method='POST', params={'image': bencode})
file = r['link']
self.foto_url = file
except (Exception,):
pass
return super(ItemGaleria, self).save(*args, **kwargs)
def __str__(self):
return '%s' % (self.foto_url)
def __unicode__(self):
return '%s' % (self.foto_url)
class Pagina_Padrinhos(Habilitavel):
class Meta:
verbose_name = "Pagina de Padrinhos"
verbose_name_plural = "Pagina de Padrinhos"
titulo_menu = models.CharField(max_length=255, blank=True, null=True, default='Padrinhos')
frase = models.TextField(blank=True, null=True)
class ItemPadrinho(TimeStamped):
class Meta:
verbose_name = "Item Padrinho"
verbose_name_plural = "Padrinhos"
nome = models.CharField(max_length=255, blank=True, null=True)
pagina_padrinhos = models.ForeignKey(Pagina_Padrinhos, blank=True, null=True, on_delete=models.CASCADE)
foto_url = models.URLField(blank=True, null=True, default='https://placehold.it/640x480')
file = models.FileField(blank=True, null=True, verbose_name='Foto')
def save(self, *args, **kwargs):
self.pagina_padrinhos = Pagina_Padrinhos.objects.first()
try:
CLIENT_ID = "cdadf801dc167ab"
bencode = b64encode(self.file.read())
client = pyimgur.Imgur(CLIENT_ID)
r = client._send_request('https://api.imgur.com/3/image', method='POST', params={'image': bencode})
file = r['link']
self.foto_url = file
except (Exception,):
pass
return super(ItemPadrinho, self).save(*args, **kwargs)
def __str__(self):
return '%s' % (self.nome)
def __unicode__(self):
return '%s' % (self.nome)
class Pagina_RSVP(Habilitavel):
class Meta:
verbose_name = "Pagina de RSVP"
verbose_name_plural = "Pagina de RSVP"
titulo_menu = models.CharField(max_length=255, blank=True, null=True, default='RSVP')
texto = models.TextField(blank=True, null=True)
class Pagina_ListaPresentes(Habilitavel):
class Meta:
verbose_name = "Pagina de Lista de Presentes"
verbose_name_plural = "Pagina de Lista de Presentes"
titulo_menu = models.CharField(max_length=255, blank=True, null=True, default='Lista de Presentes')
texto = models.TextField(blank=True, null=True,
default=u'Abaixo, estão as listas que fizemos online. Qualquer dúvida, só entrar em contato com a gente!')
foto_background_url = models.URLField(blank=True, null=True, default='https://imgur.com/UWpXoS1.jpg')
file = models.FileField(blank=True, null=True, verbose_name='Foto Background')
def save(self, *args, **kwargs):
try:
CLIENT_ID = "cdadf801dc167ab"
bencode = b64encode(self.file.read())
client = pyimgur.Imgur(CLIENT_ID)
r = client._send_request('https://api.imgur.com/3/image', method='POST', params={'image': bencode})
file = r['link']
self.foto_background_url = file
except (Exception,):
pass
return super(Pagina_ListaPresentes, self).save(*args, **kwargs)
class ItemListaPresentes(TimeStamped):
class Meta:
verbose_name = "Item de Lista de Presentes"
verbose_name_plural = "Itens de Lista de Presentes"
nome = models.CharField(max_length=255, blank=True, null=True)
key = models.CharField(max_length=255, blank=True, null=True)
is_site = models.BooleanField(blank=True, null=True, default=True)
iframe = models.TextField(blank=True, null=True)
link_url = models.URLField(blank=True, null=True)
foto_url = models.URLField(blank=True, null=True, default='https://placehold.it/640x480')
file = models.FileField(blank=True, null=True, verbose_name='Foto')
pagina_listapresentes = models.ForeignKey(Pagina_ListaPresentes, blank=True, null=True,
on_delete=models.CASCADE)
def save(self, *args, **kwargs):
self.pagina_listapresentes = Pagina_ListaPresentes.objects.first()
try:
CLIENT_ID = "cdadf801dc167ab"
bencode = b64encode(self.file.read())
client = pyimgur.Imgur(CLIENT_ID)
r = client._send_request('https://api.imgur.com/3/image', method='POST', params={'image': bencode})
file = r['link']
self.foto_url = file
except (Exception,):
pass
return super(ItemListaPresentes, self).save(*args, **kwargs)
def __str__(self):
return '%s' % (self.nome)
def __unicode__(self):
return '%s' % (self.nome)
class Pagina_Mural(Habilitavel):
class Meta:
verbose_name = "Pagina de Mural"
verbose_name_plural = "Pagina de Mural"
titulo_menu = models.CharField(max_length=255, blank=True, null=True, default='Mural')
texto = models.TextField(blank=True, null=True,
default=u'Por favor, não saia deste site sem deixar um recado para nós. Sua mensagem é muito importante, e gostaríamos de recebê-la. Nós receberemos e publicaremos as melhores em nossa página.')
class Pagina_Footer(Habilitavel):
class Meta:
verbose_name = "Pagina de Footer"
verbose_name_plural = "Pagina de Footer"
frase = models.TextField(blank=True, null=True)
foto_background_url = models.URLField(blank=True, null=True, default='https://imgur.com/nMHYQd9.jpg')
file = models.FileField(blank=True, null=True, verbose_name='Foto Background')
def save(self, *args, **kwargs):
try:
CLIENT_ID = "cdadf801dc167ab"
bencode = b64encode(self.file.read())
client = pyimgur.Imgur(CLIENT_ID)
r = client._send_request('https://api.imgur.com/3/image', method='POST', params={'image': bencode})
file = r['link']
self.foto_background_url = file
except (Exception,):
pass
return super(Pagina_Footer, self).save(*args, **kwargs)
|
25,303 | 2de859d464f9ec3e9273ba75e5ab63661ff1df39 | from django.apps import AppConfig
class ComplianceMatrixConfig(AppConfig):
    """Django application configuration for the `compliance_matrix` app."""
    name = 'compliance_matrix'
|
25,304 | 1fa39c160098eb269778a968391760917227e666 | def main():
end = list(input())
n = int(input())
start = []
for i in range(n):
query = input().split()
q = int(query[0])
if q == 1:
start,end = end,start
else:
f = int(query[1])
if f == 1:
start.append(query[-1])
else:
end.append(query[-1])
print(''.join(start[::-1] + end))
return
if __name__ == "__main__":
main() |
25,305 | c6ba9455b178b1a967e8176d867e49d02d323524 | from gensim.models import word2vec
from nltk import word_tokenize
import codecs, json
import numpy as np

# Train Word2Vec on a one-sentence-per-line corpus and dump every learned
# word vector to data.json as {token: [floats...]}.

iname = "text_1"  # input corpus, UTF-8 (BOM tolerated via utf-8-sig)

# Tokenize each line into a sentence for training.
sens = []
with codecs.open(iname, "r", "utf-8-sig") as ifile:  # closed even on error
    for line in ifile:
        # rstrip("\n") instead of line[:-1]: the old slice chopped the last
        # character of a final line that lacks a trailing newline.
        sens.append(word_tokenize(line.rstrip("\n")))

model = word2vec.Word2Vec(sentences=sens)
words = model.wv.vocab.keys()

# model.wv[...] is the supported vector access path; indexing the model
# directly (model[k]) is deprecated in gensim.
d = {k: model.wv[k].tolist() for k in words}
with codecs.open("data.json", "w", "utf-8-sig") as fp:
    json.dump(d, fp, ensure_ascii=False)
|
25,306 | aa9e9159f0e0cb6f16dab241490ac05cd6f5781e | """
#M.20
BOX STACKING PROBLEM
UNDER CONSTRUCTION
LAID OFF FOR THE TIME BEING
|
25,307 | 8505b14a5388b0158404246416545f22dcc2adc4 | from text import cleaners
def test_transliteration_cleaners():
    """Diacritized Arabic is transliterated to the expected ASCII form."""
    source = 'تَسْدِيدَةٍ اسْتَعْصَتْ عَلَى الْحَارِسْ'
    assert cleaners.transliteration_cleaners(source) == 'tasdiyda@in sta`sat `ala~ lharis'
def test_basic_cleaner():
    """basic_cleaners leaves diacritized Arabic text unchanged."""
    source = 'تَسْدِيدَةٍ اسْتَعْصَتْ عَلَى الْحَارِسْ'
    assert cleaners.basic_cleaners(source) == source
def english_cleaner():
    # NOTE(review): missing the `test_` prefix, so pytest never collects or
    # runs this check — rename to test_english_cleaner to activate it.
    actual = cleaners.english_cleaners('I want to be there early on the day. Please organize')
    expected = 'I want to be there early on the day. Please organize'
    assert actual == expected
def test_phones_cleaner():
    """basic_cleaners passes ARPAbet phone sequences through untouched."""
    phones = '{hh aw1 s s t ah0 n}. {y e s}, {hh aw1 s s t ah0 n}. {y e s}'
    assert cleaners.basic_cleaners(phones) == phones
def test_phones_cleaner_convert_arabic_comma():
    """arabic_cleaners replaces the Arabic comma '،' with a Latin comma."""
    source = '{hh aw1 s s t ah0 n}. {y e s}، {hh aw1 s s t ah0 n}. {y e s}'
    converted = '{hh aw1 s s t ah0 n}. {y e s}, {hh aw1 s s t ah0 n}. {y e s}'
    assert cleaners.arabic_cleaners(source) == converted
def test_phones_cleaner_convert_dash():
    """arabic_cleaners keeps ASCII dashes and plain words as-is."""
    sample = '{hh aw1 s s t ah0 n}. {y e s} - {hh aw1 s s t ah0 n}. {y e s} dsd sd'
    assert cleaners.arabic_cleaners(sample) == sample
|
25,308 | c9caa125b1bd7fe1b5480bb15efebc95d653ee15 | from ..components import Components
from ..utils import NRTLParameters, UNIQUACParameters
from .mixture import Mixture
class Mixtures:
    """
    A class with pre-defined mixtures with constants validated against literature data
    For details see ./tests/test_mixtures/test_default_mixtures.py
    """
    # Each attribute is a binary Mixture carrying NRTL and UNIQUAC activity
    # model parameters. Component order fixes the meaning of the 12/21
    # subscripts (1 = first_component, 2 = second_component).
    H2O_MeOH: Mixture = Mixture(
        name="H2O_MeOH",
        first_component=Components.H2O,
        second_component=Components.MeOH,
        nrtl_params=NRTLParameters(
            g12=-5132.51739,
            g21=1438.40193,
            # NOTE(review): asymmetric non-randomness (alpha12=0, alpha21=0.3)
            # is unusual for NRTL — confirm against the literature source.
            alpha12=0,
            alpha21=0.3,
            a12=2.7321,
            a21=-0.693,
        ),
        uniquac_params=UNIQUACParameters(
            alpha_12=-3.983130612278737,
            alpha_21=4.87905141883045,
            beta_12=0.007057570080263636,
            beta_21=0.07115788640105822,
            z=10,
        )
    )
    H2O_EtOH: Mixture = Mixture(
        name="H2O_EtOH",
        first_component=Components.H2O,
        second_component=Components.EtOH,
        nrtl_params=NRTLParameters(
            g12=5823,
            g21=-633,
            alpha12=0.3,
        ),
        uniquac_params=UNIQUACParameters(
            alpha_12=21.127561704493143,
            alpha_21=100.10268878024358,
            beta_12=-0.9175664931087569,
            beta_21=2.4619377106475753,
            # NOTE(review): z=13 deviates from the customary coordination
            # number of 10 used by the other mixtures — confirm.
            z=13,
        )
    )
    H2O_iPOH: Mixture = Mixture(
        name="H2O_iPOH",
        first_component=Components.H2O,
        second_component=Components.iPOH,
        nrtl_params=NRTLParameters(
            g12=6899.21,
            g21=106.99,
            alpha12=0.3,
        ),
        uniquac_params=UNIQUACParameters(
            alpha_12=-756.0564435691869,
            alpha_21=-678.5581809818217,
            beta_12=333788.20320719096,
            beta_21=266824.57316704467,
            z=10,
        )
    )
    H2O_AceticAcid: Mixture = Mixture(
        name="H2O_AceticAcid",
        first_component=Components.H2O,
        second_component=Components.AceticAcid,
        nrtl_params=NRTLParameters(
            g12=-352.42,
            g21=715.43,
            alpha12=0.25,
        ),
        uniquac_params=UNIQUACParameters(
            alpha_12=-129.9765770340468,
            alpha_21=23.640931620853934,
            beta_12=-0.31354951657408175,
            beta_21=0.07553464090651879,
            z=10,
        )
    )
    EtOH_ETBE: Mixture = Mixture(
        name="EtOH_ETBE",
        first_component=Components.EtOH,
        second_component=Components.ETBE,
        nrtl_params=NRTLParameters(
            g12=1140.7722,
            g21=2069.17502,
            alpha12=0.3,
        ),
        uniquac_params=UNIQUACParameters(
            alpha_12=8942.589565297398,
            alpha_21=-18441.776691222403,
            beta_12=-2915794.8390840776,
            beta_21=6478988.764315034,
            z=10,
        )
    )
    MeOH_Toluene: Mixture = Mixture(
        name="MeOH_Toluene",
        first_component=Components.MeOH,
        second_component=Components.Toluene,
        nrtl_params=NRTLParameters(
            g12=3857.3,
            g21=4290.3,
            alpha12=0.4370,
        ),
        uniquac_params=UNIQUACParameters(
            alpha_12=4686.596023943361,
            alpha_21=-2095.043872895277,
            beta_12=-1234765.6427427588,
            beta_21=957134.982560884,
            z=10,
        ),
    )
    MeOH_MTBE: Mixture = Mixture(
        name="MeOH_MTBE",
        first_component=Components.MeOH,
        second_component=Components.MTBE,
        nrtl_params=NRTLParameters(
            g12=2133.5,
            g21=2025.3,
            alpha12=0.6,
        ),
        uniquac_params=UNIQUACParameters(
            alpha_12=-2487.7680701255767,
            alpha_21=-1614.4771614656215,
            beta_12=903707.2728090351,
            beta_21=651311.6984954888,
            z=10,
        ),
    )
    MeOH_DMC: Mixture = Mixture(
        name="MeOH_DMC",
        first_component=Components.MeOH,
        second_component=Components.DMC,
        nrtl_params=NRTLParameters(
            g12=3115.2,
            g21=833.1,
            alpha12=0.3,
        ),
        uniquac_params=UNIQUACParameters(
            alpha_12=739.9268135127102,
            alpha_21=-168.38470470351714,
            beta_12=-173417.54480148194,
            beta_21=72635.51155280948,
            # NOTE(review): z=473 is far outside the usual coordination-number
            # range (~10) — suspected typo; verify against the source data.
            z=473,
        ),
    )
|
25,309 | ce5ebedba03dea0b5b9892b81f3d815db9fe1bd1 | from django.db import models
from django.db.models import signals
from django.dispatch import receiver
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_squared_error, r2_score
# Create your models here.
# class norms(models.Model):
# year = models.IntegerField(primary_key=True)
# low = models.IntegerField()
# high = models.IntegerField()
class island(models.Model):
    """Yearly population and observed demand for the island region."""
    # NOTE(review): class name should be PascalCase (Island) per PEP 8.
    year = models.IntegerField()
    population = models.IntegerField()
    # high = models.IntegerField()
    # normsPred = models.IntegerField()
    actualDem = models.IntegerField()  # observed (actual) demand for the year
class eastern(models.Model):
    """Yearly population and observed demand for the eastern region."""
    year = models.IntegerField()
    population = models.IntegerField()
    # high = models.IntegerField()
    # normsPred = models.IntegerField()
    actualDem = models.IntegerField()  # observed (actual) demand for the year
class western(models.Model):
    """Yearly population and observed demand for the western region."""
    year = models.IntegerField()
    population = models.IntegerField()
    # high = models.IntegerField()
    # normsPred = models.IntegerField()
    actualDem = models.IntegerField()  # observed (actual) demand for the year
class predict(models.Model):
    """Stored per-year demand prediction, broken down by region."""
    year = models.IntegerField(unique=True)  # one prediction row per year
    island = models.IntegerField()
    western = models.IntegerField()
    eastern = models.IntegerField()
    total = models.IntegerField()  # presumably island+western+eastern — confirm with writer
# def prediction(area):
#linear regression working code
# datas = area.objects.all()
# x=[]
# y=[]
# z = []
# z = np.array(z)
# for data in datas:
# x.append([data.year.year])
# y.append([data.actualDem])
# # x = np.array(x)
# # y = np.array(y)
# lin = LinearRegression()
# lin.fit(x, y)
# val = lin.predict(x)
# rmse = np.sqrt(mean_squared_error(y,val))
# r2 = r2_score(y,val)
#Polynomial regression working code
# for data in datas:
# x.append([data.year.year])
# y.append([(data.actualDem)-(data.normsPred)])
# count+=1
# x = np.array(x)
# y = np.array(y)
# poly = PolynomialFeatures()
# x_poly = poly.fit_transform(z)
# lin = LinearRegression()
# lin.fit(x_poly,y)
# val=lin.predict(poly.fit_transform(y))
# rmse = np.sqrt(mean_squared_error(y,val))
# r2 = r2_score(y,val)
#Signals Implementation for calculating prediction values
@receiver(signals.post_save,sender=western)
def calcwest(sender,instance,**kwargs):
# prediction(sender)
pass
@receiver(signals.post_save,sender=eastern)
def calceast(sender,instance,**kwargs):
# prediction(sender)
pass
@receiver(signals.post_save,sender=island)
def calcisland(sender,instance,**kwargs):
# prediction(sender)
pass |
25,310 | 63f3c437120cebe03878a10d337b1afd5dcf54f5 | def diff(a, b):
return a -b
def simpleColor(r,g,b):
r = int(r)
g = int(g)
b = int(b)
bg = ir = 0
try:
if r > g and r > b:
rg = diff(r,g)
rb = diff(r,b)
if g < 65 and b < 65 and rg < 60:
return "RED"
gb = diff(g,b)
if rg < rb:
if gb < rg:
if gb >=30 and rg >=80:
return "ORANGE"
elif gb <=20 and rg >= 80:
return "RED"
elif gb <=20 and b > 175:
return "CREAM"
else:
return "COCHOLATE"
else:
if rg > 60:
return "ORANGE"
elif r > 125:
return "AMARILLO"
else:
return "COCHOLATE"
elif rg > rb:
if bg > rb:
if gb < 60:
if r > 150:
return "RED 2"
else:
return "MARRON"
elif g > 125:
return "PINK"
else:
return "RED 3"
else:
if rb > 60:
if r > 160:
return "PINK"
else:
return "RED"
else:
return "RED"
else:
if rg > 20:
if r >= 100 and b >= 60:
return "RED"
elif r >= 100:
return "RED"
else:
return "MARRON"
else:
return "GRAY"
elif g > r and g > b:
gb = diff(g,b)
gr = diff(g,r)
if r < 65 and b < 65 and gb > 60:
return "GREEN"
rb = diff(r,b)
if r > b:
if gr < gb:
if rb >=150 and gr <= 20:
return "AMARILLO"
else:
return "GREEN"
else:
return "GREEN"
elif r > b:
if gb < gr:
if gb <= 20:
return "turqoise"
else:
return "GREEN"
else:
return "GREEN"
elif b > r and b > g:
bg = diff(b,g)
br = diff(b,r)
if r < 65 and g < 65 and bg > 60:
return "BLUE"
rg = diff(r,g)
if g > r:
if bg > rg:
if bg <= 20:
return "TURQOISE"
else:
return "LIGHT BLUE"
else:
if rg <= 20:
if r <= 150:
return "LILAC"
else:
return "BLUE"
else:
return "BLUE"
elif g > r:
if br < rg:
if br <= 20:
if r > 50 and g < 75:
return "PINK"
elif ir > 150:
return "LILAC"
else:
return "purple"
else:
return "PURPLE"
else:
if rg <= 20:
if bg <= 20:
return "GRAY"
else:
return "BLUE"
else:
if rg <= 20:
if r >= 100 and b > 60:
return "RED"
elif r >= 100:
return "RED"
else:
return "MARRON"
else:
return "GRAY"
else:
return "GRAY"
except:
return "Not Color"
if __name__ == "__main__":
import sys
print(simpleColor(sys.argv[1], sys.argv[2], sys.argv[3]))
|
25,311 | 72996b22b5e57a37bc2d34837f94884b841bfdb2 | #coding:utf-8
"""import requests
#http请求头信息
headers={
'Accept':'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding':'gzip, deflate, br',
'Accept-Language':'zh-CN,zh;q=0.8',
'Connection':'keep-alive',
'Content-Length':'25',
'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8',
'Cookie':'user_trace_token=20170214020222-9151732d-f216-11e6-acb5-525400f775ce; LGUID=20170214020222-91517b06-f216-11e6-acb5-525400f775ce; JSESSIONID=ABAAABAAAGFABEF53B117A40684BFB6190FCDFF136B2AE8; _putrc=ECA3D429446342E9; login=true; unick=yz; showExpriedIndex=1; showExpriedCompanyHome=1; showExpriedMyPublish=1; hasDeliver=0; PRE_UTM=; PRE_HOST=; PRE_SITE=; PRE_LAND=https%3A%2F%2Fwww.lagou.com%2F; TG-TRACK-CODE=index_navigation; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1494688520,1494690499,1496044502,1496048593; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1496061497; _gid=GA1.2.2090691601.1496061497; _gat=1; _ga=GA1.2.1759377285.1487008943; LGSID=20170529203716-8c254049-446b-11e7-947e-5254005c3644; LGRID=20170529203828-b6fc4c8e-446b-11e7-ba7f-525400f775ce; SEARCH_ID=13c3482b5ddc4bb7bfda721bbe6d71c7; index_location_city=%E6%9D%AD%E5%B7%9E',
'Host':'www.lagou.com',
'Origin':'https://www.lagou.com',
'Referer':'https://www.lagou.com/jobs/list_Python?',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
'X-Anit-Forge-Code':'0',
'X-Anit-Forge-Token':'None',
'X-Requested-With':'XMLHttpRequest'
}
def get_json(url, page, lang_name):
#修改city更换城市
data = {'first': 'true', 'pn': page, 'kd': lang_name,'city':'北京'}
#post请求
json = requests.post(url,data, headers=headers).json()
list_con = json['content']['positionResult']['result']
info_list = []
for i in list_con:
info = []
info.append(i['companyId'])#现在没有公司名字,只能看到id
info.append(i['salary'])
info.append(i['city'])
info.append(i['education'])
info_list.append(info)
return info_list
def main():
#修改lang_name更换语言类型
lang_name = 'python'
page = 1
url = 'http://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false'
info_result = []
while page < 31:
info = get_json(url, page, lang_name)
info_result = info_result + info
page += 1
#写入lagou.txt文件中
with open('lagou.txt','w') as f:
for row in info_result:
f.write(str(row)+'\n')
if __name__ == '__main__':
main()"""
from selenium import webdriver
import time
from bs4 import BeautifulSoup
import csv
# driver = webdriver.Chrome()
# driver.get("https://www.lagou.com/")
# time.sleep(3)
# driver.find_element_by_id("cboxClose").click()
# time.sleep(3)
# driver.find_element_by_id("search_button").click()
# print(driver.page_source)
driver = webdriver.Chrome()
def load_page(url):
    """Scrape one Lagou search-result page using the module-level Selenium driver.

    Returns (positions, next_url): positions is a list of rows
    [company, title, address, salary, experience, keywords]; next_url is the
    href of the last pagination anchor (or None).
    """
    driver.get(url)
    source_html = driver.page_source
    # NOTE(review): no parser argument — BeautifulSoup emits a warning and may
    # pick different parsers per machine; pass 'html.parser' explicitly.
    soup = BeautifulSoup(source_html)
    positions = []
    position_list = soup.find(name="div", attrs={'class': 's_position_list'})
    for position in position_list.find_all(name='li', attrs={'class': 'con_list_item default_list'}):
        position_company_name = position.find(name='div', attrs={'class': 'company_name'}).contents[1].get_text()
        position_name = position.find(name='h3').getText()
        # NOTE(review): 'posotion' is a typo for 'position' (local name only).
        posotion_address = position.find(name='span', attrs={'class': 'add'}).getText()
        position_salary = position.find(name='span', attrs={'class': 'money'}).getText()
        position_keywords = ""
        position_experience = position.find(name='div', attrs={'class': 'li_b_l'}).contents[4].strip()
        for keywords in position.find(name='div', attrs={'class': 'list_item_bot'}).find_all(name='span'):
            position_keywords = position_keywords + " " + keywords.get_text()
        positions.append([position_company_name, position_name, posotion_address, position_salary, position_experience,
                          position_keywords])
    # for link in position_list.find_all(name='a',attrs={'class':'page_no'}):
    #     print(link['href'])
    # Take the last pagination anchor as the "next page" link.
    # NOTE(review): if that anchor still exists on the final results page,
    # next_url stays truthy and main()'s `while url` loop never terminates —
    # confirm how the site marks the last page.
    length = len(position_list.find_all(name='a', attrs={'class': 'page_no'}))
    next_url = position_list.find_all(name='a', attrs={'class': 'page_no'})[length-1]['href']
    if next_url:
        return positions, next_url
    return positions, None
# positions = load_page("https://www.lagou.com/zhaopin/Python/?labelWords=label")
# driver.get("https://www.lagou.com/zhaopin/Python/?labelWords=label")
def main():
    """Crawl Python job listings page by page and write them to a CSV file."""
    url = "https://www.lagou.com/zhaopin/Python/?labelWords=label"
    # newline='' prevents csv.writer from emitting blank rows on Windows;
    # utf-8 guarantees the Chinese header row is written correctly everywhere.
    with open('python_work.csv', 'w', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(['公司名称', '职位名称', '公司地址', '工资', '工作经验','关键字'])
        # load_page returns None for url when there is no next page.
        while url:
            positions,url = load_page(url)
            writer.writerows(positions)
# Script entry: crawl all pages, then release the browser.
main()
driver.close()
|
25,312 | cd2f71dc137770b488f9d6fe4161b7b774b5c606 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from tkinter import *
def change():
    """Recolour the label according to the currently selected radio button."""
    palette = {0: 'red', 1: 'green', 2: 'blue'}
    selected = var.get()
    if selected in palette:
        label['bg'] = palette[selected]
# Build the window: three radio buttons share the IntVar `var`; the button
# applies the chosen colour to the label via change().
root = Tk()
var = IntVar()
var.set(0)
red = Radiobutton(text="Red",
                  variable=var, value=0)
green = Radiobutton(text="Green",
                    variable=var, value=1)
blue = Radiobutton(text="Blue",
                   variable=var, value=2)
button = Button(text="Изменить",
                command=change)
label = Label(width=20, height=10)
red.pack()
green.pack()
blue.pack()
button.pack()
label.pack()
root.mainloop()
|
25,313 | 59b4b9bd9b01fab6abda94ab5e9d4e29f0d1d873 | from selenium.webdriver.common.by import By
class Locators:
    """Selenium locator tuples (By strategy, selector) for an Amazon UI test flow."""

    # Element whose presence confirms the user is signed in.
    sign_in_assert = (By.CLASS_NAME, "nav-line-1-container")
    # NOTE(review): "SING_IN" looks like a typo for "SIGN_IN"; kept as-is
    # because renaming would break every caller.
    SING_IN = (By.XPATH, "(//*[@class= 'nav-a nav-a-2 nav-progressive-attribute'])[1]")
    EMAIL = (By.ID, "ap_email")
    CONTINUE_BUTTON = (By.ID, "continue")
    PASSWORD = (By.ID, "ap_password")
    SING_IN_BUTTON = (By.ID, "signInSubmit")
    SEARCH_BOX = (By.ID, "twotabsearchtextbox")
    SEARCH_SUBMIT_BUTTON = (By.ID, "nav-search-submit-button")
    SECOND_PAGE = (By.XPATH, "(//*[@class='a-normal'])[1]")
    SELECT_PRODUCT = (By.XPATH, "(//*[@class='a-size-medium a-color-base a-text-normal'])[3]")
    WISH_LIST_BUTTON = (By.ID, "add-to-wishlist-button-submit")
    CONTINUE_SHOPPING_BUTTON = (By.ID, "WLHUC_continue")
    HOVER_ITEM = (By.ID, "nav-link-accountList")
    WISH_LIST = (By.CSS_SELECTOR, "#nav-flyout-wl-items > div > a > span")
    DELETE_ITEM = (By.CSS_SELECTOR, ".g-move-delete-buttons > span")
    # ASSERT LOCATORS — used only to verify page state, not to interact.
    SEARCH_LIST = (By.CLASS_NAME, "a-dropdown-container")
    SECOND_PAGE_ASSERT = (By.CLASS_NAME, "a-selected")
    PRODUCT_NAME = (By.ID, "productTitle")
    PRODUCT_NAME_WISH_LIST = (By.CSS_SELECTOR, "h3.a-size-base")
    NO_PRODUCT = (By.XPATH, "//*[@class='a-box a-alert-inline a-alert-inline-success']")
|
25,314 | 586a5fa208f482b83aa8ed262a2f6ae00658afc5 | from __future__ import annotations
import time
from django.db import connections
from django.db.backends.signals import connection_created
def make_queries_slower(execute, sql, params, many, context):
    """Database execute-wrapper that adds a small fixed delay to every query.

    Simulates the network round-trip that an in-process SQLite database
    doesn't have, then delegates to the wrapped execute callable.
    """
    artificial_delay = 0.001
    time.sleep(artificial_delay)
    return execute(sql, params, many, context)
def install_make_queries_slower(connection, **kwargs):
    """Attach the slow-query wrapper to *connection* exactly once.

    The membership check guards against duplicate registration when the
    connection_created signal fires repeatedly for the same connection.
    """
    if make_queries_slower not in connection.execute_wrappers:
        connection.execute_wrappers.append(make_queries_slower)
# Install the wrapper on every future connection...
connection_created.connect(install_make_queries_slower)
# ...and on any connection that already exists at import time.
for connection in connections.all():
    install_make_queries_slower(connection=connection)
|
25,315 | 014ce1dd02fdab17cd696588e720fe9487fab469 | # HackerRank Problem
''' Question:
Amanda has a string of lowercase letters that she wants to copy to a new string. She can perform the following operations with the given costs. She can perform them any number of
times to construct a new string p:
Append a character to the end of string p at a cost of 1$ dollar.
Choose any substring of p and append it to the end of p at no charge.
Given n strings s[i], find and print the minimum cost of copying each s[i] to p[i] on a new line.
For example, given a string s=abcabc, it can be copied for 3 dollars. Start by copying a, b and c individually at a cost of 1 dollar per character. String p=abc at this time. Copy
p=[0:2] to the end of p at no cost to complete the copy.
Function Description
Complete the stringConstruction function in the editor below. It should return the minimum cost of copying a string.
stringConstruction has the following parameter(s):
s: a string
Input Format
The first line contains a single integer n, the number of strings.
Each of the next n lines contains a single string, s[i].
Output Format
For each string s[i] print the minimum cost of constructing a new string p[i] on a new line.
Sample Input
2
abcd
abab
Sample Output
4
2
Explanation
Query 0: We start with s="abcd" and p="".
Append character 'a' to p at a cost of 1 dollar, p="a".
Append character 'b' to p at a cost of 1 dollar, p="ab".
Append character 'c' to p at a cost of 1 dollar, p="abc".
Append character 'd' to p at a cost of 1 dollar, p="abcd".
Because the total cost of all operations is 4 dollars, we print 4 on a new line.
Query 1: We start with s="abab" and p="".
Append character 'a' to p at a cost of 1 dollar, p="a".
Append character 'b' to p at a cost of 1 dollar, p="ab".
Append substring "ab" to p at no cost, p="abab".
Because the total cost of all operations is 2 dollars, we print 2 on a new line.
Note
A substring of a string S is another string S' that occurs "in" S. For example, the substrings of the string "abc" are "a", "b" ,"c", "ab", "bc", and "abc".'''
# Program Code:
def stringConstruction(s):
    """Return the minimum cost of constructing string *s*.

    Each character seen for the first time costs 1 dollar; any substring
    already present in p can be appended for free, so the minimum cost is
    exactly the number of distinct characters in s. Handles the empty
    string (cost 0).
    """
    return len(set(s))
# Read the number of queries, then one string per line; print one cost each.
q = int(input())
for i in range(q):
    s = input()
    result = stringConstruction(s)
    print(result)
|
25,316 | 7d930466a615bdad0e0a2101591fdf05cc0b2f06 | #
# MIT License
#
# Copyright (c) 2017-2019 Paul Taylor
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
import getpass
from gpg_vault import config, utils, log, client, crypto, errors, file
def validateVPaths(path):
    """Validate both the plaintext path and its vault counterpart.

    Returns (plainPath, vpath). Raises VaultError when either path is a
    symbolic link or when both files exist at once (an ambiguous state).
    """
    (plainPath, vpath) = validateVPath(path)
    log.verbose(f"validating plainPath={plainPath}, vpath={vpath}")
    if os.path.islink(plainPath):
        raise errors.VaultError("'" + plainPath + "' is a symbolic link")
    if os.path.exists(plainPath) and os.path.exists(vpath):
        # Tell the user which of the two conflicting files is newer.
        if os.path.getmtime(plainPath) > os.path.getmtime(vpath):
            older = f"{plainPath} is more recent"
        else:
            older = f"{vpath} is more recent"
        raise errors.VaultError(f"both '{plainPath}' and '{vpath}' exist ({older})")
    if os.path.islink(vpath):
        raise errors.VaultError(f"'{vpath}' is a symbolic link")
    return (plainPath, vpath)
def validateVPath(path):
    """Derive and sanity-check the (plainPath, vaultPath) pair for *path*.

    Raises VaultError when the base name, extension or vault extension is
    empty.
    """
    log.verbose(f"validating path='{path}'")
    (base, ext) = utils.splitPath(path)
    log.verbose(f"base={base}, ext={ext}")
    (vext) = getVPathExt(base + ext)
    log.verbose(f"vpath ext={vext}")
    if base == '':
        raise errors.VaultError(f"Invalid path '{path}' (base)")
    if ext == '':
        raise errors.VaultError(f"Invalid path '{path}' (ext)")
    if vext == '':
        raise errors.VaultError(f"Invalid path '{path}' (vext)")
    return (base + ext, base + ext + vext)
def validateOpenVPath(path):
    """Validate paths for opening: the vault file must exist, the plain must not."""
    (plainPath, vpath) = validateVPaths(path)
    if os.path.exists(plainPath):
        raise errors.VaultError("'" + plainPath + "' exists")
    if not os.path.exists(vpath):
        raise errors.VaultError("'" + vpath + "' does not exist")
    return (plainPath, vpath)
def validateEditVPath(path):
    """Validate paths for editing: only requires that the plain file is absent.

    (The vault file may or may not exist — editVPath creates it if needed.)
    """
    (plainPath, vpath) = validateVPaths(path)
    if os.path.exists(plainPath):
        raise errors.VaultError("'" + plainPath + "' exists")
    return (plainPath, vpath)
def validateEncryptVPath(path):
    """Validate paths for encryption: plain file must exist, vault must not."""
    (plainPath, vpath) = validateVPaths(path)
    if not os.path.exists(plainPath):
        raise errors.VaultError(f"'{plainPath}' does not exist")
    if os.path.exists(vpath):
        raise errors.VaultError(f"'{vpath}' exists")
    return (plainPath, vpath)
def validateReencryptVPath(path):
    """Validate paths for re-encryption: vault file must exist, plain must not."""
    (plainPath, vpath) = validateVPaths(path)
    if not os.path.exists(vpath):
        raise errors.VaultError(f"'{vpath}' does not exist")
    if os.path.exists(plainPath):
        raise errors.VaultError(f"'{plainPath}' exists")
    return (plainPath, vpath)
def getVPathExt(path):
    """Return the vault extension of an existing vault file for *path*.

    Tries each configured extension in order and returns the first one for
    which a vault file exists; falls back to the configured default when no
    vault file exists yet.
    """
    for vext in config.CONFIG['internal']['vexts']:
        vpath = path + vext
        log.verbose(f"checking for vpath {vpath}")
        if os.path.exists(vpath):
            log.verbose(f"found {vpath}")
            return vext
    log.verbose("no files found; using default extension")
    return config.CONFIG['internal']['vext.default']
def openVPath(path, destpath, cmd):
    """Decrypt the vault file for *path* into *destpath* and run *cmd* on it.

    This is a read-only open: if *cmd* reports the file was modified, the
    changes are discarded with a warning (the vault is not updated).
    """
    log.verbose(f"openVPath {path} {destpath} {cmd}")
    (plainPath, vpath) = validateOpenVPath(path)
    pp64 = getPassPhrase(config.CONFIG['general']['group'], False)
    crypto.decryptFile(vpath, destpath, pp64)
    if destpath is not None and cmd is not None:
        if utils.runCommand(cmd, destpath):
            log.warning("file has been modified; updates discarded")
def editVPath(path, destpath, cmd):
    """Edit the vault file for *path*: decrypt, run *cmd*, re-encrypt on change.

    When the vault file does not exist yet, a new passphrase (with
    confirmation) is requested instead of decrypting. The temporary
    plaintext at *destpath* is removed afterwards.
    """
    log.verbose(f"editVPath {path} {destpath} {cmd}")
    (plainPath, vpath) = validateEditVPath(path)
    log.verbose(f"editVpath plainPath={plainPath}, vpath={vpath}")
    if os.path.exists(vpath):
        log.verbose(f"editVpath vpath {vpath} exists; decrypting")
        pp64 = getPassPhrase(config.CONFIG['general']['group'], False)
        crypto.decryptFile(vpath, destpath, pp64)
    else:
        log.verbose(f"editVpath vpath {vpath} does not exist")
        # New vault file: ask for a fresh passphrase with confirmation.
        pp64 = getPassPhrase(config.CONFIG['general']['group'], True)
    if utils.runCommand(cmd, destpath):
        # Editor reported a change: back up the old version, then re-encrypt.
        file.backupFile(path)
        crypto.encryptFile(destpath, vpath, pp64)
    else:
        if os.path.exists(destpath):
            log.warning("file has not been modified")
        else:
            log.warning("file has not been created")
    file.deletePath(destpath)
def encryptVPath(path):
    """Encrypt the plaintext file for *path* and remove the plaintext copy."""
    log.verbose(f"encryptVPath path={path}")
    (plainPath, vpath) = validateEncryptVPath(path)
    log.verbose(f"encryptVpath plainPath={plainPath}, vpath={vpath}")
    pp64 = getPassPhrase(config.CONFIG['general']['group'], True)
    crypto.encryptFile(plainPath, vpath, pp64)
    file.deletePath(plainPath)
def reencryptVPath(path, destpath):
    """Re-encrypt the vault file for *path* under a new passphrase.

    Decrypts with the current group passphrase into *destpath*, then
    encrypts back with a newly confirmed passphrase. Distinct cache groups
    (".current"/".new") keep the two passphrases separate on the server.
    """
    log.verbose(f"reencryptVPath path={path}, destpath={destpath}")
    (plainPath, vpath) = validateReencryptVPath(path)
    log.verbose(f"reencryptVpath plainPath={plainPath}, vpath={vpath}")
    group = config.CONFIG['general']['group']
    group_current = group + ".current"
    group_new = group + ".new"
    pp64 = getPassPhrase(group_current, False, " (current)")
    crypto.decryptFile(vpath, destpath, pp64)
    pp64 = getPassPhrase(group_new, True, " (new)")
    crypto.encryptFile(destpath, vpath, pp64)
    # NOTE(review): the plaintext at destpath is not removed here — confirm
    # the caller is responsible for cleaning it up.
def getPassPhrase(group, confirm, tag = ""):
    """Obtain the base64-encoded passphrase for *group*.

    First asks the vault server; when the server has none, prompts the user
    (optionally with a confirmation pass), caches the result on the server
    and returns it.

    Raises VaultQuit when input is aborted or any error occurs while
    prompting (a mismatch raises VaultSecurityError inside the try block,
    which is then wrapped in VaultQuit — behavior preserved from before).
    """
    # Typo fix: "comfirm" -> "confirm" in the log message.
    log.verbose(f"getting passphrase group={group}, confirm={confirm}")
    pp = client.sendRequest(['get', group])
    if len(pp) >= 1:
        log.verbose("got passphrase from server")
        log.sensitive(f"passphrase (base64): {pp[0]}")
        return pp[0]
    pp = ''
    try:
        while pp == '':
            pp = getpass.getpass(f"Passphrase{tag}: ")
        if confirm:
            pp2 = ''
            while pp2 == '':
                pp2 = getpass.getpass(f"Confirm Passphrase{tag}: ")
            if pp != pp2:
                del pp
                del pp2
                raise errors.VaultSecurityError("Passphrases do not match")
            del pp2
        log.sensitive(f"passphrase |{pp}|")
        pp64 = utils.str2b64(pp)  # base64 encoding of the utf-8 passphrase
        log.sensitive(f"base64 |{pp64}|")
        client.sendRequest(['set', group, pp64])
        del pp
        return pp64
    except Exception as e:
        # EOFError is an Exception subclass and both original handlers did
        # the same thing, so a single handler replaces the duplicated
        # except EOFError / except Exception branches (behavior unchanged).
        log.verbose(f"exception raised: {e}")
        raise errors.VaultQuit(str(e))
|
25,317 | b7a7ce919d30ee8e2c6be623838b78df775cf9a3 | from flask import Flask, request
from flask import render_template
app = Flask(__name__)
import json
import argparse
import urllib
import json
import os
import oauth2
import re
import csv
import nltk
import requests
import re, pickle, csv, os
import time
import operator
from textblob import TextBlob
from nltk.stem import WordNetLemmatizer
from elasticsearch import Elasticsearch
# Shared NLP / search resources, built once at import time.
lemmatizer=WordNetLemmatizer()
es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
r = requests.get('http://localhost:9200')
i = 1
countNounPhrase={}
# Drop any previous index; ignore "already exists" / "not found" errors.
es.indices.delete(index="twittermid", ignore=[400, 404])
mappings = {
    "doc": {
        "properties": {
            "elasticsearch": {
                "properties": {
                    "sentiment": {
                        "type": "string"
                    }
                }
            }
        }
    }
}
#creation of index with the mapping
es.indices.create(index='twittermid', body=mappings)
#save negative words in a dictionary
negativedict={}
fp = open('negativewords.txt', 'r')
line = fp.readline()
while line:
    word = line.strip()
    negativedict[word]=1
    line = fp.readline()
fp.close()
#save positive words in a dictionary
positivedict={}
fp = open('positivewords.txt', 'r')
line = fp.readline()
while line:
    word = line.strip()
    positivedict[word]=1
    line = fp.readline()
fp.close()
#save emoji in a dictionary
# emojicollection.txt is CSV: presumably word,escape-sequence,score — TODO confirm.
# NOTE(review): these loops shadow the builtin name 'list'.
emojidict={}
fp = open('emojicollection.txt', 'r')
line = fp.readline()
while line:
    word = line.strip()
    list=word.split(',')
    emojidict[list[1]]=list[0]
    line = fp.readline()
fp.close()
#save emoji Value in a dictionary
emojiValue={}
fp = open('emojicollection.txt', 'r')
line = fp.readline()
while line:
    word = line.strip()
    list=word.split(',')
    emojiValue[list[1]]=list[2]
    line = fp.readline()
fp.close()
#start replaceTwoOrMore
def replaceTwoOrMore(s):
    """Collapse any run of two-or-more identical characters down to exactly two."""
    return re.sub(r"(.)\1{1,}", r"\1\1", s, flags=re.DOTALL)
#end
def posnegcheck(tweet):
    """Lexicon sentiment score: +1 per positive word, -1 per negative word."""
    score = 0
    for token in tweet.split():
        if token in positivedict:
            score += 1
        if token in negativedict:
            score -= 1
    return score
def findemoji(tweet):
    """Score emoji found in *tweet*: +2 per positive emoji, -2 per negative.

    Emoji appear in json.dumps() output as literal backslash-u escape text;
    each 12-character sequence is looked up in the module-level emojiValue
    table.
    """
    num = 0
    i = 0
    while i < len(tweet):
        # '\\ud' spelled with an escaped backslash: we search for the literal
        # backslash-u-d text. (Python 2 treated the undefined escape '\ud'
        # identically; Python 3 rejects the truncated \u escape at compile
        # time, so the explicit form is both equivalent and portable.)
        if tweet.find('\\ud', i, len(tweet)) == -1:
            break
        i = tweet.find('\\ud', i, len(tweet))
        key = tweet[i:i+12]
        if key in emojiValue:
            # BUG FIX: emojiValue holds *strings* read from a CSV file; the
            # original compared str > 0, which in Python 2 is always True
            # (so every known emoji counted as positive). Convert to a
            # number first. (Assumes the CSV score column is numeric text —
            # TODO confirm against emojicollection.txt.)
            value = float(emojiValue[key])
            if value > 0:
                num = num + 2
            elif value < 0:
                num = num - 2
            i = i + 3
        else:
            i = i + 1
    return num
def extractemoji(tweet):
    """Replace each known emoji escape sequence with its word, doubled.

    Emoji appear in json.dumps() output as the literal text '\\ud...' (a
    12-character surrogate-pair escape); each known sequence is replaced by
    two copies of its word from emojidict, presumably to weight emoji more
    heavily in the features — TODO confirm intent.
    """
    i=0
    while i<len(tweet):
        if(tweet.find('\ud',i,len(tweet))==-1):
            break
        else:
            i=tweet.find('\ud',i,len(tweet))
            if tweet[i:i+12] in emojidict.keys():
                #print(emojidict[tweet[i:i+12]])
                tweet=tweet.replace(tweet[i:i+12]," "+emojidict[tweet[i:i+12]]+" "+emojidict[tweet[i:i+12]]+" ")
                i=i+3
            else:
                i=i+1
    return tweet
#start process_tweet
def processemojiTweet(tweet):
    """Normalise a tweet for the emoji-aware classifiers.

    Expands emoji to words, lowercases, masks URLs and @user mentions,
    collapses whitespace, strips '#' from hashtags and surrounding quotes.
    """
    cleaned = extractemoji(tweet)
    cleaned = cleaned.lower()
    # (pattern, replacement) pairs, applied in the original order.
    rules = [
        ('((www\.[^\s]+)|(https?://[^\s]+))', 'URL'),
        ('@[^\s]+', 'AT_USER'),
        ('[\s]+', ' '),
        (r'#([^\s]+)', r'\1'),
    ]
    for pattern, replacement in rules:
        cleaned = re.sub(pattern, replacement, cleaned)
    return cleaned.strip('\'"')
#end
#start process_tweet
def processnoemojiTweet(tweet):
    """Normalise a tweet (emoji left untouched) for the plain classifiers."""
    cleaned = tweet.lower()
    rules = [
        ('((www\.[^\s]+)|(https?://[^\s]+))', 'URL'),   # mask links
        ('@[^\s]+', 'AT_USER'),                         # mask mentions
        ('[\s]+', ' '),                                 # collapse whitespace
        (r'#([^\s]+)', r'\1'),                          # '#tag' -> 'tag'
    ]
    for pattern, replacement in rules:
        cleaned = re.sub(pattern, replacement, cleaned)
    return cleaned.strip('\'"')
#end
#start getStopWordList
def getStopWordList(stopWordListFileName):
    """Read one stop word per line from the given file.

    The AT_USER and URL placeholder tokens are always included first.
    Returns the list of stop words.
    """
    stopWords = ['AT_USER', 'URL']
    # 'with' guarantees the handle is closed even if reading raises
    # (the original leaked the handle on error).
    with open(stopWordListFileName, 'r') as fp:
        for line in fp:
            stopWords.append(line.strip())
    return stopWords
#end
#start getfeatureVector
def getFeatureVector(tweet, stopWords):
    """Build a list of lemmatised, lowercased word-bigram features for *tweet*.

    Each element is "prev current"; the first word only seeds the window,
    so a tweet of N words yields N-1 bigrams. The stopWords argument is
    currently unused — the filter below is commented out.
    """
    featureVector = []
    words = tweet.split()
    past_word=""
    word_count=0
    for w in words:
        w=lemmatizer.lemmatize(w)
        #replace two or more with two occurrences
        w = replaceTwoOrMore(w)
        #strip punctuation
        w = w.strip('\'"?,.')
        #check if it consists of only words
        val = re.search(r"^[a-zA-Z][a-zA-Z0-9]*[a-zA-Z]+[a-zA-Z0-9]*$", w)
        #ignore if it is a stopWord
        #if(w in stopWords or val is None):
            #continue
        #else:
        if word_count>0:
            featureVector.append(past_word+" "+w.lower())
        past_word=w.lower()
        word_count=word_count+1
    return featureVector
#end
#start extract_features
def extract_features(tweet, featureList):
    """Binary presence features: contains(word) -> whether word is in tweet."""
    present = set(tweet)
    return {'contains(%s)' % feature: feature in present for feature in featureList}
#end
stopWords = getStopWordList('d:\\stopwords.txt')
# Four pre-trained Naive Bayes classifiers (unigram/bigram x with/without
# emoji) are unpickled once at import time, each with the feature list it
# was trained on.
# no emoji bigram classifier
noemojibigramfeatureList = []
fp = open("bigramnaiveList.txt", 'r')
line = fp.readline()
while line:
    word = line.strip()
    noemojibigramfeatureList.append(word)
    line = fp.readline()
fp.close()
# open the pickle
classifier_f=open("noemojibigramnaivebayes.classifier","rb")
noemojibigramClassifier= pickle.load(classifier_f)
classifier_f.close()
# emoji bigram classifier
bigramemojifeatureList = []
fp = open("bigramemojiNaive.txt", 'r')
line = fp.readline()
while line:
    word = line.strip()
    bigramemojifeatureList.append(word)
    line = fp.readline()
fp.close()
# open the pickle
classifier_f=open("bigramemojinaivebayes.classifier","rb")
bigramemojiClassifier= pickle.load(classifier_f)
classifier_f.close()
# no emoji unigram
noemojiunigramfeatureList = []
fp = open("noemojiList.txt", 'r')
line = fp.readline()
while line:
    word = line.strip()
    noemojiunigramfeatureList.append(word)
    line = fp.readline()
fp.close()
# open the pickle
classifier_f=open("noemojiunigarmnaivebayes.classifier","rb")
noemojiunigramClassifier= pickle.load(classifier_f)
classifier_f.close()
#unigram emoji
unigramemojifeatureList = []
fp = open("unigramnaiveList.txt", 'r')
line = fp.readline()
while line:
    word = line.strip()
    unigramemojifeatureList.append(word)
    line = fp.readline()
fp.close()
# open the unigram naive classifier
classifier_f=open("unigramemojinaive.classifier","rb")
unigramemojiClassifier= pickle.load(classifier_f)
classifier_f.close()
#class to connect with twiiter api and get data related to text
class TwitterData:
    """Fetches tweets from the Twitter search API and indexes them, with
    four sentiment labels each, into the 'twittermid' Elasticsearch index.

    (Python 2 code: uses print statements, raw_input and urllib.urlencode.)
    """

    def parse_config(self):
        """Load OAuth credentials from config.json, or fall back to CLI
        arguments / interactive prompts. Returns the config dict."""
        config = {}
        # from file args
        if os.path.exists('config.json'):
            with open('config.json') as f:
                config.update(json.load(f))
        else:
            # may be from command line
            parser = argparse.ArgumentParser()
            parser.add_argument('-ck', '--consumer_key', default=None, help='Your developper `Consumer Key`')
            parser.add_argument('-cs', '--consumer_secret', default=None, help='Your developper `Consumer Secret`')
            parser.add_argument('-at', '--access_token', default=None, help='A client `Access Token`')
            parser.add_argument('-ats', '--access_token_secret', default=None, help='A client `Access Token Secret`')
            args_ = parser.parse_args()
            def val(key):
                # Priority: config file, then CLI flag, then interactive prompt.
                return config.get(key)\
                    or getattr(args_, key)\
                    or raw_input('Your developper `%s`: ' % key)
            config.update({
                'consumer_key': val('consumer_key'),
                'consumer_secret': val('consumer_secret'),
                'access_token': val('access_token'),
                'access_token_secret': val('access_token_secret'),
            })
        # should have something now
        return config
    #end

    def oauth_req(self, url, http_method="GET", post_body=None,
                  http_headers=None):
        """Perform an OAuth1-signed HTTP request and return the body."""
        config = self.parse_config()
        consumer = oauth2.Consumer(key=config.get('consumer_key'), secret=config.get('consumer_secret'))
        token = oauth2.Token(key=config.get('access_token'), secret=config.get('access_token_secret'))
        client = oauth2.Client(consumer, token)
        resp, content = client.request(
            url,
            method=http_method,
            body=post_body or '',
            headers=http_headers
        )
        return content
    #end

    #start getTwitterData
    def getData(self, keyword, maxID, j):
        """Fetch up to 100 recent tweets matching *keyword*, classify each
        with all four classifiers (score adjusted by the word lexicon and,
        for the emoji variants, by findemoji) and index the results starting
        at Elasticsearch document id *j*.

        maxID: pass 0 on the first call; subsequent calls page backwards.
        Returns the id of the last tweet seen (for use as the next max_id).
        """
        maxTweets = 100
        url = 'https://api.twitter.com/1.1/search/tweets.json?'
        if maxID==0:
            data = {'q': keyword, 'lang': 'en', 'result_type': 'recent', 'count': maxTweets, 'include_entities': 0}
        else:
            data = {'q': keyword, 'lang': 'en', 'result_type': 'recent', 'count': maxTweets, 'include_entities': 0, 'max_id':maxID}
        #Add if additional params are passed
        url += urllib.urlencode(data)
        response = self.oauth_req(url)
        jsonData = json.loads(response)
        tweets = []
        s=0
        if 'errors' in jsonData:
            print "API Error"
            print jsonData['errors']
        else:
            for item in jsonData['statuses']:
                s =item['id']
                # json.dumps keeps emoji as literal \u escape text, which the
                # emoji helpers rely on.
                sepwords = json.dumps(item['text']).encode("utf-8").split()
                i=0
                for word in sepwords:
                    sepwords[i]=lemmatizer.lemmatize(word)
                    i=i+1
                testtweet=" ".join(sepwords)
                noemojibigramscore=0
                noemojiunigramscore=0
                bigramscore=0
                unigramscore=0
                processednoemojiTweet = processnoemojiTweet(testtweet)
                processedemojiTweet = processemojiTweet(testtweet)
                noemojibigramsentiment = noemojibigramClassifier.classify(extract_features(getFeatureVector(processednoemojiTweet, stopWords), noemojibigramfeatureList))
                bigramsentiment = bigramemojiClassifier.classify(extract_features(getFeatureVector(processedemojiTweet, stopWords), bigramemojifeatureList))
                noemojiunigramsentiment = noemojiunigramClassifier.classify(extract_features(getFeatureVector(processednoemojiTweet, stopWords), noemojiunigramfeatureList))
                unigramsentiment = unigramemojiClassifier.classify(extract_features(getFeatureVector(processedemojiTweet, stopWords), unigramemojifeatureList))
                # Lexicon score shared by all four adjusted sentiments below.
                score=posnegcheck(processednoemojiTweet)
                # no emoji bigram
                if noemojibigramsentiment=="positive":
                    noemojibigramscore =1
                if noemojibigramsentiment=="negative":
                    noemojibigramscore =-1
                noemojibigramscore=score+noemojibigramscore
                if noemojibigramscore >0:
                    noemojibigramsentiment ="positive"
                elif noemojibigramscore <0:
                    noemojibigramsentiment ="negative"
                else:
                    noemojibigramsentiment ="neutral"
                #bigram emoji
                if bigramsentiment=="positive":
                    bigramscore =1
                if bigramsentiment=="negative":
                    bigramscore =-1
                bigramscore=score+bigramscore+findemoji(testtweet)
                if bigramscore>0:
                    bigramsentiment="positive"
                elif bigramscore<0:
                    bigramsentiment="negative"
                else:
                    bigramsentiment="neutral"
                #noemojiunigramsentiment
                if noemojiunigramsentiment=="positive":
                    noemojiunigramscore=1
                if noemojiunigramsentiment=="negative":
                    noemojiunigramscore=-1
                noemojiunigramscore=score+noemojiunigramscore
                if noemojiunigramscore>0:
                    noemojiunigramsentiment="positive"
                elif noemojiunigramscore<0:
                    noemojiunigramsentiment="negative"
                else:
                    noemojiunigramsentiment="neutral"
                #unigramsentiment
                if unigramsentiment=="positive":
                    unigramscore=1
                if unigramsentiment=="negative":
                    unigramscore=-1
                unigramscore=score+unigramscore+findemoji(testtweet)
                if unigramscore>0:
                    unigramsentiment="positive"
                elif unigramscore<0:
                    unigramsentiment="negative"
                else:
                    unigramsentiment="neutral"
                # One document per tweet, one field per classifier verdict.
                es.index(index='twittermid', doc_type='elasticsearch', id=j, body={"doc": {"tweets": testtweet }})
                es.update(index="twittermid", doc_type='elasticsearch', id=j, body={"doc": {"unigramnoemoji": noemojiunigramsentiment }})
                es.update(index="twittermid", doc_type='elasticsearch', id=j, body={"doc": {"bigramnoemoji": noemojibigramsentiment }})
                es.update(index="twittermid", doc_type='elasticsearch', id=j, body={"doc": {"unigramemoji": unigramsentiment }})
                es.update(index="twittermid", doc_type='elasticsearch', id=j, body={"doc": {"bigramemoji": bigramsentiment }})
                j=j+1
        #time.sleep(2)
        return s
#home template
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('home.html', title='Home')
@app.route('/hash', methods=['GET', 'POST'])
def search():
    """Fetch tweets for the submitted hashtag, classify them, and render the
    per-classifier sentiment counts from Elasticsearch."""
    if request.method == 'POST':
        td = TwitterData()
        maID=0
        i=0
        j=0
        # Currently fetches a single batch of 100 tweets; the loop shape
        # allows raising the page count later.
        while i<1:
            maID=td.getData(request.form['hashtext'],maID,j)
            j=j+100
            i=i+1
        # One search per classifier/sentiment combination.
        binoemonu = es.search(index="twittermid", body={"query": {"match": {'bigramnoemoji':'neutral'}}})
        biemonu = es.search(index="twittermid", body={"query": {"match": {'bigramemoji':'neutral'}}})
        binoemopo = es.search(index="twittermid", body={"query": {"match": {'bigramnoemoji':'positive'}}})
        biemopo = es.search(index="twittermid", body={"query": {"match": {'bigramemoji':'positive'}}})
        binoemone = es.search(index="twittermid", body={"query": {"match": {'bigramnoemoji':'negative'}}})
        biemone = es.search(index="twittermid", body={"query": {"match": {'bigramemoji':'negative'}}})
        uninoemonu = es.search(index="twittermid", body={"query": {"match": {'unigramnoemoji':'neutral'}}})
        uniemonu = es.search(index="twittermid", body={"query": {"match": {'unigramemoji':'neutral'}}})
        uninoemopo = es.search(index="twittermid", body={"query": {"match": {'unigramnoemoji':'positive'}}})
        uniemopo = es.search(index="twittermid", body={"query": {"match": {'unigramemoji':'positive'}}})
        uninoemone = es.search(index="twittermid", body={"query": {"match": {'unigramnoemoji':'negative'}}})
        uniemone = es.search(index="twittermid", body={"query": {"match": {'unigramemoji':'negative'}}})
        return render_template('unigram.html',uniemopo=uniemopo,uniemone=uniemone,uniemonu=uniemonu,uninoemopo=uninoemopo,uninoemone=uninoemone,uninoemonu=uninoemonu,biemopo=biemopo,biemone=biemone,biemonu=biemonu,binoemopo=binoemopo,binoemone=binoemone,binoemonu=binoemonu)
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
|
25,318 | 479fa0d1ee53db793927ec7819a1d1fb90c69426 | import torch
from torch import nn
from tqdm import tqdm
from itertools import chain
from sklearn.metrics import classification_report, f1_score
import math
class BiLSTMClassifier(nn.Module):
    """Single-layer bidirectional LSTM sentence classifier.

    Word indices are embedded with pretrained (still trainable) w2v weights,
    run through a BiLSTM, and the final forward/backward hidden states are
    concatenated and projected to output_size logits.
    """

    def __init__(self, output_size,rnn_hidden_size=100, dropout_p=0.5, w2v_weights=None):
        super(BiLSTMClassifier, self).__init__()
        # freeze=False keeps the pretrained embeddings trainable.
        self.embedding = nn.Embedding.from_pretrained(w2v_weights, freeze=False)
        embed_dim = 300  # assumes w2v_weights are 300-dimensional — TODO confirm
        self.rnn = nn.LSTM(
            input_size=embed_dim,
            hidden_size=rnn_hidden_size,
            bias=True,
            bidirectional=True,
            num_layers=1
        )
        self.dropout = nn.Dropout(dropout_p)
        # *2 because forward and backward final states are concatenated.
        self.fc = nn.Linear(rnn_hidden_size * 2, output_size)

    def forward(self, x, lengths):
        # x: padded index tensor; lengths: true (unpadded) sequence lengths.
        # NOTE(review): pack_padded_sequence is called with batch_first=True,
        # so x is presumably (batch, seq) — confirm against the data loader.
        lengths = torch.tensor(lengths).cpu()
        embed = self.dropout(self.embedding(x))
        packed_input = nn.utils.rnn.pack_padded_sequence(embed, lengths, batch_first=True, enforce_sorted=False)
        output, (h_n, c_n) = self.rnn(packed_input)
        # Concatenate last-layer forward (h_n[-2]) and backward (h_n[-1]) states.
        hidden = torch.cat((h_n[-2,:,:], h_n[-1,:,:]), dim=1)
        logits = self.fc(hidden)
        return logits
def train(model, optimizer, loss_function, loader, device, log_every_n=10):
    """
    Run a single epoch of training.

    Returns (loss_history, running_loss_history): per-batch losses and the
    rolling average sampled every log_every_n batches.
    """
    model.train()  # Run model in training mode
    loss_history = []
    running_loss = 0.
    running_loss_history = []
    for i, batch in tqdm(enumerate(loader)):
        optimizer.zero_grad()  # Always set gradient to 0 before computing it
        # batch: (inputs, lengths, targets) — presumably produced by the
        # project's collate function; TODO confirm.
        logits = model(batch[0].to(device), batch[1]).squeeze()
        loss = loss_function(logits, batch[2].to(device))
        loss_history.append(loss.item())
        running_loss += (loss_history[-1] - running_loss) / (i + 1)  # Compute rolling average
        loss.backward()  # Perform backprop, which will compute dL/dw
        if log_every_n and i % log_every_n == 0:
            print("Running loss: ", running_loss)
            running_loss_history.append(running_loss)
        nn.utils.clip_grad_norm_(model.parameters(), 3.0)  # We clip gradient's norm to 3
        optimizer.step()  # Update step: w = w - eta * dL/dW
    print("Epoch completed!")
    print("Epoch Loss: ", running_loss)
    print("Epoch Perplexity: ", math.exp(running_loss))
    # The history information can allow us to draw a loss plot
    return loss_history, running_loss_history
def evaluate(model, optimizer, loss_function, loader, device, labels, log_every_n=10):
    """
    Evaluate the model on a validation set.

    Applies sigmoid to the logits and thresholds at 0.5, then prints the
    loss, micro F1 and a classification report. The optimizer and
    log_every_n arguments are unused (kept for signature symmetry with
    train). Returns (loss_history, running_loss_history).
    """
    model.eval()
    batch_wise_true_labels = []
    batch_wise_predictions = []
    loss_history = []
    running_loss = 0.
    running_loss_history = []
    with torch.no_grad():  # Disable gradient computation - required only during training
        for i, batch in tqdm(enumerate(loader)):
            logits = model(batch[0].to(device), batch[1]).squeeze()
            loss = loss_function(logits, batch[2].to(device))
            loss_history.append(loss.item())
            running_loss += (loss_history[-1] - running_loss) / (i + 1)  # Compute rolling average
            running_loss_history.append(running_loss)
            predictions = torch.sigmoid(logits)
            batch_wise_true_labels.append(batch[2].view(-1).tolist())
            batch_wise_predictions.append(predictions.view(-1).tolist())
    # flatten the list of predictions using itertools
    all_true_labels = list(chain.from_iterable(batch_wise_true_labels))
    all_predictions = list(chain.from_iterable(batch_wise_predictions))
    # Binarise probabilities at the 0.5 threshold.
    all_predictions = [1 if p > 0.5 else 0 for p in all_predictions]
    print("Evaluation Loss: ", running_loss)
    # Now we can generate a classification report
    print("Classification report after epoch:")
    print(f1_score(all_true_labels, all_predictions, average='micro'))
    print(classification_report(all_true_labels, all_predictions, labels=labels))
    return loss_history, running_loss_history
def run_training(model, optimizer, loss_function, train_loader, valid_loader, device, labels, n_epochs=10):
    """Alternate training and validation for n_epochs, then save the weights."""
    for i in range(n_epochs):
        train(model, optimizer, loss_function, train_loader, device, log_every_n=10)
        evaluate(model, optimizer, loss_function, valid_loader, device, labels, log_every_n=10)
    # torch.save(model,'en_model.pkl')
    torch.save(model.state_dict(), 'en_model_state.pkl')
|
25,319 | e9da13fd536fce544c5f06e610546d8a96b8441e | seen = set()
# Read lines until EOF; echo each whitespace-separated token the first time
# its lowercase form is seen, print "." for repeats — one output line per
# input line.
while True:
    try:
        a = input().split()
    except EOFError:
        break
    for x in a:
        if not (x.lower() in seen):
            print(x,end=" ")
            seen.add(x.lower())
        else:
            print(".",end=" ")
    print()
|
25,320 | ec71420df65bd1abc46f5a1cbdd8c8c51f3b8970 | # EX-2 - Faça um programa que leia os dados do usuário (nome, sobrenome, idade), adicione
# em uma lista e imprima seus elementos
# pegando os dados de entrada do usuário
# Prompt for the user's data.
nome = input('Digite seu nome: ')
sobrenome = input('Digite seu sobrenome: ')
idade = input('Digite sua idade: ')

# Collect the answers in a single list literal.
dados = [nome, sobrenome, idade]

# Print each collected value on its own line.
print('\nImprimindo os dados:')
for dado in dados:
    print(dado)
|
25,321 | 1bcd7efdf15d73c82b4d69b69f8d4a6c51272724 | ## Basic feature extraction using text data
# Number of words
# Number of characters
# Average word length
# Number of stopwords
# Number of special characters
# Number of numerics
# Number of uppercase words
import pandas as pd
from nltk.corpus import stopwords
# English stop-word list used by the per-comment stopword counter below.
stop = stopwords.words('english')
## to increase the display width and display it on the same line
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
## function to calculate the average-word-length in a sentence
def avg_word(sentence):
    """Return the average word length in *sentence*, or 0 for empty input.

    The input is coerced with str() so NaN / non-string cells don't crash.
    """
    words = str(sentence).split()
    if not words:
        # Guard against ZeroDivisionError on empty or whitespace-only text.
        return 0
    return (sum(len(word) for word in words)/len(words))
## to read the csv file
## to read the csv file
train = pd.read_csv('./Inputs/Hoteldata.csv', low_memory=False)
# train = pd.read_csv('sampleData.csv')
# print(train)
# print(train.head())
## to calculate the word-Count in the comments
train['word_Count'] = train['Comment'].apply(lambda x: len(str(x).split(" ")))
## to calculate the char-count in the comments
train['char_Count'] = train['Comment'].str.len() #this will also include blank spaces
## to calculate the average word length of the sentence
train['avg_word'] = train['Comment'].apply(lambda x: avg_word(x))
## to count the stopwords # Some examples of stop words are: "a," "and," "but," "how," "or," and "what."
train['stopwords'] = train['Comment'].apply(lambda x: len([x for x in str(x).split() if x in stop]))
## to count the number of special characters starting with hashtags.
train['hashtags'] = train['Comment'].apply(lambda x: len([x for x in str(x).split() if x.startswith('#')]))
## to count the number of numerics
train['numerics'] = train['Comment'].apply(lambda x: len([x for x in str(x).split() if x.isdigit()]))
## to count the number of Uppercase words
train['upper'] = train['Comment'].apply(lambda x: len([x for x in str(x).split() if x.isupper()]))
## to print the array of all the pre-processes, then persist to disk
print(train[['Comment','word_Count','char_Count','avg_word','stopwords','hashtags','numerics','upper']])
train.to_csv('./Outputs/output-main1.csv')
|
25,322 | bcf335bc48708380ea9f5aa1342a4fcfcacc86b7 | '''
To create a tuple with one value you have to add a comma after the one value,
otherwise Python thinks that it is not a tuple
'''
# A one-element tuple requires the trailing comma.
this_tuple = ('mango',) # Tuple
print(this_tuple)
print(type(this_tuple))
this_tuple = ('mango') # Not Tuple: parentheses alone just group a string
print(this_tuple)
print(type(this_tuple)) #string
# Tuple elements can be any mix of data types
data_type = ('string', 20, 65.233, True)
print(data_type)
print(type(data_type))
# Tuple Constructor: tuple(iterable)
constructor = tuple(('one', 'two', 'three'))
print(constructor)
print(type(constructor)) |
25,323 | 2df60d010ca3a429e93cbd5c171ada4cedc9def4 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from six.moves import range, zip
"""
Author : Lyubimov, A.Y.
Created : 04/07/2015
Last Changed: 11/21/2019
Description : Analyzes integration results and outputs them in an accessible
format. Includes (optional) unit cell analysis by hierarchical
clustering (Zeldin, et al., Acta Cryst D, 2013). In case of
multiple clusters outputs a file with list of integrated pickles
that comprise each cluster. (The clustering module requires scipy
and is thus currently suspended.) Populates a PHIL file for PRIME
with information from integration results (e.g. unit cell,
resolution, data path, etc.)
"""
import os
import numpy as np
from collections import Counter
import math
from libtbx import easy_pickle as ep
from cctbx import crystal, uctbx, statistics
from cctbx.sgtbx.lattice_symmetry import metric_subgroups
from iota import iota_version, now
import iota.utils.utils as util
from prime.postrefine.mod_mx import mx_handler
from prime.postrefine import mod_input
def isprop(v):
    """Return True if *v* is a ``property`` object.

    Useful when introspecting a class to tell property attributes
    apart from plain methods and data attributes.
    """
    return isinstance(v, property)
class AnalysisResult(object):
    # Empty placeholder class; presumably used as a plain attribute
    # container (namespace) for analysis results -- no behavior defined here.
    pass
class Plotter(object):
    def __init__(self, params, info):
        """Store run parameters and derive output paths for the plots.

        :param params: IOTA parameter object
        :param info: run info object; must provide ``get_final_objects()``
                     and a ``viz_base`` output directory
        """
        self.info = info
        self.params = params
        self.final_objects = self.info.get_final_objects()
        # Output PDF files for the three summary plots
        self.hm_file = os.path.join(self.info.viz_base, "heatmap.pdf")
        self.hi_file = os.path.join(self.info.viz_base, "res_histogram.pdf")
        self.xy_file = os.path.join(self.info.viz_base, "beamXY.pdf")
        self.font = {"fontfamily": "sans-serif", "fontsize": 12}
    def plot_spotfinding_heatmap(self, write_files=False):
        """Plot a heatmap of spotfinding parameter combinations.

        Counts how many final objects used each (spot height, spot area)
        pair and renders the counts as an annotated heatmap.

        :param write_files: if True, save the figure to ``self.hm_file``
                            instead of showing it interactively
        """
        import matplotlib.pyplot as plt

        # Spot height / spot area values actually used by the final objects
        hlist = [i.final["sph"] for i in self.final_objects]
        alist = [i.final["spa"] for i in self.final_objects]
        ch = max(hlist) - min(hlist) + 1
        ca = max(alist) - min(alist) + 1
        # Frequency of each (height, area) combination
        ints = [(i.final["sph"], i.final["spa"]) for i in self.final_objects]
        ic = Counter(ints)
        # Fill the heatmap grid, shifted so min values land at index 0
        hm_data = np.zeros((ch, ca))
        for i in ic.items():
            hm_data[i[0][0] - min(hlist), i[0][1] - min(alist)] = i[1]
        rows = range(min(hlist), max(hlist) + 1)
        cols = range(min(alist), max(alist) + 1)
        row_labels = [str(i) for i in rows]
        col_labels = [str(j) for j in cols]
        fig, ax = plt.subplots()
        fig.canvas.draw()
        heatmap = plt.pcolor(hm_data, cmap="Reds")
        # Center tick labels on the heatmap cells
        ax.set_yticks(np.arange(len(rows)) + 0.5, minor=False)
        ax.set_xticks(np.arange(len(cols)) + 0.5, minor=False)
        ax.set_yticklabels(row_labels, minor=False)
        ax.set_xticklabels(col_labels, minor=False)
        ax.set_xlabel("Spot area")
        ax.set_ylabel("Spot height")
        plt.gca().set_xlim(0, len(cols))
        plt.gca().set_ylim(0, len(rows))
        # Annotate each cell with its count
        for y in range(hm_data.shape[0]):
            for x in range(hm_data.shape[1]):
                plt.text(
                    x + 0.5,
                    y + 0.5,
                    "%3d" % hm_data[y, x],
                    horizontalalignment="center",
                    verticalalignment="center",
                )
        if write_files:
            fig.savefig(self.hm_file, format="pdf", bbox_inches=0)
        else:
            plt.show()
def calculate_beam_xy(self):
"""calculates beam xy and other parameters."""
info = []
# Import relevant info
pixel_size = self.info.pixel_size
for i in [j.final for j in self.final_objects]:
try:
info.append(
[
i,
i["beamX"],
i["beamY"],
i["wavelength"],
i["distance"],
(i["a"], i["b"], i["c"], i["alpha"], i["beta"], i["gamma"]),
]
)
except IOError as e:
print("IOTA ANALYSIS ERROR: BEAMXY failed! ", e)
pass
# Calculate beam center coordinates and distances
beamX = [i[1] for i in info]
beamY = [j[2] for j in info]
beam_dist = [
math.hypot(i[1] - np.median(beamX), i[2] - np.median(beamY)) for i in info
]
beam_dist_std = np.std(beam_dist)
img_list = [
[i[0], i[1], i[2], i[3], i[4], i[5], j]
for i, j in list(zip(info, beam_dist))
]
# Separate out outliers
outliers = [i for i in img_list if i[3] > 2 * beam_dist_std]
clean = [i for i in img_list if i[3] <= 2 * beam_dist_std]
cbeamX = [i[1] for i in clean]
cbeamY = [j[2] for j in clean]
obeamX = [i[1] for i in outliers]
obeamY = [j[2] for j in outliers]
# Calculate median wavelength, detector distance and unit cell params from
# non-outliers only
wavelengths = [i[3] for i in clean]
distances = [i[4] for i in clean]
cells = [i[5] for i in clean]
wavelength = np.median(wavelengths)
det_distance = np.median(distances)
a = np.median([i[0] for i in cells])
b = np.median([i[1] for i in cells])
c = np.median([i[2] for i in cells])
# Calculate predicted L +/- 1 misindexing distance for each cell edge
aD = det_distance * math.tan(2 * math.asin(wavelength / (2 * a)))
bD = det_distance * math.tan(2 * math.asin(wavelength / (2 * b)))
cD = det_distance * math.tan(2 * math.asin(wavelength / (2 * c)))
return (
beamX,
beamY,
cbeamX,
cbeamY,
obeamX,
obeamY,
beam_dist,
[i[4] for i in info],
aD,
bD,
cD,
pixel_size,
)
def plot_beam_xy(self, write_files=False, return_values=False, threeD=False):
"""Plot beam center coordinates and a histogram of distances from the
median of beam center coordinates to each set of coordinates.
Superpose a predicted mis-indexing shift by L +/- 1 (calculated
for each axis).
"""
import matplotlib.pyplot as plt
# Get values
(
beamX,
beamY,
cbeamX,
cbeamY,
obeamX,
obeamY,
beam_dist,
distances,
aD,
bD,
cD,
pixel_size,
) = self.calculate_beam_xy()
# Plot figure
if threeD:
fig = plt.figure(figsize=(8, 8))
ax1 = fig.add_subplot(111, projection="3d")
else:
fig = plt.figure(figsize=(9, 13))
gsp = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
ax1 = fig.add_subplot(gsp[0, :], aspect="equal")
# Calculate axis limits of beam center scatter plot
ax1_delta = np.ceil(np.max(beam_dist))
xmax = round(np.median(beamX) + ax1_delta)
xmin = round(np.median(beamX) - ax1_delta)
ymax = round(np.median(beamY) + ax1_delta)
ymin = round(np.median(beamY) - ax1_delta)
zmax = round(np.ceil(np.max(distances)))
zmin = round(np.floor(np.min(distances)))
ax1.set_xlim(xmin, xmax)
ax1.set_ylim(ymin, ymax)
if threeD:
ax1.set_zlim(zmin, zmax)
# Plot beam center scatter plot
if threeD:
ax1.scatter(beamX, beamY, distances, alpha=1, s=20, c="grey", lw=1)
ax1.plot(
[np.median(beamX)],
[np.median(beamY)],
[np.median(distances)],
markersize=8,
marker="o",
c="yellow",
lw=2,
)
else:
ax1.scatter(cbeamX, cbeamY, alpha=1, s=20, c="grey", lw=1)
ax1.scatter(obeamX, obeamY, alpha=1, s=20, c="red", lw=1)
ax1.plot(
np.median(beamX),
np.median(beamY),
markersize=8,
marker="o",
c="yellow",
lw=2,
)
# Plot projected mis-indexing limits for all three axes
circle_a = plt.Circle(
(np.median(beamX), np.median(beamY)),
radius=aD,
color="r",
fill=False,
clip_on=True,
)
circle_b = plt.Circle(
(np.median(beamX), np.median(beamY)),
radius=bD,
color="g",
fill=False,
clip_on=True,
)
circle_c = plt.Circle(
(np.median(beamX), np.median(beamY)),
radius=cD,
color="b",
fill=False,
clip_on=True,
)
ax1.add_patch(circle_a)
ax1.add_patch(circle_b)
ax1.add_patch(circle_c)
# Set labels
ax1.set_xlabel("BeamX (mm)", fontsize=15)
ax1.set_ylabel("BeamY (mm)", fontsize=15)
if threeD:
ax1.set_zlabel("Distance (mm)", fontsize=15)
ax1.set_title("Beam XYZ Coordinates")
else:
ax1.set_title("Beam XY Coordinates")
if not threeD:
# Plot histogram of distances to each beam center from median
ax2 = fig.add_subplot(gsp[1, :])
ax2_n, ax2_bins, ax2_patches = plt.hist(
beam_dist, 20, facecolor="b", alpha=0.75, histtype="stepfilled"
)
ax2_height = (np.max(ax2_n) + 9) // 10 * 10
ax2.axis([0, np.max(beam_dist), 0, ax2_height])
ax2.set_xlabel("Distance from median (mm)", fontsize=15)
ax2.set_ylabel("No. of images", fontsize=15)
if write_files:
fig.savefig(self.xy_file, format="pdf", bbox_inches=0)
else:
plt.show()
if return_values:
return np.median(beamX), np.median(beamY), pixel_size
def plot_res_histogram(self, write_files=False):
import matplotlib.pyplot as plt
# Get resolution values
hres = [i.final["res"] for i in self.final_objects]
lres = [i.final["lres"] for i in self.final_objects]
# Plot figure
fig = plt.figure(figsize=(9, 13))
gsp = gridspec.GridSpec(2, 1)
hr = fig.add_subplot(gsp[0, :])
hr_n, hr_bins, hr_patches = plt.hist(
hres, 20, facecolor="b", alpha=0.75, histtype="stepfilled"
)
hr_height = (np.max(hr_n) + 9) // 10 * 10
hr.axis([np.min(hres), np.max(hres), 0, hr_height])
reslim = "High Resolution Limit ({})".format(r"$\AA$")
hr.set_xlabel(reslim, fontsize=15)
hr.set_ylabel("No. of frames", fontsize=15)
lr = fig.add_subplot(gsp[1, :])
lr_n, lr_bins, lr_patches = plt.hist(
lres, 20, facecolor="b", alpha=0.75, histtype="stepfilled"
)
lr_height = (np.max(lr_n) + 9) // 10 * 10
lr.axis([np.min(lres), np.max(lres), 0, lr_height])
reslim = "Low Resolution Limit ({})".format(r"$\AA$")
lr.set_xlabel(reslim, fontsize=15)
lr.set_ylabel("No. of frames", fontsize=15)
if write_files:
fig.savefig(self.hi_file, format="pdf", bbox_inches=0)
else:
plt.show()
class Analyzer(object):
"""Class to analyze integration results."""
    def __init__(self, info=None, params=None, gui_mode=False):
        """Collect handles to run info and parameters.

        :param info: run info object (stats, categories, file paths)
        :param params: IOTA parameter object
        :param gui_mode: True when driven from the GUI
        """
        self.info = info
        self.params = params
        self.gui_mode = gui_mode
        # Attributes for LivePRIME override
        self.best_pg = None
        self.best_uc = None
    def get_results(self, finished_objects=None):
        """Collect and categorize results from finished image objects.

        Sorts objects into failure/success categories, accumulates per-image
        stats, unit cells, pointers and observations, then computes dataset
        statistics.

        :param finished_objects: optional list of finished objects; when
               falsy they are pulled from ``self.info``
        :return: True if any objects integrated successfully, else False
        """
        if not finished_objects:
            finished_objects = self.info.get_finished_objects()
            if not finished_objects:
                return False
        final_objects = []
        # Reset the "not yet plotted" stat buckets for this pass
        self.info.unplotted_stats = {}
        for key in self.info.stats:
            self.info.unplotted_stats[key] = dict(lst=[])
        for obj in finished_objects:
            item = [obj.input_index, obj.img_path, obj.img_index]
            # This object has now been processed; drop it from the pending lists
            if len(self.info.unprocessed) > 0 and item in self.info.unprocessed:
                self.info.unprocessed.remove(item)
            if (
                len(self.info.categories["not_processed"][0]) > 0
                and item in self.info.categories["not_processed"][0]
            ):
                self.info.categories["not_processed"][0].remove(item)
            if obj.fail:
                # Failure string maps onto a category key (spaces -> underscores)
                key = obj.fail.replace(" ", "_")
                if key in self.info.categories:
                    self.info.categories[key][0].append(item)
            else:
                self.info.categories["integrated"][0].append(obj.final["final"])
                self.info.final_objects.append(obj.obj_file)
                final_objects.append(obj)
            # Anything that got past triage is counted as showing diffraction
            if not obj.fail or "triage" not in obj.fail:
                self.info.categories["have_diffraction"][0].append(obj.img_path)
        # Calculate processing stats from final objects
        if final_objects:
            self.info.pixel_size = final_objects[0].final["pixel_size"]
            # Get observations from file
            try:
                all_obs = ep.load(self.info.idx_file)
            except Exception:
                all_obs = None
            # Collect image processing stats
            for obj in final_objects:
                for key in self.info.stats:
                    if key in obj.final:
                        stat_tuple = (
                            obj.input_index,
                            obj.img_path,
                            obj.img_index,
                            obj.final[key],
                        )
                        self.info.stats[key]["lst"].append(stat_tuple)
                        # add proc filepath info to 'pointers'
                        pointer_dict = {
                            "img_file": obj.img_path,
                            "obj_file": obj.obj_file,
                            "img_index": obj.img_index,
                            "experiments": obj.eint_path,
                            "reflections": obj.rint_path,
                        }
                        self.info.pointers[str(obj.input_index)] = pointer_dict
                        if key not in self.info.unplotted_stats:
                            self.info.unplotted_stats[key] = dict(lst=[])
                        self.info.unplotted_stats[key]["lst"].append(stat_tuple)
                # Unit cells and space groups (i.e. cluster iterable)
                self.info.cluster_iterable.append(
                    [
                        float(obj.final["a"]),
                        float(obj.final["b"]),
                        float(obj.final["c"]),
                        float(obj.final["alpha"]),
                        float(obj.final["beta"]),
                        float(obj.final["gamma"]),
                        str(obj.final["sg"]),
                    ]
                )
                # Get observations from this image
                obs = None
                if "observations" in obj.final:
                    obs = obj.final["observations"].as_non_anomalous_array()
                else:
                    # Fall back to loading the integration pickle from disk
                    pickle_path = obj.final["final"]
                    if os.path.isfile(pickle_path):
                        try:
                            pickle = ep.load(pickle_path)
                            obs = pickle["observations"][0].as_non_anomalous_array()
                        except Exception as e:
                            print(
                                "IMAGE_PICKLE_ERROR for {}: {}".format(pickle_path, e)
                            )
                # Capturing() suppresses stdout chatter from cctbx calls
                with util.Capturing():
                    if obs:
                        # Append observations to combined miller array
                        obs = obs.expand_to_p1()
                        if all_obs:
                            all_obs = all_obs.concatenate(
                                obs, assert_is_similar_symmetry=False
                            )
                        else:
                            all_obs = obs
                        # Get B-factor from this image
                        try:
                            mxh = mx_handler()
                            asu_contents = mxh.get_asu_contents(500)
                            observations_as_f = obs.as_amplitude_array()
                            observations_as_f.setup_binner(auto_binning=True)
                            wp = statistics.wilson_plot(
                                observations_as_f, asu_contents, e_statistics=True
                            )
                            b_factor = wp.wilson_b
                        except RuntimeError as e:
                            b_factor = 0
                            print("B_FACTOR_ERROR: ", e)
                        self.info.b_factors.append(b_factor)
            # Save collected observations to file
            if all_obs:
                ep.dump(self.info.idx_file, all_obs)
            # Calculate dataset stats
            for k in self.info.stats:
                # Element 3 of each stat tuple is the actual value
                stat_list = list(zip(*self.info.stats[k]["lst"]))[3]
                stats = dict(
                    lst=self.info.stats[k]["lst"],
                    median=np.median(stat_list).item(),
                    mean=np.mean(stat_list).item(),
                    std=np.std(stat_list).item(),
                    max=np.max(stat_list).item(),
                    min=np.min(stat_list).item(),
                    cons=Counter(stat_list).most_common(1)[0][0],
                )
                self.info.stats[k].update(stats)
            return True
        else:
            return False
    def print_results(self, final_table=None):
        """Prints diagnostics from the final integration run.

        Builds a table of per-stat summaries (max/min/mean/std), the median
        beam center and the resolution range, then logs every line.

        :param final_table: optional pre-seeded list of table lines
        """
        assert self.info
        if not final_table:
            final_table = ["\n\n{:-^80}\n".format("ANALYSIS OF RESULTS")]
        if not self.info.categories["integrated"]:
            final_table.append("NO IMAGES INTEGRATED!")
        else:
            # Pad stat labels to a common width (multiple of 5, plus margin)
            label_lens = [len(v["label"]) for k, v in self.info.stats.items()]
            max_label = int(5 * round(float(np.max(label_lens)) / 5)) + 5
            for k, v in self.info.stats.items():
                # Resolution and beam center get special entries below
                if k in ("lres", "res", "beamX", "beamY"):
                    continue
                line = (
                    "{: <{l}}: max = {:<6.2f} min = {:<6.2f} "
                    "avg = {:<6.2f} ({:<6.2f})"
                    "".format(
                        v["label"], v["max"], v["min"], v["mean"], v["std"], l=max_label
                    )
                )
                final_table.append(line)
            # TODO: Figure out what to do with summary charts
            # # If more than one integrated image, plot various summary graphs
            # if len(self.info.categories['integrated']) > 1:
            #     plot = Plotter(self.params, self.info)
            #     if self.params.analysis.summary_graphs:
            #         if ( self.params.advanced.processing_backend == 'ha14' and
            #                 self.params.cctbx_ha14.grid_search.type is not None
            #         ):
            #             plot.plot_spotfinding_heatmap(write_files=True)
            #         plot.plot_res_histogram(write_files=True)
            #         med_beamX, med_beamY, pixel_size = plot.plot_beam_xy(write_files=True,
            #                                                              return_values=True)
            #     else:
            #         with warnings.catch_warnings():
            #             # To catch any 'mean of empty slice' runtime warnings
            #             warnings.simplefilter("ignore", category=RuntimeWarning)
            #             beamXY_info = plot.calculate_beam_xy()
            #             beamX, beamY = beamXY_info[:2]
            #             med_beamX = np.median(beamX)
            #             med_beamY = np.median(beamY)
            #             pixel_size = beamXY_info[-1]
            final_table.append(
                "{: <{l}}: X = {:<4.2f}, Y = {:<4.2f}"
                "".format(
                    "Median Beam Center",
                    self.info.stats["beamX"]["mean"],
                    self.info.stats["beamY"]["mean"],
                    l=max_label,
                )
            )
            # Special entry for resolution last
            v = self.info.stats["res"]
            final_table.append(
                "{: <{l}}: low = {:<6.2f} high = {:<6.2f} "
                "avg = {:<6.2f} ({:<6.2f})"
                "".format(
                    v["label"], v["max"], v["min"], v["mean"], v["std"], l=max_label
                )
            )
        for item in final_table:
            util.main_log(self.info.logfile, item, False)
        self.info.update(final_table=final_table)
def unit_cell_analysis(self):
"""Calls unit cell analysis module, which uses hierarchical clustering
(Zeldin, et al, Acta D, 2015) to split integration results according to
detected morphological groupings (if any).
Most useful with preliminary integration without target unit
cell specified.
"""
# Will not run clustering if only one integration result found or if turned off
if not self.info.categories["integrated"]:
util.main_log(
self.info.logfile, "\n\n{:-^80}\n".format(" UNIT CELL ANALYSIS "), True
)
util.main_log(self.info.logfile, "\n UNIT CELL CANNOT BE DETERMINED!", True)
elif len(self.info.categories["integrated"]) == 1:
unit_cell = self.info.cluster_iterable[0][:5]
point_group = self.info.cluster_iterable[0][6]
util.main_log(
self.info.logfile, "\n\n{:-^80}\n".format(" UNIT CELL ANALYSIS "), True
)
uc_line = (
"{:<6} {:^4}: {:<6.2f}, {:<6.2f}, {:<6.2f}, {:<6.2f}, "
"{:<6.2f}, {:<6.2f}".format(
"(1)",
point_group,
unit_cell[0],
unit_cell[1],
unit_cell[2],
unit_cell[3],
unit_cell[4],
unit_cell[5],
)
)
util.main_log(self.info.logfile, uc_line, True)
self.info.best_pg = str(point_group)
self.info.best_uc = unit_cell
else:
uc_table = []
uc_summary = []
if self.params.analysis.clustering.flag_on:
# run hierarchical clustering analysis
from xfel.clustering.cluster import Cluster
counter = 0
self.info.clusters = []
threshold = self.params.analysis.clustering.threshold
cluster_limit = self.params.analysis.clustering.limit
final_pickles = self.info.categories["integrated"][0]
pickles = []
if self.params.analysis.clustering.n_images:
import random
for i in range(len(self.params.analysis.clustering.n_images)):
random_number = random.randrange(0, len(final_pickles))
if final_pickles[random_number] in pickles:
while final_pickles[random_number] in pickles:
random_number = random.randrange(0, len(final_pickles))
pickles.append(final_pickles[random_number])
else:
pickles = final_pickles
# Cluster from files (slow, but will keep for now)
ucs = Cluster.from_files(pickle_list=pickles)
# Do clustering
clusters, _ = ucs.ab_cluster(
threshold=threshold,
log=False,
write_file_lists=False,
schnell=False,
doplot=False,
)
uc_table.append("\n\n{:-^80}\n" "".format(" UNIT CELL ANALYSIS "))
# extract clustering info and add to summary output list
if cluster_limit is None:
if len(pickles) / 10 >= 10:
cluster_limit = 10
else:
cluster_limit = len(pickles) / 10
for cluster in clusters:
sorted_pg_comp = sorted(
cluster.pg_composition.items(), key=lambda x: -1 * x[1]
)
pg_nums = [pg[1] for pg in sorted_pg_comp]
cons_pg = sorted_pg_comp[np.argmax(pg_nums)]
if len(cluster.members) > cluster_limit:
counter += 1
# Write to file
cluster_filenames = [j.path for j in cluster.members]
if self.params.analysis.clustering.write_files:
output_file = os.path.join(
self.info.int_base, "uc_cluster_{}.lst".format(counter)
)
for fn in cluster_filenames:
with open(output_file, "a") as scf:
scf.write("{}\n".format(fn))
mark_output = os.path.basename(output_file)
else:
mark_output = "*"
output_file = None
else:
mark_output = ""
output_file = None
# Populate clustering info for GUI display
uc_init = uctbx.unit_cell(cluster.medians)
symmetry = crystal.symmetry(
unit_cell=uc_init, space_group_symbol="P1"
)
groups = metric_subgroups(input_symmetry=symmetry, max_delta=3)
top_group = groups.result_groups[0]
best_sg = str(groups.lattice_group_info()).split("(")[0]
best_uc = top_group["best_subsym"].unit_cell().parameters()
# best_sg = str(top_group['best_subsym'].space_group_info())
uc_no_stdev = (
"{:<6.2f} {:<6.2f} {:<6.2f} "
"{:<6.2f} {:<6.2f} {:<6.2f} "
"".format(
best_uc[0],
best_uc[1],
best_uc[2],
best_uc[3],
best_uc[4],
best_uc[5],
)
)
cluster_info = {
"number": len(cluster.members),
"pg": best_sg,
"uc": uc_no_stdev,
"filename": mark_output,
}
self.info.clusters.append(cluster_info)
# format and record output
# TODO: How to propagate stdevs after conversion from Niggli?
# uc_line = "{:<6} {:^4}: {:<6.2f} ({:>5.2f}), {:<6.2f} ({:>5.2f}), "\
# "{:<6.2f} ({:>5.2f}), {:<6.2f} ({:>5.2f}), "\
# "{:<6.2f} ({:>5.2f}), {:<6.2f} ({:>5.2f}) "\
# "{}".format('({})'.format(len(cluster.members)), cons_pg[0],
# cluster.medians[0], cluster.stdevs[0],
# cluster.medians[1], cluster.stdevs[1],
# cluster.medians[2], cluster.stdevs[2],
# cluster.medians[3], cluster.stdevs[3],
# cluster.medians[4], cluster.stdevs[4],
# cluster.medians[5], cluster.stdevs[5],
# mark_output)
# uc_table.append(uc_line)
uc_table.append(
"{:<6}: {} {}".format(
len(cluster.members), uc_no_stdev, mark_output
)
)
lattices = ", ".join(
["{} ({})".format(i[0], i[1]) for i in sorted_pg_comp]
)
# uc_info = [len(cluster.members), cons_pg[0], cluster.medians,
# output_file, uc_line, lattices]
uc_info = [
len(cluster.members),
best_sg,
best_uc,
output_file,
uc_no_stdev,
lattices,
]
uc_summary.append(uc_info)
else:
# generate average unit cell
uc_table.append(
"\n\n{:-^80}\n" "".format(" UNIT CELL AVERAGING (no clustering) ")
)
uc_a, uc_b, uc_c, uc_alpha, uc_beta, uc_gamma, uc_sg = list(
zip(*self.info.cluster_iterable)
)
cons_pg = Counter(uc_sg).most_common(1)[0][0]
all_pgs = Counter(uc_sg).most_common()
unit_cell = (
np.median(uc_a),
np.median(uc_b),
np.median(uc_c),
np.median(uc_alpha),
np.median(uc_beta),
np.median(uc_gamma),
)
# Populate clustering info for GUI display
uc_init = uctbx.unit_cell(unit_cell)
symmetry = crystal.symmetry(unit_cell=uc_init, space_group_symbol="P1")
groups = metric_subgroups(input_symmetry=symmetry, max_delta=3)
top_group = groups.result_groups[0]
best_sg = str(groups.lattice_group_info()).split("(")[0]
best_uc = top_group["best_subsym"].unit_cell().parameters()
# best_sg = str(top_group['best_subsym'].space_group_info())
uc_no_stdev = (
"{:<6.2f} {:<6.2f} {:<6.2f} "
"{:<6.2f} {:<6.2f} {:<6.2f} "
"".format(
best_uc[0],
best_uc[1],
best_uc[2],
best_uc[3],
best_uc[4],
best_uc[5],
)
)
cluster_info = {
"number": len(self.info.cluster_iterable),
"pg": best_sg,
"uc": uc_no_stdev,
"filename": None,
}
self.info.clusters.append(cluster_info)
# uc_line = "{:<6} {:^4}: {:<6.2f} ({:>5.2f}), {:<6.2f} ({:>5.2f}), " \
# "{:<6.2f} ({:>5.2f}), {:<6.2f} ({:>5.2f}), " \
# "{:<6.2f} ({:>5.2f}), {:<6.2f} ({:>5.2f}) " \
# "{}".format('({})'.format(len(self.final_objects)), cons_pg,
# np.median(uc_a), np.std(uc_a),
# np.median(uc_b), np.std(uc_b),
# np.median(uc_c), np.std(uc_c),
# np.median(uc_alpha), np.std(uc_alpha),
# np.median(uc_beta), np.std(uc_beta),
# np.median(uc_gamma), np.std(uc_gamma), '')
#
# uc_table.append(uc_line)
uc_table.append(uc_no_stdev)
lattices = ", ".join(["{} ({})".format(i[0], i[1]) for i in all_pgs])
# uc_info = [len(self.final_objects), cons_pg, unit_cell, None,
# uc_line, lattices]
uc_info = [
len(self.info.cluster_iterable),
best_sg,
best_uc,
None,
uc_no_stdev,
lattices,
]
uc_summary.append(uc_info)
uc_table.append("\nMost common unit cell:\n")
# select the most prevalent unit cell (most members in cluster)
uc_freqs = [i[0] for i in uc_summary]
uc_pick = uc_summary[np.argmax(uc_freqs)]
uc_table.append(uc_pick[4])
uc_table.append(
"\nBravais Lattices in Biggest Cluster: {}" "".format(uc_pick[5])
)
self.info.best_pg = str(uc_pick[1])
self.info.best_uc = uc_pick[2]
if uc_pick[3] is not None:
self.prime_data_path = uc_pick[3]
for item in uc_table:
util.main_log(self.info.logfile, item, False)
self.info.update(uc_table=uc_table)
if self.gui_mode:
return self.info.clusters
    def print_summary(self, write_files=True):
        """Prints summary and appends to general log file.

        Also outputs some of it on stdout. Also writes out output list
        files.

        :param write_files: if True, remember the integrated-image list file
               as ``self.prime_data_path`` for later PRIME input generation
        """
        assert self.info
        if not self.info.categories["integrated"]:
            util.main_log(
                self.info.logfile,
                "NO IMAGES SUCCESSFULLY PROCESSSED!",
                (not self.gui_mode),
            )
            return
        summary = []
        summary.append("\n\n{:-^80}\n".format("SUMMARY"))
        # Categories reported in fixed order
        categories = [
            "total",
            "failed_triage",
            "have_diffraction",
            "failed_spotfinding",
            "failed_indexing",
            "failed_grid_search",
            "failed_integration",
            "failed_filter",
            "integrated",
        ]
        for cat in categories:
            # Each category entry: (item list, description, filename, ...)
            lst, fail, fn, _ = self.info.categories[cat]
            path = os.path.join(self.info.int_base, fn)
            if len(lst) > 0 or cat in ("integrated", "diffraction"):
                summary.append("{: <20}: {}".format("{} ".format(fail), len(lst)))
                # Write the category's item list to its output file
                with open(path, "w") as cf:
                    for item in lst:
                        if isinstance(item, tuple) or isinstance(item, list):
                            item = ", ".join([str(i) for i in item])
                        cf.write("{}\n".format(item))
            if cat == "integrated" and write_files:
                if not hasattr(self, "prime_data_path"):
                    self.prime_data_path = path
        summary.append("\n\nIOTA version {0}".format(iota_version))
        summary.append("{}\n".format(now))
        for item in summary:
            util.main_log(self.info.logfile, "{}".format(item), False)
        self.info.update(summary=summary)
    def make_prime_input(self, filename="prime.phil", run_zero=False):
        """Imports default PRIME input parameters, modifies correct entries and
        prints out a starting PHIL file to be used with PRIME.

        :param filename: name of the PHIL file written into ``info.int_base``
        :param run_zero: if True, use run directory "000" (LivePRIME mode)
        :return: the populated PRIME PHIL object
        """
        assert self.info
        pixel_size = self.info.pixel_size
        hres = self.info.stats["res"]
        lres = self.info.stats["lres"]
        # If symmetry / unit cell were not overridden from GUI, set from INFO
        if not self.best_pg:
            try:
                self.best_pg = self.info.best_pg.replace(" ", "")
            except AttributeError as e:
                print("PRIME INPUT ERROR, SPACE GROUP: ", e)
                self.best_pg = "P1"
        if not self.best_uc:
            self.best_uc = self.info.best_uc
        # Determine crystal system from crystal symmetry
        sym = crystal.symmetry(space_group_symbol=self.best_pg)
        crystal_system = str(sym.space_group().crystal_system())
        # Determine number of images for indexing ambiguity resolution
        # My default: 1/2 of images or 300, whichever is smaller
        if len(self.info.categories["integrated"]) >= 600:
            idx_ambiguity_sample = 300
            idx_ambiguity_selected = 100
        else:
            idx_ambiguity_sample = int(
                round(len(self.info.categories["integrated"]) / 2)
            )
            idx_ambiguity_selected = int(round(idx_ambiguity_sample / 3))
        # Set run number to 000 if running LivePRIME
        out_dir = os.path.join(os.path.dirname(self.prime_data_path), "prime")
        if run_zero:
            run_path = os.path.join(out_dir, "000")
        else:
            run_path = util.set_base_dir(out_dir=out_dir)
        # Populate pertinent data parameters
        prime_params = mod_input.master_phil.extract()
        prime_params.run_no = run_path
        prime_params.data = [self.prime_data_path]
        prime_params.title = "Auto-generated by IOTA v{} on {}" "".format(
            iota_version, now
        )
        # Resolution limits: high = mean observed high-res limit,
        # low = max observed low-res limit (scale.d_max fixed at 8)
        prime_params.scale.d_min = hres["mean"]
        prime_params.scale.d_max = 8
        prime_params.postref.scale.d_min = hres["mean"]
        prime_params.postref.scale.d_max = lres["max"]
        prime_params.postref.crystal_orientation.d_min = hres["mean"]
        prime_params.postref.crystal_orientation.d_max = lres["max"]
        prime_params.postref.reflecting_range.d_min = hres["mean"]
        prime_params.postref.reflecting_range.d_max = lres["max"]
        prime_params.postref.unit_cell.d_min = hres["mean"]
        prime_params.postref.unit_cell.d_max = lres["max"]
        prime_params.postref.allparams.d_min = hres["mean"]
        prime_params.postref.allparams.d_max = lres["max"]
        prime_params.merge.d_min = hres["mean"]
        prime_params.merge.d_max = lres["max"]
        prime_params.target_unit_cell = uctbx.unit_cell(self.best_uc)
        prime_params.target_space_group = self.best_pg
        prime_params.target_crystal_system = crystal_system
        prime_params.pixel_size_mm = pixel_size
        prime_params.n_residues = 500
        prime_params.indexing_ambiguity.n_sample_frames = idx_ambiguity_sample
        prime_params.indexing_ambiguity.n_selected_frames = idx_ambiguity_selected
        # Determine which queue to run on (i.e. match IOTA queue)
        # Modify specific options based in IOTA settings
        # Queue options
        if self.params.mp.method == "lsf" and self.params.mp.queue is not None:
            prime_params.queue.mode = "bsub"
            prime_params.queue.qname = self.params.mp.queue
        # Number of processors (automatically, 1/2 of IOTA procs)
        prime_params.n_processors = int(self.params.mp.n_processors / 2)
        # Generate PRIME param PHIL
        prime_phil = mod_input.master_phil.format(python_object=prime_params)
        prime_file = os.path.join(self.info.int_base, filename)
        with open(prime_file, "w") as pf:
            pf.write(prime_phil.as_str())
        return prime_phil
    def run_get_results(self, finished_objects=None):
        """Collect results and record whether any were found.

        :param finished_objects: optional list of finished image objects;
               when None they are pulled from ``self.info``
        :return: True if results were collected, False otherwise
        """
        self.info.have_results = self.get_results(finished_objects=finished_objects)
        return self.info.have_results
def run_all(self, get_results=True):
if get_results:
self.info.have_results = self.get_results()
if self.info.have_results:
try:
self.print_results()
except Exception as e:
error = "IOTA PRINTING ERROR: " + str(e)
self.info.errors.append(error)
try: # Using try block because it can fail silently
self.unit_cell_analysis()
except Exception as e:
error = "IOTA CLUSTERING ERROR: " + str(e)
self.info.errors.append(error)
try:
self.print_summary()
except Exception as e:
error = "IOTA SUMMARY ERROR: " + str(e)
self.info.errors.append(error)
try:
self.make_prime_input()
except Exception as e:
error = "IOTA PRIME INPUT ERROR: " + str(e)
self.info.errors.append(error)
return self.info
|
25,324 | ef87d4ef2419c60a41918cd5d1061cdccf2b75b7 | def regula_falsi (f,a,b,tol) :
if f(a)*f(b)>0:
print('tebakan awal a dan b tidak mengurung akar')
return None
c = b - ((f(b)*(b-a)) / (f(b)-f(a)))
while abs(f(c))>tol:
c = b - ((f(b)*(b-a)) / (f(b)-f(a)))
if f(a)*f(c)>0:
a=c
if f(b)*f(c)>0:
b=c
return c
|
25,325 | eef660d3b250ea940d46069147a1e96c6d36a163 | import os
import subprocess
import sys
import time
import getpass
from applicake.apputils import dirs
from applicake.apputils import dicts
from applicake.apputils import validation
from applicake.coreutils.keys import Keys, KeyHelp
from applicake.coreutils.log import Logger
from applicake.coreutils.arguments import Argument, parse_sysargs
from applicake.coreutils.info import get_handler
class IApp(object):
    """Interface that every App must implement.

    Defines the App life cycle: add_args -> setup -> run -> teardown,
    driven by main().  All methods must be overridden by subclasses.
    """

    @classmethod
    def main(cls):
        """
        Main method to run through the whole App
        @return : None
        """
        raise NotImplementedError

    def add_args(self):
        """
        Defines Arguments required or used by App
        @return: list with Arguments
        """
        raise NotImplementedError

    def setup(self, app_args):
        """
        Set up environment for running App
        @param app_args: Arguments required by App
        @return: logger, dict with info
        """
        raise NotImplementedError

    def run(self, log, info):
        """
        Run the App
        @param log: logger
        @param info: dict with info
        @return: (modified) dict with info
        """
        raise NotImplementedError

    def teardown(self, log, info):
        """
        Clean up enviroment after running App
        @param log: logger
        @param info: dict with info
        @return: None
        """
        raise NotImplementedError
class BasicApp(IApp):
    """Default App implementation: drives the add_args/setup/run/teardown
    life cycle, merges info from defaults, input file and command line, and
    reports failures (via email on LSF batch nodes).

    NOTE(review): this module uses Python 2 syntax (``except Exception, e``).
    """

    @classmethod
    def main(cls):
        """Run the whole App life cycle; on failure log/exit with a message."""
        log = None
        try:
            start = time.time()
            ci = cls()
            app_args = ci.add_args()
            log, req_info, info = ci.setup(app_args)
            ret_info = ci.run(log, req_info)
            # Values returned by run() override those from setup()
            info = dicts.merge(info, ret_info, priority='right')
            ci.teardown(log, info)
            log.debug("%s finished sucessfully at %s" % (cls.__name__, time.asctime()))
            log.info("%s finished sucessfully after %ss" % (cls.__name__, int(time.time() - start)))
        except Exception, e:
            msg = cls.__name__ + " failed! " + str(e)
            if isinstance(e, KeyError):
                msg += " key not found in info"
            msg += "\n"
            # feature request cuklinaj: mail when fail, delay between
            if os.environ.get("LSB_JOBID"):
                # Timestamp file throttles failure mails to one per 10 minutes
                controlfile = os.getenv("HOME") + "/.last_error_message"
                if not os.path.exists(controlfile) or (time.time() - os.stat(controlfile).st_mtime) > 600:
                    subprocess.call("touch %s; echo \"Failure reason: %s\nTo prevent spam you won't get such warnings for the next 10 minutes\" | mail -s \"Workflow Failed\" %s" % (
                        controlfile, msg, getpass.getuser()), shell=True)
            # if app fails before logger is created use sys.exit for message
            if not log:
                sys.exit(msg)
            log.error(msg)
            sys.exit(1)

    def add_args(self):
        # Subclasses must declare the Arguments they need
        raise NotImplementedError("add_args() not implemented")

    def setup(self, app_args):
        """Parse arguments, build the info dict and create logger/workdir.

        @param app_args: Arguments declared by the concrete App
        @return: (log, req_info, info) -- req_info holds only the requested
                 keys unless ALL_ARGS is set
        """
        # basic arguments for every node
        basic_args = [Argument(Keys.INPUT, KeyHelp.INPUT, default=''),
                      Argument(Keys.OUTPUT, KeyHelp.OUTPUT, default=''),
                      Argument(Keys.MODULE, KeyHelp.MODULE, default=''),
                      Argument(Keys.LOG_LEVEL, KeyHelp.LOG_LEVEL, default="DEBUG")]
        # Fixme: Prettify WORKDIR creation system
        # WORKDIR: if WORKDIR is defined add related args
        for i, arg in enumerate(app_args):
            if arg.name == Keys.WORKDIR:
                app_args.insert(i + 1, Argument(Keys.BASEDIR, KeyHelp.BASEDIR, default='.'))
                app_args.insert(i + 2, Argument(Keys.JOB_ID, KeyHelp.JOB_ID, default=''))
                app_args.insert(i + 3, Argument(Keys.SUBJOBLIST, KeyHelp.SUBJOBLIST, default=''))
                app_args.insert(i + 4, Argument(Keys.NAME, KeyHelp.NAME, default=self.__class__.__name__))
                break
        defaults, cliargs = parse_sysargs(basic_args + app_args)
        # construct info from defaults < info < commandlineargs
        ih = get_handler(cliargs.get(Keys.INPUT, None))
        fileinfo = ih.read(cliargs.get(Keys.INPUT, None))
        info = dicts.merge(cliargs, dicts.merge(fileinfo, defaults))
        # setup logging
        log = Logger.create(info[Keys.LOG_LEVEL])
        # request by malars: show dataset prominent in logger
        if Keys.DATASET_CODE in info:
            if not isinstance(info[Keys.DATASET_CODE], list):
                if Keys.MZXML in info and not isinstance(info[Keys.MZXML], list):
                    log.info("Dataset is %s (%s)" % (info[Keys.DATASET_CODE], os.path.basename(info[Keys.MZXML])))
                else:
                    log.info("Dataset is %s" % info[Keys.DATASET_CODE])
            else:
                log.debug("Datasets are %s" % info[Keys.DATASET_CODE])
        # WORKDIR: create WORKDIR (only after mk log)
        info = dirs.create_workdir(log, info)
        # filter to requested args
        if Keys.ALL_ARGS in info:
            # if ALL_ARGS is set give whole info to app...
            req_info = info
        else:
            req_info = {}
            # ...otherwise copy only explicitly requested args to app
            for key in [arg.name for arg in basic_args + app_args]:
                if key in info:
                    req_info[key] = info[key]
        log.debug("info for app: %s" % req_info)
        return log, req_info, info

    def run(self, log, info):
        # Subclasses implement the actual work
        raise NotImplementedError("run() not implemented")

    def teardown(self, log, info):
        # Persist the final info dict to the OUTPUT destination
        ih = get_handler(info.get(Keys.OUTPUT))
        ih.write(info, info.get(Keys.OUTPUT))
class WrappedApp(BasicApp):
def run(self, log, info):
info, cmd = self.prepare_run(log, info)
exit_code, stdout = self.execute_run(log, info, cmd)
info = self.validate_run(log, info, exit_code, stdout)
return info
def prepare_run(self, log, info):
raise NotImplementedError("prepare_run() not implemented")
def execute_run(self, log, info, cmd):
out = ""
exit_code = 0
if isinstance(cmd, list):
for single_command in cmd:
exit_code_s, out_s = self.execute_run_single(log, info, single_command)
exit_code += exit_code
out += out_s
if exit_code_s !=0:
break
else:
exit_code, out = self.execute_run_single(log, info, cmd)
return exit_code, out
@staticmethod
def execute_run_single(log, info, cmd):
# Fixme: Prettify/document MODULE load system
# if MODULE is set load specific module before running cmd. requires http://modules.sourceforge.net/
if info.get('MODULE', '') != '':
cmd = "module purge && module load %s && %s" % (info['MODULE'], cmd)
cmd = cmd.replace("\n", "")
log.debug("command is [%s]" % cmd)
# stderr to stdout: http://docs.python.org/2/library/subprocess.html#subprocess.STDOUT
# read input "streaming" from subprocess: http://stackoverflow.com/a/17698359
# get exitcode: http://docs.python.org/2/library/subprocess.html#subprocess.Popen.returncode
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1)
out = ""
for line in iter(p.stdout.readline, ''):
print line.strip()
out += line
p.communicate()
exit_code = p.returncode
return exit_code, out
def validate_run(self, log, info, exit_code, stdout):
validation.check_exitcode(log, exit_code)
validation.check_stdout(log, stdout)
return info
|
25,326 | 380f36a47a612c60b33e169fa6f075046565e25a | import unittest
import os
from HtmlTestRunner import HTMLTestRunner
from UnitTest import SearchText
from GoogleImage import HomePageTest

# collect all tests from the SearchText and HomePageTest classes
loader = unittest.TestLoader()
search_text = loader.loadTestsFromTestCase(SearchText)
home_page_test = loader.loadTestsFromTestCase(HomePageTest)

# bundle both test cases into a single suite
test_suite = unittest.TestSuite([home_page_test, search_text])

# run the suite with the HTML reporter, writing the report into
# the 'example_test_suite' directory (relative to the CWD).
# note: removed the unused "dir = os.getcwd()" assignment, which also
# shadowed the dir() builtin.
runner = HTMLTestRunner(output='example_test_suite')
runner.run(test_suite)
25,327 | f93940a2baff60ceae1f1434e4f34df76039b7f8 | # from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def home(request):
    """Return a static HTML blurb describing Django for the site home page."""
    # bugfix: "fulled with features" -> "full of features" in the user-facing copy
    return HttpResponse('<h1>What is Django?</h1> <p>With Django, you can take Web applications from concept to launch in a matter of hours. Django takes care of much of the hassle of Web development, so you can focus on writing your app without needing to reinvent the wheel. It’s free and open source.</p> <p>Django is Fast, reliable, full of features, and secure</p>')
25,328 | f59d490c138cb5b69e4aeb886c70db24338ef92d | # -*- coding: utf-8 -*-
import os, sys
import datetime,time
import json
from kr36_location import kr36_cities
reload(sys)
sys.setdefaultencoding("utf-8")
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../../../util'))
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../../support'))
import loghelper
import util, name_helper, url_helper, download
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../util'))
import parser_db_util
#logger
loghelper.init_logger("36kr_company_parser", stream=True)
logger = loghelper.get_logger("36kr_company_parser")
SOURCE = 13020 #36kr
TYPE = 36001 #公司信息
download_crawler = download.DownloadCrawler(use_proxy=False)
def formCityName(name):
    """Strip a trailing administrative suffix ("市" city / "县" county) from a place name.

    Uses suffix slicing instead of split(): the old split("市")[0] cut the
    name at the FIRST occurrence of the character, corrupting names that
    contain it mid-string (e.g. u"津市市" became u"津" instead of u"津市").
    Names without either suffix are returned unchanged.
    """
    for suffix in ("市", "县"):
        if name.endswith(suffix):
            return name[:-len(suffix)]
    return name
def process():
    """Drain unprocessed 36kr company pages in batches of 1000 and persist them.

    For each crawled item: parse the company, save the standardized record,
    its name variants, artifacts (website/app/social links) and team members,
    then mark the item processed. Pages still under construction (status
    INIT) or with no usable content are deactivated instead.
    """
    logger.info("36kr_company_parser begin...")
    start = 0
    while True:
        items = parser_db_util.find_process_limit(SOURCE, TYPE, start, 1000)
        for item in items:
            r = parse_company(item)
            logger.info(json.dumps(r, ensure_ascii=False, cls=util.CJsonEncoder))
            # page is still under construction on 36kr -> deactivate and skip
            if r["status"] == "INIT" :
                parser_db_util.update_active(SOURCE, item["key"], 'N')
                parser_db_util.update_processed(item["_id"])
                logger.info("processed %s" ,item["url"])
                continue
            source_company_id = parser_db_util.save_company_standard(r, download_crawler)
            # refresh name variants: short name (12020), full legal name (12010)
            parser_db_util.delete_source_company_name(source_company_id)
            parser_db_util.delete_source_mainbeianhao(source_company_id)
            parser_db_util.save_source_company_name(source_company_id, r["name"],12020)
            parser_db_util.save_source_company_name(source_company_id, r["fullName"],12010)
            main_company_name = name_helper.get_main_company_name(r["fullName"])
            if main_company_name != r["fullName"]:
                parser_db_util.save_source_company_name(source_company_id, main_company_name,12010)
            logger.info("source_company_id=%s", source_company_id)
            artifacts=parse_artifact(source_company_id,item)
            logger.info(json.dumps(artifacts, ensure_ascii=False, cls=util.CJsonEncoder))
            # no full name, no description and no artifacts -> nothing usable
            if (r["fullName"] is None or r["fullName"].strip() == "") and (r['description'] is None or r['description'].strip() == "") \
                    and len(artifacts) == 0:
                parser_db_util.update_active(SOURCE, item["key"], 'N')
                parser_db_util.update_processed(item["_id"])
                logger.info("missing all stuff, processed %s", item["url"])
                continue
            parser_db_util.save_artifacts_standard(source_company_id, artifacts)
            # parser_db_util.delete_funding(source_company_id)
            # flag=parseFinance_save(source_company_id,item, download_crawler)
            flag = True
            # member relation codes: 5010 founders, 5030 employees, 5040 former members
            if item["content"].has_key("founders") and item["content"]["founders"]["data"].has_key("data"):
                parseMember_save(source_company_id,5010,item["content"]["founders"]["data"]["data"], download_crawler)
            if item["content"].has_key("employees") and item["content"]["employees"]["data"].has_key("data"):
                parseMember_save(source_company_id,5030,item["content"]["employees"]["data"]["data"], download_crawler)
            if item["content"].has_key("former_members") and item["content"]["former_members"]["data"].has_key("data"):
                parseMember_save(source_company_id,5040,item["content"]["former_members"]["data"]["data"],download_crawler)
            if flag:
                parser_db_util.update_processed(item["_id"])
                logger.info("processed %s" ,item["url"])
            else:
                logger.info("lack something: %s", item["url"])
            #break
        start += 1000
        if len(items) == 0:
            break
    logger.info("36kr_company_parser end.")
def parse_company(item):
    """Convert one crawled 36kr company page into the standard company dict.

    Returns {"status": "INIT"} when the page is still under construction;
    otherwise returns the full standardized record (name, descriptions,
    location, establish date, logo, tags, ...).
    """
    logger.info("parse_company")
    company_key = item["key"]
    #company basic info
    c = item["content"]["company_base"]["data"]["company"]
    #check if page is under development or is completed(CREATED)
    if c["status"] == "INIT":
        return {
            "status":c["status"],
        }
    tags = item["content"]["company_base"]["data"]["tags"]
    tags2 = []
    for tag in tags:
        tags2.append(tag["name"])
    tags_str = ",".join(tags2)
    logo=c["logo"]
    if logo:
        # normalize scheme so the downloader treats https/http logos uniformly
        logo = logo.replace("https://","http://")
    establish_date = None
    if c.has_key("startDate"):
        # startDate is epoch milliseconds; years <= 1980 look like junk data
        d = time.localtime(c["startDate"]/1000)
        if d.tm_year > 1980:
            establish_date = datetime.datetime(d.tm_year,d.tm_mon,d.tm_mday)
    address1 = None
    address2 = None
    if c.has_key("address1"):
        address1 = c["address1"]
    if c.has_key("address2"):
        address2 = c["address2"]
    # resolve a location id: prefer address2, fall back to address1
    location_id = 0
    if address2!=None:
        city = kr36_cities.get(str(address2),None)
        if city != None:
            location = parser_db_util.get_location(formCityName(city))
            if location != None:
                location_id= location["locationId"]
    if location_id==0 and address1 != None:
        city = kr36_cities.get(str(address1),None)
        if city != None:
            location = parser_db_util.get_location(formCityName(city))
            if location != None:
                location_id = location["locationId"]
    #logger.info("locationid =%s",location_id)
    fullName = c["fullName"]
    fullName = fullName.replace("_","")
    # truncate anything after the last occurrence of u"公司" ("company")
    idx = fullName.rfind(u"公司")
    if idx != -1:
        fullName = fullName[:(idx+len(u"公司"))]
    fullName = name_helper.company_name_normalize(fullName)
    desc = ""
    productDesc = None
    modelDesc = None
    operationDesc = None
    teamDesc = None
    marketDesc = None
    compititorDesc = None
    advantageDesc = None
    planDesc = None
    otherDesc = None
    if c.has_key("projectAdvantage"): # "our product and advantages"
        productDesc = c["projectAdvantage"].strip()
    if c.has_key("dataLights"): # "our users"
        operationDesc = c["dataLights"].strip()
    if c.has_key("projectPlan"): # "our future"
        modelDesc = c["projectPlan"].strip()
    if c.has_key("competitor"): # "products similar to ours"
        compititorDesc = c["competitor"].strip()
    if c.has_key("intro"): # "other"
        # otherDesc = c["intro"].strip()
        desc = c["intro"].strip()
    if c.has_key("story"): # "team introduction"
        teamDesc = c["story"].strip()
    '''
    if productDesc or operationDesc or modelDesc or compititorDesc or teamDesc:
        desc = ""
        if productDesc:
            desc += u"<p>我们的产品与优势</p>\n" + "<pre>" + productDesc + "</pre>\n"
        if operationDesc:
            desc += u"<p>我们的用户</p>\n" + "<pre>" + operationDesc + "</pre>\n"
        if modelDesc:
            desc += u"<p>未来的我们</p>\n" + "<pre>" + modelDesc + "</pre>\n"
        if compititorDesc:
            desc += u"<p>与我们相似的产品</p>\n" + "<pre>" + compititorDesc + "</pre>\n"
        if otherDesc:
            desc += u"<p>其他</p>\n" + "<pre>" + otherDesc + "</pre>\n"
        if teamDesc:
            desc += u"<p>团队介绍</p>\n" + "<pre>" + teamDesc + "</pre>\n"
    else:
        desc = otherDesc
    '''
    return {
        "status":c["status"],
        "name": c["name"],
        "fullName": fullName,
        "description": desc,
        "productDesc": productDesc,
        "modelDesc": modelDesc,
        "operationDesc": operationDesc,
        "teamDesc": teamDesc,
        "marketDesc": marketDesc,
        "compititorDesc": compititorDesc,
        "advantageDesc": advantageDesc,
        "planDesc": planDesc,
        "otherDesc": otherDesc,
        "brief": c["brief"],
        "round": 0,
        "roundDesc": None,
        "companyStatus": 2010,
        'fundingType': 0,
        "locationId": location_id,
        "address": None,
        "phone": None,
        "establishDate": establish_date,
        "logo": logo,
        "source": SOURCE,
        "sourceId": company_key,
        "field": c.get("industry"),
        "subField": None,
        "tags": tags_str,
        "headCountMin": None,
        "headCountMax": None
    }
#source_company_id = parser_util.insert_source_company(source_company)
def _clean_link(c, key):
    """Return the stripped string value of c[key], tolerating missing keys AND
    keys present with a None value (the old c.get(key, "").strip() crashed
    with AttributeError when the value was None)."""
    return (c.get(key) or "").strip()


def _make_artifact(source_company_id, name, link, domain, type):
    """Build one standard artifact record."""
    return {
        "sourceCompanyId": source_company_id,
        "name": name,
        "description": None,
        "link": link,
        "domain": domain,
        "type": type
    }


def _append_app_artifact(artifacts, source_company_id, name, link):
    """Append an app-market artifact (4040 Android / 4050 iOS) for link,
    when the market and package domain can be resolved."""
    type, market, app_id = url_helper.get_market(link)
    domain = get_android_domain(market, app_id)
    if (type == 4040 or type == 4050) and domain is not None:
        artifacts.append(_make_artifact(source_company_id, name, link, domain, type))


def parse_artifact(source_company_id, item):
    """Extract website, weibo, weixin and app-store artifacts for a company.

    Returns a list of artifact dicts ready for save_artifacts_standard().
    Artifact types: 4010 website, 4020 weixin, 4030 weibo, 4040/4050 app stores.
    """
    logger.info("parse_artifact")
    c = item["content"]["company_base"]["data"]["company"]
    artifacts = []

    # website: either a plain site (4010) or an app-store link in disguise
    website = _clean_link(c, "website")
    website = url_helper.url_normalize(website)
    if website is not None and website != "":
        type, market, app_id = url_helper.get_market(website)
        if type == 4010:
            # skip 36kr.com links unless the company itself is "36"-branded
            if website.find('36kr.com') > 0 and c["name"].find('36') == -1:
                pass
            else:
                artifacts.append(_make_artifact(source_company_id, c["name"], website, app_id, type))
        elif (type == 4040 or type == 4050) and app_id is not None:
            _append_app_artifact(artifacts, source_company_id, c["name"], website)

    weibo = _clean_link(c, "weibo")
    if weibo != "":
        artifacts.append(_make_artifact(source_company_id, c["name"], weibo, None, 4030))

    weixin = _clean_link(c, "weixin")
    if weixin != "":
        # for weixin the account id doubles as the domain
        artifacts.append(_make_artifact(source_company_id, c["name"], weixin, weixin, 4020))

    # the three app-store fields share identical handling
    for key in ("iphoneAppstoreLink", "ipadAppstoreLink", "androidLink"):
        link = _clean_link(c, key)
        if link != "":
            _append_app_artifact(artifacts, source_company_id, c["name"], link)

    return artifacts
def get_android_domain(app_market, app_id):
    """Resolve an app id to its package name via the market DB.

    Only markets 16010/16020 are looked up; falls back to the raw app id
    when the market DB has no record, and returns None for other markets.
    """
    if app_market not in (16010, 16020):
        return None
    android_app = parser_db_util.find_android_market(app_market, app_id)
    if android_app:
        return android_app["apkname"]
    return app_id
def parseFinance_save(source_company_id,item, download_crawler):
    """Parse and persist every past funding round of a company.

    Maps 36kr phase strings to internal round codes, vague Chinese amount
    phrases (u"数万" etc.) to estimated amounts (precise='N'), and resolves
    each participating investor against previously crawled data.

    Returns True when all investors were resolved, False when at least one
    was missing (None when item is None).

    NOTE(review): the inner loop rebinds the name `item` (investor lookup
    result), shadowing the parameter -- intentional-looking but fragile.
    """
    logger.info("parseFinance_save")
    if item is None:
        return None
    company_key = item["key"]
    # Check Investor if saved in investor databases (36003)
    flag = True
    if not item["content"].has_key("past_finance"):
        return True
    if not item["content"]["past_finance"]["data"].has_key("data"):
        return True
    finances = item["content"]["past_finance"]["data"]["data"]
    for finance in finances:
        logger.info("%s,%s,%s,%s" % (finance.get("phase"),finance.get("financeAmountUnit"),finance["financeDate"],finance.get("financeAmount")))
        # map the 36kr phase string to the internal round code
        roundStr = finance.get("phase")
        fundingRound = 0
        if roundStr == "INFORMAL" or roundStr=="ANGEL":
            fundingRound = 1011
            roundStr = "天使"
        elif roundStr == "PRE_A":
            fundingRound = 1020
            roundStr = "Pre-A"
        elif roundStr == "A":
            fundingRound = 1030
        elif roundStr == "A_PLUS":
            fundingRound = 1031
            roundStr = "A+"
        elif roundStr == "B":
            fundingRound = 1040
        elif roundStr == "B_PLUS":
            fundingRound = 1041
            roundStr = "B+"
        elif roundStr == "C":
            fundingRound = 1050
        elif roundStr == "D":
            fundingRound = 1060
        elif roundStr == "E":
            fundingRound = 1070
        elif roundStr == "ACQUIRED":
            fundingRound = 1120
        elif roundStr == "IPO":
            fundingRound = 1110
        elif roundStr == "NEEQ":
            fundingRound = 1105
        elif roundStr == "SEED":
            fundingRound = 1010
        # financeDate is epoch milliseconds
        d = time.localtime(finance["financeDate"]/1000)
        fundingDate = datetime.datetime(d.tm_year,d.tm_mon,d.tm_mday)
        # currency codes: 3010 USD (default), 3020 CNY
        fundingCurrency = 3010
        if finance.get("financeAmountUnit") == "USD":
            fundingCurrency = 3010
        elif finance.get("financeAmountUnit") == "CNY":
            fundingCurrency = 3020
        fundingInvestment = 0
        precise = 'Y'
        financeAmount = finance.get("financeAmount")
        if financeAmount != None:
            # amounts are given in units of 10k; non-numeric values fall
            # through to the vague-phrase mapping below (precise='N')
            try:
                fundingInvestment = int(financeAmount) * 10000
            except:
                pass
            if fundingInvestment == 0:
                if financeAmount == u"数万":
                    fundingInvestment = 1*10000
                    precise = 'N'
                elif financeAmount == u"数十万":
                    fundingInvestment = 10*10000
                    precise = 'N'
                elif financeAmount == u"数百万":
                    fundingInvestment = 100*10000
                    precise = 'N'
                elif financeAmount == u"数千万":
                    fundingInvestment = 1000*10000
                    precise = 'N'
                elif financeAmount == u"数万万":
                    fundingInvestment = 10000*10000
                    precise = 'N'
                elif financeAmount == u"数亿":
                    fundingInvestment = 10000*10000
                    precise = 'N'
                elif financeAmount == u"数十亿":
                    fundingInvestment = 10000*10000*10
                    precise = 'N'
        source_funding = {
            "sourceCompanyId": source_company_id,
            "preMoney": None,
            "postMoney": None,
            "investment": fundingInvestment,
            "precise": precise,
            "round": fundingRound,
            "roundDesc": roundStr,
            "currency": fundingCurrency,
            "fundingDate": fundingDate
        }
        logger.info(json.dumps(source_funding, ensure_ascii=False, cls=util.CJsonEncoder))
        source_investors = []
        investors = finance.get("participants")
        if investors is not None:
            for investor in investors:
                logger.info(investor.get("name"))
                entityId = investor.get("entityId")
                entityType = investor.get("entityType")
                if entityType == "ORGANIZATION":
                    # investment firm: look it up among crawled investors (36003)
                    item = parser_db_util.find_36kr(SOURCE,36003,str(entityId))
                    if item:
                        v = item["content"]["investor_base"]["data"]
                        if v["name"] == "":
                            v["name"] = v["nameAbbr"]
                        logo = v.get("logo")
                        if logo:
                            logo = logo.replace("https://","http://")
                        source_investor = {
                            "name": v["name"],
                            "website": v.get("website"),
                            "description": v["intro"],
                            "logo_url":logo,
                            "stage": None,
                            "field": None,
                            "type":10020,
                            "source":SOURCE,
                            "sourceId":str(entityId)
                        }
                        source_investors.append(source_investor)
                    else:
                        logger.info("No investor %s",str(entityId))
                        flag=False
                elif entityType == "COMPANY":
                    # corporate investor: look it up among crawled companies (36001)
                    item = parser_db_util.find_36kr(SOURCE, 36001, str(entityId))
                    if item:
                        v = item["content"]["company_base"]["data"]["company"]
                        source_investor = {
                            "name": v["name"],
                            "website": v.get("website"),
                            "description": v["intro"],
                            "logo_url":v.get("logo"),
                            "stage": None,
                            "field": None,
                            "type":10020,
                            "source":SOURCE,
                            "sourceId":str(entityId)
                        }
                        source_investors.append(source_investor)
                    else:
                        logger.info("No company %s", str(entityId))
                        flag=False
                else:
                    logger.info("**********" + entityType + ", entityId=" + str(entityId))
        logger.info(json.dumps(source_investors, ensure_ascii=False, cls=util.CJsonEncoder))
        parser_db_util.save_funding_standard(source_funding, download_crawler, source_investors)
    return flag
# Maps 36kr member-type codes to their Chinese display labels:
# founder, co-founder, tech, design, product, operations, market & sales,
# admin/HR/finance, investment & M&A.
type_map = {
    "FOUNDER":"创始人",
    "CO_FOUNDER":"联合创始人",
    "TECH":"技术",
    "DESIGN":"设计",
    "PRODUCT":"产品",
    "OPERATOR":"运营",
    "SALE":"市场与销售",
    "HR":"行政、人事及财务",
    "INVEST":"投资和并购",
}
def parseMember_save(source_company_id, type, members, download_crawler):
    """Persist the team members of a company.

    type is the relation code (5010 founders, 5030 employees, 5040 former
    members). Long "position" strings (>20 chars) are treated as free text:
    moved into the description and replaced by the mapped member-type label.
    """
    logger.info("parseMember_save")
    for m in members:
        if not m.has_key("name"):
            continue
        logger.info(m["name"])
        desc = m.get("intro")
        member_type = type_map.get(m.get("type"),"")
        position = m.get("position","")
        if len(position) > 20:
            # overly long position looks like free text -> keep it as description
            if desc is None:
                desc = position
            else:
                desc += '\n' + position
            position = member_type
        else:
            position = member_type + position
        logo = m.get("avatar")
        if logo:
            # normalize scheme for the downloader
            logo = logo.replace("https://","http://")
        source_member = {
            "source": SOURCE,
            "sourceId": str(m["id"]),
            "name": m["name"],
            "photo_url": logo,
            "weibo": None,
            "location": 0,
            "role": None,
            "description": desc,
            "education": None,
            "work": None
        }
        source_company_member_rel = {
            "sourceCompanyId": source_company_id,
            "position": position,
            "joinDate": None,
            "leaveDate": None,
            "type": type
        }
        try:
            parser_db_util.save_member_standard(source_member, download_crawler, source_company_member_rel)
        except Exception:
            # bugfix: was a bare "except: pass" that also swallowed
            # SystemExit/KeyboardInterrupt and hid every error; keep the
            # best-effort behaviour but log the failure.
            logger.exception("failed to save member %s", m.get("name"))
if __name__ == "__main__":
    # Re-run the parser forever, sleeping 30 minutes between passes.
    while True:
        process()
        #break #test
        time.sleep(30*60)
25,329 | 53ae8ba581ffe9ce73d8288afbc6edb4d094eb58 |
# coding: utf-8
# # Experiment Results
# In[17]:
import warnings
warnings.filterwarnings('ignore')
get_ipython().run_line_magic('run', 'helper_functions.py')
get_ipython().run_line_magic('matplotlib', 'inline')
# Here we review the results from all of our experiments across the various ML algorithms we investigated and the datasets we created.
#
# We provide a summary below with a link to each of the notebooks used for the experiments.
# ## Experiment Summary
#
# We created the following datasets as part of our experiment design
#
# | Link | Dataset | Cleaned + Lags/Moving Averages | Including Signals | Enhanced Signals
# | ---- | :------ | ------------ | ----------- | ---------- |
# | [link](DataViewer.ipynb?DATA=Atlanta) | Atlanta | x | x | x |
# | [link](DataViewer.ipynb?DATA=Boston) | Boston | x | x | x |
# | [link](DataViewer.ipynb?DATA=Dallas) | Dallas | x | x | x |
# | [link](DataViewer.ipynb?DATA=Houston) | Houston | x | x | x |
# | [link](DataViewer.ipynb?DATA=New_York) | New York | x | x | x |
# | [link](DataViewer.ipynb?DATA=Miami) | Miami | x | x | x |
#
#
# We created the following supplementary signals that where used to enhance the datasets above
#
# | Link | Dataset | Cleaned | Enhanced with Lags/Moving Averages | Reference |
# | ---- | :------ | :-------: | :------------------------------: | :---------: |
# | [link](SignalViewer.ipynb?DATA=AO) | AO (Artic Oscillation) | x | x | x |
# | [link](SignalViewer.ipynb?DATA=NAO) | NAO (North American Oscilliation) | x | x | x |
# | [link](SignalViewer.ipynb?DATA=NINO3) | NINO3 | x | x | x |
# | [link](SignalViewer.ipynb?DATA=NINO4) | NINO4 | x | x | x |
# | [link](SignalViewer.ipynb?DATA=NINO12) | NINO1/2 | x | x | x |
# | [link](SignalViewer.ipynb?DATA=NINO34) | NINO3/4 | x | x | x |
#
# We also prepared other signals and locations to consider as part of our experimental design. We however limited ourselves to these datasets due to time constraints.
#
# We ran the following models as part of our experiment design, these notebooks we used repeatedly to run each variant of the experiments and we included hooks to track the artifacts and results.
#
# | Link | Experiments |
# | ---- | :------- |
# | [link](Daily_Temp_Analysis_ARIMA.ipynb) | ARIMA statistical models and techniques |
# | [link](Daily_Temp_Analysis_DT.ipynb) | Decision Tree |
# | [link](Daily_Temp_Analysis_RF.ipynb) | Random Forest |
# | [link](Daily_Temp_Analysis_RNN.ipynb) | Sequential Recurrent Neural Net |
#
# We systematically collected and archived all of the data created by each of the experiments and saved this assigning each experiment its own unique identifier. We also recorded who/when the experiment was run. We also collected various artifacts created in each experiement so we could repeat the experiment at a later date should it be required.
#
# This diagram shows the NINO area of the ocean ( courtesy of Climate Prediction Center, National Weather Service ) and it shows how the temperature of the ocean is localized.
# In[70]:
get_ipython().run_cell_magic('HTML', '', '<img src="http://www.cpc.ncep.noaa.gov/products/analysis_monitoring/enso_update/sstanim.gif"></img>')
# ## Summary of results by location
#
# Here is some quick links to our results
#
# | Link | Experiments Results for cities |
# | ---- | :------- |
# | [link](Experiment_Results-Atlanta.ipynb) | Atlanta |
# | [link](Experiment_Results-Boston.ipynb) | Boston |
# | [link](Experiment_Results-Dallas.ipynb) | Dallas|
# | [link](Experiment_Results-Houston.ipynb) |Houston |
# | [link](Experiment_Results-New_York.ipynb) | New York |
# | [link](Experiment_Results-Miami.ipynb) | Miami |
#
#
# We have provided some pivot tables and charts to allow the reader to see the results of our experiments. We will focus initially on the Mean Squared Error metric as a means of illustrating how each model performed.
#
# ARIMA has been excluded from these comparisons as we were unable to create a forecast that would extend any more than 6 days before the signal reverted to the mean trend.
#
# On investigation, we would need to use an alternative model called SARIMAX which would allow us to extend the forecasts over a longer time period and thus we would be able to make a meaningful comparison.
# In[4]:
# All runs presented in pivot table
results_df = get_results()
results_df.pivot_table(index=['CITY'], columns=['MODEL_NAME','FEATURE_TYPE'], values="MEAN_SQUARED_ERROR")
# As we can see above, the enhanced signals appear in some instances to improve the forecasts. However this is not always the case, this could be because the cluster of signals choosen should be selected specifically for each location vs. just applying the same data set to each.
#
# We will also review those features flagged in each city's analysis to determine the top 10 features used by the models.
# Let's use some boxplots to review the performance of our models over all of our experiments
# In[19]:
# All runs presented in pivot table
create_boxplot_traces_for_results(results_df,'MODEL_NAME','MEAN_SQUARED_ERROR',"Mean Squared Error by Model Type")
# As expected, the **RNN** performs best, **Random Forest** comes second, and **Decision Tree** comes last
# In[20]:
# All runs presented in pivot table
create_boxplot_traces_for_results(results_df,'FEATURE_TYPE','MEAN_SQUARED_ERROR',"Mean Squared Error by Feature Type")
# Contrary to what we saw above, the signals are not having as big an effect on our predictions; this may be because we are missing signals or have not tuned the models' hyperparameters enough
# ## Conclusion
#
# The Arima model was unsuitable for forecasting as detailed in summary switching to a more advanced model for example SARIMAX or THETA would help us to forecast for a longer period of time
#
# We also incorporated more signals like lags which is signal 1,2,7,30,90 and 365 days ago and moving averages which is signal averaged over 1 week, 30 days, 60 days etc from alternate locations (more locations for SST ) and more weather measurement types like AO (Arctic Oscillation), NAO (North Atlantic Oscillation) and their lags and moving averages, also removed signals that are found to have no predictive power.
#
# As per the results detailed in summary RNN performs better of all models and we intend to use LSTM model as a progression from our sequential RNN
# ## Takeaways
#
# * We used Git as the version control tool; as we came to the end of the project we realised that being organized with Git came in really handy for code merges and large CSV or pickle file check-ins.
# * Serializing data and capturing results (pickle files) and images as we ran multiple experiments proved indispensable, helping us to quickly change artifacts / notebooks as we wanted to answer new questions or compare results.
# * Notebooks are a great tool for quick analysis but are less efficient when you have to perform multiple iterations with different input parameters and datasets; a hybrid approach is recommended.
# * We needed more time to tune our hyperparameters using grid searching or any other approaches.
# * Now that we have good amount of results, It would be a good time to get the results reviewed by a domain expert so that we can identify weaknesses in our approach towards data and redefine our next steps, for example adding more signals or removing unnecessary signals, tune hyperparameters of our models.
# * We could scale our data and no of experiments if we have faster compute resources.
# * We would like to investigate a framework (i.e. MLFlow ) or similiar to help manage the end to end process of data capture, running experiences, instrumentation and presenting our results.
|
25,330 | ee760ec89aef74da5bcdb06aa55c13f3b329bce6 | from serpent.sprite import Sprite
class SpriteLocator:
    """Finds a Sprite inside a game frame via its constellation-of-pixels."""

    def __init__(self, **kwargs):
        pass

    def locate(self, sprite=None, game_frame=None):
        """Return the sprite's bounding box (y0, x0, y1, x1), or None.

        Anchors on one pixel of each constellation, then verifies every
        constellation pixel at each candidate offset. The last verified
        match wins when several constellations match.
        """
        frame = game_frame.frame
        constellation_images = sprite.generate_constellation_of_pixels_images()
        location = None

        for index, constellation_image in enumerate(constellation_images):
            constellation = sprite.constellation_of_pixels[index]
            anchor_coordinates, anchor_rgb = list(constellation.items())[0]

            # candidate top-left corners: every occurrence of the anchor
            # colour, shifted back by the anchor's offset in the sprite
            candidates = Sprite.locate_color(anchor_rgb, image=frame)
            candidates = [
                (y - anchor_coordinates[0], x - anchor_coordinates[1])
                for y, x in candidates
            ]

            maximum_y = frame.shape[0] - constellation_image.shape[0]
            maximum_x = frame.shape[1] - constellation_image.shape[1]

            for y, x in candidates:
                # discard candidates whose sprite box would leave the frame
                if y < 0 or x < 0 or y > maximum_y or x > maximum_x:
                    continue
                for yx, rgb in constellation.items():
                    if tuple(frame[y + yx[0], x + yx[1], :]) != rgb:
                        break
                else:
                    # every constellation pixel matched at this offset
                    location = (
                        y,
                        x,
                        y + constellation_image.shape[0],
                        x + constellation_image.shape[1]
                    )

        return location
|
25,331 | d12158563f5e932d02e05ac50f53f8346475278c | """
There are n piles of stones arranged in a row. The i-th pile has stones[i]
stones.
A move consists of merging exactly k consecutive piles into one pile, and the
cost of this move is equal to the total number of stones in these k piles.
Find the minimum cost to merge all piles of stones into one pile. If it is
impossible, return -1.
Example 1:
Input: stones = [3,2,4,1], k = 2 Output: 20
Explanation: We start with [3, 2, 4, 1].
We merge [3, 2] for a cost of 5, and we are left with [5, 4, 1].
We merge [4, 1] for a cost of 5, and we are left with [5, 5].
We merge [5, 5] for a cost of 10, and we are left with [10].
The total cost was 20, and this
is the minimum possible.
"""
class Solution1000:
    """Placeholder for the "Minimum Cost to Merge Stones" problem; not yet implemented."""
    pass
|
25,332 | 290f204b67a4b081b55a1e46773ccfbc0e35c4eb | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import AIC_site.storage
class Migration(migrations.Migration):
    """Adds the StaffMember model and the self-referential StaffTeam hierarchy."""

    dependencies = [
        ('base', '0044_auto_20160301_1248'),
    ]

    operations = [
        # Individual staff member with profile data and an SFTP-synced image.
        migrations.CreateModel(
            name='StaffMember',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=150, verbose_name='full name')),
                ('email', models.EmailField(max_length=254, blank=True)),
                ('entrance_year', models.PositiveIntegerField(verbose_name='entrance year')),
                ('label', models.CharField(max_length=150, verbose_name='label', blank=True)),
                ('bio', models.CharField(max_length=300, verbose_name='biography', blank=True)),
                ('image', models.ImageField(upload_to=b'staff/images/', storage=AIC_site.storage.SyncingHashStorage('storages.backends.sftpstorage.SFTPStorage'), verbose_name='image')),
            ],
            options={
                'verbose_name': 'staff member',
                'verbose_name_plural': 'staff',
            },
        ),
        # Team of members; "parent" makes teams form a tree.
        migrations.CreateModel(
            name='StaffTeam',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=100, verbose_name='team name')),
                ('members', models.ManyToManyField(to='base.StaffMember', verbose_name='team members', blank=True)),
                ('parent', models.ForeignKey(verbose_name='parent team', blank=True, to='base.StaffTeam', null=True)),
            ],
            options={
                'verbose_name': 'staff team',
                'verbose_name_plural': 'staff teams',
            },
        ),
    ]
|
25,333 | d976eb2cca65750e223436b439a0c767593385bb |
def voidF(x, y, z):
    """Compute x + y + z*x*y and discard it; always returns None."""
    _ = x + y + z * x * y
def main():
    """Exercise voidF once and return a fixed exit status of 12."""
    voidF(1, 3, 4)
    return 12
if __name__ == "__main__":
    import sys
    # propagate main()'s return value (12) as the process exit status
    ret=main()
    sys.exit(ret)
|
25,334 | be7ff412d18af8820c34495dc2e4fab8bd331c9c | import os
import sys
import re
import datetime
from bs4 import BeautifulSoup
sys.path.append('../')
import settings
import scraping
import linebot
# Fetch the listing page and collect candidate detail-page URLs.
# bugfix: instance names renamed (scraping -> scraper, linebot -> bot) so
# they no longer shadow the imported modules of the same name.
scraper = scraping.Scraping()
listUrl = []
newUrlList = []
page = scraper.getPage(settings.SCRAPING_PAGE_URL)
aElems = scraper.getElement(page, 'a')
for a in aElems:
    try:
        aClass = a.get('class').pop(0)
        # NOTE(review): substring test ("in" on a string) -- possibly meant
        # equality; preserved as-is.
        if aClass in 'js-cassette_link_href':
            listUrl.append(a.get('href'))
            print(listUrl)
    except (AttributeError, IndexError):
        # anchors without a class attribute (None.pop / empty list) are skipped
        pass

# Check each page's update date; keep only pages updated today.
for url in listUrl:
    url = settings.SCRAPING_PAGE_DMAIN + url
    result = scraper.checkUpdateDate(url)
    if result:
        newUrlList.append(url)

# Notify the LINE group about the new listings.
bot = linebot.Linebot()
text = bot.createText(newUrlList)
res = bot.pushMessage(settings.LINE_GROUP_ID, text)
|
25,335 | 2782e60d66ce9cdee047ac9c3ae71423415a1cc0 | Python 3.5.2 (default, Nov 17 2016, 17:05:23)
[GCC 5.4.0 20160609] on linux
Type "copyright", "credits" or "license()" for more information.
>>> a=5
>>> b=10
>>> a=b
>>> b=a
>>> a
10
>>> b=
SyntaxError: invalid syntax
>>> b
10
>>> c=a
>>> a=b
>>> b=c
>>> a
10
>>> b
10
>>> four='4'
>>> print(four*4)
4444
>>> print(four*3)
444
>>> five=4
>>> print(five)
4
>>> print(five*3)
12
>>> my_name='student'
>>> print("hi,"+myname')
SyntaxError: EOL while scanning string literal
>>> print("hi,"+my_name)
hi,student
>>> my_age=15
>>> print('Iam'+my_age+'yearsold')
Traceback (most recent call last):
File "<pyshell#24>", line 1, in <module>
print('Iam'+my_age+'yearsold')
TypeError: Can't convert 'int' object to str implicitly
>>> print('iam'+15+'yearsold')
Traceback (most recent call last):
File "<pyshell#25>", line 1, in <module>
print('iam'+15+'yearsold')
TypeError: Can't convert 'int' object to str implicitly
>>> ('iam'+'15'+'yearsold')
'iam15yearsold'
>>> score=1
>>> total=score+(count*2)
Traceback (most recent call last):
File "<pyshell#28>", line 1, in <module>
total=score+(count*2)
NameError: name 'count' is not defined
>>> print (total)
Traceback (most recent call last):
File "<pyshell#29>", line 1, in <module>
print (total)
NameError: name 'total' is not defined
>>> count=11
>>> total=23
>>> total=score+(count*2)
>>>
>>> print(total)
23
>>>
==== RESTART: /home/student/rakan19_lab2/meet2017y1lab2/squareinTurtle.py ====
|
25,336 | 1caf4025e83b64eb33c40847b30fac2ac1b9a6fb | """
Simplify CNKI directory structure by removing spurious elements of the path. For
example, this script replaces the first path below with the second:
HY3/2007/E/ZHPF/ZHPF200708034/ZHPF200708034.fuse.xml
HY3/2007/E/ZHPF/ZHPF200708034.fuse.xml
The result is more compact directories and reduced use of inodes. Progress and
warnings are printed to log files.
Usage:
$ python simplify-structure/py (d1d1|d1d2|hy2|hy3|d1|d2) filter?
The argument is a shorthand for one of the subpaths of the CNKI directory:
Drive1/disk1
Drive1/disk2
HY2
HY3
disk1
disk2
Because of the size of hy2 and hy3 and the slowness of this script on the nsf
share, you can also use identifiers that point to 1MB fragments of hy2 and hy3:
hy2-a, hy2-b, hy2-c, hy2-d, hy2-e, hy2-f, hy2-g, hy3-a, hy3-b and hy3-c. These
sub lists were created with split-vlist-file.py.
If a filter is added, then the string given is required to be an element of the
file path being simplified. This was needed for those cases where we simplified a
directory and it threw warnings indicating that something went wrong with
unarchiving. When this was the case, it was always for a particular year. To fix
it, we (1) removed the year from the simplified directory, (2) unarchived the
year again, (3) moved the year to its position in the simplified directory, and
(4) simplified it again. This happend for hy3-1997 and d1d1-2010. For the
former, we needed to re simplify two files (hy3-a and hy3-b) because the 1997
files were in both those files:
$ python simplify-structure.py hy3-a 1997 &
$ python simplify-structure.py hy3-b 1997 &
"""
import os, sys, shutil
# Root of the CNKI corpus on the shared filesystem.
CNKI_DIR = '/home/j/corpuswork/fuse/FUSEData/cnki'
# Directory holding the "vlist-grep" tar listings used as input.
LISTS_DIR = CNKI_DIR + '/information/tar-lists/targz-fuse-xml-all/vlist-grep'
EXT = 'fuse-xml-all-targz-vlist-grep.txt'
# Maps the command-line shorthand to its vlist file.  The -a/-b/... entries
# are 1MB fragments (created with split-vlist-file.py, see module docstring)
# and are looked up relative to the current working directory.
FILES = { 'd1d1': LISTS_DIR + '/CNKI-Drive1-disk1-' + EXT,
          'd1d2': LISTS_DIR + '/CNKI-Drive1-disk2-' + EXT,
          'hy2': LISTS_DIR + '/CNKI-HY2-' + EXT,
          'hy2-a': "CNKI-HY2-a-fuse-xml-all-targz-vlist-grep.txt",
          'hy2-b': "CNKI-HY2-b-fuse-xml-all-targz-vlist-grep.txt",
          'hy2-c': "CNKI-HY2-c-fuse-xml-all-targz-vlist-grep.txt",
          'hy2-d': "CNKI-HY2-d-fuse-xml-all-targz-vlist-grep.txt",
          'hy2-e': "CNKI-HY2-e-fuse-xml-all-targz-vlist-grep.txt",
          'hy2-f': "CNKI-HY2-f-fuse-xml-all-targz-vlist-grep.txt",
          'hy2-g': "CNKI-HY2-g-fuse-xml-all-targz-vlist-grep.txt",
          'hy3': LISTS_DIR + '/CNKI-HY3-' + EXT,
          'hy3-a': "CNKI-HY3-a-fuse-xml-all-targz-vlist-grep.txt",
          'hy3-b': "CNKI-HY3-b-fuse-xml-all-targz-vlist-grep.txt",
          'hy3-c': "CNKI-HY3-c-fuse-xml-all-targz-vlist-grep.txt",
          'd1': LISTS_DIR + '/CNKI-disk1-' + EXT,
          'd2': LISTS_DIR + '/CNKI-disk2-' + EXT }
def simplify(id, vlist_file, filter):
name = "%s-%s" % (id, filter) if filter else id
fh_progress = open("log-progress-%s.txt" % name, 'w')
fh_warnings = open("log-warnings-%s.txt" % name, 'w')
print vlist_file
c = 0
for line in open(vlist_file):
if filter and line.find('/'+filter+'/') < 0:
continue
c += 1
if c % 1000 == 0:
fh_progress.write("%d\n" % c)
fh_progress.flush()
#if c > 10: break
path = line.split()[-1]
long_dir = os.path.split(path)[0]
short_dir = os.path.split(long_dir)[0]
basename = os.path.basename(path)
src = os.path.join(CNKI_DIR, long_dir, basename)
dst = os.path.join(CNKI_DIR, short_dir, basename)
if not os.path.exists(src):
fh_warnings.write("File does not exist:%s\n" % src)
fh_warnings.flush()
elif not os.path.isdir(os.path.dirname(dst)):
fh_warnings.write("Directory does not exist %s\n" % short_dir)
fh_warnings.flush()
else:
shutil.move(src, dst)
os.rmdir(os.path.dirname(src))
if __name__ == '__main__':
    # usage: python simplify-structure.py <shorthand> [filter]
    id = sys.argv[1]
    # optional filter string: restrict processing to paths containing it
    # as a path element (see module docstring for the motivation)
    filter = sys.argv[2] if len(sys.argv) > 2 else False
    simplify(id, FILES[id], filter)
|
25,337 | 25c39839c2382074726e7ee68d7b5eb831deaba2 | #!python2.7
import numpy as np
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
def pos(v, w, phi, theta, s, n, m = 0.145, r = 0.03734):
    """Simulate and plot the 3D flight of a spinning ball (Python 2).

    v          -- initial speed
    w          -- spin rate, scales the Magnus force term
    phi, theta -- launch angles (radians)
    s          -- spin-axis phase angle (radians)
    n          -- Euler integration steps per unit time
    m          -- ball mass (default 0.145; presumably a baseball in kg
                  -- TODO confirm units)
    r          -- ball radius (default 0.03734)

    Draws the trajectory on the module-level 3D axes and returns the
    sampled coordinates as [x, y, z] lists.
    """
    global fig
    global ax
    # trajectory starts at the origin
    x = [0.0]
    y = [0.0]
    z = [0.0]
    # initial velocity components from the launch angles
    xv = math.sin(phi)*math.cos(theta)*v
    yv = math.cos(phi)*math.sin(theta)*v
    zv = math.cos(phi)*math.cos(theta)*v
    # integrate until the ball has travelled 18.47 along z
    # (presumably the pitch distance in metres -- TODO confirm)
    while(z[-1] < 18.47):
        # explicit Euler position update
        x.append(x[-1]+xv/n)
        y.append(y[-1]+yv/n)
        z.append(z[-1]+zv/n)
        yv-= 9.8/n  # gravity acts on the y component
        V = math.sqrt(xv**2+yv**2+zv**2)
        # Magnus force magnitude per unit mass
        Fm = V*w*(4*math.pi/3)*(r**3)/m
        # (xc, yc, zc): unit vector giving the Magnus force direction,
        # rotated by the spin-axis phase s
        xc = yv*zv/math.sqrt((yv**2*zv**2)+(zv*xv*math.tan(theta+(math.sin(s)*math.pi)))**2+(yv*xv*math.tan(phi+(math.cos(s)*math.pi)))**2)
        yc = (zv*xv*math.tan(theta+(math.sin(s)*math.pi)))/math.sqrt((yv**2*zv**2)+(zv*xv*math.tan(theta+(math.sin(s)*math.pi)))**2+(yv*xv*math.tan(phi+(math.cos(s)*math.pi)))**2)
        zc = (yv*xv*math.tan(phi+(math.cos(s)*math.pi)))/math.sqrt((yv**2*zv**2)+(zv*xv*math.tan(theta+(math.sin(s)*math.pi)))**2+(yv*xv*math.tan(phi+(math.cos(s)*math.pi)))**2)
        # apply the Magnus acceleration to the velocity
        xv += xc*Fm/n
        yv += yc*Fm/n
        zv += zc*Fm/n
        # per-step progress print (debugging output)
        print '%f, %f, %f' %(x[-1],y[-1],z[-1])
    # note axes are plotted as (x, z, y) so the flight direction is "into"
    # the figure's Y axis
    ax.plot(np.array(x),np.array(z),np.array(y))
    ax.set_xlim3d([-2.0, 2.0])
    ax.set_xlabel('X')
    ax.set_ylim3d([0.0, 20.0])
    ax.set_ylabel('Y')
    ax.set_zlim3d([-2.0, 2.0])
    ax.set_zlabel('Z')
    ax.set_title('Curveball')
    plt.show()
    return [x,y,z]
# Run one demonstration trajectory at module import time.
dat = pos(42.0, 50.0, math.pi/326, math.pi/128, 0.0, 250.0)
def show():
    """Redraw the module-level figure from a side-on viewpoint (elev 0, azim 90)."""
    ax.view_init(0.0, 90.0)
    plt.show()
def analyze(v, w, phi, theta, n, N = 5, m = 0.145, r = 0.03734):
    """Plot N trajectories with spin-axis phases evenly spaced over [0, 2pi).

    Same parameters as pos(); N is the number of phase samples.
    NOTE(review): the local fig/ax created here shadow the module-level
    ones, but pos() plots on the *global* ax via its `global` statements
    -- confirm whether the local axes are actually used.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    for i in range(N):
        # phase s = 2*pi*i/N
        dat = pos(v, w, phi, theta, 2*i*math.pi/N, n)
    ax.set_xlim3d([-2.0, 2.0])
    ax.set_xlabel('X')
    ax.set_ylim3d([0.0, 20.0])
    ax.set_ylabel('Y')
    ax.set_zlim3d([-2.0, 2.0])
    ax.set_zlabel('Z')
    ax.set_title('3D Test')
    ax.view_init(elev = 0.0, azim = 90.0)
    plt.show()
    return 'Done'
25,338 | 645fc931635d6509ccdd0f6ef5bcacf115961d60 | from django.urls import path
from . import views
# URL namespace for reversing, e.g. reverse('slider:home').
app_name = 'slider'
urlpatterns = [
    # landing page with slides and news
    path('', views.home_slides_news, name='home'),
    path('contacts/', views.contacts_page, name='contacts_page'),
    path('sending_mail/', views.sending_mail, name='sending_mail'),
    # NOTE(review): no trailing slash here, unlike the other routes -- confirm intentional
    path('contacts/sending_mail', views.contacts_send_mail, name='contacts_send_mail'),
]
|
25,339 | 533702e59429eaf28a7132888b49057714738674 | #!/bin/python
def parr(ar):
    """Print the elements of ar on one line, space-separated (Python 2)."""
    for r in ar:
        print r,
    print
def partition(ar):
    """Partition ar around its first element (quicksort step).

    Returns (partitioned_list, pivot_index): every element before the
    pivot is <= pivot, every element after is > pivot, and the relative
    order within each side is preserved.  The input list is not modified.
    """
    pivot = ar[0]
    smaller = [item for item in ar[1:] if item <= pivot]
    larger = [item for item in ar[1:] if item > pivot]
    return smaller + [pivot] + larger, len(smaller)
def qSort(ar, i, j):
    """Recursively quicksort the slice ar[i:j] and return it as a new list.

    Prints each merged intermediate result via parr (exercise requires
    showing the array after every partition step).
    """
    # base case: slices of length 0 or 1 are already sorted
    if len(ar[i:j]) <= 1:
        return ar[i:j]
    ret, p = partition(ar[i:j])
    ar = ret
    # sort both sides of the pivot and stitch them back together
    ret = qSort(ar, 0, p) + [ar[p]] + qSort(ar, p+1, len(ar))
    parr(ret)
    return ret
def quickSort(ar):
    """Quicksort ar and return the sorted copy; the input is unchanged.

    Previously the sorted result was computed, bound to a local, and
    discarded; returning it makes the function usable by callers (the
    script's printed output is unchanged, since qSort does the printing).
    """
    return qSort(ar, 0, len(ar))
# Read the element count (unused) and the space-separated values (Python 2),
# then sort -- output is produced by qSort's parr calls.
m = input()
ar = [int(i) for i in raw_input().strip().split()]
quickSort(ar)
|
25,340 | 1388145bd63a0e9d3a8e6d20a4de910ace4ef4b7 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import re
import time
from django.http import Http404, response
from django.test.utils import override_settings
from django.urls import reverse
import six
from six.moves.urllib.parse import urlencode
from wechatpy.client.api import WeChatJSAPI
from ..models import WeChatApp
from ..sites.wechat import WeChatSite, WeChatView, wechat_view
from .base import mock, WeChatTestCase
class WeChatSiteTestCase(WeChatTestCase):
    """Tests for WeChatSite / WeChatView routing, dispatch and the jsapi view."""

    def test_app_queryset(self):
        """Verify that a site's app_queryset restricts which apps are served."""
        that = self

        class TestSite(WeChatSite):
            @property
            def app_queryset(self):
                return WeChatApp.objects.filter(name=that.app.name)

        class TestView(WeChatView):
            def get(self, request, appname):
                return response.HttpResponse(status=204)

            def _get_appname(self, request, appname):
                return appname

        # apps contained in app_queryset are reachable; anything else is a 404
        site = TestSite()
        view = site._create_view(TestView)
        resp = view(self.rf().get("/"), self.app.name)
        self.assertEqual(resp.status_code, 204)
        self.assertRaises(Http404, view, self.rf().get("/"),
                          self.another_app.name)

    def test_wechat_view(self):
        """Test WeChatView dispatch and the wechat_view decorator."""
        that = self

        class View(WeChatView):
            app_queryset = WeChatApp.objects.filter(name=self.app.name)

            def post(self, request):
                that.assertEqual(request.wechat.app.id, that.app.id)
                that.assertEqual(request.wechat.appname, that.app.name)
                return response.HttpResponse(status=204)

            def _get_appname(self, request, *args, **kwargs):
                return that.app.name

        # app_queryset: an app outside the queryset must 404
        with mock.patch.object(View, "_get_appname"):
            View._get_appname.return_value = self.another_app.name
            view = View.as_view()
            self.assertRaises(Http404, view, self.rf().post("/"))
        # only the declared HTTP methods are allowed (405 otherwise)
        view = View.as_view()
        resp = view(self.rf().get("/"))
        self.assertEqual(resp.status_code, 405)
        resp = view(self.rf().post("/"))
        self.assertEqual(resp.status_code, 204)
        # the function-style wechat_view decorator behaves the same way
        @wechat_view("^$", methods=["POST"])
        def View(request, appname):
            return response.HttpResponse(status=204)
        resp = View.as_view()(self.rf().get("/"), self.app.name)
        self.assertEqual(resp.status_code, 405)
        resp = View.as_view()(self.rf().post("/"), self.app.name)
        self.assertEqual(resp.status_code, 204)

    def test_jsapi(self):
        """Test the jsconfig endpoint: signature fields and DEBUG handling."""
        with mock.patch.object(WeChatJSAPI, "get_jsapi_ticket"):
            ticket = "ticket"
            WeChatJSAPI.get_jsapi_ticket.return_value = "ticket"
            jsapi_list = ["onMenuShareTimeline", "onMenuShareAppMessage"]
            src = reverse("wechat_django:jsconfig",
                          kwargs=dict(appname=self.app.name))
            querystr = urlencode(dict(
                jsApiList=",".join(jsapi_list)
            ))
            referrer = "https://baidu.com/abc"
            resp = self.client.get(src + "?" + querystr,
                                   HTTP_REFERER=referrer)
            # the view renders a wx.config(JSON.parse('...')) javascript snippet
            pattern = r"wx\.config\(JSON\.parse\('(.+)'\)\);"
            match = re.match(pattern, resp.content.decode())
            self.assertTrue(match)
            json_str = match.group(1)
            data = json.loads(json_str)
            debug = data.get("debug")
            appid = data["appId"]
            timestamp = data["timestamp"]
            noncestr = data["nonceStr"]
            js_api_list = data["jsApiList"]
            client = self.app.client
            # recompute the signature against the referrer URL and compare
            signature = client.jsapi.get_jsapi_signature(noncestr, ticket,
                                                         timestamp, referrer)
            self.assertFalse(debug)
            self.assertEqual(appid, self.app.appid)
            self.assertAlmostEqual(timestamp, time.time(), delta=3)
            self.assertIsInstance(timestamp, int)
            self.assertIsInstance(noncestr, six.string_types)
            self.assertTrue(noncestr)
            self.assertEqual(js_api_list, jsapi_list)
            self.assertEqual(signature, data["signature"])
            # debug=True in the query string is honoured only when
            # settings.DEBUG is on
            with override_settings(DEBUG=False):
                querystr = urlencode(dict(
                    debug=True
                ))
                resp = self.client.get(src + "?" + querystr,
                                       HTTP_REFERER=referrer)
                match = re.match(pattern, resp.content.decode())
                self.assertTrue(match)
                json_str = match.group(1)
                data = json.loads(json_str)
                self.assertFalse(data.get("debug"))
            with override_settings(DEBUG=True):
                resp = self.client.get(src + "?" + querystr,
                                       HTTP_REFERER=referrer)
                match = re.match(pattern, resp.content.decode())
                self.assertTrue(match)
                json_str = match.group(1)
                data = json.loads(json_str)
                self.assertTrue(data.get("debug"))

    def test_request(self):
        """Request/response and URL routing smoke test (not implemented)."""
        pass
|
25,341 | 1f536613076a1ff2f817b277f3a5c950dd6820e5 | """
Seek e Cursors
seek() -> É utilizado para movimentar o cursor pelo arquivo.
arquivo = open('texto.txt')
print(arquivo.read())
# A função seek é utilizada para movimentação do cursor pelo arquivo. Ela recebe um parâmetro que indica onde queremos colocar o cursor
# Movimentando o cursor pelo arquivo com a função seek()
#Voltando para a posição 0 do texto, colocando o curso na posição 0
arquivo.seek(0)
print(arquivo.read())
# readline() -> Função que lê o arquivo linha a linha.
print(arquivo.readline())
print(arquivo.readline())
# readlines()
linhas = arquivo.readlines()
print(linhas)
print(type(linhas))
print(linhas.__len__())
"""
#OBS: Quando abrimos um arquivo com a função open() é criada uma conexão entre o arquivo no disco do computador e o programa Python. Essa conexão é chamada de streaming.
# Ao finalizar os trabalhos com o arquivo devemos fechar essa conexão. Para isso utilizamso a função clone()
# NOTE: open() creates a connection (a "stream") between the file on disk and
# the Python program.  When we finish working with the file we must close
# that connection with close().
arquivo = open('texto.txt')
print(arquivo.read())
print(arquivo.closed)  # .closed reports the state: True = closed, False = open
arquivo.close()
print(arquivo.closed)
25,342 | 7195ea953a7c14dc3d0456f3a70d431165e23865 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Currency.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """Currency-converter form, auto-generated by pyuic5 from Currency.ui.

    Do not edit by hand: regenerating the .ui file overwrites this class
    (see the generator warning at the top of the file).
    """

    def setupUi(self, Form):
        """Build the widget tree and wire the clear/reset connections."""
        Form.setObjectName("Form")
        Form.resize(763, 475)
        Form.setMaximumSize(QtCore.QSize(16777182, 16777215))
        Form.setAutoFillBackground(False)
        # bottom row: reset / exit buttons
        self.horizontalLayoutWidget = QtWidgets.QWidget(Form)
        self.horizontalLayoutWidget.setGeometry(QtCore.QRect(40, 360, 681, 80))
        self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.pushButton_2 = QtWidgets.QPushButton(self.horizontalLayoutWidget)
        self.pushButton_2.setObjectName("pushButton_2")
        self.horizontalLayout.addWidget(self.pushButton_2)
        self.pushButton = QtWidgets.QPushButton(self.horizontalLayoutWidget)
        self.pushButton.setObjectName("pushButton")
        self.horizontalLayout.addWidget(self.pushButton)
        # top row: amount, source/target currency inputs, convert button
        self.horizontalLayoutWidget_2 = QtWidgets.QWidget(Form)
        self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(40, 10, 681, 51))
        self.horizontalLayoutWidget_2.setObjectName("horizontalLayoutWidget_2")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_2)
        self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.label_2 = QtWidgets.QLabel(self.horizontalLayoutWidget_2)
        self.label_2.setObjectName("label_2")
        self.horizontalLayout_2.addWidget(self.label_2)
        self.doubleSpinBox = QtWidgets.QDoubleSpinBox(self.horizontalLayoutWidget_2)
        self.doubleSpinBox.setMaximum(99990000.0)
        self.doubleSpinBox.setObjectName("doubleSpinBox")
        self.horizontalLayout_2.addWidget(self.doubleSpinBox)
        self.label_3 = QtWidgets.QLabel(self.horizontalLayoutWidget_2)
        self.label_3.setObjectName("label_3")
        self.horizontalLayout_2.addWidget(self.label_3)
        self.lineEdit_2 = QtWidgets.QLineEdit(self.horizontalLayoutWidget_2)
        self.lineEdit_2.setMaxLength(11)
        self.lineEdit_2.setObjectName("lineEdit_2")
        self.horizontalLayout_2.addWidget(self.lineEdit_2)
        self.label_4 = QtWidgets.QLabel(self.horizontalLayoutWidget_2)
        self.label_4.setObjectName("label_4")
        self.horizontalLayout_2.addWidget(self.label_4)
        self.lineEdit = QtWidgets.QLineEdit(self.horizontalLayoutWidget_2)
        self.lineEdit.setMaxLength(11)
        self.lineEdit.setObjectName("lineEdit")
        self.horizontalLayout_2.addWidget(self.lineEdit)
        self.pushButton_3 = QtWidgets.QPushButton(self.horizontalLayoutWidget_2)
        self.pushButton_3.setObjectName("pushButton_3")
        self.horizontalLayout_2.addWidget(self.pushButton_3)
        self.checkBox = QtWidgets.QCheckBox(self.horizontalLayoutWidget_2)
        self.checkBox.setAutoFillBackground(True)
        self.checkBox.setObjectName("checkBox")
        self.horizontalLayout_2.addWidget(self.checkBox)
        self.label = QtWidgets.QLabel(self.horizontalLayoutWidget_2)
        self.label.setText("")
        self.label.setObjectName("label")
        self.horizontalLayout_2.addWidget(self.label)
        # central text area for conversion results
        self.textEdit = QtWidgets.QTextEdit(Form)
        self.textEdit.setGeometry(QtCore.QRect(40, 66, 681, 291))
        self.textEdit.setObjectName("textEdit")
        self.retranslateUi(Form)
        # the reset button clears all three input widgets
        self.pushButton.clicked.connect(self.lineEdit_2.clear)
        self.pushButton.clicked.connect(self.doubleSpinBox.clear)
        self.pushButton.clicked.connect(self.lineEdit.clear)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Set all translatable (German) UI strings."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.pushButton_2.setText(_translate("Form", "Exit"))
        self.pushButton.setText(_translate("Form", "Zurücksetzen"))
        self.label_2.setText(_translate("Form", "Betrag"))
        self.label_3.setText(_translate("Form", "Währung"))
        self.label_4.setText(_translate("Form", "Zielwährungen"))
        self.pushButton_3.setText(_translate("Form", "umrechnen"))
        self.checkBox.setText(_translate("Form", "Live-Daten"))
|
25,343 | 76cbb8bede46ccaa62b1e92e87300f8e651e2e81 | from typing import AnyStr
moves = ((0, -1), (0, 1), (-1, 0), (1, 0))
def construct(r, c, cnt, construct_done):
    """Backtracking DFS from cell (r, c), tracking the longest strictly
    descending trail (SWEA 1949).

    cnt            -- number of cells on the current trail, including (r, c)
    construct_done -- 1 while the single allowed "dig" (lowering one cell
                      by less than K) is still available, 0 once used
    Updates the global best length `ans`; uses the global grid `mat`,
    visit mask `visited`, grid size N and dig limit K.
    """
    global ans
    if cnt > ans:
        ans = cnt
    visited[r][c] = 1
    for x, y in moves:
        next_r = r+x
        next_c = c+y
        if 0 <= next_r < N and 0 <= next_c < N:
            if not visited[next_r][next_c]:
                path = mat[next_r][next_c]
                if path < mat[r][c]:
                    # already strictly lower: extend the trail
                    construct(next_r, next_c, cnt+1, construct_done)
                elif path - K < mat[r][c] and construct_done:
                    # dig: lower the neighbour just below the current cell,
                    # recurse with the dig spent, then restore (backtrack)
                    mat[next_r][next_c] = mat[r][c] - 1
                    construct(next_r, next_c, cnt+1, 0)
                    mat[next_r][next_c] = path
    visited[r][c] = 0
if __name__ == "__main__":
    # T test cases; each gives grid size N, dig limit K, and an N x N height map.
    T = int(input())
    for tc in range(1, T+1):
        N, K = map(int, input().split())
        visited = [[0 for _ in range(N)] for _ in range(N)]
        ans = 0
        mat = [list(map(int, input().split())) for _ in range(N)]
        # trails must start from a cell of maximal height; collect all of them
        top = 0
        tops = []
        for r in range(N):
            for c in range(N):
                if mat[r][c] > top:
                    tops = []
                    tops.append((c, r))
                    top = mat[r][c]
                elif mat[r][c] == top:
                    tops.append((c, r))
        for c, r in tops:
            construct(r, c, 1, 1)
        print(f"#{tc} {ans}")
# git commit -m "code: Solve swea 1949 등산로 조성 (yoonbaek)"
25,344 | 4465c213fd38db1ee535949376a4faf538df6ff0 | import json
from datetime import datetime
import mysql
import numpy as np
import pandas as pd
from mysql.connector import connect
from requests import Request
from requests.auth import HTTPBasicAuth
from requests_throttler import BaseThrottler, ThrottledRequest
from sqlalchemy import create_engine
from tqdm import tqdm
from config import BASE_DIR
def find_commit_info(thing: ThrottledRequest):
    """Extract (sha, author name, author date) from a throttled GitHub
    "get a commit" API response.

    Returns (None, 'Unknown', 'Unknown') when the response body is not
    valid JSON, so callers can detect failure via the None sha.  On
    success the date is a timezone-aware datetime (epoch default when
    the field is missing).
    """
    try:
        info = thing.response.json()
    except json.decoder.JSONDecodeError:
        print('error')
        return None, 'Unknown', 'Unknown'
    sha = info.get('sha', 'Unknown')
    # Prefer the author block, falling back to committer.  Using `or`
    # also covers the case where the key is present but null (GitHub
    # returns "author": null for unattributed commits), which the old
    # .get(..., default) chain crashed on with AttributeError.
    person = info.get('author') or info.get('committer') or {}
    author = person.get('name', 'Unknown')
    date = person.get('date', '1970-01-01T00:00:00Z')
    date = datetime.strptime(date, '%Y-%m-%dT%H:%M:%S%z')
    return sha, author, date
if __name__ == '__main__':
    # SQLAlchemy engine for the bulk read; raw mysql-connector below for writes.
    db_connection_str = 'mysql+mysqlconnector://vulnerability-history:secret@localhost:33062/vulnerability-history'
    db_connection = create_engine(db_connection_str)
    # All (repo, commit) pairs whose commit_date has not been filled in yet.
    query = """
    SELECT repo_id, full_name as repo_name, commit_hash
    FROM updates
    INNER JOIN repos r on updates.repo_id = r.id
    WHERE ISNULL(commit_date)
    GROUP BY commit_hash, full_name
    """
    df = pd.read_sql(query, con=db_connection)
    with connect(host='localhost', port=33062, user='vulnerability-history', database='vulnerability-history',
                 password='secret') as connection:
        for (repo_id, repo_name), commit_df in tqdm(df.groupby(['repo_id', 'repo_name'])):
            commits = commit_df['commit_hash'].to_list()
            print(repo_id)
            print(repo_name)
            # Throttle to ~1.25 req/s to stay under GitHub API rate limits.
            with BaseThrottler(name='base-throttler', delay=0.8) as bt:
                # SECURITY(review): personal access token hardcoded in source --
                # revoke it and load from an environment variable instead.
                dependency_requests = [
                    Request(method='GET', url='https://api.github.com/repos/{}/git/commits/{}'.format(repo_name, commit_hash),
                            auth=HTTPBasicAuth('Rheddes', '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')) for commit_hash in commits]
                throttled_requests = bt.multi_submit(dependency_requests)
                for response in throttled_requests:
                    commit_hash, commit_author, commit_date = find_commit_info(response)
                    if commit_hash is None:
                        # unparseable response; leave this row for a later run
                        print('skipping')
                        continue
                    with connection.cursor() as cursor:
                        cursor.execute(
                            "UPDATE updates SET commit_author=%s, commit_date=%s WHERE repo_id=%s AND commit_hash=%s",
                            [commit_author, commit_date.strftime('%Y-%m-%d'), int(repo_id), commit_hash]
                        )
                    connection.commit()
25,345 | 2fceb901c9449c1511ade7bc07b8ba9b76b76629 | import pandas as pd
import json
# Build report.json mapping each student name to the classes they take and
# the teacher of each class.
tc_data = pd.read_parquet('teachers.parquet', engine='pyarrow')
st_data = pd.read_csv('students.csv')
# The first CSV column packs several fields separated by '_'; split into
# at most 7 parts.  Parts 1+2 form the student name, part 6 the class id.
# NOTE(review): exact field layout of the packed column is assumed from the
# indices used below -- confirm against students.csv.
new_st_data = st_data[st_data.columns[0]].str.split("_", n=6, expand=True)
new_st_data['name'] = new_st_data[1] + '_' + new_st_data[2]
new_st_data['cid'] = new_st_data[6]
tc_data['name'] = tc_data['fname'] + '_' + tc_data['lname']
json_data = {}
# PERF(review): this nested iterrows join is O(students x teachers); a
# pandas merge on 'cid' would do the same in one vectorized pass.
for index, row in new_st_data.iterrows():
    for tc_index, tc_row in tc_data.iterrows():
        if row['cid'] == tc_row['cid']:
            if row['name'] not in json_data:
                json_data[row['name']] = [{
                    "class": row['cid'],
                    "teacher_name": tc_row['name'],
                    "teacher_id": tc_row['id']
                }]
            else:
                json_data[row['name']].append({
                    "class": row['cid'],
                    "teacher_name": tc_row['name'],
                    "teacher_id": tc_row['id']
                })
with open('report.json', 'w') as fp:
    report = {"Report_Data": json_data}
    json.dump(report, fp)
|
25,346 | 7cc17886cbdb3b0916bbc0f697524d879ad7b44e | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue May 22 07:56:11 2018
@author: flemmingholtorf
"""
from __future__ import print_function
from main.mods.SemiBatchPolymerization.mod_class_stgen import SemiBatchPolymerization_multistage
from main.mods.SemiBatchPolymerization.mod_class import SemiBatchPolymerization
from main.dync.MHEGen import msMHEGen
from main.examples.SemiBatchPolymerization.noise_characteristics import *
from scipy.stats import chi2
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
# don't write messy bytecode files
# might want to change if ran multiple times for performance increase
# sys.dont_write_bytecode = True
# discretization parameters:
# discretization parameters (nfe finite elements, ncp collocation points):
nfe, ncp = 24, 3 # Radau nodes assumed in code
# state variables (name -> list of index tuples):
x_vars = {"PO":[()], "Y":[()], "W":[()], "PO_fed":[()], "MY":[()], "MX":[(0,),(1,)], "T":[()], "T_cw":[()]}
# corrupted state variables (none in this setup):
x_noisy = []
# output variables:
y_vars = {"PO":[()], "Y":[()], "MY":[()], 'T':[()], 'T_cw':[()]}
# controls and their box bounds:
u = ["u1", "u2"]
u_bounds = {"u1": (0.0,0.4), "u2": (0.0, 3.0)}
# uncertain parameters:
p_noisy = {"A":[('p',),('i',)],'kA':[()]}
# noisy initial conditions:
noisy_ics = {'PO_ic':[()],'T_ic':[()],'MY_ic':[()],'MX_ic':[(1,)]}
# initial uncertainty set description (hyperrectangular, relative deviations):
p_bounds = {('A', ('i',)):(-0.2,0.2),('A', ('p',)):(-0.2,0.2),('kA',()):(-0.2,0.2),
            ('PO_ic',()):(-0.02,0.02),('T_ic',()):(-0.005,0.005),
            ('MY_ic',()):(-0.01,0.01),('MX_ic',(1,)):(-0.002,0.002)}
# time horizon bounds (per finite element):
tf_bounds = [10.0*24/nfe, 30.0*24/nfe]
# path constrained properties to be monitored:
pc = ['Tad','T']
# monitored vars (all states plus the controls):
poi = [x for x in x_vars] + u
#parameter scenario:
scenario = {('A',('p',)):-0.2,('A',('i',)):-0.2,('kA',()):-0.2}
# scenario-tree definition:
st = {} # scenario tree : {parent_node, scenario_number on current stage, base node (True/False), scenario values {'name',(index):value}}
s_max, nr, alpha = 4, 2, 0.2
dummy ={(1, 2): {('A', ('p',)): 1-alpha, ('kA', ()): 1-alpha, ('T_ic', ()): 1+0.005, ('A', ('i',)): 1-alpha, ('MY_ic', ()): 1+0.01, ('PO_ic', ()): 1+0.02},
(1, 3): {('A', ('p',)): 1-alpha, ('kA', ()): 1+alpha, ('T_ic', ()): 1-0.005, ('A', ('i',)): 1-alpha, ('MY_ic', ()): 1-0.01, ('PO_ic', ()): 1+0.02},
(1, 4): {('A', ('p',)): 1-alpha, ('kA', ()): 1-alpha, ('T_ic', ()): 1-0.005, ('A', ('i',)): 1-alpha, ('MY_ic', ()): 1-0.01, ('PO_ic', ()): 1+0.02},
(1, 5): {('A', ('p',)): 1-alpha, ('kA', ()): 1-alpha, ('T_ic', ()): 1-0.005, ('A', ('i',)): 1+alpha, ('MY_ic', ()): 1-0.01, ('PO_ic', ()): 1+0.02},
(1, 6): {('A', ('p',)): 1+alpha, ('kA', ()): 1+alpha, ('T_ic', ()): 1-0.005, ('A', ('i',)): 1-alpha, ('MY_ic', ()): 1-0.01, ('PO_ic', ()): 1+0.02},
(1, 7): {('A', ('p',)): 1+alpha, ('kA', ()): 1+alpha, ('T_ic', ()): 1-0.005, ('A', ('i',)): 1+alpha, ('MY_ic', ()): 1-0.01, ('PO_ic', ()): 1+0.02},
(1, 8): {('A', ('p',)): 1+alpha, ('kA', ()): 1-alpha, ('T_ic', ()): 1-0.005, ('A', ('i',)): 1-alpha, ('MY_ic', ()): 1-0.01, ('PO_ic', ()): 1+0.02},
(1, 9): {('A', ('p',)): 1+alpha, ('kA', ()): 1-alpha, ('T_ic', ()): 1-0.005, ('A', ('i',)): 1+alpha, ('MY_ic', ()): 1-0.01, ('PO_ic', ()): 1+0.02},
(2, 2): {('A', ('p',)): 1-alpha, ('A', ('i',)): 1-alpha, ('kA', ()): 1+alpha},
(2, 3): {('A', ('p',)): 1-alpha, ('A', ('i',)): 1-alpha, ('kA', ()): 1-alpha},
(2, 4): {('A', ('p',)): 1-alpha, ('A', ('i',)): 1-alpha, ('kA', ()): 1-alpha},
(3, 2): {('A', ('p',)): 1-alpha, ('A', ('i',)): 1-alpha, ('kA', ()): 1+alpha},
(3, 3): {('A', ('p',)): 1-alpha, ('A', ('i',)): 1-alpha, ('kA', ()): 1-alpha}}
for i in range(1,nfe+1):
if i < nr + 1:
for s in range(1,s_max**i+1):
if s%s_max == 1:
st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),True,{('A',('p',)):1.0,('A',('i',)):1.0,('kA',()):1.0})
else:
scen = s%s_max if s%s_max != 0 else 3
st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,dummy[(i,scen)])
else:
for s in range(1,s_max**nr+1):
st[(i,s)] = (i-1,s,True,st[(i-1,s)][3])
#s_max, nr, alpha = 9, 1, 0.2
#for i in range(1,nfe+1):
# if i < nr + 1:
# for s in range(1,s_max**i+1):
# if s%s_max == 1:
# st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),True,{('A',('p',)):1.0,('A',('i',)):1.0,('kA',()):1.0})
# elif s%s_max == 2:
# st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,{('A',('p',)):1.0+alpha,('A',('i',)):1.0+alpha,('kA',()):1.0-alpha})
# elif s%s_max == 3:
# st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,{('A',('p',)):1.0-alpha,('A',('i',)):1.0+alpha,('kA',()):1.0-alpha})
# elif s%s_max == 4:
# st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,{('A',('p',)):1.0+alpha,('A',('i',)):1.0-alpha,('kA',()):1.0-alpha})
# elif s%s_max == 5:
# st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,{('A',('p',)):1.0-alpha,('A',('i',)):1.0-alpha,('kA',()):1.0-alpha})
# elif s%s_max == 6:
# st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,{('A',('p',)):1.0+alpha,('A',('i',)):1.0+alpha,('kA',()):1.0+alpha})
# elif s%s_max == 7:
# st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,{('A',('p',)):1.0-alpha,('A',('i',)):1.0+alpha,('kA',()):1.0+alpha})
# elif s%s_max == 8:
# st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,{('A',('p',)):1.0+alpha,('A',('i',)):1.0-alpha,('kA',()):1.0+alpha})
# else:
# st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,{('A',('p',)):1.0-alpha,('A',('i',)):1.0-alpha,('kA',()):1.0+alpha})
# else:
# for s in range(1,s_max**nr+1):
# st[(i,s)] = (i-1,s,True,st[(i-1,s)][3])
sr = s_max**nr
# create MHE-NMPC-controller object
c = msMHEGen(d_mod = SemiBatchPolymerization_multistage,
d_mod_mhe = SemiBatchPolymerization,
y=y_vars,
x=x_vars,
x_noisy=x_noisy,
p_noisy=p_noisy,
u=u,
u_bounds = u_bounds,
tf_bounds = tf_bounds,
poi = x_vars,
scenario_tree = st,
robust_horizon = nr,
s_max = sr,
noisy_inputs = False,
noisy_params = False,
adapt_params = False,
update_scenario_tree = False,
process_noise_model = 'params_bias',
uncertainty_set = p_bounds,
confidence_threshold = alpha,
robustness_threshold = 0.05,
obj_type='economic',
nfe_t=nfe,
ncp_t=ncp,
path_constraints=pc)
# arguments for closed-loop simulation:
disturbance_src = {'disturbance_src':'parameter_scenario','scenario':scenario}
cov_matrices = {'y_cov':mcov,'q_cov':qcov,'u_cov':ucov,'p_cov':pcov}
reg_weights = {'K_w':1.0}
stgen_in = {'epc':['PO_ptg','mw','unsat'],'pc':['T_max','T_min','temp_b'],'noisy_ics':noisy_ics,'par_bounds':p_bounds}
# run closed-loop simulation:
performance, iters = c.run(fix_noise=True,
advanced_step=False,
stgen=True,
disturbance_src=disturbance_src,
cov_matrices=cov_matrices,
regularization_weights=reg_weights,
meas_noise=x_measurement,
stgen_args=stgen_in)
c.plant_simulation_model.check_feasibility(display=True)
""" visualization"""
#plot state trajectories and estimates
x = []
for i in range(1,iters+1):
for cp in range(ncp+1):
x.append(x[-cp-1]+c.pc_trajectory['tf',(i,cp)] if i > 1 else c.pc_trajectory['tf',(i,cp)])
x_e = [c.nmpc_trajectory[i,'tf'] for i in range(1,iters)]
for var in poi[:-2]:
if var == 'MX':
for k in [0,1]:
y_e = [c.nmpc_trajectory[i,(var,(k,))] for i in range(1,iters)]
y = [c.monitor[i][var,(1,cp,k,1)] for i in range(1,iters+1) for cp in range(ncp+1)]
plt.figure(), plt.plot(x,y), plt.plot(x_e,y_e,'r',marker='x',linestyle='None'), plt.xlabel('time [min]'), plt.ylabel(var+str(k))
else:
y_e = [c.nmpc_trajectory[i,(var,())] for i in range(1,iters)]
y = [c.monitor[i][var,(1,cp,1)] for i in range(1,iters+1) for cp in range(ncp+1)]
plt.figure(), plt.plot(x,y), plt.plot(x_e,y_e,'r',marker='x',linestyle='None'), plt.xlabel('time [min]'), plt.ylabel(var)
# path constraints
x = []
for i in range(1,iters+1):
for cp in range(1,ncp+1):
x.append(x[-cp]+c.pc_trajectory['tf',(i,cp)] if i > 1 else c.pc_trajectory['tf',(i,cp)])
y = [c.pc_trajectory['T',(i,(cp,))] for i in range(1,iters+1) for cp in range(1,ncp+1)]
plt.figure(), plt.plot(x,y,color='grey'), plt.plot([0,x[-1]],[423.15e-2,423.15e-2],'r--'), plt.plot([0,x[-1]],[373.15e-2,373.15e-2],'r--')
plt.xlabel('time [min]'), plt.ylabel('T')
y = [c.pc_trajectory['Tad',(i,(cp,))] for i in range(1,iters+1) for cp in range(1,ncp+1)]
plt.figure(), plt.plot(x,y,color='grey'), plt.plot([0,x[-1]],[443.15e-2,443.15e-2],'r--')
plt.xlabel('time [min]'), plt.ylabel('Tad')
#plot control profiles
x = [c.nmpc_trajectory[i,'tf'] for i in range(1,iters+1)]
for control in u:
y = [c.nmpc_trajectory[i,control] for i in range(1,iters+1)]
plt.figure(), plt.step(x,y),plt.step([0,x[0]],[y[0],y[0]],'C0'),plt.xlabel('time [min]'), plt.ylabel(control)
# visualize confidence region:
if c.update_scenario_tree:
dimension = 3 # dimension n of the n x n matrix = #DoF
rhs_confidence = chi2.isf(1.0-0.99,dimension) # 0.1**2*5% measurment noise, 95% confidence level, dimension degrees of freedo
rows = {}
# plot cube cube
kA = np.array([0.8,1.2])#*e.nominal_parameter_values['kA',()]
Ap = np.array([0.8,1.2])#*e.nominal_parameter_values['A',('p',)]
Ai = np.array([0.8,1.2])#*e.nominal_parameter_values['A',('i',)]
x = [Ap[0],Ap[1],Ap[0],Ap[1]]
y = [Ai[1],Ai[1],Ai[0],Ai[0]]
X,Y = np.meshgrid(x,y)
Z_u = np.array([[kA[1],kA[1],kA[1],kA[1]] for i in range(len(x))])
Z_l = np.array([[kA[0],kA[0],kA[0],kA[0]] for i in range(len(x))])
aux = {1:X,2:Y,3:(Z_l,Z_u)}
combinations = [[1,2,3],[1,3,2],[3,1,2]]
facets = {}
b = 0
for combination in combinations:
facets[b] = np.array([aux[i] if i != 3 else aux[i][0] for i in combination])
facets[b+1] = np.array([aux[i] if i != 3 else aux[i][1] for i in combination])
b += 2
p_star = np.zeros(dimension)
for key in scenario:
p_star[c.PI_indices[key]]=(1+scenario[key])*c.nominal_parameter_values[key]
p_star[2] *= c.olnmpc.Hrxn['p'].value
# for facet in facets:
# f = open('results/face'+str(facet)+'.txt','wb')
# for i in range(4):
# for j in range(4):
# f.write(str(facets[facet][0][i][j]*c.nominal_parameter_values['A',('i',)]) + '\t' + str(facets[facet][1][i][j]*c.nominal_parameter_values['A',('p',)]) + '\t' + str(facets[facet][2][i][j]*c.nominal_parameter_values['kA',()]*c.olnmpc.Hrxn['p'].value) + '\n')
# f.write('\n')
# f.close()
for r in range(1,7):
A_dict = c.mhe_confidence_ellipsoids[r]
center = np.zeros(dimension)
for par in c.nmpc_trajectory[r,'e_pars']:
center[c.PI_indices[par]] = c.nmpc_trajectory[r,'e_pars'][par]
for m in range(dimension):
rows[m] = np.array([A_dict[(m,i)] for i in range(dimension)])
A = 1/rhs_confidence*np.array([np.array(rows[i]) for i in range(dimension)])
U, s, V = np.linalg.svd(A) # singular value decomposition
radii = 1/np.sqrt(s) # length of half axes, V rotation
# transform in polar coordinates for simpler waz of plotting
u = np.linspace(0.0, 2.0 * np.pi, 30) # angle = idenpendent variable
v = np.linspace(0.0, np.pi, 30) # angle = idenpendent variable
x = radii[0] * np.outer(np.cos(u), np.sin(v)) # x-coordinate
y = radii[1] * np.outer(np.sin(u), np.sin(v)) # y-coordinate
z = radii[2] * np.outer(np.ones_like(u), np.cos(v))
#f = open('results/data_ellipsoid'+str(r)+'.txt','wb')
for i in range(len(x[0][:])):
for j in range(len(x[:][0])):
[x[i][j],y[i][j],z[i][j]] = np.dot(U,[x[i][j],y[i][j],z[i][j]]) + center
z[i][j] *= c.olnmpc.Hrxn['p'].value
#f.write(str(x[i][j]) + '\t' + str(y[i][j]) + '\t' + str(z[i][j]) + '\n')
#f.write('\n')
#f.close()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(x,y,z,alpha = 0.1, edgecolor='r')
ax.scatter(center[0],center[1],center[2]*c.olnmpc.Hrxn['p'].value,marker='o',color='r')
ax.scatter(p_star[0],p_star[1],p_star[2],marker='o',color='k')
for i in facets:
ax.plot_surface(facets[i][0]*c.nominal_parameter_values['A',('i',)],facets[i][1]*c.nominal_parameter_values['A',('p',)],facets[i][2]*c.nominal_parameter_values['kA',()]*c.olnmpc.Hrxn['p'].value,edgecolors='k',color='grey',alpha=0.1)
scaling = np.array([0.5,1.5])
ax.set_xlim(scaling*c.nominal_parameter_values['A',('i',)])
ax.set_xlabel('\n' + r'$A_i$ [$\frac{m^3}{mol s}$]', linespacing=1.2)
ax.w_xaxis.set_pane_color((1.0,1.0,1.0,1.0))
ax.set_xticks(np.array([2.5e5,4e5,5.5e5])*1e-4)
ax.set_ylim(scaling*c.nominal_parameter_values['A',('p',)])
ax.set_ylabel('\n' + r'$A_p$ [$\frac{m^3}{mol s}$]', linespacing=1.2)
ax.w_yaxis.set_pane_color((1.0,1.0,1.0,1.0))
ax.set_yticks(np.array([8e3,14e3,20e3])*1e-4)
ax.set_zlim(scaling*c.nominal_parameter_values['kA',()]*c.olnmpc.Hrxn['p'].value)
ax.set_zlabel('\n' + r'$kA$ [$\frac{kJ}{K}$]', linespacing=1.2)
ax.w_zaxis.set_pane_color((1.0,1.0,1.0,1.0))
ax.set_zticks(np.array([0.04*2,0.07*2,0.1*2])*c.olnmpc.Hrxn['p'].value)
fig.tight_layout()
#fig.savefig('results/125grid/'+str(r)+'.pdf')
#ax.tick_params(axis='both',direction='in')
#ax.view_init(15,35)
# plot half axis
plt.xlabel(r'$\Delta A_i$')
plt.ylabel(r'$\Delta A_p$')
# plot CPU times:
x = range(1,iters)
for k in ['olnmpc','lsmhe']:
utime = [sum(performance[k,i][1][l].ru_utime-performance[k,i][0][l].ru_utime for l in [1]) for i in range(1,iters)]
stime = [sum(performance[k,i][1][l].ru_stime-performance[k,i][0][l].ru_stime for l in [1]) for i in range(1,iters)]
plt.figure(), plt.title(k+' - required CPU time')
plt.bar(x,utime,label='utime'), plt.bar(x,stime,bottom=utime,color='C1',label='stime')
plt.ylabel(r'$t_{CPU} [s]$'), plt.xlabel('iteration'), plt.legend() |
25,347 | bbec036a458f9418bd3bbbc5ec0425bf055ed00a | from pypokerstove import *
def test_Construct():
    """A freshly built Omaha-high evaluator uses suits and a 5-card board."""
    oeval = OmahaHighHandEvaluator()
    assert True == oeval.usesSuits()
    assert 5 == oeval.boardSize()
def test_RankEval():
    """2c3c against board 2c3c4c evaluates to two pair, threes over twos."""
    oeval = OmahaHighHandEvaluator()
    hand = CardSet("2c3c")
    board = CardSet("2c3c4c")
    poker_eval = oeval.evaluateRanks(hand, board)
    assert poker_eval.type() == TWO_PAIR
    assert poker_eval.majorRank() == Rank("3")
    assert poker_eval.minorRank() == Rank("2")
|
25,348 | 844eac28cff10313766ee69d38382f213b026665 | import textmining
import csv
import os
from tensorly.tenalg import khatri_rao
from tensorly.decomposition import parafac
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from scipy import spatial
import plotly
from plotly.graph_objs import *
from collections import deque
import matplotlib.pyplot as plt
import re
class TermDocumentTensor():
    """Build a 3-way term/document tensor from a directory of files and
    decompose it with PARAFAC.

    Tensor layout is tdt[slice][document][term]: slice 0 holds term
    frequencies, slice 1 holds the position (byte or word count) of each
    term's first occurrence in the document.
    """

    def __init__(self, directory, type="binary"):
        # type: "binary" reads files byte-wise; any other value takes the
        # text-corpus path
        self.vocab = []
        self.tdt = []
        self.corpus_names = []
        self.directory = directory
        self.type = type
        self.rank_approximation = None
        self.factor_matrices = []
        # These are the output of our tensor decomposition.
        self.factors = []

    def create_factor_matrices(self):
        """Recombine the PARAFAC factors into the three mode unfoldings.

        NOTE(review): this overwrites self.factors with the unfoldings, so a
        second call operates on the wrong inputs — confirm that is intended.
        """
        tdm_1 = np.matmul(self.factors[0], np.transpose(khatri_rao([self.factors[2], self.factors[1]])))
        tdm_2 = np.matmul(self.factors[1], np.transpose(khatri_rao([self.factors[2], self.factors[0]])))
        tdm_3 = np.matmul(self.factors[2], np.transpose(khatri_rao([self.factors[1], self.factors[0]])))
        self.factors = [tdm_1, tdm_2, tdm_3]
        return self.factors

    def generate_cosine_similarity_matrix(self, matrix):
        """Return the pairwise cosine-similarity matrix for the rows of
        `matrix`; scipy's cosine() is a distance, so `*-1 + 1` flips it back
        into a similarity."""
        cosine_sim = []
        for entry in matrix:
            sim = []
            for other_entry in matrix:
                sim.append(spatial.distance.cosine(entry, other_entry)*-1 + 1)
            cosine_sim.append(sim)
        return cosine_sim

    def get_estimated_rank(self):
        """
        Getting the rank of a tensor is an NP hard problem
        Therefore we use an estimation based on the size of the dimensions of our tensor.
        These numbers are grabbed from Table 3.3 of Tammy Kolda's paper:
        http://www.sandia.gov/~tgkolda/pubs/pubfiles/TensorReview.pdf
        :return:
        """
        I = len(self.tdt[0])      # number of documents
        J = len(self.tdt[0][0])   # number of terms
        K = len(self.tdt)         # number of slices (2: counts + first occurrences)
        if I == 1 or J == 1 or K == 1:
            return 1
        elif I == J == K == 2:
            return 2
        elif I == J == 3 and K == 2:
            return 3
        elif I == 5 and J == K == 3:
            return 5
        elif I >= 2 * J and K == 2:
            return 2 * J
        elif 2 * J > I > J and K == 2:
            return I
        elif I == J and K == 2:
            return I
        elif I >= J * K:
            return J * K
        elif J * K - J < I < J * K:
            return I
        elif I == J * K - I:
            return I
        else:
            print(I, J, K, "did not have an exact estimation")
            return min(I * J, I * K, J * K)

    def print_formatted_term_document_tensor(self):
        """Print each slice: the vocabulary header, then one row per document."""
        for matrix in self.tdt:
            print(self.vocab)
            for i in range(len(matrix)):
                print(self.corpus_names[i], matrix[i])

    def create_term_document_tensor(self, **kwargs):
        """Dispatch to the binary or text tensor builder based on self.type."""
        if self.type == "binary":
            return self.create_binary_term_document_tensor(**kwargs)
        else:
            # BUG FIX: the original called self.create_text_corpus(**kwargs),
            # which does not exist on this class; the text builder is
            # create_term_document_tensor_text (which takes no kwargs).
            return self.create_term_document_tensor_text()

    def create_binary_term_document_tensor(self, **kwargs):
        """Build the tensor from raw bytes: tokenize each file into hex
        byte n-grams, recording term frequencies and first occurrences."""
        doc_content = []
        first_occurences_corpus = {}
        ngrams = kwargs["ngrams"] if kwargs["ngrams"] is not None else 1
        print(ngrams)
        for file_name in os.listdir(self.directory):
            previous_bytes = deque()
            first_occurences = {}
            byte_count = 0
            with open(self.directory + "/" + file_name, "rb") as file:
                my_string = ""
                while True:
                    byte_count += 1
                    current_byte = file.read(1).hex()
                    if not current_byte:
                        break
                    if byte_count >= ngrams:
                        # n-gram = the (n-1) previous bytes + the current one
                        byte_gram = "".join(list(previous_bytes)) + current_byte
                        if byte_gram not in first_occurences:
                            first_occurences[byte_gram] = byte_count
                        # emit non-overlapping n-grams into the document text
                        if byte_count % ngrams == 0:
                            my_string += byte_gram + " "
                        if ngrams > 1:
                            previous_bytes.popleft()
                    if ngrams > 1:
                        previous_bytes.append(current_byte)
            first_occurences_corpus[file_name] = first_occurences
            doc_content.append(my_string)
        doc_names = os.listdir(self.directory)
        # Convert a collection of text documents to a matrix of token counts
        vectorizer = TfidfVectorizer(use_idf=False)
        # Learn the vocabulary dictionary and return term-document matrix.
        x1 = vectorizer.fit_transform(doc_content).toarray()
        self.vocab = ["vocab"]
        self.vocab.extend(vectorizer.get_feature_names())
        tdm = []
        for i in range(len(doc_names)):
            row = x1[i]
            tdm.append(row)
        tdm_first_occurences = []
        self.corpus_names = doc_names
        # Create a first occurences matrix that corresponds with the tdm
        for j in range(len(doc_names)):
            item = doc_names[j]
            this_tdm = []
            for i in range(0, len(tdm[0])):
                word = self.vocab[i]
                try:
                    this_tdm.append(first_occurences_corpus[item][word])
                except KeyError:
                    # term never occurs in this document
                    # (was a bare `except:`, which hid real bugs)
                    this_tdm.append(0)
            tdm_first_occurences.append(this_tdm)
        tdt = [tdm, tdm_first_occurences]
        self.tdt = tdt
        return self.tdt

    def convert_term_document_tensor_to_csv(self):
        """Write the frequency slice of the tensor to test.csv."""
        try:
            tdt = self.tdt
            # if the tdt is 3d or greater, export only the first slice
            if isinstance(self.tdt[0][0], list):
                tdt = self.tdt[0]
            with open("test.csv", "w", newline='') as csv_file:
                writer = csv.writer(csv_file)
                for entry in tdt:
                    num_list = map(str, entry)
                    writer.writerow(num_list)
        except IndexError:
            print("You must create the term document tensor")
            return IndexError

    def create_term_document_tensor_text(self):
        """Build the tensor from text files: word counts plus the word-index
        of each term's first occurrence per document."""
        mydoclist = []
        tdm = textmining.TermDocumentMatrix()
        files = []
        first_occurences_corpus = {}
        text_names = []
        number_files = 0
        for file in os.listdir(self.directory):
            number_files += 1
            first_occurences = {}
            words = 0
            with open(self.directory + "/" + file, "r") as shake:
                files.append(file)
                lines_100 = ""
                while True:
                    my_line = shake.readline()
                    if not my_line:
                        break
                    # NOTE(review): re.sub's result is discarded here, so
                    # punctuation is NOT actually stripped — confirm intent.
                    re.sub(r'\W+', '', my_line)
                    for word in my_line.split():
                        words += 1
                        if word not in first_occurences:
                            first_occurences[word] = words
                    lines_100 += my_line
                first_occurences_corpus[file] = first_occurences
                tdm.add_doc(lines_100)
            mydoclist.append(file)
            text_names.append(file)
        tdm = list(tdm.rows(cutoff=1))
        tdt = [0, 0]
        tdm_first_occurences = []
        # Create a first occurences matrix that corresponds with the tdm
        for j in range(len(text_names)):
            item = text_names[j]
            this_tdm = []
            for i in range(0, len(tdm[0])):
                word = tdm[0][i]
                try:
                    this_tdm.append(first_occurences_corpus[item][word])
                except KeyError:
                    # term never occurs in this document
                    # (was a bare `except:`, which hid real bugs)
                    this_tdm.append(0)
            tdm_first_occurences.append(this_tdm)
        self.vocab = tdm.pop(0)
        self.corpus_names = mydoclist
        tdt[0] = tdm
        tdt[1] = tdm_first_occurences
        tdt = np.asanyarray(tdt)
        self.tdt = tdt
        return tdt

    def parafac_decomposition(self):
        """Run CP/PARAFAC on the tensor at the estimated rank; stores and
        returns the factor matrices."""
        self.factors = parafac(np.array(self.tdt), rank=self.get_estimated_rank())
        return self.factors
def main():
    """Build the binary term-document tensor for `zeus_binaries`, decompose
    it, and display a heat map of document cosine similarities."""
    tdt = TermDocumentTensor("zeus_binaries", type="binary")
    tdt.create_binary_term_document_tensor(ngrams=1)
    tdt.convert_term_document_tensor_to_csv()
    print(tdt.get_estimated_rank())
    factors = tdt.parafac_decomposition()
    factor_matrices = tdt.create_factor_matrices()
    cos_sim = tdt.generate_cosine_similarity_matrix(factor_matrices[1])
    #tdt.print_formatted_term_document_tensor()
    # SECURITY(review): hard-coded Plotly credentials checked into source —
    # move to an environment variable/config and rotate this API key.
    plotly.tools.set_credentials_file(username='MaxPoole', api_key='2ajqCLZjiLNDFxgyLtGn')
    fig, ax1 = plt.subplots(1, 1)
    ax1.imshow(cos_sim, cmap='hot')
    print(tdt.corpus_names)
    plt.show()


if __name__ == "__main__":
    # Guard added so importing this module no longer runs the full pipeline.
    main()
25,349 | fb8f96d3da134ab975587b4afd2323a8c0305ac3 | from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth import get_user_model
from app_dir.utils.models import BaseUUIDModel
from app_dir.address.models import Suburb, State, Country
User = get_user_model()
class UserRole(models.Model):
    """ Representation of User Roles. """
    # short human-readable role label
    name = models.CharField(max_length=20, help_text=_('Role Name'))

    class Meta:
        verbose_name = _('User Role')
        verbose_name_plural = _('User Roles')

    def __str__(self):
        return self.name
class Account(models.Model):
    """ Representation of User account: a one-to-one extension of the auth
    user carrying its role. """
    # on_delete made explicit: CASCADE matches the implicit pre-Django-2.0
    # default and the argument is required from Django 2.0 onward.
    user = models.OneToOneField(User, on_delete=models.CASCADE,
                                help_text=_('User'), related_name='account')
    role = models.ForeignKey(UserRole, on_delete=models.CASCADE,
                             help_text=_('Role'))

    class Meta:
        verbose_name = _('User Account')
        verbose_name_plural = _('User Accounts')

    def __str__(self):
        return f"{self.user} at {self.role}"

    @property
    def draft_application(self):
        """Return (creating on first access) this account's draft Application."""
        # local import avoids a circular dependency with app_dir.permit
        from app_dir.permit.models import Application
        draft, created = Application.objects.get_or_create(
            applicant_account=self,
            is_draft=True
        )
        return draft
25,350 | b535709ffb63de0f414ec876666a40b1f41b2aad | # -*- coding:utf-8 -*-
'''
二分查找的四种变形:
查找第一个等于给定值的索引 FindFirstEqTargetBinarySearch
查找最后一个等于给定值的索引 FindLastEqTargetBinarySearch
查找第一个大于等于给定值的索引 FindFirstGEqTargetBinarySearch
查找最后一个小于等于给定值的索引 FindLastLEqTargetBinarySearch
'''
def FindFirstEqTargetBinarySearch(arr, value):
    """Return the index of the FIRST element equal to value, or None.

    arr must be sorted in ascending order.
    """
    low = 0
    high = len(arr) - 1
    while low <= high:
        # low + (high - low) // 2 stays exact for any index size; the original
        # int((low + high) / 2) round-trips through a float and loses
        # precision once the sum exceeds 2**53
        mid = low + (high - low) // 2
        if arr[mid] > value:
            high = mid - 1
        elif arr[mid] < value:
            low = mid + 1
        else:
            # among all elements equal to value, keep the smallest index
            if mid == 0 or arr[mid - 1] != value:
                return mid
            high = mid - 1
    return None
def FindLastEqTargetBinarySearch(arr, value):
    """Return the index of the LAST element equal to value, or None.

    arr must be sorted in ascending order.
    """
    low = 0
    high = len(arr) - 1
    while low <= high:
        # overflow/precision-safe midpoint (original used int((low+high)/2),
        # which loses precision via float for very large indices)
        mid = low + (high - low) // 2
        if arr[mid] > value:
            high = mid - 1
        elif arr[mid] < value:
            low = mid + 1
        else:
            # among all elements equal to value, keep the largest index
            if mid == len(arr) - 1 or arr[mid + 1] != value:
                return mid
            low = mid + 1
    return None
def FindFirstGEqTargetBinarySearch(arr, value):
    """Return the index of the FIRST element >= value, or None.

    arr must be sorted in ascending order.
    """
    low = 0
    high = len(arr) - 1
    while low <= high:
        # overflow/precision-safe midpoint (original used int((low+high)/2))
        mid = low + (high - low) // 2
        if arr[mid] < value:
            low = mid + 1
        else:
            # arr[mid] >= value; it is the answer iff its predecessor is < value
            if mid == 0 or arr[mid - 1] < value:
                return mid
            high = mid - 1
    return None
def FindLastLEqTargetBinarySearch(arr, value):
    """Return the index of the LAST element <= value, or None.

    arr must be sorted in ascending order.
    """
    low = 0
    high = len(arr) - 1
    while low <= high:
        # overflow/precision-safe midpoint (original used int((low+high)/2))
        mid = low + (high - low) // 2
        if arr[mid] > value:
            high = mid - 1
        else:
            # arr[mid] <= value; it is the answer iff its successor is > value
            if (mid == len(arr) - 1) or (arr[mid + 1] > value):
                return mid
            low = mid + 1
    return None
if __name__ == '__main__':
    # demo: for target 1 the expected output is 0, 3, 0, 3
    arr = [1,1,1,1,3,4]
    print(FindFirstEqTargetBinarySearch(arr, 1))
    print(FindLastEqTargetBinarySearch(arr, 1))
    print(FindFirstGEqTargetBinarySearch(arr, 1))
    print(FindLastLEqTargetBinarySearch(arr, 1))
25,351 | a75675597b6ac6a92419706b35b8af8aa17f9c05 | import random
class RandomMover:
    """Mover whose step decision is a fair coin flip."""

    def move(self):
        """Return True with probability 0.5 via one uniform draw on [0, 1]."""
        draw = random.uniform(0, 1)
        return draw < 0.5
|
25,352 | 601aafd9c09ea36a4b4d72c619dbd6dfdc0e8cb1 | __author__ = 'Jia'
'''
Given two binary strings, return their sum (also a binary string).
For example,
a = "11"
b = "1"
Return "100".
'''
class Solution:
# @param a, a string
# @param b, a string
# @return a string
def addBinary(self, a, b):
carry = 0
index = 0
result = []
while index < len(a) and index < len(b):
num1 = int(a[len(a) - 1 - index])
num2 = int(b[len(b) - 1 - index])
tmp = num1 + num2 + carry
result.append(str(tmp % 2))
carry = tmp / 2
index += 1
while index < len(a):
num1 = int(a[len(a) - 1 - index])
tmp = num1 + carry
result.append(str(tmp % 2))
carry = tmp / 2
index += 1
while index < len(b):
num1 = int(b[len(b) - 1 - index])
tmp = num1 + carry
result.append(str(tmp % 2))
carry = tmp / 2
index += 1
if carry == 1:
result.append(str(1))
result.reverse()
return ''.join(result)
s = Solution()
# print() as a function works on both Python 2 and 3; the original
# `print s.addBinary(...)` statement is a SyntaxError on Python 3
print(s.addBinary('10', '110'))
25,353 | 82bcaaf259191245dbc8111acdd1a7432552171d | import graphene
from graphene import relay
from graphene_django import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from cards.models import (
Color, Layout, Supertype, Type, Subtype, CardName, Block, Expansion,
Rarity, Artist, Watermark, Border, Edition, Format, LegalityType,
Legality, Language, ForeignVersion, Ruling, FlipCardPair,
SplitCardPair, MeldCardTriplet
)
class ColorNode(DjangoObjectType):
    """Relay node for the Color model; connections filter on `name`."""
    class Meta:
        model = Color
        filter_fields = ['name']
        interfaces = (relay.Node,)
class LayoutNode(DjangoObjectType):
    """Relay node for the Layout model; connections filter on `name`."""
    class Meta:
        model = Layout
        filter_fields = ['name']
        interfaces = (relay.Node,)
class SupertypeNode(DjangoObjectType):
    """Relay node for the Supertype model; connections filter on `name`."""
    class Meta:
        model = Supertype
        filter_fields = ['name']
        interfaces = (relay.Node,)
class TypeNode(DjangoObjectType):
    """Relay node for the Type model; connections filter on `name`."""
    class Meta:
        model = Type
        filter_fields = ['name']
        interfaces = (relay.Node,)
class SubtypeNode(DjangoObjectType):
    """Relay node for the Subtype model; connections filter on `name`."""
    class Meta:
        model = Subtype
        filter_fields = ['name']
        interfaces = (relay.Node,)
class CardNameNode(DjangoObjectType):
    """Relay node for the CardName model; connections filter on `name`."""
    class Meta:
        model = CardName
        filter_fields = ['name']
        interfaces = (relay.Node,)
class BlockNode(DjangoObjectType):
    """Relay node for the Block model; connections filter on `name`."""
    class Meta:
        model = Block
        filter_fields = ['name']
        interfaces = (relay.Node,)
class ExpansionNode(DjangoObjectType):
    """Relay node for the Expansion model; connections filter on `name`."""
    class Meta:
        model = Expansion
        filter_fields = ['name']
        interfaces = (relay.Node,)
class RarityNode(DjangoObjectType):
    """Relay node for the Rarity model; connections filter on `name`."""
    class Meta:
        model = Rarity
        filter_fields = ['name']
        interfaces = (relay.Node,)
class ArtistNode(DjangoObjectType):
    """Relay node for the Artist model; connections filter on `name`."""
    class Meta:
        model = Artist
        filter_fields = ['name']
        interfaces = (relay.Node,)
class WatermarkNode(DjangoObjectType):
    """Relay node for the Watermark model; connections filter on `name`."""
    class Meta:
        model = Watermark
        filter_fields = ['name']
        interfaces = (relay.Node,)
class BorderNode(DjangoObjectType):
    """Relay node for the Border model; connections filter on `name`."""
    class Meta:
        model = Border
        filter_fields = ['name']
        interfaces = (relay.Node,)
class EditionNode(DjangoObjectType):
    """Relay node for the Edition model; connections filter on `card_name`."""
    class Meta:
        model = Edition
        filter_fields = ['card_name']
        interfaces = (relay.Node,)
class FormatNode(DjangoObjectType):
    """Relay node for the Format model; connections filter on `name`."""
    class Meta:
        model = Format
        filter_fields = ['name']
        interfaces = (relay.Node,)
class LegalityTypeNode(DjangoObjectType):
    """Relay node for the LegalityType model; connections filter on `name`."""
    class Meta:
        model = LegalityType
        filter_fields = ['name']
        interfaces = (relay.Node,)
class LegalityNode(DjangoObjectType):
    """Relay node for the Legality model; connections filter on `card_name`."""
    class Meta:
        model = Legality
        filter_fields = ['card_name']
        interfaces = (relay.Node,)
class LanguageNode(DjangoObjectType):
    """Relay node for the Language model; connections filter on `name`."""
    class Meta:
        model = Language
        filter_fields = ['name']
        interfaces = (relay.Node,)
class ForeignVersionNode(DjangoObjectType):
    """Relay node for the ForeignVersion model; connections filter on `edition`."""
    class Meta:
        model = ForeignVersion
        filter_fields = ['edition']
        interfaces = (relay.Node,)
class RulingNode(DjangoObjectType):
    """Relay node for the Ruling model; connections filter on `card_name`."""
    class Meta:
        model = Ruling
        filter_fields = ['card_name']
        interfaces = (relay.Node,)
class FlipCardPairNode(DjangoObjectType):
    """Relay node for the FlipCardPair model; connections are unfiltered."""
    class Meta:
        model = FlipCardPair
        filter_fields = []
        interfaces = (relay.Node,)
class SplitCardPairNode(DjangoObjectType):
    """Relay node for the SplitCardPair model; connections are unfiltered."""
    class Meta:
        model = SplitCardPair
        filter_fields = []
        interfaces = (relay.Node,)
class MeldCardTripletNode(DjangoObjectType):
    """Relay node for the MeldCardTriplet model; connections are unfiltered."""
    class Meta:
        model = MeldCardTriplet
        filter_fields = []
        interfaces = (relay.Node,)
class Query(object):
    """Root query mixin: one relay node field and one filterable connection
    per card model."""
    color = relay.Node.Field(ColorNode)
    layout = relay.Node.Field(LayoutNode)
    supertype = relay.Node.Field(SupertypeNode)
    type = relay.Node.Field(TypeNode)
    # NOTE(review): `subtypes` is plural while the other single-node fields
    # are singular; renaming it would break existing GraphQL clients, so it
    # is documented rather than changed.
    subtypes = relay.Node.Field(SubtypeNode)
    card_name = relay.Node.Field(CardNameNode)
    block = relay.Node.Field(BlockNode)
    expansion = relay.Node.Field(ExpansionNode)
    rarity = relay.Node.Field(RarityNode)
    artist = relay.Node.Field(ArtistNode)
    watermark = relay.Node.Field(WatermarkNode)
    border = relay.Node.Field(BorderNode)
    edition = relay.Node.Field(EditionNode)
    format = relay.Node.Field(FormatNode)
    legality_type = relay.Node.Field(LegalityTypeNode)
    legality = relay.Node.Field(LegalityNode)
    language = relay.Node.Field(LanguageNode)
    foreign_version = relay.Node.Field(ForeignVersionNode)
    ruling = relay.Node.Field(RulingNode)
    flip_card_pair = relay.Node.Field(FlipCardPairNode)
    split_card_pair = relay.Node.Field(SplitCardPairNode)
    meld_card_triplet = relay.Node.Field(MeldCardTripletNode)
    all_colors = DjangoFilterConnectionField(ColorNode)
    all_layouts = DjangoFilterConnectionField(LayoutNode)
    all_supertypes = DjangoFilterConnectionField(SupertypeNode)
    all_types = DjangoFilterConnectionField(TypeNode)
    all_subtypes = DjangoFilterConnectionField(SubtypeNode)
    all_card_names = DjangoFilterConnectionField(CardNameNode)
    all_blocks = DjangoFilterConnectionField(BlockNode)
    all_expansions = DjangoFilterConnectionField(ExpansionNode)
    all_rarities = DjangoFilterConnectionField(RarityNode)
    all_artists = DjangoFilterConnectionField(ArtistNode)
    all_watermarks = DjangoFilterConnectionField(WatermarkNode)
    all_borders = DjangoFilterConnectionField(BorderNode)
    all_editions = DjangoFilterConnectionField(EditionNode)
    all_formats = DjangoFilterConnectionField(FormatNode)
    all_legality_types = DjangoFilterConnectionField(LegalityTypeNode)
    all_legalities = DjangoFilterConnectionField(LegalityNode)
    all_languages = DjangoFilterConnectionField(LanguageNode)
    all_foreign_versions = DjangoFilterConnectionField(ForeignVersionNode)
    all_rulings = DjangoFilterConnectionField(RulingNode)
    all_flip_card_pairs = DjangoFilterConnectionField(FlipCardPairNode)
    all_split_card_pairs = DjangoFilterConnectionField(SplitCardPairNode)
    all_meld_card_triplets = DjangoFilterConnectionField(MeldCardTripletNode)
|
25,354 | 9219b92ad501b6cb8c7fddc3f5921c9541cc0134 | # Problem No.: 2920
# Solver: Jinmin Goh
# Date: 20191126
# URL: https://www.acmicpc.net/problem/2920
import sys
# Read the eight-number scale from stdin and classify its ordering.
num = [int(token) for token in input().split()]
# a run is ascending/descending only if strictly ordered at every step
ascFlag = True
desFlag = True
# generalized: range(1, len(num)) instead of the hard-coded range(1, 8),
# so any input length works (identical for the problem's 8 numbers)
for i in range(1, len(num)):
    if num[i] >= num[i - 1]:
        desFlag = False
    if num[i] <= num[i - 1]:
        ascFlag = False
if desFlag:
    print("descending")
if ascFlag:
    print("ascending")
if not desFlag and not ascFlag:
    print("mixed")
25,355 | d7f82383020406931f7ee70d2bedb620f43e98c9 | """
The sequence of triangle numbers is generated by adding the natural numbers. So the 7th triangle number
would be 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28.
The first ten terms would be: 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
We can see that 28 is the first triangle number to have over five divisors.
What is the value of the first triangle number to have over five hundred divisors?
Visual representation of the problem
n: 1 2 3 4 5 6
* * * * * *
* * * * * * * * * *
* * * * * * * * * * * *
* * * * * * * * * * * *
* * * * * * * * * *
* * * * * *
n+1: 2 3 4 5 6 7
Solution approach:
Step 1: Find the number of dots in each triangle -- we can multiply n by n+1 and divide the result by 2
Step 2: Find factors of the number from Step 1
"""
# %%
def triangle_dots(n):
    """Return the nth triangle number, n*(n+1)/2, using exact integer math.

    The original int((n*(n+1)/2)) round-trips through a float, which loses
    precision once n*(n+1) exceeds 2**53; floor division stays exact.
    """
    return n * (n + 1) // 2
def natural_num_factors(x):
    """Return the set of all positive divisors of x (x >= 1).

    BUG FIX: the original tested `if r in x:` — membership on an int, which
    raises TypeError on the very first divisor. The intent (stop once the
    complementary divisor has been seen) is realized here by walking divisor
    pairs (d, x // d) only up to sqrt(x).
    """
    factors = set()
    d = 1
    while d * d <= x:
        if x % d == 0:
            factors.add(d)
            factors.add(x // d)
        d += 1
    return factors
def first_n_divisors(n):
    """Return the first triangle number with more than n divisors.

    BUG FIX: the original started scanning at triangle index n, which only
    happens to work when the answer's index exceeds n; starting at 1
    guarantees the FIRST qualifying triangle number is returned.
    """
    seq_ = None
    for s in range(1, int(1e9)):
        seq_ = triangle_dots(s)
        seq_factors = natural_num_factors(seq_)
        if len(seq_factors) > n:
            break
    return seq_
|
25,356 | a63a4224da4a823e5354eb29cc13372d36f33a4e | __all__ = ['ttypes', 'constants', 'SecurityCamera']
|
25,357 | 99bb29f7e2be6cae47e4c46c154e318b87a8610e | import os
import argparse
import pandas as pd
from utils_ import read_json, save_json, find_json_files
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--grouped_ICD9", default=None, type=int, required=True,
                        help = "Flag to define the input data (grouped ICD9 version or non-grouped ICD9 version).")
    parser.add_argument("--umls_codes_path", default=None, type=str, required=True,
                        help = "The path where the mapped extracted UMLS codes are stored.")
    parser.add_argument("--employment_mapping_path", default=None, type=str, required=True,
                        help = "The path of the employment mapping csv file.")
    parser.add_argument("--household_mapping_path", default=None, type=str, required=True,
                        help = "The path of the household mapping csv file.")
    parser.add_argument("--housing_mapping_path", default=None, type=str, required=True,
                        help = "The path of the housing mapping csv file.")
    args = parser.parse_args()

    if args.grouped_ICD9:
        data_init = read_json('data/processed_data/3_data_task_valid_grouped_icd9.json')
    else:
        data_init = read_json('data/processed_data/3_data_task_valid.json')
    social_files = find_json_files(args.umls_codes_path)

    # CUI -> description lookup tables for the three social dimensions
    employment_mapping = pd.read_csv(args.employment_mapping_path)
    employment_umls_codes = employment_mapping['CUI'].tolist()
    employment_textual_description = employment_mapping['Description'].tolist()
    household_mapping = pd.read_csv(args.household_mapping_path)
    household_umls_codes = household_mapping['CUI'].tolist()
    household_textual_description = household_mapping['Description'].tolist()
    housing_mapping = pd.read_csv(args.housing_mapping_path)
    housing_umls_codes = housing_mapping['CUI'].tolist()
    housing_textual_description = housing_mapping['Description'].tolist()

    for f in social_files:
        s_f = read_json(f)
        # the file name encodes the two data_init keys as "<k1>_<k2>.json"
        name = f.split('/')[-1].split('.')[0]
        k1 = name.split('_')[0]
        k2 = name.split('_')[1]
        data_init[k1][k2]['notes_info'] = {'umls_codes': s_f['umls_codes'],
                                           'textual_description': s_f['textual_description']}
        data_init[k1][k2]['social_info'] = {'employment': {'umls_codes': [],
                                                           'textual_description': []},
                                            'housing': {'umls_codes': [],
                                                        'textual_description': []},
                                            'household_composition': {'umls_codes': [],
                                                                      'textual_description': []}}
        # list.index raises ValueError when a code is absent from a mapping;
        # AttributeError covers NaN descriptions coming out of pandas. The
        # original bare `except:` swallowed *every* exception (including
        # KeyboardInterrupt/SystemExit), hiding real bugs.
        for c in s_f['umls_codes']:
            # Treat employment codes
            try:
                employment_index = employment_umls_codes.index(c)
                data_init[k1][k2]['social_info']['employment']['umls_codes'].append(c)
                data_init[k1][k2]['social_info']['employment']['textual_description'].append(employment_textual_description[employment_index].lower())
            except (ValueError, AttributeError):
                pass
            # Treat household
            try:
                household_index = household_umls_codes.index(c)
                data_init[k1][k2]['social_info']['household_composition']['umls_codes'].append(c)
                data_init[k1][k2]['social_info']['household_composition']['textual_description'].append(household_textual_description[household_index].lower())
            except (ValueError, AttributeError):
                pass
            # Treat housing
            try:
                housing_index = housing_umls_codes.index(c)
                data_init[k1][k2]['social_info']['housing']['umls_codes'].append(c)
                data_init[k1][k2]['social_info']['housing']['textual_description'].append(housing_textual_description[housing_index].lower())
            except (ValueError, AttributeError):
                pass

    output_path = "data/processed_data/"
    if args.grouped_ICD9:
        save_json(data_init, output_path + '4_data_after_adding_notes_info_grouped_icd9.json')
    else:
        save_json(data_init, output_path + '4_data_after_adding_notes_info.json')
25,358 | e80e6bfd0276d9aa3f462f8f7dbeebb45a390d25 | ##Problem 8
##Find the thirteen adjacent digits in the 1000-digit number that have the greatest product. What is the value of this product?
##
thousand_digit = "7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450"
n = 13
current_maximum = 1
for k in range(n): #clean up later
current_maximum = current_maximum * int(thousand_digit[k])
for i in range(1000-n): #Loop through 1000-string
current_product = 1
for l in range(n): #Find product of n consecutive digits
current_product = current_product * int(thousand_digit[l+i])
if current_product > current_maximum:
current_maximum = current_product
print(current_maximum) |
25,359 | 425aedfbd4d9aaf6e7e154332ab083437d6b95e4 | import argparse
from functools import partial
import re
import time
import datetime
import numpy as np
from PIL import Image, ImageDraw
import svgwrite
import gstreamer
import requests
from pose_engine import PoseEngine
# Keypoint-name pairs joined by a line when drawing the detected skeleton.
EDGES = (
    ("nose", "left eye"),
    ("nose", "right eye"),
    ("nose", "left ear"),
    ("nose", "right ear"),
    ("left ear", "left eye"),
    ("right ear", "right eye"),
    ("left eye", "right eye"),
    ("left shoulder", "right shoulder"),
    ("left shoulder", "left elbow"),
    ("left shoulder", "left hip"),
    ("right shoulder", "right elbow"),
    ("right shoulder", "right hip"),
    ("left elbow", "left wrist"),
    ("right elbow", "right wrist"),
    ("left hip", "right hip"),
    ("left hip", "left knee"),
    ("right hip", "right knee"),
    ("left knee", "left ankle"),
    ("right knee", "right ankle"),
)
def shadow_text(dwg, x, y, text, font_size=16):
    """Draw `text` on the SVG canvas with a 1px black drop shadow.

    The shadow is added first (offset one pixel down-right), then the white
    foreground text on top at (x, y).
    """
    font_style = "font-family:sans-serif"
    for dx, dy, fill_color in ((1, 1, "black"), (0, 0, "white")):
        dwg.add(
            dwg.text(
                text,
                insert=(x + dx, y + dy),
                fill=fill_color,
                font_size=font_size,
                style=font_style,
            )
        )
# Mutable module-level state shared with draw_pose():
x = 0                        # consecutive frames with the right eye too far from baseline
report = True                # True while the next bad-posture event may still be POSTed
calibrate = True             # when True, the next right-eye detection becomes the baseline
original_right_eye_y = 270   # calibrated baseline y-coordinate of the right eye
def draw_pose(draw, dwg, pose, first=False, color="blue", threshold=0.3):
    """Render one pose onto the PIL canvas (`draw`) and the SVG canvas (`dwg`),
    and track right-eye vertical drift against the calibrated baseline.

    Side effects: mutates the module globals x/report/calibrate/
    original_right_eye_y and POSTs blur/event notifications to the hard-coded
    host when the right eye stays >50px from baseline for >10 frames.
    """
    global x, report, calibrate, original_right_eye_y
    xys = {}
    # ignore whole poses below the confidence threshold
    if pose.score < threshold:
        return
    for label, keypoint in pose.keypoints.items():
        # skip low-confidence keypoints
        if keypoint.score < 0.8:
            continue
        if label == "right eye":
            print(
                "%-20s x=%-4d y=%-4d score=%.1f"
                % (label, keypoint.yx[1], keypoint.yx[0], keypoint.score)
            )
            # first confident right-eye reading after a calibration request
            # becomes the new baseline
            if calibrate == True:
                original_right_eye_y = keypoint.yx[0]
                print("Calibrated", original_right_eye_y)
                calibrate = False
        xys[label] = (int(keypoint.yx[1]), int(keypoint.yx[0]))
        dwg.add(
            dwg.circle(
                center=(int(keypoint.yx[1]), int(keypoint.yx[0])),
                r=5,
                fill="cyan",
                fill_opacity=keypoint.score,
                stroke=color,
            )
        )
        # more than 50px of vertical drift from the baseline counts as a
        # bad-posture frame
        if (
            label == "right eye"
            and abs(keypoint.yx[0] - original_right_eye_y) > 50
            and keypoint.yx[0] != 0
        ):
            x += 1
            print("Number of bad positions: ", x)
            print("Distance from calibrated y-coordinate: ", abs(keypoint.yx[0] - original_right_eye_y))
            print()
            if x > 10:
                # after >10 consecutive bad frames: blank the PIL frame and
                # notify the remote service once (`report` debounces)
                draw.ellipse((0, 0, 1000, 1000), fill=(255, 0, 0, 0))
                data = {
                    "time": str(datetime.datetime.now()),
                    "score": float(abs(keypoint.yx[0] - original_right_eye_y)),
                    "y_coordinate": float(keypoint.yx[0]),
                    "baseline_y_coordinate": float(original_right_eye_y)
                }
                if report == True:
                    requests.post("http://172.16.249.255:8000/blur", data="True")
                    requests.post("http://172.16.249.255:8000/event", json=data)
                    report = False
        # back within range: reset the counter and clear the remote blur
        if (
            label == "right eye"
            and abs(keypoint.yx[0] - original_right_eye_y) < 50
            and keypoint.yx[0] != 0
        ):
            x = 0
            if report == False:
                report = True
                requests.post("http://172.16.249.255:8000/blur", data="False")
        draw.ellipse(
            (
                int(keypoint.yx[1]) - 5,
                int(keypoint.yx[0]) - 5,
                int(keypoint.yx[1]) + 5,
                int(keypoint.yx[0]) + 5,
            ),
            fill=(255, 0, 0, 0),
        )
    # connect the keypoints that form skeleton edges, on both canvases
    for a, b in EDGES:
        if a not in xys or b not in xys:
            continue
        ax, ay = xys[a]
        bx, by = xys[b]
        dwg.add(dwg.line(start=(ax, ay), end=(bx, by), stroke=color, stroke_width=2))
        draw.line([(ax, ay), (bx, by)], fill=color, width=2)
def run(callback, use_appsrc=False):
    """Parse CLI options, load the PoseNet model matching the chosen
    resolution, and start the GStreamer capture pipeline with `callback`."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--mirror", help="flip video horizontally", action="store_true")
    parser.add_argument("--model", help=".tflite model path.", required=False)
    parser.add_argument(
        "--res",
        help="Resolution",
        default="1280x720",
        choices=["480x360", "640x480", "1280x720"],
    )
    parser.add_argument(
        "--videosrc", help="Which video source to use", default="/dev/video0"
    )
    parser.add_argument("--h264", help="Use video/x-h264 input", action="store_true")
    args = parser.parse_args()

    # model file name encodes its input dimensions, which are slightly
    # larger than the appsink frame size
    default_model = "models/posenet_mobilenet_v1_075_%d_%d_quant_decoder_edgetpu.tflite"
    if args.res == "480x360":
        src_size = (640, 480)
        appsink_size = (480, 360)
        model = args.model or default_model % (353, 481)
    elif args.res == "640x480":
        src_size = (640, 480)
        appsink_size = (640, 480)
        model = args.model or default_model % (481, 641)
    elif args.res == "1280x720":
        src_size = (1280, 720)
        appsink_size = (1280, 720)
        model = args.model or default_model % (721, 1281)

    print("Loading model: ", model)
    engine = PoseEngine(model, mirror=args.mirror)
    # bind the engine into the per-frame callback and hand off to GStreamer
    gstreamer.run_pipeline(
        partial(callback, engine),
        src_size,
        appsink_size,
        use_appsrc=use_appsrc,
        mirror=args.mirror,
        videosrc=args.videosrc,
        h264input=args.h264,
    )
def main():
    """Run the pose-overlay pipeline: accumulate FPS/latency statistics, draw
    detected poses, save a snapshot every 10 frames, and poll the remote
    service for recalibration requests."""
    last_time = time.monotonic()
    n = 0
    sum_fps = 0
    sum_process_time = 0
    sum_inference_time = 0
    out = Image.new("RGB", (1280, 720))
    draw = ImageDraw.Draw(out)

    def render_overlay(engine, image, svg_canvas):
        # per-frame callback invoked from the GStreamer pipeline
        nonlocal n, sum_fps, sum_process_time, sum_inference_time, last_time, out, draw
        global calibrate
        start_time = time.monotonic()
        outputs, inference_time = engine.DetectPosesInImage(image)
        end_time = time.monotonic()
        n += 1
        # running sums; the averages below divide by the frame count n
        sum_fps += 1.0 / (end_time - last_time)
        sum_process_time += 1000 * (end_time - start_time) - inference_time
        sum_inference_time += inference_time
        last_time = end_time
        text_line = "PoseNet: %.1fms Frame IO: %.2fms TrueFPS: %.2f Nposes %d" % (
            sum_inference_time / n,
            sum_process_time / n,
            sum_fps / n,
            len(outputs),
        )
        shadow_text(svg_canvas, 10, 20, text_line)
        # NOTE(review): outputs[0] raises IndexError when no pose is
        # detected — confirm the engine always returns at least one pose
        draw_pose(draw, svg_canvas, outputs[0])
        if n % 10 == 0:
            # persist the accumulated overlay and start a fresh frame
            out.save("output.png")
            out = Image.new("RGB", (1280, 720))
            draw = ImageDraw.Draw(out)
            # remote flag re-triggers baseline calibration in draw_pose
            if requests.get("http://172.16.249.255:8000/calibrate").text == "True":
                calibrate = True

    run(render_overlay)


if __name__ == "__main__":
    main()
|
25,360 | ffbd4583e3d8eff3f1bf6d9dbc62b565d3eff223 | import copy
import os.path
import datetime
import validators
from . import base
from . import util
from . import files
from . import rules
from . import config
from .dao import reaperutil, APIStorageException
from . import validators
from . import tempdir as tempfile
log = config.log
class Upload(base.RequestHandler):
def reaper(self):
"""Receive a sortable reaper upload."""
if not self.superuser_request:
self.abort(402, 'uploads must be from an authorized drone')
with tempfile.TemporaryDirectory(prefix='.tmp', dir=config.get_item('persistent', 'data_path')) as tempdir_path:
try:
file_store = files.FileStore(self.request, tempdir_path)
except files.FileStoreException as e:
self.abort(400, str(e))
now = datetime.datetime.utcnow()
fileinfo = dict(
name=file_store.filename,
created=now,
modified=now,
size=file_store.size,
hash=file_store.hash,
tags=file_store.tags,
metadata=file_store.metadata
)
container = reaperutil.create_container_hierarchy(file_store.metadata)
f = container.find(file_store.filename)
target_path = os.path.join(config.get_item('persistent', 'data_path'), util.path_from_hash(fileinfo['hash']))
if not f:
file_store.move_file(target_path)
container.add_file(fileinfo)
rules.create_jobs(config.db, container.acquisition, 'acquisition', fileinfo)
elif not file_store.identical(util.path_from_hash(fileinfo['hash']), f['hash']):
file_store.move_file(target_path)
container.update_file(fileinfo)
rules.create_jobs(config.db, container.acquisition, 'acquisition', fileinfo)
throughput = file_store.size / file_store.duration.total_seconds()
log.info('Received %s [%s, %s/s] from %s' % (file_store.filename, util.hrsize(file_store.size), util.hrsize(throughput), self.request.client_addr))
def engine(self):
"""
URL format: api/engine?level=<container_type>&id=<container_id>
It expects a multipart/form-data request with a "metadata" field (json valid against api/schemas/input/enginemetadata)
and 0 or more file fields with a non null filename property (filename is null for the "metadata").
"""
level = self.get_param('level')
if level is None:
self.abort(404, 'container level is required')
if level != 'acquisition':
self.abort(404, 'engine uploads are supported only at the acquisition level')
acquisition_id = self.get_param('id')
if not acquisition_id:
self.abort(404, 'container id is required')
else:
acquisition_id = util.ObjectId(acquisition_id)
if not self.superuser_request:
self.abort(402, 'uploads must be from an authorized drone')
with tempfile.TemporaryDirectory(prefix='.tmp', dir=config.get_item('persistent', 'data_path')) as tempdir_path:
try:
file_store = files.MultiFileStore(self.request, tempdir_path)
except files.FileStoreException as e:
self.abort(400, str(e))
if not file_store.metadata:
self.abort(400, 'metadata is missing')
metadata_validator = validators.payload_from_schema_file(self, 'enginemetadata.json')
metadata_validator(file_store.metadata, 'POST')
file_infos = file_store.metadata['acquisition'].pop('files', [])
now = datetime.datetime.utcnow()
try:
acquisition_obj = reaperutil.update_container_hierarchy(file_store.metadata, acquisition_id, level)
except APIStorageException as e:
self.abort(400, e.message)
# move the files before updating the database
for name, fileinfo in file_store.files.items():
path = fileinfo['path']
target_path = os.path.join(config.get_item('persistent', 'data_path'), util.path_from_hash(fileinfo['hash']))
files.move_file(path, target_path)
# merge infos from the actual file and from the metadata
merged_infos = self._merge_fileinfos(file_store.files, file_infos)
# update the fileinfo in mongo if a file already exists
for f in acquisition_obj['files']:
fileinfo = merged_infos.get(f['name'])
if fileinfo:
fileinfo.pop('path', None)
fileinfo['modified'] = now
acquisition_obj = reaperutil.update_fileinfo('acquisitions', acquisition_obj['_id'], fileinfo)
fileinfo['existing'] = True
# create the missing fileinfo in mongo
for name, fileinfo in merged_infos.items():
# if the file exists we don't need to create it
# skip update fileinfo for files that doesn't have a path
if not fileinfo.get('existing') and fileinfo.get('path'):
del fileinfo['path']
fileinfo['created'] = now
fileinfo['modified'] = now
acquisition_obj = reaperutil.add_fileinfo('acquisitions', acquisition_obj['_id'], fileinfo)
for f in acquisition_obj['files']:
if f['name'] in file_store.files:
file_ = {
'name': f['name'],
'hash': f['hash'],
'type': f.get('type'),
'measurements': f.get('measurements', [])
}
rules.create_jobs(config.db, acquisition_obj, 'acquisition', file_)
return [{'name': k, 'hash': v['hash'], 'size': v['size']} for k, v in merged_infos.items()]
def _merge_fileinfos(self, hard_infos, infos):
    """Merge measured file properties with client-supplied metadata.

    ``hard_infos`` maps file name -> dict of properties computed from the
    actual file (size, hash, path, ...).  ``infos`` is a list of metadata
    dicts, each carrying at least a ``name`` key; entries may refer to the
    same files or to additional ones.  Returns a new dict (inputs are not
    mutated) where metadata values override the measured ones.
    """
    merged = copy.deepcopy(hard_infos)
    for info in infos:
        entry = merged.setdefault(info['name'], {})
        entry.update(info)
    return merged
|
25,361 | 24415ef3b1670b0c5546c04e7e2bc2388b0a9d80 | # Databricks notebook source
# MAGIC
# MAGIC %run ./configuration
# COMMAND ----------
import pandas as pd
# COMMAND ----------
# Synthetic user-dimension records (name, mailing address, phone number,
# UUID and device id) used to seed the health-tracker people dimension.
_people_records = [
    {
        "name": "Lai Hui",
        "address": "805 John Oval Apt. 470\nLake Amanda, NE 09043",
        "phone_number": "3087607759",
        "user_id": "16b74cfe-d9da-11ea-8534-0242ac110002",
        "device_id": 1,
    },
    {
        "name": "Armando Clemente",
        "address": "293 Keith Drive\nEast David, NY 05983",
        "phone_number": "8497224309",
        "user_id": "16b78264-d9da-11ea-8534-0242ac110002",
        "device_id": 2,
    },
    {
        "name": "Meallan O'Conarain",
        "address": "3048 Guerrero Alley\nJerryhaven, PA 56888",
        "phone_number": "(580)703-9076x32254",
        "user_id": "16b79f9c-d9da-11ea-8534-0242ac110002",
        "device_id": 3,
    },
    {
        "name": "Lakesia Brown",
        "address": "549 Palmer Village\nLake Joseph, IN 44981",
        "phone_number": "001-624-908-5142x446",
        "user_id": "16b7b946-d9da-11ea-8534-0242ac110002",
        "device_id": 4,
    },
    {
        "name": "Anu Achaval",
        "address": "8334 Kevin Fork Suite 531\nSouth Kennethton, WI 42697",
        "phone_number": "468-733-3330x598",
        "user_id": "16b7cec2-d9da-11ea-8534-0242ac110002",
        "device_id": 5,
    },
    {
        "name": "Ae Yujin",
        "address": "USCGC Davis\nFPO AP 67548",
        "phone_number": "001-072-063-6894x746",
        "user_id": "16b7dd72-d9da-11ea-8534-0242ac110002",
        "device_id": 6,
    },
    {
        "name": "Pardeep Kapoor",
        "address": "653 Monica Knoll\nHicksfort, KS 41378",
        "phone_number": "001-834-698-3839x6306",
        "user_id": "16b7f0d2-d9da-11ea-8534-0242ac110002",
        "device_id": 7,
    },
    {
        "name": "Julian Andersen",
        "address": "321 Jackson Forest Apt. 689\nGarciafort, UT 91205",
        "phone_number": "001-796-472-0831x3399",
        "user_id": "16b807ac-d9da-11ea-8534-0242ac110002",
        "device_id": 8,
    },
    {
        "name": "Simone Graber",
        "address": "55863 Brown Cliff\nPort Amybury, ND 99197",
        "phone_number": "4548070215",
        "user_id": "16b81c2e-d9da-11ea-8534-0242ac110002",
        "device_id": 9,
    },
    {
        "name": "Gonzalo Valdés",
        "address": "2456 Rachael Manors Apt. 758\nSouth Curtisfort, WV 27129",
        "phone_number": "(408)059-4700x9591",
        "user_id": "16b830c4-d9da-11ea-8534-0242ac110002",
        "device_id": 10,
    },
]

people_df = pd.DataFrame(_people_records)
# COMMAND ----------
# Mirror the Pandas people frame into Spark and persist it as the people
# dimension in Delta format (full overwrite on each run).
people_spark_df = spark.createDataFrame(people_df)
(people_spark_df.write.format("delta").mode("overwrite").save(peopleDimPath))

# COMMAND ----------

# Recreate the metastore table pointing at the Delta files just written.
spark.sql(
    """
DROP TABLE IF EXISTS health_tracker_user
"""
)
spark.sql(
    f"""
CREATE TABLE health_tracker_user
USING DELTA
LOCATION "{peopleDimPath}"
"""
)

# COMMAND ----------

# Build a "deletions" table holding the user_ids that must be removed from
# the dimension (used by a later step in the pipeline).
spark.sql(
    """
DROP TABLE IF EXISTS deletions
"""
)
spark.sql(
    f"""
CREATE TABLE deletions AS
SELECT user_id FROM health_tracker_user
WHERE user_id in (
'16b807ac-d9da-11ea-8534-0242ac110002',
'16b81c2e-d9da-11ea-8534-0242ac110002'
)
"""
)
|
25,362 | 63b1fd15a3af9262c9cc525984584ecee70ecfe4 |
import requests
import json
from datetime import datetime
import threading
import os
import shutil
import socket
import time
previous_list = []
current_list = []
def capture():
    """Poll the Meraki dashboard for clients near the camera and sync face images.

    Flow: find which Meraki device the camera is attached to, list that
    device's recent clients, stage the matching face images and write their
    filenames to a text file, then either push new images to the camera
    (client_send) or tell it nothing changed (client_nochange).
    """
    global previous_list
    global current_list
    print("Current Date and Time of Run:")
    print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    # Camera WiFi MAC address = f8:f0:05:a1:b9:df
    # Step 1: ask the dashboard which Meraki device the camera is connected to.
    # SECURITY NOTE(review): the API key is hard-coded here (and below); it
    # should be loaded from an environment variable or config file instead.
    url = "https://api.meraki.com/api/v0/networks/L_634444597505819269/clients/f8:f0:05:a1:b9:df"
    headers = {
        'X-Cisco-Meraki-API-Key': "b70ca858020930863c1542f511ec4267ab077aa6",
        'User-Agent': "PostmanRuntime/7.18.0",
        'Accept': "*/*",
        'Cache-Control': "no-cache",
        'Postman-Token': "0ca6880f-406a-46a4-8431-5e72757386be,e60c1ffd-fde9-42c6-8268-6443ff63f554",
        'Accept-Encoding': "gzip, deflate",
        'Referer': "https://api.meraki.com/api/v0/networks/L_634444597505819269/clients/f8:f0:05:a1:b9:df",
        'Connection': "keep-alive",
        'cache-control': "no-cache"
    }
    response = requests.request("GET", url, headers=headers)
    print(response.text)
    json_data0 = json.loads(response.text)
    recentDeviceMac = json_data0["recentDeviceMac"]
    print(recentDeviceMac)
    # Step 2: map that device MAC to the device's serial number.
    url = "https://api.meraki.com/api/v0/networks/L_634444597505819269/devices"
    headers = {
        'X-Cisco-Meraki-API-Key': "b70ca858020930863c1542f511ec4267ab077aa6",
        'cache-control': "no-cache",
        'Postman-Token': "9a429afd-9a2b-4bf3-84b9-8e70ba4c475f"
    }
    response = requests.request("GET", url, headers=headers)
    print(response.text)
    json_data1 = json.loads(response.text)
    for item in json_data1:
        mac = item.get("mac")
        if mac == recentDeviceMac:
            serial = item.get("serial")
            print(serial)
    # NOTE(review): if no device MAC matched above, `serial` is unbound and
    # the next line raises NameError.
    # Step 3: list all clients recently seen on the device with that serial.
    url = "https://api.meraki.com/api/v0/devices/" + serial + "/clients"  # Q2PD-6WK9-V4XS for MR33, Q2XD-4RAF-S592 for MR20
    querystring = {"timespan": "20000"}  # activity window, in seconds
    headers = {
        'X-Cisco-Meraki-API-Key': "b70ca858020930863c1542f511ec4267ab077aa6",
        'User-Agent': "PostmanRuntime/7.15.0",
        'Accept': "*/*",
        'Cache-Control': "no-cache",
        'Postman-Token': "338b5785-52d9-412e-9db4-ea90361e0e69,441570a6-a644-42cb-8446-9244f803d755",
        'accept-encoding': "gzip, deflate",
        'referer': "https://api.meraki.com/api/v0/devices/Q2PD-6WK9-V4XS/clients?timespan=86400",
        'Connection': "keep-alive",
        'cache-control': "no-cache"
    }
    response = requests.request("GET", url, headers=headers, params=querystring)
    print("JSON Results:")
    print(response.text)
    json_data = json.loads(response.text)
    if len(json_data) != 0:  # only act if at least one client was seen
        # Text file listing the face-image filenames that will be sent.
        first_file_path = "C:\\Faces\\Images_to_send\\face_txt.txt"
        first_file = open(first_file_path, "w")
        # empty the file of any previous contents
        first_file.seek(0)
        first_file.truncate()
        print("Client Names found:")
        for item in json_data:  # isolate each client's name (description)
            desc = item.get("description")
            directory = "c:\\Faces\\" + desc
            # NOTE(review): these defaults are re-assigned on EVERY loop
            # iteration, so a camera/user IP discovered in an earlier
            # iteration is overwritten again by the defaults — only the
            # values from the last iteration survive past the loop.
            vi_ip = '153.104.46.181'
            host = '10.137.69.21'
            if desc == "WINC-b9-df":  # the camera itself: record its IP
                host = item.get("ip")
                print("camera ip address: " + host)
                continue
            elif desc == "V_I_User":  # the visually impaired user's smartphone
                vi_ip = item.get("ip")
                print("visually impaired user's device ip address: " + vi_ip)
                continue
            else:  # any other client: stage that person's face images
                print(desc)
                # Copy this person's .pgm face images into the staging folder.
                try:
                    for filename in sorted(os.listdir(directory)):
                        if filename.endswith(".pgm"):
                            pathname = "C:\\Faces\\" + desc + "\\" + filename
                            shutil.copy(pathname, "C:\\Faces\\Images_to_send")
                            first_file.write(filename + "\n")
                        else:
                            print("file found that is not of type pgm")
                            continue
                    current_list.append(desc)
                except Exception as e:  # no folder under C:\Faces -> assume not a coworker; ignore
                    print(e)
                    print("Stationary device - ignoring")
        first_file.close()
        # Alphabetize the filename list, prefixing the user's smartphone IP.
        alphalist = []
        first_file_path = "C:\\Faces\\Images_to_send\\face_txt.txt"
        with open(first_file_path) as first:
            for line in first:
                alphalist.append(line)
            first.close()
        alphalist.sort()
        with open(first_file_path, 'w') as fout:
            fout.write(vi_ip + "\n")  # user's smartphone IP goes on the first line
            for alpha in alphalist:
                fout.write(alpha)
            fout.close()
        # Compare with the previous run to decide whether to resend images.
        print("previous list:")
        print(previous_list)
        print("current list:")
        print(current_list)
        if previous_list == current_list:
            print ("The lists are identical..no need to send new photos to the camera")
            client_nochange(host, vi_ip)  # tell the camera no new photos are needed
        else :
            print ("The lists are not identical..refreshing camera with new photos")
            client_send(host)  # push the staged photos to the camera
        # Remove the staged images so the next run starts fresh.
        directory = "c:\\Faces\\Images_to_send"
        for filename in os.listdir(directory):
            if filename.endswith(".pgm"):
                os.remove(directory + "\\" + filename)
                continue
            else:
                print("no more files in directory to delete")
                continue
        previous_list = current_list.copy()  # current becomes previous for the next run
        current_list.clear()  # start fresh next time
def client_send(host_ip):
    """Push the staged face images to the camera over three TCP connections.

    Protocol (ports 8005, 8006, 8007 in order):
      1. the text file listing the user's IP and the image filenames,
      2. a text file with the byte size of each image (so the receiver can
         split the concatenated stream),
      3. the raw image bytes, concatenated, over a single socket.
    host_ip: the camera's IP address discovered by capture().
    """
    host = host_ip  # camera's IP address
    print(host)
    port = 8005
    # --- Part 1: send the text file containing the image names ---
    try:
        time.sleep(6)
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # TCP over IPv4
        connected = False
        # Retry until the camera starts listening.
        while (connected == False):
            try:
                s.connect((host, port))
                connected = True
            except Exception as f:
                print(f)
                time.sleep(3)
                print("waiting for camera...")
        textfile = "C:\\Faces\\Images_to_send\\face_txt.txt"
        f = open(textfile, "rb")
        while True:
            veri = f.read(512)
            if not veri:
                print("finished sending file - no more bytes to send")
                break
            s.send(veri)  # send data 512 bytes at a time
        f.close()
        s.close()
        print("text file sent")
    except Exception as e:
        print(e)
        print("Error3 - error with sending text file")
    # --- Part 2: collect and send the file sizes ---
    directory = "C:\Faces\Images_to_send"
    os.chdir(directory)
    image_size_path = "C:\\Faces\\Images_to_send\\face_sizes.txt"
    image_size_file = open(image_size_path, "w")
    directory = "C:\Faces\Images_to_send"
    try:
        # One line per staged image: its size in bytes.
        for filename in sorted(os.listdir(directory)):
            if filename.endswith(".pgm"):
                print(filename)
                filesize = os.path.getsize(filename)
                print(filesize)
                image_size_file.write(str(filesize) + "\n")
                continue
            else:
                print("Error 4 - error collecting image")
                continue
    except Exception as e:
        print(e)
        print("Error 5 - cannot find image")
    image_size_file.close()
    try:
        time.sleep(2)
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        port += 1  # next sequential port (8006)
        print(port)
        connected = False
        while (connected == False):
            try:
                s.connect((host, port))
                connected = True
            except Exception as f:
                print(f)
                time.sleep(3)
                print("waiting for camera...")
        f = open(image_size_path, "rb")
        while True:
            veri = f.read(512)
            if not veri:
                print("finished sending file - no more bytes to send")
                break
            s.send(veri)  # send data 512 bytes at a time
        f.close()
        s.close()
        print("file size text file sent")
    except Exception as e:
        print(e)
        print("Error3 - error with sending text file")
    # --- Part 3: send all images, concatenated, over one socket ---
    time.sleep(6)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    port += 1  # next sequential port (8007)
    print(port)
    connected = False
    while (connected == False):
        try:
            s.connect((host, port))
            connected = True
        except Exception as f:
            print(f)
            time.sleep(3)
    directory = "C:\Faces\Images_to_send"
    try:
        # Same sorted order as the size file, so the receiver can re-split.
        for filename in sorted(os.listdir(directory)):
            if filename.endswith(".pgm"):
                print(filename)
                filer = "C:\\Faces\\Images_to_send\\" + filename
                f = open(filer, "rb")
                while True:
                    veri = f.read(512)
                    if not veri:
                        print("finished sending image - no more bytes to send")
                        break
                    s.send(veri)  # send data 512 bytes at a time
                f.close()
                continue
            else:
                print("Error 4 - error collecting image")
                continue
        s.close()
    except Exception as e:
        print(e)
        print("Error 5 - error sending image")
def client_nochange(host_ip, vi_ip):
    """Tell the camera that the set of connected clients has not changed.

    Connects to the camera (host_ip) on port 8005 and sends the visually
    impaired user's smartphone IP (in case it changed) followed by the
    sentinel text "no change", so the camera skips re-downloading photos.
    """
    host = host_ip
    print(host)
    port = 8005
    try:
        time.sleep(6)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # TCP / IPv4
        # Keep retrying until the camera accepts the connection.
        while True:
            try:
                sock.connect((host, port))
                break
            except Exception as err:
                print(err)
                time.sleep(3)
        # IP first, then the sentinel on its own line.
        sock.send((vi_ip + "\nno change").encode())
        sock.close()
        print("no change text sent")
    except Exception as e:
        print(e)
        print("Error3 - error with sending no change text")
def timed_prog():
    """Run capture() now and re-schedule this function every 180 s (3 min)."""
    timer = threading.Timer(180.0, timed_prog)
    timer.start()
    capture()
    print("----------------------")
timed_prog() #main function call
|
25,363 | e885b2b44388c8a6d07dbdb74ff83caf6cec13b6 | class Robot(object):
def __init__(self):
self.name = ''
self.reset()
def reset(self):
"""
Every once in a while we need to reset a robot
to its factory settings, which means that their
name gets wiped. The next time you ask,
it will respond with a new random name.
The first time you boot them up, a random name is generated in
the format of two uppercase letters followed by three digits,
such as RX837 or BC811.
:return:
"""
import string
# import random
from random import SystemRandom
old_name = self.name
while old_name == self.name:
self.name = ''
n = 0
while n < 2:
# Standard pseudo-random generators are not
# suitable for security/cryptographic purposes.
# self.name += string.ascii_uppercase[random.randint(0, 25)]
rnd = SystemRandom()
self.name += string.ascii_uppercase[rnd.randrange(0, 25)]
n += 1
for n in range(0, 3):
self.name += str(rnd.randrange(0, 9))
|
25,364 | 638e54015e05f3d86c151a2141ca36315f66d819 | i = 0
# Enumerate (i, j) pairs: i sweeps 0.0..2.0 in steps of 0.2, and for each i,
# j sweeps i+1 .. i+3 in steps of 1, printing each pair to one decimal place.
# NOTE(review): i accumulates 0.2 in binary floating point, so the loop
# bounds are inexact; the exact iteration count depends on rounding and the
# logic must not be restructured without preserving the accumulation order.
i = float(i)
while i <= 2:
    j = float(i+1)
    while j <= (3+i):
        print("I=%.1f J=%.1f" %(i,j))
        j += 1
    i += 0.2
|
25,365 | 7b5d8925fcd85efa36d9a4f0319d4fa603b88768 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
import xbmc
from tools import get_current_list_item_action_args, url_quoted_action_args
if __name__ == "__main__":
    # Context-menu entry point: rebuild the focused list item's action args
    # so Seren opens the season's episode list.
    action_args = get_current_list_item_action_args()
    action_args['media_type'] = "season"
    # Drop the season number; the seasonEpisodes action keys off the
    # season's own trakt id instead.
    action_args.pop("season", None)
    # Promote the season-level trakt id into the generic trakt_id slot.
    action_args['trakt_id'] = action_args.get("trakt_season_id")
    action_args.pop("trakt_season_id", None)
    trakt_id = action_args.get("trakt_id")
    if trakt_id:
        path = "plugin://plugin.video.seren/?action=seasonEpisodes&action_args={}".format(
            url_quoted_action_args(action_args)
        )
        xbmc.log(
            "context.seren: Browse Season ({})".format(action_args["trakt_id"]),
            xbmc.LOGINFO,
        )
        # Open Kodi's Videos window on the constructed plugin path.
        xbmc.executebuiltin("ActivateWindow(Videos,{},return)".format(path))
    else:
        # No season id on the item: log the raw args to help debugging.
        xbmc.log(
            "context.seren: Browse Season: No trakt_season_id in action_args: ({})".format(
                get_current_list_item_action_args()
            ),
            xbmc.LOGERROR
        )
|
25,366 | d7c8124cdfc27b7199f2d76a553cc92c9e315a45 | import pandas as pd
import numpy as np

# 2013-era state population estimates keyed by state name.
population_dict = {'California': 38332521,
                   'Texas': 26448193,
                   'New York': 19651127,
                   'Florida': 19552860,
                   'Illinois': 12882135}
population = pd.Series(population_dict)
print(population["Texas"])
print(dir(population))
# BUG FIX: np.log() was called with no argument, which raises
# "TypeError: log() takes from 1 to 2 positional arguments" at import time.
# Apply the natural log element-wise to the population Series instead.
np.log(population)
25,367 | e036f98ec8da1e5f8936c832c28aefdd71d7ccea | # Question 1
# Question 1
# Write a Python program to remove empty List from List.
# Answer==
the_list = [1, 2, [], 3, [], [], 4]
# printing original list
print("The original list is : " + str(the_list))
# A comprehension keeps every truthy element, dropping the empty sub-lists.
new_list = [element for element in the_list if element]
print("List after empty list removal : " + str(new_list))

# Question 2
# Write a Python program to remove all duplicates words from a given sentence
# Answer==
string1 = "get get going"
words = string1.split()
# dict preserves insertion order, so this deduplicates while keeping each
# word's first appearance.
print(" ".join(dict.fromkeys(words)))

# Question 3
# Write a Python program to find all occurrences of a character in the given string
# Answer==
the_str = "Machine Learning"
# Tally the occurrences of 'e' one character at a time.
counter = sum(1 for ch in the_str if ch == 'e')
print("Count of e in Machine Learning is : "
      + str(counter))
25,368 | bcb3a272066bc2f19566a518066c761a704bbc25 | # -*- coding: utf-8 -*-
"""wsgi like"""
import os
import sys
import asyncio
import logging.config
SRC_ROOT = os.path.abspath(os.path.dirname(__file__))
sys.path.append(SRC_ROOT)
from app import create # noqa
loop = asyncio.get_event_loop()
app = create(loop=loop)
|
25,369 | 877e226a17933ad906551658f7e58ce856efd68e | txt = open("/home/guancio/Desktop/todo.org").read()
lines = txt.split("\n")
class OrgNode(object):
    """One node of a parsed org-mode document tree.

    A node is a plain TEXT line, an OUTLINE heading ("*"), or a bullet
    LIST ("+"); children may be OrgNode instances or raw strings.
    """

    # Node kinds.
    TEXT, OUTLINE, LIST = 0, 1, 2

    def __init__(self, node_type=TEXT, title="", parent=None, depth=0):
        self.node_type = node_type
        self.title = title
        self.depth = depth
        self.parent = parent
        self.childs = []

    def __str__(self):
        return """
title: %s
depth: %d
childs: %s
""" % (self.title, self.depth, str(self.childs))
# Parse the org file line-by-line into a tree rooted at a synthetic outline
# node. `curr_node` tracks the node new content attaches to.
doc_node = OrgNode(OrgNode.OUTLINE)
curr_node = doc_node
for line in lines:
    if line[:1] == "*":
        # Outline heading: its depth is the number of leading stars
        # (position of the first space).
        depth = line.find(" ")
        # Climb out of any list context back to an outline node.
        while curr_node.node_type != OrgNode.OUTLINE:
            curr_node = curr_node.parent
        # Climb until we find the heading this one nests under.
        while curr_node.depth >= depth:
            curr_node = curr_node.parent
        next_node = OrgNode(OrgNode.OUTLINE, line[depth:], curr_node, depth)
        curr_node.childs.append(next_node)
        curr_node = next_node
    elif line.lstrip()[:2] == "+ ":
        # Bullet item: depth is the column of the "+".
        depth = line.find("+")
        # Pop list levels deeper than this bullet.
        while curr_node.node_type == OrgNode.LIST and curr_node.depth > depth:
            curr_node = curr_node.parent
        # Open a new list node when entering a (deeper) list.
        if curr_node.node_type != OrgNode.LIST or curr_node.depth < depth:
            next_node = OrgNode(OrgNode.LIST, "", curr_node, depth)
            curr_node.childs.append(next_node)
            curr_node = next_node
        curr_node.childs.append(line[depth:])
    else:
        # Plain text attaches to whatever node is current.
        curr_node.childs.append(line)
|
25,370 | f15a2b62d585548b608c3eb99f7bd9dd5ef2e22e | #!/usr/bin/env python3
# coding:utf-8
import requests, json, random, base64, os, logging, sys, time
from aes_crypto import AESCrypto
from Crypto.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5
from Crypto.PublicKey import RSA
from pathlib import Path
from merge_pdf import file_name_walk
from config import DownloadPath, BookPath, TaskFilePath, BookQueue
logging.basicConfig(stream=sys.stderr, format='%(asctime)s [%(levelname)s] %(message)s', level=logging.INFO)
DETAIL_URL = 'https://bridge.51zhy.cn/transfer/Content/Detail?'
LIST_URL = 'https://bridge.51zhy.cn/transfer/tableofcontent/list?'
AUTHORIZE_URL = 'https://bridge.51zhy.cn/transfer/content/authorize'
HEADER = {
'Accept-Language': 'zh-CN,zh;q=0.9',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
}
HEADER2 = {
'Accept-Language': 'zh-CN,zh;q=0.9',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
}
# detial param
PARAM = {
'AccessToken': 'Qr3n1SX5TXikQaQw34HhCDoS3P5SCysT1KcX1dPJ',
'DeviceToken': 'ebook031BCBD364453A896DA7CBE1AE95599C',
'ApiName': '/Content/Detail',
'BridgePlatformName': 'phei_yd_web',
'random': str(random.uniform(0.0,1.0)),
'AppId': '3',
'id': ''
}
P = {
'AccessToken': 'Qr3n1SX5TXikQaQw34HhCDoS3P5SCysT1KcX1dPJ',
'DeviceToken': 'ebook031BCBD364453A896DA7CBE1AE95599C',
'ApiName': '/tableofcontent/list',
'BridgePlatformName': 'phei_yd_web',
'random': str(random.uniform(0.0,1.0)),
'AppId': '3',
'objectId': ''
}
rsa = {
"privateKey":"-----BEGIN RSA PRIVATE KEY-----\r\nMIIEogIBAAKCAQEAqhC+NmhvKKqB/Utz3HITsPrrMRmsi088T3cjE5yR+6beWhpz\r\nknylxOrI20uWhRvREoCfTt6AHYNXF7J4jJHYzyqSUFJYOlvabT2zsLCrn4zlfkRX\r\nyHJHHXD0soMGcUZmjj2z8/SsmX5IWZp6mmkYFItiYHIdMfYLz1OP0cX47x7wFf1m\r\nszTnRoyrDYm6PpQaP2VxCF4OC9D41pifkHwKfv6RQK5zV+xWFf17DG0U5yIxdiLT\r\nmDpBLjxid9XILOfnFQ0A6NQDicUV1nGw93oN3+kUUQcle2OJANhOa9oKqtDl1slM\r\nGa6qXBA9pwL99thovrHZTo3iWuujAcsY47+l/QIBAwKCAQBxYH7O8EobHFao3Pfo\r\nTA0gp0d2ER2yNNLfpMINEwv9GemRZve2/cPYnIXnh7muEothqxTfPwATrOS6dvsI\r\nYTs0xww1jDrRkpGeKSJ1yx0VCJj+2DqFoYS+S03MV1mg2Zm0KSKiox27qYWREacR\r\nm2VjB5bq9r4hTrKKN7U2g/tKE4iv2Jy+CQiz338S/6IJ0EKtGm/gzOOLW5pefvw1\r\nVvJsdT1z1Od5DyqymaYwEqqBk78GAaUoK50SEFSsIpKDZkeYVq4tP6EEBBdwOebT\r\nF4wzYzW+F6psWGgPehTv6e0ep7I4yWnNA94qPgZio5jD2uOX5WahGAEbUe/gnjzm\r\n8ZwrAoGBAOSi9QbU4xg351NmUtTzMG8Gyg/n6GQzZZ4IkDIduF6S05hxMTVLstSX\r\nkePFqVap/bVbwixiSOZ9/ovQo2SAjQBdBkLz0tlbreRy+45gFkPhpjGrcrS47XwI\r\nOE5y1GO3YKtDcl32g9/aqv5UhI3LWcHgVTBRTsA/kRgDTYxJmLAxAoGBAL5rQ3TB\r\nREIA1hkKt3I8q0c087lPQlSHYfNASfMiwUBJexyhveLWosFyhR3+p756xt2RXi95\r\nr+8VJVek/ofnQXtThLixIbioEQ47H0hawHexLhIIcPEf8XDhaOCRJsNFdK8+KM9v\r\npP1CCDW+iQsFtlF6hI8LmyByX6MtkDE/vIuNAoGBAJhso1njQhAlROJENziiIEoE\r\nhrVFRZgiQ76wYCFpJZRh4mWgy3jdIeMPtpfZG48b/njn1shBhe7+qbKLF5hVs1WT\r\nWYH34eY9HphMp7RADtfrxCEc9yMl86gFet73OEJ6QHIs9ulPApU8cf7jAwkyO9aV\r\njiA2NIAqYLqs3l2GZcrLAoGAfvIs+IDYLACOu1x6TChyL3iie4osOFpBTNWGohcr\r\ngDD8vcEpQeRsgPcDaVRv1FHZ6QuUH6Z1Sg4Y5Ripr++A/OJYeyDBJcVgtCdqMDyA\r\nT8t0DAWgoL/2S0DwlbYZ14OjH37F35/DU4Fazn8GB1kkNlGttLJnavbqbMkKy3/T\r\nB7MCgYEA3WeEEq7mIrm/Q4TOBM4cVLWUBbx9ssYsR+yzSGWIzDWr+fLsq4c8WePK\r\neaQGBc9Z5Y5sM1FsCMSTF0kZkJdEbTictq5JDNU6ND7EVTSkoVwIRyVNJI5U9w6n\r\nrQZIMqkgXXjJEmRC+SS0LtyKh/1HWBNspq7lb7USLefAgLXTnmU=\r\n-----END RSA PRIVATE KEY-----\r\n",
"publicKey":'-----BEGIN+PUBLIC+KEY-----%0D%0AMIIBIDANBgkqhkiG9w0BAQEFAAOCAQ0AMIIBCAKCAQEAqhC%2BNmhvKKqB%2FUtz3HIT%0D%0AsPrMjI0MTYwMTQ0MTQ1MjEwMTQ3MTQ1MTc4MTY0MTc3MjQ0MjA5MjA4MTc4MTc2MTc3MjE0MjA3MTg0MjQ0MjE2MjI2MjAxMjQ3MTQ1MjUxMjI1MTQ1MjQ3MTk4MTI4MTYxrMRmsi088T3cjE5yR%2B6beWhpzknylxOrI20uWhRvREoCfTt6AHYNXF7J4jJHY%0D%0AzyqSUFJYOlvabT2zsLCrn4zlfkRXyHJHHXD0soMGcUZmjj2z8%2FSsmX5IWZp6mmkY%0D%0AFItiYHIdMfYLz1OP0cX47x7wFf1mszTnRoyrDYm6PpQaP2VxCF4OC9D41pifkHwK%0D%0Afv6RQK5zV%2BxWFf17DG0U5yIxdiLTmDpBLjxid9XILOfnFQ0A6NQDicUV1nGw93oN%0D%0A3%2BkUUQcle2OJANhOa9oKqtDl1slMGa6qXBA9pwL99thovrHZTo3iWuujAcsY47%2Bl%0D%0A%2FQIBAw%3D%3D%0D%0A-----END+PUBLIC+KEY-----%0D%0A'
}
def parse_detail_url(bookid):
    """Fetch the detail record for *bookid* from the bridge API.

    Mutates the shared PARAM dict with the book id and returns the raw
    ``requests.Response``.
    """
    PARAM['id'] = bookid
    return requests.get(url=DETAIL_URL, headers=HEADER, params=PARAM)
def get_token_tile(detail):
    """Extract (AuthorizeToken, Title) from a detail-endpoint response.

    *detail* is any object whose ``text`` attribute holds the JSON body.
    """
    payload = json.loads(detail.text)
    book = payload.get('Data')
    title = book.get('Title')
    token = book.get('ExtendData').get('AuthorizeToken')
    return token, title
def get_bookmark(bookid):
    """Fetch the table of contents for *bookid* and save it as bookmark.json.

    The file is written under DownloadPath/<bookid>/, which download_books()
    is expected to have created already.
    """
    P['objectId'] = bookid
    r = requests.get(LIST_URL, headers=HEADER, params=P)
    filepath = os.path.join(DownloadPath, bookid, 'bookmark.json')
    with open(filepath, 'w', encoding='utf-8') as fd:
        fd.write(json.dumps(r.json(), indent=4, ensure_ascii=False))
    logging.info('%s bookmark.json save done!'%bookid)
def parse_authorize_url(bookid, AuthorizeToken):
    """POST the content-authorization request for *bookid* and return the response.

    *AuthorizeToken* comes from the detail endpoint.  The form body also
    carries our RSA public key ("devicekey", already URL-encoded) so the
    server can return the AES page key encrypted for us.
    """
    DATA = 'IsOnline=true&AccessToken=Qr3n1SX5TXikQaQw34HhCDoS3P5SCysT1KcX1dPJ&DeviceToken={DeviceToken}'\
           '&ApiName=content/authorize&BridgePlatformName=phei_yd_web&random={random}'\
           '&AppId=3&id={bookid}'\
           '&type=rsa&devicekey={devicekey}'\
           '&authorizeToken={authorizeToken}'.format(DeviceToken = PARAM['DeviceToken'],
                                                     random = str(random.uniform(0.0,1.0)),
                                                     bookid = bookid,
                                                     devicekey = rsa['publicKey'],
                                                     authorizeToken = AuthorizeToken)
    authorize = requests.post(url= AUTHORIZE_URL,
                              data=DATA, headers=HEADER2)
    return authorize
def get_key_urls(authorize):
    """Extract the AES key and download URLs from an authorize response.

    Returns (data_key, book_url, split_file_urls) where ``data_key`` is the
    RSA-encrypted AES key, ``book_url`` points at the whole book, and
    ``split_file_urls`` lists one URL per encrypted page PDF.
    """
    payload = json.loads(authorize.text)
    data = payload.get('Data')
    data_key = data.get('Key')
    # 整书 url
    book_url = data.get('Url')
    # 每一页pdf url
    split_file_urls = data.get('SplitFileUrls')
    return data_key, book_url, split_file_urls
def load_taskfile(bookid):
    """Load the list of page numbers still to download for *bookid*.

    Reads back the checkpoint file written by save_taskfile().
    """
    import ast

    filepath = os.path.join(TaskFilePath, bookid + '.txt')
    with open(filepath, 'r', encoding='utf-8') as fd:
        tasks = fd.read()
    # SECURITY FIX: parse with ast.literal_eval instead of eval().
    # save_taskfile() writes a plain list literal via str(), which
    # literal_eval reads back without executing arbitrary code from disk.
    return ast.literal_eval(tasks)
def save_taskfile(bookid, tasks):
    """Checkpoint the remaining page numbers for *bookid* as a list literal.

    Creates the task-file directory on first use; load_taskfile() reads the
    file back.
    """
    if not os.path.exists(TaskFilePath):
        os.makedirs(TaskFilePath)
    filepath = os.path.join(TaskFilePath, bookid + '.txt')
    with open(filepath, 'w', encoding='utf-8') as fd:
        fd.write(str(tasks))
def download_pdf_page(page_url, bookid, title, cnt, aes_key):
    """Download one AES-encrypted page PDF, decrypt it and save it to disk.

    page_url: URL of the encrypted page; cnt: 1-based page number used in
    the output filename; aes_key: the book's decrypted AES key.
    Output path: DownloadPath/<bookid>/<bookid>-<title>-<cnt>.pdf
    """
    # AES 加密过的PDF (the server returns the page AES-encrypted)
    pdf = requests.get(page_url, headers=HEADER)
    aes = AESCrypto(aes_key, 'ECB', 'pkcs7')
    # NOTE(review): the raw body is base64-encoded before being handed to
    # AESCrypto.decrypt — presumably that helper expects base64 input;
    # confirm against aes_crypto's implementation.
    f = aes.decrypt(base64.b64encode(pdf.content), None)
    pagename = bookid + '-' + title + '-' + str(cnt) + '.pdf'
    pagepath = os.path.join(DownloadPath, bookid, pagename)
    with open(pagepath, 'wb') as fd:
        fd.write(f)
book_cnt = 1
def download_book(bookid, booksum = 1):
    """Download, decrypt and merge one book identified by *bookid*.

    booksum: number of books remaining in the batch (progress logging only).
    Page-level progress is checkpointed in a task file so an interrupted
    download resumes where it left off; finished pages are merged into a
    single PDF under BookPath/<bookid>/.
    """
    global book_cnt
    detail = parse_detail_url(bookid)
    AuthorizeToken, Title = get_token_tile(detail)
    logging.info('%s parse detial url done!'%bookid)
    authorize = parse_authorize_url(bookid, AuthorizeToken)
    aes_key, book_url, SplitFileUrls = get_key_urls(authorize)
    logging.info('%s parse authorize url done!'%bookid)
    get_bookmark(bookid)
    # The AES page key arrives RSA-encrypted; decrypt it with our private key.
    rsakey = RSA.importKey(rsa['privateKey'])
    cipher = Cipher_pkcs1_v1_5.new(rsakey)
    text = cipher.decrypt(base64.b64decode(aes_key), None)
    aes_key = str(text, encoding='utf-8')
    logging.info('%s decrypt AES key done!'%bookid)
    print(SplitFileUrls)
    page_sum = len(SplitFileUrls)
    tasks = [i for i in range(1, page_sum+1)]
    # BUG FIX: the existence check previously built the path as
    # Path(TaskFilePath + bookid + '.txt') with no separator, while
    # load_taskfile/save_taskfile join with os.path.join — so an existing
    # checkpoint was never detected and every run restarted from page 1.
    tasks_path = Path(TaskFilePath, bookid + '.txt')
    if tasks_path.exists():
        tasks = load_taskfile(bookid)
        logging.info('%s taskfile already exist, jump'%bookid)
    else:
        # First run for this book: checkpoint the full task list.
        save_taskfile(bookid, tasks)
    while tasks:
        # popleft-style: take the next pending page number.
        num = tasks.pop(0)
        try:
            page_url = SplitFileUrls[num - 1]
            download_pdf_page(page_url, bookid, Title, num, aes_key)
            # 存档: checkpoint after each successful page.
            save_taskfile(bookid, tasks)
            logging.info('%s<%s> page %d/%d OK [%d/%d]'%(bookid, Title, num, page_sum, book_cnt, booksum))
            # Randomized pause to avoid hammering the server.
            slptime = round(random.uniform(10 ,20))
            time.sleep(slptime)
        except:
            # Failed pages go to the back of the queue and the checkpoint is
            # rewritten so they are retried later.
            tasks.append(num)
            save_taskfile(bookid, tasks)
            logging.error('%s<%s> page %d/%d ERROR [%d/%d]'%(bookid, Title, num, page_sum, book_cnt, booksum))
    # 合并PDF: merge the per-page PDFs into a single book PDF.
    floder = os.path.join(BookPath, bookid)
    if not os.path.exists(floder):
        os.makedirs(floder)
    # 检查是否已经转换过 (skip if the merged PDF already exists).
    pdf = Path(BookPath, bookid, bookid+'-'+Title+'.pdf')
    if not pdf.exists():
        logging.info('%s merge pdf ...'%bookid)
        pdfpath = os.path.join(DownloadPath, bookid)
        try:
            file_name_walk(pdfpath, bookid)
            logging.info('%s merge pdf done!'%bookid)
            book_cnt += 1
        except:
            logging.error('%s merge pdf error!'%bookid)
    else:
        book_cnt += 1
        logging.warning('%s pdf already exists!'%bookid)
def download_books(bookid_list):
    """Download every book id in *bookid_list*, pausing between books.

    Creates the download/output directories on first use, then drains the
    list one book at a time via download_book().  Errors in a single book
    are logged and the batch continues.
    """
    BOOKLIST = bookid_list
    if not os.path.exists(DownloadPath):
        os.makedirs(DownloadPath)
    if not os.path.exists(BookPath):
        os.makedirs(BookPath)
    if not isinstance(BOOKLIST, list):
        logging.error('BOOKLIST is not List!')
        return
    while BOOKLIST:
        bookid = BOOKLIST.pop(0)
        bookpath = os.path.join(DownloadPath, bookid)
        if not os.path.exists(bookpath):
            os.makedirs(bookpath)
        logging.info('%s bookid path generate done!'%bookid)
        try:
            start = time.time()
            download_book(bookid, len(BOOKLIST))
            print('ok')
            end = time.time()
            # BUG FIX: the format arguments were swapped — the elapsed time
            # was fed to %s and the string bookid to %d, raising TypeError
            # that the bare except below silently swallowed.
            logging.info('%s book download cost %d s'%(bookid, round(end-start)))
            logging.info('sleep a while...')
            # Long randomized pause (8-10 minutes) between books.
            slptime = round(random.uniform(8*60,10*60))
            time.sleep(slptime)
            logging.info('start next book...')
        except:
            logging.error('error in parse!')
    logging.info('ALL Down!!!')
if __name__ == '__main__':
download_books(['19526294'])
# if BookQueue:
# logging.info('BookQueue have %d book'%len(BookQueue))
# download_books(BookQueue)
|
25,371 | 692f4dc4be5587697b4de90c5075d1f5d4f098a3 | from django import forms
from django.core.exceptions import ValidationError
from . import models
class reserv_room(forms.Form):
    """
    Form for creating a meeting-room reservation.

    Validates that the end datetime is strictly after the start datetime.
    """
    # Reservation ID
    reserv_id = forms.CharField(
        label="ID",
        max_length=20,
        required=True,
        widget=forms.TextInput(attrs={"class": "reserve_type_text"}),
    )
    # Meeting room (choice backed by the room_info table)
    room_id = forms.ModelChoiceField(
        models.room_info.objects, label="会議室名", required=True,
    )
    # Person making the reservation
    reserv_name = forms.CharField(
        label="利用者",
        max_length=20,
        required=True,
        widget=forms.TextInput(attrs={"class": "reserve_type_text"}),
    )
    # Reservation start datetime
    start_date_time = forms.SplitDateTimeField(label="開始日時", required=True)
    # Reservation end datetime
    end_date_time = forms.SplitDateTimeField(label="終了日時", required=True)

    def clean(self):
        """Cross-field validation: the reservation must end after it starts."""
        all_clean_data = super().clean()
        # NOTE(review): direct indexing raises KeyError if either datetime
        # field failed its own validation; consider .get() — confirm intent.
        start_date_time = all_clean_data["start_date_time"]
        end_date_time = all_clean_data["end_date_time"]
        if start_date_time >= end_date_time:
            raise forms.ValidationError("終了日時は開始日時より後を選択して下さい。")
        return all_clean_data
class reserv_room_info(forms.Form):
    """
    Form for registering / editing a meeting room's master data.
    """
    # Meeting-room ID
    room_id = forms.CharField(
        label="会議室ID",
        max_length=20,
        required=True,
        widget=forms.TextInput(attrs={"class": "reserve_type_text"}),
    )
    # Meeting-room name
    room_name = forms.CharField(
        label="会議室名",
        max_length=20,
        required=True,
        widget=forms.TextInput(attrs={"class": "reserve_type_text"}),
    )
25,372 | 4fa7c21c21e55f3d92046708f6889151e439b743 | # -*- coding: utf-8 -*-
## modele parameters.
# size of the simulation
NC = 40
NL = 30
# pandemia characteristic
MORTALITY = 10
## view parameters.
CELL_SIZE = 20
|
25,373 | 79b58ad4b72ef97c63e197e50732b4cf7d67789b | from datetime import datetime
from datetime import timedelta
from django.core.urlresolvers import reverse_lazy
from django.utils.encoding import smart_text
from quiz.polls.factories import QuizFactory, SolutionFactory
from quiz.utils.unittestcase import TestCase
class QuizListViewTest(TestCase):
    """Tests for the quiz list view (polls:quiz_list)."""

    url = reverse_lazy('polls:quiz_list')

    def test_quiz_list_show_quizes(self):
        """Only quizzes already published appear in the list."""
        # Empty database: nothing to show.
        response = self.client.get(self.url)
        self.assertEqual(len(response.context['quizes']), 0)
        # A quiz published in the future must stay hidden.
        QuizFactory(pub_date=datetime.now()+timedelta(days=1))
        response = self.client.get(self.url)
        self.assertEqual(len(response.context['quizes']), 0)
        # A currently-published quiz appears, with its name rendered.
        quiz = QuizFactory()
        response = self.client.get(self.url)
        self.assertEqual(len(response.context['quizes']), 1)
        self.assertIn(quiz.name, smart_text(response))
class SolutionListViewTest(TestCase):
    """Tests for the solution list view (polls:solution_list)."""

    url = reverse_lazy('polls:solution_list')

    def test_solution_list_show_own_solutions(self):
        """A regular user sees only their own solutions."""
        self.user = self.login()
        response = self.client.get(self.url)
        self.assertEqual(len(response.context['solutions']), 0)
        # Another user's solution must not be listed.
        SolutionFactory()
        response = self.client.get(self.url)
        self.assertEqual(len(response.context['solutions']), 0)
        # The logged-in user's own solution is listed, with its result shown.
        solution = SolutionFactory(user=self.user)
        response = self.client.get(self.url)
        self.assertEqual(len(response.context['solutions']), 1)
        self.assertIn(str(solution.result), smart_text(response))

    def test_solution_list_show_all_if_is_staff(self):
        """A staff user sees every solution, not just their own."""
        self.user = self.login(is_staff=True)
        response = self.client.get(self.url)
        self.assertEqual(len(response.context['solutions']), 0)
        solution1 = SolutionFactory()
        response = self.client.get(self.url)
        self.assertEqual(len(response.context['solutions']), 1)
        solution2 = SolutionFactory(user=self.user)
        response = self.client.get(self.url)
        self.assertEqual(len(response.context['solutions']), 2)
        self.assertIn(str(solution1.result), smart_text(response))
        self.assertIn(str(solution2.result), smart_text(response))
25,374 | 2a64dc783575e07495017c9ad412d3a0f45cde4f | #------------------------------------
#--------import error check----------
import argparse
import yara
from gp_lib.yaraparse import yaraparse
#--------import error check----------
#------------------------------------
def main():
    """Run Groom-Porter on one yara file.

    Command line:
        yarafile     path of the yara file to process (positional)
        -i/--ignore  skip the pre-parse compile check (best effort parse)

    FIX: the original used Python-2-only ``print`` statements; the
    function-call form below behaves identically on both Python 2 and 3.
    """
    parser = argparse.ArgumentParser(description='run Groom-Porter on yarafile')
    parser.add_argument('yarafile', type=str, nargs=1, help='yara file to process')
    parser.add_argument('-i', '--ignore', action='store_true', help='ignore compile errors')
    args = parser.parse_args()

    yara_p = yaraparse(args.yarafile[0])
    if args.ignore:
        # Parse even if the rules would not compile.
        print("WARNING: Groom-Porter is designed to parse yara files that compile without error. Ignoring this step may result in erroneous data")
        print("\nParsing file: %s..." % yara_p.path)
        yara_p.parse()
        print(yara_p)
    else:
        valid, why = yara_p.compile_check()
        if valid:
            print('no compile errors')
            print("\nParsing file: %s..." % yara_p.path)
            yara_p.parse()
            print(yara_p)
        else:
            # Refuse to parse rules that do not compile cleanly.
            print('Compile Error: %s' % str(why))


if __name__ == "__main__":
    main()
|
25,375 | 9fee566fe53e01caa76356f468a080ddf2381378 | """
Auteur: Ludovic Malet
Date: juillet 2018
Description: Permet de convertir les fichiers PPT et PPTX en PDF
"""
import win32com.client
import glob
import os
class PPT2PDF:
    """Convert every PowerPoint file (.ppt/.pptx) in a folder to PDF.

    Uses COM automation, so this only works on Windows with PowerPoint
    installed.
    """

    def __init__(self, folder):
        self.folder = folder
        # '*.ppt*' matches both .ppt and .pptx.
        self.files = glob.glob(os.path.join(folder, "*.ppt*"))
        self.formatType = 32  # PowerPoint SaveAs constant for PDF (ppSaveAsPDF)

    def convert(self):
        """Open each presentation and save it next to the source as .pdf."""
        powerpoint = win32com.client.Dispatch("Powerpoint.Application")
        powerpoint.Visible = 1
        try:
            for file in self.files:
                # BUG FIX: the original did os.getcwd() + '\\' + file, which
                # produces a broken path whenever `folder` is absolute.
                # os.path.abspath handles both relative and absolute inputs.
                filename = os.path.abspath(file)
                newname = os.path.splitext(filename)[0] + ".pdf"
                try:
                    deck = powerpoint.Presentations.Open(filename)
                    deck.SaveAs(newname, self.formatType)
                    deck.Close()
                except Exception as inst:
                    # Best effort: report the failure and continue with the
                    # remaining files.
                    print(type(inst))
                    print(inst.args)
                    print(inst)
        finally:
            # Always release the PowerPoint COM instance, even on error.
            powerpoint.Quit()
25,376 | abea259fff4ccad5787a238002d30d1d9e532767 | # -*- coding: utf-8 -*-
from os import path, listdir, mkdir, makedirs
import numpy as np
np.random.seed(1)
import random
random.seed(1)
import timeit
import cv2
from skimage.color import label2rgb
from tqdm import tqdm
from multiprocessing import Pool
import lightgbm as lgb
from train_classifier import get_inputs
import pandas as pd
# Fixed I/O locations for the second-stage (LightGBM) pipeline.
test_pred_folder = '/wdata/merged_pred'
lgbm_models_folder = '/wdata/lgbm_models'
out_pred = '/wdata/lgbm_pred'
DATA_THREADS = 4 #for p2.xlarge instance
num_split_iters = 5  # random splits used at training time
folds_count = 5      # CV folds per split -> num_split_iters * folds_count boosters
sep_count = 3        # number of watershed-separation levels per candidate region
best_thr = [0.25, 0.25, 0.2]  # probability thresholds per nadir band (low/mid/high)
if __name__ == '__main__':
    # Second-stage inference: score candidate building regions produced by the
    # segmentation stage with an ensemble of LightGBM boosters and write one
    # labelled .tif mask per input .png prediction.
    t0 = timeit.default_timer()

    makedirs(out_pred, exist_ok=True)

    all_files = []
    inputs = []
    outputs = []
    inputs2 = []
    outputs2 = []
    labels = []
    labels2 = []
    separated_regions = []
    fns = []
    paramss = []

    # Load the full ensemble: one booster per (split, fold) pair.
    gbm_models = []
    for it in range(num_split_iters):
        for it2 in range(folds_count):
            gbm_models.append(lgb.Booster(model_file=path.join(lgbm_models_folder, 'gbm_model_{0}_{1}.txt'.format(it, it2))))

    inputs = []
    paramss = []
    used_ids = []
    all_nadir_idxs = []
    # Collect per-image feature-extraction jobs; the nadir angle encoded in the
    # filename selects which entry of best_thr is used later.
    for f in tqdm(listdir(test_pred_folder)):
        if '.png' in f and 'nadir' in f:
            tmp = f.split('_')
            nadir = int(tmp[1].split('nadir')[1])
            nad_idx = 0
            if nadir > 40:
                nad_idx = 2
            elif nadir > 25:
                nad_idx = 1
            all_nadir_idxs.append(nad_idx)
            paramss.append((f, test_pred_folder, [nadir], True, None))
            used_ids.append(f)

    inputs = []
    inputs2 = []
    labels= []
    labels2 = []
    separated_regions= []
    # Extract region features in parallel (same get_inputs as at train time).
    # Per image: level-1 features/labels, per-separation-level features/labels,
    # and the mapping from level-1 regions to their separated sub-regions.
    with Pool(processes=DATA_THREADS) as pool:
        results = pool.starmap(get_inputs, paramss, len(paramss)//DATA_THREADS)
        for i in range(len(results)):
            inp, lbl, inp2, lbl2, sep_regs = results[i]
            inputs.append(inp)
            inputs2.append(inp2)
            labels.append(lbl)
            labels2.append(lbl2)
            separated_regions.append(sep_regs)

    print('Predicting...')

    new_test_ids = []
    rles = []
    bst_k = np.zeros((sep_count+1))  # histogram: which separation level won per region
    removed = 0    # candidate regions dropped by thresholding
    replaced = 0   # level-1 regions replaced by separated sub-regions
    total_cnt = 0
    im_idx = 0
    non_empty_cnt = 0
    for f in tqdm(used_ids):
        if path.isfile(path.join(test_pred_folder, f)) and '.png' in f:
            inp = inputs[im_idx]
            pred = np.zeros((inp.shape[0]))
            pred2 = [np.zeros((inp2.shape[0])) for inp2 in inputs2[im_idx]]
            # Average the ensemble's probabilities for level-1 regions and for
            # every separation level.
            for m in gbm_models:
                if pred.shape[0] > 0:
                    pred += m.predict(inp)
                for k in range(len(inputs2[im_idx])):
                    if pred2[k].shape[0] > 0:
                        pred2[k] += m.predict(inputs2[im_idx][k])
            if pred.shape[0] > 0:
                pred /= len(gbm_models)
            for k in range(len(pred2)):
                if pred2[k].shape[0] > 0:
                    pred2[k] /= len(gbm_models)
            pred_labels = np.zeros_like(labels[im_idx], dtype='uint16')
            clr = 1  # next instance id to assign in the output mask
            for i in range(pred.shape[0]):
                # Decide whether splitting region i into sub-regions scores
                # better than keeping it whole (max_sep == -1 keeps it whole).
                max_sep = -1
                max_pr = pred[i]
                for k in range(len(separated_regions[im_idx])):
                    if len(separated_regions[im_idx][k][i]) > 0:
                        pred_lvl2 = pred2[k][separated_regions[im_idx][k][i]]
                        if len(pred_lvl2) > 1 and pred_lvl2.mean() > max_pr:
                            max_sep = k
                            max_pr = pred_lvl2.mean()
                            # NOTE(review): break stops at the first level whose
                            # mean wins, so later levels are not considered in
                            # that case -- confirm this is intended.
                            break
                        if len(pred_lvl2) > 1 and pred_lvl2.max() > max_pr:
                            max_sep = k
                            max_pr = pred_lvl2.max()
                if max_sep >= 0:
                    # Winning separation level: emit each sub-region that
                    # clears the nadir-band threshold as its own instance.
                    pred_lvl2 = pred2[max_sep][separated_regions[im_idx][max_sep][i]]
                    replaced += 1
                    for j in separated_regions[im_idx][max_sep][i]:
                        if pred2[max_sep][j] > best_thr[all_nadir_idxs[im_idx]]:
                            pred_labels[labels2[im_idx][max_sep] == j+1] = clr
                            clr += 1
                        else:
                            removed += 1
                else:
                    # Keep the whole level-1 region if it clears the threshold.
                    if pred[i] > best_thr[all_nadir_idxs[im_idx]]:
                        pred_labels[labels[im_idx] == i+1] = clr
                        clr += 1
                    else:
                        removed += 1
                bst_k[max_sep+1] += 1
            total_cnt += pred_labels.max()
            cv2.imwrite(path.join(out_pred, f.replace('.png', '.tif')), pred_labels)
            im_idx += 1
    print('total_cnt', total_cnt, 'removed', removed, 'replaced', replaced, 'not empty:', non_empty_cnt)
    print(bst_k)
    elapsed = timeit.default_timer() - t0
    print('Time: {:.3f} min'.format(elapsed / 60))
25,377 | bd46ccbc99e4add58804a3636585eb31c1cf828e | """plot ne-nfe time-nfe scatter figure"""
from matplotlib import pyplot as plt
import warnings; warnings.filterwarnings(action='once')
from matplotlib.ticker import FuncFormatter
import numpy as np
import random
# Input files: one row per experiment, tab-separated values (trailing tab
# yields an empty last field which is dropped before float conversion).
NE_path = '../saved_model/PDS/FlowODE-True/NE.txt'
nfe_path = '../saved_model/PDS/FlowODE-True/nfe.txt'
time_path = '../saved_model/PDS/FlowODE-True/time.txt'

# Embed fonts as editable text in PDF/PS output.
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42


def _read_first_row_fields(file_path):
    """Return the tab-separated fields of the first line of *file_path*.

    The trailing empty field produced by a terminating tab is kept; callers
    slice it off with [:-1] before numeric conversion.
    """
    with open(file_path, 'r') as fh:
        return fh.read().split("\n")[0].split("\t")


# FIX: the original copy-pasted the read/split stanza three times; one helper
# does the same work. The print order of the original is preserved exactly.
time_1 = _read_first_row_fields(time_path)
print(len(time_1))
NE_1 = _read_first_row_fields(NE_path)
nfe_1 = _read_first_row_fields(nfe_path)

NE_1 = [float(i) for i in NE_1[:-1]]
print(len(NE_1))
time_1 = [float(i) for i in time_1[:-1]]
nfe_1 = [float(i) for i in nfe_1[:-1]]
print(len(nfe_1))
print(len(time_1))

# Scatter MSE (left axis) and wall-clock time (right axis) against NFE.
f, ax = plt.subplots(figsize=(10, 10), dpi=350)
ax2 = ax.twinx()
ax.scatter(nfe_1, NE_1, color='slateblue', label='Error (MSE)', rasterized=True, marker='d', s=12)
ax2.scatter(nfe_1, time_1, color='g', label='Time (s)', marker='.', rasterized=True, s=12)
#ax.set_ylim([0, 0.085])
ax.set_ylim([0, 0.006])


def formatnum(x, pos):
    """Tick formatter: render y values as N x 10^-3 (0 stays 0)."""
    if x == 0:
        return 0
    return '$%.0f$x$10^{-3}$' % (x * 1000)


formatter = FuncFormatter(formatnum)
ax.yaxis.set_major_formatter(formatter)
ax.set_xlabel(r'$\mathcal{N}$ (number of function evaluations)', fontsize=15, horizontalalignment='center')
ax.set_ylabel('MSE', fontdict={'fontweight': 300, 'size': 15})
#plt.yticks(fontsize=20, alpha=1.)
plt.tick_params(labelsize=10)
ax2.set_ylabel("Time (s)", fontdict={'fontweight': 300, 'size': 15})
#plt.legend(loc = 'best')
plt.show()
25,378 | fca360c2ae6038d80220e118a63964da36c0ca7b | #!/usr/bin/env python
# -*- coding: utf-8 -*-
########################################################################
#
# Copyright (c) 2019 aibot.me, Inc. All Rights Reserved
#
########################################################################
"""
File: block_transformer.py
Author: wangyan
Date: 2019/09/10 11:53:24
Brief: block_gru
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os
import re
import time
import json
import logging
import h5py
import numpy as np
import tensorflow as tf
class BlockTransformer(object):
    """
    Transformer BLOCK: thin wrapper around a BERT-style transformer encoder
    stack (TF1 graph API).

    NOTE(review): `ops` calls `create_attention_mask_from_input_mask` and
    `transformer_model`, which are not defined or imported in this file --
    presumably provided by the surrounding project (e.g. BERT's modeling.py);
    confirm.
    """
    def __init__(self, config = {}):
        '''
        config: configuration dict for this block (defaults mirror BERT-base
        sizes except for depth and dropout).

        NOTE(review): the mutable default `config={}` is an anti-pattern; it
        is harmless here because config is only read via .get(), never
        mutated, but a None sentinel would be safer.
        '''
        self.name = "transformer_block"
        self.hidden_size = config.get('hidden_size', 768)
        self.num_hidden_layers = config.get('num_hidden_layers', 3)
        self.num_attention_heads = config.get('num_attention_heads', 12)
        self.intermediate_size = config.get('intermediate_size', 768)
        self.hidden_act = config.get('hidden_act', 'gelu')
        self.hidden_dropout_prob = config.get('hidden_dropout_prob', 0.5)
        self.attention_probs_dropout_prob = config.get('attention_probs_dropout_prob', 0.5)
        self.initializer_range = config.get('initializer_range', 0.1)
        self.finish_load = False

    def load(self, weight_file):
        # Stub. Intended to: read the h5py file, 1) initialise config,
        # 2) initialise weights. Currently only flips the flag.
        #
        self.finish_load = True
        return True

    def dump(self, weight_file):
        # Stub: weight serialisation not implemented yet.
        return True

    def ops(self, input_ids, input_mask, embedding_output):
        '''
        Build the encoder stack on top of `embedding_output` and return the
        last layer's sequence output.

        :param input_ids: token id tensor (used only to shape the mask)
        :param input_mask: 1/0 mask of valid positions
        :param embedding_output: embedded input sequence
        :return: last encoder layer output

        NOTE(review): when finish_load is True the encoder is not rebuilt and
        `self.all_encoder_layers` from a previous call is reused; if `ops` is
        called with finish_load True before any build, this raises
        AttributeError -- confirm intended call order.
        '''
        if self.finish_load:
            pass
        else:
            attention_mask = create_attention_mask_from_input_mask(
                input_ids, input_mask)
            self.all_encoder_layers = transformer_model(
                input_tensor=embedding_output,
                attention_mask=attention_mask,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                intermediate_act_fn=self.get_activation(self.hidden_act),
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                initializer_range=self.initializer_range,
                do_return_all_layers=True)
        sequence_output = self.all_encoder_layers[-1]
        return sequence_output

    def get_activation(self, activation_string):
        """Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.
        Args:
            activation_string: String name of the activation function.
        Returns:
            A Python function corresponding to the activation function. If
            `activation_string` is None, empty, or "linear", this will return None.
            If `activation_string` is not a string, it will return `activation_string`.
        Raises:
            ValueError: The `activation_string` does not correspond to a known
            activation.
        """
        act = activation_string.lower()
        if act == "linear":
            return None
        elif act == "relu":
            return tf.nn.relu
        elif act == "gelu":
            return self.gelu
        elif act == "tanh":
            return tf.tanh
        else:
            raise ValueError("Unsupported activation: %s" % act)

    def gelu(self, input_tensor):
        """Gaussian Error Linear Unit.
        This is a smoother version of the RELU.
        Original paper: https://arxiv.org/abs/1606.08415
        Args:
            input_tensor: float Tensor to perform activation.
        Returns:
            `input_tensor` with the GELU activation applied.
        """
        # Exact (erf-based) GELU rather than the tanh approximation.
        cdf = 0.5 * (1.0 + tf.erf(input_tensor / tf.sqrt(2.0)))
        return input_tensor * cdf
|
25,379 | c3b105bfdf4932db3d0cc4fb3c862c0e50688370 | import pytest
from flask import url_for
from atat.app import make_app, make_config
from tests.factories import UserFactory
@pytest.fixture
def csrf_enabled_app(app):
    """Temporarily switch CSRF protection on for the shared app fixture."""
    app.config.update({"WTF_CSRF_ENABLED": True})
    yield app
    # Teardown: restore the default so other tests are unaffected.
    app.config.update({"WTF_CSRF_ENABLED": False})
def test_csrf_error(csrf_enabled_app, client):
    """POSTing with an invalid CSRF token lands on the session-expired page."""
    resp = client.post(
        url_for("users.user"),
        headers={"Content-Type": "application/x-www-form-urlencoded"},
        data="csrf_token=invalid_token",
        follow_redirects=True,
    )
    page = resp.data.decode()
    assert "Session Expired" in page
    assert "Log in required" in page
@pytest.fixture
def blowup_app(notification_sender):
    """Build an app (DEBUG off) with a /throw route that always raises."""
    crashing_app = make_app(make_config(direct_config={"default": {"DEBUG": False}}))
    crashing_app.notification_sender = notification_sender

    @crashing_app.route("/throw")
    def throw():
        raise ValueError()

    yield crashing_app
@pytest.fixture
def blowup_client(blowup_app):
    """Test client bound to the intentionally-crashing app."""
    test_client = blowup_app.test_client()
    yield test_client
def test_errors_generate_notifications(
    blowup_client, client, user_session, notification_sender
):
    """An unhandled server-side error sends exactly one error notification."""
    user_session(UserFactory.create())
    # /throw raises ValueError inside the non-debug app; the error handler
    # is expected to report it through the notification sender.
    blowup_client.get("/throw")
    notification_sender.send.assert_called_once()
|
25,380 | 2fba6d89aa8fe6686af3028fde0e4a5310986b3c | from sqlalchemy import *
from sqlalchemy.orm import sessionmaker,mapper
from sqlalchemy.ext.declarative import declarative_base
# Declarative base shared by all ORM models in this module.
Base = declarative_base()
class ImeiRecord(Base):
    """ORM model for one stored IMEI entry."""
    __tablename__ = 'ImeiRecord'
    id = Column(Integer, primary_key=True)
    imei = Column(String(15))   # full device IMEI
    head = Column(String(15))   # IMEI prefix, used for paged lookups
    state=Column(String(15))    # processing state; '0' is queried as "pending" below
    xml=Column(String(1024))    # associated XML payload
    date = Column(String(15))   # stored as text, not a date type

    def __repr__(self):
        return "<Imei(imei='%s', head='%s', state='%s',xml='%s',date='%s')>" % (self.imei, self.head,self.state,self.xml, self.date)
class xmldbhelpper():
    """Thin SQLAlchemy session wrapper around the ImeiRecord table."""

    def __init__(self, base, dburl):
        self.db = create_engine(dburl)
        self.base = base
        self.db.echo = False
        # Create any missing tables declared on the base.
        self.base.metadata.create_all(self.db)
        session_factory = sessionmaker(bind=self.db)
        self.session = session_factory()

    def AddImei(self, imeicode):
        """Persist one record; return 'aok' on success, the error text otherwise."""
        try:
            self.session.add(imeicode)
            self.session.commit()
        except Exception as exc:
            return str(exc)
        return 'aok'

    def QueryImei(self, imei):
        """Return every record whose imei equals *imei*."""
        return self.session.query(ImeiRecord).filter(ImeiRecord.imei == imei).all()

    def QueryHead(self, head, b):
        """Return one page (10 rows from offset *b*) for *head*, newest first."""
        matches = self.session.query(ImeiRecord).filter(ImeiRecord.head == head)
        return matches.order_by(ImeiRecord.id.desc()).limit(10).offset(b).all()

    def QueryAll(self):
        """Return every record, newest first."""
        return self.session.query(ImeiRecord).order_by(ImeiRecord.id.desc()).all()

    def QueryC(self):
        """Return every record whose state is '0' (pending), newest first."""
        pending = self.session.query(ImeiRecord).filter(ImeiRecord.state == '0')
        return pending.order_by(ImeiRecord.id.desc()).all()

    def closeall(self):
        """Close the session; return 'cok' on success, the error text otherwise."""
        try:
            self.session.close()
        except Exception as exc:
            return str(exc)
        return 'cok'
def InitDb(url):
    """Open (or create) the database at *url* and return a bound helper."""
    return xmldbhelpper(Base, url)
def AddImei(imeicode,db):
    """Module-level convenience wrapper for db.AddImei."""
    return db.AddImei(imeicode)
def QueryImei(imei,db):
    """Module-level convenience wrapper for db.QueryImei."""
    return db.QueryImei(imei)
def CloseDb(db):
    """Close the helper's session; returns its status string."""
    return db.closeall()
def QueryHead(db,head,b):
    """Module-level convenience wrapper for db.QueryHead (page offset *b*)."""
    return db.QueryHead(head, b)
def QueryAll(db):
    """Module-level convenience wrapper for db.QueryAll."""
    return db.QueryAll()
def QueryC(db):
    """Module-level convenience wrapper for db.QueryC (pending records)."""
    return db.QueryC()
"""
sqlpath="sqlite:///emcdb//emc.db"
db=InitDb(sqlpath)
QueryHead(db,'l99m',0)
QueryHead(db,'l99m',1)
QueryHead(db,'l99m',2)
QueryHead(db,'l99m',3)
""" |
25,381 | 6d5824e9a6504005e2ca852e02af09bf4ae963ad | import os
import sys
import os.path
import compat
import ConfigParser
# Module-level settings populated by load_settings().
settings = {}


def load_settings():
    """Populate the module-level ``settings`` dict from default.conf.

    Reads the AcousticBrainz host and the user's API key. Exits the process
    when the API key is missing, since the client cannot work without it.

    NOTE(review): a missing [user] section would raise
    ConfigParser.NoSectionError, which is not caught here -- confirm whether
    that should be handled the same way.
    """
    defaultfile = os.path.join(os.path.dirname(__file__), "default.conf")
    config = compat.RawConfigParser()
    config.read(defaultfile)
    settings["host"] = config.get("acousticbrainz", "host")
    try:
        settings["user_api_key"] = config.get("user", "api_key")
    except ConfigParser.NoOptionError:
        print('Please put your API key in the config file')
        # BUG FIX: exit with a non-zero status (was sys.exit(0)) so callers
        # and shell scripts can detect the failure.
        sys.exit(1)
|
25,382 | 8bf43fed353e7b8eb205d9ad85271278f66fa3e5 | import os
import platform
import textwrap
import pytest
from conans.model.recipe_ref import RecipeReference
from conans.test.assets.genconanfile import GenConanfile
from conans.test.utils.tools import TestClient, TurboTestClient
from conans.util.files import save, load
@pytest.fixture
def conanfile():
    """Conanfile source using vs_layout(), with a '{libpath}' placeholder the
    tests fill in with the MSBuild <platform>/<config> subpath."""
    conanfile = str(GenConanfile()
                    .with_import("import os")
                    .with_setting("build_type").with_setting("arch")
                    .with_import("from conan.tools.microsoft import vs_layout")
                    .with_import("from conan.tools.files import AutoPackager, save"))
    # Appended methods must match the indentation of the generated class body.
    conanfile += """
    def source(self):
        save(self, "include/myheader.h", "")
    def build(self):
        save(self, "{libpath}/mylib.lib", "")
    def layout(self):
        vs_layout(self)
    def package(self):
        AutoPackager(self).run()
    """
    return conanfile
# Conan arch setting -> MSBuild platform subfolder (None: x86 has no subfolder).
subfolders_arch = {"armv7": "ARM", "armv8": "ARM64", "x86": None, "x86_64": "x64"}
@pytest.mark.parametrize("arch", ["x86_64", "x86", "armv7", "armv8"])
@pytest.mark.parametrize("build_type", ["Debug", "Release"])
def test_layout_in_cache(conanfile, build_type, arch):
    """The layout in the cache is used too, always relative to the "base" folders that the cache
    requires. But by the default, the "package" is not followed
    """
    client = TurboTestClient()
    # Build the MSBuild-style <platform>/<config> subpath the recipe writes to.
    libarch = subfolders_arch.get(arch)
    libpath = "{}{}".format(libarch + "/" if libarch else "", build_type)
    ref = RecipeReference.loads("lib/1.0")
    pref = client.create(ref, args="-s arch={} -s build_type={}".format(arch, build_type),
                         conanfile=conanfile.format(libpath=libpath))
    bf = client.cache.pkg_layout(pref).build()
    pf = client.cache.pkg_layout(pref).package()
    # Check the build folder
    assert os.path.exists(os.path.join(os.path.join(bf, libpath), "mylib.lib"))
    # Check the package folder: AutoPackager flattens to lib/ and include/.
    assert os.path.exists(os.path.join(pf, "lib/mylib.lib"))
    assert os.path.exists(os.path.join(pf, "include", "myheader.h"))
@pytest.mark.parametrize("arch", ["x86_64", "x86", "armv7", "armv8"])
@pytest.mark.parametrize("build_type", ["Debug", "Release"])
def test_layout_with_local_methods(conanfile, build_type, arch):
    """Same vs_layout recipe driven through the local flow (install/source/build)
    instead of the cache: folders are created relative to the working copy.
    """
    client = TestClient()
    libarch = subfolders_arch.get(arch)
    libpath = "{}{}".format(libarch + "/" if libarch else "", build_type)
    client.save({"conanfile.py": conanfile.format(libpath=libpath)})
    client.run("install . --name=lib --version=1.0 -s build_type={} -s arch={}".format(build_type, arch))
    client.run("source .")
    # Check the source folder (release)
    assert os.path.exists(os.path.join(client.current_folder, "include", "myheader.h"))
    client.run("build . --name=lib --version=1.0 -s build_type={} -s arch={}".format(build_type,
                                                                                    arch))
    # Check the build folder (release)
    assert os.path.exists(os.path.join(os.path.join(client.current_folder, libpath), "mylib.lib"))
@pytest.mark.skipif(platform.system() != "Windows", reason="Removing msvc compiler")
def test_error_no_msvc():
    """cmake_layout must still work when msvc is removed from settings.yml.

    Regression test: https://github.com/conan-io/conan/issues/9953
    """
    conanfile = textwrap.dedent("""
        from conan import ConanFile
        from conan.tools.cmake import cmake_layout
        class Pkg(ConanFile):
            settings = "os", "compiler", "build_type", "arch"
            def layout(self):
                cmake_layout(self)
        """)
    # Custom settings.yml that only knows gcc (no msvc entry at all).
    settings_yml = textwrap.dedent("""
        os: [Windows]
        os_build: [Windows]
        arch_build: [x86_64]
        compiler:
            gcc:
                version: ["8"]
        build_type: [Release]
        arch: [x86_64]
        """)
    client = TestClient()
    client.save({"conanfile.py": conanfile})
    save(client.cache.settings_path, settings_yml)
    client.run('install . -s os=Windows -s build_type=Release -s arch=x86_64 '
               '-s compiler=gcc -s compiler.version=8 '
               '-s:b os=Windows -s:b build_type=Release -s:b arch=x86_64 '
               '-s:b compiler=gcc -s:b compiler.version=8')
    assert "Installing" in client.out
def test_error_no_build_type():
    """cmake_layout requires the build_type setting and must say so clearly.

    Regression test: https://github.com/conan-io/conan/issues/9953
    """
    conanfile = textwrap.dedent("""
        from conan import ConanFile
        from conan.tools.cmake import cmake_layout
        class Pkg(ConanFile):
            settings = "os", "compiler", "arch"
            def layout(self):
                cmake_layout(self)
        """)
    client = TestClient()
    client.save({"conanfile.py": conanfile})
    client.run('install .', assert_error=True)
    assert " 'build_type' setting not defined, it is necessary for cmake_layout()" in client.out
def test_cmake_layout_external_sources():
    """cmake_layout(src_folder="src"): source() output lands in src/, exported
    files stay one level up, and both cache create and local flow work."""
    conanfile = textwrap.dedent("""
        import os
        from conan import ConanFile
        from conan.tools.cmake import cmake_layout
        from conan.tools.files import save, copy, load
        class Pkg(ConanFile):
            settings = "os", "build_type"
            exports_sources = "exported.txt"
            def layout(self):
                cmake_layout(self, src_folder="src")
            def generate(self):
                save(self, "generate.txt", "generate")
            def source(self):
                save(self, "source.txt", "foo")
            def build(self):
                c1 = load(self, os.path.join(self.source_folder, "source.txt"))
                c2 = load(self, os.path.join(self.source_folder, "..", "exported.txt"))
                save(self, "build.txt", c1 + c2)
            def package(self):
                copy(self, "build.txt", self.build_folder, os.path.join(self.package_folder, "res"))
        """)
    client = TestClient()
    client.save({"conanfile.py": conanfile, "exported.txt": "exported_contents"})
    client.run("create . --name=foo --version=1.0 -s os=Linux")
    assert "Packaged 1 '.txt' file: build.txt" in client.out
    # Local flow
    client.run("install . --name=foo --version=1.0 -s os=Linux")
    assert os.path.exists(os.path.join(client.current_folder,
                                       "build", "Release", "generators", "generate.txt"))
    client.run("source .")
    assert os.path.exists(os.path.join(client.current_folder, "src", "source.txt"))
    client.run("build .")
    # build.txt = source() output + exported file -> both folders resolved right.
    contents = load(os.path.join(client.current_folder, "build", "Release", "build.txt"))
    assert contents == "fooexported_contents"
    client.run("export-pkg . --name=foo --version=1.0")
    assert "Packaged 1 '.txt' file: build.txt" in client.out
@pytest.mark.parametrize("with_build_type", [True, False])
def test_basic_layout_external_sources(with_build_type):
    """basic_layout(src_folder="src"): build folder is 'build-release' when the
    recipe declares build_type, plain 'build' otherwise."""
    conanfile = textwrap.dedent("""
        import os
        from conan import ConanFile
        from conan.tools.layout import basic_layout
        from conan.tools.files import save, load, copy
        class Pkg(ConanFile):
            settings = "os", "compiler", "arch"{}
            exports_sources = "exported.txt"
            def layout(self):
                basic_layout(self, src_folder="src")
            def generate(self):
                save(self, "generate.txt", "generate")
            def source(self):
                save(self, "source.txt", "foo")
            def build(self):
                c1 = load(self, os.path.join(self.source_folder, "source.txt"))
                c2 = load(self, os.path.join(self.source_folder, "..", "exported.txt"))
                save(self, "build.txt", c1 + c2)
            def package(self):
                copy(self, "build.txt", self.build_folder, os.path.join(self.package_folder, "res"))
        """)
    # The '{}' placeholder optionally appends build_type to the settings tuple.
    if with_build_type:
        conanfile = conanfile.format(', "build_type"')
    else:
        conanfile = conanfile.format("")
    client = TestClient()
    client.save({"conanfile.py": conanfile, "exported.txt": "exported_contents"})
    client.run("create . --name=foo --version=1.0 -s os=Linux")
    assert "Packaged 1 '.txt' file: build.txt" in client.out
    # Local flow
    build_folder = "build-release" if with_build_type else "build"
    client.run("install . --name=foo --version=1.0 -s os=Linux")
    assert os.path.exists(os.path.join(client.current_folder, build_folder, "conan", "generate.txt"))
    client.run("source .")
    assert os.path.exists(os.path.join(client.current_folder, "src", "source.txt"))
    client.run("build .")
    contents = load(os.path.join(client.current_folder, build_folder, "build.txt"))
    assert contents == "fooexported_contents"
    client.run("export-pkg . --name=foo --version=1.0")
    assert "Packaged 1 '.txt' file: build.txt" in client.out
@pytest.mark.parametrize("with_build_type", [True, False])
def test_basic_layout_no_external_sources(with_build_type):
    """Same as the external-sources case but without a source() method: the
    exported file is read directly from the source folder."""
    conanfile = textwrap.dedent("""
        import os
        from conan import ConanFile
        from conan.tools.layout import basic_layout
        from conan.tools.files import save, load, copy
        class Pkg(ConanFile):
            settings = "os", "compiler", "arch"{}
            exports_sources = "exported.txt"
            def layout(self):
                basic_layout(self)
            def generate(self):
                save(self, "generate.txt", "generate")
            def build(self):
                contents = load(self, os.path.join(self.source_folder, "exported.txt"))
                save(self, "build.txt", contents)
            def package(self):
                copy(self, "build.txt", self.build_folder, os.path.join(self.package_folder,
                     "res"))
        """)
    # The '{}' placeholder optionally appends build_type to the settings tuple.
    if with_build_type:
        conanfile = conanfile.format(', "build_type"')
    else:
        conanfile = conanfile.format("")
    client = TestClient()
    client.save({"conanfile.py": conanfile, "exported.txt": "exported_contents"})
    client.run("create . --name=foo --version=1.0 -s os=Linux")
    assert "Packaged 1 '.txt' file: build.txt" in client.out
    # Local flow
    client.run("install . --name=foo --version=1.0 -s os=Linux")
    build_folder = "build-release" if with_build_type else "build"
    assert os.path.exists(os.path.join(client.current_folder, build_folder, "conan", "generate.txt"))
    client.run("build .")
    contents = load(os.path.join(client.current_folder, build_folder, "build.txt"))
    assert contents == "exported_contents"
    client.run("export-pkg . --name=foo --version=1.0")
    assert "Packaged 1 '.txt' file: build.txt" in client.out
def test_cmake_layout_custom_build_folder():
    """cmake_layout honours a custom build_folder name.

    Regression test: https://github.com/conan-io/conan/issues/11838
    """
    conanfile = textwrap.dedent("""
        from conan import ConanFile
        from conan.tools.cmake import cmake_layout
        class Pkg(ConanFile):
            settings = "os", "build_type"
            generators = "CMakeToolchain"
            def layout(self):
                cmake_layout(self, src_folder="src", build_folder="mybuild")
        """)
    client = TestClient()
    client.save({"conanfile.py": conanfile})
    client.run("install .")
    # Generators must land under the custom folder, not the default 'build'.
    assert os.path.exists(os.path.join(client.current_folder,
                                       "mybuild/Release/generators/conan_toolchain.cmake"))
|
25,383 | bb67cac25136fa628ed66ebea55c2024e45d92f1 | import numpy as np
import sys
import random as rd
from loadData import LoadData
class Inspection():
    """Compute the majority-vote error rate and Gini impurity of a labelled
    dataset (rows whose last column is a binary label)."""

    def __init__(self,ori_dataset):
        self.ori_dataset = ori_dataset
        self.ld = LoadData()
        self.er = 0  # last computed error rate
        self.gi = 0  # last computed gini impurity

    # majority vote
    def majority_vote(self,dataset):
        """Return the most frequent label; ties go to label[1].

        NOTE(review): label order comes from LoadData.get_value -- presumably
        the distinct values of the last column in first-seen order; confirm.
        The count2==0 branch is only reachable for an empty dataset, where
        label[0] would raise IndexError -- confirm intended behaviour.
        """
        count1 = 0
        label = self.ld.get_value(dataset,-1)
        for row in dataset:
            if row[-1] == label[0]:
                count1 += 1
            else:
                continue
        count2 = len(dataset) - count1
        if count1 > count2:
            return label[0]
        elif count2 > count1:
            return label[1]
        elif count2==0:
            return label[0]
        else:
            return label[1]

    # error rate
    def error_rate(self,dataset):
        """Fraction of rows whose label differs from the majority label.

        NOTE(review): raises ZeroDivisionError for an empty dataset.
        """
        label = self.majority_vote(dataset)
        count = 0
        for row in dataset:
            if row[-1] != label:
                count += 1
        self.er = count/len(dataset)
        return self.er

    #gini impurity
    def gini_impurity(self,dataset):
        """Binary Gini impurity 2*p*(1-p), where p is the share of rows whose
        label matches the first row's label; 0 for an empty dataset."""
        if len(dataset)==0:
            self.gi=0
        else:
            count1 = 0
            for item in dataset:
                if item[-1]==dataset[0][-1]:
                    count1+=1
            count2 = len(dataset)-count1
            # Equivalent to 1 - p^2 - (1-p)^2 for two classes.
            self.gi = (count1/len(dataset))*(count2/len(dataset))+(count2/len(dataset))*(count1/len(dataset))
        return self.gi

    # evaluate with error_rate and gini_impurity
    def evaluate(self):
        """Return (error_rate, gini_impurity) for the dataset given at init."""
        err_rate = self.error_rate(self.ori_dataset)
        gini_impurity = self.gini_impurity(self.ori_dataset)
        return err_rate,gini_impurity
if __name__ == '__main__':
    # Usage: python inspection.py <input file> <output file>
    infile, outfile = sys.argv[1], sys.argv[2]
    dataset = LoadData().load_data(infile)
    err_rate, gini_impurity = Inspection(dataset).evaluate()
    with open(outfile, 'w') as out:
        out.writelines("gini_impurity: {}\n".format(gini_impurity))
        out.writelines("error: {}\n".format(err_rate))
25,384 | 8d85727f3e96c3e1fd515b081c36ea23f846c2d2 | class pendulum():
def __init__(self, mass=1.0, length=1.0, theta=90.0, vel=0.0):
self.mass = mass
self.length = length
self.theta = theta
self.vel = vel
class double_pendulum():
def __init__(self, pendulum1=pendulum(), pendulum2=pendulum(), gravity=9.8):
self.pendulum1 = pendulum1
self.pendulum2 = pendulum2
self.gravity = gravity
|
25,385 | 5800c8f9601a00d217c1c0802c796dac8607f325 | #!/usr/bin/env python3
"""Metamath formula parser based on $a TOP ... $. axioms
Usage:
./r [-i <file>]
./r -h
Options:
-h, --help Show this message and exit.
-i <file>, --input <file> The .mm file to parse [default: set.mm]
"""
from type_docopt import docopt
import mmlib
from collections import defaultdict
import findsubst
from itertools import product
import json
"""
Exception: multiple parse trees found: at least '
('TOP.turnstile', (('wi', (('wor', (('cY', ()), ('crpss', ()))), ('wor', (('cra', (('wcel', (('cdif', (('cA.wceq', ()), ('cv', (('vu', ()),)))), ('cY', ()))), ('vu', ()), ('cpw', (('cA.wceq', ()),)))), ('crpss', ()))))),))
' and '
('TOP.turnstile', (('wi', (('wor', (('cY', ()), ('crpss', ()))), ('wor', (('cra', (('wcel', (('cdif', (('cA.wceq', ()), ('cv', (('vu', ()),)))), ('cY', ()))), ('vu', ()), ('cpw', (('wcel.cA', ()),)))), ('crpss', ()))))),))
'
"""
def parse(expression, top_kind, asss):
    """
    Find the one proof of [top_kind, *expression] using assertions from asss : kind -> (name -> assertion)
    """
    result = None
    for p in find_proofs((top_kind, *expression), asss):
        if result:
            # Grammar ambiguity: two distinct parse trees for one statement.
            raise Exception(
                "multiple parse trees found: at least '%s' and '%s'" % (result, p))
        result = p
        # NOTE(review): this break accepts the FIRST proof and makes the
        # ambiguity check above unreachable -- presumably added to work
        # around known-ambiguous grammars (see module header); confirm.
        break
    if not result:
        raise Exception("no parse tree found")
    return result
def find_proofs(expression, asss):
    """Yield proof trees for *expression* (a (kind, *symbols) tuple), trying
    each syntax assertion of the right kind and recursing on the variable
    substitutions it induces.

    NOTE(review): the early ``return`` stops after the first matching
    substitution's combinations, and the yielded_once bookkeeping below it is
    then unreachable -- confirm this short-circuit is intended.
    """
    yielded_once = False
    #print('>looking at:', expression)
    kind = expression[0]
    for name, assertion in asss[kind].items():
        #print('>>trying:', name, assertion.conclusion)
        # Simplification: syntax $a axioms are assumed to carry no $e hypotheses.
        assert len(assertion.hypotheses) == 0, (
            "short-cut assumption that a syntax $a has no $e expected to hold for assertion %s: %s" % (name, assertion))
        assertion_vars = [v for l, (t, v) in assertion.types]
        assert len(assertion.types) > 0 or assertion_vars == []
        for substitution in findsubst.find_substitutions(assertion_vars, assertion.conclusion, expression):
            # we found a possible proof step
            #print('s', substitution)
            # One sub-proof generator per typed variable of the assertion.
            subproofss = (
                find_proofs((t, *substitution[v]), asss)
                for l, (t, v) in assertion.types
            )
            # Cartesian product: every combination of sub-proofs is a proof.
            for subproofs_combination in product(*subproofss):
                #print('=', name, subproofs_combination)
                yield (name, *subproofs_combination) if subproofs_combination else name
            return
        assert not yielded_once
        yielded_once = True
        #print('<<tried :', assertion.conclusion)
    #print('<looked at :', expression)
    return
if __name__ == '__main__':
    from type_docopt import docopt
    arguments = docopt()
    with open(arguments['--input'], 'rb') as f:
        setmm = f.read()
    # kind (typecode byte string) -> {assertion name -> assertion}
    assertions_by_kind = defaultdict(dict)
    for nr, (name, typ, assertion) in enumerate(mmlib.mm_assertions(setmm)):
        #print(name, assertion.conclusion)
        t = assertion.conclusion[0]
        # `while once` + `continue` is a structured goto: any `continue`
        # skips the parse attempt but still falls through to the
        # registration step below the loop.
        once = True
        while once:
            once = False
            if t == b'TOP':
                # don't parse
                continue
            if typ == ord(b'f'):
                # don't parse $f statements
                continue
            if typ == ord(b'a') and chr(t[0]).upper() != chr(t[0]):
                # don't parse $a statements whose typecode starts with lowercase letter
                continue
            try:
                # nr % 1 == 0 is always true: progress printing for every
                # assertion (the modulus is a tunable sampling knob).
                if nr % 1 == 0:
                    print(nr, ':', str(name, 'ASCII'), str(
                        b' '.join(assertion.conclusion), 'ASCII'))
                parse_tree = parse(assertion.conclusion,
                                   b'TOP', assertions_by_kind)
                if nr % 1 == 0:
                    print(nr, ':', '-->', json.dumps(parse_tree,
                                                     indent=None, default=lambda x: str(x, 'ASCII')))
                    print()
            except Exception:
                # Fail fast on the first unparseable assertion.
                print(name, assertion.conclusion)
                import traceback
                traceback.print_exc()
                import sys
                sys.exit(1)
        # $a and $f assertions become grammar rules for later statements,
        # even when they were themselves skipped above.
        if typ in (ord(b'a'), ord(b'f')):
            assertions_by_kind[t][name] = assertion
        if False:
            print('using:', t, name, assertion)
|
25,386 | abf0b9c55c740de0ec5602192c96606e6d04efce | # -*- coding: utf-8 -*-
import tensorflow as tf
def build_graph(vocab_size, state_size=64, batch_size=256, num_classes=6):
    """Build a GRU sequence classifier (TF1.x static-graph API).

    Embedding lookup -> dynamic GRU over variable-length sequences ->
    dropout -> softmax over the last *relevant* timestep of each sequence.

    Returns a dict of graph handles (placeholders, loss, train op, ...).
    """
    # Placeholders
    x = tf.placeholder(tf.int32, [batch_size, None]) # [batch_size, num_steps]
    seqlen = tf.placeholder(tf.int32, [batch_size])
    y = tf.placeholder(tf.int32, [batch_size])
    # NOTE(review): keep_prob is a tf.constant, yet train_graph feeds the
    # 'dropout' key through feed_dict; feeding constants works in TF1 but a
    # placeholder_with_default would be clearer -- confirm.
    keep_prob = tf.constant(1.0)

    # Embedding layer
    embeddings = tf.get_variable('embedding_matrix', [vocab_size, state_size])
    rnn_inputs = tf.nn.embedding_lookup(embeddings, x)

    # RNN with a learned (shared) initial state, tiled over the batch.
    cell = tf.nn.rnn_cell.GRUCell(state_size)
    init_state = tf.get_variable('init_state', [1, state_size],
                                 initializer=tf.constant_initializer(0.0))
    init_state = tf.tile(init_state, [batch_size, 1])
    rnn_outputs, final_state = tf.nn.dynamic_rnn(cell,
                                                 rnn_inputs,
                                                 sequence_length=seqlen,
                                                 initial_state=init_state)

    # Add dropout, as the model otherwise quickly overfits
    rnn_outputs = tf.nn.dropout(rnn_outputs, keep_prob)

    """
    Obtain the last relevant output. The best approach in the future will be to use:
        last_rnn_output = tf.gather_nd(rnn_outputs, tf.pack([tf.range(batch_size), seqlen-1], axis=1))
    which is the Tensorflow equivalent of numpy's rnn_outputs[range(30), seqlen-1, :], but the
    gradient for this op has not been implemented as of this writing.
    The below solution works, but throws a UserWarning re: the gradient.
    """
    # Flatten to [batch*time, state] and index row (b, seqlen[b]-1) per example.
    idx = tf.range(batch_size) * tf.shape(rnn_outputs)[1] + (seqlen - 1)
    last_rnn_output = tf.gather(tf.reshape(rnn_outputs, [-1, state_size]), idx)

    # Softmax layer
    with tf.variable_scope('softmax'):
        W = tf.get_variable('W', [state_size, num_classes])
        b = tf.get_variable('b', [num_classes], initializer=tf.constant_initializer(0.0))
    logits = tf.matmul(last_rnn_output, W) + b
    preds = tf.nn.softmax(logits)
    correct = tf.equal(tf.cast(tf.argmax(preds, 1), tf.int32), y)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

    # NOTE(review): positional args here only work on old TF1 releases; newer
    # TF1 requires labels=/logits= keywords -- confirm target TF version.
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits, y))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)

    return {
        'x': x,
        'seqlen': seqlen,
        'y': y,
        'dropout': keep_prob,
        'loss': loss,
        'ts': train_step,
        'preds': preds,
        'accuracy': accuracy
    }
def train_graph(graph, batch_size=256, num_epochs=10, iterator=PaddedDataIterator):
    """Train the classifier described by `graph` and evaluate once per epoch.

    Bug fix: the original body indexed the module-level global `g` instead of
    the `graph` parameter, so the argument was silently ignored.

    Args:
        graph: tensor dict as returned by build_graph().
        batch_size: rows per training batch.
        num_epochs: number of passes over the training data.
        iterator: factory producing batch iterators with next_batch()/.epochs.

    Returns:
        (tr_losses, te_losses): per-epoch mean train / test accuracies.
    """
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        # NOTE(review): `train` and `test` are module-level datasets defined
        # elsewhere in this file — confirm they are in scope.
        tr = iterator(train)
        te = iterator(test)
        step, accuracy = 0, 0
        tr_losses, te_losses = [], []
        current_epoch = 0
        while current_epoch < num_epochs:
            step += 1
            batch = tr.next_batch(batch_size)
            # Feed 0.6 keep-probability during training (overrides the 1.0 constant).
            feed = {graph['x']: batch[0], graph['y']: batch[1],
                    graph['seqlen']: batch[2], graph['dropout']: 0.6}
            accuracy_, _ = sess.run([graph['accuracy'], graph['ts']], feed_dict=feed)
            accuracy += accuracy_
            if tr.epochs > current_epoch:
                current_epoch += 1
                tr_losses.append(accuracy / step)
                step, accuracy = 0, 0
                # Evaluate the whole test set; no dropout fed, so keep_prob
                # stays at its 1.0 default.
                te_epoch = te.epochs
                while te.epochs == te_epoch:
                    step += 1
                    batch = te.next_batch(batch_size)
                    feed = {graph['x']: batch[0], graph['y']: batch[1], graph['seqlen']: batch[2]}
                    accuracy_ = sess.run([graph['accuracy']], feed_dict=feed)[0]
                    accuracy += accuracy_
                te_losses.append(accuracy / step)
                step, accuracy = 0, 0
                print("Accuracy after epoch", current_epoch, " - tr:", tr_losses[-1], "- te:", te_losses[-1])
    return tr_losses, te_losses
# Script entry: build the default graph and train it.
# NOTE(review): build_graph() is called without `vocab_size`, which has no
# default — as written this raises TypeError; pass the vocabulary size.
g = build_graph()
tr_losses, te_losses = train_graph(g)
|
25,387 | d036dc48c29fc2b5bc557cb87925e2c8f5e13ade | # coding=utf-8
from pyspark import SparkConf, SparkContext
from pyspark.sql import HiveContext, Row
# One application-wide Spark context plus a Hive-backed SQL context.
conf = SparkConf().setAppName("spark_sql_cache")
sc = SparkContext(conf=conf)
hc = HiveContext(sc)
# Three hand-written JSON rows serve as the demo source table.
source = sc.parallelize(
    ['{"col1": "row1_col1","col2":"row1_col2","col3":"row1_col3"}', '{"col1": "row2_col1","col2":"row2_col2","col3":"row2_col3"}', '{"col1": "row3_col1","col2":"row3_col2","col3":"row3_col3"}'])
# Infer a schema from the JSON strings and expose it as a temp table.
sourceRDD = hc.jsonRDD(source)
sourceRDD.registerTempTable("temp_source")
"""
def convert(row):
mydict = row.asDict()
mydict["col1"] = mydict["col1"].upper()
return Row(**mydict)
convertRDD = hc.sql(
"select col1, col2, col3 from temp_source").map(convert)
mytable = hc.inferSchema(convertRDD)
mytable.registerTempTable("temp_mytable")
"""
def convert(val):
    """Upper-case a single column value (used as a Hive UDF)."""
    upper_val = val.upper()
    return upper_val
# Register the Python function as a SQL UDF, apply it to col1, and cache the
# resulting table so both SELECTs below hit the cached data.
hc.registerFunction("temp_convert", convert)
convertRDD = hc.sql(
    "select temp_convert(col1) as col1, col2, col3 from temp_source")
convertRDD.registerAsTable("temp_mytable")
hc.cacheTable("temp_mytable")
def printRows(rows):
    """Print each row on its own line.

    Uses print() call syntax instead of the Python-2-only `print row`
    statement, so the helper works under both Python 2 and Python 3.
    """
    for row in rows:
        print(row)
# Run two queries against the cached table and print the rows.
datas = hc.sql("select * from temp_mytable").collect()
printRows(datas)
datas = hc.sql("select col1 from temp_mytable").collect()
printRows(datas)
# hc.uncacheTable("temp_mytable")
sc.stop()
|
25,388 | f9194c48954ebc2036f0a62050f705cf351ba921 | import os
from nltk.tokenize import word_tokenize
import re
#def main():
def getAcronyms(file):
    """Scan a .txt file for acronym-like tokens and map each to a candidate
    expansion found in the same text.

    A token counts as an acronym candidate when it starts with 3-5 capital
    letters. Non-.txt paths yield an empty dict.

    Fixes vs. original: the file handle is now closed (with-statement) and
    the regex is compiled once instead of re-matched from a string per token.

    Args:
        file: path to the text file (name kept for caller compatibility,
              though it shadows the Python 2 builtin).

    Returns:
        dict mapping acronym token -> list of words whose initials spell it.
    """
    pattern = re.compile(r'^[A-Z]{3,5}')
    acr_dict = {}
    if file[-4:] == '.txt':
        # with-statement closes the handle that the original leaked.
        with open(file) as f:
            text = f.read()
        tokens = word_tokenize(text)
        for t in tokens:
            if pattern.search(t):
                match = find_words_for_acr(t, tokens)
                acr_dict.update(match)
    return acr_dict
def find_words_for_acr(acronym, tokens):
    """Return {acronym: [words]} for the last n-gram in `tokens` whose word
    initials spell `acronym` (upper-cased comparison), or {} if none.

    Fixes an off-by-one in the original: `while i < len(tokens) - num_words`
    never examined the final window ending at the last token; the loop now
    covers every starting index up to len(tokens) - num_words inclusive.
    """
    num_words = len(acronym)
    match = {}
    # +1 so the window ending at the last token is also checked.
    for i in range(len(tokens) - num_words + 1):
        n_gram = tokens[i:i + num_words]
        initials = ''.join(word[0] for word in n_gram).upper()
        if initials == acronym:
            match = {acronym: n_gram}
    return match
#if __name__=='__main__': main() |
25,389 | f33e98c9ff0410c2ab03f6b5104b5bab792c8c60 | from math import sqrt
import numpy as np
from scipy import fftpack as fp
from ..util_math import dft2, idft2
# --- high pass filters ---
def high_pass_ideal(d, cutoff):
    """Ideal high-pass: pass (1) at or beyond the cutoff radius, block (0) inside."""
    return 1 if d >= cutoff else 0
def high_pass_gauss(d, cutoff):
    """Gaussian high-pass: complement of the Gaussian low-pass response."""
    exponent = -(d ** 2) / (2 * cutoff ** 2)
    return 1 - np.exp(exponent)
def high_pass_butterworth(d, cutoff, order):
    """Butterworth high-pass; defined as 0 at d == 0 to avoid division by zero."""
    if d == 0:
        return 0
    ratio = cutoff / d
    return 1.0 / (1 + ratio ** (2 * order))
# --- low pass filters ---
def low_pass_ideal(d, cutoff):
    """Ideal low-pass: pass (1) at or inside the cutoff radius, block (0) beyond."""
    return 1 if d <= cutoff else 0
def low_pass_gauss(d, cutoff):
    """Gaussian low-pass response exp(-d^2 / (2 * cutoff^2))."""
    exponent = -(d ** 2) / (2 * cutoff ** 2)
    return np.exp(exponent)
def low_pass_butterworth(d, cutoff, order):
    """Butterworth low-pass of the given order around `cutoff`."""
    ratio = d / cutoff
    return 1.0 / (1 + ratio ** (2 * order))
# --- band pass filters ---
def band_pass_ideal(d, cutoff, width):
    """Ideal band-pass: 1 inside [cutoff - width/2, cutoff + width/2], else 0."""
    half = width / 2.0
    return 1 if cutoff - half <= d <= cutoff + half else 0
def band_pass_gauss(d, cutoff, width):
    """Gaussian band-pass centred on `cutoff`; 0 at d == 0 (ratio undefined there)."""
    if d == 0:
        return 0
    u = (d ** 2 - cutoff ** 2) / (d * width)
    return np.exp(-(u ** 2))
def band_pass_butterworth(d, cutoff, width, order):
    """Butterworth band-pass; defined as 0 at d == 0 to avoid division by zero."""
    if d == 0:
        return 0
    u = (d ** 2 - cutoff ** 2) / (d * width)
    return 1 / (1 + u ** (2 * order))
# --- band reject filters ---
def band_reject_ideal(d, cutoff, width):
    """Ideal band-reject (notch): 0 inside the band, 1 outside."""
    half = width / 2.0
    return 0 if cutoff - half <= d <= cutoff + half else 1
def band_reject_gauss(d, cutoff, width):
    """Gaussian band-reject; full pass (1) at d == 0."""
    if d == 0:
        return 1
    u = (d ** 2 - cutoff ** 2) / (d * width)
    return 1 - np.exp(-(u ** 2))
def band_reject_butterworth(d, cutoff, width, order):
    """Butterworth band-reject; 1 exactly on the cutoff circle (d == cutoff)."""
    denom = d ** 2 - cutoff ** 2
    if denom == 0:
        return 1
    u = (d * width) / denom
    return 1 / (1 + u ** (2 * order))
def distance(u, v, shape):
    """Euclidean distance from frequency sample (u, v) to the centre of an
    M x N spectrum.

    The third argument is an (M, N) pair. The original signature used the
    Python-2-only tuple-parameter unpacking `(M, N)`, which was removed in
    Python 3 (PEP 3113); callers still pass the same positional tuple.
    """
    M, N = shape
    return sqrt((u - M / 2.0) ** 2 + (v - N / 2.0) ** 2)
def filter_image(image, filter, options):
    """Apply a frequency-domain `filter` (one of the functions above) to `image`.

    The image is transformed with a centred 2-D DFT, each frequency sample is
    scaled by filter(distance_to_centre, **options), and the magnitude of the
    inverse transform is returned.
    """
    spectrum = fp.fftshift(dft2(image))
    rows, cols = spectrum.shape
    filtered = np.zeros(spectrum.shape, dtype=np.complex)
    for u in range(rows):
        for v in range(cols):
            radius = distance(u, v, spectrum.shape)
            filtered[u, v] = filter(radius, **options) * spectrum[u, v]
    return abs(idft2(filtered))
# Registries mapping window shape ('ideal' / 'gauss' / 'butterworth') to the
# corresponding filter function, grouped per pass-band family below.
LOW_PASS_FILTERS = {
    'ideal': low_pass_ideal,
    'gauss': low_pass_gauss,
    'butterworth': low_pass_butterworth,
}
HIGH_PASS_FILTERS = {
    'ideal': high_pass_ideal,
    'gauss': high_pass_gauss,
    'butterworth': high_pass_butterworth,
}
BAND_PASS_FILTERS = {
    'ideal': band_pass_ideal,
    'gauss': band_pass_gauss,
    'butterworth': band_pass_butterworth,
}
BAND_REJECT_FILTERS = {
    'ideal': band_reject_ideal,
    'gauss': band_reject_gauss,
    'butterworth': band_reject_butterworth,
}
# Top-level lookup: FILTERS[family][shape] -> filter function.
FILTERS = {
    'low_pass': LOW_PASS_FILTERS,
    'high_pass': HIGH_PASS_FILTERS,
    'band_pass': BAND_PASS_FILTERS,
    'band_reject': BAND_REJECT_FILTERS,
}
|
25,390 | f1c74676643480c61a918b5164e9dac47c7a4eb7 | from flask_script import Manager
from flask_migrate import Migrate,MigrateCommand
from application.apps.pydoc import create_app,db
# NOTE(review): 'developemen' looks like a typo for 'development' — confirm
# against the config names create_app actually registers before changing.
app = create_app('developemen')
manager = Manager(app)
# Wire Flask-Migrate so `python manage.py db ...` commands are available.
Migrate(app,db)
manager.add_command('db', MigrateCommand)
@app.route('/')
def inde():
    """Root endpoint: respond with a constant greeting."""
    # NOTE(review): `inde` looks like a typo for `index`; the name is kept
    # because only the registered route matters to Flask.
    return 'hello world'
# Entry point: hand control to Flask-Script's CLI (serves the app, runs
# the registered 'db' migration commands, etc.).
if __name__ == '__main__':
    manager.run()
|
25,391 | 95e11e342eecffe02c8a2267f9e96c9013bd1dc1 | """Share request backend module."""
import asyncio
import logging
import sys
import typing
import aiohttp.web
import uvloop
import swift_browser_ui.common.common_handlers
import swift_browser_ui.common.common_middleware
import swift_browser_ui.common.common_util
from swift_browser_ui.request.api import (
handle_container_request_listing,
handle_health_check,
handle_share_request_post,
handle_user_add_token,
handle_user_delete_token,
handle_user_list_tokens,
handle_user_made_request_listing,
handle_user_owned_request_listing,
handle_user_share_request_delete,
)
from swift_browser_ui.request.db import DBConn
logging.basicConfig(level=logging.DEBUG)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
async def resume_on_start(app: aiohttp.web.Application) -> None:
    """Initialize application state on startup: token list and open DB pool."""
    app["tokens"] = []
    connection = DBConn()
    app["db_conn"] = connection
    await connection.open()
async def graceful_shutdown(app: aiohttp.web.Application) -> None:
    """Close the database connection on shutdown, if one was opened."""
    connection = app["db_conn"]
    if connection is not None:
        await connection.close()
async def init_server() -> aiohttp.web.Application:
    """Initialize the sharing request server.

    Builds the aiohttp application: middleware stack, Server header hook,
    health / request / token routes, and startup/shutdown callbacks.
    Middleware order matters — CORS first, then DB check, authentication,
    uniqueness-error mapping, and the generic error handler last.
    """
    app = aiohttp.web.Application(
        middlewares=[
            swift_browser_ui.common.common_middleware.add_cors,  # type: ignore
            swift_browser_ui.common.common_middleware.check_db_conn,  # type: ignore
            swift_browser_ui.common.common_middleware.handle_validate_authentication,  # type: ignore
            swift_browser_ui.common.common_middleware.catch_uniqueness_error,  # type: ignore
            swift_browser_ui.common.common_middleware.error_handler,  # type: ignore
        ]
    )
    async def on_prepare(
        _: aiohttp.web.Request, response: aiohttp.web.StreamResponse
    ) -> None:
        """Modify Server headers."""
        response.headers["Server"] = "Swift Browser Request"
    # add custom response headers
    app.on_response_prepare.append(on_prepare)
    # Unauthenticated health probe.
    app.add_routes(
        [
            aiohttp.web.get("/health", handle_health_check),
        ]
    )
    # Share-request CRUD routes (OPTIONS preflight handled explicitly
    # because DELETE requires CORS preflight).
    app.add_routes(
        [
            aiohttp.web.options(
                "/request/user/{user}/{container}",
                swift_browser_ui.common.common_handlers.handle_delete_preflight,
            ),
            aiohttp.web.post(
                "/request/user/{user}/{container}", handle_share_request_post
            ),
            aiohttp.web.delete(
                "/request/user/{user}/{container}", handle_user_share_request_delete
            ),
            aiohttp.web.get("/request/user/{user}", handle_user_made_request_listing),
            aiohttp.web.get("/request/owner/{user}", handle_user_owned_request_listing),
            aiohttp.web.get(
                "/request/container/{container}", handle_container_request_listing
            ),
        ]
    )
    # Per-project API token management routes.
    app.add_routes(
        [
            aiohttp.web.options(
                "/token/{project}/{id}",
                swift_browser_ui.common.common_handlers.handle_delete_preflight,
            ),
            aiohttp.web.post("/token/{project}/{id}", handle_user_add_token),
            aiohttp.web.delete("/token/{project}/{id}", handle_user_delete_token),
            aiohttp.web.get("/token/{project}", handle_user_list_tokens),
        ]
    )
    # Lifecycle hooks: open DB + read keys on startup, close DB on shutdown.
    app.on_startup.append(resume_on_start)
    app.on_startup.append(swift_browser_ui.common.common_util.read_in_keys)
    app.on_shutdown.append(graceful_shutdown)
    return app
def run_server_devel(
    app: typing.Coroutine[typing.Any, typing.Any, aiohttp.web.Application]
) -> None:
    """Serve the application over plain HTTP on port 9091 (development only)."""
    access_logger = logging.getLogger("aiohttp.access")
    aiohttp.web.run_app(app, access_log=access_logger, port=9091)
def main() -> None:
    """Run the server with the default run function.

    Exits with status 1 when the interpreter is older than the minimum
    supported Python version; otherwise builds the application and serves
    it in development mode.
    """
    minimum_version = (3, 6)
    if sys.version_info < minimum_version:
        logging.error("swift-sharing-request requires >= python3.6")
        sys.exit(1)
    run_server_devel(init_server())
if __name__ == "__main__":
    # Go through main() instead of calling run_server_devel(init_server())
    # directly: the original guard bypassed the Python-version check that
    # main() performs before starting the server.
    main()
|
25,392 | d04ebd2aa3323151ccd05cfe056465631df89722 | #
# Generate By: dol2asm
# Module: 144
#
# Libraries
LIBRARIES = [
"d/a/b/d_a_b_gm",
]
# Translation Units
TRANSLATION_UNITS = [
"executor",
"unknown_translation_unit_ctors",
"global_destructor_chain",
"d_a_b_gm",
]
# Sections
SECTIONS = [
".text",
".ctors",
".dtors",
".bss",
".rodata",
".data",
]
# Symbols
SYMBOLS = [
{'addr':0x805ED860,'size':44,'pad':0,'label':"_prolog",'name':"_prolog",'lib':-1,'tu':0,'section':0,'class_template':None,'static':False,'is_reachable':True,'r':[0,1,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805ED88C,'size':44,'pad':0,'label':"_epilog",'name':"_epilog",'lib':-1,'tu':0,'section':0,'class_template':None,'static':False,'is_reachable':True,'r':[0,1,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805ED8B8,'size':32,'pad':0,'label':"_unresolved",'name':"_unresolved",'lib':-1,'tu':0,'section':0,'class_template':None,'static':False,'is_reachable':True,'r':[0,1,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805ED8D8,'size':28,'pad':0,'label':"__register_global_object",'name':"__register_global_object",'lib':-1,'tu':2,'section':0,'class_template':None,'static':False,'is_reachable':True,'r':[0,1,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805ED8F4,'size':88,'pad':0,'label':"__destroy_global_chain",'name':"__destroy_global_chain",'lib':-1,'tu':2,'section':0,'class_template':None,'static':False,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805ED94C,'size':176,'pad':0,'label':"__ct__12daB_GM_HIO_cFv",'name':"__ct__12daB_GM_HIO_cFv",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':True,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805ED9FC,'size':172,'pad':0,'label':"anm_init__FP10b_gm_classifUcf",'name':"anm_init__FP10b_gm_classifUcf",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[6,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805EDAA8,'size':768,'pad':0,'label':"nodeCallBack__FP8J3DJointi",'name':"nodeCallBack__FP8J3DJointi",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805EDDA8,'size':632,'pad':0,'label':"daB_GM_Draw__FP10b_gm_class",'name':"daB_GM_Draw__FP10b_gm_class",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805EE020,'size':60,'pad':0,'label':"__dt__4cXyzFv",'name':"__dt__4cXyzFv",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':True,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805EE05C,'size':88,'pad':0,'label':"s_ko_del__FPvPv",'name':"s_ko_del__FPvPv",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805EE0B4,'size':900,'pad':0,'label':"damage_check__FP10b_gm_class",'name':"damage_check__FP10b_gm_class",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805EE438,'size':416,'pad':0,'label':"bg_check__FP10b_gm_class",'name':"bg_check__FP10b_gm_class",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805EE5D8,'size':72,'pad':0,'label':"__dt__8cM3dGPlaFv",'name':"__dt__8cM3dGPlaFv",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805EE620,'size':88,'pad':0,'label':"s_ko_sub__FPvPv",'name':"s_ko_sub__FPvPv",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805EE678,'size':104,'pad':0,'label':"s_ko2_move__FPvPv",'name':"s_ko2_move__FPvPv",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805EE6E0,'size':88,'pad':0,'label':"s_ko2_get__FPvPv",'name':"s_ko2_get__FPvPv",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805EE738,'size':120,'pad':0,'label':"s_ko_move__FPvPv",'name':"s_ko_move__FPvPv",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805EE7B0,'size':216,'pad':0,'label':"b_gm_wait__FP10b_gm_class",'name':"b_gm_wait__FP10b_gm_class",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805EE888,'size':1480,'pad':0,'label':"b_gm_move__FP10b_gm_class",'name':"b_gm_move__FP10b_gm_class",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[2,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805EEE50,'size':448,'pad':0,'label':"b_gm_beam__FP10b_gm_class",'name':"b_gm_beam__FP10b_gm_class",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805EF010,'size':1268,'pad':0,'label':"b_gm_kogoma__FP10b_gm_class",'name':"b_gm_kogoma__FP10b_gm_class",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805EF504,'size':300,'pad':0,'label':"b_gm_damage__FP10b_gm_class",'name':"b_gm_damage__FP10b_gm_class",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805EF630,'size':908,'pad':0,'label':"b_gm_drop__FP10b_gm_class",'name':"b_gm_drop__FP10b_gm_class",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[2,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805EF9BC,'size':364,'pad':0,'label':"action__FP10b_gm_class",'name':"action__FP10b_gm_class",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[2,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805EFB28,'size':684,'pad':0,'label':"foot_IK__FP10b_gm_classP9b_gm_foot",'name':"foot_IK__FP10b_gm_classP9b_gm_foot",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805EFDD4,'size':380,'pad':0,'label':"foot_IK_main__FP10b_gm_class",'name':"foot_IK_main__FP10b_gm_class",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805EFF50,'size':1516,'pad':0,'label':"anm_se_set__FP10b_gm_class",'name':"anm_se_set__FP10b_gm_class",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805F053C,'size':228,'pad':0,'label':"cam_3d_morf__FP10b_gm_classf",'name':"cam_3d_morf__FP10b_gm_classf",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805F0620,'size':160,'pad':0,'label':"cam_spd_set__FP10b_gm_class",'name':"cam_spd_set__FP10b_gm_class",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805F06C0,'size':6296,'pad':0,'label':"demo_camera__FP10b_gm_class",'name':"demo_camera__FP10b_gm_class",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[2,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805F1F58,'size':4424,'pad':0,'label':"daB_GM_Execute__FP10b_gm_class",'name':"daB_GM_Execute__FP10b_gm_class",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[2,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805F30A0,'size':8,'pad':0,'label':"daB_GM_IsDelete__FP10b_gm_class",'name':"daB_GM_IsDelete__FP10b_gm_class",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ReturnFunction"},
{'addr':0x805F30A8,'size':112,'pad':0,'label':"daB_GM_Delete__FP10b_gm_class",'name':"daB_GM_Delete__FP10b_gm_class",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805F3118,'size':1096,'pad':0,'label':"useHeapInit__FP10fopAc_ac_c",'name':"useHeapInit__FP10fopAc_ac_c",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805F3560,'size':72,'pad':0,'label':"__dt__12J3DFrameCtrlFv",'name':"__dt__12J3DFrameCtrlFv",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805F35A8,'size':828,'pad':0,'label':"daB_GM_Create__FP10fopAc_ac_c",'name':"daB_GM_Create__FP10fopAc_ac_c",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805F38E4,'size':476,'pad':0,'label':"__ct__10b_gm_classFv",'name':"__ct__10b_gm_classFv",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805F3AC0,'size':60,'pad':0,'label':"__dt__9b_gm_footFv",'name':"__dt__9b_gm_footFv",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805F3AFC,'size':4,'pad':0,'label':"__ct__9b_gm_footFv",'name':"__ct__9b_gm_footFv",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ReturnFunction"},
{'addr':0x805F3B00,'size':132,'pad':0,'label':"__ct__8dCcD_SphFv",'name':"__ct__8dCcD_SphFv",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805F3B84,'size':204,'pad':0,'label':"__dt__8dCcD_SphFv",'name':"__dt__8dCcD_SphFv",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805F3C50,'size':72,'pad':0,'label':"__dt__8cM3dGSphFv",'name':"__dt__8cM3dGSphFv",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805F3C98,'size':72,'pad':0,'label':"__dt__8cM3dGAabFv",'name':"__dt__8cM3dGAabFv",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805F3CE0,'size':92,'pad':0,'label':"__dt__10dCcD_GSttsFv",'name':"__dt__10dCcD_GSttsFv",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805F3D3C,'size':112,'pad':0,'label':"__dt__12dBgS_ObjAcchFv",'name':"__dt__12dBgS_ObjAcchFv",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[3,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805F3DAC,'size':112,'pad':0,'label':"__dt__12dBgS_AcchCirFv",'name':"__dt__12dBgS_AcchCirFv",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805F3E1C,'size':72,'pad':0,'label':"__dt__10cCcD_GSttsFv",'name':"__dt__10cCcD_GSttsFv",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805F3E64,'size':72,'pad':0,'label':"__dt__12daB_GM_HIO_cFv",'name':"__dt__12daB_GM_HIO_cFv",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':True,'r':[2,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805F3EAC,'size':532,'pad':0,'label':"__sinit_d_a_b_gm_cpp",'name':"__sinit_d_a_b_gm_cpp",'lib':-1,'tu':3,'section':0,'class_template':None,'static':False,'is_reachable':False,'r':[0,1,0],'sh':[0,0,0],'type':"SInitFunction"},
{'addr':0x805F40C0,'size':8,'pad':0,'label':"func_805F40C0",'name':"@36@__dt__12dBgS_ObjAcchFv",'lib':-1,'tu':3,'section':0,'class_template':None,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805F40C8,'size':8,'pad':0,'label':"func_805F40C8",'name':"@20@__dt__12dBgS_ObjAcchFv",'lib':-1,'tu':3,'section':0,'class_template':None,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805F40D0,'size':28,'pad':0,'label':"setCurrentPos__16obj_ystone_classF4cXyz",'name':"setCurrentPos__16obj_ystone_classF4cXyz",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805F40EC,'size':116,'pad':0,'label':"dComIfGp_particle_set__FUsPC4cXyzPC5csXyzPC4cXyz",'name':"dComIfGp_particle_set__FUsPC4cXyzPC5csXyzPC4cXyz",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805F4160,'size':20,'pad':0,'label':"changeDemoMode__9daPy_py_cFUliis",'name':"changeDemoMode__9daPy_py_cFUliis",'lib':-1,'tu':3,'section':0,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ASMFunction"},
{'addr':0x805F4174,'size':8,'pad':0,'label':"_ctors",'name':"_ctors",'lib':-1,'tu':1,'section':1,'class_template':None,'static':False,'is_reachable':True,'r':[0,1,0],'sh':[0,0,0],'type':"LinkerGenerated"},
{'addr':0x805F417C,'size':12,'pad':0,'label':"_dtors",'name':"_dtors",'lib':-1,'tu':2,'section':2,'class_template':None,'static':False,'is_reachable':True,'r':[0,1,0],'sh':[0,0,0],'type':"LinkerGenerated"},
{'addr':0x805F4188,'size':4,'pad':0,'label':"lit_3774",'name':"@3774",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':True,'r':[18,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F418C,'size':4,'pad':0,'label':"lit_3775",'name':"@3775",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4190,'size':4,'pad':0,'label':"lit_3776",'name':"@3776",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4194,'size':4,'pad':0,'label':"lit_3777",'name':"@3777",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4198,'size':4,'pad':0,'label':"lit_3778",'name':"@3778",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F419C,'size':4,'pad':0,'label':"lit_3779",'name':"@3779",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F41A0,'size':4,'pad':0,'label':"lit_3780",'name':"@3780",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F41A4,'size':4,'pad':0,'label':"lit_3794",'name':"@3794",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[5,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F41A8,'size':4,'pad':0,'label':"lit_3987",'name':"@3987",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F41AC,'size':4,'pad':0,'label':"lit_3988",'name':"@3988",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F41B0,'size':4,'pad':0,'label':"lit_3989",'name':"@3989",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F41B4,'size':4,'pad':0,'label':"lit_4154",'name':"@4154",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F41B8,'size':4,'pad':0,'label':"lit_4155",'name':"@4155",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F41BC,'size':4,'pad':0,'label':"lit_4214",'name':"@4214",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F41C0,'size':4,'pad':0,'label':"lit_4215",'name':"@4215",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F41C4,'size':4,'pad':0,'label':"lit_4216",'name':"@4216",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F41C8,'size':4,'pad':0,'label':"lit_4341",'name':"@4341",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F41CC,'size':4,'pad':0,'label':"lit_4342",'name':"@4342",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F41D0,'size':4,'pad':0,'label':"lit_4343",'name':"@4343",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F41D4,'size':4,'pad':0,'label':"lit_4344",'name':"@4344",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F41D8,'size':4,'pad':0,'label':"lit_4345",'name':"@4345",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F41DC,'size':4,'pad':0,'label':"lit_4346",'name':"@4346",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F41E0,'size':4,'pad':4,'label':"lit_4347",'name':"@4347",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F41E8,'size':8,'pad':0,'label':"lit_4348",'name':"@4348",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F41F0,'size':8,'pad':0,'label':"lit_4349",'name':"@4349",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F41F8,'size':8,'pad':0,'label':"lit_4350",'name':"@4350",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4200,'size':4,'pad':0,'label':"lit_4351",'name':"@4351",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4204,'size':4,'pad':0,'label':"lit_4352",'name':"@4352",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4208,'size':4,'pad':0,'label':"lit_4353",'name':"@4353",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"Integer"},
{'addr':0x805F420C,'size':4,'pad':0,'label':"lit_4390",'name':"@4390",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4210,'size':4,'pad':0,'label':"lit_4391",'name':"@4391",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4214,'size':4,'pad':0,'label':"lit_4540",'name':"@4540",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4218,'size':4,'pad':0,'label':"lit_4541",'name':"@4541",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F421C,'size':4,'pad':0,'label':"lit_4615",'name':"@4615",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4220,'size':4,'pad':0,'label':"lit_4616",'name':"@4616",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4224,'size':4,'pad':0,'label':"lit_4617",'name':"@4617",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4228,'size':4,'pad':0,'label':"lit_4618",'name':"@4618",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F422C,'size':4,'pad':0,'label':"lit_4703",'name':"@4703",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4230,'size':4,'pad':0,'label':"lit_4704",'name':"@4704",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4234,'size':4,'pad':0,'label':"lit_4705",'name':"@4705",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4238,'size':4,'pad':0,'label':"lit_4910",'name':"@4910",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F423C,'size':4,'pad':0,'label':"lit_4911",'name':"@4911",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4240,'size':4,'pad':0,'label':"lit_4912",'name':"@4912",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4244,'size':4,'pad':0,'label':"lit_4913",'name':"@4913",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4248,'size':4,'pad':0,'label':"lit_4914",'name':"@4914",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F424C,'size':4,'pad':0,'label':"lit_4915",'name':"@4915",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4250,'size':4,'pad':0,'label':"lit_4916",'name':"@4916",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4254,'size':4,'pad':0,'label':"lit_4917",'name':"@4917",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4258,'size':4,'pad':0,'label':"lit_4918",'name':"@4918",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F425C,'size':4,'pad':0,'label':"lit_4919",'name':"@4919",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4260,'size':4,'pad':0,'label':"lit_4920",'name':"@4920",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4264,'size':4,'pad':0,'label':"lit_5466",'name':"@5466",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4268,'size':4,'pad':0,'label':"lit_5467",'name':"@5467",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F426C,'size':4,'pad':0,'label':"lit_5468",'name':"@5468",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4270,'size':4,'pad':0,'label':"lit_5469",'name':"@5469",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4274,'size':4,'pad':0,'label':"lit_5470",'name':"@5470",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4278,'size':4,'pad':0,'label':"lit_5471",'name':"@5471",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F427C,'size':4,'pad':0,'label':"lit_5472",'name':"@5472",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4280,'size':4,'pad':0,'label':"lit_5473",'name':"@5473",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4284,'size':4,'pad':0,'label':"lit_5474",'name':"@5474",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4288,'size':4,'pad':0,'label':"lit_5475",'name':"@5475",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F428C,'size':4,'pad':0,'label':"lit_5476",'name':"@5476",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4290,'size':4,'pad':0,'label':"lit_5477",'name':"@5477",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4294,'size':4,'pad':0,'label':"lit_5478",'name':"@5478",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4298,'size':4,'pad':0,'label':"lit_5479",'name':"@5479",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F429C,'size':4,'pad':0,'label':"lit_5480",'name':"@5480",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F42A0,'size':4,'pad':0,'label':"lit_5481",'name':"@5481",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F42A4,'size':4,'pad':0,'label':"lit_5482",'name':"@5482",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F42A8,'size':4,'pad':0,'label':"lit_5483",'name':"@5483",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F42AC,'size':4,'pad':0,'label':"lit_5484",'name':"@5484",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F42B0,'size':4,'pad':0,'label':"lit_5485",'name':"@5485",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F42B4,'size':4,'pad':0,'label':"lit_5486",'name':"@5486",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F42B8,'size':4,'pad':0,'label':"lit_5487",'name':"@5487",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F42BC,'size':4,'pad':0,'label':"lit_5488",'name':"@5488",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F42C0,'size':4,'pad':0,'label':"lit_5489",'name':"@5489",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F42C4,'size':4,'pad':0,'label':"lit_5490",'name':"@5490",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F42C8,'size':4,'pad':0,'label':"lit_5491",'name':"@5491",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F42CC,'size':4,'pad':0,'label':"lit_5492",'name':"@5492",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F42D0,'size':4,'pad':0,'label':"lit_5493",'name':"@5493",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F42D4,'size':4,'pad':0,'label':"lit_5494",'name':"@5494",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F42D8,'size':4,'pad':0,'label':"lit_5495",'name':"@5495",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F42DC,'size':4,'pad':0,'label':"lit_5496",'name':"@5496",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F42E0,'size':4,'pad':0,'label':"lit_5497",'name':"@5497",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F42E4,'size':4,'pad':0,'label':"lit_5498",'name':"@5498",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F42E8,'size':4,'pad':0,'label':"lit_5499",'name':"@5499",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F42EC,'size':4,'pad':0,'label':"lit_5500",'name':"@5500",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F42F0,'size':4,'pad':0,'label':"lit_5501",'name':"@5501",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F42F4,'size':4,'pad':0,'label':"lit_5502",'name':"@5502",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F42F8,'size':4,'pad':0,'label':"lit_5503",'name':"@5503",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F42FC,'size':4,'pad':0,'label':"lit_5504",'name':"@5504",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4300,'size':4,'pad':0,'label':"lit_5505",'name':"@5505",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4304,'size':4,'pad':0,'label':"lit_5506",'name':"@5506",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4308,'size':4,'pad':0,'label':"lit_5507",'name':"@5507",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F430C,'size':4,'pad':0,'label':"lit_5508",'name':"@5508",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4310,'size':4,'pad':0,'label':"lit_5509",'name':"@5509",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4314,'size':4,'pad':0,'label':"lit_5510",'name':"@5510",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4318,'size':4,'pad':0,'label':"lit_5511",'name':"@5511",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F431C,'size':4,'pad':0,'label':"lit_5512",'name':"@5512",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4320,'size':4,'pad':0,'label':"lit_5513",'name':"@5513",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4324,'size':4,'pad':0,'label':"lit_5514",'name':"@5514",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"Integer"},
{'addr':0x805F4328,'size':4,'pad':0,'label':"lit_5515",'name':"@5515",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F432C,'size':4,'pad':0,'label':"lit_5516",'name':"@5516",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4330,'size':4,'pad':0,'label':"lit_5517",'name':"@5517",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4334,'size':4,'pad':0,'label':"lit_5518",'name':"@5518",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4338,'size':8,'pad':0,'label':"lit_5522",'name':"@5522",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4340,'size':4,'pad':0,'label':"lit_6079",'name':"@6079",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4344,'size':4,'pad':0,'label':"lit_6080",'name':"@6080",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4348,'size':4,'pad':0,'label':"lit_6081",'name':"@6081",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F434C,'size':4,'pad':0,'label':"lit_6082",'name':"@6082",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4350,'size':4,'pad':0,'label':"lit_6083",'name':"@6083",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4354,'size':4,'pad':0,'label':"lit_6084",'name':"@6084",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4358,'size':4,'pad':0,'label':"lit_6085",'name':"@6085",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F435C,'size':4,'pad':0,'label':"lit_6086",'name':"@6086",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4360,'size':4,'pad':0,'label':"lit_6087",'name':"@6087",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4364,'size':4,'pad':0,'label':"lit_6088",'name':"@6088",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4368,'size':4,'pad':0,'label':"lit_6089",'name':"@6089",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"Integer"},
{'addr':0x805F436C,'size':4,'pad':0,'label':"lit_6249",'name':"@6249",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4370,'size':4,'pad':0,'label':"lit_6363",'name':"@6363",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4374,'size':4,'pad':0,'label':"lit_6364",'name':"@6364",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4378,'size':4,'pad':0,'label':"lit_6595",'name':"@6595",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F437C,'size':4,'pad':0,'label':"lit_6596",'name':"@6596",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4380,'size':4,'pad':0,'label':"lit_6597",'name':"@6597",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4384,'size':4,'pad':0,'label':"lit_6598",'name':"@6598",'lib':-1,'tu':3,'section':4,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"FloatingPoint"},
{'addr':0x805F4388,'size':5,'pad':0,'label':"d_a_b_gm__stringBase0",'name':"@stringBase0",'lib':-1,'tu':3,'section':4,'class_template':None,'static':False,'is_reachable':False,'r':[4,0,0],'sh':[0,0,0],'type':"StringBase"},
{'addr':0x805F4390,'size':12,'pad':0,'label':"cNullVec__6Z2Calc",'name':"cNullVec__6Z2Calc",'lib':-1,'tu':3,'section':5,'class_template':False,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F439C,'size':4,'pad':16,'label':"lit_1787",'name':"@1787",'lib':-1,'tu':3,'section':5,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"Integer"},
{'addr':0x805F43B0,'size':6,'pad':2,'label':"name_4019",'name':"name$4019",'lib':-1,'tu':3,'section':5,'class_template':None,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F43B8,'size':8,'pad':0,'label':"name_4042",'name':"name$4042",'lib':-1,'tu':3,'section':5,'class_template':None,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F43C0,'size':44,'pad':0,'label':"lit_4354",'name':"@4354",'lib':-1,'tu':3,'section':5,'class_template':None,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ReferenceArray"},
{'addr':0x805F43EC,'size':88,'pad':0,'label':"lit_4619",'name':"@4619",'lib':-1,'tu':3,'section':5,'class_template':None,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ReferenceArray"},
{'addr':0x805F4444,'size':48,'pad':0,'label':"lit_4656",'name':"@4656",'lib':-1,'tu':3,'section':5,'class_template':None,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ReferenceArray"},
{'addr':0x805F4474,'size':40,'pad':0,'label':"top_j",'name':"top_j",'lib':-1,'tu':3,'section':5,'class_template':None,'static':True,'is_reachable':False,'r':[2,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F449C,'size':208,'pad':0,'label':"lit_5519",'name':"@5519",'lib':-1,'tu':3,'section':5,'class_template':None,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ReferenceArray"},
{'addr':0x805F456C,'size':8,'pad':0,'label':"name_5641",'name':"name$5641",'lib':-1,'tu':3,'section':5,'class_template':None,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4574,'size':64,'pad':0,'label':"body_sph_src",'name':"body_sph_src$6272",'lib':-1,'tu':3,'section':5,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F45B4,'size':64,'pad':0,'label':"core_sph_src",'name':"core_sph_src$6273",'lib':-1,'tu':3,'section':5,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F45F4,'size':64,'pad':0,'label':"hand_sph_src",'name':"hand_sph_src$6274",'lib':-1,'tu':3,'section':5,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4634,'size':64,'pad':0,'label':"foot_sph_src",'name':"foot_sph_src$6275",'lib':-1,'tu':3,'section':5,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4674,'size':64,'pad':0,'label':"beam_sph_src",'name':"beam_sph_src$6276",'lib':-1,'tu':3,'section':5,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F46B4,'size':32,'pad':0,'label':"l_daB_GM_Method",'name':"l_daB_GM_Method",'lib':-1,'tu':3,'section':5,'class_template':None,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ReferenceArray"},
{'addr':0x805F46D4,'size':48,'pad':0,'label':"g_profile_B_GM",'name':"g_profile_B_GM",'lib':-1,'tu':3,'section':5,'class_template':None,'static':False,'is_reachable':False,'r':[0,0,1],'sh':[0,0,0],'type':"ReferenceArray"},
{'addr':0x805F4704,'size':12,'pad':0,'label':"__vt__12dBgS_AcchCir",'name':"__vt__12dBgS_AcchCir",'lib':-1,'tu':3,'section':5,'class_template':None,'static':False,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"VirtualTable"},
{'addr':0x805F4710,'size':12,'pad':0,'label':"__vt__10cCcD_GStts",'name':"__vt__10cCcD_GStts",'lib':-1,'tu':3,'section':5,'class_template':None,'static':False,'is_reachable':False,'r':[2,0,0],'sh':[0,0,0],'type':"VirtualTable"},
{'addr':0x805F471C,'size':12,'pad':0,'label':"__vt__10dCcD_GStts",'name':"__vt__10dCcD_GStts",'lib':-1,'tu':3,'section':5,'class_template':None,'static':False,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"VirtualTable"},
{'addr':0x805F4728,'size':12,'pad':0,'label':"__vt__8cM3dGSph",'name':"__vt__8cM3dGSph",'lib':-1,'tu':3,'section':5,'class_template':None,'static':False,'is_reachable':False,'r':[4,0,0],'sh':[0,0,0],'type':"VirtualTable"},
{'addr':0x805F4734,'size':12,'pad':0,'label':"__vt__8cM3dGAab",'name':"__vt__8cM3dGAab",'lib':-1,'tu':3,'section':5,'class_template':None,'static':False,'is_reachable':False,'r':[4,0,0],'sh':[0,0,0],'type':"VirtualTable"},
{'addr':0x805F4740,'size':36,'pad':0,'label':"__vt__12dBgS_ObjAcch",'name':"__vt__12dBgS_ObjAcch",'lib':-1,'tu':3,'section':5,'class_template':None,'static':False,'is_reachable':False,'r':[2,0,0],'sh':[0,0,0],'type':"VirtualTable"},
{'addr':0x805F4764,'size':12,'pad':0,'label':"__vt__12J3DFrameCtrl",'name':"__vt__12J3DFrameCtrl",'lib':-1,'tu':3,'section':5,'class_template':None,'static':False,'is_reachable':False,'r':[2,0,0],'sh':[0,0,0],'type':"VirtualTable"},
{'addr':0x805F4770,'size':12,'pad':0,'label':"__vt__8cM3dGPla",'name':"__vt__8cM3dGPla",'lib':-1,'tu':3,'section':5,'class_template':None,'static':False,'is_reachable':False,'r':[3,0,0],'sh':[0,0,0],'type':"VirtualTable"},
{'addr':0x805F477C,'size':12,'pad':0,'label':"__vt__12daB_GM_HIO_c",'name':"__vt__12daB_GM_HIO_c",'lib':-1,'tu':3,'section':5,'class_template':None,'static':False,'is_reachable':True,'r':[2,0,0],'sh':[0,0,0],'type':"VirtualTable"},
{'addr':0x805F4788,'size':4,'pad':4,'label':"__global_destructor_chain",'name':"__global_destructor_chain",'lib':-1,'tu':2,'section':3,'class_template':None,'static':True,'is_reachable':True,'r':[2,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4790,'size':1,'pad':3,'label':"lit_1109",'name':"@1109",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':True,'r':[1,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4794,'size':1,'pad':3,'label':"lit_1107",'name':"@1107",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4798,'size':1,'pad':3,'label':"lit_1105",'name':"@1105",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F479C,'size':1,'pad':3,'label':"lit_1104",'name':"@1104",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F47A0,'size':1,'pad':3,'label':"lit_1099",'name':"@1099",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F47A4,'size':1,'pad':3,'label':"lit_1097",'name':"@1097",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F47A8,'size':1,'pad':3,'label':"lit_1095",'name':"@1095",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F47AC,'size':1,'pad':3,'label':"lit_1094",'name':"@1094",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F47B0,'size':1,'pad':3,'label':"lit_1057",'name':"@1057",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F47B4,'size':1,'pad':3,'label':"lit_1055",'name':"@1055",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F47B8,'size':1,'pad':3,'label':"lit_1053",'name':"@1053",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F47BC,'size':1,'pad':3,'label':"lit_1052",'name':"@1052",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F47C0,'size':1,'pad':3,'label':"lit_1014",'name':"@1014",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F47C4,'size':1,'pad':3,'label':"lit_1012",'name':"@1012",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F47C8,'size':1,'pad':3,'label':"lit_1010",'name':"@1010",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F47CC,'size':4,'pad':0,'label':"struct_805F47CC",'name':None,'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[2,0,0],'sh':[0,0,0],'type':"Structure"},
{'addr':0x805F47D0,'size':12,'pad':0,'label':"lit_3769",'name':"@3769",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F47DC,'size':76,'pad':0,'label':"l_HIO",'name':"l_HIO",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[11,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4828,'size':4,'pad':0,'label':"ko_ct",'name':"ko_ct",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[2,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F482C,'size':12,'pad':0,'label':"lit_4218",'name':"@4218",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4838,'size':12,'pad':0,'label':"lit_4219",'name':"@4219",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4844,'size':12,'pad':0,'label':"lit_4220",'name':"@4220",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4850,'size':12,'pad':0,'label':"lit_4221",'name':"@4221",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F485C,'size':48,'pad':0,'label':"target_pos",'name':"target_pos",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[2,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F488C,'size':12,'pad':0,'label':"lit_4708",'name':"@4708",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4898,'size':12,'pad':0,'label':"lit_4709",'name':"@4709",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F48A4,'size':12,'pad':0,'label':"lit_4710",'name':"@4710",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F48B0,'size':12,'pad':0,'label':"lit_4711",'name':"@4711",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F48BC,'size':48,'pad':0,'label':"top_pos_data",'name':"top_pos_data",'lib':-1,'tu':3,'section':3,'class_template':None,'static':True,'is_reachable':False,'r':[1,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F48EC,'size':4,'pad':0,'label':"data_805F48EC",'name':"sInstance__40JASGlobalInstance<19JASDefaultBankTable>",'lib':-1,'tu':3,'section':3,'class_template':True,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F48F0,'size':4,'pad':0,'label':"data_805F48F0",'name':"sInstance__35JASGlobalInstance<14JASAudioThread>",'lib':-1,'tu':3,'section':3,'class_template':True,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F48F4,'size':4,'pad':0,'label':"data_805F48F4",'name':"sInstance__27JASGlobalInstance<7Z2SeMgr>",'lib':-1,'tu':3,'section':3,'class_template':True,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F48F8,'size':4,'pad':0,'label':"data_805F48F8",'name':"sInstance__28JASGlobalInstance<8Z2SeqMgr>",'lib':-1,'tu':3,'section':3,'class_template':True,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F48FC,'size':4,'pad':0,'label':"data_805F48FC",'name':"sInstance__31JASGlobalInstance<10Z2SceneMgr>",'lib':-1,'tu':3,'section':3,'class_template':True,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4900,'size':4,'pad':0,'label':"data_805F4900",'name':"sInstance__32JASGlobalInstance<11Z2StatusMgr>",'lib':-1,'tu':3,'section':3,'class_template':True,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4904,'size':4,'pad':0,'label':"data_805F4904",'name':"sInstance__31JASGlobalInstance<10Z2DebugSys>",'lib':-1,'tu':3,'section':3,'class_template':True,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4908,'size':4,'pad':0,'label':"data_805F4908",'name':"sInstance__36JASGlobalInstance<15JAISoundStarter>",'lib':-1,'tu':3,'section':3,'class_template':True,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F490C,'size':4,'pad':0,'label':"data_805F490C",'name':"sInstance__35JASGlobalInstance<14Z2SoundStarter>",'lib':-1,'tu':3,'section':3,'class_template':True,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4910,'size':4,'pad':0,'label':"data_805F4910",'name':"sInstance__33JASGlobalInstance<12Z2SpeechMgr2>",'lib':-1,'tu':3,'section':3,'class_template':True,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4914,'size':4,'pad':0,'label':"data_805F4914",'name':"sInstance__28JASGlobalInstance<8JAISeMgr>",'lib':-1,'tu':3,'section':3,'class_template':True,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4918,'size':4,'pad':0,'label':"data_805F4918",'name':"sInstance__29JASGlobalInstance<9JAISeqMgr>",'lib':-1,'tu':3,'section':3,'class_template':True,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F491C,'size':4,'pad':0,'label':"data_805F491C",'name':"sInstance__33JASGlobalInstance<12JAIStreamMgr>",'lib':-1,'tu':3,'section':3,'class_template':True,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4920,'size':4,'pad':0,'label':"data_805F4920",'name':"sInstance__31JASGlobalInstance<10Z2SoundMgr>",'lib':-1,'tu':3,'section':3,'class_template':True,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4924,'size':4,'pad':0,'label':"data_805F4924",'name':"sInstance__33JASGlobalInstance<12JAISoundInfo>",'lib':-1,'tu':3,'section':3,'class_template':True,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4928,'size':4,'pad':0,'label':"data_805F4928",'name':"sInstance__34JASGlobalInstance<13JAUSoundTable>",'lib':-1,'tu':3,'section':3,'class_template':True,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F492C,'size':4,'pad':0,'label':"data_805F492C",'name':"sInstance__38JASGlobalInstance<17JAUSoundNameTable>",'lib':-1,'tu':3,'section':3,'class_template':True,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4930,'size':4,'pad':0,'label':"data_805F4930",'name':"sInstance__33JASGlobalInstance<12JAUSoundInfo>",'lib':-1,'tu':3,'section':3,'class_template':True,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4934,'size':4,'pad':0,'label':"data_805F4934",'name':"sInstance__32JASGlobalInstance<11Z2SoundInfo>",'lib':-1,'tu':3,'section':3,'class_template':True,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4938,'size':4,'pad':0,'label':"data_805F4938",'name':"sInstance__34JASGlobalInstance<13Z2SoundObjMgr>",'lib':-1,'tu':3,'section':3,'class_template':True,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F493C,'size':4,'pad':0,'label':"data_805F493C",'name':"sInstance__31JASGlobalInstance<10Z2Audience>",'lib':-1,'tu':3,'section':3,'class_template':True,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4940,'size':4,'pad':0,'label':"data_805F4940",'name':"sInstance__32JASGlobalInstance<11Z2FxLineMgr>",'lib':-1,'tu':3,'section':3,'class_template':True,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4944,'size':4,'pad':0,'label':"data_805F4944",'name':"sInstance__31JASGlobalInstance<10Z2EnvSeMgr>",'lib':-1,'tu':3,'section':3,'class_template':True,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F4948,'size':4,'pad':0,'label':"data_805F4948",'name':"sInstance__32JASGlobalInstance<11Z2SpeechMgr>",'lib':-1,'tu':3,'section':3,'class_template':True,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
{'addr':0x805F494C,'size':4,'pad':0,'label':"data_805F494C",'name':"sInstance__34JASGlobalInstance<13Z2WolfHowlMgr>",'lib':-1,'tu':3,'section':3,'class_template':True,'static':True,'is_reachable':False,'r':[0,0,0],'sh':[0,0,0],'type':"ArbitraryData"},
]
# Names
# Maps each symbol's label (the 'label' field of the symbol-table entries
# above) to its sequential index in that table.  Auto-generated from the
# binary's symbol map; do not edit by hand — indices must stay in sync
# with the entry order of the table.
SYMBOL_NAMES = {
"_prolog":0,
"_epilog":1,
"_unresolved":2,
"__register_global_object":3,
"__destroy_global_chain":4,
"__ct__12daB_GM_HIO_cFv":5,
"anm_init__FP10b_gm_classifUcf":6,
"nodeCallBack__FP8J3DJointi":7,
"daB_GM_Draw__FP10b_gm_class":8,
"__dt__4cXyzFv":9,
"s_ko_del__FPvPv":10,
"damage_check__FP10b_gm_class":11,
"bg_check__FP10b_gm_class":12,
"__dt__8cM3dGPlaFv":13,
"s_ko_sub__FPvPv":14,
"s_ko2_move__FPvPv":15,
"s_ko2_get__FPvPv":16,
"s_ko_move__FPvPv":17,
"b_gm_wait__FP10b_gm_class":18,
"b_gm_move__FP10b_gm_class":19,
"b_gm_beam__FP10b_gm_class":20,
"b_gm_kogoma__FP10b_gm_class":21,
"b_gm_damage__FP10b_gm_class":22,
"b_gm_drop__FP10b_gm_class":23,
"action__FP10b_gm_class":24,
"foot_IK__FP10b_gm_classP9b_gm_foot":25,
"foot_IK_main__FP10b_gm_class":26,
"anm_se_set__FP10b_gm_class":27,
"cam_3d_morf__FP10b_gm_classf":28,
"cam_spd_set__FP10b_gm_class":29,
"demo_camera__FP10b_gm_class":30,
"daB_GM_Execute__FP10b_gm_class":31,
"daB_GM_IsDelete__FP10b_gm_class":32,
"daB_GM_Delete__FP10b_gm_class":33,
"useHeapInit__FP10fopAc_ac_c":34,
"__dt__12J3DFrameCtrlFv":35,
"daB_GM_Create__FP10fopAc_ac_c":36,
"__ct__10b_gm_classFv":37,
"__dt__9b_gm_footFv":38,
"__ct__9b_gm_footFv":39,
"__ct__8dCcD_SphFv":40,
"__dt__8dCcD_SphFv":41,
"__dt__8cM3dGSphFv":42,
"__dt__8cM3dGAabFv":43,
"__dt__10dCcD_GSttsFv":44,
"__dt__12dBgS_ObjAcchFv":45,
"__dt__12dBgS_AcchCirFv":46,
"__dt__10cCcD_GSttsFv":47,
"__dt__12daB_GM_HIO_cFv":48,
"__sinit_d_a_b_gm_cpp":49,
"func_805F40C0":50,
"func_805F40C8":51,
"setCurrentPos__16obj_ystone_classF4cXyz":52,
"dComIfGp_particle_set__FUsPC4cXyzPC5csXyzPC4cXyz":53,
"changeDemoMode__9daPy_py_cFUliis":54,
"_ctors":55,
"_dtors":56,
# Anonymous literal pool entries (labels derive from compiler temp numbers).
"lit_3774":57,
"lit_3775":58,
"lit_3776":59,
"lit_3777":60,
"lit_3778":61,
"lit_3779":62,
"lit_3780":63,
"lit_3794":64,
"lit_3987":65,
"lit_3988":66,
"lit_3989":67,
"lit_4154":68,
"lit_4155":69,
"lit_4214":70,
"lit_4215":71,
"lit_4216":72,
"lit_4341":73,
"lit_4342":74,
"lit_4343":75,
"lit_4344":76,
"lit_4345":77,
"lit_4346":78,
"lit_4347":79,
"lit_4348":80,
"lit_4349":81,
"lit_4350":82,
"lit_4351":83,
"lit_4352":84,
"lit_4353":85,
"lit_4390":86,
"lit_4391":87,
"lit_4540":88,
"lit_4541":89,
"lit_4615":90,
"lit_4616":91,
"lit_4617":92,
"lit_4618":93,
"lit_4703":94,
"lit_4704":95,
"lit_4705":96,
"lit_4910":97,
"lit_4911":98,
"lit_4912":99,
"lit_4913":100,
"lit_4914":101,
"lit_4915":102,
"lit_4916":103,
"lit_4917":104,
"lit_4918":105,
"lit_4919":106,
"lit_4920":107,
"lit_5466":108,
"lit_5467":109,
"lit_5468":110,
"lit_5469":111,
"lit_5470":112,
"lit_5471":113,
"lit_5472":114,
"lit_5473":115,
"lit_5474":116,
"lit_5475":117,
"lit_5476":118,
"lit_5477":119,
"lit_5478":120,
"lit_5479":121,
"lit_5480":122,
"lit_5481":123,
"lit_5482":124,
"lit_5483":125,
"lit_5484":126,
"lit_5485":127,
"lit_5486":128,
"lit_5487":129,
"lit_5488":130,
"lit_5489":131,
"lit_5490":132,
"lit_5491":133,
"lit_5492":134,
"lit_5493":135,
"lit_5494":136,
"lit_5495":137,
"lit_5496":138,
"lit_5497":139,
"lit_5498":140,
"lit_5499":141,
"lit_5500":142,
"lit_5501":143,
"lit_5502":144,
"lit_5503":145,
"lit_5504":146,
"lit_5505":147,
"lit_5506":148,
"lit_5507":149,
"lit_5508":150,
"lit_5509":151,
"lit_5510":152,
"lit_5511":153,
"lit_5512":154,
"lit_5513":155,
"lit_5514":156,
"lit_5515":157,
"lit_5516":158,
"lit_5517":159,
"lit_5518":160,
"lit_5522":161,
"lit_6079":162,
"lit_6080":163,
"lit_6081":164,
"lit_6082":165,
"lit_6083":166,
"lit_6084":167,
"lit_6085":168,
"lit_6086":169,
"lit_6087":170,
"lit_6088":171,
"lit_6089":172,
"lit_6249":173,
"lit_6363":174,
"lit_6364":175,
"lit_6595":176,
"lit_6596":177,
"lit_6597":178,
"lit_6598":179,
# Named data, vtables, and per-TU globals.
"d_a_b_gm__stringBase0":180,
"cNullVec__6Z2Calc":181,
"lit_1787":182,
"name_4019":183,
"name_4042":184,
"lit_4354":185,
"lit_4619":186,
"lit_4656":187,
"top_j":188,
"lit_5519":189,
"name_5641":190,
"body_sph_src":191,
"core_sph_src":192,
"hand_sph_src":193,
"foot_sph_src":194,
"beam_sph_src":195,
"l_daB_GM_Method":196,
"g_profile_B_GM":197,
"__vt__12dBgS_AcchCir":198,
"__vt__10cCcD_GStts":199,
"__vt__10dCcD_GStts":200,
"__vt__8cM3dGSph":201,
"__vt__8cM3dGAab":202,
"__vt__12dBgS_ObjAcch":203,
"__vt__12J3DFrameCtrl":204,
"__vt__8cM3dGPla":205,
"__vt__12daB_GM_HIO_c":206,
"__global_destructor_chain":207,
"lit_1109":208,
"lit_1107":209,
"lit_1105":210,
"lit_1104":211,
"lit_1099":212,
"lit_1097":213,
"lit_1095":214,
"lit_1094":215,
"lit_1057":216,
"lit_1055":217,
"lit_1053":218,
"lit_1052":219,
"lit_1014":220,
"lit_1012":221,
"lit_1010":222,
"struct_805F47CC":223,
"lit_3769":224,
"l_HIO":225,
"ko_ct":226,
"lit_4218":227,
"lit_4219":228,
"lit_4220":229,
"lit_4221":230,
"target_pos":231,
"lit_4708":232,
"lit_4709":233,
"lit_4710":234,
"lit_4711":235,
"top_pos_data":236,
# JASGlobalInstance<T> singleton slots (labels derive from their address).
"data_805F48EC":237,
"data_805F48F0":238,
"data_805F48F4":239,
"data_805F48F8":240,
"data_805F48FC":241,
"data_805F4900":242,
"data_805F4904":243,
"data_805F4908":244,
"data_805F490C":245,
"data_805F4910":246,
"data_805F4914":247,
"data_805F4918":248,
"data_805F491C":249,
"data_805F4920":250,
"data_805F4924":251,
"data_805F4928":252,
"data_805F492C":253,
"data_805F4930":254,
"data_805F4934":255,
"data_805F4938":256,
"data_805F493C":257,
"data_805F4940":258,
"data_805F4944":259,
"data_805F4948":260,
"data_805F494C":261,
}
|
25,393 | 081ccbcf551d22b5d3bf32b44c9804ec5e655f18 | # -*- coding: utf-8 -*-
# pylint: disable=invalid-name
"""
This submits the scaling test, i.e. the same calculation on a different number of nodes.
Repository aiida.out in c.get_retrieved_node()._repository._get_base_folder().abspath,
where c is the calculation node which the workchain produces
"""
from __future__ import absolute_import
from __future__ import print_function

import os
import sys
from glob import glob
from pathlib import Path

import ase.build
import click

from aiida.common import NotExistent
from aiida.engine import run, submit
from aiida.orm import Code, Dict, Str, StructureData
from aiida.plugins import DataFactory
from aiida_cp2k.workchains import Cp2kMultistageWorkChain
CifData = DataFactory("cif")
@click.command("cli")
@click.argument("codelabel")
@click.option("--run_test", is_flag=True, help="Actually submit calculation")
def main(codelabel, run_test):
    """Submit (or dry-run) the CP2K scaling test on 1-32 nodes.

    For every structure and node count a Cp2kMultistageWorkChain input set
    is built; it is actually submitted only when --run_test is given,
    otherwise a dry run (without stored provenance) validates the inputs.
    """
    try:
        code = Code.get_from_string(codelabel)
    except NotExistent:
        print("The code '{}' does not exist".format(codelabel))
        sys.exit(1)
    allstructures = [
        "/home/kevin/Dropbox (LSMO)/proj61_metal_channels_shared/8_benchmark_daint/structures/dft_opt/NAVJAW.cif"
    ]
    for num_nodes in [1, 2, 4, 8, 12, 16, 32]:
        for s in allstructures:
            cif = CifData(file=s)
            name = Path(s).stem
            structure = cif.get_structure()
            structure.label = name
            structure.store()
            parameters = Dict(dict={})
            options = {
                "resources": {"num_machines": num_nodes, "num_cores_per_mpiproc": 1},
                "max_wallclock_seconds": 1 * 60 * 60,
            }
            inputs = {
                # "sp" protocol tag: single-point calculation.
                "protocol_tag": Str("sp"),
                "cp2k_base": {
                    "cp2k": {
                        "structure": structure,
                        "parameters": parameters,
                        "code": code,
                        "metadata": {"options": options},
                    }
                },
                "metadata": {"label": "scaling_test_" + str(num_nodes)},
            }
            if run_test:
                submit(Cp2kMultistageWorkChain, **inputs)
            else:
                print("Generating test input ...")
                # BUG FIX: the inputs key is "cp2k_base", not "base"; the
                # old code raised KeyError on every dry-run invocation.
                inputs["cp2k_base"]["cp2k"]["metadata"]["dry_run"] = True
                inputs["cp2k_base"]["cp2k"]["metadata"]["store_provenance"] = False
                run(Cp2kMultistageWorkChain, **inputs)
                print("Submission test successful")
                # BUG FIX: the flag is --run_test, not --run.
                print("In order to actually submit, add '--run_test'")
# Run the click CLI entry point when executed as a script.
if __name__ == "__main__":
    main()  # pylint: disable=no-value-for-parameter
|
25,394 | 34b393bced3496b5170b24093e4b6e6bce923dc1 | from classes.game import Person
from classes.magic import Spell
from classes.colors import bcolors
from classes.inventory import Item
# Attack Magic
# Spell(name, cost, value, type) -- presumably (MP cost, damage/heal amount);
# TODO confirm against classes.magic.Spell.
fire = Spell("Fire Heat", 27, 150, "Dark")
thunder = Spell("Thunder", 30, 160, "Dark")
blizzard = Spell("Blizzard", 35, 170, "Dark")
quake = Spell("Quake", 40, 180, "Dark")
meteor = Spell("Meteor", 43, 190, "Dark")
crunch = Spell("Crunch", 45, 200, "Dark")
dark_pulse = Spell("Dark Pulse", 47, 210, "Dark")
shadow_ball = Spell("Shadow Ball", 50, 220, "Dark")
destiny_bond = Spell("Destiny Bond", 53, 230, "Dark")
# Heal magic
cure = Spell("Recover", 20, 300, "Healing")
cure2 = Spell("Regenerator", 25, 350, "Healing")
# Usables
# Item(name, itype, description, value) -- the battle loop heals/deals
# item.value, so the effect is the number here, not the description text.
# NOTE(review): descriptions and values disagree (e.g. "Heals 50 HP" vs 150).
heal_potion1 = Item("Heal Potion(Low)", "potion", "Heals 50 HP", 150)
heal_potion2 = Item("Hi-Heal Potion", "potion", "Heals 100 HP", 200)
heal_potion3 = Item("Super Heal Potion", "potion", "Heals 500 HP", 600)
heal_elixir = Item("Elixir", "elixir", "Heals full HP/MP of one member", 99999)
heal_mega_elixir = Item("Mega Elixir", "elixir", "Heals full HP/MP of the whole team", 999999)
# Damage Items
grenade = Item("Grenade", "throwable", "Deals 500 damage", 500)
# Used by player
player_magic = [fire, thunder, blizzard, quake, meteor, crunch, dark_pulse, shadow_ball, destiny_bond, cure, cure2]
player_usables = [{"item": heal_potion1, "quantity": 50}, {"item": heal_potion2, "quantity": 20},
                  {"item": heal_potion3, "quantity": 3}, {"item": heal_elixir, "quantity": 2},
                  {"item": heal_mega_elixir, "quantity": 2}, {"item": grenade, "quantity": 2}]
# Players
# Person(name, hp, mp, attack, defence, magic, items) -- TODO confirm order.
player1 = Person("Tony :", 4567, 245, 80, 34, player_magic, player_usables)
player2 = Person("Bruce:", 3647, 245, 100, 34, player_magic, player_usables)
player3 = Person("Steve :", 4125, 245, 120, 34, player_magic, player_usables)
enemy = Person("Thanos:", 12000, 245, 300, 7, [], [])
players = [player1, player2, player3]
run = True  # battle-loop flag; the loop exits via break
i = 0  # NOTE(review): never used below -- candidate for removal
print(bcolors.FAIL + bcolors.BOLD + "AN ENEMY ATTACKS" + bcolors.ENDC)
# Choose the type of attack
# Main battle loop: each round every party member picks an action
# (1 melee, 2 magic, 3 item, 4 quit), then the enemy hits the party.
while run:
    print("======================================")
    print("\n")
    for player in players:
        player.get_player_stats()
    print("\n")
    enemy.get_enemy_stats()
    print("\n")
    for player in players:
        player.choose_action()
        choice = input(bcolors.BOLD + "Choose your attack type: " + bcolors.ENDC)
        index = int(choice) - 1
        # Melee Attack (1)
        if index == 0:
            print(bcolors.ATTACKCHOSEN + bcolors.BOLD + "You chose Melee Attack!", str(index) + bcolors.ENDC)
            dmg = player.generate_damage()
            enemy.take_damage(dmg)
            print(bcolors.ATTACKGIVETAKE + bcolors.BOLD + "You attacked for", str(dmg), "damage!" + bcolors.ENDC)
        # Magic Attack (2)
        elif index == 1:
            print(bcolors.ATTACKCHOSEN + bcolors.BOLD + "You chose Magic Attack!", str(index) + bcolors.ENDC)
            player.choose_magic()
            magic_choice = input(bcolors.BOLD + "Choose your Magic attack: " + bcolors.ENDC)
            magic_index = int(magic_choice) - 1
            # Entering 0 cancels the magic menu.
            if magic_index == -1:
                continue
            spell = player.magic[magic_index]
            magic_dmg = spell.generate_damage()
            magic_heal = spell.generate_heal()
            # Verify if Magic Points Available
            current_mp = player.get_mp()
            if spell.cost > current_mp:
                print(bcolors.FAIL + bcolors.BOLD + "\nNot enough Magic Points\n" + bcolors.ENDC)
                continue
            # Magic points reduced
            player.reduce_mp(spell.cost)
            # Spell Choice
            print(bcolors.ATTACKCHOSEN + bcolors.BOLD + "You chose", spell.name + "!" + bcolors.ENDC)
            # healing
            if spell.stype == "Healing":
                player.heal(magic_heal)
                print(bcolors.ATTACKGIVETAKE + bcolors.BOLD + "You healed yourself for", str(magic_heal),
                      "HP!" + bcolors.ENDC)
            # Dark magic damage done
            elif spell.stype == "Dark":
                enemy.take_damage(magic_dmg)
                print(bcolors.ATTACKGIVETAKE + bcolors.BOLD + "You attacked for", str(magic_dmg),
                      "spell damage!" + bcolors.ENDC)
        # Usable and Items
        elif index == 2:
            print(bcolors.ATTACKCHOSEN + bcolors.BOLD + "You chose Items!", str(index) + bcolors.ENDC)
            player.choose_items()
            items_choice = input(bcolors.BOLD + "Choose Item: " + bcolors.ENDC)
            items_index = int(items_choice) - 1
            # Entering 0 cancels the item menu.
            if items_index == -1:
                continue
            item = player.items[items_index]["item"]
            if player.items[items_index]["quantity"] == 0:
                print(bcolors.FAIL + bcolors.BOLD + "\nNot enough", item.name, "\n" + bcolors.ENDC)
                continue
            item_dmg = item.generate_damage()
            player.items[items_index]["quantity"] -= 1
            # Item Choice
            print(bcolors.ATTACKCHOSEN + bcolors.BOLD + "You chose", item.name + "!" + bcolors.ENDC)
            if item.itype == "potion":
                player.heal(item.value)
                print(bcolors.ATTACKGIVETAKE + bcolors.BOLD + "You healed yourself for", str(item.value),
                      "HP!" + bcolors.ENDC)
            elif item.itype == "elixir":
                if item.name == "Mega Elixir":
                    # BUG FIX: the Mega Elixir is described as restoring the
                    # whole team, but it previously restored only the user.
                    for member in players:
                        member.hp = member.maxhp
                        member.mp = member.maxmp
                    print(bcolors.ATTACKGIVETAKE + bcolors.BOLD + "You fully restored your Team's HP and MP!" + bcolors.ENDC)
                else:
                    player.hp = player.maxhp
                    player.mp = player.maxmp
                    print(bcolors.ATTACKGIVETAKE + bcolors.BOLD + "You fully restored your HP and MP!" + bcolors.ENDC)
            elif item.itype == "throwable":
                enemy.take_damage(item_dmg)
                print(bcolors.ATTACKGIVETAKE + bcolors.BOLD + "You attacked for", str(item_dmg), "damage!" + bcolors.ENDC)
        # Quit Game
        elif index == 3:
            print(bcolors.FAIL + bcolors.BOLD + "You Quit!" + bcolors.ENDC)
            break
        else:
            print("Such choice does not exists!!")
            continue
    # "index" leaks out of the inner loop: a quit (choice 4) ends the battle.
    if index == 3:
        break
    # Enemy turn: always a melee attack against every party member.
    enemy_choice = 1
    if enemy_choice == 1:
        print(bcolors.ATTACKCHOSEN + bcolors.BOLD + "Enemy chose Melee Attack!" + bcolors.ENDC)
        dmg = enemy.generate_damage()
        for player in players:
            player.take_damage(dmg)
            print(bcolors.ATTACKGIVETAKE + bcolors.BOLD + "You were attacked for", str(dmg), "damage!" + bcolors.ENDC)
    if enemy.get_hp() == 0:
        print(bcolors.OKGREEN + bcolors.BOLD + "You Won!!" + bcolors.ENDC)
        break
    elif player1.get_hp() == 0:
        # NOTE(review): defeat currently triggers on player1 alone;
        # presumably the whole party should be checked -- confirm intent.
        print(bcolors.FAIL + bcolors.BOLD + "You Lost!!" + bcolors.ENDC)
        break
|
25,395 | be8accefb644f3fce4437b20a1184bbff2c95224 | colorname = input()
colorlist = colorname.split()
colorname1 = input()
colorlist1 = colorname1.split()
colorname2 = input()
colorlist2 = colorname2.split()
print('First & Last Color is : ["' + colorlist[1] + '" , "' + colorlist[-1] +'"]')
print('First & Last Color is : ["' + colorlist1[1] + '" , "' + colorlist1[-1] +'"]')
print('First & Last Color is : ["' + colorlist2[1] + '" , "' + colorlist2[-1] +'"]')
|
25,396 | 195872ee013fe559d9c4fb7b364f2df7fdb6faf2 | import numpy as np
import os
import tools
import matplotlib.pyplot as plt
########################
# Plot training/validation/testing overall accuracy and mean F1 per epoch
# for several experiment runs, read from saved confusion matrices.
# Earlier run names kept for reference:
# 33_frrn_slim_valid_scheduled
# 34_frrn_slim_valid_deep_scheduled
# 35_frrn_slim_valid_2_scheduled
sm = 3  # smoothing window applied at plot time
names = 'C1O_16_L_NNSC','C1O_16_L_NNSC_WD'  # experiment dirs under ./results/
smooth_window = 0  # optional pre-smoothing of the raw curves (0 = disabled)
# Per-experiment metric curves; one array (indexed by epoch) per name.
n_train_oas = []
n_train_mean_f1s = []
n_valid_oas = []
n_valid_mean_f1s = []
n_test_oas = []
n_test_mean_f1s = []
# Load every epoch's confusion matrix for each experiment and reduce it to
# overall accuracy and mean F1 via tools.get_confusion_metrics.
for name in names:
    root = './results/' + name + '/confusion_matrices/'
    # Number of epochs = highest index in the saved file names + 1.
    n = max([int(txt.split('_')[-1].split('.')[0]) for txt in os.listdir(root)]) + 1
    train_oas = np.zeros(n)
    train_mean_f1s = np.zeros(n)
    valid_oas = np.zeros(n)
    valid_mean_f1s = np.zeros(n)
    test_oas = np.zeros(n)
    test_mean_f1s = np.zeros(n)
    for i in range(n):
        confusions = np.load(root + 'CM_TRAINING_{}.npy'.format(i))
        pctgs, precisions, recall, f1, mean_f1, oa = tools.get_confusion_metrics(confusions)
        train_oas[i] = oa
        train_mean_f1s[i] = mean_f1
        confusions = np.load(root + 'CM_VALIDATION_{}.npy'.format(i))
        pctgs, precisions, recall, f1, mean_f1, oa = tools.get_confusion_metrics(confusions)
        valid_oas[i] = oa
        valid_mean_f1s[i] = mean_f1
        try:
            confusions = np.load(root + 'CM_TESTING_{}.npy'.format(i))
            pctgs, precisions, recall, f1, mean_f1, oa = tools.get_confusion_metrics(confusions)
            test_oas[i] = oa
            test_mean_f1s[i] = mean_f1
        except IOError:
            # Testing runs less often: carry the previous epoch's value
            # forward when no matrix exists for this epoch.
            # NOTE(review): at i == 0 this reads index -1 (still 0.0 here).
            test_oas[i] = test_oas[i - 1]
            test_mean_f1s[i] = test_mean_f1s[i - 1]
    if smooth_window:
        train_oas = tools.smooth1d(train_oas, smooth_window)
        train_mean_f1s = tools.smooth1d(train_mean_f1s, smooth_window)
        valid_oas = tools.smooth1d(valid_oas, smooth_window)
        valid_mean_f1s = tools.smooth1d(valid_mean_f1s, smooth_window)
        test_oas = tools.smooth1d(test_oas, smooth_window)
        test_mean_f1s = tools.smooth1d(test_mean_f1s, smooth_window)
    n_train_oas.append(train_oas)
    n_train_mean_f1s.append(train_mean_f1s)
    n_valid_oas.append(valid_oas)
    n_valid_mean_f1s.append(valid_mean_f1s)
    n_test_oas.append(test_oas)
    n_test_mean_f1s.append(test_mean_f1s)
# best_train_oa_epo = np.argmax(train_oas)
# best_train_oa_val = train_oas[best_train_oa_epo]*100
# best_valid_oa_epo = np.argmax(valid_oas)
# best_valid_oa_val = valid_oas[best_valid_oa_epo]*100
# best_test_oa_epo = np.argmax(test_oas)
# best_test_oa_val = test_oas[best_test_oa_epo]*100
#
# best_train_f1_epo = np.argmax(train_mean_f1s)
# best_train_f1_val = train_mean_f1s[best_train_f1_epo]*100
# best_valid_f1_epo = np.argmax(valid_mean_f1s)
# best_valid_f1_val = valid_mean_f1s[best_valid_f1_epo]*100
# best_test_f1_epo = np.argmax(test_mean_f1s)
# best_test_f1_val = test_mean_f1s[best_test_f1_epo]*100
# Build the figure title "name1 vs. name2 vs. ..."; the [:-4] strips the
# trailing "vs. " left by the loop. Note this reuses the name `n` from above.
N = ''
for n in names:
    N += n
    N += ' vs. '
plt.suptitle(N[:-4])
linestyles = ['-', '--', '-.']  # train / validation / test
colors = ['red', 'green', 'blue', 'black', 'cyan', 'orange', 'magenta']  # one per experiment
# Left panel: overall accuracy (smoothed with window `sm`, scaled to %).
plt.subplot(1, 2, 1)
plt.title('Overall accuracies')
for i in range(len(names)):
    plt.plot(tools.smooth1d(np.array(n_train_oas[i]), sm) * 100, label='Training - ' + names[i], linestyle=linestyles[0],
             color=colors[i])
    plt.plot(tools.smooth1d(np.array(n_valid_oas[i]), sm) * 100, label='Validation - ' + names[i], linestyle=linestyles[1],
             color=colors[i])
    plt.plot(tools.smooth1d(np.array(n_test_oas[i]), sm) * 100, label='Testing - ' + names[i], linestyle=linestyles[2],
             color=colors[i])
plt.ylim((0, 100))
plt.legend()
plt.xlabel('Epoch')
plt.ylabel('Overall accuracy [%]')
# plt.hlines(y=best_train_oa_val, xmin=0, xmax=n, linewidth=1, color='blue')
# plt.vlines(x=best_train_oa_epo, ymin=0, ymax=100, linewidth=1, color='blue')
# plt.hlines(y=best_valid_oa_val, xmin=0, xmax=n, linewidth=1, color='orange')
# plt.vlines(x=best_valid_oa_epo, ymin=0, ymax=100, linewidth=1, color='orange')
# plt.hlines(y=best_test_oa_val, xmin=0, xmax=n, linewidth=1, color='green')
# plt.vlines(x=best_test_oa_epo, ymin=0, ymax=100, linewidth=1, color='green')
# # plt.savefig('run1000.png', bbox_inches='tight')
# MEAN F1 SCORE
# Right panel: mean F1 (legend uses the 2-char name prefix to stay short).
plt.subplot(1, 2, 2)
plt.title('Mean F1 scores')
for i in range(len(names)):
    plt.plot(tools.smooth1d(np.array(n_train_mean_f1s[i]), sm) * 100, label='Training - ' + names[i][:2], linestyle=linestyles[0],
             color=colors[i])
    plt.plot(tools.smooth1d(np.array(n_valid_mean_f1s[i]), sm) * 100, label='Validation - ' + names[i][:2], linestyle=linestyles[1],
             color=colors[i])
    plt.plot(tools.smooth1d(np.array(n_test_mean_f1s[i]), sm) * 100, label='Testing - ' + names[i][:2], linestyle=linestyles[2],
             color=colors[i])
plt.ylim((0, 100))
plt.legend()
plt.xlabel('Epoch')
plt.ylabel('Mean F1 score [%]')
# plt.hlines(y=best_train_f1_val, xmin=0, xmax=n, linewidth=1, color='blue')
# plt.vlines(x=best_train_f1_epo, ymin=0, ymax=100, linewidth=1, color='blue')
# plt.hlines(y=best_valid_f1_val, xmin=0, xmax=n, linewidth=1, color='orange')
# plt.vlines(x=best_valid_f1_epo, ymin=0, ymax=100, linewidth=1, color='orange')
# plt.hlines(y=best_test_f1_val, xmin=0, xmax=n, linewidth=1, color='green')
# plt.vlines(x=best_test_f1_epo, ymin=0, ymax=100, linewidth=1, color='green')
# # plt.savefig('run1000.png', bbox_inches='tight')
plt.show()
|
25,397 | efbef8647094aa717b48ea49afb06c3254ac0f5a | from tkinter import*
from PIL import ImageTk, Image
# Minimal photo viewer: one image label plus an Exit button.
root = Tk()
root.title('Say Cheese!')
root.iconbitmap('images/icons/arrow.ico')

# Keep a module-level reference to the image so Tk does not lose it
# to garbage collection.
my_img = ImageTk.PhotoImage(Image.open('images/5.jpeg'))
Label(image=my_img).pack()

Button(root, text='Exit', command=root.quit, padx=20).pack()

root.mainloop()
|
25,398 | 84cc572fc0be761bbdeef9d724e46b9e75fc1fd8 | import requests
import time
import json
import argparse
import numpy as np
import service
# Labels excluded from the per-keyword evaluation loop below.
non_keyword_set = set(["_silence_", "_unknown_"])
# Output classes of the keyword-spotting models, in label-index order.
keyword_list = ["_silence_", "_unknown_", "yes", "no", "up", "down", \
"left", "right", "on", "off", "stop", "go"]
# ONNX model files (under model/) to benchmark.
model_list = ["cnn_one_fstride4.onnx", "cnn_one_fstride8.onnx", "cnn_tpool2.onnx", "cnn_tpool3.onnx", "cnn_trad_fpool3.onnx", "google-speech-dataset-full.onnx", "google-speech-dataset-compact.onnx"]
def wait_util_idle(get_last_read_url, idle_read=2.2):
    """Block until the power meter reports an idle reading.

    Polls the wattsup server every 5 seconds and returns as soon as the
    last reading drops to ``idle_read`` watts, giving up after 15 polls.
    """
    attempts = 0
    while attempts < 15:
        time.sleep(5)
        reading = float(requests.get(get_last_read_url).json())
        if reading <= idle_read:
            return
        attempts += 1
def evaluate_model(get_read_url, get_last_read_url, reset_read_url):
    """Benchmark every model in model_list on each keyword.

    For each (model, keyword) pair: wait for the power meter to return to
    idle, reset its accumulated reading, run the evaluation, then collect
    accuracy, wall-clock duration, energy consumption and peak wattage.
    Per-keyword results and per-model averages are printed to stdout.
    """
    for model in model_list:
        serv = service.Caffe2LabelService("model/{}".format(model), keyword_list)
        model_accuracy = []
        model_consumption = []
        model_duration = []
        model_peak = -1  # running maximum of the per-keyword peak wattage
        for ind, keyword in enumerate(keyword_list):
            # Skip silence/unknown pseudo-labels.
            if keyword in non_keyword_set:
                continue
            # Let the machine settle, then zero the meter so the next read
            # covers only this evaluation.
            wait_util_idle(get_last_read_url)
            requests.get(reset_read_url)
            start_time = time.time()
            accuracy = round(serv.evaluate([keyword], [ind]), 3)
            duration = round(time.time() - start_time, 1)
            read_dic = requests.get(get_read_url).json()
            consumption = round(read_dic["consumption"], 1)
            peak = read_dic["peak"]
            print(model, keyword, accuracy, duration, consumption, peak)
            model_accuracy.append(accuracy)
            model_duration.append(duration)
            model_consumption.append(consumption)
            model_peak = max(model_peak, peak)
        print("model:", model)
        print("avg accuracy:", np.mean(model_accuracy))
        print("avg duration:", np.mean(model_duration))
        print("avg consumption:", np.mean(model_consumption))
        print("peak watt:", model_peak, "\n")
def main():
    """Parse the wattsup server address from the CLI and run the benchmark.

    Requires both --ip and --port; exits with a non-zero status when either
    is missing (the old code called exit(0), signalling success).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--ip",
        type=str,
        default="",
        help="The ip address to run this server on")
    parser.add_argument(
        "--port",
        type=str,
        default="",
        help="The port to run this server on")
    flags, _ = parser.parse_known_args()
    if not flags.ip or not flags.port:
        print("the ip address and the port of the wattsup_server must be provided")
        # BUG FIX: a missing required argument is an error -> exit status 1.
        raise SystemExit(1)
    wattsup_server_ip, wattsup_server_port = flags.ip, flags.port
    get_read_url = "http://{}:{}/get_read".format(wattsup_server_ip, wattsup_server_port)
    get_last_read_url = "http://{}:{}/get_last_read".format(wattsup_server_ip, wattsup_server_port)
    reset_read_url = "http://{}:{}/reset_read".format(wattsup_server_ip, wattsup_server_port)
    evaluate_model(get_read_url, get_last_read_url, reset_read_url)
# Script entry point.
if __name__ == '__main__':
    main()
|
25,399 | f150f752745f87a471bdbc8bbb0ffcb942817537 | #!/usr/bin/env python
from typing import List
class Solution:
    """Median of two sorted arrays (LeetCode 4)."""

    def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:
        """Return the median of the two sorted input lists.

        Simple O((m+n) log(m+n)) merge-and-pick implementation; the
        original body was left unfinished and did not parse.
        """
        return self.getMedian(sorted(nums1 + nums2))

    def getMedian(self, nums: List[int]) -> float:
        """Return the median of a sorted list; 0 for an empty list."""
        if not nums:
            return 0
        ln = len(nums)
        mid = ln // 2
        if ln % 2 == 0:
            # Even length: average the two middle elements.
            return (nums[mid - 1] + nums[mid]) / 2
        return float(nums[mid])
# Quick smoke checks; expected medians are 2.0 and 2.5.
p = lambda n1,n2: print(Solution().findMedianSortedArrays(n1,n2))
p([1,3],[2])
p([1,2],[3,4])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.