max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
example/arduino-example.py | krprashant94/Hybrid-Cryptography-on-Cloud | 10 | 6617451 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 21 22:30:25 2018
Author: <NAME>
"""
import time
from securefile import Encrypt
from securefile.keyset import RSA_KEY, DES_KEY, AES_KEY
from securefile.secureserial import SerialPort

# Read the key material streamed by the Arduino over the serial port.
ser = SerialPort()
ser.scan()
ser.open('COM3')
arduino_key = ser.read_key(console_log=True)
ser.close()

# Derive the symmetric/asymmetric keys from the received key block.
des_key = DES_KEY.genrate(arduino_key.des_key)
aes_key = AES_KEY.genrate(arduino_key.aes_key)
rsa_public_key = RSA_KEY.public_key_genrate(int(arduino_key.rsa_tuple[0]),
                                            int(arduino_key.rsa_tuple[1]))
rsa_private_key = RSA_KEY.private_key_genrate(6861, 57067)
chiper_shift = int(arduino_key.shift)

enc = Encrypt('test.md', delimiter=':')
enc.open()

# --- hybrid encryption pipeline, timed ---
start_time = time.time()
enc.base64_encrypt()
enc.aes_encrypt(aes_key, commit=True)
enc.des_encrypt(des_key, commit=True)
enc.rsa_encrypt(rsa_private_key, commit=True)
enc.caesar_cipher(key_shift=chiper_shift, commit=True)
encode_time = time.time() - start_time
print("--- %s seconds ---" % str(encode_time))

# --- decryption pipeline, timed separately ---
# BUGFIX: decode_time was previously computed from the *encode* start time,
# so the reported (and CSV-logged) decryption time silently included the
# whole encryption phase as well.
decode_start = time.time()
enc.caesar_decipher(key_shift=chiper_shift, commit=True)
enc.rsa_decrypt(rsa_public_key, commit=True)
enc.des_decrypt(des_key, commit=True)
enc.aes_decrypt(aes_key, commit=True)
enc.base64_decrypt(commit=True)
decode_time = time.time() - decode_start
print("--- %s seconds ---" % (decode_time))

# Append the measurements to a CSV log: text length, encode s, decode s.
# (The redundant file.close() inside the with-block was removed; the
# context manager already closes the file.)
with open("cipher.csv", "a", encoding="utf8") as file:
    file.write(str(len(enc.get_text())) + ',' + str(encode_time) + ',' + str(decode_time) + ",\n")
enc.close()
enc.close() | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 21 22:30:25 2018
Author: <NAME>
"""
import time
from securefile import Encrypt
from securefile.keyset import RSA_KEY, DES_KEY, AES_KEY
from securefile.secureserial import SerialPort
ser = SerialPort()
ser.scan()
ser.open('COM3')
arduino_key = ser.read_key(console_log=True)
ser.close()
des_key = DES_KEY.genrate(arduino_key.des_key)
aes_key = AES_KEY.genrate(arduino_key.aes_key)
rsa_public_key = RSA_KEY.public_key_genrate(int(arduino_key.rsa_tuple[0]),
int(arduino_key.rsa_tuple[1]))
rsa_private_key = RSA_KEY.private_key_genrate(6861, 57067)
chiper_shift = int(arduino_key.shift)
enc = Encrypt('test.md', delimiter=':')
enc.open()
start_time = time.time()
enc.base64_encrypt()
enc.aes_encrypt(aes_key, commit=True)
enc.des_encrypt(des_key, commit=True)
enc.rsa_encrypt(rsa_private_key, commit=True)
enc.caesar_cipher(key_shift=chiper_shift, commit=True)
encode_time = time.time() - start_time
print("--- %s seconds ---" % str(encode_time))
enc.caesar_decipher(key_shift=chiper_shift, commit=True)
enc.rsa_decrypt(rsa_public_key, commit=True)
enc.des_decrypt(des_key, commit=True)
enc.aes_decrypt(aes_key, commit=True)
enc.base64_decrypt(commit=True)
decode_time = time.time() - start_time
print("--- %s seconds ---" % (decode_time))
with open("cipher.csv", "a", encoding="utf8") as file:
file.write(str(len(enc.get_text())) + ','+ str(encode_time) + ','+ str(decode_time) + ",\n")
file.close()
enc.close() | en | 0.79441 | # -*- coding: utf-8 -*- Created on Sun Oct 21 22:30:25 2018 Author: <NAME> | 2.818552 | 3 |
eoxserver/resources/coverages/migrations/0002_browse_type_fields.py | kalxas/eoxserver | 25 | 6617452 | <filename>eoxserver/resources/coverages/migrations/0002_browse_type_fields.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-11-10 12:22
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import eoxserver.resources.coverages.models
import re
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.11.3, 2017-11-10).

    Adds optional per-channel nodata/range fields to ``BrowseType`` and
    relaxes the band-expression fields to nullable 512-char strings
    validated by ``band_expression_validator``.

    NOTE(review): generated migrations are historical records and should
    not be hand-edited; only comments were added here.
    """

    dependencies = [
        ('coverages', '0001_initial'),
    ]

    operations = [
        # --- new optional per-band no-data value and display-range bounds ---
        migrations.AddField(
            model_name='browsetype',
            name='alpha_nodata_value',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='browsetype',
            name='alpha_range_max',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='browsetype',
            name='alpha_range_min',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='browsetype',
            name='blue_nodata_value',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='browsetype',
            name='blue_range_max',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='browsetype',
            name='blue_range_min',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='browsetype',
            name='green_nodata_value',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='browsetype',
            name='green_range_max',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='browsetype',
            name='green_range_min',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='browsetype',
            name='red_or_grey_nodata_value',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='browsetype',
            name='red_or_grey_range_max',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='browsetype',
            name='red_or_grey_range_min',
            field=models.FloatField(blank=True, null=True),
        ),
        # --- band-expression fields become optional validated strings ---
        migrations.AlterField(
            model_name='browsetype',
            name='alpha_expression',
            field=models.CharField(blank=True, max_length=512, null=True, validators=[eoxserver.resources.coverages.models.band_expression_validator]),
        ),
        migrations.AlterField(
            model_name='browsetype',
            name='blue_expression',
            field=models.CharField(blank=True, max_length=512, null=True, validators=[eoxserver.resources.coverages.models.band_expression_validator]),
        ),
        migrations.AlterField(
            model_name='browsetype',
            name='green_expression',
            field=models.CharField(blank=True, max_length=512, null=True, validators=[eoxserver.resources.coverages.models.band_expression_validator]),
        ),
        migrations.AlterField(
            model_name='browsetype',
            name='name',
            # NOTE(review): the character class ``[a-zA-z_]`` (lower-case
            # final ``z``) also matches ``[ \ ] ^`` and backtick; it was
            # almost certainly meant to be ``[a-zA-Z_]``.  Left unchanged
            # because this migration mirrors the model state at generation
            # time -- fix it in the model plus a new migration instead.
            field=models.CharField(blank=True, max_length=256, validators=[django.core.validators.RegexValidator(re.compile(b'^[a-zA-z_][a-zA-Z0-9_]*$'), message=b'This field must contain a valid Name.')]),
        ),
        migrations.AlterField(
            model_name='browsetype',
            name='red_or_grey_expression',
            field=models.CharField(blank=True, max_length=512, null=True, validators=[eoxserver.resources.coverages.models.band_expression_validator]),
        ),
    ]
| <filename>eoxserver/resources/coverages/migrations/0002_browse_type_fields.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-11-10 12:22
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import eoxserver.resources.coverages.models
import re
class Migration(migrations.Migration):
dependencies = [
('coverages', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='browsetype',
name='alpha_nodata_value',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='browsetype',
name='alpha_range_max',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='browsetype',
name='alpha_range_min',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='browsetype',
name='blue_nodata_value',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='browsetype',
name='blue_range_max',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='browsetype',
name='blue_range_min',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='browsetype',
name='green_nodata_value',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='browsetype',
name='green_range_max',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='browsetype',
name='green_range_min',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='browsetype',
name='red_or_grey_nodata_value',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='browsetype',
name='red_or_grey_range_max',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='browsetype',
name='red_or_grey_range_min',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='browsetype',
name='alpha_expression',
field=models.CharField(blank=True, max_length=512, null=True, validators=[eoxserver.resources.coverages.models.band_expression_validator]),
),
migrations.AlterField(
model_name='browsetype',
name='blue_expression',
field=models.CharField(blank=True, max_length=512, null=True, validators=[eoxserver.resources.coverages.models.band_expression_validator]),
),
migrations.AlterField(
model_name='browsetype',
name='green_expression',
field=models.CharField(blank=True, max_length=512, null=True, validators=[eoxserver.resources.coverages.models.band_expression_validator]),
),
migrations.AlterField(
model_name='browsetype',
name='name',
field=models.CharField(blank=True, max_length=256, validators=[django.core.validators.RegexValidator(re.compile(b'^[a-zA-z_][a-zA-Z0-9_]*$'), message=b'This field must contain a valid Name.')]),
),
migrations.AlterField(
model_name='browsetype',
name='red_or_grey_expression',
field=models.CharField(blank=True, max_length=512, null=True, validators=[eoxserver.resources.coverages.models.band_expression_validator]),
),
]
| en | 0.717219 | # -*- coding: utf-8 -*- # Generated by Django 1.11.3 on 2017-11-10 12:22 | 1.70602 | 2 |
survey/forms.py | ONSdigital/alpha-eq-author | 1 | 6617453 | <reponame>ONSdigital/alpha-eq-author
from django.forms import ModelForm, Select, ChoiceField, ValidationError, Textarea, CheckboxInput
from .models import Survey, Questionnaire
SURVEY_STORE = (
("01","Social Survey"),
("02", "Census 2021"),
("002", "Survey of Research and Development Carried Out in the UK",),
("007", "Low Carbon and Renewable Energy Economy Survey",),
("009", "Monthly Business Survey",),
("014", "Annual PRODcom Survey PRODucts of the European COMmunity",),
("017", "Quarterly Stocks Survey",),
("019", "Quarterly Acquisitions and Disposals of Capital Assets Survey (QCAS) 019 - Quarterly Survey of Capital Expenditure",),
("022", "Quarterly Profits Inquiry",),
("023", "Monthly Business Survey - Retail Sales Index",),
("024", "Quarterly Fuels Inquiry",),
("057", "Quarterly International Trade in Services (QITIS)",),
("058", "Annual International Trade in Services (AITIS)",),
("061", "Services Provider Price Indices",),
("062", "Annual Inward Foreign Direct Investment Survey",),
("063", "Annual Outward Foreign Direct Investment Survey",),
("064", "Quarterly Inward Foreign Direct Investment Survey",),
)
class SurveyForm(ModelForm):
    """Form for registering a ``Survey`` chosen from the static SURVEY_STORE.

    The user picks a survey code from a dropdown; ``save`` resolves the
    matching display title and stores both on the model instance.
    """

    survey_list = ChoiceField(
        choices=[(k, k + "-" + v) for k, v in SURVEY_STORE],
        label="Choose a Survey",
    )

    class Meta:
        model = Survey
        exclude = ('title', 'survey_id')
        widgets = {
            'survey_list': Select(),
        }

    def clean_survey_list(self):
        """Reject survey codes that were already added to the database."""
        survey_id = self.cleaned_data['survey_list']
        if Survey.objects.filter(survey_id=survey_id).exists():
            raise ValidationError("Survey has already been added")
        return survey_id

    def save(self, commit=True):
        """Populate title/survey_id from SURVEY_STORE and save the instance.

        BUGFIX: the original override had signature ``save(self)`` and thus
        broke the standard ``form.save(commit=False)`` ModelForm contract.
        ``commit`` defaults to True, preserving the old always-save behaviour.
        """
        survey_id = self.cleaned_data['survey_list']
        # Resolve the display title for the chosen survey code.
        title = next(name for code, name in SURVEY_STORE if code == survey_id)
        survey = super().save(commit=False)
        survey.title = title
        survey.survey_id = survey_id
        if commit:
            survey.save()
        return survey
class QuestionnaireForm(ModelForm):
    """Create/edit form for a Questionnaire (title, id and overview only)."""
    class Meta:
        model = Questionnaire
        fields = ('title', 'questionnaire_id', 'overview')
        # NOTE(review): ``exclude`` is written as a set and combined with an
        # explicit ``fields`` whitelist, which already excludes these two
        # fields -- confirm whether ``exclude`` is needed at all.
        exclude = {'reviewed', 'introduction_text'}
        labels = {
            'title': 'Questionnaire title',
            'questionnaire_id': 'Questionnaire id',
            'overview': 'Provide a description of your Questionnaire'
        }
        widgets = {
            # Render the overview as a 6-row textarea instead of the default.
            'overview': Textarea(attrs={'rows': 6}),
        }
class QuestionnaireDetailForm(ModelForm):
    """Detail-page form exposing only the ``reviewed`` flag for toggling."""
    class Meta:
        model = Questionnaire
        fields = ('reviewed',)
| from django.forms import ModelForm, Select, ChoiceField, ValidationError, Textarea, CheckboxInput
from .models import Survey, Questionnaire
SURVEY_STORE = (
("01","Social Survey"),
("02", "Census 2021"),
("002", "Survey of Research and Development Carried Out in the UK",),
("007", "Low Carbon and Renewable Energy Economy Survey",),
("009", "Monthly Business Survey",),
("014", "Annual PRODcom Survey PRODucts of the European COMmunity",),
("017", "Quarterly Stocks Survey",),
("019", "Quarterly Acquisitions and Disposals of Capital Assets Survey (QCAS) 019 - Quarterly Survey of Capital Expenditure",),
("022", "Quarterly Profits Inquiry",),
("023", "Monthly Business Survey - Retail Sales Index",),
("024", "Quarterly Fuels Inquiry",),
("057", "Quarterly International Trade in Services (QITIS)",),
("058", "Annual International Trade in Services (AITIS)",),
("061", "Services Provider Price Indices",),
("062", "Annual Inward Foreign Direct Investment Survey",),
("063", "Annual Outward Foreign Direct Investment Survey",),
("064", "Quarterly Inward Foreign Direct Investment Survey",),
)
class SurveyForm(ModelForm):
survey_list = ChoiceField(choices=[(k, k + "-" + v) for k,v in SURVEY_STORE], label="Choose a Survey")
class Meta:
model = Survey
exclude = ('title', 'survey_id')
widgets = {
'survey_list': Select(),
}
def clean_survey_list(self):
survey_id = self.cleaned_data['survey_list']
if Survey.objects.filter(survey_id=survey_id).exists():
raise ValidationError("Survey has already been added")
return survey_id
def save(self):
data = self.cleaned_data
survey_id = data['survey_list']
title = [item for item in SURVEY_STORE if item[0] == survey_id]
survey = super(SurveyForm, self).save(commit=False)
survey.title = title[0][1]
survey.survey_id = survey_id
survey.save()
return survey
class QuestionnaireForm(ModelForm):
class Meta:
model = Questionnaire
fields = ('title', 'questionnaire_id', 'overview')
exclude = {'reviewed', 'introduction_text'}
labels = {
'title': 'Questionnaire title',
'questionnaire_id': 'Questionnaire id',
'overview': 'Provide a description of your Questionnaire'
}
widgets = {
'overview': Textarea(attrs={'rows': 6}),
}
class QuestionnaireDetailForm(ModelForm):
class Meta:
model = Questionnaire
fields = ('reviewed',) | none | 1 | 2.448257 | 2 | |
convert.py | thiagochacon/Northern_Arawak | 0 | 6617454 | from lingpy import *
from collections import defaultdict
def parse_thiago(string):
    """Parse a raw dictionary cell into a list of data-point dicts.

    A cell may contain several entries, each terminated by a ``{source}``
    marker.  Within an entry the markup is:

    * ``$...$``  orthographic form       -> ``data['form']``
    * ``/.../``  phonemic transcription  -> ``data['phonemic']``
    * ``(...)``  Spanish gloss           -> ``data['spanish']``
    * ``<...>``  plain value             -> ``data['value']``
    * ``{...}``  bibliographic source    -> ``data['source']``

    Returns a list of dicts, one per ``}``-terminated entry, plus a dict
    for whatever trails the final ``}`` (an empty dict when nothing does).
    """
    # Split the cell into entries; the closing '}' stays with its entry.
    strings = ['']
    for char in string:
        strings[-1] += char
        if char == '}':
            strings += ['']
    dsets = []
    for string in strings:
        current = ''   # name of the field currently being collected
        bcount = 0     # parenthesis nesting depth (for Spanish glosses)
        data = {}
        for char in string:
            if char == '$':
                if not current:
                    current = 'form'
                    data[current] = ''
                else:
                    current = ''
            elif char == '/':
                if not current:
                    current = 'phonemic'
                    data[current] = ''
                elif current == 'phonemic':
                    current = ''
                else:
                    data[current] += char
            elif char == '{':
                current = 'source'
                data[current] = ''
            elif char == '}':
                current = ''
            elif char == '<':
                # BUGFIX: the original had a second, unreachable
                # ``elif char == '<':`` branch further down (dead code,
                # now removed).
                current = 'value'
                data[current] = ''
            elif char == '>':
                current = ''
            elif char == '(':
                bcount += 1
                if not current:
                    current = 'spanish'
                    data[current] = ''
                else:
                    # inside another field a '(' is literal text
                    data[current] += char
            elif char == ')':
                bcount -= 1
                if current == 'spanish' and bcount == 0:
                    current = ''
                else:
                    data[current] += char
            elif current:
                data[current] += char
        dsets += [data]
    return dsets
# ---------------------------------------------------------------------------
# Stage 1: read the master concept list (750 concepts x languages) and turn
# every marked-up dictionary cell into one wordlist row per attested form.
# ---------------------------------------------------------------------------
csv = csv2list('raw/compiled_750.tsv', strip_lines=False, comment='>>>')
header = csv[0][5:]  # doculect (language) names start at column 5
D = {0: [
    'doculect',
    'concept',
    'concept_spanish',
    'concept_french',
    'concept_portuguese',
    'semantic_field',
    'value_in_source',
    'value',
    'form1', 'form2', 'form', 'segments', 'source']}
idx = 1
concepts = []
cmaps = {}  # Portuguese gloss -> English concept (used for the Baniwa file)
for i, line in enumerate(csv[1:]):
    concept = line[0]
    spanish = line[1]
    french = line[2]
    port = line[3]
    rest = line[5:]
    cmaps[port] = concept
    semfield = line[4]
    concepts += [(str(i+1), concept, spanish, french, port, semfield)]
    for language, cell in zip(header, rest):
        print(cell)
        datapoints = parse_thiago(cell)
        for data in [x for x in datapoints if x]:
            # Prefer the phonemic transcription, then the plain form,
            # then the raw value.
            form = data.get('phonemic',
                            data.get('form', data.get('value', '')))
            if form:
                segments = ' '.join(ipa2tokens(form.replace(' ','_'),
                                               merge_vowels=False,
                                               semi_diacritics = 'hsʃzʒ'))
                new_line = [language, concept,
                            spanish,
                            french,
                            port,
                            semfield,
                            cell,
                            data.get('value', ''), data.get('form', ''),
                            data.get('phonemic', ''), form, segments, data.get('source')]
                D[idx] = [str(x) for x in new_line]
                idx += 1
# ---------------------------------------------------------------------------
# Stage 2: the Baniwa data lives in a separate two-column file keyed by the
# Portuguese gloss; map it back onto the master concepts via ``cmaps``.
# ---------------------------------------------------------------------------
csv = csv2list('raw/Baniwa_only_750', strip_lines=False)
for i, line in enumerate(csv[1:]):
    port = line[0]
    rest = [line[1]]
    concept = cmaps.get(port, '?')
    language = 'Baniwa'
    for cell in rest:
        print(cell)
        datapoints = parse_thiago(cell)
        for data in [x for x in datapoints if x]:
            form = data.get('phonemic',
                            data.get('form', data.get('value', '')))
            if form:
                segments = ' '.join(ipa2tokens(form.replace(' ','_'),
                                               merge_vowels=False,
                                               semi_diacritics = 'hsʃzʒ'))
                new_line = [language, concept,
                            '',
                            '',
                            port,
                            '',
                            cell,
                            data.get('value', ''), data.get('form', ''),
                            data.get('phonemic', ''), form, segments, data.get('source')]
                D[idx] = [str(x) for x in new_line]
                idx += 1
# ---------------------------------------------------------------------------
# Stage 3: orthography profiles -- count (grapheme, sound-class) pairs per
# doculect and flag rows whose form cannot be tokenised.
# ---------------------------------------------------------------------------
wl = Wordlist(D)
counts = defaultdict(lambda: defaultdict(list))
problematic = {}
for k, val, lang in iter_rows(wl, 'form', 'doculect'):
    try:
        tks = ipa2tokens(val, semi_diacritics='shzʃʒʂʐɕʑ', merge_vowels=False)
        cls = tokens2class(tks, 'dolgo')
        for t, c in zip(tks, cls):
            counts[lang][t, c] += [val]
        problematic[k] = ''
    except:
        # tokenisation failed -> mark the row so it can be inspected later
        problematic[k] = '!'
# Write one <doculect>.orthography.tsv per language, most frequent first.
for lang, vals in counts.items():
    with open(lang+'.orthography.tsv', 'w') as f:
        f.write('Grapheme\tIPA\tFREQUENCY\tEXAMPLE\n')
        for (t, c), lst in sorted(counts[lang].items(), key=lambda x: len(x[1]),
                                  reverse=True):
            if c != '0':
                cpart = t
            else:
                cpart = '<?>'  # grapheme with no known sound class
            print(t, c, lst)
            f.write('{0}\t{1}\t{2}\t{3}\n'.format(
                t, cpart, len(lst), lst[0]))
# ---------------------------------------------------------------------------
# Stage 4: rough SCA-based cognate detection and export of the wordlist
# plus the concept index.
# ---------------------------------------------------------------------------
wl.add_entries('problematic', problematic, lambda x: x)
lex = LexStat(wl, segments='segments')
#lex.get_scorer()
lex.cluster(method='sca', threshold=0.45, ref='cogid')
lex.output('tsv', filename='wordlist-750', ignore='all', prettify=False,
           subset=True, cols=['doculect', 'concept',
                              'concept_spanish',
                              'concept_french',
                              'concept_portuguese',
                              'semantic_field',
                              'value_in_source', 'value',
                              'form1', 'form2', 'form', 'segments', 'source', 'cogid', 'problematic'])
with open('concepts.tsv', 'w') as f:
    f.write('NUMBER\tENGLISH\tSPANISH\tFRENCH\tPORTUGUESE\tSEMANTIC_FIELD\n')
    for line in concepts:
        f.write('\t'.join(line)+'\n')
| from lingpy import *
from collections import defaultdict
def parse_thiago(string):
    """Split a marked-up dictionary cell into per-entry field dicts.

    Entries end at ``}``.  Markup: ``$form$``, ``/phonemic/``,
    ``(spanish)``, ``<value>`` and ``{source}``.
    """
    # Break the cell into chunks; each closing '}' stays with its chunk.
    chunks = ['']
    for ch in string:
        chunks[-1] += ch
        if ch == '}':
            chunks.append('')
    parsed = []
    for chunk in chunks:
        field = ''   # field currently being filled
        depth = 0    # parenthesis nesting (Spanish glosses)
        entry = {}
        for ch in chunk:
            if ch == '$':
                if field:
                    field = ''
                else:
                    field = 'form'
                    entry[field] = ''
            elif ch == '/':
                if not field:
                    field = 'phonemic'
                    entry[field] = ''
                elif field == 'phonemic':
                    field = ''
                else:
                    entry[field] += ch
            elif ch == '{':
                field = 'source'
                entry[field] = ''
            elif ch == '}':
                field = ''
            elif ch == '<':
                field = 'value'
                entry[field] = ''
            elif ch == '>':
                field = ''
            elif ch == '(':
                depth += 1
                if field:
                    entry[field] += ch
                else:
                    field = 'spanish'
                    entry[field] = ''
            elif ch == ')':
                depth -= 1
                if field == 'spanish' and depth == 0:
                    field = ''
                else:
                    entry[field] += ch
            elif field:
                entry[field] += ch
        parsed.append(entry)
    return parsed
csv = csv2list('raw/compiled_750.tsv', strip_lines=False, comment='>>>')
header = csv[0][5:]
D = {0: [
'doculect',
'concept',
'concept_spanish',
'concept_french',
'concept_portuguese',
'semantic_field',
'value_in_source',
'value',
'form1', 'form2', 'form', 'segments', 'source']}
idx = 1
concepts = []
cmaps = {}
for i, line in enumerate(csv[1:]):
concept = line[0]
spanish = line[1]
french = line[2]
port = line[3]
rest = line[5:]
cmaps[port] = concept
semfield = line[4]
concepts += [(str(i+1), concept, spanish, french, port, semfield)]
for language, cell in zip(header, rest):
print(cell)
datapoints = parse_thiago(cell)
for data in [x for x in datapoints if x]:
form = data.get('phonemic',
data.get('form', data.get('value', '')))
if form:
segments = ' '.join(ipa2tokens(form.replace(' ','_'),
merge_vowels=False,
semi_diacritics = 'hsʃzʒ'))
new_line = [language, concept,
spanish,
french,
port,
semfield,
cell,
data.get('value', ''), data.get('form', ''),
data.get('phonemic', ''), form, segments, data.get('source')]
D[idx] = [str(x) for x in new_line]
idx += 1
csv = csv2list('raw/Baniwa_only_750', strip_lines=False)
for i, line in enumerate(csv[1:]):
port = line[0]
rest = [line[1]]
concept = cmaps.get(port, '?')
language = 'Baniwa'
for cell in rest:
print(cell)
datapoints = parse_thiago(cell)
for data in [x for x in datapoints if x]:
form = data.get('phonemic',
data.get('form', data.get('value', '')))
if form:
segments = ' '.join(ipa2tokens(form.replace(' ','_'),
merge_vowels=False,
semi_diacritics = 'hsʃzʒ'))
new_line = [language, concept,
'',
'',
port,
'',
cell,
data.get('value', ''), data.get('form', ''),
data.get('phonemic', ''), form, segments, data.get('source')]
D[idx] = [str(x) for x in new_line]
idx += 1
wl = Wordlist(D)
counts = defaultdict(lambda: defaultdict(list))
problematic = {}
for k, val, lang in iter_rows(wl, 'form', 'doculect'):
try:
tks = ipa2tokens(val, semi_diacritics='shzʃʒʂʐɕʑ', merge_vowels=False)
cls = tokens2class(tks, 'dolgo')
for t, c in zip(tks, cls):
counts[lang][t, c] += [val]
problematic[k] = ''
except:
problematic[k] = '!'
for lang, vals in counts.items():
with open(lang+'.orthography.tsv', 'w') as f:
f.write('Grapheme\tIPA\tFREQUENCY\tEXAMPLE\n')
for (t, c), lst in sorted(counts[lang].items(), key=lambda x: len(x[1]),
reverse=True):
if c != '0':
cpart = t
else:
cpart = '<?>'
print(t, c, lst)
f.write('{0}\t{1}\t{2}\t{3}\n'.format(
t, cpart, len(lst), lst[0]))
wl.add_entries('problematic', problematic, lambda x: x)
lex = LexStat(wl, segments='segments')
#lex.get_scorer()
lex.cluster(method='sca', threshold=0.45, ref='cogid')
lex.output('tsv', filename='wordlist-750', ignore='all', prettify=False,
subset=True, cols=['doculect', 'concept',
'concept_spanish',
'concept_french',
'concept_portuguese',
'semantic_field',
'value_in_source', 'value',
'form1', 'form2', 'form', 'segments', 'source', 'cogid', 'problematic'])
with open('concepts.tsv', 'w') as f:
f.write('NUMBER\tENGLISH\tSPANISH\tFRENCH\tPORTUGUESE\tSEMANTIC_FIELD\n')
for line in concepts:
f.write('\t'.join(line)+'\n')
| en | 0.301193 | #lex.get_scorer() | 2.877234 | 3 |
apps/markets1/models.py | uktrade/enav-alpha | 0 | 6617455 | <reponame>uktrade/enav-alpha<filename>apps/markets1/models.py
from django.db import models
from ckeditor.fields import RichTextField
class Article(models.Model):
    """An article with a rich-text body, ordered alphabetically by title."""
    # Both fields are optional at both the DB and form level.
    title = models.CharField(max_length=200, null=True, blank=True)
    content = RichTextField(null=True, blank=True)
    def __str__(self):
        # NOTE(review): returns self.title, which is nullable -- __str__ may
        # yield None for untitled rows; confirm whether title should be
        # required instead.
        return self.title
    class Meta:
        ordering = ('title',)
class Market(models.Model):
    """An online marketplace, ordered alphabetically by name."""
    # Trading name of the marketplace
    name = models.CharField(max_length=200, null=True, blank=True)
    # Description of the marketplace suitable for a seller.
    description = models.CharField(max_length=200, null=True, blank=True)
    # URL of the market
    web_address = models.URLField(max_length=200, blank=True, null=True)
    def __str__(self):
        # NOTE(review): name is nullable, so __str__ may return None.
        return self.name
    class Meta:
        ordering = ('name',)
| from django.db import models
from ckeditor.fields import RichTextField
class Article(models.Model):
title = models.CharField(max_length=200, null=True, blank=True)
content = RichTextField(null=True, blank=True)
def __str__(self):
return self.title
class Meta:
ordering = ('title',)
class Market(models.Model):
# Trading name of the marketplace
name = models.CharField(max_length=200, null=True, blank=True)
# Description of the marketplace suitable for a seller.
description = models.CharField(max_length=200, null=True, blank=True)
# URL of the market
web_address = models.URLField(max_length=200, blank=True, null=True)
def __str__(self):
return self.name
class Meta:
ordering = ('name',) | en | 0.807838 | # Trading name of the marketplace # Description of the marketplace suitable for a seller. # URL of the market | 2.534799 | 3 |
Subjects/models.py | Mithzyl/Master-college-selecting-api | 0 | 6617456 | <reponame>Mithzyl/Master-college-selecting-api<filename>Subjects/models.py<gh_stars>0
from django.db import models
class PoliticSubject(models.Model):
    """A politics exam subject option (code + display name)."""
    # optional numeric code identifying the subject
    code = models.IntegerField(null=True, blank=True)
    name = models.CharField(max_length=40)
    def __str__(self):
        return self.name
class ForeignLanguageSubject(models.Model):
    """A foreign-language exam subject option (code + display name)."""
    # optional numeric code identifying the subject
    code = models.IntegerField(null=True, blank=True)
    name = models.CharField(max_length=40)
    def __str__(self):
        return self.name
class FirstMajorSubject(models.Model):
    """The first major-specific exam subject option (code + display name)."""
    # optional numeric code identifying the subject
    code = models.IntegerField(null=True, blank=True)
    name = models.CharField(max_length=40)
    def __str__(self):
        return self.name
class SecondMajorSubject(models.Model):
    """The second major-specific exam subject option (code + display name).

    NOTE(review): the four Subject models are structurally identical; an
    abstract base model would remove the duplication, at the cost of a
    schema migration.
    """
    # optional numeric code identifying the subject
    code = models.IntegerField(null=True, blank=True)
    name = models.CharField(max_length=40)
    def __str__(self):
        return self.name
| from django.db import models
class PoliticSubject(models.Model):
code = models.IntegerField(null=True, blank=True)
name = models.CharField(max_length=40)
def __str__(self):
return self.name
class ForeignLanguageSubject(models.Model):
code = models.IntegerField(null=True, blank=True)
name = models.CharField(max_length=40)
def __str__(self):
return self.name
class FirstMajorSubject(models.Model):
code = models.IntegerField(null=True, blank=True)
name = models.CharField(max_length=40)
def __str__(self):
return self.name
class SecondMajorSubject(models.Model):
code = models.IntegerField(null=True, blank=True)
name = models.CharField(max_length=40)
def __str__(self):
return self.name | none | 1 | 2.240924 | 2 | |
af_scripts/misc/afRandomColor.py | aaronfang/small-Scripts | 1 | 6617457 | import maya.cmds as mc
import random
def afRandomColor():
    """Give every selected object its own lambert shader with a random colour.

    For object ``X`` the shader is named ``X_mat``; if it already exists
    only its colour is re-randomised, otherwise the shader is created and
    assigned.  The original selection is restored at the end.
    """
    selection = mc.ls(sl=1, fl=1)
    for node in selection:
        mat_name = node + '_mat'
        rgb = (random.random(), random.random(), random.random())
        existing_mats = mc.ls(mat=1, fl=1)
        if mat_name in existing_mats:
            # Shader already exists: just randomise its colour again.
            mc.setAttr(mat_name + '.color', *rgb, type='double3')
        else:
            shader = mc.shadingNode('lambert', n=mat_name, asShader=1)
            mc.setAttr(shader + '.color', *rgb, type='double3')
            mc.select(node, r=1)
            mc.hyperShade(assign=shader)
            mc.select(cl=1)
    mc.select(selection, r=1)
afRandomColor()
| import maya.cmds as mc
import random
def afRandomColor():
objs = mc.ls(sl=1,fl=1)
for obj in objs:
mats = mc.ls(mat=1,fl=1)
if not ((obj+'_mat')) in mats:
mat = mc.shadingNode('lambert',n=(obj+'_mat'),asShader=1)
mc.setAttr((mat+".color"),random.random(),random.random(),random.random(),type='double3')
mc.select(obj,r=1)
mc.hyperShade(assign=mat)
mc.select(cl=1)
else:
mc.setAttr((obj+"_mat.color"),random.random(),random.random(),random.random(),type='double3')
mc.select(objs,r=1)
afRandomColor()
| none | 1 | 2.571866 | 3 | |
chamber/forms/fields.py | jsilhan/django-chamber | 0 | 6617458 | from django import forms
from django.utils.translation import ugettext
class DecimalField(forms.DecimalField):
    """DecimalField that forwards step/min/max limits to its HTML widget."""

    def __init__(self, *args, **kwargs):
        # Pop our widget-only keyword arguments before delegating.
        self.step = kwargs.pop('step', 'any')
        self.min = kwargs.pop('min', None)
        self.max = kwargs.pop('max', None)
        super().__init__(*args, **kwargs)

    def widget_attrs(self, widget):
        """Add ``step`` always, and ``min``/``max`` when they were given."""
        attrs = super().widget_attrs(widget)
        attrs['step'] = self.step
        for bound in ('min', 'max'):
            value = getattr(self, bound)
            if value is not None:
                attrs[bound] = value
        return attrs
class PriceNumberInput(forms.NumberInput):
    """Number input that stores a currency string as its placeholder."""
    def __init__(self, currency, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.placeholder = currency
class PriceField(DecimalField):
    """Decimal field rendered with a currency-aware number input."""
    widget = PriceNumberInput

    def __init__(self, *args, **kwargs):
        # NOTE(review): ugettext() evaluates the translation immediately,
        # at field-construction (often import) time; gettext_lazy would
        # respect the active locale per request -- confirm intent.
        currency = kwargs.pop('currency', ugettext('CZK'))
        if 'widget' not in kwargs:
            kwargs['widget'] = PriceNumberInput(currency)
        super().__init__(*args, **kwargs)
| from django import forms
from django.utils.translation import ugettext
class DecimalField(forms.DecimalField):
def __init__(self, *args, **kwargs):
self.step = kwargs.pop('step', 'any')
self.min = kwargs.pop('min', None)
self.max = kwargs.pop('max', None)
super().__init__(*args, **kwargs)
def widget_attrs(self, widget):
attrs = super().widget_attrs(widget)
attrs['step'] = self.step
if self.min is not None:
attrs['min'] = self.min
if self.max is not None:
attrs['max'] = self.max
return attrs
class PriceNumberInput(forms.NumberInput):
def __init__(self, currency, *args, **kwargs):
super().__init__(*args, **kwargs)
self.placeholder = currency
class PriceField(DecimalField):
widget = PriceNumberInput
def __init__(self, *args, **kwargs):
currency = kwargs.pop('currency', ugettext('CZK'))
if 'widget' not in kwargs:
kwargs['widget'] = PriceNumberInput(currency)
super().__init__(*args, **kwargs)
| none | 1 | 2.228999 | 2 | |
Templates/02.LinkedList/LinkedList.py | AlgorithmAndLeetCode/itcharge-LeetCode-Py | 2 | 6617459 | <filename>Templates/02.LinkedList/LinkedList.py
class ListNode:
    """A single node of a singly linked list."""
    def __init__(self, val=0, next=None):
        self.val = val    # payload stored in this node
        self.next = next  # reference to the following node (or None)


class LinkedList:
    """Singly linked list where ``head`` is the first real node (no dummy).

    BUGFIX: the original mixed two conventions -- ``create`` built the list
    behind a dummy ``ListNode(0)`` head, while ``length``/``find``/
    ``insertFront`` and the index-based methods treated ``head`` as a real
    node.  As a result ``length`` was off by one, ``find(0)`` matched the
    dummy, and indices were shifted.  This version uses a consistent
    dummy-free representation.  Methods keep the original names/signatures
    and still return the string ``'Error'`` on invalid indices.
    """

    def __init__(self):
        self.head = None

    # Build a fresh list from the items in ``data``.
    def create(self, data):
        self.head = None
        tail = None
        for item in data:
            node = ListNode(item)
            if tail is None:
                self.head = node
            else:
                tail.next = node
            tail = node

    # Return the number of nodes in the list.
    def length(self):
        count = 0
        cur = self.head
        while cur:
            count += 1
            cur = cur.next
        return count

    # Return the first node whose value equals ``val`` (or None).
    def find(self, val):
        cur = self.head
        while cur:
            if val == cur.val:
                return cur
            cur = cur.next
        return None

    # Insert ``val`` at the front of the list.
    def insertFront(self, val):
        node = ListNode(val)
        node.next = self.head
        self.head = node

    # Append ``val`` at the end of the list.
    def insertRear(self, val):
        node = ListNode(val)
        if not self.head:          # empty list: new node becomes the head
            self.head = node
            return
        cur = self.head
        while cur.next:
            cur = cur.next
        cur.next = node

    # Insert ``val`` so it becomes the node at position ``index`` (0-based).
    def insertInside(self, index, val):
        if index == 0:
            self.insertFront(val)
            return
        count = 0
        cur = self.head
        while cur and count < index - 1:
            count += 1
            cur = cur.next
        if not cur:
            return 'Error'
        node = ListNode(val)
        node.next = cur.next
        cur.next = node

    # Overwrite the value of the node at position ``index`` (0-based).
    def change(self, index, val):
        count = 0
        cur = self.head
        while cur and count < index:
            count += 1
            cur = cur.next
        if not cur:
            return 'Error'
        cur.val = val

    # Remove the first node (no-op on an empty list).
    def removeFront(self):
        if self.head:
            self.head = self.head.next

    # Remove the last node; 'Error' when the list is empty.
    def removeRear(self):
        if not self.head:
            return 'Error'
        if not self.head.next:     # single node: list becomes empty
            self.head = None
            return
        cur = self.head
        while cur.next.next:
            cur = cur.next
        cur.next = None

    # Remove the node at position ``index`` (0-based).
    def removeInside(self, index):
        if index == 0:
            self.removeFront()
            return
        count = 0
        cur = self.head
        while cur.next and count < index - 1:
            count += 1
            cur = cur.next
        # BUGFIX: the original tested ``if not cur`` here, which can never
        # trigger after a ``cur.next``-guarded loop; the real failure mode
        # is ``cur.next`` being None.
        if not cur.next:
            return 'Error'
        del_node = cur.next
        cur.next = del_node.next
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class LinkedList:
    """Singly linked list built on ListNode.

    NOTE: ``create`` installs a sentinel ``ListNode(0)`` in front of the
    data, so the index-based methods count that sentinel as position 0.
    Fixes over the original: ``insertRear`` no longer crashes on an empty
    list, ``removeRear`` returns 'Error' instead of raising when the list
    is empty, and ``removeInside`` returns 'Error' for an out-of-range
    index instead of raising AttributeError on ``None.next``.
    """
    def __init__(self):
        self.head = None

    # Build a new list from `data`, behind a sentinel head node.
    def create(self, data):
        self.head = ListNode(0)
        cur = self.head
        for item in data:
            cur.next = ListNode(item)
            cur = cur.next

    # Number of nodes (the sentinel, when present, is counted).
    def length(self):
        count = 0
        cur = self.head
        while cur:
            count += 1
            cur = cur.next
        return count

    # Return the first node whose value equals `val`, or None.
    def find(self, val):
        cur = self.head
        while cur:
            if val == cur.val:
                return cur
            cur = cur.next
        return None

    # Insert a new node at the front.
    def insertFront(self, val):
        node = ListNode(val)
        node.next = self.head
        self.head = node

    # Insert a new node at the back.
    def insertRear(self, val):
        node = ListNode(val)
        if not self.head:
            # Fix: the original dereferenced cur.next on an empty list.
            self.head = node
            return
        cur = self.head
        while cur.next:
            cur = cur.next
        cur.next = node

    # Insert a new node so that it sits at position `index`.
    def insertInside(self, index, val):
        count = 0
        cur = self.head
        while cur and count < index - 1:
            count += 1
            cur = cur.next
        if not cur:
            return 'Error'
        node = ListNode(val)
        node.next = cur.next
        cur.next = node

    # Overwrite the value stored at position `index`.
    def change(self, index, val):
        count = 0
        cur = self.head
        while cur and count < index:
            count += 1
            cur = cur.next
        if not cur:
            return 'Error'
        cur.val = val

    # Drop the first node (no-op on an empty list).
    def removeFront(self):
        if self.head:
            self.head = self.head.next

    # Drop the last node; 'Error' when there is at most one node.
    def removeRear(self):
        if not self.head or not self.head.next:
            # Fix: the original crashed (None.next) when the list was empty.
            return 'Error'
        cur = self.head
        while cur.next.next:
            cur = cur.next
        cur.next = None

    # Drop the node at position `index`; 'Error' when out of range.
    def removeInside(self, index):
        count = 0
        cur = self.head
        while cur.next and count < index - 1:
            count += 1
            cur = cur.next
        if not cur or not cur.next:
            # Fix: the original crashed on None.next for too-large indices.
            return 'Error'
        del_node = cur.next
        cur.next = del_node.next
Chapter06/comprehension/dictcomp1.py | MichaelRW/Python-for-Geeks | 31 | 6617460 | #dictcomp1.py
dict1 = {'a': 100, 'b': 200, 'c': 300}
# Dict comprehension: keep entries whose value is at most 200, halved.
dict2 = {key: int(value / 2) for key, value in dict1.items() if value <= 200}
print(dict2)
# The same result built with an explicit loop, for comparison.
dict3 = {}
for key, value in dict1.items():
    if value <= 200:
        dict3[key] = int(value / 2)
print(dict3)
dict1 = {'a': 100, 'b': 200, 'c': 300}
dict2 = {x : int(y/2) for (x, y) in dict1.items() if y <=200}
print(dict2)
dict3 = {}
for x,y in dict1.items():
if y <= 200:
dict3[x] = int(y/2)
print(dict3) | en | 0.292216 | #dictcomp1.py | 3.915483 | 4 |
python/artm/wrapper/constants.py | MelLain/bigartm | 638 | 6617461 | # Copyright 2017, Additive Regularization of Topic Models.
"""
Constants values, used in messages
This file was generated using python/artm/utility/gen_constants.py
Don't modify this file by hand!
"""
RegularizerType_SmoothSparseTheta = 0
RegularizerType_SmoothSparsePhi = 1
RegularizerType_DecorrelatorPhi = 2
RegularizerType_MultiLanguagePhi = 3
RegularizerType_LabelRegularizationPhi = 4
RegularizerType_SpecifiedSparsePhi = 5
RegularizerType_ImproveCoherencePhi = 6
RegularizerType_SmoothPtdw = 7
RegularizerType_TopicSelectionTheta = 8
RegularizerType_BitermsPhi = 9
RegularizerType_HierarchySparsingTheta = 10
RegularizerType_TopicSegmentationPtdw = 11
RegularizerType_SmoothTimeInTopicsPhi = 12
RegularizerType_NetPlsaPhi = 13
RegularizerType_Unknown = 9999
SpecifiedSparsePhiConfig_SparseMode_SparseTopics = 0
SpecifiedSparsePhiConfig_SparseMode_SparseTokens = 1
SmoothPtdwConfig_SmoothType_MovingAverage = 1
SmoothPtdwConfig_SmoothType_MovingProduct = 2
TransformConfig_TransformType_Logarithm = 0
TransformConfig_TransformType_Polynomial = 1
TransformConfig_TransformType_Constant = 2
ScoreType_Perplexity = 0
ScoreType_SparsityTheta = 1
ScoreType_SparsityPhi = 2
ScoreType_ItemsProcessed = 3
ScoreType_TopTokens = 4
ScoreType_ThetaSnippet = 5
ScoreType_TopicKernel = 6
ScoreType_TopicMassPhi = 7
ScoreType_ClassPrecision = 8
ScoreType_PeakMemory = 9
ScoreType_BackgroundTokensRatio = 10
ScoreType_Unknown = 9999
PerplexityScoreConfig_Type_UnigramDocumentModel = 0
PerplexityScoreConfig_Type_UnigramCollectionModel = 1
CollectionParserConfig_CollectionFormat_BagOfWordsUci = 0
CollectionParserConfig_CollectionFormat_MatrixMarket = 1
CollectionParserConfig_CollectionFormat_VowpalWabbit = 2
CollectionParserConfig_BatchNameType_Guid = 0
CollectionParserConfig_BatchNameType_Code = 1
MatrixLayout_Dense = 0
MatrixLayout_Sparse = 1
ThetaMatrixType_None = 0
ThetaMatrixType_Dense = 1
ThetaMatrixType_Sparse = 2
ThetaMatrixType_Cache = 3
ThetaMatrixType_DensePtdw = 4
ThetaMatrixType_SparsePtdw = 5
| # Copyright 2017, Additive Regularization of Topic Models.
"""
Constants values, used in messages
This file was generated using python/artm/utility/gen_constants.py
Don't modify this file by hand!
"""
RegularizerType_SmoothSparseTheta = 0
RegularizerType_SmoothSparsePhi = 1
RegularizerType_DecorrelatorPhi = 2
RegularizerType_MultiLanguagePhi = 3
RegularizerType_LabelRegularizationPhi = 4
RegularizerType_SpecifiedSparsePhi = 5
RegularizerType_ImproveCoherencePhi = 6
RegularizerType_SmoothPtdw = 7
RegularizerType_TopicSelectionTheta = 8
RegularizerType_BitermsPhi = 9
RegularizerType_HierarchySparsingTheta = 10
RegularizerType_TopicSegmentationPtdw = 11
RegularizerType_SmoothTimeInTopicsPhi = 12
RegularizerType_NetPlsaPhi = 13
RegularizerType_Unknown = 9999
SpecifiedSparsePhiConfig_SparseMode_SparseTopics = 0
SpecifiedSparsePhiConfig_SparseMode_SparseTokens = 1
SmoothPtdwConfig_SmoothType_MovingAverage = 1
SmoothPtdwConfig_SmoothType_MovingProduct = 2
TransformConfig_TransformType_Logarithm = 0
TransformConfig_TransformType_Polynomial = 1
TransformConfig_TransformType_Constant = 2
ScoreType_Perplexity = 0
ScoreType_SparsityTheta = 1
ScoreType_SparsityPhi = 2
ScoreType_ItemsProcessed = 3
ScoreType_TopTokens = 4
ScoreType_ThetaSnippet = 5
ScoreType_TopicKernel = 6
ScoreType_TopicMassPhi = 7
ScoreType_ClassPrecision = 8
ScoreType_PeakMemory = 9
ScoreType_BackgroundTokensRatio = 10
ScoreType_Unknown = 9999
PerplexityScoreConfig_Type_UnigramDocumentModel = 0
PerplexityScoreConfig_Type_UnigramCollectionModel = 1
CollectionParserConfig_CollectionFormat_BagOfWordsUci = 0
CollectionParserConfig_CollectionFormat_MatrixMarket = 1
CollectionParserConfig_CollectionFormat_VowpalWabbit = 2
CollectionParserConfig_BatchNameType_Guid = 0
CollectionParserConfig_BatchNameType_Code = 1
MatrixLayout_Dense = 0
MatrixLayout_Sparse = 1
ThetaMatrixType_None = 0
ThetaMatrixType_Dense = 1
ThetaMatrixType_Sparse = 2
ThetaMatrixType_Cache = 3
ThetaMatrixType_DensePtdw = 4
ThetaMatrixType_SparsePtdw = 5
| en | 0.841251 | # Copyright 2017, Additive Regularization of Topic Models. Constants values, used in messages This file was generated using python/artm/utility/gen_constants.py Don't modify this file by hand! | 1.045924 | 1 |
pythonProject2/exercicios/exercicio23.py | DeyvidMonteiro/PycharmProjects | 0 | 6617462 | <reponame>DeyvidMonteiro/PycharmProjects<gh_stars>0
from time import sleep
# Beginner calculator-menu exercise: read two integers, then loop a text
# menu until option 5 is chosen.  All user-facing strings are Portuguese
# and are left byte-for-byte untouched.
n1 = int(input('primeiro valor: '))
n2 = int(input('segundo valor: '))
opcao = 0
while opcao != 5:
    # Re-print the menu on every pass.
    print(''' [ 1 ] somar
    [ 2 ] multiplicar
    [ 3 ] maior
    [ 4 ] novos numeros
    [ 5 ] sair do programa ''')
    opcao = int(input(' >>> qual a sua opção? '))
    if opcao == 1:
        # Option 1: sum of the two numbers.
        soma = n1 + n2
        print(' a soma entre {} + {} é {}' .format(n1, n2, soma))
    elif opcao == 2:
        # Option 2: product.
        produto = n1 * n2
        print('o resultado de {} X {} é {} '.format(n1, n2, produto))
    elif opcao == 3:
        # Option 3: report the larger of the two.
        if n1 > n2:
            maior = n1
        else:
            maior = n2
        print('entre {} e {} o maior valor é {} '.format(n1, n2, maior))
    elif opcao == 4:
        # Option 4: read a fresh pair of numbers.
        print('informe os numeros novamente: ')
        n1 = int(input('primeiro valor: '))
        n2 = int(input('segundo valor: '))
    elif opcao == 5:
        print('finalizando..!')
    else:
        # Any other number is invalid.  ('invalda' is a typo in the
        # original runtime message; runtime text is left unchanged here.)
        print('opção invalda. tente novamente! ')
    print(('=-=' *15))
    sleep(2)
print('fim do programa! ')
n1 = int(input('primeiro valor: '))
n2 = int(input('segundo valor: '))
opcao = 0
while opcao != 5:
print(''' [ 1 ] somar
[ 2 ] multiplicar
[ 3 ] maior
[ 4 ] novos numeros
[ 5 ] sair do programa ''')
opcao = int(input(' >>> qual a sua opção? '))
if opcao == 1:
soma = n1 + n2
print(' a soma entre {} + {} é {}' .format(n1, n2, soma))
elif opcao == 2:
produto = n1 * n2
print('o resultado de {} X {} é {} '.format(n1, n2, produto))
elif opcao == 3:
if n1 > n2:
maior = n1
else:
maior = n2
print('entre {} e {} o maior valor é {} '.format(n1, n2, maior))
elif opcao == 4:
print('informe os numeros novamente: ')
n1 = int(input('primeiro valor: '))
n2 = int(input('segundo valor: '))
elif opcao == 5:
print('finalizando..!')
else:
print('opção invalda. tente novamente! ')
print(('=-=' *15))
sleep(2)
print('fim do programa! ') | pt | 0.234454 | [ 1 ] somar [ 2 ] multiplicar [ 3 ] maior [ 4 ] novos numeros [ 5 ] sair do programa | 3.894367 | 4 |
examples/electrophysiology/fibers/multiple_fibers/format.py | maierbn/opendihu | 17 | 6617463 | #!/usr/bin/env python
from ctypes import c_longlong
import math
import sys
def str_format_memory(value):
    '''
    Format a byte count with binary units (B/kiB/MiB/GiB/TiB).

    For values of at least 1 kiB the exact byte count is appended in
    parentheses, e.g. 1536 -> "1.500 kiB (1'536 Bytes)".
    '''
    # Bug fix: the original formatted int(tera) etc. with "{:.3f}", so the
    # fractional part was always ".000"; format the float value directly.
    result = ""
    if value >= (1024 * 1024 * 1024 * 1024):
        tera = value / (1024. * 1024 * 1024 * 1024)
        result += "{:.3f} TiB".format(tera)
    elif value >= (1024 * 1024 * 1024):
        giga = value / (1024. * 1024 * 1024)
        result += "{:.3f} GiB".format(giga)
    elif value >= (1024 * 1024):
        mega = value / (1024. * 1024)
        result += "{:.3f} MiB".format(mega)
    elif value >= 1024:
        kilo = value / 1024.
        result += "{:.3f} kiB".format(kilo)
    else:
        result += "{} B".format(int(value))
    if value >= 1024:
        result += " (" + str_format_number(int(value)) + " Bytes)"
    return result
def str_format_nanoseconds(value):
    '''
    Format a value representing a number of nanoseconds.

    NOTE(review): this is an unimplemented stub -- it returns `value`
    unchanged (the dead `result` accumulator has been removed).  Callers
    currently rely on getting the raw value back.
    '''
    return value
def str_format_seconds(value):
    '''
    Format a number of seconds with 5 significant digits, e.g. "1.2346 s".
    '''
    return "{:0.5} s".format(float(value))
def str_format_number(value):
    '''
    Format a large integer with apostrophes as thousands separators,
    e.g. 1234567 -> "1'234'567".
    '''
    # Bug fix: the previous hand-rolled grouping produced "-'123" for
    # negative numbers with 1-3 digits.  The standard "," grouping handles
    # the sign correctly and matches the old output for positive values.
    return "{:,}".format(value).replace(",", "'")
if __name__ == '__main__':
    # read in arguments
    if len(sys.argv) == 1:
        # No number given: print usage and exit.
        print("usage: {} <number> <format>\n Output number with a special format.\nValid values for <format> are:\n mem Format as number of bytes\n ns Format as duration in nanoseconds\n n Format as big number.".format(sys.argv[0]))
        quit()
    number = int(sys.argv[1])
    formatSpec = "n"
    # Optional second argument selects the formatter; default is "n".
    if len(sys.argv) == 3:
        formatSpec = sys.argv[2]
    if formatSpec == "mem":
        print(str_format_memory(number))
    elif formatSpec == "ns":
        print(str_format_nanoseconds(number))
    else:
        # Any other spec (including the default "n") is a big number.
        print(str_format_number(number))
| #!/usr/bin/env python
from ctypes import c_longlong
import math
import sys
def str_format_memory(value):
'''
format a value representing a number of bytes
'''
result = ""
if value >= (1024 * 1024 * 1024 * 1024):
tera = value / (1024. * 1024 * 1024 * 1024)
result += "{:.3f} TiB".format(int(tera))
elif (value >= (1024 * 1024 * 1024)):
giga = value / (1024. * 1024 * 1024)
result += "{:.3f} GiB".format(int(giga))
elif (value >= (1024 * 1024)):
mega = value / (1024. * 1024)
result += "{:.3f} MiB".format(int(mega))
elif (value >= (1024)):
kilo = value / (1024.);
result += "{:.3f} kiB".format(int(kilo))
else:
result += "{} B".format(int(value))
if value >= 1024:
result += " ("+str_format_number(int(value))+" Bytes)";
return result
def str_format_nanoseconds(value):
'''
format a value representing a number of nanoseconds
'''
result = ""
return value
def str_format_seconds(value):
'''
format a float value representing a number of seconds
'''
#return str_format_nanoseconds(value*1000000000.0)
return "{:0.5}".format(float(value))+" s"
def str_format_number(value):
'''
format a value representing a large number
'''
result = ""
number = "{}".format(value)
pos = len(number) % 3
while pos < len(number):
start_pos = max(0, (int)(pos - 3))
if pos > 0:
result += number[start_pos:pos]+"'";
pos += 3
result += number[max(0, (int)(pos - 3)):]
return result;
if __name__ == '__main__':
# read in arguments
if len(sys.argv) == 1:
print("usage: {} <number> <format>\n Output number with a special format.\nValid values for <format> are:\n mem Format as number of bytes\n ns Format as duration in nanoseconds\n n Format as big number.".format(sys.argv[0]))
quit()
number = int(sys.argv[1])
formatSpec = "n"
if len(sys.argv) == 3:
formatSpec = sys.argv[2]
if formatSpec == "mem":
print(str_format_memory(number))
elif formatSpec == "ns":
print(str_format_nanoseconds(number))
else:
print(str_format_number(number))
| en | 0.52771 | #!/usr/bin/env python format a value representing a number of bytes format a value representing a number of nanoseconds format a float value representing a number of seconds #return str_format_nanoseconds(value*1000000000.0) format a value representing a large number # read in arguments | 3.215304 | 3 |
app/shared/enums.py | neurothrone/book-library | 0 | 6617464 | from enum import Enum
class Category(str, Enum):
    """Reading-status category of a book; str-valued so members serialize
    directly (e.g. in JSON payloads and query parameters)."""
    IS_READING = "is_reading"
    HAVE_READ = "have_read"
    WILL_READ = "will_read"
| from enum import Enum
class Category(str, Enum):
IS_READING = "is_reading"
HAVE_READ = "have_read"
WILL_READ = "will_read"
| none | 1 | 2.639436 | 3 | |
preprocess/Python/process.py | chengjunyan1/GN-Transformer-AST | 8 | 6617465 | import ast,re,os
from queue import Queue
from io import BytesIO
import numpy as np
from ast_utils import parse_source
from code_tokenizer import CodeTokenizer
import tokenize as T
# Shared module state, mutated by tokenize() and ast2id() as new token and
# node types are encountered.
S = CodeTokenizer()                # project sub-token splitter
nodevoc = {'null': 0}              # AST node-type name -> integer id
nodevoc_id = len(nodevoc)          # next free node-type id
typevoc = {'null': 0}              # lexer token-type code -> integer id
typevoc_id = len(typevoc)          # fix: was len(nodevoc) (same value, wrong source)
def tuple_le(a, b):
    """Lexicographic <= on (row, col) source-position pairs."""
    return (a[0], a[1]) <= (b[0], b[1])
def fetch_ids(tk, ps, node):
    """Indices i of tokens tk[i] whose start position ps[i] lies within the
    node's source span (inclusive on both ends)."""
    span_lo = (node['begin_row'], node['begin_col'])
    span_hi = (node['end_row'], node['end_col'])
    return [i for i in range(len(tk))
            if tuple_le(span_lo, ps[i]) and tuple_le(ps[i], span_hi)]
def ast2id(astnodes,tok,pos):
    """Convert traversal() output into graph arrays for the model.

    Returns (nodes, edges, types, init): AST node ids, (parent, child)
    edge pairs, integer type ids drawn from the module-level `nodevoc`
    (grown on demand), and, per node, the indices of the source tokens
    that fall inside the node's span.
    """
    global nodevoc,nodevoc_id
    init=[]
    types=[]
    nodes=[]
    edges=[]
    for i in astnodes:
        # Token indices covered by this node's source span.
        init_ids=fetch_ids(tok,pos,i)
        if (i['begin_row']==0 and i['begin_col']==0
            and i['end_row']==0 and i['end_col']==0):
            # An all-zero span presumably marks a node without real
            # position info; map it to the 'null' type id.
            types.append(nodevoc['null'])
        else:
            # Grow the node-type vocabulary on first sight of a type.
            if i['type'] not in nodevoc:
                nodevoc[i['type']]=nodevoc_id
                nodevoc_id+=1
            types.append(nodevoc[i['type']])
        nodes.append(i['id'])
        for e in i['children']:
            edges.append((i['id'],e))
        init.append(init_ids)
    return nodes,edges,types,init
def tok2seq(tok):
    """Flatten a list of sub-token lists into one space-separated string."""
    return ' '.join(sub for group in tok for sub in group)
def truncate_syntax(d,wcut):
    """Truncate a [word_types, nodes, edges, node_types, init] bundle to
    the first `wcut` source tokens.

    Graph nodes are dropped when they are token-less leaves or when they
    reference a token index beyond the cut; edges survive only between
    surviving nodes.
    """
    wtype,gnode,gedge,gtype,ginit=d
    vwtype=wtype[:wcut]
    boundary=wcut
    vgnode=[]
    vgtype=[]
    vginit=[]
    # Nodes appearing as a parent in any edge are interior (non-leaf).
    nonleaf=set()
    for e in gedge: nonleaf.add(e[0])
    for n in range(len(ginit)):
        add=True
        # Drop childless nodes that align with no source tokens.
        if ginit[n]==[] and gnode[n] not in nonleaf: add=False
        # Drop nodes referencing a token position past the truncation point.
        for i in ginit[n]:
            if i>=boundary: add=False
        if add:
            vgnode.append(gnode[n])
            vgtype.append(gtype[n])
            vginit.append(ginit[n])
    # Keep only edges whose both endpoints survived.
    vgedge=[]
    for e in gedge:
        if e[0] in vgnode and e[1] in vgnode:
            vgedge.append(e)
    return [vwtype,vgnode,vgedge,vgtype,vginit]
def to_src(s):
    """Rebuild source text from a DCNL/DCSP-encoded token string
    (DCNL -> newline, DCSP -> tab, other tokens keep a trailing space,
    with the final character stripped)."""
    mapping = {'DCNL': '\n', 'DCSP': '\t'}
    pieces = [mapping.get(tok, tok + ' ') for tok in s.strip().split()]
    return ''.join(pieces)[:-1]
def tokenize(q):
    """Lexically tokenize source string `q` with the stdlib tokenizer.

    Returns (tks, pos, tps): per kept token, a list of sub-tokens
    (numbers/strings abstracted to <NUM>/<STR>), its (row, col) start
    position, and an integer id from the module-level `typevoc`
    (grown on demand).
    """
    global typevoc,typevoc_id
    toks=T.tokenize(BytesIO(q.encode('utf-8')).readline)
    tks=[]
    tps=[]
    pos=[]
    while(True):
        tok=next(toks,-1)
        if tok==-1: break
        # Skip pure-whitespace and zero-width tokens.
        if tok.string in ['','\n','\t']: continue
        if tok.start==tok.end: continue
        # Split the lexeme into sub-tokens with the project tokenizer S.
        sts=[]
        for i in S.tokenize(tok.string):
            if i!='': sts.append(i)
        if sts==[]: continue
        # Token type codes 2 and 3 are tokenize.NUMBER and tokenize.STRING;
        # literals are abstracted to placeholder sub-tokens.
        if tok.type==2: tks.append(['<NUM>'])
        elif tok.type==3: tks.append(['<STR>'])
        else: tks.append(sts)
        if tok.type not in typevoc:
            typevoc[tok.type]=typevoc_id
            typevoc_id+=1
        tps.append(typevoc[tok.type])
        pos.append(tok.start)
    return tks,pos,tps
class node:
    """Mutable record for one AST node; all fields (id, parent, type,
    begin/end positions, children) live in the dict `d`."""
    def __init__(self):
        self.d={}
def traversal(q):
    """Breadth-first traversal of the AST of source string `q`.

    Uses parse_source (presumably asttokens-based -- it attaches
    first_token/last_token to AST nodes) and returns a list of dicts,
    one per node that has source-position info, each carrying id,
    parent, type, begin/end row/col and a list of child ids.
    """
    tree=parse_source(q)
    Q=Queue()
    N=Queue()
    ind=-1
    root=node()
    root.d['parent']=-1
    Q.put(tree)
    N.put(root)
    Nodes=[root]
    # BFS: Q holds AST subtrees, N holds the matching record objects.
    while not Q.empty():
        tree=Q.get()
        cur=N.get()
        ind+=1
        cur.d['id']=ind
        cur.d['type']=tree.__class__.__name__
        # 'Load' context nodes and nodes without token annotations are
        # skipped entirely (their children are not enqueued either).
        if cur.d['type']=='Load': continue
        if not hasattr(tree,'first_token'): continue
        if not hasattr(tree,'last_token'): continue
        cur.d['begin_row']=tree.first_token.start[0]
        cur.d['begin_col']=tree.first_token.start[1]
        cur.d['end_row']=tree.last_token.end[0]
        cur.d['end_col']=tree.last_token.end[1]
        tree_it=ast.iter_child_nodes(tree)
        while(True):
            child=next(tree_it,-1)
            if child==-1: break
            Q.put(child)
            vn=node()
            vn.d['parent']=ind
            N.put(vn)
            Nodes.append(vn)
    # Collect child ids per parent (positioned nodes only).
    children={}
    for n in Nodes:
        i=n.d
        if 'begin_row' not in i: continue
        if i['parent']==-1: continue
        if i['parent'] not in children:
            children[i['parent']]=[]
        children[i['parent']].append(i['id'])
    # Emit only nodes that carry position info, with their children lists.
    astnodes=[]
    for n in Nodes:
        i=n.d
        if 'begin_row' not in i: continue
        i['children']=children[i['id']] if i['id'] in children else []
        astnodes.append(i)
    return astnodes
def process_line(q,wcut=400):
    """Process one source snippet `q` into (token_sequence, syntax_bundle).

    The bundle is [word_types, nodes, edges, node_types, init], truncated
    to at most `wcut` source tokens (see truncate_syntax).
    """
    astnodes=traversal(q)
    tks,pos,wtype=tokenize(q)
    # Expand per-token type ids to one id per sub-token.
    vtype=[]
    for i in range(len(tks)):
        for j in range(len(tks[i])): vtype.append(wtype[i])
    nodes,edges,types,inits=ast2id(astnodes,tks,pos)
    src=tok2seq(tks)
    return src,truncate_syntax([vtype,nodes,edges,types,inits],wcut)
# from: https://github.com/wanyao1992/code_summarization_public/blob/master/script/github/python_process.py
def clean_comment(description):
    """Normalize a raw docstring into one lower-case sentence ending in ' .':
    strip DCNL/DCSP markers, contractions, quoting, parenthesized asides and
    URLs, then keep only the text before the first '.', ',', ':param',
    '@param' or '>>>'."""
    # Collapse the dataset's newline/indent markers to single spaces.
    for marker, repl in ((' DCNL DCSP', ' '), (' DCNL ', ' '), (' DCSP ', ' ')):
        description = description.replace(marker, repl)
    description = description.lower()
    # Expand a few contractions, then drop escapes, backticks and quotes.
    for old, new in (("this's", 'this is'), ("that's", 'that is'),
                     ("there's", 'there is'), ('\\', ''), ('``', ''),
                     ('`', ''), ('\'', '')):
        description = description.replace(old, new)
    # Remove parenthesized asides of two or more characters.
    for inner in re.findall("(?<=[(])[^()]+[^()]+(?=[)])", description):
        description = description.replace('(' + inner + ')', '')
    # Replace URLs with the placeholder 'URL'.
    for url in re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', description):
        description = description.replace(url, 'URL')
    # Keep only the leading clause.
    for sep in ('.', ',', ':param', '@param', '>>>'):
        description = description.split(sep)[0]
    return description.strip().strip('\n') + ' .'
def process(rawpath='./Raw/'):
    """Read the raw declbodies/descriptions files, re-split 60/20/20 at
    random, and write token/doc/syntax files under ./python/{train,test,dev}.

    NOTE(review): output files are opened in append mode, so re-running
    without clearing ./python/ duplicates lines.
    """
    allcoms=[]
    allsrcs=[]
    # Pool all three raw splits, dropping over-long snippets (>1680 words).
    for mode in ['train','test','valid']:
        with open(rawpath+'/'+mode+'/data_ps.declbodies.'+mode,'r',encoding='utf8') as f:
            s=f.readlines()
        with open(rawpath+'/'+mode+'/data_ps.descriptions.'+mode,'r',encoding='utf8', errors='ignore') as f:
            c=f.readlines()
        for i in range(len(s)):
            if len(s[i].split())>1680: continue
            allcoms.append(clean_comment(c[i]))
            allsrcs.append(s[i])
    # Fresh random 60/20/20 split over the pooled examples.
    pidx = list(np.random.permutation(len(allsrcs)))
    split={'train':pidx[0:int(len(allsrcs)*0.6)],
           'test':pidx[int(len(allsrcs)*0.6):int(len(allsrcs)*0.8)],
           'valid':pidx[int(len(allsrcs)*0.8):int(len(allsrcs))]}
    srcs=allsrcs
    coms=allcoms
    count=0
    for mode in ['train','test','valid']:
        print('Processing',mode)
        for i in split[mode]:
            src=to_src(srcs[i])
            # NOTE(review): bare except silently drops snippets that fail
            # to parse/tokenize -- deliberate best-effort filtering.
            try:
                s,dataline=process_line(src)
            except: continue
            if s=='' or coms[i]=='': continue
            if coms[i]==' .': continue
            # The 'valid' split is written to the 'dev' directory.
            mname='dev' if mode=='valid' else mode
            with open('./python/'+mname+'/code.original_subtoken','a',encoding='utf8') as f:
                f.write(s+'\n')
            with open('./python/'+mname+'/javadoc.original','a',encoding='utf8') as f:
                f.write(coms[i]+'\n')
            with open('./python/'+mname+'/syntax.adddata','a',encoding='utf8') as f:
                f.write(str(dataline)+'\n')
            count+=1
    print('Processed',count)
    # Persist the vocabularies accumulated during processing.
    with open('./python/nodevoc.txt','w',encoding='utf8') as f:
        f.write(str(nodevoc))
    with open('./python/typevoc.txt','w',encoding='utf8') as f:
        f.write(str(typevoc))
if __name__=='__main__':
    save='./python/'
    # Create the per-split output directories.  (The original repeated the
    # same exists-check/mkdir line three times per mode; makedirs with
    # exist_ok covers it in one call.)
    for mode in ['train','test','dev']:
        os.makedirs(save+mode, exist_ok=True)
    process()
from queue import Queue
from io import BytesIO
import numpy as np
from ast_utils import parse_source
from code_tokenizer import CodeTokenizer
import tokenize as T
S = CodeTokenizer()
nodevoc={'null':0}
nodevoc_id=len(nodevoc)
typevoc={'null':0}
typevoc_id=len(nodevoc)
def tuple_le(a,b):
if a[0]<b[0]:
return True
elif a[0]>b[0]:
return False
else:
return a[1]<=b[1]
def fetch_ids(tk,ps,node):
low=(node['begin_row'],node['begin_col'])
high=(node['end_row'],node['end_col'])
ids=[]
for i in range(len(tk)):
if tuple_le(low,ps[i]) and tuple_le(ps[i],high):
ids.append(i)
return ids
def ast2id(astnodes,tok,pos):
global nodevoc,nodevoc_id
init=[]
types=[]
nodes=[]
edges=[]
for i in astnodes:
init_ids=fetch_ids(tok,pos,i)
if (i['begin_row']==0 and i['begin_col']==0
and i['end_row']==0 and i['end_col']==0):
types.append(nodevoc['null'])
else:
if i['type'] not in nodevoc:
nodevoc[i['type']]=nodevoc_id
nodevoc_id+=1
types.append(nodevoc[i['type']])
nodes.append(i['id'])
for e in i['children']:
edges.append((i['id'],e))
init.append(init_ids)
return nodes,edges,types,init
def tok2seq(tok):
s=''
for i in tok:
for k in i:
s+=k+' '
return s[:-1]
def truncate_syntax(d,wcut):
wtype,gnode,gedge,gtype,ginit=d
vwtype=wtype[:wcut]
boundary=wcut
vgnode=[]
vgtype=[]
vginit=[]
nonleaf=set()
for e in gedge: nonleaf.add(e[0])
for n in range(len(ginit)):
add=True
if ginit[n]==[] and gnode[n] not in nonleaf: add=False
for i in ginit[n]:
if i>=boundary: add=False
if add:
vgnode.append(gnode[n])
vgtype.append(gtype[n])
vginit.append(ginit[n])
vgedge=[]
for e in gedge:
if e[0] in vgnode and e[1] in vgnode:
vgedge.append(e)
return [vwtype,vgnode,vgedge,vgtype,vginit]
def to_src(s):
r=''
for i in s.strip().split():
if i=='': continue
elif i=='DCNL': r+='\n'
elif i=='DCSP': r+='\t'
else: r+=i+' '
return r[:-1]
def tokenize(q):
global typevoc,typevoc_id
toks=T.tokenize(BytesIO(q.encode('utf-8')).readline)
tks=[]
tps=[]
pos=[]
while(True):
tok=next(toks,-1)
if tok==-1: break
if tok.string in ['','\n','\t']: continue
if tok.start==tok.end: continue
sts=[]
for i in S.tokenize(tok.string):
if i!='': sts.append(i)
if sts==[]: continue
if tok.type==2: tks.append(['<NUM>'])
elif tok.type==3: tks.append(['<STR>'])
else: tks.append(sts)
if tok.type not in typevoc:
typevoc[tok.type]=typevoc_id
typevoc_id+=1
tps.append(typevoc[tok.type])
pos.append(tok.start)
return tks,pos,tps
class node:
def __init__(self):
self.d={}
def traversal(q):
tree=parse_source(q)
Q=Queue()
N=Queue()
ind=-1
root=node()
root.d['parent']=-1
Q.put(tree)
N.put(root)
Nodes=[root]
while not Q.empty():
tree=Q.get()
cur=N.get()
ind+=1
cur.d['id']=ind
cur.d['type']=tree.__class__.__name__
if cur.d['type']=='Load': continue
if not hasattr(tree,'first_token'): continue
if not hasattr(tree,'last_token'): continue
cur.d['begin_row']=tree.first_token.start[0]
cur.d['begin_col']=tree.first_token.start[1]
cur.d['end_row']=tree.last_token.end[0]
cur.d['end_col']=tree.last_token.end[1]
tree_it=ast.iter_child_nodes(tree)
while(True):
child=next(tree_it,-1)
if child==-1: break
Q.put(child)
vn=node()
vn.d['parent']=ind
N.put(vn)
Nodes.append(vn)
children={}
for n in Nodes:
i=n.d
if 'begin_row' not in i: continue
if i['parent']==-1: continue
if i['parent'] not in children:
children[i['parent']]=[]
children[i['parent']].append(i['id'])
astnodes=[]
for n in Nodes:
i=n.d
if 'begin_row' not in i: continue
i['children']=children[i['id']] if i['id'] in children else []
astnodes.append(i)
return astnodes
def process_line(q,wcut=400):
astnodes=traversal(q)
tks,pos,wtype=tokenize(q)
vtype=[]
for i in range(len(tks)):
for j in range(len(tks[i])): vtype.append(wtype[i])
nodes,edges,types,inits=ast2id(astnodes,tks,pos)
src=tok2seq(tks)
return src,truncate_syntax([vtype,nodes,edges,types,inits],wcut)
# from: https://github.com/wanyao1992/code_summarization_public/blob/master/script/github/python_process.py
def clean_comment(description):
description = description.replace(' DCNL DCSP', ' ')
description = description.replace(' DCNL ', ' ')
description = description.replace(' DCSP ', ' ')
description = description.lower()
description = description.replace("this's", 'this is')
description = description.replace("that's", 'that is')
description = description.replace("there's", 'there is')
description = description.replace('\\', '')
description = description.replace('``', '')
description = description.replace('`', '')
description = description.replace('\'', '')
removes = re.findall("(?<=[(])[^()]+[^()]+(?=[)])", description)
for r in removes:
description = description.replace('('+r+')', '')
urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', description)
for url in urls:
description = description.replace(url, 'URL')
description = description.split('.')[0]
description = description.split(',')[0]
description = description.split(':param')[0]
description = description.split('@param')[0]
description = description.split('>>>')[0]
description = description.strip().strip('\n') + ' .'
return description
def process(rawpath='./Raw/'):
allcoms=[]
allsrcs=[]
for mode in ['train','test','valid']:
with open(rawpath+'/'+mode+'/data_ps.declbodies.'+mode,'r',encoding='utf8') as f:
s=f.readlines()
with open(rawpath+'/'+mode+'/data_ps.descriptions.'+mode,'r',encoding='utf8', errors='ignore') as f:
c=f.readlines()
for i in range(len(s)):
if len(s[i].split())>1680: continue
allcoms.append(clean_comment(c[i]))
allsrcs.append(s[i])
pidx = list(np.random.permutation(len(allsrcs)))
split={'train':pidx[0:int(len(allsrcs)*0.6)],
'test':pidx[int(len(allsrcs)*0.6):int(len(allsrcs)*0.8)],
'valid':pidx[int(len(allsrcs)*0.8):int(len(allsrcs))]}
srcs=allsrcs
coms=allcoms
count=0
for mode in ['train','test','valid']:
print('Processing',mode)
for i in split[mode]:
src=to_src(srcs[i])
try:
s,dataline=process_line(src)
except: continue
if s=='' or coms[i]=='': continue
if coms[i]==' .': continue
mname='dev' if mode=='valid' else mode
with open('./python/'+mname+'/code.original_subtoken','a',encoding='utf8') as f:
f.write(s+'\n')
with open('./python/'+mname+'/javadoc.original','a',encoding='utf8') as f:
f.write(coms[i]+'\n')
with open('./python/'+mname+'/syntax.adddata','a',encoding='utf8') as f:
f.write(str(dataline)+'\n')
count+=1
print('Processed',count)
with open('./python/nodevoc.txt','w',encoding='utf8') as f:
f.write(str(nodevoc))
with open('./python/typevoc.txt','w',encoding='utf8') as f:
f.write(str(typevoc))
if __name__=='__main__':
save='./python/'
for mode in ['train','test','dev']:
if not os.path.exists(save+mode): os.makedirs(save+mode)
if not os.path.exists(save+mode): os.makedirs(save+mode)
if not os.path.exists(save+mode): os.makedirs(save+mode)
process() | en | 0.459067 | # from: https://github.com/wanyao1992/code_summarization_public/blob/master/script/github/python_process.py | 2.436514 | 2 |
tests/integration/test_diffusion2d.py | sab-inf/sse-github-actions-exercise | 0 | 6617466 | <gh_stars>0
"""
Tests for functionality checks in class SolveDiffusion2D
"""
from diffusion2d import SolveDiffusion2D
import numpy as np
import unittest
class TestDiffusion2D(unittest.TestCase):
    """Integration checks for SolveDiffusion2D."""

    def test_initialize_physical_parameters(self):
        """
        Checks function SolveDiffusion2D.initialize_physical_parameters
        """
        solver = SolveDiffusion2D()
        solver.initialize_domain(w=5.0, h=20.0, dx=0.2, dy=0.4)
        solver.initialize_physical_parameters(d=2.0, T_cold=250.0, T_hot=650.0)
        expected_dt = 0.008
        # Bug fix: the original asserted expected_dt against itself, so the
        # test could never fail.  Compare the solver's computed time step.
        # NOTE(review): assumes the solver stores the step as `dt` --
        # confirm against diffusion2d.SolveDiffusion2D.
        self.assertAlmostEqual(solver.dt, expected_dt, 2)

    def test_set_initial_condition(self):
        """
        Checks function SolveDiffusion2D.set_initial_condition
        """
        solver = SolveDiffusion2D()
        solver.initialize_domain(w=5.0, h=20.0, dx=0.2, dy=0.4)
        solver.initialize_physical_parameters(d=2.0, T_cold=250.0, T_hot=650.0)
        expected_min = 250.0
        expected_max = 650.0
        expected_mean = 273.68
        u0 = solver.set_initial_condition()
        # The field must span exactly [T_cold, T_hot] and match the
        # precomputed mean for this geometry.
        min_value = np.min(u0)
        max_value = np.max(u0)
        mean_value = np.mean(u0)
        self.assertAlmostEqual(min_value, expected_min)
        self.assertAlmostEqual(max_value, expected_max)
        self.assertAlmostEqual(mean_value, expected_mean)
| """
Tests for functionality checks in class SolveDiffusion2D
"""
from diffusion2d import SolveDiffusion2D
import numpy as np
import unittest
class TestDiffusion2D(unittest.TestCase):
def test_initialize_physical_parameters(self):
"""
Checks function SolveDiffusion2D.initialize_domain
"""
solver = SolveDiffusion2D()
solver.initialize_domain(w=5.0, h=20.0, dx=0.2, dy=0.4)
solver.initialize_physical_parameters(d=2.0, T_cold=250.0, T_hot=650.0)
expected_dt = 0.008
self.assertAlmostEqual(expected_dt, expected_dt, 2)
def test_set_initial_condition(self):
"""
Checks function SolveDiffusion2D.set_initial_function
"""
solver = SolveDiffusion2D()
solver.initialize_domain(w=5.0, h=20.0, dx=0.2, dy=0.4)
solver.initialize_physical_parameters(d=2.0, T_cold=250.0, T_hot=650.0)
expected_min = 250.0
expected_max = 650.0
expected_mean = 273.68
u0 = solver.set_initial_condition()
min_value = np.min(u0)
max_value = np.max(u0)
mean_value = np.mean(u0)
self.assertAlmostEqual(min_value, expected_min)
self.assertAlmostEqual(max_value, expected_max)
self.assertAlmostEqual(mean_value, expected_mean) | en | 0.21304 | Tests for functionality checks in class SolveDiffusion2D Checks function SolveDiffusion2D.initialize_domain Checks function SolveDiffusion2D.set_initial_function | 2.956431 | 3 |
recursive_algorithms.py | ImadDabbura/grokking_algorithms | 0 | 6617467 | <reponame>ImadDabbura/grokking_algorithms<gh_stars>0
'''
Implementing some funtions to compute measurements recursively.
'''
def find_gcd(a, b):
    """Greatest common divisor of `a` and `b` via recursive Euclid:
    order the pair, then recurse on (smaller, larger % smaller)."""
    if a == 0:
        return b
    if b == 0:
        return a
    if a < b:
        a, b = b, a
    return find_gcd(b, a % b)
def fact(n):
    '''Compute n! for a non-negative integer n.

    Raises:
        ValueError: if n is negative or not an integer.
    '''
    # Bug fix: the original wrote `type(n) == 'float'`, comparing a type
    # object against a string, so the non-integer check never triggered.
    if not isinstance(n, int) or n < 0:
        raise ValueError('n should be positive integer.')
    if n in (0, 1):
        return 1
    return n * fact(n - 1)
def compute_sum(L):
    '''Sum of the elements of `L`, computed recursively.'''
    if not L:
        return 0
    if len(L) == 1:
        return L[0]
    head, *tail = L
    return head + compute_sum(tail)
def count_elements(L):
    '''Number of elements in `L`, counted recursively.'''
    if not L:
        return 0
    return 1 + count_elements(L[1:])
def find_max(L):
    '''Return the largest element of non-empty `L`, recursively.

    Bug fix: the original evaluated find_max(L[1:]) twice per level,
    making the call count exponential; the recursive result is now
    computed once (linear number of calls).
    '''
    if len(L) == 1:
        return L[0]
    rest_max = find_max(L[1:])
    return L[0] if L[0] > rest_max else rest_max
| '''
Implementing some funtions to compute measurements recursively.
'''
def find_gcd(a, b):
'''Find greatest common divisor of `a` and `b`.'''
if a == 0:
return b
elif b == 0:
return a
else:
if a < b:
a, b = b, a
remainder = a % b
return find_gcd(b, remainder)
def fact(n):
'''Compute n factorial.'''
if n < 0 or type(n) == 'float':
raise ValueError('n should be positive integer.')
if n == 0 or n == 1:
return 1
else:
return n * fact(n - 1)
def compute_sum(L):
'''Compute the sum of element in `L` recursively.'''
if len(L) == 0:
return 0
elif len(L) == 1:
return L[0]
else:
return L[0] + compute_sum(L[1:])
def count_elements(L):
'''Count number of element in `L` recursively.'''
if len(L) == 0:
return 0
elif len(L) == 1:
return 1
else:
return 1 + count_elements(L[1:])
def find_max(L):
if len(L) == 1:
return L[0]
elif len(L) == 2:
return L[0] if L[0] > L[1] else L[1]
return L[0] if L[0] > find_max(L[1:]) else find_max(L[1:]) | en | 0.87756 | Implementing some funtions to compute measurements recursively. Find greatest common divisor of `a` and `b`. Compute n factorial. Compute the sum of element in `L` recursively. Count number of element in `L` recursively. | 4.012289 | 4 |
lib/examples/basic_nacl/common_files/get_distance.py | mczwier/westpa_py3 | 4 | 6617468 | <filename>lib/examples/basic_nacl/common_files/get_distance.py<gh_stars>1-10
import mdtraj
import numpy
# Load the WESTPA trajectory segment on top of the basis-state topology.
traj = mdtraj.load('seg.dcd', top='bstate.pdb')
# Distance between atoms 0 and 1 (presumably the Na+/Cl- ion pair in this
# example -- confirm against the topology), honoring periodic boundaries.
dist = mdtraj.compute_distances(traj, [[0,1]], periodic=True)
d_arr = numpy.asarray(dist)
# Scale by 10 -- presumably converting mdtraj's nanometers to Angstroms;
# confirm downstream unit expectations.
d_arr = d_arr*10
numpy.savetxt("dist.dat", d_arr)
| <filename>lib/examples/basic_nacl/common_files/get_distance.py<gh_stars>1-10
import mdtraj
import numpy
traj = mdtraj.load('seg.dcd', top='bstate.pdb')
dist = mdtraj.compute_distances(traj, [[0,1]], periodic=True)
d_arr = numpy.asarray(dist)
d_arr = d_arr*10
numpy.savetxt("dist.dat", d_arr)
| none | 1 | 2.519511 | 3 | |
Chapter09/pt09_printAPI.py | allen-zqh/plotly | 0 | 6617469 | from visdom import Visdom
vis = Visdom()
# help() writes the documentation itself and returns None, so the original
# print(help(...)) appended a spurious "None" line after the help text.
help(vis.scatter)
vis = Visdom()
print(help( vis.scatter ) ) | none | 1 | 1.523072 | 2 | |
util/test_hpsplit.py | juandesant/astrometry.net | 460 | 6617470 | <reponame>juandesant/astrometry.net<filename>util/test_hpsplit.py
# This file is part of the Astrometry.net suite.
# Licensed under a 3-clause BSD style license - see LICENSE
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
import pylab as plt
import numpy as np
from astrometry.util.fits import *
from astrometry.util.util import *
from astrometry.util.run_command import *
from astrometry.util.plotutils import *
# Create test file with a grid of RA,Dec points.
r = np.arange(0, 360, 1)
d = np.arange(-90, 90.05, 1)
R,D = np.meshgrid(r, d)
T = tabledata()
T.ra = R.ravel()
T.dec = D.ravel()
fn = 'test-hpsplit-in1.fits'
T.writeto(fn)
# Run the hpsplit tool to split the table into the 12 nside=1 healpix
# tiles (NOTE(review): -n is the healpix nside; -m presumably a margin --
# confirm against hpsplit's usage text).
splitpat = 'test-hpsplit-1-%02i.fits'
cmd = 'hpsplit -o %s -n 1 -m 10 %s' % (splitpat, fn)
rtn,out,err = run_command(cmd)
assert(rtn == 0)
ps = PlotSequence('test_hpsplit')
# For each of the 12 healpix cells, plot the points assigned to it plus
# the cell boundary for visual verification.
for hp in range(12):
    T = fits_table(splitpat % hp)
    print('Healpix', hp, 'got', len(T))
    plt.clf()
    plothist(T.ra, T.dec, 360, range=((0,360),(-90,90)))
    # Trace the four edges of the healpix cell in its (x, y) unit square,
    # converting each sample to RA,Dec.
    vv = np.linspace(0, 1, 100)
    rd = []
    for v in vv:
        rd.append(healpix_to_radecdeg(hp, 1, 0., v))
    for v in vv:
        rd.append(healpix_to_radecdeg(hp, 1, v, 1.))
    for v in reversed(vv):
        rd.append(healpix_to_radecdeg(hp, 1, 1., v))
    for v in reversed(vv):
        rd.append(healpix_to_radecdeg(hp, 1, v, 0.))
    rd = np.array(rd)
    plt.plot(rd[:,0], rd[:,1], '-', color=(0,0.5,1.), lw=2)
    plt.axis([0, 360, -90, 90])
    ps.savefig()
| # This file is part of the Astrometry.net suite.
# Licensed under a 3-clause BSD style license - see LICENSE
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
import pylab as plt
import numpy as np
from astrometry.util.fits import *
from astrometry.util.util import *
from astrometry.util.run_command import *
from astrometry.util.plotutils import *
# Create test file with a grid of RA,Dec points.
r = np.arange(0, 360, 1)
d = np.arange(-90, 90.05, 1)
R,D = np.meshgrid(r, d)
T = tabledata()
T.ra = R.ravel()
T.dec = D.ravel()
fn = 'test-hpsplit-in1.fits'
T.writeto(fn)
splitpat = 'test-hpsplit-1-%02i.fits'
cmd = 'hpsplit -o %s -n 1 -m 10 %s' % (splitpat, fn)
rtn,out,err = run_command(cmd)
assert(rtn == 0)
ps = PlotSequence('test_hpsplit')
for hp in range(12):
T = fits_table(splitpat % hp)
print('Healpix', hp, 'got', len(T))
plt.clf()
plothist(T.ra, T.dec, 360, range=((0,360),(-90,90)))
vv = np.linspace(0, 1, 100)
rd = []
for v in vv:
rd.append(healpix_to_radecdeg(hp, 1, 0., v))
for v in vv:
rd.append(healpix_to_radecdeg(hp, 1, v, 1.))
for v in reversed(vv):
rd.append(healpix_to_radecdeg(hp, 1, 1., v))
for v in reversed(vv):
rd.append(healpix_to_radecdeg(hp, 1, v, 0.))
rd = np.array(rd)
plt.plot(rd[:,0], rd[:,1], '-', color=(0,0.5,1.), lw=2)
plt.axis([0, 360, -90, 90])
ps.savefig() | en | 0.91661 | # This file is part of the Astrometry.net suite. # Licensed under a 3-clause BSD style license - see LICENSE # Create test file with a grid of RA,Dec points. | 2.161059 | 2 |
src/validation.py | keyanyang/udacity-data-engineering-capstone | 0 | 6617471 | <reponame>keyanyang/udacity-data-engineering-capstone<gh_stars>0
import os
from dotenv import load_dotenv, find_dotenv
from src.utility import check_db_table_size
# load environment variables
load_dotenv(find_dotenv())
DATABASE_URL = os.getenv("DB_URL")
print(f"Checking {DATABASE_URL}")
tables = ["i94_fact",
"visa_dim",
"temperature_dim",
"airport_dim",
"visa_dim"
]
for table in tables:
check_db_table_size(table, DATABASE_URL)
| import os
from dotenv import load_dotenv, find_dotenv
from src.utility import check_db_table_size
# load environment variables
load_dotenv(find_dotenv())
DATABASE_URL = os.getenv("DB_URL")
print(f"Checking {DATABASE_URL}")
tables = ["i94_fact",
"visa_dim",
"temperature_dim",
"airport_dim",
"visa_dim"
]
for table in tables:
check_db_table_size(table, DATABASE_URL) | en | 0.496147 | # load environment variables | 2.312991 | 2 |
alert_over.py | morestart/Alertover-Notifications-Plugin | 9 | 6617472 | <gh_stars>1-10
import requests
import logging
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_TARGET, ATTR_TITLE, PLATFORM_SCHEMA, BaseNotificationService)
import homeassistant.helpers.config_validation as cv
from requests.exceptions import (
ConnectionError as ConnectError, HTTPError, Timeout)
_logging = logging.getlogging(__name__)
CONF_FROM_SOURCE = "from_source"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_FROM_SOURCE): cv.string,
})
def get_service(hass, config, discovery_info=None):
return AlertOverNotificationService(config[CONF_FROM_SOURCE])
class AlertOverNotificationService(BaseNotificationService):
def __init__(self, from_source):
self.from_source = from_source
def send_message(self, message="", **kwargs):
receivers = kwargs.get(ATTR_TARGET)
title = kwargs.get(ATTR_TITLE)
try:
for receiver in receivers:
_logging.info("已发送")
data = {
"source": self.from_source,
"receiver": receiver,
"content": message,
"title": title
}
_logging.info(data)
requests.post(
"https://api.alertover.com/v1/alert",
data=data
)
except (ConnectError, HTTPError, Timeout, ValueError) as error:
_logging.error("连接失败,ERROR:" + error)
| import requests
import logging
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_TARGET, ATTR_TITLE, PLATFORM_SCHEMA, BaseNotificationService)
import homeassistant.helpers.config_validation as cv
from requests.exceptions import (
ConnectionError as ConnectError, HTTPError, Timeout)
_logging = logging.getlogging(__name__)
CONF_FROM_SOURCE = "from_source"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_FROM_SOURCE): cv.string,
})
def get_service(hass, config, discovery_info=None):
return AlertOverNotificationService(config[CONF_FROM_SOURCE])
class AlertOverNotificationService(BaseNotificationService):
def __init__(self, from_source):
self.from_source = from_source
def send_message(self, message="", **kwargs):
receivers = kwargs.get(ATTR_TARGET)
title = kwargs.get(ATTR_TITLE)
try:
for receiver in receivers:
_logging.info("已发送")
data = {
"source": self.from_source,
"receiver": receiver,
"content": message,
"title": title
}
_logging.info(data)
requests.post(
"https://api.alertover.com/v1/alert",
data=data
)
except (ConnectError, HTTPError, Timeout, ValueError) as error:
_logging.error("连接失败,ERROR:" + error) | none | 1 | 2.236058 | 2 | |
src/vardb/util/db.py | Dabble-of-DevOps-Bio/ella | 0 | 6617473 | import os
import re
import json
from sqlalchemy.orm import scoped_session
from .extended_query import ExtendedQuery
class DB(object):
def __init__(self):
self.engine = None
self.session = None
def connect(self, host=None, engine_kwargs=None):
# Lazy load dependencies to avoid problems in code not actually using DB, but uses modules from which this module is referenced.
from sqlalchemy import create_engine, event
from sqlalchemy.orm import sessionmaker
# Disconnect in case we're already connected
self.disconnect()
self.host = host or os.environ.get("DB_URL")
if not engine_kwargs:
engine_kwargs = dict()
self.engine = create_engine(self.host, client_encoding="utf8", **engine_kwargs)
self.sessionmaker = sessionmaker( # Class for creating session instances
bind=self.engine, query_cls=ExtendedQuery
)
self.session = scoped_session(self.sessionmaker)
# Error handling. Extend if required.
@event.listens_for(self.engine, "handle_error")
def handle_exception(context):
if context.original_exception.pgcode != "JSONV":
raise
else:
# We handle only one error in python, as the error raised by json validation is very limited in information.
# Create a more meaningful error message with jsonschema here.
from sqlalchemy.orm import sessionmaker
from vardb.datamodel.jsonschemas.jsonvalidationerror import (
concatenate_json_validation_errors,
JSONValidationError,
)
message = context.original_exception.diag.message_primary
message_data = message.split(" ---- ")[0]
m = re.match("schema_name=([^,]*), data=(.*)", message_data)
if not m:
raise
else:
schema_name, data = m.groups()
data = json.loads(data)
session = scoped_session(
sessionmaker(bind=context.engine, query_cls=ExtendedQuery)
)
error_message = concatenate_json_validation_errors(session, data, schema_name)
raise JSONValidationError(error_message)
def disconnect(self):
if self.session:
self.session.close()
if self.engine:
self.engine.dispose()
| import os
import re
import json
from sqlalchemy.orm import scoped_session
from .extended_query import ExtendedQuery
class DB(object):
def __init__(self):
self.engine = None
self.session = None
def connect(self, host=None, engine_kwargs=None):
# Lazy load dependencies to avoid problems in code not actually using DB, but uses modules from which this module is referenced.
from sqlalchemy import create_engine, event
from sqlalchemy.orm import sessionmaker
# Disconnect in case we're already connected
self.disconnect()
self.host = host or os.environ.get("DB_URL")
if not engine_kwargs:
engine_kwargs = dict()
self.engine = create_engine(self.host, client_encoding="utf8", **engine_kwargs)
self.sessionmaker = sessionmaker( # Class for creating session instances
bind=self.engine, query_cls=ExtendedQuery
)
self.session = scoped_session(self.sessionmaker)
# Error handling. Extend if required.
@event.listens_for(self.engine, "handle_error")
def handle_exception(context):
if context.original_exception.pgcode != "JSONV":
raise
else:
# We handle only one error in python, as the error raised by json validation is very limited in information.
# Create a more meaningful error message with jsonschema here.
from sqlalchemy.orm import sessionmaker
from vardb.datamodel.jsonschemas.jsonvalidationerror import (
concatenate_json_validation_errors,
JSONValidationError,
)
message = context.original_exception.diag.message_primary
message_data = message.split(" ---- ")[0]
m = re.match("schema_name=([^,]*), data=(.*)", message_data)
if not m:
raise
else:
schema_name, data = m.groups()
data = json.loads(data)
session = scoped_session(
sessionmaker(bind=context.engine, query_cls=ExtendedQuery)
)
error_message = concatenate_json_validation_errors(session, data, schema_name)
raise JSONValidationError(error_message)
def disconnect(self):
if self.session:
self.session.close()
if self.engine:
self.engine.dispose()
| en | 0.87649 | # Lazy load dependencies to avoid problems in code not actually using DB, but uses modules from which this module is referenced. # Disconnect in case we're already connected # Class for creating session instances # Error handling. Extend if required. # We handle only one error in python, as the error raised by json validation is very limited in information. # Create a more meaningful error message with jsonschema here. | 2.672328 | 3 |
stapler/tests/tests.py | danjer/django-stapler | 0 | 6617474 | from django.test import TestCase
from .test_app.models import Bike, Manufacturer, Country, Wheel
from stapler.tests.test_app.forms import BikeManufacturerForm, CustomBikeManufacturerForm, M2mBikeManufacturerForm, \
BikeWheelForm, BikeModelForm
from django import forms
# Create your tests here.
class StaplerFormTestCase(TestCase):
def test_copies_fields_with_clashing_names(self):
form = BikeManufacturerForm()
self.assertEqual(4, len(form.fields))
def test_handles_auto_prefix_option(self):
form = BikeWheelForm()
field_names = set([fn for fn in form.fields.keys()])
expected_field_names = set(('price', 'brand', 'name', 'available_countries'))
self.assertEqual(field_names, expected_field_names)
def test_does_not_override_declared_fields(self):
form = CustomBikeManufacturerForm()
self.assertEqual(4, len(form.fields))
self.assertEqual(type(form.fields['bike__name']), forms.IntegerField)
def test_accepts_instance_keyword(self):
bike = Bike.objects.create(name='Propel', price=200)
form = BikeManufacturerForm(instance=bike)
self.assertEqual(form.bike_instance, bike)
def test_is_not_bound(self):
bike = Bike.objects.create(name='Propel', price=200)
manufacturer = Manufacturer.objects.create(name='Giant', revenue='2000.000,-')
form = BikeManufacturerForm(instances=(bike, manufacturer))
self.assertFalse(form.is_bound)
def test_is_bound(self):
form = BikeManufacturerForm({})
self.assertTrue(form.is_bound)
def test_loads_instances_corretly(self):
bike = Bike.objects.create(name='Propel', price=200)
manufacturer = Manufacturer.objects.create(name='Giant', revenue='2000.000,-')
form = BikeManufacturerForm(instances=(bike, manufacturer))
self.assertEqual(form.manufacturer_instance, manufacturer)
self.assertEqual(form.bike_instance, bike)
def test_loads_initial_from_instances(self):
bike = Bike.objects.create(name='Propel', price=200)
manufacturer = Manufacturer.objects.create(name='Giant', revenue='2000.000,-')
form = BikeManufacturerForm(instances=(bike, manufacturer))
self.assertEqual(form.initial['bike__price'], 200)
self.assertEqual(form.initial['bike__name'], 'Propel')
def test_initial_overrides_instance(self):
bike = Bike.objects.create(name='Propel', price=200)
form = BikeManufacturerForm(instances=(bike,), initial={'bike__name': 'Oltre xr4'})
self.assertEqual(form.initial['bike__name'], 'Oltre xr4')
def test_valid_invalid(self):
form = BikeManufacturerForm({'bike__name': 'Propel', 'manufacturer__name': 'Giant',
'manufacturer__revenue': '30000,-'})
self.assertTrue(form.is_bound)
self.assertFalse(form.is_valid())
def test_valid_valid(self):
form = BikeManufacturerForm({'bike__name': 'Propel', 'manufacturer__name':
'Giant', 'manufacturer__revenue': '30000,-', 'bike__price': 300})
self.assertTrue(form.is_bound)
self.assertTrue(form.is_valid())
def test_saves_models(self):
form = BikeManufacturerForm({'bike__name': 'Propel', 'manufacturer__name': 'Giant',
'manufacturer__revenue': '30000,-', 'bike__price': 300})
form.is_valid()
result = form.save()
b = Bike.objects.first()
m = Manufacturer.objects.first()
self.assertEqual(b.price, 300)
self.assertEqual(m.name, 'Giant')
def tests_valid_with_auto_prefix_off(self):
data = {'brand': 'Giant', 'name': 'Propel', 'price': 300}
form = BikeWheelForm(data)
self.assertTrue(form.is_valid())
def test_returns_saved_models(self):
form = BikeManufacturerForm({'bike__name': 'Propel', 'manufacturer__name': 'Giant',
'manufacturer__revenue': '30000,-', 'bike__price': 300})
form.is_valid()
result = form.save()
b = Bike.objects.first()
m = Manufacturer.objects.first()
self.assertEqual(b, result['bike_instance'])
self.assertEqual(m, result['manufacturer_instance'])
def test_returns_saved_models_with_auto_prefix_off(self):
data = {'brand': 'Giant', 'name': 'Propel', 'price': 300}
form = BikeWheelForm(data)
result = form.save()
bike = result['bike_instance']
wheel = result['wheel_instance']
self.assertEqual(wheel.brand, 'Giant')
self.assertEqual(bike.price, 300)
self.assertEqual(bike.pk, 1)
self.assertEqual(wheel.pk, 1)
def test_updates_models(self):
bike = Bike.objects.create(name='Propel', price=200)
manufacturer = Manufacturer.objects.create(name='Giant', revenue='2000.000,-')
form = BikeManufacturerForm({'bike__name': 'Oltre xr4', 'manufacturer__name': 'Bianchi',
'manufacturer__revenue': '30000,-', 'bike__price': 300},
instances=(bike, manufacturer))
form.is_valid()
form.save()
b = Bike.objects.first()
m = Manufacturer.objects.first()
self.assertEqual(b.price, 300)
self.assertEqual(b.name, 'Oltre xr4')
self.assertEqual(m.name, 'Bianchi')
self.assertEqual(m.revenue, '30000,-')
self.assertEqual(b.pk, bike.pk)
self.assertEqual(m.pk, manufacturer.pk)
def test_saves_m2m(self):
countries = [Country.objects.create(name=f'country_{0}') for i in range(3)]
for c in countries:
c.save()
data = {'bike__name': 'Oltre xr4',
'bike__price': 300,
'bike__available_countries': [1,2,3],
'manufacturer__name': 'Bianchi',
'manufacturer__revenue': '30000,-'}
form = M2mBikeManufacturerForm(data)
result = form.save(commit=True)
bike = result['bike_instance']
self.assertTrue(len(bike.available_countries.all()), 3)
def test_saves_m2m_with_auto_prefix_off(self):
countries = [Country.objects.create(name=f'country_{0}') for i in range(3)]
for c in countries:
c.save()
data = {'brand': 'Giant', 'name': 'Propel', 'price': 300, 'available_countries': [1, 2, 3]}
form = BikeWheelForm(data)
result = form.save()
bike = result['bike_instance']
wheel = result['wheel_instance']
self.assertEqual(wheel.brand, 'Giant')
self.assertEqual(bike.price, 300)
self.assertEqual(bike.pk, 1)
self.assertEqual(wheel.pk, 1)
self.assertEqual(len(wheel.available_countries.all()), 3)
def test_initial_with_auto_prefix_off(self):
bike = Bike.objects.create(name='Propel', price=300)
bike.save()
wheel = Wheel.objects.create(brand='Giant')
wheel.save()
form = BikeWheelForm(instances=(bike, wheel))
self.assertEqual(form.initial['name'], 'Propel')
self.assertEqual(form.initial['brand'], 'Giant')
def test_required_modelforms_option(self):
for _ in range(2):
c = Country.objects.create(name=f'land_{_}')
c.save()
data = {'name': 'Giant', 'price': 2000, 'available_countries': [1, 2]}
form = BikeWheelForm(data)
self.assertTrue(BikeModelForm in form._meta.required)
self.assertTrue(form.is_valid())
def test_saves_valid_instances_only(self):
for _ in range(2):
c = Country.objects.create(name=f'land_{_}')
c.save()
data = {'name': 'Giant', 'price': 2000, 'available_countries': [1, 2]}
form = BikeWheelForm(data)
self.assertTrue(BikeModelForm in form._meta.required)
self.assertTrue(form.is_valid())
results = form.save()
saved_bike = results['bike_instance']
failed_wheel = results['wheel_instance']
self.assertEqual(len(Bike.objects.all()), 1)
self.assertEqual(len(Wheel.objects.all()), 0)
self.assertEqual(saved_bike.pk, 1)
self.assertEqual(failed_wheel, None)
# def test_overrides_save_method(self):
# raise Exception('TODO') | from django.test import TestCase
from .test_app.models import Bike, Manufacturer, Country, Wheel
from stapler.tests.test_app.forms import BikeManufacturerForm, CustomBikeManufacturerForm, M2mBikeManufacturerForm, \
BikeWheelForm, BikeModelForm
from django import forms
# Create your tests here.
class StaplerFormTestCase(TestCase):
def test_copies_fields_with_clashing_names(self):
form = BikeManufacturerForm()
self.assertEqual(4, len(form.fields))
def test_handles_auto_prefix_option(self):
form = BikeWheelForm()
field_names = set([fn for fn in form.fields.keys()])
expected_field_names = set(('price', 'brand', 'name', 'available_countries'))
self.assertEqual(field_names, expected_field_names)
def test_does_not_override_declared_fields(self):
form = CustomBikeManufacturerForm()
self.assertEqual(4, len(form.fields))
self.assertEqual(type(form.fields['bike__name']), forms.IntegerField)
def test_accepts_instance_keyword(self):
bike = Bike.objects.create(name='Propel', price=200)
form = BikeManufacturerForm(instance=bike)
self.assertEqual(form.bike_instance, bike)
def test_is_not_bound(self):
bike = Bike.objects.create(name='Propel', price=200)
manufacturer = Manufacturer.objects.create(name='Giant', revenue='2000.000,-')
form = BikeManufacturerForm(instances=(bike, manufacturer))
self.assertFalse(form.is_bound)
def test_is_bound(self):
form = BikeManufacturerForm({})
self.assertTrue(form.is_bound)
def test_loads_instances_corretly(self):
bike = Bike.objects.create(name='Propel', price=200)
manufacturer = Manufacturer.objects.create(name='Giant', revenue='2000.000,-')
form = BikeManufacturerForm(instances=(bike, manufacturer))
self.assertEqual(form.manufacturer_instance, manufacturer)
self.assertEqual(form.bike_instance, bike)
def test_loads_initial_from_instances(self):
bike = Bike.objects.create(name='Propel', price=200)
manufacturer = Manufacturer.objects.create(name='Giant', revenue='2000.000,-')
form = BikeManufacturerForm(instances=(bike, manufacturer))
self.assertEqual(form.initial['bike__price'], 200)
self.assertEqual(form.initial['bike__name'], 'Propel')
def test_initial_overrides_instance(self):
bike = Bike.objects.create(name='Propel', price=200)
form = BikeManufacturerForm(instances=(bike,), initial={'bike__name': 'Oltre xr4'})
self.assertEqual(form.initial['bike__name'], 'Oltre xr4')
def test_valid_invalid(self):
form = BikeManufacturerForm({'bike__name': 'Propel', 'manufacturer__name': 'Giant',
'manufacturer__revenue': '30000,-'})
self.assertTrue(form.is_bound)
self.assertFalse(form.is_valid())
def test_valid_valid(self):
form = BikeManufacturerForm({'bike__name': 'Propel', 'manufacturer__name':
'Giant', 'manufacturer__revenue': '30000,-', 'bike__price': 300})
self.assertTrue(form.is_bound)
self.assertTrue(form.is_valid())
def test_saves_models(self):
form = BikeManufacturerForm({'bike__name': 'Propel', 'manufacturer__name': 'Giant',
'manufacturer__revenue': '30000,-', 'bike__price': 300})
form.is_valid()
result = form.save()
b = Bike.objects.first()
m = Manufacturer.objects.first()
self.assertEqual(b.price, 300)
self.assertEqual(m.name, 'Giant')
def tests_valid_with_auto_prefix_off(self):
data = {'brand': 'Giant', 'name': 'Propel', 'price': 300}
form = BikeWheelForm(data)
self.assertTrue(form.is_valid())
def test_returns_saved_models(self):
form = BikeManufacturerForm({'bike__name': 'Propel', 'manufacturer__name': 'Giant',
'manufacturer__revenue': '30000,-', 'bike__price': 300})
form.is_valid()
result = form.save()
b = Bike.objects.first()
m = Manufacturer.objects.first()
self.assertEqual(b, result['bike_instance'])
self.assertEqual(m, result['manufacturer_instance'])
def test_returns_saved_models_with_auto_prefix_off(self):
data = {'brand': 'Giant', 'name': 'Propel', 'price': 300}
form = BikeWheelForm(data)
result = form.save()
bike = result['bike_instance']
wheel = result['wheel_instance']
self.assertEqual(wheel.brand, 'Giant')
self.assertEqual(bike.price, 300)
self.assertEqual(bike.pk, 1)
self.assertEqual(wheel.pk, 1)
def test_updates_models(self):
bike = Bike.objects.create(name='Propel', price=200)
manufacturer = Manufacturer.objects.create(name='Giant', revenue='2000.000,-')
form = BikeManufacturerForm({'bike__name': 'Oltre xr4', 'manufacturer__name': 'Bianchi',
'manufacturer__revenue': '30000,-', 'bike__price': 300},
instances=(bike, manufacturer))
form.is_valid()
form.save()
b = Bike.objects.first()
m = Manufacturer.objects.first()
self.assertEqual(b.price, 300)
self.assertEqual(b.name, 'Oltre xr4')
self.assertEqual(m.name, 'Bianchi')
self.assertEqual(m.revenue, '30000,-')
self.assertEqual(b.pk, bike.pk)
self.assertEqual(m.pk, manufacturer.pk)
def test_saves_m2m(self):
countries = [Country.objects.create(name=f'country_{0}') for i in range(3)]
for c in countries:
c.save()
data = {'bike__name': 'Oltre xr4',
'bike__price': 300,
'bike__available_countries': [1,2,3],
'manufacturer__name': 'Bianchi',
'manufacturer__revenue': '30000,-'}
form = M2mBikeManufacturerForm(data)
result = form.save(commit=True)
bike = result['bike_instance']
self.assertTrue(len(bike.available_countries.all()), 3)
def test_saves_m2m_with_auto_prefix_off(self):
countries = [Country.objects.create(name=f'country_{0}') for i in range(3)]
for c in countries:
c.save()
data = {'brand': 'Giant', 'name': 'Propel', 'price': 300, 'available_countries': [1, 2, 3]}
form = BikeWheelForm(data)
result = form.save()
bike = result['bike_instance']
wheel = result['wheel_instance']
self.assertEqual(wheel.brand, 'Giant')
self.assertEqual(bike.price, 300)
self.assertEqual(bike.pk, 1)
self.assertEqual(wheel.pk, 1)
self.assertEqual(len(wheel.available_countries.all()), 3)
def test_initial_with_auto_prefix_off(self):
bike = Bike.objects.create(name='Propel', price=300)
bike.save()
wheel = Wheel.objects.create(brand='Giant')
wheel.save()
form = BikeWheelForm(instances=(bike, wheel))
self.assertEqual(form.initial['name'], 'Propel')
self.assertEqual(form.initial['brand'], 'Giant')
def test_required_modelforms_option(self):
for _ in range(2):
c = Country.objects.create(name=f'land_{_}')
c.save()
data = {'name': 'Giant', 'price': 2000, 'available_countries': [1, 2]}
form = BikeWheelForm(data)
self.assertTrue(BikeModelForm in form._meta.required)
self.assertTrue(form.is_valid())
def test_saves_valid_instances_only(self):
for _ in range(2):
c = Country.objects.create(name=f'land_{_}')
c.save()
data = {'name': 'Giant', 'price': 2000, 'available_countries': [1, 2]}
form = BikeWheelForm(data)
self.assertTrue(BikeModelForm in form._meta.required)
self.assertTrue(form.is_valid())
results = form.save()
saved_bike = results['bike_instance']
failed_wheel = results['wheel_instance']
self.assertEqual(len(Bike.objects.all()), 1)
self.assertEqual(len(Wheel.objects.all()), 0)
self.assertEqual(saved_bike.pk, 1)
self.assertEqual(failed_wheel, None)
# def test_overrides_save_method(self):
# raise Exception('TODO') | en | 0.469214 | # Create your tests here. # def test_overrides_save_method(self): # raise Exception('TODO') | 2.424208 | 2 |
Day_09/part1.py | Uklusi/AdventOfCode2017 | 0 | 6617475 | import re
result = 0
with open("input.txt", "r") as input:
for line in input:
line = line.strip()
stream = line
stream = re.sub(r"!.", "", stream)
stream = re.sub(r"<[^>]*>", "", stream)
stream = re.sub(r"[^{}]", "", stream)
value = 0
for c in stream:
if c == "{":
value += 1
result += value
elif c == "}":
value -= 1
with open("output1.txt", "w") as output:
output.write(str(result))
print(str(result))
| import re
result = 0
with open("input.txt", "r") as input:
for line in input:
line = line.strip()
stream = line
stream = re.sub(r"!.", "", stream)
stream = re.sub(r"<[^>]*>", "", stream)
stream = re.sub(r"[^{}]", "", stream)
value = 0
for c in stream:
if c == "{":
value += 1
result += value
elif c == "}":
value -= 1
with open("output1.txt", "w") as output:
output.write(str(result))
print(str(result))
| none | 1 | 3.248837 | 3 | |
crabageprediction/venv/Lib/site-packages/pandas/io/__init__.py | 13rianlucero/CrabAgePrediction | 28,899 | 6617476 | <filename>crabageprediction/venv/Lib/site-packages/pandas/io/__init__.py
from typing import TYPE_CHECKING
if TYPE_CHECKING:
# import modules that have public classes/functions
from pandas.io import (
formats,
json,
stata,
)
# and mark only those modules as public
__all__ = ["formats", "json", "stata"]
| <filename>crabageprediction/venv/Lib/site-packages/pandas/io/__init__.py
from typing import TYPE_CHECKING
if TYPE_CHECKING:
# import modules that have public classes/functions
from pandas.io import (
formats,
json,
stata,
)
# and mark only those modules as public
__all__ = ["formats", "json", "stata"]
| en | 0.92034 | # import modules that have public classes/functions # and mark only those modules as public | 1.477587 | 1 |
pointcloud/pcl_edge_detection.py | miroslavradojevic/python-snippets | 0 | 6617477 | <filename>pointcloud/pcl_edge_detection.py
#!/usr/bin/env python
import numpy as np
import argparse
import open3d as o3d
from os.path import exists, splitext, abspath, dirname, join
from utils import edge_detection
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("pcl_path", help="Path to point cloud file (.pcd | .xyz | .txt)", type=str)
parser.add_argument("-r", help="Neighborhood: radius", type=float, default=0.15)
parser.add_argument("-nn", help="Neighborhood: N nearest neighbors", type=int, default=30)
parser.add_argument("-t", help="Threshold", type=float, default=100)
args = parser.parse_args()
# Load points
if not exists(args.pcl_path):
exit(args.pcl_path + " could not be found")
ext = splitext(args.pcl_path)[-1].lower()
if ext is None or ext not in [".pcd", ".txt", ".xyz"]:
exit("Point-cloud file has wrong extension")
# read point cloud
pcd = o3d.io.read_point_cloud(args.pcl_path, format="pcd" if ext[1:] == "pcd" else "xyz")
# Method 1
pcd1, pcd2, pcd3 = edge_detection(pcd, args.r, args.nn, args.t)
print("{:d} ({:3.2f}%) after edge detection".format(len(pcd1.points), 100.0 * len(pcd1.points) / len(pcd.points)))
print("{:d} ({:3.2f}%) after edge detection".format(len(pcd2.points), 100.0 * len(pcd2.points) / len(pcd.points)))
print("{:d} ({:3.2f}%) after edge detection".format(len(pcd3.points), 100.0 * len(pcd3.points) / len(pcd.points)))
# save
outdir = dirname(abspath(args.pcl_path))
fname = splitext(args.pcl_path)[0]
o3d.io.write_point_cloud(join(outdir, fname + "_edge_detection1.pcd"), pcd1)
o3d.io.write_point_cloud(join(outdir, fname + "_edge_detection2.pcd"), pcd2)
o3d.io.write_point_cloud(join(outdir, fname + "_edge_detection3.pcd"), pcd3)
| <filename>pointcloud/pcl_edge_detection.py
#!/usr/bin/env python
import numpy as np
import argparse
import open3d as o3d
from os.path import exists, splitext, abspath, dirname, join
from utils import edge_detection
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("pcl_path", help="Path to point cloud file (.pcd | .xyz | .txt)", type=str)
parser.add_argument("-r", help="Neighborhood: radius", type=float, default=0.15)
parser.add_argument("-nn", help="Neighborhood: N nearest neighbors", type=int, default=30)
parser.add_argument("-t", help="Threshold", type=float, default=100)
args = parser.parse_args()
# Load points
if not exists(args.pcl_path):
exit(args.pcl_path + " could not be found")
ext = splitext(args.pcl_path)[-1].lower()
if ext is None or ext not in [".pcd", ".txt", ".xyz"]:
exit("Point-cloud file has wrong extension")
# read point cloud
pcd = o3d.io.read_point_cloud(args.pcl_path, format="pcd" if ext[1:] == "pcd" else "xyz")
# Method 1
pcd1, pcd2, pcd3 = edge_detection(pcd, args.r, args.nn, args.t)
print("{:d} ({:3.2f}%) after edge detection".format(len(pcd1.points), 100.0 * len(pcd1.points) / len(pcd.points)))
print("{:d} ({:3.2f}%) after edge detection".format(len(pcd2.points), 100.0 * len(pcd2.points) / len(pcd.points)))
print("{:d} ({:3.2f}%) after edge detection".format(len(pcd3.points), 100.0 * len(pcd3.points) / len(pcd.points)))
# save
outdir = dirname(abspath(args.pcl_path))
fname = splitext(args.pcl_path)[0]
o3d.io.write_point_cloud(join(outdir, fname + "_edge_detection1.pcd"), pcd1)
o3d.io.write_point_cloud(join(outdir, fname + "_edge_detection2.pcd"), pcd2)
o3d.io.write_point_cloud(join(outdir, fname + "_edge_detection3.pcd"), pcd3)
| en | 0.362437 | #!/usr/bin/env python # Load points # read point cloud # Method 1 # save | 2.848296 | 3 |
test/IECore/CSGImplicitSurfaceFunction.py | gcodebackups/cortex-vfx | 5 | 6617478 | <reponame>gcodebackups/cortex-vfx
##########################################################################
#
# Copyright (c) 2007, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
from IECore import *
class TestCSGImplicitSurfaceFunction(unittest.TestCase):
    """Tests CSG combinations (union/intersection/difference) of implicit surfaces.

    Each test builds a CSG of implicit-surface functions, polygonises it with
    marching cubes over a fixed bound and resolution, then checks the mesh's
    vertex-index count against bounds that were verified visually.
    """

    def testUnion(self):
        """Test implicit surface CSG union of two overlapping unit spheres."""
        sphere1 = SphereImplicitSurfaceFunctionV3ff(V3f(0, 0, 0), 1)
        sphere2 = SphereImplicitSurfaceFunctionV3ff(V3f(0, 1, 0), 1)
        csgFn = CSGImplicitSurfaceFunctionV3ff(sphere1, sphere2, CSGImplicitSurfaceFunctionV3ff.Mode.Union)
        builder = MeshPrimitiveBuilder()
        marcher = MarchingCubesf(csgFn, builder)
        marchMin = V3f(-2.5, -2.5, -2.5)
        marchMax = V3f(2.5, 2.5, 2.5)
        marchBound = Box3f(marchMin, marchMax)
        marchResolution = V3i(30, 30, 30)
        # March at an iso-value slightly below zero.
        marcher.march(marchBound, marchResolution, -0.000001)
        m = builder.mesh()
        # Expected range verified visually. assertGreater/assertLess replace
        # the deprecated unittest alias assert_ (removed in Python 3.12) and
        # produce better failure messages.
        self.assertGreater(len(m.vertexIds), 5700)
        self.assertLess(len(m.vertexIds), 5900)

    def testIntersection(self):
        """Test implicit surface CSG intersection (two spheres, then a plane)."""
        sphere1 = SphereImplicitSurfaceFunctionV3ff(V3f(0, 0, 0), 1)
        sphere2 = SphereImplicitSurfaceFunctionV3ff(V3f(0, 1, 0), 1)
        plane = PlaneImplicitSurfaceFunctionV3ff(V3f(1, 0, 0), 0.2)
        # Nested CSG: (sphere1 ∩ sphere2) ∩ plane half-space.
        csgFn1 = CSGImplicitSurfaceFunctionV3ff(sphere1, sphere2, CSGImplicitSurfaceFunctionV3ff.Mode.Intersection)
        csgFn2 = CSGImplicitSurfaceFunctionV3ff(csgFn1, plane, CSGImplicitSurfaceFunctionV3ff.Mode.Intersection)
        builder = MeshPrimitiveBuilder()
        marcher = MarchingCubesf(csgFn2, builder)
        marchMin = V3f(-2.5, -2.5, -2.5)
        marchMax = V3f(2.5, 2.5, 2.5)
        marchBound = Box3f(marchMin, marchMax)
        marchResolution = V3i(30, 30, 30)
        marcher.march(marchBound, marchResolution)
        m = builder.mesh()
        # Expected range verified visually.
        self.assertGreater(len(m.vertexIds), 850)
        self.assertLess(len(m.vertexIds), 950)

    def testDifference(self):
        """Test implicit surface CSG difference of two overlapping spheres."""
        sphere1 = SphereImplicitSurfaceFunctionV3ff(V3f(0, 0, 0), 1)
        sphere2 = SphereImplicitSurfaceFunctionV3ff(V3f(0, 1, 0), 1)
        csgFn = CSGImplicitSurfaceFunctionV3ff(sphere1, sphere2, CSGImplicitSurfaceFunctionV3ff.Mode.Difference)
        builder = MeshPrimitiveBuilder()
        marcher = MarchingCubesf(csgFn, builder)
        marchMin = V3f(-2.5, -2.5, -2.5)
        marchMax = V3f(2.5, 2.5, 2.5)
        marchBound = Box3f(marchMin, marchMax)
        marchResolution = V3i(30, 30, 30)
        marcher.march(marchBound, marchResolution)
        m = builder.mesh()
        # Expected range verified visually.
        self.assertGreater(len(m.vertexIds), 3600)
        self.assertLess(len(m.vertexIds), 3750)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| ##########################################################################
#
# Copyright (c) 2007, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
from IECore import *
class TestCSGImplicitSurfaceFunction( unittest.TestCase ) :
    """Tests CSG combinations of implicit surface functions.

    Each test polygonises a CSG result with marching cubes over a fixed
    bound/resolution and checks the mesh's vertex-index count against
    visually verified bounds.
    """

    def testUnion( self ) :
        """ Test implicit surface CSG union """
        # Two overlapping unit spheres, offset one unit along Y.
        sphere1 = SphereImplicitSurfaceFunctionV3ff( V3f(0,0,0), 1 )
        sphere2 = SphereImplicitSurfaceFunctionV3ff( V3f(0,1,0), 1 )
        csgFn = CSGImplicitSurfaceFunctionV3ff( sphere1, sphere2, CSGImplicitSurfaceFunctionV3ff.Mode.Union )
        builder = MeshPrimitiveBuilder()
        marcher = MarchingCubesf( csgFn, builder )
        marchMin = V3f(-2.5, -2.5, -2.5)
        marchMax = V3f( 2.5, 2.5, 2.5)
        marchBound = Box3f( marchMin, marchMax )
        marchResolution = V3i( 30, 30, 30 )
        # Iso-value slightly below zero.
        marcher.march( marchBound, marchResolution, -0.000001 )
        m = builder.mesh()
        # Verified visually
        self.assert_( len( m.vertexIds ) > 5700 )
        self.assert_( len( m.vertexIds ) < 5900 )

    def testIntersection( self ):
        """ Test implicit surface CSG intersection """
        sphere1 = SphereImplicitSurfaceFunctionV3ff( V3f(0,0,0), 1 )
        sphere2 = SphereImplicitSurfaceFunctionV3ff( V3f(0,1,0), 1 )
        plane = PlaneImplicitSurfaceFunctionV3ff( V3f(1,0,0), 0.2 )
        # Nested CSG: (sphere1 ∩ sphere2) ∩ plane half-space.
        csgFn1 = CSGImplicitSurfaceFunctionV3ff( sphere1, sphere2, CSGImplicitSurfaceFunctionV3ff.Mode.Intersection )
        csgFn2 = CSGImplicitSurfaceFunctionV3ff( csgFn1, plane, CSGImplicitSurfaceFunctionV3ff.Mode.Intersection )
        builder = MeshPrimitiveBuilder()
        marcher = MarchingCubesf( csgFn2, builder )
        marchMin = V3f(-2.5, -2.5, -2.5)
        marchMax = V3f( 2.5, 2.5, 2.5)
        marchBound = Box3f( marchMin, marchMax )
        marchResolution = V3i( 30, 30, 30 )
        marcher.march( marchBound, marchResolution )
        m = builder.mesh()
        # Verified visually
        self.assert_( len( m.vertexIds ) > 850 )
        self.assert_( len( m.vertexIds ) < 950 )

    def testDifference( self ):
        """ Test implicit surface CSG difference """
        sphere1 = SphereImplicitSurfaceFunctionV3ff( V3f(0,0,0), 1 )
        sphere2 = SphereImplicitSurfaceFunctionV3ff( V3f(0,1,0), 1 )
        csgFn = CSGImplicitSurfaceFunctionV3ff( sphere1, sphere2, CSGImplicitSurfaceFunctionV3ff.Mode.Difference )
        builder = MeshPrimitiveBuilder()
        marcher = MarchingCubesf( csgFn, builder )
        marchMin = V3f(-2.5, -2.5, -2.5)
        marchMax = V3f( 2.5, 2.5, 2.5)
        marchBound = Box3f( marchMin, marchMax )
        marchResolution = V3i( 30, 30, 30 )
        marcher.march( marchBound, marchResolution )
        m = builder.mesh()
        # Verified visually
        # NOTE(review): unittest.assert_ is a deprecated alias (removed in
        # Python 3.12); consider assertGreater/assertLess.
        self.assert_( len( m.vertexIds ) > 3600 )
        self.assert_( len( m.vertexIds ) < 3750 )
if __name__ == "__main__":
unittest.main() | en | 0.644094 | ########################################################################## # # Copyright (c) 2007, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Image Engine Design nor the names of any # other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## Test implicit surface CSG union # Verified visually Test implicit surface CSG intersection # Verified visually Test implicit surface CSG difference # Verified visually | 1.243019 | 1 |
# source: opskit_api/resources/admin/comments.py (repo yxxhero/opskit-api, id 6617479)
from flask_restful import Resource, reqparse
from flask import g, current_app
from opskit_api.models import Comment, User
import traceback
from opskit_api.common.login_helper import auth_decorator
class AdminComment(Resource):
    """Admin endpoints for moderating comments.

    All handlers require authentication (``auth_decorator``) and additionally
    check that the current user has the admin role (``user_role.code == 1``).
    Responses follow the project convention ``{"code": 0|1, "msg": ...}``.
    """

    method_decorators = [auth_decorator]

    def __init__(self):
        super().__init__()
        # flask_restful creates a fresh resource per request, so a fresh
        # parser here is safe; arguments are registered in each handler.
        self.parser = reqparse.RequestParser(bundle_errors=True)

    def get(self):
        """List comments with the given moderation state, newest first, paginated."""
        try:
            self.parser.add_argument(
                "page", type=int, required=False, location="args", default=1
            )
            self.parser.add_argument(
                "pagesize", type=int, required=False, location="args", default=10
            )
            # BUG FIX: "state" was declared type=str although its default is 0
            # and the put() handler treats it as int; declare it as int so the
            # Comment.state comparison is consistently typed.
            self.parser.add_argument(
                "state", type=int, required=False, location="args", default=0
            )
            args = self.parser.parse_args()
            username = g.username
            user_ins = User.query.filter_by(user_name=username).first()
            if user_ins and user_ins.user_role.code == 1:
                # Total count for the pager, then one page of comments.
                comment_total = (
                    Comment.query.filter(Comment.state == args.state)
                    .order_by(Comment.update_time.desc())
                    .count()
                )
                comment_ins_list = (
                    Comment.query.filter(Comment.state == args.state)
                    .order_by(Comment.update_time.desc())
                    .limit(args.pagesize)
                    .offset(args.pagesize * (args.page - 1))
                    .all()
                )
                comment_info_list = [
                    {
                        "username": item.user.user_name,
                        "id": item.id,
                        "content": item.content,
                        "prase_count": item.prase_count,
                        "title": item.note.title,
                        "avatar": item.user.user_avatar,
                        "create_time": item.create_time,
                        "update_time": item.update_time,
                        "state": item.state,
                        "note_type": item.note.note_type.value,
                    }
                    for item in comment_ins_list
                ]
            else:
                return {"code": 1, "msg": "无操作权限"}
        except Exception:
            current_app.logger.error(traceback.format_exc())
            # BUG FIX: the message said "文章" (article); this endpoint
            # fetches comments.
            return {"code": 1, "msg": "获取评论信息异常"}
        else:
            return {
                "code": 0,
                "msg": "请求成功",
                "data": comment_info_list,
                "total": comment_total,
            }

    def put(self):
        """Update one comment's moderation state by id."""
        self.parser.add_argument("id", type=str, required=True, location="json")
        self.parser.add_argument("state", type=int, required=True, location="json")
        args = self.parser.parse_args()
        try:
            username = g.username
            admin_user = User.query.filter_by(user_name=username).first()
            if admin_user and admin_user.user_role.code == 1:
                comment_ins = Comment.query.filter_by(id=args.id).first()
                if comment_ins:
                    comment_ins.state = args.state
                    comment_ins.update()
                else:
                    return {"code": 1, "msg": "评论不存在"}
            else:
                return {"code": 1, "msg": "无权限操作"}
        except Exception:
            current_app.logger.error(traceback.format_exc())
            return {"code": 1, "msg": "更新评论信息异常"}
        else:
            return {"code": 0, "msg": "更新成功"}

    def delete(self):
        """Delete one comment by id."""
        self.parser.add_argument("id", type=str, required=True, location="json")
        args = self.parser.parse_args()
        try:
            username = g.username
            admin_user = User.query.filter_by(user_name=username).first()
            if admin_user and admin_user.user_role.code == 1:
                comment_ins = Comment.query.filter_by(id=args.id).first()
                if comment_ins:
                    comment_ins.remove()
                else:
                    return {"code": 1, "msg": "评论不存在"}
            else:
                return {"code": 1, "msg": "无权限操作"}
        except Exception:
            current_app.logger.error(traceback.format_exc())
            # BUG FIX: messages were copy-pasted from put(); this handler
            # deletes, so report 删除 rather than 更新.
            return {"code": 1, "msg": "删除评论信息异常"}
        else:
            return {"code": 0, "msg": "删除成功"}
from flask_restful import Resource, reqparse
from flask import g, current_app
from opskit_api.models import Comment, User
import traceback
from opskit_api.common.login_helper import auth_decorator
class AdminComment(Resource):
    """Admin endpoints for moderating comments.

    All handlers require authentication (``auth_decorator``) and check the
    admin role (``user_role.code == 1``). Responses follow the project
    convention ``{"code": 0|1, "msg": ...}``.
    """

    method_decorators = [auth_decorator]

    def __init__(self):
        super().__init__()
        # Arguments are registered per handler on this shared parser.
        self.parser = reqparse.RequestParser(bundle_errors=True)

    def get(self):
        """List comments with the given moderation state, newest first, paginated."""
        try:
            self.parser.add_argument(
                "page", type=int, required=False, location="args", default=1
            )
            self.parser.add_argument(
                "pagesize", type=int, required=False, location="args", default=10
            )
            # NOTE(review): declared type=str but default is 0 and put() uses
            # int — confirm the intended type of "state".
            self.parser.add_argument(
                "state", type=str, required=False, location="args", default=0
            )
            args = self.parser.parse_args()
            username = g.username
            user_ins = User.query.filter_by(user_name=username).first()
            if user_ins and user_ins.user_role.code == 1:
                # Total count for the pager, then one page of comments.
                comment_total = (
                    Comment.query.filter(Comment.state == args.state)
                    .order_by(Comment.update_time.desc())
                    .count()
                )
                comment_ins_list = (
                    Comment.query.filter(Comment.state == args.state)
                    .order_by(Comment.update_time.desc())
                    .limit(args.pagesize)
                    .offset(args.pagesize * (args.page - 1))
                    .all()
                )
                comment_info_list = [
                    {
                        "username": item.user.user_name,
                        "id": item.id,
                        "content": item.content,
                        "prase_count": item.prase_count,
                        "title": item.note.title,
                        "avatar": item.user.user_avatar,
                        "create_time": item.create_time,
                        "update_time": item.update_time,
                        "state": item.state,
                        "note_type": item.note.note_type.value,
                    }
                    for item in comment_ins_list
                ]
            else:
                return {"code": 1, "msg": "无操作权限"}
        except Exception:
            current_app.logger.error(traceback.format_exc())
            return {"code": 1, "msg": "获取文章信息异常"}
        else:
            return {
                "code": 0,
                "msg": "请求成功",
                "data": comment_info_list,
                "total": comment_total,
            }

    def put(self):
        """Update one comment's moderation state by id."""
        self.parser.add_argument("id", type=str, required=True, location="json")
        self.parser.add_argument("state", type=int, required=True, location="json")
        args = self.parser.parse_args()
        try:
            username = g.username
            admin_user = User.query.filter_by(user_name=username).first()
            if admin_user and admin_user.user_role.code == 1:
                comment_ins = Comment.query.filter_by(id=args.id).first()
                if comment_ins:
                    comment_ins.state = args.state
                    comment_ins.update()
                else:
                    return {"code": 1, "msg": "评论不存在"}
            else:
                return {"code": 1, "msg": "无权限操作"}
        except Exception:
            current_app.logger.error(traceback.format_exc())
            return {"code": 1, "msg": "更新评论信息异常"}
        else:
            return {"code": 0, "msg": "更新成功"}

    def delete(self):
        """Delete one comment by id."""
        self.parser.add_argument("id", type=str, required=True, location="json")
        args = self.parser.parse_args()
        try:
            username = g.username
            admin_user = User.query.filter_by(user_name=username).first()
            if admin_user and admin_user.user_role.code == 1:
                comment_ins = Comment.query.filter_by(id=args.id).first()
                if comment_ins:
                    comment_ins.remove()
                else:
                    return {"code": 1, "msg": "评论不存在"}
            else:
                return {"code": 1, "msg": "无权限操作"}
        except Exception:
            current_app.logger.error(traceback.format_exc())
            # NOTE(review): message wording says "更新" (update) although this
            # is the delete handler — looks copy-pasted; confirm.
            return {"code": 1, "msg": "更新评论信息异常"}
        else:
            return {"code": 0, "msg": "更新成功"}
| none | 1 | 2.226766 | 2 | |
cc_backend_lib/clients/countries_client.py | prio-data/cc_backend_lib | 0 | 6617480 |
import json
import pydantic
from pymonad.either import Either, Right, Left
from cc_backend_lib import models
from cc_backend_lib.errors import http_error
from . import model_api_client
class CountriesClient(model_api_client.ModelApiClient[models.country.Country, models.country.CountryPropertiesList]):
    """API client for country resources.

    Turns raw response bytes into pydantic country models, wrapped in a
    pymonad ``Either``: ``Right(model)`` on success, or ``Left(HttpError)``
    with code 500 when the payload is not valid JSON or fails validation.
    """

    def deserialize_detail(self, data: bytes) -> Either[http_error.HttpError, models.country.Country]:
        """Parse a single-country JSON payload."""
        try:
            country = models.country.Country(**json.loads(data))
        except (json.JSONDecodeError, pydantic.ValidationError) as err:
            return Left(http_error.HttpError(message=str(err), http_code=500))
        return Right(country)

    def deserialize_list(self, data: bytes) -> Either[http_error.HttpError, models.country.CountryPropertiesList]:
        """Parse a country-list JSON payload."""
        try:
            country_list = models.country.CountryPropertiesList(countries=json.loads(data))
        except (json.JSONDecodeError, pydantic.ValidationError) as err:
            return Left(http_error.HttpError(message=str(err), http_code=500))
        return Right(country_list)
|
import json
import pydantic
from pymonad.either import Either, Right, Left
from cc_backend_lib import models
from cc_backend_lib.errors import http_error
from . import model_api_client
class CountriesClient(model_api_client.ModelApiClient[models.country.Country, models.country.CountryPropertiesList]):
    """API client for country resources.

    Deserializes raw response bytes into pydantic country models, wrapping
    the outcome in a pymonad ``Either`` (``Right`` on success, ``Left`` with
    an HTTP-500 error on malformed payloads).
    """

    def deserialize_detail(self, data: bytes) -> Either[http_error.HttpError, models.country.Country]:
        """Parse a single-country JSON payload."""
        try:
            return Right(models.country.Country(**json.loads(data)))
        except (json.JSONDecodeError, pydantic.ValidationError) as err:
            return Left(http_error.HttpError(message = str(err), http_code = 500))

    def deserialize_list(self, data: bytes) -> Either[http_error.HttpError, models.country.CountryPropertiesList]:
        """Parse a country-list JSON payload."""
        try:
            return Right(models.country.CountryPropertiesList(
                countries = json.loads(data)
            ))
        except (json.JSONDecodeError, pydantic.ValidationError) as err:
            return Left(http_error.HttpError(message = str(err), http_code = 500))
| none | 1 | 2.452846 | 2 | |
# source: refbee/query.py (repo kmdn/RefBee, id 6617481)
from refbee.platforms import platform_names
from refbee.platforms.wikidata import platform_ids_for_person
from refbee.disambiguation import disambiguate_titles
from refbee.fetching import get_titles_parallel as get_titles
def query(wd_person_id):
    """Aggregate publication titles for a Wikidata person across platforms.

    Returns ``{person: {paper_id: {"title": ..., <platform>: flag}}}`` where
    the per-platform flag is 1 (title found on that platform), 0 (platform
    queried but the title was absent) or -1 (no platform id is known for the
    person, so the platform could not be queried).
    """
    # Resolve the person's per-platform identifiers from the Wikidata entry.
    platform_ids = platform_ids_for_person(wd_person_id)
    # Query every platform endpoint for this person's publication titles.
    titles_by_person = get_titles(persons_dict=platform_ids)

    result = {}
    for person, titles_by_platform in titles_by_person.items():
        papers = result.setdefault(person, {})
        for platform, titles in titles_by_platform.items():
            for title in titles:
                # The title itself doubles as the paper identifier.
                entry = papers.setdefault(title, {})
                entry["title"] = title
                entry[platform] = 1

    # Fill in 0 for platforms that were queried but did not list the title,
    # then -1 for platforms where the person has no identifier (-1 wins).
    all_platforms = set(platform_names)
    for person, papers in result.items():
        platforms_without_id = all_platforms - platform_ids[person].keys()
        for entry in papers.values():
            for platform in all_platforms - entry.keys():
                entry[platform] = 0
            for platform in platforms_without_id:
                entry[platform] = -1

    # Merge near-duplicate titles per person.
    for person in result:
        result[person] = disambiguate_titles(result[person])
    return result
| from refbee.platforms import platform_names
from refbee.platforms.wikidata import platform_ids_for_person
from refbee.disambiguation import disambiguate_titles
from refbee.fetching import get_titles_parallel as get_titles
def query(wd_person_id):
    """Aggregate publication titles for a Wikidata person across platforms.

    Returns ``{person: {paper_id: {"title": ..., <platform>: 1|0|-1}}}``:
    1 = title found on the platform, 0 = platform queried but title absent,
    -1 = no platform id known for the person.
    """
    # begin by fetching platform specific ids for person from wikidata entry
    persons_dict = platform_ids_for_person(wd_person_id)
    # Now we've got our IDs - time to query the other endpoints
    grouped_titles_dict = get_titles(persons_dict=persons_dict)
    ret_json = {}
    for person in grouped_titles_dict:
        # new person - add it to our data structure <3
        papers_dict = ret_json.get(person, {})
        # for default behaviour
        ret_json[person] = papers_dict
        # get a person along with its currently-associated titles
        person_titles_dict = grouped_titles_dict[person]
        for platform in person_titles_dict:
            for title in person_titles_dict[platform]:
                # the title itself serves as the paper identifier
                paper_id = title
                # add info for the specific paper
                paper_dict = papers_dict.get(paper_id, {})
                papers_dict[paper_id] = paper_dict
                paper_dict["title"] = title
                paper_dict[platform] = 1
    # add 0-count platforms to the returned JSON
    for person in ret_json:
        for paper in ret_json[person]:
            for platform_not_found in set.difference(set(platform_names), set(ret_json[person][paper].keys())):
                ret_json[person][paper][platform_not_found] = 0
            # -1 marks platforms for which the person has no id at all
            for platform_without_id in set.difference(set(platform_names), persons_dict[person].keys()):
                ret_json[person][paper][platform_without_id] = -1
    # disambiguate titles
    for person in ret_json:
        ret_json[person] = disambiguate_titles(ret_json[person])
    return ret_json
return ret_json | en | 0.842247 | # begin by fetching platform specific ids for person from wikidata entry # Now we've got our IDs - time to query the other endpoints # new person - add it to our data structure <3 # for default behaviour # get a person along with its currently-associated titles # print("Platform: ", platform) # print("Paper: ", title) # add info for the specific paper # add 0-count platforms to the returned JSON # disambiguate titles #print("Returned JSON: ", ret_json) | 2.69696 | 3 |
# source: util_scripts/web_scrape.py (repo JinghuiZhao/dogfind, id 6617482)
from selenium import webdriver
from bs4 import BeautifulSoup
import requests
import random
import time
import pandas as pd
import logging
import os, sys
import urllib
import shutil
def get_hrefs(urls):
    """
    The webpage scraper for the list of all dogs. Designed for adoptapet.com.
    param:
        urls: the list of links to result pages holding dog listings
    return:
        hrefs: absolute detail-page URLs for all the dogs found
    """
    hrefs = []
    # NOTE(review): machine-specific chromedriver path; consider making it
    # configurable.
    driver = webdriver.Chrome(executable_path="/Users/elainezhao/Desktop/puppylover_test/chromedriver")
    try:
        for url in urls:
            driver.get(url)
            soup = BeautifulSoup(driver.page_source, 'html.parser')
            # Each result card links to the dog's detail page.
            for link in soup.find_all("a", {"class": "pet__item__link"}):
                href = link.get('href')
                hrefs.append('https://www.adoptapet.com' + href)
    finally:
        # BUG FIX: quit() in a finally block so the browser process is shut
        # down even if scraping raises (close() only closed one window and a
        # mid-loop exception leaked the driver entirely).
        driver.quit()
    return hrefs
def get_photo(hrefs):
    """
    Input: a list of hrefs for different dogs
    Output: list of titles and list of photo urls for those dogs

    Pages lacking a title or photo yield a single-space placeholder so the
    two result lists stay aligned with ``hrefs``.
    """
    titles = []
    photo_urls = []
    driver = webdriver.Chrome(executable_path="/Users/elainezhao/Desktop/puppylover_test/chromedriver")
    try:
        for i, href in enumerate(hrefs):
            driver.get(href)  # use selenium so JS-rendered content is present
            soup = BeautifulSoup(driver.page_source, 'html.parser')
            # BUG FIX: the bare ``except:`` clauses also swallowed
            # KeyboardInterrupt/SystemExit; catch only expected parse failures.
            try:
                titles.append(soup.title.text)
            except AttributeError:
                titles.append(' ')
            try:
                div = soup.find('div', {"data-gallery": "gallery-image"})
                photo_urls.append(div.find('img')['src'])
            except (AttributeError, KeyError, TypeError):
                photo_urls.append(' ')
    finally:
        # BUG FIX: the driver was never shut down, leaking a browser process
        # per call.
        driver.quit()
    return titles, photo_urls
def download_image(link, name, web_site, path):
    """
    This function is to build a file folder that contains the downloaded picture.
    param:
        link: the list of image url, for which should only be the url.
        name: the name of the sub-directory.
        web_site: the website's name.  # NOTE(review): unused by the body
        path: base directory the sub-directory name is appended to.
    return:
        Nothing.
    """
    # NOTE(review): plain string concatenation, not os.path.join — with the
    # caller's path '/Users/.../dog_img' and name 'adoptapet' this yields
    # '/Users/.../dog_imgadoptapet'; confirm that is intended.
    path += name
    # build a folder (wiping any previous run's output first)
    if os.path.exists(path):
        shutil.rmtree(path)
    try:
        os.mkdir(path)
    except OSError:
        logging.info("Creation of the directory %s failed" % path)
    # iterate through all url link
    for i, url in enumerate(link):
        # save the image; a browser User-Agent avoids trivial bot blocking
        request = urllib.request.Request(
            url, headers={'User-Agent': 'Mozilla/5.0'})
        img_name = str(i)+".jpg"
        with urllib.request.urlopen(request) as response, open(path+"/"+img_name, 'wb') as out_file:
            shutil.copyfileobj(response, out_file)
    # return a notice.
    logging.info("Store all images from link.")
def get_urls(url, pages=2):
    """Build paginated search URLs from a base query URL.

    param:
        url: base search URL (already containing query parameters).
        pages: number of result pages to generate; defaults to 2, matching
            the original hard-coded ``range(1, 3)`` behaviour.
    return:
        List of URLs with ``&page=<n>#`` appended for n = 1..pages.
    """
    return [url + f'&page={i}#' for i in range(1, pages + 1)]
def main():
    """Scrape adoptapet search results for the search URL given on argv[1].

    Collects detail-page links, extracts titles and photo URLs, writes the
    table to ``adoptapet.csv`` and downloads every photo.
    """
    url = str(sys.argv[1])
    print(url)
    urls = get_urls(url)
    hrefs = get_hrefs(urls)
    name = 'adoptapet'
    web_site = 'adoptapet'
    # NOTE(review): hard-coded, machine-specific output directory.
    path = '/Users/elainezhao/Desktop/dog_img'
    titles, photo_urls = get_photo(hrefs)
    df = pd.DataFrame({'links': hrefs, 'titles': titles, 'photo_url': photo_urls})
    # Drop rows where no photo was found (placeholder is a single space).
    df = df[df.photo_url != ' '].reset_index()
    df.to_csv('adoptapet.csv', index=False)
    download_image(df.photo_url, name, web_site, path)
if __name__ == "__main__":
    # execute only if run as a script
    main()
| from selenium import webdriver
from bs4 import BeautifulSoup
import requests
import random
import time
import pandas as pd
import logging
import os, sys
import urllib
import shutil
def get_hrefs(urls):
    """
    The webpage scrapper for list of all dogs. Design for adoptapet.com
    param:
        urls: the list of links to result pages holding dog listings
    return:
        hrefs: absolute detail-page URLs for all the dogs found.
    """
    hrefs = []
    # NOTE(review): machine-specific chromedriver path.
    driver = webdriver.Chrome(executable_path="/Users/elainezhao/Desktop/puppylover_test/chromedriver")
    for url in urls:
        driver.get(url)
        soup = BeautifulSoup(driver.page_source, 'html.parser')
        # Each result card links to the dog's detail page.
        for link in soup.find_all("a", {"class": "pet__item__link"}):
            href = link.get('href')
            hrefs.append('https://www.adoptapet.com' + href)
    driver.close()
    return hrefs
def get_photo(hrefs):
    """
    Input: a list of hrefs for different dogs
    Output: list of titles and list of photo urls for those dogs

    Pages lacking a title or photo yield a single-space placeholder so the
    two result lists stay aligned with ``hrefs``.
    """
    titles = []
    photo_urls = []
    driver = webdriver.Chrome(executable_path="/Users/elainezhao/Desktop/puppylover_test/chromedriver")
    for i, href in enumerate(hrefs):
        driver.get(href)  # use selenium
        soup = BeautifulSoup(driver.page_source, 'html.parser')
        # NOTE(review): bare except also swallows KeyboardInterrupt; consider
        # narrowing, and the driver is never quit (process leak).
        try:
            title = soup.title.text
            titles.append(title)
        except:
            titles.append(' ')
        try:
            div = soup.find('div', {"data-gallery": "gallery-image"})
            photo_url = div.find('img')['src']
            photo_urls.append(photo_url)
        except:
            photo_urls.append(' ')
    return titles, photo_urls
def download_image(link, name, web_site, path):
    """
    This function is to build a file folder that contains the downloaded picture.
    param:
        link: the list of image url, for which should only be the url.
        name: the name of the sub-directory.
        web_site: the website's name.  # NOTE(review): unused by the body
        path: base directory the sub-directory name is appended to.
    return:
        Nothing.
    """
    # NOTE(review): string concatenation, not os.path.join — confirm the
    # resulting directory name is intended.
    path += name
    # build a folder (wiping any previous run's output first)
    if os.path.exists(path):
        shutil.rmtree(path)
    try:
        os.mkdir(path)
    except OSError:
        logging.info("Creation of the directory %s failed" % path)
    # iterate through all url link
    for i, url in enumerate(link):
        # save the image; a browser User-Agent avoids trivial bot blocking
        request = urllib.request.Request(
            url, headers={'User-Agent': 'Mozilla/5.0'})
        img_name = str(i)+".jpg"
        with urllib.request.urlopen(request) as response, open(path+"/"+img_name, 'wb') as out_file:
            shutil.copyfileobj(response, out_file)
    # return a notice.
    logging.info("Store all images from link.")
def get_urls(url):
    """Return the first two paginated search URLs derived from *url*."""
    # Pages 1 and 2, each with the site's '&page=N#' suffix appended.
    return [url + f'&page={page}#' for page in range(1, 3)]
def main():
    """Scrape adoptapet search results for the search URL given on argv[1].

    Collects detail-page links, extracts titles and photo URLs, writes the
    table to ``adoptapet.csv`` and downloads every photo.
    """
    url = str(sys.argv[1])
    print(url)
    urls = get_urls(url)
    hrefs = get_hrefs(urls)
    name = 'adoptapet'
    web_site = 'adoptapet'
    # NOTE(review): hard-coded, machine-specific output directory.
    path = '/Users/elainezhao/Desktop/dog_img'
    titles, photo_urls = get_photo(hrefs)
    df = pd.DataFrame({'links': hrefs, 'titles': titles, 'photo_url': photo_urls})
    # Drop rows where no photo was found (placeholder is a single space).
    df = df[df.photo_url != ' '].reset_index()
    df.to_csv('adoptapet.csv', index=False)
    download_image(df.photo_url, name, web_site, path)
if __name__ == "__main__":
# execute only if run as a script
main() | en | 0.859822 | The webpage scrapper for list of all dogs. Design for adoptapet.com param: url: the list of links to webpage holding dog's infor return: hrefs: hrefs for all the dogs. Input: a list of hrefs for different dogs Output: list of titles and list of photo urls for those dogs # use selenium This function is to build a file folder that contains the downloaded picture. param: link: the list of image url, for which should only be the url. name: the name of the sub-directory. web_site: the website’s name. return: Nothing. # build a folder # iterate through all url link # save the image # return a notice. #') # execute only if run as a script | 3.210128 | 3 |
# source: Firmware/tools/pattern.py (repo davidbuzz/SiK, id 6617483)
#!/usr/bin/env python
# reflect input bytes to output, printing as it goes
# Sends a repeating test pattern to a serial device, optionally echoing
# whatever comes back; used to exercise a radio/serial link.

import serial, sys, optparse, time

parser = optparse.OptionParser("pattern")
parser.add_option("--baudrate", type='int', default=57600, help='baud rate')
parser.add_option("--delay", type='float', default=0.0, help='delay between lines')
parser.add_option("--pattern", type='str', default='0123456789', help='pattern to send')
parser.add_option("--echo", action='store_true', default=False, help='echo any bytes received')
parser.add_option("--crlf", action='store_true', default=False, help='add crlf')
parser.add_option("--counter", action='store_true', default=False, help='add counter')
parser.add_option("--rtscts", action='store_true', default=False, help='enable rtscts')
parser.add_option("--dsrdtr", action='store_true', default=False, help='enable dsrdtr')
parser.add_option("--xonxoff", action='store_true', default=False, help='enable xonxoff')

opts, args = parser.parse_args()

# Exactly one positional argument: the serial device to open.
if len(args) != 1:
    print("usage: pattern.py <DEVICE>")
    sys.exit(1)

device = args[0]

# timeout=0 gives non-blocking reads so the echo path never stalls sending.
port = serial.Serial(device, opts.baudrate, timeout=0,
                     dsrdtr=opts.dsrdtr, rtscts=opts.rtscts, xonxoff=opts.xonxoff)

counter = 0
while True:
    try:
        buf = opts.pattern[:]
        if opts.counter:
            # Two-digit rolling counter appended to each line.
            buf += "%02u" % (counter % 100)
        if opts.crlf:
            buf += '\r\n'
        # BUG FIX: pyserial on Python 3 requires bytes, not str; the script
        # already uses Python 3 print(), so encode before writing.
        port.write(buf.encode('utf-8'))
        port.flush()
        if opts.echo:
            try:
                count = port.inWaiting()
                if count > 0:
                    data = port.read(count)
                    if len(data) > 0:
                        # BUG FIX: read() returns bytes on Python 3; decode
                        # leniently before writing to the text stdout stream.
                        sys.stdout.write(data.decode('utf-8', errors='replace'))
                        sys.stdout.flush()
            except Exception:
                pass
        if opts.delay > 0.0:
            time.sleep(opts.delay)
        counter += 1
    except KeyboardInterrupt:
        sys.exit(0)
| #!/usr/bin/env python
# reflect input bytes to output, printing as it goes
import serial, sys, optparse, time
parser = optparse.OptionParser("pattern")
parser.add_option("--baudrate", type='int', default=57600, help='baud rate')
parser.add_option("--delay", type='float', default=0.0, help='delay between lines')
parser.add_option("--pattern", type='str', default='0123456789', help='pattern to send')
parser.add_option("--echo", action='store_true', default=False, help='echo any bytes received')
parser.add_option("--crlf", action='store_true', default=False, help='add crlf')
parser.add_option("--counter", action='store_true', default=False, help='add counter')
parser.add_option("--rtscts", action='store_true', default=False, help='enable rtscts')
parser.add_option("--dsrdtr", action='store_true', default=False, help='enable dsrdtr')
parser.add_option("--xonxoff", action='store_true', default=False, help='enable xonxoff')
opts, args = parser.parse_args()
if len(args) != 1:
print("usage: pattern.py <DEVICE>")
sys.exit(1)
device = args[0]
port = serial.Serial(device, opts.baudrate, timeout=0,
dsrdtr=opts.dsrdtr, rtscts=opts.rtscts, xonxoff=opts.xonxoff)
counter = 0
while True:
try:
buf = opts.pattern[:]
if opts.counter:
buf += "%02u" % (counter % 100)
if opts.crlf:
buf += '\r\n'
port.write(buf)
port.flush()
if opts.echo:
try:
count = port.inWaiting()
if count > 0:
buf = port.read(count)
if len(buf) > 0:
sys.stdout.write(buf)
sys.stdout.flush()
except Exception:
pass
if opts.delay > 0.0:
time.sleep(opts.delay)
counter += 1
except KeyboardInterrupt:
sys.exit(0)
| en | 0.725654 | #!/usr/bin/env python # reflect input bytes to output, printing as it goes | 2.716729 | 3 |
# source: dms/schema.py (repo ambrozic/dms, id 6617484)
from dms import state
from dms.ds import Field, Meta
from dms.orm import FNS
class Schema:
    """Admin schema wrapper around one database table.

    Combines the table's columns with the admin ``Meta`` configuration into
    ``Field`` descriptors, built lazily on first access and then cached.
    """

    def __init__(self, name: str, meta: Meta):
        self.name: str = name
        self.meta = meta
        # First primary-key column of the underlying table.
        self.pk = list(state.items.table(name=self.name).primary_key.columns)[0]
        # Cache for the lazily built field mapping.
        self._fields = None

    @property
    def fields(self):
        """Mapping of column name -> Field, built once and cached."""
        if self._fields is None:
            self._fields = {}
            table = state.items.table(name=self.name)
            for index, column in enumerate(table.columns):
                self._fields[column.name] = self._build_field(column, index)
        return self._fields

    def _build_field(self, column, index):
        """Construct the Field descriptor for a single table column."""
        name = column.name
        type_ = column.type.__class__.__name__.lower()
        default = column.default or FNS.get(self.meta.defaults.get(name))
        onupdate = column.onupdate or FNS.get(self.meta.updates.get(name))
        list_display = self.meta.list_display
        # Columns named in list_display keep their configured order; the
        # remaining columns are placed after them in table-column order.
        if name in list_display:
            position = list_display.index(name)
        else:
            position = len(list_display) + index
        return Field(
            name=name,
            pos=position,
            label=column._label,
            type=type_,
            index=column.index,
            unique=column.unique,
            default=default,
            onupdate=onupdate,
            choices=getattr(column.type, "choices", None),
            max_length=getattr(column.type, "length", None),
            is_nullable=column.nullable,
            is_primary_key=column.primary_key,
            is_required=not column.nullable and default is None,
            is_clean=not hasattr(self.meta, f"_{name}"),
            is_excluded=name in self.meta.exclude,
            is_list_display=bool(set(list_display) & {name, f"_{name}"}),
            is_readonly=name in self.meta.readonly_fields,
        )

    @property
    def field_names(self):
        """Field names ordered by display position."""
        ordered = sorted(self.fields.values(), key=lambda field: field.pos)
        return tuple(field.name for field in ordered)

    def __repr__(self) -> str:
        return self.__str__()

    def __str__(self) -> str:
        return f"<{self.__class__.__name__} name={self.name}>"
| from dms import state
from dms.ds import Field, Meta
from dms.orm import FNS
class Schema:
    """Admin schema wrapper around one database table.

    Combines the table's columns with the admin ``Meta`` configuration into
    ``Field`` descriptors, built lazily on first access and then cached.
    """

    def __init__(self, name: str, meta: Meta):
        self.name: str = name
        self.meta = meta
        # First primary-key column of the underlying table.
        self.pk = list(state.items.table(name=self.name).primary_key.columns)[0]
        # Cache for the lazily built field mapping.
        self._fields = None

    @property
    def fields(self):
        """Mapping of column name -> Field, built once and cached."""
        if self._fields is None:
            self._fields = {}
            table = state.items.table(name=self.name)
            for i, c in enumerate(table.columns):
                # Lower-cased column type class name, e.g. "integer", "string".
                type_ = c.type.__class__.__name__.lower()
                # Column-level default/onupdate win over Meta-configured ones.
                default = c.default or FNS.get(self.meta.defaults.get(c.name))
                onupdate = c.onupdate or FNS.get(self.meta.updates.get(c.name))
                ld = self.meta.list_display
                # Columns in list_display keep their configured order; the
                # rest are placed after them in table-column order.
                pos = ld.index(c.name) if c.name in ld else len(ld) + i
                self._fields[c.name] = Field(
                    name=c.name,
                    pos=pos,
                    label=c._label,
                    type=type_,
                    index=c.index,
                    unique=c.unique,
                    default=default,
                    onupdate=onupdate,
                    choices=getattr(c.type, "choices", None),
                    max_length=getattr(c.type, "length", None),
                    is_nullable=c.nullable,
                    is_primary_key=c.primary_key,
                    is_required=not c.nullable and default is None,
                    is_clean=not hasattr(self.meta, f"_{c.name}"),
                    is_excluded=c.name in self.meta.exclude,
                    is_list_display=bool(set(ld) & {c.name, f"_{c.name}"}),
                    is_readonly=c.name in self.meta.readonly_fields,
                )
        return self._fields

    @property
    def field_names(self):
        """Field names ordered by display position."""
        return tuple(f.name for f in sorted(self.fields.values(), key=lambda o: o.pos))

    def __repr__(self) -> str:
        return self.__str__()

    def __str__(self) -> str:
        return f"<{self.__class__.__name__} name={self.name}>"
jobs/migrations/0001_initial.py | masssoud/hr-plus | 0 | 6617485 | # Generated by Django 3.0.4 on 2020-03-09 10:01
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import jobs.validators.mobile_validator
import jobs.validators.pdf_validator
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Applicant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('full_name', models.CharField(max_length=200)),
('email', models.EmailField(max_length=254)),
('mobile', models.CharField(max_length=11, validators=[jobs.validators.mobile_validator.validate_mobile_number])),
('description', models.TextField(blank=True, default='')),
('status', models.CharField(choices=[('APL', 'Applied'), ('UE', 'Under Evaluation'), ('AF', 'Accepted for First Interview'), ('AS', 'Accepted for Second Interview'), ('AT', 'Accepted for Third Interview'), ('RJ', 'Rejected'), ('OF', 'Offer'), ('CL', 'Canceled')], default='APL', max_length=4)),
('picture', models.ImageField(blank=True, null=True, upload_to='media/%Y/%m/%d/')),
('cv', models.FileField(null=True, upload_to='media/%Y/%m/%d/', validators=[jobs.validators.pdf_validator.validate_pdf, django.core.validators.FileExtensionValidator(['pdf'])])),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'applicants',
},
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('description', models.TextField(blank=True, default='')),
],
options={
'verbose_name_plural': 'Categories',
'db_table': 'categories',
},
),
migrations.CreateModel(
name='JobPosting',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('is_open', models.BooleanField(db_index=True, default=True)),
('description', models.TextField(blank=True, default='')),
('qualifications', models.TextField(blank=True, default='')),
('requirements', models.TextField(blank=True, default='')),
('good_to_have', models.TextField(blank=True, default='')),
('benefits', models.TextField(blank=True, default='')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='jobs.Category')),
],
options={
'db_table': 'job_postings',
},
),
migrations.CreateModel(
name='ApplicantHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(choices=[('APL', 'Applied'), ('UE', 'Under Evaluation'), ('AF', 'Accepted for First Interview'), ('AS', 'Accepted for Second Interview'), ('AT', 'Accepted for Third Interview'), ('RJ', 'Rejected'), ('OF', 'Offer'), ('CL', 'Canceled')], default='APL', max_length=4)),
('created_at', models.DateTimeField(auto_now_add=True)),
('applicant', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='jobs.Applicant')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'applicants_history',
},
),
migrations.AddField(
model_name='applicant',
name='job_posting',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='jobs.JobPosting'),
),
]
| # Generated by Django 3.0.4 on 2020-03-09 10:01
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import jobs.validators.mobile_validator
import jobs.validators.pdf_validator
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Applicant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('full_name', models.CharField(max_length=200)),
('email', models.EmailField(max_length=254)),
('mobile', models.CharField(max_length=11, validators=[jobs.validators.mobile_validator.validate_mobile_number])),
('description', models.TextField(blank=True, default='')),
('status', models.CharField(choices=[('APL', 'Applied'), ('UE', 'Under Evaluation'), ('AF', 'Accepted for First Interview'), ('AS', 'Accepted for Second Interview'), ('AT', 'Accepted for Third Interview'), ('RJ', 'Rejected'), ('OF', 'Offer'), ('CL', 'Canceled')], default='APL', max_length=4)),
('picture', models.ImageField(blank=True, null=True, upload_to='media/%Y/%m/%d/')),
('cv', models.FileField(null=True, upload_to='media/%Y/%m/%d/', validators=[jobs.validators.pdf_validator.validate_pdf, django.core.validators.FileExtensionValidator(['pdf'])])),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'applicants',
},
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('description', models.TextField(blank=True, default='')),
],
options={
'verbose_name_plural': 'Categories',
'db_table': 'categories',
},
),
migrations.CreateModel(
name='JobPosting',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('is_open', models.BooleanField(db_index=True, default=True)),
('description', models.TextField(blank=True, default='')),
('qualifications', models.TextField(blank=True, default='')),
('requirements', models.TextField(blank=True, default='')),
('good_to_have', models.TextField(blank=True, default='')),
('benefits', models.TextField(blank=True, default='')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='jobs.Category')),
],
options={
'db_table': 'job_postings',
},
),
migrations.CreateModel(
name='ApplicantHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(choices=[('APL', 'Applied'), ('UE', 'Under Evaluation'), ('AF', 'Accepted for First Interview'), ('AS', 'Accepted for Second Interview'), ('AT', 'Accepted for Third Interview'), ('RJ', 'Rejected'), ('OF', 'Offer'), ('CL', 'Canceled')], default='APL', max_length=4)),
('created_at', models.DateTimeField(auto_now_add=True)),
('applicant', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='jobs.Applicant')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'applicants_history',
},
),
migrations.AddField(
model_name='applicant',
name='job_posting',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='jobs.JobPosting'),
),
]
| en | 0.715598 | # Generated by Django 3.0.4 on 2020-03-09 10:01 | 1.67988 | 2 |
atests/libraries/RobotSqliteDatabase.py | ravihuang/rfdbbot | 42 | 6617486 | <reponame>ravihuang/rfdbbot<filename>atests/libraries/RobotSqliteDatabase.py
import sqlite3
class RobotSqliteDatabase:
def __init__(self):
self._connection = None
def connect_to_database(self, db_file_path):
self._connection = sqlite3.connect(db_file_path)
def close_connection(self):
self._connection.close()
def row_count_is_equal_to(self, count, db_table_name):
actual_count = self._number_of_rows_in(db_table_name)
if not actual_count == int(count):
raise AssertionError('Expected to have %s rows but was %s' %
(count, actual_count))
def _number_of_rows_in(self, db_table_name):
cursor = self._execute('SELECT count() FROM %s' % db_table_name)
return cursor.fetchone()[0]
def _execute(self, sql_statement):
return self._connection.execute(sql_statement)
| import sqlite3
class RobotSqliteDatabase:
def __init__(self):
self._connection = None
def connect_to_database(self, db_file_path):
self._connection = sqlite3.connect(db_file_path)
def close_connection(self):
self._connection.close()
def row_count_is_equal_to(self, count, db_table_name):
actual_count = self._number_of_rows_in(db_table_name)
if not actual_count == int(count):
raise AssertionError('Expected to have %s rows but was %s' %
(count, actual_count))
def _number_of_rows_in(self, db_table_name):
cursor = self._execute('SELECT count() FROM %s' % db_table_name)
return cursor.fetchone()[0]
def _execute(self, sql_statement):
return self._connection.execute(sql_statement) | none | 1 | 3.033975 | 3 | |
utils.py | Aralas/icassp19 | 93 | 6617487 |
import numpy as np
import os, re
import matplotlib
matplotlib.use('agg')
#########################################################################
# Some of these functions have been inspired on a framework by <NAME> developed for a pydata workshop
# https://github.com/nkundiushuti/pydata2017bcn/blob/master/util.py
#########################################################################
def save_tensor(var, out_path=None, suffix='_mel'):
"""
Saves a numpy array as a binary file
-review the shape saving when it is a label
"""
assert os.path.isdir(os.path.dirname(out_path)), "path to save tensor does not exist"
var.tofile(out_path.replace('.data', suffix + '.data'))
save_shape(out_path.replace('.data', suffix + '.shape'), var.shape)
def load_tensor(in_path, suffix=''):
"""
Loads a binary .data file
"""
assert os.path.isdir(os.path.dirname(in_path)), "path to load tensor does not exist"
f_in = np.fromfile(in_path.replace('.data', suffix + '.data'))
shape = get_shape(in_path.replace('.data', suffix + '.shape'))
f_in = f_in.reshape(shape)
return f_in
def save_shape(shape_file, shape):
"""
Saves the shape of a numpy array
"""
with open(shape_file, 'w') as fout:
fout.write(u'#'+'\t'.join(str(e) for e in shape)+'\n')
def get_shape(shape_file):
"""
Reads a .shape file
"""
with open(shape_file, 'rb') as f:
line=f.readline().decode('ascii')
if line.startswith('#'):
shape=tuple(map(int, re.findall(r'(\d+)', line)))
return shape
else:
raise IOError('Failed to find shape in file')
def get_num_instances_per_file(f_name, patch_len=25, patch_hop=12):
"""
Return the number of context_windows or instances generated out of a given file
"""
shape = get_shape(os.path.join(f_name.replace('.data', '.shape')))
file_frames = float(shape[0])
return np.maximum(1, int(np.ceil((file_frames-patch_len)/patch_hop)))
def get_feature_size_per_file(f_name):
"""
Return the dimensionality of the features in a given file.
Typically, this will be the number of bins in a T-F representation
"""
shape = get_shape(os.path.join(f_name.replace('.data', '.shape')))
return shape[1]
def make_sure_isdir(pre_path, _out_file):
"""
make sure the a directory at the end of pre_path exists. Else create it
:param pre_path:
:param args:
:return:
"""
full_path = os.path.join(pre_path, _out_file)
if not os.path.exists(full_path):
os.makedirs(full_path)
return full_path
|
import numpy as np
import os, re
import matplotlib
matplotlib.use('agg')
#########################################################################
# Some of these functions have been inspired on a framework by <NAME> developed for a pydata workshop
# https://github.com/nkundiushuti/pydata2017bcn/blob/master/util.py
#########################################################################
def save_tensor(var, out_path=None, suffix='_mel'):
"""
Saves a numpy array as a binary file
-review the shape saving when it is a label
"""
assert os.path.isdir(os.path.dirname(out_path)), "path to save tensor does not exist"
var.tofile(out_path.replace('.data', suffix + '.data'))
save_shape(out_path.replace('.data', suffix + '.shape'), var.shape)
def load_tensor(in_path, suffix=''):
"""
Loads a binary .data file
"""
assert os.path.isdir(os.path.dirname(in_path)), "path to load tensor does not exist"
f_in = np.fromfile(in_path.replace('.data', suffix + '.data'))
shape = get_shape(in_path.replace('.data', suffix + '.shape'))
f_in = f_in.reshape(shape)
return f_in
def save_shape(shape_file, shape):
"""
Saves the shape of a numpy array
"""
with open(shape_file, 'w') as fout:
fout.write(u'#'+'\t'.join(str(e) for e in shape)+'\n')
def get_shape(shape_file):
"""
Reads a .shape file
"""
with open(shape_file, 'rb') as f:
line=f.readline().decode('ascii')
if line.startswith('#'):
shape=tuple(map(int, re.findall(r'(\d+)', line)))
return shape
else:
raise IOError('Failed to find shape in file')
def get_num_instances_per_file(f_name, patch_len=25, patch_hop=12):
"""
Return the number of context_windows or instances generated out of a given file
"""
shape = get_shape(os.path.join(f_name.replace('.data', '.shape')))
file_frames = float(shape[0])
return np.maximum(1, int(np.ceil((file_frames-patch_len)/patch_hop)))
def get_feature_size_per_file(f_name):
"""
Return the dimensionality of the features in a given file.
Typically, this will be the number of bins in a T-F representation
"""
shape = get_shape(os.path.join(f_name.replace('.data', '.shape')))
return shape[1]
def make_sure_isdir(pre_path, _out_file):
"""
make sure the a directory at the end of pre_path exists. Else create it
:param pre_path:
:param args:
:return:
"""
full_path = os.path.join(pre_path, _out_file)
if not os.path.exists(full_path):
os.makedirs(full_path)
return full_path
| en | 0.600509 | ######################################################################### # Some of these functions have been inspired on a framework by <NAME> developed for a pydata workshop # https://github.com/nkundiushuti/pydata2017bcn/blob/master/util.py ######################################################################### Saves a numpy array as a binary file -review the shape saving when it is a label Loads a binary .data file Saves the shape of a numpy array Reads a .shape file Return the number of context_windows or instances generated out of a given file Return the dimensionality of the features in a given file. Typically, this will be the number of bins in a T-F representation make sure the a directory at the end of pre_path exists. Else create it :param pre_path: :param args: :return: | 2.494908 | 2 |
"""
4 - Jan - 2018 / <NAME> / <EMAIL>
This is the main module of FUSS. It contains general utility functions, a couple of interactive routines and
also defines a new class: PolData, to deal with specpol data.
All this should make dealing with and analysing specpol data easier.
Functions:
----------
get_spctr(): Gets flux data from text file.
get_pol(): Gets pol data from text file.
dopcor(): Doppler Correction.
dopcor_file(): Doppler correction from data from a file output into a new file
ylim_def(): Used to define y limits for plots. Used within FUSS.
rot_data(): To rotate 2D data.
norm_ellipse(): Creates random data where the x and y coordinates are described by 2 different normal distributions.
Interactive routines:
---------------------
ep_date(): Taking a date as reference point, finds epoch from date or date from epoch.
vel(): Finds expansion velocity of element from observed and rest wavelength.
Class PolData():
----------------
Attributes:
Defined by __init__
- name: name
- wlp = wavelength bins of polarisation data
- p = p
- pr = Delta p
- q = q
- qr = Delta q
- u = u
- ur = Delta u
- a = Polarisation Angle P.A
- ar = Delta P.A
- wlf = wavelength bins of flux spectrum
- f = Flux
- fr = Delta F
Defined by find_isp() or add_isp()
- qisp, qispr, uisp, uispr, aisp, aispr: Stokes parameters and P.A of ISP
Defined by rmv_isp()
- p0, p0r, q0, ... , a0r : Original polarisation data before ISP correction
- Updates p, pr, q, ..., ar with ISP corrected values.
Methods:
- add_flux_data()
- flu_n_pol()
- find_isp()
- add_isp()
- rmv_isp()
- qu_plt()
"""
from __future__ import division
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import math as m
import matplotlib.gridspec as gridspec
from scipy.odr import ODR, Model, Data, RealData, odr, Output
import os
import datetime as dt
from FUSS import isp as isp
import sys
import pandas as pd
# Python 2 back-compatibility: rebind range/input to the lazy xrange and the
# non-evaluating raw_input, so the rest of the module can use the Python 3
# names uniformly on either interpreter.
if sys.version_info.major < 3:
    range = xrange
    input = raw_input
# ################## FUNCTIONS ###################### FUNCTIONS #################### FUNCTIONS ################# #
def get_spctr(filename, wlmin=0, wlmax=100000, err=False, scale=True, skiprows=0):
    """
    Imports a flux spectrum from an ASCII file.

    Notes
    -----
    Required file format: wl(Angstrom) flux *flux_error* (*optional*)

    Parameters
    ----------
    filename : string
        Name of the ASCII file where the spectrum is.
    wlmin : int, optional
        Lower wavelength cutoff. Default = 0.
    wlmax : int, optional
        Upper wavelength cutoff. Default = 100000.
    err : bool, optional
        If there is an error column, set to True. Default is False.
    scale : bool, optional
        Default is True. Divides the flux (and error) by the median flux,
        normalising the spectrum to a median of 1.
    skiprows : int, optional
        Default is 0, number of rows to skip at the top of the file.

    Returns
    -------
    Tuple of 1D Arrays
        => Wavelength, Flux, *flux_error* (only when ``err`` is True)
    """
    # Single load for both cases; previously the err=True branch ignored
    # `skiprows`, unlike the err=False branch.
    usecols = (0, 1, 2) if err else (0, 1)
    data = np.loadtxt(filename, unpack=True, usecols=usecols, skiprows=skiprows)

    # Keep only the bins inside the requested wavelength window.
    mask = (data[0] > wlmin) & (data[0] < wlmax)
    wl = data[0][mask]
    f = data[1][mask]
    r = data[2][mask] if err else None

    if scale is True:
        s = 1 / np.median(f)  # normalising the spectrum to a median flux of 1
        f = f * s
        if err:
            r = r * s

    if err:
        return wl, f, r
    return wl, f
def get_pol(filename, wlmin=0, wlmax=100000, skiprows=0):
    """
    Imports values from polarisation files (as written by the old specpol
    routine in datred, pre Dec 2017).

    Notes
    -----
    Required file format: 9 columns. The first column must be wavelength in
    Angstrom; the other 8 are the Stokes parameters, degree of polarisation
    and P.A, with their errors:
    => wl p p_err q q_err u u_err angle angle_err

    Parameters
    ----------
    filename : string
        Name of the ASCII file.
    wlmin : int, optional
        Lower wavelength cutoff. Default = 0.
    wlmax : int, optional
        Upper wavelength cutoff. Default = 100000.
    skiprows : int, optional
        Number of header rows to skip. Default = 0.

    Returns
    -------
    Tuple of nine 1D arrays, in the same column order as the input file
    (wavelength first).
    """
    raw = np.loadtxt(filename, unpack=True,
                     usecols=(0, 1, 2, 3, 4, 5, 6, 7, 8), skiprows=skiprows)
    # raw[0] holds the wavelength bins; trim every column to the window.
    keep = (raw[0] > wlmin) & (raw[0] < wlmax)
    return tuple(column[keep] for column in raw)
def dopcor(val, z):
    """
    Doppler correction.

    Parameters
    ----------
    val : array
        Array containing the data. val[0] MUST BE THE WAVELENGTH.
        NEED AT LEAST 2 COLUMNS!!
    z : float
        Redshift

    Returns
    -------
    Array containing the data with the wavelength column doppler corrected.
    The input is not modified (a copy is returned).
    """
    values = np.array(val)  # copy, so the caller's data is left untouched
    wl0 = np.asarray(values[0])
    # Vectorised equivalent of the old per-bin loop (which grew an array with
    # np.append, i.e. quadratic): wl_new = wl - wl * z, same FP operations.
    values[0] = wl0 - wl0 * z
    return values
def dopcor_file(filename, z, dataframe=True, sep='\t'):
    """
    Doppler correction of data from a file (filename) into a new file
    prefixed with 'dc_'.

    Parameters
    ----------
    filename : str
        Name of the file holding the data to be doppler corrected.
    z : float
        Redshift
    dataframe : bool, optional
        If True (default) the file is read as a separated-value table with a
        header row (pandas). If False it is treated as a plain text file
        whose first whitespace-separated column is the wavelength.
    sep : str, optional
        Column separator used when ``dataframe`` is True. Default is a tab.
    """
    output = 'dc_' + filename
    if dataframe is False:
        # NOTE: the previous version used Python-2-only `file()` (NameError
        # on Python 3), shadowed the module-level dopcor() with a local list,
        # never closed its handles and shelled out to an interactive
        # `cp -i` whose copy was immediately overwritten anyway.
        # First pass: corrected wavelength of every row.
        shifted_wls = []
        with open(filename, 'r') as f_in:
            for line in f_in:
                wl = float(line.split()[0])
                shifted_wls.append(wl - wl * z)
        # Second pass: rewrite each line with its wavelength replaced.
        with open(filename, 'r') as f_in, open(output, 'w') as f_out:
            for i, line in enumerate(f_in):
                first_col = line.split()[0]
                f_out.write(line.replace(first_col, str(shifted_wls[i])))
        print(output + ' created')
    elif dataframe is True:
        data = pd.read_csv(filename, sep=sep)
        # Shift the first (wavelength) column; other columns are untouched.
        data.iloc[:, 0] = data.iloc[:, 0].values - data.iloc[:, 0].values * z
        data.to_csv(output, sep='\t', index=False)
        print(output + ' created')
def ylim_def(wl, f, wlmin=4500, wlmax=9500):
    """
    Find sensible y-axis limits for a spectrum plot.

    Only values whose wavelength lies strictly inside (wlmin, wlmax) are
    considered, so that the orders-of-magnitude excursions sometimes seen at
    the extremities of a spectrum do not blow up the plot range.

    Parameters
    ----------
    wl : array
        Wavelength bins.
    f : array
        Values (flux, q, u, ...) in each bin.
    wlmin : int, optional
        Lower bound of the window used to pick the limits. Default 4500.
    wlmax : int, optional
        Upper bound of the window. Default 9500.

    Returns
    -------
    (ymin, ymax) tuple of floats.

    Raises
    ------
    ValueError
        If no data point falls inside (wlmin, wlmax).
    """
    wl = np.asarray(wl, dtype=float)
    f = np.asarray(f, dtype=float)
    window = f[(wl > wlmin) & (wl < wlmax)]
    if window.size == 0:
        raise ValueError("No data points between wlmin and wlmax")
    # True min/max over the window. The previous if/elif scan could miss the
    # maximum entirely (the first in-range point always updated fmin and
    # skipped the fmax check), yielding wrong limits or UnboundLocalError.
    fmin = float(np.min(window))
    fmax = float(np.max(window))

    # Padding heuristics below were determined through testing on real data.
    # Boundary values (exactly 0, 1 or -1) previously left ymin/ymax unbound;
    # the last ymax branch also tested `fmin` instead of `fmax` (typo).
    if 0 < fmin < 1:
        ymin = fmin - 1.2 * fmin
    elif fmin >= 1:
        ymin = fmin - fmin / 5
    elif -1 < fmin < 0:
        ymin = fmin + 1.2 * fmin
    else:  # fmin <= -1, or exactly 0
        ymin = fmin + fmin / 5

    if 0 < fmax < 1:
        ymax = fmax + 1.2 * fmax
    elif fmax >= 1:
        ymax = fmax + fmax / 5
    elif -1 < fmax < 0:
        ymax = fmax - 1.2 * fmax
    else:  # fmax <= -1, or exactly 0 (kept /10 from the original tuning)
        ymax = fmax - fmax / 10
    return ymin, ymax
def rot_data(q, u, theta):
    """
    Rotate Stokes parameters (or any 2D data set) by angle theta.

    Parameters
    ----------
    q : 1D array-like
    u : 1D array-like
    theta : float
        Rotation angle in radians.

    Returns
    -------
    Two 1D np.arrays: q rotated, u rotated.
    """
    q = np.asarray(q, dtype=float)
    u = np.asarray(u, dtype=float)
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    # Vectorised application of [[cos, -sin], [sin, cos]] to every (q, u)
    # pair, replacing the old per-bin Python loop that grew arrays with
    # np.append (quadratic). Same arithmetic per element.
    q_rot = cos_t * q - sin_t * u
    u_rot = sin_t * q + cos_t * u
    return q_rot, u_rot
def norm_ellipse(xc, yc, a, b, theta, n):
    """
    Creates an ellipsoidal data set normally distributed around (xc, yc).

    The x and y coordinates are drawn from two independent normal
    distributions (standard deviations ``a`` and ``b``). Because the two axes
    are drawn independently, the cloud is axis-aligned; it can then be
    rotated by ``theta``.

    Parameters
    ----------
    xc : float
        X coordinate of ellipse centre.
    yc : float
        Y coordinate of ellipse centre.
    a : float
        Major axis (std of the x draw).
    b : float
        Minor axis (std of the y draw).
    theta : float
        Rotation angle of the ellipse, in radians.
    n : int
        Number of points. (The previous implementation returned n+1 points,
        contradicting its own documentation.)

    Returns
    -------
    Two 1D np.arrays containing the x and y coordinates of the data created.
    """
    # One vectorised draw per axis instead of a Python while-loop; also fixes
    # the off-by-one (`while i <= n`) that produced n+1 points.
    x = np.random.normal(xc, a, int(n))
    y = np.random.normal(yc, b, int(n))
    if theta != 0:
        x, y = rot_data(x, y, theta)  # rotate the cloud if requested
    return x, y
def ep_date():
    """
    Interactive Routine. Finds epoch from date or date from epoch given a maximum date.

    Prompts the user for the date of V-band maximum, then loops on a menu:
    (1) epoch from a date, (2) date from an epoch, (3) update the reference
    date, (4) exit. All input is read from stdin with input().
    """
    # ####### Functions used by ep_date ########## #
    # The helpers below close over `vmax` (the V-band maximum date), which is
    # (re)bound in the enclosing scope before they are ever called.
    def date_input():
        # Prompt for a calendar date and return it as a datetime.date.
        yr = input("Year: ")
        month = input("Month: ")
        day = input("Day: ")
        date = dt.date(int(yr), int(month), int(day))
        return date
    def date_from_epoch():
        # Epoch (days relative to vmax) -> calendar date.
        ep = dt.timedelta(float(input("\n What epoch (in days) would you like to know the date for: ")))
        print('\nDate at epoch ' + str(ep) + ' days: ')
        print(vmax + ep)
        return vmax + ep
    def ep_from_dates():
        # Calendar date -> epoch (timedelta relative to vmax).
        print("\nDate of epoch you want in days")
        date_ep = date_input()
        ep = date_ep - vmax
        print('\nEpoch:')
        print(ep)
        return ep
    # ############### MAIN ##################### #
    print("\nDate at V-band max")
    vmax = date_input()
    print("\n What do you want to do? \n (1) Get epoch in days. Inputs: Date of epoch" \
          "\n (2) Get date for an epoch in days. Inputs: Epoch in days (can be negative)" \
          "\n (3) Update the V-band max date" \
          "\n (4) Exit")
    to_do = input("#> ")
    # Menu loop: repeat until the user picks option 4.
    while to_do != '4':
        if to_do == '1':
            ep_from_dates()
        if to_do == '2':
            date_from_epoch()
        if to_do == '3':
            # Rebinding vmax here updates what the helper closures see.
            print("\nDate at V-band max")
            vmax = date_input()
        if to_do != '1' and to_do != '2' and to_do != '3' and to_do != '4':
            print("Must choose option 1, 2, 3 or 4")
        to_do = input("#> ")
    return "Good Bye"
def vel():
    """
    Interactive routine. Finds the velocity for a given observed wavelength
    and rest wavelength, repeating until the user declines to continue.
    """
    keep_going = True
    while keep_going:
        obs = float(input('What is the observed wavelength: '))
        rest = float(input('What is the rest wavelength: '))
        light_speed = 299792.458  # speed of light in km/s
        print(((obs - rest) / rest) * light_speed)
        answer = input('Continue?(y/n): ')
        keep_going = answer == 'y' or answer == ''
# ################################################################################# #
# ############## CLASSE ############## POLDATA ########### CLASSE ############### #
# ################################################################################# #
class PolData(object):
"""
Each instance contains one spectropolarimetric data set.
Note
-----
The attributes wlp, p, pr, q, qr, u, ur, a and ar are 1D arrays and must have the
same length.
The attributes wlf, f and fr must have the same length, but it can differ from the
length of wlp, p, etc...
When the ISP is removed, the attributes p0, p0r, q0, etc... store the original values
of p, pr, q, etc..., and the latter are updated to have the ISP corrected values of polarisation.
Parameters
----------
poldata : str or tuple
The polarisation data can be imported from a text file containing only the data, where
the column order is: wavelength p p_err q q_err u u_err a a_err.
Alternatively a tuple of arrays containing the data can be provided. Make sure the order
of the arrays in the tuple corresponds to wavelength p p_err q q_err u u_err a a_err.
name : str
A short handle to make your data object recognisable (e.g. 'ep1', '14ad')
wlmin : int, optional
Minimum wavelength cutoff
wlmax : int, optional
Maximum wavelength cutoff
Attributes
----------
name : str
A short handle to make your data object recognisable (e.g. 'ep1', '14ad')
wlp : array
1D array containing the wavelength bins of the polarisation data.
p : array
1D array containing the degree of polarisation in each bin.
pr : array
1D array containing the error on p in each bin.
q : array
1D array containing Stokes q in each bin.
qr : array
1D array containing the error on q in each bin.
u : array
1D array containing Stokes u in each bin.
ur : array
1D array containing the error on u in each bin.
a : array
1D array containing the polarisation angle in each bin.
ar : array
1D array containing the error on the polarisation in each bin.
wlf : array, optional
1D array containing wavelength bins of the flux spectrum.
f : array, optional
1D array containing the flux in each bin.
fr : array, optional
1D array containing the error on the flux in each bin.
qisp : float, optional
Stokes q of the ISP.
qispr : float, optional
Error on q ISP.
uisp : float, optional
Stokes u of the ISP
uispr : float, optional
Error on u ISP
gradq : tuple, optional
Gradient of Stokes q ISP and error on the gradient.
constq : tuple, optional
Intercept of Stokes q ISP and error on the intercept.
gradu : tuple, optional
Gradient of Stokes u ISP and error on the gradient.
constu : tuple, optional
Intercept of Stokes u ISP and error on the intercept.
p0 : array
1D array containing the degree of polarisation in each bin BEFORE ISP REMOVAL.
p0r : array
1D array containing the error on p in each bin BEFORE ISP REMOVAL.
q0 : array
1D array containing Stokes q in each bin BEFORE ISP REMOVAL.
q0r : array
1D array containing the error on q in each bin BEFORE ISP REMOVAL.
u0 : array
1D array containing Stokes u in each bin BEFORE ISP REMOVAL.
u0r : array
1D array containing the error on u in each bin BEFORE ISP REMOVAL.
a0 : array
1D array containing the polarisation angle in each bin BEFORE ISP REMOVAL.
a0r : array
1D array containing the error on the polarisation in each bin BEFORE ISP REMOVAL.
"""
def __init__(self, poldata, name=' ', wlmin=0, wlmax=1000000):
if type(poldata) is str:
try:
# This if we use the old way of creating pol data files fron datred (space separate no header )
pol0 = get_pol(poldata, wlmin=wlmin, wlmax=wlmax)
self.wlp, self.p, self.pr= pol0[0], pol0[1], pol0[2]
self.q , self.qr, self.u, self.ur, self.a, self.ar = pol0[3], pol0[4], pol0[5], pol0[6], pol0[7], pol0[8]
except ValueError:
# This we got the new pol data files for datred (pandas data frame to tab separated file with col names)
poldf = pd.read_csv(poldata, sep='\t')
mask = (poldf.iloc[:,0].values > wlmin) & (poldf.iloc[:,0].values < wlmax)
self.wlp, self.p, self.pr = poldf.iloc[:,0].values[mask], poldf.iloc[:,1].values[mask], poldf.iloc[:,2].values[mask]
self.q, self.qr = poldf.iloc[:,3].values[mask], poldf.iloc[:,4].values[mask]
self.u, self.ur = poldf.iloc[:,5].values[mask], poldf.iloc[:,6].values[mask]
self.a, self.ar = poldf.iloc[:,7].values[mask], poldf.iloc[:,8].values[mask]
else:
pol0 = poldata
self.wlp, self.p, self.pr= pol0[0], pol0[1], pol0[2]
self.q , self.qr, self.u, self.ur, self.a, self.ar = pol0[3], pol0[4], pol0[5], pol0[6], pol0[7], pol0[8]
self.name = name
self.wlf = None
self.f = None
self.fr = None
self.qisp = None
self.qispr = None
self.uisp = None
self.uispr = None
self.pisp = None
self.pispr = None
self.aisp = None
self.aispr = None
self.gradq = None
self.constq = None
self.gradu = None
self.constu = None
self.q0 = None
self.u0 = None
self.q0r = None
self.u0r = None
self.p0 = None
self.p0r = None
self.a0 = None
self.a0r = None
print(" ==== PolData - instance: " + self.name + " ====")
print("Polarisation data initialised. If you want to add Stokes I use add_flux_data(). " \
"To find ISP use find_isp(). \n")
def add_flux_data(self, filename, wlmin=0, wlmax=1000000, err=False, scale=False, skiprows = 0):
"""
Adds flux spectrum data attributes to the PolData.
Parameters
----------
filename : str
File containing the flux data. File format: wl, f, fr (no comas)
wlmin : int
Minimum wavelength cut off
wlmax :
Maximum wavelength cut off
err : bool
If false, only imports wavelength and flux, not the error on the flux. Default = False.
skiprows : int, optional
efault is 0, number of rows to skip
"""
try:
flux = get_spctr(filename, wlmin=wlmin, wlmax=wlmax, scale=scale, skiprows = skiprows)
self.wlf = flux[0]
self.f = flux[1]
if err is True:
self.fr = flux[2]
print(" ==== PolData - instance: " + self.name + " ====")
print("Flux spectrum added.")
except ValueError as error:
print("ValueError: "+str(error) + "\n /!\ This function uses np.loadtxt, if there are rows of text at the top of your file that need to be skipped add the argument skiprows = [number of rows to skip]")
    def flu_n_pol(self, save=False):
        """
        Creates plot of p, q, u, theta, and flux.

        Note
        ----
        /!\ The x-axis is SHARED, so limits on polarisation attributes and flux
        attributes should be the same.

        Parameters
        ----------
        save : bool
            Whether to save the plot or not. Saved as [self.name]_fnp.png
        """
        fnp = plt.figure(figsize=(10, 10))
        # 5 vertically stacked panels with no vertical gap: p, q, u, P.A, flux.
        grid = gridspec.GridSpec(5, 1, hspace=0)
        p_plot = plt.subplot(grid[0])
        q_plot = plt.subplot(grid[1])
        u_plot = plt.subplot(grid[2])
        a_plot = plt.subplot(grid[3])
        f_plot = plt.subplot(grid[4])
        p_plot.errorbar(self.wlp, self.p, yerr=self.pr, color='purple', capsize=0, ecolor='grey')
        q_plot.errorbar(self.wlp, self.q, yerr=self.qr, color='r', alpha=0.8, capsize=0, ecolor='grey')
        u_plot.errorbar(self.wlp, self.u, yerr=self.ur, color='blue', alpha=0.8, capsize=0, ecolor='grey')
        a_plot.errorbar(self.wlp, self.a, yerr=self.ar, color='orange', alpha=0.8, capsize=0, ecolor='grey')
        try:
            f_plot.errorbar(self.wlf, self.f, yerr=self.fr, color='k', alpha=0.5, lw=1.5, capsize=0, ecolor='grey')
        except:
            # wlf/f are still None if add_flux_data() was never called; plot the rest anyway.
            print('Flux attributes not defined')
        p_plot.set_ylim(ylim_def(self.wlp, self.p, wlmin=4700))
        p_plot.set_ylabel('p (%)')
        p_plot.set_title(self.name, fontsize=16)
        q_plot.set_ylim(ylim_def(self.wlp, self.q, wlmin=4700))
        q_plot.set_ylabel('q (%)')
        u_plot.set_ylim(ylim_def(self.wlp, self.u, wlmin=4700))
        u_plot.set_ylabel('u (%)')
        a_plot.set_ylim(ylim_def(self.wlp, self.a, wlmin=4700))
        a_plot.set_ylabel('P.A (deg)')
        try:
            f_plot.set_ylim(ylim_def(self.wlf, self.f))
            f_plot.set_ylabel('Flux')
            f_plot.set_xlabel('Wavelength (Ang)', fontsize=14)
        except:
            print('Flux attributes not defined')
        # Only the bottom (flux) panel keeps its x-axis labels.
        p_plot.xaxis.set_visible(False)
        q_plot.xaxis.set_visible(False)
        u_plot.xaxis.set_visible(False)
        a_plot.xaxis.set_visible(False)
        if save is True:
            fnp.savefig(self.name + '_fnp.png')
        plt.show()
        return
def find_isp(self, wlmin, wlmax):
"""
Estimates ISP
Notes
-----
Simply an average of q and u over a given wavelength range which should correspond to line
blanketting region.
Parameters
----------
wlmin : int
Start of wavelength range.
wlmax : int
End of wavelength range.
"""
ls = [self.q, self.qr, self.u, self.ur]
cond = (self.wlp > wlmin) & (self.wlp < wlmax)
crop = []
for val in ls:
valn = val[cond]
crop.append(valn)
# Values of p, q, u, a and their error for ISP
self.qisp = np.average(crop[0], weights=1 / (crop[1] ** 2))
self.qispr = np.std(crop[0])
self.uisp = np.average(crop[2], weights=1 / (crop[3] ** 2))
self.uispr = np.std(crop[2])
self.pisp = np.sqrt(self.qisp ** 2 + self.uisp ** 2)
self.pispr = (1 / self.pisp) * np.sqrt((self.qisp * self.qispr) ** 2 + (self.uisp * self.uispr) ** 2)
if self.pisp > self.pispr:
self.pisp = self.pisp - (self.pispr**2)/self.pisp
self.aisp = (0.5 * m.atan2(self.uisp, self.qisp)) * 180.0 / m.pi
self.aispr = 0.5 * np.sqrt(((self.uispr / self.uisp) ** 2 + (self.qispr / self.qisp) ** 2) * (
1 / (1 + (self.uisp / self.qisp) ** 2)) ** 2)
if self.aisp < 0:
self.aisp = 180 + self.aisp # Making sure P.A range is 0-180 deg
print(" ==== PolData - instance: " + self.name + " ====")
print("ISP found: \n qisp = " + str(self.qisp) + " +/- " + str(self.qispr) \
+ "\n usip = " + str(self.uisp) + " +/- " + str(self.uispr) \
+ "\n pisp = " + str(self.pisp) + " +/- " + str(self.pispr) \
+ "\n P.A isp = " + str(self.aisp) + " +/- " + str(self.aispr))
return self.qisp, self.qispr, self.uisp, self.uispr
    def add_isp(self, constisp_params = None, linearisp_params = None):
        """
        Adds parameters of the ISP to the data.

        Parameters
        ----------
        constisp_params : list, optional
            If the ISP is constant, give the Stokes parameters of the ISP here as a list:
            [qisp, qisp error, uisp, uisp error]
        linearisp_params : list, optional
            If the ISP changes linearly with wavelength, i.e. qisp = grad_q * lambda + intercept_q
            (and similarly for u, with lambda in Angstrom), give FIVE elements:
            [[grad_q, grad_q error], [intercept_q, intercept_q error],
             [grad_u, grad_u error], [intercept_u, intercept_u error], cov]
            where 'cov' is stored in self.cov; cov[0] and cov[1] are used by rmv_isp()
            as the covariances of the q and u fit parameters respectively.

        Examples
        --------
        If the ISP is constant across your wavelength range, put its values and associated errors
        in constisp_params:
        >> PolDataObj.add_isp(constisp_params=[0.14, 0.04, 0.08, 0.03])
        """
        if linearisp_params is None:
            # Constant ISP: derive p, its error and the P.A (in degrees) from q and u.
            self.qisp, self.qispr, self.uisp, self.uispr = constisp_params
            # Values of p, q, u, a and their error for ISP
            self.pisp = np.sqrt(self.qisp ** 2 + self.uisp ** 2)
            self.pispr = (1 / self.pisp) * np.sqrt((self.qisp * self.qispr) ** 2 + (self.uisp * self.uispr) ** 2)
            self.aisp = (0.5 * m.atan2(self.uisp, self.qisp)) * 180.0 / m.pi
            self.aispr = 0.5 * np.sqrt(((self.uispr / self.uisp) ** 2 + (self.qispr / self.qisp) ** 2) * (
                    1 / (1 + (self.uisp / self.qisp) ** 2)) ** 2)
            self.aispr = (self.aispr * 180.0) / m.pi
            if self.aisp < 0:
                self.aisp = 180 + self.aisp  # Making sure P.A range is 0-180 deg
            print(" ==== PolData - instance: " + self.name + " ====")
            print("ISP Added: \n qisp = " + str(self.qisp) + " +/- " + str(self.qispr) \
                  + "\n usip = " + str(self.uisp) + " +/- " + str(self.uispr) \
                  + "\n pisp = " + str(self.pisp) + " +/- " + str(self.pispr) \
                  + "\n P.A isp = " + str(self.aisp) + " +/- " + str(self.aispr) + "\n")
            self.gradq = None  # this will be used as a condition for the method of isp removal in rmv_isp
        elif constisp_params is None:
            # Wavelength-dependent (linear) ISP: only store the fit parameters; rmv_isp() uses them.
            self.gradq, self.constq, self.gradu, self.constu, self.cov = linearisp_params
            self.qisp = None  # this will be used as a condition for the method of isp removal in rmv_isp
        return
    def rmv_isp(self, bayesian_pcorr=False, p0_step=0.01):
        # TODO: I need 2 tests for this. Maybe will need 14ad data for the constant case and 11hs for the linear case
        """
        Removes ISP and updates q, qr, u, ur, p, pr, a and ar.

        Note
        -----
        Stores the original non ISP corrected degree of polarisation, Stokes parameters, polarisation angle,
        and associated errors in p0, p0r, q0, q0r, u0, u0r, a0, and a0r, and updates p, pr, q, qr, u, ur, a and ar.

        Parameters
        ----------
        bayesian_pcorr : bool, optional
            Forwarded to FUSS.isp (see that module for the p correction it selects). Default False.
        p0_step : float, optional
            Forwarded to FUSS.isp. Default 0.01.
        """
        # Storing original values of Stokes parameters and their errors in newly defined
        # attributes.
        self.q0 = self.q
        self.u0 = self.u
        self.q0r = self.qr
        self.u0r = self.ur
        # Storing original degree of polarisation and it's error in new variable and updating p and pr
        self.p0 = self.p
        self.p0r = self.pr
        # Same as before but for the P.A
        self.a0 = self.a
        self.a0r = self.ar
        # Which removal routine applies was recorded by find_isp()/add_isp():
        # qisp is None => linear ISP; gradq is None => constant ISP.
        if self.qisp is None:
            new_stokes, __ = isp.linear_isp(self.wlp, self.gradq, self.constq,
                                            self.gradu, self.constu,
                                            self.cov[0], self.cov[1],  #respectively covariance of q parameters and u parameters
                                            self.q, self.qr,
                                            self.u, self.ur,
                                            bayesian_pcorr=bayesian_pcorr, p0_step=p0_step)
        elif self.gradq is None:
            new_stokes = isp.const_isp(self.wlp, self.qisp, self.qispr,
                                       self.uisp, self.uispr,
                                       self.q, self.qr,
                                       self.u, self.ur,
                                       bayesian_pcorr=bayesian_pcorr, p0_step=p0_step)
        self.p = new_stokes[1]
        self.pr = new_stokes[2]
        self.q = new_stokes[3]  # new_stokes[0] is wavelength bins
        self.qr = new_stokes[4]
        self.u = new_stokes[5]
        self.ur = new_stokes[6]
        self.a = new_stokes[7]
        self.ar = new_stokes[8]
    def qu_plt(self, subplot_loc=111, wlmin=0, wlmax=100000,
               qlim=[-3.0, 3.0], ulim=[-3.0, 3.0], textloc=[-2.7, -2.7], cisp='k', fs=16,
               ls=14, isp=False, wlrest=None, colorbar=True, colorbar_labelsize=14, size_clbar=0.05, line_color=None,
               marker='.', lambda_xshift=1.7, fit=True,
               qlab_vis=True, ulab_vis=True,
               qticks_vis=True, uticks_vis=True, cmap='jet'):
        # TODO: anyway to use *args here? how does that even work?
        """
        Plots the QU plane corresponding to the imported data.

        Parameters
        ----------
        subplot_loc : int or matplotlib.gridspec.GridSpec, optional
            Location of the subplot. Can be a 3 digit integer or a gridspec location if created
            a grid using gridspec. Default = 111.
        wlmin : int, optional
            Min wavelength cut off. Default 0.
        wlmax : int, optional
            Max wavelength cut off. Default 100000.
        qlim : tuple, optional
            [min q, max q]. Default = [-3.0, 3.0]
        ulim : tuple, optional
            [min u, max u]. Default = [-3.0, 3.0]
        textloc : tuple, optional
            Location of name of qu-plot. Default = [-2.7, -2.7]
        cisp : string, optional
            Color of ISP marker. Default = 'k'
        fs : int, optional
            Font size. Applies to text on plot and axis labels, not graduations on the axes. Default = 16
        ls : int, optional
            Label size. Size of the tick numbers on axes. Default = 14.
        isp : bool, optional
            Whether to plot ISP. Default False.
        wlrest : int, optional
            If plotting qu plot of a line, rest wavelength of that line. Otherwise leave default value: None.
        colorbar : bool, optional
            Default is True. If False the colorbar is not plotted.
        colorbar_labelsize : int, optional
            Label size of the color bar ticks. Default 14.
        size_clbar : float, optional
            Modifies the size of the colour bar. Also screws with the plot somehow. Default = 0.05.
        line_color : string, optional
            If want a solid colour for the lines between the markers. Default is None and gives lines cycling
            through rainbow colors to match the color of the point they are associated with.
        marker : string, optional
            Type of marker to be used. Default is '.'
        lambda_xshift : float, optional
            Position of the colourbar label defined as qmax + shift. This is the shift value. Default is 1.7.
        fit : bool, optional
            If False the dominant axis will not be plotted. Its parameters will still be calculated and printed.
            Default is True.
        qlab_vis : bool, optional
            If False, the q label is not plotted. Default is True.
        ulab_vis : bool, optional
            If False, the u label is not plotted. Default is True.
        qticks_vis : bool, optional
            If False, all q tick labels are invisible. Default is True.
        uticks_vis : bool, optional
            If False, all u tick labels are invisible. Default is True.
        cmap : str, optional
            A valid matplotlib colormap. Default = jet

        Returns
        ------
        matplotlib.axes._subplots.AxesSubplot
            The axis the qu plane is plotted on. That way can plot other things on top, e.g line or ellipse or else.
        """
        # ################### FITTING THE DATA WITH DOM AXIS ########################### #
        func = lambda beta,x: beta[0] + beta[1] * x  # Expression of the line that we want to fit to the data
        data = RealData(self.q, self.u, self.qr, self.ur)
        model = Model(func)
        odr = ODR(data, model, [0, 0])
        # Given the levels of pol in SNE, I don't expect to ever have to plot a q-u plot with limits [-10,10]
        # The following are just q values from -10 to 10 that will be used to plot the line fit
        q_n = np.arange(-10, 10, 0.1)
        qu = plt.subplot(subplot_loc, aspect='equal')
        odr.set_job(fit_type=0)  # fit_type = 0 => explicit ODR.
        output = odr.run()
        print(" ==== QUplot - instance: " + self.name + " ====")
        print("Dom. Axis = a*x + b")
        print("a = " + str(output.beta[1]) + " +/- " + str(output.sd_beta[1]))
        print("b = " + str(output.beta[0]) + " +/- " + str(output.sd_beta[0]) + "\n")
        u_n = func(output.beta, q_n)  # Based on fit, get the u values for each q
        if fit is True:
            qu.plot(q_n, u_n, 'k--', linewidth=2, zorder=1000)
            # the zorder is high to sit on top of the scatter created belox
        print(wlmin, wlmax)
        cond = (self.wlp > wlmin) & (self.wlp < wlmax)
        wl_crop = self.wlp[cond]
        q_crop = self.q[cond]
        qr_crop = self.qr[cond]
        u_crop = self.u[cond]
        ur_crop = self.ur[cond]
        # #################### CREATING THE PLOT ########################
        plt.set_cmap(cmap)
        if wlrest is None:
            # Defining the min and max wavelength, which are going to be the beginning and end of the colour map
            wlmin = min(wl_crop)
            wlmax = max(wl_crop)
            sc = qu.scatter(q_crop, u_crop, s=100,
                            vmin=wlmin, vmax=wlmax,
                            c=wl_crop, marker=marker,
                            zorder=600, lw=0)
        else:
            # NOTE(review): the local names 'vel' and 'c' below shadow the module-level
            # vel() routine and, later, 'c' is re-bound by the errorbar unpacking.
            vel = np.array([])
            c = 299792.0
            for i in range(len(wl_crop)):
                v = c * ((wl_crop[i] - wlrest) / wlrest)
                vel = np.append(vel, v)
            # Defining the min and max VELOCITIES, which are going to be the beginning and end of the colour map
            velmin = min(vel)
            velmax = max(vel)
            print(velmin, velmax)
            sc = qu.scatter(q_crop, u_crop, s=100,
                            vmin=velmin, vmax=velmax,
                            c=vel, marker=marker,
                            zorder=600, lw=0)
        # ################## Plotting Points ###############################
        # vmin and vmax are the start and end of the colour map. c = wl because we're defining the colourmap using the
        # wavelengths wl. zorder doesn't have to be 600, it just needs to be below that of the fitting line we did above
        # and greater than the zorder of the error bars, because otherwise it doesn't look nice.
        clbar = plt.colorbar(sc, fraction=size_clbar)  # Plotting to colour map. Need to do that to get a rainbow.
        clbar.ax.tick_params(labelsize=colorbar_labelsize)
        if colorbar is False:
            clbar.remove()  # Removing Colormap from plot (but still exists so we can plot rainbows)
        elif colorbar is True:
            if wlrest is None:
                qu.text(qlim[1] + lambda_xshift, (ulim[1] + ulim[0]) / 2, r'$\lambda (\AA)$', fontsize=fs)
            else:
                qu.text(qlim[1] + lambda_xshift, (ulim[1] + ulim[0]) / 2, 'Velocity (km/s)', rotation='vertical',
                        fontsize=fs)
        a, b, c = qu.errorbar(q_crop, u_crop, xerr=qr_crop, yerr=ur_crop, marker='.', capsize=0,
                              zorder=500, linestyle='None', alpha=0.4)  # Plotting error bars
        # Convert my wavelengths into the colour map plotted earlier applying the colourbar to "c",
        # that is, the errorbars, there are 2 components (c[0] and c[1]) because I have error bars in both x and y.
        if wlrest is None:
            clmap = clbar.to_rgba(wl_crop)
        else:
            clmap = clbar.to_rgba(vel)
        c[0].set_color(clmap)
        c[1].set_color(clmap)
        # The following loop cycles through our colormap. Without this the lines we are about to create to connect
        # the points of the scatter plot will not have colours corresponding to the points they are linking.
        qu.set_prop_cycle(plt.cycler('color', clmap))
        for i in range(len(wl_crop) - 1):
            qu.plot(q_crop[i:i + 2], u_crop[i:i + 2], c=line_color,
                    alpha=1)  # Here we create line for each pair of points
            # Note that it's "i+2" in order for the last point to be i+1 -because it's up to point i+2, excluding i+2.
        # To mark ISP with errorbars
        if isp is True:
            plt.errorbar(self.qisp, self.uisp, xerr=self.qispr, yerr=self.uispr, fmt='o', color=cisp, elinewidth=2.5,
                         capthick=2.5, zorder=5000)
        plt.axvline(0, color='k', linestyle='-.')
        plt.axhline(0, color='k', linestyle='-.')
        qu.tick_params(axis='both', which='major', labelsize=ls)
        # Now fiddling with the ticks: If ticks are made to be visible then sent every other tick to be invisible
        # so bring so space to the axes. If ticks are set to be invisible... well make them invisible.
        xticks = qu.xaxis.get_major_ticks()
        yticks = qu.yaxis.get_major_ticks()
        ''' Didn't work to resize my tick labels :(
        for xtick in xticks:
            xtick.label1.set_fontsize(ticklabelsize)
        for ytick in yticks:
            ytick.label1.set_fontsize(ticklabelsize)
        '''
        if qticks_vis is False:
            for i in range(0, len(xticks)):
                xticks[i].label1.set_visible(False)
        else:
            for i in range(0, len(xticks), 2):
                xticks[i].label1.set_visible(False)
        if uticks_vis is False:
            for i in range(0, len(yticks)):
                yticks[i].label1.set_visible(False)
        else:
            for i in range(0, len(yticks), 2):
                yticks[i].label1.set_visible(False)
        if qlab_vis is True:
            qu.set_xlabel('q (%)', fontsize=fs)
        if ulab_vis is True:
            qu.set_ylabel('u (%)', labelpad=-1, fontsize=fs)
        qu.text(textloc[0], textloc[1], self.name, fontsize=fs)
        qu.set_xlim(qlim)  # Setting some limits.
        qu.set_ylim(ulim)
        return qu
| """
4 - Jan - 2018 / <NAME> / <EMAIL>
This is the main module of FUSS. It contains general utility functions, a couple of interactive routines and
also defines a new class: PolData, to deal with specpol data.
All this should make dealing with and analysing specpol data easier.
Functions:
----------
get_spctr(): Gets flux data from text file.
get_pol(): Gets pol data from text file.
dopcor(): Doppler Correction.
dopcor_file(): Doppler correction from data from a file output into a new file
ylim_def(): Used to define y limits for plots. Used within FUSS.
rot_data(): To rotate 2D data.
norm_ellipse(): Creates random data where the x and y coordinates are described by 2 different normal distributions.
Interactive routines:
---------------------
ep_date(): Taking a date as reference point, finds epoch from date or date from epoch.
vel(): Finds expansion velocity of element from observed and rest wavelength.
Class PolData():
----------------
Attributes:
Defined by __init__
- name: name
- wlp = wavelength bins of polarisation data
- p = p
- pr = Delta p
- q = q
- qr = Delta q
- u = u
- ur = Delta u
- a = Polarisation Angle P.A
- ar = Delta P.A
- wlf = wavelength bins of flux spectrum
- f = Flux
- fr = Delta F
Defined by find_isp() or add_isp()
- qisp, qispr, uisp, uispr, aisp, aispr: Stokes parameters and P.A of ISP
Defined by rmv_isp()
- p0, p0r, q0, ... , a0r : Original polarisation data before ISP correction
- Updates p, pr, q, ..., ar with ISP corrected values.
Methods:
- add_flux_data()
- flu_n_pol()
- find_isp()
- add_isp()
- rmv_isp()
- qu_plt()
"""
from __future__ import division
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import math as m
import matplotlib.gridspec as gridspec
from scipy.odr import ODR, Model, Data, RealData, odr, Output
import os
import datetime as dt
from FUSS import isp as isp
import sys
import pandas as pd
# Python 2 compatibility: alias the Python 3 names onto their Python 2 equivalents.
if sys.version_info.major < 3:
    range = xrange
    input = raw_input
# ################## FUNCTIONS ###################### FUNCTIONS #################### FUNCTIONS ################# #
def get_spctr(filename, wlmin=0, wlmax=100000, err=False, scale=True, skiprows=0):
    """
    Imports a flux spectrum from an ASCII file.

    Notes
    -----
    Required file format: wl(Angstrom) flux *flux_error* (*optional*)

    Parameters
    ----------
    filename : string
        Name of the ASCII file where the spectrum is.
    wlmin : int, optional
        Lower wavelength cutoff. Default = 0.
    wlmax : int, optional
        Upper wavelength cutoff. Default = 100000.
    err : bool, optional
        If there is an error column, set to True. Default is False.
    scale : bool, optional
        Default is True. Normalises the flux (and error) by the median flux of the cropped range.
    skiprows : int, optional
        Default is 0, number of rows to skip.

    Returns
    -------
    Tuple of 1D Arrays
        => Wavelength, Flux, *flux_error* (optional)
    """
    # Read 2 or 3 columns depending on whether an error column is expected.
    usecols = (0, 1, 2) if err else (0, 1)
    # Bug fix: 'skiprows' is now forwarded in both cases; previously it was
    # silently dropped whenever err=True.
    data = np.loadtxt(filename, unpack=True, usecols=usecols, skiprows=skiprows)

    cond = (data[0] > wlmin) & (data[0] < wlmax)
    wl = data[0][cond]
    f = data[1][cond]
    if scale is True:
        s = 1 / np.median(f)  # normalising the spectrum
        f = f * s

    if err is False:
        return wl, f

    r = data[2][cond]
    if scale is True:
        r = r * s  # scale the error by the same factor as the flux
    return wl, f, r
def get_pol(filename, wlmin=0, wlmax=100000, skiprows=0):
    """
    Reads a polarisation file produced by the old specpol routine in datred (pre Dec 2017).

    Notes
    -----
    Required file format: 9 columns, the first being the wavelength in Angstrom, followed by
    the degree of polarisation, the Stokes parameters, the P.A, and their errors:
    => wl p p_err q q_err u u_err angle angle_err

    Parameters
    ----------
    filename : string
        Name of the ASCII file.
    wlmin : int, optional
        Lower wavelength cutoff. Default = 0.
    wlmax : int, optional
        Upper wavelength cutoff. Default = 100000.
    skiprows : int, optional
        Number of rows to skip at the top of the file. Default = 0.

    Returns
    -------
    Tuple of 1D Arrays
        One array per column of the input file (wavelength first).
        => 9 arrays total.
    """
    columns = np.loadtxt(filename, unpack=True, usecols=(0, 1, 2, 3, 4, 5, 6, 7, 8), skiprows=skiprows)
    in_range = (columns[0] > wlmin) & (columns[0] < wlmax)  # columns[0] holds the wavelength bins
    # Apply the wavelength cut to every column at once.
    return tuple(col[in_range] for col in columns)
def dopcor(val, z):
    """
    Doppler-corrects the wavelength column of a data set.

    Parameters
    ----------
    val : array
        Array containing the data. val[0] MUST BE THE WAVELENGTH. NEEDS AT LEAST 2 COLUMNS.
    z : float
        Redshift.

    Returns
    -------
    Array containing the data with the wavelength column doppler corrected.
    The input is left unmodified.
    """
    values = np.array(val)  # copy, so the caller's data is untouched (also handles lists)
    # Vectorised replacement of the old per-bin append loop; elementwise this is
    # the exact same arithmetic as before: wl_rest = wl - wl * z.
    wl0 = np.asarray(values[0], dtype=float)
    values[0] = wl0 - wl0 * z
    return values
def dopcor_file(filename, z, dataframe=True, sep='\t'):
    """
    Doppler-corrects the wavelength column of data stored in a file.
    The result is written to a new file named 'dc_' + filename.

    Parameters
    ----------
    filename : str
        Name of the file containing the data to be doppler corrected.
    z : float
        Redshift.
    dataframe : bool, optional
        If True (default) the file is read as a delimited table with a header row (pandas).
        If False the file is treated as plain whitespace-separated columns with no header.
    sep : str, optional
        Column separator used when dataframe=True. Default is a tab.
    """
    output = 'dc_' + filename
    if dataframe is False:
        # Keep the original behaviour: copy first ('-i' prompts rather than clobbering),
        # then rewrite the copy with corrected wavelengths.
        os.system('cp -i ' + filename + ' ' + output)
        # Bug fix: the Python 2 'file()' builtin was used here, which raises a
        # NameError under Python 3; open() + context managers also guarantee the
        # handles are closed.
        dopcor = []
        # First pass: compute the corrected wavelength of every line.
        with open(filename, 'r') as f:
            for line in f:
                wl = float(line.split()[0])
                dopcor.append(wl - wl * z)
        # Second pass: substitute the first column in each line.
        with open(filename, 'r') as f0, open(output, 'w') as f:
            for i, line in enumerate(f0):
                columns = line.split()
                f.write(line.replace(columns[0], str(dopcor[i])))
        print(output + ' created')
    elif dataframe is True:
        data = pd.read_csv(filename, sep=sep)
        # Correct the first (wavelength) column in place.
        data.iloc[:, 0] = data.iloc[:, 0].values - data.iloc[:, 0].values * z
        data.to_csv(output, sep='\t', index=False)
        print(output + ' created')
def ylim_def(wl, f, wlmin=4500, wlmax=9500):
    """
    Finds appropriate y limits for a spectrum plot.

    Only values whose wavelength lies strictly within (wlmin, wlmax) are considered, since
    the extremities of a spectrum can show order-of-magnitude discrepancies. The returned
    limits are padded versions of the min and max value found in that range.

    Parameters
    ----------
    wl : array
        Wavelength bins.
    f : array
        Values (flux, q, u, ...) in each bin.
    wlmin : int, optional
        Lower wavelength cutoff. Default = 4500.
    wlmax : int, optional
        Upper wavelength cutoff. Default = 9500.

    Returns
    -------
    tuple of floats
        (ymin, ymax)
    """
    fmax = -100000
    fmin = 1000
    for wli, fi in zip(wl, f):
        if wlmin < wli < wlmax:
            # Bug fix: these used to be 'if/elif', so a value that was both a new
            # minimum and a new maximum (e.g. monotonically decreasing data) never
            # updated fmax, leaving it at its sentinel and crashing below.
            if fi < fmin:
                fmin = fi
            if fi > fmax:
                fmax = fi
    # The padding tweaks below were determined through testing. Boundary values
    # (exactly 0, 1 or -1) previously fell through every branch and raised
    # UnboundLocalError; they are now covered by the else clauses. The last
    # negative-fmax branch also used to test 'fmin < -1' instead of fmax.
    if fmin > 0:
        ymin = fmin - 1.2 * fmin if fmin < 1 else fmin - fmin / 5
    elif fmin < 0:
        ymin = fmin + 1.2 * fmin if fmin > -1 else fmin + fmin / 5
    else:
        ymin = 0.0
    if fmax > 0:
        ymax = fmax + 1.2 * fmax if fmax < 1 else fmax + fmax / 5
    elif fmax < 0:
        ymax = fmax - 1.2 * fmax if fmax > -1 else fmax - fmax / 10
    else:
        ymax = 0.0
    return ymin, ymax
def rot_data(q, u, theta):
    """
    Rotates Stokes parameters (or any 2D data set) by angle theta.

    Parameters
    ----------
    q : 1D np.array
    u : 1D np.array
    theta : float
        Rotation angle. NOTE(review): used directly in np.cos/np.sin, so radians — confirm
        against callers.

    Returns
    -------
    Two 1D np.arrays: q rotated, u rotated
    """
    q_arr = np.asarray(q, dtype=float)
    u_arr = np.asarray(u, dtype=float)
    # Vectorised 2x2 rotation, replacing the old per-bin np.dot/np.append loop:
    # [q'] = [cos -sin][q]
    # [u']   [sin  cos][u]
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    q_rot = cos_t * q_arr - sin_t * u_arr
    u_rot = sin_t * q_arr + cos_t * u_arr
    return q_rot, u_rot
def norm_ellipse(xc, yc, a, b, theta, n):
    """
    Creates an ellipsoidal data set normally distributed around (xc, yc).

    The x and y coordinates are drawn from two independent normal distributions, so the
    ellipse's axes are aligned with the coordinate axes; a non-zero theta then rotates
    the whole data set.

    Parameters
    ----------
    xc : float
        X coordinate of ellipse center
    yc : float
        Y coordinate of ellipse center
    a : float
        major axis (std of the x distribution)
    b : float
        minor axis (std of the y distribution)
    theta : float
        Angle of ellipse
    n : int
        Number of points

    Returns
    -------
    Two 1D np.arrays containing the x and y coordinates (respectively) of the data created.
    """
    # Vectorised draw. Bug fix: the old 'while i <= n' loop produced n+1 points
    # instead of the documented n.
    x = np.random.normal(xc, a, n)
    y = np.random.normal(yc, b, n)
    if theta != 0:
        x, y = rot_data(x, y, theta)  # Applying rotation
    return x, y
def ep_date():
    """
    Interactive Routine. Finds epoch from date or date from epoch given a maximum date.

    Prompts for the date of V-band maximum, then loops on a small menu until option 4
    (exit) is chosen. Purely interactive: reads from stdin and prints to stdout.
    """
    # ####### Functions used by ep_date ########## #
    def date_input():
        # Prompts for year/month/day and returns the corresponding datetime.date.
        yr = input("Year: ")
        month = input("Month: ")
        day = input("Day: ")
        date = dt.date(int(yr), int(month), int(day))
        return date

    def date_from_epoch():
        # Epoch (days relative to V-band max, read from stdin) -> calendar date.
        ep = dt.timedelta(float(input("\n What epoch (in days) would you like to know the date for: ")))
        print('\nDate at epoch ' + str(ep) + ' days: ')
        print(vmax + ep)
        return vmax + ep

    def ep_from_dates():
        # Calendar date (read from stdin) -> epoch in days relative to V-band max.
        print("\nDate of epoch you want in days")
        date_ep = date_input()
        ep = date_ep - vmax
        print('\nEpoch:')
        print(ep)
        return ep

    # ############### MAIN ##################### #
    print("\nDate at V-band max")
    vmax = date_input()
    print("\n What do you want to do? \n (1) Get epoch in days. Inputs: Date of epoch" \
          "\n (2) Get date for an epoch in days. Inputs: Epoch in days (can be negative)" \
          "\n (3) Update the V-band max date" \
          "\n (4) Exit")
    to_do = input("#> ")
    # Menu loop: keeps prompting until '4' is entered.
    while to_do != '4':
        if to_do == '1':
            ep_from_dates()
        if to_do == '2':
            date_from_epoch()
        if to_do == '3':
            print("\nDate at V-band max")
            vmax = date_input()
        if to_do != '1' and to_do != '2' and to_do != '3' and to_do != '4':
            print("Must choose option 1, 2, 3 or 4")
        to_do = input("#> ")
    return "Good Bye"
def vel():
    """
    Interactive routine. Finds the velocity for a given observed wavelength and rest wavelength.

    Repeatedly prompts for an observed and a rest wavelength, prints the corresponding
    velocity in km/s, and asks whether to continue ('y' or empty input keeps going).
    """
    cont = 'y'
    while cont == 'y' or cont == '':
        l_obs = float(input('What is the observed wavelength: '))
        l_emit = float(input('What is the rest wavelength: '))
        c = 299792.458  # Speed of light in km/s
        v = ((l_obs - l_emit) / l_emit) * c
        print(v)
        cont = input('Continue?(y/n): ')
# ################################################################################# #
# ############## CLASS ############## POLDATA ########### CLASS ############### #
# ################################################################################# #
class PolData(object):
"""
Each instance contains one spectropolarimetric data set.
Note
-----
The attributes wlp, p, pr, q, qr, u, ur, a and ar are 1D arrays and must have the
same length.
The attributes wlf, f and fr must have the same length, but it can differ from the
length of wlp, p, etc...
When the ISP is removed, the attributes p0, p0r, q0, etc... store the original values
of p, pr, q, etc..., and the latter are updated to have the ISP corrected values of polarisation.
Parameters
----------
poldata : str or tuple
The polarisation data can be imported from a text file containing only the data, where
the column order is: wavelength p p_err q q_err u u_err a a_err.
Alternatively a tuple of arrays containing the data can be provided. Make sure the order
of the arrays in the tuple corresponds to wavelength p p_err q q_err u u_err a a_err.
name : str
A short handle to make your data object recognisable (e.g. 'ep1', '14ad')
wlmin : int, optional
Minimum wavelength cutoff
wlmax : int, optional
Maximum wavelength cutoff
Attributes
----------
name : str
A short handle to make your data object recognisable (e.g. 'ep1', '14ad')
wlp : array
1D array containing the wavelength bins of the polarisation data.
p : array
1D array containing the degree of polarisation in each bin.
pr : array
1D array containing the error on p in each bin.
q : array
1D array containing Stokes q in each bin.
qr : array
1D array containing the error on q in each bin.
u : array
1D array containing Stokes u in each bin.
ur : array
1D array containing the error on u in each bin.
a : array
1D array containing the polarisation angle in each bin.
ar : array
1D array containing the error on the polarisation in each bin.
wlf : array, optional
1D array containing wavelength bins of the flux spectrum.
f : array, optional
1D array containing the flux in each bin.
fr : array, optional
1D array containing the error on the flux in each bin.
qisp : float, optional
Stokes q of the ISP.
qispr : float, optional
Error on q ISP.
uisp : float, optional
Stokes u of the ISP
uispr : float, optional
Error on u ISP
gradq : tuple, optional
Gradient of Stokes q ISP and error on the gradient.
constq : tuple, optional
Intercept of Stokes q ISP and error on the intercept.
gradu : tuple, optional
Gradient of Stokes u ISP and error on the gradient.
constu : tuple, optional
Intercept of Stokes u ISP and error on the intercept.
p0 : array
1D array containing the degree of polarisation in each bin BEFORE ISP REMOVAL.
p0r : array
1D array containing the error on p in each bin BEFORE ISP REMOVAL.
q0 : array
1D array containing Stokes q in each bin BEFORE ISP REMOVAL.
q0r : array
1D array containing the error on q in each bin BEFORE ISP REMOVAL.
u0 : array
1D array containing Stokes u in each bin BEFORE ISP REMOVAL.
u0r : array
1D array containing the error on u in each bin BEFORE ISP REMOVAL.
a0 : array
1D array containing the polarisation angle in each bin BEFORE ISP REMOVAL.
a0r : array
1D array containing the error on the polarisation in each bin BEFORE ISP REMOVAL.
"""
def __init__(self, poldata, name=' ', wlmin=0, wlmax=1000000):
if type(poldata) is str:
try:
# This if we use the old way of creating pol data files fron datred (space separate no header )
pol0 = get_pol(poldata, wlmin=wlmin, wlmax=wlmax)
self.wlp, self.p, self.pr= pol0[0], pol0[1], pol0[2]
self.q , self.qr, self.u, self.ur, self.a, self.ar = pol0[3], pol0[4], pol0[5], pol0[6], pol0[7], pol0[8]
except ValueError:
# This we got the new pol data files for datred (pandas data frame to tab separated file with col names)
poldf = pd.read_csv(poldata, sep='\t')
mask = (poldf.iloc[:,0].values > wlmin) & (poldf.iloc[:,0].values < wlmax)
self.wlp, self.p, self.pr = poldf.iloc[:,0].values[mask], poldf.iloc[:,1].values[mask], poldf.iloc[:,2].values[mask]
self.q, self.qr = poldf.iloc[:,3].values[mask], poldf.iloc[:,4].values[mask]
self.u, self.ur = poldf.iloc[:,5].values[mask], poldf.iloc[:,6].values[mask]
self.a, self.ar = poldf.iloc[:,7].values[mask], poldf.iloc[:,8].values[mask]
else:
pol0 = poldata
self.wlp, self.p, self.pr= pol0[0], pol0[1], pol0[2]
self.q , self.qr, self.u, self.ur, self.a, self.ar = pol0[3], pol0[4], pol0[5], pol0[6], pol0[7], pol0[8]
self.name = name
self.wlf = None
self.f = None
self.fr = None
self.qisp = None
self.qispr = None
self.uisp = None
self.uispr = None
self.pisp = None
self.pispr = None
self.aisp = None
self.aispr = None
self.gradq = None
self.constq = None
self.gradu = None
self.constu = None
self.q0 = None
self.u0 = None
self.q0r = None
self.u0r = None
self.p0 = None
self.p0r = None
self.a0 = None
self.a0r = None
print(" ==== PolData - instance: " + self.name + " ====")
print("Polarisation data initialised. If you want to add Stokes I use add_flux_data(). " \
"To find ISP use find_isp(). \n")
def add_flux_data(self, filename, wlmin=0, wlmax=1000000, err=False, scale=False, skiprows = 0):
"""
Adds flux spectrum data attributes to the PolData.
Parameters
----------
filename : str
File containing the flux data. File format: wl, f, fr (no comas)
wlmin : int
Minimum wavelength cut off
wlmax :
Maximum wavelength cut off
err : bool
If false, only imports wavelength and flux, not the error on the flux. Default = False.
skiprows : int, optional
efault is 0, number of rows to skip
"""
try:
flux = get_spctr(filename, wlmin=wlmin, wlmax=wlmax, scale=scale, skiprows = skiprows)
self.wlf = flux[0]
self.f = flux[1]
if err is True:
self.fr = flux[2]
print(" ==== PolData - instance: " + self.name + " ====")
print("Flux spectrum added.")
except ValueError as error:
print("ValueError: "+str(error) + "\n /!\ This function uses np.loadtxt, if there are rows of text at the top of your file that need to be skipped add the argument skiprows = [number of rows to skip]")
def flu_n_pol(self, save=False):
    """Plot p, q, u, the polarisation angle and the flux in 5 stacked panels.

    Note
    ----
    /!\\ The x-axis is SHARED, so limits on polarisation attributes and flux
    attributes should be the same.

    Parameters
    ----------
    save : bool
        Whether to save the plot or not. Saved as [self.name]_fnp.png
    """
    fnp = plt.figure(figsize=(10, 10))
    grid = gridspec.GridSpec(5, 1, hspace=0)  # 5 stacked panels, no vertical gap
    p_plot = plt.subplot(grid[0])
    q_plot = plt.subplot(grid[1])
    u_plot = plt.subplot(grid[2])
    a_plot = plt.subplot(grid[3])
    f_plot = plt.subplot(grid[4])
    p_plot.errorbar(self.wlp, self.p, yerr=self.pr, color='purple', capsize=0, ecolor='grey')
    q_plot.errorbar(self.wlp, self.q, yerr=self.qr, color='r', alpha=0.8, capsize=0, ecolor='grey')
    u_plot.errorbar(self.wlp, self.u, yerr=self.ur, color='blue', alpha=0.8, capsize=0, ecolor='grey')
    a_plot.errorbar(self.wlp, self.a, yerr=self.ar, color='orange', alpha=0.8, capsize=0, ecolor='grey')
    try:
        f_plot.errorbar(self.wlf, self.f, yerr=self.fr, color='k', alpha=0.5, lw=1.5, capsize=0, ecolor='grey')
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt. The flux attributes default to None, so plotting
        # fails here when add_flux_data() has not been called.
        print('Flux attributes not defined')
    p_plot.set_ylim(ylim_def(self.wlp, self.p, wlmin=4700))
    p_plot.set_ylabel('p (%)')
    p_plot.set_title(self.name, fontsize=16)
    q_plot.set_ylim(ylim_def(self.wlp, self.q, wlmin=4700))
    q_plot.set_ylabel('q (%)')
    u_plot.set_ylim(ylim_def(self.wlp, self.u, wlmin=4700))
    u_plot.set_ylabel('u (%)')
    a_plot.set_ylim(ylim_def(self.wlp, self.a, wlmin=4700))
    a_plot.set_ylabel('P.A (deg)')
    try:
        f_plot.set_ylim(ylim_def(self.wlf, self.f))
        f_plot.set_ylabel('Flux')
        f_plot.set_xlabel('Wavelength (Ang)', fontsize=14)
    except Exception:
        # BUG FIX: same bare-except narrowing as above.
        print('Flux attributes not defined')
    # Only the bottom (flux) panel keeps its x-axis labels.
    p_plot.xaxis.set_visible(False)
    q_plot.xaxis.set_visible(False)
    u_plot.xaxis.set_visible(False)
    a_plot.xaxis.set_visible(False)
    if save is True:
        fnp.savefig(self.name + '_fnp.png')
    plt.show()
    return
def find_isp(self, wlmin, wlmax):
    """Estimate the ISP.

    Notes
    -----
    Simply an error-weighted average of q and u over a given wavelength
    range, which should correspond to a line-blanketing region. The scatter
    (standard deviation) of the points in that range is used as the
    uncertainty on the averages.

    Parameters
    ----------
    wlmin : int
        Start of wavelength range.
    wlmax : int
        End of wavelength range.

    Returns
    -------
    tuple
        (qisp, qisp error, uisp, uisp error)
    """
    # Restrict q, u (and their errors) to the requested wavelength range.
    cond = (self.wlp > wlmin) & (self.wlp < wlmax)
    q_crop, qr_crop = self.q[cond], self.qr[cond]
    u_crop, ur_crop = self.u[cond], self.ur[cond]
    # Inverse-variance weighted means; the spread of the points gives the error.
    self.qisp = np.average(q_crop, weights=1 / (qr_crop ** 2))
    self.qispr = np.std(q_crop)
    self.uisp = np.average(u_crop, weights=1 / (ur_crop ** 2))
    self.uispr = np.std(u_crop)
    self.pisp = np.sqrt(self.qisp ** 2 + self.uisp ** 2)
    self.pispr = (1 / self.pisp) * np.sqrt((self.qisp * self.qispr) ** 2 + (self.uisp * self.uispr) ** 2)
    if self.pisp > self.pispr:
        # Debias p, which is positive-definite and therefore biased high.
        self.pisp = self.pisp - (self.pispr ** 2) / self.pisp
    self.aisp = (0.5 * m.atan2(self.uisp, self.qisp)) * 180.0 / m.pi
    # NOTE(review): this error formula looks like it may be missing a (u/q)
    # factor from the atan derivative -- kept as-is to match add_isp();
    # confirm against the error-propagation derivation.
    self.aispr = 0.5 * np.sqrt(((self.uispr / self.uisp) ** 2 + (self.qispr / self.qisp) ** 2) * (
        1 / (1 + (self.uisp / self.qisp) ** 2)) ** 2)
    # BUG FIX: the formula above yields the angle error in radians, but aisp
    # is in degrees; convert for consistency (add_isp() already does this).
    self.aispr = (self.aispr * 180.0) / m.pi
    if self.aisp < 0:
        self.aisp = 180 + self.aisp  # Making sure P.A range is 0-180 deg
    print(" ==== PolData - instance: " + self.name + " ====")
    print("ISP found: \n qisp = " + str(self.qisp) + " +/- " + str(self.qispr)
          + "\n uisp = " + str(self.uisp) + " +/- " + str(self.uispr)
          + "\n pisp = " + str(self.pisp) + " +/- " + str(self.pispr)
          + "\n P.A isp = " + str(self.aisp) + " +/- " + str(self.aispr))
    return self.qisp, self.qispr, self.uisp, self.uispr
def add_isp(self, constisp_params=None, linearisp_params=None):
    """Adds parameters of the ISP to the data.

    Give exactly one of the two keyword arguments.

    Parameters
    ----------
    constisp_params : list, optional
        If the isp is constant give the stokes parameters of the isp here in
        a list: [qisp, qisp error, uisp, uisp error]
    linearisp_params : list, optional
        Five elements:
        [[grad_q, grad_q error], [intercept_q, intercept_q error],
         [grad_u, grad_u error], [intercept_u, intercept_u error],
         [cov_q, cov_u]].
        For qisp = grad_q * lambda + intercept_q (and similar equation for u),
        where lambda is in Angstrom. The fifth element (missing from the old
        docstring, although the code has always unpacked it) holds the
        covariances of the q and u fit parameters; rmv_isp() forwards them to
        isp.linear_isp().

    Raises
    ------
    ValueError
        If both, or neither, of constisp_params and linearisp_params are
        given.

    Examples
    --------
    If the ISP is constant across your wavelength range:
    >> PolDataObj.add_isp(constisp_params=[0.14, 0.04, 0.08, 0.03])
    If the isp changes linearly with wavelength (note the covariance element):
    >> PolDataObj.add_isp(linearisp_params=[[0.00035, 0.00003], [2.45, 0.19],
    ..                                      [0.0002, 0.00002], [1.1, 0.2],
    ..                                      [cov_q, cov_u]])
    """
    # BUG FIX: previously, passing neither argument crashed with an opaque
    # TypeError and passing both silently did nothing.
    if (constisp_params is None) == (linearisp_params is None):
        raise ValueError("add_isp() requires exactly one of "
                         "constisp_params or linearisp_params.")
    if linearisp_params is None:
        # Constant ISP: store the Stokes parameters and derive p and the P.A.
        self.qisp, self.qispr, self.uisp, self.uispr = constisp_params
        self.pisp = np.sqrt(self.qisp ** 2 + self.uisp ** 2)
        self.pispr = (1 / self.pisp) * np.sqrt((self.qisp * self.qispr) ** 2 + (self.uisp * self.uispr) ** 2)
        self.aisp = (0.5 * m.atan2(self.uisp, self.qisp)) * 180.0 / m.pi
        self.aispr = 0.5 * np.sqrt(((self.uispr / self.uisp) ** 2 + (self.qispr / self.qisp) ** 2) * (
            1 / (1 + (self.uisp / self.qisp) ** 2)) ** 2)
        self.aispr = (self.aispr * 180.0) / m.pi  # radians -> degrees
        if self.aisp < 0:
            self.aisp = 180 + self.aisp  # Making sure P.A range is 0-180 deg
        print(" ==== PolData - instance: " + self.name + " ====")
        print("ISP Added: \n qisp = " + str(self.qisp) + " +/- " + str(self.qispr)
              + "\n uisp = " + str(self.uisp) + " +/- " + str(self.uispr)
              + "\n pisp = " + str(self.pisp) + " +/- " + str(self.pispr)
              + "\n P.A isp = " + str(self.aisp) + " +/- " + str(self.aispr) + "\n")
        self.gradq = None  # used as a condition for the method of isp removal in rmv_isp
    else:
        # Wavelength-dependent (linear) ISP.
        self.gradq, self.constq, self.gradu, self.constu, self.cov = linearisp_params
        self.qisp = None  # used as a condition for the method of isp removal in rmv_isp
    return
def rmv_isp(self, bayesian_pcorr=False, p0_step=0.01):
    """Remove the ISP and update p, pr, q, qr, u, ur, a and ar in place.

    Note
    ----
    The uncorrected degree of polarisation, Stokes parameters, polarisation
    angle and associated errors are preserved in p0, p0r, q0, q0r, u0, u0r,
    a0 and a0r before the working attributes are overwritten with the
    ISP-corrected values. Which correction is applied depends on how the ISP
    was recorded: a wavelength-dependent (linear) ISP when add_isp() was
    given linear parameters, a constant ISP otherwise.
    """
    # TODO: still needs tests (constant case, e.g. 14ad data, and linear
    # case, e.g. 11hs data).
    # Keep the pre-correction values around.
    self.q0, self.q0r = self.q, self.qr
    self.u0, self.u0r = self.u, self.ur
    self.p0, self.p0r = self.p, self.pr
    self.a0, self.a0r = self.a, self.ar
    if self.qisp is None:
        # Linear ISP: add_isp() set qisp to None in that case. self.cov[0]
        # and self.cov[1] are the covariances of the q and u fit parameters
        # respectively.
        new_stokes, __ = isp.linear_isp(self.wlp, self.gradq, self.constq,
                                        self.gradu, self.constu,
                                        self.cov[0], self.cov[1],
                                        self.q, self.qr,
                                        self.u, self.ur,
                                        bayesian_pcorr=bayesian_pcorr, p0_step=p0_step)
    elif self.gradq is None:
        # Constant ISP: gradq is None (set by add_isp() with constant
        # parameters, or never assigned by the linear path).
        new_stokes = isp.const_isp(self.wlp, self.qisp, self.qispr,
                                   self.uisp, self.uispr,
                                   self.q, self.qr,
                                   self.u, self.ur,
                                   bayesian_pcorr=bayesian_pcorr, p0_step=p0_step)
    # new_stokes[0] holds the wavelength bins; the rest are the corrected values.
    (self.p, self.pr, self.q, self.qr,
     self.u, self.ur, self.a, self.ar) = new_stokes[1:9]
def qu_plt(self, subplot_loc=111, wlmin=0, wlmax=100000,
           qlim=[-3.0, 3.0], ulim=[-3.0, 3.0], textloc=[-2.7, -2.7], cisp='k', fs=16,
           ls=14, isp=False, wlrest=None, colorbar=True, colorbar_labelsize=14, size_clbar=0.05, line_color=None,
           marker='.', lambda_xshift=1.7, fit=True,
           qlab_vis=True, ulab_vis=True,
           qticks_vis=True, uticks_vis=True, cmap='jet'):
    # TODO: anyway to use *args here? how does that even work?
    # NOTE(review): qlim, ulim and textloc are mutable (list) default
    # arguments; they are only read here, never mutated, so this is safe,
    # but tuples would be the more defensive choice.
    """
    Plots the QU plane corresponding to the imported data.

    Parameters
    ----------
    subplot_loc : int or matplotlib.gridspec.GridSpec, optional
        Location of the subplot. Can be a 3 digit integer or a gridspec
        location if a grid was created using gridspec. Default = 111.
    wlmin : int, optional
        Min wavelength cut off. Default 0.
    wlmax : int, optional
        Max wavelength cut off. Default 100000.
    qlim : tuple, optional
        [min q, max q]. Default = [-3.0, 3.0]
    ulim : tuple, optional
        [min u, max u]. Default = [-3.0, 3.0]
    textloc : tuple, optional
        Location of the name of the qu-plot. Default = [-2.7, -2.7]
    cisp : string, optional
        Color of the ISP marker. Default = 'k'
    fs : int, optional
        Font size. Applies to text on plot and axis labels, not graduations
        on the axes. Default = 16
    ls : int, optional
        Label size. Size of the tick numbers on axes. Default = 14.
    isp : bool, optional
        Whether to plot the ISP. Default False.
    wlrest : int, optional
        If plotting the qu plot of a line, rest wavelength of that line.
        Otherwise leave default value: None.
    colorbar : bool, optional
        Default is True. If False the colorbar is not plotted.
    colorbar_labelsize : int, optional
        Label size of the color bar ticks. Default 14.
    size_clbar : float, optional
        Modifies the size of the colour bar. Also screws with the plot
        somehow. Default = 0.05.
    line_color : string, optional
        If a solid colour is wanted for the lines between the markers.
        Default is None and gives lines cycling through rainbow colors to
        match the color of the point they are associated with.
    marker : string, optional
        Type of marker to be used. Default is '.'
    lambda_xshift : float, optional
        Position of the colourbar label, defined as qmax + shift. This is
        the shift value. Default is 1.7.
    fit : bool, optional
        If False the dominant axis will not be plotted. Its parameters will
        still be calculated and printed. Default is True.
    qlab_vis : bool, optional
        If False, the q label is not plotted. Default is True.
    ulab_vis : bool, optional
        If False, the u label is not plotted. Default is True.
    qticks_vis : bool, optional
        If False, all q tick labels are invisible. Default is True.
    uticks_vis : bool, optional
        If False, all u tick labels are invisible. Default is True.
    cmap : str, optional
        A valid matplotlib colormap. Default = jet

    Returns
    -------
    matplotlib.axes._subplots.AxesSubplot
        The axis the qu plane is plotted on. That way other things can be
        plotted on top, e.g. a line or an ellipse or else.
    """
    # ################### FITTING THE DATA WITH DOM AXIS ########################### #
    func = lambda beta,x: beta[0] + beta[1] * x  # Expression of the line that we want to fit to the data
    # Errors on both q and u -> orthogonal distance regression (scipy.odr).
    data = RealData(self.q, self.u, self.qr, self.ur)
    model = Model(func)
    odr = ODR(data, model, [0, 0])  # [0, 0] is the initial guess for the fit parameters
    # Given the levels of pol in SNE, I don't expect to ever have to plot a q-u plot with limits [-10,10]
    # The following are just q values from -10 to 10 that will be used to plot the line fit
    q_n = np.arange(-10, 10, 0.1)
    qu = plt.subplot(subplot_loc, aspect='equal')
    odr.set_job(fit_type=0)  # fit_type = 0 => explicit ODR.
    output = odr.run()
    print(" ==== QUplot - instance: " + self.name + " ====")
    print("Dom. Axis = a*x + b")
    print("a = " + str(output.beta[1]) + " +/- " + str(output.sd_beta[1]))
    print("b = " + str(output.beta[0]) + " +/- " + str(output.sd_beta[0]) + "\n")
    u_n = func(output.beta, q_n)  # Based on fit, get the u values for each q
    if fit is True:
        qu.plot(q_n, u_n, 'k--', linewidth=2, zorder=1000)
        # the zorder is high to sit on top of the scatter created below
    print(wlmin, wlmax)  # NOTE(review): debug print, kept for behaviour parity
    # Crop the polarisation data to the requested wavelength range.
    cond = (self.wlp > wlmin) & (self.wlp < wlmax)
    wl_crop = self.wlp[cond]
    q_crop = self.q[cond]
    qr_crop = self.qr[cond]
    u_crop = self.u[cond]
    ur_crop = self.ur[cond]
    # #################### CREATING THE PLOT ########################
    plt.set_cmap(cmap)
    if wlrest is None:
        # Defining the min and max wavelength, which are going to be the beginning and end of the colour map
        wlmin = min(wl_crop)
        wlmax = max(wl_crop)
        sc = qu.scatter(q_crop, u_crop, s=100,
                        vmin=wlmin, vmax=wlmax,
                        c=wl_crop, marker=marker,
                        zorder=600, lw=0)
    else:
        # A rest wavelength was given: colour-code by Doppler velocity instead.
        vel = np.array([])
        c = 299792.0  # speed of light in km/s
        for i in range(len(wl_crop)):
            v = c * ((wl_crop[i] - wlrest) / wlrest)
            vel = np.append(vel, v)
        # Defining the min and max VELOCITIES, which are going to be the beginning and end of the colour map
        velmin = min(vel)
        velmax = max(vel)
        print(velmin, velmax)  # NOTE(review): debug print, kept for behaviour parity
        sc = qu.scatter(q_crop, u_crop, s=100,
                        vmin=velmin, vmax=velmax,
                        c=vel, marker=marker,
                        zorder=600, lw=0)
    # ################## Plotting Points ###############################
    # vmin and vmax are the start and end of the colour map. c = wl because we're defining the colourmap using the
    # wavelengths wl. zorder doesn't have to be 600, it just needs to be below that of the fitting line we did above
    # and greater than the zorder of the error bars, because otherwise it doesn't look nice.
    clbar = plt.colorbar(sc, fraction=size_clbar)  # Plotting the colour map. Need to do that to get a rainbow.
    clbar.ax.tick_params(labelsize=colorbar_labelsize)
    if colorbar is False:
        clbar.remove()  # Removing Colormap from plot (but still exists so we can plot rainbows)
    elif colorbar is True:
        # Label the colour bar: wavelength in Angstrom, or velocity when wlrest was given.
        if wlrest is None:
            qu.text(qlim[1] + lambda_xshift, (ulim[1] + ulim[0]) / 2, r'$\lambda (\AA)$', fontsize=fs)
        else:
            qu.text(qlim[1] + lambda_xshift, (ulim[1] + ulim[0]) / 2, 'Velocity (km/s)', rotation='vertical',
                    fontsize=fs)
    a, b, c = qu.errorbar(q_crop, u_crop, xerr=qr_crop, yerr=ur_crop, marker='.', capsize=0,
                          zorder=500, linestyle='None', alpha=0.4)  # Plotting error bars
    # Convert my wavelengths into the colour map plotted earlier applying the colourbar to "c",
    # that is, the errorbars, there are 2 components (c[0] and c[1]) because I have error bars in both x and y.
    if wlrest is None:
        clmap = clbar.to_rgba(wl_crop)
    else:
        clmap = clbar.to_rgba(vel)
    c[0].set_color(clmap)
    c[1].set_color(clmap)
    # The following loop cycles through our colormap. Without this the lines we are about to create to connect
    # the points of the scatter plot will not have colours corresponding to the points they are linking.
    qu.set_prop_cycle(plt.cycler('color', clmap))
    for i in range(len(wl_crop) - 1):
        qu.plot(q_crop[i:i + 2], u_crop[i:i + 2], c=line_color,
                alpha=1)  # Here we create a line for each pair of points
        # Note that it's "i+2" in order for the last point to be i+1 -because it's up to point i+2, excluding i+2.
    # To mark ISP with errorbars
    if isp is True:
        plt.errorbar(self.qisp, self.uisp, xerr=self.qispr, yerr=self.uispr, fmt='o', color=cisp, elinewidth=2.5,
                     capthick=2.5, zorder=5000)
    # Cross-hairs through the origin of the QU plane.
    plt.axvline(0, color='k', linestyle='-.')
    plt.axhline(0, color='k', linestyle='-.')
    qu.tick_params(axis='both', which='major', labelsize=ls)
    # Now fiddling with the ticks: if ticks are made to be visible then set every other tick to be invisible
    # to bring some space to the axes. If ticks are set to be invisible... well, make them invisible.
    xticks = qu.xaxis.get_major_ticks()
    yticks = qu.yaxis.get_major_ticks()
    # NOTE(review): the string below is dead code (a no-op string statement),
    # kept verbatim for behaviour parity.
    ''' Didn't work to resize my tick labels :(
    for xtick in xticks:
        xtick.label1.set_fontsize(ticklabelsize)
    for ytick in yticks:
        ytick.label1.set_fontsize(ticklabelsize)
    '''
    if qticks_vis is False:
        for i in range(0, len(xticks)):
            xticks[i].label1.set_visible(False)
    else:
        for i in range(0, len(xticks), 2):
            xticks[i].label1.set_visible(False)
    if uticks_vis is False:
        for i in range(0, len(yticks)):
            yticks[i].label1.set_visible(False)
    else:
        for i in range(0, len(yticks), 2):
            yticks[i].label1.set_visible(False)
    if qlab_vis is True:
        qu.set_xlabel('q (%)', fontsize=fs)
    if ulab_vis is True:
        qu.set_ylabel('u (%)', labelpad=-1, fontsize=fs)
    qu.text(textloc[0], textloc[1], self.name, fontsize=fs)  # Annotate with the instance name.
    qu.set_xlim(qlim)  # Setting some limits.
    qu.set_ylim(ulim)
    return qu
| en | 0.634889 | 4 - Jan - 2018 / <NAME> / <EMAIL> This is the main module of FUSS. It contains general utility functions, a couple of interactive routines and also defines a new class: PolData, to deal with specpol data. All this should make dealing with and analysing specpol data easier. Functions: ---------- get_spctr(): Gets flux data from text file. get_pol(): Gets pol data from text file. dopcor(): Doppler Correction. dopcor_file(): Doppler correction from data from a file output into a new file ylim_def(): Used to define y limits for plots. Used within FUSS. rot_data(): To rotate 2D data. norm_ellipse(): Creates random data where the x and y coordinates are described by 2 different normal distributions. Interactive routines: --------------------- ep_date(): Taking a date as reference point, finds epoch from date or date from epoch. vel(): Finds expansion velocity of element from observed and rest wavelength. Class PolData(): ---------------- Attributes: Defined by __init__ - name: name - wlp = wavelength bins of polarisation data - p = p - pr = Delta p - q = q - qr = Delta q - u = u - ur = Delta u - a = Polarisation Angle P.A - ar = Delta P.A - wlf = wavelength bins of flux spectrum - f = Flux - fr = Delta F Defined by find_isp() or add_isp() - qisp, qispr, uisp, uispr, aisp, aispr: Stokes parameters and P.A of ISP Defined by rmv_isp() - p0, p0r, q0, ... , a0r : Original polarisation data before ISP correction - Updates p, pr, q, ..., ar with ISP corrected values. Methods: - add_flux_data() - flu_n_pol() - find_isp() - add_isp() - rmv_isp() - qu_plt() # ################## FUNCTIONS ###################### FUNCTIONS #################### FUNCTIONS ################# # Imports spectrum. Notes ----- Required file format: wl(Angstrom) flux *flux_error* (*optional*) Parameters ---------- filename : string Name of the ASCII file where the spectrum is. wlmin : int, optional Lower wavelength cutoff. Default = 0. wlmax : int, optional Upper wavelength cutoff. 
Default = 100000. err : bool, optional If there is an error column, set to True. Default is False. scale : bool, optional Default is True. Multiplies the spectrum (and error) by the median values of the flux. skiprows : int, optional Default is 0, number of rows to skip Returns ------- Tuple of 1D Arrays => Wavelength, Flux, *flux_error* (optional) # normalising the spectrum Imports values from polarisation files (given by the old specpol routine in datred (pre Dec 2017)). Notes ----- Required File format: 9 columns. First column must be wavelength in Angstrom. The other 8 columns are for stokes parameters, degree of pol and P.A, and associated errors: => wl p p_err q q_err u u_err angle angle_err Parameters ---------- filename : string Name of the ASCII file. wlmin : int, optional Lower wavelength cutoff. Default = 0. wlmax : int, optional Upper wavelength cutoff. Default = 100000. Returns ------- Tuple of 1D Arrays One 1 D array per parameter (so first must be wavelength, order of the rest depends on input file). => 9 arrays total. # pol0[0] should contain the wavelength bins # Applies the limits determined by wlmin, wlmax Doppler Correction. Parameters ---------- val : array Array containing the data. val[0] MUST BE THE WAVELENGTH. NEED AT LEAST 2 COLUMNS!! z : float Redshift Returns -------- Array containing the data with the wavelength column doppler corrected. # need this in case val is not an array but a list Doppler Correction of data from a file (filename), into another file (output) Parameters ---------- filename : str Name of the file where the data to be doppler corrected is z : float Redshift #data['wl'] -= data['wl']*z (Yes I need this in PolData) finds appropriate y limits for a spectrum. Look at values between a given range (Default: 4500-9500A) where we don't expect few order of magnitudes discrepancies like we see sometimes at the extremeties of the spectrum, then find the max and min value then define ymax and ymin. 
#print(fmin) #print(fmax) # These tweaks to make the y limit okay were determined through testing. May not always # be appropriate and might need fixing later. Used to rotate Stokes parameters (or any 2D data set) by angle theta. Parameters ---------- q : 1D np.array u : 1D np.array theta : float Returns ------- Two 1D np.arrays: q rotated, u rotated # Applying rotation to all bins and storing result in q_rot and u_rot Creates ellipsoidal data set normally distributed around (xc,yc). Parameters ---------- xc : flaot X coordinate of ellipse center yc : float Y coordinate of ellipse center a : float major axis b : float minor axis theta : Angle of ellipse n : int Number of points Returns ------- Two 1D np.arrays containing the x and y coordinates (respectively) of the data created. # This creates data within ellipse. The x an y coordinates are defined by normal distribution. # That means we get normally distributed points in 2D, also means the ellipse's major and minor axis # are aligned with x and y axis or vice versa. So also give possibility to rotate the data set created # Applying rotation Interactive Routine. Finds epoch from date or date from epoch given a maximum date. # ####### Functions used by ep_date ########## # # ############### MAIN ##################### # Interactive routine. Finds the velocity for a given observed wavelength and rest wavelength. # Speed of light in km/s # ################################################################################# # # ############## CLASSE ############## POLDATA ########### CLASSE ############### # # ################################################################################# # Each instance contains one spectropolarimetric data set. Note ----- The attributes wlp, p, pr, q, qr, u, ur, a and ar are 1D arrays and must have the same length. The attributes wlf, f and fr must have the same length, but it can differ from the length of wlp, p, etc... When the ISP is removed, the attributes p0, p0r, q0, etc... 
store the original values of p, pr, q, etc..., and the latter are updated to have the ISP corrected values of polarisation. Parameters ---------- poldata : str or tuple The polarisation data can be imported from a text file containing only the data, where the column order is: wavelength p p_err q q_err u u_err a a_err. Alternatively a tuple of arrays containing the data can be provided. Make sure the order of the arrays in the tuple corresponds to wavelength p p_err q q_err u u_err a a_err. name : str A short handle to make your data object recognisable (e.g. 'ep1', '14ad') wlmin : int, optional Minimum wavelength cutoff wlmax : int, optional Maximum wavelength cutoff Attributes ---------- name : str A short handle to make your data object recognisable (e.g. 'ep1', '14ad') wlp : array 1D array containing the wavelength bins of the polarisation data. p : array 1D array containing the degree of polarisation in each bin. pr : array 1D array containing the error on p in each bin. q : array 1D array containing Stokes q in each bin. qr : array 1D array containing the error on q in each bin. u : array 1D array containing Stokes u in each bin. ur : array 1D array containing the error on u in each bin. a : array 1D array containing the polarisation angle in each bin. ar : array 1D array containing the error on the polarisation in each bin. wlf : array, optional 1D array containing wavelength bins of the flux spectrum. f : array, optional 1D array containing the flux in each bin. fr : array, optional 1D array containing the error on the flux in each bin. qisp : float, optional Stokes q of the ISP. qispr : float, optional Error on q ISP. uisp : float, optional Stokes u of the ISP uispr : float, optional Error on u ISP gradq : tuple, optional Gradient of Stokes q ISP and error on the gradient. constq : tuple, optional Intercept of Stokes q ISP and error on the intercept. gradu : tuple, optional Gradient of Stokes u ISP and error on the gradient. 
constu : tuple, optional Intercept of Stokes u ISP and error on the intercept. p0 : array 1D array containing the degree of polarisation in each bin BEFORE ISP REMOVAL. p0r : array 1D array containing the error on p in each bin BEFORE ISP REMOVAL. q0 : array 1D array containing Stokes q in each bin BEFORE ISP REMOVAL. q0r : array 1D array containing the error on q in each bin BEFORE ISP REMOVAL. u0 : array 1D array containing Stokes u in each bin BEFORE ISP REMOVAL. u0r : array 1D array containing the error on u in each bin BEFORE ISP REMOVAL. a0 : array 1D array containing the polarisation angle in each bin BEFORE ISP REMOVAL. a0r : array 1D array containing the error on the polarisation in each bin BEFORE ISP REMOVAL. # This if we use the old way of creating pol data files fron datred (space separate no header ) # This we got the new pol data files for datred (pandas data frame to tab separated file with col names) Adds flux spectrum data attributes to the PolData. Parameters ---------- filename : str File containing the flux data. File format: wl, f, fr (no comas) wlmin : int Minimum wavelength cut off wlmax : Maximum wavelength cut off err : bool If false, only imports wavelength and flux, not the error on the flux. Default = False. skiprows : int, optional efault is 0, number of rows to skip Creates plot of p, q, u, theta, and flux. Note ---- /!\ The x-axis is SHARED, so limits on polarisation attributes and flux attributes should be the same. Parameters ---------- save : bool Whether to save the plot or not. Saved as [self.name]_fnp.png Estimates ISP Notes ----- Simply an average of q and u over a given wavelength range which should correspond to line blanketting region. Parameters ---------- wlmin : int Start of wavelength range. wlmax : int End of wavelength range. # Values of p, q, u, a and their error for ISP # Making sure P.A range is 0-180 deg Adds parameters of isp to the data. 
Parameters ---------- constisp_params : list If the isp is constant give the stokes parameters of the isp here in a list: [qisp, qisp error, uisp , uisp error] linearisp_params : list Tuple of tuples: [[grad_q, grad_q error],[intercept_q, intercept_q error], [grad_u, grad_u error],[intercept_u, intercept_u error]]. For qisp = grad_q * lambda + intercept_q (and similar equation for u), where lambda is in Angstrom. Examples -------- If the ISP is constant across your wavelength range, put its values an associated errors in constisp_params: >> PolDataObj.add_isp(constisp_params=[0.14, 0.04, 0.08, 0.03]) If the isp changes linearly with wavelength, give the parameters for the lines of q and u ISP here. >> PolDataObj.add_isp(linearisp_params=[[0.00035, 0.00003],[2.45, 0.19]]) # Values of p, q, u, a and their error for ISP # Making sure P.A range is 0-180 deg # this will be used as a condition for the method of isp removal in rmv_isp # this will be used as a condition for the method of isp removal in rmv_isp # TODO: I need 2 tests for this. Maybe will need 14ad data for the constant case and 11hs for the linear case Removes ISP and updates q, qr, u, ur, p, pr, a and ar. Note ----- Stores the original non ISP corrected degree of polarisation, Stokes parameters, polarisation angle, and associated errors in p0, p0r, q0, q0r, u0, u0r, a0, and a0r, and updates p, pr, q, qr, u, ur, a and ar. # Storing original values of Stokes parameters and their errors in newly defined # attributes. # Storing original degree of polarisation and it's error in new variable and updating p and pr # Same as before but for the P.A #respectively covariance of q parameters and u parameters # new_stokes[0] is wavelength bins # TODO: anyway to use *args here? how does that even work? Plots the QU plane corresponding to the imported data. Parameters ---------- subplot_loc : int or matplotlib.gridspec.GridSpec, optional Location of the subplot. 
Can be a 3 digit integer or a gridspec location ifcreated a grid using gridspec. Default = 111. wlmin : int, optional Min wavelength cut off. Default None. wlmax : int, optional Max wavelength cut off. Default 100000. qlim : tuple, optional [min q, max q]. Default = [-3.0, 3.0] ulim : tuple, optional [min u, max u]. Default = [-3.0, 3.0] textloc : tuple, optional Location of name of qu-plot. Default = [-2.7, -2.7] cisp : string, optional Color of ISP marker. Default = 'k' fs : int, optional Font size. Applies to text on plot and axis labels, not graduations on the axes. Default = 16 ls : int, optional Label size. Size of the tick numbers on axes. Default = 14. isp : bool, optional Whether to plot ISP. Default False. wlrest :int, optional If plotting qu plot of a line, rest wavelength of that line. Otherwise leave default value: None. colorbar : bool, optional Default is True. If False the colorbar is not plotted. colorbar_labelsize : int, optional Label size of the color bar ticks. Default 15. size_clbar : float, optional Modifies the size of the colour bar. Also screws with the plot somehow. Default = 0.05. line_color : string, optional If want a solid colour for the lines between the markers. Default is None and gives lines cycling through rainbow colors to match the color of the point they are associated with. marker : string, optional Type of marker to be used. Default is '.' lambda_xshift : float, optional Position of the colourbar label define as qmax + shift. This is the shift value. Default is 1.7. fit : bool, optional If False the dominant axis will not be plotted. Its parameters will still be calculated and returned. Default is True. qlab_vis : bool, optional If False, the q label is not plotted. Default is True. ulab_vis : bool, optional If False, the u label is not plotted. Default is True. qticks_vis : bool, optional If False, all q tick labels are invisible. Default is True. uticks_vis : bool, optional If False, all u tick labels are invisible. 
Default is True. cmap : str, optional A valid matplotlib colormap. Default = jet Returns ------ matplotlib.axes._subplots.AxesSubplot The axis the qu plane is plotted on. That way can plot other things on top, e.g line or ellipse or else. # ################### FITTING THE DATA WITH DOM AXIS ########################### # # Expression of the line that we want to fit to the data # Given the levels of pol in SNE, I don't expect to ever have to plot a q-u plot with limits [-10,10] # The following are just q values from -10 to 10 that will be used to plot the line fit # fit_type = 0 => explicit ODR. # Based on fit, get the u values for each q # the zorder is high to sit on top of the scatter created belox # #################### CREATING THE PLOT ######################## # Defining the min and max wavelength, which are going to be the beginning and end of the colour map # Defining the min and max VELOCITIES, which are going to be the beginning and end of the colour map # ################## Plotting Points ############################### # vmin and vmax are the start and end of the colour map. c = wl because we're defining the colourmap using the # wavelengths wl. zorder doesn't have to be 600, it just needs to be below that of the fitting line we did above # and greater than the zorder of the error bars, because otherwise it doesn't look nice. # Plotting to colour map. Need to do that to get a rainbow. # Removing Colormap from plot (but still exists so we can plot rainbows) # Plotting error bars # Convert my wavelengths into the colour map plotted earlier applying the colourbar to "c", # that is, the errorbars, there are 2 components (c[0] and c[1]) because I have error bars in both x and y. # The following loop cycles through our colormap. Without this the lines we are about to create to connect # the points of the scatter plot will not have colours corresponding to the points they are linking. 
# Here we create line for each pair of points # Note that it's "i+2" in order for the last point to be i+1 -because it's up to point i+2, excluding i+2. # To mark ISP with errorbars # Now fiddling with the ticks: If ticks are made to be visible then sent every other tick to be invisible # so bring so space to the axes. If ticks are set to be invisible... well make them invisible. Didn't work to resize my tick labels :( for xtick in xticks: xtick.label1.set_fontsize(ticklabelsize) for ytick in yticks: ytick.label1.set_fontsize(ticklabelsize) # Setting some limits. | 2.773753 | 3 |
setup.py | jayvdb/landinggear | 1 | 6617489 | <gh_stars>1-10
import os
from setuptools import setup, find_packages
def readfile(filename):
    """Return the contents of *filename*, resolved relative to this file."""
    path = os.path.join(os.path.dirname(__file__), filename)
    with open(path) as source:
        return source.read()
# Distribution configuration for setuptools (runs at build/install time).
setup(
    name="landinggear",
    version="0.0.3a0",  # alpha pre-release
    author="<NAME>",
    author_email="<EMAIL>",
    description=("Wheels for aeroplanes:"
                 " a tool to extract packages from the pip cache."),
    long_description=readfile("README.rst"),  # reuse the README as the PyPI long description
    license="MIT",
    keywords=["pip", "wheel", "aeroplane", "cache"],
    url="https://github.com/jerith/landinggear",
    install_requires=["pip", "wheel"],
    packages=find_packages(),  # pick up every package under the project root
    include_package_data=True,
    entry_points={
        # Installs a `landinggear` console command backed by command.main().
        "console_scripts": ['landinggear=landinggear.command:main'],
    },
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: POSIX",
        "Programming Language :: Python",
        "Topic :: Software Development",
    ],
)
| import os
from setuptools import setup, find_packages
def readfile(filename):
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
return f.read()
setup(
name="landinggear",
version="0.0.3a0",
author="<NAME>",
author_email="<EMAIL>",
description=("Wheels for aeroplanes:"
" a tool to extract packages from the pip cache."),
long_description=readfile("README.rst"),
license="MIT",
keywords=["pip", "wheel", "aeroplane", "cache"],
url="https://github.com/jerith/landinggear",
install_requires=["pip", "wheel"],
packages=find_packages(),
include_package_data=True,
entry_points={
"console_scripts": ['landinggear=landinggear.command:main'],
},
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX",
"Programming Language :: Python",
"Topic :: Software Development",
],
) | none | 1 | 1.716961 | 2 | |
store/migrations/0005_remove_order_payment_status_order_complete.py | Sarah358/ecommerce | 0 | 6617490 | <reponame>Sarah358/ecommerce
# Generated by Django 4.0.2 on 2022-03-01 19:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('store', '0004_rename_shippingddress_shippingaddress_and_more'),
]
operations = [
migrations.RemoveField(
model_name='order',
name='payment_status',
),
migrations.AddField(
model_name='order',
name='complete',
field=models.BooleanField(default=False, max_length=1, null=True),
),
]
| # Generated by Django 4.0.2 on 2022-03-01 19:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('store', '0004_rename_shippingddress_shippingaddress_and_more'),
]
operations = [
migrations.RemoveField(
model_name='order',
name='payment_status',
),
migrations.AddField(
model_name='order',
name='complete',
field=models.BooleanField(default=False, max_length=1, null=True),
),
] | en | 0.887445 | # Generated by Django 4.0.2 on 2022-03-01 19:40 | 1.515009 | 2 |
distiller/distiller.py | watson21/Knowledge_Distilling | 0 | 6617491 | from typing import Any, Tuple, Dict
import os, copy, os.path as osp
from colorama import Fore
from time import time
from tqdm import tqdm
import torch
import torch.nn.functional as F
import torch.optim.lr_scheduler as lr_scheduler
from torch import Tensor, nn, optim
from torch.nn.modules.loss import _Loss
from torch.utils.data import DataLoader
from .loss import KDLoss
from distiller.print_utils import print_msg, print_time, desc
# Directory where all model weights/checkpoints are written: <repo-root>/weights.
folder_save = osp.realpath(osp.join(__file__, '..', '..', 'weights'))
if not osp.isdir(folder_save): os.makedirs(folder_save)
print('Model will save in ', Fore.MAGENTA, folder_save)

# Global counter of training batches processed; used to name periodic checkpoints.
batch_num:int = 0

# Reset the global batch counter before starting a fresh training run.
def reset_batch_num(): global batch_num; batch_num = 0
def train(loaders:Dict[str, DataLoader], dataset_sizes:Dict[str, int], device:torch.device,
          teacher:nn.Module, best_acc:float,
          criterion:_Loss, optimizer:optim.Optimizer, scheduler,
          epochs:int, ckpt:int=20
          ) -> Tuple[nn.Module, float]:
    """Plain supervised train/val loop (used for the teacher network).

    Runs `epochs` epochs of alternating 'train'/'val' phases, saves a
    checkpoint every `ckpt` training batches (numbered by the global
    `batch_num`), and keeps a deep copy of the best model by validation
    accuracy.

    Returns:
        (best_model, best_val_accuracy)
    """
    global batch_num
    best_teacher = copy.deepcopy(teacher)
    since = time()
    for epoch in range(1, epochs+1):
        for phase in ('train', 'val'):
            if phase == 'train':
                teacher.train()
                print(Fore.RED); print('Epoch : {:>2d}/{:<2d}'.format(
                    epoch, epochs), Fore.RESET, ' {:>48}'.format('='*46))
            else:
                teacher.eval()
            running_loss = 0.0
            running_corrects = 0.0
            for datas, targets in tqdm(loaders[phase], ncols=64, colour='green',
                                       desc='{:6}'.format(phase).capitalize()):
                if phase == 'train': batch_num += 1
                datas, targets = datas.to(device), targets.to(device)
                optimizer.zero_grad()
                # Gradients are tracked only during the training phase.
                with torch.set_grad_enabled(phase == 'train'):
                    outp = teacher(datas)
                    _, pred = torch.max(outp, 1)
                    loss = criterion(outp, targets)
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                running_loss += loss.item()*datas.size(0)
                running_corrects += torch.sum(pred == targets.data)
                #save checkpoint every `ckpt` batches
                if not batch_num % ckpt:
                    path_save = osp.join(folder_save, '{}_{}.pth'.format(
                        teacher.__class__.__name__, batch_num))
                    torch.save(teacher.state_dict(), path_save)
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            if phase == 'train':
                # Plateau scheduler is driven by accuracy (percentage). #acc
                scheduler.step(100. * epoch_acc)
            print('{} - loss = {:.6f}, accuracy = {:.3f}'.format(
                '{:5}'.format(phase).capitalize(), epoch_loss, 100*epoch_acc))
            if phase == 'val':
                time_elapsed = time() - since
                print('Time: {}m {:.3f}s'.format(
                    time_elapsed // 60, time_elapsed % 60))
            # Track and persist the best-by-val-accuracy snapshot.
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_teacher = copy.deepcopy(teacher)
                path_save = osp.join(folder_save, '{}_best.pth'.format(
                    teacher.__class__.__name__))
                torch.save(teacher.state_dict(), path_save)
    return best_teacher, best_acc
def train_kd_4(loaders:Dict[str, DataLoader], dataset_sizes:Dict[str, int], device:torch.device,
               teacher:nn.Module, student:nn.Module, best_acc:float,
               criterion:KDLoss, optimizer:optim.Optimizer, scheduler:Any,
               epochs:int, ckpt:int ) -> Tuple[nn.Module, float]:
    """Knowledge-distillation loop using *precomputed* soft labels.

    Unlike `train_kd`, each loader batch yields (datas, targets, soft_label)
    where soft_label is the teacher's stored output, so the `teacher` module
    itself is never run here (it is kept in the signature for parity).

    Returns:
        (best_student, best_val_accuracy)
    """
    global batch_num
    model_name = student.__class__.__name__
    student.to(device)
    best_student = copy.deepcopy(student)
    since = time()
    for epoch in range(1, epochs + 1):
        for phase in ('train', 'val'):
            if phase == 'train':
                student.train()
                print(Fore.RED); print('Epoch : {:>2d}/{:<2d}'.format(
                    epoch, epochs), Fore.RESET, ' {:>48}'.format('='*46))
            else:
                student.eval()
            running_loss = 0.0
            running_corrects = 0.0
            with tqdm(loaders[phase], ncols=128, colour='YELLOW',
                      desc=desc(epoch, epochs, phase, 0.0, best_acc)) as stream:
                for idx, (datas, targets, soft_label) in enumerate(loaders[phase], start=1):
                    if phase == 'train': batch_num += 1
                    datas= datas.to(device)
                    targets = targets.to(device)
                    # if is_aug: soft_label = teacher(datas)
                    soft_label = soft_label.to(device)
                    optimizer.zero_grad()
                    with torch.set_grad_enabled(phase == 'train'):
                        outp_S = student(datas) # forward
                        _, pred = torch.max(outp_S, 1)
                        # KD loss combines hard targets and the teacher's soft labels.
                        loss = criterion(outp_S, targets, soft_label)
                        if phase == 'train':
                            loss.backward()
                            optimizer.step()
                            #save checkpoint every `ckpt` training batches
                            if not batch_num % ckpt:
                                path_save = osp.join(folder_save, '{}_{}.pth'.format(model_name, batch_num))
                                torch.save(student.state_dict(), path_save)
                    running_loss += loss.item()*datas.size(0)
                    running_corrects += torch.sum(pred == targets.data)
                    # num_iter_data = idx*loaders[phase].batch_size
                    num_iter_data = idx*datas.size(0)
                    stream.set_description(
                        desc(epoch, epochs, phase,
                             loss=running_loss/num_iter_data,
                             acc= running_corrects / num_iter_data))
                    stream.update()
                epoch_loss = running_loss / dataset_sizes[phase]
                epoch_acc = running_corrects.double() / dataset_sizes[phase]
                stream.set_description(
                    desc(epoch, epochs, phase, epoch_loss, epoch_acc))
            print('{} - loss = {:.6f}, accuracy = {:.3f}'.format(
                '{:5}'.format(phase).capitalize(), epoch_loss, 100*epoch_acc))
            if phase == 'train':
                scheduler.step(100. * epoch_acc) #acc
            else:# phase == 'val'
                time_elapsed = time() - since
                print('Time: {}m {:.3f}s'.format(
                    time_elapsed // 60, time_elapsed % 60))
            # Track and persist the best-by-val-accuracy student snapshot.
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_student = copy.deepcopy(student)
                path_save = osp.join(folder_save, '{}_best.pth'.format(model_name))
                torch.save(student.state_dict(), path_save)
    return best_student, best_acc
def train_kd(loaders:Dict[str, DataLoader], dataset_sizes:Dict[str, int], device:torch.device,
             teacher:nn.Module, student:nn.Module, best_acc:float,
             criterion:KDLoss, optimizer:optim.Optimizer, scheduler:Any,
             epochs:int, ckpt:int ) -> Tuple[nn.Module, float]:
    """Knowledge-distillation loop where the teacher is run on the fly.

    For each batch the frozen teacher (put in eval mode; outputs detached)
    produces soft targets, and the student is optimized with the combined
    KD criterion. Checkpoints every `ckpt` training batches; keeps a deep
    copy of the best student by validation accuracy.

    Returns:
        (best_student, best_val_accuracy)
    """
    global batch_num
    model_name = student.__class__.__name__
    student.to(device)
    teacher.to(device).eval()
    best_student = copy.deepcopy(student)
    since = time()
    for epoch in range(1, epochs + 1):
        for phase in ('train', 'val'):
            if phase == 'train':
                student.train()
                print(Fore.RED); print('Epoch : {:>2d}/{:<2d}'.format(
                    epoch, epochs), Fore.RESET, ' {:>48}'.format('='*46))
            else:
                student.eval()
            running_loss = 0.0
            running_corrects = 0.0
            with tqdm(loaders[phase], ncols=128, colour='YELLOW',
                      desc=desc(epoch, epochs, phase, 0.0, best_acc)) as stream:
                for idx, (datas, targets) in enumerate(loaders[phase], start=1):
                    if phase == 'train': batch_num += 1
                    datas= datas.to(device)
                    targets = targets.to(device)
                    # Teacher forward pass never contributes gradients.
                    with torch.no_grad():
                        outp_T = teacher(datas).detach()
                    optimizer.zero_grad()
                    with torch.set_grad_enabled(phase == 'train'):
                        outp_S = student(datas) # forward
                        _, pred = torch.max(outp_S, 1)
                        loss = criterion(outp_S, targets, outp_T)
                        if phase == 'train':
                            loss.backward()
                            optimizer.step()
                            #save checkpoint every `ckpt` training batches
                            if not batch_num % ckpt:
                                path_save = osp.join(folder_save, '{}_{}.pth'.format(model_name, batch_num))
                                torch.save(student.state_dict(), path_save)
                    running_loss += loss.item()*datas.size(0)
                    running_corrects += torch.sum(pred == targets.data)
                    # num_iter_data = idx*loaders[phase].batch_size
                    num_iter_data = idx*datas.size(0)
                    stream.set_description(
                        desc(epoch, epochs, phase,
                             loss=running_loss/num_iter_data,
                             acc= running_corrects / num_iter_data))
                    stream.update()
                epoch_loss = running_loss / dataset_sizes[phase]
                epoch_acc = running_corrects.double() / dataset_sizes[phase]
                stream.set_description(
                    desc(epoch, epochs, phase, epoch_loss, epoch_acc))
            print('{} - loss = {:.6f}, accuracy = {:.3f}'.format(
                '{:5}'.format(phase).capitalize(), epoch_loss, 100*epoch_acc))
            if phase == 'train':
                scheduler.step(100. * epoch_acc) #acc
            else:# phase == 'val'
                time_elapsed = time() - since
                print('Time: {}m {:.3f}s'.format(
                    time_elapsed // 60, time_elapsed % 60))
            # Track and persist the best-by-val-accuracy student snapshot.
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_student = copy.deepcopy(student)
                path_save = osp.join(folder_save, '{}_best.pth'.format(model_name))
                torch.save(student.state_dict(), path_save)
    return best_student, best_acc
class _Distiller(object):
def __init__(self, teacher:Any, student:nn.Module, criterion:_Loss) -> None:
# super().__init__()
self.teacher = teacher
self.student = student
self.criterion = criterion # KDLoss(T=6, alpha=0.1, reduction='batchmean')
@staticmethod
def distillation_loss(preds:Tensor, labels:Tensor, teacher_preds:Tensor, T, alpha:float) -> Tensor:
return T * T * alpha * F.kl_div(F.log_softmax(preds / T, dim=1),
F.softmax(teacher_preds / T, dim=1),
reduction='batchmean') + (1. - alpha) * F.cross_entropy(preds, labels)
def training_student(self, device:torch.device,
loaders:Dict[str, DataLoader], dataset_sizes:Dict[str, int],
epochs_warmup:int, epochs:int, model_name:str, ckpt:int,
) -> nn.Module:
assert len(loaders) >= 2 and len(dataset_sizes) >= 2, 'please check loaders'
reset_batch_num()
#TODO write desc of input and output
#TODO https://tinyurl.com/8wnknv9p
# all param unless classify layer must freeze at the first train
# for param in teacher.parameters():
# param.requires_grad = False
self.student.to(device)
optimizer = optim.Adam(list(self.student.children())[-1].parameters(), lr=0.001,
betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5)
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.2,
patience=3, verbose=True)
best_acc = 0.0
since = time()
#NOTE train only classify/fullyconnected/head layers
self.student, best_acc = train_kd(loaders, dataset_sizes, device,
self.student, best_acc,
self.criterion, optimizer, scheduler,
epochs_warmup, model_name, ckpt)
print(end='\n')
print_time('FREEZE TRAINING TIME', time() - since)
print_msg("Unfreeze all layers", model_name)
# unfrezz all layer
for param in self.student.parameters():
param.requires_grad = True
optimizer = optim.Adam(self.student.parameters(), lr=0.0001,
betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.2,
patience=2, verbose=True)
#NOTE train all layers of model
self.student, best_acc = train_kd(loaders, dataset_sizes, device,
self.student, best_acc,
self.criterion, optimizer, scheduler,
epochs, model_name, ckpt)
last_student = osp.join(folder_save, '{}_last.pth'.format(model_name))
torch.save(self.student.state_dict(), last_student)
time_elapsed = time() - since
print('ALL NET TRAINING TIME {} m {:.3f}s'.format(
time_elapsed//60, time_elapsed % 60))
return self.student
class Distiller(object):
    r"""End-to-end knowledge-distillation driver.

    Trains a teacher with plain cross-entropy (`training_teacher`), then a
    student against the teacher's soft targets (`training_student`), each in
    two stages (head-only warm-up, then full fine-tune).

    BUGFIXES vs. original:
      * `training_teacher`'s second `train(...)` call omitted `self.device`,
        shifting every positional argument one slot (TypeError at runtime).
      * `__init__` called `torch.cuda.get_device_name(0)` unconditionally,
        which raises on CPU-only machines; now guarded.

    Example::

        teacher = resnet34(pretrained=True, progress=True)
        teacher.fc = nn.Linear(in_features=teacher.fc.in_features,
                               out_features=num_classes, bias=True)
        student = resnet18(pretrained=True, progress=True)
        student.fc = nn.Linear(in_features=student.fc.in_features,
                               out_features=num_classes, bias=True)
        kd_loss = KDLoss(T=6., alpha=0.1, reduction='batchmean')
        distiller = Distiller(
            device=torch.device('cuda:0' if torch.cuda.is_available() else 'cpu'),
            teacher=teacher, teacher_name='resnet34',
            student=student, student_name='resnet18',
            loaders=loaders, dataset_sizes=dataset_sizes,
            S_criterion=kd_loss, T_criterion=nn.CrossEntropyLoss(),
        )
        distiller.training_teacher(0, 10, 20)
        distiller.training_student(20, 30, 20)
    """

    def __init__(self, device:torch.device,
                 teacher:nn.Module, teacher_name:str,
                 student:nn.Module, student_name:str,
                 loaders:Dict[str, DataLoader], dataset_sizes:Dict[str, int],
                 S_criterion:KDLoss, T_criterion:_Loss=nn.CrossEntropyLoss()) -> None:
        self.teacher = teacher.to(device)
        # Renaming the class drives the checkpoint file names in train/train_kd.
        self.teacher.__class__.__name__ = teacher_name
        self.student = student.to(device)
        self.student.__class__.__name__ = student_name
        self.device = device
        assert len(loaders) >= 2; self.loaders = loaders
        assert len(dataset_sizes) >=2; self.dataset_sizes = dataset_sizes
        self.S_criterion = S_criterion.to(device)
        self.T_criterion = T_criterion.to(device)
        print(Fore.RED)
        # Guarded: get_device_name(0) raises on machines without CUDA.
        if torch.cuda.is_available():
            print('Device name {}'.format(torch.cuda.get_device_name(0)), Fore.RESET)
        else:
            print('Device name cpu', Fore.RESET)

    def training_teacher(self, epochs_freeze:int, epochs_unfreeze:int, ckpt:int):
        """Train the teacher: head-only for `epochs_freeze` epochs, then all layers.

        NOTE(review): only the last child module's parameters are given to the
        optimizer in stage 1; earlier layers are not explicitly frozen
        (requires_grad stays True) — they just receive no updates.
        """
        optimizer = optim.Adam(list(self.teacher.children())[-1].parameters(), lr=0.001,
                               betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5)
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.2,
                                                   patience=3, verbose=True)
        since = time()
        best_acc = 0.0
        reset_batch_num()
        self.teacher, best_acc = train(self.loaders, self.dataset_sizes, self.device,
                                       self.teacher, best_acc,
                                       self.T_criterion, optimizer, scheduler,
                                       epochs_freeze, ckpt)
        time_elapsed = time() - since
        print('CLASSIFIER TRAINING TIME {} : {:.3f}'.format(
            time_elapsed//60, time_elapsed % 60))
        print_msg("Unfreeze all layers", self.teacher.__class__.__name__)
        # Stage 2: fine-tune every layer with a smaller learning rate.
        for param in self.teacher.parameters():
            param.requires_grad = True
        optimizer = optim.Adam(self.teacher.parameters(), lr=0.0001,
                               betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.2,
                                                   patience=2, verbose=True)
        # BUGFIX: self.device was missing here, misaligning all arguments.
        self.teacher, best_acc = train(self.loaders, self.dataset_sizes, self.device,
                                       self.teacher, best_acc,
                                       self.T_criterion, optimizer, scheduler,
                                       epochs_unfreeze, ckpt)
        last_teacher = osp.join(folder_save, '{}_last.pth'.format(
            self.teacher.__class__.__name__))
        torch.save(self.teacher.state_dict(), last_teacher)
        time_elapsed = time() - since
        print('TEACHER TRAINING TIME {} m {:.3f}s'.format(
            time_elapsed//60, time_elapsed % 60))

    def training_student(self, epochs_freeze:int, epochs_unfreeze:int, ckpt:int):
        """Distill into the student: head-only warm-up, then full fine-tune."""
        optimizer = optim.Adam(list(self.student.children())[-1].parameters(), lr=0.001,
                               betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5)
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.2,
                                                   patience=3, verbose=True)
        best_acc = 0.0
        model_name = self.student.__class__.__name__
        reset_batch_num()
        since = time()
        # NOTE train only classify/fullyconnected/head layers
        self.student, best_acc = train_kd(self.loaders, self.dataset_sizes, self.device,
                                          self.teacher, self.student, best_acc,
                                          self.S_criterion, optimizer, scheduler,
                                          epochs_freeze, ckpt)
        print(end='\n')
        print_time('FREEZE TRAINING TIME', time() - since)
        print_msg("Unfreeze all layers", model_name)
        # Stage 2: unfreeze everything and fine-tune with a smaller LR.
        for param in self.student.parameters():
            param.requires_grad = True
        optimizer = optim.Adam(self.student.parameters(), lr=0.0001,
                               betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.2,
                                                   patience=2, verbose=True)
        # NOTE train all layers of model
        self.student, best_acc = train_kd(self.loaders, self.dataset_sizes, self.device,
                                          self.teacher, self.student, best_acc,
                                          self.S_criterion, optimizer, scheduler,
                                          epochs_unfreeze, ckpt)
        last_student = osp.join(folder_save, '{}_last.pth'.format(model_name))
        torch.save(self.student.state_dict(), last_student)
        time_elapsed = time() - since
        print('STUDENT TRAINING TIME {} m {:.3f}s'.format(
            time_elapsed//60, time_elapsed % 60))
| from typing import Any, Tuple, Dict
import os, copy, os.path as osp
from colorama import Fore
from time import time
from tqdm import tqdm
import torch
import torch.nn.functional as F
import torch.optim.lr_scheduler as lr_scheduler
from torch import Tensor, nn, optim
from torch.nn.modules.loss import _Loss
from torch.utils.data import DataLoader
from .loss import KDLoss
from distiller.print_utils import print_msg, print_time, desc
folder_save = osp.realpath(osp.join(__file__, '..', '..', 'weights'))
if not osp.isdir(folder_save): os.makedirs(folder_save)
print('Model will save in ', Fore.MAGENTA, folder_save)
batch_num:int = 0
def reset_batch_num(): global batch_num; batch_num = 0
def train(loaders:Dict[str, DataLoader], dataset_sizes:Dict[str, int], device:torch.device,
teacher:nn.Module, best_acc:float,
criterion:_Loss, optimizer:optim.Optimizer, scheduler,
epochs:int, ckpt:int=20
) -> Tuple[nn.Module, float]:
global batch_num
best_teacher = copy.deepcopy(teacher)
since = time()
for epoch in range(1, epochs+1):
for phase in ('train', 'val'):
if phase == 'train':
teacher.train()
print(Fore.RED); print('Epoch : {:>2d}/{:<2d}'.format(
epoch, epochs), Fore.RESET, ' {:>48}'.format('='*46))
else:
teacher.eval()
running_loss = 0.0
running_corrects = 0.0
for datas, targets in tqdm(loaders[phase], ncols=64, colour='green',
desc='{:6}'.format(phase).capitalize()):
if phase == 'train': batch_num += 1
datas, targets = datas.to(device), targets.to(device)
optimizer.zero_grad()
with torch.set_grad_enabled(phase == 'train'):
outp = teacher(datas)
_, pred = torch.max(outp, 1)
loss = criterion(outp, targets)
if phase == 'train':
loss.backward()
optimizer.step()
running_loss += loss.item()*datas.size(0)
running_corrects += torch.sum(pred == targets.data)
#save checkpoint
if not batch_num % ckpt:
path_save = osp.join(folder_save, '{}_{}.pth'.format(
teacher.__class__.__name__, batch_num))
torch.save(teacher.state_dict(), path_save)
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
if phase == 'train':
scheduler.step(100. * epoch_acc) #acc
print('{} - loss = {:.6f}, accuracy = {:.3f}'.format(
'{:5}'.format(phase).capitalize(), epoch_loss, 100*epoch_acc))
if phase == 'val':
time_elapsed = time() - since
print('Time: {}m {:.3f}s'.format(
time_elapsed // 60, time_elapsed % 60))
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_teacher = copy.deepcopy(teacher)
path_save = osp.join(folder_save, '{}_best.pth'.format(
teacher.__class__.__name__))
torch.save(teacher.state_dict(), path_save)
return best_teacher, best_acc
def train_kd_4(loaders:Dict[str, DataLoader], dataset_sizes:Dict[str, int], device:torch.device,
teacher:nn.Module, student:nn.Module, best_acc:float,
criterion:KDLoss, optimizer:optim.Optimizer, scheduler:Any,
epochs:int, ckpt:int ) -> Tuple[nn.Module, float]:
global batch_num
model_name = student.__class__.__name__
student.to(device)
best_student = copy.deepcopy(student)
since = time()
for epoch in range(1, epochs + 1):
for phase in ('train', 'val'):
if phase == 'train':
student.train()
print(Fore.RED); print('Epoch : {:>2d}/{:<2d}'.format(
epoch, epochs), Fore.RESET, ' {:>48}'.format('='*46))
else:
student.eval()
running_loss = 0.0
running_corrects = 0.0
with tqdm(loaders[phase], ncols=128, colour='YELLOW',
desc=desc(epoch, epochs, phase, 0.0, best_acc)) as stream:
for idx, (datas, targets, soft_label) in enumerate(loaders[phase], start=1):
if phase == 'train': batch_num += 1
datas= datas.to(device)
targets = targets.to(device)
# if is_aug: soft_label = teacher(datas)
soft_label = soft_label.to(device)
optimizer.zero_grad()
with torch.set_grad_enabled(phase == 'train'):
outp_S = student(datas) # forward
_, pred = torch.max(outp_S, 1)
loss = criterion(outp_S, targets, soft_label)
if phase == 'train':
loss.backward()
optimizer.step()
#save checkpoint
if not batch_num % ckpt:
path_save = osp.join(folder_save, '{}_{}.pth'.format(model_name, batch_num))
torch.save(student.state_dict(), path_save)
running_loss += loss.item()*datas.size(0)
running_corrects += torch.sum(pred == targets.data)
# num_iter_data = idx*loaders[phase].batch_size
num_iter_data = idx*datas.size(0)
stream.set_description(
desc(epoch, epochs, phase,
loss=running_loss/num_iter_data,
acc= running_corrects / num_iter_data))
stream.update()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
stream.set_description(
desc(epoch, epochs, phase, epoch_loss, epoch_acc))
print('{} - loss = {:.6f}, accuracy = {:.3f}'.format(
'{:5}'.format(phase).capitalize(), epoch_loss, 100*epoch_acc))
if phase == 'train':
scheduler.step(100. * epoch_acc) #acc
else:# phase == 'val'
time_elapsed = time() - since
print('Time: {}m {:.3f}s'.format(
time_elapsed // 60, time_elapsed % 60))
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_student = copy.deepcopy(student)
path_save = osp.join(folder_save, '{}_best.pth'.format(model_name))
torch.save(student.state_dict(), path_save)
return best_student, best_acc
def train_kd(loaders:Dict[str, DataLoader], dataset_sizes:Dict[str, int], device:torch.device,
teacher:nn.Module, student:nn.Module, best_acc:float,
criterion:KDLoss, optimizer:optim.Optimizer, scheduler:Any,
epochs:int, ckpt:int ) -> Tuple[nn.Module, float]:
global batch_num
model_name = student.__class__.__name__
student.to(device)
teacher.to(device).eval()
best_student = copy.deepcopy(student)
since = time()
for epoch in range(1, epochs + 1):
for phase in ('train', 'val'):
if phase == 'train':
student.train()
print(Fore.RED); print('Epoch : {:>2d}/{:<2d}'.format(
epoch, epochs), Fore.RESET, ' {:>48}'.format('='*46))
else:
student.eval()
running_loss = 0.0
running_corrects = 0.0
with tqdm(loaders[phase], ncols=128, colour='YELLOW',
desc=desc(epoch, epochs, phase, 0.0, best_acc)) as stream:
for idx, (datas, targets) in enumerate(loaders[phase], start=1):
if phase == 'train': batch_num += 1
datas= datas.to(device)
targets = targets.to(device)
with torch.no_grad():
outp_T = teacher(datas).detach()
optimizer.zero_grad()
with torch.set_grad_enabled(phase == 'train'):
outp_S = student(datas) # forward
_, pred = torch.max(outp_S, 1)
loss = criterion(outp_S, targets, outp_T)
if phase == 'train':
loss.backward()
optimizer.step()
#save checkpoint
if not batch_num % ckpt:
path_save = osp.join(folder_save, '{}_{}.pth'.format(model_name, batch_num))
torch.save(student.state_dict(), path_save)
running_loss += loss.item()*datas.size(0)
running_corrects += torch.sum(pred == targets.data)
# num_iter_data = idx*loaders[phase].batch_size
num_iter_data = idx*datas.size(0)
stream.set_description(
desc(epoch, epochs, phase,
loss=running_loss/num_iter_data,
acc= running_corrects / num_iter_data))
stream.update()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
stream.set_description(
desc(epoch, epochs, phase, epoch_loss, epoch_acc))
print('{} - loss = {:.6f}, accuracy = {:.3f}'.format(
'{:5}'.format(phase).capitalize(), epoch_loss, 100*epoch_acc))
if phase == 'train':
scheduler.step(100. * epoch_acc) #acc
else:# phase == 'val'
time_elapsed = time() - since
print('Time: {}m {:.3f}s'.format(
time_elapsed // 60, time_elapsed % 60))
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_student = copy.deepcopy(student)
path_save = osp.join(folder_save, '{}_best.pth'.format(model_name))
torch.save(student.state_dict(), path_save)
return best_student, best_acc
class _Distiller(object):
def __init__(self, teacher:Any, student:nn.Module, criterion:_Loss) -> None:
# super().__init__()
self.teacher = teacher
self.student = student
self.criterion = criterion # KDLoss(T=6, alpha=0.1, reduction='batchmean')
@staticmethod
def distillation_loss(preds:Tensor, labels:Tensor, teacher_preds:Tensor, T, alpha:float) -> Tensor:
return T * T * alpha * F.kl_div(F.log_softmax(preds / T, dim=1),
F.softmax(teacher_preds / T, dim=1),
reduction='batchmean') + (1. - alpha) * F.cross_entropy(preds, labels)
def training_student(self, device:torch.device,
loaders:Dict[str, DataLoader], dataset_sizes:Dict[str, int],
epochs_warmup:int, epochs:int, model_name:str, ckpt:int,
) -> nn.Module:
assert len(loaders) >= 2 and len(dataset_sizes) >= 2, 'please check loaders'
reset_batch_num()
#TODO write desc of input and output
#TODO https://tinyurl.com/8wnknv9p
# all param unless classify layer must freeze at the first train
# for param in teacher.parameters():
# param.requires_grad = False
self.student.to(device)
optimizer = optim.Adam(list(self.student.children())[-1].parameters(), lr=0.001,
betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5)
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.2,
patience=3, verbose=True)
best_acc = 0.0
since = time()
#NOTE train only classify/fullyconnected/head layers
self.student, best_acc = train_kd(loaders, dataset_sizes, device,
self.student, best_acc,
self.criterion, optimizer, scheduler,
epochs_warmup, model_name, ckpt)
print(end='\n')
print_time('FREEZE TRAINING TIME', time() - since)
print_msg("Unfreeze all layers", model_name)
# unfrezz all layer
for param in self.student.parameters():
param.requires_grad = True
optimizer = optim.Adam(self.student.parameters(), lr=0.0001,
betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.2,
patience=2, verbose=True)
#NOTE train all layers of model
self.student, best_acc = train_kd(loaders, dataset_sizes, device,
self.student, best_acc,
self.criterion, optimizer, scheduler,
epochs, model_name, ckpt)
last_student = osp.join(folder_save, '{}_last.pth'.format(model_name))
torch.save(self.student.state_dict(), last_student)
time_elapsed = time() - since
print('ALL NET TRAINING TIME {} m {:.3f}s'.format(
time_elapsed//60, time_elapsed % 60))
return self.student
class Distiller(object):
r"""
# create teacher, resnet34
teacher = resnet34(pretrained=True, progress=True)
teacher.fc = nn.Linear(in_features=teacher.fc.in_features,
out_features=num_classes, bias=True)
teacher.load_state_dict(torch.load('path/to/teacher.pth'))
# create student, resnet18
student = resnet18(pretrained=True, progress=True)
student.fc = nn.Linear(in_features=teacher.fc.in_features,
out_features=num_classes, bias=True)
student.load_state_dict(torch.load('path/to/student.pth'))
# create loss function
kd_loss = KDLoss(T=6., alpha=0.1,reduction='batchmean')
distiller = Distiller(
device = torch.device('cuda:0' if torch.cuda.is_available()else 'cpu'),
teacher= teacher, teacher_name= 'resnet34',
student= student, student_name= 'resnet18',
loaders= dict_of_3_dataloaders,
dataset_sizes= dict_of_3_dataset_size,
S_criterion=kd_loss,
T_criterion=nn.CrossEntropyLoss(),
)
# train_teacher
distiller.training_teacher(0, 10, 20)
# train_student
distiller.training_student(20, 30, 20)
"""
def __init__(self, device:torch.device,
teacher:nn.Module, teacher_name:str,
student:nn.Module, student_name:str,
loaders:Dict[str, DataLoader], dataset_sizes:Dict[str, int],
S_criterion:KDLoss, T_criterion:_Loss=nn.CrossEntropyLoss()) -> None:
self.teacher = teacher.to(device)
self.teacher.__class__.__name__ = teacher_name
self.student = student.to(device)
self.student.__class__.__name__ = student_name
self.device = device
assert len(loaders) >= 2; self.loaders = loaders
assert len(dataset_sizes) >=2; self.dataset_sizes = dataset_sizes
self.S_criterion = S_criterion.to(device)
self.T_criterion = T_criterion.to(device)
print(Fore.RED)
print('Device name {}'.format(torch.cuda.get_device_name(0)), Fore.RESET)
def training_teacher(self, epochs_freeze:int, epochs_unfreeze:int, ckpt:int):
optimizer = optim.Adam(list(self.teacher.children())[-1].parameters(), lr=0.001,
betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5)
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.2,
patience=3, verbose=True)
since = time()
best_acc = 0.0
reset_batch_num()
self.teacher, best_acc = train(self.loaders, self.dataset_sizes, self.device,
self.teacher, best_acc,
self.T_criterion, optimizer, scheduler,
epochs_freeze, ckpt)
time_elapsed = time() - since
print('CLASSIFIER TRAINING TIME {} : {:.3f}'.format(
time_elapsed//60, time_elapsed % 60))
print_msg("Unfreeze all layers", self.teacher.__class__.__name__)
# unfrezz all layer
for param in self.teacher.parameters():
param.requires_grad = True
optimizer = optim.Adam(self.teacher.parameters(), lr=0.0001,
betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.2,
patience=2, verbose=True)
self.teacher, best_acc = train(self.loaders, self.dataset_sizes,
self.teacher, best_acc,
self.T_criterion, optimizer, scheduler,
epochs_unfreeze, ckpt)
last_teacher = osp.join(folder_save, '{}_last.pth'.format(
self.teacher.__class__.__name__))
torch.save(self.teacher.state_dict(), last_teacher)
time_elapsed = time() - since
print('TEACHER TRAINING TIME {} m {:.3f}s'.format(
time_elapsed//60, time_elapsed % 60))
def training_student(self, epochs_freeze:int, epochs_unfreeze:int, ckpt:int):
optimizer = optim.Adam(list(self.student.children())[-1].parameters(), lr=0.001,
betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5)
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.2,
patience=3, verbose=True)
best_acc = 0.0
model_name = self.student.__class__.__name__
reset_batch_num()
since = time()
#NOTE train only classify/fullyconnected/head layers
self.student, best_acc = train_kd(self.loaders, self.dataset_sizes, self.device,
self.teacher, self.student, best_acc,
self.S_criterion, optimizer, scheduler,
epochs_freeze, ckpt)
print(end='\n')
print_time('FREEZE TRAINING TIME', time() - since)
print_msg("Unfreeze all layers", model_name)
# unfrezz all layer
for param in self.student.parameters():
param.requires_grad = True
optimizer = optim.Adam(self.student.parameters(), lr=0.0001,
betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.2,
patience=2, verbose=True)
#NOTE train all layers of model
self.student, best_acc = train_kd(self.loaders, self.dataset_sizes, self.device,
self.teacher, self.student, best_acc,
self.S_criterion, optimizer, scheduler,
epochs_unfreeze, ckpt)
last_student = osp.join(folder_save, '{}_last.pth'.format(model_name))
torch.save(self.student.state_dict(), last_student)
time_elapsed = time() - since
print('STUDENT TRAINING TIME {} m {:.3f}s'.format(
time_elapsed//60, time_elapsed % 60))
| en | 0.541563 | #save checkpoint #acc # if is_aug: soft_label = teacher(datas) # forward #save checkpoint # num_iter_data = idx*loaders[phase].batch_size #acc # phase == 'val' # forward #save checkpoint # num_iter_data = idx*loaders[phase].batch_size #acc # phase == 'val' # super().__init__() # KDLoss(T=6, alpha=0.1, reduction='batchmean') #TODO write desc of input and output #TODO https://tinyurl.com/8wnknv9p # all param unless classify layer must freeze at the first train # for param in teacher.parameters(): # param.requires_grad = False #NOTE train only classify/fullyconnected/head layers # unfrezz all layer #NOTE train all layers of model # create teacher, resnet34 teacher = resnet34(pretrained=True, progress=True) teacher.fc = nn.Linear(in_features=teacher.fc.in_features, out_features=num_classes, bias=True) teacher.load_state_dict(torch.load('path/to/teacher.pth')) # create student, resnet18 student = resnet18(pretrained=True, progress=True) student.fc = nn.Linear(in_features=teacher.fc.in_features, out_features=num_classes, bias=True) student.load_state_dict(torch.load('path/to/student.pth')) # create loss function kd_loss = KDLoss(T=6., alpha=0.1,reduction='batchmean') distiller = Distiller( device = torch.device('cuda:0' if torch.cuda.is_available()else 'cpu'), teacher= teacher, teacher_name= 'resnet34', student= student, student_name= 'resnet18', loaders= dict_of_3_dataloaders, dataset_sizes= dict_of_3_dataset_size, S_criterion=kd_loss, T_criterion=nn.CrossEntropyLoss(), ) # train_teacher distiller.training_teacher(0, 10, 20) # train_student distiller.training_student(20, 30, 20) # unfrezz all layer #NOTE train only classify/fullyconnected/head layers # unfrezz all layer #NOTE train all layers of model | 2.008888 | 2 |
src/mcare_backend/experts/models.py | BuildForSDG/Team-108-Product | 0 | 6617492 | from django.db import models
from django.contrib.auth import get_user_model
from django.conf import settings
def get_custom_user():
    """Return the sentinel user named 'deleted', creating it on first use.

    Presumably used as a stand-in owner for records whose original user
    was removed (the username suggests so) -- TODO confirm with callers.

    Returns:
        user -- the 'deleted' user instance
    """
    return get_user_model().objects.get_or_create(username='deleted')[0]
class ClassModules(models.Model):
    """A learning module: a titled article that can be attached to classes
    (see ExpertClass.class_modules).

    Fields:
        title   -- short module title (max 50 chars)
        article -- full module body text
    """
    title = models.CharField(max_length=50)
    # Free-form module content.
    article = models.TextField()
    def __str__(self):
        # Human-readable label used by the Django admin and shell.
        return self.title
class ExpertClass(models.Model):
    """A class/course that experts run and patients can join.

    Fields:
        name          -- class name (max 50 chars)
        description   -- free-form description
        members       -- patients enrolled in the class (optional)
        class_modules -- learning modules attached to the class (optional)
    """
    name = models.CharField(max_length=50)
    description = models.TextField()
    # Lazy string reference avoids a circular import with the patients app.
    members = models.ManyToManyField(
        'patients.PatientProfile', blank=True)
    class_modules = models.ManyToManyField(ClassModules, blank=True)
    def __str__(self):
        # Human-readable label used by the Django admin and shell.
        return self.name
class ExpertProfile(models.Model):
    """Profile data for a user whose role is 'Expert'.

    One-to-one with the project user model (choices limited to
    role='Expert'); deleting the user cascades to the profile.
    Reverse accessor from the user object: ``user.expert_profile``.
    """
    user = models.OneToOneField(settings.AUTH_USER_MODEL,
                                limit_choices_to={'role': 'Expert'},
                                on_delete=models.CASCADE,
                                related_name="expert_profile")
    bio = models.TextField(null=True, blank=True)
    gender = models.CharField(max_length=10, null=True, blank=True)
    # Messages linked to this expert -- NOTE(review): direction/ownership
    # of the relation is not visible here; confirm against patients.Messages.
    message = models.ManyToManyField(
        'patients.Messages', blank=True)
    assigned_patients = models.ManyToManyField(
        'patients.PatientProfile', blank=True)
    list_of_classes = models.ManyToManyField(
        'ExpertClass', blank=True)
    def __str__(self):
        # Human-readable label used by the Django admin and shell.
        return self.user.username
| from django.db import models
from django.contrib.auth import get_user_model
from django.conf import settings
def get_custom_user():
"""Generates as instance of user
Returns:
user -- an instance of the user
"""
return get_user_model().objects.get_or_create(username='deleted')[0]
class ClassModules(models.Model):
"""
A django model that handles the Class Modules
Attributes:
attr1 (str): Inherits from generic models
Inheritance:
models.Model
"""
title = models.CharField(max_length=50)
article = models.TextField()
def __str__(self):
return self.title
class ExpertClass(models.Model):
"""
A Model Class for holding the expert class
Attributes:
attr1 (cls): a generic model class
Inheritance:
models.Model
"""
name = models.CharField(max_length=50)
description = models.TextField()
members = models.ManyToManyField(
'patients.PatientProfile', blank=True)
class_modules = models.ManyToManyField(ClassModules, blank=True)
def __str__(self):
return self.name
class ExpertProfile(models.Model):
"""A django model for expert profile
Arguments:
models {Generic Model} -- A generic model class
"""
user = models.OneToOneField(settings.AUTH_USER_MODEL,
limit_choices_to={'role': 'Expert'},
on_delete=models.CASCADE,
related_name="expert_profile")
bio = models.TextField(null=True, blank=True)
gender = models.CharField(max_length=10, null=True, blank=True)
message = models.ManyToManyField(
'patients.Messages', blank=True)
assigned_patients = models.ManyToManyField(
'patients.PatientProfile', blank=True)
list_of_classes = models.ManyToManyField(
'ExpertClass', blank=True)
def __str__(self):
return self.user.username
| en | 0.748319 | Generates as instance of user Returns: user -- an instance of the user A django model that handles the Class Modules Attributes: attr1 (str): Inherits from generic models Inheritance: models.Model A Model Class for holding the expert class Attributes: attr1 (cls): a generic model class Inheritance: models.Model A django model for expert profile Arguments: models {Generic Model} -- A generic model class | 2.712684 | 3 |
Image generation codes/preprocessing.py | gescrig/CCSN-GW-detection-with-DL-methods | 0 | 6617493 | <gh_stars>0
import gwpy
import numpy as np
def compute_asd(signal):
    """Estimate the amplitude spectral density of a time series.

    A spectrogram (4 s FFTs, 2 s overlap, Hanning window) is square-rooted
    into ASD units, and the median (50th percentile) over time is returned
    as the ASD estimate.  `signal` must provide gwpy's `spectrogram2`.
    """
    spectrogram = signal.spectrogram2(fftlength=4, overlap=2, window='hanning')
    return (spectrogram ** 0.5).percentile(50)
def load_asd_from_file(filename):
    """Load a two-column (frequency, ASD) text file into a FrequencySeries.

    Column 0 holds frequencies, column 1 the ASD values, matching the
    layout written by asd_calculation().
    """
    data = np.loadtxt(filename)
    frequencies, values = data[:, 0], data[:, 1]
    return gwpy.frequencyseries.FrequencySeries(values, frequencies=frequencies)
def asd_calculation(signal,gps_ini,gps_end,file_path,det):
    """Return the ASD for a detector segment, caching it on disk.

    The cache file is named <det>_ASD_<gps_ini>_<duration>.dat inside
    file_path.  If it already exists it is loaded; otherwise the ASD is
    computed from `signal` and written out as two text columns
    (frequency, ASD) so the next call can reuse it.

    Args:
        signal:    time series covering the segment; must provide gwpy's
                   `spectrogram2` (see compute_asd).
        gps_ini:   segment start GPS time (part of the cache file name).
        gps_end:   segment end GPS time (only the duration is used).
        file_path: directory prefix (expected to end with a separator).
        det:       detector label used in the file name.
    """
    import os
    dur = gps_end - gps_ini
    filename = det + '_ASD_'+str(gps_ini)+'_'+str(dur)+'.dat'
    #Check if file exists
    if os.path.exists(file_path+filename):
        print('File exists - loading')
        asd = load_asd_from_file(file_path+filename)
    else:
        print('Computing ASD for %s'%det)
        asd = compute_asd(signal)
        # Persist as plain text: column 0 = frequency, column 1 = ASD value.
        np.savetxt(file_path+filename,np.c_[asd.frequencies.value,asd.value], fmt='%.18e', delimiter=' ', newline='\n')
    return asd
def Q_to_image(sig_qt, m=-1):
    """Rescale a Q-transform array to 8-bit image values.

    The array is shifted so its minimum becomes 0, scaled by 255 over
    either its own maximum (m == -1, the default) or the caller-supplied
    maximum m, and truncated to uint8.
    """
    shifted = sig_qt - sig_qt.min()
    scale_max = shifted.max() if m == -1 else m
    return np.uint8(shifted * 255 / scale_max)
| import gwpy
import numpy as np
def compute_asd(signal):
asd = signal.spectrogram2(fftlength=4, overlap=2, window='hanning') ** (1/2.)
asd = asd.percentile(50)
return asd
def load_asd_from_file(filename):
asd = np.loadtxt(filename)
f = asd[:,0]
asd = asd[:,1]
asd= gwpy.frequencyseries.FrequencySeries(asd,frequencies=f)
return asd
def asd_calculation(signal,gps_ini,gps_end,file_path,det):
import os
dur = gps_end - gps_ini
filename = det + '_ASD_'+str(gps_ini)+'_'+str(dur)+'.dat'
#Check if file exists
if os.path.exists(file_path+filename):
print('File exists - loading')
asd = load_asd_from_file(file_path+filename)
else:
print('Computing ASD for %s'%det)
asd = compute_asd(signal)
np.savetxt(file_path+filename,np.c_[asd.frequencies.value,asd.value], fmt='%.18e', delimiter=' ', newline='\n')
return asd
def Q_to_image(sig_qt,m=-1):
sig_qt_norm=sig_qt-sig_qt.min()
if m == -1:
sig_qt_norm=np.uint8(sig_qt_norm*255/sig_qt_norm.max())
else:
sig_qt_norm=np.uint8(sig_qt_norm*255/m)
return sig_qt_norm | en | 0.532162 | #Check if file exists | 2.300003 | 2 |
523. Continuous Subarray Sum/solution1.py | sunshot/LeetCode | 0 | 6617494 | <reponame>sunshot/LeetCode
from typing import List
class Solution:
    def checkSubarraySum(self, nums: List[int], k: int) -> bool:
        """Return True if nums has a contiguous subarray of length >= 2
        whose sum is a multiple of k.

        Uses the classic prefix-sum-remainder trick: if two prefix sums
        share a remainder mod k, the elements between them sum to a
        multiple of k.  This runs in O(n) time / O(min(n, |k|)) space,
        replacing the original scan that re-examined all prior prefix
        sums (worst case O(n * |k|) per element).

        For k == 0 a "multiple of k" is exactly 0, so raw prefix sums are
        compared instead of remainders.  Negative k works unchanged:
        Python's % keeps remainders consistent, and equal remainders
        still imply a difference divisible by k.
        """
        # Remainder (or raw prefix when k == 0) -> first index it was
        # seen at; the virtual empty prefix before index 0 maps to -1.
        first_seen = {0: -1}
        prefix = 0
        for i, num in enumerate(nums):
            prefix += num
            key = prefix % k if k != 0 else prefix
            if key in first_seen:
                # Same remainder seen earlier: the subarray between the
                # two positions is a multiple of k.  Require length >= 2.
                if i - first_seen[key] >= 2:
                    return True
                # Keep the earliest index to maximise future lengths.
            else:
                first_seen[key] = i
        return False
if __name__== '__main__':
solution = Solution()
nums = [23,2,4,6,7]
k = -6
print(solution.checkSubarraySum(nums, k)) | from typing import List
class Solution:
def checkSubarraySum(self, nums: List[int], k: int) -> bool:
if not nums or len(nums) <= 1:
return False
if k < 0:
k = -k
currSum = 0
lastSum = 0
d = set()
for i, num in enumerate(nums):
if i == 1:
d.add(0)
elif i > 1:
d.add(lastSum)
lastSum = currSum
currSum += num
if k == 0:
if currSum in d:
return True
continue
if i > 0 and currSum % k == 0:
return True
n = currSum // k
if n+1 > len(d):
for x in d:
target = currSum - x
if target % k == 0:
return True
else:
for i in range(n+1):
target = currSum - i*k
if target in d:
return True
return False
if __name__== '__main__':
solution = Solution()
nums = [23,2,4,6,7]
k = -6
print(solution.checkSubarraySum(nums, k)) | none | 1 | 3.367264 | 3 | |
src/dataProcessing/entityCounts.py | jmshen1994/SetExpan | 36 | 6617495 | <gh_stars>10-100
'''
__author__: <NAME>, <NAME>
__description__: generate entity count information
Input: 1) the sentence.json.raw, 2) a map from unnormalized entity surface name to eid
Output: 1) the a map file from entity surface name to its occurrence counts in corpus
__latest_updates__: 08/23/2017
'''
import sys
import json
# Dataset name (first CLI argument); selects the ../../data/<name>/ tree.
data=sys.argv[1]
jsonfname = '../../data/'+data+'/source/sentences.json'
mapfname = '../../data/'+data+'/intermediate/entity2id.txt'
countfname = '../../data/'+data+'/intermediate/entityCount.txt'
with open(jsonfname, 'r') as jsonf, open(mapfname, 'r') as mapf, open(countfname, 'w') as countf:
    # eid -> entity surface name (entity2id.txt rows are "name\teid").
    map = {}
    # entity surface name -> number of mentions across the corpus.
    countMap = {}
    for line in mapf:
        seg = line.strip('\r\n').split('\t')
        map[int(seg[1])] = seg[0]
    # One JSON sentence per line; count every entity mention it contains.
    for line in jsonf:
        sentinfo = json.loads(line)
        for em in sentinfo['entityMentions']:
            key = map[em['entityId']]
            if key in countMap:
                countMap[key] += 1
            else:
                countMap[key] = 1
    # Emit "<name>\t<count>" rows, most frequent first.
    for k in sorted(countMap, key=countMap.__getitem__, reverse=True):
        countf.write(k+'\t'+str(countMap[k])+'\n')
| '''
__author__: <NAME>, <NAME>
__description__: generate entity count information
Input: 1) the sentence.json.raw, 2) a map from unnormalized entity surface name to eid
Output: 1) the a map file from entity surface name to its occurrence counts in corpus
__latest_updates__: 08/23/2017
'''
import sys
import json
data=sys.argv[1]
jsonfname = '../../data/'+data+'/source/sentences.json'
mapfname = '../../data/'+data+'/intermediate/entity2id.txt'
countfname = '../../data/'+data+'/intermediate/entityCount.txt'
with open(jsonfname, 'r') as jsonf, open(mapfname, 'r') as mapf, open(countfname, 'w') as countf:
map = {}
countMap = {}
for line in mapf:
seg = line.strip('\r\n').split('\t')
map[int(seg[1])] = seg[0]
for line in jsonf:
sentinfo = json.loads(line)
for em in sentinfo['entityMentions']:
key = map[em['entityId']]
if key in countMap:
countMap[key] += 1
else:
countMap[key] = 1
for k in sorted(countMap, key=countMap.__getitem__, reverse=True):
countf.write(k+'\t'+str(countMap[k])+'\n') | en | 0.572762 | __author__: <NAME>, <NAME> __description__: generate entity count information Input: 1) the sentence.json.raw, 2) a map from unnormalized entity surface name to eid Output: 1) the a map file from entity surface name to its occurrence counts in corpus __latest_updates__: 08/23/2017 | 3.042927 | 3 |
web-s-py/src/comic.py | smanero/python | 0 | 6617496 | #!/usr/bin/python3
import os
import time
import json
from bs4 import BeautifulSoup
import zipper
##########################################################################
# base website URL
URL : str = 'https://readcomiconline.to'
##########################################################################
# FUNCTION get cover url in a comic
def obtainComicCover(html: BeautifulSoup):
    """Return the absolute URL of a comic's cover image.

    Finds the <img> inside the <div class="col cover"> element and
    prefixes the site root when the src attribute is relative.
    """
    cover_img = html.find('div', attrs={'class': 'col cover'}).find('img', src=True)
    src = cover_img['src']
    return src if src.startswith('http') else URL + src
##########################################################################
# FUNCTION list all issues in a comic
def obtainComicIssues(html:BeautifulSoup):
    """Parse a comic's landing page into a list of issue descriptors.

    Each returned entry is a JSON string {"co_title": ..., "co_url": ...}
    where co_title is a zero-padded slug such as issue-003, annual-002,
    tpb-001 or part-004 derived from the displayed issue title, and
    co_url is the absolute issue URL.  Only issues numbered <= 176 are
    kept (hard cap -- presumably a run-specific limit; confirm before reuse).
    """
    issues_to_do = []
    issue_idx : int = 1
    issues = html.find('ul', attrs={'class':'list'}).findAll('li')
    for issue in issues:
        co = issue.find('a', href=True)
        co_url : str = URL + co['href']
        co_title : str = co.find('span').text #.strip().split()
        co_prx : str = "issue-"
        # Classify the entry from its display title and extract its number.
        if -1 != co_title.find('#'):
            issue_idx = int(co_title[co_title.find('#')+1:])
        elif -1 != co_title.find('Annual'):
            co_prx = "annual-"
            issue_idx = int(co_title[co_title.find('Annual')+7:])
        elif -1 != co_title.find('TPB'):
            co_prx = "tpb-"
            issue_idx = int(co_title[co_title.find('TPB')+4:])
        elif -1 != co_title.find('Part'):
            co_prx = "part-"
            # NOTE(review): slicing to [:-1] drops the title's last char --
            # presumably a trailing ')'; confirm against live page titles.
            issue_idx = int(co_title[co_title.find('Part')+4:-1])
        if issue_idx <= 176:
            co_title = co_prx + str(issue_idx).strip().zfill(3)
            issues_to_do.append('{"co_title":"'+co_title+'","co_url":"'+co_url+'"}')
        # next issue
        issue_idx = issue_idx+1
    return issues_to_do
##########################################################################
#
def obtainIssueImagesByHtml(co_html: BeautifulSoup):
    """Collect page-image URLs from the rendered <div id="divImage"> element.

    Returns a list of JSON strings, each holding an "img_title" of the
    form img-NNN.jpg and the "img_url" taken from the <img src> attribute.
    Any parsing failure is logged and re-raised.

    Bug fix: the original called find('img', src=True), which returns a
    single Tag (or None); iterating a Tag walks its *children*, so the
    loop never saw the image list.  find_all() returns every matching
    <img> as intended.
    """
    images_to_do = []
    try:
        images = co_html.find('div', {'id': 'divImage'}).find_all('img', src=True)
        for img_idx, image in enumerate(images):
            img_url: str = image['src']
            img_title = "img-" + str(img_idx).zfill(3) + ".jpg"
            images_to_do.append('{"img_title":"'+img_title+'","img_url":"'+img_url+'"}')
    except Exception as e:
        print(e)
        # Bare raise preserves the original traceback.
        raise
    return images_to_do
##########################################################################
#
def obtainIssueImagesByText(co_html: BeautifulSoup):
    """Extract page-image URLs from the reader page's inline JavaScript.

    The reader pushes each image URL via lstImages.push("...") calls;
    this scans the prettified HTML text for those calls and returns a
    list of JSON strings {"img_title": "img-NNN.jpg", "img_url": ...}.

    Returns -1 (not a list!) when no lstImages.push call is found --
    callers must treat that as an error sentinel.

    NOTE(review): prettify() re-wraps markup; this assumes each
    lstImages.push("...") call remains findable as a contiguous span.
    Confirm against live pages if extraction starts returning 0 images.
    """
    images_to_do = []
    img_idx : int = 0
    co_html_str : str = co_html.prettify()
    try:
        html_idx : int = co_html_str.find('lstImages.push')
        if (-1 == html_idx):
            print("Error in html -> " + co_html_str)
            return -1
        while -1 != html_idx:
            # Discard everything before the current push(...) call.
            co_html_str = co_html_str[html_idx:]
            # Take up to the closing '");' and strip the call wrapper.
            img_url = co_html_str[:co_html_str.find('");')]
            img_url = img_url.replace('lstImages.push("', '').replace('")', '')
            img_title = "img-" + str(img_idx).zfill(3) + ".jpg"
            images_to_do.append('{"img_title":"'+img_title+'","img_url":"'+img_url+'"}')
            # next img
            co_html_str = co_html_str[co_html_str.find(';'):]
            html_idx = co_html_str.find('lstImages.push')
            img_idx = img_idx+1
    except Exception as e:
        print("Error in html -> " + co_html_str)
        raise(e)
    print(str(len(images_to_do)))
    return images_to_do
##########################################################################
# FUNCTION list all images in a issue
def obtainIssueImages(co_url: str):
    """Fetch one issue page and return its list of image descriptors.

    The page is downloaded once and handed to the text-based extractor
    (obtainIssueImagesByText); the HTML-based extractor exists as an
    unused alternative.
    """
    issue_html: BeautifulSoup = zipper.get_html(co_url)
    return obtainIssueImagesByText(issue_html)
##########################################################################
# FUNCTION Scrape all images in an issue
def scrapeIssue(co_title:str, co_url:str, dir:str):
    """Download every page image of one issue into dir/<co_title>/.

    Creates the issue directory if missing, resolves the image list via
    obtainIssueImages, and downloads each image with a 3 s pause between
    requests to avoid hammering the server.
    """
    print(" " + co_title + ": " + co_url)
    # create directory for an issue
    co_dir = dir + "/" + co_title
    if not os.path.exists(co_dir):
        os.makedirs(co_dir)
    # obtain images to get in an issue
    images_to_do = obtainIssueImages(co_url)
    for image_to_do in images_to_do:
        # Each descriptor is a small JSON string built by the extractor.
        image = json.loads(image_to_do)
        img_title = image["img_title"]
        img_url = image["img_url"]
        print(img_title + ": " + img_url)
        zipper.get_image(img_url, co_dir + "/" + img_title)
        # Throttle between image downloads.
        time.sleep(3)
##########################################################################
# FUNCTION Scrape all issues in a comic
def scrapeComic(html:BeautifulSoup, dir:str):
    """Scrape a whole comic: cover image plus every listed issue.

    Saves the cover as dir/cover.jpg, then iterates the issue list from
    obtainComicIssues, scraping each issue with a 5 s pause between
    issues to stay polite to the server.
    """
    # obtain cover of the comic
    img_url = obtainComicCover(html)
    cover_file = dir + "/cover.jpg"
    zipper.get_image(img_url, cover_file)
    print("title: " + dir + "cover: " + img_url)
    # obtain array of issues to scrape
    issues_to_do = obtainComicIssues(html)
    for issue_to_do in issues_to_do:
        # Each descriptor is a small JSON string built by obtainComicIssues.
        issue = json.loads(issue_to_do)
        co_title = issue["co_title"]
        co_url = issue["co_url"]
        scrapeIssue(co_title, co_url, dir)
        # Throttle between issues.
        time.sleep(5)
##########################################################################
# execute ./bin/python3 src/comic.py
#Mighty-Thor-At-the-Gates-of-Valhalla
#Conan-the-Barbarian-1970
#The-Savage-Sword-Of-Conan
#Conan-the-Barbarian-2019
#Red-Sonja-Omnibus
def main():
    """Scrape one hard-coded comic title and pack it into a .cbz archive.

    Any failure is caught and printed rather than propagated, so a partial
    scrape leaves its directory on disk for inspection.
    """
    try:
        # comic to scrape
        title : str = 'Conan-the-Barbarian-1970'
        # dir destiny: sanitise the title into a filesystem-safe directory name
        dir = title.replace(':', '_').replace(',', '_').replace(' ', '_').replace('(', '').replace(')', '')
        if not os.path.exists(dir):
            os.makedirs(dir)
        # scrape all comic items
        html = zipper.get_html(URL + "/Comic/" + title)
        scrapeComic(html, dir)
        # zip result directory with cbz extension
        zipper.zip(dir, dir + '.cbz')
    except Exception as e:
        print(e)
if __name__ == "__main__":
    main()
| #!/usr/bin/python3
import os
import time
import json
from bs4 import BeautifulSoup
import zipper
##########################################################################
# base website URL
URL : str = 'https://readcomiconline.to'
##########################################################################
# FUNCTION get cover url in a comic
def obtainComicCover(html:BeautifulSoup):
cover = html.find('div', attrs={'class':'col cover'}).find('img', src=True)
img_url:str = cover['src']
if not(img_url.startswith('http')):
img_url = URL + img_url
return img_url
##########################################################################
# FUNCTION list all issues in a comic
def obtainComicIssues(html:BeautifulSoup):
issues_to_do = []
issue_idx : int = 1
issues = html.find('ul', attrs={'class':'list'}).findAll('li')
for issue in issues:
co = issue.find('a', href=True)
co_url : str = URL + co['href']
co_title : str = co.find('span').text #.strip().split()
co_prx : str = "issue-"
if -1 != co_title.find('#'):
issue_idx = int(co_title[co_title.find('#')+1:])
elif -1 != co_title.find('Annual'):
co_prx = "annual-"
issue_idx = int(co_title[co_title.find('Annual')+7:])
elif -1 != co_title.find('TPB'):
co_prx = "tpb-"
issue_idx = int(co_title[co_title.find('TPB')+4:])
elif -1 != co_title.find('Part'):
co_prx = "part-"
issue_idx = int(co_title[co_title.find('Part')+4:-1])
if issue_idx <= 176:
co_title = co_prx + str(issue_idx).strip().zfill(3)
issues_to_do.append('{"co_title":"'+co_title+'","co_url":"'+co_url+'"}')
# next issue
issue_idx = issue_idx+1
return issues_to_do
##########################################################################
#
def obtainIssueImagesByHtml(co_html: BeautifulSoup):
images_to_do = []
img_idx : int = 0
try:
images = co_html.find('div', {'id':'divImage'}).find('img', src=True)
for image in images:
img_url:str = image['src']
img_title = "img-" + str(img_idx).zfill(3) + ".jpg"
images_to_do.append('{"img_title":"'+img_title+'","img_url":"'+img_url+'"}')
# next img
img_idx = img_idx+1
except Exception as e:
print(e)
raise(e)
return images_to_do
##########################################################################
#
def obtainIssueImagesByText(co_html: BeautifulSoup):
images_to_do = []
img_idx : int = 0
co_html_str : str = co_html.prettify()
try:
html_idx : int = co_html_str.find('lstImages.push')
if (-1 == html_idx):
print("Error in html -> " + co_html_str)
return -1
while -1 != html_idx:
co_html_str = co_html_str[html_idx:]
img_url = co_html_str[:co_html_str.find('");')]
img_url = img_url.replace('lstImages.push("', '').replace('")', '')
img_title = "img-" + str(img_idx).zfill(3) + ".jpg"
images_to_do.append('{"img_title":"'+img_title+'","img_url":"'+img_url+'"}')
# next img
co_html_str = co_html_str[co_html_str.find(';'):]
html_idx = co_html_str.find('lstImages.push')
img_idx = img_idx+1
except Exception as e:
print("Error in html -> " + co_html_str)
raise(e)
print(str(len(images_to_do)))
return images_to_do
##########################################################################
# FUNCTION list all images in a issue
def obtainIssueImages(co_url : str):
co_html : BeautifulSoup = zipper.get_html(co_url)
# text treating
images_to_do = obtainIssueImagesByText(co_html)
#if 0 >= len(images_to_do):
# html treating
#obtainIssueImagesByHtml(co_html)
return images_to_do
##########################################################################
# FUNCTION Scrape all images in an issue
def scrapeIssue(co_title:str, co_url:str, dir:str):
print(" " + co_title + ": " + co_url)
# create directory for an issue
co_dir = dir + "/" + co_title
if not os.path.exists(co_dir):
os.makedirs(co_dir)
# obtain images to get in an issue
images_to_do = obtainIssueImages(co_url)
for image_to_do in images_to_do:
image = json.loads(image_to_do)
img_title = image["img_title"]
img_url = image["img_url"]
print(img_title + ": " + img_url)
zipper.get_image(img_url, co_dir + "/" + img_title)
time.sleep(3)
##########################################################################
# FUNCTION Scrape all issues in a comic
def scrapeComic(html:BeautifulSoup, dir:str):
# obtain cover of the comic
img_url = obtainComicCover(html)
cover_file = dir + "/cover.jpg"
zipper.get_image(img_url, cover_file)
print("title: " + dir + "cover: " + img_url)
# obtain array of issues to scrape
issues_to_do = obtainComicIssues(html)
for issue_to_do in issues_to_do:
issue = json.loads(issue_to_do)
co_title = issue["co_title"]
co_url = issue["co_url"]
scrapeIssue(co_title, co_url, dir)
time.sleep(5)
##########################################################################
# execute ./bin/python3 src/comic.py
#Mighty-Thor-At-the-Gates-of-Valhalla
#Conan-the-Barbarian-1970
#The-Savage-Sword-Of-Conan
#Conan-the-Barbarian-2019
#Red-Sonja-Omnibus
def main():
try:
# comic to scrape
title : str = 'Conan-the-Barbarian-1970'
# dir destiny
dir = title.replace(':', '_').replace(',', '_').replace(' ', '_').replace('(', '').replace(')', '')
if not os.path.exists(dir):
os.makedirs(dir)
# scrape all comic items
html = zipper.get_html(URL + "/Comic/" + title)
scrapeComic(html, dir)
# zip result directory with cbz extension
zipper.zip(dir, dir + '.cbz')
except Exception as e:
print(e)
if __name__ == "__main__":
main()
| de | 0.511351 | #!/usr/bin/python3 ########################################################################## # base website URL ########################################################################## # FUNCTION get cover url in a comic ########################################################################## # FUNCTION list all issues in a comic #.strip().split() # next issue ########################################################################## # # next img ########################################################################## # # next img ########################################################################## # FUNCTION list all images in a issue # text treating #if 0 >= len(images_to_do): # html treating #obtainIssueImagesByHtml(co_html) ########################################################################## # FUNCTION Scrape all images in an issue # create directory for an issue # obtain images to get in an issue ########################################################################## # FUNCTION Scrape all issues in a comic # obtain cover of the comic # obtain array of issues to scrape ########################################################################## # execute ./bin/python3 src/comic.py #Mighty-Thor-At-the-Gates-of-Valhalla #Conan-the-Barbarian-1970 #The-Savage-Sword-Of-Conan #Conan-the-Barbarian-2019 #Red-Sonja-Omnibus # comic to scrape # dir destiny # scrape all comic items # zip result directory with cbz extension | 2.89761 | 3 |
Exercise21.py | JBCFurtado/Rabiscos_Em_Python | 0 | 6617497 | n = int(input('Entre com o limite superior: '))
somatorio = 0
for i in range(1, n + 1):
somatorio += (2*i + 5*i)**2
print(somatorio)
| n = int(input('Entre com o limite superior: '))
somatorio = 0
for i in range(1, n + 1):
somatorio += (2*i + 5*i)**2
print(somatorio)
| none | 1 | 3.761871 | 4 | |
migrations/versions/d4d0517e8385_.py | recursecenter/rsvpbot | 5 | 6617498 | """Add created_by, end_time, start_time, and url to events
Revision ID: d4d0517e8385
Revises: <KEY>
Create Date: 2017-05-16 11:35:58.884254
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd4d0517e8385'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
    """Apply: add nullable created_by, end_time, start_time and url columns to event."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('event', sa.Column('created_by', sa.String(), nullable=True))
    op.add_column('event', sa.Column('end_time', sa.TIMESTAMP(timezone=True), nullable=True))
    op.add_column('event', sa.Column('start_time', sa.TIMESTAMP(timezone=True), nullable=True))
    op.add_column('event', sa.Column('url', sa.String(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert: drop the columns added by upgrade() (data in them is lost)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('event', 'url')
    op.drop_column('event', 'start_time')
    op.drop_column('event', 'end_time')
    op.drop_column('event', 'created_by')
    # ### end Alembic commands ###
| """Add created_by, end_time, start_time, and url to events
Revision ID: d4d0517e8385
Revises: <KEY>
Create Date: 2017-05-16 11:35:58.884254
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd4d0517e8385'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('event', sa.Column('created_by', sa.String(), nullable=True))
op.add_column('event', sa.Column('end_time', sa.TIMESTAMP(timezone=True), nullable=True))
op.add_column('event', sa.Column('start_time', sa.TIMESTAMP(timezone=True), nullable=True))
op.add_column('event', sa.Column('url', sa.String(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('event', 'url')
op.drop_column('event', 'start_time')
op.drop_column('event', 'end_time')
op.drop_column('event', 'created_by')
# ### end Alembic commands ###
| en | 0.614106 | Add created_by, end_time, start_time, and url to events Revision ID: d4d0517e8385 Revises: <KEY> Create Date: 2017-05-16 11:35:58.884254 # revision identifiers, used by Alembic. # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### | 1.506596 | 2 |
main.py | milyasyousuf/sql-backup-to-google-drive | 0 | 6617499 |
from pybackup.gdrive import GoogleDriveConn
from pybackup.backup import BuildBackup
from pybackup.notification import Notification
from pybackup import settings as cf
import datetime
class Driver():
    """Orchestrates a SQL backup run: dump, compress, upload, notify."""
    def __init__(self):
        # Backup file name: <db-name>_YYYY_MM_DD.bak
        self.filename =cf.DATABASES["NAME"]+"_"+datetime.datetime.today().strftime('%Y_%m_%d')+".bak"
        self.backup_directory = cf.DIRECTORY["BACKUP_DIRECTORY"]
        self.backup_directory_with_root = cf.DIRECTORY["MSSQL_DATA_DIRECTORY"]
        self.home_directory = cf.DIRECTORY["HOME_DIRECTORY"]
        # Compressed artifact: <db-name>_<date> + configured extension.
        self.compress_file_name = self.filename.split(".")[0]+cf.COMPRESSION_TYPE
        # Full path of the compressed backup that will be uploaded.
        self.destination = self.home_directory+self.backup_directory+self.compress_file_name
    def run_script(self):
        """Run the pipeline: dump the DB, copy, compress, upload to Drive, e-mail."""
        bb = BuildBackup()
        bb.get_mssql_database()
        bb.copy_back_file()
        bb.compress_backup_file()
        dr = GoogleDriveConn()
        filename,file_id = dr.upload_file_gdrive(self.destination)
        noti = Notification()
        noti.send_email(file_id)
# NOTE: import side effect -- loading this module runs the full backup.
drive = Driver()
drive.run_script()
from pybackup.gdrive import GoogleDriveConn
from pybackup.backup import BuildBackup
from pybackup.notification import Notification
from pybackup import settings as cf
import datetime
class Driver():
def __init__(self):
self.filename =cf.DATABASES["NAME"]+"_"+datetime.datetime.today().strftime('%Y_%m_%d')+".bak"
self.backup_directory = cf.DIRECTORY["BACKUP_DIRECTORY"]
self.backup_directory_with_root = cf.DIRECTORY["MSSQL_DATA_DIRECTORY"]
self.home_directory = cf.DIRECTORY["HOME_DIRECTORY"]
self.compress_file_name = self.filename.split(".")[0]+cf.COMPRESSION_TYPE
self.destination = self.home_directory+self.backup_directory+self.compress_file_name
def run_script(self):
bb = BuildBackup()
bb.get_mssql_database()
bb.copy_back_file()
bb.compress_backup_file()
dr = GoogleDriveConn()
filename,file_id = dr.upload_file_gdrive(self.destination)
noti = Notification()
noti.send_email(file_id)
drive = Driver()
drive.run_script() | none | 1 | 2.532519 | 3 | |
code_opener_cli/path_definitions.py | shan7030/code-opener-cli | 2 | 6617500 | """
This file contains the paths for various directories in package
"""
import os
from code_opener_cli.utils.config import DefaultConfiguration
from enum import Enum
class CodeOpenerDirectoryPath(Enum):
    """Filesystem locations used by the package, resolved at import time.

    Members:
        COPEN_ROOT_PATH  -- package root (directory containing this file)
        RESOURCE_PATH    -- <root>/resources
        CONFIG_FILE_PATH -- configuration file inside RESOURCE_PATH, named
                           by DefaultConfiguration.CONFIGURATION_FILE_NAME
    """
    COPEN_ROOT_PATH = os.path.dirname(__file__)
    # Resources Path
    RESOURCE_PATH = os.path.join(COPEN_ROOT_PATH,'resources')
    CONFIG_FILE_PATH = os.path.join(RESOURCE_PATH, DefaultConfiguration.CONFIGURATION_FILE_NAME.value)
This file contains the paths for various directories in package
"""
import os
from code_opener_cli.utils.config import DefaultConfiguration
from enum import Enum
class CodeOpenerDirectoryPath(Enum):
"""
The class containing Paths for tests,package root and other directories in package
"""
COPEN_ROOT_PATH = os.path.dirname(__file__)
# Resources Path
RESOURCE_PATH = os.path.join(COPEN_ROOT_PATH,'resources')
CONFIG_FILE_PATH = os.path.join(RESOURCE_PATH, DefaultConfiguration.CONFIGURATION_FILE_NAME.value) | en | 0.893602 | This file contains the paths for various directories in package The class containing Paths for tests,package root and other directories in package # Resources Path | 2.622687 | 3 |
lisa_flexbe_states_flexbe_behaviors/src/lisa_flexbe_states_flexbe_behaviors/lisa_basic_examples_sm.py | lawrence-iviani/lisa-flexbe-states | 0 | 6617501 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from lisa_flexbe_states_flexbe_states.lisa_utter_actionlib_state import LisaUtterActionState
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Thu Sep 24 2020
@author: <NAME>
'''
class Lisa_Basic_ExamplesSM(Behavior):
'''
Example of basic interaction blocks and usage
'''
def __init__(self):
super(Lisa_Basic_ExamplesSM, self).__init__()
self.name = 'Lisa_Basic_Examples'
# parameters of this behavior
# references to used behaviors
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# [/MANUAL_INIT]
# Behavior comments:
def create(self):
# x:30 y:341, x:130 y:341
_state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
with _state_machine:
# x:314 y:116
OperatableStateMachine.add('Utter_Action',
LisaUtterActionState(sentence='bla'),
transitions={'uttered_all': 'finished', 'timeout': 'failed', 'command_error': 'failed'},
autonomy={'uttered_all': Autonomy.Off, 'timeout': Autonomy.Off, 'command_error': Autonomy.Off},
remapping={'result_message': 'result_message'})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC]
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from lisa_flexbe_states_flexbe_states.lisa_utter_actionlib_state import LisaUtterActionState
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Thu Sep 24 2020
@author: <NAME>
'''
class Lisa_Basic_ExamplesSM(Behavior):
'''
Example of basic interaction blocks and usage
'''
def __init__(self):
super(Lisa_Basic_ExamplesSM, self).__init__()
self.name = 'Lisa_Basic_Examples'
# parameters of this behavior
# references to used behaviors
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# [/MANUAL_INIT]
# Behavior comments:
def create(self):
# x:30 y:341, x:130 y:341
_state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
with _state_machine:
# x:314 y:116
OperatableStateMachine.add('Utter_Action',
LisaUtterActionState(sentence='bla'),
transitions={'uttered_all': 'finished', 'timeout': 'failed', 'command_error': 'failed'},
autonomy={'uttered_all': Autonomy.Off, 'timeout': Autonomy.Off, 'command_error': Autonomy.Off},
remapping={'result_message': 'result_message'})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC] | en | 0.621692 | #!/usr/bin/env python # -*- coding: utf-8 -*- ########################################################### # WARNING: Generated code! # # ************************** # # Manual changes may get lost if file is generated again. # # Only code inside the [MANUAL] tags will be kept. # ########################################################### # Additional imports can be added inside the following tags # [MANUAL_IMPORT] # [/MANUAL_IMPORT] Created on Thu Sep 24 2020 @author: <NAME> Example of basic interaction blocks and usage # parameters of this behavior # references to used behaviors # Additional initialization code can be added inside the following tags # [MANUAL_INIT] # [/MANUAL_INIT] # Behavior comments: # x:30 y:341, x:130 y:341 # Additional creation code can be added inside the following tags # [MANUAL_CREATE] # [/MANUAL_CREATE] # x:314 y:116 # Private functions can be added inside the following tags # [MANUAL_FUNC] # [/MANUAL_FUNC] | 1.894493 | 2 |
old_logen/pylogen/ErrDialog.py | leuschel/logen | 14 | 6617502 | <reponame>leuschel/logen
from tkSimpleDialog import Dialog
from ScrolledText import ScrolledText
from Tkinter import *
import Pmw
def ask_question(master=None, title="Question", msg=""):
e = OkCancelDialog(master=master, Title=title, Msg=msg)
return e.result
def complain_loudly(master=None, title="Error", msg=""):
ErrDialog(master=master, Title=title, Msg=msg)
def ask_NoAnnfileDialog(master=None, Filename=""):
e = NoAnnDialog(master=master,Filename=Filename)
return e.result
def ask_FilterDialog(master=None, Filters="",Heads=[]):
e = FilterDialog(master=master,Filters=Filters,Heads=Heads)
if e.result == "cancel":
return None
else:
return e.filters
class OkCancelDialog(Dialog):
result = None
def __init__(self,master=None, Title="Question", Short="",Msg=""):
self.Title=Title
self.Msg=Msg
self.Short = Short
Dialog.__init__(self,master,title=Title)
def body(self,unused):
self.text = ScrolledText(self)
self.text["height"] = "6"
self.text["width"] = "50"
self.text.pack(side="top",expand="yes",fill="both")
self.text.insert("0.0", self.Msg)
self.text["state"] = "disabled"
self.result = False
def ok(self, unused=None):
self.result = True
Dialog.ok(self)
class NoAnnDialog(Dialog):
result = None
def __init__(self,master=None, Title="No Annotation File Found", Filename="",
Msg="The file you have loaded does not have an associated annotation file"):
self.Title=Title
self.Msg=Msg
self.filename = Filename
#self.Short = Short
Dialog.__init__(self,master,title=self.Title)
def body(self,unused):
self.text = ScrolledText(self)
self.text["height"] = "6"
self.text["width"] = "50"
self.text.pack(side="top",expand="yes",fill="both")
self.text.insert("1.0", self.Msg)
self.text["state"] = "disabled"
self.result = False
def ok(self, unused=None):
self.result = True
Dialog.ok(self)
return True
def buttonbox(self):
box = Frame(self)
w = Button(box, text="Simple BTA", width=10, command=lambda :self.click("simple"), default=ACTIVE)
w1 = Button(box, text="Reset File", width=10, command=lambda :self.click("reset"), default=ACTIVE)
w2 = Button(box, text="Mark Unknown", width=10, command=lambda :self.click("unknown"), default=ACTIVE)
w.pack(side=LEFT, padx=5, pady=5)
w1.pack(side=LEFT, padx=5, pady=5)
w2.pack(side=LEFT, padx=5, pady=5)
#self.bind("<Return>", self.ok)
box.pack()
def click(self, value):
self.result = value
Dialog.ok(self)
class ErrDialog(Dialog):
def __init__(self,master=None, Title="Error", Short="",Msg=""):
self.Title=Title
self.Msg=Msg
self.Short = Short
Dialog.__init__(self,parent=master,title=Title)
def body(self,unused):
self.text = ScrolledText(self)
self.text["height"] = "8"
self.text["width"] = "50"
self.text.pack(side="top",expand="yes",fill="both")
self.text.insert("0.0", self.Msg)
self.text["state"] = "disabled"
self.result = False
def buttonbox(self):
box = Frame(self)
w = Button(box, text="OK", width=10, command=self.ok, default=ACTIVE)
w.pack(side=LEFT, padx=5, pady=5)
self.bind("<Return>", self.ok)
box.pack()
class FilterDialog(Dialog):
result = None
filters = None
def __init__(self,master=None, Title="Add Filter", Filters="",Heads=[]):
self.Title=Title
self.Heads =Heads
self.filters = Filters
Dialog.__init__(self,master,title=self.Title)
def body(self,unused):
box = Frame(self)
#self.listbox = Listbox(box,height=6,bg="white",width=15)
self.slistbox = Pmw.ScrolledListBox(box,listbox_height=6)#,hull_width=150,usehullsize=1,hull_height=150)
self.listbox = self.slistbox._listbox
#self.slistbox = Pmw.ScrolledListBox(box)
for item in self.Heads:
self.listbox.insert(END, item)
self.text = Text(box,height=8,width=40,bg="white")
self.text.insert("end", self.filters)
self.listbox.bind("<Double-Button-1>", self.clickList)
box.pack(side="top")
#self.listbox.pack(side="left")
self.slistbox.pack(side="left")
self.text.pack(side="left")
self.result = False
def default_filter(self, arity):
arity = int(arity)
if arity == 0: return ""
filters = "( dynamic"
### add code here to make dynamic filter of arity
for i in range(1,arity):
filters += " , dynamic"
filters += " )"
return filters
def clickList(self,unused):
i = self.listbox.curselection()[0]
head = self.Heads[int(i)]
if self.text.index("insert") != self.text.index("insert linestart"):
index = self.text.index("insert + 1 line linestart")
else:
index = self.text.index("insert")
(call, arity) = self.Heads[int(i)].split('/')
filString = ":- filter %s%s.\n" %(call, self.default_filter(arity))
self.text.insert(index, filString)
place = self.text.index("%s + %d chars" % (index,len(filString)-3))
self.text.mark_set("insert", place)
self.text.focus_set()
def buttonbox(self):
box = Frame(self)
w = Button(box, text="Ok", width=10, command=lambda :self.click("ok"), default=ACTIVE)
w1 = Button(box, text="Cancel", width=10, command=lambda :self.click("cancel"), default=ACTIVE)
w.pack(side=LEFT, padx=5, pady=5)
w1.pack(side=LEFT, padx=5, pady=5)
#self.bind("<Return>", self.ok)
box.pack(side="bottom")
def click(self, value):
self.result = value
self.filters = self.text.get("1.0","end")
Dialog.ok(self)
if __name__=="__main__":
master = Tk()
a = FilterDialog(master,Heads=["match","regexp"])
print a.filters
#print ask_NoAnnfileDialog(master, Filename="foo.pl")
| from tkSimpleDialog import Dialog
from ScrolledText import ScrolledText
from Tkinter import *
import Pmw
def ask_question(master=None, title="Question", msg=""):
e = OkCancelDialog(master=master, Title=title, Msg=msg)
return e.result
def complain_loudly(master=None, title="Error", msg=""):
ErrDialog(master=master, Title=title, Msg=msg)
def ask_NoAnnfileDialog(master=None, Filename=""):
e = NoAnnDialog(master=master,Filename=Filename)
return e.result
def ask_FilterDialog(master=None, Filters="",Heads=[]):
e = FilterDialog(master=master,Filters=Filters,Heads=Heads)
if e.result == "cancel":
return None
else:
return e.filters
class OkCancelDialog(Dialog):
result = None
def __init__(self,master=None, Title="Question", Short="",Msg=""):
self.Title=Title
self.Msg=Msg
self.Short = Short
Dialog.__init__(self,master,title=Title)
def body(self,unused):
self.text = ScrolledText(self)
self.text["height"] = "6"
self.text["width"] = "50"
self.text.pack(side="top",expand="yes",fill="both")
self.text.insert("0.0", self.Msg)
self.text["state"] = "disabled"
self.result = False
def ok(self, unused=None):
self.result = True
Dialog.ok(self)
class NoAnnDialog(Dialog):
result = None
def __init__(self,master=None, Title="No Annotation File Found", Filename="",
Msg="The file you have loaded does not have an associated annotation file"):
self.Title=Title
self.Msg=Msg
self.filename = Filename
#self.Short = Short
Dialog.__init__(self,master,title=self.Title)
def body(self,unused):
self.text = ScrolledText(self)
self.text["height"] = "6"
self.text["width"] = "50"
self.text.pack(side="top",expand="yes",fill="both")
self.text.insert("1.0", self.Msg)
self.text["state"] = "disabled"
self.result = False
def ok(self, unused=None):
self.result = True
Dialog.ok(self)
return True
def buttonbox(self):
box = Frame(self)
w = Button(box, text="Simple BTA", width=10, command=lambda :self.click("simple"), default=ACTIVE)
w1 = Button(box, text="Reset File", width=10, command=lambda :self.click("reset"), default=ACTIVE)
w2 = Button(box, text="Mark Unknown", width=10, command=lambda :self.click("unknown"), default=ACTIVE)
w.pack(side=LEFT, padx=5, pady=5)
w1.pack(side=LEFT, padx=5, pady=5)
w2.pack(side=LEFT, padx=5, pady=5)
#self.bind("<Return>", self.ok)
box.pack()
def click(self, value):
self.result = value
Dialog.ok(self)
class ErrDialog(Dialog):
def __init__(self,master=None, Title="Error", Short="",Msg=""):
self.Title=Title
self.Msg=Msg
self.Short = Short
Dialog.__init__(self,parent=master,title=Title)
def body(self,unused):
self.text = ScrolledText(self)
self.text["height"] = "8"
self.text["width"] = "50"
self.text.pack(side="top",expand="yes",fill="both")
self.text.insert("0.0", self.Msg)
self.text["state"] = "disabled"
self.result = False
def buttonbox(self):
box = Frame(self)
w = Button(box, text="OK", width=10, command=self.ok, default=ACTIVE)
w.pack(side=LEFT, padx=5, pady=5)
self.bind("<Return>", self.ok)
box.pack()
class FilterDialog(Dialog):
result = None
filters = None
def __init__(self,master=None, Title="Add Filter", Filters="",Heads=[]):
self.Title=Title
self.Heads =Heads
self.filters = Filters
Dialog.__init__(self,master,title=self.Title)
def body(self,unused):
box = Frame(self)
#self.listbox = Listbox(box,height=6,bg="white",width=15)
self.slistbox = Pmw.ScrolledListBox(box,listbox_height=6)#,hull_width=150,usehullsize=1,hull_height=150)
self.listbox = self.slistbox._listbox
#self.slistbox = Pmw.ScrolledListBox(box)
for item in self.Heads:
self.listbox.insert(END, item)
self.text = Text(box,height=8,width=40,bg="white")
self.text.insert("end", self.filters)
self.listbox.bind("<Double-Button-1>", self.clickList)
box.pack(side="top")
#self.listbox.pack(side="left")
self.slistbox.pack(side="left")
self.text.pack(side="left")
self.result = False
def default_filter(self, arity):
arity = int(arity)
if arity == 0: return ""
filters = "( dynamic"
### add code here to make dynamic filter of arity
for i in range(1,arity):
filters += " , dynamic"
filters += " )"
return filters
def clickList(self,unused):
i = self.listbox.curselection()[0]
head = self.Heads[int(i)]
if self.text.index("insert") != self.text.index("insert linestart"):
index = self.text.index("insert + 1 line linestart")
else:
index = self.text.index("insert")
(call, arity) = self.Heads[int(i)].split('/')
filString = ":- filter %s%s.\n" %(call, self.default_filter(arity))
self.text.insert(index, filString)
place = self.text.index("%s + %d chars" % (index,len(filString)-3))
self.text.mark_set("insert", place)
self.text.focus_set()
def buttonbox(self):
box = Frame(self)
w = Button(box, text="Ok", width=10, command=lambda :self.click("ok"), default=ACTIVE)
w1 = Button(box, text="Cancel", width=10, command=lambda :self.click("cancel"), default=ACTIVE)
w.pack(side=LEFT, padx=5, pady=5)
w1.pack(side=LEFT, padx=5, pady=5)
#self.bind("<Return>", self.ok)
box.pack(side="bottom")
def click(self, value):
self.result = value
self.filters = self.text.get("1.0","end")
Dialog.ok(self)
if __name__=="__main__":
master = Tk()
a = FilterDialog(master,Heads=["match","regexp"])
print a.filters
#print ask_NoAnnfileDialog(master, Filename="foo.pl") | en | 0.401099 | #self.Short = Short #self.bind("<Return>", self.ok) #self.listbox = Listbox(box,height=6,bg="white",width=15) #,hull_width=150,usehullsize=1,hull_height=150) #self.slistbox = Pmw.ScrolledListBox(box) #self.listbox.pack(side="left") ### add code here to make dynamic filter of arity #self.bind("<Return>", self.ok) #print ask_NoAnnfileDialog(master, Filename="foo.pl") | 2.884472 | 3 |
mfutil/cli_tools/mfprogress.py | metwork-framework/mfutil | 0 | 6617503 | #!/usr/bin/env python3
import argparse
import threading
import time
import sys
import os
from mfutil.cli import MFProgress
from mfutil.misc import kill_process_and_children
from mfutil.bash_wrapper import BashWrapper
import rich
import psutil
from rich.panel import Panel
DESCRIPTION = "execute a command with a nice progressbar"
TIMEOUT_FLAG = False
STOP_FLAG = False
def thread_advance(progress, tid, timeout):
global TIMEOUT_FLAG, STOP_FLAG
i = 1
while i <= timeout and not STOP_FLAG:
if i < timeout:
progress.update(tid, advance=1)
time.sleep(1)
i = i + 1
if not STOP_FLAG:
# timeout
TIMEOUT_FLAG = True
current_pid = os.getpid()
process = psutil.Process(current_pid)
children = process.children(recursive=False)
[kill_process_and_children(x.pid) for x in children]
def main():
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument("COMMAND", help="command to execute")
parser.add_argument("COMMAND_ARG", nargs='*',
help="command arg")
parser.add_argument("--timeout",
help="timeout (in seconds)", type=int,
default=180)
parser.add_argument("--title",
help="title of the command", type=str,
default="title of the command")
parser.add_argument("--silent", action="store_true",
help="if set, we don't add a debug output in case of "
"errors")
args = parser.parse_args()
command = " ".join([args.COMMAND] + args.COMMAND_ARG)
status = True
timeout = False
with MFProgress() as progress:
t = progress.add_task(args.title, total=args.timeout)
x = threading.Thread(target=thread_advance, args=(progress, t,
args.timeout),
daemon=True)
x.start()
bw = BashWrapper(command)
STOP_FLAG = True # noqa:
if bw:
progress.complete_task(t)
else:
if TIMEOUT_FLAG:
# timeout
progress.complete_task_nok(t, "timeout")
timeout = True
else:
progress.complete_task_nok(t, "bad exit code")
status = False
if not status:
if not args.silent and not timeout:
rich.print(Panel("[bold]Error details:[/bold]\n%s" % # noqa: E999
str(bw)))
sys.exit(1)
else:
sys.exit(0)
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
import argparse
import threading
import time
import sys
import os
from mfutil.cli import MFProgress
from mfutil.misc import kill_process_and_children
from mfutil.bash_wrapper import BashWrapper
import rich
import psutil
from rich.panel import Panel
DESCRIPTION = "execute a command with a nice progressbar"
TIMEOUT_FLAG = False
STOP_FLAG = False
def thread_advance(progress, tid, timeout):
global TIMEOUT_FLAG, STOP_FLAG
i = 1
while i <= timeout and not STOP_FLAG:
if i < timeout:
progress.update(tid, advance=1)
time.sleep(1)
i = i + 1
if not STOP_FLAG:
# timeout
TIMEOUT_FLAG = True
current_pid = os.getpid()
process = psutil.Process(current_pid)
children = process.children(recursive=False)
[kill_process_and_children(x.pid) for x in children]
def main():
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument("COMMAND", help="command to execute")
parser.add_argument("COMMAND_ARG", nargs='*',
help="command arg")
parser.add_argument("--timeout",
help="timeout (in seconds)", type=int,
default=180)
parser.add_argument("--title",
help="title of the command", type=str,
default="title of the command")
parser.add_argument("--silent", action="store_true",
help="if set, we don't add a debug output in case of "
"errors")
args = parser.parse_args()
command = " ".join([args.COMMAND] + args.COMMAND_ARG)
status = True
timeout = False
with MFProgress() as progress:
t = progress.add_task(args.title, total=args.timeout)
x = threading.Thread(target=thread_advance, args=(progress, t,
args.timeout),
daemon=True)
x.start()
bw = BashWrapper(command)
STOP_FLAG = True # noqa:
if bw:
progress.complete_task(t)
else:
if TIMEOUT_FLAG:
# timeout
progress.complete_task_nok(t, "timeout")
timeout = True
else:
progress.complete_task_nok(t, "bad exit code")
status = False
if not status:
if not args.silent and not timeout:
rich.print(Panel("[bold]Error details:[/bold]\n%s" % # noqa: E999
str(bw)))
sys.exit(1)
else:
sys.exit(0)
if __name__ == '__main__':
main()
| en | 0.144871 | #!/usr/bin/env python3 # timeout # noqa: # timeout # noqa: E999 | 2.426739 | 2 |
ardupilot/Tools/LogAnalyzer/tests/TestPitchRollCoupling.py | quadrotor-IITKgp/emulate_GPS | 1 | 6617504 | from LogAnalyzer import Test,TestResult
import DataflashLog
import collections
class TestPitchRollCoupling(Test):
'''test for divergence between input and output pitch/roll, i.e. mechanical failure or bad PID tuning'''
# TODO: currently we're only checking for roll/pitch outside of max lean angle, will come back later to analyze roll/pitch in versus out values
def __init__(self):
Test.__init__(self)
self.name = "Pitch/Roll"
self.enable = True # TEMP
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
if logdata.vehicleType != "ArduCopter":
self.result.status = TestResult.StatusType.NA
return
if not "ATT" in logdata.channels:
self.result.status = TestResult.StatusType.UNKNOWN
self.result.statusMessage = "No ATT log data"
return
# figure out where each mode begins and ends, so we can treat auto and manual modes differently and ignore acro/tune modes
autoModes = ["RTL","AUTO","LAND","LOITER","GUIDED","CIRCLE","OF_LOITER","HYBRID"] # use NTUN DRol+DPit
manualModes = ["STABILIZE","DRIFT","ALTHOLD","ALT_HOLD","POSHOLD"] # use CTUN RollIn/DesRoll + PitchIn/DesPitch
ignoreModes = ["ACRO","SPORT","FLIP","AUTOTUNE",""] # ignore data from these modes
autoSegments = [] # list of (startLine,endLine) pairs
manualSegments = [] # list of (startLine,endLine) pairs
orderedModes = collections.OrderedDict(sorted(logdata.modeChanges.items(), key=lambda t: t[0]))
isAuto = False # we always start in a manual control mode
prevLine = 0
mode = ""
for line,modepair in orderedModes.iteritems():
mode = modepair[0].upper()
if prevLine == 0:
prevLine = line
if mode in autoModes:
if not isAuto:
manualSegments.append((prevLine,line-1))
prevLine = line
isAuto = True
elif mode in manualModes:
if isAuto:
autoSegments.append((prevLine,line-1))
prevLine = line
isAuto = False
elif mode in ignoreModes:
if isAuto:
autoSegments.append((prevLine,line-1))
else:
manualSegments.append((prevLine,line-1))
prevLine = 0
else:
raise Exception("Unknown mode in TestPitchRollCoupling: %s" % mode)
# and handle the last segment, which doesn't have an ending
if mode in autoModes:
autoSegments.append((prevLine,logdata.lineCount))
elif mode in manualModes:
manualSegments.append((prevLine,logdata.lineCount))
# figure out max lean angle, the ANGLE_MAX param was added in AC3.1
maxLeanAngle = 45.0
if "ANGLE_MAX" in logdata.parameters:
maxLeanAngle = logdata.parameters["ANGLE_MAX"] / 100.0
maxLeanAngleBuffer = 10 # allow a buffer margin
# ignore anything below this altitude, to discard any data while not flying
minAltThreshold = 2.0
# look through manual+auto flight segments
# TODO: filter to ignore single points outside range?
(maxRoll, maxRollLine) = (0.0, 0)
(maxPitch, maxPitchLine) = (0.0, 0)
for (startLine,endLine) in manualSegments+autoSegments:
# quick up-front test, only fallover into more complex line-by-line check if max()>threshold
rollSeg = logdata.channels["ATT"]["Roll"].getSegment(startLine,endLine)
pitchSeg = logdata.channels["ATT"]["Pitch"].getSegment(startLine,endLine)
if not rollSeg.dictData and not pitchSeg.dictData:
continue
# check max roll+pitch for any time where relative altitude is above minAltThreshold
roll = max(abs(rollSeg.min()), abs(rollSeg.max()))
pitch = max(abs(pitchSeg.min()), abs(pitchSeg.max()))
if (roll>(maxLeanAngle+maxLeanAngleBuffer) and abs(roll)>abs(maxRoll)) or (pitch>(maxLeanAngle+maxLeanAngleBuffer) and abs(pitch)>abs(maxPitch)):
lit = DataflashLog.LogIterator(logdata, startLine)
assert(lit.currentLine == startLine)
while lit.currentLine <= endLine:
relativeAlt = lit["CTUN"]["BarAlt"]
if relativeAlt > minAltThreshold:
roll = lit["ATT"]["Roll"]
pitch = lit["ATT"]["Pitch"]
if abs(roll)>(maxLeanAngle+maxLeanAngleBuffer) and abs(roll)>abs(maxRoll):
maxRoll = roll
maxRollLine = lit.currentLine
if abs(pitch)>(maxLeanAngle+maxLeanAngleBuffer) and abs(pitch)>abs(maxPitch):
maxPitch = pitch
maxPitchLine = lit.currentLine
lit.next()
# check for breaking max lean angles
if maxRoll and abs(maxRoll)>abs(maxPitch):
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = "Roll (%.2f, line %d) > maximum lean angle (%.2f)" % (maxRoll, maxRollLine, maxLeanAngle)
return
if maxPitch:
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = "Pitch (%.2f, line %d) > maximum lean angle (%.2f)" % (maxPitch, maxPitchLine, maxLeanAngle)
return
# TODO: use numpy/scipy to check Roll+RollIn curves for fitness (ignore where we're not airborne)
# ...
| from LogAnalyzer import Test,TestResult
import DataflashLog
import collections
class TestPitchRollCoupling(Test):
'''test for divergence between input and output pitch/roll, i.e. mechanical failure or bad PID tuning'''
# TODO: currently we're only checking for roll/pitch outside of max lean angle, will come back later to analyze roll/pitch in versus out values
def __init__(self):
Test.__init__(self)
self.name = "Pitch/Roll"
self.enable = True # TEMP
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
if logdata.vehicleType != "ArduCopter":
self.result.status = TestResult.StatusType.NA
return
if not "ATT" in logdata.channels:
self.result.status = TestResult.StatusType.UNKNOWN
self.result.statusMessage = "No ATT log data"
return
# figure out where each mode begins and ends, so we can treat auto and manual modes differently and ignore acro/tune modes
autoModes = ["RTL","AUTO","LAND","LOITER","GUIDED","CIRCLE","OF_LOITER","HYBRID"] # use NTUN DRol+DPit
manualModes = ["STABILIZE","DRIFT","ALTHOLD","ALT_HOLD","POSHOLD"] # use CTUN RollIn/DesRoll + PitchIn/DesPitch
ignoreModes = ["ACRO","SPORT","FLIP","AUTOTUNE",""] # ignore data from these modes
autoSegments = [] # list of (startLine,endLine) pairs
manualSegments = [] # list of (startLine,endLine) pairs
orderedModes = collections.OrderedDict(sorted(logdata.modeChanges.items(), key=lambda t: t[0]))
isAuto = False # we always start in a manual control mode
prevLine = 0
mode = ""
for line,modepair in orderedModes.iteritems():
mode = modepair[0].upper()
if prevLine == 0:
prevLine = line
if mode in autoModes:
if not isAuto:
manualSegments.append((prevLine,line-1))
prevLine = line
isAuto = True
elif mode in manualModes:
if isAuto:
autoSegments.append((prevLine,line-1))
prevLine = line
isAuto = False
elif mode in ignoreModes:
if isAuto:
autoSegments.append((prevLine,line-1))
else:
manualSegments.append((prevLine,line-1))
prevLine = 0
else:
raise Exception("Unknown mode in TestPitchRollCoupling: %s" % mode)
# and handle the last segment, which doesn't have an ending
if mode in autoModes:
autoSegments.append((prevLine,logdata.lineCount))
elif mode in manualModes:
manualSegments.append((prevLine,logdata.lineCount))
# figure out max lean angle, the ANGLE_MAX param was added in AC3.1
maxLeanAngle = 45.0
if "ANGLE_MAX" in logdata.parameters:
maxLeanAngle = logdata.parameters["ANGLE_MAX"] / 100.0
maxLeanAngleBuffer = 10 # allow a buffer margin
# ignore anything below this altitude, to discard any data while not flying
minAltThreshold = 2.0
# look through manual+auto flight segments
# TODO: filter to ignore single points outside range?
(maxRoll, maxRollLine) = (0.0, 0)
(maxPitch, maxPitchLine) = (0.0, 0)
for (startLine,endLine) in manualSegments+autoSegments:
# quick up-front test, only fallover into more complex line-by-line check if max()>threshold
rollSeg = logdata.channels["ATT"]["Roll"].getSegment(startLine,endLine)
pitchSeg = logdata.channels["ATT"]["Pitch"].getSegment(startLine,endLine)
if not rollSeg.dictData and not pitchSeg.dictData:
continue
# check max roll+pitch for any time where relative altitude is above minAltThreshold
roll = max(abs(rollSeg.min()), abs(rollSeg.max()))
pitch = max(abs(pitchSeg.min()), abs(pitchSeg.max()))
if (roll>(maxLeanAngle+maxLeanAngleBuffer) and abs(roll)>abs(maxRoll)) or (pitch>(maxLeanAngle+maxLeanAngleBuffer) and abs(pitch)>abs(maxPitch)):
lit = DataflashLog.LogIterator(logdata, startLine)
assert(lit.currentLine == startLine)
while lit.currentLine <= endLine:
relativeAlt = lit["CTUN"]["BarAlt"]
if relativeAlt > minAltThreshold:
roll = lit["ATT"]["Roll"]
pitch = lit["ATT"]["Pitch"]
if abs(roll)>(maxLeanAngle+maxLeanAngleBuffer) and abs(roll)>abs(maxRoll):
maxRoll = roll
maxRollLine = lit.currentLine
if abs(pitch)>(maxLeanAngle+maxLeanAngleBuffer) and abs(pitch)>abs(maxPitch):
maxPitch = pitch
maxPitchLine = lit.currentLine
lit.next()
# check for breaking max lean angles
if maxRoll and abs(maxRoll)>abs(maxPitch):
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = "Roll (%.2f, line %d) > maximum lean angle (%.2f)" % (maxRoll, maxRollLine, maxLeanAngle)
return
if maxPitch:
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = "Pitch (%.2f, line %d) > maximum lean angle (%.2f)" % (maxPitch, maxPitchLine, maxLeanAngle)
return
# TODO: use numpy/scipy to check Roll+RollIn curves for fitness (ignore where we're not airborne)
# ...
| en | 0.796717 | test for divergence between input and output pitch/roll, i.e. mechanical failure or bad PID tuning # TODO: currently we're only checking for roll/pitch outside of max lean angle, will come back later to analyze roll/pitch in versus out values # TEMP # figure out where each mode begins and ends, so we can treat auto and manual modes differently and ignore acro/tune modes # use NTUN DRol+DPit # use CTUN RollIn/DesRoll + PitchIn/DesPitch # ignore data from these modes # list of (startLine,endLine) pairs # list of (startLine,endLine) pairs # we always start in a manual control mode # and handle the last segment, which doesn't have an ending # figure out max lean angle, the ANGLE_MAX param was added in AC3.1 # allow a buffer margin # ignore anything below this altitude, to discard any data while not flying # look through manual+auto flight segments # TODO: filter to ignore single points outside range? # quick up-front test, only fallover into more complex line-by-line check if max()>threshold # check max roll+pitch for any time where relative altitude is above minAltThreshold # check for breaking max lean angles # TODO: use numpy/scipy to check Roll+RollIn curves for fitness (ignore where we're not airborne) # ... | 2.413216 | 2 |
annotate/constants.py | raerose01/MINTIE | 0 | 6617505 | <filename>annotate/constants.py<gh_stars>0
#=====================================================================================================
# Program parameters
#=====================================================================================================
EXIT_FILE_IO_ERROR = 1
EXIT_COMMAND_LINE_ERROR = 2
EXIT_OUTPUT_ERROR = 3
#=====================================================================================================
# Default values for cutoff parameters
#=====================================================================================================
DEFAULT_MIN_GAP = 3
DEFAULT_MIN_CLIP = 30
DEFAULT_MIN_MATCH_BP = 30
DEFAULT_MIN_MATCH_PERC = 0.3
DEFAULT_MISMATCHES = 0
#=====================================================================================================
# VCF output parameters
#=====================================================================================================
INFO = ["CID", "ECN", "CLEN", "CPOS", "CSTRAND", "CCIGAR", "VSIZE",
"CVSIZE", "CVTYPE", "GENES", "PARID", "PVAL", "CVQ"]
FORMAT = ["GT", "ECC", "AI"]
#=====================================================================================================
# CIGAR-string related
#=====================================================================================================
CIGAR = {'match': 0,
'insertion': 1,
'deletion': 2,
'skipped': 3,
'soft-clip': 4,
'hard-clip': 5,
'silent_deletion': 6}
GAPS = [CIGAR[c] for c in ['insertion', 'deletion', 'silent_deletion']]
CLIPS = [CIGAR[c] for c in ['soft-clip', 'hard-clip']]
# any cigar criteria that is >0 bp on an aligned contig
AFFECT_CONTIG = [CIGAR[c] for c in ['match', 'insertion', 'soft-clip', 'hard-clip']]
# any cigar criteria that is >0 bp on the reference genome
AFFECT_REF = [CIGAR[c] for c in ['match', 'deletion']]
| <filename>annotate/constants.py<gh_stars>0
#=====================================================================================================
# Program parameters
#=====================================================================================================
EXIT_FILE_IO_ERROR = 1
EXIT_COMMAND_LINE_ERROR = 2
EXIT_OUTPUT_ERROR = 3
#=====================================================================================================
# Default values for cutoff parameters
#=====================================================================================================
DEFAULT_MIN_GAP = 3
DEFAULT_MIN_CLIP = 30
DEFAULT_MIN_MATCH_BP = 30
DEFAULT_MIN_MATCH_PERC = 0.3
DEFAULT_MISMATCHES = 0
#=====================================================================================================
# VCF output parameters
#=====================================================================================================
INFO = ["CID", "ECN", "CLEN", "CPOS", "CSTRAND", "CCIGAR", "VSIZE",
"CVSIZE", "CVTYPE", "GENES", "PARID", "PVAL", "CVQ"]
FORMAT = ["GT", "ECC", "AI"]
#=====================================================================================================
# CIGAR-string related
#=====================================================================================================
CIGAR = {'match': 0,
'insertion': 1,
'deletion': 2,
'skipped': 3,
'soft-clip': 4,
'hard-clip': 5,
'silent_deletion': 6}
GAPS = [CIGAR[c] for c in ['insertion', 'deletion', 'silent_deletion']]
CLIPS = [CIGAR[c] for c in ['soft-clip', 'hard-clip']]
# any cigar criteria that is >0 bp on an aligned contig
AFFECT_CONTIG = [CIGAR[c] for c in ['match', 'insertion', 'soft-clip', 'hard-clip']]
# any cigar criteria that is >0 bp on the reference genome
AFFECT_REF = [CIGAR[c] for c in ['match', 'deletion']]
| fr | 0.304012 | #===================================================================================================== # Program parameters #===================================================================================================== #===================================================================================================== # Default values for cutoff parameters #===================================================================================================== #===================================================================================================== # VCF output parameters #===================================================================================================== #===================================================================================================== # CIGAR-string related #===================================================================================================== # any cigar criteria that is >0 bp on an aligned contig # any cigar criteria that is >0 bp on the reference genome | 1.829277 | 2 |
# tensorflow/core/user_ops/direct_sparse_experiments/mnist/MNIST_d.py (TimoHackel/ILA-SCNN)
# coding: utf-8
# In[ ]:
import sys
import os
import time
import numpy as np
import tensorflow as tf
import direct_sparse_layer_definition as ld
from direct_sparse_module import sparse_nn_ops as sc_module
from load_mnist_dataset import load_dataset
# In[ ]:
def model_mnist(
    sparse_data,
    tensor_in_sizes,
    train_labels=None,
    num_classes=10,
    scope='mn256-',
    initializer=None,
    d1=0.1,
    d2=0.3,
    d3=0.4
):
    """Build the direct-sparse-convolution MNIST classification graph.

    Architecture: three 3x3 sparse convs (1 -> 8 channels) -> 2x2 sparse
    pooling -> three 3x3 sparse convs (8 -> 16 channels) -> dense(512) ->
    dense(num_classes), with a softmax cross-entropy loss.

    Args:
        sparse_data: sparse input batch (5D layout
            [batch, depth, height, width, in_channels]).
        tensor_in_sizes: dense shape of the input batch; element 0 is the
            batch size.
        train_labels: one-hot label tensor used by the loss and accuracy ops.
            NOTE(review): the default of None crashes unconditionally in
            tf.losses.softmax_cross_entropy below -- callers must pass labels.
        num_classes: number of output classes.
        scope: name prefix for the sparse-conv layer variables.
        initializer: optional weight initializer forwarded to the conv layers.
        d1, d2, d3: density parameters forwarded to the sparse layers
            (presumably per-stage sparsity/density bounds -- TODO confirm
            against direct_sparse_layer_definition).

    Returns:
        Tuple (loss, predictions, accuracy, net) where `net` maps layer names
        to the corresponding graph ops and `predictions` holds 'classes' and
        'probabilities'.
    """
    dim = 5
    strides = [1, 1, 1, 1, 1]
    padding = 'SAME'
    pooling_sizes = [1, 1, 2, 2, 1]
    batch_size = tensor_in_sizes[0]
    net = {}
    # Convert the incoming sparse tensor into the library's direct-sparse format.
    net['sd_converted'] = ld.create_sparse_data_to_direct_sparse(sparse_data, dim)
    # First conv stage: 1 -> 8 channels at density bound d1.
    net['conv1_1'] = ld.create_sparse_conv_layer(
        net['sd_converted'],
        [1, 3, 3, 1, 8],
        strides,
        padding,
        dim,
        d1,
        'K-ABS',
        name=scope + 'sc1',
        initializer=initializer
    )
    net['conv1_2'] = ld.create_sparse_conv_layer(
        net['conv1_1'],
        [1, 3, 3, 8, 8],
        strides,
        padding,
        dim,
        d1,
        'K-RELU',
        name=scope + 'sc2',
        initializer=initializer
    )
    net['conv1_3'] = ld.create_sparse_conv_layer(
        net['conv1_2'],
        [1, 3, 3, 8, 8],
        strides,
        padding,
        dim,
        d1,
        'K-RELU',
        name=scope + 'sc3',
        initializer=initializer
    )
    # Spatial 2x2 pooling; d3 is the density parameter for the pooled output.
    net['pool1'] = ld.create_sparse_pooling_layer(net['conv1_3'], pooling_sizes, dim, d3)
    # Second conv stage: 8 -> 16 channels at density bound d2.
    net['conv2_1'] = ld.create_sparse_conv_layer(
        net['pool1'],
        [1, 3, 3, 8, 16],
        strides,
        padding,
        dim,
        d2,
        'K-ABS',
        name=scope + 'sc4',
        initializer=initializer
    )
    net['conv2_2'] = ld.create_sparse_conv_layer(
        net['conv2_1'],
        [1, 3, 3, 16, 16],
        strides,
        padding,
        dim,
        d2,
        'K-RELU',
        name=scope + 'sc5',
        initializer=initializer
    )
    net['conv2_3'] = ld.create_sparse_conv_layer(
        net['conv2_2'],
        [1, 3, 3, 16, 16],
        strides,
        padding,
        dim,
        d2,
        'K-ABS',
        name=scope + 'sc6',
        initializer=initializer
    )
    # Densify and flatten for the fully connected classifier head.
    net['sparse_to_dense'] = ld.create_direct_sparse_to_dense(net['conv2_3'], dim)
    net['dense_reshaped1'] = tf.reshape(net['sparse_to_dense'], [batch_size, 1, 14, 14, 16])
    net['dense_reshaped2'] = tf.reshape(net['dense_reshaped1'], [batch_size, -1])
    net['dense1'] = tf.layers.dense(net['dense_reshaped2'], 512)
    net['dense2'] = tf.layers.dense(net['dense1'], num_classes)
    predictions = {
        'classes': tf.argmax(net['dense2'], axis=1),
        'probabilities': tf.nn.softmax(net['dense2'])
    }
    # Clip logits to keep the cross-entropy numerically stable.
    loss = tf.losses.softmax_cross_entropy(
        onehot_labels=train_labels,
        logits=tf.clip_by_value(net['dense2'], 1e-7, 1e8)
    )
    accuracy = tf.metrics.accuracy(tf.argmax(train_labels, axis=1), predictions['classes'])
    return loss, predictions, accuracy, net
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt]
# In[ ]:
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
X_train = (X_train * 255).astype(np.uint8)
X_train[X_train<50] = 0
X_val = (X_val * 255).astype(np.uint8)
X_val[X_val<50] = 0
X_test = (X_test * 255).astype(np.uint8)
X_test[X_test<50] = 0
y_train_softmax = np.zeros((y_train.shape[0], 10))
y_train_softmax[np.arange(y_train.shape[0]), y_train] = 1
y_val_softmax = np.zeros((y_val.shape[0], 10))
y_val_softmax[np.arange(y_val.shape[0]), y_val] = 1
y_test_softmax = np.zeros((y_test.shape[0], 10))
y_test_softmax[np.arange(y_test.shape[0]), y_test] = 1
dim = 5
batch_size = 100
tensor_in_sizes_=[batch_size, 1, 28, 28, 1] #[batch, depth, height, width, in_channels]
num_classes = 10
batch_label_sizes = [batch_size, num_classes]
tensor_in_sizes = np.array(tensor_in_sizes_, dtype=np.int64)
sparse_data = tf.sparse_placeholder(tf.float32, shape=tensor_in_sizes, name='sparse_placeholder')
dense_labels = tf.placeholder(tf.float32, shape=batch_label_sizes, name='labels_placeholder')
# In[ ]:
for d1 in [0.03, 0.035, 0.04, 0.045, 0.065, 0.075, 0.085, 0.095, 0.15, 0.2]:
print('===============================================================')
print('===============================================================')
print(d1)
print('===============================================================')
print('===============================================================')
with tf.Session() as sess:
loss, predictions, accuracy, net = model_mnist(
sparse_data,
tensor_in_sizes,
dense_labels,
num_classes,
scope='mn256_d{}-'.format(d1),
d1 = d1,
d2 = 2*d1,
d3 = 4*d1
)
print('initializing model')
optimizer = tf.train.AdagradOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
print('model and data are initialized')
num_epochs = 10
for epoch in range(num_epochs):
# In each epoch, we do a full pass over the training data:
train_err = 0
train_acc = 0
train_batches = 0
start_time = time.time()
for batch in iterate_minibatches(X_train.reshape(-1, 1, 28, 28, 1), y_train_softmax, batch_size):
feed_dict = {
sparse_data: tf.SparseTensorValue(
[cl for cl in zip(*[arr.astype(np.int64) for arr in batch[0].nonzero()])],
batch[0][batch[0].nonzero()].astype(np.float32),
batch[0].shape
),
dense_labels: batch[1]
}
_, train_err_batch, train_acc_batch = sess.run([train_op, loss, accuracy], feed_dict=feed_dict)
train_err += train_err_batch
train_acc += train_acc_batch[0]
train_batches += 1
training_time = time.time()
print('Epoch {} of {} took {:.3f}s'.format(epoch + 1, num_epochs, training_time - start_time))
print(' training loss (in-iteration):\t{:.6f}'.format(train_err / train_batches))
print(' train accuracy:\t\t{:.2f} %'.format(train_acc / train_batches * 100))
# And a full pass over the validation data:
val_err = 0
val_acc = 0
val_batches = 0
for batch in iterate_minibatches(X_val.reshape(-1, 1, 28, 28, 1), y_val_softmax, batch_size):
feed_dict = {
sparse_data: tf.SparseTensorValue(
[cl for cl in zip(*[arr.astype(np.int64) for arr in batch[0].nonzero()])],
batch[0][batch[0].nonzero()].astype(np.float32),
batch[0].shape
),
dense_labels: batch[1]
}
val_err_batch, val_acc_batch = sess.run([loss, accuracy], feed_dict=feed_dict)
val_err += val_err_batch
val_acc += val_acc_batch[0]
val_batches += 1
print('Val {} of {} took {:.3f}s'.format(epoch + 1, num_epochs, time.time() - training_time))
print(' val loss :\t\t{:.6f}'.format(val_err / val_batches))
print(' val accuracy:\t\t{:.2f} %'.format(val_acc / val_batches * 100))
saver = tf.train.Saver(var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES), max_to_keep=1)
saver.save(sess, './' + '_d1_' + str(d1))
|
# coding: utf-8
# In[ ]:
import sys
import os
import time
import numpy as np
import tensorflow as tf
import direct_sparse_layer_definition as ld
from direct_sparse_module import sparse_nn_ops as sc_module
from load_mnist_dataset import load_dataset
# In[ ]:
def model_mnist(
sparse_data,
tensor_in_sizes,
train_labels=None,
num_classes=10,
scope='mn256-',
initializer=None,
d1=0.1,
d2=0.3,
d3=0.4
):
dim = 5
strides = [1,1,1,1,1]
padding = 'SAME'
pooling_sizes = [1,1,2,2,1]
batch_size = tensor_in_sizes[0]
total_size = np.prod(tensor_in_sizes)
net = {}
net['sd_converted'] = ld.create_sparse_data_to_direct_sparse(sparse_data, dim)
net['conv1_1'] = ld.create_sparse_conv_layer(
net['sd_converted'],
[1,3,3,1,8],
strides,
padding,
dim,
d1,
'K-ABS',
name=scope + 'sc1',
initializer=initializer
)
net['conv1_2'] = ld.create_sparse_conv_layer(
net['conv1_1'],
[1,3,3,8,8],
strides,
padding,
dim,
d1,
'K-RELU',
name=scope + 'sc2',
initializer=initializer
)
net['conv1_3'] = ld.create_sparse_conv_layer(
net['conv1_2'],
[1,3,3,8,8],
strides,
padding,
dim,
d1,
'K-RELU',
name=scope + 'sc3',
initializer=initializer
)
net['pool1'] = ld.create_sparse_pooling_layer(net['conv1_3'], pooling_sizes, dim, d3)
net['conv2_1'] = ld.create_sparse_conv_layer(
net['pool1'],
[1,3,3,8,16],
strides,
padding,
dim,
d2,
'K-ABS',
name=scope + 'sc4',
initializer=initializer
)
net['conv2_2'] = ld.create_sparse_conv_layer(
net['conv2_1'],
[1,3,3,16,16],
strides,
padding,
dim,
d2,
'K-RELU',
name=scope + 'sc5',
initializer=initializer
)
net['conv2_3'] = ld.create_sparse_conv_layer(
net['conv2_2'],
[1,3,3,16,16],
strides,
padding,
dim,
d2,
'K-ABS',
name=scope + 'sc6',
initializer=initializer
)
net['sparse_to_dense'] = ld.create_direct_sparse_to_dense(net['conv2_3'], dim)
net['dense_reshaped1'] = tf.reshape(net['sparse_to_dense'], [batch_size, 1, 14, 14, 16])
net['dense_reshaped2'] = tf.reshape(net['dense_reshaped1'], [batch_size, -1])
net['dense1'] = tf.layers.dense(net['dense_reshaped2'], 512)
net['dense2'] = tf.layers.dense(net['dense1'], num_classes)
predictions = {
'classes': tf.argmax(net['dense2'], axis=1),
'probabilities': tf.nn.softmax(net['dense2'])
}
loss = tf.losses.softmax_cross_entropy(
onehot_labels=train_labels,
logits=tf.clip_by_value(net['dense2'], 1e-7, 1e8)
)
accuracy = tf.metrics.accuracy(tf.argmax(train_labels, axis=1), predictions['classes'])
return loss, predictions, accuracy, net
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt]
# In[ ]:
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
X_train = (X_train * 255).astype(np.uint8)
X_train[X_train<50] = 0
X_val = (X_val * 255).astype(np.uint8)
X_val[X_val<50] = 0
X_test = (X_test * 255).astype(np.uint8)
X_test[X_test<50] = 0
y_train_softmax = np.zeros((y_train.shape[0], 10))
y_train_softmax[np.arange(y_train.shape[0]), y_train] = 1
y_val_softmax = np.zeros((y_val.shape[0], 10))
y_val_softmax[np.arange(y_val.shape[0]), y_val] = 1
y_test_softmax = np.zeros((y_test.shape[0], 10))
y_test_softmax[np.arange(y_test.shape[0]), y_test] = 1
dim = 5
batch_size = 100
tensor_in_sizes_=[batch_size, 1, 28, 28, 1] #[batch, depth, height, width, in_channels]
num_classes = 10
batch_label_sizes = [batch_size, num_classes]
tensor_in_sizes = np.array(tensor_in_sizes_, dtype=np.int64)
sparse_data = tf.sparse_placeholder(tf.float32, shape=tensor_in_sizes, name='sparse_placeholder')
dense_labels = tf.placeholder(tf.float32, shape=batch_label_sizes, name='labels_placeholder')
# In[ ]:
for d1 in [0.03, 0.035, 0.04, 0.045, 0.065, 0.075, 0.085, 0.095, 0.15, 0.2]:
print('===============================================================')
print('===============================================================')
print(d1)
print('===============================================================')
print('===============================================================')
with tf.Session() as sess:
loss, predictions, accuracy, net = model_mnist(
sparse_data,
tensor_in_sizes,
dense_labels,
num_classes,
scope='mn256_d{}-'.format(d1),
d1 = d1,
d2 = 2*d1,
d3 = 4*d1
)
print('initializing model')
optimizer = tf.train.AdagradOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
print('model and data are initialized')
num_epochs = 10
for epoch in range(num_epochs):
# In each epoch, we do a full pass over the training data:
train_err = 0
train_acc = 0
train_batches = 0
start_time = time.time()
for batch in iterate_minibatches(X_train.reshape(-1, 1, 28, 28, 1), y_train_softmax, batch_size):
feed_dict = {
sparse_data: tf.SparseTensorValue(
[cl for cl in zip(*[arr.astype(np.int64) for arr in batch[0].nonzero()])],
batch[0][batch[0].nonzero()].astype(np.float32),
batch[0].shape
),
dense_labels: batch[1]
}
_, train_err_batch, train_acc_batch = sess.run([train_op, loss, accuracy], feed_dict=feed_dict)
train_err += train_err_batch
train_acc += train_acc_batch[0]
train_batches += 1
training_time = time.time()
print('Epoch {} of {} took {:.3f}s'.format(epoch + 1, num_epochs, training_time - start_time))
print(' training loss (in-iteration):\t{:.6f}'.format(train_err / train_batches))
print(' train accuracy:\t\t{:.2f} %'.format(train_acc / train_batches * 100))
# And a full pass over the validation data:
val_err = 0
val_acc = 0
val_batches = 0
for batch in iterate_minibatches(X_val.reshape(-1, 1, 28, 28, 1), y_val_softmax, batch_size):
feed_dict = {
sparse_data: tf.SparseTensorValue(
[cl for cl in zip(*[arr.astype(np.int64) for arr in batch[0].nonzero()])],
batch[0][batch[0].nonzero()].astype(np.float32),
batch[0].shape
),
dense_labels: batch[1]
}
val_err_batch, val_acc_batch = sess.run([loss, accuracy], feed_dict=feed_dict)
val_err += val_err_batch
val_acc += val_acc_batch[0]
val_batches += 1
print('Val {} of {} took {:.3f}s'.format(epoch + 1, num_epochs, time.time() - training_time))
print(' val loss :\t\t{:.6f}'.format(val_err / val_batches))
print(' val accuracy:\t\t{:.2f} %'.format(val_acc / val_batches * 100))
saver = tf.train.Saver(var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES), max_to_keep=1)
saver.save(sess, './' + '_d1_' + str(d1))
| en | 0.697517 | # coding: utf-8 # In[ ]: # In[ ]: # In[ ]: #[batch, depth, height, width, in_channels] # In[ ]: # In each epoch, we do a full pass over the training data: # And a full pass over the validation data: | 2.530541 | 3 |
axelrod/tests/test_geller.py | DumisaniZA/Axelrod | 33 | 6617507 | """Test for the geller strategy."""
import axelrod
from test_player import TestPlayer
class TestGeller(TestPlayer):
name = "Geller"
player = axelrod.Geller
stochastic = True
def test_strategy(self):
"""Should cooperate against cooperaters and defect against defectors."""
P1 = self.player()
P2 = axelrod.Cooperator()
self.assertEqual(P1.strategy(P2), 'C')
P1 = self.player()
P2 = axelrod.Defector()
self.assertEqual(P1.strategy(P2), 'D')
class TestGellerCooperator(TestGeller):
name = "<NAME>"
player = axelrod.GellerCooperator
stochastic = False
def test_against_self(self):
P1 = self.player()
P2 = self.player()
self.assertEqual(P1.strategy(P2), 'C')
class TestGellerDefector(TestGeller):
name = "<NAME>"
player = axelrod.GellerDefector
stochastic = False
def test_against_self(self):
P1 = self.player()
P2 = self.player()
self.assertEqual(P1.strategy(P2), 'D')
| """Test for the geller strategy."""
import axelrod
from test_player import TestPlayer
class TestGeller(TestPlayer):
name = "Geller"
player = axelrod.Geller
stochastic = True
def test_strategy(self):
"""Should cooperate against cooperaters and defect against defectors."""
P1 = self.player()
P2 = axelrod.Cooperator()
self.assertEqual(P1.strategy(P2), 'C')
P1 = self.player()
P2 = axelrod.Defector()
self.assertEqual(P1.strategy(P2), 'D')
class TestGellerCooperator(TestGeller):
name = "<NAME>"
player = axelrod.GellerCooperator
stochastic = False
def test_against_self(self):
P1 = self.player()
P2 = self.player()
self.assertEqual(P1.strategy(P2), 'C')
class TestGellerDefector(TestGeller):
name = "<NAME>"
player = axelrod.GellerDefector
stochastic = False
def test_against_self(self):
P1 = self.player()
P2 = self.player()
self.assertEqual(P1.strategy(P2), 'D')
| en | 0.960044 | Test for the geller strategy. Should cooperate against cooperaters and defect against defectors. | 3.066678 | 3 |
__init__.py | h4n0sh1/DARC | 1 | 6617508 | from .darc_evaluator import DarcEvaluator
from .utils import *
from .metrics import *
| from .darc_evaluator import DarcEvaluator
from .utils import *
from .metrics import *
| none | 1 | 1.063022 | 1 | |
codes_others/main_gnn.py | TrueNobility303/image-classification-CIFAR10 | 2 | 6617509 | from torchvision.datasets import MNIST
import torchvision.transforms as T
from torch_geometric.transforms import ToSLIC
import torchvision
import torch
import torch_geometric
from torch_geometric.utils import from_networkx
from torch_geometric.nn import GCNConv, GATConv
from torch_geometric.nn import global_mean_pool
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Linear
import tqdm
import numpy as np
import matplotlib.pyplot as plt
from torch_geometric.data import DataLoader
from torch_cluster import knn_graph
device = torch.device('cuda')
BATCH = 512
transform = T.Compose( [T.ToTensor(), T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5,0.5)), ToSLIC(n_segments=128)])
trainset = torchvision.datasets.CIFAR10(root='./datasets', train=True, download=True, transform=transform)
trainloader = DataLoader(trainset, batch_size=BATCH, shuffle=True)
testset = torchvision.datasets.CIFAR10(root='./datasets', train=False, download=True, transform=transform)
testloader = DataLoader(testset, batch_size=BATCH, shuffle=False)
torch.manual_seed(42)
class GCN(torch.nn.Module):
def __init__(self, hidden_channels, num_node_features=3, num_classes=10):
super(GCN, self).__init__()
self.conv1 = GATConv(num_node_features, hidden_channels)
self.conv2 = GATConv(hidden_channels, hidden_channels)
self.conv3 = GATConv(hidden_channels, hidden_channels)
self.conv4 = GATConv(hidden_channels, hidden_channels)
self.conv5 = GATConv(hidden_channels, hidden_channels)
self.lin = nn.Sequential(
Linear(hidden_channels,1024),
nn.ReLU(),
Linear(1024,num_classes)
)
def forward(self, x, edge_index,batch):
x = self.conv1(x, edge_index)
x = x.relu()
x = self.conv2(x, edge_index)
x = x.relu()
x = self.conv3(x, edge_index)
x = x.relu()
#x = self.conv4(x, edge_index)
#x = x.relu()
#x = self.conv5(x, edge_index)
#print(x.shape)
x = global_mean_pool(x,batch)
#x = F.dropout(x, p=0.5, training=self.training)
x = self.lin(x)
return x
model = GCN(128).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)
criterion = torch.nn.CrossEntropyLoss()
def train(dataloader):
model.train()
tot_loss = 0
num = 0
for i,data in tqdm.tqdm(enumerate(trainloader)):
#构建KNN图,K为超参数
graph,y = data
graph = graph.to(device)
y = y.to(device)
num += len(y)
edge_index = knn_graph(graph.pos, k=6)
logits = model(graph.x, edge_index,graph.batch)
pred = torch.argmax(logits,1)
loss = criterion(logits,y)
tot_loss += loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
return tot_loss / num
def test(dataloader):
model.eval()
tot_correct = 0
num = 0
for i,data in tqdm.tqdm(enumerate(trainloader)):
#构建KNN图,K为超参数
graph,y = data
graph = graph.to(device)
y = y.to(device)
num += len(y)
edge_index = knn_graph(graph.pos, k=6)
logits = model(graph.x, edge_index,graph.batch)
pred = torch.argmax(logits,1)
tot_correct += torch.sum(pred==y).item()
return tot_correct / num
if __name__ == '__main__':
model_path = 'pth/gnn_cifar.pth'
exp_path = 'exp/gnn_cifar.png'
train_accs = []
test_accs = []
for e in range(100):
trainacc = test(trainloader)
testacc = test(testloader)
loss = train(trainloader)
train_accs.append(trainacc)
test_accs.append(testacc)
print('epoch',e,'loss',loss, 'train',trainacc,'test',testacc)
torch.save(model.state_dict(),model_path)
plt.figure()
plt.plot(train_accs)
plt.plot(test_accs)
plt.legend(['train','test'])
plt.savefig(exp_path)
| from torchvision.datasets import MNIST
import torchvision.transforms as T
from torch_geometric.transforms import ToSLIC
import torchvision
import torch
import torch_geometric
from torch_geometric.utils import from_networkx
from torch_geometric.nn import GCNConv, GATConv
from torch_geometric.nn import global_mean_pool
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Linear
import tqdm
import numpy as np
import matplotlib.pyplot as plt
from torch_geometric.data import DataLoader
from torch_cluster import knn_graph
device = torch.device('cuda')
BATCH = 512
transform = T.Compose( [T.ToTensor(), T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5,0.5)), ToSLIC(n_segments=128)])
trainset = torchvision.datasets.CIFAR10(root='./datasets', train=True, download=True, transform=transform)
trainloader = DataLoader(trainset, batch_size=BATCH, shuffle=True)
testset = torchvision.datasets.CIFAR10(root='./datasets', train=False, download=True, transform=transform)
testloader = DataLoader(testset, batch_size=BATCH, shuffle=False)
torch.manual_seed(42)
class GCN(torch.nn.Module):
def __init__(self, hidden_channels, num_node_features=3, num_classes=10):
super(GCN, self).__init__()
self.conv1 = GATConv(num_node_features, hidden_channels)
self.conv2 = GATConv(hidden_channels, hidden_channels)
self.conv3 = GATConv(hidden_channels, hidden_channels)
self.conv4 = GATConv(hidden_channels, hidden_channels)
self.conv5 = GATConv(hidden_channels, hidden_channels)
self.lin = nn.Sequential(
Linear(hidden_channels,1024),
nn.ReLU(),
Linear(1024,num_classes)
)
def forward(self, x, edge_index,batch):
x = self.conv1(x, edge_index)
x = x.relu()
x = self.conv2(x, edge_index)
x = x.relu()
x = self.conv3(x, edge_index)
x = x.relu()
#x = self.conv4(x, edge_index)
#x = x.relu()
#x = self.conv5(x, edge_index)
#print(x.shape)
x = global_mean_pool(x,batch)
#x = F.dropout(x, p=0.5, training=self.training)
x = self.lin(x)
return x
model = GCN(128).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)
criterion = torch.nn.CrossEntropyLoss()
def train(dataloader):
model.train()
tot_loss = 0
num = 0
for i,data in tqdm.tqdm(enumerate(trainloader)):
#构建KNN图,K为超参数
graph,y = data
graph = graph.to(device)
y = y.to(device)
num += len(y)
edge_index = knn_graph(graph.pos, k=6)
logits = model(graph.x, edge_index,graph.batch)
pred = torch.argmax(logits,1)
loss = criterion(logits,y)
tot_loss += loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
return tot_loss / num
def test(dataloader):
model.eval()
tot_correct = 0
num = 0
for i,data in tqdm.tqdm(enumerate(trainloader)):
#构建KNN图,K为超参数
graph,y = data
graph = graph.to(device)
y = y.to(device)
num += len(y)
edge_index = knn_graph(graph.pos, k=6)
logits = model(graph.x, edge_index,graph.batch)
pred = torch.argmax(logits,1)
tot_correct += torch.sum(pred==y).item()
return tot_correct / num
if __name__ == '__main__':
model_path = 'pth/gnn_cifar.pth'
exp_path = 'exp/gnn_cifar.png'
train_accs = []
test_accs = []
for e in range(100):
trainacc = test(trainloader)
testacc = test(testloader)
loss = train(trainloader)
train_accs.append(trainacc)
test_accs.append(testacc)
print('epoch',e,'loss',loss, 'train',trainacc,'test',testacc)
torch.save(model.state_dict(),model_path)
plt.figure()
plt.plot(train_accs)
plt.plot(test_accs)
plt.legend(['train','test'])
plt.savefig(exp_path)
| en | 0.218474 | #x = self.conv4(x, edge_index) #x = x.relu() #x = self.conv5(x, edge_index) #print(x.shape) #x = F.dropout(x, p=0.5, training=self.training) #构建KNN图,K为超参数 #构建KNN图,K为超参数 | 2.330829 | 2 |
Scripts/simulation/sims/households/household_tests.py | velocist/TS4CheatsInfo | 0 | 6617510 | <gh_stars>0
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\sims\households\household_tests.py
# Compiled at: 2018-11-06 00:09:55
# Size of source mod 2**32: 1405 bytes
from event_testing.results import TestResult
from event_testing.test_base import BaseTest
from event_testing.test_events import cached_test
from sims4.tuning.tunable import AutoFactoryInit, HasTunableSingletonFactory
import services
class PlayerPopulationTest(HasTunableSingletonFactory, AutoFactoryInit, BaseTest):
def get_expected_args(self):
return {}
@cached_test
def __call__(self):
culling_service = services.get_culling_service()
max_player_population = culling_service.get_max_player_population()
if max_player_population:
household_manager = services.household_manager()
player_population = sum((len(household) for household in household_manager.values() if household.is_player_household))
if player_population >= max_player_population:
return TestResult(False, 'Over the maximum player population ({}/{})', player_population, max_player_population, tooltip=(lambda *_, **__: self.tooltip(player_population, max_player_population)))
return TestResult.TRUE | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\sims\households\household_tests.py
# Compiled at: 2018-11-06 00:09:55
# Size of source mod 2**32: 1405 bytes
from event_testing.results import TestResult
from event_testing.test_base import BaseTest
from event_testing.test_events import cached_test
from sims4.tuning.tunable import AutoFactoryInit, HasTunableSingletonFactory
import services
class PlayerPopulationTest(HasTunableSingletonFactory, AutoFactoryInit, BaseTest):
def get_expected_args(self):
return {}
@cached_test
def __call__(self):
culling_service = services.get_culling_service()
max_player_population = culling_service.get_max_player_population()
if max_player_population:
household_manager = services.household_manager()
player_population = sum((len(household) for household in household_manager.values() if household.is_player_household))
if player_population >= max_player_population:
return TestResult(False, 'Over the maximum player population ({}/{})', player_population, max_player_population, tooltip=(lambda *_, **__: self.tooltip(player_population, max_player_population)))
return TestResult.TRUE | en | 0.536305 | # uncompyle6 version 3.7.4 # Python bytecode 3.7 (3394) # Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)] # Embedded file name: T:\InGame\Gameplay\Scripts\Server\sims\households\household_tests.py # Compiled at: 2018-11-06 00:09:55 # Size of source mod 2**32: 1405 bytes | 1.824251 | 2 |
src/jason_server/cli.py | nekomamoushi/jason-server | 3 | 6617511 | <filename>src/jason_server/cli.py
import webbrowser
import click
from jason_server.derulo import run
CONTEXT_SETTINGS = dict(help_option_names=['--help'])
@click.group(context_settings=CONTEXT_SETTINGS)
@click.option(
"-h", "--host", default='localhost', help="Host adress"
)
@click.option(
"-p", "--port", default=8080, help="Port"
)
@click.option(
"-q", "--quiet", is_flag=True
)
@click.version_option(
version='0.8.1'
)
@click.pass_context
def cli(ctx, host, port, quiet):
"""Set options
Args:
host(str): host adress
port(int): port number
quiet(bool): disable cli output
"""
ctx.obj = {}
ctx.obj["host"] = host
ctx.obj["port"] = port
ctx.obj["quiet"] = quiet
@cli.command(help="Run your database as REST Api")
@click.argument('database')
@click.option(
"-o", "--open", is_flag=True
)
@click.pass_context
def watch(ctx, database, open):
"""Generate the REST api and Run the server
Args:
databae(str): path to database
open(bool): open home url in browser
"""
if open:
url = "http://{}:{}".format(ctx.obj["host"], ctx.obj["port"])
webbrowser.open(url)
run(ctx.obj, database=database)
| <filename>src/jason_server/cli.py
import webbrowser
import click
from jason_server.derulo import run
CONTEXT_SETTINGS = dict(help_option_names=['--help'])
@click.group(context_settings=CONTEXT_SETTINGS)
@click.option(
"-h", "--host", default='localhost', help="Host adress"
)
@click.option(
"-p", "--port", default=8080, help="Port"
)
@click.option(
"-q", "--quiet", is_flag=True
)
@click.version_option(
version='0.8.1'
)
@click.pass_context
def cli(ctx, host, port, quiet):
"""Set options
Args:
host(str): host adress
port(int): port number
quiet(bool): disable cli output
"""
ctx.obj = {}
ctx.obj["host"] = host
ctx.obj["port"] = port
ctx.obj["quiet"] = quiet
@cli.command(help="Run your database as REST Api")
@click.argument('database')
@click.option(
"-o", "--open", is_flag=True
)
@click.pass_context
def watch(ctx, database, open):
"""Generate the REST api and Run the server
Args:
databae(str): path to database
open(bool): open home url in browser
"""
if open:
url = "http://{}:{}".format(ctx.obj["host"], ctx.obj["port"])
webbrowser.open(url)
run(ctx.obj, database=database)
| en | 0.419747 | Set options Args: host(str): host adress port(int): port number quiet(bool): disable cli output Generate the REST api and Run the server Args: databae(str): path to database open(bool): open home url in browser | 2.62127 | 3 |
resemblance/main/similarity/api/create/wakati.py | Sorarinu/ProjectP2016_F | 0 | 6617512 | # coding:utf-8
import MeCab
import re
from conf.constants import *
class MeacabWakati(object):
# ファイルを読み込み、mecabで分かち書きしたものを返す
def __init__(self, file_name):
self.file_name = file_name
def extract_noun(self, sentence):
tagger = MeCab.Tagger()
nouns = []
sentence = re.sub(re.compile("[!-/:-@[-`{-~]"), '', sentence)
for chunk in tagger.parse(sentence).splitlines()[:-1]:
(surface, feature) = chunk.split('\t')
if feature.startswith('名詞'):
nouns.append(surface)
return ''.join(nouns)
def write_wakati(self):
tagger = MeCab.Tagger('-F\s%f[6] -U\s%m -E\\n')
with open(self.file_name, 'r', encoding='utf-8') as read_file:
with open(WAKATI_FILE, 'w', encoding='utf-8') as write_file:
line = read_file.readline()
while line:
li = self.extract_noun(line)
result = tagger.parse(li)
write_file.write(result[1:]) # skip first \s
line = read_file.readline()
return write_file
| # coding:utf-8
import MeCab
import re
from conf.constants import *
class MeacabWakati(object):
# ファイルを読み込み、mecabで分かち書きしたものを返す
def __init__(self, file_name):
self.file_name = file_name
def extract_noun(self, sentence):
tagger = MeCab.Tagger()
nouns = []
sentence = re.sub(re.compile("[!-/:-@[-`{-~]"), '', sentence)
for chunk in tagger.parse(sentence).splitlines()[:-1]:
(surface, feature) = chunk.split('\t')
if feature.startswith('名詞'):
nouns.append(surface)
return ''.join(nouns)
def write_wakati(self):
tagger = MeCab.Tagger('-F\s%f[6] -U\s%m -E\\n')
with open(self.file_name, 'r', encoding='utf-8') as read_file:
with open(WAKATI_FILE, 'w', encoding='utf-8') as write_file:
line = read_file.readline()
while line:
li = self.extract_noun(line)
result = tagger.parse(li)
write_file.write(result[1:]) # skip first \s
line = read_file.readline()
return write_file
| ja | 0.991229 | # coding:utf-8 # ファイルを読み込み、mecabで分かち書きしたものを返す # skip first \s | 2.883318 | 3 |
httpbin/core.py | jakubroztocil/httpbin | 2 | 6617513 | # -*- coding: utf-8 -*-
"""
httpbin.core
~~~~~~~~~~~~
This module provides the core HttpBin experience.
"""
import base64
import json
import os
import time
import newrelic.agent
from flask import Flask, Response, request, render_template, redirect, jsonify
from raven.contrib.flask import Sentry
from werkzeug.datastructures import WWWAuthenticate
from . import filters
from .helpers import get_headers, status_code, get_dict, check_basic_auth, check_digest_auth, H
from .utils import weighted_choice
from .structures import CaseInsensitiveDict
# Analytics/tracking cookies (Gauges, Google Analytics __utm*) that are
# stripped from /cookies output unless the client passes ?show_env.
ENV_COOKIES = (
    '_gauges_unique',
    '_gauges_unique_year',
    '_gauges_unique_month',
    '_gauges_unique_day',
    '_gauges_unique_hour',
    '__utmz',
    '__utma',
    '__utmb'
)
# The WSGI application object; all routes below are registered on it.
app = Flask(__name__)

# Setup error collection
# Sentry reads its DSN from configuration/environment; New Relic is
# initialized here so the agent instruments the app at import time.
sentry = Sentry(app)
newrelic.agent.initialize()

# ------
# Routes
# ------
@app.route('/')
def view_landing_page():
    """Serve the rendered HTML landing page."""
    page = render_template('index.html')
    return page
@app.route('/ip')
def view_origin():
    """Report the client's origin IP address as JSON."""
    client_ip = request.remote_addr
    return jsonify(origin=client_ip)
@app.route('/headers')
def view_headers():
    """Echo the incoming request's HTTP headers as JSON."""
    payload = get_dict('headers')
    return jsonify(payload)
@app.route('/user-agent')
def view_user_agent():
    """Return only the request's User-Agent header as JSON."""
    agent = get_headers()['user-agent']
    return jsonify({'user-agent': agent})
@app.route('/get', methods=('GET',))
def view_get():
    """Echo the GET request's URL, query args, headers, and origin."""
    fields = ('url', 'args', 'headers', 'origin')
    return jsonify(get_dict(*fields))
@app.route('/post', methods=('POST',))
def view_post():
    """Echo the POST request, including form data, files, and JSON body."""
    fields = ('url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json')
    return jsonify(get_dict(*fields))
@app.route('/put', methods=('PUT',))
def view_put():
    """Echo the PUT request, including form data, files, and JSON body."""
    fields = ('url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json')
    return jsonify(get_dict(*fields))
@app.route('/patch', methods=('PATCH',))
def view_patch():
    """Echo the PATCH request, including form data, files, and JSON body."""
    fields = ('url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json')
    return jsonify(get_dict(*fields))
@app.route('/delete', methods=('DELETE',))
def view_delete():
    """Echo the DELETE request's URL, args, body data, and JSON body."""
    fields = ('url', 'args', 'data', 'origin', 'headers', 'json')
    return jsonify(get_dict(*fields))
@app.route('/gzip')
@filters.gzip
def view_gzip_encoded_content():
    """Echo origin/headers as JSON; the @filters.gzip decorator compresses it."""
    payload = get_dict('origin', 'headers', method=request.method, gzipped=True)
    return jsonify(payload)
@app.route('/redirect/<int:n>')
def redirect_n_times(n):
    """302-redirect ``n`` times, with the final hop landing on ``/get``.

    Args:
        n: positive number of redirects to perform (Flask's ``redirect``
           issues a 302 by default).

    Raises:
        ValueError: if ``n`` is not positive. An explicit raise is used
            instead of ``assert`` so the check survives ``python -O``.
    """
    if n <= 0:
        raise ValueError('n must be a positive integer')
    if n == 1:
        return redirect('/get')
    # Recurse through /redirect/<n-1> until the count reaches 1.
    return redirect('/redirect/{0}'.format(n - 1))
@app.route('/relative-redirect/<int:n>')
def relative_redirect_n_times(n):
    """302-redirect ``n`` times via relative ``Location`` headers.

    Unlike :func:`redirect_n_times`, the response is built by hand so the
    ``Location`` header can hold a relative path.

    Args:
        n: positive number of redirects to perform.

    Raises:
        ValueError: if ``n`` is not positive. An explicit raise is used
            instead of ``assert`` so the check survives ``python -O``.
    """
    if n <= 0:
        raise ValueError('n must be a positive integer')
    response = app.make_response('')
    response.status_code = 302
    if n == 1:
        response.headers['Location'] = '/get'
        return response
    response.headers['Location'] = '/relative-redirect/{0}'.format(n - 1)
    return response
@app.route('/stream/<int:n>')
def stream_n_messages(n):
    """Stream ``n`` newline-delimited JSON messages (capped at 100)."""
    payload = get_dict('url', 'args', 'headers', 'origin')
    count = min(n, 100)

    def _generate():
        # Reuses one dict, stamping a fresh id before each serialization.
        for idx in range(count):
            payload['id'] = idx
            yield json.dumps(payload) + '\n'

    stream_headers = {
        "Transfer-Encoding": "chunked",
        "Content-Type": "application/json",
    }
    return Response(_generate(), headers=stream_headers)
@app.route('/status/<codes>')
def view_status_code(codes):
    """Return the requested status code, or a weighted-random choice.

    ``codes`` is either a single integer (e.g. ``"418"``) or a
    comma-separated list of ``code`` / ``code:weight`` entries, e.g.
    ``"200:3,500:1"``. Entries without an explicit weight default to 1.
    """
    if ',' not in codes:
        return status_code(int(codes))

    choices = []
    for choice in codes.split(','):
        if ':' in choice:
            code, weight = choice.split(':')
        else:
            code, weight = choice, 1
        choices.append((int(code), float(weight)))

    return status_code(weighted_choice(choices))
@app.route('/response-headers')
def response_headers():
    """Set response headers from the query string and echo them in the body.

    The JSON body lists the response's own headers, but writing the body
    changes Content-Length, which is itself a header shown in the body.
    The loop below re-renders until the shown Content-Length matches the
    actual one (a fix-point iteration).
    """
    # Case-insensitive so duplicate query keys differing only in case merge.
    headers = CaseInsensitiveDict(request.args.items())
    response = jsonify(headers.items())
    while True:
        # Remember the Content-Length currently advertised by the response.
        content_len_shown = response.headers['Content-Length']
        # Rebuild the body from the full header set (including Content-Length),
        # then re-apply the caller-requested headers on top.
        response = jsonify(response.headers.items())
        for key, value in headers.items():
            response.headers[key] = value
        # Stop once re-rendering no longer changes the body length.
        if response.headers['Content-Length'] == content_len_shown:
            break
    return response
@app.route('/cookies')
def view_cookies(hide_env=True):
    """Return the request's cookies, hiding analytics cookies by default.

    Pass ``?show_env`` in the query string to include the ENV_COOKIES
    tracking cookies in the output.
    """
    cookies = dict(request.cookies.items())

    if hide_env and 'show_env' not in request.args:
        for name in ENV_COOKIES:
            # Absent keys are simply ignored.
            cookies.pop(name, None)

    return jsonify(cookies=cookies)
@app.route('/cookies/set/<name>/<value>')
def set_cookie(name, value):
    """Set the given cookie, then redirect to the cookie listing."""
    response = app.make_response(redirect('/cookies'))
    response.set_cookie(key=name, value=value)
    return response
@app.route('/basic-auth/<user>/<passwd>')
def basic_auth(user='user', passwd='<PASSWORD>'):
    """Prompts the user for authorization using HTTP Basic Auth.

    Responds 401 when the supplied credentials do not match the URL's
    user/passwd segments.
    """
    if not check_basic_auth(user, passwd):
        return status_code(401)
    return jsonify(authenticated=True, user=user)
@app.route('/hidden-basic-auth/<user>/<passwd>')
def hidden_basic_auth(user='user', passwd='<PASSWORD>'):
    """Prompts the user for authorization using HTTP Basic Auth.

    Unlike /basic-auth, a failed attempt returns 404 ("hidden"), so the
    endpoint does not reveal that authentication exists.
    """
    if not check_basic_auth(user, passwd):
        return status_code(404)
    return jsonify(authenticated=True, user=user)
@app.route('/digest-auth/<qop>/<user>/<passwd>')
def digest_auth(qop=None, user='user', passwd='<PASSWORD>'):
    """Prompts the user for authorization using HTTP Digest auth"""
    # Only 'auth' and 'auth-int' are valid qop values; anything else
    # falls back to advertising both in the challenge.
    if qop not in ('auth', 'auth-int'):
        qop = None
    if not request.headers.get('Authorization'):
        # No credentials yet: send a 401 challenge with fresh nonce/opaque.
        response = app.make_response('')
        response.status_code = 401
        nonce = H("%s:%d:%s" % (request.remote_addr,
                                time.time(),
                                os.urandom(10)))
        opaque = H(os.urandom(10))
        auth = WWWAuthenticate("digest")
        auth.set_digest('<EMAIL>', nonce, opaque=opaque,
                        qop=('auth', 'auth-int') if qop is None else (qop, ))
        response.headers['WWW-Authenticate'] = auth.to_header()
        return response
    elif not check_digest_auth(user, passwd):
        # Credentials were supplied but do not verify.
        return status_code(401)
    return jsonify(authenticated=True, user=user)
@app.route('/delay/<int:delay>')
def delay_response(delay):
    """Returns a delayed response (delay is capped at 10 seconds)."""
    delay = min(delay, 10)
    time.sleep(delay)
    return jsonify(get_dict(
        'url', 'args', 'form', 'data', 'origin', 'headers', 'files'))
@app.route('/base64/<value>')
def decode_base64(value):
    """Decodes a base64url-encoded string, tolerating missing '=' padding.

    Fix: clients commonly strip trailing '=' padding from base64url, and
    ``urlsafe_b64decode`` raises on such input. Restoring padding first is
    backward compatible — already-padded input gains zero extra bytes.
    """
    encoded = value.encode('utf-8')
    # Pad to a multiple of 4; adds nothing when already correctly padded.
    encoded += b'=' * (-len(encoded) % 4)
    return base64.urlsafe_b64decode(encoded).decode('utf-8')
if __name__ == '__main__':
app.run()
| # -*- coding: utf-8 -*-
"""
httpbin.core
~~~~~~~~~~~~
This module provides the core HttpBin experience.
"""
import base64
import json
import os
import time
import newrelic.agent
from flask import Flask, Response, request, render_template, redirect, jsonify
from raven.contrib.flask import Sentry
from werkzeug.datastructures import WWWAuthenticate
from . import filters
from .helpers import get_headers, status_code, get_dict, check_basic_auth, check_digest_auth, H
from .utils import weighted_choice
from .structures import CaseInsensitiveDict
ENV_COOKIES = (
'_gauges_unique',
'_gauges_unique_year',
'_gauges_unique_month',
'_gauges_unique_day',
'_gauges_unique_hour',
'__utmz',
'__utma',
'__utmb'
)
app = Flask(__name__)
# Setup error collection
sentry = Sentry(app)
newrelic.agent.initialize()
# ------
# Routes
# ------
@app.route('/')
def view_landing_page():
"""Generates Landing Page."""
return render_template('index.html')
@app.route('/ip')
def view_origin():
"""Returns Origin IP."""
return jsonify(origin=request.remote_addr)
@app.route('/headers')
def view_headers():
"""Returns HTTP HEADERS."""
return jsonify(get_dict('headers'))
@app.route('/user-agent')
def view_user_agent():
"""Returns User-Agent."""
headers = get_headers()
return jsonify({'user-agent': headers['user-agent']})
@app.route('/get', methods=('GET',))
def view_get():
"""Returns GET Data."""
return jsonify(get_dict('url', 'args', 'headers', 'origin'))
@app.route('/post', methods=('POST',))
def view_post():
"""Returns POST Data."""
return jsonify(get_dict(
'url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json'))
@app.route('/put', methods=('PUT',))
def view_put():
"""Returns PUT Data."""
return jsonify(get_dict(
'url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json'))
@app.route('/patch', methods=('PATCH',))
def view_patch():
"""Returns PATCH Data."""
return jsonify(get_dict(
'url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json'))
@app.route('/delete', methods=('DELETE',))
def view_delete():
"""Returns DETLETE Data."""
return jsonify(get_dict('url', 'args', 'data', 'origin', 'headers', 'json'))
@app.route('/gzip')
@filters.gzip
def view_gzip_encoded_content():
"""Returns GZip-Encoded Data."""
return jsonify(get_dict(
'origin', 'headers', method=request.method, gzipped=True))
@app.route('/redirect/<int:n>')
def redirect_n_times(n):
"""301 Redirects n times."""
assert n > 0
if (n == 1):
return redirect('/get')
return redirect('/redirect/{0}'.format(n - 1))
@app.route('/relative-redirect/<int:n>')
def relative_redirect_n_times(n):
"""301 Redirects n times."""
assert n > 0
response = app.make_response('')
response.status_code = 302
if (n == 1):
response.headers['Location'] = '/get'
return response
response.headers['Location'] = '/relative-redirect/{0}'.format(n - 1)
return response
@app.route('/stream/<int:n>')
def stream_n_messages(n):
"""Stream n JSON messages"""
response = get_dict('url', 'args', 'headers', 'origin')
n = min(n, 100)
def generate_stream():
for i in range(n):
response['id'] = i
yield json.dumps(response) + '\n'
return Response(generate_stream(), headers={
"Transfer-Encoding": "chunked",
"Content-Type": "application/json",
})
@app.route('/status/<codes>')
def view_status_code(codes):
"""Return status code or random status code if more than one are given"""
if not ',' in codes:
code = int(codes)
return status_code(code)
choices = []
for choice in codes.split(','):
if not ':' in choice:
code = choice
weight = 1
else:
code, weight = choice.split(':')
choices.append((int(code), float(weight)))
code = weighted_choice(choices)
return status_code(code)
@app.route('/response-headers')
def response_headers():
"""Returns a set of response headers from the query string """
headers = CaseInsensitiveDict(request.args.items())
response = jsonify(headers.items())
while True:
content_len_shown = response.headers['Content-Length']
response = jsonify(response.headers.items())
for key, value in headers.items():
response.headers[key] = value
if response.headers['Content-Length'] == content_len_shown:
break
return response
@app.route('/cookies')
def view_cookies(hide_env=True):
"""Returns cookie data."""
cookies = dict(request.cookies.items())
if hide_env and ('show_env' not in request.args):
for key in ENV_COOKIES:
try:
del cookies[key]
except KeyError:
pass
return jsonify(cookies=cookies)
@app.route('/cookies/set/<name>/<value>')
def set_cookie(name, value):
"""Sets a cookie and redirects to cookie list."""
r = app.make_response(redirect('/cookies'))
r.set_cookie(key=name, value=value)
return r
@app.route('/basic-auth/<user>/<passwd>')
def basic_auth(user='user', passwd='<PASSWORD>'):
"""Prompts the user for authorization using HTTP Basic Auth."""
if not check_basic_auth(user, passwd):
return status_code(401)
return jsonify(authenticated=True, user=user)
@app.route('/hidden-basic-auth/<user>/<passwd>')
def hidden_basic_auth(user='user', passwd='<PASSWORD>'):
"""Prompts the user for authorization using HTTP Basic Auth."""
if not check_basic_auth(user, passwd):
return status_code(404)
return jsonify(authenticated=True, user=user)
@app.route('/digest-auth/<qop>/<user>/<passwd>')
def digest_auth(qop=None, user='user', passwd='<PASSWORD>'):
"""Prompts the user for authorization using HTTP Digest auth"""
if qop not in ('auth', 'auth-int'):
qop = None
if not request.headers.get('Authorization'):
response = app.make_response('')
response.status_code = 401
nonce = H("%s:%d:%s" % (request.remote_addr,
time.time(),
os.urandom(10)))
opaque = H(os.urandom(10))
auth = WWWAuthenticate("digest")
auth.set_digest('<EMAIL>', nonce, opaque=opaque,
qop=('auth', 'auth-int') if qop is None else (qop, ))
response.headers['WWW-Authenticate'] = auth.to_header()
return response
elif not check_digest_auth(user, passwd):
return status_code(401)
return jsonify(authenticated=True, user=user)
@app.route('/delay/<int:delay>')
def delay_response(delay):
"""Returns a delayed response"""
delay = min(delay, 10)
time.sleep(delay)
return jsonify(get_dict(
'url', 'args', 'form', 'data', 'origin', 'headers', 'files'))
@app.route('/base64/<value>')
def decode_base64(value):
"""Decodes base64url-encoded string"""
encoded = value.encode('utf-8')
return base64.urlsafe_b64decode(encoded).decode('utf-8')
if __name__ == '__main__':
app.run()
| en | 0.517448 | # -*- coding: utf-8 -*- httpbin.core ~~~~~~~~~~~~ This module provides the core HttpBin experience. # Setup error collection # ------ # Routes # ------ Generates Landing Page. Returns Origin IP. Returns HTTP HEADERS. Returns User-Agent. Returns GET Data. Returns POST Data. Returns PUT Data. Returns PATCH Data. Returns DETLETE Data. Returns GZip-Encoded Data. 301 Redirects n times. 301 Redirects n times. Stream n JSON messages Return status code or random status code if more than one are given Returns a set of response headers from the query string Returns cookie data. Sets a cookie and redirects to cookie list. Prompts the user for authorization using HTTP Basic Auth. Prompts the user for authorization using HTTP Basic Auth. Prompts the user for authorization using HTTP Digest auth Returns a delayed response Decodes base64url-encoded string | 2.131871 | 2 |
src/polls/tests.py | AxelWard/RiverNet-2 | 0 | 6617514 | <reponame>AxelWard/RiverNet-2
from django.test import TestCase
from django.test import Client
from .views import water_quantity
from .rivernet_api import Get_Site_Information
class pollsTestCase(TestCase):
    """Smoke tests for the polls app root view and the RiverNet API helper.

    Fixes: both test methods were missing the ``self`` parameter,
    ``testBasicPost`` referenced an undefined name ``c`` instead of the
    client created in ``setUp``, a stray debug ``print`` was removed, and
    ``assertTrue(x != None)`` became the explicit ``assertIsNotNone``.
    """

    def setUp(self):
        self.client = Client()

    def testBasicPost(self):
        # The root URL should render successfully.
        response = self.client.get('')
        self.assertEqual(response.status_code, 200)

    def testJSONRequest(self):
        # The upstream API should return parsed site information.
        site_dictionary = Get_Site_Information()
        self.assertIsNotNone(site_dictionary)
| from django.test import TestCase
from django.test import Client
from .views import water_quantity
from .rivernet_api import Get_Site_Information
class pollsTestCase(TestCase):
def setUp(self):
self.client = Client()
def testBasicPost():
response = c.get('')
print(response)
self.assertEqual(response.status_code, 200)
def testJSONRequest():
site_dictionary = Get_Site_Information()
self.assertTrue(site_dictionary != None) | none | 1 | 2.211016 | 2 | |
application/config/development.py | alphagov-mirror/performanceplatform-admin | 1 | 6617515 | <reponame>alphagov-mirror/performanceplatform-admin<filename>application/config/development.py
import os

# Flask settings for running the Performance Platform admin app locally.
# Every secret below is a placeholder; real values come from
# development_local_overrides.py (not in version control, see bottom).
DEBUG = True
SECRET_KEY = 'placeholder_cookie_secret_key'
ADMIN_HOST = 'http://performanceplatform-admin.dev.gov.uk/'
BACKDROP_HOST = 'http://backdrop-write.dev.gov.uk'
STAGECRAFT_HOST = 'http://stagecraft.dev.gov.uk'
GOVUK_SITE_URL = 'http://spotlight.development.performance.service.gov.uk'
SIGNON_OAUTH_ID = 'oauth_id'
SIGNON_OAUTH_SECRET = 'oauth_secret'
SIGNON_BASE_URL = 'http://signon.dev.gov.uk'
REDIS_URL = os.getenv('REDIS_URL')
FAKE_OAUTH_TOKEN = '<PASSWORD>-access-token'
# Stub signon user returned when OAuth is faked out during development.
FAKE_OAUTH_USER = {
    "email": "<EMAIL>",
    "name": "<NAME>",
    "organisation_slug": "cabinet-office",
    "permissions": [
        "signin",
        "admin",
    ],
    "uid": "00000000-0000-0000-0000-000000000000"
}
AWS_ACCESS_KEY_ID = 'AWS access key id'
AWS_SECRET_ACCESS_KEY = 'AWS secret access key'
# You can use development_local_overrides.py in this directory to set config
# that is unique to your development environment, like OAuth IDs and secrets.
# It is not in version control.
try:
    from development_local_overrides import *
except ImportError as e:
    pass
| import os
DEBUG = True
SECRET_KEY = 'placeholder_cookie_secret_key'
ADMIN_HOST = 'http://performanceplatform-admin.dev.gov.uk/'
BACKDROP_HOST = 'http://backdrop-write.dev.gov.uk'
STAGECRAFT_HOST = 'http://stagecraft.dev.gov.uk'
GOVUK_SITE_URL = 'http://spotlight.development.performance.service.gov.uk'
SIGNON_OAUTH_ID = 'oauth_id'
SIGNON_OAUTH_SECRET = 'oauth_secret'
SIGNON_BASE_URL = 'http://signon.dev.gov.uk'
REDIS_URL = os.getenv('REDIS_URL')
FAKE_OAUTH_TOKEN = '<PASSWORD>-access-token'
FAKE_OAUTH_USER = {
"email": "<EMAIL>",
"name": "<NAME>",
"organisation_slug": "cabinet-office",
"permissions": [
"signin",
"admin",
],
"uid": "00000000-0000-0000-0000-000000000000"
}
AWS_ACCESS_KEY_ID = 'AWS access key id'
AWS_SECRET_ACCESS_KEY = 'AWS secret access key'
# You can use development_local_overrides.py in this directory to set config
# that is unique to your development environment, like OAuth IDs and secrets.
# It is not in version control.
try:
from development_local_overrides import *
except ImportError as e:
pass | en | 0.906364 | # You can use development_local_overrides.py in this directory to set config # that is unique to your development environment, like OAuth IDs and secrets. # It is not in version control. | 1.537685 | 2 |
services/python-images/migrations/versions/6afc1d19f183_merge.py | hpi-epic/mpcsl | 1 | 6617516 | """merge
Revision ID: 6afc1d19f183
Revises: 155fe996403c, <PASSWORD>
Create Date: 2019-03-18 14:44:44.174935
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6afc<PASSWORD>'
down_revision = ('155fe996403c', '<PASSWORD>')
branch_labels = None
depends_on = None
def upgrade():
    # Merge migrations only join divergent revision branches;
    # there are no schema changes to apply.
    pass
def downgrade():
    # Nothing to undo: the merge revision made no schema changes.
    pass
| """merge
Revision ID: 6afc1d19f183
Revises: 155fe996403c, <PASSWORD>
Create Date: 2019-03-18 14:44:44.174935
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6afc<PASSWORD>'
down_revision = ('155fe996403c', '<PASSWORD>')
branch_labels = None
depends_on = None
def upgrade():
pass
def downgrade():
pass
| en | 0.402417 | merge Revision ID: 6afc1d19f183 Revises: 155fe996403c, <PASSWORD> Create Date: 2019-03-18 14:44:44.174935 # revision identifiers, used by Alembic. | 0.98014 | 1 |
Cap_6/ex6.01.py | gguilherme42/Livro-de-Python | 4 | 6617517 | notas = [int(input(f'Nota {i + 1}: ')) for i in range(7)]
print(f'Média: {sum(notas)/ len(notas):.2f}')
| notas = [int(input(f'Nota {i + 1}: ')) for i in range(7)]
print(f'Média: {sum(notas)/ len(notas):.2f}')
| none | 1 | 3.188578 | 3 | |
ProDy/MIp.py | diegozea/mitos-benchmarks | 2 | 6617518 | import time
from prody import *
msa_long = parseMSA("../data/PF00089_aligned.fasta")
msa_wide = parseMSA("../data/PF16957_aligned.fasta")
def mip(msa):
    """Mutual-information matrix of *msa* with the APC (MIp) correction."""
    raw_mi = buildMutinfoMatrix(msa)
    corrected = applyMutinfoCorr(raw_mi)
    return corrected
# NOTE(review): this file uses Python 2 print statements throughout.
# Benchmark MIp on the long alignment (PF00089).
start = time.time()
mip(msa_long)
elapsed = time.time() - start
print "[BENCH] MIp PF00089: ", elapsed
# Benchmark MIp on the wide alignment (PF16957).
start = time.time()
mip(msa_wide)
elapsed = time.time() - start
print "[BENCH] MIp PF16957: ", elapsed
| import time
from prody import *
msa_long = parseMSA("../data/PF00089_aligned.fasta")
msa_wide = parseMSA("../data/PF16957_aligned.fasta")
def mip(msa):
MI = buildMutinfoMatrix(msa)
MIp = applyMutinfoCorr(MI)
return MIp
start = time.time()
mip(msa_long)
elapsed = time.time() - start
print "[BENCH] MIp PF00089: ", elapsed
start = time.time()
mip(msa_wide)
elapsed = time.time() - start
print "[BENCH] MIp PF16957: ", elapsed
| none | 1 | 2.247499 | 2 | |
v0/aia_eis_v0/loa/g_n/refers/g_n_refer_0/main.py | DreamBoatOve/aia_eis | 1 | 6617519 | <reponame>DreamBoatOve/aia_eis
"""
Algorithms from scratch: Gauss-Newton
https://omyllymaki.medium.com/gauss-newton-algorithm-implementation-from-scratch-55ebe56aac2e
"""
import logging
import matplotlib.pyplot as plt
import numpy as np
from loa.g_n.refers.g_n_refer_0.gn_solver import GNSolver
logging.basicConfig(level=logging.INFO)
NOISE = 3
COEFFICIENTS = [-0.001, 0.1, 0.1, 2, 15]
def func(x, coeff):
    """Cubic polynomial in x plus a sinusoidal term, parameterized by coeff.

    coeff[0..3] are the cubic's coefficients (highest degree first);
    coeff[4] scales the sin(x) term.
    """
    return np.polyval(coeff[:4], x) + coeff[4] * np.sin(x)
def main():
    """Demo: fit a noisy synthetic signal with Gauss-Newton and plot it."""
    # Synthesize a noisy signal from the known coefficients.
    x = np.arange(1, 100)
    y = func(x, COEFFICIENTS)
    yn = y + NOISE * np.random.randn(len(x))
    # Fit with Gauss-Newton, starting from a deliberately poor random guess.
    solver = GNSolver(fit_function=func, max_iter=100, tolerance_difference=10 ** (-6))
    init_guess = 1000000*np.random.random(len(COEFFICIENTS))
    _ = solver.fit(x, yn, init_guess)
    fit = solver.get_estimate()
    residual = solver.get_residual()
    # Visual comparison of truth, noisy data, the fit, and the residual.
    plt.plot(x, y, label="Original, noiseless signal", linewidth=2)
    plt.plot(x, yn, label="Noisy signal", linewidth=2)
    plt.plot(x, fit, label="Fit", linewidth=2)
    plt.plot(x, residual, label="Residual", linewidth=2)
    plt.title("Gauss-Newton: curve fitting example")
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.grid()
    plt.legend()
    plt.show()
if __name__ == "__main__":
main() | """
Algorithms from scratch: Gauss-Newton
https://omyllymaki.medium.com/gauss-newton-algorithm-implementation-from-scratch-55ebe56aac2e
"""
import logging
import matplotlib.pyplot as plt
import numpy as np
from loa.g_n.refers.g_n_refer_0.gn_solver import GNSolver
logging.basicConfig(level=logging.INFO)
NOISE = 3
COEFFICIENTS = [-0.001, 0.1, 0.1, 2, 15]
def func(x, coeff):
return coeff[0] * x ** 3 + coeff[1] * x ** 2 + coeff[2] * x + coeff[3] + coeff[4] * np.sin(x)
def main():
x = np.arange(1, 100)
y = func(x, COEFFICIENTS)
yn = y + NOISE * np.random.randn(len(x))
solver = GNSolver(fit_function=func, max_iter=100, tolerance_difference=10 ** (-6))
init_guess = 1000000*np.random.random(len(COEFFICIENTS))
_ = solver.fit(x, yn, init_guess)
fit = solver.get_estimate()
residual = solver.get_residual()
plt.plot(x, y, label="Original, noiseless signal", linewidth=2)
plt.plot(x, yn, label="Noisy signal", linewidth=2)
plt.plot(x, fit, label="Fit", linewidth=2)
plt.plot(x, residual, label="Residual", linewidth=2)
plt.title("Gauss-Newton: curve fitting example")
plt.xlabel("X")
plt.ylabel("Y")
plt.grid()
plt.legend()
plt.show()
if __name__ == "__main__":
main() | en | 0.523681 | Algorithms from scratch: Gauss-Newton https://omyllymaki.medium.com/gauss-newton-algorithm-implementation-from-scratch-55ebe56aac2e | 3.167067 | 3 |
dae/dae/pheno/tests/test_ped_prepare.py | iossifovlab/gpf | 0 | 6617520 | """
Created on Jul 25, 2017
@author: lubo
"""
import os
from dae.pheno.prepare.ped_prepare import PreparePersons, PrepareVariables
from dae.pheno.pheno_db import PhenotypeStudy
from dae.pedigrees.loader import FamiliesLoader
import pytest
@pytest.fixture(scope="session")
def instrument_files():
    """Absolute paths to the two fixture copies of instrument i1's CSV."""
    return [
        os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            "fixtures/instruments/i1.csv",
        ),
        os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            "fixtures/instruments2/i1.csv",
        ),
    ]
def test_ped_prepare_simple(test_config, fake_ped_file):
    """A pedigree file can be read, prepared, and saved end to end."""
    test_config.person.role.mapping = "INTERNAL"
    prep = PreparePersons(test_config)
    ped_df = FamiliesLoader.flexible_pedigree_read(fake_ped_file)
    assert ped_df is not None
    ped_df = prep.prepare_pedigree(ped_df)
    prep.save_pedigree(ped_df)
@pytest.mark.parametrize("instrument_sel", [([0, 0]), ([0, 1])])
def test_ped_prepare_variable(
    test_config, temp_dbfile, instrument_files, instrument_sel, fake_ped_file
):
    """Instrument rows load for one or both fixture files (195 rows each)."""
    test_config.person.role.mapping = "INTERNAL"
    prep = PrepareVariables(test_config)
    assert prep is not None
    ped_df = prep.build_pedigree(fake_ped_file)
    assert ped_df is not None
    # instrument_sel is an inclusive [start, end] index pair into the fixtures.
    instruments = instrument_files[instrument_sel[0]: instrument_sel[1] + 1]
    df = prep.load_instrument("i1", instruments)
    df = prep.build_instrument("i1", df)
    assert df is not None
    assert len(df) == 195 * len(instruments)
def test_load_invalid_descriptions(invalid_descriptions):
    # Malformed description files must be rejected, not silently accepted.
    with pytest.raises(AssertionError):
        PrepareVariables.load_descriptions(invalid_descriptions)
def test_load_descriptions(valid_descriptions):
    """Loaded descriptions resolve (instrument, measure) pairs correctly.

    Fix: renamed from ``test_load_descriptionsc`` (trailing-``c`` typo);
    the test is only discovered by pytest's ``test_*`` pattern and is not
    referenced by name anywhere, so the rename is safe.
    """
    descriptions = PrepareVariables.load_descriptions(valid_descriptions)
    assert descriptions("i1", "m1") == "Measure number one"
    assert descriptions("i1", "m2") == "Measure number two"
    assert descriptions("i1", "m9") == "Measure number nine"
def test_ped_prepare_variable_with_descriptions(
    test_config,
    temp_dbfile,
    instrument_files,
    fake_ped_file,
    valid_descriptions,
):
    """Measure descriptions survive the build and land in the pheno DB."""
    test_config.person.role.mapping = "INTERNAL"
    prep = PrepareVariables(test_config)
    assert prep is not None
    ped_df = prep.build_pedigree(fake_ped_file)
    assert ped_df is not None
    descriptions = PrepareVariables.load_descriptions(valid_descriptions)
    # Build instrument i1 from the first fixture file only.
    df = prep.load_instrument("i1", instrument_files[0:1])
    df = prep.build_instrument("i1", df, descriptions)
    assert df is not None
    assert len(df) == 195
    # Reopen the resulting database and verify the descriptions were stored.
    temp_db = PhenotypeStudy("temp_db", temp_dbfile)
    measures = temp_db.get_measures()
    assert measures["i1.m1"].description == "Measure number one"
    assert measures["i1.m2"].description == "Measure number two"
    assert measures["i1.m9"].description == "Measure number nine"
| """
Created on Jul 25, 2017
@author: lubo
"""
import os
from dae.pheno.prepare.ped_prepare import PreparePersons, PrepareVariables
from dae.pheno.pheno_db import PhenotypeStudy
from dae.pedigrees.loader import FamiliesLoader
import pytest
@pytest.fixture(scope="session")
def instrument_files():
return [
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"fixtures/instruments/i1.csv",
),
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"fixtures/instruments2/i1.csv",
),
]
def test_ped_prepare_simple(test_config, fake_ped_file):
test_config.person.role.mapping = "INTERNAL"
prep = PreparePersons(test_config)
ped_df = FamiliesLoader.flexible_pedigree_read(fake_ped_file)
assert ped_df is not None
ped_df = prep.prepare_pedigree(ped_df)
prep.save_pedigree(ped_df)
@pytest.mark.parametrize("instrument_sel", [([0, 0]), ([0, 1])])
def test_ped_prepare_variable(
test_config, temp_dbfile, instrument_files, instrument_sel, fake_ped_file
):
test_config.person.role.mapping = "INTERNAL"
prep = PrepareVariables(test_config)
assert prep is not None
ped_df = prep.build_pedigree(fake_ped_file)
assert ped_df is not None
instruments = instrument_files[instrument_sel[0]: instrument_sel[1] + 1]
df = prep.load_instrument("i1", instruments)
df = prep.build_instrument("i1", df)
assert df is not None
assert len(df) == 195 * len(instruments)
def test_load_invalid_descriptions(invalid_descriptions):
with pytest.raises(AssertionError):
PrepareVariables.load_descriptions(invalid_descriptions)
def test_load_descriptionsc(valid_descriptions):
descriptions = PrepareVariables.load_descriptions(valid_descriptions)
assert descriptions("i1", "m1") == "Measure number one"
assert descriptions("i1", "m2") == "Measure number two"
assert descriptions("i1", "m9") == "Measure number nine"
def test_ped_prepare_variable_with_descriptions(
test_config,
temp_dbfile,
instrument_files,
fake_ped_file,
valid_descriptions,
):
test_config.person.role.mapping = "INTERNAL"
prep = PrepareVariables(test_config)
assert prep is not None
ped_df = prep.build_pedigree(fake_ped_file)
assert ped_df is not None
descriptions = PrepareVariables.load_descriptions(valid_descriptions)
df = prep.load_instrument("i1", instrument_files[0:1])
df = prep.build_instrument("i1", df, descriptions)
assert df is not None
assert len(df) == 195
temp_db = PhenotypeStudy("temp_db", temp_dbfile)
measures = temp_db.get_measures()
assert measures["i1.m1"].description == "Measure number one"
assert measures["i1.m2"].description == "Measure number two"
assert measures["i1.m9"].description == "Measure number nine"
| en | 0.871758 | Created on Jul 25, 2017 @author: lubo | 1.856322 | 2 |
lesson_tasks/lesson14.py | NikaEgorova/goiteens-python3-egorova | 0 | 6617521 | <filename>lesson_tasks/lesson14.py
class Human:
    """A person with private money/house state who can earn and buy a house."""

    default_name = "Боб"
    default_age = 30

    def __init__(self, name=default_name, age=default_age):
        self.name = name
        self.age = age
        self.__money = 20000   # private balance, mutated only via methods
        self.__house = None    # private reference to a purchased House

    def info(self):
        """Print the person's current state."""
        print(f'Имя: {self.name}')
        print(f'Возраст: {self.age}')
        print(f'Количество денег: {self.__money}')
        print(f'Наличие дома: {self.__house}')

    @staticmethod
    def default_info():
        """Print the class-level defaults.

        Fix: originally written instance-style but without ``self``, so
        calling it on an instance raised TypeError. It reads only class
        attributes, so it is now a proper ``@staticmethod``; the existing
        ``Human.default_info()`` call site keeps working unchanged.
        """
        print(f'Имя по умолчанию: {Human.default_name}')
        print(f'Возраст по умолчанию: {Human.default_age}')

    def __make_deal(self, house, price):
        # Private: assumes affordability was already checked by buy_house.
        self.__money -= price
        self.__house = house

    def earn_money(self, amount):
        """Add ``amount`` to the balance and report the new total."""
        self.__money += amount
        print(f'Заработал {amount} грн! Текущий баланс: {self.__money} грн')

    def buy_house(self, house, discount):
        """Buy ``house`` at ``discount`` percent off, if affordable."""
        price = house.final_price(discount)
        if self.__money >= price:
            self.__make_deal(house, price)
        else:
            print("Недостаточно денег")
class House:
    """A purchasable house with a floor area and a list price."""

    def __init__(self, area, price):
        self._area = area
        self._price = price

    def final_price(self, discount):
        """Return the price after taking ``discount`` percent off."""
        discounted = self._price * (100 - discount) / 100
        print(f'Цена со скидкой: {discounted}')
        return discounted
class SmallHouse(House):
    """A house with a fixed small area; only the price varies per instance."""

    # Area in square meters shared by every SmallHouse.
    default_area = 40

    def __init__(self, price):
        super().__init__(SmallHouse.default_area, price)
# Demo: a default human earns money and buys a small house at a 3% discount.
man = Human()
man.info()
Human.default_info()
man.earn_money(20000)
small_house = SmallHouse(8500)
man.buy_house(small_house, 3)
man.info()
| <filename>lesson_tasks/lesson14.py
class Human:
default_name = "Боб"
default_age = 30
def __init__(self, name=default_name, age=default_age):
self.name = name
self.age = age
self.__money = 20000
self.__house = None
def info(self):
print(f'Имя: {self.name}')
print(f'Возраст: {self.age}')
print(f'Количество денег: {self.__money}')
print(f'Наличие дома: {self.__house}')
def default_info():
print(f'Имя по умолчанию: {Human.default_name}')
print(f'Возраст по умолчанию: {Human.default_age}')
def __make_deal(self, house, price):
self.__money -= price
self.__house = house
def earn_money(self, amount):
self.__money += amount
print(f'Заработал {amount} грн! Текущий баланс: {self.__money} грн')
def buy_house(self, house, discount):
price = house.final_price(discount)
if self.__money >= price:
self.__make_deal(house, price)
else:
print("Недостаточно денег")
class House:
def __init__(self, area, price):
self._area = area
self._price = price
def final_price(self, discount):
final_price = self._price * (100 - discount) / 100
print(f'Цена со скидкой: {final_price}')
return final_price
class SmallHouse(House):
default_area = 40
def __init__(self, price):
super().__init__(SmallHouse.default_area, price)
man = Human()
man.info()
Human.default_info()
man.earn_money(20000)
small_house = SmallHouse(8500)
man.buy_house(small_house, 3)
man.info()
| none | 1 | 4.155923 | 4 | |
python-kiwoom-api/intermediate/section3/unit01/02.py | sharebook-kr/learningspoons-bootcamp-finance | 9 | 6617522 | <filename>python-kiwoom-api/intermediate/section3/unit01/02.py
from pykiwoom.kiwoom import *

# Connect to the Kiwoom OpenAPI and list the logged-in account numbers.
kiwoom = Kiwoom()
kiwoom.CommConnect(block=True)  # block here until the login completes
print("블록킹 로그인 완료")
accounts = kiwoom.GetLoginInfo("ACCNO")
print(accounts)
| <filename>python-kiwoom-api/intermediate/section3/unit01/02.py
from pykiwoom.kiwoom import *
kiwoom = Kiwoom()
kiwoom.CommConnect(block=True) # 로그인이 될때까지 여기서 대기
print("블록킹 로그인 완료")
accounts = kiwoom.GetLoginInfo("ACCNO")
print(accounts)
| ko | 1.000069 | # 로그인이 될때까지 여기서 대기 | 2.216655 | 2 |
generator/generate.py | takatsugu-kato/Office365-REST-Python-Client | 0 | 6617523 | <filename>generator/generate.py
from office365.runtime.odata.odata_v4_reader import ODataV4Reader
if __name__ == '__main__':
    # Build an OData v4 model from the bundled Microsoft Graph metadata dump.
    generator_options = {
        'namespace': '',
        'inputPath': './metadata/MicrosoftGraph15122019.xml',
        'outputPath': ''
    }
    reader = ODataV4Reader(generator_options)
    model = reader.generate_model()
    print(model)
| <filename>generator/generate.py
from office365.runtime.odata.odata_v4_reader import ODataV4Reader
if __name__ == '__main__':
generator_options = {
'namespace': '',
'inputPath': './metadata/MicrosoftGraph15122019.xml',
'outputPath': ''
}
reader = ODataV4Reader(generator_options)
model = reader.generate_model()
print(model)
| none | 1 | 2.133945 | 2 | |
disinformation_detection/linear_models.py | isspek/Sentinel-NLP-Role-Tasks | 0 | 6617524 | <reponame>isspek/Sentinel-NLP-Role-Tasks<gh_stars>0
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from pathlib import Path
from utils import logger, report_results
from joblib import dump, load
def _evaluate(clf, X_dev, y_dev, X_test, y_test):
    # Predict on the dev and test splits and log the metrics.
    y_pred = clf.predict(X_dev)
    y_pred_test = clf.predict(X_test)
    report_results(y_dev, y_pred, y_pred_test, y_test)


def _fit_or_load(build_model, model_path,
                 X_train, y_train, X_dev, y_dev, X_test, y_test):
    """Load a cached model from ``model_path`` if it exists; otherwise fit a
    fresh one via ``build_model()``, evaluate it, and persist it.

    Fix: ``svm`` and ``random_forest`` were near-identical copies of this
    logic; the shared flow now lives in one place (DRY) with unchanged
    behavior, paths, and log messages.
    """
    if Path(model_path).exists():
        logger.info('{} exists already!!'.format(model_path))
        clf = load(model_path)
        _evaluate(clf, X_dev, y_dev, X_test, y_test)
        return
    clf = build_model()
    clf.fit(X_train, y_train)
    _evaluate(clf, X_dev, y_dev, X_test, y_test)
    dump(clf, model_path)
    logger.info('Saved to {}'.format(model_path))


def svm(X_train, y_train, X_dev, y_dev, X_test, y_test, random_state, feat_name):
    """Train (or load a cached) linear SVM for this seed and feature set."""
    model_path = 'models/svm_{random_state}_{feat_name}.joblib'.format(
        random_state=random_state, feat_name=feat_name)
    _fit_or_load(lambda: LinearSVC(random_state=random_state, tol=1e-5),
                 model_path, X_train, y_train, X_dev, y_dev, X_test, y_test)


def random_forest(X_train, y_train, X_dev, y_dev, X_test, y_test, random_state, feat_name):
    """Train (or load a cached) random forest for this seed and feature set."""
    model_path = 'models/random_forest_{random_state}_{feat_name}.joblib'.format(
        random_state=random_state, feat_name=feat_name)
    _fit_or_load(lambda: RandomForestClassifier(random_state=random_state),
                 model_path, X_train, y_train, X_dev, y_dev, X_test, y_test)


MODELS = {
    'svm': svm,
    'random_forest': random_forest
}
| from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from pathlib import Path
from utils import logger, report_results
from joblib import dump, load
def svm(X_train, y_train, X_dev, y_dev, X_test, y_test, random_state, feat_name):
model_path = 'models/svm_{random_state}_{feat_name}.joblib'.format(random_state=random_state,
feat_name=feat_name)
if Path(model_path).exists():
logger.info('{} exists already!!'.format(model_path))
clf = load(model_path)
y_pred = clf.predict(X_dev)
y_pred_test = clf.predict(X_test)
report_results(y_dev, y_pred, y_pred_test, y_test)
return
clf = LinearSVC(random_state=random_state, tol=1e-5)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_dev)
y_pred_test = clf.predict(X_test)
report_results(y_dev, y_pred, y_pred_test, y_test)
dump(clf, model_path)
logger.info('Saved to {}'.format(model_path))
def random_forest(X_train, y_train, X_dev, y_dev, X_test, y_test, random_state, feat_name):
model_path = 'models/random_forest_{random_state}_{feat_name}.joblib'.format(random_state=random_state,
feat_name=feat_name)
if Path(model_path).exists():
logger.info('{} exists already!!'.format(model_path))
clf = load(model_path)
y_pred = clf.predict(X_dev)
y_pred_test = clf.predict(X_test)
report_results(y_dev, y_pred, y_pred_test, y_test)
return
clf = RandomForestClassifier(random_state=random_state)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_dev)
y_pred_test = clf.predict(X_test)
report_results(y_dev, y_pred, y_pred_test, y_test)
dump(clf, model_path)
logger.info('Saved to {}'.format(model_path))
MODELS = {
'svm': svm,
'random_forest': random_forest
} | none | 1 | 2.40745 | 2 | |
openff/system/tests/test_interop/test_internal_writers.py | mattwthompson/openff-system | 0 | 6617525 | import numpy as np
from openff.toolkit.topology import Molecule
from simtk import unit as omm_unit
from openff.system.stubs import ForceField
def test_internal_gro_writer():
    """Internal GRO writer matches ParmEd's output except header/footer."""
    # Build a minimal methane system with one conformer.
    mol = Molecule.from_smiles("C")
    mol.generate_conformers(n_conformers=1)
    top = mol.to_topology()
    parsley = ForceField("openff-1.0.0.offxml")
    out = parsley.create_openff_system(top)
    out.box = [4, 4, 4] * np.eye(3)
    out.positions = mol.conformers[0] / omm_unit.nanometer
    # Write the same system with both writers and compare the bodies.
    out.to_gro("internal.gro", writer="internal")
    out.to_gro("parmed.gro", writer="parmed")
    with open("internal.gro", "r") as file1:
        with open("parmed.gro", "r") as file2:
            # Ignore first two lines and last line
            assert file1.readlines()[2:-1] == file2.readlines()[2:-1]
| import numpy as np
from openff.toolkit.topology import Molecule
from simtk import unit as omm_unit
from openff.system.stubs import ForceField
def test_internal_gro_writer():
mol = Molecule.from_smiles("C")
mol.generate_conformers(n_conformers=1)
top = mol.to_topology()
parsley = ForceField("openff-1.0.0.offxml")
out = parsley.create_openff_system(top)
out.box = [4, 4, 4] * np.eye(3)
out.positions = mol.conformers[0] / omm_unit.nanometer
out.to_gro("internal.gro", writer="internal")
out.to_gro("parmed.gro", writer="parmed")
with open("internal.gro", "r") as file1:
with open("parmed.gro", "r") as file2:
# Ignore first two lines and last line
assert file1.readlines()[2:-1] == file2.readlines()[2:-1]
| en | 0.904278 | # Ignore first two lines and last line | 2.068311 | 2 |
util/development.py | KirtusJ/BirdBot | 0 | 6617526 | <gh_stars>0
import json
with open("util/database/database.json", "r") as db:
data = json.load(db)
db_development = data['development']
with open("util/bot/secret.json", "r") as bot:
data = json.load(bot)
bot_development = data["development"]
db_development = {
"username" : db_development["username"],
"password" : <PASSWORD>["password"],
"host" : db_development["host"],
"name" : db_development["name"],
"link" : "{manager}://{username}:{password}@{host}/{name}?charset=utf8mb4".format(
manager=db_development["manager"], username=db_development["username"],
password=<PASSWORD>["password"], host=db_development["host"], name=db_development["name"]
)
}
bot_development = {
"mode" : bot_development["mode"],
"token" : bot_development["token"],
"secret" : bot_development["secret"]
}
| import json
with open("util/database/database.json", "r") as db:
data = json.load(db)
db_development = data['development']
with open("util/bot/secret.json", "r") as bot:
data = json.load(bot)
bot_development = data["development"]
db_development = {
"username" : db_development["username"],
"password" : <PASSWORD>["password"],
"host" : db_development["host"],
"name" : db_development["name"],
"link" : "{manager}://{username}:{password}@{host}/{name}?charset=utf8mb4".format(
manager=db_development["manager"], username=db_development["username"],
password=<PASSWORD>["password"], host=db_development["host"], name=db_development["name"]
)
}
bot_development = {
"mode" : bot_development["mode"],
"token" : bot_development["token"],
"secret" : bot_development["secret"]
} | none | 1 | 2.669804 | 3 | |
prettyqt/qt/QtPositioning/__init__.py | phil65/PrettyQt | 7 | 6617527 | <filename>prettyqt/qt/QtPositioning/__init__.py
"""Provides QtPositioning classes and functions."""
from prettyqt.qt import PYQT5, PYQT6, PYSIDE2, PYSIDE6, PythonQtError
if PYQT5:
from PyQt5.QtPositioning import *
elif PYSIDE2:
from PySide2.QtPositioning import *
elif PYSIDE6:
from PySide6.QtPositioning import *
elif PYQT6:
from PyQt6.QtPositioning import *
else:
raise PythonQtError("No Qt bindings could be found")
| <filename>prettyqt/qt/QtPositioning/__init__.py
"""Provides QtPositioning classes and functions."""
from prettyqt.qt import PYQT5, PYQT6, PYSIDE2, PYSIDE6, PythonQtError
if PYQT5:
from PyQt5.QtPositioning import *
elif PYSIDE2:
from PySide2.QtPositioning import *
elif PYSIDE6:
from PySide6.QtPositioning import *
elif PYQT6:
from PyQt6.QtPositioning import *
else:
raise PythonQtError("No Qt bindings could be found")
| en | 0.602231 | Provides QtPositioning classes and functions. | 2.020296 | 2 |
pptop/core.py | sgnn7/pptop | 52 | 6617528 | <reponame>sgnn7/pptop
'''
ppTOP v{version} (c) Altertech
The product is available under {license} license.
https://pptop.io/
'''
__author__ = 'Altertech, https://www.altertech.com/'
__copyright__ = 'Copyright (C) 2019 Altertech'
__license__ = 'MIT'
__version__ = '0.6.13'
try:
__doc__ = __doc__.format(version=__version__, license=__license__)
except:
pass
import sys
import curses
import neotasker
import socket
import struct
import yaml
import logging
import inspect
import threading
import psutil
import os
import getpass
import subprocess
import importlib
import signal
import uuid
import time
import pickle
import shutil
import argparse
import collections
import readline
import textwrap
import neotermcolor as termcolor
from collections import OrderedDict
from functools import partial
from pyaltt2.converters import merge_dict, val_to_boolean
from pyaltt2.json import jprint
try:
yaml.warnings({'YAMLLoadWarning': False})
except:
pass
os.unsetenv('LINES')
os.unsetenv('COLUMNS')
from types import SimpleNamespace
from pptop.plugin import GenericPlugin, process_path as plugin_process_path
from pptop.plugin import bytes_to_iso
from pptop.ui.console import init_curses, end_curses, cls
from pptop.ui.console import resize_term, resize_handler
from pptop.ui.console import prompt, print_message, scr, palette, glyph
from pptop.ui.console import hide_cursor, show_cursor
from pptop.logger import config as log_config, log, log_traceback, init_logging
from pptop.exceptions import CriticalException
logging.getLogger('asyncio').setLevel(logging.CRITICAL)
logging.getLogger('neotasker').setLevel(100)
dir_me = os.path.dirname(os.path.realpath(__file__))
config = {}
plugins = {}
events_by_key = {
'f': 'filter',
'/': 'filter',
'I': 'interval',
'ENTER': 'select',
'CTRL_L': 'ready',
'CTRL_I': 'reinject',
'`': 'console',
'CTRL_O': 'show-console',
'KEY_BACKSPACE': 'delete',
'KEY_DC': 'delete',
'p': 'pause',
'q': 'back',
'ESC': 'back',
'kRIT3': 'sort-col-next',
'kLFT3': 'sort-col-prev',
'kDN3': 'sort-normal',
'kUP3': 'sort-reverse',
'KEY_LEFT': 'left',
'KEY_RIGHT': 'right',
'KEY_UP': 'up',
'KEY_DOWN': 'down',
'kLFT5': 'hshift-left',
'kRIT5': 'hshift-right',
'KEY_PPAGE': 'page-up',
'CTRL_B': 'page-up',
'KEY_NPAGE': 'page-down',
'CTRL_F': 'page-down',
'KEY_HOME': 'home',
'KEY_END': 'end',
'KEY_F(10)': 'quit',
' ': 'reload',
'CTRL_X': 'reset',
'Z': 'cursor-toggle'
}
plugins_autostart = []
bottom_bar_help = {10: 'Quit'}
plugin_shortcuts = {}
plugin_lock = threading.Lock()
stdout_buf_lock = threading.Lock()
socket_timeout = 15
injection_timeout = 3
socket_buf = 8192
class ppLoghandler(logging.Handler):
def emit(self, record):
log(super().format(record))
def after_resize():
_d.current_plugin['p'].resize()
show_process_info.trigger_threadsafe(force=True)
def get_plugins():
return plugins
def get_config_dir():
return _d.pptop_dir
def get_plugin(plugin_name):
return plugins.get(plugin_name)
def get_child_info():
return {'c': _d.child_cmd, 'a': _d.child_args} if _d.child else None
def apply_filter(plugin):
with scr.lock:
plugin.filter = prompt(ps='f: ', value=plugin.filter).lower()
plugin.trigger_threadsafe()
def apply_interval(plugin):
with scr.lock:
i = plugin.delay
if int(i) == i:
i = int(i)
new_interval = prompt(ps='intreval: ', value=i)
try:
new_interval = float(new_interval)
if new_interval <= 0:
raise ValueError
except:
print_message('Invalid interval', color=palette.ERROR)
return
plugin.stop()
plugin.start(_interval=new_interval)
plugin.show()
with scr.lock:
print_message('Interval changed', color=palette.OK)
return
def wait_key():
result = None
import termios
fd = sys.stdin.fileno()
oldterm = termios.tcgetattr(fd)
newattr = termios.tcgetattr(fd)
newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, newattr)
try:
result = sys.stdin.read(1)
except IOError:
pass
finally:
termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
return result
key_names = {
'KEY_DOWN': 'Down',
'KEY_UP': 'Up',
'KEY_LEFT': 'Left',
'KEY_RIGHT': 'Right',
'KEY_HOME': 'Home',
'KEY_END': 'End',
'KEY_BACKSPACE': 'Backspace',
'KEY_DC': 'Del',
'KEY_IC': 'Ins',
'KEY_NPAGE': 'PgDn',
'KEY_PPAGE': 'PgUp',
'CTRL_I': 'Tab',
'CTRL_J': 'Enter',
'kLFT5': 'C-Left',
'kRIT5': 'C-Right',
'kUP5': 'C-Up',
'kDN5': 'C-Down',
'kPRV5': 'C-PgUp',
'kNXT5': 'C-PgDn',
'kHOM5': 'C-Home',
'kEND5': 'C-End',
'kLFT3': 'C-Left',
'kRIT3': 'C-Right',
'kUP3': 'C-Up',
'kDN3': 'C-Down',
'kPRV3': 'C-PgUp',
'kNXT3': 'C-PgDn',
'kHOM3': 'C-Home',
'kEND3': 'C-End',
}
def format_shortcut(k):
k = str(k)
sh = k
try:
if k in key_names:
sh = key_names[k]
elif k.startswith('KEY_F('):
fnkey = int(sh[6:-1])
if fnkey > 48:
sh = 'M-F{}'.format(fnkey - 48)
elif fnkey > 24:
sh = 'C-F{}'.format(fnkey - 24)
elif fnkey > 12:
sh = 'Sh-F{}'.format(fnkey - 12)
else:
sh = 'F{}'.format(fnkey)
elif k.startswith('CTRL_'):
sh = 'C-{}'.format(k[5:].lower())
else:
if len(k) == 1:
if k.isalpha() and k.lower() != k:
sh = 'Sh-{}'.format(k.lower())
elif k == ' ':
sh = 'Space'
else:
sh = k
else:
sh = k.capitalize()
except:
log_traceback()
return sh
def format_key(k):
if len(k) == 1:
z = ord(k)
if z == 10:
k = 'ENTER'
elif z == 27:
k = 'ESC'
elif z < 27:
k = 'CTRL_' + chr(z + 64)
log('key pressed: {}'.format(k if len(k) > 1 else ((
'ord=' + str(ord(k))) if ord(k) < 32 else '"{}"'.format(k))))
return k
def get_key_event(k):
event = events_by_key.get(k, k)
log('key event: {}'.format(event))
return event
def colored(text, color=None, on_color=None, attrs=None):
try:
if not config['display'].get('colors'):
return str(text)
else:
return termcolor.colored(str(text),
color=color,
on_color=on_color,
attrs=attrs)
except:
return str(text)
err = partial(colored, color='red', attrs=['bold'])
def format_cmdline(p, injected):
cmdline = ' '.join(p.cmdline())
if not injected:
cmdline = cmdline.split(' -m pptop.injection ')[-1].split(' ', 1)[0]
return cmdline
def print_json(obj):
jprint(obj, colored=config['display'].get('colors'))
def cli_mode():
def compl(text, state):
if not text or text.find('.') == -1:
return None
o = text.rsplit('.', 1)[0]
src = 'try: __result = dir({})\nexcept: pass'.format(o)
result = command('.exec', src)
if not result or result[0] or not result[1]: return None
matches = [
s for s in result[1] if ('{}.{}'.format(o, s)).startswith(text)
]
try:
return '{}.{}'.format(o, matches[state])
except IndexError:
return None
log('cli mode started')
if _d.cli_first_time:
# os.system('clear')
print(
colored('Console mode, process {} connected'.format(_d.process.pid),
color='green',
attrs=['bold']))
print(
colored(format_cmdline(_d.process, _d.need_inject_server),
color='yellow'))
print(
colored(
'Enter any Python command, press Ctrl-D or type "exit" to quit')
)
print(colored('To toggle between JSON and normal mode, type ".j"'))
if _d.grab_stdout:
print(colored('To toggle stdout/stderr output, type ".p"'))
print(
colored(
'To execute multiple commands from file, type "< filename"'))
print(
colored(
'To explore object, type "obj?" (transformed to "dir(obj)")'))
if _d.protocol < 3:
print(
colored('For Python 2 use \'_print\' instead of \'print\'',
color='yellow',
attrs=['bold']))
print()
_d.cli_first_time = False
readline.set_history_length(100)
readline.set_completer_delims('')
readline.set_completer(compl)
readline.parse_and_bind('tab: complete')
try:
readline.read_history_file('{}/console.history'.format(_d.pptop_dir))
except:
pass
try:
while True:
try:
cmd = input('>>> ').strip()
if cmd == '': continue
elif cmd == 'exit':
raise EOFError
elif _d.grab_stdout and cmd == '.p':
if print_stdout.is_active():
print_stdout.stop()
else:
print_stdout.start()
elif cmd == '.j':
_d.console_json_mode = not _d.console_json_mode
print('JSON mode ' +
('on' if _d.console_json_mode else 'off'))
else:
if cmd.startswith('<'):
with open(os.path.expanduser(cmd[1:].strip())) as fh:
cmds = filter(None,
[x.strip() for x in fh.readlines()])
elif cmd.endswith('?'):
cmds = ['dir({})'.format(cmd[:-1]).strip()]
else:
cmds = [cmd]
for cmd in cmds:
r = command('.exec', cmd)
if r[0] == -1:
print(err('{}: {}'.format(r[1], r[2])))
else:
if r[1] is not None:
if _d.console_json_mode and \
(isinstance(r[1], dict) or \
isinstance(r[1], list)):
print_json(r[1])
else:
print(r[1])
except EOFError:
return
except KeyboardInterrupt:
print()
continue
except Exception as e:
log_traceback()
print(err(e))
finally:
log('cli mode completed')
try:
readline.write_history_file('{}/console.history'.format(
_d.pptop_dir))
except:
log_traceback()
class ProcesSelector(GenericPlugin):
def load_data(self):
self.data.clear()
px = ['python', 'python2', 'python3']
user = getpass.getuser() if os.getuid() else 'root'
for p in psutil.process_iter():
try:
with p.oneshot():
name = p.name().split('.', 1)[0]
fname = p.exe().rsplit('/', 1)[-1].split('.', 1)[0]
if (name in px or
fname in px) and p.pid != os.getpid() and (
user == 'root' or p.username() == user):
d = OrderedDict()
d['pid'] = p.pid
d['command line'] = ' '.join(p.cmdline())
self.data.append(d)
except psutil.AccessDenied:
pass
except:
log_traceback()
def render(self, dtd):
if not self.filter: self.print_message()
super().render(dtd)
scr.stdscr.move(scr.stdscr.getmaxyx()[0] - 1, 0)
scr.stdscr.clrtoeol()
def render_empty(self):
if self.is_active():
if self.filter == '':
self.window.clrtobot()
self.print_message('No Python processes found in system. ' +
'Waiting... "q" to abort',
color=palette.WARNING)
else:
super().render_empty()
def get_table_col_color(self, element, key, value):
if key == 'pid':
return palette.GREEN
async def run(self, *args, **kwargs):
super().run(*args, **kwargs)
def select_process():
with scr.lock:
cls()
hide_cursor()
selector = ProcesSelector(interval=1)
selector.events = 0
selector.name = 'process_selector'
selector.sorting_rev = False
selector.selectable = True
selector.finish_event = threading.Event()
selector.lock = threading.Lock()
selector.title = 'Select process'
selector.show()
selector.start()
_d.current_plugin = {'p': selector}
while True:
try:
try:
k = format_key(scr.stdscr.getkey())
event = get_key_event(k)
except KeyboardInterrupt:
return
except curses.error:
resize_handler.trigger_threadsafe(force=True)
continue
if event == 'back':
selector.stop(wait=False)
return
elif event == 'filter':
apply_filter(selector)
elif event == 'select':
if not selector.dtd:
continue
selector.stop(wait=False)
return psutil.Process(selector.dtd[selector.cursor]['pid'])
elif event == 'pause':
with scr.lock:
selector.toggle_pause()
else:
with scr.lock:
selector.key_code = k
selector.key_event = event
selector.trigger_threadsafe()
except:
log_traceback()
raise
return
ifoctets_lock = threading.Lock()
client_lock = threading.Lock()
client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
frame_counter_reset = 1000
def command(cmd, params=None):
with client_lock:
_d.client_frame_id += 1
if _d.client_frame_id >= frame_counter_reset:
_d.client_frame_id = 1
_d.last_frame_id = 0
try:
frame = cmd.encode()
if params is not None:
frame += b'\xff' + pickle.dumps(params, protocol=_d.protocol)
client.sendall(
struct.pack('I', len(frame)) +
struct.pack('I', _d.client_frame_id) + frame)
time_start = time.time()
data = client.recv(4)
frame_id = struct.unpack('I', client.recv(4))[0]
except:
log_traceback()
raise CriticalException('Injector is gone')
if not data:
log('critical: no data from injector')
raise CriticalException('Injector error')
l = struct.unpack('I', data)[0]
data = b''
while len(data) != l:
data += client.recv(socket_buf)
if time.time() > time_start + socket_timeout:
raise CriticalException('Socket timeout')
if frame_id != _d.client_frame_id:
log('critical: got wrong frame, channel is broken')
raise CriticalException('Wrong frame')
_d.last_frame_id += 1
with ifoctets_lock:
_d.ifoctets += len(data) + 8
if _d.ifoctets > 1000000000:
_d.ifoctets = _d.ifoctets - 1000000000
if data[0] != 0:
log('injector command error, code: {}'.format(data[0]))
raise RuntimeError('Injector command error')
return pickle.loads(data[1:]) if len(data) > 1 else True
def get_process():
return _d.process
def get_process_path():
return _d.process_path
_info_col_width = {0: 15, 1: 20, 2: 14, 3: 10}
_info_col_pos = {0: 0}
# _vblks='▁▂▃▅▆▇'
def recalc_info_col_pos():
pos = 0
for i in _info_col_width:
if i:
pos += _info_col_width[i - 1] + 2 #(
# 5 if config['display'].get('glyphs') and i == 1 else 2)
_info_col_pos[i] = pos
@neotasker.background_worker(delay=1)
async def show_process_info(p, **kwargs):
def error(txt):
cls()
scr.stdscr.addstr(0, 0, str(txt), palette.ERROR)
scr.stdscr.refresh()
return False
def draw_val(row,
col,
label='',
value=None,
color=palette.DEFAULT,
spacer=True):
width = _info_col_width[col]
pos = _info_col_pos[col]
val = str(value) if value is not None else ''
scr.infowin.move(row + 1, pos)
if label:
scr.infowin.addstr(label)
if spacer:
scr.infowin.addstr(
('.' if config['display']['colors'] else ' ') *
(width - len(label) - len(val)), palette.DARKGREY)
scr.infowin.move(row + 1, pos + width - len(val))
else:
scr.infowin.addstr(' ')
scr.infowin.addstr(val, color)
try:
width = scr.infowin.getmaxyx()[1]
status = _d.status
scr.infowin.clear()
with p.oneshot():
ct = p.cpu_times()
memf = p.memory_full_info()
mem = p.memory_info()
ioc = p.io_counters()
scr.infowin.move(0, 0)
scr.infowin.addstr('Process: ')
cmdline = format_cmdline(p, _d.need_inject_server)
scr.infowin.addstr(cmdline[:width - 25], palette.YELLOW)
scr.infowin.addstr(' [')
scr.infowin.addstr(
str(p.pid), palette.GREEN if status == 1 else palette.GREY_BOLD)
scr.infowin.addstr(']')
if status == -1:
xst = 'WAIT'
xstc = palette.GREY_BOLD
elif status == 0:
xst = 'DONE'
xstc = palette.GREY_BOLD
elif status == -2:
xst = 'ERROR'
xstc = palette.ERROR
else:
xst = None
if xst:
scr.infowin.addstr(' ' + xst, xstc)
cpup = p.cpu_percent()
draw_val(0, 0, 'CPU', '{}%'.format(cpup), palette.BLUE_BOLD)
draw_val(1, 0, 'user', ct.user, palette.BOLD)
draw_val(2, 0, 'system', ct.system, palette.BOLD)
# always hide pptop thread
draw_val(3, 0, 'threads', p.num_threads() - 1, palette.MAGENTA)
# if config['display'].get('glyphs'):
# gauge = _vblks[-1] * int(cpup // 25)
# i = int(cpup % 25 / 25 * len(_vblks))
# if i:
# gauge += _vblks[i - 1]
# x = _info_col_width[0] + 1
# for i, g in enumerate(gauge):
# scr.stdscr.addstr(4 - i, x, g * 2,
# (palette.GREEN, palette.YELLOW,
# palette.RED, palette.RED)[i])
draw_val(0, 1, 'Memory uss', bytes_to_iso(memf.uss), palette.BOLD)
draw_val(1, 1, 'pss', bytes_to_iso(memf.pss), palette.BOLD)
draw_val(2, 1, 'swap', bytes_to_iso(memf.swap),
palette.GREY if memf.swap < 1000000 else palette.YELLOW)
draw_val(0, 2, 'shd', bytes_to_iso(mem.shared), palette.BOLD)
draw_val(1, 2, 'txt', bytes_to_iso(mem.text), palette.BOLD)
draw_val(2, 2, 'dat', bytes_to_iso(mem.data), palette.BOLD)
draw_val(0,
3,
'Files:',
len(p.open_files()),
palette.CYAN,
spacer=False)
draw_val(1,
3,
value='{} {} ({})'.format(glyph.UPLOAD, ioc.read_count,
bytes_to_iso(ioc.read_chars)),
color=palette.GREEN)
draw_val(2,
3,
value='{} {} ({})'.format(glyph.DOWNLOAD, ioc.write_count,
bytes_to_iso(ioc.write_chars)),
color=palette.BLUE)
with scr.lock:
scr.infowin.refresh()
scr.stdscr.refresh()
except psutil.AccessDenied:
log_traceback()
return error('Access denied')
except psutil.NoSuchProcess:
log_traceback()
return error('Process is gone')
except CriticalException:
log_traceback()
return error('Process server is gone')
except curses.error:
log_traceback()
try:
for i in range(2):
scr.stdscr.move(i, 0)
scr.stdscr.clrtoeol()
scr.stdscr.refresh()
except:
pass
except Exception as e:
return error(e)
@neotasker.background_worker(delay=0.1)
async def show_bottom_bar(**kwargs):
try:
with scr.lock:
height, width = scr.stdscr.getmaxyx()
scr.stdscr.move(height - 1, 0)
scr.stdscr.addstr(' ' * (width - 1), palette.BAR)
scr.stdscr.move(height - 1, 0)
color = palette.BAR
for h in sorted(bottom_bar_help):
scr.stdscr.addstr('F{}'.format(h))
scr.stdscr.addstr(bottom_bar_help[h].ljust(6), color)
try:
with plugin_lock:
i = _d.current_plugin['p'].delay
if int(i) == i:
i = int(i)
i = 'I:' + str(i)
except:
i = ''
stats = '{} P:{} {} {:03d}/{:03d} '.format(i, _d.protocol,
glyph.CONNECTION,
_d.client_frame_id,
_d.last_frame_id)
with ifoctets_lock:
bw = _d.ifbw
if bw < 1000:
bws = '{} Bs'.format(bw)
elif bw < 1000000:
bws = '{:.0f} kBs'.format(bw / 1000)
else:
bws = '{:.0f} MBs'.format(bw / 1000000)
bws = bws.rjust(7)
if bw > 2000000:
bwc = palette.BAR_ERROR
elif bw > 500000:
bwc = palette.BAR_WARNING
else:
bwc = palette.BAR_OK
scr.stdscr.addstr(height - 1, width - len(stats) - len(bws) - 1,
stats, color)
scr.stdscr.addstr(bws, bwc)
scr.stdscr.refresh()
except:
pass
# don't make this async, it should always work in own thread
@neotasker.background_worker
def update_status(**kwargs):
try:
_d.status = command('.status')
except:
log_traceback()
status = -2
finally:
time.sleep(1)
@neotasker.background_worker
def grab_stdout(**kwargs):
try:
result = command('.gs')
with stdout_buf_lock:
_d.stdout_buf += result
except:
log_traceback()
finally:
time.sleep(0.5)
@neotasker.background_worker
def print_stdout(**kwargs):
with stdout_buf_lock:
if _d.stdout_buf != '':
print(_d.stdout_buf, end='')
_d.stdout_buf = ''
time.sleep(0.1)
@neotasker.background_worker(interval=1)
async def calc_bw(**kwargs):
with ifoctets_lock:
if _d.ifoctets >= _d.ifoctets_prev:
_d.ifbw = _d.ifoctets - _d.ifoctets_prev
else:
_d.ifbw = 1000000000 - _d.ifoctets_prev + _d.ifoctets
_d.ifoctets_prev = _d.ifoctets
_d = SimpleNamespace(
cli_first_time=True,
grab_stdout=False,
stdout_buf='',
current_plugin=None,
process_path=[],
default_plugin=None,
process=None,
protocol=None,
force_protocol=None,
client_frame_id=0,
last_frame_id=0,
ifoctets=0,
ifoctets_prev=0,
ifbw=0,
pptop_dir=None,
gdb=None,
work_pid=None,
need_inject_server=True,
inject_method=None, # None (auto), 'native', 'loadcffi', 'unsafe'
inject_lib=None,
child=None,
child_cmd=None,
child_args='',
status=None,
console_json_mode=True,
exec_code=None,
output_as_json=False)
def sigwinch_handler(signum=None, frame=None):
resize_handler.trigger_threadsafe(force=True)
def find_lib(name):
'''
Find first library matching pattern
'''
import glob
for d in sys.path:
lib = glob.glob('{}/{}'.format(d, name))
if lib:
return lib[0]
def init_inject():
if _d.inject_method is None or _d.inject_method == 'auto':
_d.inject_method = 'native'
_d.inject_lib = find_lib('__pptop_injector.*.so')
if not _d.inject_lib:
_d.inject_method = 'loadcffi'
_d.inject_lib = find_lib('_cffi_backend.*.so')
if not _d.inject_lib:
_d.inject_method = 'unsafe'
else:
if _d.inject_method == 'native':
_d.inject_lib = find_lib('__pptop_injector.*.so')
if not _d.inject_lib:
raise RuntimeError(
'__pptop_injector shared library not found.' +
' reinstall package or select different inject method')
elif _d.inject_method == 'loadcffi':
_d.inject_lib = find_lib('_cffi_backend.*.so')
if not _d.inject_lib:
raise RuntimeError(
'_cffi_backend shared library not found.' +
' install "cffi" package or select different inject method')
else:
_d.inject_method = 'unsafe'
def inject_server(gdb, p):
cmds = []
pid = p.pid
libpath = os.path.abspath(os.path.dirname(__file__) + '/..')
if _d.inject_method in ['native', 'loadcffi']:
cmds.append('call (void)dlopen("{}", 2)'.format(_d.inject_lib))
if _d.inject_method == 'native':
cmds.append('call (int)__pptop_start_injection("{}",{},{},"{}")'.format(
libpath, os.getpid(), _d.protocol,
log_config.fname if log_config.fname else ''))
else:
cmds += [
'call (PyGILState_STATE)PyGILState_Ensure()',
('call (int)PyRun_SimpleString("' +
'import sys\\nif \\"{path}\\" not in sys.path: ' +
'sys.path.insert(0,\\"{path}\\")\\n' +
'import pptop.injection;pptop.injection.start(' +
'{mypid},{protocol}{lg})")').format(
path=libpath,
mypid=os.getpid(),
lg='' if not log_config.fname else ',lg=\\"{}\\"'.format(
log_config.fname),
protocol=_d.protocol), ' call (void)PyGILState_Release($1)'
]
args = [gdb, '-p', str(pid), '--batch'
] + ['--eval-command={}'.format(c) for c in cmds]
log(args)
p = subprocess.Popen(args,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
log(out)
log(err)
if p.returncode:
raise RuntimeError(err)
def inject_plugin(plugin):
if plugin['p'].injected is False:
log('injecting plugin {}'.format(plugin['p'].name))
plugin['p'].injected = True
try:
command('.inject', plugin['i'])
return True
except:
print_message('Plugin injection failed', color=palette.ERROR)
return False
def switch_plugin(new_plugin):
if _d.current_plugin:
if _d.current_plugin is new_plugin:
return
if not _d.current_plugin['p'].background:
_d.current_plugin['p'].stop(wait=False)
else:
_d.current_plugin['p'].hide()
p = new_plugin['p']
p._previous_plugin = _d.current_plugin
p.key_event = None
p.key_code = None
inject_plugin(new_plugin)
if not p.is_active(): p.start()
p.show()
with plugin_lock:
_d.current_plugin = new_plugin
def run():
def autostart_plugins():
for plugin in plugins_autostart:
if plugin['p'] is not _d.current_plugin.get('p'):
log('autostarting {}'.format(plugin['m']))
inject_plugin(plugin)
p = plugin['p']
if p.background:
p.start()
try:
if not _d.work_pid:
init_curses(initial=True,
after_resize=after_resize,
colors=config['display'].get('colors'),
glyphs=config['display'].get('glyphs'))
p = select_process()
else:
p = psutil.Process(_d.work_pid)
if not p: return
_d.process = p
client.settimeout(socket_timeout)
client.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, socket_buf)
client.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, socket_buf)
if _d.need_inject_server:
inject_server(_d.gdb, p)
log('server injected')
sock_path = '/tmp/.pptop.{}'.format(os.getpid())
for i in range(injection_timeout * 10):
if os.path.exists(sock_path):
break
time.sleep(0.1)
try:
client.connect(sock_path)
except:
log_traceback()
raise RuntimeError('Unable to connect to process')
log('connected')
frame = b''
with client_lock:
time_start = time.time()
while len(frame) < 1:
data = client.recv(1)
if data:
frame += data
if time.time() > time_start + socket_timeout:
raise CriticalException('Socket timeout')
server_protocol = struct.unpack('b', frame)[0]
if server_protocol < _d.protocol:
if _d.force_protocol:
raise RuntimeError(
'Process doesn\'t support protocol {}'.format(_d.protocol))
else:
_d.protocol = server_protocol
log('Falling back to protocol {}'.format(_d.protocol))
if _d.exec_code:
end_curses()
result = command('.x', _d.exec_code)
if result[0] == 0:
if _d.output_as_json:
print_json(result[1])
else:
print(result[1] if result[1] else '')
else:
print(err('{}: {}'.format(result[1], result[2])))
return
init_curses(initial=True,
after_resize=after_resize,
colors=config['display'].get('colors'),
glyphs=config['display'].get('glyphs'))
signal.signal(signal.SIGWINCH, sigwinch_handler)
calc_bw.start()
update_status.start()
if _d.grab_stdout:
grab_stdout.start()
_d.process_path.clear()
plugin_process_path.clear()
if _d.grab_stdout:
try:
command('.gs')
except:
raise RuntimeError('Unable to set stdout grabber')
ppath = []
for i in command('.path'):
ppath.append(os.path.abspath(i))
_d.process_path.extend(sorted(ppath, reverse=True))
plugin_process_path.extend(_d.process_path)
log('process path: {}'.format(_d.process_path))
switch_plugin(_d.default_plugin)
recalc_info_col_pos()
show_process_info.start(p=p)
show_bottom_bar.start()
neotasker.spawn(autostart_plugins)
log('main loop started')
while True:
try:
try:
k = format_key(scr.stdscr.getkey())
event = get_key_event(k)
except KeyboardInterrupt:
return
except curses.error:
resize_handler.trigger_threadsafe(force=True)
continue
if show_process_info.is_stopped():
return
elif k in plugin_shortcuts:
switch_plugin(plugin_shortcuts[k])
elif event == 'ready':
try:
result = command('.ready')
except:
result = None
with scr.lock:
if result:
print_message('Ready event sent', color=palette.OK)
else:
print_message('Command failed', color=palette.ERROR)
elif event == 'reinject' and \
_d.current_plugin['p'].injected is not None:
try:
result = command('.inject', _d.current_plugin['i'])
except:
result = None
with scr.lock:
if result:
print_message('Plugin re-injected',
color=palette.OK)
else:
print_message('Plugin re-injection failed',
color=palette.ERROR)
elif event == 'quit':
_d.current_plugin['p'].stop(wait=False)
show_process_info.stop(wait=False)
show_bottom_bar.stop(wait=False)
return
elif event == 'console':
with scr.lock:
end_curses()
if _d.grab_stdout: print_stdout.start()
cli_mode()
if _d.grab_stdout: print_stdout.stop()
init_curses(after_resize=after_resize)
resize_term()
elif event == 'show-console':
with scr.lock:
end_curses()
hide_cursor()
if _d.grab_stdout: print_stdout.start()
try:
wait_key()
except KeyboardInterrupt:
pass
if _d.grab_stdout: print_stdout.stop()
init_curses(after_resize=after_resize)
resize_term()
elif event == 'filter':
apply_filter(_d.current_plugin['p'])
elif event == 'interval':
apply_interval(_d.current_plugin['p'])
elif event == 'pause':
with scr.lock:
_d.current_plugin['p'].toggle_pause()
elif event in _d.current_plugin['p'].inputs:
with scr.lock:
try:
prev_value = _d.current_plugin['p'].get_input(event)
except ValueError:
continue
value = prompt(
ps=_d.current_plugin['p'].get_input_prompt(event),
value=prev_value if prev_value is not None else '')
_d.current_plugin['p'].inputs[event] = value
try:
_d.current_plugin['p'].handle_input(
event, value, prev_value)
except:
pass
else:
for i, plugin in plugins.items():
try:
plugin['p'].handle_key_global_event(event, k)
except:
log_traceback()
with scr.lock:
_d.current_plugin['p'].key_code = k
_d.current_plugin['p'].key_event = event
_d.current_plugin['p'].trigger_threadsafe()
except:
log_traceback()
return
except:
log_traceback()
raise
finally:
end_curses()
def start():
def format_plugin_option(dct, o, v):
if o.find('.') != -1:
x, y = o.split('.', 1)
dct[x] = {}
format_plugin_option(dct[x], y, v)
else:
dct[o] = v
_me = 'ppTOP version %s' % __version__
ap = argparse.ArgumentParser(description=_me)
ap.add_argument('-V',
'--version',
help='Print version and exit',
action='store_true')
ap.add_argument('-R',
'--raw',
help='Raw mode (disable colors and unicode glyphs)',
action='store_true')
ap.add_argument('--disable-glyphs',
help='disable unicode glyphs',
action='store_true')
ap.add_argument('file',
nargs='?',
help='File, PID file or PID',
metavar='FILE/PID')
ap.add_argument('-a', '--args', metavar='ARGS', help='Child args (quoted)')
ap.add_argument('--python',
metavar='FILE',
help='Python interpreter to launch file')
ap.add_argument('--gdb', metavar='FILE', help='Path to gdb')
ap.add_argument('-p',
'--protocol',
metavar='VER',
type=int,
help=textwrap.dedent('''Pickle protocol, default is highest.
4: Python 3.4+,
3: Python 3.0+,
2: Python 2.3+,
1: vintage'''))
ap.add_argument('--inject-method',
choices=['auto', 'native', 'loadcffi', 'unsafe'],
help='Inject method')
ap.add_argument('-g',
'--grab-stdout',
help='Grab stdout/stderr of injected process',
action='store_true')
ap.add_argument(
'-w',
'--wait',
metavar='SEC',
type=float,
help='If file is specified, wait seconds to start main code')
ap.add_argument(
'-f',
'--config-file',
help='Alternative config file (default: ~/.pptop/pptop.yml)',
metavar='CONFIG',
dest='config')
ap.add_argument('-d',
'--default',
help='Default plugin to launch',
metavar='PLUGIN',
dest='plugin')
ap.add_argument(
'-o',
'--plugin-option',
help='Override plugin config option, e.g. threads.filter=mythread',
metavar='NAME=VALUE',
action='append',
dest='plugin_options')
ap.add_argument('--log', metavar='FILE', help='Send debug log to file')
ap.add_argument(
'-x',
'--exec',
help='Exec code from a file ("-" for stdin) and exit '
' (the code can put result to "out" var)',
metavar='FILE',
dest='_exec')
ap.add_argument('-J',
'--json',
help='Output exec result as JSON',
action='store_true')
try:
import argcomplete
argcomplete.autocomplete(ap)
except:
pass
a = ap.parse_args()
if a.log:
log_config.fname = a.log
log_config.name = 'client:{}'.format(os.getpid())
logging.getLogger('asyncio').setLevel(logging.DEBUG)
logging.getLogger('neotasker').setLevel(logging.DEBUG)
logging.basicConfig(level=logging.DEBUG)
le = logging.getLogger()
le.addHandler(ppLoghandler())
list(map(le.removeHandler, le.handlers))
neotasker.set_debug(True)
init_logging()
if a.version:
print(_me)
exit()
log('initializing')
if a.file:
try:
# pid?
_d.work_pid = int(a.file)
except:
# probably pid file
try:
with open(a.file) as fh:
_d.work_pid = int(fh.read(128))
except:
# okay, program to launch
_d.child_cmd = os.path.abspath(a.file)
_d.pptop_dir = os.path.expanduser('~/.pptop')
if a.config:
config_file = a.config
use_default_config = False
else:
config_file = _d.pptop_dir + '/pptop.yml'
use_default_config = True
sys.path.append(_d.pptop_dir + '/lib')
config.clear()
if use_default_config and not os.path.isfile(config_file):
log('no user config, setting default')
try:
os.mkdir(_d.pptop_dir)
except:
pass
if not os.path.isdir(_d.pptop_dir + '/scripts'):
shutil.copytree(dir_me + '/config/scripts',
_d.pptop_dir + '/scripts')
shutil.copy(dir_me + '/config/pptop.yml', _d.pptop_dir + '/pptop.yml')
if not os.path.isdir(_d.pptop_dir + '/lib'):
os.mkdir(_d.pptop_dir + '/lib')
with open(config_file) as fh:
config.update(yaml.load(fh.read()))
console = config.get('console')
if console is None: console = {}
_d.console_json_mode = console.get('json-mode')
_d.inject_method = a.inject_method if a.inject_method else config.get(
'inject-method')
if config.get('display') is None:
config['display'] = {}
if a.raw:
config['display']['colors'] = False
if a.grab_stdout:
_d.grab_stdout = True
if a.raw or a.disable_glyphs:
config['display']['glyphs'] = False
if a._exec:
if a._exec == '-':
_d.exec_code = sys.stdin.read()
else:
with open(a._exec) as fd:
_d.exec_code = fd.read()
_d.output_as_json = a.json
else:
ebk = {}
global_keys = config.get('keys')
if global_keys:
for event, keys in global_keys.items():
for k, v in events_by_key.copy().items():
if event == v:
del events_by_key[k]
if keys is not None:
for k in keys if isinstance(keys, list) else [keys]:
ebk[str(k)] = str(event)
events_by_key.update(ebk)
plugin_options = {}
for x in a.plugin_options or []:
try:
o, v = x.split('=', 1)
except:
o = x
v = None
format_plugin_option(plugin_options, o, v)
if plugin_options:
config.update(merge_dict(config, {'plugins': plugin_options}))
log('loading plugins')
try:
plugins.clear()
for i, v in config.get('plugins', {}).items():
try:
log('+ plugin ' + i)
if v is None: v = {}
try:
mod = importlib.import_module('pptop.plugins.' + i)
mod.__version__ = 'built-in'
except ModuleNotFoundError:
mod = importlib.import_module('pptopcontrib.' + i)
try:
mod.__version__
except:
raise RuntimeError(
'Please specify __version__ in plugin file')
plugin = {'m': mod}
plugins[i] = plugin
p = mod.Plugin(interval=float(
v.get('interval', mod.Plugin.default_interval)))
p.command = command
p.get_plugins = get_plugins
p.get_plugin = get_plugin
p.get_config_dir = get_config_dir
p.switch_plugin = switch_plugin
p.get_process = get_process
p.get_process_path = get_process_path
p.global_config = config
plugin['p'] = p
plugin['id'] = i
p._inject = partial(inject_plugin, plugin=plugin)
injection = {'id': i}
need_inject = False
try:
injection['l'] = inspect.getsource(mod.injection_load)
need_inject = True
except:
pass
try:
injection['i'] = inspect.getsource(mod.injection)
need_inject = True
except:
pass
try:
injection['u'] = inspect.getsource(mod.injection_unload)
need_inject = True
except:
pass
if need_inject:
p.injected = False
plugin['i'] = injection
else:
p.injected = None
if not _d.default_plugin or val_to_boolean(
v.get('default')) or i == a.plugin:
_d.default_plugin = plugin
p_cfg = v.get('config')
p.config = {} if p_cfg is None else p_cfg
p.on_load()
p._on_load()
if 'l' in injection:
injection['lkw'] = p.get_injection_load_params()
if 'shortcut' in v:
sh = v['shortcut']
plugin['shortcut'] = sh
plugin_shortcuts[sh] = plugin
if sh.startswith('KEY_F('):
try:
f = int(sh[6:-1])
if f <= 10:
bottom_bar_help[f] = p.short_name
except:
pass
else:
plugin['shortcut'] = ''
if 'filter' in v:
p.filter = str(v['filter'])
if 'cursor' in v:
p._cursor_enabled_by_user = val_to_boolean(v['cursor'])
if val_to_boolean(v.get('autostart')):
plugins_autostart.append(plugin)
except Exception as e:
raise RuntimeError('plugin {}: {}'.format(i, e))
except:
log_traceback()
raise
neotasker.task_supervisor.start()
neotasker.task_supervisor.create_aloop('pptop', default=True, daemon=True)
neotasker.task_supervisor.create_aloop('service', daemon=True)
try:
if a.file and not _d.work_pid:
# launch file
_d.need_inject_server = False
if a.python:
python_path = a.python
else:
python_path = shutil.which('python3')
if not python_path:
raise RuntimeError(
'python3 not found in path, please specify manually')
args = (python_path, '-m', 'pptop.injection', a.file,
str(os.getpid()))
if a.wait is not None:
args += ('-w', str(a.wait))
if a.protocol is not None:
args += ('-p', str(a.protocol))
if a.args:
args += ('-a', a.args)
if log_config.fname:
args += ('--log', log_config.fname)
log('starting child process')
_d.child = subprocess.Popen(args,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_d.work_pid = _d.child.pid
_d.protocol = pickle.HIGHEST_PROTOCOL
else:
if a.gdb:
_d.gdb = a.gdb
else:
_d.gdb = shutil.which('gdb')
if not _d.gdb or not os.path.isfile(_d.gdb):
raise RuntimeError('gdb not found')
# check yama ptrace scope
try:
with open('/proc/sys/kernel/yama/ptrace_scope') as fd:
yps = int(fd.read().strip())
except:
yps = None
if yps:
raise RuntimeError(
'yama ptrace scope is on. ' +
'disable with "sudo sysctl -w kernel.yama.ptrace_scope=0"')
init_inject()
log('inject method: {}'.format(_d.inject_method))
log('inject library: {}'.format(_d.inject_lib))
if a.protocol is not None:
if a.protocol > pickle.HIGHEST_PROTOCOL or a.protocol < 1:
raise ValueError('Protocol {} is not supported'.format(
a.protocol))
_d.protocol = a.protocol
_d.force_protocol = a.protocol
else:
_d.protocol = pickle.HIGHEST_PROTOCOL
log('Pickle protocol: {}'.format(_d.protocol))
run()
log('terminating')
for p, v in plugins.items():
v['p'].on_unload()
except Exception as e:
log_traceback()
raise
finally:
try:
client.close()
except:
pass
neotasker.task_supervisor.stop(wait=False, cancel_tasks=True)
return 0
'''
ppTOP v{version} (c) Altertech
The product is available under {license} license.
https://pptop.io/
'''
__author__ = 'Altertech, https://www.altertech.com/'
__copyright__ = 'Copyright (C) 2019 Altertech'
__license__ = 'MIT'
__version__ = '0.6.13'

# Substitute version/license placeholders into the module docstring.
# Best-effort: __doc__ may be None (e.g. under python -OO), in which
# case the substitution is silently skipped.  A bare "except:" was
# narrowed to "except Exception:" so KeyboardInterrupt/SystemExit are
# not swallowed here.
try:
    __doc__ = __doc__.format(version=__version__, license=__license__)
except Exception:
    pass
import sys
import curses
import neotasker
import socket
import struct
import yaml
import logging
import inspect
import threading
import psutil
import os
import getpass
import subprocess
import importlib
import signal
import uuid
import time
import pickle
import shutil
import argparse
import collections
import readline
import textwrap
import neotermcolor as termcolor
from collections import OrderedDict
from functools import partial
from pyaltt2.converters import merge_dict, val_to_boolean
from pyaltt2.json import jprint
# silence PyYAML's "YAMLLoadWarning" on versions that emit it;
# older PyYAML has no warnings() hook, hence the guard
try:
    yaml.warnings({'YAMLLoadWarning': False})
except:
    pass
# drop inherited terminal-size env vars so curses measures the real
# terminal instead of trusting stale LINES/COLUMNS values
os.unsetenv('LINES')
os.unsetenv('COLUMNS')
from types import SimpleNamespace
from pptop.plugin import GenericPlugin, process_path as plugin_process_path
from pptop.plugin import bytes_to_iso
from pptop.ui.console import init_curses, end_curses, cls
from pptop.ui.console import resize_term, resize_handler
from pptop.ui.console import prompt, print_message, scr, palette, glyph
from pptop.ui.console import hide_cursor, show_cursor
from pptop.logger import config as log_config, log, log_traceback, init_logging
from pptop.exceptions import CriticalException
# quiet noisy library loggers (debug logging is re-enabled in start()
# when --log is given)
logging.getLogger('asyncio').setLevel(logging.CRITICAL)
logging.getLogger('neotasker').setLevel(100)  # above CRITICAL: effectively off
dir_me = os.path.dirname(os.path.realpath(__file__))  # package install dir
config = {}  # parsed pptop.yml contents
plugins = {}  # plugin id -> {'m': module, 'p': instance, 'id': ..., ...}
# Default mapping of normalized key names (as produced by format_key)
# to UI events; entries can be overridden by the "keys" section of the
# user config (applied in start())
events_by_key = {
    'f': 'filter',
    '/': 'filter',
    'I': 'interval',
    'ENTER': 'select',
    'CTRL_L': 'ready',
    'CTRL_I': 'reinject',
    '`': 'console',
    'CTRL_O': 'show-console',
    'KEY_BACKSPACE': 'delete',
    'KEY_DC': 'delete',
    'p': 'pause',
    'q': 'back',
    'ESC': 'back',
    'kRIT3': 'sort-col-next',
    'kLFT3': 'sort-col-prev',
    'kDN3': 'sort-normal',
    'kUP3': 'sort-reverse',
    'KEY_LEFT': 'left',
    'KEY_RIGHT': 'right',
    'KEY_UP': 'up',
    'KEY_DOWN': 'down',
    'kLFT5': 'hshift-left',
    'kRIT5': 'hshift-right',
    'KEY_PPAGE': 'page-up',
    'CTRL_B': 'page-up',
    'KEY_NPAGE': 'page-down',
    'CTRL_F': 'page-down',
    'KEY_HOME': 'home',
    'KEY_END': 'end',
    'KEY_F(10)': 'quit',
    ' ': 'reload',
    'CTRL_X': 'reset',
    'Z': 'cursor-toggle'
}
plugins_autostart = []  # plugins configured with autostart: true
bottom_bar_help = {10: 'Quit'}  # F-key number -> bottom-bar label
plugin_shortcuts = {}  # shortcut key -> plugin dict
plugin_lock = threading.Lock()  # guards _d.current_plugin switches
stdout_buf_lock = threading.Lock()  # guards _d.stdout_buf
socket_timeout = 15  # seconds for injector socket operations
injection_timeout = 3  # seconds to wait for the server socket to appear
socket_buf = 8192  # socket recv/send buffer size, bytes
class ppLoghandler(logging.Handler):
    """logging.Handler that forwards formatted records to pptop's log()."""

    def emit(self, record):
        """Format *record* with the base handler and pass it to log()."""
        formatted = super().format(record)
        log(formatted)
def after_resize():
    """Post-resize hook: resize the active plugin, then force a refresh
    of the process info header."""
    active = _d.current_plugin['p']
    active.resize()
    show_process_info.trigger_threadsafe(force=True)
def get_plugins():
    """Return the plugin registry dict (plugin API helper)."""
    return plugins
def get_config_dir():
    """Return the pptop user config directory (~/.pptop)."""
    return _d.pptop_dir
def get_plugin(plugin_name):
    """Return the plugin dict for *plugin_name*, or None if not loaded."""
    return plugins.get(plugin_name)
def get_child_info():
    """Return {'c': cmd, 'a': args} when pptop spawned the child itself,
    otherwise None."""
    return {'c': _d.child_cmd, 'a': _d.child_args} if _d.child else None
def apply_filter(plugin):
    """Prompt for a filter string (lowercased) and apply it to *plugin*,
    then trigger a redraw."""
    with scr.lock:
        plugin.filter = prompt(ps='f: ', value=plugin.filter).lower()
        plugin.trigger_threadsafe()
def apply_interval(plugin):
    """Prompt for a new polling interval and restart *plugin* with it.

    Rejects non-numeric and non-positive values with an error message.
    """
    with scr.lock:
        i = plugin.delay
        # show "2" instead of "2.0" in the prompt when the value is whole
        if int(i) == i:
            i = int(i)
        # NOTE(review): "intreval" typo is a user-visible prompt string,
        # left as-is here (changing it would alter runtime output)
        new_interval = prompt(ps='intreval: ', value=i)
        try:
            new_interval = float(new_interval)
            if new_interval <= 0:
                raise ValueError
        except:
            print_message('Invalid interval', color=palette.ERROR)
            return
    # restart the plugin worker with the new interval
    plugin.stop()
    plugin.start(_interval=new_interval)
    plugin.show()
    with scr.lock:
        print_message('Interval changed', color=palette.OK)
    return
def wait_key():
    """Block until a single key is pressed on stdin and return it.

    Temporarily puts the terminal into non-canonical, no-echo mode so
    read() returns after one character; the previous terminal settings
    are always restored.
    """
    import termios
    pressed = None
    fd = sys.stdin.fileno()
    saved_attrs = termios.tcgetattr(fd)
    raw_attrs = termios.tcgetattr(fd)
    raw_attrs[3] = raw_attrs[3] & ~termios.ICANON & ~termios.ECHO
    termios.tcsetattr(fd, termios.TCSANOW, raw_attrs)
    try:
        pressed = sys.stdin.read(1)
    except IOError:
        pass
    finally:
        termios.tcsetattr(fd, termios.TCSAFLUSH, saved_attrs)
    return pressed
# curses key name -> human readable label (used by format_shortcut)
key_names = {
    'KEY_DOWN': 'Down',
    'KEY_UP': 'Up',
    'KEY_LEFT': 'Left',
    'KEY_RIGHT': 'Right',
    'KEY_HOME': 'Home',
    'KEY_END': 'End',
    'KEY_BACKSPACE': 'Backspace',
    'KEY_DC': 'Del',
    'KEY_IC': 'Ins',
    'KEY_NPAGE': 'PgDn',
    'KEY_PPAGE': 'PgUp',
    'CTRL_I': 'Tab',
    'CTRL_J': 'Enter',
    'kLFT5': 'C-Left',
    'kRIT5': 'C-Right',
    'kUP5': 'C-Up',
    'kDN5': 'C-Down',
    'kPRV5': 'C-PgUp',
    'kNXT5': 'C-PgDn',
    'kHOM5': 'C-Home',
    'kEND5': 'C-End',
    'kLFT3': 'C-Left',
    'kRIT3': 'C-Right',
    'kUP3': 'C-Up',
    'kDN3': 'C-Down',
    'kPRV3': 'C-PgUp',
    'kNXT3': 'C-PgDn',
    'kHOM3': 'C-Home',
    'kEND3': 'C-End',
}


def format_shortcut(k):
    """Return a human-readable label for a curses key name.

    Known names come from key_names; function keys are decoded into
    F/Sh-F/C-F/M-F tiers; CTRL_x becomes "C-x"; single characters get
    "Sh-" for uppercase letters and "Space" for the space bar.  On any
    decoding error the best label computed so far is returned.
    """
    key = str(k)
    label = key
    try:
        if key in key_names:
            label = key_names[key]
        elif key.startswith('KEY_F('):
            fn = int(label[6:-1])
            # modifier tiers: >48 Meta, >24 Ctrl, >12 Shift
            for threshold, fmt in ((48, 'M-F{}'), (24, 'C-F{}'),
                                   (12, 'Sh-F{}')):
                if fn > threshold:
                    label = fmt.format(fn - threshold)
                    break
            else:
                label = 'F{}'.format(fn)
        elif key.startswith('CTRL_'):
            label = 'C-{}'.format(key[5:].lower())
        elif len(key) == 1:
            if key.isalpha() and key.lower() != key:
                label = 'Sh-{}'.format(key.lower())
            elif key == ' ':
                label = 'Space'
            else:
                label = key
        else:
            label = key.capitalize()
    except:
        log_traceback()
    return label
def format_key(k):
    """Normalize a raw key from curses getkey() to a symbolic name.

    Control characters become ENTER / ESC / CTRL_<letter>; everything
    else is returned unchanged.  The pressed key is logged.
    """
    if len(k) == 1:
        code = ord(k)
        if code == 10:
            k = 'ENTER'
        elif code == 27:
            k = 'ESC'
        elif code < 27:
            k = 'CTRL_' + chr(code + 64)
    if len(k) > 1:
        shown = k
    elif ord(k) < 32:
        shown = 'ord=' + str(ord(k))
    else:
        shown = '"{}"'.format(k)
    log('key pressed: {}'.format(shown))
    return k
def get_key_event(k):
    """Map a normalized key name to its UI event; unmapped keys map to
    themselves.  The resolved event is logged."""
    event = events_by_key.get(k, k)
    log('key event: {}'.format(event))
    return event
def colored(text, color=None, on_color=None, attrs=None):
    """Return *text* as a string, colorized via neotermcolor when color
    output is enabled in the display config.

    Falls back to the plain string on any failure (e.g. config not yet
    loaded or colors disabled).
    """
    try:
        plain = str(text)
        if config['display'].get('colors'):
            return termcolor.colored(plain,
                                     color=color,
                                     on_color=on_color,
                                     attrs=attrs)
        return plain
    except:
        return str(text)
# shortcut: render text as a bold red error message
err = partial(colored, color='red', attrs=['bold'])
def format_cmdline(p, injected):
    """Return a display string for process *p*'s command line.

    When *injected* is falsy the process was started through the
    "python -m pptop.injection <file> <pid>" bootstrap, so the bootstrap
    prefix is stripped and only the launched file name is returned.
    """
    full = ' '.join(p.cmdline())
    if injected:
        return full
    # keep only the first token after the bootstrap marker
    tail = full.split(' -m pptop.injection ')[-1]
    return tail.split(' ', 1)[0]
def print_json(obj):
    """Pretty-print *obj* as JSON, colorized when colors are enabled."""
    jprint(obj, colored=config['display'].get('colors'))
def cli_mode():
    """Interactive remote Python console against the injected process.

    Reads commands from stdin with readline history/completion and runs
    them in the target via the '.exec' injector command.  Special
    inputs: "exit"/Ctrl-D quit, ".j" toggles JSON output, ".p" toggles
    stdout mirroring, "< file" runs commands from a file, "obj?" is
    rewritten to "dir(obj)".
    """
    def compl(text, state):
        # readline completer: ask the target for dir() of the object
        # left of the last dot and offer matching attribute names
        if not text or text.find('.') == -1:
            return None
        o = text.rsplit('.', 1)[0]
        src = 'try: __result = dir({})\nexcept: pass'.format(o)
        result = command('.exec', src)
        if not result or result[0] or not result[1]: return None
        matches = [
            s for s in result[1] if ('{}.{}'.format(o, s)).startswith(text)
        ]
        try:
            return '{}.{}'.format(o, matches[state])
        except IndexError:
            return None
    log('cli mode started')
    if _d.cli_first_time:
        # one-time usage banner
        # os.system('clear')
        print(
            colored('Console mode, process {} connected'.format(_d.process.pid),
                    color='green',
                    attrs=['bold']))
        print(
            colored(format_cmdline(_d.process, _d.need_inject_server),
                    color='yellow'))
        print(
            colored(
                'Enter any Python command, press Ctrl-D or type "exit" to quit')
        )
        print(colored('To toggle between JSON and normal mode, type ".j"'))
        if _d.grab_stdout:
            print(colored('To toggle stdout/stderr output, type ".p"'))
        print(
            colored(
                'To execute multiple commands from file, type "< filename"'))
        print(
            colored(
                'To explore object, type "obj?" (transformed to "dir(obj)")'))
        if _d.protocol < 3:
            print(
                colored('For Python 2 use \'_print\' instead of \'print\'',
                        color='yellow',
                        attrs=['bold']))
        print()
        _d.cli_first_time = False
    readline.set_history_length(100)
    readline.set_completer_delims('')
    readline.set_completer(compl)
    readline.parse_and_bind('tab: complete')
    try:
        readline.read_history_file('{}/console.history'.format(_d.pptop_dir))
    except:
        pass
    try:
        while True:
            try:
                cmd = input('>>> ').strip()
                if cmd == '': continue
                elif cmd == 'exit':
                    raise EOFError
                elif _d.grab_stdout and cmd == '.p':
                    # toggle live mirroring of the target's stdout/stderr
                    if print_stdout.is_active():
                        print_stdout.stop()
                    else:
                        print_stdout.start()
                elif cmd == '.j':
                    _d.console_json_mode = not _d.console_json_mode
                    print('JSON mode ' +
                          ('on' if _d.console_json_mode else 'off'))
                else:
                    if cmd.startswith('<'):
                        # batch mode: every non-empty line of the file
                        with open(os.path.expanduser(cmd[1:].strip())) as fh:
                            cmds = filter(None,
                                          [x.strip() for x in fh.readlines()])
                    elif cmd.endswith('?'):
                        cmds = ['dir({})'.format(cmd[:-1]).strip()]
                    else:
                        cmds = [cmd]
                    for cmd in cmds:
                        # reply format: (status, result[, error info])
                        r = command('.exec', cmd)
                        if r[0] == -1:
                            print(err('{}: {}'.format(r[1], r[2])))
                        else:
                            if r[1] is not None:
                                if _d.console_json_mode and \
                                        (isinstance(r[1], dict) or \
                                        isinstance(r[1], list)):
                                    print_json(r[1])
                                else:
                                    print(r[1])
            except EOFError:
                return
            except KeyboardInterrupt:
                print()
                continue
            except Exception as e:
                log_traceback()
                print(err(e))
    finally:
        log('cli mode completed')
        try:
            readline.write_history_file('{}/console.history'.format(
                _d.pptop_dir))
        except:
            log_traceback()
class ProcesSelector(GenericPlugin):
    """Plugin presenting a selectable table of running Python processes."""
    def load_data(self):
        # collect Python interpreter processes owned by the current user
        # (root sees all), excluding pptop itself
        self.data.clear()
        px = ['python', 'python2', 'python3']
        user = getpass.getuser() if os.getuid() else 'root'
        for p in psutil.process_iter():
            try:
                with p.oneshot():
                    name = p.name().split('.', 1)[0]
                    fname = p.exe().rsplit('/', 1)[-1].split('.', 1)[0]
                    if (name in px or
                            fname in px) and p.pid != os.getpid() and (
                                user == 'root' or p.username() == user):
                        d = OrderedDict()
                        d['pid'] = p.pid
                        d['command line'] = ' '.join(p.cmdline())
                        self.data.append(d)
            except psutil.AccessDenied:
                # processes we may not inspect are silently skipped
                pass
            except:
                log_traceback()
    def render(self, dtd):
        # clear any previous message unless a filter is active, then
        # wipe the bottom (status) line after the table is drawn
        if not self.filter: self.print_message()
        super().render(dtd)
        scr.stdscr.move(scr.stdscr.getmaxyx()[0] - 1, 0)
        scr.stdscr.clrtoeol()
    def render_empty(self):
        # with no filter set, show a "waiting" banner instead of an
        # empty table
        if self.is_active():
            if self.filter == '':
                self.window.clrtobot()
                self.print_message('No Python processes found in system. ' +
                                   'Waiting... "q" to abort',
                                   color=palette.WARNING)
            else:
                super().render_empty()
    def get_table_col_color(self, element, key, value):
        # highlight the PID column
        if key == 'pid':
            return palette.GREEN
    async def run(self, *args, **kwargs):
        super().run(*args, **kwargs)
def select_process():
    """Run the interactive process selector.

    Returns a psutil.Process for the chosen entry, or None when the
    user aborts with 'q'/ESC or Ctrl-C.
    """
    with scr.lock:
        cls()
        hide_cursor()
    selector = ProcesSelector(interval=1)
    selector.events = 0
    selector.name = 'process_selector'
    selector.sorting_rev = False
    selector.selectable = True
    selector.finish_event = threading.Event()
    selector.lock = threading.Lock()
    selector.title = 'Select process'
    selector.show()
    selector.start()
    _d.current_plugin = {'p': selector}
    while True:
        try:
            try:
                k = format_key(scr.stdscr.getkey())
                event = get_key_event(k)
            except KeyboardInterrupt:
                return
            except curses.error:
                # getkey() raises on resize: schedule a redraw and retry
                resize_handler.trigger_threadsafe(force=True)
                continue
            if event == 'back':
                selector.stop(wait=False)
                return
            elif event == 'filter':
                apply_filter(selector)
            elif event == 'select':
                if not selector.dtd:
                    continue
                selector.stop(wait=False)
                return psutil.Process(selector.dtd[selector.cursor]['pid'])
            elif event == 'pause':
                with scr.lock:
                    selector.toggle_pause()
            else:
                # forward any other key to the selector plugin
                with scr.lock:
                    selector.key_code = k
                    selector.key_event = event
                    selector.trigger_threadsafe()
        except:
            log_traceback()
            raise
    return
ifoctets_lock = threading.Lock()  # guards _d.ifoctets / _d.ifbw
client_lock = threading.Lock()  # serializes request/reply on the socket
client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)  # injector link
frame_counter_reset = 1000  # frame ids wrap below this value
def command(cmd, params=None):
    """Send a command frame to the injected server and return its reply.

    Frame layout (both directions): 4-byte length, 4-byte frame id,
    payload.  The request payload is the command name, optionally
    followed by 0xff and pickled *params*; the reply payload is a
    status byte plus an optional pickled result.

    Returns the unpickled result, or True when the reply has no body.
    Raises CriticalException on transport failures and RuntimeError
    when the server reports a non-zero status byte.
    """
    with client_lock:
        _d.client_frame_id += 1
        if _d.client_frame_id >= frame_counter_reset:
            # keep the frame counter small; receiver counter resets too
            _d.client_frame_id = 1
            _d.last_frame_id = 0
        try:
            frame = cmd.encode()
            if params is not None:
                frame += b'\xff' + pickle.dumps(params, protocol=_d.protocol)
            client.sendall(
                struct.pack('I', len(frame)) +
                struct.pack('I', _d.client_frame_id) + frame)
            time_start = time.time()
            # NOTE(review): recv() may legally return fewer than 4 bytes;
            # a short read here would break the unpacks below — confirm
            # the injector always sends the header in one piece
            data = client.recv(4)
            frame_id = struct.unpack('I', client.recv(4))[0]
        except:
            log_traceback()
            raise CriticalException('Injector is gone')
        if not data:
            log('critical: no data from injector')
            raise CriticalException('Injector error')
        l = struct.unpack('I', data)[0]
        data = b''
        while len(data) != l:
            data += client.recv(socket_buf)
            if time.time() > time_start + socket_timeout:
                raise CriticalException('Socket timeout')
        if frame_id != _d.client_frame_id:
            # reply id must match the request we just sent
            log('critical: got wrong frame, channel is broken')
            raise CriticalException('Wrong frame')
        _d.last_frame_id += 1
        with ifoctets_lock:
            # byte counter for the bandwidth display; wraps at 1e9
            _d.ifoctets += len(data) + 8
            if _d.ifoctets > 1000000000:
                _d.ifoctets = _d.ifoctets - 1000000000
        if data[0] != 0:
            log('injector command error, code: {}'.format(data[0]))
            raise RuntimeError('Injector command error')
        return pickle.loads(data[1:]) if len(data) > 1 else True
def get_process():
    """Return the attached psutil.Process (plugin API helper)."""
    return _d.process
def get_process_path():
    """Return the target process's sys.path entries (plugin API helper)."""
    return _d.process_path
# Layout of the four-column process info header: per-column width and
# the computed left offset of each column (filled by recalc_info_col_pos)
_info_col_width = {0: 15, 1: 20, 2: 14, 3: 10}
_info_col_pos = {0: 0}


def recalc_info_col_pos():
    """Recompute _info_col_pos: each column starts after the previous
    column's width plus a two-character gap."""
    offset = 0
    for col in _info_col_width:
        if col:
            offset += _info_col_width[col - 1] + 2
        _info_col_pos[col] = offset
@neotasker.background_worker(delay=1)
async def show_process_info(p, **kwargs):
    """Background worker: render the info header for process *p* once a
    second (command line, CPU, memory and I/O counters).

    On fatal conditions error() returns False — presumably stopping the
    worker; the main loop exits when it observes this worker stopped.
    """
    def error(txt):
        # display a fatal message on the top line
        cls()
        scr.stdscr.addstr(0, 0, str(txt), palette.ERROR)
        scr.stdscr.refresh()
        return False
    def draw_val(row,
                 col,
                 label='',
                 value=None,
                 color=palette.DEFAULT,
                 spacer=True):
        # draw one "label....value" cell in the info grid; dot leaders
        # in color mode, plain spaces otherwise
        width = _info_col_width[col]
        pos = _info_col_pos[col]
        val = str(value) if value is not None else ''
        scr.infowin.move(row + 1, pos)
        if label:
            scr.infowin.addstr(label)
        if spacer:
            scr.infowin.addstr(
                ('.' if config['display']['colors'] else ' ') *
                (width - len(label) - len(val)), palette.DARKGREY)
            scr.infowin.move(row + 1, pos + width - len(val))
        else:
            scr.infowin.addstr(' ')
        scr.infowin.addstr(val, color)
    try:
        width = scr.infowin.getmaxyx()[1]
        status = _d.status
        scr.infowin.clear()
        with p.oneshot():
            ct = p.cpu_times()
            memf = p.memory_full_info()
            mem = p.memory_info()
            ioc = p.io_counters()
            scr.infowin.move(0, 0)
            scr.infowin.addstr('Process: ')
            cmdline = format_cmdline(p, _d.need_inject_server)
            scr.infowin.addstr(cmdline[:width - 25], palette.YELLOW)
            scr.infowin.addstr(' [')
            scr.infowin.addstr(
                str(p.pid), palette.GREEN if status == 1 else palette.GREY_BOLD)
            scr.infowin.addstr(']')
            # server status marker: -1 waiting, 0 done, -2 error
            if status == -1:
                xst = 'WAIT'
                xstc = palette.GREY_BOLD
            elif status == 0:
                xst = 'DONE'
                xstc = palette.GREY_BOLD
            elif status == -2:
                xst = 'ERROR'
                xstc = palette.ERROR
            else:
                xst = None
            if xst:
                scr.infowin.addstr(' ' + xst, xstc)
            cpup = p.cpu_percent()
            draw_val(0, 0, 'CPU', '{}%'.format(cpup), palette.BLUE_BOLD)
            draw_val(1, 0, 'user', ct.user, palette.BOLD)
            draw_val(2, 0, 'system', ct.system, palette.BOLD)
            # always hide pptop thread
            draw_val(3, 0, 'threads', p.num_threads() - 1, palette.MAGENTA)
            # if config['display'].get('glyphs'):
            #     gauge = _vblks[-1] * int(cpup // 25)
            #     i = int(cpup % 25 / 25 * len(_vblks))
            #     if i:
            #         gauge += _vblks[i - 1]
            #     x = _info_col_width[0] + 1
            #     for i, g in enumerate(gauge):
            #         scr.stdscr.addstr(4 - i, x, g * 2,
            #                           (palette.GREEN, palette.YELLOW,
            #                            palette.RED, palette.RED)[i])
            draw_val(0, 1, 'Memory uss', bytes_to_iso(memf.uss), palette.BOLD)
            draw_val(1, 1, 'pss', bytes_to_iso(memf.pss), palette.BOLD)
            draw_val(2, 1, 'swap', bytes_to_iso(memf.swap),
                     palette.GREY if memf.swap < 1000000 else palette.YELLOW)
            draw_val(0, 2, 'shd', bytes_to_iso(mem.shared), palette.BOLD)
            draw_val(1, 2, 'txt', bytes_to_iso(mem.text), palette.BOLD)
            draw_val(2, 2, 'dat', bytes_to_iso(mem.data), palette.BOLD)
            draw_val(0,
                     3,
                     'Files:',
                     len(p.open_files()),
                     palette.CYAN,
                     spacer=False)
            draw_val(1,
                     3,
                     value='{} {} ({})'.format(glyph.UPLOAD, ioc.read_count,
                                               bytes_to_iso(ioc.read_chars)),
                     color=palette.GREEN)
            draw_val(2,
                     3,
                     value='{} {} ({})'.format(glyph.DOWNLOAD, ioc.write_count,
                                               bytes_to_iso(ioc.write_chars)),
                     color=palette.BLUE)
        with scr.lock:
            scr.infowin.refresh()
            scr.stdscr.refresh()
    except psutil.AccessDenied:
        log_traceback()
        return error('Access denied')
    except psutil.NoSuchProcess:
        log_traceback()
        return error('Process is gone')
    except CriticalException:
        log_traceback()
        return error('Process server is gone')
    except curses.error:
        # drawing can fail mid-resize: clear the header and retry on the
        # next tick
        log_traceback()
        try:
            for i in range(2):
                scr.stdscr.move(i, 0)
                scr.stdscr.clrtoeol()
            scr.stdscr.refresh()
        except:
            pass
    except Exception as e:
        return error(e)
@neotasker.background_worker(delay=0.1)
async def show_bottom_bar(**kwargs):
    """Background worker: draw the bottom bar (F-key help, polling
    interval, pickle protocol, frame counters and link bandwidth)."""
    try:
        with scr.lock:
            height, width = scr.stdscr.getmaxyx()
            scr.stdscr.move(height - 1, 0)
            scr.stdscr.addstr(' ' * (width - 1), palette.BAR)
            scr.stdscr.move(height - 1, 0)
            color = palette.BAR
            # F-key shortcut help, left-aligned
            for h in sorted(bottom_bar_help):
                scr.stdscr.addstr('F{}'.format(h))
                scr.stdscr.addstr(bottom_bar_help[h].ljust(6), color)
            try:
                with plugin_lock:
                    # current plugin's interval, shown as "I:<n>"
                    i = _d.current_plugin['p'].delay
                    if int(i) == i:
                        i = int(i)
                    i = 'I:' + str(i)
            except:
                i = ''
            stats = '{} P:{} {} {:03d}/{:03d} '.format(i, _d.protocol,
                                                       glyph.CONNECTION,
                                                       _d.client_frame_id,
                                                       _d.last_frame_id)
            with ifoctets_lock:
                bw = _d.ifbw
            # human-readable bandwidth, color-coded by load
            if bw < 1000:
                bws = '{} Bs'.format(bw)
            elif bw < 1000000:
                bws = '{:.0f} kBs'.format(bw / 1000)
            else:
                bws = '{:.0f} MBs'.format(bw / 1000000)
            bws = bws.rjust(7)
            if bw > 2000000:
                bwc = palette.BAR_ERROR
            elif bw > 500000:
                bwc = palette.BAR_WARNING
            else:
                bwc = palette.BAR_OK
            scr.stdscr.addstr(height - 1, width - len(stats) - len(bws) - 1,
                              stats, color)
            scr.stdscr.addstr(bws, bwc)
            scr.stdscr.refresh()
    except:
        pass
# don't make this async, it should always work in own thread
@neotasker.background_worker
def update_status(**kwargs):
    """Background worker: poll the injected server's status once per
    second and store it in _d.status.

    On failure records -2, which show_process_info renders as the
    ERROR state.
    """
    try:
        _d.status = command('.status')
    except Exception:
        log_traceback()
        # bug fix: the original assigned a dead local ``status = -2``,
        # leaving _d.status stale and the ERROR marker never shown
        _d.status = -2
    finally:
        time.sleep(1)
@neotasker.background_worker
def grab_stdout(**kwargs):
    """Background worker: pull captured stdout/stderr from the injected
    server and append it to the shared buffer, twice a second."""
    try:
        chunk = command('.gs')
        with stdout_buf_lock:
            _d.stdout_buf += chunk
    except:
        log_traceback()
    finally:
        time.sleep(0.5)
@neotasker.background_worker
def print_stdout(**kwargs):
    """Background worker: flush the grabbed stdout buffer to the local
    console."""
    with stdout_buf_lock:
        pending = _d.stdout_buf
        if pending != '':
            print(pending, end='')
            _d.stdout_buf = ''
    time.sleep(0.1)
@neotasker.background_worker(interval=1)
async def calc_bw(**kwargs):
    """Background worker: derive bytes/sec from the octet counter once
    per second, accounting for its wraparound at 1e9 (see command())."""
    with ifoctets_lock:
        current = _d.ifoctets
        previous = _d.ifoctets_prev
        if current >= previous:
            _d.ifbw = current - previous
        else:
            # counter wrapped past 1000000000
            _d.ifbw = 1000000000 - previous + current
        _d.ifoctets_prev = current
# Global mutable application state, shared by the UI and all workers
_d = SimpleNamespace(
    cli_first_time=True,  # show the console-mode banner only once
    grab_stdout=False,  # mirror the target's stdout/stderr locally
    stdout_buf='',  # grabbed output (guarded by stdout_buf_lock)
    current_plugin=None,  # plugin dict currently in the foreground
    process_path=[],  # sys.path entries of the target process
    default_plugin=None,  # plugin to activate on startup
    process=None,  # psutil.Process of the target
    protocol=None,  # negotiated pickle protocol
    force_protocol=None,  # protocol explicitly requested via -p
    client_frame_id=0,  # id of the last request frame sent
    last_frame_id=0,  # frames received since the last counter reset
    ifoctets=0,  # transferred-bytes counter (wraps at 1e9)
    ifoctets_prev=0,  # previous counter sample (see calc_bw)
    ifbw=0,  # computed bytes/sec
    pptop_dir=None,  # user config dir (~/.pptop)
    gdb=None,  # path to the gdb binary used for injection
    work_pid=None,  # PID to attach to, when given on the command line
    need_inject_server=True,  # False when pptop spawned the child itself
    inject_method=None,  # None (auto), 'native', 'loadcffi', 'unsafe'
    inject_lib=None,  # shared library used by the inject method
    child=None,  # subprocess.Popen of the spawned child, if any
    child_cmd=None,  # command line of the spawned child
    child_args='',  # extra args passed to the child
    status=None,  # injected server status (see update_status)
    console_json_mode=True,  # console prints dict/list results as JSON
    exec_code=None,  # code to run via -x, then exit
    output_as_json=False)  # -J: print -x result as JSON
def sigwinch_handler(signum=None, frame=None):
    """SIGWINCH handler: schedule a UI redraw on terminal resize."""
    resize_handler.trigger_threadsafe(force=True)
def find_lib(name):
    """Return the first file matching glob pattern *name* in any
    sys.path directory, or None when no directory contains a match."""
    import glob
    for directory in sys.path:
        matches = glob.glob('{}/{}'.format(directory, name))
        if matches:
            return matches[0]
    return None
def init_inject():
    """Choose the code-injection method and locate its shared library.

    With 'auto' (or unset) the candidates are tried in order: the
    bundled native injector ('native'), cffi's backend library
    ('loadcffi'), then the 'unsafe' fallback.  An explicitly configured
    method raises RuntimeError when its library cannot be found.
    """
    if _d.inject_method is None or _d.inject_method == 'auto':
        _d.inject_method = 'native'
        _d.inject_lib = find_lib('__pptop_injector.*.so')
        if not _d.inject_lib:
            _d.inject_method = 'loadcffi'
            _d.inject_lib = find_lib('_cffi_backend.*.so')
            if not _d.inject_lib:
                # no library needed for the unsafe method
                _d.inject_method = 'unsafe'
    else:
        if _d.inject_method == 'native':
            _d.inject_lib = find_lib('__pptop_injector.*.so')
            if not _d.inject_lib:
                raise RuntimeError(
                    '__pptop_injector shared library not found.' +
                    ' reinstall package or select different inject method')
        elif _d.inject_method == 'loadcffi':
            _d.inject_lib = find_lib('_cffi_backend.*.so')
            if not _d.inject_lib:
                raise RuntimeError(
                    '_cffi_backend shared library not found.' +
                    ' install "cffi" package or select different inject method')
        else:
            _d.inject_method = 'unsafe'
def inject_server(gdb, p):
    """Inject the pptop server into running process *p* via gdb.

    Builds a batch gdb script: for the 'native' and 'loadcffi' methods
    the helper shared library is dlopen()ed first; the server is then
    started either through the native injector entry point or by
    evaluating a PyRun_SimpleString bootstrap under the GIL.
    Raises RuntimeError when gdb exits with a non-zero return code.
    """
    cmds = []
    pid = p.pid
    # parent dir of the package, to be added to the target's sys.path
    libpath = os.path.abspath(os.path.dirname(__file__) + '/..')
    if _d.inject_method in ['native', 'loadcffi']:
        # dlopen flag 2 == RTLD_NOW
        cmds.append('call (void)dlopen("{}", 2)'.format(_d.inject_lib))
    if _d.inject_method == 'native':
        cmds.append('call (int)__pptop_start_injection("{}",{},{},"{}")'.format(
            libpath, os.getpid(), _d.protocol,
            log_config.fname if log_config.fname else ''))
    else:
        # run the Python bootstrap inside the target, holding the GIL
        cmds += [
            'call (PyGILState_STATE)PyGILState_Ensure()',
            ('call (int)PyRun_SimpleString("' +
             'import sys\\nif \\"{path}\\" not in sys.path: ' +
             'sys.path.insert(0,\\"{path}\\")\\n' +
             'import pptop.injection;pptop.injection.start(' +
             '{mypid},{protocol}{lg})")').format(
                 path=libpath,
                 mypid=os.getpid(),
                 lg='' if not log_config.fname else ',lg=\\"{}\\"'.format(
                     log_config.fname),
                 protocol=_d.protocol), ' call (void)PyGILState_Release($1)'
        ]
    args = [gdb, '-p', str(pid), '--batch'
           ] + ['--eval-command={}'.format(c) for c in cmds]
    log(args)
    p = subprocess.Popen(args,
                         shell=False,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    # NOTE(review): local ``err`` shadows the module-level err() helper
    out, err = p.communicate()
    log(out)
    log(err)
    if p.returncode:
        raise RuntimeError(err)
def inject_plugin(plugin):
    """Send a plugin's injection payload to the target process.

    Acts only when the plugin still needs injection (injected is
    False).  Returns True on success, False on failure, and None when
    nothing was sent (already injected, or no payload).
    """
    p = plugin['p']
    if p.injected is not False:
        return None
    log('injecting plugin {}'.format(p.name))
    # mark as injected before sending, matching the original semantics
    p.injected = True
    try:
        command('.inject', plugin['i'])
    except:
        print_message('Plugin injection failed', color=palette.ERROR)
        return False
    return True
def switch_plugin(new_plugin):
    """Make *new_plugin* the active (foreground) plugin.

    The current plugin is stopped (or merely hidden when it runs in the
    background); the new plugin's payload is injected if needed, then
    the plugin is started and shown.
    """
    if _d.current_plugin:
        if _d.current_plugin is new_plugin:
            return
        if not _d.current_plugin['p'].background:
            _d.current_plugin['p'].stop(wait=False)
        else:
            # background plugins keep collecting data while hidden
            _d.current_plugin['p'].hide()
    p = new_plugin['p']
    p._previous_plugin = _d.current_plugin
    p.key_event = None
    p.key_code = None
    inject_plugin(new_plugin)
    if not p.is_active(): p.start()
    p.show()
    with plugin_lock:
        _d.current_plugin = new_plugin
def run():
    """Main session: attach to (or select) the target process, connect
    to the injected server, then run the key-event loop until quit.

    When _d.exec_code is set (-x mode), the code is executed remotely
    and the function returns without entering the UI loop.
    """
    def autostart_plugins():
        # inject and start every autostart-flagged plugin that is not
        # already in the foreground
        for plugin in plugins_autostart:
            if plugin['p'] is not _d.current_plugin.get('p'):
                log('autostarting {}'.format(plugin['m']))
                inject_plugin(plugin)
                p = plugin['p']
                if p.background:
                    p.start()
    try:
        # --- pick the target process ---
        if not _d.work_pid:
            init_curses(initial=True,
                        after_resize=after_resize,
                        colors=config['display'].get('colors'),
                        glyphs=config['display'].get('glyphs'))
            p = select_process()
        else:
            p = psutil.Process(_d.work_pid)
        if not p: return
        _d.process = p
        client.settimeout(socket_timeout)
        client.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, socket_buf)
        client.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, socket_buf)
        # --- inject the server (unless pptop launched the child) ---
        if _d.need_inject_server:
            inject_server(_d.gdb, p)
            log('server injected')
        # wait for the server's unix socket to appear
        sock_path = '/tmp/.pptop.{}'.format(os.getpid())
        for i in range(injection_timeout * 10):
            if os.path.exists(sock_path):
                break
            time.sleep(0.1)
        try:
            client.connect(sock_path)
        except:
            log_traceback()
            raise RuntimeError('Unable to connect to process')
        log('connected')
        # --- protocol negotiation: server sends one byte ---
        frame = b''
        with client_lock:
            time_start = time.time()
            while len(frame) < 1:
                data = client.recv(1)
                if data:
                    frame += data
                if time.time() > time_start + socket_timeout:
                    raise CriticalException('Socket timeout')
        server_protocol = struct.unpack('b', frame)[0]
        if server_protocol < _d.protocol:
            if _d.force_protocol:
                raise RuntimeError(
                    'Process doesn\'t support protocol {}'.format(_d.protocol))
            else:
                _d.protocol = server_protocol
                log('Falling back to protocol {}'.format(_d.protocol))
        # --- -x mode: execute code remotely and exit ---
        if _d.exec_code:
            end_curses()
            result = command('.x', _d.exec_code)
            if result[0] == 0:
                if _d.output_as_json:
                    print_json(result[1])
                else:
                    print(result[1] if result[1] else '')
            else:
                print(err('{}: {}'.format(result[1], result[2])))
            return
        # --- full UI: start workers and the default plugin ---
        init_curses(initial=True,
                    after_resize=after_resize,
                    colors=config['display'].get('colors'),
                    glyphs=config['display'].get('glyphs'))
        signal.signal(signal.SIGWINCH, sigwinch_handler)
        calc_bw.start()
        update_status.start()
        if _d.grab_stdout:
            grab_stdout.start()
        _d.process_path.clear()
        plugin_process_path.clear()
        if _d.grab_stdout:
            try:
                command('.gs')
            except:
                raise RuntimeError('Unable to set stdout grabber')
        ppath = []
        for i in command('.path'):
            ppath.append(os.path.abspath(i))
        _d.process_path.extend(sorted(ppath, reverse=True))
        plugin_process_path.extend(_d.process_path)
        log('process path: {}'.format(_d.process_path))
        switch_plugin(_d.default_plugin)
        recalc_info_col_pos()
        show_process_info.start(p=p)
        show_bottom_bar.start()
        neotasker.spawn(autostart_plugins)
        log('main loop started')
        # --- key event loop ---
        while True:
            try:
                try:
                    k = format_key(scr.stdscr.getkey())
                    event = get_key_event(k)
                except KeyboardInterrupt:
                    return
                except curses.error:
                    # getkey() raises on resize: schedule redraw, retry
                    resize_handler.trigger_threadsafe(force=True)
                    continue
                if show_process_info.is_stopped():
                    # info worker stops on fatal errors (process gone)
                    return
                elif k in plugin_shortcuts:
                    switch_plugin(plugin_shortcuts[k])
                elif event == 'ready':
                    try:
                        result = command('.ready')
                    except:
                        result = None
                    with scr.lock:
                        if result:
                            print_message('Ready event sent', color=palette.OK)
                        else:
                            print_message('Command failed', color=palette.ERROR)
                elif event == 'reinject' and \
                        _d.current_plugin['p'].injected is not None:
                    try:
                        result = command('.inject', _d.current_plugin['i'])
                    except:
                        result = None
                    with scr.lock:
                        if result:
                            print_message('Plugin re-injected',
                                          color=palette.OK)
                        else:
                            print_message('Plugin re-injection failed',
                                          color=palette.ERROR)
                elif event == 'quit':
                    _d.current_plugin['p'].stop(wait=False)
                    show_process_info.stop(wait=False)
                    show_bottom_bar.stop(wait=False)
                    return
                elif event == 'console':
                    # suspend curses and enter the interactive console;
                    # scr.lock is held to pause background redraws
                    with scr.lock:
                        end_curses()
                        if _d.grab_stdout: print_stdout.start()
                        cli_mode()
                        if _d.grab_stdout: print_stdout.stop()
                        init_curses(after_resize=after_resize)
                        resize_term()
                elif event == 'show-console':
                    # show the raw console output until a key is pressed
                    with scr.lock:
                        end_curses()
                        hide_cursor()
                        if _d.grab_stdout: print_stdout.start()
                        try:
                            wait_key()
                        except KeyboardInterrupt:
                            pass
                        if _d.grab_stdout: print_stdout.stop()
                        init_curses(after_resize=after_resize)
                        resize_term()
                elif event == 'filter':
                    apply_filter(_d.current_plugin['p'])
                elif event == 'interval':
                    apply_interval(_d.current_plugin['p'])
                elif event == 'pause':
                    with scr.lock:
                        _d.current_plugin['p'].toggle_pause()
                elif event in _d.current_plugin['p'].inputs:
                    # plugin-defined input prompt
                    with scr.lock:
                        try:
                            prev_value = _d.current_plugin['p'].get_input(event)
                        except ValueError:
                            continue
                        value = prompt(
                            ps=_d.current_plugin['p'].get_input_prompt(event),
                            value=prev_value if prev_value is not None else '')
                        _d.current_plugin['p'].inputs[event] = value
                        try:
                            _d.current_plugin['p'].handle_input(
                                event, value, prev_value)
                        except:
                            pass
                else:
                    # broadcast the event to all plugins, then forward
                    # the key to the active one
                    for i, plugin in plugins.items():
                        try:
                            plugin['p'].handle_key_global_event(event, k)
                        except:
                            log_traceback()
                    with scr.lock:
                        _d.current_plugin['p'].key_code = k
                        _d.current_plugin['p'].key_event = event
                        _d.current_plugin['p'].trigger_threadsafe()
            except:
                log_traceback()
                return
    except:
        log_traceback()
        raise
    finally:
        end_curses()
def start():
    """
    Entry point for the ppTOP client.

    Parses command-line options, loads (or bootstraps) the user
    configuration, imports and initialises the configured plugins,
    attaches to or spawns the target Python process, runs the main UI /
    exec loop and finally unloads plugins and stops the task supervisor.
    Relies heavily on module-level state (``_d``, ``config``,
    ``plugins``, ``events_by_key`` …) shared with the rest of the module.
    """

    def format_plugin_option(dct, o, v):
        # Expand a dotted option name ("plugin.sub.opt") into nested
        # dicts, so "-o threads.filter=x" becomes {'threads': {'filter': 'x'}}.
        if o.find('.') != -1:
            x, y = o.split('.', 1)
            dct[x] = {}
            format_plugin_option(dct[x], y, v)
        else:
            dct[o] = v

    _me = 'ppTOP version %s' % __version__

    # ---------- command-line interface ----------
    ap = argparse.ArgumentParser(description=_me)
    ap.add_argument('-V',
                    '--version',
                    help='Print version and exit',
                    action='store_true')
    ap.add_argument('-R',
                    '--raw',
                    help='Raw mode (disable colors and unicode glyphs)',
                    action='store_true')
    ap.add_argument('--disable-glyphs',
                    help='disable unicode glyphs',
                    action='store_true')
    ap.add_argument('file',
                    nargs='?',
                    help='File, PID file or PID',
                    metavar='FILE/PID')
    ap.add_argument('-a', '--args', metavar='ARGS', help='Child args (quoted)')
    ap.add_argument('--python',
                    metavar='FILE',
                    help='Python interpreter to launch file')
    ap.add_argument('--gdb', metavar='FILE', help='Path to gdb')
    ap.add_argument('-p',
                    '--protocol',
                    metavar='VER',
                    type=int,
                    help=textwrap.dedent('''Pickle protocol, default is highest.
                    4: Python 3.4+,
                    3: Python 3.0+,
                    2: Python 2.3+,
                    1: vintage'''))
    ap.add_argument('--inject-method',
                    choices=['auto', 'native', 'loadcffi', 'unsafe'],
                    help='Inject method')
    ap.add_argument('-g',
                    '--grab-stdout',
                    help='Grab stdout/stderr of injected process',
                    action='store_true')
    ap.add_argument(
        '-w',
        '--wait',
        metavar='SEC',
        type=float,
        help='If file is specified, wait seconds to start main code')
    ap.add_argument(
        '-f',
        '--config-file',
        help='Alternative config file (default: ~/.pptop/pptop.yml)',
        metavar='CONFIG',
        dest='config')
    ap.add_argument('-d',
                    '--default',
                    help='Default plugin to launch',
                    metavar='PLUGIN',
                    dest='plugin')
    ap.add_argument(
        '-o',
        '--plugin-option',
        help='Override plugin config option, e.g. threads.filter=mythread',
        metavar='NAME=VALUE',
        action='append',
        dest='plugin_options')
    ap.add_argument('--log', metavar='FILE', help='Send debug log to file')
    ap.add_argument(
        '-x',
        '--exec',
        help='Exec code from a file ("-" for stdin) and exit '
        ' (the code can put result to "out" var)',
        metavar='FILE',
        dest='_exec')
    ap.add_argument('-J',
                    '--json',
                    help='Output exec result as JSON',
                    action='store_true')
    # Optional shell tab-completion, only if argcomplete is installed.
    try:
        import argcomplete
        argcomplete.autocomplete(ap)
    except:
        pass
    a = ap.parse_args()

    # ---------- debug logging ----------
    # When --log is given, route all debug output (including asyncio and
    # neotasker internals) through the ppTOP log handler only.
    if a.log:
        log_config.fname = a.log
        log_config.name = 'client:{}'.format(os.getpid())
        logging.getLogger('asyncio').setLevel(logging.DEBUG)
        logging.getLogger('neotasker').setLevel(logging.DEBUG)
        logging.basicConfig(level=logging.DEBUG)
        le = logging.getLogger()
        le.addHandler(ppLoghandler())
        # Drop the handlers basicConfig installed; keep only ppLoghandler.
        list(map(le.removeHandler, le.handlers))
        neotasker.set_debug(True)
    init_logging()
    if a.version:
        print(_me)
        exit()
    log('initializing')

    # ---------- resolve the FILE/PID argument ----------
    # It may be a numeric PID, a file containing a PID, or a Python
    # program to launch; try each interpretation in that order.
    if a.file:
        try:
            # pid?
            _d.work_pid = int(a.file)
        except:
            # probably pid file
            try:
                with open(a.file) as fh:
                    _d.work_pid = int(fh.read(128))
            except:
                # okay, program to launch
                _d.child_cmd = os.path.abspath(a.file)

    # ---------- configuration ----------
    _d.pptop_dir = os.path.expanduser('~/.pptop')
    if a.config:
        config_file = a.config
        use_default_config = False
    else:
        config_file = _d.pptop_dir + '/pptop.yml'
        use_default_config = True
    sys.path.append(_d.pptop_dir + '/lib')
    config.clear()
    # First run: bootstrap ~/.pptop from the bundled default config.
    if use_default_config and not os.path.isfile(config_file):
        log('no user config, setting default')
        try:
            os.mkdir(_d.pptop_dir)
        except:
            pass
        if not os.path.isdir(_d.pptop_dir + '/scripts'):
            shutil.copytree(dir_me + '/config/scripts',
                            _d.pptop_dir + '/scripts')
        shutil.copy(dir_me + '/config/pptop.yml', _d.pptop_dir + '/pptop.yml')
        if not os.path.isdir(_d.pptop_dir + '/lib'):
            os.mkdir(_d.pptop_dir + '/lib')
    # NOTE(review): yaml.load without an explicit Loader is deprecated in
    # PyYAML and can construct arbitrary Python objects from the config
    # file — consider yaml.safe_load here.
    with open(config_file) as fh:
        config.update(yaml.load(fh.read()))
    console = config.get('console')
    if console is None: console = {}
    _d.console_json_mode = console.get('json-mode')
    # CLI --inject-method overrides the config file value.
    _d.inject_method = a.inject_method if a.inject_method else config.get(
        'inject-method')
    if config.get('display') is None:
        config['display'] = {}
    if a.raw:
        config['display']['colors'] = False
    if a.grab_stdout:
        _d.grab_stdout = True
    if a.raw or a.disable_glyphs:
        config['display']['glyphs'] = False

    # ---------- exec mode vs interactive key bindings ----------
    if a._exec:
        # One-shot mode: read the code to execute and exit after run().
        if a._exec == '-':
            _d.exec_code = sys.stdin.read()
        else:
            with open(a._exec) as fd:
                _d.exec_code = fd.read()
        _d.output_as_json = a.json
    else:
        # Interactive mode: let the config remap global key bindings.
        # Any event mentioned in config['keys'] first loses its default
        # keys, then gains the configured ones.
        ebk = {}
        global_keys = config.get('keys')
        if global_keys:
            for event, keys in global_keys.items():
                for k, v in events_by_key.copy().items():
                    if event == v:
                        del events_by_key[k]
                if keys is not None:
                    for k in keys if isinstance(keys, list) else [keys]:
                        ebk[str(k)] = str(event)
        events_by_key.update(ebk)

    # ---------- "-o name=value" plugin option overrides ----------
    plugin_options = {}
    for x in a.plugin_options or []:
        try:
            o, v = x.split('=', 1)
        except:
            o = x
            v = None
        format_plugin_option(plugin_options, o, v)
    if plugin_options:
        config.update(merge_dict(config, {'plugins': plugin_options}))

    # ---------- plugin loading ----------
    log('loading plugins')
    try:
        plugins.clear()
        for i, v in config.get('plugins', {}).items():
            try:
                log('+ plugin ' + i)
                if v is None: v = {}
                # Built-in plugins live in pptop.plugins, third-party
                # ones in the pptopcontrib namespace.
                try:
                    mod = importlib.import_module('pptop.plugins.' + i)
                    mod.__version__ = 'built-in'
                except ModuleNotFoundError:
                    mod = importlib.import_module('pptopcontrib.' + i)
                try:
                    mod.__version__
                except:
                    raise RuntimeError(
                        'Please specify __version__ in plugin file')
                plugin = {'m': mod}
                plugins[i] = plugin
                p = mod.Plugin(interval=float(
                    v.get('interval', mod.Plugin.default_interval)))
                # Wire client-side callbacks into the plugin instance.
                p.command = command
                p.get_plugins = get_plugins
                p.get_plugin = get_plugin
                p.get_config_dir = get_config_dir
                p.switch_plugin = switch_plugin
                p.get_process = get_process
                p.get_process_path = get_process_path
                p.global_config = config
                plugin['p'] = p
                plugin['id'] = i
                p._inject = partial(inject_plugin, plugin=plugin)
                # Collect the source of the plugin's optional injection
                # hooks; they are shipped to the target process as text.
                injection = {'id': i}
                need_inject = False
                try:
                    injection['l'] = inspect.getsource(mod.injection_load)
                    need_inject = True
                except:
                    pass
                try:
                    injection['i'] = inspect.getsource(mod.injection)
                    need_inject = True
                except:
                    pass
                try:
                    injection['u'] = inspect.getsource(mod.injection_unload)
                    need_inject = True
                except:
                    pass
                if need_inject:
                    p.injected = False
                    plugin['i'] = injection
                else:
                    # None marks plugins that never need injection.
                    p.injected = None
                # First plugin wins by default; config "default: true" or
                # the -d option can override.
                if not _d.default_plugin or val_to_boolean(
                        v.get('default')) or i == a.plugin:
                    _d.default_plugin = plugin
                p_cfg = v.get('config')
                p.config = {} if p_cfg is None else p_cfg
                p.on_load()
                p._on_load()
                if 'l' in injection:
                    injection['lkw'] = p.get_injection_load_params()
                # Optional keyboard shortcut; F1..F10 also appear in the
                # bottom help bar.
                if 'shortcut' in v:
                    sh = v['shortcut']
                    plugin['shortcut'] = sh
                    plugin_shortcuts[sh] = plugin
                    if sh.startswith('KEY_F('):
                        try:
                            f = int(sh[6:-1])
                            if f <= 10:
                                bottom_bar_help[f] = p.short_name
                        except:
                            pass
                else:
                    plugin['shortcut'] = ''
                if 'filter' in v:
                    p.filter = str(v['filter'])
                if 'cursor' in v:
                    p._cursor_enabled_by_user = val_to_boolean(v['cursor'])
                if val_to_boolean(v.get('autostart')):
                    plugins_autostart.append(plugin)
            except Exception as e:
                raise RuntimeError('plugin {}: {}'.format(i, e))
    except:
        log_traceback()
        raise

    # ---------- async task supervisor ----------
    neotasker.task_supervisor.start()
    neotasker.task_supervisor.create_aloop('pptop', default=True, daemon=True)
    neotasker.task_supervisor.create_aloop('service', daemon=True)
    try:
        if a.file and not _d.work_pid:
            # launch file
            # Spawn the target script under pptop.injection so the server
            # side is present from the start (no gdb injection needed).
            _d.need_inject_server = False
            if a.python:
                python_path = a.python
            else:
                python_path = shutil.which('python3')
            if not python_path:
                raise RuntimeError(
                    'python3 not found in path, please specify manually')
            args = (python_path, '-m', 'pptop.injection', a.file,
                    str(os.getpid()))
            if a.wait is not None:
                args += ('-w', str(a.wait))
            if a.protocol is not None:
                args += ('-p', str(a.protocol))
            if a.args:
                args += ('-a', a.args)
            if log_config.fname:
                args += ('--log', log_config.fname)
            log('starting child process')
            _d.child = subprocess.Popen(args,
                                        shell=False,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE)
            _d.work_pid = _d.child.pid
            _d.protocol = pickle.HIGHEST_PROTOCOL
        else:
            # Attach to an already running process: inject via gdb.
            if a.gdb:
                _d.gdb = a.gdb
            else:
                _d.gdb = shutil.which('gdb')
            if not _d.gdb or not os.path.isfile(_d.gdb):
                raise RuntimeError('gdb not found')
            # check yama ptrace scope
            try:
                with open('/proc/sys/kernel/yama/ptrace_scope') as fd:
                    yps = int(fd.read().strip())
            except:
                yps = None
            if yps:
                raise RuntimeError(
                    'yama ptrace scope is on. ' +
                    'disable with "sudo sysctl -w kernel.yama.ptrace_scope=0"')
            init_inject()
            log('inject method: {}'.format(_d.inject_method))
            log('inject library: {}'.format(_d.inject_lib))
            if a.protocol is not None:
                if a.protocol > pickle.HIGHEST_PROTOCOL or a.protocol < 1:
                    raise ValueError('Protocol {} is not supported'.format(
                        a.protocol))
                _d.protocol = a.protocol
                _d.force_protocol = a.protocol
            else:
                _d.protocol = pickle.HIGHEST_PROTOCOL
        log('Pickle protocol: {}'.format(_d.protocol))
        # Main UI / exec loop; returns when the user quits or exec done.
        run()
        log('terminating')
        for p, v in plugins.items():
            v['p'].on_unload()
    except Exception as e:
        log_traceback()
        raise
    finally:
        # Best-effort cleanup: close the client link and stop the
        # supervisor even if startup failed half-way.
        try:
            client.close()
        except:
            pass
        neotasker.task_supervisor.stop(wait=False, cancel_tasks=True)
return 0 | en | 0.458468 | ppTOP v{version} (c) Altertech The product is available under {license} license. https://pptop.io/ # os.system('clear') # _vblks='▁▂▃▅▆▇' #( # 5 if config['display'].get('glyphs') and i == 1 else 2) # always hide pptop thread # if config['display'].get('glyphs'): # gauge = _vblks[-1] * int(cpup // 25) # i = int(cpup % 25 / 25 * len(_vblks)) # if i: # gauge += _vblks[i - 1] # x = _info_col_width[0] + 1 # for i, g in enumerate(gauge): # scr.stdscr.addstr(4 - i, x, g * 2, # (palette.GREEN, palette.YELLOW, # palette.RED, palette.RED)[i]) # don't make this async, it should always work in own thread # None (auto), 'native', 'loadcffi', 'unsafe' Find first library matching pattern Pickle protocol, default is highest. 4: Python 3.4+, 3: Python 3.0+, 2: Python 2.3+, 1: vintage # pid? # probably pid file # okay, program to launch # launch file # check yama ptrace scope | 1.695754 | 2 |
scripts/fix_ids.py | golly-splorts/golly-test-data | 0 | 6617529 | <gh_stars>0
import uuid
import json
for iseason in [2]:
    # --- regular season: give every scheduled game a fresh UUID ---
    with open('season%d/season.json' % (iseason), 'r') as infile:
        season = json.load(infile)
    for game in (g for day in season for g in day):
        game['id'] = str(uuid.uuid4())
    seasonout = 'season%d/new_season.json' % (iseason)
    with open(seasonout, 'w') as outfile:
        json.dump(season, outfile, indent=4)
    print(f"Wrote new season with new game ids to {seasonout}")

    # --- postseason: same treatment for each series bracket ---
    with open('season%d/postseason.json' % (iseason), 'r') as infile:
        postseason = json.load(infile)
    for series in postseason:
        miniseason = postseason[series]
        for game in (g for day in miniseason for g in day):
            game['id'] = str(uuid.uuid4())
        postseason[series] = miniseason
    postout = 'season%d/new_postseason.json' % (iseason)
    with open(postout, 'w') as outfile:
        json.dump(postseason, outfile, indent=4)
    print(f"Wrote new season with new game ids to {postout}")
| import uuid
import json
for iseason in [2]:
    # --- regular season: give every scheduled game a fresh UUID ---
    with open('season%d/season.json' % (iseason), 'r') as infile:
        season = json.load(infile)
    for game in (g for day in season for g in day):
        game['id'] = str(uuid.uuid4())
    seasonout = 'season%d/new_season.json' % (iseason)
    with open(seasonout, 'w') as outfile:
        json.dump(season, outfile, indent=4)
    print(f"Wrote new season with new game ids to {seasonout}")

    # --- postseason: same treatment for each series bracket ---
    with open('season%d/postseason.json' % (iseason), 'r') as infile:
        postseason = json.load(infile)
    for series in postseason:
        miniseason = postseason[series]
        for game in (g for day in miniseason for g in day):
            game['id'] = str(uuid.uuid4())
        postseason[series] = miniseason
    postout = 'season%d/new_postseason.json' % (iseason)
    with open(postout, 'w') as outfile:
        json.dump(postseason, outfile, indent=4)
print(f"Wrote new season with new game ids to {postout}") | en | 0.632225 | # replace ids in season.json # replace ids in postseason.json | 2.548906 | 3 |
nors_srv.py | dmazzer/nors_server | 0 | 6617530 | #!/usr/bin/env python3
"""
nors_srv.py: NORS Server application
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, NORS project"
__credits__ = ""
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import os
from app import create_app, db
from app.models import User
from config.config import Nors_Configuration
if __name__ == '__main__':
    # Build the Flask app for the profile named in FLASK_CONFIG
    # (defaults to the production profile).
    app = create_app(os.environ.get('FLASK_CONFIG', 'production'))

    with app.app_context():
        # Seed a default admin account the first time the server starts.
        if User.objects.first() is None:
            admin = User(username='admin')
            admin.set_password('<PASSWORD>')
            admin.save()

    # Bind address and port come from the [server] section of the
    # configuration file.
    configuration = Nors_Configuration()
    server_ip = configuration.ReadConfig('server', 'ip')
    server_port = int(configuration.ReadConfig('server', 'port'))

    app.debug = True  # NOTE: debug stays on even for the production profile
    app.run(host=server_ip, port=server_port)
| #!/usr/bin/env python3
"""
nors_srv.py: NORS Server application
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, NORS project"
__credits__ = ""
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import os
from app import create_app, db
from app.models import User
from config.config import Nors_Configuration
if __name__ == '__main__':
app = create_app(os.environ.get('FLASK_CONFIG', 'production'))
with app.app_context():
# create a development user
if User.objects.first() is None:
u = User(username = 'admin')
u.set_password('<PASSWORD>')
u.save()
# reading configurations from config file
config = Nors_Configuration()
server_ip = config.ReadConfig('server', 'ip')
server_port = int(config.ReadConfig('server', 'port'))
app.debug = True
app.run(host=server_ip, port=server_port)
| en | 0.588633 | #!/usr/bin/env python3 nors_srv.py: NORS Server application # create a development user # reading configurations from config file | 2.122693 | 2 |
socialnetworks/__init__.py | gGonz/django-socialnetworks | 5 | 6617531 | <filename>socialnetworks/__init__.py<gh_stars>1-10
# -*- coding: utf-8 -*-
__author__ = u'<NAME>'
__version__ = '0.4.12'
| <filename>socialnetworks/__init__.py<gh_stars>1-10
# -*- coding: utf-8 -*-
__author__ = u'<NAME>'
__version__ = '0.4.12'
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.029618 | 1 |
src/tests/worker/test_worker_class.py | mehsoy/jaws | 1 | 6617532 | <reponame>mehsoy/jaws<filename>src/tests/worker/test_worker_class.py<gh_stars>1-10
import os
import pytest
from worker.task_status import TaskStatus
from worker.worker_class import Worker
from worker.worker_status import WorkerStatus
@pytest.mark.usefixtures('build_test_environment')
class TestWorker:
    """Integration tests for Worker task adoption and copying.

    Relies on environment fixtures (``centos127``, ``archive1``,
    ``make_tar_task``, ``worker``); ``worker._msg`` is presumably a mock,
    since its calls are asserted below — TODO confirm.
    """

    def test_adopt_task(self, centos127, archive1, make_tar_task, worker):
        # Adopting a task must activate the worker, leave the task in its
        # initial state and broadcast the new worker status exactly once.
        task = make_tar_task(1, centos127, 'johann-test_dir-1/', archive1)
        worker.adopt_task(task)
        assert worker.status == WorkerStatus.ACTIVE
        assert task.status == TaskStatus.INITIALIZED
        worker._msg.update_status.assert_called_once_with(new_status=WorkerStatus.ACTIVE)

    def test_copy_task(self, centos127, archive1, make_tar_task, worker):
        # After copy() the payload must exist at the target path and the
        # task must be marked COPIED.
        task = make_tar_task(1, centos127, 'johann-test_dir-1/', archive1)
        worker.adopt_task(task)
        worker.copy(task)
        assert os.path.exists(task.absolute_target_path)
        assert task.status == TaskStatus.COPIED
| import os
import pytest
from worker.task_status import TaskStatus
from worker.worker_class import Worker
from worker.worker_status import WorkerStatus
@pytest.mark.usefixtures('build_test_environment')
class TestWorker:
    """Integration tests for Worker task adoption and copying.

    Relies on environment fixtures (``centos127``, ``archive1``,
    ``make_tar_task``, ``worker``); ``worker._msg`` is presumably a mock,
    since its calls are asserted below — TODO confirm.
    """

    def test_adopt_task(self, centos127, archive1, make_tar_task, worker):
        # Adopting a task must activate the worker, leave the task in its
        # initial state and broadcast the new worker status exactly once.
        task = make_tar_task(1, centos127, 'johann-test_dir-1/', archive1)
        worker.adopt_task(task)
        assert worker.status == WorkerStatus.ACTIVE
        assert task.status == TaskStatus.INITIALIZED
        worker._msg.update_status.assert_called_once_with(new_status=WorkerStatus.ACTIVE)

    def test_copy_task(self, centos127, archive1, make_tar_task, worker):
        # After copy() the payload must exist at the target path and the
        # task must be marked COPIED.
        task = make_tar_task(1, centos127, 'johann-test_dir-1/', archive1)
        worker.adopt_task(task)
        worker.copy(task)
        assert os.path.exists(task.absolute_target_path)
assert task.status == TaskStatus.COPIED | none | 1 | 2.172186 | 2 | |
test/manualAreplTests/realTimePrints.py | manuth/LiveCode | 203 | 6617533 | from time import sleep
from datetime import datetime
# Two iterations, pausing 2 s before each timestamped print, to verify
# that output appears in real time rather than after the script exits.
x = 0
while x < 2:
    x += 1
    sleep(2)
    print(datetime.now())
###########################################
# Expected Result
###########################################
# prints should appear in real time | from time import sleep
from datetime import datetime
# Two iterations, pausing 2 s before each timestamped print, to verify
# that output appears in real time rather than after the script exits.
x = 0
while x < 2:
    x += 1
    sleep(2)
    print(datetime.now())
###########################################
# Expected Result
###########################################
# prints should appear in real time | de | 0.663011 | ########################################### # Expected Result ########################################### # prints should appear in real time | 3.729533 | 4 |
get_matches.py | SnippyHolloW/dota2_stats | 0 | 6617534 | <filename>get_matches.py
import dota2api
import joblib
import time
# Dota 2 Web API client; a valid key must be filled in before running.
api = dota2api.Initialise(api_key='')
# 64-bit Steam ID whose match history seeds the crawl.
my_id = 76561197997856332
private_id = 4294967295 # a private profile player's 32-bit Steam ID
hist = api.get_match_history(account_id=my_id)#, matches_requested=1)
print(len(hist['matches']))
cur_smid = hist['matches'][0]['match_seq_num'] # 0 = newest, -1 = oldest
print(cur_smid)
# keeping only very high skill (skill=3), captain mode (game_mode=2) matches
# Accumulator: match_id -> full match details dict.
kept_matches = {}
def get_matches(cur_smid):
    """
    Scan matches by sequence number starting at ``cur_smid``, keeping
    captains-mode games (game_mode=2) in which at least one public
    player also appears in very-high-skill (skill=3) match history.

    Kept matches accumulate in the module-level ``kept_matches`` dict,
    keyed by match id.  Returns the next sequence number to resume from.

    On any API failure the function prints a notice, sleeps 3 seconds
    and retries by recursing from the current sequence number.
    NOTE(review): a persistently failing API can therefore exhaust the
    recursion limit; an iterative retry loop would be more robust.
    """
    try:
        matches = api.get_match_history_by_seq_num(start_at_match_seq_num=cur_smid,
                game_mode=2)['matches']
    # Bug fix: these handlers were bare ``except:`` clauses, which also
    # swallowed KeyboardInterrupt/SystemExit into an endless retry loop.
    except Exception:
        print("waiting on the API")
        time.sleep(3) # wait 3 seconds
        return get_matches(cur_smid)
    for m in matches:
        cur_smid = m['match_seq_num']
        try:
            d = api.get_match_details(match_id=m['match_id'])
        except Exception:
            print("waiting on the API")
            time.sleep(3) # wait 3 seconds
            return get_matches(cur_smid)
        if d['game_mode'] == 2: #and d['human_players'] == 10:
            mid = m['match_id']
            if mid not in kept_matches:
                # Keep the match as soon as any public participant shows
                # up in very-high-skill match history.
                for p in d['players']:
                    pid = p['account_id']
                    if pid != private_id:
                        try:
                            tm = api.get_match_history(account_id=pid, skill=3,
                                    start_at_match_id=mid, matches_requested=1)
                        except Exception:
                            print("waiting on the API")
                            time.sleep(3) # wait 3 seconds
                            return get_matches(cur_smid)
                        if len(tm['matches']):
                            kept_matches[mid] = d
                            break
    return cur_smid + 1
# Crawl up to 1000 pages of matches, checkpointing the accumulated
# results to disk after every page so a crash loses at most one page.
for _ in xrange(1000):
    cur_smid = get_matches(cur_smid)
    joblib.dump(kept_matches, 'matches.joblib', compress=5)
    # A non-positive sequence number signals the end of the scan.
    if cur_smid <= 0:
        break
    print("dumped", len(kept_matches), "so far")
| <filename>get_matches.py
import dota2api
import joblib
import time
# Dota 2 Web API client; a valid key must be filled in before running.
api = dota2api.Initialise(api_key='')
# 64-bit Steam ID whose match history seeds the crawl.
my_id = 76561197997856332
private_id = 4294967295 # a private profile player's 32-bit Steam ID
hist = api.get_match_history(account_id=my_id)#, matches_requested=1)
print(len(hist['matches']))
cur_smid = hist['matches'][0]['match_seq_num'] # 0 = newest, -1 = oldest
print(cur_smid)
# keeping only very high skill (skill=3), captain mode (game_mode=2) matches
# Accumulator: match_id -> full match details dict.
kept_matches = {}
def get_matches(cur_smid):
    """
    Scan matches by sequence number starting at ``cur_smid``, keeping
    captains-mode games (game_mode=2) in which at least one public
    player also appears in very-high-skill (skill=3) match history.

    Kept matches accumulate in the module-level ``kept_matches`` dict,
    keyed by match id.  Returns the next sequence number to resume from.

    On any API failure the function prints a notice, sleeps 3 seconds
    and retries by recursing from the current sequence number.
    NOTE(review): a persistently failing API can therefore exhaust the
    recursion limit; an iterative retry loop would be more robust.
    """
    try:
        matches = api.get_match_history_by_seq_num(start_at_match_seq_num=cur_smid,
                game_mode=2)['matches']
    # Bug fix: these handlers were bare ``except:`` clauses, which also
    # swallowed KeyboardInterrupt/SystemExit into an endless retry loop.
    except Exception:
        print("waiting on the API")
        time.sleep(3) # wait 3 seconds
        return get_matches(cur_smid)
    for m in matches:
        cur_smid = m['match_seq_num']
        try:
            d = api.get_match_details(match_id=m['match_id'])
        except Exception:
            print("waiting on the API")
            time.sleep(3) # wait 3 seconds
            return get_matches(cur_smid)
        if d['game_mode'] == 2: #and d['human_players'] == 10:
            mid = m['match_id']
            if mid not in kept_matches:
                # Keep the match as soon as any public participant shows
                # up in very-high-skill match history.
                for p in d['players']:
                    pid = p['account_id']
                    if pid != private_id:
                        try:
                            tm = api.get_match_history(account_id=pid, skill=3,
                                    start_at_match_id=mid, matches_requested=1)
                        except Exception:
                            print("waiting on the API")
                            time.sleep(3) # wait 3 seconds
                            return get_matches(cur_smid)
                        if len(tm['matches']):
                            kept_matches[mid] = d
                            break
    return cur_smid + 1
# Crawl up to 1000 pages of matches, checkpointing the accumulated
# results to disk after every page so a crash loses at most one page.
for _ in xrange(1000):
    cur_smid = get_matches(cur_smid)
    joblib.dump(kept_matches, 'matches.joblib', compress=5)
    # A non-positive sequence number signals the end of the scan.
    if cur_smid <= 0:
        break
    print("dumped", len(kept_matches), "so far")
| en | 0.82011 | # a private profile player's 32-bit Steam ID #, matches_requested=1) # 0 = newest, -1 = oldest # keeping only very high skill (skill=3), captain mode (game_mode=2) matches # wait 3 seconds # wait 3 seconds #and d['human_players'] == 10: # wait 3 seconds | 2.768265 | 3 |
meiduo_mell/meiduo_mell/apps/users/views.py | jingbowen/meiduo_001 | 1 | 6617535 | from django.shortcuts import render,redirect
from django.views import View
from django.http import HttpResponse,HttpResponseForbidden,JsonResponse
import re
from .models import User
from django.db import DatabaseError
from django.contrib.auth import login,authenticate
from meiduo_mell.utils.response_code import RETCODE
from django_redis import get_redis_connection
# Create your views here.
class RegisterView(View):
    """Render the registration form and create new user accounts."""

    def get(self, request):
        return render(request, "register.html")

    def post(self, request):
        """
        Validate the registration form, check the SMS verification code
        stored in Redis, create the user and redirect to the home page.

        Returns 403 on missing/malformed fields and re-renders the form
        with an error message on verification or database failure.
        """
        username = request.POST.get("username")
        password = request.POST.get("password")
        password2 = request.POST.get("password2")
        mobile = request.POST.get("mobile")
        sms_code = request.POST.get("sms_code")
        image_code = request.POST.get("image_code")  # NOTE(review): currently unused
        allow = request.POST.get("allow")
        # Bug fix: the required-field check previously contained a redacted
        # "<PASSWORD>" placeholder instead of ``password2``, which is not
        # valid Python.
        if not all([username, password, password2, mobile, allow]):
            return HttpResponseForbidden("缺少必要的参数")
        if not re.match(r'^[A-Za-z0-9-_]{5,20}$', username):
            return HttpResponseForbidden("请输入5-20个字符的用户名")
        if not re.match(r'^[0-9A-Za-z]{8,20}$', password):
            return HttpResponseForbidden("请输入8-20位的密码")
        if password != password2:
            return HttpResponseForbidden("两次输入的密码不相等")
        if not re.match(r'^1[3-9]\d{9}$', mobile):
            return HttpResponseForbidden('请输入正确的手机号码')
        if allow != "on":
            return HttpResponseForbidden('请勾选用户协议')
        # Compare the submitted SMS code against the one stored in Redis.
        redis_conn = get_redis_connection("verify_code")
        sms_code_server = redis_conn.get("sms_%s" % mobile)
        if sms_code_server is None:
            return render(request, "register.html", {"sms_code_errmsg": "验证码无效"})
        if sms_code != sms_code_server.decode():
            return render(request, "register.html", {"sms_code_errmsg": "验证码错误"})
        try:
            User.objects.create_user(username=username, password=password, mobile=mobile)
        except DatabaseError:
            return render(request, 'register.html', {'register_errmsg': '注册失败'})
        # login(request, username)
        return redirect("/")
class UserNameContView(View):
    """Report how many users already have the given username."""

    def get(self, request, username):
        # AJAX duplicate-name check used by the registration form.
        payload = {
            'code': RETCODE.OK,
            'errmsg': 'OK',
            'count': User.objects.filter(username=username).count(),
        }
        return JsonResponse(payload)
class MobileCountView(View):
    """Report how many users are registered with the given mobile number."""

    def get(self, request, mobile):
        # AJAX duplicate-mobile check used by the registration form.
        payload = {
            'code': RETCODE.OK,
            'errmsg': 'OK',
            'count': User.objects.filter(mobile=mobile).count(),
        }
        return JsonResponse(payload)
class LoginView(View):
    """Render the login form and authenticate submitted credentials."""

    def get(self, request):
        return render(request, "login.html")

    def post(self, request):
        """
        Validate the posted credentials, log the user in and redirect home.

        Returns 403 for missing/malformed fields and re-renders the form
        with an error message when authentication fails.
        """
        username = request.POST.get("username")
        password = request.POST.get("password")
        remembered = request.POST.get("remembered")
        # Bug fix: this used to read ``if all([...]) is None:`` — all()
        # returns a bool, never None, so missing parameters were never
        # rejected and fell through to re.match(pattern, None) below.
        if not all([username, password]):
            return HttpResponseForbidden("缺少必传的参数")
        if not re.match(r'^[a-zA-Z0-9_-]{5,20}$', username):
            return HttpResponseForbidden('请输入正确的用户名或手机号')
        # Password must be 8-20 alphanumeric characters.
        if not re.match(r'^[0-9A-Za-z]{8,20}$', password):
            return HttpResponseForbidden('密码最少8位,最长20位')
        user = authenticate(username=username, password=password)
        if user is None:
            return render(request, "login.html", {"account_errmsg": "用户名或密码错误"})
        login(request, user)
        # Without "remember me", expire the session when the browser closes.
        if remembered != "on":
            request.session.set_expiry(0)
        return redirect("/")
| from django.shortcuts import render,redirect
from django.views import View
from django.http import HttpResponse,HttpResponseForbidden,JsonResponse
import re
from .models import User
from django.db import DatabaseError
from django.contrib.auth import login,authenticate
from meiduo_mell.utils.response_code import RETCODE
from django_redis import get_redis_connection
# Create your views here.
class RegisterView(View):
    """Render the registration form and create new user accounts."""

    def get(self, request):
        return render(request, "register.html")

    def post(self, request):
        """
        Validate the registration form, check the SMS verification code
        stored in Redis, create the user and redirect to the home page.

        Returns 403 on missing/malformed fields and re-renders the form
        with an error message on verification or database failure.
        """
        username = request.POST.get("username")
        password = request.POST.get("password")
        password2 = request.POST.get("password2")
        mobile = request.POST.get("mobile")
        sms_code = request.POST.get("sms_code")
        image_code = request.POST.get("image_code")  # NOTE(review): currently unused
        allow = request.POST.get("allow")
        # Bug fix: the required-field check previously contained a redacted
        # "<PASSWORD>" placeholder instead of ``password2``, which is not
        # valid Python.
        if not all([username, password, password2, mobile, allow]):
            return HttpResponseForbidden("缺少必要的参数")
        if not re.match(r'^[A-Za-z0-9-_]{5,20}$', username):
            return HttpResponseForbidden("请输入5-20个字符的用户名")
        if not re.match(r'^[0-9A-Za-z]{8,20}$', password):
            return HttpResponseForbidden("请输入8-20位的密码")
        if password != password2:
            return HttpResponseForbidden("两次输入的密码不相等")
        if not re.match(r'^1[3-9]\d{9}$', mobile):
            return HttpResponseForbidden('请输入正确的手机号码')
        if allow != "on":
            return HttpResponseForbidden('请勾选用户协议')
        # Compare the submitted SMS code against the one stored in Redis.
        redis_conn = get_redis_connection("verify_code")
        sms_code_server = redis_conn.get("sms_%s" % mobile)
        if sms_code_server is None:
            return render(request, "register.html", {"sms_code_errmsg": "验证码无效"})
        if sms_code != sms_code_server.decode():
            return render(request, "register.html", {"sms_code_errmsg": "验证码错误"})
        try:
            User.objects.create_user(username=username, password=password, mobile=mobile)
        except DatabaseError:
            return render(request, 'register.html', {'register_errmsg': '注册失败'})
        # login(request, username)
        return redirect("/")
class UserNameContView(View):
    """Report how many users already have the given username."""

    def get(self, request, username):
        # AJAX duplicate-name check used by the registration form.
        payload = {
            'code': RETCODE.OK,
            'errmsg': 'OK',
            'count': User.objects.filter(username=username).count(),
        }
        return JsonResponse(payload)
class MobileCountView(View):
    """Report how many users are registered with the given mobile number."""

    def get(self, request, mobile):
        # AJAX duplicate-mobile check used by the registration form.
        payload = {
            'code': RETCODE.OK,
            'errmsg': 'OK',
            'count': User.objects.filter(mobile=mobile).count(),
        }
        return JsonResponse(payload)
class LoginView(View):
    """Render the login form and authenticate submitted credentials."""

    def get(self, request):
        return render(request, "login.html")

    def post(self, request):
        """
        Validate the posted credentials, log the user in and redirect home.

        Returns 403 for missing/malformed fields and re-renders the form
        with an error message when authentication fails.
        """
        username = request.POST.get("username")
        password = request.POST.get("password")
        remembered = request.POST.get("remembered")
        # Bug fix: this used to read ``if all([...]) is None:`` — all()
        # returns a bool, never None, so missing parameters were never
        # rejected and fell through to re.match(pattern, None) below.
        if not all([username, password]):
            return HttpResponseForbidden("缺少必传的参数")
        if not re.match(r'^[a-zA-Z0-9_-]{5,20}$', username):
            return HttpResponseForbidden('请输入正确的用户名或手机号')
        # Password must be 8-20 alphanumeric characters.
        if not re.match(r'^[0-9A-Za-z]{8,20}$', password):
            return HttpResponseForbidden('密码最少8位,最长20位')
        user = authenticate(username=username, password=password)
        if user is None:
            return render(request, "login.html", {"account_errmsg": "用户名或密码错误"})
        login(request, user)
        # Without "remember me", expire the session when the browser closes.
        if remembered != "on":
            request.session.set_expiry(0)
        return redirect("/")
| en | 0.573625 | # Create your views here. # login(request, username) # 判断密码是否是8-20个数字 | 2.148727 | 2 |
api/urls/user.py | KWY-Q/SDUST-ACMER | 2 | 6617536 | from django.conf.urls import url
from api.views.user import current_user, current_user_info
from api.views.user import get_user_info
from api.views.user import search_user_username, search_user_nickname
from api.views.user import create_user_post, login_post, logout
from api.views.user import modify_user_info_post, change_password_post
from api.views.user import follow_user_post, unfollow_user_post
urlpatterns = [
    # Identity of the currently authenticated user.
    url(r'^current-user/', current_user, name='current_user'),
    url(r'^current-user-info/', current_user_info, name='current_user_info'),
    # Public profile lookup and search.
    url(r'^get-user-info/(\S+)/', get_user_info, name='get_user_info'),
    # NOTE(review): the next two routes share name='search_user', so
    # reverse('search_user') cannot distinguish them — confirm whether
    # distinct names were intended.
    url(r'^search-user-username/(\S+)/', search_user_username, name='search_user'),
    url(r'^search-user-nickname/(\S+)/', search_user_nickname, name='search_user'),
    # Account lifecycle.
    url(r'^register/', create_user_post, name='register'),
    url(r'^login/', login_post, name='login'),
    url(r'^logout/', logout, name='logout'),
    url(r'^modify/', modify_user_info_post, name='modify'),
    url(r'^change-password/', change_password_post, name='change_password'),
    # Social graph.
    url(r'^follow/', follow_user_post, name='follow_user'),
    url(r'^unfollow/', unfollow_user_post, name='unfollow_user'),
]
| from django.conf.urls import url
from api.views.user import current_user, current_user_info
from api.views.user import get_user_info
from api.views.user import search_user_username, search_user_nickname
from api.views.user import create_user_post, login_post, logout
from api.views.user import modify_user_info_post, change_password_post
from api.views.user import follow_user_post, unfollow_user_post
urlpatterns = [
    # Identity of the currently authenticated user.
    url(r'^current-user/', current_user, name='current_user'),
    url(r'^current-user-info/', current_user_info, name='current_user_info'),
    # Public profile lookup and search.
    url(r'^get-user-info/(\S+)/', get_user_info, name='get_user_info'),
    # NOTE(review): the next two routes share name='search_user', so
    # reverse('search_user') cannot distinguish them — confirm whether
    # distinct names were intended.
    url(r'^search-user-username/(\S+)/', search_user_username, name='search_user'),
    url(r'^search-user-nickname/(\S+)/', search_user_nickname, name='search_user'),
    # Account lifecycle.
    url(r'^register/', create_user_post, name='register'),
    url(r'^login/', login_post, name='login'),
    url(r'^logout/', logout, name='logout'),
    url(r'^modify/', modify_user_info_post, name='modify'),
    url(r'^change-password/', change_password_post, name='change_password'),
    # Social graph.
    url(r'^follow/', follow_user_post, name='follow_user'),
    url(r'^unfollow/', unfollow_user_post, name='unfollow_user'),
]
| none | 1 | 1.995306 | 2 | |
output/pipeline_output.py | joristork/milovision | 8 | 6617537 | <reponame>joristork/milovision
#
# Milovision: A camera pose estimation programme
#
# Copyright (C) 2013 <NAME>
# See LICENSE.txt
#
# pipeline_output.py
"""
:synopsis: Contains the Pipeline_Output class, which holds all useful data
produced by one pipeline run.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import logging
import time
import numpy as np
class Pipeline_Output(object):
"""
Holds all relevant pose estimation and performance data emerging from the
pipeline.
"""
def __init__(self, sim = False):
"""
Sets simulator, camera, marker and timestamp attributes. Records time at
which each image is received (secs since Epoch).
"""
self.start_time = time.time()
self.sim = sim
self.cam = None
self.markers = []
self.est_markers = []
self.end_time = None
def set(self, sim = False, cam = None, markers = None, estimates = None):
"""
sets:
sim: (boolean) flag to indicate whether this was a simulation
cam: (GL_Camera_Vals or Real_Camera_Vals) camera's key parameters
markers: (Marker or GL_Marker) fiducial marker object
est_markers: (Marker) pipeline pose estimation values
"""
if sim:
self.sim = sim
if cam:
self.cam = cam
if markers:
self.markers = markers
if est_markers:
self.est_markers = estimates
    def complete(self, failed = False):
        """
        Records the time (secs since Epoch) at which all values have been
        filled in.

        NOTE(review): ``failed`` is currently ignored; callers may pass
        it but it has no effect — confirm whether failure should be
        recorded.
        """
        self.end_time = time.time()
def time(self):
""" returns time from instantiation to completion, in seconds """
return self.end_time - self.start_time
def reset_markers_and_time(self):
""" prepares output for next pipeline loop """
self.start_time = time.time()
self.markers = []
self.est_markers = []
def get_est_Cs_flat_mm(self):
"""
Returns estimated centres in a nx3 flat array of 3d vectors (useful for
single marker). Note: posea always generates two estimates per
est_marker.
"""
eCs = np.zeros((len(self.est_markers)*2,3))
for i, m in enumerate(self.est_markers): # any nr of markers
for j, e in enumerate(m.get_C_mm()): # two centre estimates
for k, c in enumerate(e): # three coordinates
eCs[i*2+j,k] = c
return eCs
def get_est_Ns_flat_mm(self):
"""
Returns estimated normals in a nx3 flat array of 3d vectors (useful for
single marker). Note: posea always generates two estimates per
est_marker.
"""
enrms = np.zeros((len(self.est_markers)*2,3))
for i, m in enumerate(self.est_markers): # any nr of markers
for j, n in enumerate(m.get_N_mm()): # two normal estimates
for k, c in enumerate(n): # three coordinates
enrms[i*2+j,k] = c
return enrms
def get_data(self, stub= False, match= False, get= None):
"""
This function is designed to facilitate the retrieval of data to produce
plots as in the printer module, by returning data a dict whose keys are
the names of the required classes of data, such as 'est. Cs' for
estimated centres.
The array of vectors corresonding to each key is in chronological order
by virtue of the same order of the outputs array.
The function checks whether the combination of parameters is
contradictory and returns None if the data in the current output object
is incomplete for the given data request.
"get" specifies the required data classes as a list of ID strings.
If "match" is set, the actual values are duplicated so that the arrays
of actual values are as long as the arrays of estimates and each actual
value has a corresonding estimated value at the same array index in the
appropriate array.
If "stub" is set, the dict is returned with empty lists as values.
NB: this function does not take multiple marker scenes into account.
"""
if stub:
stub = {}
for key in get:
stub[key] = []
return stub
data = {}
nr_eCs, nr_eNs, nr_Cs, nr_Ns = 0, 0, 0, 0
recognised = 0
if match and ('recognition' in get):
self.logger.error('tried to retrieve recognition in matched mode')
return None
eCs = self.get_est_Cs_flat_mm()
nr_eCs = len(eCs)
C = self.markers[0].get_C_mm()
nr_Cs = len(C)
eNs = self.get_est_Ns_flat_mm()
nr_eNs = len(eNs)
N = self.markers[0].get_N_mm()
nr_Ns = len(N)
if 'est. Cs' in get:
if nr_eCs:
data['est. Cs'] = eCs
elif match:
return None
else:
data['est. Cs'] = []
if 'actual Cs' in get:
if match and nr_eCs and nr_Cs:
data['actual Cs'] = np.tile(C, nr_eCs).reshape(nr_eCs, 3)
elif match:
return None
elif nr_Cs:
data['actual Cs'] = np.tile(C, nr_Cs).reshape(nr_Cs, 3)
else:
data['actual Cs'] = []
if 'est. Ns' in get:
if nr_eNs:
data['est. Ns'] = eNs
elif match:
return None
else:
data['est. Ns'] = []
if 'actual Ns' in get:
if match and nr_eNs and nr_Ns:
data['actual Ns'] = np.tile(N, nr_eNs).reshape(nr_eNs, 3)
elif match:
return None
elif nr_Ns:
data['actual Ns'] = np.tile(N, nr_Ns).reshape(nr_Ns, 3)
else:
data['actual Ns'] = []
if 'recognition' in get:
if (nr_eCs and nr_Cs):
data['recognition'] = np.ones((1,1))
elif nr_Cs:
data['recognition'] = np.zeros((1,1))
elif nr_eCs + nr_Cs + nr_eNs + nr_Ns == 0:
return None
return data
| #
# Milovision: A camera pose estimation programme
#
# Copyright (C) 2013 <NAME>
# See LICENSE.txt
#
# pipeline_output.py
"""
:synopsis: Contains the Pipeline_Output class, which holds all useful data
produced by one pipeline run.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import logging
import time
import numpy as np
class Pipeline_Output(object):
"""
Holds all relevant pose estimation and performance data emerging from the
pipeline.
"""
def __init__(self, sim = False):
"""
Sets simulator, camera, marker and timestamp attributes. Records time at
which each image is received (secs since Epoch).
"""
self.start_time = time.time()
self.sim = sim
self.cam = None
self.markers = []
self.est_markers = []
self.end_time = None
def set(self, sim = False, cam = None, markers = None, estimates = None):
"""
sets:
sim: (boolean) flag to indicate whether this was a simulation
cam: (GL_Camera_Vals or Real_Camera_Vals) camera's key parameters
markers: (Marker or GL_Marker) fiducial marker object
est_markers: (Marker) pipeline pose estimation values
"""
if sim:
self.sim = sim
if cam:
self.cam = cam
if markers:
self.markers = markers
if est_markers:
self.est_markers = estimates
def complete(self, failed = False):
""" records time at which all values have been filled in """
self.end_time = time.time()
def time(self):
""" returns time from instantiation to completion, in seconds """
return self.end_time - self.start_time
def reset_markers_and_time(self):
""" prepares output for next pipeline loop """
self.start_time = time.time()
self.markers = []
self.est_markers = []
def get_est_Cs_flat_mm(self):
"""
Returns estimated centres in a nx3 flat array of 3d vectors (useful for
single marker). Note: posea always generates two estimates per
est_marker.
"""
eCs = np.zeros((len(self.est_markers)*2,3))
for i, m in enumerate(self.est_markers): # any nr of markers
for j, e in enumerate(m.get_C_mm()): # two centre estimates
for k, c in enumerate(e): # three coordinates
eCs[i*2+j,k] = c
return eCs
def get_est_Ns_flat_mm(self):
"""
Returns estimated normals in a nx3 flat array of 3d vectors (useful for
single marker). Note: posea always generates two estimates per
est_marker.
"""
enrms = np.zeros((len(self.est_markers)*2,3))
for i, m in enumerate(self.est_markers): # any nr of markers
for j, n in enumerate(m.get_N_mm()): # two normal estimates
for k, c in enumerate(n): # three coordinates
enrms[i*2+j,k] = c
return enrms
def get_data(self, stub= False, match= False, get= None):
"""
This function is designed to facilitate the retrieval of data to produce
plots as in the printer module, by returning data a dict whose keys are
the names of the required classes of data, such as 'est. Cs' for
estimated centres.
The array of vectors corresonding to each key is in chronological order
by virtue of the same order of the outputs array.
The function checks whether the combination of parameters is
contradictory and returns None if the data in the current output object
is incomplete for the given data request.
"get" specifies the required data classes as a list of ID strings.
If "match" is set, the actual values are duplicated so that the arrays
of actual values are as long as the arrays of estimates and each actual
value has a corresonding estimated value at the same array index in the
appropriate array.
If "stub" is set, the dict is returned with empty lists as values.
NB: this function does not take multiple marker scenes into account.
"""
if stub:
stub = {}
for key in get:
stub[key] = []
return stub
data = {}
nr_eCs, nr_eNs, nr_Cs, nr_Ns = 0, 0, 0, 0
recognised = 0
if match and ('recognition' in get):
self.logger.error('tried to retrieve recognition in matched mode')
return None
eCs = self.get_est_Cs_flat_mm()
nr_eCs = len(eCs)
C = self.markers[0].get_C_mm()
nr_Cs = len(C)
eNs = self.get_est_Ns_flat_mm()
nr_eNs = len(eNs)
N = self.markers[0].get_N_mm()
nr_Ns = len(N)
if 'est. Cs' in get:
if nr_eCs:
data['est. Cs'] = eCs
elif match:
return None
else:
data['est. Cs'] = []
if 'actual Cs' in get:
if match and nr_eCs and nr_Cs:
data['actual Cs'] = np.tile(C, nr_eCs).reshape(nr_eCs, 3)
elif match:
return None
elif nr_Cs:
data['actual Cs'] = np.tile(C, nr_Cs).reshape(nr_Cs, 3)
else:
data['actual Cs'] = []
if 'est. Ns' in get:
if nr_eNs:
data['est. Ns'] = eNs
elif match:
return None
else:
data['est. Ns'] = []
if 'actual Ns' in get:
if match and nr_eNs and nr_Ns:
data['actual Ns'] = np.tile(N, nr_eNs).reshape(nr_eNs, 3)
elif match:
return None
elif nr_Ns:
data['actual Ns'] = np.tile(N, nr_Ns).reshape(nr_Ns, 3)
else:
data['actual Ns'] = []
if 'recognition' in get:
if (nr_eCs and nr_Cs):
data['recognition'] = np.ones((1,1))
elif nr_Cs:
data['recognition'] = np.zeros((1,1))
elif nr_eCs + nr_Cs + nr_eNs + nr_Ns == 0:
return None
return data | en | 0.813254 | # # Milovision: A camera pose estimation programme # # Copyright (C) 2013 <NAME> # See LICENSE.txt # # pipeline_output.py :synopsis: Contains the Pipeline_Output class, which holds all useful data produced by one pipeline run. .. moduleauthor:: <NAME> <<EMAIL>> Holds all relevant pose estimation and performance data emerging from the pipeline. Sets simulator, camera, marker and timestamp attributes. Records time at which each image is received (secs since Epoch). sets: sim: (boolean) flag to indicate whether this was a simulation cam: (GL_Camera_Vals or Real_Camera_Vals) camera's key parameters markers: (Marker or GL_Marker) fiducial marker object est_markers: (Marker) pipeline pose estimation values records time at which all values have been filled in returns time from instantiation to completion, in seconds prepares output for next pipeline loop Returns estimated centres in a nx3 flat array of 3d vectors (useful for single marker). Note: posea always generates two estimates per est_marker. # any nr of markers # two centre estimates # three coordinates Returns estimated normals in a nx3 flat array of 3d vectors (useful for single marker). Note: posea always generates two estimates per est_marker. # any nr of markers # two normal estimates # three coordinates This function is designed to facilitate the retrieval of data to produce plots as in the printer module, by returning data a dict whose keys are the names of the required classes of data, such as 'est. Cs' for estimated centres. The array of vectors corresonding to each key is in chronological order by virtue of the same order of the outputs array. The function checks whether the combination of parameters is contradictory and returns None if the data in the current output object is incomplete for the given data request. "get" specifies the required data classes as a list of ID strings. 
If "match" is set, the actual values are duplicated so that the arrays of actual values are as long as the arrays of estimates and each actual value has a corresonding estimated value at the same array index in the appropriate array. If "stub" is set, the dict is returned with empty lists as values. NB: this function does not take multiple marker scenes into account. | 2.665876 | 3 |
tests/all_of_tests.py | TakenBrandi/python-precisely | 238 | 6617538 | <reponame>TakenBrandi/python-precisely
import collections
from nose.tools import istest, assert_equal
from precisely import all_of, has_attr, equal_to
from precisely.results import matched, unmatched
User = collections.namedtuple("User", ["username", "email_address"])
@istest
def matches_when_submatchers_all_match():
    # A user satisfying every sub-matcher should produce a match result.
    bob_matcher = all_of(
        has_attr("username", equal_to("bob")),
        has_attr("email_address", equal_to("<EMAIL>")),
    )
    result = bob_matcher.match(User("bob", "<EMAIL>"))
    assert_equal(matched(), result)
@istest
def mismatches_when_submatcher_mismatches():
    # A plain string has no username attribute, so the first sub-matcher fails.
    bob_matcher = all_of(
        has_attr("username", equal_to("bob")),
        has_attr("email_address", equal_to("<EMAIL>")),
    )
    result = bob_matcher.match("bobbity")
    assert_equal(unmatched("was missing attribute username"), result)
@istest
def description_contains_descriptions_of_submatchers():
    # The composite description lists each sub-matcher on its own bullet line.
    bob_matcher = all_of(
        has_attr("username", equal_to("bob")),
        has_attr("email_address", equal_to("<EMAIL>")),
    )
    expected = "all of:\n * object with attribute username: 'bob'\n * object with attribute email_address: '<EMAIL>'"
    assert_equal(expected, bob_matcher.describe())
| import collections
from nose.tools import istest, assert_equal
from precisely import all_of, has_attr, equal_to
from precisely.results import matched, unmatched
User = collections.namedtuple("User", ["username", "email_address"])
@istest
def matches_when_submatchers_all_match():
matcher = all_of(
has_attr("username", equal_to("bob")),
has_attr("email_address", equal_to("<EMAIL>")),
)
assert_equal(matched(), matcher.match(User("bob", "<EMAIL>")))
@istest
def mismatches_when_submatcher_mismatches():
matcher = all_of(
has_attr("username", equal_to("bob")),
has_attr("email_address", equal_to("<EMAIL>")),
)
assert_equal(
unmatched("was missing attribute username"),
matcher.match("bobbity")
)
@istest
def description_contains_descriptions_of_submatchers():
matcher = all_of(
has_attr("username", equal_to("bob")),
has_attr("email_address", equal_to("<EMAIL>")),
)
assert_equal(
"all of:\n * object with attribute username: 'bob'\n * object with attribute email_address: '<EMAIL>'",
matcher.describe()
) | none | 1 | 2.673227 | 3 | |
tests/model/extraction/test_torch_vs_tensorflow.py | ViCCo-Group/THINGSvision | 45 | 6617539 | <reponame>ViCCo-Group/THINGSvision
import unittest
import tests.helper as helper
from thingsvision.model_class import Model
from thingsvision.dataloader import DataLoader
import numpy as np
class ExtractionPTvsTFTestCase(unittest.TestCase):
    # Verifies that feature extraction through the TensorFlow backend and the
    # PyTorch backend produces identical activations for equivalent toy models.
    @classmethod
    def setUpClass(cls):
        # Create the image fixtures shared by the extraction tests.
        helper.create_test_images()
    def test_custom_torch_vs_tf_extraction(self):
        # Extract 'relu' activations for inputs [2, -10] with a hand-built TF
        # model, repeat with the equivalent PyTorch model, then compare the two
        # and check against the analytic ReLU output.
        layer_name = 'relu'
        values = [2, -10]
        backend = 'tf'
        tf_dataset = helper.SimpleDataset(values, backend)
        tf_dl = DataLoader(
            tf_dataset,
            batch_size=1,
            backend=backend,
        )
        model = Model('VGG16', pretrained=False,
                      device=helper.DEVICE, backend=backend)
        # Swap in the small deterministic TF model from the test helpers.
        model.model = helper.tf_model
        tf_features, _ = model.extract_features(
            tf_dl, layer_name, batch_size=helper.BATCH_SIZE, flatten_acts=False)
        backend = 'pt'
        pt_dataset = helper.SimpleDataset(values, backend)
        pt_dl = DataLoader(
            pt_dataset,
            batch_size=1,
            backend=backend,
        )
        model = Model('vgg16', pretrained=False,
                      device=helper.DEVICE, backend=backend)
        # Swap in the matching deterministic PyTorch model.
        model.model = helper.pt_model
        pt_features, _ = model.extract_features(
            pt_dl, layer_name, batch_size=helper.BATCH_SIZE, flatten_acts=False)
        np.testing.assert_allclose(tf_features, pt_features)
        # ReLU(2) == 2 and ReLU(-10) == 0 for both units of the toy model.
        expected_features = np.array([[2, 2], [0, 0]])
        np.testing.assert_allclose(pt_features, expected_features)
| import unittest
import tests.helper as helper
from thingsvision.model_class import Model
from thingsvision.dataloader import DataLoader
import numpy as np
class ExtractionPTvsTFTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
helper.create_test_images()
def test_custom_torch_vs_tf_extraction(self):
layer_name = 'relu'
values = [2, -10]
backend = 'tf'
tf_dataset = helper.SimpleDataset(values, backend)
tf_dl = DataLoader(
tf_dataset,
batch_size=1,
backend=backend,
)
model = Model('VGG16', pretrained=False,
device=helper.DEVICE, backend=backend)
model.model = helper.tf_model
tf_features, _ = model.extract_features(
tf_dl, layer_name, batch_size=helper.BATCH_SIZE, flatten_acts=False)
backend = 'pt'
pt_dataset = helper.SimpleDataset(values, backend)
pt_dl = DataLoader(
pt_dataset,
batch_size=1,
backend=backend,
)
model = Model('vgg16', pretrained=False,
device=helper.DEVICE, backend=backend)
model.model = helper.pt_model
pt_features, _ = model.extract_features(
pt_dl, layer_name, batch_size=helper.BATCH_SIZE, flatten_acts=False)
np.testing.assert_allclose(tf_features, pt_features)
expected_features = np.array([[2, 2], [0, 0]])
np.testing.assert_allclose(pt_features, expected_features) | none | 1 | 2.621297 | 3 | |
cafcoding/tools/utils.py | maguelo/cafcoding | 1 | 6617540 | <gh_stars>1-10
import os
from cafcoding.tools import log
import itertools
import time
import datetime
import logging
logger = logging.getLogger('ETL')
def create_dir(path):
    """Create directory *path*, logging the outcome.

    Failures (e.g. the directory already exists or the parent is missing)
    are logged and swallowed rather than raised, preserving the original
    best-effort behaviour.
    """
    try:
        os.mkdir(path)
    except OSError:
        logger.error("Creation of the directory %s failed" % path)
    else:
        # BUGFIX: the success message was previously emitted via logger.error;
        # report it at info level instead.
        logger.info("Successfully created the directory %s " % path)
def get_timestamp():
    """Return the current local time formatted as 'YYYYMMDD_HHMMSS'."""
    now = datetime.datetime.fromtimestamp(time.time())
    return now.strftime("%Y%m%d_%H%M%S")
def iterator_product(*args):
"""
Generate all possibilities to mix N list of params.
Return list with all combos and len of if
"""
product_len=1
for param in args:
product_len*=len(param)
return itertools.product(*args), product_len | import os
from cafcoding.tools import log
import itertools
import time
import datetime
import logging
logger = logging.getLogger('ETL')
def create_dir(path):
try:
os.mkdir(path)
except OSError:
logger.error("Creation of the directory %s failed" % path)
else:
logger.error("Successfully created the directory %s " % path)
def get_timestamp():
today = datetime.datetime.fromtimestamp(time.time())
return today.strftime("%Y%m%d_%H%M%S")
def iterator_product(*args):
"""
Generate all possibilities to mix N list of params.
Return list with all combos and len of if
"""
product_len=1
for param in args:
product_len*=len(param)
return itertools.product(*args), product_len | en | 0.752771 | Generate all possibilities to mix N list of params. Return list with all combos and len of if | 2.774687 | 3 |
rl/core/oracles/oracles.py | gtrll/librl | 5 | 6617541 | # Copyright (c) 2019 Georgia Tech Robot Learning Lab
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
from rl.core.function_approximators.normalizers import NormalizerStd, Normalizer
from rl.core.oracles import Oracle
class LikelihoodRatioOracle(Oracle):
    """
    An Oracle based on the loss function below: if use_log_loss is True
    E_{x} E_{y ~ q | x} [ w * log p(y|x) * f(x, y) ]
    otherwise, it uses
    E_{x} E_{y ~ q | x} [ p(y|x)/q(y|x) * f(x, y) ]
    where p is the variable distribution, q is a constant
    distribution, and f is a scalar function.
    When w = p/q, then the gradients of two loss functions are equivalent.
    The expectation is approximated by unbiased samples from q. To minimize
    the variance of sampled gradients, the implementation of 'grad' is
    based on a normalizer, which can shift, rescale, or clip f.
    """
    def __init__(self, logp_fun, logp_grad,
                 nor=None, biased=False,
                 use_log_loss=False, normalized_is=False):
        """
        logp_fun: variable -> logp
        logp_grad: variable, f -> E[ f \nabla logp]
        """
        self._logp_fun = logp_fun
        self._logp_grad = logp_grad  # sum
        self._biased = biased
        self._use_log_loss = use_log_loss
        self._normalized_is = normalized_is  # normalized importance sampling
        if nor is None:
            if biased:  # use the current samples
                self._nor = NormalizerStd((1,), unscale=True, clip_thre=None, momentum=0.0)
            else:  # use a moving average
                self._nor = NormalizerStd((1,), unscale=True, clip_thre=None, momentum=None)
        else:
            assert isinstance(nor, Normalizer)
            self._nor = nor
    def fun(self, x):
        # Monte-Carlo estimate of the surrogate loss at `x`.  Requires that
        # `update` was called first to cache `_f` and `_w_or_logq`.
        f = self._f
        w_or_logq = self._w_or_logq
        logp = self._logp_fun(x)
        if self._use_log_loss:  # w_or_logq is w
            w = w_or_logq
            loss = np.sum(w *f *logp)
        else:  # w_or_logq is logq
            w = np.exp(logp - w_or_logq)
            loss = np.sum(w*f)
        if self._normalized_is:  # normalized importance sampling
            return loss / np.sum(w)
        else:  # regular importance sampling
            return loss / f.shape[0]
    def grad(self, x):
        # Monte-Carlo gradient of the surrogate loss at `x`, using the cached
        # samples from the most recent `update` call.
        f = self._f
        w_or_logq = self._w_or_logq
        if self._use_log_loss:  # w_or_logq is w
            w = w_or_logq
        else:  # w_or_logq is logq
            logp = self._logp_fun(x)
            w = np.exp(logp - w_or_logq)
        wf = w*f
        # NOTE(review): debug prints left in a hot path -- consider removing
        # or routing through a logger.
        print('w', w.min(), w.max(), w.mean())
        print('wf', wf.min(), wf.max(), wf.mean())
        grad = self._logp_grad(x, wf)  # sum
        if self._normalized_is:  # normalized importance sampling
            return grad / np.sum(w)
        else:  # regular importance sampling
            return grad / f.shape[0]
    def update(self, f, w_or_logq, update_nor=True):
        """ Update the function with Monte-Carlo samples.
            f: sampled function values
            w_or_logq: importance weight or the log probability of the sampling distribution
            update_nor: whether to update the normalizer using the current sample
        """
        # Biased mode folds the current sample into the normalizer *before*
        # normalizing; unbiased mode updates it afterwards (below).
        if self._biased:
            self._nor.update(f)
        f_normalized = self._nor.normalize(f)  # cv
        if self._use_log_loss:  # w_or_logq is w
            assert np.all(w_or_logq >= 0)
        # these are treated as constants
        assert f_normalized.shape==w_or_logq.shape
        self._f = f_normalized
        self._w_or_logq = w_or_logq
        if not self._biased and update_nor:
            self._nor.update(f)
| # Copyright (c) 2019 Georgia Tech Robot Learning Lab
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
from rl.core.function_approximators.normalizers import NormalizerStd, Normalizer
from rl.core.oracles import Oracle
class LikelihoodRatioOracle(Oracle):
"""
An Oracle based on the loss function below: if use_log_loss is True
E_{x} E_{y ~ q | x} [ w * log p(y|x) * f(x, y) ]
otherwise, it uses
E_{x} E_{y ~ q | x} [ p(y|x)/q(y|x) * f(x, y) ]
where p is the variable distribution, q is a constant
distribution, and f is a scalar function.
When w = p/q, then the gradients of two loss functions are equivalent.
The expectation is approximated by unbiased samples from q. To minimize
the variance of sampled gradients, the implementation of 'grad' is
based on a normalizer, which can shift, rescale, or clip f.
"""
def __init__(self, logp_fun, logp_grad,
nor=None, biased=False,
use_log_loss=False, normalized_is=False):
"""
logp_fun: variable -> logp
logp_grad: variable, f -> E[ f \nabla logp]
"""
self._logp_fun = logp_fun
self._logp_grad = logp_grad # sum
self._biased = biased
self._use_log_loss = use_log_loss
self._normalized_is = normalized_is # normalized importance sampling
if nor is None:
if biased: # use the current samples
self._nor = NormalizerStd((1,), unscale=True, clip_thre=None, momentum=0.0)
else: # use a moving average
self._nor = NormalizerStd((1,), unscale=True, clip_thre=None, momentum=None)
else:
assert isinstance(nor, Normalizer)
self._nor = nor
def fun(self, x):
f = self._f
w_or_logq = self._w_or_logq
logp = self._logp_fun(x)
if self._use_log_loss: # w_or_logq is w
w = w_or_logq
loss = np.sum(w *f *logp)
else: # w_or_logq is logq
w = np.exp(logp - w_or_logq)
loss = np.sum(w*f)
if self._normalized_is: # normalized importance sampling
return loss / np.sum(w)
else: # regular importance sampling
return loss / f.shape[0]
def grad(self, x):
f = self._f
w_or_logq = self._w_or_logq
if self._use_log_loss: # w_or_logq is w
w = w_or_logq
else: # w_or_logq is logq
logp = self._logp_fun(x)
w = np.exp(logp - w_or_logq)
wf = w*f
print('w', w.min(), w.max(), w.mean())
print('wf', wf.min(), wf.max(), wf.mean())
grad = self._logp_grad(x, wf) # sum
if self._normalized_is: # normalized importance sampling
return grad / np.sum(w)
else: # regular importance sampling
return grad / f.shape[0]
def update(self, f, w_or_logq, update_nor=True):
""" Update the function with Monte-Carlo samples.
f: sampled function values
w_or_logq: importance weight or the log probability of the sampling distribution
update_nor: whether to update the normalizer using the current sample
"""
if self._biased:
self._nor.update(f)
f_normalized = self._nor.normalize(f) # cv
if self._use_log_loss: # w_or_logq is w
assert np.all(w_or_logq >= 0)
# these are treated as constants
assert f_normalized.shape==w_or_logq.shape
self._f = f_normalized
self._w_or_logq = w_or_logq
if not self._biased and update_nor:
self._nor.update(f)
| en | 0.807549 | # Copyright (c) 2019 Georgia Tech Robot Learning Lab # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. An Oracle based on the loss function below: if use_log_loss is True E_{x} E_{y ~ q | x} [ w * log p(y|x) * f(x, y) ] otherwise, it uses E_{x} E_{y ~ q | x} [ p(y|x)/q(y|x) * f(x, y) ] where p is the variable distribution, q is a constant distribution, and f is a scalar function. When w = p/q, then the gradients of two loss functions are equivalent. The expectation is approximated by unbiased samples from q. To minimize the variance of sampled gradients, the implementation of 'grad' is based on a normalizer, which can shift, rescale, or clip f. logp_fun: variable -> logp logp_grad: variable, f -> E[ f \nabla logp] # sum # normalized importance sampling # use the current samples # use a moving average # w_or_logq is w # w_or_logq is logq # normalized importance sampling # regular importance sampling # w_or_logq is w # w_or_logq is logq # sum # normalized importance sampling # regular importance sampling Update the function with Monte-Carlo samples. f: sampled function values w_or_logq: importance weight or the log probability of the sampling distribution update_nor: whether to update the normalizer using the current sample # cv # w_or_logq is w # these are treated as constants | 2.784949 | 3 |
udocker/helper/unshare.py | vsoch/udocker | 0 | 6617542 | # -*- coding: utf-8 -*-
"""Basic unshare for udocker maintenance"""
import os
import ctypes
import subprocess
from udocker.msg import Msg
from udocker.helper.hostinfo import HostInfo
from udocker.helper.nixauth import NixAuthentication
class Unshare(object):
    """Place a process in a namespace"""

    # Linux clone(2)/unshare(2) flags selecting which namespaces to create.
    CLONE_NEWNS = 0x20000
    CLONE_NEWUTS = 0x4000000
    CLONE_NEWIPC = 0x8000000
    CLONE_NEWUSER = 0x10000000
    CLONE_NEWPID = 0x20000000
    CLONE_NEWNET = 0x40000000

    def unshare(self, flags):
        """Python implementation of unshare.

        Calls libc's unshare(2) with the given flag mask.  Returns True on
        success, False on failure (errors are reported through Msg).
        """
        try:
            # use_errno=True makes ctypes snapshot errno after each foreign
            # call so the real failure reason can be reported below.
            _unshare = ctypes.CDLL("libc.so.6", use_errno=True).unshare
        except OSError:
            Msg().err("Error: in unshare: mapping libc")
            return False
        _unshare.restype = ctypes.c_int
        _unshare.argtypes = (ctypes.c_int, )
        if _unshare(flags) == -1:
            # BUGFIX: previously reported os.strerror(-1) (i.e. the call's
            # return value), which never described the actual error; use the
            # errno captured by ctypes instead.
            Msg().err("Error: in unshare:", os.strerror(ctypes.get_errno()))
            return False
        return True

    def namespace_exec(self, method, flags=CLONE_NEWUSER):
        """Execute command in namespace.

        Forks; the parent writes the child's uid/gid maps (via newuidmap /
        newgidmap) while the child unshares into a new user namespace,
        becomes root inside it and runs `method`.  The two pipe pairs are
        used purely for parent/child synchronisation.  Returns True when the
        child's exit status is zero.
        """
        (pread1, pwrite1) = os.pipe()
        (pread2, pwrite2) = os.pipe()
        cpid = os.fork()
        if cpid:
            # --- parent ---
            os.close(pwrite1)
            os.read(pread1, 1)  # wait until the child has unshared
            user = HostInfo().username()
            newidmap = ["newuidmap", str(cpid), "0", str(HostInfo.uid), "1"]
            for (subid, subcount) in NixAuthentication().user_in_subuid(user):
                newidmap.extend(["1", subid, subcount])
            subprocess.call(newidmap)
            newidmap = ["newgidmap", str(cpid), "0", str(HostInfo.uid), "1"]
            for (subid, subcount) in NixAuthentication().user_in_subgid(user):
                newidmap.extend(["1", subid, subcount])
            subprocess.call(newidmap)
            os.close(pwrite2)  # notify the child that the id maps are written
            (dummy, status) = os.waitpid(cpid, 0)
            if status % 256:
                Msg().err("Error: namespace exec action failed")
                return False
            return True
        else:
            # --- child ---
            self.unshare(flags)
            os.close(pwrite2)
            os.close(pwrite1)  # notify the parent that we have unshared
            os.read(pread2, 1)  # wait for the uid/gid maps to be written
            try:
                os.setgid(0)
                os.setuid(0)
                os.setgroups([0, 0, ])
            except OSError:
                # NOTE(review): returning here lets the forked child continue
                # as a duplicate of the caller instead of exiting -- confirm
                # whether os._exit(1) would be the intended behaviour.
                Msg().err("Error: setting ids and groups")
                return False
            # pylint: disable=protected-access
            os._exit(int(method()))
        return False
| # -*- coding: utf-8 -*-
"""Basic unshare for udocker maintenance"""
import os
import ctypes
import subprocess
from udocker.msg import Msg
from udocker.helper.hostinfo import HostInfo
from udocker.helper.nixauth import NixAuthentication
class Unshare(object):
"""Place a process in a namespace"""
CLONE_NEWNS = 0x20000
CLONE_NEWUTS = 0x4000000
CLONE_NEWIPC = 0x8000000
CLONE_NEWUSER = 0x10000000
CLONE_NEWPID = 0x20000000
CLONE_NEWNET = 0x40000000
def unshare(self, flags):
"""Python implementation of unshare"""
try:
_unshare = ctypes.CDLL("libc.so.6").unshare
except OSError:
Msg().err("Error: in unshare: mapping libc")
return False
_unshare.restype = ctypes.c_int
_unshare.argtypes = (ctypes.c_int, )
if _unshare(flags) == -1:
Msg().err("Error: in unshare:", os.strerror(-1))
return False
return True
def namespace_exec(self, method, flags=CLONE_NEWUSER):
"""Execute command in namespace"""
(pread1, pwrite1) = os.pipe()
(pread2, pwrite2) = os.pipe()
cpid = os.fork()
if cpid:
os.close(pwrite1)
os.read(pread1, 1) # wait
user = HostInfo().username()
newidmap = ["newuidmap", str(cpid), "0", str(HostInfo.uid), "1"]
for (subid, subcount) in NixAuthentication().user_in_subuid(user):
newidmap.extend(["1", subid, subcount])
subprocess.call(newidmap)
newidmap = ["newgidmap", str(cpid), "0", str(HostInfo.uid), "1"]
for (subid, subcount) in NixAuthentication().user_in_subgid(user):
newidmap.extend(["1", subid, subcount])
subprocess.call(newidmap)
os.close(pwrite2) # notify
(dummy, status) = os.waitpid(cpid, 0)
if status % 256:
Msg().err("Error: namespace exec action failed")
return False
return True
else:
self.unshare(flags)
os.close(pwrite2)
os.close(pwrite1) # notify
os.read(pread2, 1) # wait
try:
os.setgid(0)
os.setuid(0)
os.setgroups([0, 0, ])
except OSError:
Msg().err("Error: setting ids and groups")
return False
# pylint: disable=protected-access
os._exit(int(method()))
return False
| en | 0.566022 | # -*- coding: utf-8 -*- Basic unshare for udocker maintenance Place a process in a namespace Python implementation of unshare Execute command in namespace # wait # notify # notify # wait # pylint: disable=protected-access | 2.201332 | 2 |
training/Local Pipeline/combining.py | navh/postal-parser | 0 | 6617543 | filenames = ['conll14.txt', 'conll15.txt', 'conll16.txt', 'conll57.txt']
# Concatenate the listed CoNLL files into one combined output file.
# NOTE(review): mode 'a' appends on every run, so re-running the script
# duplicates data -- confirm this is intentional ('w' would rebuild the file).
with open('can_us_conll.txt', 'a') as outfile:
    for fname in filenames:
        with open(fname) as infile:
            # Each source file is read fully into memory before being written.
            outfile.write(infile.read())
with open('can_us_conll.txt', 'a') as outfile:
for fname in filenames:
with open(fname) as infile:
outfile.write(infile.read()) | none | 1 | 2.781437 | 3 | |
secure_ec2/main.py | avishayil/secure_ec2 | 6 | 6617544 | <reponame>avishayil/secure_ec2<filename>secure_ec2/main.py
import click
from secure_ec2.commands.config import config
from secure_ec2.commands.launch import launch
@click.group(help="CLI tool that helps you to provision EC2 instances securely")
def cli():
    """Root Click command group; sub-commands are registered below."""
    pass
# Attach the available sub-commands to the root group.
cli.add_command(config)
cli.add_command(launch)
if __name__ == "__main__":
    cli()
| import click
from secure_ec2.commands.config import config
from secure_ec2.commands.launch import launch
@click.group(help="CLI tool that helps you to provision EC2 instances securely")
def cli():
pass
cli.add_command(config)
cli.add_command(launch)
if __name__ == "__main__":
cli() | none | 1 | 1.846735 | 2 | |
examples/interop/autobahn-python/echo_callee.py | deinstapel/nexus | 231 | 6617545 | <filename>examples/interop/autobahn-python/echo_callee.py
#
# Python WAMP client: callee that handles RPC from echo_caller.py
#
# Install dependencies:
# make
#
# Run this client:
# ./pyenv/bin/python echo_callee.py
#
import asyncio
from autobahn.asyncio.wamp import ApplicationSession
from autobahn.asyncio.component import Component, run
class Backend(ApplicationSession):
    """WAMP callee session: joins realm1 and serves 'test.echo.payload'."""
    def __init__(self, config=None):
        # Pin the session to realm1, overriding the caller-supplied config.
        config.realm = "realm1"
        super().__init__(config)
    async def onJoin(self, details):
        # Register the echo RPC once the session has joined the realm.
        print('Register test_echo_payload')
        await self.register(self.test_echo_payload, 'test.echo.payload')
    async def onDisconnect(self):
        # Stop the asyncio loop so the script exits once the session drops.
        loop = asyncio.get_event_loop()
        loop.stop()
    async def test_echo_payload(self, value: bytes) -> bytes:
        # Echo handler: warns when the payload arrived as something other
        # than bytes, then returns the value unchanged either way.
        if not isinstance(value, bytes):
            print('Value is not an instance of bytes but is a %s' % (type(value)))
        return value
if __name__ == '__main__':
    # Build a WAMP component that connects to the local router over
    # WebSocket using msgpack serialization, retrying up to 3 times.
    backend = Component(
        session_factory = Backend,
        transports = [
            {
                'type': 'websocket',
                'serializers': ['msgpack'],
                'url': 'ws://localhost:8080/ws',
                'max_retries': 3
            }
        ]
    )
    run([backend])
| <filename>examples/interop/autobahn-python/echo_callee.py
#
# Python WAMP client: callee that handles RPC from echo_caller.py
#
# Install dependencies:
# make
#
# Run this client:
# ./pyenv/bin/python echo_callee.py
#
import asyncio
from autobahn.asyncio.wamp import ApplicationSession
from autobahn.asyncio.component import Component, run
class Backend(ApplicationSession):
def __init__(self, config=None):
config.realm = "realm1"
super().__init__(config)
async def onJoin(self, details):
print('Register test_echo_payload')
await self.register(self.test_echo_payload, 'test.echo.payload')
async def onDisconnect(self):
loop = asyncio.get_event_loop()
loop.stop()
async def test_echo_payload(self, value: bytes) -> bytes:
if not isinstance(value, bytes):
print('Value is not an instance of bytes but is a %s' % (type(value)))
return value
if __name__ == '__main__':
backend = Component(
session_factory = Backend,
transports = [
{
'type': 'websocket',
'serializers': ['msgpack'],
'url': 'ws://localhost:8080/ws',
'max_retries': 3
}
]
)
run([backend])
| en | 0.659079 | # # Python WAMP client: callee that handles RPC from echo_caller.py # # Install dependencies: # make # # Run this client: # ./pyenv/bin/python echo_callee.py # | 2.241015 | 2 |
src/in_out/xml_parameters.py | EuroPOND/deformetrica | 1 | 6617546 | import math
import os
import warnings
import xml.etree.ElementTree as et
import torch
from torch.multiprocessing import set_start_method
from support.utilities.general_settings import Settings
class XmlParameters:
"""
XmlParameters object class.
Parses input xmls and stores the given parameters.
"""
####################################################################################################################
### Constructor:
####################################################################################################################
    def __init__(self):
        """Set every parsed parameter to its default value.

        All attributes below may later be overwritten by the xml readers
        (`_read_model_xml`, `_read_dataset_xml`,
        `_read_optimization_parameters_xml`); the values here are the
        defaults used when the corresponding xml entry is absent.
        """
        # --- Model section defaults ---
        self.model_type = 'undefined'
        self.template_specifications = {}
        self.deformation_kernel_width = 0
        self.deformation_kernel_type = 'torch'
        self.number_of_time_points = 11
        self.concentration_of_time_points = 10
        self.number_of_sources = None
        self.use_rk2 = False
        self.t0 = None
        self.tmin = float('inf')
        self.tmax = - float('inf')
        self.initial_cp_spacing = -1
        self.dimension = 3
        self.covariance_momenta_prior_normalized_dof = 0.001

        # --- Dataset section defaults (filled by _read_dataset_xml) ---
        self.dataset_filenames = []
        self.visit_ages = []
        self.subject_ids = []

        # --- Optimization section defaults ---
        self.optimization_method_type = 'undefined'
        self.optimized_log_likelihood = 'complete'
        self.number_of_threads = 1
        self.max_iterations = 100
        self.max_line_search_iterations = 10
        self.save_every_n_iters = 10
        self.print_every_n_iters = 1
        self.sample_every_n_mcmc_iters = 50
        self.use_sobolev_gradient = True
        self.sobolev_kernel_width_ratio = 1
        self.initial_step_size = 0.001
        self.line_search_shrink = 0.5
        self.line_search_expand = 1.5
        self.convergence_tolerance = 1e-4
        self.memory_length = 10
        self.scale_initial_step_size = True
        self.downsampling_factor = 1

        self.dense_mode = False

        self.use_cuda = False
        self._cuda_is_used = False  # true if at least one operation will use CUDA.

        self.state_file = None

        # --- Flags freezing (i.e. excluding from optimization) fixed effects ---
        self.freeze_template = False
        self.freeze_control_points = True
        self.freeze_momenta = False
        self.freeze_modulation_matrix = False
        self.freeze_reference_time = False
        self.freeze_time_shift_variance = False
        self.freeze_log_acceleration_variance = False
        self.freeze_noise_variance = False

        # For metric learning atlas
        self.freeze_metric_parameters = False
        self.freeze_p0 = False
        self.freeze_v0 = False

        # --- Optional initial values / files for model effects ---
        self.initial_control_points = None
        self.initial_momenta = None
        self.initial_modulation_matrix = None
        self.initial_time_shift_variance = None
        self.initial_log_acceleration_mean = None
        self.initial_log_acceleration_variance = None
        self.initial_onset_ages = None
        self.initial_log_accelerations = None
        self.initial_sources = None
        self.initial_sources_mean = None
        self.initial_sources_std = None

        self.use_exp_parallelization = True

        self.initial_control_points_to_transport = None

        # --- Proposal std-s for the MCMC samplers ---
        self.momenta_proposal_std = 0.01
        self.onset_age_proposal_std = 0.1
        self.log_acceleration_proposal_std = 0.01
        self.sources_proposal_std = 0.01

        self.gradient_based_estimator = None  # Not connected to anything yet.

        # For scalar inputs:
        self.group_file = None
        self.observations_file = None
        self.timepoints_file = None
        self.v0 = None
        self.p0 = None
        self.metric_parameters_file = None
        self.interpolation_points_file = None
        self.initial_noise_variance = None
        self.exponential_type = None
        self.number_of_metric_parameters = None  # number of parameters in metric learning.
        self.number_of_interpolation_points = None
        self.latent_space_dimension = None  # For deep metric learning

        self.normalize_image_intensity = False
        self.initialization_heuristic = False
####################################################################################################################
### Public methods:
####################################################################################################################
# Read the parameters from the three PyDeformetrica input xmls, and some further parameters initialization.
    def read_all_xmls(self, model_xml_path, dataset_xml_path, optimization_parameters_xml_path):
        """Parse the three input xmls (model, dataset, optimization) in order,
        then run the cross-parameter consistency/initialization pass.

        The order matters: `_further_initialization` relies on values set by
        all three readers (e.g. visit ages, model type, cuda flags).
        """
        self._read_model_xml(model_xml_path)
        self._read_dataset_xml(dataset_xml_path)
        self._read_optimization_parameters_xml(optimization_parameters_xml_path)
        self._further_initialization()
####################################################################################################################
### Private methods:
####################################################################################################################
# Read the parameters from the model xml.
    def _read_model_xml(self, model_xml_path):
        """Parse the model xml file and store its entries on self.

        File paths found in the xml are resolved relative to the directory
        containing the model xml. 'initial-*-std' entries are squared and
        stored as variances. A 'keops' kernel type anywhere flags CUDA usage.
        Unknown tags only trigger a warning, they are never fatal.
        """
        model_xml_level0 = et.parse(model_xml_path).getroot()

        for model_xml_level1 in model_xml_level0:

            if model_xml_level1.tag.lower() == 'model-type':
                self.model_type = model_xml_level1.text.lower()

            elif model_xml_level1.tag.lower() == 'dimension':
                self.dimension = int(model_xml_level1.text)
                Settings().dimension = self.dimension

            elif model_xml_level1.tag.lower() == 'initial-control-points':
                self.initial_control_points = os.path.normpath(
                    os.path.join(os.path.dirname(model_xml_path), model_xml_level1.text))

            elif model_xml_level1.tag.lower() == 'initial-momenta':
                self.initial_momenta = os.path.normpath(
                    os.path.join(os.path.dirname(model_xml_path), model_xml_level1.text))

            elif model_xml_level1.tag.lower() == 'initial-modulation-matrix':
                self.initial_modulation_matrix = os.path.normpath(
                    os.path.join(os.path.dirname(model_xml_path), model_xml_level1.text))

            # Std entries are stored squared, i.e. as variances.
            elif model_xml_level1.tag.lower() == 'initial-time-shift-std':
                self.initial_time_shift_variance = float(model_xml_level1.text) ** 2

            elif model_xml_level1.tag.lower() == 'initial-log-acceleration-std':
                self.initial_log_acceleration_variance = float(model_xml_level1.text) ** 2

            elif model_xml_level1.tag.lower() == 'initial-log-acceleration-mean':
                self.initial_log_acceleration_mean = float(model_xml_level1.text)

            elif model_xml_level1.tag.lower() == 'initial-onset-ages':
                self.initial_onset_ages = os.path.normpath(
                    os.path.join(os.path.dirname(model_xml_path), model_xml_level1.text))

            elif model_xml_level1.tag.lower() == 'initial-log-accelerations':
                self.initial_log_accelerations = os.path.normpath(
                    os.path.join(os.path.dirname(model_xml_path), model_xml_level1.text))

            elif model_xml_level1.tag.lower() == 'initial-sources':
                self.initial_sources = os.path.normpath(
                    os.path.join(os.path.dirname(model_xml_path), model_xml_level1.text))

            elif model_xml_level1.tag.lower() == 'initial-sources-mean':
                self.initial_sources_mean = model_xml_level1.text

            elif model_xml_level1.tag.lower() == 'initial-sources-std':
                self.initial_sources_std = model_xml_level1.text

            elif model_xml_level1.tag.lower() == 'initial-momenta-to-transport':
                self.initial_momenta_to_transport = os.path.normpath(
                    os.path.join(os.path.dirname(model_xml_path), model_xml_level1.text))

            elif model_xml_level1.tag.lower() == 'initial-control-points-to-transport':
                self.initial_control_points_to_transport = os.path.normpath(
                    os.path.join(os.path.dirname(model_xml_path), model_xml_level1.text))

            elif model_xml_level1.tag.lower() == 'initial-noise-std':
                self.initial_noise_variance = float(model_xml_level1.text)**2

            elif model_xml_level1.tag.lower() == 'latent-space-dimension':
                self.latent_space_dimension = int(model_xml_level1.text)

            # Template section: one <object> sub-entry per deformable object.
            elif model_xml_level1.tag.lower() == 'template':
                for model_xml_level2 in model_xml_level1:

                    if model_xml_level2.tag.lower() == 'dense-mode':
                        self.dense_mode = self._on_off_to_bool(model_xml_level2.text)

                    elif model_xml_level2.tag.lower() == 'object':
                        # Start from the default spec and overwrite entries found in the xml.
                        template_object = self._initialize_template_object_xml_parameters()
                        for model_xml_level3 in model_xml_level2:
                            if model_xml_level3.tag.lower() == 'deformable-object-type':
                                template_object['deformable_object_type'] = model_xml_level3.text.lower()
                            elif model_xml_level3.tag.lower() == 'attachment-type':
                                template_object['attachment_type'] = model_xml_level3.text.lower()
                            elif model_xml_level3.tag.lower() == 'kernel-width':
                                template_object['kernel_width'] = float(model_xml_level3.text)
                            elif model_xml_level3.tag.lower() == 'kernel-type':
                                template_object['kernel_type'] = model_xml_level3.text.lower()
                                if model_xml_level3.text.lower() == 'keops'.lower():
                                    self._cuda_is_used = True
                            elif model_xml_level3.tag.lower() == 'noise-std':
                                template_object['noise_std'] = float(model_xml_level3.text)
                            elif model_xml_level3.tag.lower() == 'filename':
                                template_object['filename'] = os.path.normpath(
                                    os.path.join(os.path.dirname(model_xml_path), model_xml_level3.text))
                            elif model_xml_level3.tag.lower() == 'noise-variance-prior-scale-std':
                                template_object['noise_variance_prior_scale_std'] = float(model_xml_level3.text)
                            elif model_xml_level3.tag.lower() == 'noise-variance-prior-normalized-dof':
                                template_object['noise_variance_prior_normalized_dof'] = float(model_xml_level3.text)
                            else:
                                msg = 'Unknown entry while parsing the template > ' + model_xml_level2.attrib['id'] + \
                                      ' object section of the model xml: ' + model_xml_level3.tag
                                warnings.warn(msg)

                        # Objects are keyed by their xml 'id' attribute.
                        self.template_specifications[model_xml_level2.attrib['id']] = template_object
                    else:
                        msg = 'Unknown entry while parsing the template section of the model xml: ' \
                              + model_xml_level2.tag
                        warnings.warn(msg)

            elif model_xml_level1.tag.lower() == 'deformation-parameters':
                for model_xml_level2 in model_xml_level1:
                    if model_xml_level2.tag.lower() == 'kernel-width':
                        self.deformation_kernel_width = float(model_xml_level2.text)
                    elif model_xml_level2.tag.lower() == 'exponential-type':
                        self.exponential_type = model_xml_level2.text
                    elif model_xml_level2.tag.lower() == 'kernel-type':
                        self.deformation_kernel_type = model_xml_level2.text.lower()
                        if model_xml_level2.text.lower() == 'keops'.lower():
                            self._cuda_is_used = True
                    elif model_xml_level2.tag.lower() == 'number-of-timepoints':
                        self.number_of_time_points = int(model_xml_level2.text)
                    elif model_xml_level2.tag.lower() == 'number-of-interpolation-points':
                        self.number_of_interpolation_points = int(model_xml_level2.text)
                    elif model_xml_level2.tag.lower() == 'concentration-of-timepoints':
                        self.concentration_of_time_points = int(model_xml_level2.text)
                    elif model_xml_level2.tag.lower() == 'number-of-sources':
                        self.number_of_sources = int(model_xml_level2.text)
                    elif model_xml_level2.tag.lower() == 't0':
                        self.t0 = float(model_xml_level2.text)
                    elif model_xml_level2.tag.lower() == 'tmin':
                        self.tmin = float(model_xml_level2.text)
                    elif model_xml_level2.tag.lower() == 'tmax':
                        self.tmax = float(model_xml_level2.text)
                    elif model_xml_level2.tag.lower() == 'p0':
                        self.p0 = model_xml_level2.text
                    elif model_xml_level2.tag.lower() == 'v0':
                        self.v0 = model_xml_level2.text
                    elif model_xml_level2.tag.lower() == 'metric-parameters-file':  # for metric learning
                        self.metric_parameters_file = model_xml_level2.text
                    elif model_xml_level2.tag.lower() == 'interpolation-points-file':  # for metric learning
                        self.interpolation_points_file = model_xml_level2.text
                    elif model_xml_level2.tag.lower() == 'covariance-momenta-prior-normalized-dof':
                        self.covariance_momenta_prior_normalized_dof = float(model_xml_level2.text)
                    else:
                        msg = 'Unknown entry while parsing the deformation-parameters section of the model xml: ' \
                              + model_xml_level2.tag
                        warnings.warn(msg)

            elif model_xml_level1.tag.lower() == 'use-exp-parallelization':
                self.use_exp_parallelization = self._on_off_to_bool(model_xml_level1.text)

            else:
                msg = 'Unknown entry while parsing root of the model xml: ' + model_xml_level1.tag
                warnings.warn(msg)
# Read the parameters from the dataset xml.
def _read_dataset_xml(self, dataset_xml_path):
if dataset_xml_path is not None:
dataset_xml_level0 = et.parse(dataset_xml_path).getroot()
data_set_xml_dirname = os.path.dirname(dataset_xml_path)
dataset_filenames = []
visit_ages = []
subject_ids = []
for dataset_xml_level1 in dataset_xml_level0:
if dataset_xml_level1.tag.lower() == 'subject':
subject_ids.append(dataset_xml_level1.attrib['id'])
subject_filenames = []
subject_ages = []
for dataset_xml_level2 in dataset_xml_level1:
if dataset_xml_level2.tag.lower() == 'visit':
visit_filenames = {}
for dataset_xml_level3 in dataset_xml_level2:
if dataset_xml_level3.tag.lower() == 'filename':
visit_filenames[dataset_xml_level3.attrib['object_id']] = os.path.normpath(
os.path.join(data_set_xml_dirname, dataset_xml_level3.text))
elif dataset_xml_level3.tag.lower() == 'age':
subject_ages.append(float(dataset_xml_level3.text))
subject_filenames.append(visit_filenames)
dataset_filenames.append(subject_filenames)
visit_ages.append(subject_ages)
# For scalar input, following leasp model
if dataset_xml_level1.tag.lower() == 'group-file':
self.group_file = dataset_xml_level1.text
if dataset_xml_level1.tag.lower() == 'timepoints-file':
self.timepoints_file = dataset_xml_level1.text
if dataset_xml_level1.tag.lower() == 'observations-file':
self.observations_file = dataset_xml_level1.text
self.dataset_filenames = dataset_filenames
self.visit_ages = visit_ages
self.subject_ids = subject_ids
# Read the parameters from the optimization_parameters xml.
def _read_optimization_parameters_xml(self, optimization_parameters_xml_path):
optimization_parameters_xml_level0 = et.parse(optimization_parameters_xml_path).getroot()
for optimization_parameters_xml_level1 in optimization_parameters_xml_level0:
if optimization_parameters_xml_level1.tag.lower() == 'optimization-method-type':
self.optimization_method_type = optimization_parameters_xml_level1.text.lower()
elif optimization_parameters_xml_level1.tag.lower() == 'optimized-log-likelihood':
self.optimized_log_likelihood = optimization_parameters_xml_level1.text.lower()
elif optimization_parameters_xml_level1.tag.lower() == 'number-of-threads':
self.number_of_threads = int(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'max-iterations':
self.max_iterations = int(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'convergence-tolerance':
self.convergence_tolerance = float(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'memory-length':
self.memory_length = int(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'downsampling-factor':
self.downsampling_factor = int(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'save-every-n-iters':
self.save_every_n_iters = int(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'print-every-n-iters':
self.print_every_n_iters = int(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'sample-every-n-mcmc-iters':
self.sample_every_n_mcmc_iters = int(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'use-sobolev-gradient':
self.use_sobolev_gradient = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'sobolev-kernel-width-ratio':
self.sobolev_kernel_width_ratio = float(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'initial-step-size':
self.initial_step_size = float(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'freeze-template':
self.freeze_template = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'freeze-cp':
self.freeze_control_points = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'use-cuda':
self.use_cuda = self._on_off_to_bool(optimization_parameters_xml_level1.text)
if self.use_cuda:
self._cuda_is_used = True
elif optimization_parameters_xml_level1.tag.lower() == 'max-line-search-iterations':
self.max_line_search_iterations = int(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'normalize-image-intensity':
self.normalize_image_intensity = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'use-exp-parallelization':
self.use_exp_parallelization = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'state-file':
self.state_file = optimization_parameters_xml_level1.text
elif optimization_parameters_xml_level1.tag.lower() == 'use-rk2':
self.use_rk2 = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'momenta-proposal-std':
self.momenta_proposal_std = float(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'onset-age-proposal-std':
self.onset_age_proposal_std = float(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'log-acceleration-proposal-std':
self.log_acceleration_proposal_std = float(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'sources-proposal-std':
self.sources_proposal_std = float(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'scale-initial-step-size':
self.scale_initial_step_size = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'initialization-heuristic':
self.initialization_heuristic = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'freeze-v0':
self.freeze_v0 = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'freeze-p0':
self.freeze_p0 = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'freeze-modulation-matrix':
self.freeze_modulation_matrix = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'freeze-reference-time':
self.freeze_reference_time = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'freeze-time-shift-variance':
self.freeze_time_shift_variance = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'freeze-log-acceleration-variance':
self.freeze_log_acceleration_variance = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'freeze-reference-time':
self.freeze_reference_time = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'freeze-noise-variance':
self.freeze_noise_variancee = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'gradient-based-estimator':
self.gradient_based_estimator = optimization_parameters_xml_level1.text
else:
msg = 'Unknown entry while parsing the optimization_parameters xml: ' \
+ optimization_parameters_xml_level1.tag
warnings.warn(msg)
# Default xml parameters for any template object.
def _initialize_template_object_xml_parameters(self):
template_object = {}
template_object['deformable_object_type'] = 'undefined'
template_object['kernel_type'] = 'undefined'
template_object['kernel_width'] = 0.0
template_object['noise_std'] = -1
template_object['filename'] = 'undefined'
template_object['noise_variance_prior_scale_std'] = None
template_object['noise_variance_prior_normalized_dof'] = 0.01
return template_object
def _on_off_to_bool(self, s):
if s.lower() == "on":
return True
elif s.lower() == "off":
return False
else:
raise RuntimeError("Please give a valid flag (on, off)")
# Based on the raw read parameters, further initialization of some remaining ones.
def _further_initialization(self):
if self.dense_mode:
Settings().dense_mode = self.dense_mode
print('>> Dense mode activated. No distinction will be made between template and control points.')
assert len(self.template_specifications) == 1, \
'Only a single object can be considered when using the dense mode.'
if not self.freeze_control_points:
self.freeze_control_points = True
msg = 'With active dense mode, the freeze_template (currently %s) and freeze_control_points ' \
'(currently %s) flags are redundant. Defaulting to freeze_control_points = True.' \
% (str(self.freeze_template), str(self.freeze_control_points))
warnings.warn(msg)
if self.initial_control_points is not None:
self.initial_control_points = None
msg = 'With active dense mode, specifying initial_control_points is useless. Ignoring this xml entry.'
warnings.warn(msg)
if self.initial_cp_spacing < 0 and self.initial_control_points is None and not self.dense_mode:
print('>> No initial CP spacing given: using diffeo kernel width of ' + str(self.deformation_kernel_width))
self.initial_cp_spacing = self.deformation_kernel_width
# Setting tensor types according to CUDA availability and user choices.
if self._cuda_is_used:
if not torch.cuda.is_available():
msg = 'CUDA seems to be unavailable. All computations will be carried out on CPU.'
warnings.warn(msg)
else:
print(">> CUDA is used at least in one operation, all operations will be done with FLOAT precision.")
if self.use_cuda:
print(">> All tensors will be CUDA tensors.")
Settings().tensor_scalar_type = torch.cuda.FloatTensor
Settings().tensor_integer_type = torch.cuda.LongTensor
else:
print(">> Setting tensor type to float.")
Settings().tensor_scalar_type = torch.FloatTensor
# Setting the dimension.
Settings().dimension = self.dimension
# If longitudinal model and t0 is not initialized, initializes it.
if (self.model_type == 'regression' or self.model_type == 'LongitudinalAtlas'.lower()
or self.model_type == 'LongitudinalRegistration'.lower()) \
and (self.t0 is None or self.initial_time_shift_variance is None):
total_number_of_visits = 0
mean_visit_age = 0.0
var_visit_age = 0.0
for i in range(len(self.visit_ages)):
for j in range(len(self.visit_ages[i])):
total_number_of_visits += 1
mean_visit_age += self.visit_ages[i][j]
var_visit_age += self.visit_ages[i][j] ** 2
if total_number_of_visits > 0:
mean_visit_age /= float(total_number_of_visits)
var_visit_age = (var_visit_age / float(total_number_of_visits) - mean_visit_age ** 2)
if self.t0 is None:
print('>> Initial t0 set to the mean visit age: %.2f' % mean_visit_age)
self.t0 = mean_visit_age
else:
print('>> Initial t0 set by the user to %.2f ; note that the mean visit age is %.2f'
% (self.t0, mean_visit_age))
if not self.model_type == 'regression':
if self.initial_time_shift_variance is None:
print('>> Initial time-shift std set to the empirical std of the visit ages: %.2f'
% math.sqrt(var_visit_age))
self.initial_time_shift_variance = var_visit_age
else:
print(('>> Initial time-shift std set by the user to %.2f ; note that the empirical std of '
'the visit ages is %.2f') % (self.initial_time_shift_variance, math.sqrt(var_visit_age)))
# Setting the number of threads in general settings
Settings().number_of_threads = self.number_of_threads
if self.number_of_threads > 1:
print(">> I will use", self.number_of_threads,
"threads, and I set OMP_NUM_THREADS and torch_num_threads to 1.")
os.environ['OMP_NUM_THREADS'] = "1"
torch.set_num_threads(1)
else:
print('>> Setting OMP_NUM_THREADS and torch_num_threads to 4.')
os.environ['OMP_NUM_THREADS'] = "4"
torch.set_num_threads(4)
try:
set_start_method("spawn")
except RuntimeError as error:
print('>> Warning: ' + str(error) + ' [ in xml_parameters ]. Ignoring.')
self._initialize_state_file()
# Freeze the fixed effects in case of a registration.
if self.model_type == 'Registration'.lower():
self.freeze_template = True
self.freeze_control_points = True
elif self.model_type == 'LongitudinalRegistration'.lower():
self.freeze_template = True
self.freeze_control_points = True
self.freeze_momenta = True
self.freeze_modulation_matrix = True
self.freeze_reference_time = True
self.freeze_time_shift_variance = True
self.freeze_log_acceleration_variance = True
self.freeze_noise_variance = True
# Initialize the number of sources if needed.
if self.model_type == 'LongitudinalAtlas'.lower() \
and self.initial_modulation_matrix is None and self.number_of_sources is None:
self.number_of_sources = 4
print('>> No initial modulation matrix given, neither a number of sources. '
'The latter will be ARBITRARILY defaulted to 4.')
if self.dimension <= 1:
print("Setting the number of sources to 0 because the dimension is 1.")
self.number_of_sources = 0
# Initialize the initial_log_acceleration_variance if needed.
if (self.model_type == 'LongitudinalAtlas'.lower() or self.model_type == 'LongitudinalRegistration'.lower()) \
and self.initial_log_acceleration_variance is None:
print('>> The initial log-acceleration std fixed effect is ARBITRARILY set to 0.5')
log_acceleration_std = 0.5
self.initial_log_acceleration_variance = (log_acceleration_std ** 2)
# Image grid downsampling factor.
if not self.downsampling_factor == 1:
image_object_specs = [(key, value) for key, value in self.template_specifications.items()
if value['deformable_object_type'].lower() == 'image']
if len(image_object_specs) > 2:
raise RuntimeError('Only a single image object can be used.')
elif len(image_object_specs) == 1:
print('>> Setting the image grid downsampling factor to: %d.' % self.downsampling_factor)
self.template_specifications[image_object_specs[0][0]]['downsampling_factor'] = self.downsampling_factor
else:
msg = 'The "downsampling_factor" parameter is useful only for image data, ' \
'but none is considered here. Ignoring.'
warnings.warn(msg)
def _initialize_state_file(self):
"""
If a state file was given, assert the file exists and set Settings() so that the estimators will try to resume the computations
If a state file was not given, We automatically create one
"""
if self.state_file is None:
self.state_file = os.path.join(Settings().output_dir, "pydef_state.p")
else:
Settings().state_file = self.state_file
if os.path.exists(self.state_file):
Settings().load_state = True
print(">> Will attempt to resume computation from file", self.state_file)
else:
msg = "A state file was given, but it does not exist. I will save the new state on this file nonetheless."
warnings.warn(msg)
print(">> State will be saved in file", self.state_file)
| import math
import os
import warnings
import xml.etree.ElementTree as et
import torch
from torch.multiprocessing import set_start_method
from support.utilities.general_settings import Settings
class XmlParameters:
"""
XmlParameters object class.
Parses input xmls and stores the given parameters.
"""
####################################################################################################################
### Constructor:
####################################################################################################################
def __init__(self):
self.model_type = 'undefined'
self.template_specifications = {}
self.deformation_kernel_width = 0
self.deformation_kernel_type = 'torch'
self.number_of_time_points = 11
self.concentration_of_time_points = 10
self.number_of_sources = None
self.use_rk2 = False
self.t0 = None
self.tmin = float('inf')
self.tmax = - float('inf')
self.initial_cp_spacing = -1
self.dimension = 3
self.covariance_momenta_prior_normalized_dof = 0.001
self.dataset_filenames = []
self.visit_ages = []
self.subject_ids = []
self.optimization_method_type = 'undefined'
self.optimized_log_likelihood = 'complete'
self.number_of_threads = 1
self.max_iterations = 100
self.max_line_search_iterations = 10
self.save_every_n_iters = 10
self.print_every_n_iters = 1
self.sample_every_n_mcmc_iters = 50
self.use_sobolev_gradient = True
self.sobolev_kernel_width_ratio = 1
self.initial_step_size = 0.001
self.line_search_shrink = 0.5
self.line_search_expand = 1.5
self.convergence_tolerance = 1e-4
self.memory_length = 10
self.scale_initial_step_size = True
self.downsampling_factor = 1
self.dense_mode = False
self.use_cuda = False
self._cuda_is_used = False # true if at least one operation will use CUDA.
self.state_file = None
self.freeze_template = False
self.freeze_control_points = True
self.freeze_momenta = False
self.freeze_modulation_matrix = False
self.freeze_reference_time = False
self.freeze_time_shift_variance = False
self.freeze_log_acceleration_variance = False
self.freeze_noise_variance = False
# For metric learning atlas
self.freeze_metric_parameters = False
self.freeze_p0 = False
self.freeze_v0 = False
self.initial_control_points = None
self.initial_momenta = None
self.initial_modulation_matrix = None
self.initial_time_shift_variance = None
self.initial_log_acceleration_mean = None
self.initial_log_acceleration_variance = None
self.initial_onset_ages = None
self.initial_log_accelerations = None
self.initial_sources = None
self.initial_sources_mean = None
self.initial_sources_std = None
self.use_exp_parallelization = True
self.initial_control_points_to_transport = None
self.momenta_proposal_std = 0.01
self.onset_age_proposal_std = 0.1
self.log_acceleration_proposal_std = 0.01
self.sources_proposal_std = 0.01
self.gradient_based_estimator = None # Not connected to anything yet.
# For scalar inputs:
self.group_file = None
self.observations_file = None
self.timepoints_file = None
self.v0 = None
self.p0 = None
self.metric_parameters_file = None
self.interpolation_points_file = None
self.initial_noise_variance = None
self.exponential_type = None
self.number_of_metric_parameters = None # number of parameters in metric learning.
self.number_of_interpolation_points = None
self.latent_space_dimension = None # For deep metric learning
self.normalize_image_intensity = False
self.initialization_heuristic = False
####################################################################################################################
### Public methods:
####################################################################################################################
# Read the parameters from the three PyDeformetrica input xmls, and some further parameters initialization.
def read_all_xmls(self, model_xml_path, dataset_xml_path, optimization_parameters_xml_path):
self._read_model_xml(model_xml_path)
self._read_dataset_xml(dataset_xml_path)
self._read_optimization_parameters_xml(optimization_parameters_xml_path)
self._further_initialization()
####################################################################################################################
### Private methods:
####################################################################################################################
# Read the parameters from the model xml.
def _read_model_xml(self, model_xml_path):
model_xml_level0 = et.parse(model_xml_path).getroot()
for model_xml_level1 in model_xml_level0:
if model_xml_level1.tag.lower() == 'model-type':
self.model_type = model_xml_level1.text.lower()
elif model_xml_level1.tag.lower() == 'dimension':
self.dimension = int(model_xml_level1.text)
Settings().dimension = self.dimension
elif model_xml_level1.tag.lower() == 'initial-control-points':
self.initial_control_points = os.path.normpath(
os.path.join(os.path.dirname(model_xml_path), model_xml_level1.text))
elif model_xml_level1.tag.lower() == 'initial-momenta':
self.initial_momenta = os.path.normpath(
os.path.join(os.path.dirname(model_xml_path), model_xml_level1.text))
elif model_xml_level1.tag.lower() == 'initial-modulation-matrix':
self.initial_modulation_matrix = os.path.normpath(
os.path.join(os.path.dirname(model_xml_path), model_xml_level1.text))
elif model_xml_level1.tag.lower() == 'initial-time-shift-std':
self.initial_time_shift_variance = float(model_xml_level1.text) ** 2
elif model_xml_level1.tag.lower() == 'initial-log-acceleration-std':
self.initial_log_acceleration_variance = float(model_xml_level1.text) ** 2
elif model_xml_level1.tag.lower() == 'initial-log-acceleration-mean':
self.initial_log_acceleration_mean = float(model_xml_level1.text)
elif model_xml_level1.tag.lower() == 'initial-onset-ages':
self.initial_onset_ages = os.path.normpath(
os.path.join(os.path.dirname(model_xml_path), model_xml_level1.text))
elif model_xml_level1.tag.lower() == 'initial-log-accelerations':
self.initial_log_accelerations = os.path.normpath(
os.path.join(os.path.dirname(model_xml_path), model_xml_level1.text))
elif model_xml_level1.tag.lower() == 'initial-sources':
self.initial_sources = os.path.normpath(
os.path.join(os.path.dirname(model_xml_path), model_xml_level1.text))
elif model_xml_level1.tag.lower() == 'initial-sources-mean':
self.initial_sources_mean = model_xml_level1.text
elif model_xml_level1.tag.lower() == 'initial-sources-std':
self.initial_sources_std = model_xml_level1.text
elif model_xml_level1.tag.lower() == 'initial-momenta-to-transport':
self.initial_momenta_to_transport = os.path.normpath(
os.path.join(os.path.dirname(model_xml_path), model_xml_level1.text))
elif model_xml_level1.tag.lower() == 'initial-control-points-to-transport':
self.initial_control_points_to_transport = os.path.normpath(
os.path.join(os.path.dirname(model_xml_path), model_xml_level1.text))
elif model_xml_level1.tag.lower() == 'initial-noise-std':
self.initial_noise_variance = float(model_xml_level1.text)**2
elif model_xml_level1.tag.lower() == 'latent-space-dimension':
self.latent_space_dimension = int(model_xml_level1.text)
elif model_xml_level1.tag.lower() == 'template':
for model_xml_level2 in model_xml_level1:
if model_xml_level2.tag.lower() == 'dense-mode':
self.dense_mode = self._on_off_to_bool(model_xml_level2.text)
elif model_xml_level2.tag.lower() == 'object':
template_object = self._initialize_template_object_xml_parameters()
for model_xml_level3 in model_xml_level2:
if model_xml_level3.tag.lower() == 'deformable-object-type':
template_object['deformable_object_type'] = model_xml_level3.text.lower()
elif model_xml_level3.tag.lower() == 'attachment-type':
template_object['attachment_type'] = model_xml_level3.text.lower()
elif model_xml_level3.tag.lower() == 'kernel-width':
template_object['kernel_width'] = float(model_xml_level3.text)
elif model_xml_level3.tag.lower() == 'kernel-type':
template_object['kernel_type'] = model_xml_level3.text.lower()
if model_xml_level3.text.lower() == 'keops'.lower():
self._cuda_is_used = True
elif model_xml_level3.tag.lower() == 'noise-std':
template_object['noise_std'] = float(model_xml_level3.text)
elif model_xml_level3.tag.lower() == 'filename':
template_object['filename'] = os.path.normpath(
os.path.join(os.path.dirname(model_xml_path), model_xml_level3.text))
elif model_xml_level3.tag.lower() == 'noise-variance-prior-scale-std':
template_object['noise_variance_prior_scale_std'] = float(model_xml_level3.text)
elif model_xml_level3.tag.lower() == 'noise-variance-prior-normalized-dof':
template_object['noise_variance_prior_normalized_dof'] = float(model_xml_level3.text)
else:
msg = 'Unknown entry while parsing the template > ' + model_xml_level2.attrib['id'] + \
' object section of the model xml: ' + model_xml_level3.tag
warnings.warn(msg)
self.template_specifications[model_xml_level2.attrib['id']] = template_object
else:
msg = 'Unknown entry while parsing the template section of the model xml: ' \
+ model_xml_level2.tag
warnings.warn(msg)
elif model_xml_level1.tag.lower() == 'deformation-parameters':
for model_xml_level2 in model_xml_level1:
if model_xml_level2.tag.lower() == 'kernel-width':
self.deformation_kernel_width = float(model_xml_level2.text)
elif model_xml_level2.tag.lower() == 'exponential-type':
self.exponential_type = model_xml_level2.text
elif model_xml_level2.tag.lower() == 'kernel-type':
self.deformation_kernel_type = model_xml_level2.text.lower()
if model_xml_level2.text.lower() == 'keops'.lower():
self._cuda_is_used = True
elif model_xml_level2.tag.lower() == 'number-of-timepoints':
self.number_of_time_points = int(model_xml_level2.text)
elif model_xml_level2.tag.lower() == 'number-of-interpolation-points':
self.number_of_interpolation_points = int(model_xml_level2.text)
elif model_xml_level2.tag.lower() == 'concentration-of-timepoints':
self.concentration_of_time_points = int(model_xml_level2.text)
elif model_xml_level2.tag.lower() == 'number-of-sources':
self.number_of_sources = int(model_xml_level2.text)
elif model_xml_level2.tag.lower() == 't0':
self.t0 = float(model_xml_level2.text)
elif model_xml_level2.tag.lower() == 'tmin':
self.tmin = float(model_xml_level2.text)
elif model_xml_level2.tag.lower() == 'tmax':
self.tmax = float(model_xml_level2.text)
elif model_xml_level2.tag.lower() == 'p0':
self.p0 = model_xml_level2.text
elif model_xml_level2.tag.lower() == 'v0':
self.v0 = model_xml_level2.text
elif model_xml_level2.tag.lower() == 'metric-parameters-file': # for metric learning
self.metric_parameters_file = model_xml_level2.text
elif model_xml_level2.tag.lower() == 'interpolation-points-file': # for metric learning
self.interpolation_points_file = model_xml_level2.text
elif model_xml_level2.tag.lower() == 'covariance-momenta-prior-normalized-dof':
self.covariance_momenta_prior_normalized_dof = float(model_xml_level2.text)
else:
msg = 'Unknown entry while parsing the deformation-parameters section of the model xml: ' \
+ model_xml_level2.tag
warnings.warn(msg)
elif model_xml_level1.tag.lower() == 'use-exp-parallelization':
self.use_exp_parallelization = self._on_off_to_bool(model_xml_level1.text)
else:
msg = 'Unknown entry while parsing root of the model xml: ' + model_xml_level1.tag
warnings.warn(msg)
# Read the parameters from the dataset xml.
def _read_dataset_xml(self, dataset_xml_path):
if dataset_xml_path is not None:
dataset_xml_level0 = et.parse(dataset_xml_path).getroot()
data_set_xml_dirname = os.path.dirname(dataset_xml_path)
dataset_filenames = []
visit_ages = []
subject_ids = []
for dataset_xml_level1 in dataset_xml_level0:
if dataset_xml_level1.tag.lower() == 'subject':
subject_ids.append(dataset_xml_level1.attrib['id'])
subject_filenames = []
subject_ages = []
for dataset_xml_level2 in dataset_xml_level1:
if dataset_xml_level2.tag.lower() == 'visit':
visit_filenames = {}
for dataset_xml_level3 in dataset_xml_level2:
if dataset_xml_level3.tag.lower() == 'filename':
visit_filenames[dataset_xml_level3.attrib['object_id']] = os.path.normpath(
os.path.join(data_set_xml_dirname, dataset_xml_level3.text))
elif dataset_xml_level3.tag.lower() == 'age':
subject_ages.append(float(dataset_xml_level3.text))
subject_filenames.append(visit_filenames)
dataset_filenames.append(subject_filenames)
visit_ages.append(subject_ages)
# For scalar input, following leasp model
if dataset_xml_level1.tag.lower() == 'group-file':
self.group_file = dataset_xml_level1.text
if dataset_xml_level1.tag.lower() == 'timepoints-file':
self.timepoints_file = dataset_xml_level1.text
if dataset_xml_level1.tag.lower() == 'observations-file':
self.observations_file = dataset_xml_level1.text
self.dataset_filenames = dataset_filenames
self.visit_ages = visit_ages
self.subject_ids = subject_ids
# Read the parameters from the optimization_parameters xml.
def _read_optimization_parameters_xml(self, optimization_parameters_xml_path):
optimization_parameters_xml_level0 = et.parse(optimization_parameters_xml_path).getroot()
for optimization_parameters_xml_level1 in optimization_parameters_xml_level0:
if optimization_parameters_xml_level1.tag.lower() == 'optimization-method-type':
self.optimization_method_type = optimization_parameters_xml_level1.text.lower()
elif optimization_parameters_xml_level1.tag.lower() == 'optimized-log-likelihood':
self.optimized_log_likelihood = optimization_parameters_xml_level1.text.lower()
elif optimization_parameters_xml_level1.tag.lower() == 'number-of-threads':
self.number_of_threads = int(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'max-iterations':
self.max_iterations = int(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'convergence-tolerance':
self.convergence_tolerance = float(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'memory-length':
self.memory_length = int(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'downsampling-factor':
self.downsampling_factor = int(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'save-every-n-iters':
self.save_every_n_iters = int(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'print-every-n-iters':
self.print_every_n_iters = int(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'sample-every-n-mcmc-iters':
self.sample_every_n_mcmc_iters = int(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'use-sobolev-gradient':
self.use_sobolev_gradient = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'sobolev-kernel-width-ratio':
self.sobolev_kernel_width_ratio = float(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'initial-step-size':
self.initial_step_size = float(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'freeze-template':
self.freeze_template = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'freeze-cp':
self.freeze_control_points = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'use-cuda':
self.use_cuda = self._on_off_to_bool(optimization_parameters_xml_level1.text)
if self.use_cuda:
self._cuda_is_used = True
elif optimization_parameters_xml_level1.tag.lower() == 'max-line-search-iterations':
self.max_line_search_iterations = int(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'normalize-image-intensity':
self.normalize_image_intensity = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'use-exp-parallelization':
self.use_exp_parallelization = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'state-file':
self.state_file = optimization_parameters_xml_level1.text
elif optimization_parameters_xml_level1.tag.lower() == 'use-rk2':
self.use_rk2 = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'momenta-proposal-std':
self.momenta_proposal_std = float(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'onset-age-proposal-std':
self.onset_age_proposal_std = float(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'log-acceleration-proposal-std':
self.log_acceleration_proposal_std = float(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'sources-proposal-std':
self.sources_proposal_std = float(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'scale-initial-step-size':
self.scale_initial_step_size = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'initialization-heuristic':
self.initialization_heuristic = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'freeze-v0':
self.freeze_v0 = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'freeze-p0':
self.freeze_p0 = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'freeze-modulation-matrix':
self.freeze_modulation_matrix = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'freeze-reference-time':
self.freeze_reference_time = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'freeze-time-shift-variance':
self.freeze_time_shift_variance = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'freeze-log-acceleration-variance':
self.freeze_log_acceleration_variance = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'freeze-reference-time':
self.freeze_reference_time = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'freeze-noise-variance':
self.freeze_noise_variancee = self._on_off_to_bool(optimization_parameters_xml_level1.text)
elif optimization_parameters_xml_level1.tag.lower() == 'gradient-based-estimator':
self.gradient_based_estimator = optimization_parameters_xml_level1.text
else:
msg = 'Unknown entry while parsing the optimization_parameters xml: ' \
+ optimization_parameters_xml_level1.tag
warnings.warn(msg)
# Default xml parameters for any template object.
def _initialize_template_object_xml_parameters(self):
template_object = {}
template_object['deformable_object_type'] = 'undefined'
template_object['kernel_type'] = 'undefined'
template_object['kernel_width'] = 0.0
template_object['noise_std'] = -1
template_object['filename'] = 'undefined'
template_object['noise_variance_prior_scale_std'] = None
template_object['noise_variance_prior_normalized_dof'] = 0.01
return template_object
def _on_off_to_bool(self, s):
if s.lower() == "on":
return True
elif s.lower() == "off":
return False
else:
raise RuntimeError("Please give a valid flag (on, off)")
# Based on the raw read parameters, further initialization of some remaining ones.
def _further_initialization(self):
if self.dense_mode:
Settings().dense_mode = self.dense_mode
print('>> Dense mode activated. No distinction will be made between template and control points.')
assert len(self.template_specifications) == 1, \
'Only a single object can be considered when using the dense mode.'
if not self.freeze_control_points:
self.freeze_control_points = True
msg = 'With active dense mode, the freeze_template (currently %s) and freeze_control_points ' \
'(currently %s) flags are redundant. Defaulting to freeze_control_points = True.' \
% (str(self.freeze_template), str(self.freeze_control_points))
warnings.warn(msg)
if self.initial_control_points is not None:
self.initial_control_points = None
msg = 'With active dense mode, specifying initial_control_points is useless. Ignoring this xml entry.'
warnings.warn(msg)
if self.initial_cp_spacing < 0 and self.initial_control_points is None and not self.dense_mode:
print('>> No initial CP spacing given: using diffeo kernel width of ' + str(self.deformation_kernel_width))
self.initial_cp_spacing = self.deformation_kernel_width
# Setting tensor types according to CUDA availability and user choices.
if self._cuda_is_used:
if not torch.cuda.is_available():
msg = 'CUDA seems to be unavailable. All computations will be carried out on CPU.'
warnings.warn(msg)
else:
print(">> CUDA is used at least in one operation, all operations will be done with FLOAT precision.")
if self.use_cuda:
print(">> All tensors will be CUDA tensors.")
Settings().tensor_scalar_type = torch.cuda.FloatTensor
Settings().tensor_integer_type = torch.cuda.LongTensor
else:
print(">> Setting tensor type to float.")
Settings().tensor_scalar_type = torch.FloatTensor
# Setting the dimension.
Settings().dimension = self.dimension
# If longitudinal model and t0 is not initialized, initializes it.
if (self.model_type == 'regression' or self.model_type == 'LongitudinalAtlas'.lower()
or self.model_type == 'LongitudinalRegistration'.lower()) \
and (self.t0 is None or self.initial_time_shift_variance is None):
total_number_of_visits = 0
mean_visit_age = 0.0
var_visit_age = 0.0
for i in range(len(self.visit_ages)):
for j in range(len(self.visit_ages[i])):
total_number_of_visits += 1
mean_visit_age += self.visit_ages[i][j]
var_visit_age += self.visit_ages[i][j] ** 2
if total_number_of_visits > 0:
mean_visit_age /= float(total_number_of_visits)
var_visit_age = (var_visit_age / float(total_number_of_visits) - mean_visit_age ** 2)
if self.t0 is None:
print('>> Initial t0 set to the mean visit age: %.2f' % mean_visit_age)
self.t0 = mean_visit_age
else:
print('>> Initial t0 set by the user to %.2f ; note that the mean visit age is %.2f'
% (self.t0, mean_visit_age))
if not self.model_type == 'regression':
if self.initial_time_shift_variance is None:
print('>> Initial time-shift std set to the empirical std of the visit ages: %.2f'
% math.sqrt(var_visit_age))
self.initial_time_shift_variance = var_visit_age
else:
print(('>> Initial time-shift std set by the user to %.2f ; note that the empirical std of '
'the visit ages is %.2f') % (self.initial_time_shift_variance, math.sqrt(var_visit_age)))
# Setting the number of threads in general settings
Settings().number_of_threads = self.number_of_threads
if self.number_of_threads > 1:
print(">> I will use", self.number_of_threads,
"threads, and I set OMP_NUM_THREADS and torch_num_threads to 1.")
os.environ['OMP_NUM_THREADS'] = "1"
torch.set_num_threads(1)
else:
print('>> Setting OMP_NUM_THREADS and torch_num_threads to 4.')
os.environ['OMP_NUM_THREADS'] = "4"
torch.set_num_threads(4)
try:
set_start_method("spawn")
except RuntimeError as error:
print('>> Warning: ' + str(error) + ' [ in xml_parameters ]. Ignoring.')
self._initialize_state_file()
# Freeze the fixed effects in case of a registration.
if self.model_type == 'Registration'.lower():
self.freeze_template = True
self.freeze_control_points = True
elif self.model_type == 'LongitudinalRegistration'.lower():
self.freeze_template = True
self.freeze_control_points = True
self.freeze_momenta = True
self.freeze_modulation_matrix = True
self.freeze_reference_time = True
self.freeze_time_shift_variance = True
self.freeze_log_acceleration_variance = True
self.freeze_noise_variance = True
# Initialize the number of sources if needed.
if self.model_type == 'LongitudinalAtlas'.lower() \
and self.initial_modulation_matrix is None and self.number_of_sources is None:
self.number_of_sources = 4
print('>> No initial modulation matrix given, neither a number of sources. '
'The latter will be ARBITRARILY defaulted to 4.')
if self.dimension <= 1:
print("Setting the number of sources to 0 because the dimension is 1.")
self.number_of_sources = 0
# Initialize the initial_log_acceleration_variance if needed.
if (self.model_type == 'LongitudinalAtlas'.lower() or self.model_type == 'LongitudinalRegistration'.lower()) \
and self.initial_log_acceleration_variance is None:
print('>> The initial log-acceleration std fixed effect is ARBITRARILY set to 0.5')
log_acceleration_std = 0.5
self.initial_log_acceleration_variance = (log_acceleration_std ** 2)
# Image grid downsampling factor.
if not self.downsampling_factor == 1:
image_object_specs = [(key, value) for key, value in self.template_specifications.items()
if value['deformable_object_type'].lower() == 'image']
if len(image_object_specs) > 2:
raise RuntimeError('Only a single image object can be used.')
elif len(image_object_specs) == 1:
print('>> Setting the image grid downsampling factor to: %d.' % self.downsampling_factor)
self.template_specifications[image_object_specs[0][0]]['downsampling_factor'] = self.downsampling_factor
else:
msg = 'The "downsampling_factor" parameter is useful only for image data, ' \
'but none is considered here. Ignoring.'
warnings.warn(msg)
    def _initialize_state_file(self):
        """
        Set up the state file used to resume interrupted computations.

        If a state file was given by the user, record it in Settings(); if that file
        already exists, flag Settings().load_state so the estimators attempt to
        resume from it. If no state file was given, default to 'pydef_state.p' in
        the output directory.
        """
        if self.state_file is None:
            # No user-supplied state file: default to the output directory.
            # NOTE(review): unlike the else-branch, Settings().state_file is NOT
            # updated here -- confirm whether Settings() already defaults to this
            # same path, or whether this is an omission.
            self.state_file = os.path.join(Settings().output_dir, "pydef_state.p")
        else:
            Settings().state_file = self.state_file
        if os.path.exists(self.state_file):
            Settings().load_state = True
            print(">> Will attempt to resume computation from file", self.state_file)
        else:
            # NOTE(review): this warning also fires when the path was auto-created
            # above (i.e., no state file "was given") -- the message is misleading
            # in that case.
            msg = "A state file was given, but it does not exist. I will save the new state on this file nonetheless."
            warnings.warn(msg)
        print(">> State will be saved in file", self.state_file)
| de | 0.297405 | XmlParameters object class. Parses input xmls and stores the given parameters. #################################################################################################################### ### Constructor: #################################################################################################################### # true if at least one operation will use CUDA. # For metric learning atlas # Not connected to anything yet. # For scalar inputs: # number of parameters in metric learning. # For deep metric learning #################################################################################################################### ### Public methods: #################################################################################################################### # Read the parameters from the three PyDeformetrica input xmls, and some further parameters initialization. #################################################################################################################### ### Private methods: #################################################################################################################### # Read the parameters from the model xml. # for metric learning # for metric learning # Read the parameters from the dataset xml. # For scalar input, following leasp model # Read the parameters from the optimization_parameters xml. # Default xml parameters for any template object. # Based on the raw read parameters, further initialization of some remaining ones. # Setting tensor types according to CUDA availability and user choices. # Setting the dimension. # If longitudinal model and t0 is not initialized, initializes it. # Setting the number of threads in general settings # Freeze the fixed effects in case of a registration. # Initialize the number of sources if needed. # Initialize the initial_log_acceleration_variance if needed. # Image grid downsampling factor. 
If a state file was given, assert the file exists and set Settings() so that the estimators will try to resume the computations If a state file was not given, We automatically create one | 2.318215 | 2 |
src/Workstation/Misc/cv tutorials/st.py | khansaadbinhasan/Low-Cost-Autonomous-Vehicle-for-Inventory-Movement-in-Warehouses | 0 | 6617547 | <reponame>khansaadbinhasan/Low-Cost-Autonomous-Vehicle-for-Inventory-Movement-in-Warehouses<filename>src/Workstation/Misc/cv tutorials/st.py
list1 = [1,2,2,3,4,5,6,7]
print(list1[1:-1]) | tutorials/st.py
list1 = [1,2,2,3,4,5,6,7]
print(list1[1:-1]) | none | 1 | 3.73555 | 4 | |
MQTT/mqttconnect.py | Patrick-Star125/handgesture-recognition | 0 | 6617548 | from MQTT.MQTTClient import MQTTConnect as mqtt
class mqttConnect:
    """ Thin convenience wrapper around the project's MQTT client
        (MQTT.MQTTClient.MQTTConnect, imported as 'mqtt'). """
    def __init__(self):
        # Create the underlying client object and run its connection setup.
        self.mqtt = mqtt()
        self.mqtt.MQTTClient()
    def mqttKeep(self):
        # Start the client's background network loop to keep the connection alive.
        # (presumably a paho-mqtt loop_start -- confirm against MQTT.MQTTClient)
        self.mqtt.Client.loop_start()
    def SendServer(self,msg):
        # Publish msg to the server via the wrapped client.
        self.mqtt.Publish(msg)
    def RevServer(self):
        # Return the last received payload, or None when nothing has been received.
        # Note: any falsy payload (e.g., empty message) also yields None.
        if self.mqtt.Rev:
            return self.mqtt.Rev
        else:
            return None
| from MQTT.MQTTClient import MQTTConnect as mqtt
class mqttConnect:
def __init__(self):
self.mqtt = mqtt()
self.mqtt.MQTTClient()
def mqttKeep(self):
self.mqtt.Client.loop_start()
def SendServer(self,msg):
self.mqtt.Publish(msg)
def RevServer(self):
if self.mqtt.Rev:
return self.mqtt.Rev
else:
return None
| none | 1 | 2.722826 | 3 | |
quedadas/migrations/0028_auto_20171126_1248.py | fevsea/meet-Run-Server | 0 | 6617549 | <gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-26 12:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """ Auto-generated (Django 1.11) migration: adds the nullable float fields
        'challengedBase' and 'creatorBase' to the 'challenge' model. """
    dependencies = [
        ('quedadas', '0027_challenge'),
    ]
    operations = [
        migrations.AddField(
            model_name='challenge',
            name='challengedBase',
            field=models.FloatField(null=True),
        ),
        migrations.AddField(
            model_name='challenge',
            name='creatorBase',
            field=models.FloatField(null=True),
        ),
    ]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-26 12:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('quedadas', '0027_challenge'),
]
operations = [
migrations.AddField(
model_name='challenge',
name='challengedBase',
field=models.FloatField(null=True),
),
migrations.AddField(
model_name='challenge',
name='creatorBase',
field=models.FloatField(null=True),
),
] | en | 0.734065 | # -*- coding: utf-8 -*- # Generated by Django 1.11.6 on 2017-11-26 12:48 | 1.518551 | 2 |
moon_rise_time/moon_rise_time.py | edose/moon_rise_time_demo | 0 | 6617550 | <reponame>edose/moon_rise_time_demo
""" Module moon_rise_time.moon_rise_time
Demonstration of proposed new moon_rise_time() for astroplan.
"""
__author__ = "<NAME>, Albuquerque"
# Python core:
import os
from math import sqrt, ceil
from time import perf_counter_ns
from random import seed, uniform
# External packages:
from astropy import units as u
from astropy.time import Time, TimeDelta
import astroplan
THIS_PACKAGE_ROOT_DIRECTORY = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def moon_transit_time(observer, time, which='nearest'):
    """ Return the time of the moon's meridian transit at the observer's location.
        (This function is absent from astroplan.)
    :param observer: astroplan.Observer object.
    :param time: `~astropy.time.Time` or other (see below).
        Time of observation. This will be passed in as the first argument
        to the `~astropy.time.Time` initializer, so it can be anything that
        `~astropy.time.Time` will accept (including a `~astropy.time.Time`
        object)
    :param which: {'next', 'previous', 'nearest'}
        Choose which moon transit time relative to the present ``time`` would you
        like to calculate
    :return: `~astropy.time.Time` Time of moon transit
    """
    def azimuth_offset(t):
        # Zero (crossed in the positive direction) exactly at transit, i.e.,
        # when the moon's azimuth passes through 180 degrees.
        return observer.moon_altaz(t).az.deg - 180.0

    return _find_best_crossing(time, which, azimuth_offset, fn_derivative_sign='pos',
                               bracket_duration=2 * u.hour,
                               n_brackets_nearest=7, n_brackets_next=14)
def moon_rise_time(observer, time, which='nearest', horizon=0 * u.deg):
    """ Get moon rise time at observer location. This fn is proposed to REPLACE current astroplan fn.
    :param observer: astroplan.Observer object.
    :param time: `~astropy.time.Time` or other (see below)
        Time of observation. This will be passed in as the first argument
        to the `~astropy.time.Time` initializer, so it can be anything that
        `~astropy.time.Time` will accept (including a `~astropy.time.Time`
        object)
    :param which: {'next', 'previous', 'nearest'}
        Choose which moon rise relative to the present ``time`` would you
        like to calculate
    :param horizon: angular Quantity (optional), default = zero degrees
        Angle above/below the actual horizon to use for calculating rise/set times
        (i.e., -6 deg horizon = civil twilight, etc.)
    :return: `~astropy.time.Time` Time of moon rise
    """
    # Bug fix: convert explicitly to degrees rather than taking .value blindly,
    # so a horizon given in other angular units (e.g., radians, arcmin) is honored.
    horizon_deg = horizon.to_value(u.deg)

    def metric_fn(time):
        """ Moon rise occurs when this function passes through zero in the positive direction. """
        return observer.moon_altaz(time).alt.deg - horizon_deg  # returns float.

    return _find_best_crossing(time, which, metric_fn, fn_derivative_sign='pos',
                               bracket_duration=2 * u.hour, n_brackets_nearest=7, n_brackets_next=14)
def _find_best_crossing(time, which, metric_fn, fn_derivative_sign, bracket_duration=2 * u.hour,
                        n_brackets_nearest=7, n_brackets_next=14):
    """ Find crossing (in either time direction) of metric_fn through zero
        (with fn_derivative_sign respected if given), then refine and return its time.
    :param time: `~astropy.time.Time` around which the crossing is searched.
    :param which: {'next', 'previous', 'nearest'} -- which crossing relative to ``time``.
    :param metric_fn: function taking a list of times and returning the corresponding
        metric values; the sought event occurs where these cross zero.
    :param fn_derivative_sign: 'pos' for an upward (negative-to-positive) crossing,
        'neg' for a downward one.
    :param bracket_duration: duration of one search bracket (default 2 hours).
    :param n_brackets_nearest: brackets searched in EACH direction when which='nearest'.
    :param n_brackets_next: brackets searched when which is 'next' or 'previous'.
    :return: `~astropy.time.Time` of the refined crossing, or None when which='nearest'
        finds no crossing in either direction.
    """
    if which not in ('next', 'previous', 'nearest'):
        raise ValueError('Parameter \'which\' must be in {\'next\', \'previous\', or \'nearest\'}.')
    if which == 'next':
        # NOTE(review): if no crossing lies within n_brackets_next brackets, i is None
        # and _refine_event_time() will fail -- confirm callers always search far enough.
        i, times, fn_values = _find_next_crossing(time, metric_fn, fn_derivative_sign,
                                                  bracket_duration, n_brackets=n_brackets_next)
        return _refine_event_time(i, times, fn_values, metric_fn)
    elif which == 'previous':
        i, times, fn_values = _find_previous_crossing(time, metric_fn, fn_derivative_sign,
                                                      bracket_duration, n_brackets=n_brackets_next)
        return _refine_event_time(i, times, fn_values, metric_fn)
    elif which == 'nearest':
        val = _find_next_crossing(time, metric_fn, fn_derivative_sign,
                                  bracket_duration, n_brackets=n_brackets_nearest)
        i_next, next_times, next_fn_values = val
        val = _find_previous_crossing(time, metric_fn, fn_derivative_sign,
                                      bracket_duration, n_brackets=n_brackets_nearest)
        i_previous, previous_times, previous_fn_values = val
        # Cases: the search for 'next' or 'previous' (or both) found no crossings:
        if i_next is None and i_previous is None:
            # print('########## NO CROSSING FOUND (should never happen).')
            return None
        if i_previous is None:
            # print('No previous crossing, refine next only:')
            return _refine_event_time(i_next, next_times, next_fn_values, metric_fn)
        if i_next is None:
            # print('No next crossing, refine previous only:')
            return _refine_event_time(i_previous, previous_times, previous_fn_values, metric_fn)
        # Cases: either 'next' and 'previous' search found a crossing.
        # One bracket is clearly nearer than the other, so refine and return.
        # (Comparing bracket indices is a valid proximity proxy because both searches
        # use the same bracket_duration.)
        if i_previous < i_next:
            return _refine_event_time(i_previous, previous_times, previous_fn_values, metric_fn)
        if i_next < i_previous:
            return _refine_event_time(i_next, next_times, next_fn_values, metric_fn)
        # Remaining case: 'next' and 'previous' brackets appear equally near.
        # Refine both, and return the nearer refined time (ties go to 'next').
        # print('refine both next and previous:')
        next_time = _refine_event_time(i_next, next_times, next_fn_values, metric_fn)
        previous_time = _refine_event_time(i_previous, previous_times, previous_fn_values, metric_fn)
        if abs(time - previous_time) >= abs(next_time - time):
            return next_time
        return previous_time
def _find_next_crossing(time, metric_fn, fn_derivative_sign, bracket_duration, n_brackets):
""" Find next crossing of metric_fn through zero (with fn_derivative_sign respected if given).
:param time:
:param metric_fn:
:param fn_derivative_sign: 'pos' or 'neg':
:param bracket_duration:
:param n_brackets:
:return: 2-tuple of Times bracketing the crossing, or None if no crossing found.
"""
times = [time + i * bracket_duration for i in range(n_brackets + 1)] # times[0] == time.
fn_values = metric_fn(times)
if fn_derivative_sign == 'pos':
has_crossing = [fn_values[i] <= 0 and fn_values[i + 1] > 0 for i in range(len(times) - 1)]
elif fn_derivative_sign == 'neg':
has_crossing = [fn_values[i] > 0 and fn_values[i + 1] <= 0 for i in range(len(times) - 1)]
else:
ValueError('fn_derivative_sign must be \'pos\' or \'neg\'.')
if has_crossing.count(True) <= 0:
return None, None, None
i_found = has_crossing.index(True)
return i_found, times, fn_values
def _find_previous_crossing(time, metric_fn, fn_derivative_sign, bracket_duration, n_brackets):
""" Find next crossing of metric_fn through zero (with fn_derivative_sign respected if given).
:param time:
:param metric_fn:
:param fn_derivative_sign: 'pos' or 'neg':
:param bracket_duration:
:param n_brackets:
:return: 2-tuple of Times bracketing the crossing, or None if no crossing found.
"""
times = [time - i * bracket_duration for i in range(n_brackets + 1)] # times[0] == time, reverse time.
fn_values = metric_fn(times)
if fn_derivative_sign == 'pos':
has_crossing = [fn_values[i + 1] <= 0 and fn_values[i] > 0 for i in range(len(times) - 1)]
elif fn_derivative_sign == 'neg':
has_crossing = [fn_values[i + 1] > 0 and fn_values[i] <= 0 for i in range(len(times) - 1)]
else:
ValueError('fn_derivative_sign must be \'pos\' or \'neg\'.')
if has_crossing.count(True) <= 0:
return None, None, None
i_found = has_crossing.index(True)
return i_found, times, fn_values
def _refine_event_time(i_bracket, times, fn_values, metric_function):
    """ For a smooth metric function and initial timespan near event time, return a refined estimate
        of the event time (i.e., the time at which the metric function equals zero).
        Works in two passes: a quadratic fit through three bracket points, then a second
        quadratic fit on a 100x finer time spacing around the first estimate.
        Typically used in estimating target rise and set times (fn=altitude-target_horizon) and
        meridian transit times (fn=local hour angle).
    :param i_bracket: index of the bracket (pair of adjacent times) to refine. [int]
    :param times: list of equally spaced times defining the brackets.
        [list of astropy.Time objects]
    :param fn_values: list of metric values corresponding to times.
        [floats; elements must support .copy() -- e.g. numpy scalars. TODO confirm]
    :param metric_function: a smooth function defined to be zero at time of desired event.
        This function takes a list of times as sole parameter. [function object]
    :return: best estimate of event time. [astropy.Time]
    """
    def _refine_time(times, fn_values):
        # Fit a parabola y(t) = a*t^2 + b*t + c through the three (time, value) points
        # (t measured in hours from the middle point) and return the parabola root
        # closest to the linear-interpolation estimate.
        if len(times) != 3 or len(fn_values) != 3:
            raise ValueError('_refine_time() requires exactly 3 equally spaced times and 3 fn values.')
        dt_hour = (times[1] - times[0]).sec / 3600.0  # time spacing in hours [float].
        t0, t1, t2 = tuple(times)
        y0, y1, y2 = tuple(fn_values)
        # Finite-difference parabola coefficients about the middle point t1:
        a = (y2 - 2 * y1 + y0) / (2 * dt_hour ** 2)
        b = (y2 - y0) / (2 * dt_hour)
        c = y1
        radical = sqrt((b * b) - (4 * a * c))
        root_minus = (-b - radical) / (2 * a)
        root_plus = (-b + radical) / (2 * a)
        time_root_minus = t1 + TimeDelta(root_minus * 3600, format='sec')
        time_root_plus = t1 + TimeDelta(root_plus * 3600, format='sec')
        # Linear-interpolation estimate used only to pick between the two roots.
        time_linear_interp = t1 - TimeDelta((y1 / b) * 3600, format='sec')  # may depend on time spacing.
        distance_minus = abs(time_root_minus - time_linear_interp)
        distance_plus = abs(time_root_plus - time_linear_interp)
        if distance_minus < distance_plus:
            time_root = time_root_minus
        else:
            time_root = time_root_plus
        return time_root

    # Choose best adjacent third time to go with the two defined by i_bracket
    # (prefer the side whose bracket endpoint is closer to zero, clamped at the ends):
    if i_bracket == 0:
        i_trio = [0, 1, 2]
    elif i_bracket == len(times) - 2:
        i_trio = [len(times) - 3, len(times) - 2, len(times) - 1]
    elif abs(fn_values[i_bracket]) <= abs(fn_values[i_bracket + 1]):
        i_trio = [i_bracket - 1, i_bracket, i_bracket + 1]
    else:
        i_trio = [i_bracket, i_bracket + 1, i_bracket + 2]

    # Get interpolated time (first, coarse quadratic pass):
    times_3 = [times[i].copy() for i in i_trio]
    fn_values_3 = [fn_values[i].copy() for i in i_trio]
    time_interpolated = _refine_time(times_3, fn_values_3)

    # Correct the interpolated time, using new times and function values
    # (second quadratic pass at 1/100 of the original spacing):
    dt_new = (times_3[1] - times_3[0]).sec / 3600.0 / 100.0  # new spacing, in hours.
    times_new = [time_interpolated + (i - 1) * TimeDelta(dt_new * 3600, format='sec') for i in range(3)]
    fn_values_new = metric_function(times_new)
    time_corrected = _refine_time(times_new, fn_values_new)
    return time_corrected
def workout(max_start_times=None, csv_filename='workout.csv'):
    """ Exercise both this module's moon_rise_time() and astroplan.Observer.moon_rise_time()
    over a year, every approx. 30 minutes (dithered), in all 3 'which' modes.
    Save summary results in a .csv file, one line per (time, which) combination.
    Should be about 52,704 lines. Observer location is Apache Point ('APO').
    :param max_start_times: max number of start times to run, or None for the full year. [int or None]
    :param csv_filename: name of the output file, written in this package's root directory. [str]
    :return: None. Writes csv_filename and prints progress/timings to stdout.
    """
    print('refreshing IERS_A.')
    astroplan.download_IERS_A()
    time_0 = Time('2021-01-01 00:00:00')
    d_seconds = 30 * 60  # 30 minutes between start times.
    d_time = TimeDelta(d_seconds, format='sec')
    if max_start_times is None:
        n_times = int(ceil(1 + (366 * 24 * 3600) / d_seconds))
    else:
        n_times = min(max_start_times, int(ceil(1 + (366 * 24 * 3600) / d_seconds)))
    dithering = 10 * 60  # max dither applied to each start time, in seconds.
    seed(2022)  # fixed seed so the dithered start times are reproducible.
    print('Making all_start_times now.')
    all_start_times = [time_0 + i * d_time + TimeDelta(uniform(-dithering, +dithering), format='sec')
                       for i in range(n_times)]
    print(len(all_start_times), 'start times.')
    astroplan_site_name = 'APO'
    obs = astroplan.Observer.at_site(astroplan_site_name)
    print('Astroplan Site Name: ', astroplan_site_name)
    # Clear all setup for this expensive function, before starting to time while using it:
    _ = obs.moon_altaz(time_0).alt.deg
    slope_timedelta = TimeDelta(60, format='sec')
    workout_start_time = Time.now()
    # Nine column names; the comma after the second 'err_us' is required
    # (it was previously missing, silently concatenating to 'err_usdiff'):
    output_header_line = ';'.join(['start_time',
                                   'which',
                                   'mrt_astropak',
                                   'ms',
                                   'err_us',
                                   'mrt_astroplan',
                                   'ms',
                                   'err_us',
                                   'diff']) + '\n'
    output_lines = [output_header_line]  # one .csv line per (start_time, which) combination.
    i_start_times_done = 0
    for start_time in all_start_times:
        for which in ('nearest', 'next', 'previous'):
            print('********** starting', '{0.iso}'.format(start_time), which)
            # First, time and evaluate this module's moon_rise_time():
            ns_start = perf_counter_ns()
            mrt = moon_rise_time(obs, start_time, which)
            ns_end = perf_counter_ns()
            mrt_ms = round((ns_end - ns_start) / 1000000)
            if not mrt.masked:
                # Error estimate: altitude at reported rise time divided by local altitude slope.
                mrt_alt = obs.moon_altaz(mrt).alt.deg
                later_alt = obs.moon_altaz(mrt + slope_timedelta).alt.deg
                slope = (later_alt - mrt_alt) / slope_timedelta.sec  # deg/seconds
                mrt_error_usec = int(round(1000000 * mrt_alt / slope))  # microseconds
                mrt_error_usec_string = str(mrt_error_usec)
            else:
                e = 'New moon_rise_time failed. If raised: '\
                    '(1) edit astropak to insert \'ERROR\', and '\
                    '(2) edit astroplan section below to calculate its own slope.'
                raise NotImplementedError(e)
            # Now, time and evaluate astroplan.Observer.moon_rise_time():
            ns_start = perf_counter_ns()
            astroplan_mrt = obs.moon_rise_time(start_time, which)
            ns_end = perf_counter_ns()
            astroplan_mrt_ms = round((ns_end - ns_start) / 1000000)
            if not astroplan_mrt.masked:
                # astroplan.Observer.moon_rise_time() succeeded; reuse slope from above.
                astroplan_mrt_alt = obs.moon_altaz(astroplan_mrt).alt.deg
                astroplan_mrt_error_usec = int(round(1000000 * astroplan_mrt_alt / slope))  # microseconds
                mrt_diff_msec = int(round(1000 * (mrt - astroplan_mrt).sec))
                astroplan_mrt_error_usec_string = str(astroplan_mrt_error_usec)
                mrt_diff_msec_string = str(mrt_diff_msec)
            else:
                # astroplan.Observer.moon_rise_time() failed.
                print('****************** obs.moon_rise_time() ERROR DETECTED. ******************')
                astroplan_mrt_error_usec_string = 'ERROR'
                mrt_diff_msec_string = 'ERROR'
            # Append one semicolon-delimited output line:
            this_line = (';'.join(['{0.iso}'.format(start_time),
                                   which.ljust(8),
                                   '{0.iso}'.format(mrt),
                                   str(mrt_ms).rjust(4),
                                   mrt_error_usec_string.rjust(8),
                                   '{0.iso}'.format(astroplan_mrt),
                                   str(astroplan_mrt_ms).rjust(4),
                                   astroplan_mrt_error_usec_string.rjust(8),
                                   mrt_diff_msec_string.rjust(5)])
                         + '\n')
            output_lines.append(this_line)
        i_start_times_done += 1
        if max_start_times is not None and max_start_times >= 1:
            if i_start_times_done >= max_start_times:
                break
    # Write output lines to new .csv file:
    fullpath = os.path.join(THIS_PACKAGE_ROOT_DIRECTORY, csv_filename)
    with open(fullpath, 'w') as f:
        f.writelines(output_lines)
    workout_end_time = Time.now()
    print('workout start time:', '{0.iso}'.format(workout_start_time))
    print('workout end time:  ', '{0.iso}'.format(workout_end_time))
    workout_hours = (workout_end_time - workout_start_time).sec / 3600
    # Was '{0:2f}': width 2, default precision; '.2f' gives the intended 2 decimal places.
    print('workout required:', '{0:.2f}'.format(workout_hours), 'hours.')
""" Module moon_rise_time.moon_rise_time
Demonstration of proposed new moon_rise_time() for astroplan.
"""
__author__ = "<NAME>, Albuquerque"
# Python core:
import os
from math import sqrt, ceil
from time import perf_counter_ns
from random import seed, uniform
# External packages:
from astropy import units as u
from astropy.time import Time, TimeDelta
import astroplan
THIS_PACKAGE_ROOT_DIRECTORY = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def moon_transit_time(observer, time, which='nearest'):
    """ Return the time of the moon's meridian transit at the observer's location.
    (astroplan itself currently offers no moon-transit function.)
    :param observer: astroplan.Observer object.
    :param time: `~astropy.time.Time` or other (see below).
        Time of observation. This will be passed in as the first argument
        to the `~astropy.time.Time` initializer, so it can be anything that
        `~astropy.time.Time` will accept (including a `~astropy.time.Time`
        object)
    :param which: {'next', 'previous', 'nearest'}
        Choose which moon transit time relative to the present ``time`` would you
        like to calculate
    :return: `~astropy.time.Time` Time of moon transit
    """
    # Transit is detected where the moon's azimuth passes through 180 degrees,
    # i.e., where this metric crosses zero in the positive direction.
    def metric_fn(t):
        return observer.moon_altaz(t).az.deg - 180.0  # returns float.
    return _find_best_crossing(time, which, metric_fn, fn_derivative_sign='pos',
                               bracket_duration=2 * u.hour,
                               n_brackets_nearest=7, n_brackets_next=14)
def moon_rise_time(observer, time, which='nearest', horizon=0 * u.deg):
    """ Get moon rise time at observer location. This fn is proposed to REPLACE current astroplan fn.
    :param observer: astroplan.Observer object.
    :param time: `~astropy.time.Time` or other (see below)
        Time of observation. This will be passed in as the first argument
        to the `~astropy.time.Time` initializer, so it can be anything that
        `~astropy.time.Time` will accept (including a `~astropy.time.Time`
        object)
    :param which: {'next', 'previous', 'nearest'}
        Choose which moon rise relative to the present ``time`` would you
        like to calculate
    :param horizon: angular Quantity (optional), default = zero degrees
        Angle above/below actual horizon to use for calculating rise/set times
        (i.e., -6 deg horizon = civil twilight, etc.). Any angular unit is accepted.
    :return: `~astropy.time.Time` Time of moon rise
    """
    # to_value(u.deg) converts any angular Quantity to degrees; the previous
    # bare .value silently misread non-degree horizons (e.g., radians) as degrees.
    horizon_deg = horizon.to_value(u.deg)
    def metric_fn(time):
        """ Moon rise occurs when this function passes through zero in the positive direction. """
        return observer.moon_altaz(time).alt.deg - horizon_deg  # returns float.
    return _find_best_crossing(time, which, metric_fn, fn_derivative_sign='pos',
                               bracket_duration=2 * u.hour, n_brackets_nearest=7, n_brackets_next=14)
def _find_best_crossing(time, which, metric_fn, fn_derivative_sign, bracket_duration=2 * u.hour,
                        n_brackets_nearest=7, n_brackets_next=14):
    """ Locate a zero crossing of metric_fn (respecting fn_derivative_sign), searching
    forward, backward, or in both time directions, and return its refined time.
    :param time: reference time for the search.
    :param which: {'next', 'previous', 'nearest'} search direction.
    :param metric_fn: function of a list of times whose zero crossing marks the event.
    :param fn_derivative_sign: 'pos' or 'neg', required slope sign at the crossing.
    :param bracket_duration: width of each search bracket.
    :param n_brackets_nearest: brackets searched in each direction for which='nearest'.
    :param n_brackets_next: brackets searched for which='next' or 'previous'.
    :return: refined event time, or None if no crossing found.
    """
    if which not in ('next', 'previous', 'nearest'):
        raise ValueError('Parameter \'which\' must be in {\'next\', \'previous\', or \'nearest\'}.')
    if which == 'next':
        i, bracket_times, bracket_values = _find_next_crossing(
            time, metric_fn, fn_derivative_sign, bracket_duration, n_brackets=n_brackets_next)
        return _refine_event_time(i, bracket_times, bracket_values, metric_fn)
    if which == 'previous':
        i, bracket_times, bracket_values = _find_previous_crossing(
            time, metric_fn, fn_derivative_sign, bracket_duration, n_brackets=n_brackets_next)
        return _refine_event_time(i, bracket_times, bracket_values, metric_fn)
    # which == 'nearest': search both directions with the shorter bracket count.
    i_next, next_times, next_values = _find_next_crossing(
        time, metric_fn, fn_derivative_sign, bracket_duration, n_brackets=n_brackets_nearest)
    i_prev, prev_times, prev_values = _find_previous_crossing(
        time, metric_fn, fn_derivative_sign, bracket_duration, n_brackets=n_brackets_nearest)
    # Neither direction found a crossing (should never happen in practice):
    if i_next is None and i_prev is None:
        return None
    # Exactly one direction found a crossing -- refine that one:
    if i_prev is None:
        return _refine_event_time(i_next, next_times, next_values, metric_fn)
    if i_next is None:
        return _refine_event_time(i_prev, prev_times, prev_values, metric_fn)
    # Both found: the crossing in the nearer bracket wins without refining the other.
    if i_prev < i_next:
        return _refine_event_time(i_prev, prev_times, prev_values, metric_fn)
    if i_next < i_prev:
        return _refine_event_time(i_next, next_times, next_values, metric_fn)
    # Equal bracket distance: refine both and return whichever refined time is nearer
    # (ties go to the 'next' crossing).
    t_next = _refine_event_time(i_next, next_times, next_values, metric_fn)
    t_prev = _refine_event_time(i_prev, prev_times, prev_values, metric_fn)
    return t_next if abs(time - t_prev) >= abs(t_next - time) else t_prev
def _find_next_crossing(time, metric_fn, fn_derivative_sign, bracket_duration, n_brackets):
""" Find next crossing of metric_fn through zero (with fn_derivative_sign respected if given).
:param time:
:param metric_fn:
:param fn_derivative_sign: 'pos' or 'neg':
:param bracket_duration:
:param n_brackets:
:return: 2-tuple of Times bracketing the crossing, or None if no crossing found.
"""
times = [time + i * bracket_duration for i in range(n_brackets + 1)] # times[0] == time.
fn_values = metric_fn(times)
if fn_derivative_sign == 'pos':
has_crossing = [fn_values[i] <= 0 and fn_values[i + 1] > 0 for i in range(len(times) - 1)]
elif fn_derivative_sign == 'neg':
has_crossing = [fn_values[i] > 0 and fn_values[i + 1] <= 0 for i in range(len(times) - 1)]
else:
ValueError('fn_derivative_sign must be \'pos\' or \'neg\'.')
if has_crossing.count(True) <= 0:
return None, None, None
i_found = has_crossing.index(True)
return i_found, times, fn_values
def _find_previous_crossing(time, metric_fn, fn_derivative_sign, bracket_duration, n_brackets):
""" Find next crossing of metric_fn through zero (with fn_derivative_sign respected if given).
:param time:
:param metric_fn:
:param fn_derivative_sign: 'pos' or 'neg':
:param bracket_duration:
:param n_brackets:
:return: 2-tuple of Times bracketing the crossing, or None if no crossing found.
"""
times = [time - i * bracket_duration for i in range(n_brackets + 1)] # times[0] == time, reverse time.
fn_values = metric_fn(times)
if fn_derivative_sign == 'pos':
has_crossing = [fn_values[i + 1] <= 0 and fn_values[i] > 0 for i in range(len(times) - 1)]
elif fn_derivative_sign == 'neg':
has_crossing = [fn_values[i + 1] > 0 and fn_values[i] <= 0 for i in range(len(times) - 1)]
else:
ValueError('fn_derivative_sign must be \'pos\' or \'neg\'.')
if has_crossing.count(True) <= 0:
return None, None, None
i_found = has_crossing.index(True)
return i_found, times, fn_values
def _refine_event_time(i_bracket, times, fn_values, metric_function):
    """ For a smooth metric function and initial timespan near event time, return a refined estimate
    (to given tolerance) of event time (i.e., at which metric function equals zero).
    Typically used in estimating target rise and set times (fn=altitude-target_horizon) and
    meridian transit times (fn=local hour angle).
    :param i_bracket: bracket index selected to refine. [int]
    :param times: list of times defining the brackets. [list of astropy.Time objects]
    :param fn_values: list of function values corresponding to times. [list of floats (dimensionless)]
    :param metric_function: a smooth function defined to be zero at time of desired event.
        This function takes a list of times as its sole parameter. [function object]
    :return: best estimate of event time. [astropy.time.Time]
    """
    def _refine_time(times, fn_values):
        # Fit a parabola through 3 equally spaced (time, value) points, solve for its zeroes,
        # and return the quadratic root nearer the simple linear-interpolation estimate.
        if len(times) != 3 or len(fn_values) != 3:
            raise ValueError('_refine_time() requires exactly 3 equally spaced times and 3 fn values.')
        dt_hour = (times[1] - times[0]).sec / 3600.0  # time spacing in hours [float].
        t0, t1, t2 = tuple(times)
        y0, y1, y2 = tuple(fn_values)
        # Finite-difference coefficients of y(x) = a*x**2 + b*x + c, x in hours relative to t1:
        a = (y2 - 2 * y1 + y0) / (2 * dt_hour ** 2)
        b = (y2 - y0) / (2 * dt_hour)
        c = y1
        # NOTE(review): assumes discriminant >= 0 and a != 0 near a true crossing; otherwise
        # sqrt() or the divisions below will raise -- confirm callers guarantee this.
        radical = sqrt((b * b) - (4 * a * c))
        root_minus = (-b - radical) / (2 * a)
        root_plus = (-b + radical) / (2 * a)
        time_root_minus = t1 + TimeDelta(root_minus * 3600, format='sec')
        time_root_plus = t1 + TimeDelta(root_plus * 3600, format='sec')
        time_linear_interp = t1 - TimeDelta((y1 / b) * 3600, format='sec')  # may depend on time spacing.
        # Keep whichever quadratic root lies closer to the linear estimate:
        distance_minus = abs(time_root_minus - time_linear_interp)
        distance_plus = abs(time_root_plus - time_linear_interp)
        if distance_minus < distance_plus:
            time_root = time_root_minus
        else:
            time_root = time_root_plus
        return time_root
    # Choose best adjacent third time to go with the two defined by i_bracket
    # (stay inside list bounds; otherwise prefer the side with the smaller |fn value|):
    if i_bracket == 0:
        i_trio = [0, 1, 2]
    elif i_bracket == len(times) - 2:
        i_trio = [len(times) - 3, len(times) - 2, len(times) - 1]
    elif abs(fn_values[i_bracket]) <= abs(fn_values[i_bracket + 1]):
        i_trio = [i_bracket - 1, i_bracket, i_bracket + 1]
    else:
        i_trio = [i_bracket, i_bracket + 1, i_bracket + 2]
    # Get interpolated time:
    # NOTE(review): .copy() presumes list elements support it (astropy Time objects do, and
    # fn_values elements appear to be numpy scalars) -- plain Python floats would not.
    times_3 = [times[i].copy() for i in i_trio]
    fn_values_3 = [fn_values[i].copy() for i in i_trio]
    time_interpolated = _refine_time(times_3, fn_values_3)
    # Correct the interpolated time, re-fitting with 100x finer-spaced times and fresh fn values:
    dt_new = (times_3[1] - times_3[0]).sec / 3600.0 / 100.0  # new spacing, in hours.
    times_new = [time_interpolated + (i - 1) * TimeDelta(dt_new * 3600, format='sec') for i in range(3)]
    fn_values_new = metric_function(times_new)
    time_corrected = _refine_time(times_new, fn_values_new)
    return time_corrected
def workout(max_start_times=None, csv_filename='workout.csv'):
    """ Exercise both this module's moon_rise_time() and astroplan.Observer.moon_rise_time()
    over a year, every approx. 30 minutes (dithered), in all 3 'which' modes.
    Save summary results in a .csv file, one line per (time, which) combination.
    Should be about 52,704 lines. Observer location is Apache Point ('APO').
    :param max_start_times: max number of start times to run, or None for the full year. [int or None]
    :param csv_filename: name of the output file, written in this package's root directory. [str]
    :return: None. Writes csv_filename and prints progress/timings to stdout.
    """
    print('refreshing IERS_A.')
    astroplan.download_IERS_A()
    time_0 = Time('2021-01-01 00:00:00')
    d_seconds = 30 * 60  # 30 minutes between start times.
    d_time = TimeDelta(d_seconds, format='sec')
    if max_start_times is None:
        n_times = int(ceil(1 + (366 * 24 * 3600) / d_seconds))
    else:
        n_times = min(max_start_times, int(ceil(1 + (366 * 24 * 3600) / d_seconds)))
    dithering = 10 * 60  # max dither applied to each start time, in seconds.
    seed(2022)  # fixed seed so the dithered start times are reproducible.
    print('Making all_start_times now.')
    all_start_times = [time_0 + i * d_time + TimeDelta(uniform(-dithering, +dithering), format='sec')
                       for i in range(n_times)]
    print(len(all_start_times), 'start times.')
    astroplan_site_name = 'APO'
    obs = astroplan.Observer.at_site(astroplan_site_name)
    print('Astroplan Site Name: ', astroplan_site_name)
    # Clear all setup for this expensive function, before starting to time while using it:
    _ = obs.moon_altaz(time_0).alt.deg
    slope_timedelta = TimeDelta(60, format='sec')
    workout_start_time = Time.now()
    # Nine column names; the comma after the second 'err_us' is required
    # (it was previously missing, silently concatenating to 'err_usdiff'):
    output_header_line = ';'.join(['start_time',
                                   'which',
                                   'mrt_astropak',
                                   'ms',
                                   'err_us',
                                   'mrt_astroplan',
                                   'ms',
                                   'err_us',
                                   'diff']) + '\n'
    output_lines = [output_header_line]  # one .csv line per (start_time, which) combination.
    i_start_times_done = 0
    for start_time in all_start_times:
        for which in ('nearest', 'next', 'previous'):
            print('********** starting', '{0.iso}'.format(start_time), which)
            # First, time and evaluate this module's moon_rise_time():
            ns_start = perf_counter_ns()
            mrt = moon_rise_time(obs, start_time, which)
            ns_end = perf_counter_ns()
            mrt_ms = round((ns_end - ns_start) / 1000000)
            if not mrt.masked:
                # Error estimate: altitude at reported rise time divided by local altitude slope.
                mrt_alt = obs.moon_altaz(mrt).alt.deg
                later_alt = obs.moon_altaz(mrt + slope_timedelta).alt.deg
                slope = (later_alt - mrt_alt) / slope_timedelta.sec  # deg/seconds
                mrt_error_usec = int(round(1000000 * mrt_alt / slope))  # microseconds
                mrt_error_usec_string = str(mrt_error_usec)
            else:
                e = 'New moon_rise_time failed. If raised: '\
                    '(1) edit astropak to insert \'ERROR\', and '\
                    '(2) edit astroplan section below to calculate its own slope.'
                raise NotImplementedError(e)
            # Now, time and evaluate astroplan.Observer.moon_rise_time():
            ns_start = perf_counter_ns()
            astroplan_mrt = obs.moon_rise_time(start_time, which)
            ns_end = perf_counter_ns()
            astroplan_mrt_ms = round((ns_end - ns_start) / 1000000)
            if not astroplan_mrt.masked:
                # astroplan.Observer.moon_rise_time() succeeded; reuse slope from above.
                astroplan_mrt_alt = obs.moon_altaz(astroplan_mrt).alt.deg
                astroplan_mrt_error_usec = int(round(1000000 * astroplan_mrt_alt / slope))  # microseconds
                mrt_diff_msec = int(round(1000 * (mrt - astroplan_mrt).sec))
                astroplan_mrt_error_usec_string = str(astroplan_mrt_error_usec)
                mrt_diff_msec_string = str(mrt_diff_msec)
            else:
                # astroplan.Observer.moon_rise_time() failed.
                print('****************** obs.moon_rise_time() ERROR DETECTED. ******************')
                astroplan_mrt_error_usec_string = 'ERROR'
                mrt_diff_msec_string = 'ERROR'
            # Append one semicolon-delimited output line:
            this_line = (';'.join(['{0.iso}'.format(start_time),
                                   which.ljust(8),
                                   '{0.iso}'.format(mrt),
                                   str(mrt_ms).rjust(4),
                                   mrt_error_usec_string.rjust(8),
                                   '{0.iso}'.format(astroplan_mrt),
                                   str(astroplan_mrt_ms).rjust(4),
                                   astroplan_mrt_error_usec_string.rjust(8),
                                   mrt_diff_msec_string.rjust(5)])
                         + '\n')
            output_lines.append(this_line)
        i_start_times_done += 1
        if max_start_times is not None and max_start_times >= 1:
            if i_start_times_done >= max_start_times:
                break
    # Write output lines to new .csv file:
    fullpath = os.path.join(THIS_PACKAGE_ROOT_DIRECTORY, csv_filename)
    with open(fullpath, 'w') as f:
        f.writelines(output_lines)
    workout_end_time = Time.now()
    print('workout start time:', '{0.iso}'.format(workout_start_time))
    print('workout end time:  ', '{0.iso}'.format(workout_end_time))
    workout_hours = (workout_end_time - workout_start_time).sec / 3600
    # Was '{0:2f}': width 2, default precision; '.2f' gives the intended 2 decimal places.
    print('workout required:', '{0:.2f}'.format(workout_hours), 'hours.')
:param time: :param which: :param metric_fn: :param fn_derivative_sign: :param bracket_duration: :param n_brackets_nearest: :param n_brackets_next: :return: # Cases: the search for 'next' or 'previous' (or both) found no crossings: # print('########## NO CROSSING FOUND (should never happen).') # print('No previous crossing, refine next only:') # print('No next crossing, refine previous only:') # Cases: either 'next' and 'previous' search found a crossing. # One bracket is clearly nearer than the other, so refine and return. # Remaining case: 'next' and 'previous' brackets appear equally near. # Refine both, and return the nearer refined time. # print('refine both next and previous:') Find next crossing of metric_fn through zero (with fn_derivative_sign respected if given). :param time: :param metric_fn: :param fn_derivative_sign: 'pos' or 'neg': :param bracket_duration: :param n_brackets: :return: 2-tuple of Times bracketing the crossing, or None if no crossing found. # times[0] == time. Find next crossing of metric_fn through zero (with fn_derivative_sign respected if given). :param time: :param metric_fn: :param fn_derivative_sign: 'pos' or 'neg': :param bracket_duration: :param n_brackets: :return: 2-tuple of Times bracketing the crossing, or None if no crossing found. # times[0] == time, reverse time. For a smooth metric function and initial timespan near event time, return a refined estimate (to given tolerance) of event time(i.e., at which metric function equals zero). Typically used in estimating target rise and set times (fn=altitude-target_horizon) and meridian transit times (fn=local hour angle). :param i_bracket: bracket index selected to refine. [int] :param times: list of times defining the brackets. [list of astropy.Time objects] :param fn_values: list of function values corresponding to times. [list of floats (dimensionless)] :param metric_function: a smooth function defined to be zero at time of desired event. 
This function takes datetime as sole parameter. [function object] :return: best estimate of event time. [py datetime] # time spacing in hours [float]. # may depend on time spacing. # Choose best adjacent third time to go with the two defined by i_bracket: # Get interpolated time: # Correct the interpolated time, using new times and function values: # new spacing, in hours. Run both astropak.almanac.moon_rise_time() and astroplan.Observer.moon_rise_time() over a year, every approx. 30 minutes (dithered), in all 3 which modes. Save summary results in a .csv file, one line per (time, which) combination. Should be about 52,704 lines. Use Apache Point as location. # 30 minutes. # 30 minutes. # seconds. # Clear all setup for this expensive function, before starting to time while using it: # one .csv line per (start_time, which) combination. # First, handle astropak.almanac.moon_rise_time(): # deg/seconds # microseconds # Now, handle astroplan.Observer.moon_rise_time(): # astroplan.Observer.moon_rise_time() succeeded. # Use slope from above. # microseconds # astroplan.Observer.moon_rise_time() failed. # Write line to output_data: # abs_mrt_diff_msec = abs(mrt_diff_msec) # Write output lines to new .csv file: | 3.085536 | 3 |